author    Anas Nashif <anas.nashif@intel.com>  2012-10-30 12:57:26 -0700
committer Anas Nashif <anas.nashif@intel.com>  2012-10-30 12:57:26 -0700
commit    1a78a62555be32868418fe52f8e330c9d0f95d5a (patch)
tree      d3765a80e7d3b9640ec2e930743630cd6b9fce2b /boost/asio/detail/impl
Imported Upstream version 1.49.0 (upstream/1.49.0)
Diffstat (limited to 'boost/asio/detail/impl')
-rw-r--r--  boost/asio/detail/impl/descriptor_ops.ipp | 445
-rw-r--r--  boost/asio/detail/impl/dev_poll_reactor.hpp | 80
-rw-r--r--  boost/asio/detail/impl/dev_poll_reactor.ipp | 447
-rw-r--r--  boost/asio/detail/impl/epoll_reactor.hpp | 78
-rw-r--r--  boost/asio/detail/impl/epoll_reactor.ipp | 637
-rw-r--r--  boost/asio/detail/impl/eventfd_select_interrupter.ipp | 166
-rw-r--r--  boost/asio/detail/impl/handler_tracking.ipp | 299
-rw-r--r--  boost/asio/detail/impl/kqueue_reactor.hpp | 82
-rw-r--r--  boost/asio/detail/impl/kqueue_reactor.ipp | 526
-rw-r--r--  boost/asio/detail/impl/pipe_select_interrupter.ipp | 123
-rw-r--r--  boost/asio/detail/impl/posix_event.ipp | 48
-rw-r--r--  boost/asio/detail/impl/posix_mutex.ipp | 48
-rw-r--r--  boost/asio/detail/impl/posix_thread.ipp | 76
-rw-r--r--  boost/asio/detail/impl/posix_tss_ptr.ipp | 48
-rw-r--r--  boost/asio/detail/impl/reactive_descriptor_service.ipp | 205
-rw-r--r--  boost/asio/detail/impl/reactive_serial_port_service.ipp | 153
-rw-r--r--  boost/asio/detail/impl/reactive_socket_service_base.ipp | 265
-rw-r--r--  boost/asio/detail/impl/resolver_service_base.ipp | 132
-rw-r--r--  boost/asio/detail/impl/select_reactor.hpp | 87
-rw-r--r--  boost/asio/detail/impl/select_reactor.ipp | 314
-rw-r--r--  boost/asio/detail/impl/service_registry.hpp | 90
-rw-r--r--  boost/asio/detail/impl/service_registry.ipp | 190
-rw-r--r--  boost/asio/detail/impl/signal_set_service.ipp | 593
-rw-r--r--  boost/asio/detail/impl/socket_ops.ipp | 3062
-rw-r--r--  boost/asio/detail/impl/socket_select_interrupter.ipp | 173
-rw-r--r--  boost/asio/detail/impl/strand_service.hpp | 121
-rw-r--r--  boost/asio/detail/impl/strand_service.ipp | 171
-rw-r--r--  boost/asio/detail/impl/task_io_service.hpp | 75
-rw-r--r--  boost/asio/detail/impl/task_io_service.ipp | 494
-rw-r--r--  boost/asio/detail/impl/throw_error.ipp | 47
-rw-r--r--  boost/asio/detail/impl/timer_queue.ipp | 87
-rw-r--r--  boost/asio/detail/impl/timer_queue_ptime.ipp | 82
-rw-r--r--  boost/asio/detail/impl/timer_queue_set.ipp | 103
-rw-r--r--  boost/asio/detail/impl/win_event.ipp | 52
-rw-r--r--  boost/asio/detail/impl/win_iocp_handle_service.ipp | 526
-rw-r--r--  boost/asio/detail/impl/win_iocp_io_service.hpp | 131
-rw-r--r--  boost/asio/detail/impl/win_iocp_io_service.ipp | 509
-rw-r--r--  boost/asio/detail/impl/win_iocp_serial_port_service.ipp | 182
-rw-r--r--  boost/asio/detail/impl/win_iocp_socket_service_base.ipp | 657
-rw-r--r--  boost/asio/detail/impl/win_mutex.ipp | 80
-rw-r--r--  boost/asio/detail/impl/win_object_handle_service.ipp | 446
-rw-r--r--  boost/asio/detail/impl/win_static_mutex.ipp | 120
-rw-r--r--  boost/asio/detail/impl/win_thread.ipp | 141
-rw-r--r--  boost/asio/detail/impl/win_tss_ptr.ipp | 59
-rw-r--r--  boost/asio/detail/impl/winsock_init.ipp | 71
45 files changed, 12521 insertions, 0 deletions
diff --git a/boost/asio/detail/impl/descriptor_ops.ipp b/boost/asio/detail/impl/descriptor_ops.ipp
new file mode 100644
index 0000000000..6c3528f309
--- /dev/null
+++ b/boost/asio/detail/impl/descriptor_ops.ipp
@@ -0,0 +1,445 @@
+//
+// detail/impl/descriptor_ops.ipp
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+//
+// Copyright (c) 2003-2012 Christopher M. Kohlhoff (chris at kohlhoff dot com)
+//
+// Distributed under the Boost Software License, Version 1.0. (See accompanying
+// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+//
+
+#ifndef BOOST_ASIO_DETAIL_IMPL_DESCRIPTOR_OPS_IPP
+#define BOOST_ASIO_DETAIL_IMPL_DESCRIPTOR_OPS_IPP
+
+#if defined(_MSC_VER) && (_MSC_VER >= 1200)
+# pragma once
+#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
+
+#include <boost/asio/detail/config.hpp>
+#include <cerrno>
+#include <boost/asio/detail/descriptor_ops.hpp>
+#include <boost/asio/error.hpp>
+
+#if !defined(BOOST_WINDOWS) && !defined(__CYGWIN__)
+
+#include <boost/asio/detail/push_options.hpp>
+
+namespace boost {
+namespace asio {
+namespace detail {
+namespace descriptor_ops {
+
+int open(const char* path, int flags, boost::system::error_code& ec)
+{
+ errno = 0;
+ int result = error_wrapper(::open(path, flags), ec);
+ if (result >= 0)
+ ec = boost::system::error_code();
+ return result;
+}
+
+int close(int d, state_type& state, boost::system::error_code& ec)
+{
+ int result = 0;
+ if (d != -1)
+ {
+ errno = 0;
+ result = error_wrapper(::close(d), ec);
+
+ if (result != 0
+ && (ec == boost::asio::error::would_block
+ || ec == boost::asio::error::try_again))
+ {
+ // According to UNIX Network Programming Vol. 1, it is possible for
+ // close() to fail with EWOULDBLOCK under certain circumstances. What
+ // isn't clear is the state of the descriptor after this error. The one
+ // current OS where this behaviour is seen, Windows, says that the socket
+ // remains open. Therefore we'll put the descriptor back into blocking
+ // mode and have another attempt at closing it.
+#if defined(__SYMBIAN32__)
+ int flags = ::fcntl(d, F_GETFL, 0);
+ if (flags >= 0)
+ ::fcntl(d, F_SETFL, flags & ~O_NONBLOCK);
+#else // defined(__SYMBIAN32__)
+ ioctl_arg_type arg = 0;
+ ::ioctl(d, FIONBIO, &arg);
+#endif // defined(__SYMBIAN32__)
+ state &= ~non_blocking;
+
+ errno = 0;
+ result = error_wrapper(::close(d), ec);
+ }
+ }
+
+ if (result == 0)
+ ec = boost::system::error_code();
+ return result;
+}
+
+bool set_user_non_blocking(int d, state_type& state,
+ bool value, boost::system::error_code& ec)
+{
+ if (d == -1)
+ {
+ ec = boost::asio::error::bad_descriptor;
+ return false;
+ }
+
+ errno = 0;
+#if defined(__SYMBIAN32__)
+ int result = error_wrapper(::fcntl(d, F_GETFL, 0), ec);
+ if (result >= 0)
+ {
+ errno = 0;
+ int flag = (value ? (result | O_NONBLOCK) : (result & ~O_NONBLOCK));
+ result = error_wrapper(::fcntl(d, F_SETFL, flag), ec);
+ }
+#else // defined(__SYMBIAN32__)
+ ioctl_arg_type arg = (value ? 1 : 0);
+ int result = error_wrapper(::ioctl(d, FIONBIO, &arg), ec);
+#endif // defined(__SYMBIAN32__)
+
+ if (result >= 0)
+ {
+ ec = boost::system::error_code();
+ if (value)
+ state |= user_set_non_blocking;
+ else
+ {
+ // Clearing the user-set non-blocking mode always overrides any
+ // internally-set non-blocking flag. Any subsequent asynchronous
+ // operations will need to re-enable non-blocking I/O.
+ state &= ~(user_set_non_blocking | internal_non_blocking);
+ }
+ return true;
+ }
+
+ return false;
+}
+
+bool set_internal_non_blocking(int d, state_type& state,
+ bool value, boost::system::error_code& ec)
+{
+ if (d == -1)
+ {
+ ec = boost::asio::error::bad_descriptor;
+ return false;
+ }
+
+ if (!value && (state & user_set_non_blocking))
+ {
+ // It does not make sense to clear the internal non-blocking flag if the
+ // user still wants non-blocking behaviour. Return an error and let the
+ // caller figure out whether to update the user-set non-blocking flag.
+ ec = boost::asio::error::invalid_argument;
+ return false;
+ }
+
+ errno = 0;
+#if defined(__SYMBIAN32__)
+ int result = error_wrapper(::fcntl(d, F_GETFL, 0), ec);
+ if (result >= 0)
+ {
+ errno = 0;
+ int flag = (value ? (result | O_NONBLOCK) : (result & ~O_NONBLOCK));
+ result = error_wrapper(::fcntl(d, F_SETFL, flag), ec);
+ }
+#else // defined(__SYMBIAN32__)
+ ioctl_arg_type arg = (value ? 1 : 0);
+ int result = error_wrapper(::ioctl(d, FIONBIO, &arg), ec);
+#endif // defined(__SYMBIAN32__)
+
+ if (result >= 0)
+ {
+ ec = boost::system::error_code();
+ if (value)
+ state |= internal_non_blocking;
+ else
+ state &= ~internal_non_blocking;
+ return true;
+ }
+
+ return false;
+}
+
+std::size_t sync_read(int d, state_type state, buf* bufs,
+ std::size_t count, bool all_empty, boost::system::error_code& ec)
+{
+ if (d == -1)
+ {
+ ec = boost::asio::error::bad_descriptor;
+ return 0;
+ }
+
+ // A request to read 0 bytes on a stream is a no-op.
+ if (all_empty)
+ {
+ ec = boost::system::error_code();
+ return 0;
+ }
+
+ // Read some data.
+ for (;;)
+ {
+ // Try to complete the operation without blocking.
+ errno = 0;
+ int bytes = error_wrapper(::readv(d, bufs, static_cast<int>(count)), ec);
+
+ // Check if operation succeeded.
+ if (bytes > 0)
+ return bytes;
+
+ // Check for EOF.
+ if (bytes == 0)
+ {
+ ec = boost::asio::error::eof;
+ return 0;
+ }
+
+ // Operation failed.
+ if ((state & user_set_non_blocking)
+ || (ec != boost::asio::error::would_block
+ && ec != boost::asio::error::try_again))
+ return 0;
+
+ // Wait for descriptor to become ready.
+ if (descriptor_ops::poll_read(d, 0, ec) < 0)
+ return 0;
+ }
+}
+
+bool non_blocking_read(int d, buf* bufs, std::size_t count,
+ boost::system::error_code& ec, std::size_t& bytes_transferred)
+{
+ for (;;)
+ {
+ // Read some data.
+ errno = 0;
+ int bytes = error_wrapper(::readv(d, bufs, static_cast<int>(count)), ec);
+
+ // Check for end of stream.
+ if (bytes == 0)
+ {
+ ec = boost::asio::error::eof;
+ return true;
+ }
+
+ // Retry operation if interrupted by signal.
+ if (ec == boost::asio::error::interrupted)
+ continue;
+
+ // Check if we need to run the operation again.
+ if (ec == boost::asio::error::would_block
+ || ec == boost::asio::error::try_again)
+ return false;
+
+ // Operation is complete.
+ if (bytes > 0)
+ {
+ ec = boost::system::error_code();
+ bytes_transferred = bytes;
+ }
+ else
+ bytes_transferred = 0;
+
+ return true;
+ }
+}
+
+std::size_t sync_write(int d, state_type state, const buf* bufs,
+ std::size_t count, bool all_empty, boost::system::error_code& ec)
+{
+ if (d == -1)
+ {
+ ec = boost::asio::error::bad_descriptor;
+ return 0;
+ }
+
+ // A request to write 0 bytes on a stream is a no-op.
+ if (all_empty)
+ {
+ ec = boost::system::error_code();
+ return 0;
+ }
+
+ // Write some data.
+ for (;;)
+ {
+ // Try to complete the operation without blocking.
+ errno = 0;
+ int bytes = error_wrapper(::writev(d, bufs, static_cast<int>(count)), ec);
+
+ // Check if operation succeeded.
+ if (bytes > 0)
+ return bytes;
+
+ // Operation failed.
+ if ((state & user_set_non_blocking)
+ || (ec != boost::asio::error::would_block
+ && ec != boost::asio::error::try_again))
+ return 0;
+
+ // Wait for descriptor to become ready.
+ if (descriptor_ops::poll_write(d, 0, ec) < 0)
+ return 0;
+ }
+}
+
+bool non_blocking_write(int d, const buf* bufs, std::size_t count,
+ boost::system::error_code& ec, std::size_t& bytes_transferred)
+{
+ for (;;)
+ {
+ // Write some data.
+ errno = 0;
+ int bytes = error_wrapper(::writev(d, bufs, static_cast<int>(count)), ec);
+
+ // Retry operation if interrupted by signal.
+ if (ec == boost::asio::error::interrupted)
+ continue;
+
+ // Check if we need to run the operation again.
+ if (ec == boost::asio::error::would_block
+ || ec == boost::asio::error::try_again)
+ return false;
+
+ // Operation is complete.
+ if (bytes >= 0)
+ {
+ ec = boost::system::error_code();
+ bytes_transferred = bytes;
+ }
+ else
+ bytes_transferred = 0;
+
+ return true;
+ }
+}
+
+int ioctl(int d, state_type& state, long cmd,
+ ioctl_arg_type* arg, boost::system::error_code& ec)
+{
+ if (d == -1)
+ {
+ ec = boost::asio::error::bad_descriptor;
+ return -1;
+ }
+
+ errno = 0;
+ int result = error_wrapper(::ioctl(d, cmd, arg), ec);
+
+ if (result >= 0)
+ {
+ ec = boost::system::error_code();
+
+ // When updating the non-blocking mode we always perform the ioctl syscall,
+ // even if the flags would otherwise indicate that the descriptor is
+ // already in the correct state. This ensures that the underlying
+ // descriptor is put into the state that has been requested by the user. If
+ // the ioctl syscall was successful then we need to update the flags to
+ // match.
+ if (cmd == static_cast<long>(FIONBIO))
+ {
+ if (*arg)
+ {
+ state |= user_set_non_blocking;
+ }
+ else
+ {
+ // Clearing the non-blocking mode always overrides any internally-set
+ // non-blocking flag. Any subsequent asynchronous operations will need
+ // to re-enable non-blocking I/O.
+ state &= ~(user_set_non_blocking | internal_non_blocking);
+ }
+ }
+ }
+
+ return result;
+}
+
+int fcntl(int d, long cmd, boost::system::error_code& ec)
+{
+ if (d == -1)
+ {
+ ec = boost::asio::error::bad_descriptor;
+ return -1;
+ }
+
+ errno = 0;
+ int result = error_wrapper(::fcntl(d, cmd), ec);
+ if (result != -1)
+ ec = boost::system::error_code();
+ return result;
+}
+
+int fcntl(int d, long cmd, long arg, boost::system::error_code& ec)
+{
+ if (d == -1)
+ {
+ ec = boost::asio::error::bad_descriptor;
+ return -1;
+ }
+
+ errno = 0;
+ int result = error_wrapper(::fcntl(d, cmd, arg), ec);
+ if (result != -1)
+ ec = boost::system::error_code();
+ return result;
+}
+
+int poll_read(int d, state_type state, boost::system::error_code& ec)
+{
+ if (d == -1)
+ {
+ ec = boost::asio::error::bad_descriptor;
+ return -1;
+ }
+
+ pollfd fds;
+ fds.fd = d;
+ fds.events = POLLIN;
+ fds.revents = 0;
+ int timeout = (state & user_set_non_blocking) ? 0 : -1;
+ errno = 0;
+ int result = error_wrapper(::poll(&fds, 1, timeout), ec);
+ if (result == 0)
+ ec = (state & user_set_non_blocking)
+ ? boost::asio::error::would_block : boost::system::error_code();
+ else if (result > 0)
+ ec = boost::system::error_code();
+ return result;
+}
+
+int poll_write(int d, state_type state, boost::system::error_code& ec)
+{
+ if (d == -1)
+ {
+ ec = boost::asio::error::bad_descriptor;
+ return -1;
+ }
+
+ pollfd fds;
+ fds.fd = d;
+ fds.events = POLLOUT;
+ fds.revents = 0;
+ int timeout = (state & user_set_non_blocking) ? 0 : -1;
+ errno = 0;
+ int result = error_wrapper(::poll(&fds, 1, timeout), ec);
+ if (result == 0)
+ ec = (state & user_set_non_blocking)
+ ? boost::asio::error::would_block : boost::system::error_code();
+ else if (result > 0)
+ ec = boost::system::error_code();
+ return result;
+}
+
+} // namespace descriptor_ops
+} // namespace detail
+} // namespace asio
+} // namespace boost
+
+#include <boost/asio/detail/pop_options.hpp>
+
+#endif // !defined(BOOST_WINDOWS) && !defined(__CYGWIN__)
+
+#endif // BOOST_ASIO_DETAIL_IMPL_DESCRIPTOR_OPS_IPP
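[Editorial note] The synchronous operations in descriptor_ops.ipp all follow the same control flow: attempt the I/O call, and if the descriptor is not ready (EWOULDBLOCK/EAGAIN) and the user has not requested non-blocking behaviour, wait for readiness with poll() and retry. Below is a minimal standalone sketch of that loop using only the raw POSIX calls; it is an illustration, not part of the imported sources, and read_some() is a name invented for the example.

#include <cerrno>
#include <cstddef>
#include <poll.h>
#include <sys/uio.h>
#include <unistd.h>

// Minimal sketch of the sync_read() control flow above: try readv(),
// and if the descriptor is not ready, block in poll() and try again.
ssize_t read_some(int fd, void* data, std::size_t size)
{
  iovec buf;
  buf.iov_base = data;
  buf.iov_len = size;
  for (;;)
  {
    ssize_t bytes = ::readv(fd, &buf, 1);
    if (bytes >= 0)
      return bytes;                           // data read, or 0 at end of stream
    if (errno == EINTR)
      continue;                               // interrupted by a signal: retry
    if (errno != EAGAIN && errno != EWOULDBLOCK)
      return -1;                              // hard error: report to the caller
    pollfd fds = { fd, POLLIN, 0 };
    if (::poll(&fds, 1, -1) < 0 && errno != EINTR)
      return -1;                              // wait until the descriptor is readable
  }
}

The upstream sync_read() adds descriptor-state tracking and translation of errno into boost::system::error_code on top of this same loop.
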
diff --git a/boost/asio/detail/impl/dev_poll_reactor.hpp b/boost/asio/detail/impl/dev_poll_reactor.hpp
new file mode 100644
index 0000000000..12860af93f
--- /dev/null
+++ b/boost/asio/detail/impl/dev_poll_reactor.hpp
@@ -0,0 +1,80 @@
+//
+// detail/impl/dev_poll_reactor.hpp
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+//
+// Copyright (c) 2003-2012 Christopher M. Kohlhoff (chris at kohlhoff dot com)
+//
+// Distributed under the Boost Software License, Version 1.0. (See accompanying
+// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+//
+
+#ifndef BOOST_ASIO_DETAIL_IMPL_DEV_POLL_REACTOR_HPP
+#define BOOST_ASIO_DETAIL_IMPL_DEV_POLL_REACTOR_HPP
+
+#if defined(_MSC_VER) && (_MSC_VER >= 1200)
+# pragma once
+#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
+
+#include <boost/asio/detail/config.hpp>
+
+#if defined(BOOST_ASIO_HAS_DEV_POLL)
+
+#include <boost/asio/detail/push_options.hpp>
+
+namespace boost {
+namespace asio {
+namespace detail {
+
+template <typename Time_Traits>
+void dev_poll_reactor::add_timer_queue(timer_queue<Time_Traits>& queue)
+{
+ do_add_timer_queue(queue);
+}
+
+template <typename Time_Traits>
+void dev_poll_reactor::remove_timer_queue(timer_queue<Time_Traits>& queue)
+{
+ do_remove_timer_queue(queue);
+}
+
+template <typename Time_Traits>
+void dev_poll_reactor::schedule_timer(timer_queue<Time_Traits>& queue,
+ const typename Time_Traits::time_type& time,
+ typename timer_queue<Time_Traits>::per_timer_data& timer, wait_op* op)
+{
+ boost::asio::detail::mutex::scoped_lock lock(mutex_);
+
+ if (shutdown_)
+ {
+ io_service_.post_immediate_completion(op);
+ return;
+ }
+
+ bool earliest = queue.enqueue_timer(time, timer, op);
+ io_service_.work_started();
+ if (earliest)
+ interrupter_.interrupt();
+}
+
+template <typename Time_Traits>
+std::size_t dev_poll_reactor::cancel_timer(timer_queue<Time_Traits>& queue,
+ typename timer_queue<Time_Traits>::per_timer_data& timer,
+ std::size_t max_cancelled)
+{
+ boost::asio::detail::mutex::scoped_lock lock(mutex_);
+ op_queue<operation> ops;
+ std::size_t n = queue.cancel_timer(timer, ops, max_cancelled);
+ lock.unlock();
+ io_service_.post_deferred_completions(ops);
+ return n;
+}
+
+} // namespace detail
+} // namespace asio
+} // namespace boost
+
+#include <boost/asio/detail/pop_options.hpp>
+
+#endif // defined(BOOST_ASIO_HAS_DEV_POLL)
+
+#endif // BOOST_ASIO_DETAIL_IMPL_DEV_POLL_REACTOR_HPP
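[Editorial note] schedule_timer() wakes the reactor only when the newly queued timer becomes the earliest deadline, so an idle reactor thread is interrupted only when its wait interval actually needs to shorten. Below is a simplified model of that idea; it substitutes a std::multimap and a condition variable for asio's timer_queue and interrupter, and toy_timer_queue is an invented name used for illustration only.

#include <chrono>
#include <condition_variable>
#include <functional>
#include <map>
#include <mutex>

// Simplified model of schedule_timer(): enqueue under the reactor lock and
// wake the waiting thread only if the new deadline is now the earliest one.
class toy_timer_queue
{
public:
  using clock = std::chrono::steady_clock;

  void schedule(clock::time_point when, std::function<void()> handler)
  {
    std::lock_guard<std::mutex> lock(mutex_);
    bool earliest = timers_.empty() || when < timers_.begin()->first;
    timers_.emplace(when, std::move(handler));
    if (earliest)
      wakeup_.notify_one();   // plays the role of interrupter_.interrupt()
  }

  // Reactor loop body: wait until the earliest deadline, then run ready timers.
  void run_once()
  {
    std::unique_lock<std::mutex> lock(mutex_);
    if (timers_.empty())
      wakeup_.wait(lock);
    else
      wakeup_.wait_until(lock, timers_.begin()->first);

    auto now = clock::now();
    while (!timers_.empty() && timers_.begin()->first <= now)
    {
      auto handler = std::move(timers_.begin()->second);
      timers_.erase(timers_.begin());
      lock.unlock();          // run handlers outside the lock, as the reactor does
      handler();
      lock.lock();
    }
  }

private:
  std::mutex mutex_;
  std::condition_variable wakeup_;
  std::multimap<clock::time_point, std::function<void()>> timers_;
};

cancel_timer() follows the same shape: remove the entries under the lock, then hand the cancelled handlers to the io_service for deferred completion outside it.
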
diff --git a/boost/asio/detail/impl/dev_poll_reactor.ipp b/boost/asio/detail/impl/dev_poll_reactor.ipp
new file mode 100644
index 0000000000..a648bf1532
--- /dev/null
+++ b/boost/asio/detail/impl/dev_poll_reactor.ipp
@@ -0,0 +1,447 @@
+//
+// detail/impl/dev_poll_reactor.ipp
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+//
+// Copyright (c) 2003-2012 Christopher M. Kohlhoff (chris at kohlhoff dot com)
+//
+// Distributed under the Boost Software License, Version 1.0. (See accompanying
+// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+//
+
+#ifndef BOOST_ASIO_DETAIL_IMPL_DEV_POLL_REACTOR_IPP
+#define BOOST_ASIO_DETAIL_IMPL_DEV_POLL_REACTOR_IPP
+
+#if defined(_MSC_VER) && (_MSC_VER >= 1200)
+# pragma once
+#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
+
+#include <boost/asio/detail/config.hpp>
+
+#if defined(BOOST_ASIO_HAS_DEV_POLL)
+
+#include <boost/assert.hpp>
+#include <boost/asio/detail/dev_poll_reactor.hpp>
+#include <boost/asio/detail/throw_error.hpp>
+#include <boost/asio/error.hpp>
+
+#include <boost/asio/detail/push_options.hpp>
+
+namespace boost {
+namespace asio {
+namespace detail {
+
+dev_poll_reactor::dev_poll_reactor(boost::asio::io_service& io_service)
+ : boost::asio::detail::service_base<dev_poll_reactor>(io_service),
+ io_service_(use_service<io_service_impl>(io_service)),
+ mutex_(),
+ dev_poll_fd_(do_dev_poll_create()),
+ interrupter_(),
+ shutdown_(false)
+{
+ // Add the interrupter's descriptor to /dev/poll.
+ ::pollfd ev = { 0, 0, 0 };
+ ev.fd = interrupter_.read_descriptor();
+ ev.events = POLLIN | POLLERR;
+ ev.revents = 0;
+ ::write(dev_poll_fd_, &ev, sizeof(ev));
+}
+
+dev_poll_reactor::~dev_poll_reactor()
+{
+ shutdown_service();
+ ::close(dev_poll_fd_);
+}
+
+void dev_poll_reactor::shutdown_service()
+{
+ boost::asio::detail::mutex::scoped_lock lock(mutex_);
+ shutdown_ = true;
+ lock.unlock();
+
+ op_queue<operation> ops;
+
+ for (int i = 0; i < max_ops; ++i)
+ op_queue_[i].get_all_operations(ops);
+
+ timer_queues_.get_all_timers(ops);
+
+ io_service_.abandon_operations(ops);
+}
+
+// Helper class to re-register all descriptors with /dev/poll.
+class dev_poll_reactor::fork_helper
+{
+public:
+ fork_helper(dev_poll_reactor* reactor, short events)
+ : reactor_(reactor), events_(events)
+ {
+ }
+
+ bool set(int descriptor)
+ {
+ ::pollfd& ev = reactor_->add_pending_event_change(descriptor);
+ ev.events = events_;
+ return true;
+ }
+
+private:
+ dev_poll_reactor* reactor_;
+ short events_;
+};
+
+void dev_poll_reactor::fork_service(boost::asio::io_service::fork_event fork_ev)
+{
+ if (fork_ev == boost::asio::io_service::fork_child)
+ {
+ detail::mutex::scoped_lock lock(mutex_);
+
+ if (dev_poll_fd_ != -1)
+ ::close(dev_poll_fd_);
+ dev_poll_fd_ = -1;
+ dev_poll_fd_ = do_dev_poll_create();
+
+ interrupter_.recreate();
+
+ // Add the interrupter's descriptor to /dev/poll.
+ ::pollfd ev = { 0, 0, 0 };
+ ev.fd = interrupter_.read_descriptor();
+ ev.events = POLLIN | POLLERR;
+ ev.revents = 0;
+ ::write(dev_poll_fd_, &ev, sizeof(ev));
+
+ // Re-register all descriptors with /dev/poll. The changes will be written
+ // to the /dev/poll descriptor the next time the reactor is run.
+ op_queue<operation> ops;
+ fork_helper read_op_helper(this, POLLERR | POLLHUP | POLLIN);
+ op_queue_[read_op].get_descriptors(read_op_helper, ops);
+ fork_helper write_op_helper(this, POLLERR | POLLHUP | POLLOUT);
+ op_queue_[write_op].get_descriptors(write_op_helper, ops);
+ fork_helper except_op_helper(this, POLLERR | POLLHUP | POLLPRI);
+ op_queue_[except_op].get_descriptors(except_op_helper, ops);
+ interrupter_.interrupt();
+
+ // The ops op_queue will always be empty because the fork_helper's set()
+ // member function never returns false.
+ BOOST_ASSERT(ops.empty());
+ }
+}
+
+void dev_poll_reactor::init_task()
+{
+ io_service_.init_task();
+}
+
+int dev_poll_reactor::register_descriptor(socket_type, per_descriptor_data&)
+{
+ return 0;
+}
+
+int dev_poll_reactor::register_internal_descriptor(int op_type,
+ socket_type descriptor, per_descriptor_data&, reactor_op* op)
+{
+ boost::asio::detail::mutex::scoped_lock lock(mutex_);
+
+ op_queue_[op_type].enqueue_operation(descriptor, op);
+ ::pollfd& ev = add_pending_event_change(descriptor);
+ ev.events = POLLERR | POLLHUP;
+ switch (op_type)
+ {
+ case read_op: ev.events |= POLLIN; break;
+ case write_op: ev.events |= POLLOUT; break;
+ case except_op: ev.events |= POLLPRI; break;
+ default: break;
+ }
+ interrupter_.interrupt();
+
+ return 0;
+}
+
+void dev_poll_reactor::move_descriptor(socket_type,
+ dev_poll_reactor::per_descriptor_data&,
+ dev_poll_reactor::per_descriptor_data&)
+{
+}
+
+void dev_poll_reactor::start_op(int op_type, socket_type descriptor,
+ dev_poll_reactor::per_descriptor_data&,
+ reactor_op* op, bool allow_speculative)
+{
+ boost::asio::detail::mutex::scoped_lock lock(mutex_);
+
+ if (shutdown_)
+ {
+ post_immediate_completion(op);
+ return;
+ }
+
+ if (allow_speculative)
+ {
+ if (op_type != read_op || !op_queue_[except_op].has_operation(descriptor))
+ {
+ if (!op_queue_[op_type].has_operation(descriptor))
+ {
+ if (op->perform())
+ {
+ lock.unlock();
+ io_service_.post_immediate_completion(op);
+ return;
+ }
+ }
+ }
+ }
+
+ bool first = op_queue_[op_type].enqueue_operation(descriptor, op);
+ io_service_.work_started();
+ if (first)
+ {
+ ::pollfd& ev = add_pending_event_change(descriptor);
+ ev.events = POLLERR | POLLHUP;
+ if (op_type == read_op
+ || op_queue_[read_op].has_operation(descriptor))
+ ev.events |= POLLIN;
+ if (op_type == write_op
+ || op_queue_[write_op].has_operation(descriptor))
+ ev.events |= POLLOUT;
+ if (op_type == except_op
+ || op_queue_[except_op].has_operation(descriptor))
+ ev.events |= POLLPRI;
+ interrupter_.interrupt();
+ }
+}
+
+void dev_poll_reactor::cancel_ops(socket_type descriptor,
+ dev_poll_reactor::per_descriptor_data&)
+{
+ boost::asio::detail::mutex::scoped_lock lock(mutex_);
+ cancel_ops_unlocked(descriptor, boost::asio::error::operation_aborted);
+}
+
+void dev_poll_reactor::deregister_descriptor(socket_type descriptor,
+ dev_poll_reactor::per_descriptor_data&, bool)
+{
+ boost::asio::detail::mutex::scoped_lock lock(mutex_);
+
+ // Remove the descriptor from /dev/poll.
+ ::pollfd& ev = add_pending_event_change(descriptor);
+ ev.events = POLLREMOVE;
+ interrupter_.interrupt();
+
+ // Cancel any outstanding operations associated with the descriptor.
+ cancel_ops_unlocked(descriptor, boost::asio::error::operation_aborted);
+}
+
+void dev_poll_reactor::deregister_internal_descriptor(
+ socket_type descriptor, dev_poll_reactor::per_descriptor_data&)
+{
+ boost::asio::detail::mutex::scoped_lock lock(mutex_);
+
+ // Remove the descriptor from /dev/poll. Since this function is only called
+ // during a fork, we can apply the change immediately.
+ ::pollfd ev = { 0, 0, 0 };
+ ev.fd = descriptor;
+ ev.events = POLLREMOVE;
+ ev.revents = 0;
+ ::write(dev_poll_fd_, &ev, sizeof(ev));
+
+ // Destroy all operations associated with the descriptor.
+ op_queue<operation> ops;
+ boost::system::error_code ec;
+ for (int i = 0; i < max_ops; ++i)
+ op_queue_[i].cancel_operations(descriptor, ops, ec);
+}
+
+void dev_poll_reactor::run(bool block, op_queue<operation>& ops)
+{
+ boost::asio::detail::mutex::scoped_lock lock(mutex_);
+
+ // We can return immediately if there's no work to do and the reactor is
+ // not supposed to block.
+ if (!block && op_queue_[read_op].empty() && op_queue_[write_op].empty()
+ && op_queue_[except_op].empty() && timer_queues_.all_empty())
+ return;
+
+ // Write the pending event registration changes to the /dev/poll descriptor.
+ std::size_t events_size = sizeof(::pollfd) * pending_event_changes_.size();
+ if (events_size > 0)
+ {
+ errno = 0;
+ int result = ::write(dev_poll_fd_,
+ &pending_event_changes_[0], events_size);
+ if (result != static_cast<int>(events_size))
+ {
+ boost::system::error_code ec = boost::system::error_code(
+ errno, boost::asio::error::get_system_category());
+ for (std::size_t i = 0; i < pending_event_changes_.size(); ++i)
+ {
+ int descriptor = pending_event_changes_[i].fd;
+ for (int j = 0; j < max_ops; ++j)
+ op_queue_[j].cancel_operations(descriptor, ops, ec);
+ }
+ }
+ pending_event_changes_.clear();
+ pending_event_change_index_.clear();
+ }
+
+ int timeout = block ? get_timeout() : 0;
+ lock.unlock();
+
+ // Block on the /dev/poll descriptor.
+ ::pollfd events[128] = { { 0, 0, 0 } };
+ ::dvpoll dp = { 0, 0, 0 };
+ dp.dp_fds = events;
+ dp.dp_nfds = 128;
+ dp.dp_timeout = timeout;
+ int num_events = ::ioctl(dev_poll_fd_, DP_POLL, &dp);
+
+ lock.lock();
+
+ // Dispatch the waiting events.
+ for (int i = 0; i < num_events; ++i)
+ {
+ int descriptor = events[i].fd;
+ if (descriptor == interrupter_.read_descriptor())
+ {
+ interrupter_.reset();
+ }
+ else
+ {
+ bool more_reads = false;
+ bool more_writes = false;
+ bool more_except = false;
+
+ // Exception operations must be processed first to ensure that any
+ // out-of-band data is read before normal data.
+ if (events[i].events & (POLLPRI | POLLERR | POLLHUP))
+ more_except =
+ op_queue_[except_op].perform_operations(descriptor, ops);
+ else
+ more_except = op_queue_[except_op].has_operation(descriptor);
+
+ if (events[i].events & (POLLIN | POLLERR | POLLHUP))
+ more_reads = op_queue_[read_op].perform_operations(descriptor, ops);
+ else
+ more_reads = op_queue_[read_op].has_operation(descriptor);
+
+ if (events[i].events & (POLLOUT | POLLERR | POLLHUP))
+ more_writes = op_queue_[write_op].perform_operations(descriptor, ops);
+ else
+ more_writes = op_queue_[write_op].has_operation(descriptor);
+
+ if ((events[i].events & (POLLERR | POLLHUP)) != 0
+ && !more_except && !more_reads && !more_writes)
+ {
+ // If we have an event and no operations associated with the
+ // descriptor then we need to delete the descriptor from /dev/poll.
+ // The poll operation can produce POLLHUP or POLLERR events when there
+ // is no operation pending, so if we do not remove the descriptor we
+ // can end up in a tight polling loop.
+ ::pollfd ev = { 0, 0, 0 };
+ ev.fd = descriptor;
+ ev.events = POLLREMOVE;
+ ev.revents = 0;
+ ::write(dev_poll_fd_, &ev, sizeof(ev));
+ }
+ else
+ {
+ ::pollfd ev = { 0, 0, 0 };
+ ev.fd = descriptor;
+ ev.events = POLLERR | POLLHUP;
+ if (more_reads)
+ ev.events |= POLLIN;
+ if (more_writes)
+ ev.events |= POLLOUT;
+ if (more_except)
+ ev.events |= POLLPRI;
+ ev.revents = 0;
+ int result = ::write(dev_poll_fd_, &ev, sizeof(ev));
+ if (result != sizeof(ev))
+ {
+ boost::system::error_code ec(errno,
+ boost::asio::error::get_system_category());
+ for (int j = 0; j < max_ops; ++j)
+ op_queue_[j].cancel_operations(descriptor, ops, ec);
+ }
+ }
+ }
+ }
+ timer_queues_.get_ready_timers(ops);
+}
+
+void dev_poll_reactor::interrupt()
+{
+ interrupter_.interrupt();
+}
+
+int dev_poll_reactor::do_dev_poll_create()
+{
+ int fd = ::open("/dev/poll", O_RDWR);
+ if (fd == -1)
+ {
+ boost::system::error_code ec(errno,
+ boost::asio::error::get_system_category());
+ boost::asio::detail::throw_error(ec, "/dev/poll");
+ }
+ return fd;
+}
+
+void dev_poll_reactor::do_add_timer_queue(timer_queue_base& queue)
+{
+ mutex::scoped_lock lock(mutex_);
+ timer_queues_.insert(&queue);
+}
+
+void dev_poll_reactor::do_remove_timer_queue(timer_queue_base& queue)
+{
+ mutex::scoped_lock lock(mutex_);
+ timer_queues_.erase(&queue);
+}
+
+int dev_poll_reactor::get_timeout()
+{
+ // By default we will wait no longer than 5 minutes. This will ensure that
+ // any changes to the system clock are detected after no longer than this.
+ return timer_queues_.wait_duration_msec(5 * 60 * 1000);
+}
+
+void dev_poll_reactor::cancel_ops_unlocked(socket_type descriptor,
+ const boost::system::error_code& ec)
+{
+ bool need_interrupt = false;
+ op_queue<operation> ops;
+ for (int i = 0; i < max_ops; ++i)
+ need_interrupt = op_queue_[i].cancel_operations(
+ descriptor, ops, ec) || need_interrupt;
+ io_service_.post_deferred_completions(ops);
+ if (need_interrupt)
+ interrupter_.interrupt();
+}
+
+::pollfd& dev_poll_reactor::add_pending_event_change(int descriptor)
+{
+ hash_map<int, std::size_t>::iterator iter
+ = pending_event_change_index_.find(descriptor);
+ if (iter == pending_event_change_index_.end())
+ {
+ std::size_t index = pending_event_changes_.size();
+ pending_event_changes_.reserve(pending_event_changes_.size() + 1);
+ pending_event_change_index_.insert(std::make_pair(descriptor, index));
+ pending_event_changes_.push_back(::pollfd());
+ pending_event_changes_[index].fd = descriptor;
+ pending_event_changes_[index].revents = 0;
+ return pending_event_changes_[index];
+ }
+ else
+ {
+ return pending_event_changes_[iter->second];
+ }
+}
+
+} // namespace detail
+} // namespace asio
+} // namespace boost
+
+#include <boost/asio/detail/pop_options.hpp>
+
+#endif // defined(BOOST_ASIO_HAS_DEV_POLL)
+
+#endif // BOOST_ASIO_DETAIL_IMPL_DEV_POLL_REACTOR_IPP
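[Editorial note] On Solaris, /dev/poll is driven through exactly two operations, both visible in the reactor above: interest is registered (or removed, via POLLREMOVE) by write()ing pollfd records to the /dev/poll descriptor, and waiting is done with the DP_POLL ioctl on a dvpoll structure. The sketch below shows the protocol in isolation; it only compiles on systems that provide <sys/devpoll.h>, and wait_readable() is an invented helper name.

// Solaris-only sketch of the /dev/poll protocol used above: register
// interest by writing pollfd records, wait with the DP_POLL ioctl.
#include <fcntl.h>
#include <poll.h>
#include <sys/devpoll.h>
#include <sys/ioctl.h>
#include <unistd.h>

int wait_readable(int fd, int timeout_ms)
{
  int dp = ::open("/dev/poll", O_RDWR);
  if (dp == -1)
    return -1;

  pollfd reg = { fd, POLLIN, 0 };            // register interest in readability
  if (::write(dp, &reg, sizeof(reg)) != sizeof(reg))
  {
    ::close(dp);
    return -1;
  }

  pollfd ready[1];
  dvpoll dvp;
  dvp.dp_fds = ready;                        // ready descriptors are written here
  dvp.dp_nfds = 1;
  dvp.dp_timeout = timeout_ms;               // -1 blocks indefinitely
  int n = ::ioctl(dp, DP_POLL, &dvp);        // returns the number of ready fds

  ::close(dp);                               // closing also drops the registrations
  return n;
}
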
diff --git a/boost/asio/detail/impl/epoll_reactor.hpp b/boost/asio/detail/impl/epoll_reactor.hpp
new file mode 100644
index 0000000000..215f484791
--- /dev/null
+++ b/boost/asio/detail/impl/epoll_reactor.hpp
@@ -0,0 +1,78 @@
+//
+// detail/impl/epoll_reactor.hpp
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+//
+// Copyright (c) 2003-2012 Christopher M. Kohlhoff (chris at kohlhoff dot com)
+//
+// Distributed under the Boost Software License, Version 1.0. (See accompanying
+// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+//
+
+#ifndef BOOST_ASIO_DETAIL_IMPL_EPOLL_REACTOR_HPP
+#define BOOST_ASIO_DETAIL_IMPL_EPOLL_REACTOR_HPP
+
+#if defined(_MSC_VER) && (_MSC_VER >= 1200)
+# pragma once
+#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
+
+#if defined(BOOST_ASIO_HAS_EPOLL)
+
+#include <boost/asio/detail/push_options.hpp>
+
+namespace boost {
+namespace asio {
+namespace detail {
+
+template <typename Time_Traits>
+void epoll_reactor::add_timer_queue(timer_queue<Time_Traits>& queue)
+{
+ do_add_timer_queue(queue);
+}
+
+template <typename Time_Traits>
+void epoll_reactor::remove_timer_queue(timer_queue<Time_Traits>& queue)
+{
+ do_remove_timer_queue(queue);
+}
+
+template <typename Time_Traits>
+void epoll_reactor::schedule_timer(timer_queue<Time_Traits>& queue,
+ const typename Time_Traits::time_type& time,
+ typename timer_queue<Time_Traits>::per_timer_data& timer, wait_op* op)
+{
+ mutex::scoped_lock lock(mutex_);
+
+ if (shutdown_)
+ {
+ io_service_.post_immediate_completion(op);
+ return;
+ }
+
+ bool earliest = queue.enqueue_timer(time, timer, op);
+ io_service_.work_started();
+ if (earliest)
+ update_timeout();
+}
+
+template <typename Time_Traits>
+std::size_t epoll_reactor::cancel_timer(timer_queue<Time_Traits>& queue,
+ typename timer_queue<Time_Traits>::per_timer_data& timer,
+ std::size_t max_cancelled)
+{
+ mutex::scoped_lock lock(mutex_);
+ op_queue<operation> ops;
+ std::size_t n = queue.cancel_timer(timer, ops, max_cancelled);
+ lock.unlock();
+ io_service_.post_deferred_completions(ops);
+ return n;
+}
+
+} // namespace detail
+} // namespace asio
+} // namespace boost
+
+#include <boost/asio/detail/pop_options.hpp>
+
+#endif // defined(BOOST_ASIO_HAS_EPOLL)
+
+#endif // BOOST_ASIO_DETAIL_IMPL_EPOLL_REACTOR_HPP
diff --git a/boost/asio/detail/impl/epoll_reactor.ipp b/boost/asio/detail/impl/epoll_reactor.ipp
new file mode 100644
index 0000000000..771edea67c
--- /dev/null
+++ b/boost/asio/detail/impl/epoll_reactor.ipp
@@ -0,0 +1,637 @@
+//
+// detail/impl/epoll_reactor.ipp
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+//
+// Copyright (c) 2003-2012 Christopher M. Kohlhoff (chris at kohlhoff dot com)
+//
+// Distributed under the Boost Software License, Version 1.0. (See accompanying
+// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+//
+
+#ifndef BOOST_ASIO_DETAIL_IMPL_EPOLL_REACTOR_IPP
+#define BOOST_ASIO_DETAIL_IMPL_EPOLL_REACTOR_IPP
+
+#if defined(_MSC_VER) && (_MSC_VER >= 1200)
+# pragma once
+#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
+
+#include <boost/asio/detail/config.hpp>
+
+#if defined(BOOST_ASIO_HAS_EPOLL)
+
+#include <cstddef>
+#include <sys/epoll.h>
+#include <boost/asio/detail/epoll_reactor.hpp>
+#include <boost/asio/detail/throw_error.hpp>
+#include <boost/asio/error.hpp>
+
+#if defined(BOOST_ASIO_HAS_TIMERFD)
+# include <sys/timerfd.h>
+#endif // defined(BOOST_ASIO_HAS_TIMERFD)
+
+#include <boost/asio/detail/push_options.hpp>
+
+namespace boost {
+namespace asio {
+namespace detail {
+
+epoll_reactor::epoll_reactor(boost::asio::io_service& io_service)
+ : boost::asio::detail::service_base<epoll_reactor>(io_service),
+ io_service_(use_service<io_service_impl>(io_service)),
+ mutex_(),
+ interrupter_(),
+ epoll_fd_(do_epoll_create()),
+ timer_fd_(do_timerfd_create()),
+ shutdown_(false)
+{
+ // Add the interrupter's descriptor to epoll.
+ epoll_event ev = { 0, { 0 } };
+ ev.events = EPOLLIN | EPOLLERR | EPOLLET;
+ ev.data.ptr = &interrupter_;
+ epoll_ctl(epoll_fd_, EPOLL_CTL_ADD, interrupter_.read_descriptor(), &ev);
+ interrupter_.interrupt();
+
+ // Add the timer descriptor to epoll.
+ if (timer_fd_ != -1)
+ {
+ ev.events = EPOLLIN | EPOLLERR;
+ ev.data.ptr = &timer_fd_;
+ epoll_ctl(epoll_fd_, EPOLL_CTL_ADD, timer_fd_, &ev);
+ }
+}
+
+epoll_reactor::~epoll_reactor()
+{
+ if (epoll_fd_ != -1)
+ close(epoll_fd_);
+ if (timer_fd_ != -1)
+ close(timer_fd_);
+}
+
+void epoll_reactor::shutdown_service()
+{
+ mutex::scoped_lock lock(mutex_);
+ shutdown_ = true;
+ lock.unlock();
+
+ op_queue<operation> ops;
+
+ while (descriptor_state* state = registered_descriptors_.first())
+ {
+ for (int i = 0; i < max_ops; ++i)
+ ops.push(state->op_queue_[i]);
+ state->shutdown_ = true;
+ registered_descriptors_.free(state);
+ }
+
+ timer_queues_.get_all_timers(ops);
+
+ io_service_.abandon_operations(ops);
+}
+
+void epoll_reactor::fork_service(boost::asio::io_service::fork_event fork_ev)
+{
+ if (fork_ev == boost::asio::io_service::fork_child)
+ {
+ if (epoll_fd_ != -1)
+ ::close(epoll_fd_);
+ epoll_fd_ = -1;
+ epoll_fd_ = do_epoll_create();
+
+ if (timer_fd_ != -1)
+ ::close(timer_fd_);
+ timer_fd_ = -1;
+ timer_fd_ = do_timerfd_create();
+
+ interrupter_.recreate();
+
+ // Add the interrupter's descriptor to epoll.
+ epoll_event ev = { 0, { 0 } };
+ ev.events = EPOLLIN | EPOLLERR | EPOLLET;
+ ev.data.ptr = &interrupter_;
+ epoll_ctl(epoll_fd_, EPOLL_CTL_ADD, interrupter_.read_descriptor(), &ev);
+ interrupter_.interrupt();
+
+ // Add the timer descriptor to epoll.
+ if (timer_fd_ != -1)
+ {
+ ev.events = EPOLLIN | EPOLLERR;
+ ev.data.ptr = &timer_fd_;
+ epoll_ctl(epoll_fd_, EPOLL_CTL_ADD, timer_fd_, &ev);
+ }
+
+ update_timeout();
+
+ // Re-register all descriptors with epoll.
+ mutex::scoped_lock descriptors_lock(registered_descriptors_mutex_);
+ for (descriptor_state* state = registered_descriptors_.first();
+ state != 0; state = state->next_)
+ {
+ ev.events = EPOLLIN | EPOLLERR | EPOLLHUP | EPOLLOUT | EPOLLPRI | EPOLLET;
+ ev.data.ptr = state;
+ int result = epoll_ctl(epoll_fd_, EPOLL_CTL_ADD, state->descriptor_, &ev);
+ if (result != 0)
+ {
+ boost::system::error_code ec(errno,
+ boost::asio::error::get_system_category());
+ boost::asio::detail::throw_error(ec, "epoll re-registration");
+ }
+ }
+ }
+}
+
+void epoll_reactor::init_task()
+{
+ io_service_.init_task();
+}
+
+int epoll_reactor::register_descriptor(socket_type descriptor,
+ epoll_reactor::per_descriptor_data& descriptor_data)
+{
+ descriptor_data = allocate_descriptor_state();
+
+ {
+ mutex::scoped_lock descriptor_lock(descriptor_data->mutex_);
+
+ descriptor_data->reactor_ = this;
+ descriptor_data->descriptor_ = descriptor;
+ descriptor_data->shutdown_ = false;
+ }
+
+ epoll_event ev = { 0, { 0 } };
+ ev.events = EPOLLIN | EPOLLERR | EPOLLHUP | EPOLLOUT | EPOLLPRI | EPOLLET;
+ ev.data.ptr = descriptor_data;
+ int result = epoll_ctl(epoll_fd_, EPOLL_CTL_ADD, descriptor, &ev);
+ if (result != 0)
+ return errno;
+
+ return 0;
+}
+
+int epoll_reactor::register_internal_descriptor(
+ int op_type, socket_type descriptor,
+ epoll_reactor::per_descriptor_data& descriptor_data, reactor_op* op)
+{
+ descriptor_data = allocate_descriptor_state();
+
+ {
+ mutex::scoped_lock descriptor_lock(descriptor_data->mutex_);
+
+ descriptor_data->reactor_ = this;
+ descriptor_data->descriptor_ = descriptor;
+ descriptor_data->shutdown_ = false;
+ descriptor_data->op_queue_[op_type].push(op);
+ }
+
+ epoll_event ev = { 0, { 0 } };
+ ev.events = EPOLLIN | EPOLLERR | EPOLLHUP | EPOLLOUT | EPOLLPRI | EPOLLET;
+ ev.data.ptr = descriptor_data;
+ int result = epoll_ctl(epoll_fd_, EPOLL_CTL_ADD, descriptor, &ev);
+ if (result != 0)
+ return errno;
+
+ return 0;
+}
+
+void epoll_reactor::move_descriptor(socket_type,
+ epoll_reactor::per_descriptor_data& target_descriptor_data,
+ epoll_reactor::per_descriptor_data& source_descriptor_data)
+{
+ target_descriptor_data = source_descriptor_data;
+ source_descriptor_data = 0;
+}
+
+void epoll_reactor::start_op(int op_type, socket_type descriptor,
+ epoll_reactor::per_descriptor_data& descriptor_data,
+ reactor_op* op, bool allow_speculative)
+{
+ if (!descriptor_data)
+ {
+ op->ec_ = boost::asio::error::bad_descriptor;
+ post_immediate_completion(op);
+ return;
+ }
+
+ mutex::scoped_lock descriptor_lock(descriptor_data->mutex_);
+
+ if (descriptor_data->shutdown_)
+ {
+ post_immediate_completion(op);
+ return;
+ }
+
+ if (descriptor_data->op_queue_[op_type].empty())
+ {
+ if (allow_speculative)
+ {
+ if (op_type != read_op || descriptor_data->op_queue_[except_op].empty())
+ {
+ if (op->perform())
+ {
+ descriptor_lock.unlock();
+ io_service_.post_immediate_completion(op);
+ return;
+ }
+ }
+ }
+ else
+ {
+ epoll_event ev = { 0, { 0 } };
+ ev.events = EPOLLIN | EPOLLERR | EPOLLHUP
+ | EPOLLOUT | EPOLLPRI | EPOLLET;
+ ev.data.ptr = descriptor_data;
+ epoll_ctl(epoll_fd_, EPOLL_CTL_MOD, descriptor, &ev);
+ }
+ }
+
+ descriptor_data->op_queue_[op_type].push(op);
+ io_service_.work_started();
+}
+
+void epoll_reactor::cancel_ops(socket_type,
+ epoll_reactor::per_descriptor_data& descriptor_data)
+{
+ if (!descriptor_data)
+ return;
+
+ mutex::scoped_lock descriptor_lock(descriptor_data->mutex_);
+
+ op_queue<operation> ops;
+ for (int i = 0; i < max_ops; ++i)
+ {
+ while (reactor_op* op = descriptor_data->op_queue_[i].front())
+ {
+ op->ec_ = boost::asio::error::operation_aborted;
+ descriptor_data->op_queue_[i].pop();
+ ops.push(op);
+ }
+ }
+
+ descriptor_lock.unlock();
+
+ io_service_.post_deferred_completions(ops);
+}
+
+void epoll_reactor::deregister_descriptor(socket_type descriptor,
+ epoll_reactor::per_descriptor_data& descriptor_data, bool closing)
+{
+ if (!descriptor_data)
+ return;
+
+ mutex::scoped_lock descriptor_lock(descriptor_data->mutex_);
+
+ if (!descriptor_data->shutdown_)
+ {
+ if (closing)
+ {
+ // The descriptor will be automatically removed from the epoll set when
+ // it is closed.
+ }
+ else
+ {
+ epoll_event ev = { 0, { 0 } };
+ epoll_ctl(epoll_fd_, EPOLL_CTL_DEL, descriptor, &ev);
+ }
+
+ op_queue<operation> ops;
+ for (int i = 0; i < max_ops; ++i)
+ {
+ while (reactor_op* op = descriptor_data->op_queue_[i].front())
+ {
+ op->ec_ = boost::asio::error::operation_aborted;
+ descriptor_data->op_queue_[i].pop();
+ ops.push(op);
+ }
+ }
+
+ descriptor_data->descriptor_ = -1;
+ descriptor_data->shutdown_ = true;
+
+ descriptor_lock.unlock();
+
+ free_descriptor_state(descriptor_data);
+ descriptor_data = 0;
+
+ io_service_.post_deferred_completions(ops);
+ }
+}
+
+void epoll_reactor::deregister_internal_descriptor(socket_type descriptor,
+ epoll_reactor::per_descriptor_data& descriptor_data)
+{
+ if (!descriptor_data)
+ return;
+
+ mutex::scoped_lock descriptor_lock(descriptor_data->mutex_);
+
+ if (!descriptor_data->shutdown_)
+ {
+ epoll_event ev = { 0, { 0 } };
+ epoll_ctl(epoll_fd_, EPOLL_CTL_DEL, descriptor, &ev);
+
+ op_queue<operation> ops;
+ for (int i = 0; i < max_ops; ++i)
+ ops.push(descriptor_data->op_queue_[i]);
+
+ descriptor_data->descriptor_ = -1;
+ descriptor_data->shutdown_ = true;
+
+ descriptor_lock.unlock();
+
+ free_descriptor_state(descriptor_data);
+ descriptor_data = 0;
+ }
+}
+
+void epoll_reactor::run(bool block, op_queue<operation>& ops)
+{
+ // This code relies on the fact that the task_io_service queues the reactor
+ // task behind all descriptor operations generated by this function. This
+ // means, that by the time we reach this point, any previously returned
+ // descriptor operations have already been dequeued. Therefore it is now safe
+ // for us to reuse and return them for the task_io_service to queue again.
+
+ // Calculate a timeout only if timerfd is not used.
+ int timeout;
+ if (timer_fd_ != -1)
+ timeout = block ? -1 : 0;
+ else
+ {
+ mutex::scoped_lock lock(mutex_);
+ timeout = block ? get_timeout() : 0;
+ }
+
+ // Block on the epoll descriptor.
+ epoll_event events[128];
+ int num_events = epoll_wait(epoll_fd_, events, 128, timeout);
+
+#if defined(BOOST_ASIO_HAS_TIMERFD)
+ bool check_timers = (timer_fd_ == -1);
+#else // defined(BOOST_ASIO_HAS_TIMERFD)
+ bool check_timers = true;
+#endif // defined(BOOST_ASIO_HAS_TIMERFD)
+
+ // Dispatch the waiting events.
+ for (int i = 0; i < num_events; ++i)
+ {
+ void* ptr = events[i].data.ptr;
+ if (ptr == &interrupter_)
+ {
+ // No need to reset the interrupter since we're leaving the descriptor
+ // in a ready-to-read state and relying on edge-triggered notifications
+ // to make it so that we only get woken up when the descriptor's epoll
+ // registration is updated.
+
+#if defined(BOOST_ASIO_HAS_TIMERFD)
+ if (timer_fd_ == -1)
+ check_timers = true;
+#else // defined(BOOST_ASIO_HAS_TIMERFD)
+ check_timers = true;
+#endif // defined(BOOST_ASIO_HAS_TIMERFD)
+ }
+#if defined(BOOST_ASIO_HAS_TIMERFD)
+ else if (ptr == &timer_fd_)
+ {
+ check_timers = true;
+ }
+#endif // defined(BOOST_ASIO_HAS_TIMERFD)
+ else
+ {
+ // The descriptor operation doesn't count as work in and of itself, so we
+ // don't call work_started() here. This still allows the io_service to
+ // stop if the only remaining operations are descriptor operations.
+ descriptor_state* descriptor_data = static_cast<descriptor_state*>(ptr);
+ descriptor_data->set_ready_events(events[i].events);
+ ops.push(descriptor_data);
+ }
+ }
+
+ if (check_timers)
+ {
+ mutex::scoped_lock common_lock(mutex_);
+ timer_queues_.get_ready_timers(ops);
+
+#if defined(BOOST_ASIO_HAS_TIMERFD)
+ if (timer_fd_ != -1)
+ {
+ itimerspec new_timeout;
+ itimerspec old_timeout;
+ int flags = get_timeout(new_timeout);
+ timerfd_settime(timer_fd_, flags, &new_timeout, &old_timeout);
+ }
+#endif // defined(BOOST_ASIO_HAS_TIMERFD)
+ }
+}
+
+void epoll_reactor::interrupt()
+{
+ epoll_event ev = { 0, { 0 } };
+ ev.events = EPOLLIN | EPOLLERR | EPOLLET;
+ ev.data.ptr = &interrupter_;
+ epoll_ctl(epoll_fd_, EPOLL_CTL_MOD, interrupter_.read_descriptor(), &ev);
+}
+
+int epoll_reactor::do_epoll_create()
+{
+#if defined(EPOLL_CLOEXEC)
+ int fd = epoll_create1(EPOLL_CLOEXEC);
+#else // defined(EPOLL_CLOEXEC)
+ int fd = -1;
+ errno = EINVAL;
+#endif // defined(EPOLL_CLOEXEC)
+
+ if (fd == -1 && errno == EINVAL)
+ {
+ fd = epoll_create(epoll_size);
+ if (fd != -1)
+ ::fcntl(fd, F_SETFD, FD_CLOEXEC);
+ }
+
+ if (fd == -1)
+ {
+ boost::system::error_code ec(errno,
+ boost::asio::error::get_system_category());
+ boost::asio::detail::throw_error(ec, "epoll");
+ }
+
+ return fd;
+}
+
+int epoll_reactor::do_timerfd_create()
+{
+#if defined(BOOST_ASIO_HAS_TIMERFD)
+# if defined(TFD_CLOEXEC)
+ int fd = timerfd_create(CLOCK_MONOTONIC, TFD_CLOEXEC);
+# else // defined(TFD_CLOEXEC)
+ int fd = -1;
+ errno = EINVAL;
+# endif // defined(TFD_CLOEXEC)
+
+ if (fd == -1 && errno == EINVAL)
+ {
+ fd = timerfd_create(CLOCK_MONOTONIC, 0);
+ if (fd != -1)
+ ::fcntl(fd, F_SETFD, FD_CLOEXEC);
+ }
+
+ return fd;
+#else // defined(BOOST_ASIO_HAS_TIMERFD)
+ return -1;
+#endif // defined(BOOST_ASIO_HAS_TIMERFD)
+}
+
+epoll_reactor::descriptor_state* epoll_reactor::allocate_descriptor_state()
+{
+ mutex::scoped_lock descriptors_lock(registered_descriptors_mutex_);
+ return registered_descriptors_.alloc();
+}
+
+void epoll_reactor::free_descriptor_state(epoll_reactor::descriptor_state* s)
+{
+ mutex::scoped_lock descriptors_lock(registered_descriptors_mutex_);
+ registered_descriptors_.free(s);
+}
+
+void epoll_reactor::do_add_timer_queue(timer_queue_base& queue)
+{
+ mutex::scoped_lock lock(mutex_);
+ timer_queues_.insert(&queue);
+}
+
+void epoll_reactor::do_remove_timer_queue(timer_queue_base& queue)
+{
+ mutex::scoped_lock lock(mutex_);
+ timer_queues_.erase(&queue);
+}
+
+void epoll_reactor::update_timeout()
+{
+#if defined(BOOST_ASIO_HAS_TIMERFD)
+ if (timer_fd_ != -1)
+ {
+ itimerspec new_timeout;
+ itimerspec old_timeout;
+ int flags = get_timeout(new_timeout);
+ timerfd_settime(timer_fd_, flags, &new_timeout, &old_timeout);
+ return;
+ }
+#endif // defined(BOOST_ASIO_HAS_TIMERFD)
+ interrupt();
+}
+
+int epoll_reactor::get_timeout()
+{
+ // By default we will wait no longer than 5 minutes. This will ensure that
+ // any changes to the system clock are detected after no longer than this.
+ return timer_queues_.wait_duration_msec(5 * 60 * 1000);
+}
+
+#if defined(BOOST_ASIO_HAS_TIMERFD)
+int epoll_reactor::get_timeout(itimerspec& ts)
+{
+ ts.it_interval.tv_sec = 0;
+ ts.it_interval.tv_nsec = 0;
+
+ long usec = timer_queues_.wait_duration_usec(5 * 60 * 1000 * 1000);
+ ts.it_value.tv_sec = usec / 1000000;
+ ts.it_value.tv_nsec = usec ? (usec % 1000000) * 1000 : 1;
+
+ return usec ? 0 : TFD_TIMER_ABSTIME;
+}
+#endif // defined(BOOST_ASIO_HAS_TIMERFD)
+
+struct epoll_reactor::perform_io_cleanup_on_block_exit
+{
+ explicit perform_io_cleanup_on_block_exit(epoll_reactor* r)
+ : reactor_(r), first_op_(0)
+ {
+ }
+
+ ~perform_io_cleanup_on_block_exit()
+ {
+ if (first_op_)
+ {
+ // Post the remaining completed operations for invocation.
+ if (!ops_.empty())
+ reactor_->io_service_.post_deferred_completions(ops_);
+
+ // A user-initiated operation has completed, but there's no need to
+ // explicitly call work_finished() here. Instead, we'll take advantage of
+ // the fact that the task_io_service will call work_finished() once we
+ // return.
+ }
+ else
+ {
+ // No user-initiated operations have completed, so we need to compensate
+ // for the work_finished() call that the task_io_service will make once
+ // this operation returns.
+ reactor_->io_service_.work_started();
+ }
+ }
+
+ epoll_reactor* reactor_;
+ op_queue<operation> ops_;
+ operation* first_op_;
+};
+
+epoll_reactor::descriptor_state::descriptor_state()
+ : operation(&epoll_reactor::descriptor_state::do_complete)
+{
+}
+
+operation* epoll_reactor::descriptor_state::perform_io(uint32_t events)
+{
+ perform_io_cleanup_on_block_exit io_cleanup(reactor_);
+ mutex::scoped_lock descriptor_lock(mutex_);
+
+ // Exception operations must be processed first to ensure that any
+ // out-of-band data is read before normal data.
+ static const int flag[max_ops] = { EPOLLIN, EPOLLOUT, EPOLLPRI };
+ for (int j = max_ops - 1; j >= 0; --j)
+ {
+ if (events & (flag[j] | EPOLLERR | EPOLLHUP))
+ {
+ while (reactor_op* op = op_queue_[j].front())
+ {
+ if (op->perform())
+ {
+ op_queue_[j].pop();
+ io_cleanup.ops_.push(op);
+ }
+ else
+ break;
+ }
+ }
+ }
+
+ // The first operation will be returned for completion now. The others will
+ // be posted for later by the io_cleanup object's destructor.
+ io_cleanup.first_op_ = io_cleanup.ops_.front();
+ io_cleanup.ops_.pop();
+ return io_cleanup.first_op_;
+}
+
+void epoll_reactor::descriptor_state::do_complete(
+ io_service_impl* owner, operation* base,
+ const boost::system::error_code& ec, std::size_t bytes_transferred)
+{
+ if (owner)
+ {
+ descriptor_state* descriptor_data = static_cast<descriptor_state*>(base);
+ uint32_t events = static_cast<uint32_t>(bytes_transferred);
+ if (operation* op = descriptor_data->perform_io(events))
+ {
+ op->complete(*owner, ec, 0);
+ }
+ }
+}
+
+} // namespace detail
+} // namespace asio
+} // namespace boost
+
+#include <boost/asio/detail/pop_options.hpp>
+
+#endif // defined(BOOST_ASIO_HAS_EPOLL)
+
+#endif // BOOST_ASIO_DETAIL_IMPL_EPOLL_REACTOR_IPP
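[Editorial note] The epoll reactor registers every descriptor once, edge-triggered (EPOLLET), with the full event mask, and relies on speculative non-blocking operations to drain the descriptor between notifications; when timerfd is available, timer expiry arrives through the same epoll_wait() call so the timeout can stay at -1. A minimal Linux-only sketch of that registration and wait pattern follows; watch_and_wait() is an invented name and error handling is reduced to the bare minimum.

// Linux-only sketch of the epoll usage above: edge-triggered registration
// followed by a single wait.
#include <sys/epoll.h>
#include <unistd.h>

int watch_and_wait(int fd)
{
  int ep = ::epoll_create1(EPOLL_CLOEXEC);
  if (ep == -1)
    return -1;

  epoll_event ev = {};
  ev.events = EPOLLIN | EPOLLOUT | EPOLLPRI | EPOLLERR | EPOLLHUP | EPOLLET;
  ev.data.fd = fd;                            // the reactor stores a descriptor_state* here
  if (::epoll_ctl(ep, EPOLL_CTL_ADD, fd, &ev) == -1)
  {
    ::close(ep);
    return -1;
  }

  epoll_event events[128];
  int n = ::epoll_wait(ep, events, 128, -1);  // blocks until an edge is signalled
  // With EPOLLET the readiness notification is delivered once per edge, so the
  // caller must drain the descriptor (read/write until EAGAIN) before waiting
  // again, which is what the speculative operations above do.

  ::close(ep);
  return n;
}
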
diff --git a/boost/asio/detail/impl/eventfd_select_interrupter.ipp b/boost/asio/detail/impl/eventfd_select_interrupter.ipp
new file mode 100644
index 0000000000..22154bb0c6
--- /dev/null
+++ b/boost/asio/detail/impl/eventfd_select_interrupter.ipp
@@ -0,0 +1,166 @@
+//
+// detail/impl/eventfd_select_interrupter.ipp
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+//
+// Copyright (c) 2003-2012 Christopher M. Kohlhoff (chris at kohlhoff dot com)
+// Copyright (c) 2008 Roelof Naude (roelof.naude at gmail dot com)
+//
+// Distributed under the Boost Software License, Version 1.0. (See accompanying
+// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+//
+
+#ifndef BOOST_ASIO_DETAIL_IMPL_EVENTFD_SELECT_INTERRUPTER_IPP
+#define BOOST_ASIO_DETAIL_IMPL_EVENTFD_SELECT_INTERRUPTER_IPP
+
+#if defined(_MSC_VER) && (_MSC_VER >= 1200)
+# pragma once
+#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
+
+#include <boost/asio/detail/config.hpp>
+
+#if defined(BOOST_ASIO_HAS_EVENTFD)
+
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <fcntl.h>
+#if __GLIBC__ == 2 && __GLIBC_MINOR__ < 8
+# include <asm/unistd.h>
+#else // __GLIBC__ == 2 && __GLIBC_MINOR__ < 8
+# include <sys/eventfd.h>
+#endif // __GLIBC__ == 2 && __GLIBC_MINOR__ < 8
+#include <boost/asio/detail/eventfd_select_interrupter.hpp>
+#include <boost/asio/detail/throw_error.hpp>
+#include <boost/asio/error.hpp>
+
+#include <boost/asio/detail/push_options.hpp>
+
+namespace boost {
+namespace asio {
+namespace detail {
+
+eventfd_select_interrupter::eventfd_select_interrupter()
+{
+ open_descriptors();
+}
+
+void eventfd_select_interrupter::open_descriptors()
+{
+#if __GLIBC__ == 2 && __GLIBC_MINOR__ < 8
+ write_descriptor_ = read_descriptor_ = syscall(__NR_eventfd, 0);
+ if (read_descriptor_ != -1)
+ {
+ ::fcntl(read_descriptor_, F_SETFL, O_NONBLOCK);
+ ::fcntl(read_descriptor_, F_SETFD, FD_CLOEXEC);
+ }
+#else // __GLIBC__ == 2 && __GLIBC_MINOR__ < 8
+# if defined(EFD_CLOEXEC) && defined(EFD_NONBLOCK)
+ write_descriptor_ = read_descriptor_ =
+ ::eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
+# else // defined(EFD_CLOEXEC) && defined(EFD_NONBLOCK)
+ errno = EINVAL;
+ write_descriptor_ = read_descriptor_ = -1;
+# endif // defined(EFD_CLOEXEC) && defined(EFD_NONBLOCK)
+ if (read_descriptor_ == -1 && errno == EINVAL)
+ {
+ write_descriptor_ = read_descriptor_ = ::eventfd(0, 0);
+ if (read_descriptor_ != -1)
+ {
+ ::fcntl(read_descriptor_, F_SETFL, O_NONBLOCK);
+ ::fcntl(read_descriptor_, F_SETFD, FD_CLOEXEC);
+ }
+ }
+#endif // __GLIBC__ == 2 && __GLIBC_MINOR__ < 8
+
+ if (read_descriptor_ == -1)
+ {
+ int pipe_fds[2];
+ if (pipe(pipe_fds) == 0)
+ {
+ read_descriptor_ = pipe_fds[0];
+ ::fcntl(read_descriptor_, F_SETFL, O_NONBLOCK);
+ ::fcntl(read_descriptor_, F_SETFD, FD_CLOEXEC);
+ write_descriptor_ = pipe_fds[1];
+ ::fcntl(write_descriptor_, F_SETFL, O_NONBLOCK);
+ ::fcntl(write_descriptor_, F_SETFD, FD_CLOEXEC);
+ }
+ else
+ {
+ boost::system::error_code ec(errno,
+ boost::asio::error::get_system_category());
+ boost::asio::detail::throw_error(ec, "eventfd_select_interrupter");
+ }
+ }
+}
+
+eventfd_select_interrupter::~eventfd_select_interrupter()
+{
+ close_descriptors();
+}
+
+void eventfd_select_interrupter::close_descriptors()
+{
+ if (write_descriptor_ != -1 && write_descriptor_ != read_descriptor_)
+ ::close(write_descriptor_);
+ if (read_descriptor_ != -1)
+ ::close(read_descriptor_);
+}
+
+void eventfd_select_interrupter::recreate()
+{
+ close_descriptors();
+
+ write_descriptor_ = -1;
+ read_descriptor_ = -1;
+
+ open_descriptors();
+}
+
+void eventfd_select_interrupter::interrupt()
+{
+ uint64_t counter(1UL);
+ int result = ::write(write_descriptor_, &counter, sizeof(uint64_t));
+ (void)result;
+}
+
+bool eventfd_select_interrupter::reset()
+{
+ if (write_descriptor_ == read_descriptor_)
+ {
+ for (;;)
+ {
+ // Only perform one read. The kernel maintains an atomic counter.
+ uint64_t counter(0);
+ errno = 0;
+ int bytes_read = ::read(read_descriptor_, &counter, sizeof(uint64_t));
+ if (bytes_read < 0 && errno == EINTR)
+ continue;
+ bool was_interrupted = (bytes_read > 0);
+ return was_interrupted;
+ }
+ }
+ else
+ {
+ for (;;)
+ {
+ // Clear all data from the pipe.
+ char data[1024];
+ int bytes_read = ::read(read_descriptor_, data, sizeof(data));
+ if (bytes_read < 0 && errno == EINTR)
+ continue;
+ bool was_interrupted = (bytes_read > 0);
+ while (bytes_read == sizeof(data))
+ bytes_read = ::read(read_descriptor_, data, sizeof(data));
+ return was_interrupted;
+ }
+ }
+}
+
+} // namespace detail
+} // namespace asio
+} // namespace boost
+
+#include <boost/asio/detail/pop_options.hpp>
+
+#endif // defined(BOOST_ASIO_HAS_EVENTFD)
+
+#endif // BOOST_ASIO_DETAIL_IMPL_EVENTFD_SELECT_INTERRUPTER_IPP
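[Editorial note] The eventfd interrupter is a single-descriptor wakeup channel: interrupt() writes a counter value of 1, which makes the descriptor readable to any reactor blocked on it, and reset() reads the counter back down to zero. A minimal sketch of the same idea follows (Linux-only; wakeup_channel is an invented name); the upstream class additionally falls back to a pipe when eventfd is unavailable.

// Linux-only sketch of the eventfd wakeup channel implemented above:
// write a counter value to interrupt, read it back to reset.
#include <stdint.h>
#include <sys/eventfd.h>
#include <unistd.h>

class wakeup_channel
{
public:
  wakeup_channel()
    : fd_(::eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK))
  {
  }

  ~wakeup_channel() { if (fd_ != -1) ::close(fd_); }

  int fd() const { return fd_; }              // watch this fd for readability

  void interrupt()
  {
    uint64_t counter = 1;
    ssize_t result = ::write(fd_, &counter, sizeof(counter)); // fd_ becomes readable
    (void)result;
  }

  bool reset()
  {
    uint64_t counter = 0;
    return ::read(fd_, &counter, sizeof(counter)) > 0; // counter drops back to zero
  }

private:
  int fd_;
};
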
diff --git a/boost/asio/detail/impl/handler_tracking.ipp b/boost/asio/detail/impl/handler_tracking.ipp
new file mode 100644
index 0000000000..70342e3e69
--- /dev/null
+++ b/boost/asio/detail/impl/handler_tracking.ipp
@@ -0,0 +1,299 @@
+//
+// detail/impl/handler_tracking.ipp
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+//
+// Copyright (c) 2003-2012 Christopher M. Kohlhoff (chris at kohlhoff dot com)
+//
+// Distributed under the Boost Software License, Version 1.0. (See accompanying
+// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+//
+
+#ifndef BOOST_ASIO_DETAIL_IMPL_HANDLER_TRACKING_IPP
+#define BOOST_ASIO_DETAIL_IMPL_HANDLER_TRACKING_IPP
+
+#if defined(_MSC_VER) && (_MSC_VER >= 1200)
+# pragma once
+#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
+
+#include <boost/asio/detail/config.hpp>
+
+#if defined(BOOST_ASIO_ENABLE_HANDLER_TRACKING)
+
+#include <cstdarg>
+#include <cstdio>
+#include <boost/asio/detail/handler_tracking.hpp>
+
+#include <boost/asio/detail/push_options.hpp>
+#include <boost/date_time/posix_time/posix_time_types.hpp>
+#include <boost/asio/detail/pop_options.hpp>
+
+#if !defined(BOOST_WINDOWS)
+# include <unistd.h>
+#endif // !defined(BOOST_WINDOWS)
+
+#include <boost/asio/detail/push_options.hpp>
+
+namespace boost {
+namespace asio {
+namespace detail {
+
+struct handler_tracking::tracking_state
+{
+ static_mutex mutex_;
+ boost::uint64_t next_id_;
+ tss_ptr<completion>* current_completion_;
+};
+
+handler_tracking::tracking_state* handler_tracking::get_state()
+{
+ static tracking_state state = { BOOST_ASIO_STATIC_MUTEX_INIT, 1, 0 };
+ return &state;
+}
+
+void handler_tracking::init()
+{
+ static tracking_state* state = get_state();
+
+ state->mutex_.init();
+
+ static_mutex::scoped_lock lock(state->mutex_);
+ if (state->current_completion_ == 0)
+ state->current_completion_ = new tss_ptr<completion>;
+}
+
+void handler_tracking::creation(handler_tracking::tracked_handler* h,
+ const char* object_type, void* object, const char* op_name)
+{
+ static tracking_state* state = get_state();
+
+ static_mutex::scoped_lock lock(state->mutex_);
+ h->id_ = state->next_id_++;
+ lock.unlock();
+
+ boost::posix_time::ptime epoch(boost::gregorian::date(1970, 1, 1));
+ boost::posix_time::time_duration now =
+ boost::posix_time::microsec_clock::universal_time() - epoch;
+
+ boost::uint64_t current_id = 0;
+ if (completion* current_completion = *state->current_completion_)
+ current_id = current_completion->id_;
+
+ write_line(
+#if defined(BOOST_WINDOWS)
+ "@asio|%I64u.%06I64u|%I64u*%I64u|%.20s@%p.%.50s\n",
+#else // defined(BOOST_WINDOWS)
+ "@asio|%llu.%06llu|%llu*%llu|%.20s@%p.%.50s\n",
+#endif // defined(BOOST_WINDOWS)
+ static_cast<boost::uint64_t>(now.total_seconds()),
+ static_cast<boost::uint64_t>(now.total_microseconds() % 1000000),
+ current_id, h->id_, object_type, object, op_name);
+}
+
+handler_tracking::completion::completion(handler_tracking::tracked_handler* h)
+ : id_(h->id_),
+ invoked_(false),
+ next_(*get_state()->current_completion_)
+{
+ *get_state()->current_completion_ = this;
+}
+
+handler_tracking::completion::~completion()
+{
+ if (id_)
+ {
+ boost::posix_time::ptime epoch(boost::gregorian::date(1970, 1, 1));
+ boost::posix_time::time_duration now =
+ boost::posix_time::microsec_clock::universal_time() - epoch;
+
+ write_line(
+#if defined(BOOST_WINDOWS)
+ "@asio|%I64u.%06I64u|%c%I64u|\n",
+#else // defined(BOOST_WINDOWS)
+ "@asio|%llu.%06llu|%c%llu|\n",
+#endif // defined(BOOST_WINDOWS)
+ static_cast<boost::uint64_t>(now.total_seconds()),
+ static_cast<boost::uint64_t>(now.total_microseconds() % 1000000),
+ invoked_ ? '!' : '~', id_);
+ }
+
+ *get_state()->current_completion_ = next_;
+}
+
+void handler_tracking::completion::invocation_begin()
+{
+ boost::posix_time::ptime epoch(boost::gregorian::date(1970, 1, 1));
+ boost::posix_time::time_duration now =
+ boost::posix_time::microsec_clock::universal_time() - epoch;
+
+ write_line(
+#if defined(BOOST_WINDOWS)
+ "@asio|%I64u.%06I64u|>%I64u|\n",
+#else // defined(BOOST_WINDOWS)
+ "@asio|%llu.%06llu|>%llu|\n",
+#endif // defined(BOOST_WINDOWS)
+ static_cast<boost::uint64_t>(now.total_seconds()),
+ static_cast<boost::uint64_t>(now.total_microseconds() % 1000000), id_);
+
+ invoked_ = true;
+}
+
+void handler_tracking::completion::invocation_begin(
+ const boost::system::error_code& ec)
+{
+ boost::posix_time::ptime epoch(boost::gregorian::date(1970, 1, 1));
+ boost::posix_time::time_duration now =
+ boost::posix_time::microsec_clock::universal_time() - epoch;
+
+ write_line(
+#if defined(BOOST_WINDOWS)
+ "@asio|%I64u.%06I64u|>%I64u|ec=%.20s:%d\n",
+#else // defined(BOOST_WINDOWS)
+ "@asio|%llu.%06llu|>%llu|ec=%.20s:%d\n",
+#endif // defined(BOOST_WINDOWS)
+ static_cast<boost::uint64_t>(now.total_seconds()),
+ static_cast<boost::uint64_t>(now.total_microseconds() % 1000000),
+ id_, ec.category().name(), ec.value());
+
+ invoked_ = true;
+}
+
+void handler_tracking::completion::invocation_begin(
+ const boost::system::error_code& ec, std::size_t bytes_transferred)
+{
+ boost::posix_time::ptime epoch(boost::gregorian::date(1970, 1, 1));
+ boost::posix_time::time_duration now =
+ boost::posix_time::microsec_clock::universal_time() - epoch;
+
+ write_line(
+#if defined(BOOST_WINDOWS)
+ "@asio|%I64u.%06I64u|>%I64u|ec=%.20s:%d,bytes_transferred=%I64u\n",
+#else // defined(BOOST_WINDOWS)
+ "@asio|%llu.%06llu|>%llu|ec=%.20s:%d,bytes_transferred=%llu\n",
+#endif // defined(BOOST_WINDOWS)
+ static_cast<boost::uint64_t>(now.total_seconds()),
+ static_cast<boost::uint64_t>(now.total_microseconds() % 1000000),
+ id_, ec.category().name(), ec.value(),
+ static_cast<boost::uint64_t>(bytes_transferred));
+
+ invoked_ = true;
+}
+
+void handler_tracking::completion::invocation_begin(
+ const boost::system::error_code& ec, int signal_number)
+{
+ boost::posix_time::ptime epoch(boost::gregorian::date(1970, 1, 1));
+ boost::posix_time::time_duration now =
+ boost::posix_time::microsec_clock::universal_time() - epoch;
+
+ write_line(
+#if defined(BOOST_WINDOWS)
+ "@asio|%I64u.%06I64u|>%I64u|ec=%.20s:%d,signal_number=%d\n",
+#else // defined(BOOST_WINDOWS)
+ "@asio|%llu.%06llu|>%llu|ec=%.20s:%d,signal_number=%d\n",
+#endif // defined(BOOST_WINDOWS)
+ static_cast<boost::uint64_t>(now.total_seconds()),
+ static_cast<boost::uint64_t>(now.total_microseconds() % 1000000),
+ id_, ec.category().name(), ec.value(), signal_number);
+
+ invoked_ = true;
+}
+
+void handler_tracking::completion::invocation_begin(
+ const boost::system::error_code& ec, const char* arg)
+{
+ boost::posix_time::ptime epoch(boost::gregorian::date(1970, 1, 1));
+ boost::posix_time::time_duration now =
+ boost::posix_time::microsec_clock::universal_time() - epoch;
+
+ write_line(
+#if defined(BOOST_WINDOWS)
+ "@asio|%I64u.%06I64u|>%I64u|ec=%.20s:%d,%.50s\n",
+#else // defined(BOOST_WINDOWS)
+ "@asio|%llu.%06llu|>%llu|ec=%.20s:%d,%.50s\n",
+#endif // defined(BOOST_WINDOWS)
+ static_cast<boost::uint64_t>(now.total_seconds()),
+ static_cast<boost::uint64_t>(now.total_microseconds() % 1000000),
+ id_, ec.category().name(), ec.value(), arg);
+
+ invoked_ = true;
+}
+
+void handler_tracking::completion::invocation_end()
+{
+ if (id_)
+ {
+ boost::posix_time::ptime epoch(boost::gregorian::date(1970, 1, 1));
+ boost::posix_time::time_duration now =
+ boost::posix_time::microsec_clock::universal_time() - epoch;
+
+ write_line(
+#if defined(BOOST_WINDOWS)
+ "@asio|%I64u.%06I64u|<%I64u|\n",
+#else // defined(BOOST_WINDOWS)
+ "@asio|%llu.%06llu|<%llu|\n",
+#endif // defined(BOOST_WINDOWS)
+ static_cast<boost::uint64_t>(now.total_seconds()),
+ static_cast<boost::uint64_t>(now.total_microseconds() % 1000000), id_);
+
+ id_ = 0;
+ }
+}
+
+void handler_tracking::operation(const char* object_type,
+ void* object, const char* op_name)
+{
+ static tracking_state* state = get_state();
+
+ boost::posix_time::ptime epoch(boost::gregorian::date(1970, 1, 1));
+ boost::posix_time::time_duration now =
+ boost::posix_time::microsec_clock::universal_time() - epoch;
+
+ unsigned long long current_id = 0;
+ if (completion* current_completion = *state->current_completion_)
+ current_id = current_completion->id_;
+
+ write_line(
+#if defined(BOOST_WINDOWS)
+ "@asio|%I64u.%06I64u|%I64u|%.20s@%p.%.50s\n",
+#else // defined(BOOST_WINDOWS)
+ "@asio|%llu.%06llu|%llu|%.20s@%p.%.50s\n",
+#endif // defined(BOOST_WINDOWS)
+ static_cast<boost::uint64_t>(now.total_seconds()),
+ static_cast<boost::uint64_t>(now.total_microseconds() % 1000000),
+ current_id, object_type, object, op_name);
+}
+
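+// Format the line into a fixed 256-byte buffer and write it directly to
+// standard error, bypassing stdio buffering.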
+void handler_tracking::write_line(const char* format, ...)
+{
+  using namespace std; // For vsprintf (or equivalent).
+
+ va_list args;
+ va_start(args, format);
+
+ char line[256] = "";
+#if BOOST_WORKAROUND(BOOST_MSVC, >= 1400) && !defined(UNDER_CE)
+ int length = vsprintf_s(line, sizeof(line), format, args);
+#else // BOOST_WORKAROUND(BOOST_MSVC, >= 1400) && !defined(UNDER_CE)
+ int length = vsprintf(line, format, args);
+#endif // BOOST_WORKAROUND(BOOST_MSVC, >= 1400) && !defined(UNDER_CE)
+
+ va_end(args);
+
+#if defined(BOOST_WINDOWS)
+ HANDLE stderr_handle = ::GetStdHandle(STD_ERROR_HANDLE);
+ DWORD bytes_written = 0;
+ ::WriteFile(stderr_handle, line, length, &bytes_written, 0);
+#else // defined(BOOST_WINDOWS)
+ ::write(STDERR_FILENO, line, length);
+#endif // defined(BOOST_WINDOWS)
+}
+
+} // namespace detail
+} // namespace asio
+} // namespace boost
+
+#include <boost/asio/detail/pop_options.hpp>
+
+#endif // defined(BOOST_ASIO_ENABLE_HANDLER_TRACKING)
+
+#endif // BOOST_ASIO_DETAIL_IMPL_HANDLER_TRACKING_IPP
diff --git a/boost/asio/detail/impl/kqueue_reactor.hpp b/boost/asio/detail/impl/kqueue_reactor.hpp
new file mode 100644
index 0000000000..d3445cd006
--- /dev/null
+++ b/boost/asio/detail/impl/kqueue_reactor.hpp
@@ -0,0 +1,82 @@
+//
+// detail/impl/kqueue_reactor.hpp
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+//
+// Copyright (c) 2003-2012 Christopher M. Kohlhoff (chris at kohlhoff dot com)
+// Copyright (c) 2005 Stefan Arentz (stefan at soze dot com)
+//
+// Distributed under the Boost Software License, Version 1.0. (See accompanying
+// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+//
+
+#ifndef BOOST_ASIO_DETAIL_IMPL_KQUEUE_REACTOR_HPP
+#define BOOST_ASIO_DETAIL_IMPL_KQUEUE_REACTOR_HPP
+
+#if defined(_MSC_VER) && (_MSC_VER >= 1200)
+# pragma once
+#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
+
+#include <boost/asio/detail/config.hpp>
+
+#if defined(BOOST_ASIO_HAS_KQUEUE)
+
+#include <boost/asio/detail/push_options.hpp>
+
+namespace boost {
+namespace asio {
+namespace detail {
+
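+// Add a timer queue to the reactor.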
+template <typename Time_Traits>
+void kqueue_reactor::add_timer_queue(timer_queue<Time_Traits>& queue)
+{
+ do_add_timer_queue(queue);
+}
+
+// Remove a timer queue from the reactor.
+template <typename Time_Traits>
+void kqueue_reactor::remove_timer_queue(timer_queue<Time_Traits>& queue)
+{
+ do_remove_timer_queue(queue);
+}
+
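+// Schedule a new operation in the given timer queue to expire at the
+// specified absolute time.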
+template <typename Time_Traits>
+void kqueue_reactor::schedule_timer(timer_queue<Time_Traits>& queue,
+ const typename Time_Traits::time_type& time,
+ typename timer_queue<Time_Traits>::per_timer_data& timer, wait_op* op)
+{
+ boost::asio::detail::mutex::scoped_lock lock(mutex_);
+
+ if (shutdown_)
+ {
+ io_service_.post_immediate_completion(op);
+ return;
+ }
+
+ bool earliest = queue.enqueue_timer(time, timer, op);
+ io_service_.work_started();
+ if (earliest)
+ interrupt();
+}
+
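+// Cancel the timer operations associated with the given token. Returns the
+// number of operations that have been cancelled.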
+template <typename Time_Traits>
+std::size_t kqueue_reactor::cancel_timer(timer_queue<Time_Traits>& queue,
+ typename timer_queue<Time_Traits>::per_timer_data& timer,
+ std::size_t max_cancelled)
+{
+ boost::asio::detail::mutex::scoped_lock lock(mutex_);
+ op_queue<operation> ops;
+ std::size_t n = queue.cancel_timer(timer, ops, max_cancelled);
+ lock.unlock();
+ io_service_.post_deferred_completions(ops);
+ return n;
+}
+
+} // namespace detail
+} // namespace asio
+} // namespace boost
+
+#include <boost/asio/detail/pop_options.hpp>
+
+#endif // defined(BOOST_ASIO_HAS_KQUEUE)
+
+#endif // BOOST_ASIO_DETAIL_IMPL_KQUEUE_REACTOR_HPP
diff --git a/boost/asio/detail/impl/kqueue_reactor.ipp b/boost/asio/detail/impl/kqueue_reactor.ipp
new file mode 100644
index 0000000000..a819eb9b74
--- /dev/null
+++ b/boost/asio/detail/impl/kqueue_reactor.ipp
@@ -0,0 +1,526 @@
+//
+// detail/impl/kqueue_reactor.ipp
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+//
+// Copyright (c) 2003-2012 Christopher M. Kohlhoff (chris at kohlhoff dot com)
+// Copyright (c) 2005 Stefan Arentz (stefan at soze dot com)
+//
+// Distributed under the Boost Software License, Version 1.0. (See accompanying
+// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+//
+
+#ifndef BOOST_ASIO_DETAIL_IMPL_KQUEUE_REACTOR_IPP
+#define BOOST_ASIO_DETAIL_IMPL_KQUEUE_REACTOR_IPP
+
+#if defined(_MSC_VER) && (_MSC_VER >= 1200)
+# pragma once
+#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
+
+#include <boost/asio/detail/config.hpp>
+
+#if defined(BOOST_ASIO_HAS_KQUEUE)
+
+#include <boost/asio/detail/kqueue_reactor.hpp>
+#include <boost/asio/detail/throw_error.hpp>
+#include <boost/asio/error.hpp>
+
+#include <boost/asio/detail/push_options.hpp>
+
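+// NetBSD declares the udata member of struct kevent as intptr_t rather than
+// void*, so wrap EV_SET to apply the extra cast.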
+#if defined(__NetBSD__)
+# define BOOST_ASIO_KQUEUE_EV_SET(ev, ident, filt, flags, fflags, data, udata) \
+ EV_SET(ev, ident, filt, flags, fflags, data, \
+ reinterpret_cast<intptr_t>(static_cast<void*>(udata)))
+#else
+# define BOOST_ASIO_KQUEUE_EV_SET(ev, ident, filt, flags, fflags, data, udata) \
+ EV_SET(ev, ident, filt, flags, fflags, data, udata)
+#endif
+
+namespace boost {
+namespace asio {
+namespace detail {
+
+kqueue_reactor::kqueue_reactor(boost::asio::io_service& io_service)
+ : boost::asio::detail::service_base<kqueue_reactor>(io_service),
+ io_service_(use_service<io_service_impl>(io_service)),
+ mutex_(),
+ kqueue_fd_(do_kqueue_create()),
+ interrupter_(),
+ shutdown_(false)
+{
+ // The interrupter is put into a permanently readable state. Whenever we want
+  // to interrupt the blocked kevent call, we register a read operation against
+ // the descriptor.
+ interrupter_.interrupt();
+}
+
+kqueue_reactor::~kqueue_reactor()
+{
+ close(kqueue_fd_);
+}
+
+void kqueue_reactor::shutdown_service()
+{
+ mutex::scoped_lock lock(mutex_);
+ shutdown_ = true;
+ lock.unlock();
+
+ op_queue<operation> ops;
+
+ while (descriptor_state* state = registered_descriptors_.first())
+ {
+ for (int i = 0; i < max_ops; ++i)
+ ops.push(state->op_queue_[i]);
+ state->shutdown_ = true;
+ registered_descriptors_.free(state);
+ }
+
+ timer_queues_.get_all_timers(ops);
+
+ io_service_.abandon_operations(ops);
+}
+
+void kqueue_reactor::fork_service(boost::asio::io_service::fork_event fork_ev)
+{
+ if (fork_ev == boost::asio::io_service::fork_child)
+ {
+ // The kqueue descriptor is automatically closed in the child.
+ kqueue_fd_ = -1;
+ kqueue_fd_ = do_kqueue_create();
+
+ interrupter_.recreate();
+
+ // Re-register all descriptors with kqueue.
+ mutex::scoped_lock descriptors_lock(registered_descriptors_mutex_);
+ for (descriptor_state* state = registered_descriptors_.first();
+ state != 0; state = state->next_)
+ {
+ struct kevent events[2];
+ int num_events = 0;
+
+ if (!state->op_queue_[read_op].empty())
+ BOOST_ASIO_KQUEUE_EV_SET(&events[num_events++], state->descriptor_,
+ EVFILT_READ, EV_ADD | EV_CLEAR, 0, 0, state);
+ else if (!state->op_queue_[except_op].empty())
+ BOOST_ASIO_KQUEUE_EV_SET(&events[num_events++], state->descriptor_,
+ EVFILT_READ, EV_ADD | EV_CLEAR, EV_OOBAND, 0, state);
+
+ if (!state->op_queue_[write_op].empty())
+ BOOST_ASIO_KQUEUE_EV_SET(&events[num_events++], state->descriptor_,
+ EVFILT_WRITE, EV_ADD | EV_CLEAR, 0, 0, state);
+
+ if (num_events && ::kevent(kqueue_fd_, events, num_events, 0, 0, 0) == -1)
+ {
+ boost::system::error_code error(errno,
+ boost::asio::error::get_system_category());
+ boost::asio::detail::throw_error(error);
+ }
+ }
+ }
+}
+
+void kqueue_reactor::init_task()
+{
+ io_service_.init_task();
+}
+
+int kqueue_reactor::register_descriptor(socket_type descriptor,
+ kqueue_reactor::per_descriptor_data& descriptor_data)
+{
+ descriptor_data = allocate_descriptor_state();
+
+ mutex::scoped_lock lock(descriptor_data->mutex_);
+
+ descriptor_data->descriptor_ = descriptor;
+ descriptor_data->shutdown_ = false;
+
+ return 0;
+}
+
+int kqueue_reactor::register_internal_descriptor(
+ int op_type, socket_type descriptor,
+ kqueue_reactor::per_descriptor_data& descriptor_data, reactor_op* op)
+{
+ descriptor_data = allocate_descriptor_state();
+
+ mutex::scoped_lock lock(descriptor_data->mutex_);
+
+ descriptor_data->descriptor_ = descriptor;
+ descriptor_data->shutdown_ = false;
+ descriptor_data->op_queue_[op_type].push(op);
+
+ struct kevent event;
+ switch (op_type)
+ {
+ case read_op:
+ BOOST_ASIO_KQUEUE_EV_SET(&event, descriptor, EVFILT_READ,
+ EV_ADD | EV_CLEAR, 0, 0, descriptor_data);
+ break;
+ case write_op:
+ BOOST_ASIO_KQUEUE_EV_SET(&event, descriptor, EVFILT_WRITE,
+ EV_ADD | EV_CLEAR, 0, 0, descriptor_data);
+ break;
+ case except_op:
+ BOOST_ASIO_KQUEUE_EV_SET(&event, descriptor, EVFILT_READ,
+ EV_ADD | EV_CLEAR, EV_OOBAND, 0, descriptor_data);
+ break;
+ }
+ ::kevent(kqueue_fd_, &event, 1, 0, 0, 0);
+
+ return 0;
+}
+
+void kqueue_reactor::move_descriptor(socket_type,
+ kqueue_reactor::per_descriptor_data& target_descriptor_data,
+ kqueue_reactor::per_descriptor_data& source_descriptor_data)
+{
+ target_descriptor_data = source_descriptor_data;
+ source_descriptor_data = 0;
+}
+
+void kqueue_reactor::start_op(int op_type, socket_type descriptor,
+ kqueue_reactor::per_descriptor_data& descriptor_data,
+ reactor_op* op, bool allow_speculative)
+{
+ if (!descriptor_data)
+ {
+ op->ec_ = boost::asio::error::bad_descriptor;
+ post_immediate_completion(op);
+ return;
+ }
+
+ mutex::scoped_lock descriptor_lock(descriptor_data->mutex_);
+
+ if (descriptor_data->shutdown_)
+ {
+ post_immediate_completion(op);
+ return;
+ }
+
+ bool first = descriptor_data->op_queue_[op_type].empty();
+ if (first)
+ {
+ if (allow_speculative)
+ {
+ if (op_type != read_op || descriptor_data->op_queue_[except_op].empty())
+ {
+ if (op->perform())
+ {
+ descriptor_lock.unlock();
+ io_service_.post_immediate_completion(op);
+ return;
+ }
+ }
+ }
+ }
+
+ descriptor_data->op_queue_[op_type].push(op);
+ io_service_.work_started();
+
+ if (first)
+ {
+ struct kevent event;
+ switch (op_type)
+ {
+ case read_op:
+ BOOST_ASIO_KQUEUE_EV_SET(&event, descriptor, EVFILT_READ,
+ EV_ADD | EV_CLEAR, 0, 0, descriptor_data);
+ break;
+ case write_op:
+ BOOST_ASIO_KQUEUE_EV_SET(&event, descriptor, EVFILT_WRITE,
+ EV_ADD | EV_CLEAR, 0, 0, descriptor_data);
+ break;
+ case except_op:
+ if (!descriptor_data->op_queue_[read_op].empty())
+ return; // Already registered for read events.
+ BOOST_ASIO_KQUEUE_EV_SET(&event, descriptor, EVFILT_READ,
+ EV_ADD | EV_CLEAR, EV_OOBAND, 0, descriptor_data);
+ break;
+ }
+
+ if (::kevent(kqueue_fd_, &event, 1, 0, 0, 0) == -1)
+ {
+ op->ec_ = boost::system::error_code(errno,
+ boost::asio::error::get_system_category());
+ descriptor_data->op_queue_[op_type].pop();
+ io_service_.post_deferred_completion(op);
+ }
+ }
+}
+
+void kqueue_reactor::cancel_ops(socket_type,
+ kqueue_reactor::per_descriptor_data& descriptor_data)
+{
+ if (!descriptor_data)
+ return;
+
+ mutex::scoped_lock descriptor_lock(descriptor_data->mutex_);
+
+ op_queue<operation> ops;
+ for (int i = 0; i < max_ops; ++i)
+ {
+ while (reactor_op* op = descriptor_data->op_queue_[i].front())
+ {
+ op->ec_ = boost::asio::error::operation_aborted;
+ descriptor_data->op_queue_[i].pop();
+ ops.push(op);
+ }
+ }
+
+ descriptor_lock.unlock();
+
+ io_service_.post_deferred_completions(ops);
+}
+
+void kqueue_reactor::deregister_descriptor(socket_type descriptor,
+ kqueue_reactor::per_descriptor_data& descriptor_data, bool closing)
+{
+ if (!descriptor_data)
+ return;
+
+ mutex::scoped_lock descriptor_lock(descriptor_data->mutex_);
+
+ if (!descriptor_data->shutdown_)
+ {
+ if (closing)
+ {
+ // The descriptor will be automatically removed from the kqueue when it
+ // is closed.
+ }
+ else
+ {
+ struct kevent events[2];
+ BOOST_ASIO_KQUEUE_EV_SET(&events[0], descriptor,
+ EVFILT_READ, EV_DELETE, 0, 0, 0);
+ BOOST_ASIO_KQUEUE_EV_SET(&events[1], descriptor,
+ EVFILT_WRITE, EV_DELETE, 0, 0, 0);
+ ::kevent(kqueue_fd_, events, 2, 0, 0, 0);
+ }
+
+ op_queue<operation> ops;
+ for (int i = 0; i < max_ops; ++i)
+ {
+ while (reactor_op* op = descriptor_data->op_queue_[i].front())
+ {
+ op->ec_ = boost::asio::error::operation_aborted;
+ descriptor_data->op_queue_[i].pop();
+ ops.push(op);
+ }
+ }
+
+ descriptor_data->descriptor_ = -1;
+ descriptor_data->shutdown_ = true;
+
+ descriptor_lock.unlock();
+
+ free_descriptor_state(descriptor_data);
+ descriptor_data = 0;
+
+ io_service_.post_deferred_completions(ops);
+ }
+}
+
+void kqueue_reactor::deregister_internal_descriptor(socket_type descriptor,
+ kqueue_reactor::per_descriptor_data& descriptor_data)
+{
+ if (!descriptor_data)
+ return;
+
+ mutex::scoped_lock descriptor_lock(descriptor_data->mutex_);
+
+ if (!descriptor_data->shutdown_)
+ {
+ struct kevent events[2];
+ BOOST_ASIO_KQUEUE_EV_SET(&events[0], descriptor,
+ EVFILT_READ, EV_DELETE, 0, 0, 0);
+ BOOST_ASIO_KQUEUE_EV_SET(&events[1], descriptor,
+ EVFILT_WRITE, EV_DELETE, 0, 0, 0);
+ ::kevent(kqueue_fd_, events, 2, 0, 0, 0);
+
+ op_queue<operation> ops;
+ for (int i = 0; i < max_ops; ++i)
+ ops.push(descriptor_data->op_queue_[i]);
+
+ descriptor_data->descriptor_ = -1;
+ descriptor_data->shutdown_ = true;
+
+ descriptor_lock.unlock();
+
+ free_descriptor_state(descriptor_data);
+ descriptor_data = 0;
+ }
+}
+
+void kqueue_reactor::run(bool block, op_queue<operation>& ops)
+{
+ mutex::scoped_lock lock(mutex_);
+
+ // Determine how long to block while waiting for events.
+ timespec timeout_buf = { 0, 0 };
+ timespec* timeout = block ? get_timeout(timeout_buf) : &timeout_buf;
+
+ lock.unlock();
+
+ // Block on the kqueue descriptor.
+ struct kevent events[128];
+ int num_events = kevent(kqueue_fd_, 0, 0, events, 128, timeout);
+
+ // Dispatch the waiting events.
+ for (int i = 0; i < num_events; ++i)
+ {
+ int descriptor = events[i].ident;
+ void* ptr = reinterpret_cast<void*>(events[i].udata);
+ if (ptr == &interrupter_)
+ {
+ // No need to reset the interrupter since we're leaving the descriptor
+ // in a ready-to-read state and relying on edge-triggered notifications.
+ }
+ else
+ {
+ descriptor_state* descriptor_data = static_cast<descriptor_state*>(ptr);
+ mutex::scoped_lock descriptor_lock(descriptor_data->mutex_);
+
+ // Exception operations must be processed first to ensure that any
+ // out-of-band data is read before normal data.
+#if defined(__NetBSD__)
+ static const unsigned int filter[max_ops] =
+#else
+ static const int filter[max_ops] =
+#endif
+ { EVFILT_READ, EVFILT_WRITE, EVFILT_READ };
+ for (int j = max_ops - 1; j >= 0; --j)
+ {
+ if (events[i].filter == filter[j])
+ {
+ if (j != except_op || events[i].flags & EV_OOBAND)
+ {
+ while (reactor_op* op = descriptor_data->op_queue_[j].front())
+ {
+ if (events[i].flags & EV_ERROR)
+ {
+ op->ec_ = boost::system::error_code(events[i].data,
+ boost::asio::error::get_system_category());
+ descriptor_data->op_queue_[j].pop();
+ ops.push(op);
+ }
+ if (op->perform())
+ {
+ descriptor_data->op_queue_[j].pop();
+ ops.push(op);
+ }
+ else
+ break;
+ }
+ }
+ }
+ }
+
+ // Renew registration for event notifications.
+ struct kevent event;
+ switch (events[i].filter)
+ {
+ case EVFILT_READ:
+ if (!descriptor_data->op_queue_[read_op].empty())
+ BOOST_ASIO_KQUEUE_EV_SET(&event, descriptor, EVFILT_READ,
+ EV_ADD | EV_CLEAR, 0, 0, descriptor_data);
+ else if (!descriptor_data->op_queue_[except_op].empty())
+ BOOST_ASIO_KQUEUE_EV_SET(&event, descriptor, EVFILT_READ,
+ EV_ADD | EV_CLEAR, EV_OOBAND, 0, descriptor_data);
+ else
+ continue;
+ break;
+ case EVFILT_WRITE:
+ if (!descriptor_data->op_queue_[write_op].empty())
+ BOOST_ASIO_KQUEUE_EV_SET(&event, descriptor, EVFILT_WRITE,
+ EV_ADD | EV_CLEAR, 0, 0, descriptor_data);
+ else
+ continue;
+ break;
+ default:
+ break;
+ }
+ if (::kevent(kqueue_fd_, &event, 1, 0, 0, 0) == -1)
+ {
+ boost::system::error_code error(errno,
+ boost::asio::error::get_system_category());
+ for (int j = 0; j < max_ops; ++j)
+ {
+ while (reactor_op* op = descriptor_data->op_queue_[j].front())
+ {
+ op->ec_ = error;
+ descriptor_data->op_queue_[j].pop();
+ ops.push(op);
+ }
+ }
+ }
+ }
+ }
+
+ lock.lock();
+ timer_queues_.get_ready_timers(ops);
+}
+
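+// Wake up the thread that is blocked in kevent(). Re-registering the
+// interrupter's read descriptor is sufficient: it is kept permanently
+// readable, so the registration immediately produces an event.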
+void kqueue_reactor::interrupt()
+{
+ struct kevent event;
+ BOOST_ASIO_KQUEUE_EV_SET(&event, interrupter_.read_descriptor(),
+ EVFILT_READ, EV_ADD | EV_CLEAR, 0, 0, &interrupter_);
+ ::kevent(kqueue_fd_, &event, 1, 0, 0, 0);
+}
+
+int kqueue_reactor::do_kqueue_create()
+{
+ int fd = ::kqueue();
+ if (fd == -1)
+ {
+ boost::system::error_code ec(errno,
+ boost::asio::error::get_system_category());
+ boost::asio::detail::throw_error(ec, "kqueue");
+ }
+ return fd;
+}
+
+kqueue_reactor::descriptor_state* kqueue_reactor::allocate_descriptor_state()
+{
+ mutex::scoped_lock descriptors_lock(registered_descriptors_mutex_);
+ return registered_descriptors_.alloc();
+}
+
+void kqueue_reactor::free_descriptor_state(kqueue_reactor::descriptor_state* s)
+{
+ mutex::scoped_lock descriptors_lock(registered_descriptors_mutex_);
+ registered_descriptors_.free(s);
+}
+
+void kqueue_reactor::do_add_timer_queue(timer_queue_base& queue)
+{
+ mutex::scoped_lock lock(mutex_);
+ timer_queues_.insert(&queue);
+}
+
+void kqueue_reactor::do_remove_timer_queue(timer_queue_base& queue)
+{
+ mutex::scoped_lock lock(mutex_);
+ timer_queues_.erase(&queue);
+}
+
+timespec* kqueue_reactor::get_timeout(timespec& ts)
+{
+  // By default we will wait no longer than 5 minutes. This ensures that any
+  // changes to the system clock are detected within at most this interval.
+ long usec = timer_queues_.wait_duration_usec(5 * 60 * 1000 * 1000);
+ ts.tv_sec = usec / 1000000;
+ ts.tv_nsec = (usec % 1000000) * 1000;
+ return &ts;
+}
+
+} // namespace detail
+} // namespace asio
+} // namespace boost
+
+#undef BOOST_ASIO_KQUEUE_EV_SET
+
+#include <boost/asio/detail/pop_options.hpp>
+
+#endif // defined(BOOST_ASIO_HAS_KQUEUE)
+
+#endif // BOOST_ASIO_DETAIL_IMPL_KQUEUE_REACTOR_IPP
diff --git a/boost/asio/detail/impl/pipe_select_interrupter.ipp b/boost/asio/detail/impl/pipe_select_interrupter.ipp
new file mode 100644
index 0000000000..75a8d16a41
--- /dev/null
+++ b/boost/asio/detail/impl/pipe_select_interrupter.ipp
@@ -0,0 +1,123 @@
+//
+// detail/impl/pipe_select_interrupter.ipp
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+//
+// Copyright (c) 2003-2012 Christopher M. Kohlhoff (chris at kohlhoff dot com)
+//
+// Distributed under the Boost Software License, Version 1.0. (See accompanying
+// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+//
+
+#ifndef BOOST_ASIO_DETAIL_IMPL_PIPE_SELECT_INTERRUPTER_IPP
+#define BOOST_ASIO_DETAIL_IMPL_PIPE_SELECT_INTERRUPTER_IPP
+
+#if defined(_MSC_VER) && (_MSC_VER >= 1200)
+# pragma once
+#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
+
+#include <boost/asio/detail/config.hpp>
+
+#if !defined(BOOST_WINDOWS)
+#if !defined(__CYGWIN__)
+#if !defined(__SYMBIAN32__)
+#if !defined(BOOST_ASIO_HAS_EVENTFD)
+
+#include <fcntl.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
+#include <boost/asio/detail/pipe_select_interrupter.hpp>
+#include <boost/asio/detail/throw_error.hpp>
+#include <boost/asio/error.hpp>
+
+#include <boost/asio/detail/push_options.hpp>
+
+namespace boost {
+namespace asio {
+namespace detail {
+
+pipe_select_interrupter::pipe_select_interrupter()
+{
+ open_descriptors();
+}
+
+void pipe_select_interrupter::open_descriptors()
+{
+ int pipe_fds[2];
+ if (pipe(pipe_fds) == 0)
+ {
+ read_descriptor_ = pipe_fds[0];
+ ::fcntl(read_descriptor_, F_SETFL, O_NONBLOCK);
+ write_descriptor_ = pipe_fds[1];
+ ::fcntl(write_descriptor_, F_SETFL, O_NONBLOCK);
+
+#if defined(FD_CLOEXEC)
+ ::fcntl(read_descriptor_, F_SETFD, FD_CLOEXEC);
+ ::fcntl(write_descriptor_, F_SETFD, FD_CLOEXEC);
+#endif // defined(FD_CLOEXEC)
+ }
+ else
+ {
+ boost::system::error_code ec(errno,
+ boost::asio::error::get_system_category());
+ boost::asio::detail::throw_error(ec, "pipe_select_interrupter");
+ }
+}
+
+pipe_select_interrupter::~pipe_select_interrupter()
+{
+ close_descriptors();
+}
+
+void pipe_select_interrupter::close_descriptors()
+{
+ if (read_descriptor_ != -1)
+ ::close(read_descriptor_);
+ if (write_descriptor_ != -1)
+ ::close(write_descriptor_);
+}
+
+void pipe_select_interrupter::recreate()
+{
+ close_descriptors();
+
+ write_descriptor_ = -1;
+ read_descriptor_ = -1;
+
+ open_descriptors();
+}
+
+void pipe_select_interrupter::interrupt()
+{
+ char byte = 0;
+ int result = ::write(write_descriptor_, &byte, 1);
+ (void)result;
+}
+
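+// Drain the read end of the pipe so that a subsequent select() will not
+// report it as ready again. Returns whether any interrupt bytes were
+// consumed; a read interrupted by EINTR is retried.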
+bool pipe_select_interrupter::reset()
+{
+ for (;;)
+ {
+ char data[1024];
+ int bytes_read = ::read(read_descriptor_, data, sizeof(data));
+ if (bytes_read < 0 && errno == EINTR)
+ continue;
+ bool was_interrupted = (bytes_read > 0);
+ while (bytes_read == sizeof(data))
+ bytes_read = ::read(read_descriptor_, data, sizeof(data));
+ return was_interrupted;
+ }
+}
+
+} // namespace detail
+} // namespace asio
+} // namespace boost
+
+#include <boost/asio/detail/pop_options.hpp>
+
+#endif // !defined(BOOST_ASIO_HAS_EVENTFD)
+#endif // !defined(__SYMBIAN32__)
+#endif // !defined(__CYGWIN__)
+#endif // !defined(BOOST_WINDOWS)
+
+#endif // BOOST_ASIO_DETAIL_IMPL_PIPE_SELECT_INTERRUPTER_IPP
diff --git a/boost/asio/detail/impl/posix_event.ipp b/boost/asio/detail/impl/posix_event.ipp
new file mode 100644
index 0000000000..08eae05c8a
--- /dev/null
+++ b/boost/asio/detail/impl/posix_event.ipp
@@ -0,0 +1,48 @@
+//
+// detail/impl/posix_event.ipp
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~
+//
+// Copyright (c) 2003-2012 Christopher M. Kohlhoff (chris at kohlhoff dot com)
+//
+// Distributed under the Boost Software License, Version 1.0. (See accompanying
+// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+//
+
+#ifndef BOOST_ASIO_DETAIL_IMPL_POSIX_EVENT_IPP
+#define BOOST_ASIO_DETAIL_IMPL_POSIX_EVENT_IPP
+
+#if defined(_MSC_VER) && (_MSC_VER >= 1200)
+# pragma once
+#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
+
+#include <boost/asio/detail/config.hpp>
+
+#if defined(BOOST_HAS_PTHREADS) && !defined(BOOST_ASIO_DISABLE_THREADS)
+
+#include <boost/asio/detail/posix_event.hpp>
+#include <boost/asio/detail/throw_error.hpp>
+
+#include <boost/asio/detail/push_options.hpp>
+
+namespace boost {
+namespace asio {
+namespace detail {
+
+posix_event::posix_event()
+ : signalled_(false)
+{
+ int error = ::pthread_cond_init(&cond_, 0);
+ boost::system::error_code ec(error,
+ boost::asio::error::get_system_category());
+ boost::asio::detail::throw_error(ec, "event");
+}
+
+} // namespace detail
+} // namespace asio
+} // namespace boost
+
+#include <boost/asio/detail/pop_options.hpp>
+
+#endif // defined(BOOST_HAS_PTHREADS) && !defined(BOOST_ASIO_DISABLE_THREADS)
+
+#endif // BOOST_ASIO_DETAIL_IMPL_POSIX_EVENT_IPP
diff --git a/boost/asio/detail/impl/posix_mutex.ipp b/boost/asio/detail/impl/posix_mutex.ipp
new file mode 100644
index 0000000000..94b9bf4a0b
--- /dev/null
+++ b/boost/asio/detail/impl/posix_mutex.ipp
@@ -0,0 +1,48 @@
+//
+// detail/impl/posix_mutex.ipp
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~
+//
+// Copyright (c) 2003-2012 Christopher M. Kohlhoff (chris at kohlhoff dot com)
+//
+// Distributed under the Boost Software License, Version 1.0. (See accompanying
+// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+//
+
+#ifndef BOOST_ASIO_DETAIL_IMPL_POSIX_MUTEX_IPP
+#define BOOST_ASIO_DETAIL_IMPL_POSIX_MUTEX_IPP
+
+#if defined(_MSC_VER) && (_MSC_VER >= 1200)
+# pragma once
+#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
+
+#include <boost/asio/detail/config.hpp>
+
+#if defined(BOOST_HAS_PTHREADS) && !defined(BOOST_ASIO_DISABLE_THREADS)
+
+#include <boost/asio/detail/posix_mutex.hpp>
+#include <boost/asio/detail/throw_error.hpp>
+#include <boost/asio/error.hpp>
+
+#include <boost/asio/detail/push_options.hpp>
+
+namespace boost {
+namespace asio {
+namespace detail {
+
+posix_mutex::posix_mutex()
+{
+ int error = ::pthread_mutex_init(&mutex_, 0);
+ boost::system::error_code ec(error,
+ boost::asio::error::get_system_category());
+ boost::asio::detail::throw_error(ec, "mutex");
+}
+
+} // namespace detail
+} // namespace asio
+} // namespace boost
+
+#include <boost/asio/detail/pop_options.hpp>
+
+#endif // defined(BOOST_HAS_PTHREADS) && !defined(BOOST_ASIO_DISABLE_THREADS)
+
+#endif // BOOST_ASIO_DETAIL_IMPL_POSIX_MUTEX_IPP
diff --git a/boost/asio/detail/impl/posix_thread.ipp b/boost/asio/detail/impl/posix_thread.ipp
new file mode 100644
index 0000000000..0c529715d3
--- /dev/null
+++ b/boost/asio/detail/impl/posix_thread.ipp
@@ -0,0 +1,76 @@
+//
+// detail/impl/posix_thread.ipp
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+//
+// Copyright (c) 2003-2012 Christopher M. Kohlhoff (chris at kohlhoff dot com)
+//
+// Distributed under the Boost Software License, Version 1.0. (See accompanying
+// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+//
+
+#ifndef BOOST_ASIO_DETAIL_IMPL_POSIX_THREAD_IPP
+#define BOOST_ASIO_DETAIL_IMPL_POSIX_THREAD_IPP
+
+#if defined(_MSC_VER) && (_MSC_VER >= 1200)
+# pragma once
+#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
+
+#include <boost/asio/detail/config.hpp>
+
+#if defined(BOOST_HAS_PTHREADS) && !defined(BOOST_ASIO_DISABLE_THREADS)
+
+#include <boost/asio/detail/posix_thread.hpp>
+#include <boost/asio/detail/throw_error.hpp>
+#include <boost/asio/error.hpp>
+
+#include <boost/asio/detail/push_options.hpp>
+
+namespace boost {
+namespace asio {
+namespace detail {
+
+posix_thread::~posix_thread()
+{
+ if (!joined_)
+ ::pthread_detach(thread_);
+}
+
+void posix_thread::join()
+{
+ if (!joined_)
+ {
+ ::pthread_join(thread_, 0);
+ joined_ = true;
+ }
+}
+
+void posix_thread::start_thread(func_base* arg)
+{
+ int error = ::pthread_create(&thread_, 0,
+ boost_asio_detail_posix_thread_function, arg);
+ if (error != 0)
+ {
+ delete arg;
+ boost::system::error_code ec(error,
+ boost::asio::error::get_system_category());
+ boost::asio::detail::throw_error(ec, "thread");
+ }
+}
+
+void* boost_asio_detail_posix_thread_function(void* arg)
+{
+ posix_thread::auto_func_base_ptr func = {
+ static_cast<posix_thread::func_base*>(arg) };
+ func.ptr->run();
+ return 0;
+}
+
+} // namespace detail
+} // namespace asio
+} // namespace boost
+
+#include <boost/asio/detail/pop_options.hpp>
+
+#endif // defined(BOOST_HAS_PTHREADS) && !defined(BOOST_ASIO_DISABLE_THREADS)
+
+#endif // BOOST_ASIO_DETAIL_IMPL_POSIX_THREAD_IPP
diff --git a/boost/asio/detail/impl/posix_tss_ptr.ipp b/boost/asio/detail/impl/posix_tss_ptr.ipp
new file mode 100644
index 0000000000..5124c5fb3b
--- /dev/null
+++ b/boost/asio/detail/impl/posix_tss_ptr.ipp
@@ -0,0 +1,48 @@
+//
+// detail/impl/posix_tss_ptr.ipp
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+//
+// Copyright (c) 2003-2012 Christopher M. Kohlhoff (chris at kohlhoff dot com)
+//
+// Distributed under the Boost Software License, Version 1.0. (See accompanying
+// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+//
+
+#ifndef BOOST_ASIO_DETAIL_IMPL_POSIX_TSS_PTR_IPP
+#define BOOST_ASIO_DETAIL_IMPL_POSIX_TSS_PTR_IPP
+
+#if defined(_MSC_VER) && (_MSC_VER >= 1200)
+# pragma once
+#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
+
+#include <boost/asio/detail/config.hpp>
+
+#if defined(BOOST_HAS_PTHREADS) && !defined(BOOST_ASIO_DISABLE_THREADS)
+
+#include <boost/asio/detail/posix_tss_ptr.hpp>
+#include <boost/asio/detail/throw_error.hpp>
+#include <boost/asio/error.hpp>
+
+#include <boost/asio/detail/push_options.hpp>
+
+namespace boost {
+namespace asio {
+namespace detail {
+
+void posix_tss_ptr_create(pthread_key_t& key)
+{
+ int error = ::pthread_key_create(&key, 0);
+ boost::system::error_code ec(error,
+ boost::asio::error::get_system_category());
+ boost::asio::detail::throw_error(ec, "tss");
+}
+
+} // namespace detail
+} // namespace asio
+} // namespace boost
+
+#include <boost/asio/detail/pop_options.hpp>
+
+#endif // defined(BOOST_HAS_PTHREADS) && !defined(BOOST_ASIO_DISABLE_THREADS)
+
+#endif // BOOST_ASIO_DETAIL_IMPL_POSIX_TSS_PTR_IPP
diff --git a/boost/asio/detail/impl/reactive_descriptor_service.ipp b/boost/asio/detail/impl/reactive_descriptor_service.ipp
new file mode 100644
index 0000000000..dff0a82292
--- /dev/null
+++ b/boost/asio/detail/impl/reactive_descriptor_service.ipp
@@ -0,0 +1,205 @@
+//
+// detail/impl/reactive_descriptor_service.ipp
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+//
+// Copyright (c) 2003-2012 Christopher M. Kohlhoff (chris at kohlhoff dot com)
+//
+// Distributed under the Boost Software License, Version 1.0. (See accompanying
+// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+//
+
+#ifndef BOOST_ASIO_DETAIL_IMPL_REACTIVE_DESCRIPTOR_SERVICE_IPP
+#define BOOST_ASIO_DETAIL_IMPL_REACTIVE_DESCRIPTOR_SERVICE_IPP
+
+#if defined(_MSC_VER) && (_MSC_VER >= 1200)
+# pragma once
+#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
+
+#include <boost/asio/detail/config.hpp>
+
+#if !defined(BOOST_WINDOWS) && !defined(__CYGWIN__)
+
+#include <boost/asio/error.hpp>
+#include <boost/asio/detail/reactive_descriptor_service.hpp>
+
+#include <boost/asio/detail/push_options.hpp>
+
+namespace boost {
+namespace asio {
+namespace detail {
+
+reactive_descriptor_service::reactive_descriptor_service(
+ boost::asio::io_service& io_service)
+ : reactor_(boost::asio::use_service<reactor>(io_service))
+{
+ reactor_.init_task();
+}
+
+void reactive_descriptor_service::shutdown_service()
+{
+}
+
+void reactive_descriptor_service::construct(
+ reactive_descriptor_service::implementation_type& impl)
+{
+ impl.descriptor_ = -1;
+ impl.state_ = 0;
+}
+
+void reactive_descriptor_service::move_construct(
+ reactive_descriptor_service::implementation_type& impl,
+ reactive_descriptor_service::implementation_type& other_impl)
+{
+ impl.descriptor_ = other_impl.descriptor_;
+ other_impl.descriptor_ = -1;
+
+ impl.state_ = other_impl.state_;
+ other_impl.state_ = 0;
+
+ reactor_.move_descriptor(impl.descriptor_,
+ impl.reactor_data_, other_impl.reactor_data_);
+}
+
+void reactive_descriptor_service::move_assign(
+ reactive_descriptor_service::implementation_type& impl,
+ reactive_descriptor_service& other_service,
+ reactive_descriptor_service::implementation_type& other_impl)
+{
+ destroy(impl);
+
+ impl.descriptor_ = other_impl.descriptor_;
+ other_impl.descriptor_ = -1;
+
+ impl.state_ = other_impl.state_;
+ other_impl.state_ = 0;
+
+ other_service.reactor_.move_descriptor(impl.descriptor_,
+ impl.reactor_data_, other_impl.reactor_data_);
+}
+
+void reactive_descriptor_service::destroy(
+ reactive_descriptor_service::implementation_type& impl)
+{
+ if (is_open(impl))
+ {
+ BOOST_ASIO_HANDLER_OPERATION(("descriptor", &impl, "close"));
+
+ reactor_.deregister_descriptor(impl.descriptor_, impl.reactor_data_,
+ (impl.state_ & descriptor_ops::possible_dup) == 0);
+ }
+
+ boost::system::error_code ignored_ec;
+ descriptor_ops::close(impl.descriptor_, impl.state_, ignored_ec);
+}
+
+boost::system::error_code reactive_descriptor_service::assign(
+ reactive_descriptor_service::implementation_type& impl,
+ const native_handle_type& native_descriptor, boost::system::error_code& ec)
+{
+ if (is_open(impl))
+ {
+ ec = boost::asio::error::already_open;
+ return ec;
+ }
+
+ if (int err = reactor_.register_descriptor(
+ native_descriptor, impl.reactor_data_))
+ {
+ ec = boost::system::error_code(err,
+ boost::asio::error::get_system_category());
+ return ec;
+ }
+
+ impl.descriptor_ = native_descriptor;
+ impl.state_ = descriptor_ops::possible_dup;
+ ec = boost::system::error_code();
+ return ec;
+}
+
+boost::system::error_code reactive_descriptor_service::close(
+ reactive_descriptor_service::implementation_type& impl,
+ boost::system::error_code& ec)
+{
+ if (is_open(impl))
+ {
+ BOOST_ASIO_HANDLER_OPERATION(("descriptor", &impl, "close"));
+
+ reactor_.deregister_descriptor(impl.descriptor_, impl.reactor_data_,
+ (impl.state_ & descriptor_ops::possible_dup) == 0);
+ }
+
+ descriptor_ops::close(impl.descriptor_, impl.state_, ec);
+
+ // The descriptor is closed by the OS even if close() returns an error.
+ //
+ // (Actually, POSIX says the state of the descriptor is unspecified. On
+ // Linux the descriptor is apparently closed anyway; e.g. see
+ // http://lkml.org/lkml/2005/9/10/129
+ // We'll just have to assume that other OSes follow the same behaviour.)
+ construct(impl);
+
+ return ec;
+}
+
+reactive_descriptor_service::native_handle_type
+reactive_descriptor_service::release(
+ reactive_descriptor_service::implementation_type& impl)
+{
+ native_handle_type descriptor = impl.descriptor_;
+
+ if (is_open(impl))
+ {
+ BOOST_ASIO_HANDLER_OPERATION(("descriptor", &impl, "release"));
+
+ reactor_.deregister_descriptor(impl.descriptor_, impl.reactor_data_, false);
+ construct(impl);
+ }
+
+ return descriptor;
+}
+
+boost::system::error_code reactive_descriptor_service::cancel(
+ reactive_descriptor_service::implementation_type& impl,
+ boost::system::error_code& ec)
+{
+ if (!is_open(impl))
+ {
+ ec = boost::asio::error::bad_descriptor;
+ return ec;
+ }
+
+ BOOST_ASIO_HANDLER_OPERATION(("descriptor", &impl, "cancel"));
+
+ reactor_.cancel_ops(impl.descriptor_, impl.reactor_data_);
+ ec = boost::system::error_code();
+ return ec;
+}
+
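+// Start an asynchronous operation. Unless this is a no-op, the descriptor is
+// put into non-blocking mode (if it is not already) and the operation is
+// handed to the reactor; otherwise the operation is posted for immediate
+// completion, carrying any error recorded while changing the mode.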
+void reactive_descriptor_service::start_op(
+ reactive_descriptor_service::implementation_type& impl,
+ int op_type, reactor_op* op, bool is_non_blocking, bool noop)
+{
+ if (!noop)
+ {
+ if ((impl.state_ & descriptor_ops::non_blocking) ||
+ descriptor_ops::set_internal_non_blocking(
+ impl.descriptor_, impl.state_, true, op->ec_))
+ {
+ reactor_.start_op(op_type, impl.descriptor_,
+ impl.reactor_data_, op, is_non_blocking);
+ return;
+ }
+ }
+
+ reactor_.post_immediate_completion(op);
+}
+
+} // namespace detail
+} // namespace asio
+} // namespace boost
+
+#include <boost/asio/detail/pop_options.hpp>
+
+#endif // !defined(BOOST_WINDOWS) && !defined(__CYGWIN__)
+
+#endif // BOOST_ASIO_DETAIL_IMPL_REACTIVE_DESCRIPTOR_SERVICE_IPP
diff --git a/boost/asio/detail/impl/reactive_serial_port_service.ipp b/boost/asio/detail/impl/reactive_serial_port_service.ipp
new file mode 100644
index 0000000000..0f530d7b52
--- /dev/null
+++ b/boost/asio/detail/impl/reactive_serial_port_service.ipp
@@ -0,0 +1,153 @@
+//
+// detail/impl/reactive_serial_port_service.ipp
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+//
+// Copyright (c) 2003-2012 Christopher M. Kohlhoff (chris at kohlhoff dot com)
+// Copyright (c) 2008 Rep Invariant Systems, Inc. (info@repinvariant.com)
+//
+// Distributed under the Boost Software License, Version 1.0. (See accompanying
+// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+//
+
+#ifndef BOOST_ASIO_DETAIL_IMPL_REACTIVE_SERIAL_PORT_SERVICE_IPP
+#define BOOST_ASIO_DETAIL_IMPL_REACTIVE_SERIAL_PORT_SERVICE_IPP
+
+#if defined(_MSC_VER) && (_MSC_VER >= 1200)
+# pragma once
+#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
+
+#include <boost/asio/detail/config.hpp>
+
+#if defined(BOOST_ASIO_HAS_SERIAL_PORT)
+#if !defined(BOOST_WINDOWS) && !defined(__CYGWIN__)
+
+#include <cstring>
+#include <boost/asio/detail/reactive_serial_port_service.hpp>
+
+#include <boost/asio/detail/push_options.hpp>
+
+namespace boost {
+namespace asio {
+namespace detail {
+
+reactive_serial_port_service::reactive_serial_port_service(
+ boost::asio::io_service& io_service)
+ : descriptor_service_(io_service)
+{
+}
+
+void reactive_serial_port_service::shutdown_service()
+{
+ descriptor_service_.shutdown_service();
+}
+
+boost::system::error_code reactive_serial_port_service::open(
+ reactive_serial_port_service::implementation_type& impl,
+ const std::string& device, boost::system::error_code& ec)
+{
+ if (is_open(impl))
+ {
+ ec = boost::asio::error::already_open;
+ return ec;
+ }
+
+ descriptor_ops::state_type state = 0;
+ int fd = descriptor_ops::open(device.c_str(),
+ O_RDWR | O_NONBLOCK | O_NOCTTY, ec);
+ if (fd < 0)
+ return ec;
+
+ int s = descriptor_ops::fcntl(fd, F_GETFL, ec);
+ if (s >= 0)
+ s = descriptor_ops::fcntl(fd, F_SETFL, s | O_NONBLOCK, ec);
+ if (s < 0)
+ {
+ boost::system::error_code ignored_ec;
+ descriptor_ops::close(fd, state, ignored_ec);
+ return ec;
+ }
+
+ // Set up default serial port options.
+ termios ios;
+ errno = 0;
+ s = descriptor_ops::error_wrapper(::tcgetattr(fd, &ios), ec);
+ if (s >= 0)
+ {
+#if defined(_BSD_SOURCE)
+ ::cfmakeraw(&ios);
+#else
+ ios.c_iflag &= ~(IGNBRK | BRKINT | PARMRK
+ | ISTRIP | INLCR | IGNCR | ICRNL | IXON);
+ ios.c_oflag &= ~OPOST;
+ ios.c_lflag &= ~(ECHO | ECHONL | ICANON | ISIG | IEXTEN);
+ ios.c_cflag &= ~(CSIZE | PARENB);
+ ios.c_cflag |= CS8;
+#endif
+ ios.c_iflag |= IGNPAR;
+ ios.c_cflag |= CREAD | CLOCAL;
+ errno = 0;
+ s = descriptor_ops::error_wrapper(::tcsetattr(fd, TCSANOW, &ios), ec);
+ }
+ if (s < 0)
+ {
+ boost::system::error_code ignored_ec;
+ descriptor_ops::close(fd, state, ignored_ec);
+ return ec;
+ }
+
+ // We're done. Take ownership of the serial port descriptor.
+ if (descriptor_service_.assign(impl, fd, ec))
+ {
+ boost::system::error_code ignored_ec;
+ descriptor_ops::close(fd, state, ignored_ec);
+ }
+
+ return ec;
+}
+
+boost::system::error_code reactive_serial_port_service::do_set_option(
+ reactive_serial_port_service::implementation_type& impl,
+ reactive_serial_port_service::store_function_type store,
+ const void* option, boost::system::error_code& ec)
+{
+ termios ios;
+ errno = 0;
+ descriptor_ops::error_wrapper(::tcgetattr(
+ descriptor_service_.native_handle(impl), &ios), ec);
+ if (ec)
+ return ec;
+
+ if (store(option, ios, ec))
+ return ec;
+
+ errno = 0;
+ descriptor_ops::error_wrapper(::tcsetattr(
+ descriptor_service_.native_handle(impl), TCSANOW, &ios), ec);
+ return ec;
+}
+
+boost::system::error_code reactive_serial_port_service::do_get_option(
+ const reactive_serial_port_service::implementation_type& impl,
+ reactive_serial_port_service::load_function_type load,
+ void* option, boost::system::error_code& ec) const
+{
+ termios ios;
+ errno = 0;
+ descriptor_ops::error_wrapper(::tcgetattr(
+ descriptor_service_.native_handle(impl), &ios), ec);
+ if (ec)
+ return ec;
+
+ return load(option, ios, ec);
+}
+
+} // namespace detail
+} // namespace asio
+} // namespace boost
+
+#include <boost/asio/detail/pop_options.hpp>
+
+#endif // !defined(BOOST_WINDOWS) && !defined(__CYGWIN__)
+#endif // defined(BOOST_ASIO_HAS_SERIAL_PORT)
+
+#endif // BOOST_ASIO_DETAIL_IMPL_REACTIVE_SERIAL_PORT_SERVICE_IPP
diff --git a/boost/asio/detail/impl/reactive_socket_service_base.ipp b/boost/asio/detail/impl/reactive_socket_service_base.ipp
new file mode 100644
index 0000000000..93277e0846
--- /dev/null
+++ b/boost/asio/detail/impl/reactive_socket_service_base.ipp
@@ -0,0 +1,265 @@
+//
+// detail/impl/reactive_socket_service_base.ipp
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+//
+// Copyright (c) 2003-2012 Christopher M. Kohlhoff (chris at kohlhoff dot com)
+//
+// Distributed under the Boost Software License, Version 1.0. (See accompanying
+// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+//
+
+#ifndef BOOST_ASIO_DETAIL_IMPL_REACTIVE_SOCKET_SERVICE_BASE_IPP
+#define BOOST_ASIO_DETAIL_IMPL_REACTIVE_SOCKET_SERVICE_BASE_IPP
+
+#if defined(_MSC_VER) && (_MSC_VER >= 1200)
+# pragma once
+#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
+
+#include <boost/asio/detail/config.hpp>
+
+#if !defined(BOOST_ASIO_HAS_IOCP)
+
+#include <boost/asio/detail/reactive_socket_service_base.hpp>
+
+#include <boost/asio/detail/push_options.hpp>
+
+namespace boost {
+namespace asio {
+namespace detail {
+
+reactive_socket_service_base::reactive_socket_service_base(
+ boost::asio::io_service& io_service)
+ : reactor_(use_service<reactor>(io_service))
+{
+ reactor_.init_task();
+}
+
+void reactive_socket_service_base::shutdown_service()
+{
+}
+
+void reactive_socket_service_base::construct(
+ reactive_socket_service_base::base_implementation_type& impl)
+{
+ impl.socket_ = invalid_socket;
+ impl.state_ = 0;
+}
+
+void reactive_socket_service_base::base_move_construct(
+ reactive_socket_service_base::base_implementation_type& impl,
+ reactive_socket_service_base::base_implementation_type& other_impl)
+{
+ impl.socket_ = other_impl.socket_;
+ other_impl.socket_ = invalid_socket;
+
+ impl.state_ = other_impl.state_;
+ other_impl.state_ = 0;
+
+ reactor_.move_descriptor(impl.socket_,
+ impl.reactor_data_, other_impl.reactor_data_);
+}
+
+void reactive_socket_service_base::base_move_assign(
+ reactive_socket_service_base::base_implementation_type& impl,
+ reactive_socket_service_base& other_service,
+ reactive_socket_service_base::base_implementation_type& other_impl)
+{
+ destroy(impl);
+
+ impl.socket_ = other_impl.socket_;
+ other_impl.socket_ = invalid_socket;
+
+ impl.state_ = other_impl.state_;
+ other_impl.state_ = 0;
+
+ other_service.reactor_.move_descriptor(impl.socket_,
+ impl.reactor_data_, other_impl.reactor_data_);
+}
+
+void reactive_socket_service_base::destroy(
+ reactive_socket_service_base::base_implementation_type& impl)
+{
+ if (impl.socket_ != invalid_socket)
+ {
+ BOOST_ASIO_HANDLER_OPERATION(("socket", &impl, "close"));
+
+ reactor_.deregister_descriptor(impl.socket_, impl.reactor_data_,
+ (impl.state_ & socket_ops::possible_dup) == 0);
+
+ boost::system::error_code ignored_ec;
+ socket_ops::close(impl.socket_, impl.state_, true, ignored_ec);
+ }
+}
+
+boost::system::error_code reactive_socket_service_base::close(
+ reactive_socket_service_base::base_implementation_type& impl,
+ boost::system::error_code& ec)
+{
+ if (is_open(impl))
+ {
+ BOOST_ASIO_HANDLER_OPERATION(("socket", &impl, "close"));
+
+ reactor_.deregister_descriptor(impl.socket_, impl.reactor_data_,
+ (impl.state_ & socket_ops::possible_dup) == 0);
+ }
+
+ socket_ops::close(impl.socket_, impl.state_, false, ec);
+
+ // The descriptor is closed by the OS even if close() returns an error.
+ //
+ // (Actually, POSIX says the state of the descriptor is unspecified. On
+ // Linux the descriptor is apparently closed anyway; e.g. see
+ // http://lkml.org/lkml/2005/9/10/129
+ // We'll just have to assume that other OSes follow the same behaviour. The
+ // known exception is when Windows's closesocket() function fails with
+ // WSAEWOULDBLOCK, but this case is handled inside socket_ops::close().
+ construct(impl);
+
+ return ec;
+}
+
+boost::system::error_code reactive_socket_service_base::cancel(
+ reactive_socket_service_base::base_implementation_type& impl,
+ boost::system::error_code& ec)
+{
+ if (!is_open(impl))
+ {
+ ec = boost::asio::error::bad_descriptor;
+ return ec;
+ }
+
+ BOOST_ASIO_HANDLER_OPERATION(("socket", &impl, "cancel"));
+
+ reactor_.cancel_ops(impl.socket_, impl.reactor_data_);
+ ec = boost::system::error_code();
+ return ec;
+}
+
+boost::system::error_code reactive_socket_service_base::do_open(
+ reactive_socket_service_base::base_implementation_type& impl,
+ int af, int type, int protocol, boost::system::error_code& ec)
+{
+ if (is_open(impl))
+ {
+ ec = boost::asio::error::already_open;
+ return ec;
+ }
+
+ socket_holder sock(socket_ops::socket(af, type, protocol, ec));
+ if (sock.get() == invalid_socket)
+ return ec;
+
+ if (int err = reactor_.register_descriptor(sock.get(), impl.reactor_data_))
+ {
+ ec = boost::system::error_code(err,
+ boost::asio::error::get_system_category());
+ return ec;
+ }
+
+ impl.socket_ = sock.release();
+ switch (type)
+ {
+ case SOCK_STREAM: impl.state_ = socket_ops::stream_oriented; break;
+ case SOCK_DGRAM: impl.state_ = socket_ops::datagram_oriented; break;
+ default: impl.state_ = 0; break;
+ }
+ ec = boost::system::error_code();
+ return ec;
+}
+
+boost::system::error_code reactive_socket_service_base::do_assign(
+ reactive_socket_service_base::base_implementation_type& impl, int type,
+ const reactive_socket_service_base::native_handle_type& native_socket,
+ boost::system::error_code& ec)
+{
+ if (is_open(impl))
+ {
+ ec = boost::asio::error::already_open;
+ return ec;
+ }
+
+ if (int err = reactor_.register_descriptor(
+ native_socket, impl.reactor_data_))
+ {
+ ec = boost::system::error_code(err,
+ boost::asio::error::get_system_category());
+ return ec;
+ }
+
+ impl.socket_ = native_socket;
+ switch (type)
+ {
+ case SOCK_STREAM: impl.state_ = socket_ops::stream_oriented; break;
+ case SOCK_DGRAM: impl.state_ = socket_ops::datagram_oriented; break;
+ default: impl.state_ = 0; break;
+ }
+ impl.state_ |= socket_ops::possible_dup;
+ ec = boost::system::error_code();
+ return ec;
+}
+
+void reactive_socket_service_base::start_op(
+ reactive_socket_service_base::base_implementation_type& impl,
+ int op_type, reactor_op* op, bool is_non_blocking, bool noop)
+{
+ if (!noop)
+ {
+ if ((impl.state_ & socket_ops::non_blocking)
+ || socket_ops::set_internal_non_blocking(
+ impl.socket_, impl.state_, true, op->ec_))
+ {
+ reactor_.start_op(op_type, impl.socket_,
+ impl.reactor_data_, op, is_non_blocking);
+ return;
+ }
+ }
+
+ reactor_.post_immediate_completion(op);
+}
+
+void reactive_socket_service_base::start_accept_op(
+ reactive_socket_service_base::base_implementation_type& impl,
+ reactor_op* op, bool peer_is_open)
+{
+ if (!peer_is_open)
+ start_op(impl, reactor::read_op, op, true, false);
+ else
+ {
+ op->ec_ = boost::asio::error::already_open;
+ reactor_.post_immediate_completion(op);
+ }
+}
+
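+// Start an asynchronous connect. A speculative connect() call is made first;
+// if it reports in-progress or would-block, the operation is registered with
+// the reactor to await write readiness, otherwise it is posted for immediate
+// completion with the result of the call.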
+void reactive_socket_service_base::start_connect_op(
+ reactive_socket_service_base::base_implementation_type& impl,
+ reactor_op* op, const socket_addr_type* addr, size_t addrlen)
+{
+ if ((impl.state_ & socket_ops::non_blocking)
+ || socket_ops::set_internal_non_blocking(
+ impl.socket_, impl.state_, true, op->ec_))
+ {
+ if (socket_ops::connect(impl.socket_, addr, addrlen, op->ec_) != 0)
+ {
+ if (op->ec_ == boost::asio::error::in_progress
+ || op->ec_ == boost::asio::error::would_block)
+ {
+ op->ec_ = boost::system::error_code();
+ reactor_.start_op(reactor::connect_op,
+ impl.socket_, impl.reactor_data_, op, false);
+ return;
+ }
+ }
+ }
+
+ reactor_.post_immediate_completion(op);
+}
+
+} // namespace detail
+} // namespace asio
+} // namespace boost
+
+#include <boost/asio/detail/pop_options.hpp>
+
+#endif // !defined(BOOST_ASIO_HAS_IOCP)
+
+#endif // BOOST_ASIO_DETAIL_IMPL_REACTIVE_SOCKET_SERVICE_BASE_IPP
diff --git a/boost/asio/detail/impl/resolver_service_base.ipp b/boost/asio/detail/impl/resolver_service_base.ipp
new file mode 100644
index 0000000000..6a384e4f8b
--- /dev/null
+++ b/boost/asio/detail/impl/resolver_service_base.ipp
@@ -0,0 +1,132 @@
+//
+// detail/impl/resolver_service_base.ipp
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+//
+// Copyright (c) 2003-2012 Christopher M. Kohlhoff (chris at kohlhoff dot com)
+//
+// Distributed under the Boost Software License, Version 1.0. (See accompanying
+// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+//
+
+#ifndef BOOST_ASIO_DETAIL_IMPL_RESOLVER_SERVICE_BASE_IPP
+#define BOOST_ASIO_DETAIL_IMPL_RESOLVER_SERVICE_BASE_IPP
+
+#if defined(_MSC_VER) && (_MSC_VER >= 1200)
+# pragma once
+#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
+
+#include <boost/asio/detail/config.hpp>
+#include <boost/asio/detail/resolver_service_base.hpp>
+
+#include <boost/asio/detail/push_options.hpp>
+
+namespace boost {
+namespace asio {
+namespace detail {
+
+class resolver_service_base::work_io_service_runner
+{
+public:
+ work_io_service_runner(boost::asio::io_service& io_service)
+ : io_service_(io_service) {}
+ void operator()() { io_service_.run(); }
+private:
+ boost::asio::io_service& io_service_;
+};
+
+resolver_service_base::resolver_service_base(
+ boost::asio::io_service& io_service)
+ : io_service_impl_(boost::asio::use_service<io_service_impl>(io_service)),
+ work_io_service_(new boost::asio::io_service),
+ work_io_service_impl_(boost::asio::use_service<
+ io_service_impl>(*work_io_service_)),
+ work_(new boost::asio::io_service::work(*work_io_service_)),
+ work_thread_(0)
+{
+}
+
+resolver_service_base::~resolver_service_base()
+{
+ shutdown_service();
+}
+
+void resolver_service_base::shutdown_service()
+{
+ work_.reset();
+ if (work_io_service_.get())
+ {
+ work_io_service_->stop();
+ if (work_thread_.get())
+ {
+ work_thread_->join();
+ work_thread_.reset();
+ }
+ work_io_service_.reset();
+ }
+}
+
+void resolver_service_base::fork_service(
+ boost::asio::io_service::fork_event fork_ev)
+{
+ if (work_thread_.get())
+ {
+ if (fork_ev == boost::asio::io_service::fork_prepare)
+ {
+ work_io_service_->stop();
+ work_thread_->join();
+ }
+ else
+ {
+ work_io_service_->reset();
+ work_thread_.reset(new boost::asio::detail::thread(
+ work_io_service_runner(*work_io_service_)));
+ }
+ }
+}
+
+void resolver_service_base::construct(
+ resolver_service_base::implementation_type& impl)
+{
+ impl.reset(static_cast<void*>(0), socket_ops::noop_deleter());
+}
+
+void resolver_service_base::destroy(
+ resolver_service_base::implementation_type& impl)
+{
+ BOOST_ASIO_HANDLER_OPERATION(("resolver", &impl, "cancel"));
+
+ impl.reset();
+}
+
+void resolver_service_base::cancel(
+ resolver_service_base::implementation_type& impl)
+{
+ BOOST_ASIO_HANDLER_OPERATION(("resolver", &impl, "cancel"));
+
+ impl.reset(static_cast<void*>(0), socket_ops::noop_deleter());
+}
+
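+// Post the resolve operation to the private work io_service, which is run by
+// a background thread that is started on demand.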
+void resolver_service_base::start_resolve_op(operation* op)
+{
+ start_work_thread();
+ io_service_impl_.work_started();
+ work_io_service_impl_.post_immediate_completion(op);
+}
+
+void resolver_service_base::start_work_thread()
+{
+ boost::asio::detail::mutex::scoped_lock lock(mutex_);
+ if (!work_thread_.get())
+ {
+ work_thread_.reset(new boost::asio::detail::thread(
+ work_io_service_runner(*work_io_service_)));
+ }
+}
+
+} // namespace detail
+} // namespace asio
+} // namespace boost
+
+#include <boost/asio/detail/pop_options.hpp>
+
+#endif // BOOST_ASIO_DETAIL_IMPL_RESOLVER_SERVICE_BASE_IPP
diff --git a/boost/asio/detail/impl/select_reactor.hpp b/boost/asio/detail/impl/select_reactor.hpp
new file mode 100644
index 0000000000..0d4097ee03
--- /dev/null
+++ b/boost/asio/detail/impl/select_reactor.hpp
@@ -0,0 +1,87 @@
+//
+// detail/impl/select_reactor.hpp
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+//
+// Copyright (c) 2003-2012 Christopher M. Kohlhoff (chris at kohlhoff dot com)
+//
+// Distributed under the Boost Software License, Version 1.0. (See accompanying
+// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+//
+
+#ifndef BOOST_ASIO_DETAIL_IMPL_SELECT_REACTOR_HPP
+#define BOOST_ASIO_DETAIL_IMPL_SELECT_REACTOR_HPP
+
+#if defined(_MSC_VER) && (_MSC_VER >= 1200)
+# pragma once
+#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
+
+#include <boost/asio/detail/config.hpp>
+
+#if defined(BOOST_ASIO_HAS_IOCP) \
+ || (!defined(BOOST_ASIO_HAS_DEV_POLL) \
+ && !defined(BOOST_ASIO_HAS_EPOLL) \
+ && !defined(BOOST_ASIO_HAS_KQUEUE))
+
+#include <boost/asio/detail/push_options.hpp>
+
+namespace boost {
+namespace asio {
+namespace detail {
+
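+// Add a timer queue to the reactor.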
+template <typename Time_Traits>
+void select_reactor::add_timer_queue(timer_queue<Time_Traits>& queue)
+{
+ do_add_timer_queue(queue);
+}
+
+// Remove a timer queue from the reactor.
+template <typename Time_Traits>
+void select_reactor::remove_timer_queue(timer_queue<Time_Traits>& queue)
+{
+ do_remove_timer_queue(queue);
+}
+
+template <typename Time_Traits>
+void select_reactor::schedule_timer(timer_queue<Time_Traits>& queue,
+ const typename Time_Traits::time_type& time,
+ typename timer_queue<Time_Traits>::per_timer_data& timer, wait_op* op)
+{
+ boost::asio::detail::mutex::scoped_lock lock(mutex_);
+
+ if (shutdown_)
+ {
+ io_service_.post_immediate_completion(op);
+ return;
+ }
+
+ bool earliest = queue.enqueue_timer(time, timer, op);
+ io_service_.work_started();
+ if (earliest)
+ interrupter_.interrupt();
+}
+
+template <typename Time_Traits>
+std::size_t select_reactor::cancel_timer(timer_queue<Time_Traits>& queue,
+ typename timer_queue<Time_Traits>::per_timer_data& timer,
+ std::size_t max_cancelled)
+{
+ boost::asio::detail::mutex::scoped_lock lock(mutex_);
+ op_queue<operation> ops;
+ std::size_t n = queue.cancel_timer(timer, ops, max_cancelled);
+ lock.unlock();
+ io_service_.post_deferred_completions(ops);
+ return n;
+}
+
+} // namespace detail
+} // namespace asio
+} // namespace boost
+
+#include <boost/asio/detail/pop_options.hpp>
+
+#endif // defined(BOOST_ASIO_HAS_IOCP)
+ // || (!defined(BOOST_ASIO_HAS_DEV_POLL)
+ // && !defined(BOOST_ASIO_HAS_EPOLL)
+ // && !defined(BOOST_ASIO_HAS_KQUEUE))
+
+#endif // BOOST_ASIO_DETAIL_IMPL_SELECT_REACTOR_HPP
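
The templates above are the internal end of the public timer API: a
deadline_timer's async_wait eventually reaches schedule_timer(), and its
cancel() reaches cancel_timer(), with the interrupt on a new earliest
deadline keeping select()'s timeout honest. A minimal user-level sketch of
that path, illustrative only and built on the ordinary public API:

#include <boost/asio.hpp>
#include <boost/date_time/posix_time/posix_time.hpp>
#include <iostream>

void on_wait(const boost::system::error_code& ec)
{
  if (ec == boost::asio::error::operation_aborted)
    std::cout << "wait cancelled (cancel_timer path)\n";
  else
    std::cout << "timer fired (schedule_timer path)\n";
}

int main()
{
  boost::asio::io_service io_service;

  // async_wait enqueues a wait_op; if this deadline becomes the earliest
  // one, the reactor is interrupted so it can recompute its timeout.
  boost::asio::deadline_timer timer(io_service);
  timer.expires_from_now(boost::posix_time::milliseconds(50));
  timer.async_wait(&on_wait);

  // timer.cancel() would complete the wait with operation_aborted instead.

  io_service.run();
}
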
diff --git a/boost/asio/detail/impl/select_reactor.ipp b/boost/asio/detail/impl/select_reactor.ipp
new file mode 100644
index 0000000000..d11904e40d
--- /dev/null
+++ b/boost/asio/detail/impl/select_reactor.ipp
@@ -0,0 +1,314 @@
+//
+// detail/impl/select_reactor.ipp
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+//
+// Copyright (c) 2003-2012 Christopher M. Kohlhoff (chris at kohlhoff dot com)
+//
+// Distributed under the Boost Software License, Version 1.0. (See accompanying
+// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+//
+
+#ifndef BOOST_ASIO_DETAIL_IMPL_SELECT_REACTOR_IPP
+#define BOOST_ASIO_DETAIL_IMPL_SELECT_REACTOR_IPP
+
+#if defined(_MSC_VER) && (_MSC_VER >= 1200)
+# pragma once
+#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
+
+#include <boost/asio/detail/config.hpp>
+
+#if defined(BOOST_ASIO_HAS_IOCP) \
+ || (!defined(BOOST_ASIO_HAS_DEV_POLL) \
+ && !defined(BOOST_ASIO_HAS_EPOLL) \
+ && !defined(BOOST_ASIO_HAS_KQUEUE))
+
+#include <boost/asio/detail/bind_handler.hpp>
+#include <boost/asio/detail/fd_set_adapter.hpp>
+#include <boost/asio/detail/select_reactor.hpp>
+#include <boost/asio/detail/signal_blocker.hpp>
+#include <boost/asio/detail/socket_ops.hpp>
+
+#include <boost/asio/detail/push_options.hpp>
+
+namespace boost {
+namespace asio {
+namespace detail {
+
+select_reactor::select_reactor(boost::asio::io_service& io_service)
+ : boost::asio::detail::service_base<select_reactor>(io_service),
+ io_service_(use_service<io_service_impl>(io_service)),
+ mutex_(),
+ interrupter_(),
+#if defined(BOOST_ASIO_HAS_IOCP)
+ stop_thread_(false),
+ thread_(0),
+#endif // defined(BOOST_ASIO_HAS_IOCP)
+ shutdown_(false)
+{
+#if defined(BOOST_ASIO_HAS_IOCP)
+ boost::asio::detail::signal_blocker sb;
+ thread_ = new boost::asio::detail::thread(
+ bind_handler(&select_reactor::call_run_thread, this));
+#endif // defined(BOOST_ASIO_HAS_IOCP)
+}
+
+select_reactor::~select_reactor()
+{
+ shutdown_service();
+}
+
+void select_reactor::shutdown_service()
+{
+ boost::asio::detail::mutex::scoped_lock lock(mutex_);
+ shutdown_ = true;
+#if defined(BOOST_ASIO_HAS_IOCP)
+ stop_thread_ = true;
+#endif // defined(BOOST_ASIO_HAS_IOCP)
+ lock.unlock();
+
+#if defined(BOOST_ASIO_HAS_IOCP)
+ if (thread_)
+ {
+ interrupter_.interrupt();
+ thread_->join();
+ delete thread_;
+ thread_ = 0;
+ }
+#endif // defined(BOOST_ASIO_HAS_IOCP)
+
+ op_queue<operation> ops;
+
+ for (int i = 0; i < max_ops; ++i)
+ op_queue_[i].get_all_operations(ops);
+
+ timer_queues_.get_all_timers(ops);
+
+ io_service_.abandon_operations(ops);
+}
+
+void select_reactor::fork_service(boost::asio::io_service::fork_event fork_ev)
+{
+ if (fork_ev == boost::asio::io_service::fork_child)
+ interrupter_.recreate();
+}
+
+void select_reactor::init_task()
+{
+ io_service_.init_task();
+}
+
+int select_reactor::register_descriptor(socket_type,
+ select_reactor::per_descriptor_data&)
+{
+ return 0;
+}
+
+int select_reactor::register_internal_descriptor(
+ int op_type, socket_type descriptor,
+ select_reactor::per_descriptor_data&, reactor_op* op)
+{
+ boost::asio::detail::mutex::scoped_lock lock(mutex_);
+
+ op_queue_[op_type].enqueue_operation(descriptor, op);
+ interrupter_.interrupt();
+
+ return 0;
+}
+
+void select_reactor::move_descriptor(socket_type,
+ select_reactor::per_descriptor_data&,
+ select_reactor::per_descriptor_data&)
+{
+}
+
+void select_reactor::start_op(int op_type, socket_type descriptor,
+ select_reactor::per_descriptor_data&, reactor_op* op, bool)
+{
+ boost::asio::detail::mutex::scoped_lock lock(mutex_);
+
+ if (shutdown_)
+ {
+ post_immediate_completion(op);
+ return;
+ }
+
+ bool first = op_queue_[op_type].enqueue_operation(descriptor, op);
+ io_service_.work_started();
+ if (first)
+ interrupter_.interrupt();
+}
+
+void select_reactor::cancel_ops(socket_type descriptor,
+ select_reactor::per_descriptor_data&)
+{
+ boost::asio::detail::mutex::scoped_lock lock(mutex_);
+ cancel_ops_unlocked(descriptor, boost::asio::error::operation_aborted);
+}
+
+void select_reactor::deregister_descriptor(socket_type descriptor,
+ select_reactor::per_descriptor_data&, bool)
+{
+ boost::asio::detail::mutex::scoped_lock lock(mutex_);
+ cancel_ops_unlocked(descriptor, boost::asio::error::operation_aborted);
+}
+
+void select_reactor::deregister_internal_descriptor(
+ socket_type descriptor, select_reactor::per_descriptor_data&)
+{
+ boost::asio::detail::mutex::scoped_lock lock(mutex_);
+ op_queue<operation> ops;
+ for (int i = 0; i < max_ops; ++i)
+ op_queue_[i].cancel_operations(descriptor, ops);
+}
+
+void select_reactor::run(bool block, op_queue<operation>& ops)
+{
+ boost::asio::detail::mutex::scoped_lock lock(mutex_);
+
+#if defined(BOOST_ASIO_HAS_IOCP)
+ // Check if the thread is supposed to stop.
+ if (stop_thread_)
+ return;
+#endif // defined(BOOST_ASIO_HAS_IOCP)
+
+ // Set up the descriptor sets.
+ for (int i = 0; i < max_select_ops; ++i)
+ fd_sets_[i].reset();
+ fd_sets_[read_op].set(interrupter_.read_descriptor());
+ socket_type max_fd = 0;
+ bool have_work_to_do = !timer_queues_.all_empty();
+ for (int i = 0; i < max_select_ops; ++i)
+ {
+ have_work_to_do = have_work_to_do || !op_queue_[i].empty();
+ op_queue_[i].get_descriptors(fd_sets_[i], ops);
+ if (fd_sets_[i].max_descriptor() > max_fd)
+ max_fd = fd_sets_[i].max_descriptor();
+ }
+
+#if defined(BOOST_WINDOWS) || defined(__CYGWIN__)
+ // Connection operations on Windows use both except and write fd_sets.
+ have_work_to_do = have_work_to_do || !op_queue_[connect_op].empty();
+ op_queue_[connect_op].get_descriptors(fd_sets_[write_op], ops);
+ if (fd_sets_[write_op].max_descriptor() > max_fd)
+ max_fd = fd_sets_[write_op].max_descriptor();
+ op_queue_[connect_op].get_descriptors(fd_sets_[except_op], ops);
+ if (fd_sets_[except_op].max_descriptor() > max_fd)
+ max_fd = fd_sets_[except_op].max_descriptor();
+#endif // defined(BOOST_WINDOWS) || defined(__CYGWIN__)
+
+ // We can return immediately if there's no work to do and the reactor is
+ // not supposed to block.
+ if (!block && !have_work_to_do)
+ return;
+
+ // Determine how long to block while waiting for events.
+ timeval tv_buf = { 0, 0 };
+ timeval* tv = block ? get_timeout(tv_buf) : &tv_buf;
+
+ lock.unlock();
+
+ // Block on the select call until descriptors become ready.
+ boost::system::error_code ec;
+ int retval = socket_ops::select(static_cast<int>(max_fd + 1),
+ fd_sets_[read_op], fd_sets_[write_op], fd_sets_[except_op], tv, ec);
+
+ // Reset the interrupter.
+ if (retval > 0 && fd_sets_[read_op].is_set(interrupter_.read_descriptor()))
+ {
+ interrupter_.reset();
+ --retval;
+ }
+
+ lock.lock();
+
+ // Dispatch all ready operations.
+ if (retval > 0)
+ {
+#if defined(BOOST_WINDOWS) || defined(__CYGWIN__)
+ // Connection operations on Windows use both except and write fd_sets.
+ op_queue_[connect_op].perform_operations_for_descriptors(
+ fd_sets_[except_op], ops);
+ op_queue_[connect_op].perform_operations_for_descriptors(
+ fd_sets_[write_op], ops);
+#endif // defined(BOOST_WINDOWS) || defined(__CYGWIN__)
+
+ // Exception operations must be processed first to ensure that any
+ // out-of-band data is read before normal data.
+ for (int i = max_select_ops - 1; i >= 0; --i)
+ op_queue_[i].perform_operations_for_descriptors(fd_sets_[i], ops);
+ }
+ timer_queues_.get_ready_timers(ops);
+}
+
+void select_reactor::interrupt()
+{
+ interrupter_.interrupt();
+}
+
+#if defined(BOOST_ASIO_HAS_IOCP)
+void select_reactor::run_thread()
+{
+ boost::asio::detail::mutex::scoped_lock lock(mutex_);
+ while (!stop_thread_)
+ {
+ lock.unlock();
+ op_queue<operation> ops;
+ run(true, ops);
+ io_service_.post_deferred_completions(ops);
+ lock.lock();
+ }
+}
+
+void select_reactor::call_run_thread(select_reactor* reactor)
+{
+ reactor->run_thread();
+}
+#endif // defined(BOOST_ASIO_HAS_IOCP)
+
+void select_reactor::do_add_timer_queue(timer_queue_base& queue)
+{
+ mutex::scoped_lock lock(mutex_);
+ timer_queues_.insert(&queue);
+}
+
+void select_reactor::do_remove_timer_queue(timer_queue_base& queue)
+{
+ mutex::scoped_lock lock(mutex_);
+ timer_queues_.erase(&queue);
+}
+
+timeval* select_reactor::get_timeout(timeval& tv)
+{
+  // By default we will wait no longer than 5 minutes. This ensures that any
+  // change to the system clock is detected within that interval at worst.
+ long usec = timer_queues_.wait_duration_usec(5 * 60 * 1000 * 1000);
+ tv.tv_sec = usec / 1000000;
+ tv.tv_usec = usec % 1000000;
+ return &tv;
+}
+
+void select_reactor::cancel_ops_unlocked(socket_type descriptor,
+ const boost::system::error_code& ec)
+{
+ bool need_interrupt = false;
+ op_queue<operation> ops;
+ for (int i = 0; i < max_ops; ++i)
+ need_interrupt = op_queue_[i].cancel_operations(
+ descriptor, ops, ec) || need_interrupt;
+ io_service_.post_deferred_completions(ops);
+ if (need_interrupt)
+ interrupter_.interrupt();
+}
+
+} // namespace detail
+} // namespace asio
+} // namespace boost
+
+#include <boost/asio/detail/pop_options.hpp>
+
+#endif // defined(BOOST_ASIO_HAS_IOCP)
+ // || (!defined(BOOST_ASIO_HAS_DEV_POLL)
+ // && !defined(BOOST_ASIO_HAS_EPOLL)
+ // && !defined(BOOST_ASIO_HAS_KQUEUE))
+
+#endif // BOOST_ASIO_DETAIL_IMPL_SELECT_REACTOR_IPP
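
run() above is the classic select-with-timeout loop: build the fd_sets,
bound the wait with get_timeout(), call select(), reset the interrupter if
it fired, then perform the ready operations. The stripped-down, POSIX-only
sketch below is an illustration rather than part of the imported source; a
pipe stands in for both a registered descriptor and the interrupter.

#include <sys/select.h>
#include <unistd.h>
#include <cstdio>

int main()
{
  int pipe_fds[2];
  if (::pipe(pipe_fds) != 0)
    return 1;

  // Make the read end "ready", as the interrupter does when poked.
  ::write(pipe_fds[1], "x", 1);

  // Equivalent of setting up fd_sets_[read_op].
  fd_set read_fds;
  FD_ZERO(&read_fds);
  FD_SET(pipe_fds[0], &read_fds);
  int max_fd = pipe_fds[0];

  // Equivalent of get_timeout(): never block longer than a fixed cap so
  // timers and system clock changes are noticed promptly.
  timeval tv;
  tv.tv_sec = 1;
  tv.tv_usec = 0;

  int ready = ::select(max_fd + 1, &read_fds, 0, 0, &tv);
  if (ready > 0 && FD_ISSET(pipe_fds[0], &read_fds))
  {
    char ch;
    ::read(pipe_fds[0], &ch, 1);        // like interrupter_.reset()
    std::printf("descriptor ready\n");  // dispatch the queued operations
  }

  ::close(pipe_fds[0]);
  ::close(pipe_fds[1]);
  return 0;
}
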
diff --git a/boost/asio/detail/impl/service_registry.hpp b/boost/asio/detail/impl/service_registry.hpp
new file mode 100644
index 0000000000..eef25ac73d
--- /dev/null
+++ b/boost/asio/detail/impl/service_registry.hpp
@@ -0,0 +1,90 @@
+//
+// detail/impl/service_registry.hpp
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+//
+// Copyright (c) 2003-2012 Christopher M. Kohlhoff (chris at kohlhoff dot com)
+//
+// Distributed under the Boost Software License, Version 1.0. (See accompanying
+// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+//
+
+#ifndef BOOST_ASIO_DETAIL_IMPL_SERVICE_REGISTRY_HPP
+#define BOOST_ASIO_DETAIL_IMPL_SERVICE_REGISTRY_HPP
+
+#if defined(_MSC_VER) && (_MSC_VER >= 1200)
+# pragma once
+#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
+
+#include <boost/asio/detail/push_options.hpp>
+
+namespace boost {
+namespace asio {
+namespace detail {
+
+template <typename Service, typename Arg>
+service_registry::service_registry(
+ boost::asio::io_service& o, Service*, Arg arg)
+ : owner_(o),
+ first_service_(new Service(o, arg))
+{
+ boost::asio::io_service::service::key key;
+ init_key(key, Service::id);
+ first_service_->key_ = key;
+ first_service_->next_ = 0;
+}
+
+template <typename Service>
+Service& service_registry::first_service()
+{
+ return *static_cast<Service*>(first_service_);
+}
+
+template <typename Service>
+Service& service_registry::use_service()
+{
+ boost::asio::io_service::service::key key;
+ init_key(key, Service::id);
+ factory_type factory = &service_registry::create<Service>;
+ return *static_cast<Service*>(do_use_service(key, factory));
+}
+
+template <typename Service>
+void service_registry::add_service(Service* new_service)
+{
+ boost::asio::io_service::service::key key;
+ init_key(key, Service::id);
+ return do_add_service(key, new_service);
+}
+
+template <typename Service>
+bool service_registry::has_service() const
+{
+ boost::asio::io_service::service::key key;
+ init_key(key, Service::id);
+ return do_has_service(key);
+}
+
+#if !defined(BOOST_ASIO_NO_TYPEID)
+template <typename Service>
+void service_registry::init_key(boost::asio::io_service::service::key& key,
+ const boost::asio::detail::service_id<Service>& /*id*/)
+{
+ key.type_info_ = &typeid(typeid_wrapper<Service>);
+ key.id_ = 0;
+}
+#endif // !defined(BOOST_ASIO_NO_TYPEID)
+
+template <typename Service>
+boost::asio::io_service::service* service_registry::create(
+ boost::asio::io_service& owner)
+{
+ return new Service(owner);
+}
+
+} // namespace detail
+} // namespace asio
+} // namespace boost
+
+#include <boost/asio/detail/pop_options.hpp>
+
+#endif // BOOST_ASIO_DETAIL_IMPL_SERVICE_REGISTRY_HPP
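
The registry above keys services by typeid (or by an explicit id when RTTI
is unavailable) and creates them on first use. The sketch below shows how a
user-defined service reaches this machinery through the public use_service
and has_service helpers; logging_service is a hypothetical name used purely
for illustration.

#include <boost/asio/io_service.hpp>
#include <iostream>

class logging_service : public boost::asio::io_service::service
{
public:
  // The identity used by init_key(); with RTTI available the typeid of the
  // service type is used instead.
  static boost::asio::io_service::id id;

  explicit logging_service(boost::asio::io_service& io_service)
    : boost::asio::io_service::service(io_service)
  {
  }

  void log(const char* msg) { std::cout << msg << "\n"; }

private:
  // Called from ~service_registry() before services are destroyed.
  void shutdown_service() {}
};

boost::asio::io_service::id logging_service::id;

int main()
{
  boost::asio::io_service io_service;

  // First use goes through service_registry::create<Service>; later uses
  // find the existing entry by matching keys.
  logging_service& svc = boost::asio::use_service<logging_service>(io_service);
  svc.log("created on demand");

  std::cout << boost::asio::has_service<logging_service>(io_service) << "\n";
}
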
diff --git a/boost/asio/detail/impl/service_registry.ipp b/boost/asio/detail/impl/service_registry.ipp
new file mode 100644
index 0000000000..6715010504
--- /dev/null
+++ b/boost/asio/detail/impl/service_registry.ipp
@@ -0,0 +1,190 @@
+//
+// detail/impl/service_registry.ipp
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+//
+// Copyright (c) 2003-2012 Christopher M. Kohlhoff (chris at kohlhoff dot com)
+//
+// Distributed under the Boost Software License, Version 1.0. (See accompanying
+// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+//
+
+#ifndef BOOST_ASIO_DETAIL_IMPL_SERVICE_REGISTRY_IPP
+#define BOOST_ASIO_DETAIL_IMPL_SERVICE_REGISTRY_IPP
+
+#if defined(_MSC_VER) && (_MSC_VER >= 1200)
+# pragma once
+#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
+
+#include <boost/asio/detail/config.hpp>
+#include <boost/throw_exception.hpp>
+#include <vector>
+#include <boost/asio/detail/service_registry.hpp>
+
+#include <boost/asio/detail/push_options.hpp>
+
+namespace boost {
+namespace asio {
+namespace detail {
+
+service_registry::~service_registry()
+{
+ // Shutdown all services. This must be done in a separate loop before the
+ // services are destroyed since the destructors of user-defined handler
+ // objects may try to access other service objects.
+ boost::asio::io_service::service* service = first_service_;
+ while (service)
+ {
+ service->shutdown_service();
+ service = service->next_;
+ }
+
+ // Destroy all services.
+ while (first_service_)
+ {
+ boost::asio::io_service::service* next_service = first_service_->next_;
+ destroy(first_service_);
+ first_service_ = next_service;
+ }
+}
+
+void service_registry::notify_fork(boost::asio::io_service::fork_event fork_ev)
+{
+ // Make a copy of all of the services while holding the lock. We don't want
+ // to hold the lock while calling into each service, as it may try to call
+ // back into this class.
+ std::vector<boost::asio::io_service::service*> services;
+ {
+ boost::asio::detail::mutex::scoped_lock lock(mutex_);
+ boost::asio::io_service::service* service = first_service_;
+ while (service)
+ {
+ services.push_back(service);
+ service = service->next_;
+ }
+ }
+
+ // If processing the fork_prepare event, we want to go in reverse order of
+ // service registration, which happens to be the existing order of the
+ // services in the vector. For the other events we want to go in the other
+ // direction.
+ std::size_t num_services = services.size();
+ if (fork_ev == boost::asio::io_service::fork_prepare)
+ for (std::size_t i = 0; i < num_services; ++i)
+ services[i]->fork_service(fork_ev);
+ else
+ for (std::size_t i = num_services; i > 0; --i)
+ services[i - 1]->fork_service(fork_ev);
+}
+
+void service_registry::init_key(boost::asio::io_service::service::key& key,
+ const boost::asio::io_service::id& id)
+{
+ key.type_info_ = 0;
+ key.id_ = &id;
+}
+
+bool service_registry::keys_match(
+ const boost::asio::io_service::service::key& key1,
+ const boost::asio::io_service::service::key& key2)
+{
+ if (key1.id_ && key2.id_)
+ if (key1.id_ == key2.id_)
+ return true;
+ if (key1.type_info_ && key2.type_info_)
+ if (*key1.type_info_ == *key2.type_info_)
+ return true;
+ return false;
+}
+
+void service_registry::destroy(boost::asio::io_service::service* service)
+{
+ delete service;
+}
+
+boost::asio::io_service::service* service_registry::do_use_service(
+ const boost::asio::io_service::service::key& key,
+ factory_type factory)
+{
+ boost::asio::detail::mutex::scoped_lock lock(mutex_);
+
+ // First see if there is an existing service object with the given key.
+ boost::asio::io_service::service* service = first_service_;
+ while (service)
+ {
+ if (keys_match(service->key_, key))
+ return service;
+ service = service->next_;
+ }
+
+ // Create a new service object. The service registry's mutex is not locked
+ // at this time to allow for nested calls into this function from the new
+ // service's constructor.
+ lock.unlock();
+ auto_service_ptr new_service = { factory(owner_) };
+ new_service.ptr_->key_ = key;
+ lock.lock();
+
+ // Check that nobody else created another service object of the same type
+ // while the lock was released.
+ service = first_service_;
+ while (service)
+ {
+ if (keys_match(service->key_, key))
+ return service;
+ service = service->next_;
+ }
+
+ // Service was successfully initialised, pass ownership to registry.
+ new_service.ptr_->next_ = first_service_;
+ first_service_ = new_service.ptr_;
+ new_service.ptr_ = 0;
+ return first_service_;
+}
+
+void service_registry::do_add_service(
+ const boost::asio::io_service::service::key& key,
+ boost::asio::io_service::service* new_service)
+{
+ if (&owner_ != &new_service->get_io_service())
+ boost::throw_exception(invalid_service_owner());
+
+ boost::asio::detail::mutex::scoped_lock lock(mutex_);
+
+ // Check if there is an existing service object with the given key.
+ boost::asio::io_service::service* service = first_service_;
+ while (service)
+ {
+ if (keys_match(service->key_, key))
+ boost::throw_exception(service_already_exists());
+ service = service->next_;
+ }
+
+ // Take ownership of the service object.
+ new_service->key_ = key;
+ new_service->next_ = first_service_;
+ first_service_ = new_service;
+}
+
+bool service_registry::do_has_service(
+ const boost::asio::io_service::service::key& key) const
+{
+ boost::asio::detail::mutex::scoped_lock lock(mutex_);
+
+ boost::asio::io_service::service* service = first_service_;
+ while (service)
+ {
+ if (keys_match(service->key_, key))
+ return true;
+ service = service->next_;
+ }
+
+ return false;
+}
+
+} // namespace detail
+} // namespace asio
+} // namespace boost
+
+#include <boost/asio/detail/pop_options.hpp>
+
+#endif // BOOST_ASIO_DETAIL_IMPL_SERVICE_REGISTRY_IPP
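
do_use_service() above deliberately drops the mutex while the new service is
constructed and re-checks the list before adopting it, so a constructor that
recursively calls back into the registry cannot deadlock and a racing
creator cannot register the same service twice. The generic sketch below
shows the same unlock/construct/relock/re-check shape; it is an illustration
with made-up names (widget, use_widget), not Asio code.

#include <boost/thread/locks.hpp>
#include <boost/thread/mutex.hpp>
#include <map>
#include <string>

struct widget
{
  explicit widget(const std::string& k) : key(k) {}
  std::string key;
};

boost::mutex registry_mutex;
std::map<std::string, widget*> registry;

widget& use_widget(const std::string& key)
{
  boost::mutex::scoped_lock lock(registry_mutex);

  // First see if an instance already exists (cf. the first key scan above).
  std::map<std::string, widget*>::iterator it = registry.find(key);
  if (it != registry.end())
    return *it->second;

  // Construct outside the lock; the constructor may call use_widget() too.
  lock.unlock();
  widget* candidate = new widget(key);
  lock.lock();

  // Someone else may have won the race while the lock was released.
  it = registry.find(key);
  if (it != registry.end())
  {
    delete candidate;
    return *it->second;
  }

  // Adopt the candidate, as the registry does via auto_service_ptr.
  registry[key] = candidate;
  return *candidate;
}

int main()
{
  widget& a = use_widget("epoll");
  widget& b = use_widget("epoll");
  return &a == &b ? 0 : 1;  // same instance both times
}
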
diff --git a/boost/asio/detail/impl/signal_set_service.ipp b/boost/asio/detail/impl/signal_set_service.ipp
new file mode 100644
index 0000000000..0b57007a98
--- /dev/null
+++ b/boost/asio/detail/impl/signal_set_service.ipp
@@ -0,0 +1,593 @@
+//
+// detail/impl/signal_set_service.ipp
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+//
+// Copyright (c) 2003-2012 Christopher M. Kohlhoff (chris at kohlhoff dot com)
+//
+// Distributed under the Boost Software License, Version 1.0. (See accompanying
+// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+//
+
+#ifndef BOOST_ASIO_DETAIL_IMPL_SIGNAL_SET_SERVICE_IPP
+#define BOOST_ASIO_DETAIL_IMPL_SIGNAL_SET_SERVICE_IPP
+
+#if defined(_MSC_VER) && (_MSC_VER >= 1200)
+# pragma once
+#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
+
+#include <boost/asio/detail/config.hpp>
+
+#include <cstring>
+#include <boost/asio/detail/reactor.hpp>
+#include <boost/asio/detail/signal_blocker.hpp>
+#include <boost/asio/detail/signal_set_service.hpp>
+#include <boost/asio/detail/static_mutex.hpp>
+
+#include <boost/asio/detail/push_options.hpp>
+
+namespace boost {
+namespace asio {
+namespace detail {
+
+struct signal_state
+{
+ // Mutex used for protecting global state.
+ static_mutex mutex_;
+
+ // The read end of the pipe used for signal notifications.
+ int read_descriptor_;
+
+ // The write end of the pipe used for signal notifications.
+ int write_descriptor_;
+
+ // Whether the signal state has been prepared for a fork.
+ bool fork_prepared_;
+
+ // The head of a linked list of all signal_set_service instances.
+ class signal_set_service* service_list_;
+
+ // A count of the number of objects that are registered for each signal.
+ std::size_t registration_count_[max_signal_number];
+};
+
+signal_state* get_signal_state()
+{
+ static signal_state state = {
+ BOOST_ASIO_STATIC_MUTEX_INIT, -1, -1, false, 0, { 0 } };
+ return &state;
+}
+
+void asio_signal_handler(int signal_number)
+{
+#if defined(BOOST_WINDOWS) || defined(__CYGWIN__)
+ signal_set_service::deliver_signal(signal_number);
+#else // defined(BOOST_WINDOWS) || defined(__CYGWIN__)
+ int saved_errno = errno;
+ signal_state* state = get_signal_state();
+ int result = ::write(state->write_descriptor_,
+ &signal_number, sizeof(signal_number));
+ (void)result;
+ errno = saved_errno;
+#endif // defined(BOOST_WINDOWS) || defined(__CYGWIN__)
+
+#if defined(BOOST_ASIO_HAS_SIGNAL) && !defined(BOOST_ASIO_HAS_SIGACTION)
+ ::signal(signal_number, asio_signal_handler);
+#endif // defined(BOOST_ASIO_HAS_SIGNAL) && !defined(BOOST_ASIO_HAS_SIGACTION)
+}
+
+#if !defined(BOOST_WINDOWS) && !defined(__CYGWIN__)
+class signal_set_service::pipe_read_op : public reactor_op
+{
+public:
+ pipe_read_op()
+ : reactor_op(&pipe_read_op::do_perform, pipe_read_op::do_complete)
+ {
+ }
+
+ static bool do_perform(reactor_op*)
+ {
+ signal_state* state = get_signal_state();
+
+ int fd = state->read_descriptor_;
+ int signal_number = 0;
+ while (::read(fd, &signal_number, sizeof(int)) == sizeof(int))
+ if (signal_number >= 0 && signal_number < max_signal_number)
+ signal_set_service::deliver_signal(signal_number);
+
+ return false;
+ }
+
+ static void do_complete(io_service_impl* /*owner*/, operation* base,
+ const boost::system::error_code& /*ec*/,
+ std::size_t /*bytes_transferred*/)
+ {
+ pipe_read_op* o(static_cast<pipe_read_op*>(base));
+ delete o;
+ }
+};
+#endif // !defined(BOOST_WINDOWS) && !defined(__CYGWIN__)
+
+signal_set_service::signal_set_service(
+ boost::asio::io_service& io_service)
+ : io_service_(boost::asio::use_service<io_service_impl>(io_service)),
+#if !defined(BOOST_WINDOWS) && !defined(__CYGWIN__)
+ reactor_(boost::asio::use_service<reactor>(io_service)),
+#endif // !defined(BOOST_WINDOWS) && !defined(__CYGWIN__)
+ next_(0),
+ prev_(0)
+{
+ get_signal_state()->mutex_.init();
+
+#if !defined(BOOST_WINDOWS) && !defined(__CYGWIN__)
+ reactor_.init_task();
+#endif // !defined(BOOST_WINDOWS) && !defined(__CYGWIN__)
+
+ for (int i = 0; i < max_signal_number; ++i)
+ registrations_[i] = 0;
+
+ add_service(this);
+}
+
+signal_set_service::~signal_set_service()
+{
+ remove_service(this);
+}
+
+void signal_set_service::shutdown_service()
+{
+ remove_service(this);
+
+ op_queue<operation> ops;
+
+ for (int i = 0; i < max_signal_number; ++i)
+ {
+ registration* reg = registrations_[i];
+ while (reg)
+ {
+ ops.push(*reg->queue_);
+ reg = reg->next_in_table_;
+ }
+ }
+
+ io_service_.abandon_operations(ops);
+}
+
+void signal_set_service::fork_service(
+ boost::asio::io_service::fork_event fork_ev)
+{
+#if !defined(BOOST_WINDOWS) && !defined(__CYGWIN__)
+ signal_state* state = get_signal_state();
+ static_mutex::scoped_lock lock(state->mutex_);
+
+ switch (fork_ev)
+ {
+ case boost::asio::io_service::fork_prepare:
+ reactor_.deregister_internal_descriptor(
+ state->read_descriptor_, reactor_data_);
+ state->fork_prepared_ = true;
+ break;
+ case boost::asio::io_service::fork_parent:
+ state->fork_prepared_ = false;
+ reactor_.register_internal_descriptor(reactor::read_op,
+ state->read_descriptor_, reactor_data_, new pipe_read_op);
+ break;
+ case boost::asio::io_service::fork_child:
+ if (state->fork_prepared_)
+ {
+ boost::asio::detail::signal_blocker blocker;
+ close_descriptors();
+ open_descriptors();
+ state->fork_prepared_ = false;
+ }
+ reactor_.register_internal_descriptor(reactor::read_op,
+ state->read_descriptor_, reactor_data_, new pipe_read_op);
+ break;
+ default:
+ break;
+ }
+#else // !defined(BOOST_WINDOWS) && !defined(__CYGWIN__)
+ (void)fork_ev;
+#endif // !defined(BOOST_WINDOWS) && !defined(__CYGWIN__)
+}
+
+void signal_set_service::construct(
+ signal_set_service::implementation_type& impl)
+{
+ impl.signals_ = 0;
+}
+
+void signal_set_service::destroy(
+ signal_set_service::implementation_type& impl)
+{
+ boost::system::error_code ignored_ec;
+ clear(impl, ignored_ec);
+ cancel(impl, ignored_ec);
+}
+
+boost::system::error_code signal_set_service::add(
+ signal_set_service::implementation_type& impl,
+ int signal_number, boost::system::error_code& ec)
+{
+ // Check that the signal number is valid.
+  if (signal_number < 0 || signal_number >= max_signal_number)
+ {
+ ec = boost::asio::error::invalid_argument;
+ return ec;
+ }
+
+ signal_state* state = get_signal_state();
+ static_mutex::scoped_lock lock(state->mutex_);
+
+ // Find the appropriate place to insert the registration.
+ registration** insertion_point = &impl.signals_;
+ registration* next = impl.signals_;
+ while (next && next->signal_number_ < signal_number)
+ {
+ insertion_point = &next->next_in_set_;
+ next = next->next_in_set_;
+ }
+
+ // Only do something if the signal is not already registered.
+ if (next == 0 || next->signal_number_ != signal_number)
+ {
+ registration* new_registration = new registration;
+
+#if defined(BOOST_ASIO_HAS_SIGNAL) || defined(BOOST_ASIO_HAS_SIGACTION)
+ // Register for the signal if we're the first.
+ if (state->registration_count_[signal_number] == 0)
+ {
+# if defined(BOOST_ASIO_HAS_SIGACTION)
+ using namespace std; // For memset.
+ struct sigaction sa;
+ memset(&sa, 0, sizeof(sa));
+ sa.sa_handler = asio_signal_handler;
+ sigfillset(&sa.sa_mask);
+ if (::sigaction(signal_number, &sa, 0) == -1)
+# else // defined(BOOST_ASIO_HAS_SIGACTION)
+ if (::signal(signal_number, asio_signal_handler) == SIG_ERR)
+# endif // defined(BOOST_ASIO_HAS_SIGACTION)
+ {
+# if defined(BOOST_WINDOWS) || defined(__CYGWIN__)
+ ec = boost::asio::error::invalid_argument;
+# else // defined(BOOST_WINDOWS) || defined(__CYGWIN__)
+ ec = boost::system::error_code(errno,
+ boost::asio::error::get_system_category());
+# endif // defined(BOOST_WINDOWS) || defined(__CYGWIN__)
+ delete new_registration;
+ return ec;
+ }
+ }
+#endif // defined(BOOST_ASIO_HAS_SIGNAL) || defined(BOOST_ASIO_HAS_SIGACTION)
+
+ // Record the new registration in the set.
+ new_registration->signal_number_ = signal_number;
+ new_registration->queue_ = &impl.queue_;
+ new_registration->next_in_set_ = next;
+ *insertion_point = new_registration;
+
+ // Insert registration into the registration table.
+ new_registration->next_in_table_ = registrations_[signal_number];
+ if (registrations_[signal_number])
+ registrations_[signal_number]->prev_in_table_ = new_registration;
+ registrations_[signal_number] = new_registration;
+
+ ++state->registration_count_[signal_number];
+ }
+
+ ec = boost::system::error_code();
+ return ec;
+}
+
+boost::system::error_code signal_set_service::remove(
+ signal_set_service::implementation_type& impl,
+ int signal_number, boost::system::error_code& ec)
+{
+ // Check that the signal number is valid.
+  if (signal_number < 0 || signal_number >= max_signal_number)
+ {
+ ec = boost::asio::error::invalid_argument;
+ return ec;
+ }
+
+ signal_state* state = get_signal_state();
+ static_mutex::scoped_lock lock(state->mutex_);
+
+ // Find the signal number in the list of registrations.
+ registration** deletion_point = &impl.signals_;
+ registration* reg = impl.signals_;
+ while (reg && reg->signal_number_ < signal_number)
+ {
+ deletion_point = &reg->next_in_set_;
+ reg = reg->next_in_set_;
+ }
+
+ if (reg != 0 && reg->signal_number_ == signal_number)
+ {
+#if defined(BOOST_ASIO_HAS_SIGNAL) || defined(BOOST_ASIO_HAS_SIGACTION)
+ // Set signal handler back to the default if we're the last.
+ if (state->registration_count_[signal_number] == 1)
+ {
+# if defined(BOOST_ASIO_HAS_SIGACTION)
+ using namespace std; // For memset.
+ struct sigaction sa;
+ memset(&sa, 0, sizeof(sa));
+ sa.sa_handler = SIG_DFL;
+ if (::sigaction(signal_number, &sa, 0) == -1)
+# else // defined(BOOST_ASIO_HAS_SIGACTION)
+ if (::signal(signal_number, SIG_DFL) == SIG_ERR)
+# endif // defined(BOOST_ASIO_HAS_SIGACTION)
+ {
+# if defined(BOOST_WINDOWS) || defined(__CYGWIN__)
+ ec = boost::asio::error::invalid_argument;
+# else // defined(BOOST_WINDOWS) || defined(__CYGWIN__)
+ ec = boost::system::error_code(errno,
+ boost::asio::error::get_system_category());
+# endif // defined(BOOST_WINDOWS) || defined(__CYGWIN__)
+ return ec;
+ }
+ }
+#endif // defined(BOOST_ASIO_HAS_SIGNAL) || defined(BOOST_ASIO_HAS_SIGACTION)
+
+ // Remove the registration from the set.
+ *deletion_point = reg->next_in_set_;
+
+ // Remove the registration from the registration table.
+ if (registrations_[signal_number] == reg)
+ registrations_[signal_number] = reg->next_in_table_;
+ if (reg->prev_in_table_)
+ reg->prev_in_table_->next_in_table_ = reg->next_in_table_;
+ if (reg->next_in_table_)
+ reg->next_in_table_->prev_in_table_ = reg->prev_in_table_;
+
+ --state->registration_count_[signal_number];
+
+ delete reg;
+ }
+
+ ec = boost::system::error_code();
+ return ec;
+}
+
+boost::system::error_code signal_set_service::clear(
+ signal_set_service::implementation_type& impl,
+ boost::system::error_code& ec)
+{
+ signal_state* state = get_signal_state();
+ static_mutex::scoped_lock lock(state->mutex_);
+
+ while (registration* reg = impl.signals_)
+ {
+#if defined(BOOST_ASIO_HAS_SIGNAL) || defined(BOOST_ASIO_HAS_SIGACTION)
+ // Set signal handler back to the default if we're the last.
+ if (state->registration_count_[reg->signal_number_] == 1)
+ {
+# if defined(BOOST_ASIO_HAS_SIGACTION)
+ using namespace std; // For memset.
+ struct sigaction sa;
+ memset(&sa, 0, sizeof(sa));
+ sa.sa_handler = SIG_DFL;
+ if (::sigaction(reg->signal_number_, &sa, 0) == -1)
+# else // defined(BOOST_ASIO_HAS_SIGACTION)
+ if (::signal(reg->signal_number_, SIG_DFL) == SIG_ERR)
+# endif // defined(BOOST_ASIO_HAS_SIGACTION)
+ {
+# if defined(BOOST_WINDOWS) || defined(__CYGWIN__)
+ ec = boost::asio::error::invalid_argument;
+# else // defined(BOOST_WINDOWS) || defined(__CYGWIN__)
+ ec = boost::system::error_code(errno,
+ boost::asio::error::get_system_category());
+# endif // defined(BOOST_WINDOWS) || defined(__CYGWIN__)
+ return ec;
+ }
+ }
+#endif // defined(BOOST_ASIO_HAS_SIGNAL) || defined(BOOST_ASIO_HAS_SIGACTION)
+
+ // Remove the registration from the registration table.
+ if (registrations_[reg->signal_number_] == reg)
+ registrations_[reg->signal_number_] = reg->next_in_table_;
+ if (reg->prev_in_table_)
+ reg->prev_in_table_->next_in_table_ = reg->next_in_table_;
+ if (reg->next_in_table_)
+ reg->next_in_table_->prev_in_table_ = reg->prev_in_table_;
+
+ --state->registration_count_[reg->signal_number_];
+
+ impl.signals_ = reg->next_in_set_;
+ delete reg;
+ }
+
+ ec = boost::system::error_code();
+ return ec;
+}
+
+boost::system::error_code signal_set_service::cancel(
+ signal_set_service::implementation_type& impl,
+ boost::system::error_code& ec)
+{
+ BOOST_ASIO_HANDLER_OPERATION(("signal_set", &impl, "cancel"));
+
+ op_queue<operation> ops;
+ {
+ signal_state* state = get_signal_state();
+ static_mutex::scoped_lock lock(state->mutex_);
+
+ while (signal_op* op = impl.queue_.front())
+ {
+ op->ec_ = boost::asio::error::operation_aborted;
+ impl.queue_.pop();
+ ops.push(op);
+ }
+ }
+
+ io_service_.post_deferred_completions(ops);
+
+ ec = boost::system::error_code();
+ return ec;
+}
+
+void signal_set_service::deliver_signal(int signal_number)
+{
+ signal_state* state = get_signal_state();
+ static_mutex::scoped_lock lock(state->mutex_);
+
+ signal_set_service* service = state->service_list_;
+ while (service)
+ {
+ op_queue<operation> ops;
+
+ registration* reg = service->registrations_[signal_number];
+ while (reg)
+ {
+ if (reg->queue_->empty())
+ {
+ ++reg->undelivered_;
+ }
+ else
+ {
+ while (signal_op* op = reg->queue_->front())
+ {
+ op->signal_number_ = signal_number;
+ reg->queue_->pop();
+ ops.push(op);
+ }
+ }
+
+ reg = reg->next_in_table_;
+ }
+
+ service->io_service_.post_deferred_completions(ops);
+
+ service = service->next_;
+ }
+}
+
+void signal_set_service::add_service(signal_set_service* service)
+{
+ signal_state* state = get_signal_state();
+ static_mutex::scoped_lock lock(state->mutex_);
+
+#if !defined(BOOST_WINDOWS) && !defined(__CYGWIN__)
+ // If this is the first service to be created, open a new pipe.
+ if (state->service_list_ == 0)
+ open_descriptors();
+#endif // !defined(BOOST_WINDOWS) && !defined(__CYGWIN__)
+
+ // Insert service into linked list of all services.
+ service->next_ = state->service_list_;
+ service->prev_ = 0;
+ if (state->service_list_)
+ state->service_list_->prev_ = service;
+ state->service_list_ = service;
+
+#if !defined(BOOST_WINDOWS) && !defined(__CYGWIN__)
+ // Register for pipe readiness notifications.
+ service->reactor_.register_internal_descriptor(reactor::read_op,
+ state->read_descriptor_, service->reactor_data_, new pipe_read_op);
+#endif // !defined(BOOST_WINDOWS) && !defined(__CYGWIN__)
+}
+
+void signal_set_service::remove_service(signal_set_service* service)
+{
+ signal_state* state = get_signal_state();
+ static_mutex::scoped_lock lock(state->mutex_);
+
+ if (service->next_ || service->prev_ || state->service_list_ == service)
+ {
+#if !defined(BOOST_WINDOWS) && !defined(__CYGWIN__)
+ // Disable the pipe readiness notifications.
+ service->reactor_.deregister_descriptor(
+ state->read_descriptor_, service->reactor_data_, false);
+#endif // !defined(BOOST_WINDOWS) && !defined(__CYGWIN__)
+
+ // Remove service from linked list of all services.
+ if (state->service_list_ == service)
+ state->service_list_ = service->next_;
+ if (service->prev_)
+ service->prev_->next_ = service->next_;
+ if (service->next_)
+      service->next_->prev_ = service->prev_;
+ service->next_ = 0;
+ service->prev_ = 0;
+
+#if !defined(BOOST_WINDOWS) && !defined(__CYGWIN__)
+ // If this is the last service to be removed, close the pipe.
+ if (state->service_list_ == 0)
+ close_descriptors();
+#endif // !defined(BOOST_WINDOWS) && !defined(__CYGWIN__)
+ }
+}
+
+void signal_set_service::open_descriptors()
+{
+#if !defined(BOOST_WINDOWS) && !defined(__CYGWIN__)
+ signal_state* state = get_signal_state();
+
+ int pipe_fds[2];
+ if (::pipe(pipe_fds) == 0)
+ {
+ state->read_descriptor_ = pipe_fds[0];
+ ::fcntl(state->read_descriptor_, F_SETFL, O_NONBLOCK);
+
+ state->write_descriptor_ = pipe_fds[1];
+ ::fcntl(state->write_descriptor_, F_SETFL, O_NONBLOCK);
+
+#if defined(FD_CLOEXEC)
+ ::fcntl(state->read_descriptor_, F_SETFD, FD_CLOEXEC);
+ ::fcntl(state->write_descriptor_, F_SETFD, FD_CLOEXEC);
+#endif // defined(FD_CLOEXEC)
+ }
+ else
+ {
+ boost::system::error_code ec(errno,
+ boost::asio::error::get_system_category());
+ boost::asio::detail::throw_error(ec, "signal_set_service pipe");
+ }
+#endif // !defined(BOOST_WINDOWS) && !defined(__CYGWIN__)
+}
+
+void signal_set_service::close_descriptors()
+{
+#if !defined(BOOST_WINDOWS) && !defined(__CYGWIN__)
+ signal_state* state = get_signal_state();
+
+ if (state->read_descriptor_ != -1)
+ ::close(state->read_descriptor_);
+ state->read_descriptor_ = -1;
+
+ if (state->write_descriptor_ != -1)
+ ::close(state->write_descriptor_);
+ state->write_descriptor_ = -1;
+#endif // !defined(BOOST_WINDOWS) && !defined(__CYGWIN__)
+}
+
+void signal_set_service::start_wait_op(
+ signal_set_service::implementation_type& impl, signal_op* op)
+{
+ io_service_.work_started();
+
+ signal_state* state = get_signal_state();
+ static_mutex::scoped_lock lock(state->mutex_);
+
+ registration* reg = impl.signals_;
+ while (reg)
+ {
+ if (reg->undelivered_ > 0)
+ {
+ --reg->undelivered_;
+ io_service_.post_deferred_completion(op);
+ return;
+ }
+
+ reg = reg->next_in_set_;
+ }
+
+ impl.queue_.push(op);
+}
+
+} // namespace detail
+} // namespace asio
+} // namespace boost
+
+#include <boost/asio/detail/pop_options.hpp>
+
+#endif // BOOST_ASIO_DETAIL_IMPL_SIGNAL_SET_SERVICE_IPP
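
asio_signal_handler() above is the self-pipe trick: the handler does nothing
but an async-signal-safe write() of the signal number, and actual delivery
happens later when the reactor sees the pipe's read end become readable
(pipe_read_op). The bare POSIX sketch below shows the trick outside of Asio,
for illustration only; handler and signal_pipe are made-up names.

#include <cerrno>
#include <cstdio>
#include <cstring>
#include <fcntl.h>
#include <signal.h>
#include <unistd.h>

static int signal_pipe[2];

static void handler(int signal_number)
{
  int saved_errno = errno;  // keep errno intact, as the code above does
  ::write(signal_pipe[1], &signal_number, sizeof(signal_number));
  errno = saved_errno;
}

int main()
{
  if (::pipe(signal_pipe) != 0)
    return 1;

  // The handler must never block, so the write end is non-blocking; the
  // read end stays blocking here because a plain read() is our "reactor".
  ::fcntl(signal_pipe[1], F_SETFL, O_NONBLOCK);

  struct sigaction sa;
  std::memset(&sa, 0, sizeof(sa));
  sa.sa_handler = handler;
  sigfillset(&sa.sa_mask);
  ::sigaction(SIGINT, &sa, 0);

  std::printf("press Ctrl-C to deliver SIGINT through the pipe\n");

  int signal_number = 0;
  ssize_t n = ::read(signal_pipe[0], &signal_number, sizeof(signal_number));
  if (n == static_cast<ssize_t>(sizeof(signal_number)))
    std::printf("delivered signal %d\n", signal_number);

  return 0;
}
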
diff --git a/boost/asio/detail/impl/socket_ops.ipp b/boost/asio/detail/impl/socket_ops.ipp
new file mode 100644
index 0000000000..24d2d66ad1
--- /dev/null
+++ b/boost/asio/detail/impl/socket_ops.ipp
@@ -0,0 +1,3062 @@
+//
+// detail/impl/socket_ops.ipp
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~
+//
+// Copyright (c) 2003-2012 Christopher M. Kohlhoff (chris at kohlhoff dot com)
+//
+// Distributed under the Boost Software License, Version 1.0. (See accompanying
+// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+//
+
+#ifndef BOOST_ASIO_DETAIL_SOCKET_OPS_IPP
+#define BOOST_ASIO_DETAIL_SOCKET_OPS_IPP
+
+#if defined(_MSC_VER) && (_MSC_VER >= 1200)
+# pragma once
+#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
+
+#include <boost/asio/detail/config.hpp>
+#include <boost/assert.hpp>
+#include <boost/detail/workaround.hpp>
+#include <cstdio>
+#include <cstdlib>
+#include <cstring>
+#include <cerrno>
+#include <new>
+#include <boost/asio/detail/socket_ops.hpp>
+#include <boost/asio/error.hpp>
+
+#include <boost/asio/detail/push_options.hpp>
+
+namespace boost {
+namespace asio {
+namespace detail {
+namespace socket_ops {
+
+#if defined(BOOST_WINDOWS) || defined(__CYGWIN__)
+struct msghdr { int msg_namelen; };
+#endif // defined(BOOST_WINDOWS) || defined(__CYGWIN__)
+
+#if defined(__hpux)
+// HP-UX doesn't declare these functions extern "C", so they are declared again
+// here to avoid linker errors about undefined symbols.
+extern "C" char* if_indextoname(unsigned int, char*);
+extern "C" unsigned int if_nametoindex(const char*);
+#endif // defined(__hpux)
+
+inline void clear_last_error()
+{
+#if defined(BOOST_WINDOWS) || defined(__CYGWIN__)
+ WSASetLastError(0);
+#else
+ errno = 0;
+#endif
+}
+
+template <typename ReturnType>
+inline ReturnType error_wrapper(ReturnType return_value,
+ boost::system::error_code& ec)
+{
+#if defined(BOOST_WINDOWS) || defined(__CYGWIN__)
+ ec = boost::system::error_code(WSAGetLastError(),
+ boost::asio::error::get_system_category());
+#else
+ ec = boost::system::error_code(errno,
+ boost::asio::error::get_system_category());
+#endif
+ return return_value;
+}
+
+template <typename SockLenType>
+inline socket_type call_accept(SockLenType msghdr::*,
+ socket_type s, socket_addr_type* addr, std::size_t* addrlen)
+{
+ SockLenType tmp_addrlen = addrlen ? (SockLenType)*addrlen : 0;
+ socket_type result = ::accept(s, addr, addrlen ? &tmp_addrlen : 0);
+ if (addrlen)
+ *addrlen = (std::size_t)tmp_addrlen;
+ return result;
+}
+
+socket_type accept(socket_type s, socket_addr_type* addr,
+ std::size_t* addrlen, boost::system::error_code& ec)
+{
+ if (s == invalid_socket)
+ {
+ ec = boost::asio::error::bad_descriptor;
+ return invalid_socket;
+ }
+
+ clear_last_error();
+
+ socket_type new_s = error_wrapper(call_accept(
+ &msghdr::msg_namelen, s, addr, addrlen), ec);
+ if (new_s == invalid_socket)
+ return new_s;
+
+#if defined(__MACH__) && defined(__APPLE__) || defined(__FreeBSD__)
+ int optval = 1;
+ int result = error_wrapper(::setsockopt(new_s,
+ SOL_SOCKET, SO_NOSIGPIPE, &optval, sizeof(optval)), ec);
+ if (result != 0)
+ {
+ ::close(new_s);
+ return invalid_socket;
+ }
+#endif
+
+ ec = boost::system::error_code();
+ return new_s;
+}
+
+socket_type sync_accept(socket_type s, state_type state,
+ socket_addr_type* addr, std::size_t* addrlen, boost::system::error_code& ec)
+{
+ // Accept a socket.
+ for (;;)
+ {
+ // Try to complete the operation without blocking.
+ socket_type new_socket = socket_ops::accept(s, addr, addrlen, ec);
+
+ // Check if operation succeeded.
+ if (new_socket != invalid_socket)
+ return new_socket;
+
+ // Operation failed.
+ if (ec == boost::asio::error::would_block
+ || ec == boost::asio::error::try_again)
+ {
+ if (state & user_set_non_blocking)
+ return invalid_socket;
+ // Fall through to retry operation.
+ }
+ else if (ec == boost::asio::error::connection_aborted)
+ {
+ if (state & enable_connection_aborted)
+ return invalid_socket;
+ // Fall through to retry operation.
+ }
+#if defined(EPROTO)
+ else if (ec.value() == EPROTO)
+ {
+ if (state & enable_connection_aborted)
+ return invalid_socket;
+ // Fall through to retry operation.
+ }
+#endif // defined(EPROTO)
+ else
+ return invalid_socket;
+
+ // Wait for socket to become ready.
+ if (socket_ops::poll_read(s, 0, ec) < 0)
+ return invalid_socket;
+ }
+}
+
+#if defined(BOOST_ASIO_HAS_IOCP)
+
+void complete_iocp_accept(socket_type s,
+ void* output_buffer, DWORD address_length,
+ socket_addr_type* addr, std::size_t* addrlen,
+ socket_type new_socket, boost::system::error_code& ec)
+{
+ // Map non-portable errors to their portable counterparts.
+ if (ec.value() == ERROR_NETNAME_DELETED)
+ ec = boost::asio::error::connection_aborted;
+
+ if (!ec)
+ {
+ // Get the address of the peer.
+ if (addr && addrlen)
+ {
+ LPSOCKADDR local_addr = 0;
+ int local_addr_length = 0;
+ LPSOCKADDR remote_addr = 0;
+ int remote_addr_length = 0;
+ GetAcceptExSockaddrs(output_buffer, 0, address_length,
+ address_length, &local_addr, &local_addr_length,
+ &remote_addr, &remote_addr_length);
+ if (static_cast<std::size_t>(remote_addr_length) > *addrlen)
+ {
+ ec = boost::asio::error::invalid_argument;
+ }
+ else
+ {
+ using namespace std; // For memcpy.
+ memcpy(addr, remote_addr, remote_addr_length);
+ *addrlen = static_cast<std::size_t>(remote_addr_length);
+ }
+ }
+
+ // Need to set the SO_UPDATE_ACCEPT_CONTEXT option so that getsockname
+ // and getpeername will work on the accepted socket.
+ SOCKET update_ctx_param = s;
+ socket_ops::state_type state = 0;
+ socket_ops::setsockopt(new_socket, state,
+ SOL_SOCKET, SO_UPDATE_ACCEPT_CONTEXT,
+ &update_ctx_param, sizeof(SOCKET), ec);
+ }
+}
+
+#else // defined(BOOST_ASIO_HAS_IOCP)
+
+bool non_blocking_accept(socket_type s,
+ state_type state, socket_addr_type* addr, std::size_t* addrlen,
+ boost::system::error_code& ec, socket_type& new_socket)
+{
+ for (;;)
+ {
+ // Accept the waiting connection.
+ new_socket = socket_ops::accept(s, addr, addrlen, ec);
+
+ // Check if operation succeeded.
+ if (new_socket != invalid_socket)
+ return true;
+
+ // Retry operation if interrupted by signal.
+ if (ec == boost::asio::error::interrupted)
+ continue;
+
+ // Operation failed.
+ if (ec == boost::asio::error::would_block
+ || ec == boost::asio::error::try_again)
+ {
+ if (state & user_set_non_blocking)
+ return true;
+ // Fall through to retry operation.
+ }
+ else if (ec == boost::asio::error::connection_aborted)
+ {
+ if (state & enable_connection_aborted)
+ return true;
+ // Fall through to retry operation.
+ }
+#if defined(EPROTO)
+ else if (ec.value() == EPROTO)
+ {
+ if (state & enable_connection_aborted)
+ return true;
+ // Fall through to retry operation.
+ }
+#endif // defined(EPROTO)
+ else
+ return true;
+
+ return false;
+ }
+}
+
+#endif // defined(BOOST_ASIO_HAS_IOCP)
+
+template <typename SockLenType>
+inline int call_bind(SockLenType msghdr::*,
+ socket_type s, const socket_addr_type* addr, std::size_t addrlen)
+{
+ return ::bind(s, addr, (SockLenType)addrlen);
+}
+
+int bind(socket_type s, const socket_addr_type* addr,
+ std::size_t addrlen, boost::system::error_code& ec)
+{
+ if (s == invalid_socket)
+ {
+ ec = boost::asio::error::bad_descriptor;
+ return socket_error_retval;
+ }
+
+ clear_last_error();
+ int result = error_wrapper(call_bind(
+ &msghdr::msg_namelen, s, addr, addrlen), ec);
+ if (result == 0)
+ ec = boost::system::error_code();
+ return result;
+}
+
+int close(socket_type s, state_type& state,
+ bool destruction, boost::system::error_code& ec)
+{
+ int result = 0;
+ if (s != invalid_socket)
+ {
+ // We don't want the destructor to block, so set the socket to linger in
+ // the background. If the user doesn't like this behaviour then they need
+ // to explicitly close the socket.
+ if (destruction && (state & user_set_linger))
+ {
+ ::linger opt;
+ opt.l_onoff = 0;
+ opt.l_linger = 0;
+ boost::system::error_code ignored_ec;
+ socket_ops::setsockopt(s, state, SOL_SOCKET,
+ SO_LINGER, &opt, sizeof(opt), ignored_ec);
+ }
+
+ clear_last_error();
+#if defined(BOOST_WINDOWS) || defined(__CYGWIN__)
+ result = error_wrapper(::closesocket(s), ec);
+#else // defined(BOOST_WINDOWS) || defined(__CYGWIN__)
+ result = error_wrapper(::close(s), ec);
+#endif // defined(BOOST_WINDOWS) || defined(__CYGWIN__)
+
+ if (result != 0
+ && (ec == boost::asio::error::would_block
+ || ec == boost::asio::error::try_again))
+ {
+ // According to UNIX Network Programming Vol. 1, it is possible for
+ // close() to fail with EWOULDBLOCK under certain circumstances. What
+ // isn't clear is the state of the descriptor after this error. The one
+ // current OS where this behaviour is seen, Windows, says that the socket
+ // remains open. Therefore we'll put the descriptor back into blocking
+ // mode and have another attempt at closing it.
+#if defined(BOOST_WINDOWS) || defined(__CYGWIN__)
+ ioctl_arg_type arg = 0;
+ ::ioctlsocket(s, FIONBIO, &arg);
+#else // defined(BOOST_WINDOWS) || defined(__CYGWIN__)
+# if defined(__SYMBIAN32__)
+ int flags = ::fcntl(s, F_GETFL, 0);
+ if (flags >= 0)
+ ::fcntl(s, F_SETFL, flags & ~O_NONBLOCK);
+# else // defined(__SYMBIAN32__)
+ ioctl_arg_type arg = 0;
+ ::ioctl(s, FIONBIO, &arg);
+# endif // defined(__SYMBIAN32__)
+#endif // defined(BOOST_WINDOWS) || defined(__CYGWIN__)
+ state &= ~non_blocking;
+
+ clear_last_error();
+#if defined(BOOST_WINDOWS) || defined(__CYGWIN__)
+ result = error_wrapper(::closesocket(s), ec);
+#else // defined(BOOST_WINDOWS) || defined(__CYGWIN__)
+ result = error_wrapper(::close(s), ec);
+#endif // defined(BOOST_WINDOWS) || defined(__CYGWIN__)
+ }
+ }
+
+ if (result == 0)
+ ec = boost::system::error_code();
+ return result;
+}
+
+bool set_user_non_blocking(socket_type s,
+ state_type& state, bool value, boost::system::error_code& ec)
+{
+ if (s == invalid_socket)
+ {
+ ec = boost::asio::error::bad_descriptor;
+ return false;
+ }
+
+ clear_last_error();
+#if defined(BOOST_WINDOWS) || defined(__CYGWIN__)
+ ioctl_arg_type arg = (value ? 1 : 0);
+ int result = error_wrapper(::ioctlsocket(s, FIONBIO, &arg), ec);
+#elif defined(__SYMBIAN32__)
+ int result = error_wrapper(::fcntl(s, F_GETFL, 0), ec);
+ if (result >= 0)
+ {
+ clear_last_error();
+ int flag = (value ? (result | O_NONBLOCK) : (result & ~O_NONBLOCK));
+ result = error_wrapper(::fcntl(s, F_SETFL, flag), ec);
+ }
+#else
+ ioctl_arg_type arg = (value ? 1 : 0);
+ int result = error_wrapper(::ioctl(s, FIONBIO, &arg), ec);
+#endif
+
+ if (result >= 0)
+ {
+ ec = boost::system::error_code();
+ if (value)
+ state |= user_set_non_blocking;
+ else
+ {
+ // Clearing the user-set non-blocking mode always overrides any
+ // internally-set non-blocking flag. Any subsequent asynchronous
+ // operations will need to re-enable non-blocking I/O.
+ state &= ~(user_set_non_blocking | internal_non_blocking);
+ }
+ return true;
+ }
+
+ return false;
+}
+
+bool set_internal_non_blocking(socket_type s,
+ state_type& state, bool value, boost::system::error_code& ec)
+{
+ if (s == invalid_socket)
+ {
+ ec = boost::asio::error::bad_descriptor;
+ return false;
+ }
+
+ if (!value && (state & user_set_non_blocking))
+ {
+ // It does not make sense to clear the internal non-blocking flag if the
+ // user still wants non-blocking behaviour. Return an error and let the
+ // caller figure out whether to update the user-set non-blocking flag.
+ ec = boost::asio::error::invalid_argument;
+ return false;
+ }
+
+ clear_last_error();
+#if defined(BOOST_WINDOWS) || defined(__CYGWIN__)
+ ioctl_arg_type arg = (value ? 1 : 0);
+ int result = error_wrapper(::ioctlsocket(s, FIONBIO, &arg), ec);
+#elif defined(__SYMBIAN32__)
+ int result = error_wrapper(::fcntl(s, F_GETFL, 0), ec);
+ if (result >= 0)
+ {
+ clear_last_error();
+ int flag = (value ? (result | O_NONBLOCK) : (result & ~O_NONBLOCK));
+ result = error_wrapper(::fcntl(s, F_SETFL, flag), ec);
+ }
+#else
+ ioctl_arg_type arg = (value ? 1 : 0);
+ int result = error_wrapper(::ioctl(s, FIONBIO, &arg), ec);
+#endif
+
+ if (result >= 0)
+ {
+ ec = boost::system::error_code();
+ if (value)
+ state |= internal_non_blocking;
+ else
+ state &= ~internal_non_blocking;
+ return true;
+ }
+
+ return false;
+}
+
+int shutdown(socket_type s, int what, boost::system::error_code& ec)
+{
+ if (s == invalid_socket)
+ {
+ ec = boost::asio::error::bad_descriptor;
+ return socket_error_retval;
+ }
+
+ clear_last_error();
+ int result = error_wrapper(::shutdown(s, what), ec);
+ if (result == 0)
+ ec = boost::system::error_code();
+ return result;
+}
+
+template <typename SockLenType>
+inline int call_connect(SockLenType msghdr::*,
+ socket_type s, const socket_addr_type* addr, std::size_t addrlen)
+{
+ return ::connect(s, addr, (SockLenType)addrlen);
+}
+
+int connect(socket_type s, const socket_addr_type* addr,
+ std::size_t addrlen, boost::system::error_code& ec)
+{
+ if (s == invalid_socket)
+ {
+ ec = boost::asio::error::bad_descriptor;
+ return socket_error_retval;
+ }
+
+ clear_last_error();
+ int result = error_wrapper(call_connect(
+ &msghdr::msg_namelen, s, addr, addrlen), ec);
+ if (result == 0)
+ ec = boost::system::error_code();
+#if defined(__linux__)
+ else if (ec == boost::asio::error::try_again)
+ ec = boost::asio::error::no_buffer_space;
+#endif // defined(__linux__)
+ return result;
+}
+
+void sync_connect(socket_type s, const socket_addr_type* addr,
+ std::size_t addrlen, boost::system::error_code& ec)
+{
+ // Perform the connect operation.
+ socket_ops::connect(s, addr, addrlen, ec);
+ if (ec != boost::asio::error::in_progress
+ && ec != boost::asio::error::would_block)
+ {
+ // The connect operation finished immediately.
+ return;
+ }
+
+ // Wait for socket to become ready.
+ if (socket_ops::poll_connect(s, ec) < 0)
+ return;
+
+ // Get the error code from the connect operation.
+ int connect_error = 0;
+ size_t connect_error_len = sizeof(connect_error);
+ if (socket_ops::getsockopt(s, 0, SOL_SOCKET, SO_ERROR,
+ &connect_error, &connect_error_len, ec) == socket_error_retval)
+ return;
+
+ // Return the result of the connect operation.
+ ec = boost::system::error_code(connect_error,
+ boost::asio::error::get_system_category());
+}
+
+bool non_blocking_connect(socket_type s, boost::system::error_code& ec)
+{
+ // Get the error code from the connect operation.
+ int connect_error = 0;
+ size_t connect_error_len = sizeof(connect_error);
+ if (socket_ops::getsockopt(s, 0, SOL_SOCKET, SO_ERROR,
+ &connect_error, &connect_error_len, ec) == 0)
+ {
+ if (connect_error)
+ {
+ ec = boost::system::error_code(connect_error,
+ boost::asio::error::get_system_category());
+ }
+ else
+ ec = boost::system::error_code();
+ }
+
+ return true;
+}
+
+int socketpair(int af, int type, int protocol,
+ socket_type sv[2], boost::system::error_code& ec)
+{
+#if defined(BOOST_WINDOWS) || defined(__CYGWIN__)
+ (void)(af);
+ (void)(type);
+ (void)(protocol);
+ (void)(sv);
+ ec = boost::asio::error::operation_not_supported;
+ return socket_error_retval;
+#else
+ clear_last_error();
+ int result = error_wrapper(::socketpair(af, type, protocol, sv), ec);
+ if (result == 0)
+ ec = boost::system::error_code();
+ return result;
+#endif
+}
+
+bool sockatmark(socket_type s, boost::system::error_code& ec)
+{
+ if (s == invalid_socket)
+ {
+ ec = boost::asio::error::bad_descriptor;
+ return false;
+ }
+
+#if defined(SIOCATMARK)
+ ioctl_arg_type value = 0;
+# if defined(BOOST_WINDOWS) || defined(__CYGWIN__)
+ int result = error_wrapper(::ioctlsocket(s, SIOCATMARK, &value), ec);
+# else // defined(BOOST_WINDOWS) || defined(__CYGWIN__)
+ int result = error_wrapper(::ioctl(s, SIOCATMARK, &value), ec);
+# endif // defined(BOOST_WINDOWS) || defined(__CYGWIN__)
+ if (result == 0)
+ ec = boost::system::error_code();
+# if defined(ENOTTY)
+ if (ec.value() == ENOTTY)
+ ec = boost::asio::error::not_socket;
+# endif // defined(ENOTTY)
+#else // defined(SIOCATMARK)
+ int value = error_wrapper(::sockatmark(s), ec);
+ if (value != -1)
+ ec = boost::system::error_code();
+#endif // defined(SIOCATMARK)
+
+ return ec ? false : value != 0;
+}
+
+size_t available(socket_type s, boost::system::error_code& ec)
+{
+ if (s == invalid_socket)
+ {
+ ec = boost::asio::error::bad_descriptor;
+ return 0;
+ }
+
+ ioctl_arg_type value = 0;
+#if defined(BOOST_WINDOWS) || defined(__CYGWIN__)
+ int result = error_wrapper(::ioctlsocket(s, FIONREAD, &value), ec);
+#else // defined(BOOST_WINDOWS) || defined(__CYGWIN__)
+ int result = error_wrapper(::ioctl(s, FIONREAD, &value), ec);
+#endif // defined(BOOST_WINDOWS) || defined(__CYGWIN__)
+ if (result == 0)
+ ec = boost::system::error_code();
+#if defined(ENOTTY)
+ if (ec.value() == ENOTTY)
+ ec = boost::asio::error::not_socket;
+#endif // defined(ENOTTY)
+
+ return ec ? static_cast<size_t>(0) : static_cast<size_t>(value);
+}
+
+int listen(socket_type s, int backlog, boost::system::error_code& ec)
+{
+ if (s == invalid_socket)
+ {
+ ec = boost::asio::error::bad_descriptor;
+ return socket_error_retval;
+ }
+
+ clear_last_error();
+ int result = error_wrapper(::listen(s, backlog), ec);
+ if (result == 0)
+ ec = boost::system::error_code();
+ return result;
+}
+
+inline void init_buf_iov_base(void*& base, void* addr)
+{
+ base = addr;
+}
+
+template <typename T>
+inline void init_buf_iov_base(T& base, void* addr)
+{
+ base = static_cast<T>(addr);
+}
+
+#if defined(BOOST_WINDOWS) || defined(__CYGWIN__)
+typedef WSABUF buf;
+#else // defined(BOOST_WINDOWS) || defined(__CYGWIN__)
+typedef iovec buf;
+#endif // defined(BOOST_WINDOWS) || defined(__CYGWIN__)
+
+void init_buf(buf& b, void* data, size_t size)
+{
+#if defined(BOOST_WINDOWS) || defined(__CYGWIN__)
+ b.buf = static_cast<char*>(data);
+ b.len = static_cast<u_long>(size);
+#else // defined(BOOST_WINDOWS) || defined(__CYGWIN__)
+ init_buf_iov_base(b.iov_base, data);
+ b.iov_len = size;
+#endif // defined(BOOST_WINDOWS) || defined(__CYGWIN__)
+}
+
+void init_buf(buf& b, const void* data, size_t size)
+{
+#if defined(BOOST_WINDOWS) || defined(__CYGWIN__)
+ b.buf = static_cast<char*>(const_cast<void*>(data));
+ b.len = static_cast<u_long>(size);
+#else // defined(BOOST_WINDOWS) || defined(__CYGWIN__)
+ init_buf_iov_base(b.iov_base, const_cast<void*>(data));
+ b.iov_len = size;
+#endif // defined(BOOST_WINDOWS) || defined(__CYGWIN__)
+}
+
+inline void init_msghdr_msg_name(void*& name, socket_addr_type* addr)
+{
+ name = addr;
+}
+
+inline void init_msghdr_msg_name(void*& name, const socket_addr_type* addr)
+{
+ name = const_cast<socket_addr_type*>(addr);
+}
+
+template <typename T>
+inline void init_msghdr_msg_name(T& name, socket_addr_type* addr)
+{
+ name = reinterpret_cast<T>(addr);
+}
+
+template <typename T>
+inline void init_msghdr_msg_name(T& name, const socket_addr_type* addr)
+{
+ name = reinterpret_cast<T>(const_cast<socket_addr_type*>(addr));
+}
+
+int recv(socket_type s, buf* bufs, size_t count, int flags,
+ boost::system::error_code& ec)
+{
+ clear_last_error();
+#if defined(BOOST_WINDOWS) || defined(__CYGWIN__)
+ // Receive some data.
+ DWORD recv_buf_count = static_cast<DWORD>(count);
+ DWORD bytes_transferred = 0;
+ DWORD recv_flags = flags;
+ int result = error_wrapper(::WSARecv(s, bufs,
+ recv_buf_count, &bytes_transferred, &recv_flags, 0, 0), ec);
+ if (ec.value() == ERROR_NETNAME_DELETED)
+ ec = boost::asio::error::connection_reset;
+ else if (ec.value() == ERROR_PORT_UNREACHABLE)
+ ec = boost::asio::error::connection_refused;
+ if (result != 0)
+ return socket_error_retval;
+ ec = boost::system::error_code();
+ return bytes_transferred;
+#else // defined(BOOST_WINDOWS) || defined(__CYGWIN__)
+ msghdr msg = msghdr();
+ msg.msg_iov = bufs;
+ msg.msg_iovlen = count;
+ int result = error_wrapper(::recvmsg(s, &msg, flags), ec);
+ if (result >= 0)
+ ec = boost::system::error_code();
+ return result;
+#endif // defined(BOOST_WINDOWS) || defined(__CYGWIN__)
+}
+
+size_t sync_recv(socket_type s, state_type state, buf* bufs,
+ size_t count, int flags, bool all_empty, boost::system::error_code& ec)
+{
+ if (s == invalid_socket)
+ {
+ ec = boost::asio::error::bad_descriptor;
+ return 0;
+ }
+
+ // A request to read 0 bytes on a stream is a no-op.
+ if (all_empty && (state & stream_oriented))
+ {
+ ec = boost::system::error_code();
+ return 0;
+ }
+
+ // Read some data.
+ for (;;)
+ {
+ // Try to complete the operation without blocking.
+ int bytes = socket_ops::recv(s, bufs, count, flags, ec);
+
+ // Check if operation succeeded.
+ if (bytes > 0)
+ return bytes;
+
+ // Check for EOF.
+ if ((state & stream_oriented) && bytes == 0)
+ {
+ ec = boost::asio::error::eof;
+ return 0;
+ }
+
+ // Operation failed.
+ if ((state & user_set_non_blocking)
+ || (ec != boost::asio::error::would_block
+ && ec != boost::asio::error::try_again))
+ return 0;
+
+ // Wait for socket to become ready.
+ if (socket_ops::poll_read(s, 0, ec) < 0)
+ return 0;
+ }
+}
+
+#if defined(BOOST_ASIO_HAS_IOCP)
+
+void complete_iocp_recv(state_type state,
+ const weak_cancel_token_type& cancel_token, bool all_empty,
+ boost::system::error_code& ec, size_t bytes_transferred)
+{
+ // Map non-portable errors to their portable counterparts.
+ if (ec.value() == ERROR_NETNAME_DELETED)
+ {
+ if (cancel_token.expired())
+ ec = boost::asio::error::operation_aborted;
+ else
+ ec = boost::asio::error::connection_reset;
+ }
+ else if (ec.value() == ERROR_PORT_UNREACHABLE)
+ {
+ ec = boost::asio::error::connection_refused;
+ }
+
+ // Check for connection closed.
+ else if (!ec && bytes_transferred == 0
+ && (state & stream_oriented) != 0
+ && !all_empty)
+ {
+ ec = boost::asio::error::eof;
+ }
+}
+
+#else // defined(BOOST_ASIO_HAS_IOCP)
+
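+// Helper for performing a receive in non-blocking mode. Returns true if the
+// operation has completed (successfully or with an error), or false if it
+// should be retried once the socket becomes ready again. The other
+// non_blocking_* helpers in this file follow the same convention.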
+bool non_blocking_recv(socket_type s,
+ buf* bufs, size_t count, int flags, bool is_stream,
+ boost::system::error_code& ec, size_t& bytes_transferred)
+{
+ for (;;)
+ {
+ // Read some data.
+ int bytes = socket_ops::recv(s, bufs, count, flags, ec);
+
+ // Check for end of stream.
+ if (is_stream && bytes == 0)
+ {
+ ec = boost::asio::error::eof;
+ return true;
+ }
+
+ // Retry operation if interrupted by signal.
+ if (ec == boost::asio::error::interrupted)
+ continue;
+
+ // Check if we need to run the operation again.
+ if (ec == boost::asio::error::would_block
+ || ec == boost::asio::error::try_again)
+ return false;
+
+ // Operation is complete.
+ if (bytes >= 0)
+ {
+ ec = boost::system::error_code();
+ bytes_transferred = bytes;
+ }
+ else
+ bytes_transferred = 0;
+
+ return true;
+ }
+}
+
+#endif // defined(BOOST_ASIO_HAS_IOCP)
+
+int recvfrom(socket_type s, buf* bufs, size_t count, int flags,
+ socket_addr_type* addr, std::size_t* addrlen,
+ boost::system::error_code& ec)
+{
+ clear_last_error();
+#if defined(BOOST_WINDOWS) || defined(__CYGWIN__)
+ // Receive some data.
+ DWORD recv_buf_count = static_cast<DWORD>(count);
+ DWORD bytes_transferred = 0;
+ DWORD recv_flags = flags;
+ int tmp_addrlen = (int)*addrlen;
+ int result = error_wrapper(::WSARecvFrom(s, bufs, recv_buf_count,
+ &bytes_transferred, &recv_flags, addr, &tmp_addrlen, 0, 0), ec);
+ *addrlen = (std::size_t)tmp_addrlen;
+ if (ec.value() == ERROR_NETNAME_DELETED)
+ ec = boost::asio::error::connection_reset;
+ else if (ec.value() == ERROR_PORT_UNREACHABLE)
+ ec = boost::asio::error::connection_refused;
+ if (result != 0)
+ return socket_error_retval;
+ ec = boost::system::error_code();
+ return bytes_transferred;
+#else // defined(BOOST_WINDOWS) || defined(__CYGWIN__)
+ msghdr msg = msghdr();
+ init_msghdr_msg_name(msg.msg_name, addr);
+ msg.msg_namelen = *addrlen;
+ msg.msg_iov = bufs;
+ msg.msg_iovlen = count;
+ int result = error_wrapper(::recvmsg(s, &msg, flags), ec);
+ *addrlen = msg.msg_namelen;
+ if (result >= 0)
+ ec = boost::system::error_code();
+ return result;
+#endif // defined(BOOST_WINDOWS) || defined(__CYGWIN__)
+}
+
+size_t sync_recvfrom(socket_type s, state_type state, buf* bufs,
+ size_t count, int flags, socket_addr_type* addr,
+ std::size_t* addrlen, boost::system::error_code& ec)
+{
+ if (s == invalid_socket)
+ {
+ ec = boost::asio::error::bad_descriptor;
+ return 0;
+ }
+
+ // Read some data.
+ for (;;)
+ {
+ // Try to complete the operation without blocking.
+ int bytes = socket_ops::recvfrom(s, bufs, count, flags, addr, addrlen, ec);
+
+ // Check if operation succeeded.
+ if (bytes >= 0)
+ return bytes;
+
+ // Operation failed.
+ if ((state & user_set_non_blocking)
+ || (ec != boost::asio::error::would_block
+ && ec != boost::asio::error::try_again))
+ return 0;
+
+ // Wait for socket to become ready.
+ if (socket_ops::poll_read(s, 0, ec) < 0)
+ return 0;
+ }
+}
+
+#if defined(BOOST_ASIO_HAS_IOCP)
+
+void complete_iocp_recvfrom(
+ const weak_cancel_token_type& cancel_token,
+ boost::system::error_code& ec)
+{
+ // Map non-portable errors to their portable counterparts.
+ if (ec.value() == ERROR_NETNAME_DELETED)
+ {
+ if (cancel_token.expired())
+ ec = boost::asio::error::operation_aborted;
+ else
+ ec = boost::asio::error::connection_reset;
+ }
+ else if (ec.value() == ERROR_PORT_UNREACHABLE)
+ {
+ ec = boost::asio::error::connection_refused;
+ }
+}
+
+#else // defined(BOOST_ASIO_HAS_IOCP)
+
+bool non_blocking_recvfrom(socket_type s,
+ buf* bufs, size_t count, int flags,
+ socket_addr_type* addr, std::size_t* addrlen,
+ boost::system::error_code& ec, size_t& bytes_transferred)
+{
+ for (;;)
+ {
+ // Read some data.
+ int bytes = socket_ops::recvfrom(s, bufs, count, flags, addr, addrlen, ec);
+
+ // Retry operation if interrupted by signal.
+ if (ec == boost::asio::error::interrupted)
+ continue;
+
+ // Check if we need to run the operation again.
+ if (ec == boost::asio::error::would_block
+ || ec == boost::asio::error::try_again)
+ return false;
+
+ // Operation is complete.
+ if (bytes >= 0)
+ {
+ ec = boost::system::error_code();
+ bytes_transferred = bytes;
+ }
+ else
+ bytes_transferred = 0;
+
+ return true;
+ }
+}
+
+#endif // defined(BOOST_ASIO_HAS_IOCP)
+
+int recvmsg(socket_type s, buf* bufs, size_t count,
+ int in_flags, int& out_flags, boost::system::error_code& ec)
+{
+ clear_last_error();
+#if defined(BOOST_WINDOWS) || defined(__CYGWIN__)
+ out_flags = 0;
+ return socket_ops::recv(s, bufs, count, in_flags, ec);
+#else // defined(BOOST_WINDOWS) || defined(__CYGWIN__)
+ msghdr msg = msghdr();
+ msg.msg_iov = bufs;
+ msg.msg_iovlen = count;
+ int result = error_wrapper(::recvmsg(s, &msg, in_flags), ec);
+ if (result >= 0)
+ {
+ ec = boost::system::error_code();
+ out_flags = msg.msg_flags;
+ }
+ else
+ out_flags = 0;
+ return result;
+#endif // defined(BOOST_WINDOWS) || defined(__CYGWIN__)
+}
+
+size_t sync_recvmsg(socket_type s, state_type state,
+ buf* bufs, size_t count, int in_flags, int& out_flags,
+ boost::system::error_code& ec)
+{
+ if (s == invalid_socket)
+ {
+ ec = boost::asio::error::bad_descriptor;
+ return 0;
+ }
+
+ // Read some data.
+ for (;;)
+ {
+ // Try to complete the operation without blocking.
+ int bytes = socket_ops::recvmsg(s, bufs, count, in_flags, out_flags, ec);
+
+ // Check if operation succeeded.
+ if (bytes >= 0)
+ return bytes;
+
+ // Operation failed.
+ if ((state & user_set_non_blocking)
+ || (ec != boost::asio::error::would_block
+ && ec != boost::asio::error::try_again))
+ return 0;
+
+ // Wait for socket to become ready.
+ if (socket_ops::poll_read(s, 0, ec) < 0)
+ return 0;
+ }
+}
+
+#if defined(BOOST_ASIO_HAS_IOCP)
+
+void complete_iocp_recvmsg(
+ const weak_cancel_token_type& cancel_token,
+ boost::system::error_code& ec)
+{
+ // Map non-portable errors to their portable counterparts.
+ if (ec.value() == ERROR_NETNAME_DELETED)
+ {
+ if (cancel_token.expired())
+ ec = boost::asio::error::operation_aborted;
+ else
+ ec = boost::asio::error::connection_reset;
+ }
+ else if (ec.value() == ERROR_PORT_UNREACHABLE)
+ {
+ ec = boost::asio::error::connection_refused;
+ }
+}
+
+#else // defined(BOOST_ASIO_HAS_IOCP)
+
+bool non_blocking_recvmsg(socket_type s,
+ buf* bufs, size_t count, int in_flags, int& out_flags,
+ boost::system::error_code& ec, size_t& bytes_transferred)
+{
+ for (;;)
+ {
+ // Read some data.
+ int bytes = socket_ops::recvmsg(s, bufs, count, in_flags, out_flags, ec);
+
+ // Retry operation if interrupted by signal.
+ if (ec == boost::asio::error::interrupted)
+ continue;
+
+ // Check if we need to run the operation again.
+ if (ec == boost::asio::error::would_block
+ || ec == boost::asio::error::try_again)
+ return false;
+
+ // Operation is complete.
+ if (bytes >= 0)
+ {
+ ec = boost::system::error_code();
+ bytes_transferred = bytes;
+ }
+ else
+ bytes_transferred = 0;
+
+ return true;
+ }
+}
+
+#endif // defined(BOOST_ASIO_HAS_IOCP)
+
+int send(socket_type s, const buf* bufs, size_t count, int flags,
+ boost::system::error_code& ec)
+{
+ clear_last_error();
+#if defined(BOOST_WINDOWS) || defined(__CYGWIN__)
+ // Send the data.
+ DWORD send_buf_count = static_cast<DWORD>(count);
+ DWORD bytes_transferred = 0;
+ DWORD send_flags = flags;
+ int result = error_wrapper(::WSASend(s, const_cast<buf*>(bufs),
+ send_buf_count, &bytes_transferred, send_flags, 0, 0), ec);
+ if (ec.value() == ERROR_NETNAME_DELETED)
+ ec = boost::asio::error::connection_reset;
+ else if (ec.value() == ERROR_PORT_UNREACHABLE)
+ ec = boost::asio::error::connection_refused;
+ if (result != 0)
+ return socket_error_retval;
+ ec = boost::system::error_code();
+ return bytes_transferred;
+#else // defined(BOOST_WINDOWS) || defined(__CYGWIN__)
+ msghdr msg = msghdr();
+ msg.msg_iov = const_cast<buf*>(bufs);
+ msg.msg_iovlen = count;
+#if defined(__linux__)
+ flags |= MSG_NOSIGNAL;
+#endif // defined(__linux__)
+ int result = error_wrapper(::sendmsg(s, &msg, flags), ec);
+ if (result >= 0)
+ ec = boost::system::error_code();
+ return result;
+#endif // defined(BOOST_WINDOWS) || defined(__CYGWIN__)
+}
+
+size_t sync_send(socket_type s, state_type state, const buf* bufs,
+ size_t count, int flags, bool all_empty, boost::system::error_code& ec)
+{
+ if (s == invalid_socket)
+ {
+ ec = boost::asio::error::bad_descriptor;
+ return 0;
+ }
+
+ // A request to write 0 bytes to a stream is a no-op.
+ if (all_empty && (state & stream_oriented))
+ {
+ ec = boost::system::error_code();
+ return 0;
+ }
+
+  // Write some data.
+ for (;;)
+ {
+ // Try to complete the operation without blocking.
+ int bytes = socket_ops::send(s, bufs, count, flags, ec);
+
+ // Check if operation succeeded.
+ if (bytes >= 0)
+ return bytes;
+
+ // Operation failed.
+ if ((state & user_set_non_blocking)
+ || (ec != boost::asio::error::would_block
+ && ec != boost::asio::error::try_again))
+ return 0;
+
+ // Wait for socket to become ready.
+ if (socket_ops::poll_write(s, 0, ec) < 0)
+ return 0;
+ }
+}
+
+#if defined(BOOST_ASIO_HAS_IOCP)
+
+void complete_iocp_send(
+ const weak_cancel_token_type& cancel_token,
+ boost::system::error_code& ec)
+{
+ // Map non-portable errors to their portable counterparts.
+ if (ec.value() == ERROR_NETNAME_DELETED)
+ {
+ if (cancel_token.expired())
+ ec = boost::asio::error::operation_aborted;
+ else
+ ec = boost::asio::error::connection_reset;
+ }
+ else if (ec.value() == ERROR_PORT_UNREACHABLE)
+ {
+ ec = boost::asio::error::connection_refused;
+ }
+}
+
+#else // defined(BOOST_ASIO_HAS_IOCP)
+
+bool non_blocking_send(socket_type s,
+ const buf* bufs, size_t count, int flags,
+ boost::system::error_code& ec, size_t& bytes_transferred)
+{
+ for (;;)
+ {
+ // Write some data.
+ int bytes = socket_ops::send(s, bufs, count, flags, ec);
+
+ // Retry operation if interrupted by signal.
+ if (ec == boost::asio::error::interrupted)
+ continue;
+
+ // Check if we need to run the operation again.
+ if (ec == boost::asio::error::would_block
+ || ec == boost::asio::error::try_again)
+ return false;
+
+ // Operation is complete.
+ if (bytes >= 0)
+ {
+ ec = boost::system::error_code();
+ bytes_transferred = bytes;
+ }
+ else
+ bytes_transferred = 0;
+
+ return true;
+ }
+}
+
+#endif // defined(BOOST_ASIO_HAS_IOCP)
+
+int sendto(socket_type s, const buf* bufs, size_t count, int flags,
+ const socket_addr_type* addr, std::size_t addrlen,
+ boost::system::error_code& ec)
+{
+ clear_last_error();
+#if defined(BOOST_WINDOWS) || defined(__CYGWIN__)
+ // Send the data.
+ DWORD send_buf_count = static_cast<DWORD>(count);
+ DWORD bytes_transferred = 0;
+ int result = error_wrapper(::WSASendTo(s, const_cast<buf*>(bufs),
+ send_buf_count, &bytes_transferred, flags, addr,
+ static_cast<int>(addrlen), 0, 0), ec);
+ if (ec.value() == ERROR_NETNAME_DELETED)
+ ec = boost::asio::error::connection_reset;
+ else if (ec.value() == ERROR_PORT_UNREACHABLE)
+ ec = boost::asio::error::connection_refused;
+ if (result != 0)
+ return socket_error_retval;
+ ec = boost::system::error_code();
+ return bytes_transferred;
+#else // defined(BOOST_WINDOWS) || defined(__CYGWIN__)
+ msghdr msg = msghdr();
+ init_msghdr_msg_name(msg.msg_name, addr);
+ msg.msg_namelen = addrlen;
+ msg.msg_iov = const_cast<buf*>(bufs);
+ msg.msg_iovlen = count;
+#if defined(__linux__)
+ flags |= MSG_NOSIGNAL;
+#endif // defined(__linux__)
+ int result = error_wrapper(::sendmsg(s, &msg, flags), ec);
+ if (result >= 0)
+ ec = boost::system::error_code();
+ return result;
+#endif // defined(BOOST_WINDOWS) || defined(__CYGWIN__)
+}
+
+size_t sync_sendto(socket_type s, state_type state, const buf* bufs,
+ size_t count, int flags, const socket_addr_type* addr,
+ std::size_t addrlen, boost::system::error_code& ec)
+{
+ if (s == invalid_socket)
+ {
+ ec = boost::asio::error::bad_descriptor;
+ return 0;
+ }
+
+ // Write some data.
+ for (;;)
+ {
+ // Try to complete the operation without blocking.
+ int bytes = socket_ops::sendto(s, bufs, count, flags, addr, addrlen, ec);
+
+ // Check if operation succeeded.
+ if (bytes >= 0)
+ return bytes;
+
+ // Operation failed.
+ if ((state & user_set_non_blocking)
+ || (ec != boost::asio::error::would_block
+ && ec != boost::asio::error::try_again))
+ return 0;
+
+ // Wait for socket to become ready.
+ if (socket_ops::poll_write(s, 0, ec) < 0)
+ return 0;
+ }
+}
+
+#if !defined(BOOST_ASIO_HAS_IOCP)
+
+bool non_blocking_sendto(socket_type s,
+ const buf* bufs, size_t count, int flags,
+ const socket_addr_type* addr, std::size_t addrlen,
+ boost::system::error_code& ec, size_t& bytes_transferred)
+{
+ for (;;)
+ {
+ // Write some data.
+ int bytes = socket_ops::sendto(s, bufs, count, flags, addr, addrlen, ec);
+
+ // Retry operation if interrupted by signal.
+ if (ec == boost::asio::error::interrupted)
+ continue;
+
+ // Check if we need to run the operation again.
+ if (ec == boost::asio::error::would_block
+ || ec == boost::asio::error::try_again)
+ return false;
+
+ // Operation is complete.
+ if (bytes >= 0)
+ {
+ ec = boost::system::error_code();
+ bytes_transferred = bytes;
+ }
+ else
+ bytes_transferred = 0;
+
+ return true;
+ }
+}
+
+#endif // !defined(BOOST_ASIO_HAS_IOCP)
+
+socket_type socket(int af, int type, int protocol,
+ boost::system::error_code& ec)
+{
+ clear_last_error();
+#if defined(BOOST_WINDOWS) || defined(__CYGWIN__)
+ socket_type s = error_wrapper(::WSASocket(af, type, protocol, 0, 0,
+ WSA_FLAG_OVERLAPPED), ec);
+ if (s == invalid_socket)
+ return s;
+
+ if (af == AF_INET6)
+ {
+ // Try to enable the POSIX default behaviour of having IPV6_V6ONLY set to
+ // false. This will only succeed on Windows Vista and later versions of
+ // Windows, where a dual-stack IPv4/v6 implementation is available.
+ DWORD optval = 0;
+ ::setsockopt(s, IPPROTO_IPV6, IPV6_V6ONLY,
+ reinterpret_cast<const char*>(&optval), sizeof(optval));
+ }
+
+ ec = boost::system::error_code();
+
+ return s;
+#elif defined(__MACH__) && defined(__APPLE__) || defined(__FreeBSD__)
+ socket_type s = error_wrapper(::socket(af, type, protocol), ec);
+ if (s == invalid_socket)
+ return s;
+
+ int optval = 1;
+ int result = error_wrapper(::setsockopt(s,
+ SOL_SOCKET, SO_NOSIGPIPE, &optval, sizeof(optval)), ec);
+ if (result != 0)
+ {
+ ::close(s);
+ return invalid_socket;
+ }
+
+ return s;
+#else
+ int s = error_wrapper(::socket(af, type, protocol), ec);
+ if (s >= 0)
+ ec = boost::system::error_code();
+ return s;
+#endif
+}
+
+template <typename SockLenType>
+inline int call_setsockopt(SockLenType msghdr::*,
+ socket_type s, int level, int optname,
+ const void* optval, std::size_t optlen)
+{
+ return ::setsockopt(s, level, optname,
+ (const char*)optval, (SockLenType)optlen);
+}
+
+int setsockopt(socket_type s, state_type& state, int level, int optname,
+ const void* optval, std::size_t optlen, boost::system::error_code& ec)
+{
+ if (s == invalid_socket)
+ {
+ ec = boost::asio::error::bad_descriptor;
+ return socket_error_retval;
+ }
+
+ if (level == custom_socket_option_level && optname == always_fail_option)
+ {
+ ec = boost::asio::error::invalid_argument;
+ return socket_error_retval;
+ }
+
+ if (level == custom_socket_option_level
+ && optname == enable_connection_aborted_option)
+ {
+ if (optlen != sizeof(int))
+ {
+ ec = boost::asio::error::invalid_argument;
+ return socket_error_retval;
+ }
+
+ if (*static_cast<const int*>(optval))
+ state |= enable_connection_aborted;
+ else
+ state &= ~enable_connection_aborted;
+ ec = boost::system::error_code();
+ return 0;
+ }
+
+ if (level == SOL_SOCKET && optname == SO_LINGER)
+ state |= user_set_linger;
+
+#if defined(__BORLANDC__)
+ // Mysteriously, using the getsockopt and setsockopt functions directly with
+ // Borland C++ results in incorrect values being set and read. The bug can be
+ // worked around by using function addresses resolved with GetProcAddress.
+ if (HMODULE winsock_module = ::GetModuleHandleA("ws2_32"))
+ {
+ typedef int (WSAAPI *sso_t)(SOCKET, int, int, const char*, int);
+ if (sso_t sso = (sso_t)::GetProcAddress(winsock_module, "setsockopt"))
+ {
+ clear_last_error();
+ return error_wrapper(sso(s, level, optname,
+ reinterpret_cast<const char*>(optval),
+ static_cast<int>(optlen)), ec);
+ }
+ }
+ ec = boost::asio::error::fault;
+ return socket_error_retval;
+#else // defined(__BORLANDC__)
+ clear_last_error();
+ int result = error_wrapper(call_setsockopt(&msghdr::msg_namelen,
+ s, level, optname, optval, optlen), ec);
+ if (result == 0)
+ {
+ ec = boost::system::error_code();
+
+#if defined(__MACH__) && defined(__APPLE__) \
+ || defined(__NetBSD__) || defined(__FreeBSD__) || defined(__OpenBSD__)
+ // To implement portable behaviour for SO_REUSEADDR with UDP sockets we
+ // need to also set SO_REUSEPORT on BSD-based platforms.
+ if ((state & datagram_oriented)
+ && level == SOL_SOCKET && optname == SO_REUSEADDR)
+ {
+ call_setsockopt(&msghdr::msg_namelen, s,
+ SOL_SOCKET, SO_REUSEPORT, optval, optlen);
+ }
+#endif
+ }
+
+ return result;
+#endif // defined(__BORLANDC__)
+}
+
+template <typename SockLenType>
+inline int call_getsockopt(SockLenType msghdr::*,
+ socket_type s, int level, int optname,
+ void* optval, std::size_t* optlen)
+{
+ SockLenType tmp_optlen = (SockLenType)*optlen;
+ int result = ::getsockopt(s, level, optname, (char*)optval, &tmp_optlen);
+ *optlen = (std::size_t)tmp_optlen;
+ return result;
+}
+
+int getsockopt(socket_type s, state_type state, int level, int optname,
+ void* optval, size_t* optlen, boost::system::error_code& ec)
+{
+ if (s == invalid_socket)
+ {
+ ec = boost::asio::error::bad_descriptor;
+ return socket_error_retval;
+ }
+
+ if (level == custom_socket_option_level && optname == always_fail_option)
+ {
+ ec = boost::asio::error::invalid_argument;
+ return socket_error_retval;
+ }
+
+ if (level == custom_socket_option_level
+ && optname == enable_connection_aborted_option)
+ {
+ if (*optlen != sizeof(int))
+ {
+ ec = boost::asio::error::invalid_argument;
+ return socket_error_retval;
+ }
+
+ *static_cast<int*>(optval) = (state & enable_connection_aborted) ? 1 : 0;
+ ec = boost::system::error_code();
+ return 0;
+ }
+
+#if defined(__BORLANDC__)
+ // Mysteriously, using the getsockopt and setsockopt functions directly with
+ // Borland C++ results in incorrect values being set and read. The bug can be
+ // worked around by using function addresses resolved with GetProcAddress.
+ if (HMODULE winsock_module = ::GetModuleHandleA("ws2_32"))
+ {
+ typedef int (WSAAPI *gso_t)(SOCKET, int, int, char*, int*);
+ if (gso_t gso = (gso_t)::GetProcAddress(winsock_module, "getsockopt"))
+ {
+ clear_last_error();
+ int tmp_optlen = static_cast<int>(*optlen);
+ int result = error_wrapper(gso(s, level, optname,
+ reinterpret_cast<char*>(optval), &tmp_optlen), ec);
+ *optlen = static_cast<size_t>(tmp_optlen);
+ if (result != 0 && level == IPPROTO_IPV6 && optname == IPV6_V6ONLY
+ && ec.value() == WSAENOPROTOOPT && *optlen == sizeof(DWORD))
+ {
+ // Dual-stack IPv4/v6 sockets, and the IPV6_V6ONLY socket option, are
+ // only supported on Windows Vista and later. To simplify program logic
+ // we will fake success of getting this option and specify that the
+ // value is non-zero (i.e. true). This corresponds to the behavior of
+ // IPv6 sockets on Windows platforms pre-Vista.
+ *static_cast<DWORD*>(optval) = 1;
+ ec = boost::system::error_code();
+ }
+ return result;
+ }
+ }
+ ec = boost::asio::error::fault;
+ return socket_error_retval;
+#elif defined(BOOST_WINDOWS) || defined(__CYGWIN__)
+ clear_last_error();
+ int result = error_wrapper(call_getsockopt(&msghdr::msg_namelen,
+ s, level, optname, optval, optlen), ec);
+ if (result != 0 && level == IPPROTO_IPV6 && optname == IPV6_V6ONLY
+ && ec.value() == WSAENOPROTOOPT && *optlen == sizeof(DWORD))
+ {
+ // Dual-stack IPv4/v6 sockets, and the IPV6_V6ONLY socket option, are only
+ // supported on Windows Vista and later. To simplify program logic we will
+ // fake success of getting this option and specify that the value is
+ // non-zero (i.e. true). This corresponds to the behavior of IPv6 sockets
+ // on Windows platforms pre-Vista.
+ *static_cast<DWORD*>(optval) = 1;
+ ec = boost::system::error_code();
+ }
+ if (result == 0)
+ ec = boost::system::error_code();
+ return result;
+#else // defined(BOOST_WINDOWS) || defined(__CYGWIN__)
+ clear_last_error();
+ int result = error_wrapper(call_getsockopt(&msghdr::msg_namelen,
+ s, level, optname, optval, optlen), ec);
+#if defined(__linux__)
+ if (result == 0 && level == SOL_SOCKET && *optlen == sizeof(int)
+ && (optname == SO_SNDBUF || optname == SO_RCVBUF))
+ {
+ // On Linux, setting SO_SNDBUF or SO_RCVBUF to N actually causes the kernel
+ // to set the buffer size to N*2. Linux puts additional stuff into the
+ // buffers so that only about half is actually available to the application.
+ // The retrieved value is divided by 2 here to make it appear as though the
+ // correct value has been set.
+ *static_cast<int*>(optval) /= 2;
+ }
+#endif // defined(__linux__)
+ if (result == 0)
+ ec = boost::system::error_code();
+ return result;
+#endif // defined(BOOST_WINDOWS) || defined(__CYGWIN__)
+}
+
+template <typename SockLenType>
+inline int call_getpeername(SockLenType msghdr::*,
+ socket_type s, socket_addr_type* addr, std::size_t* addrlen)
+{
+ SockLenType tmp_addrlen = (SockLenType)*addrlen;
+ int result = ::getpeername(s, addr, &tmp_addrlen);
+ *addrlen = (std::size_t)tmp_addrlen;
+ return result;
+}
+
+int getpeername(socket_type s, socket_addr_type* addr,
+ std::size_t* addrlen, bool cached, boost::system::error_code& ec)
+{
+ if (s == invalid_socket)
+ {
+ ec = boost::asio::error::bad_descriptor;
+ return socket_error_retval;
+ }
+
+#if defined(BOOST_WINDOWS) || defined(__CYGWIN__)
+ if (cached)
+ {
+ // Check if socket is still connected.
+ DWORD connect_time = 0;
+ size_t connect_time_len = sizeof(connect_time);
+ if (socket_ops::getsockopt(s, 0, SOL_SOCKET, SO_CONNECT_TIME,
+ &connect_time, &connect_time_len, ec) == socket_error_retval)
+ {
+ return socket_error_retval;
+ }
+ if (connect_time == 0xFFFFFFFF)
+ {
+ ec = boost::asio::error::not_connected;
+ return socket_error_retval;
+ }
+
+ // The cached value is still valid.
+ ec = boost::system::error_code();
+ return 0;
+ }
+#else // defined(BOOST_WINDOWS) || defined(__CYGWIN__)
+ (void)cached;
+#endif // defined(BOOST_WINDOWS) || defined(__CYGWIN__)
+
+ clear_last_error();
+ int result = error_wrapper(call_getpeername(
+ &msghdr::msg_namelen, s, addr, addrlen), ec);
+ if (result == 0)
+ ec = boost::system::error_code();
+ return result;
+}
+
+template <typename SockLenType>
+inline int call_getsockname(SockLenType msghdr::*,
+ socket_type s, socket_addr_type* addr, std::size_t* addrlen)
+{
+ SockLenType tmp_addrlen = (SockLenType)*addrlen;
+ int result = ::getsockname(s, addr, &tmp_addrlen);
+ *addrlen = (std::size_t)tmp_addrlen;
+ return result;
+}
+
+int getsockname(socket_type s, socket_addr_type* addr,
+ std::size_t* addrlen, boost::system::error_code& ec)
+{
+ if (s == invalid_socket)
+ {
+ ec = boost::asio::error::bad_descriptor;
+ return socket_error_retval;
+ }
+
+ clear_last_error();
+ int result = error_wrapper(call_getsockname(
+ &msghdr::msg_namelen, s, addr, addrlen), ec);
+ if (result == 0)
+ ec = boost::system::error_code();
+ return result;
+}
+
+int ioctl(socket_type s, state_type& state, int cmd,
+ ioctl_arg_type* arg, boost::system::error_code& ec)
+{
+ if (s == invalid_socket)
+ {
+ ec = boost::asio::error::bad_descriptor;
+ return socket_error_retval;
+ }
+
+ clear_last_error();
+#if defined(BOOST_WINDOWS) || defined(__CYGWIN__)
+ int result = error_wrapper(::ioctlsocket(s, cmd, arg), ec);
+#elif defined(__MACH__) && defined(__APPLE__) \
+ || defined(__NetBSD__) || defined(__FreeBSD__) || defined(__OpenBSD__)
+ int result = error_wrapper(::ioctl(s,
+ static_cast<unsigned int>(cmd), arg), ec);
+#else
+ int result = error_wrapper(::ioctl(s, cmd, arg), ec);
+#endif
+ if (result >= 0)
+ {
+ ec = boost::system::error_code();
+
+ // When updating the non-blocking mode we always perform the ioctl syscall,
+ // even if the flags would otherwise indicate that the socket is already in
+ // the correct state. This ensures that the underlying socket is put into
+ // the state that has been requested by the user. If the ioctl syscall was
+ // successful then we need to update the flags to match.
+ if (cmd == static_cast<int>(FIONBIO))
+ {
+ if (*arg)
+ {
+ state |= user_set_non_blocking;
+ }
+ else
+ {
+ // Clearing the non-blocking mode always overrides any internally-set
+ // non-blocking flag. Any subsequent asynchronous operations will need
+ // to re-enable non-blocking I/O.
+ state &= ~(user_set_non_blocking | internal_non_blocking);
+ }
+ }
+ }
+
+ return result;
+}
+
+int select(int nfds, fd_set* readfds, fd_set* writefds,
+ fd_set* exceptfds, timeval* timeout, boost::system::error_code& ec)
+{
+ clear_last_error();
+#if defined(BOOST_WINDOWS) || defined(__CYGWIN__)
+ if (!readfds && !writefds && !exceptfds && timeout)
+ {
+ DWORD milliseconds = timeout->tv_sec * 1000 + timeout->tv_usec / 1000;
+ if (milliseconds == 0)
+ milliseconds = 1; // Force context switch.
+ ::Sleep(milliseconds);
+ ec = boost::system::error_code();
+ return 0;
+ }
+
+ // The select() call allows timeout values measured in microseconds, but the
+ // system clock (as wrapped by boost::posix_time::microsec_clock) typically
+ // has a resolution of 10 milliseconds. This can lead to a spinning select
+ // reactor, meaning increased CPU usage, when waiting for the earliest
+ // scheduled timeout if it's less than 10 milliseconds away. To avoid a tight
+ // spin we'll use a minimum timeout of 1 millisecond.
+ if (timeout && timeout->tv_sec == 0
+ && timeout->tv_usec > 0 && timeout->tv_usec < 1000)
+ timeout->tv_usec = 1000;
+#endif // defined(BOOST_WINDOWS) || defined(__CYGWIN__)
+
+#if defined(__hpux) && defined(__SELECT)
+ timespec ts;
+ ts.tv_sec = timeout ? timeout->tv_sec : 0;
+ ts.tv_nsec = timeout ? timeout->tv_usec * 1000 : 0;
+ return error_wrapper(::pselect(nfds, readfds,
+ writefds, exceptfds, timeout ? &ts : 0, 0), ec);
+#else
+ int result = error_wrapper(::select(nfds, readfds,
+ writefds, exceptfds, timeout), ec);
+ if (result >= 0)
+ ec = boost::system::error_code();
+ return result;
+#endif
+}
+
+int poll_read(socket_type s, state_type state, boost::system::error_code& ec)
+{
+ if (s == invalid_socket)
+ {
+ ec = boost::asio::error::bad_descriptor;
+ return socket_error_retval;
+ }
+
+#if defined(BOOST_WINDOWS) \
+ || defined(__CYGWIN__) \
+ || defined(__SYMBIAN32__)
+ fd_set fds;
+ FD_ZERO(&fds);
+ FD_SET(s, &fds);
+ timeval zero_timeout;
+ zero_timeout.tv_sec = 0;
+ zero_timeout.tv_usec = 0;
+ timeval* timeout = (state & user_set_non_blocking) ? &zero_timeout : 0;
+ clear_last_error();
+ int result = error_wrapper(::select(s, &fds, 0, 0, timeout), ec);
+#else // defined(BOOST_WINDOWS)
+ // || defined(__CYGWIN__)
+ // || defined(__SYMBIAN32__)
+ pollfd fds;
+ fds.fd = s;
+ fds.events = POLLIN;
+ fds.revents = 0;
+ int timeout = (state & user_set_non_blocking) ? 0 : -1;
+ clear_last_error();
+ int result = error_wrapper(::poll(&fds, 1, timeout), ec);
+#endif // defined(BOOST_WINDOWS)
+ // || defined(__CYGWIN__)
+ // || defined(__SYMBIAN32__)
+ if (result == 0)
+ ec = (state & user_set_non_blocking)
+ ? boost::asio::error::would_block : boost::system::error_code();
+ else if (result > 0)
+ ec = boost::system::error_code();
+ return result;
+}
+
+int poll_write(socket_type s, state_type state, boost::system::error_code& ec)
+{
+ if (s == invalid_socket)
+ {
+ ec = boost::asio::error::bad_descriptor;
+ return socket_error_retval;
+ }
+
+#if defined(BOOST_WINDOWS) \
+ || defined(__CYGWIN__) \
+ || defined(__SYMBIAN32__)
+ fd_set fds;
+ FD_ZERO(&fds);
+ FD_SET(s, &fds);
+ timeval zero_timeout;
+ zero_timeout.tv_sec = 0;
+ zero_timeout.tv_usec = 0;
+ timeval* timeout = (state & user_set_non_blocking) ? &zero_timeout : 0;
+ clear_last_error();
+ int result = error_wrapper(::select(s, 0, &fds, 0, timeout), ec);
+#else // defined(BOOST_WINDOWS)
+ // || defined(__CYGWIN__)
+ // || defined(__SYMBIAN32__)
+ pollfd fds;
+ fds.fd = s;
+ fds.events = POLLOUT;
+ fds.revents = 0;
+ int timeout = (state & user_set_non_blocking) ? 0 : -1;
+ clear_last_error();
+ int result = error_wrapper(::poll(&fds, 1, timeout), ec);
+#endif // defined(BOOST_WINDOWS)
+ // || defined(__CYGWIN__)
+ // || defined(__SYMBIAN32__)
+ if (result == 0)
+ ec = (state & user_set_non_blocking)
+ ? boost::asio::error::would_block : boost::system::error_code();
+ else if (result > 0)
+ ec = boost::system::error_code();
+ return result;
+}
+
+int poll_connect(socket_type s, boost::system::error_code& ec)
+{
+ if (s == invalid_socket)
+ {
+ ec = boost::asio::error::bad_descriptor;
+ return socket_error_retval;
+ }
+
+#if defined(BOOST_WINDOWS) \
+ || defined(__CYGWIN__) \
+ || defined(__SYMBIAN32__)
+ fd_set write_fds;
+ FD_ZERO(&write_fds);
+ FD_SET(s, &write_fds);
+ fd_set except_fds;
+ FD_ZERO(&except_fds);
+ FD_SET(s, &except_fds);
+ clear_last_error();
+ int result = error_wrapper(::select(s, 0, &write_fds, &except_fds, 0), ec);
+ if (result >= 0)
+ ec = boost::system::error_code();
+ return result;
+#else // defined(BOOST_WINDOWS)
+ // || defined(__CYGWIN__)
+ // || defined(__SYMBIAN32__)
+ pollfd fds;
+ fds.fd = s;
+ fds.events = POLLOUT;
+ fds.revents = 0;
+ clear_last_error();
+ int result = error_wrapper(::poll(&fds, 1, -1), ec);
+ if (result >= 0)
+ ec = boost::system::error_code();
+ return result;
+#endif // defined(BOOST_WINDOWS)
+ // || defined(__CYGWIN__)
+ // || defined(__SYMBIAN32__)
+}
+
+const char* inet_ntop(int af, const void* src, char* dest, size_t length,
+ unsigned long scope_id, boost::system::error_code& ec)
+{
+ clear_last_error();
+#if defined(BOOST_WINDOWS) || defined(__CYGWIN__)
+ using namespace std; // For memcpy.
+
+ if (af != AF_INET && af != AF_INET6)
+ {
+ ec = boost::asio::error::address_family_not_supported;
+ return 0;
+ }
+
+ union
+ {
+ socket_addr_type base;
+ sockaddr_storage_type storage;
+ sockaddr_in4_type v4;
+ sockaddr_in6_type v6;
+ } address;
+ DWORD address_length;
+ if (af == AF_INET)
+ {
+ address_length = sizeof(sockaddr_in4_type);
+ address.v4.sin_family = AF_INET;
+ address.v4.sin_port = 0;
+ memcpy(&address.v4.sin_addr, src, sizeof(in4_addr_type));
+ }
+ else // AF_INET6
+ {
+ address_length = sizeof(sockaddr_in6_type);
+ address.v6.sin6_family = AF_INET6;
+ address.v6.sin6_port = 0;
+ address.v6.sin6_flowinfo = 0;
+ address.v6.sin6_scope_id = scope_id;
+ memcpy(&address.v6.sin6_addr, src, sizeof(in6_addr_type));
+ }
+
+ DWORD string_length = static_cast<DWORD>(length);
+#if defined(BOOST_NO_ANSI_APIS)
+ LPWSTR string_buffer = (LPWSTR)_alloca(length * sizeof(WCHAR));
+ int result = error_wrapper(::WSAAddressToStringW(&address.base,
+ address_length, 0, string_buffer, &string_length), ec);
+ ::WideCharToMultiByte(CP_ACP, 0, string_buffer, -1, dest, length, 0, 0);
+#else
+ int result = error_wrapper(::WSAAddressToStringA(
+ &address.base, address_length, 0, dest, &string_length), ec);
+#endif
+
+ // Windows may set error code on success.
+ if (result != socket_error_retval)
+ ec = boost::system::error_code();
+
+ // Windows may not set an error code on failure.
+ else if (result == socket_error_retval && !ec)
+ ec = boost::asio::error::invalid_argument;
+
+ return result == socket_error_retval ? 0 : dest;
+#else // defined(BOOST_WINDOWS) || defined(__CYGWIN__)
+ const char* result = error_wrapper(::inet_ntop(af, src, dest, length), ec);
+ if (result == 0 && !ec)
+ ec = boost::asio::error::invalid_argument;
+ if (result != 0 && af == AF_INET6 && scope_id != 0)
+ {
+ using namespace std; // For strcat and sprintf.
+ char if_name[IF_NAMESIZE + 1] = "%";
+ const in6_addr_type* ipv6_address = static_cast<const in6_addr_type*>(src);
+ bool is_link_local = ((ipv6_address->s6_addr[0] == 0xfe)
+ && ((ipv6_address->s6_addr[1] & 0xc0) == 0x80));
+ if (!is_link_local || if_indextoname(scope_id, if_name + 1) == 0)
+ sprintf(if_name + 1, "%lu", scope_id);
+ strcat(dest, if_name);
+ }
+ return result;
+#endif // defined(BOOST_WINDOWS) || defined(__CYGWIN__)
+}
+
+int inet_pton(int af, const char* src, void* dest,
+ unsigned long* scope_id, boost::system::error_code& ec)
+{
+ clear_last_error();
+#if defined(BOOST_WINDOWS) || defined(__CYGWIN__)
+ using namespace std; // For memcpy and strcmp.
+
+ if (af != AF_INET && af != AF_INET6)
+ {
+ ec = boost::asio::error::address_family_not_supported;
+ return -1;
+ }
+
+ union
+ {
+ socket_addr_type base;
+ sockaddr_storage_type storage;
+ sockaddr_in4_type v4;
+ sockaddr_in6_type v6;
+ } address;
+ int address_length = sizeof(sockaddr_storage_type);
+#if defined(BOOST_NO_ANSI_APIS)
+ int num_wide_chars = strlen(src) + 1;
+ LPWSTR wide_buffer = (LPWSTR)_alloca(num_wide_chars * sizeof(WCHAR));
+ ::MultiByteToWideChar(CP_ACP, 0, src, -1, wide_buffer, num_wide_chars);
+ int result = error_wrapper(::WSAStringToAddressW(
+ wide_buffer, af, 0, &address.base, &address_length), ec);
+#else
+ int result = error_wrapper(::WSAStringToAddressA(
+ const_cast<char*>(src), af, 0, &address.base, &address_length), ec);
+#endif
+
+ if (af == AF_INET)
+ {
+ if (result != socket_error_retval)
+ {
+ memcpy(dest, &address.v4.sin_addr, sizeof(in4_addr_type));
+ ec = boost::system::error_code();
+ }
+ else if (strcmp(src, "255.255.255.255") == 0)
+ {
+ static_cast<in4_addr_type*>(dest)->s_addr = INADDR_NONE;
+ ec = boost::system::error_code();
+ }
+ }
+ else // AF_INET6
+ {
+ if (result != socket_error_retval)
+ {
+ memcpy(dest, &address.v6.sin6_addr, sizeof(in6_addr_type));
+ if (scope_id)
+ *scope_id = address.v6.sin6_scope_id;
+ ec = boost::system::error_code();
+ }
+ }
+
+ // Windows may not set an error code on failure.
+ if (result == socket_error_retval && !ec)
+ ec = boost::asio::error::invalid_argument;
+
+ if (result != socket_error_retval)
+ ec = boost::system::error_code();
+
+ return result == socket_error_retval ? -1 : 1;
+#else // defined(BOOST_WINDOWS) || defined(__CYGWIN__)
+ int result = error_wrapper(::inet_pton(af, src, dest), ec);
+ if (result <= 0 && !ec)
+ ec = boost::asio::error::invalid_argument;
+ if (result > 0 && af == AF_INET6 && scope_id)
+ {
+ using namespace std; // For strchr and atoi.
+ *scope_id = 0;
+ if (const char* if_name = strchr(src, '%'))
+ {
+ in6_addr_type* ipv6_address = static_cast<in6_addr_type*>(dest);
+ bool is_link_local = ((ipv6_address->s6_addr[0] == 0xfe)
+ && ((ipv6_address->s6_addr[1] & 0xc0) == 0x80));
+ if (is_link_local)
+ *scope_id = if_nametoindex(if_name + 1);
+ if (*scope_id == 0)
+ *scope_id = atoi(if_name + 1);
+ }
+ }
+ return result;
+#endif // defined(BOOST_WINDOWS) || defined(__CYGWIN__)
+}
+
+int gethostname(char* name, int namelen, boost::system::error_code& ec)
+{
+ clear_last_error();
+ int result = error_wrapper(::gethostname(name, namelen), ec);
+#if defined(BOOST_WINDOWS)
+ if (result == 0)
+ ec = boost::system::error_code();
+#endif
+ return result;
+}
+
+#if defined(BOOST_WINDOWS) || defined(__CYGWIN__) \
+ || defined(__MACH__) && defined(__APPLE__)
+
+// The following functions are only needed for emulation of getaddrinfo and
+// getnameinfo.
+
+inline boost::system::error_code translate_netdb_error(int error)
+{
+ switch (error)
+ {
+ case 0:
+ return boost::system::error_code();
+ case HOST_NOT_FOUND:
+ return boost::asio::error::host_not_found;
+ case TRY_AGAIN:
+ return boost::asio::error::host_not_found_try_again;
+ case NO_RECOVERY:
+ return boost::asio::error::no_recovery;
+ case NO_DATA:
+ return boost::asio::error::no_data;
+ default:
+ BOOST_ASSERT(false);
+ return boost::asio::error::invalid_argument;
+ }
+}
+
+inline hostent* gethostbyaddr(const char* addr, int length, int af,
+ hostent* result, char* buffer, int buflength, boost::system::error_code& ec)
+{
+ clear_last_error();
+#if defined(BOOST_WINDOWS) || defined(__CYGWIN__)
+ (void)(buffer);
+ (void)(buflength);
+ hostent* retval = error_wrapper(::gethostbyaddr(addr, length, af), ec);
+ if (!retval)
+ return 0;
+ ec = boost::system::error_code();
+ *result = *retval;
+ return retval;
+#elif defined(__sun) || defined(__QNX__)
+ int error = 0;
+ hostent* retval = error_wrapper(::gethostbyaddr_r(addr, length, af, result,
+ buffer, buflength, &error), ec);
+ if (error)
+ ec = translate_netdb_error(error);
+ return retval;
+#elif defined(__MACH__) && defined(__APPLE__)
+ (void)(buffer);
+ (void)(buflength);
+ int error = 0;
+ hostent* retval = error_wrapper(::getipnodebyaddr(
+ addr, length, af, &error), ec);
+ if (error)
+ ec = translate_netdb_error(error);
+ if (!retval)
+ return 0;
+ *result = *retval;
+ return retval;
+#else
+ hostent* retval = 0;
+ int error = 0;
+ error_wrapper(::gethostbyaddr_r(addr, length, af, result, buffer,
+ buflength, &retval, &error), ec);
+ if (error)
+ ec = translate_netdb_error(error);
+ return retval;
+#endif
+}
+
+inline hostent* gethostbyname(const char* name, int af, struct hostent* result,
+ char* buffer, int buflength, int ai_flags, boost::system::error_code& ec)
+{
+ clear_last_error();
+#if defined(BOOST_WINDOWS) || defined(__CYGWIN__)
+ (void)(buffer);
+ (void)(buflength);
+ (void)(ai_flags);
+ if (af != AF_INET)
+ {
+ ec = boost::asio::error::address_family_not_supported;
+ return 0;
+ }
+ hostent* retval = error_wrapper(::gethostbyname(name), ec);
+ if (!retval)
+ return 0;
+ ec = boost::system::error_code();
+ *result = *retval;
+ return result;
+#elif defined(__sun) || defined(__QNX__)
+ (void)(ai_flags);
+ if (af != AF_INET)
+ {
+ ec = boost::asio::error::address_family_not_supported;
+ return 0;
+ }
+ int error = 0;
+ hostent* retval = error_wrapper(::gethostbyname_r(name, result, buffer,
+ buflength, &error), ec);
+ if (error)
+ ec = translate_netdb_error(error);
+ return retval;
+#elif defined(__MACH__) && defined(__APPLE__)
+ (void)(buffer);
+ (void)(buflength);
+ int error = 0;
+ hostent* retval = error_wrapper(::getipnodebyname(
+ name, af, ai_flags, &error), ec);
+ if (error)
+ ec = translate_netdb_error(error);
+ if (!retval)
+ return 0;
+ *result = *retval;
+ return retval;
+#else
+ (void)(ai_flags);
+ if (af != AF_INET)
+ {
+ ec = boost::asio::error::address_family_not_supported;
+ return 0;
+ }
+ hostent* retval = 0;
+ int error = 0;
+ error_wrapper(::gethostbyname_r(name, result,
+ buffer, buflength, &retval, &error), ec);
+ if (error)
+ ec = translate_netdb_error(error);
+ return retval;
+#endif
+}
+
+inline void freehostent(hostent* h)
+{
+#if defined(__MACH__) && defined(__APPLE__)
+ if (h)
+ ::freehostent(h);
+#else
+ (void)(h);
+#endif
+}
+
+// Emulation of getaddrinfo based on implementation in:
+// Stevens, W. R., UNIX Network Programming Vol. 1, 2nd Ed., Prentice-Hall 1998.
+
+struct gai_search
+{
+ const char* host;
+ int family;
+};
+
+inline int gai_nsearch(const char* host,
+ const addrinfo_type* hints, gai_search (&search)[2])
+{
+ int search_count = 0;
+ if (host == 0 || host[0] == '\0')
+ {
+ if (hints->ai_flags & AI_PASSIVE)
+ {
+ // No host and AI_PASSIVE implies wildcard bind.
+ switch (hints->ai_family)
+ {
+ case AF_INET:
+ search[search_count].host = "0.0.0.0";
+ search[search_count].family = AF_INET;
+ ++search_count;
+ break;
+ case AF_INET6:
+ search[search_count].host = "0::0";
+ search[search_count].family = AF_INET6;
+ ++search_count;
+ break;
+ case AF_UNSPEC:
+ search[search_count].host = "0::0";
+ search[search_count].family = AF_INET6;
+ ++search_count;
+ search[search_count].host = "0.0.0.0";
+ search[search_count].family = AF_INET;
+ ++search_count;
+ break;
+ default:
+ break;
+ }
+ }
+ else
+ {
+ // No host and not AI_PASSIVE means connect to local host.
+ switch (hints->ai_family)
+ {
+ case AF_INET:
+ search[search_count].host = "localhost";
+ search[search_count].family = AF_INET;
+ ++search_count;
+ break;
+ case AF_INET6:
+ search[search_count].host = "localhost";
+ search[search_count].family = AF_INET6;
+ ++search_count;
+ break;
+ case AF_UNSPEC:
+ search[search_count].host = "localhost";
+ search[search_count].family = AF_INET6;
+ ++search_count;
+ search[search_count].host = "localhost";
+ search[search_count].family = AF_INET;
+ ++search_count;
+ break;
+ default:
+ break;
+ }
+ }
+ }
+ else
+ {
+ // Host is specified.
+ switch (hints->ai_family)
+ {
+ case AF_INET:
+ search[search_count].host = host;
+ search[search_count].family = AF_INET;
+ ++search_count;
+ break;
+ case AF_INET6:
+ search[search_count].host = host;
+ search[search_count].family = AF_INET6;
+ ++search_count;
+ break;
+ case AF_UNSPEC:
+ search[search_count].host = host;
+ search[search_count].family = AF_INET6;
+ ++search_count;
+ search[search_count].host = host;
+ search[search_count].family = AF_INET;
+ ++search_count;
+ break;
+ default:
+ break;
+ }
+ }
+ return search_count;
+}
+
+template <typename T>
+inline T* gai_alloc(std::size_t size = sizeof(T))
+{
+ using namespace std;
+ T* p = static_cast<T*>(::operator new(size, std::nothrow));
+ if (p)
+ memset(p, 0, size);
+ return p;
+}
+
+inline void gai_free(void* p)
+{
+ ::operator delete(p);
+}
+
+inline void gai_strcpy(char* target, const char* source, std::size_t max_size)
+{
+ using namespace std;
+#if BOOST_WORKAROUND(BOOST_MSVC, >= 1400) && !defined(UNDER_CE)
+ strcpy_s(target, max_size, source);
+#else
+ *target = 0;
+ strncat(target, source, max_size);
+#endif
+}
+
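+// Flag used to mark addrinfo entries created when the caller's hints did not
+// specify a socket type. gai_port() assigns a socket type to such an entry on
+// first use and clones it for each additional socket type requested.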
+enum { gai_clone_flag = 1 << 30 };
+
+inline int gai_aistruct(addrinfo_type*** next, const addrinfo_type* hints,
+ const void* addr, int family)
+{
+ using namespace std;
+
+ addrinfo_type* ai = gai_alloc<addrinfo_type>();
+ if (ai == 0)
+ return EAI_MEMORY;
+
+ ai->ai_next = 0;
+ **next = ai;
+ *next = &ai->ai_next;
+
+ ai->ai_canonname = 0;
+ ai->ai_socktype = hints->ai_socktype;
+ if (ai->ai_socktype == 0)
+ ai->ai_flags |= gai_clone_flag;
+ ai->ai_protocol = hints->ai_protocol;
+ ai->ai_family = family;
+
+ switch (ai->ai_family)
+ {
+ case AF_INET:
+ {
+ sockaddr_in4_type* sinptr = gai_alloc<sockaddr_in4_type>();
+ if (sinptr == 0)
+ return EAI_MEMORY;
+ sinptr->sin_family = AF_INET;
+ memcpy(&sinptr->sin_addr, addr, sizeof(in4_addr_type));
+ ai->ai_addr = reinterpret_cast<sockaddr*>(sinptr);
+ ai->ai_addrlen = sizeof(sockaddr_in4_type);
+ break;
+ }
+ case AF_INET6:
+ {
+ sockaddr_in6_type* sin6ptr = gai_alloc<sockaddr_in6_type>();
+ if (sin6ptr == 0)
+ return EAI_MEMORY;
+ sin6ptr->sin6_family = AF_INET6;
+ memcpy(&sin6ptr->sin6_addr, addr, sizeof(in6_addr_type));
+ ai->ai_addr = reinterpret_cast<sockaddr*>(sin6ptr);
+ ai->ai_addrlen = sizeof(sockaddr_in6_type);
+ break;
+ }
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+inline addrinfo_type* gai_clone(addrinfo_type* ai)
+{
+ using namespace std;
+
+ addrinfo_type* new_ai = gai_alloc<addrinfo_type>();
+ if (new_ai == 0)
+ return new_ai;
+
+ new_ai->ai_next = ai->ai_next;
+ ai->ai_next = new_ai;
+
+ new_ai->ai_flags = 0;
+ new_ai->ai_family = ai->ai_family;
+ new_ai->ai_socktype = ai->ai_socktype;
+ new_ai->ai_protocol = ai->ai_protocol;
+ new_ai->ai_canonname = 0;
+ new_ai->ai_addrlen = ai->ai_addrlen;
+ new_ai->ai_addr = gai_alloc<sockaddr>(ai->ai_addrlen);
+ memcpy(new_ai->ai_addr, ai->ai_addr, ai->ai_addrlen);
+
+ return new_ai;
+}
+
+inline int gai_port(addrinfo_type* aihead, int port, int socktype)
+{
+ int num_found = 0;
+
+ for (addrinfo_type* ai = aihead; ai; ai = ai->ai_next)
+ {
+ if (ai->ai_flags & gai_clone_flag)
+ {
+ if (ai->ai_socktype != 0)
+ {
+ ai = gai_clone(ai);
+ if (ai == 0)
+ return -1;
+ // ai now points to newly cloned entry.
+ }
+ }
+ else if (ai->ai_socktype != socktype)
+ {
+ // Ignore if mismatch on socket type.
+ continue;
+ }
+
+ ai->ai_socktype = socktype;
+
+ switch (ai->ai_family)
+ {
+ case AF_INET:
+ {
+ sockaddr_in4_type* sinptr =
+ reinterpret_cast<sockaddr_in4_type*>(ai->ai_addr);
+ sinptr->sin_port = port;
+ ++num_found;
+ break;
+ }
+ case AF_INET6:
+ {
+ sockaddr_in6_type* sin6ptr =
+ reinterpret_cast<sockaddr_in6_type*>(ai->ai_addr);
+ sin6ptr->sin6_port = port;
+ ++num_found;
+ break;
+ }
+ default:
+ break;
+ }
+ }
+
+ return num_found;
+}
+
+inline int gai_serv(addrinfo_type* aihead,
+ const addrinfo_type* hints, const char* serv)
+{
+ using namespace std;
+
+ int num_found = 0;
+
+ if (
+#if defined(AI_NUMERICSERV)
+ (hints->ai_flags & AI_NUMERICSERV) ||
+#endif
+ isdigit(static_cast<unsigned char>(serv[0])))
+ {
+ int port = htons(atoi(serv));
+ if (hints->ai_socktype)
+ {
+ // Caller specifies socket type.
+ int rc = gai_port(aihead, port, hints->ai_socktype);
+ if (rc < 0)
+ return EAI_MEMORY;
+ num_found += rc;
+ }
+ else
+ {
+ // Caller does not specify socket type.
+ int rc = gai_port(aihead, port, SOCK_STREAM);
+ if (rc < 0)
+ return EAI_MEMORY;
+ num_found += rc;
+ rc = gai_port(aihead, port, SOCK_DGRAM);
+ if (rc < 0)
+ return EAI_MEMORY;
+ num_found += rc;
+ }
+ }
+ else
+ {
+ // Try service name with TCP first, then UDP.
+ if (hints->ai_socktype == 0 || hints->ai_socktype == SOCK_STREAM)
+ {
+ servent* sptr = getservbyname(serv, "tcp");
+ if (sptr != 0)
+ {
+ int rc = gai_port(aihead, sptr->s_port, SOCK_STREAM);
+ if (rc < 0)
+ return EAI_MEMORY;
+ num_found += rc;
+ }
+ }
+ if (hints->ai_socktype == 0 || hints->ai_socktype == SOCK_DGRAM)
+ {
+ servent* sptr = getservbyname(serv, "udp");
+ if (sptr != 0)
+ {
+ int rc = gai_port(aihead, sptr->s_port, SOCK_DGRAM);
+ if (rc < 0)
+ return EAI_MEMORY;
+ num_found += rc;
+ }
+ }
+ }
+
+ if (num_found == 0)
+ {
+ if (hints->ai_socktype == 0)
+ {
+ // All calls to getservbyname() failed.
+ return EAI_NONAME;
+ }
+ else
+ {
+ // Service not supported for socket type.
+ return EAI_SERVICE;
+ }
+ }
+
+ return 0;
+}
+
+inline int gai_echeck(const char* host, const char* service,
+ int flags, int family, int socktype, int protocol)
+{
+ (void)(flags);
+ (void)(protocol);
+
+ // Host or service must be specified.
+ if (host == 0 || host[0] == '\0')
+ if (service == 0 || service[0] == '\0')
+ return EAI_NONAME;
+
+ // Check combination of family and socket type.
+ switch (family)
+ {
+ case AF_UNSPEC:
+ break;
+ case AF_INET:
+ case AF_INET6:
+ if (service != 0 && service[0] != '\0')
+ if (socktype != 0 && socktype != SOCK_STREAM && socktype != SOCK_DGRAM)
+ return EAI_SOCKTYPE;
+ break;
+ default:
+ return EAI_FAMILY;
+ }
+
+ return 0;
+}
+
+inline void freeaddrinfo_emulation(addrinfo_type* aihead)
+{
+ addrinfo_type* ai = aihead;
+ while (ai)
+ {
+ gai_free(ai->ai_addr);
+ gai_free(ai->ai_canonname);
+ addrinfo_type* ainext = ai->ai_next;
+ gai_free(ai);
+ ai = ainext;
+ }
+}
+
+inline int getaddrinfo_emulation(const char* host, const char* service,
+ const addrinfo_type* hintsp, addrinfo_type** result)
+{
+ // Set up linked list of addrinfo structures.
+ addrinfo_type* aihead = 0;
+ addrinfo_type** ainext = &aihead;
+ char* canon = 0;
+
+ // Supply default hints if not specified by caller.
+ addrinfo_type hints = addrinfo_type();
+ hints.ai_family = AF_UNSPEC;
+ if (hintsp)
+ hints = *hintsp;
+
+ // If the resolution is not specifically for AF_INET6, remove the AI_V4MAPPED
+ // and AI_ALL flags.
+#if defined(AI_V4MAPPED)
+ if (hints.ai_family != AF_INET6)
+ hints.ai_flags &= ~AI_V4MAPPED;
+#endif
+#if defined(AI_ALL)
+ if (hints.ai_family != AF_INET6)
+ hints.ai_flags &= ~AI_ALL;
+#endif
+
+ // Basic error checking.
+ int rc = gai_echeck(host, service, hints.ai_flags, hints.ai_family,
+ hints.ai_socktype, hints.ai_protocol);
+ if (rc != 0)
+ {
+ freeaddrinfo_emulation(aihead);
+ return rc;
+ }
+
+ gai_search search[2];
+ int search_count = gai_nsearch(host, &hints, search);
+ for (gai_search* sptr = search; sptr < search + search_count; ++sptr)
+ {
+ // Check for IPv4 dotted decimal string.
+ in4_addr_type inaddr;
+ boost::system::error_code ec;
+ if (socket_ops::inet_pton(AF_INET, sptr->host, &inaddr, 0, ec) == 1)
+ {
+ if (hints.ai_family != AF_UNSPEC && hints.ai_family != AF_INET)
+ {
+ freeaddrinfo_emulation(aihead);
+ gai_free(canon);
+ return EAI_FAMILY;
+ }
+ if (sptr->family == AF_INET)
+ {
+ rc = gai_aistruct(&ainext, &hints, &inaddr, AF_INET);
+ if (rc != 0)
+ {
+ freeaddrinfo_emulation(aihead);
+ gai_free(canon);
+ return rc;
+ }
+ }
+ continue;
+ }
+
+ // Check for IPv6 hex string.
+ in6_addr_type in6addr;
+ if (socket_ops::inet_pton(AF_INET6, sptr->host, &in6addr, 0, ec) == 1)
+ {
+ if (hints.ai_family != AF_UNSPEC && hints.ai_family != AF_INET6)
+ {
+ freeaddrinfo_emulation(aihead);
+ gai_free(canon);
+ return EAI_FAMILY;
+ }
+ if (sptr->family == AF_INET6)
+ {
+ rc = gai_aistruct(&ainext, &hints, &in6addr, AF_INET6);
+ if (rc != 0)
+ {
+ freeaddrinfo_emulation(aihead);
+ gai_free(canon);
+ return rc;
+ }
+ }
+ continue;
+ }
+
+ // Look up hostname.
+ hostent hent;
+ char hbuf[8192] = "";
+ hostent* hptr = socket_ops::gethostbyname(sptr->host,
+ sptr->family, &hent, hbuf, sizeof(hbuf), hints.ai_flags, ec);
+ if (hptr == 0)
+ {
+ if (search_count == 2)
+ {
+ // Failure is OK if there are multiple searches.
+ continue;
+ }
+ freeaddrinfo_emulation(aihead);
+ gai_free(canon);
+ if (ec == boost::asio::error::host_not_found)
+ return EAI_NONAME;
+ if (ec == boost::asio::error::host_not_found_try_again)
+ return EAI_AGAIN;
+ if (ec == boost::asio::error::no_recovery)
+ return EAI_FAIL;
+ if (ec == boost::asio::error::no_data)
+ return EAI_NONAME;
+ return EAI_NONAME;
+ }
+
+ // Check for address family mismatch if one was specified.
+ if (hints.ai_family != AF_UNSPEC && hints.ai_family != hptr->h_addrtype)
+ {
+ freeaddrinfo_emulation(aihead);
+ gai_free(canon);
+ socket_ops::freehostent(hptr);
+ return EAI_FAMILY;
+ }
+
+ // Save canonical name first time.
+ if (host != 0 && host[0] != '\0' && hptr->h_name && hptr->h_name[0]
+ && (hints.ai_flags & AI_CANONNAME) && canon == 0)
+ {
+ std::size_t canon_len = strlen(hptr->h_name) + 1;
+ canon = gai_alloc<char>(canon_len);
+ if (canon == 0)
+ {
+ freeaddrinfo_emulation(aihead);
+ socket_ops::freehostent(hptr);
+ return EAI_MEMORY;
+ }
+ gai_strcpy(canon, hptr->h_name, canon_len);
+ }
+
+ // Create an addrinfo structure for each returned address.
+ for (char** ap = hptr->h_addr_list; *ap; ++ap)
+ {
+ rc = gai_aistruct(&ainext, &hints, *ap, hptr->h_addrtype);
+ if (rc != 0)
+ {
+ freeaddrinfo_emulation(aihead);
+ gai_free(canon);
+ socket_ops::freehostent(hptr);
+        return rc;
+ }
+ }
+
+ socket_ops::freehostent(hptr);
+ }
+
+ // Check if we found anything.
+ if (aihead == 0)
+ {
+ gai_free(canon);
+ return EAI_NONAME;
+ }
+
+ // Return canonical name in first entry.
+ if (host != 0 && host[0] != '\0' && (hints.ai_flags & AI_CANONNAME))
+ {
+ if (canon)
+ {
+ aihead->ai_canonname = canon;
+ canon = 0;
+ }
+ else
+ {
+ std::size_t canonname_len = strlen(search[0].host) + 1;
+ aihead->ai_canonname = gai_alloc<char>(canonname_len);
+ if (aihead->ai_canonname == 0)
+ {
+ freeaddrinfo_emulation(aihead);
+ return EAI_MEMORY;
+ }
+ gai_strcpy(aihead->ai_canonname, search[0].host, canonname_len);
+ }
+ }
+ gai_free(canon);
+
+ // Process the service name.
+ if (service != 0 && service[0] != '\0')
+ {
+ rc = gai_serv(aihead, &hints, service);
+ if (rc != 0)
+ {
+ freeaddrinfo_emulation(aihead);
+ return rc;
+ }
+ }
+
+ // Return result to caller.
+ *result = aihead;
+ return 0;
+}
+
+inline boost::system::error_code getnameinfo_emulation(
+ const socket_addr_type* sa, std::size_t salen, char* host,
+ std::size_t hostlen, char* serv, std::size_t servlen, int flags,
+ boost::system::error_code& ec)
+{
+ using namespace std;
+
+ const char* addr;
+ size_t addr_len;
+ unsigned short port;
+ switch (sa->sa_family)
+ {
+ case AF_INET:
+ if (salen != sizeof(sockaddr_in4_type))
+ {
+ return ec = boost::asio::error::invalid_argument;
+ }
+ addr = reinterpret_cast<const char*>(
+ &reinterpret_cast<const sockaddr_in4_type*>(sa)->sin_addr);
+ addr_len = sizeof(in4_addr_type);
+ port = reinterpret_cast<const sockaddr_in4_type*>(sa)->sin_port;
+ break;
+ case AF_INET6:
+ if (salen != sizeof(sockaddr_in6_type))
+ {
+ return ec = boost::asio::error::invalid_argument;
+ }
+ addr = reinterpret_cast<const char*>(
+ &reinterpret_cast<const sockaddr_in6_type*>(sa)->sin6_addr);
+ addr_len = sizeof(in6_addr_type);
+ port = reinterpret_cast<const sockaddr_in6_type*>(sa)->sin6_port;
+ break;
+ default:
+ return ec = boost::asio::error::address_family_not_supported;
+ }
+
+ if (host && hostlen > 0)
+ {
+ if (flags & NI_NUMERICHOST)
+ {
+ if (socket_ops::inet_ntop(sa->sa_family, addr, host, hostlen, 0, ec) == 0)
+ {
+ return ec;
+ }
+ }
+ else
+ {
+ hostent hent;
+ char hbuf[8192] = "";
+ hostent* hptr = socket_ops::gethostbyaddr(addr,
+ static_cast<int>(addr_len), sa->sa_family,
+ &hent, hbuf, sizeof(hbuf), ec);
+ if (hptr && hptr->h_name && hptr->h_name[0] != '\0')
+ {
+ if (flags & NI_NOFQDN)
+ {
+ char* dot = strchr(hptr->h_name, '.');
+ if (dot)
+ {
+ *dot = 0;
+ }
+ }
+ gai_strcpy(host, hptr->h_name, hostlen);
+ socket_ops::freehostent(hptr);
+ }
+ else
+ {
+ socket_ops::freehostent(hptr);
+ if (flags & NI_NAMEREQD)
+ {
+ return ec = boost::asio::error::host_not_found;
+ }
+ if (socket_ops::inet_ntop(sa->sa_family,
+ addr, host, hostlen, 0, ec) == 0)
+ {
+ return ec;
+ }
+ }
+ }
+ }
+
+ if (serv && servlen > 0)
+ {
+ if (flags & NI_NUMERICSERV)
+ {
+ if (servlen < 6)
+ {
+ return ec = boost::asio::error::no_buffer_space;
+ }
+#if BOOST_WORKAROUND(BOOST_MSVC, >= 1400) && !defined(UNDER_CE)
+ sprintf_s(serv, servlen, "%u", ntohs(port));
+#else
+ sprintf(serv, "%u", ntohs(port));
+#endif
+ }
+ else
+ {
+#if defined(BOOST_HAS_THREADS) && defined(BOOST_HAS_PTHREADS) \
+ && !defined(BOOST_ASIO_DISABLE_THREADS)
+ static ::pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
+ ::pthread_mutex_lock(&mutex);
+#endif // defined(BOOST_HAS_THREADS) && defined(BOOST_HAS_PTHREADS)
+ // && !defined(BOOST_ASIO_DISABLE_THREADS)
+ servent* sptr = ::getservbyport(port, (flags & NI_DGRAM) ? "udp" : 0);
+ if (sptr && sptr->s_name && sptr->s_name[0] != '\0')
+ {
+ gai_strcpy(serv, sptr->s_name, servlen);
+ }
+ else
+ {
+ if (servlen < 6)
+ {
+ return ec = boost::asio::error::no_buffer_space;
+ }
+#if BOOST_WORKAROUND(BOOST_MSVC, >= 1400) && !defined(UNDER_CE)
+ sprintf_s(serv, servlen, "%u", ntohs(port));
+#else
+ sprintf(serv, "%u", ntohs(port));
+#endif
+ }
+#if defined(BOOST_HAS_THREADS) && defined(BOOST_HAS_PTHREADS) \
+ && !defined(BOOST_ASIO_DISABLE_THREADS)
+ ::pthread_mutex_unlock(&mutex);
+#endif // defined(BOOST_HAS_THREADS) && defined(BOOST_HAS_PTHREADS)
+ // && !defined(BOOST_ASIO_DISABLE_THREADS)
+ }
+ }
+
+ ec = boost::system::error_code();
+ return ec;
+}
+
+#endif // defined(BOOST_WINDOWS) || defined(__CYGWIN__)
+ // || defined(__MACH__) && defined(__APPLE__)
+
+inline boost::system::error_code translate_addrinfo_error(int error)
+{
+ switch (error)
+ {
+ case 0:
+ return boost::system::error_code();
+ case EAI_AGAIN:
+ return boost::asio::error::host_not_found_try_again;
+ case EAI_BADFLAGS:
+ return boost::asio::error::invalid_argument;
+ case EAI_FAIL:
+ return boost::asio::error::no_recovery;
+ case EAI_FAMILY:
+ return boost::asio::error::address_family_not_supported;
+ case EAI_MEMORY:
+ return boost::asio::error::no_memory;
+ case EAI_NONAME:
+#if defined(EAI_ADDRFAMILY)
+ case EAI_ADDRFAMILY:
+#endif
+#if defined(EAI_NODATA) && (EAI_NODATA != EAI_NONAME)
+ case EAI_NODATA:
+#endif
+ return boost::asio::error::host_not_found;
+ case EAI_SERVICE:
+ return boost::asio::error::service_not_found;
+ case EAI_SOCKTYPE:
+ return boost::asio::error::socket_type_not_supported;
+ default: // Possibly the non-portable EAI_SYSTEM.
+#if defined(BOOST_WINDOWS) || defined(__CYGWIN__)
+ return boost::system::error_code(
+ WSAGetLastError(), boost::asio::error::get_system_category());
+#else
+ return boost::system::error_code(
+ errno, boost::asio::error::get_system_category());
+#endif
+ }
+}
+
+boost::system::error_code getaddrinfo(const char* host,
+ const char* service, const addrinfo_type& hints,
+ addrinfo_type** result, boost::system::error_code& ec)
+{
+ host = (host && *host) ? host : 0;
+ service = (service && *service) ? service : 0;
+ clear_last_error();
+#if defined(BOOST_WINDOWS) || defined(__CYGWIN__)
+# if defined(_WIN32_WINNT) && (_WIN32_WINNT >= 0x0501) || defined(UNDER_CE)
+ // Building for Windows XP, Windows Server 2003, or later.
+ int error = ::getaddrinfo(host, service, &hints, result);
+ return ec = translate_addrinfo_error(error);
+# else
+ // Building for Windows 2000 or earlier.
+ typedef int (WSAAPI *gai_t)(const char*,
+ const char*, const addrinfo_type*, addrinfo_type**);
+ if (HMODULE winsock_module = ::GetModuleHandleA("ws2_32"))
+ {
+ if (gai_t gai = (gai_t)::GetProcAddress(winsock_module, "getaddrinfo"))
+ {
+ int error = gai(host, service, &hints, result);
+ return ec = translate_addrinfo_error(error);
+ }
+ }
+ int error = getaddrinfo_emulation(host, service, &hints, result);
+ return ec = translate_addrinfo_error(error);
+# endif
+#elif defined(__MACH__) && defined(__APPLE__)
+ int error = getaddrinfo_emulation(host, service, &hints, result);
+ return ec = translate_addrinfo_error(error);
+#else
+ int error = ::getaddrinfo(host, service, &hints, result);
+ return ec = translate_addrinfo_error(error);
+#endif
+}
+
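+// Run on a resolver's worker thread. If the resolver that initiated the
+// request no longer exists (the cancel token has expired), the lookup is
+// skipped and operation_aborted is reported instead.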
+boost::system::error_code background_getaddrinfo(
+ const weak_cancel_token_type& cancel_token, const char* host,
+ const char* service, const addrinfo_type& hints,
+ addrinfo_type** result, boost::system::error_code& ec)
+{
+ if (cancel_token.expired())
+ ec = boost::asio::error::operation_aborted;
+ else
+ socket_ops::getaddrinfo(host, service, hints, result, ec);
+ return ec;
+}
+
+void freeaddrinfo(addrinfo_type* ai)
+{
+#if defined(BOOST_WINDOWS) || defined(__CYGWIN__)
+# if defined(_WIN32_WINNT) && (_WIN32_WINNT >= 0x0501) || defined(UNDER_CE)
+ // Building for Windows XP, Windows Server 2003, or later.
+ ::freeaddrinfo(ai);
+# else
+ // Building for Windows 2000 or earlier.
+ typedef int (WSAAPI *fai_t)(addrinfo_type*);
+ if (HMODULE winsock_module = ::GetModuleHandleA("ws2_32"))
+ {
+ if (fai_t fai = (fai_t)::GetProcAddress(winsock_module, "freeaddrinfo"))
+ {
+ fai(ai);
+ return;
+ }
+ }
+ freeaddrinfo_emulation(ai);
+# endif
+#elif defined(__MACH__) && defined(__APPLE__)
+ freeaddrinfo_emulation(ai);
+#else
+ ::freeaddrinfo(ai);
+#endif
+}
+
+boost::system::error_code getnameinfo(const socket_addr_type* addr,
+ std::size_t addrlen, char* host, std::size_t hostlen,
+ char* serv, std::size_t servlen, int flags, boost::system::error_code& ec)
+{
+#if defined(BOOST_WINDOWS) || defined(__CYGWIN__)
+# if defined(_WIN32_WINNT) && (_WIN32_WINNT >= 0x0501) || defined(UNDER_CE)
+ // Building for Windows XP, Windows Server 2003, or later.
+ clear_last_error();
+ int error = ::getnameinfo(addr, static_cast<socklen_t>(addrlen),
+ host, static_cast<DWORD>(hostlen),
+ serv, static_cast<DWORD>(servlen), flags);
+ return ec = translate_addrinfo_error(error);
+# else
+ // Building for Windows 2000 or earlier.
+ typedef int (WSAAPI *gni_t)(const socket_addr_type*,
+ int, char*, DWORD, char*, DWORD, int);
+ if (HMODULE winsock_module = ::GetModuleHandleA("ws2_32"))
+ {
+ if (gni_t gni = (gni_t)::GetProcAddress(winsock_module, "getnameinfo"))
+ {
+ clear_last_error();
+ int error = gni(addr, static_cast<int>(addrlen),
+ host, static_cast<DWORD>(hostlen),
+ serv, static_cast<DWORD>(servlen), flags);
+ return ec = translate_addrinfo_error(error);
+ }
+ }
+ clear_last_error();
+ return getnameinfo_emulation(addr, addrlen,
+ host, hostlen, serv, servlen, flags, ec);
+# endif
+#elif defined(__MACH__) && defined(__APPLE__)
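+  // Mac OS X: make sure the ss_len field is set before calling the
+  // getnameinfo emulation.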
+ using namespace std; // For memcpy.
+ sockaddr_storage_type tmp_addr;
+ memcpy(&tmp_addr, addr, addrlen);
+ tmp_addr.ss_len = addrlen;
+ addr = reinterpret_cast<socket_addr_type*>(&tmp_addr);
+ clear_last_error();
+ return getnameinfo_emulation(addr, addrlen,
+ host, hostlen, serv, servlen, flags, ec);
+#else
+ clear_last_error();
+ int error = ::getnameinfo(addr, addrlen, host, hostlen, serv, servlen, flags);
+ return ec = translate_addrinfo_error(error);
+#endif
+}
+
+boost::system::error_code sync_getnameinfo(
+ const socket_addr_type* addr, std::size_t addrlen,
+ char* host, std::size_t hostlen, char* serv,
+ std::size_t servlen, int sock_type, boost::system::error_code& ec)
+{
+ // First try resolving with the service name. If that fails try resolving
+ // but allow the service to be returned as a number.
+ int flags = (sock_type == SOCK_DGRAM) ? NI_DGRAM : 0;
+ socket_ops::getnameinfo(addr, addrlen, host,
+ hostlen, serv, servlen, flags, ec);
+ if (ec)
+ {
+ socket_ops::getnameinfo(addr, addrlen, host, hostlen,
+ serv, servlen, flags | NI_NUMERICSERV, ec);
+ }
+
+ return ec;
+}
+
+boost::system::error_code background_getnameinfo(
+ const weak_cancel_token_type& cancel_token,
+ const socket_addr_type* addr, std::size_t addrlen,
+ char* host, std::size_t hostlen, char* serv,
+ std::size_t servlen, int sock_type, boost::system::error_code& ec)
+{
+ if (cancel_token.expired())
+ {
+ ec = boost::asio::error::operation_aborted;
+ }
+ else
+ {
+ // First try resolving with the service name. If that fails try resolving
+ // but allow the service to be returned as a number.
+ int flags = (sock_type == SOCK_DGRAM) ? NI_DGRAM : 0;
+ socket_ops::getnameinfo(addr, addrlen, host,
+ hostlen, serv, servlen, flags, ec);
+ if (ec)
+ {
+ socket_ops::getnameinfo(addr, addrlen, host, hostlen,
+ serv, servlen, flags | NI_NUMERICSERV, ec);
+ }
+ }
+
+ return ec;
+}
+
+u_long_type network_to_host_long(u_long_type value)
+{
+ return ntohl(value);
+}
+
+u_long_type host_to_network_long(u_long_type value)
+{
+ return htonl(value);
+}
+
+u_short_type network_to_host_short(u_short_type value)
+{
+ return ntohs(value);
+}
+
+u_short_type host_to_network_short(u_short_type value)
+{
+ return htons(value);
+}
+
+} // namespace socket_ops
+} // namespace detail
+} // namespace asio
+} // namespace boost
+
+#include <boost/asio/detail/pop_options.hpp>
+
+#endif // BOOST_ASIO_DETAIL_SOCKET_OPS_IPP
diff --git a/boost/asio/detail/impl/socket_select_interrupter.ipp b/boost/asio/detail/impl/socket_select_interrupter.ipp
new file mode 100644
index 0000000000..6005f12eae
--- /dev/null
+++ b/boost/asio/detail/impl/socket_select_interrupter.ipp
@@ -0,0 +1,173 @@
+//
+// detail/impl/socket_select_interrupter.ipp
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+//
+// Copyright (c) 2003-2012 Christopher M. Kohlhoff (chris at kohlhoff dot com)
+//
+// Distributed under the Boost Software License, Version 1.0. (See accompanying
+// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+//
+
+#ifndef BOOST_ASIO_DETAIL_IMPL_SOCKET_SELECT_INTERRUPTER_IPP
+#define BOOST_ASIO_DETAIL_IMPL_SOCKET_SELECT_INTERRUPTER_IPP
+
+#if defined(_MSC_VER) && (_MSC_VER >= 1200)
+# pragma once
+#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
+
+#include <boost/asio/detail/config.hpp>
+
+#if defined(BOOST_WINDOWS) \
+ || defined(__CYGWIN__) \
+ || defined(__SYMBIAN32__)
+
+#include <cstdlib>
+#include <boost/asio/detail/socket_holder.hpp>
+#include <boost/asio/detail/socket_ops.hpp>
+#include <boost/asio/detail/socket_select_interrupter.hpp>
+#include <boost/asio/detail/throw_error.hpp>
+#include <boost/asio/error.hpp>
+
+#include <boost/asio/detail/push_options.hpp>
+
+namespace boost {
+namespace asio {
+namespace detail {
+
+socket_select_interrupter::socket_select_interrupter()
+{
+ open_descriptors();
+}
+
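+// Create a connected pair of TCP sockets over the loopback interface. The
+// "write" end is used by interrupt() to make the "read" end become ready,
+// which wakes up a select() call that is waiting on the read descriptor.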
+void socket_select_interrupter::open_descriptors()
+{
+ boost::system::error_code ec;
+ socket_holder acceptor(socket_ops::socket(
+ AF_INET, SOCK_STREAM, IPPROTO_TCP, ec));
+ if (acceptor.get() == invalid_socket)
+ boost::asio::detail::throw_error(ec, "socket_select_interrupter");
+
+ int opt = 1;
+ socket_ops::state_type acceptor_state = 0;
+ socket_ops::setsockopt(acceptor.get(), acceptor_state,
+ SOL_SOCKET, SO_REUSEADDR, &opt, sizeof(opt), ec);
+
+ using namespace std; // For memset.
+ sockaddr_in4_type addr;
+ std::size_t addr_len = sizeof(addr);
+ memset(&addr, 0, sizeof(addr));
+ addr.sin_family = AF_INET;
+ addr.sin_addr.s_addr = inet_addr("127.0.0.1");
+ addr.sin_port = 0;
+ if (socket_ops::bind(acceptor.get(), (const socket_addr_type*)&addr,
+ addr_len, ec) == socket_error_retval)
+ boost::asio::detail::throw_error(ec, "socket_select_interrupter");
+
+ if (socket_ops::getsockname(acceptor.get(), (socket_addr_type*)&addr,
+ &addr_len, ec) == socket_error_retval)
+ boost::asio::detail::throw_error(ec, "socket_select_interrupter");
+
+ // Some broken firewalls on Windows will intermittently cause getsockname to
+ // return 0.0.0.0 when the socket is actually bound to 127.0.0.1. We
+ // explicitly specify the target address here to work around this problem.
+ addr.sin_addr.s_addr = inet_addr("127.0.0.1");
+
+ if (socket_ops::listen(acceptor.get(),
+ SOMAXCONN, ec) == socket_error_retval)
+ boost::asio::detail::throw_error(ec, "socket_select_interrupter");
+
+ socket_holder client(socket_ops::socket(
+ AF_INET, SOCK_STREAM, IPPROTO_TCP, ec));
+ if (client.get() == invalid_socket)
+ boost::asio::detail::throw_error(ec, "socket_select_interrupter");
+
+ if (socket_ops::connect(client.get(), (const socket_addr_type*)&addr,
+ addr_len, ec) == socket_error_retval)
+ boost::asio::detail::throw_error(ec, "socket_select_interrupter");
+
+ socket_holder server(socket_ops::accept(acceptor.get(), 0, 0, ec));
+ if (server.get() == invalid_socket)
+ boost::asio::detail::throw_error(ec, "socket_select_interrupter");
+
+ ioctl_arg_type non_blocking = 1;
+ socket_ops::state_type client_state = 0;
+ if (socket_ops::ioctl(client.get(), client_state,
+ FIONBIO, &non_blocking, ec))
+ boost::asio::detail::throw_error(ec, "socket_select_interrupter");
+
+ opt = 1;
+ socket_ops::setsockopt(client.get(), client_state,
+ IPPROTO_TCP, TCP_NODELAY, &opt, sizeof(opt), ec);
+
+ non_blocking = 1;
+ socket_ops::state_type server_state = 0;
+ if (socket_ops::ioctl(server.get(), server_state,
+ FIONBIO, &non_blocking, ec))
+ boost::asio::detail::throw_error(ec, "socket_select_interrupter");
+
+ opt = 1;
+ socket_ops::setsockopt(server.get(), server_state,
+ IPPROTO_TCP, TCP_NODELAY, &opt, sizeof(opt), ec);
+
+ read_descriptor_ = server.release();
+ write_descriptor_ = client.release();
+}
+
+socket_select_interrupter::~socket_select_interrupter()
+{
+ close_descriptors();
+}
+
+void socket_select_interrupter::close_descriptors()
+{
+ boost::system::error_code ec;
+ socket_ops::state_type state = socket_ops::internal_non_blocking;
+ if (read_descriptor_ != invalid_socket)
+ socket_ops::close(read_descriptor_, state, true, ec);
+ if (write_descriptor_ != invalid_socket)
+ socket_ops::close(write_descriptor_, state, true, ec);
+}
+
+void socket_select_interrupter::recreate()
+{
+ close_descriptors();
+
+ write_descriptor_ = invalid_socket;
+ read_descriptor_ = invalid_socket;
+
+ open_descriptors();
+}
+
+void socket_select_interrupter::interrupt()
+{
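+  // Send a single byte on the write end so that the read descriptor becomes
+  // ready and any pending select() call returns.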
+ char byte = 0;
+ socket_ops::buf b;
+ socket_ops::init_buf(b, &byte, 1);
+ boost::system::error_code ec;
+ socket_ops::send(write_descriptor_, &b, 1, 0, ec);
+}
+
+bool socket_select_interrupter::reset()
+{
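+  // Drain any data queued on the read end so that it does not remain ready
+  // and cause spurious wake-ups on later select() calls.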
+ char data[1024];
+ socket_ops::buf b;
+ socket_ops::init_buf(b, data, sizeof(data));
+ boost::system::error_code ec;
+ int bytes_read = socket_ops::recv(read_descriptor_, &b, 1, 0, ec);
+ bool was_interrupted = (bytes_read > 0);
+ while (bytes_read == sizeof(data))
+ bytes_read = socket_ops::recv(read_descriptor_, &b, 1, 0, ec);
+ return was_interrupted;
+}
+
+} // namespace detail
+} // namespace asio
+} // namespace boost
+
+#include <boost/asio/detail/pop_options.hpp>
+
+#endif // defined(BOOST_WINDOWS)
+ // || defined(__CYGWIN__)
+ // || defined(__SYMBIAN32__)
+
+#endif // BOOST_ASIO_DETAIL_IMPL_SOCKET_SELECT_INTERRUPTER_IPP
diff --git a/boost/asio/detail/impl/strand_service.hpp b/boost/asio/detail/impl/strand_service.hpp
new file mode 100644
index 0000000000..1d98d99055
--- /dev/null
+++ b/boost/asio/detail/impl/strand_service.hpp
@@ -0,0 +1,121 @@
+//
+// detail/impl/strand_service.hpp
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+//
+// Copyright (c) 2003-2012 Christopher M. Kohlhoff (chris at kohlhoff dot com)
+//
+// Distributed under the Boost Software License, Version 1.0. (See accompanying
+// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+//
+
+#ifndef BOOST_ASIO_DETAIL_IMPL_STRAND_SERVICE_HPP
+#define BOOST_ASIO_DETAIL_IMPL_STRAND_SERVICE_HPP
+
+#if defined(_MSC_VER) && (_MSC_VER >= 1200)
+# pragma once
+#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
+
+#include <boost/asio/detail/call_stack.hpp>
+#include <boost/asio/detail/completion_handler.hpp>
+#include <boost/asio/detail/fenced_block.hpp>
+#include <boost/asio/detail/handler_alloc_helpers.hpp>
+#include <boost/asio/detail/handler_invoke_helpers.hpp>
+
+#include <boost/asio/detail/push_options.hpp>
+
+namespace boost {
+namespace asio {
+namespace detail {
+
+inline strand_service::strand_impl::strand_impl()
+ : operation(&strand_service::do_complete),
+ locked_(false)
+{
+}
+
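+// Scope guard used by dispatch(). Its destructor moves any handlers that
+// arrived while the strand was executing on to the ready queue and, if more
+// work remains, reposts the strand to the io_service. Because this happens in
+// a destructor, it runs even if the dispatched handler throws.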
+struct strand_service::on_dispatch_exit
+{
+ io_service_impl* io_service_;
+ strand_impl* impl_;
+
+ ~on_dispatch_exit()
+ {
+ impl_->mutex_.lock();
+ impl_->ready_queue_.push(impl_->waiting_queue_);
+ bool more_handlers = impl_->locked_ = !impl_->ready_queue_.empty();
+ impl_->mutex_.unlock();
+
+ if (more_handlers)
+ io_service_->post_immediate_completion(impl_);
+ }
+};
+
+inline void strand_service::destroy(strand_service::implementation_type& impl)
+{
+ impl = 0;
+}
+
+template <typename Handler>
+void strand_service::dispatch(strand_service::implementation_type& impl,
+ Handler handler)
+{
+ // If we are already in the strand then the handler can run immediately.
+ if (call_stack<strand_impl>::contains(impl))
+ {
+ fenced_block b(fenced_block::full);
+ boost_asio_handler_invoke_helpers::invoke(handler, handler);
+ return;
+ }
+
+ // Allocate and construct an operation to wrap the handler.
+ typedef completion_handler<Handler> op;
+ typename op::ptr p = { boost::addressof(handler),
+ boost_asio_handler_alloc_helpers::allocate(
+ sizeof(op), handler), 0 };
+ p.p = new (p.v) op(handler);
+
+ BOOST_ASIO_HANDLER_CREATION((p.p, "strand", impl, "dispatch"));
+
+ bool dispatch_immediately = do_dispatch(impl, p.p);
+ operation* o = p.p;
+ p.v = p.p = 0;
+
+ if (dispatch_immediately)
+ {
+ // Indicate that this strand is executing on the current thread.
+ call_stack<strand_impl>::context ctx(impl);
+
+ // Ensure the next handler, if any, is scheduled on block exit.
+ on_dispatch_exit on_exit = { &io_service_, impl };
+ (void)on_exit;
+
+ completion_handler<Handler>::do_complete(
+ &io_service_, o, boost::system::error_code(), 0);
+ }
+}
+
+// Request the io_service to invoke the given handler and return immediately.
+template <typename Handler>
+void strand_service::post(strand_service::implementation_type& impl,
+ Handler handler)
+{
+ // Allocate and construct an operation to wrap the handler.
+ typedef completion_handler<Handler> op;
+ typename op::ptr p = { boost::addressof(handler),
+ boost_asio_handler_alloc_helpers::allocate(
+ sizeof(op), handler), 0 };
+ p.p = new (p.v) op(handler);
+
+ BOOST_ASIO_HANDLER_CREATION((p.p, "strand", impl, "post"));
+
+ do_post(impl, p.p);
+ p.v = p.p = 0;
+}
+
+} // namespace detail
+} // namespace asio
+} // namespace boost
+
+#include <boost/asio/detail/pop_options.hpp>
+
+#endif // BOOST_ASIO_DETAIL_IMPL_STRAND_SERVICE_HPP
diff --git a/boost/asio/detail/impl/strand_service.ipp b/boost/asio/detail/impl/strand_service.ipp
new file mode 100644
index 0000000000..64e4cc00ce
--- /dev/null
+++ b/boost/asio/detail/impl/strand_service.ipp
@@ -0,0 +1,171 @@
+//
+// detail/impl/strand_service.ipp
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+//
+// Copyright (c) 2003-2012 Christopher M. Kohlhoff (chris at kohlhoff dot com)
+//
+// Distributed under the Boost Software License, Version 1.0. (See accompanying
+// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+//
+
+#ifndef BOOST_ASIO_DETAIL_IMPL_STRAND_SERVICE_IPP
+#define BOOST_ASIO_DETAIL_IMPL_STRAND_SERVICE_IPP
+
+#if defined(_MSC_VER) && (_MSC_VER >= 1200)
+# pragma once
+#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
+
+#include <boost/asio/detail/config.hpp>
+#include <boost/asio/detail/call_stack.hpp>
+#include <boost/asio/detail/strand_service.hpp>
+
+#include <boost/asio/detail/push_options.hpp>
+
+namespace boost {
+namespace asio {
+namespace detail {
+
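+// Scope guard used by do_complete(). Its destructor promotes any handlers
+// that were queued while the strand was running to the ready queue and, if
+// handlers remain, reschedules the strand on the io_service.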
+struct strand_service::on_do_complete_exit
+{
+ io_service_impl* owner_;
+ strand_impl* impl_;
+
+ ~on_do_complete_exit()
+ {
+ impl_->mutex_.lock();
+ impl_->ready_queue_.push(impl_->waiting_queue_);
+ bool more_handlers = impl_->locked_ = !impl_->ready_queue_.empty();
+ impl_->mutex_.unlock();
+
+ if (more_handlers)
+ owner_->post_immediate_completion(impl_);
+ }
+};
+
+strand_service::strand_service(boost::asio::io_service& io_service)
+ : boost::asio::detail::service_base<strand_service>(io_service),
+ io_service_(boost::asio::use_service<io_service_impl>(io_service)),
+ mutex_(),
+ salt_(0)
+{
+}
+
+void strand_service::shutdown_service()
+{
+ op_queue<operation> ops;
+
+ boost::asio::detail::mutex::scoped_lock lock(mutex_);
+
+ for (std::size_t i = 0; i < num_implementations; ++i)
+ {
+ if (strand_impl* impl = implementations_[i].get())
+ {
+ ops.push(impl->waiting_queue_);
+ ops.push(impl->ready_queue_);
+ }
+ }
+}
+
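+// Strand implementations are drawn from a fixed-size pool. The slot is chosen
+// from a hash of the address of the strand's implementation pointer mixed
+// with a per-service salt (or allocated sequentially when
+// BOOST_ASIO_ENABLE_SEQUENTIAL_STRAND_ALLOCATION is defined), so unrelated
+// strands may end up sharing an implementation.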
+void strand_service::construct(strand_service::implementation_type& impl)
+{
+ boost::asio::detail::mutex::scoped_lock lock(mutex_);
+
+ std::size_t salt = salt_++;
+#if defined(BOOST_ASIO_ENABLE_SEQUENTIAL_STRAND_ALLOCATION)
+ std::size_t index = salt;
+#else // defined(BOOST_ASIO_ENABLE_SEQUENTIAL_STRAND_ALLOCATION)
+ std::size_t index = reinterpret_cast<std::size_t>(&impl);
+ index += (reinterpret_cast<std::size_t>(&impl) >> 3);
+ index ^= salt + 0x9e3779b9 + (index << 6) + (index >> 2);
+#endif // defined(BOOST_ASIO_ENABLE_SEQUENTIAL_STRAND_ALLOCATION)
+ index = index % num_implementations;
+
+ if (!implementations_[index].get())
+ implementations_[index].reset(new strand_impl);
+ impl = implementations_[index].get();
+}
+
+bool strand_service::do_dispatch(implementation_type& impl, operation* op)
+{
+ // If we are running inside the io_service, and no other handler already
+ // holds the strand lock, then the handler can run immediately.
+ bool can_dispatch = io_service_.can_dispatch();
+ impl->mutex_.lock();
+ if (can_dispatch && !impl->locked_)
+ {
+ // Immediate invocation is allowed.
+ impl->locked_ = true;
+ impl->mutex_.unlock();
+ return true;
+ }
+
+ if (impl->locked_)
+ {
+ // Some other handler already holds the strand lock. Enqueue for later.
+ impl->waiting_queue_.push(op);
+ impl->mutex_.unlock();
+ }
+ else
+ {
+ // The handler is acquiring the strand lock and so is responsible for
+ // scheduling the strand.
+ impl->locked_ = true;
+ impl->mutex_.unlock();
+ impl->ready_queue_.push(op);
+ io_service_.post_immediate_completion(impl);
+ }
+
+ return false;
+}
+
+void strand_service::do_post(implementation_type& impl, operation* op)
+{
+ impl->mutex_.lock();
+ if (impl->locked_)
+ {
+ // Some other handler already holds the strand lock. Enqueue for later.
+ impl->waiting_queue_.push(op);
+ impl->mutex_.unlock();
+ }
+ else
+ {
+ // The handler is acquiring the strand lock and so is responsible for
+ // scheduling the strand.
+ impl->locked_ = true;
+ impl->mutex_.unlock();
+ impl->ready_queue_.push(op);
+ io_service_.post_immediate_completion(impl);
+ }
+}
+
+void strand_service::do_complete(io_service_impl* owner, operation* base,
+ const boost::system::error_code& ec, std::size_t /*bytes_transferred*/)
+{
+ if (owner)
+ {
+ strand_impl* impl = static_cast<strand_impl*>(base);
+
+ // Indicate that this strand is executing on the current thread.
+ call_stack<strand_impl>::context ctx(impl);
+
+ // Ensure the next handler, if any, is scheduled on block exit.
+ on_do_complete_exit on_exit = { owner, impl };
+ (void)on_exit;
+
+ // Run all ready handlers. No lock is required since the ready queue is
+ // accessed only within the strand.
+ while (operation* o = impl->ready_queue_.front())
+ {
+ impl->ready_queue_.pop();
+ o->complete(*owner, ec, 0);
+ }
+ }
+}
+
+} // namespace detail
+} // namespace asio
+} // namespace boost
+
+#include <boost/asio/detail/pop_options.hpp>
+
+#endif // BOOST_ASIO_DETAIL_IMPL_STRAND_SERVICE_IPP
diff --git a/boost/asio/detail/impl/task_io_service.hpp b/boost/asio/detail/impl/task_io_service.hpp
new file mode 100644
index 0000000000..7cd7449e60
--- /dev/null
+++ b/boost/asio/detail/impl/task_io_service.hpp
@@ -0,0 +1,75 @@
+//
+// detail/impl/task_io_service.hpp
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+//
+// Copyright (c) 2003-2012 Christopher M. Kohlhoff (chris at kohlhoff dot com)
+//
+// Distributed under the Boost Software License, Version 1.0. (See accompanying
+// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+//
+
+#ifndef BOOST_ASIO_DETAIL_IMPL_TASK_IO_SERVICE_HPP
+#define BOOST_ASIO_DETAIL_IMPL_TASK_IO_SERVICE_HPP
+
+#if defined(_MSC_VER) && (_MSC_VER >= 1200)
+# pragma once
+#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
+
+#include <boost/asio/detail/completion_handler.hpp>
+#include <boost/asio/detail/fenced_block.hpp>
+#include <boost/asio/detail/handler_alloc_helpers.hpp>
+#include <boost/asio/detail/handler_invoke_helpers.hpp>
+
+#include <boost/asio/detail/push_options.hpp>
+
+namespace boost {
+namespace asio {
+namespace detail {
+
+template <typename Handler>
+void task_io_service::dispatch(Handler handler)
+{
+ if (thread_call_stack::contains(this))
+ {
+ fenced_block b(fenced_block::full);
+ boost_asio_handler_invoke_helpers::invoke(handler, handler);
+ }
+ else
+ {
+ // Allocate and construct an operation to wrap the handler.
+ typedef completion_handler<Handler> op;
+ typename op::ptr p = { boost::addressof(handler),
+ boost_asio_handler_alloc_helpers::allocate(
+ sizeof(op), handler), 0 };
+ p.p = new (p.v) op(handler);
+
+ BOOST_ASIO_HANDLER_CREATION((p.p, "io_service", this, "dispatch"));
+
+ post_immediate_completion(p.p);
+ p.v = p.p = 0;
+ }
+}
+
+template <typename Handler>
+void task_io_service::post(Handler handler)
+{
+ // Allocate and construct an operation to wrap the handler.
+ typedef completion_handler<Handler> op;
+ typename op::ptr p = { boost::addressof(handler),
+ boost_asio_handler_alloc_helpers::allocate(
+ sizeof(op), handler), 0 };
+ p.p = new (p.v) op(handler);
+
+ BOOST_ASIO_HANDLER_CREATION((p.p, "io_service", this, "post"));
+
+ post_immediate_completion(p.p);
+ p.v = p.p = 0;
+}
+
+} // namespace detail
+} // namespace asio
+} // namespace boost
+
+#include <boost/asio/detail/pop_options.hpp>
+
+#endif // BOOST_ASIO_DETAIL_IMPL_TASK_IO_SERVICE_HPP
diff --git a/boost/asio/detail/impl/task_io_service.ipp b/boost/asio/detail/impl/task_io_service.ipp
new file mode 100644
index 0000000000..3d679c2dca
--- /dev/null
+++ b/boost/asio/detail/impl/task_io_service.ipp
@@ -0,0 +1,494 @@
+//
+// detail/impl/task_io_service.ipp
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+//
+// Copyright (c) 2003-2012 Christopher M. Kohlhoff (chris at kohlhoff dot com)
+//
+// Distributed under the Boost Software License, Version 1.0. (See accompanying
+// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+//
+
+#ifndef BOOST_ASIO_DETAIL_IMPL_TASK_IO_SERVICE_IPP
+#define BOOST_ASIO_DETAIL_IMPL_TASK_IO_SERVICE_IPP
+
+#if defined(_MSC_VER) && (_MSC_VER >= 1200)
+# pragma once
+#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
+
+#include <boost/asio/detail/config.hpp>
+
+#if !defined(BOOST_ASIO_HAS_IOCP)
+
+#include <boost/limits.hpp>
+#include <boost/asio/detail/event.hpp>
+#include <boost/asio/detail/reactor.hpp>
+#include <boost/asio/detail/task_io_service.hpp>
+
+#include <boost/asio/detail/push_options.hpp>
+
+namespace boost {
+namespace asio {
+namespace detail {
+
+struct task_io_service::task_cleanup
+{
+ ~task_cleanup()
+ {
+ // Enqueue the completed operations and reinsert the task at the end of
+ // the operation queue.
+ lock_->lock();
+ task_io_service_->task_interrupted_ = true;
+ task_io_service_->op_queue_.push(*ops_);
+ task_io_service_->op_queue_.push(&task_io_service_->task_operation_);
+ }
+
+ task_io_service* task_io_service_;
+ mutex::scoped_lock* lock_;
+ op_queue<operation>* ops_;
+};
+
+struct task_io_service::work_cleanup
+{
+ ~work_cleanup()
+ {
+ task_io_service_->work_finished();
+
+#if defined(BOOST_HAS_THREADS) && !defined(BOOST_ASIO_DISABLE_THREADS)
+ if (!ops_->empty())
+ {
+ lock_->lock();
+ task_io_service_->op_queue_.push(*ops_);
+ }
+#endif // defined(BOOST_HAS_THREADS) && !defined(BOOST_ASIO_DISABLE_THREADS)
+ }
+
+ task_io_service* task_io_service_;
+ mutex::scoped_lock* lock_;
+ op_queue<operation>* ops_;
+};
+
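+// Per-thread state recorded while a thread is running the io_service. Idle
+// threads are linked into a stack headed by first_idle_thread_ and are woken
+// through their wakeup_event.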
+struct task_io_service::thread_info
+{
+ event* wakeup_event;
+ op_queue<operation>* private_op_queue;
+ thread_info* next;
+};
+
+task_io_service::task_io_service(
+ boost::asio::io_service& io_service, std::size_t concurrency_hint)
+ : boost::asio::detail::service_base<task_io_service>(io_service),
+ one_thread_(concurrency_hint == 1),
+ mutex_(),
+ task_(0),
+ task_interrupted_(true),
+ outstanding_work_(0),
+ stopped_(false),
+ shutdown_(false),
+ first_idle_thread_(0)
+{
+ BOOST_ASIO_HANDLER_TRACKING_INIT;
+}
+
+void task_io_service::shutdown_service()
+{
+ mutex::scoped_lock lock(mutex_);
+ shutdown_ = true;
+ lock.unlock();
+
+ // Destroy handler objects.
+ while (!op_queue_.empty())
+ {
+ operation* o = op_queue_.front();
+ op_queue_.pop();
+ if (o != &task_operation_)
+ o->destroy();
+ }
+
+ // Reset to initial state.
+ task_ = 0;
+}
+
+void task_io_service::init_task()
+{
+ mutex::scoped_lock lock(mutex_);
+ if (!shutdown_ && !task_)
+ {
+ task_ = &use_service<reactor>(this->get_io_service());
+ op_queue_.push(&task_operation_);
+ wake_one_thread_and_unlock(lock);
+ }
+}
+
+std::size_t task_io_service::run(boost::system::error_code& ec)
+{
+ ec = boost::system::error_code();
+ if (outstanding_work_ == 0)
+ {
+ stop();
+ return 0;
+ }
+
+ thread_info this_thread;
+ event wakeup_event;
+ this_thread.wakeup_event = &wakeup_event;
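+  // When the io_service is used from only one thread, handlers posted while a
+  // handler is running are collected on this private queue without locking the
+  // mutex, and are moved back to the main queue in a single batch afterwards.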
+ op_queue<operation> private_op_queue;
+#if defined(BOOST_HAS_THREADS) && !defined(BOOST_ASIO_DISABLE_THREADS)
+  this_thread.private_op_queue = one_thread_ ? &private_op_queue : 0;
+#else // defined(BOOST_HAS_THREADS) && !defined(BOOST_ASIO_DISABLE_THREADS)
+ this_thread.private_op_queue = 0;
+#endif // defined(BOOST_HAS_THREADS) && !defined(BOOST_ASIO_DISABLE_THREADS)
+ this_thread.next = 0;
+ thread_call_stack::context ctx(this, this_thread);
+
+ mutex::scoped_lock lock(mutex_);
+
+ std::size_t n = 0;
+ for (; do_run_one(lock, this_thread, private_op_queue, ec); lock.lock())
+ if (n != (std::numeric_limits<std::size_t>::max)())
+ ++n;
+ return n;
+}
+
+std::size_t task_io_service::run_one(boost::system::error_code& ec)
+{
+ ec = boost::system::error_code();
+ if (outstanding_work_ == 0)
+ {
+ stop();
+ return 0;
+ }
+
+ thread_info this_thread;
+ event wakeup_event;
+ this_thread.wakeup_event = &wakeup_event;
+ op_queue<operation> private_op_queue;
+ this_thread.private_op_queue = 0;
+ this_thread.next = 0;
+ thread_call_stack::context ctx(this, this_thread);
+
+ mutex::scoped_lock lock(mutex_);
+
+ return do_run_one(lock, this_thread, private_op_queue, ec);
+}
+
+std::size_t task_io_service::poll(boost::system::error_code& ec)
+{
+ ec = boost::system::error_code();
+ if (outstanding_work_ == 0)
+ {
+ stop();
+ return 0;
+ }
+
+ thread_info this_thread;
+ this_thread.wakeup_event = 0;
+ op_queue<operation> private_op_queue;
+#if defined(BOOST_HAS_THREADS) && !defined(BOOST_ASIO_DISABLE_THREADS)
+  this_thread.private_op_queue = one_thread_ ? &private_op_queue : 0;
+#else // defined(BOOST_HAS_THREADS) && !defined(BOOST_ASIO_DISABLE_THREADS)
+ this_thread.private_op_queue = 0;
+#endif // defined(BOOST_HAS_THREADS) && !defined(BOOST_ASIO_DISABLE_THREADS)
+ this_thread.next = 0;
+ thread_call_stack::context ctx(this, this_thread);
+
+ mutex::scoped_lock lock(mutex_);
+
+#if defined(BOOST_HAS_THREADS) && !defined(BOOST_ASIO_DISABLE_THREADS)
+ // We want to support nested calls to poll() and poll_one(), so any handlers
+ // that are already on a thread-private queue need to be put on to the main
+ // queue now.
+ if (one_thread_)
+ if (thread_info* outer_thread_info = ctx.next_by_key())
+ if (outer_thread_info->private_op_queue)
+ op_queue_.push(*outer_thread_info->private_op_queue);
+#endif // defined(BOOST_HAS_THREADS) && !defined(BOOST_ASIO_DISABLE_THREADS)
+
+ std::size_t n = 0;
+ for (; do_poll_one(lock, private_op_queue, ec); lock.lock())
+ if (n != (std::numeric_limits<std::size_t>::max)())
+ ++n;
+ return n;
+}
+
+std::size_t task_io_service::poll_one(boost::system::error_code& ec)
+{
+ ec = boost::system::error_code();
+ if (outstanding_work_ == 0)
+ {
+ stop();
+ return 0;
+ }
+
+ thread_info this_thread;
+ this_thread.wakeup_event = 0;
+ op_queue<operation> private_op_queue;
+ this_thread.private_op_queue = 0;
+ this_thread.next = 0;
+ thread_call_stack::context ctx(this, this_thread);
+
+ mutex::scoped_lock lock(mutex_);
+
+#if defined(BOOST_HAS_THREADS) && !defined(BOOST_ASIO_DISABLE_THREADS)
+ // We want to support nested calls to poll() and poll_one(), so any handlers
+ // that are already on a thread-private queue need to be put on to the main
+ // queue now.
+ if (one_thread_)
+ if (thread_info* outer_thread_info = ctx.next_by_key())
+ if (outer_thread_info->private_op_queue)
+ op_queue_.push(*outer_thread_info->private_op_queue);
+#endif // defined(BOOST_HAS_THREADS) && !defined(BOOST_ASIO_DISABLE_THREADS)
+
+ return do_poll_one(lock, private_op_queue, ec);
+}
+
+void task_io_service::stop()
+{
+ mutex::scoped_lock lock(mutex_);
+ stop_all_threads(lock);
+}
+
+bool task_io_service::stopped() const
+{
+ mutex::scoped_lock lock(mutex_);
+ return stopped_;
+}
+
+void task_io_service::reset()
+{
+ mutex::scoped_lock lock(mutex_);
+ stopped_ = false;
+}
+
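+// Post a handler that has not yet been counted as outstanding work. The work
+// count is incremented before the handler is queued so that a concurrent call
+// to run() does not return early.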
+void task_io_service::post_immediate_completion(task_io_service::operation* op)
+{
+ work_started();
+ post_deferred_completion(op);
+}
+
+void task_io_service::post_deferred_completion(task_io_service::operation* op)
+{
+#if defined(BOOST_HAS_THREADS) && !defined(BOOST_ASIO_DISABLE_THREADS)
+ if (one_thread_)
+ {
+ if (thread_info* this_thread = thread_call_stack::contains(this))
+ {
+ if (this_thread->private_op_queue)
+ {
+ this_thread->private_op_queue->push(op);
+ return;
+ }
+ }
+ }
+#endif // defined(BOOST_HAS_THREADS) && !defined(BOOST_ASIO_DISABLE_THREADS)
+
+ mutex::scoped_lock lock(mutex_);
+ op_queue_.push(op);
+ wake_one_thread_and_unlock(lock);
+}
+
+void task_io_service::post_deferred_completions(
+ op_queue<task_io_service::operation>& ops)
+{
+ if (!ops.empty())
+ {
+#if defined(BOOST_HAS_THREADS) && !defined(BOOST_ASIO_DISABLE_THREADS)
+ if (one_thread_)
+ {
+ if (thread_info* this_thread = thread_call_stack::contains(this))
+ {
+ if (this_thread->private_op_queue)
+ {
+ this_thread->private_op_queue->push(ops);
+ return;
+ }
+ }
+ }
+#endif // defined(BOOST_HAS_THREADS) && !defined(BOOST_ASIO_DISABLE_THREADS)
+
+ mutex::scoped_lock lock(mutex_);
+ op_queue_.push(ops);
+ wake_one_thread_and_unlock(lock);
+ }
+}
+
+void task_io_service::abandon_operations(
+ op_queue<task_io_service::operation>& ops)
+{
+ op_queue<task_io_service::operation> ops2;
+ ops2.push(ops);
+}
+
+std::size_t task_io_service::do_run_one(mutex::scoped_lock& lock,
+ task_io_service::thread_info& this_thread,
+ op_queue<operation>& private_op_queue, const boost::system::error_code& ec)
+{
+ while (!stopped_)
+ {
+ if (!op_queue_.empty())
+ {
+ // Prepare to execute first handler from queue.
+ operation* o = op_queue_.front();
+ op_queue_.pop();
+ bool more_handlers = (!op_queue_.empty());
+
+ if (o == &task_operation_)
+ {
+ task_interrupted_ = more_handlers;
+
+ if (more_handlers && !one_thread_)
+ {
+ if (!wake_one_idle_thread_and_unlock(lock))
+ lock.unlock();
+ }
+ else
+ lock.unlock();
+
+ op_queue<operation> completed_ops;
+ task_cleanup on_exit = { this, &lock, &completed_ops };
+ (void)on_exit;
+
+ // Run the task. May throw an exception. Only block if the operation
+ // queue is empty and we're not polling, otherwise we want to return
+ // as soon as possible.
+ task_->run(!more_handlers, completed_ops);
+ }
+ else
+ {
+ std::size_t task_result = o->task_result_;
+
+ if (more_handlers && !one_thread_)
+ wake_one_thread_and_unlock(lock);
+ else
+ lock.unlock();
+
+ // Ensure the count of outstanding work is decremented on block exit.
+ work_cleanup on_exit = { this, &lock, &private_op_queue };
+ (void)on_exit;
+
+ // Complete the operation. May throw an exception. Deletes the object.
+ o->complete(*this, ec, task_result);
+
+ return 1;
+ }
+ }
+ else
+ {
+ // Nothing to run right now, so just wait for work to do.
+ this_thread.next = first_idle_thread_;
+ first_idle_thread_ = &this_thread;
+ this_thread.wakeup_event->clear(lock);
+ this_thread.wakeup_event->wait(lock);
+ }
+ }
+
+ return 0;
+}
+
+std::size_t task_io_service::do_poll_one(mutex::scoped_lock& lock,
+ op_queue<operation>& private_op_queue, const boost::system::error_code& ec)
+{
+ if (stopped_)
+ return 0;
+
+ operation* o = op_queue_.front();
+ if (o == &task_operation_)
+ {
+ op_queue_.pop();
+ lock.unlock();
+
+ {
+ op_queue<operation> completed_ops;
+ task_cleanup c = { this, &lock, &completed_ops };
+ (void)c;
+
+      // Run the task. May throw an exception. We are polling, so the task is
+      // asked not to block.
+ task_->run(false, completed_ops);
+ }
+
+ o = op_queue_.front();
+ if (o == &task_operation_)
+ return 0;
+ }
+
+ if (o == 0)
+ return 0;
+
+ op_queue_.pop();
+ bool more_handlers = (!op_queue_.empty());
+
+ std::size_t task_result = o->task_result_;
+
+ if (more_handlers && !one_thread_)
+ wake_one_thread_and_unlock(lock);
+ else
+ lock.unlock();
+
+ // Ensure the count of outstanding work is decremented on block exit.
+ work_cleanup on_exit = { this, &lock, &private_op_queue };
+ (void)on_exit;
+
+ // Complete the operation. May throw an exception. Deletes the object.
+ o->complete(*this, ec, task_result);
+
+ return 1;
+}
+
+void task_io_service::stop_all_threads(
+ mutex::scoped_lock& lock)
+{
+ stopped_ = true;
+
+ while (first_idle_thread_)
+ {
+ thread_info* idle_thread = first_idle_thread_;
+ first_idle_thread_ = idle_thread->next;
+ idle_thread->next = 0;
+ idle_thread->wakeup_event->signal(lock);
+ }
+
+ if (!task_interrupted_ && task_)
+ {
+ task_interrupted_ = true;
+ task_->interrupt();
+ }
+}
+
+bool task_io_service::wake_one_idle_thread_and_unlock(
+ mutex::scoped_lock& lock)
+{
+ if (first_idle_thread_)
+ {
+ thread_info* idle_thread = first_idle_thread_;
+ first_idle_thread_ = idle_thread->next;
+ idle_thread->next = 0;
+ idle_thread->wakeup_event->signal_and_unlock(lock);
+ return true;
+ }
+ return false;
+}
+
+void task_io_service::wake_one_thread_and_unlock(
+ mutex::scoped_lock& lock)
+{
+ if (!wake_one_idle_thread_and_unlock(lock))
+ {
+ if (!task_interrupted_ && task_)
+ {
+ task_interrupted_ = true;
+ task_->interrupt();
+ }
+ lock.unlock();
+ }
+}
+
+} // namespace detail
+} // namespace asio
+} // namespace boost
+
+#include <boost/asio/detail/pop_options.hpp>
+
+#endif // !defined(BOOST_ASIO_HAS_IOCP)
+
+#endif // BOOST_ASIO_DETAIL_IMPL_TASK_IO_SERVICE_IPP
diff --git a/boost/asio/detail/impl/throw_error.ipp b/boost/asio/detail/impl/throw_error.ipp
new file mode 100644
index 0000000000..dbe6112852
--- /dev/null
+++ b/boost/asio/detail/impl/throw_error.ipp
@@ -0,0 +1,47 @@
+//
+// detail/impl/throw_error.ipp
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~
+//
+// Copyright (c) 2003-2012 Christopher M. Kohlhoff (chris at kohlhoff dot com)
+//
+// Distributed under the Boost Software License, Version 1.0. (See accompanying
+// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+//
+
+#ifndef BOOST_ASIO_DETAIL_IMPL_THROW_ERROR_IPP
+#define BOOST_ASIO_DETAIL_IMPL_THROW_ERROR_IPP
+
+#if defined(_MSC_VER) && (_MSC_VER >= 1200)
+# pragma once
+#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
+
+#include <boost/asio/detail/config.hpp>
+#include <boost/throw_exception.hpp>
+#include <boost/asio/detail/throw_error.hpp>
+#include <boost/system/system_error.hpp>
+
+#include <boost/asio/detail/push_options.hpp>
+
+namespace boost {
+namespace asio {
+namespace detail {
+
+void do_throw_error(const boost::system::error_code& err)
+{
+ boost::system::system_error e(err);
+ boost::throw_exception(e);
+}
+
+void do_throw_error(const boost::system::error_code& err, const char* location)
+{
+ boost::system::system_error e(err, location);
+ boost::throw_exception(e);
+}
+
+} // namespace detail
+} // namespace asio
+} // namespace boost
+
+#include <boost/asio/detail/pop_options.hpp>
+
+#endif // BOOST_ASIO_DETAIL_IMPL_THROW_ERROR_IPP
diff --git a/boost/asio/detail/impl/timer_queue.ipp b/boost/asio/detail/impl/timer_queue.ipp
new file mode 100644
index 0000000000..c4d9ea34ae
--- /dev/null
+++ b/boost/asio/detail/impl/timer_queue.ipp
@@ -0,0 +1,87 @@
+//
+// detail/impl/timer_queue.ipp
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~
+//
+// Copyright (c) 2003-2011 Christopher M. Kohlhoff (chris at kohlhoff dot com)
+//
+// Distributed under the Boost Software License, Version 1.0. (See accompanying
+// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+//
+
+#ifndef BOOST_ASIO_DETAIL_IMPL_TIMER_QUEUE_IPP
+#define BOOST_ASIO_DETAIL_IMPL_TIMER_QUEUE_IPP
+
+#if defined(_MSC_VER) && (_MSC_VER >= 1200)
+# pragma once
+#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
+
+#include <boost/asio/detail/config.hpp>
+
+#if !defined(BOOST_ASIO_HEADER_ONLY)
+
+#include <boost/asio/detail/timer_queue.hpp>
+
+#include <boost/asio/detail/push_options.hpp>
+
+namespace boost {
+namespace asio {
+namespace detail {
+
+timer_queue<time_traits<boost::posix_time::ptime> >::timer_queue()
+{
+}
+
+timer_queue<time_traits<boost::posix_time::ptime> >::~timer_queue()
+{
+}
+
+bool timer_queue<time_traits<boost::posix_time::ptime> >::enqueue_timer(
+ const time_type& time, per_timer_data& timer, timer_op* op)
+{
+ return impl_.enqueue_timer(time, timer, op);
+}
+
+bool timer_queue<time_traits<boost::posix_time::ptime> >::empty() const
+{
+ return impl_.empty();
+}
+
+long timer_queue<time_traits<boost::posix_time::ptime> >::wait_duration_msec(
+ long max_duration) const
+{
+ return impl_.wait_duration_msec(max_duration);
+}
+
+long timer_queue<time_traits<boost::posix_time::ptime> >::wait_duration_usec(
+ long max_duration) const
+{
+ return impl_.wait_duration_usec(max_duration);
+}
+
+void timer_queue<time_traits<boost::posix_time::ptime> >::get_ready_timers(
+ op_queue<operation>& ops)
+{
+ impl_.get_ready_timers(ops);
+}
+
+void timer_queue<time_traits<boost::posix_time::ptime> >::get_all_timers(
+ op_queue<operation>& ops)
+{
+ impl_.get_all_timers(ops);
+}
+
+std::size_t timer_queue<time_traits<boost::posix_time::ptime> >::cancel_timer(
+ per_timer_data& timer, op_queue<operation>& ops, std::size_t max_cancelled)
+{
+ return impl_.cancel_timer(timer, ops, max_cancelled);
+}
+
+} // namespace detail
+} // namespace asio
+} // namespace boost
+
+#include <boost/asio/detail/pop_options.hpp>
+
+#endif // !defined(BOOST_ASIO_HEADER_ONLY)
+
+#endif // BOOST_ASIO_DETAIL_IMPL_TIMER_QUEUE_IPP
diff --git a/boost/asio/detail/impl/timer_queue_ptime.ipp b/boost/asio/detail/impl/timer_queue_ptime.ipp
new file mode 100644
index 0000000000..c72d8854ca
--- /dev/null
+++ b/boost/asio/detail/impl/timer_queue_ptime.ipp
@@ -0,0 +1,82 @@
+//
+// detail/impl/timer_queue_ptime.ipp
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+//
+// Copyright (c) 2003-2012 Christopher M. Kohlhoff (chris at kohlhoff dot com)
+//
+// Distributed under the Boost Software License, Version 1.0. (See accompanying
+// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+//
+
+#ifndef BOOST_ASIO_DETAIL_IMPL_TIMER_QUEUE_PTIME_IPP
+#define BOOST_ASIO_DETAIL_IMPL_TIMER_QUEUE_PTIME_IPP
+
+#if defined(_MSC_VER) && (_MSC_VER >= 1200)
+# pragma once
+#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
+
+#include <boost/asio/detail/config.hpp>
+#include <boost/asio/detail/timer_queue_ptime.hpp>
+
+#include <boost/asio/detail/push_options.hpp>
+
+namespace boost {
+namespace asio {
+namespace detail {
+
+timer_queue<time_traits<boost::posix_time::ptime> >::timer_queue()
+{
+}
+
+timer_queue<time_traits<boost::posix_time::ptime> >::~timer_queue()
+{
+}
+
+bool timer_queue<time_traits<boost::posix_time::ptime> >::enqueue_timer(
+ const time_type& time, per_timer_data& timer, wait_op* op)
+{
+ return impl_.enqueue_timer(time, timer, op);
+}
+
+bool timer_queue<time_traits<boost::posix_time::ptime> >::empty() const
+{
+ return impl_.empty();
+}
+
+long timer_queue<time_traits<boost::posix_time::ptime> >::wait_duration_msec(
+ long max_duration) const
+{
+ return impl_.wait_duration_msec(max_duration);
+}
+
+long timer_queue<time_traits<boost::posix_time::ptime> >::wait_duration_usec(
+ long max_duration) const
+{
+ return impl_.wait_duration_usec(max_duration);
+}
+
+void timer_queue<time_traits<boost::posix_time::ptime> >::get_ready_timers(
+ op_queue<operation>& ops)
+{
+ impl_.get_ready_timers(ops);
+}
+
+void timer_queue<time_traits<boost::posix_time::ptime> >::get_all_timers(
+ op_queue<operation>& ops)
+{
+ impl_.get_all_timers(ops);
+}
+
+std::size_t timer_queue<time_traits<boost::posix_time::ptime> >::cancel_timer(
+ per_timer_data& timer, op_queue<operation>& ops, std::size_t max_cancelled)
+{
+ return impl_.cancel_timer(timer, ops, max_cancelled);
+}
+
+} // namespace detail
+} // namespace asio
+} // namespace boost
+
+#include <boost/asio/detail/pop_options.hpp>
+
+#endif // BOOST_ASIO_DETAIL_IMPL_TIMER_QUEUE_PTIME_IPP
diff --git a/boost/asio/detail/impl/timer_queue_set.ipp b/boost/asio/detail/impl/timer_queue_set.ipp
new file mode 100644
index 0000000000..7f9a662dcb
--- /dev/null
+++ b/boost/asio/detail/impl/timer_queue_set.ipp
@@ -0,0 +1,103 @@
+//
+// detail/impl/timer_queue_set.ipp
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+//
+// Copyright (c) 2003-2012 Christopher M. Kohlhoff (chris at kohlhoff dot com)
+//
+// Distributed under the Boost Software License, Version 1.0. (See accompanying
+// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+//
+
+#ifndef BOOST_ASIO_DETAIL_IMPL_TIMER_QUEUE_SET_IPP
+#define BOOST_ASIO_DETAIL_IMPL_TIMER_QUEUE_SET_IPP
+
+#if defined(_MSC_VER) && (_MSC_VER >= 1200)
+# pragma once
+#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
+
+#include <boost/asio/detail/config.hpp>
+#include <boost/asio/detail/timer_queue_set.hpp>
+
+#include <boost/asio/detail/push_options.hpp>
+
+namespace boost {
+namespace asio {
+namespace detail {
+
+timer_queue_set::timer_queue_set()
+ : first_(0)
+{
+}
+
+void timer_queue_set::insert(timer_queue_base* q)
+{
+ q->next_ = first_;
+ first_ = q;
+}
+
+void timer_queue_set::erase(timer_queue_base* q)
+{
+ if (first_)
+ {
+ if (q == first_)
+ {
+ first_ = q->next_;
+ q->next_ = 0;
+ return;
+ }
+
+ for (timer_queue_base* p = first_; p->next_; p = p->next_)
+ {
+ if (p->next_ == q)
+ {
+ p->next_ = q->next_;
+ q->next_ = 0;
+ return;
+ }
+ }
+ }
+}
+
+bool timer_queue_set::all_empty() const
+{
+ for (timer_queue_base* p = first_; p; p = p->next_)
+ if (!p->empty())
+ return false;
+ return true;
+}
+
+long timer_queue_set::wait_duration_msec(long max_duration) const
+{
+ long min_duration = max_duration;
+ for (timer_queue_base* p = first_; p; p = p->next_)
+ min_duration = p->wait_duration_msec(min_duration);
+ return min_duration;
+}
+
+long timer_queue_set::wait_duration_usec(long max_duration) const
+{
+ long min_duration = max_duration;
+ for (timer_queue_base* p = first_; p; p = p->next_)
+ min_duration = p->wait_duration_usec(min_duration);
+ return min_duration;
+}
+
+void timer_queue_set::get_ready_timers(op_queue<operation>& ops)
+{
+ for (timer_queue_base* p = first_; p; p = p->next_)
+ p->get_ready_timers(ops);
+}
+
+void timer_queue_set::get_all_timers(op_queue<operation>& ops)
+{
+ for (timer_queue_base* p = first_; p; p = p->next_)
+ p->get_all_timers(ops);
+}
+
+} // namespace detail
+} // namespace asio
+} // namespace boost
+
+#include <boost/asio/detail/pop_options.hpp>
+
+#endif // BOOST_ASIO_DETAIL_IMPL_TIMER_QUEUE_SET_IPP
diff --git a/boost/asio/detail/impl/win_event.ipp b/boost/asio/detail/impl/win_event.ipp
new file mode 100644
index 0000000000..252242ce56
--- /dev/null
+++ b/boost/asio/detail/impl/win_event.ipp
@@ -0,0 +1,52 @@
+//
+// detail/impl/win_event.ipp
+// ~~~~~~~~~~~~~~~~~~~~~~~~~
+//
+// Copyright (c) 2003-2012 Christopher M. Kohlhoff (chris at kohlhoff dot com)
+//
+// Distributed under the Boost Software License, Version 1.0. (See accompanying
+// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+//
+
+#ifndef BOOST_ASIO_DETAIL_IMPL_WIN_EVENT_IPP
+#define BOOST_ASIO_DETAIL_IMPL_WIN_EVENT_IPP
+
+#if defined(_MSC_VER) && (_MSC_VER >= 1200)
+# pragma once
+#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
+
+#include <boost/asio/detail/config.hpp>
+
+#if defined(BOOST_WINDOWS)
+
+#include <boost/asio/detail/throw_error.hpp>
+#include <boost/asio/detail/win_event.hpp>
+#include <boost/asio/error.hpp>
+
+#include <boost/asio/detail/push_options.hpp>
+
+namespace boost {
+namespace asio {
+namespace detail {
+
+win_event::win_event()
+ : event_(::CreateEvent(0, true, false, 0))
+{
+ if (!event_)
+ {
+ DWORD last_error = ::GetLastError();
+ boost::system::error_code ec(last_error,
+ boost::asio::error::get_system_category());
+ boost::asio::detail::throw_error(ec, "event");
+ }
+}
+
+} // namespace detail
+} // namespace asio
+} // namespace boost
+
+#include <boost/asio/detail/pop_options.hpp>
+
+#endif // defined(BOOST_WINDOWS)
+
+#endif // BOOST_ASIO_DETAIL_IMPL_WIN_EVENT_IPP
diff --git a/boost/asio/detail/impl/win_iocp_handle_service.ipp b/boost/asio/detail/impl/win_iocp_handle_service.ipp
new file mode 100644
index 0000000000..ecd45c0398
--- /dev/null
+++ b/boost/asio/detail/impl/win_iocp_handle_service.ipp
@@ -0,0 +1,526 @@
+//
+// detail/impl/win_iocp_handle_service.ipp
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+//
+// Copyright (c) 2003-2012 Christopher M. Kohlhoff (chris at kohlhoff dot com)
+// Copyright (c) 2008 Rep Invariant Systems, Inc. (info@repinvariant.com)
+//
+// Distributed under the Boost Software License, Version 1.0. (See accompanying
+// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+//
+
+#ifndef BOOST_ASIO_DETAIL_IMPL_WIN_IOCP_HANDLE_SERVICE_IPP
+#define BOOST_ASIO_DETAIL_IMPL_WIN_IOCP_HANDLE_SERVICE_IPP
+
+#if defined(_MSC_VER) && (_MSC_VER >= 1200)
+# pragma once
+#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
+
+#include <boost/asio/detail/config.hpp>
+
+#if defined(BOOST_ASIO_HAS_IOCP)
+
+#include <boost/asio/detail/win_iocp_handle_service.hpp>
+
+#include <boost/asio/detail/push_options.hpp>
+
+namespace boost {
+namespace asio {
+namespace detail {
+
+class win_iocp_handle_service::overlapped_wrapper
+ : public OVERLAPPED
+{
+public:
+ explicit overlapped_wrapper(boost::system::error_code& ec)
+ {
+ Internal = 0;
+ InternalHigh = 0;
+ Offset = 0;
+ OffsetHigh = 0;
+
+ // Create a non-signalled manual-reset event, for GetOverlappedResult.
+ hEvent = ::CreateEvent(0, TRUE, FALSE, 0);
+ if (hEvent)
+ {
+ // As documented in GetQueuedCompletionStatus, setting the low order
+ // bit of this event prevents our synchronous writes from being treated
+ // as completion port events.
+ *reinterpret_cast<DWORD_PTR*>(&hEvent) |= 1;
+ }
+ else
+ {
+ DWORD last_error = ::GetLastError();
+ ec = boost::system::error_code(last_error,
+ boost::asio::error::get_system_category());
+ }
+ }
+
+ ~overlapped_wrapper()
+ {
+ if (hEvent)
+ {
+ ::CloseHandle(hEvent);
+ }
+ }
+};
+
+win_iocp_handle_service::win_iocp_handle_service(
+ boost::asio::io_service& io_service)
+ : iocp_service_(boost::asio::use_service<win_iocp_io_service>(io_service)),
+ mutex_(),
+ impl_list_(0)
+{
+}
+
+void win_iocp_handle_service::shutdown_service()
+{
+ // Close all implementations, causing all operations to complete.
+ boost::asio::detail::mutex::scoped_lock lock(mutex_);
+ implementation_type* impl = impl_list_;
+ while (impl)
+ {
+ close_for_destruction(*impl);
+ impl = impl->next_;
+ }
+}
+
+void win_iocp_handle_service::construct(
+ win_iocp_handle_service::implementation_type& impl)
+{
+ impl.handle_ = INVALID_HANDLE_VALUE;
+ impl.safe_cancellation_thread_id_ = 0;
+
+ // Insert implementation into linked list of all implementations.
+ boost::asio::detail::mutex::scoped_lock lock(mutex_);
+ impl.next_ = impl_list_;
+ impl.prev_ = 0;
+ if (impl_list_)
+ impl_list_->prev_ = &impl;
+ impl_list_ = &impl;
+}
+
+void win_iocp_handle_service::move_construct(
+ win_iocp_handle_service::implementation_type& impl,
+ win_iocp_handle_service::implementation_type& other_impl)
+{
+ impl.handle_ = other_impl.handle_;
+ other_impl.handle_ = INVALID_HANDLE_VALUE;
+
+ impl.safe_cancellation_thread_id_ = other_impl.safe_cancellation_thread_id_;
+ other_impl.safe_cancellation_thread_id_ = 0;
+
+ // Insert implementation into linked list of all implementations.
+ boost::asio::detail::mutex::scoped_lock lock(mutex_);
+ impl.next_ = impl_list_;
+ impl.prev_ = 0;
+ if (impl_list_)
+ impl_list_->prev_ = &impl;
+ impl_list_ = &impl;
+}
+
+void win_iocp_handle_service::move_assign(
+ win_iocp_handle_service::implementation_type& impl,
+ win_iocp_handle_service& other_service,
+ win_iocp_handle_service::implementation_type& other_impl)
+{
+ close_for_destruction(impl);
+
+ if (this != &other_service)
+ {
+ // Remove implementation from linked list of all implementations.
+ boost::asio::detail::mutex::scoped_lock lock(mutex_);
+ if (impl_list_ == &impl)
+ impl_list_ = impl.next_;
+ if (impl.prev_)
+ impl.prev_->next_ = impl.next_;
+ if (impl.next_)
+      impl.next_->prev_ = impl.prev_;
+ impl.next_ = 0;
+ impl.prev_ = 0;
+ }
+
+ impl.handle_ = other_impl.handle_;
+ other_impl.handle_ = INVALID_HANDLE_VALUE;
+
+ impl.safe_cancellation_thread_id_ = other_impl.safe_cancellation_thread_id_;
+ other_impl.safe_cancellation_thread_id_ = 0;
+
+ if (this != &other_service)
+ {
+ // Insert implementation into linked list of all implementations.
+ boost::asio::detail::mutex::scoped_lock lock(other_service.mutex_);
+ impl.next_ = other_service.impl_list_;
+ impl.prev_ = 0;
+ if (other_service.impl_list_)
+ other_service.impl_list_->prev_ = &impl;
+ other_service.impl_list_ = &impl;
+ }
+}
+
+void win_iocp_handle_service::destroy(
+ win_iocp_handle_service::implementation_type& impl)
+{
+ close_for_destruction(impl);
+
+ // Remove implementation from linked list of all implementations.
+ boost::asio::detail::mutex::scoped_lock lock(mutex_);
+ if (impl_list_ == &impl)
+ impl_list_ = impl.next_;
+ if (impl.prev_)
+ impl.prev_->next_ = impl.next_;
+ if (impl.next_)
+    impl.next_->prev_ = impl.prev_;
+ impl.next_ = 0;
+ impl.prev_ = 0;
+}
+
+boost::system::error_code win_iocp_handle_service::assign(
+ win_iocp_handle_service::implementation_type& impl,
+ const native_handle_type& handle, boost::system::error_code& ec)
+{
+ if (is_open(impl))
+ {
+ ec = boost::asio::error::already_open;
+ return ec;
+ }
+
+ if (iocp_service_.register_handle(handle, ec))
+ return ec;
+
+ impl.handle_ = handle;
+ ec = boost::system::error_code();
+ return ec;
+}
+
+boost::system::error_code win_iocp_handle_service::close(
+ win_iocp_handle_service::implementation_type& impl,
+ boost::system::error_code& ec)
+{
+ if (is_open(impl))
+ {
+ BOOST_ASIO_HANDLER_OPERATION(("handle", &impl, "close"));
+
+ if (!::CloseHandle(impl.handle_))
+ {
+ DWORD last_error = ::GetLastError();
+ ec = boost::system::error_code(last_error,
+ boost::asio::error::get_system_category());
+ }
+ else
+ {
+ ec = boost::system::error_code();
+ }
+
+ impl.handle_ = INVALID_HANDLE_VALUE;
+ impl.safe_cancellation_thread_id_ = 0;
+ }
+ else
+ {
+ ec = boost::system::error_code();
+ }
+
+ return ec;
+}
+
+boost::system::error_code win_iocp_handle_service::cancel(
+ win_iocp_handle_service::implementation_type& impl,
+ boost::system::error_code& ec)
+{
+ if (!is_open(impl))
+ {
+ ec = boost::asio::error::bad_descriptor;
+ return ec;
+ }
+
+ BOOST_ASIO_HANDLER_OPERATION(("handle", &impl, "cancel"));
+
+ if (FARPROC cancel_io_ex_ptr = ::GetProcAddress(
+ ::GetModuleHandleA("KERNEL32"), "CancelIoEx"))
+ {
+    // This version of Windows supports cancellation from any thread.
+ typedef BOOL (WINAPI* cancel_io_ex_t)(HANDLE, LPOVERLAPPED);
+ cancel_io_ex_t cancel_io_ex = (cancel_io_ex_t)cancel_io_ex_ptr;
+ if (!cancel_io_ex(impl.handle_, 0))
+ {
+ DWORD last_error = ::GetLastError();
+ if (last_error == ERROR_NOT_FOUND)
+ {
+ // ERROR_NOT_FOUND means that there were no operations to be
+ // cancelled. We swallow this error to match the behaviour on other
+ // platforms.
+ ec = boost::system::error_code();
+ }
+ else
+ {
+ ec = boost::system::error_code(last_error,
+ boost::asio::error::get_system_category());
+ }
+ }
+ else
+ {
+ ec = boost::system::error_code();
+ }
+ }
+ else if (impl.safe_cancellation_thread_id_ == 0)
+ {
+ // No operations have been started, so there's nothing to cancel.
+ ec = boost::system::error_code();
+ }
+ else if (impl.safe_cancellation_thread_id_ == ::GetCurrentThreadId())
+ {
+ // Asynchronous operations have been started from the current thread only,
+ // so it is safe to try to cancel them using CancelIo.
+ if (!::CancelIo(impl.handle_))
+ {
+ DWORD last_error = ::GetLastError();
+ ec = boost::system::error_code(last_error,
+ boost::asio::error::get_system_category());
+ }
+ else
+ {
+ ec = boost::system::error_code();
+ }
+ }
+ else
+ {
+ // Asynchronous operations have been started from more than one thread,
+ // so cancellation is not safe.
+ ec = boost::asio::error::operation_not_supported;
+ }
+
+ return ec;
+}
+
+size_t win_iocp_handle_service::do_write(
+ win_iocp_handle_service::implementation_type& impl, boost::uint64_t offset,
+ const boost::asio::const_buffer& buffer, boost::system::error_code& ec)
+{
+ if (!is_open(impl))
+ {
+ ec = boost::asio::error::bad_descriptor;
+ return 0;
+ }
+
+ // A request to write 0 bytes on a handle is a no-op.
+ if (boost::asio::buffer_size(buffer) == 0)
+ {
+ ec = boost::system::error_code();
+ return 0;
+ }
+
+ overlapped_wrapper overlapped(ec);
+ if (ec)
+ {
+ return 0;
+ }
+
+ // Write the data.
+ overlapped.Offset = offset & 0xFFFFFFFF;
+ overlapped.OffsetHigh = (offset >> 32) & 0xFFFFFFFF;
+ BOOL ok = ::WriteFile(impl.handle_,
+ boost::asio::buffer_cast<LPCVOID>(buffer),
+ static_cast<DWORD>(boost::asio::buffer_size(buffer)), 0, &overlapped);
+ if (!ok)
+ {
+ DWORD last_error = ::GetLastError();
+ if (last_error != ERROR_IO_PENDING)
+ {
+ ec = boost::system::error_code(last_error,
+ boost::asio::error::get_system_category());
+ return 0;
+ }
+ }
+
+ // Wait for the operation to complete.
+ DWORD bytes_transferred = 0;
+ ok = ::GetOverlappedResult(impl.handle_,
+ &overlapped, &bytes_transferred, TRUE);
+ if (!ok)
+ {
+ DWORD last_error = ::GetLastError();
+ ec = boost::system::error_code(last_error,
+ boost::asio::error::get_system_category());
+ return 0;
+ }
+
+ ec = boost::system::error_code();
+ return bytes_transferred;
+}
+
+void win_iocp_handle_service::start_write_op(
+ win_iocp_handle_service::implementation_type& impl, boost::uint64_t offset,
+ const boost::asio::const_buffer& buffer, operation* op)
+{
+ update_cancellation_thread_id(impl);
+ iocp_service_.work_started();
+
+ if (!is_open(impl))
+ {
+ iocp_service_.on_completion(op, boost::asio::error::bad_descriptor);
+ }
+ else if (boost::asio::buffer_size(buffer) == 0)
+ {
+ // A request to write 0 bytes on a handle is a no-op.
+ iocp_service_.on_completion(op);
+ }
+ else
+ {
+ DWORD bytes_transferred = 0;
+ op->Offset = offset & 0xFFFFFFFF;
+ op->OffsetHigh = (offset >> 32) & 0xFFFFFFFF;
+ BOOL ok = ::WriteFile(impl.handle_,
+ boost::asio::buffer_cast<LPCVOID>(buffer),
+ static_cast<DWORD>(boost::asio::buffer_size(buffer)),
+ &bytes_transferred, op);
+ DWORD last_error = ::GetLastError();
+ if (!ok && last_error != ERROR_IO_PENDING
+ && last_error != ERROR_MORE_DATA)
+ {
+ iocp_service_.on_completion(op, last_error, bytes_transferred);
+ }
+ else
+ {
+ iocp_service_.on_pending(op);
+ }
+ }
+}
+
+size_t win_iocp_handle_service::do_read(
+ win_iocp_handle_service::implementation_type& impl, boost::uint64_t offset,
+ const boost::asio::mutable_buffer& buffer, boost::system::error_code& ec)
+{
+ if (!is_open(impl))
+ {
+ ec = boost::asio::error::bad_descriptor;
+ return 0;
+ }
+
+ // A request to read 0 bytes on a stream handle is a no-op.
+ if (boost::asio::buffer_size(buffer) == 0)
+ {
+ ec = boost::system::error_code();
+ return 0;
+ }
+
+ overlapped_wrapper overlapped(ec);
+ if (ec)
+ {
+ return 0;
+ }
+
+ // Read some data.
+ overlapped.Offset = offset & 0xFFFFFFFF;
+ overlapped.OffsetHigh = (offset >> 32) & 0xFFFFFFFF;
+ BOOL ok = ::ReadFile(impl.handle_,
+ boost::asio::buffer_cast<LPVOID>(buffer),
+ static_cast<DWORD>(boost::asio::buffer_size(buffer)), 0, &overlapped);
+ if (!ok)
+ {
+ DWORD last_error = ::GetLastError();
+ if (last_error != ERROR_IO_PENDING && last_error != ERROR_MORE_DATA)
+ {
+ if (last_error == ERROR_HANDLE_EOF)
+ {
+ ec = boost::asio::error::eof;
+ }
+ else
+ {
+ ec = boost::system::error_code(last_error,
+ boost::asio::error::get_system_category());
+ }
+ return 0;
+ }
+ }
+
+ // Wait for the operation to complete.
+ DWORD bytes_transferred = 0;
+ ok = ::GetOverlappedResult(impl.handle_,
+ &overlapped, &bytes_transferred, TRUE);
+ if (!ok)
+ {
+ DWORD last_error = ::GetLastError();
+ if (last_error == ERROR_HANDLE_EOF)
+ {
+ ec = boost::asio::error::eof;
+ }
+ else
+ {
+ ec = boost::system::error_code(last_error,
+ boost::asio::error::get_system_category());
+ }
+ return 0;
+ }
+
+ ec = boost::system::error_code();
+ return bytes_transferred;
+}
+
+void win_iocp_handle_service::start_read_op(
+ win_iocp_handle_service::implementation_type& impl, boost::uint64_t offset,
+ const boost::asio::mutable_buffer& buffer, operation* op)
+{
+ update_cancellation_thread_id(impl);
+ iocp_service_.work_started();
+
+ if (!is_open(impl))
+ {
+ iocp_service_.on_completion(op, boost::asio::error::bad_descriptor);
+ }
+ else if (boost::asio::buffer_size(buffer) == 0)
+ {
+ // A request to read 0 bytes on a handle is a no-op.
+ iocp_service_.on_completion(op);
+ }
+ else
+ {
+ DWORD bytes_transferred = 0;
+ op->Offset = offset & 0xFFFFFFFF;
+ op->OffsetHigh = (offset >> 32) & 0xFFFFFFFF;
+ BOOL ok = ::ReadFile(impl.handle_,
+ boost::asio::buffer_cast<LPVOID>(buffer),
+ static_cast<DWORD>(boost::asio::buffer_size(buffer)),
+ &bytes_transferred, op);
+ DWORD last_error = ::GetLastError();
+ if (!ok && last_error != ERROR_IO_PENDING
+ && last_error != ERROR_MORE_DATA)
+ {
+ iocp_service_.on_completion(op, last_error, bytes_transferred);
+ }
+ else
+ {
+ iocp_service_.on_pending(op);
+ }
+ }
+}
+
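+// Record which thread has started asynchronous operations on the handle. A
+// value of ~DWORD(0) means that operations have been started from more than
+// one thread.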
+void win_iocp_handle_service::update_cancellation_thread_id(
+ win_iocp_handle_service::implementation_type& impl)
+{
+ if (impl.safe_cancellation_thread_id_ == 0)
+ impl.safe_cancellation_thread_id_ = ::GetCurrentThreadId();
+ else if (impl.safe_cancellation_thread_id_ != ::GetCurrentThreadId())
+ impl.safe_cancellation_thread_id_ = ~DWORD(0);
+}
+
+void win_iocp_handle_service::close_for_destruction(implementation_type& impl)
+{
+ if (is_open(impl))
+ {
+ BOOST_ASIO_HANDLER_OPERATION(("handle", &impl, "close"));
+
+ ::CloseHandle(impl.handle_);
+ impl.handle_ = INVALID_HANDLE_VALUE;
+ impl.safe_cancellation_thread_id_ = 0;
+ }
+}
+
+} // namespace detail
+} // namespace asio
+} // namespace boost
+
+#include <boost/asio/detail/pop_options.hpp>
+
+#endif // defined(BOOST_ASIO_HAS_IOCP)
+
+#endif // BOOST_ASIO_DETAIL_IMPL_WIN_IOCP_HANDLE_SERVICE_IPP
diff --git a/boost/asio/detail/impl/win_iocp_io_service.hpp b/boost/asio/detail/impl/win_iocp_io_service.hpp
new file mode 100644
index 0000000000..871f6fa2e4
--- /dev/null
+++ b/boost/asio/detail/impl/win_iocp_io_service.hpp
@@ -0,0 +1,131 @@
+//
+// detail/impl/win_iocp_io_service.hpp
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+//
+// Copyright (c) 2003-2012 Christopher M. Kohlhoff (chris at kohlhoff dot com)
+//
+// Distributed under the Boost Software License, Version 1.0. (See accompanying
+// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+//
+
+#ifndef BOOST_ASIO_DETAIL_IMPL_WIN_IOCP_IO_SERVICE_HPP
+#define BOOST_ASIO_DETAIL_IMPL_WIN_IOCP_IO_SERVICE_HPP
+
+#if defined(_MSC_VER) && (_MSC_VER >= 1200)
+# pragma once
+#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
+
+#include <boost/asio/detail/config.hpp>
+
+#if defined(BOOST_ASIO_HAS_IOCP)
+
+#include <boost/asio/detail/completion_handler.hpp>
+#include <boost/asio/detail/fenced_block.hpp>
+#include <boost/asio/detail/handler_alloc_helpers.hpp>
+#include <boost/asio/detail/handler_invoke_helpers.hpp>
+
+#include <boost/asio/detail/push_options.hpp>
+
+namespace boost {
+namespace asio {
+namespace detail {
+
+template <typename Handler>
+void win_iocp_io_service::dispatch(Handler handler)
+{
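+  // If this thread is already running the io_service, the handler can be
+  // invoked directly. Otherwise it is wrapped in an operation and queued.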
+ if (call_stack<win_iocp_io_service>::contains(this))
+ {
+ fenced_block b(fenced_block::full);
+ boost_asio_handler_invoke_helpers::invoke(handler, handler);
+ }
+ else
+ {
+ // Allocate and construct an operation to wrap the handler.
+ typedef completion_handler<Handler> op;
+ typename op::ptr p = { boost::addressof(handler),
+ boost_asio_handler_alloc_helpers::allocate(
+ sizeof(op), handler), 0 };
+ p.p = new (p.v) op(handler);
+
+ BOOST_ASIO_HANDLER_CREATION((p.p, "io_service", this, "dispatch"));
+
+ post_immediate_completion(p.p);
+ p.v = p.p = 0;
+ }
+}
+
+template <typename Handler>
+void win_iocp_io_service::post(Handler handler)
+{
+ // Allocate and construct an operation to wrap the handler.
+ typedef completion_handler<Handler> op;
+ typename op::ptr p = { boost::addressof(handler),
+ boost_asio_handler_alloc_helpers::allocate(
+ sizeof(op), handler), 0 };
+ p.p = new (p.v) op(handler);
+
+ BOOST_ASIO_HANDLER_CREATION((p.p, "io_service", this, "post"));
+
+ post_immediate_completion(p.p);
+ p.v = p.p = 0;
+}
+
+template <typename Time_Traits>
+void win_iocp_io_service::add_timer_queue(
+ timer_queue<Time_Traits>& queue)
+{
+ do_add_timer_queue(queue);
+}
+
+template <typename Time_Traits>
+void win_iocp_io_service::remove_timer_queue(
+ timer_queue<Time_Traits>& queue)
+{
+ do_remove_timer_queue(queue);
+}
+
+template <typename Time_Traits>
+void win_iocp_io_service::schedule_timer(timer_queue<Time_Traits>& queue,
+ const typename Time_Traits::time_type& time,
+ typename timer_queue<Time_Traits>::per_timer_data& timer, wait_op* op)
+{
+  // If the service has been shut down, the timer is not scheduled; the wait
+  // operation is simply posted for immediate completion.
+ if (::InterlockedExchangeAdd(&shutdown_, 0) != 0)
+ {
+ post_immediate_completion(op);
+ return;
+ }
+
+ mutex::scoped_lock lock(dispatch_mutex_);
+
+ bool earliest = queue.enqueue_timer(time, timer, op);
+ work_started();
+ if (earliest)
+ update_timeout();
+}
+
+template <typename Time_Traits>
+std::size_t win_iocp_io_service::cancel_timer(timer_queue<Time_Traits>& queue,
+ typename timer_queue<Time_Traits>::per_timer_data& timer,
+ std::size_t max_cancelled)
+{
+ // If the service has been shut down we silently ignore the cancellation.
+ if (::InterlockedExchangeAdd(&shutdown_, 0) != 0)
+ return 0;
+
+ mutex::scoped_lock lock(dispatch_mutex_);
+ op_queue<win_iocp_operation> ops;
+ std::size_t n = queue.cancel_timer(timer, ops, max_cancelled);
+ post_deferred_completions(ops);
+ return n;
+}
+
+} // namespace detail
+} // namespace asio
+} // namespace boost
+
+#include <boost/asio/detail/pop_options.hpp>
+
+#endif // defined(BOOST_ASIO_HAS_IOCP)
+
+#endif // BOOST_ASIO_DETAIL_IMPL_WIN_IOCP_IO_SERVICE_HPP
diff --git a/boost/asio/detail/impl/win_iocp_io_service.ipp b/boost/asio/detail/impl/win_iocp_io_service.ipp
new file mode 100644
index 0000000000..72f4af7524
--- /dev/null
+++ b/boost/asio/detail/impl/win_iocp_io_service.ipp
@@ -0,0 +1,509 @@
+//
+// detail/impl/win_iocp_io_service.ipp
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+//
+// Copyright (c) 2003-2012 Christopher M. Kohlhoff (chris at kohlhoff dot com)
+//
+// Distributed under the Boost Software License, Version 1.0. (See accompanying
+// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+//
+
+#ifndef BOOST_ASIO_DETAIL_IMPL_WIN_IOCP_IO_SERVICE_IPP
+#define BOOST_ASIO_DETAIL_IMPL_WIN_IOCP_IO_SERVICE_IPP
+
+#if defined(_MSC_VER) && (_MSC_VER >= 1200)
+# pragma once
+#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
+
+#include <boost/asio/detail/config.hpp>
+
+#if defined(BOOST_ASIO_HAS_IOCP)
+
+#include <boost/limits.hpp>
+#include <boost/asio/error.hpp>
+#include <boost/asio/io_service.hpp>
+#include <boost/asio/detail/handler_alloc_helpers.hpp>
+#include <boost/asio/detail/handler_invoke_helpers.hpp>
+#include <boost/asio/detail/throw_error.hpp>
+#include <boost/asio/detail/win_iocp_io_service.hpp>
+
+#include <boost/asio/detail/push_options.hpp>
+
+namespace boost {
+namespace asio {
+namespace detail {
+
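+// Helper whose destructor calls work_finished() on the owning io_service,
+// ensuring that the count of outstanding work is decremented on block exit.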
+struct win_iocp_io_service::work_finished_on_block_exit
+{
+ ~work_finished_on_block_exit()
+ {
+ io_service_->work_finished();
+ }
+
+ win_iocp_io_service* io_service_;
+};
+
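+// Function object run by the background timer thread. It waits on the
+// waitable timer and, each time the timer fires, flags that a dispatch is
+// required and posts a wake-up completion to the I/O completion port.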
+struct win_iocp_io_service::timer_thread_function
+{
+ void operator()()
+ {
+ while (::InterlockedExchangeAdd(&io_service_->shutdown_, 0) == 0)
+ {
+ if (::WaitForSingleObject(io_service_->waitable_timer_.handle,
+ INFINITE) == WAIT_OBJECT_0)
+ {
+ ::InterlockedExchange(&io_service_->dispatch_required_, 1);
+ ::PostQueuedCompletionStatus(io_service_->iocp_.handle,
+ 0, wake_for_dispatch, 0);
+ }
+ }
+ }
+
+ win_iocp_io_service* io_service_;
+};
+
+win_iocp_io_service::win_iocp_io_service(
+ boost::asio::io_service& io_service, size_t concurrency_hint)
+ : boost::asio::detail::service_base<win_iocp_io_service>(io_service),
+ iocp_(),
+ outstanding_work_(0),
+ stopped_(0),
+ shutdown_(0),
+ dispatch_required_(0)
+{
+ BOOST_ASIO_HANDLER_TRACKING_INIT;
+
+ iocp_.handle = ::CreateIoCompletionPort(INVALID_HANDLE_VALUE, 0, 0,
+ static_cast<DWORD>((std::min<size_t>)(concurrency_hint, DWORD(~0))));
+ if (!iocp_.handle)
+ {
+ DWORD last_error = ::GetLastError();
+ boost::system::error_code ec(last_error,
+ boost::asio::error::get_system_category());
+ boost::asio::detail::throw_error(ec, "iocp");
+ }
+}
+
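+// Destroy all operations that will never be invoked, draining the timer
+// queues, the deferred completion queue and the completion port, and then
+// join the background timer thread.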
+void win_iocp_io_service::shutdown_service()
+{
+ ::InterlockedExchange(&shutdown_, 1);
+
+ if (timer_thread_.get())
+ {
+ LARGE_INTEGER timeout;
+ timeout.QuadPart = 1;
+ ::SetWaitableTimer(waitable_timer_.handle, &timeout, 1, 0, 0, FALSE);
+ }
+
+ while (::InterlockedExchangeAdd(&outstanding_work_, 0) > 0)
+ {
+ op_queue<win_iocp_operation> ops;
+ timer_queues_.get_all_timers(ops);
+ ops.push(completed_ops_);
+ if (!ops.empty())
+ {
+ while (win_iocp_operation* op = ops.front())
+ {
+ ops.pop();
+ ::InterlockedDecrement(&outstanding_work_);
+ op->destroy();
+ }
+ }
+ else
+ {
+ DWORD bytes_transferred = 0;
+ dword_ptr_t completion_key = 0;
+ LPOVERLAPPED overlapped = 0;
+ ::GetQueuedCompletionStatus(iocp_.handle, &bytes_transferred,
+ &completion_key, &overlapped, gqcs_timeout);
+ if (overlapped)
+ {
+ ::InterlockedDecrement(&outstanding_work_);
+ static_cast<win_iocp_operation*>(overlapped)->destroy();
+ }
+ }
+ }
+
+ if (timer_thread_.get())
+ timer_thread_->join();
+}
+
+boost::system::error_code win_iocp_io_service::register_handle(
+ HANDLE handle, boost::system::error_code& ec)
+{
+ if (::CreateIoCompletionPort(handle, iocp_.handle, 0, 0) == 0)
+ {
+ DWORD last_error = ::GetLastError();
+ ec = boost::system::error_code(last_error,
+ boost::asio::error::get_system_category());
+ }
+ else
+ {
+ ec = boost::system::error_code();
+ }
+ return ec;
+}
+
+size_t win_iocp_io_service::run(boost::system::error_code& ec)
+{
+ if (::InterlockedExchangeAdd(&outstanding_work_, 0) == 0)
+ {
+ InterlockedExchange(&stopped_, 1);
+ ec = boost::system::error_code();
+ return 0;
+ }
+
+ call_stack<win_iocp_io_service>::context ctx(this);
+
+ size_t n = 0;
+ while (do_one(true, ec))
+ if (n != (std::numeric_limits<size_t>::max)())
+ ++n;
+ return n;
+}
+
+size_t win_iocp_io_service::run_one(boost::system::error_code& ec)
+{
+ if (::InterlockedExchangeAdd(&outstanding_work_, 0) == 0)
+ {
+ InterlockedExchange(&stopped_, 1);
+ ec = boost::system::error_code();
+ return 0;
+ }
+
+ call_stack<win_iocp_io_service>::context ctx(this);
+
+ return do_one(true, ec);
+}
+
+size_t win_iocp_io_service::poll(boost::system::error_code& ec)
+{
+ if (::InterlockedExchangeAdd(&outstanding_work_, 0) == 0)
+ {
+ InterlockedExchange(&stopped_, 1);
+ ec = boost::system::error_code();
+ return 0;
+ }
+
+ call_stack<win_iocp_io_service>::context ctx(this);
+
+ size_t n = 0;
+ while (do_one(false, ec))
+ if (n != (std::numeric_limits<size_t>::max)())
+ ++n;
+ return n;
+}
+
+size_t win_iocp_io_service::poll_one(boost::system::error_code& ec)
+{
+ if (::InterlockedExchangeAdd(&outstanding_work_, 0) == 0)
+ {
+ InterlockedExchange(&stopped_, 1);
+ ec = boost::system::error_code();
+ return 0;
+ }
+
+ call_stack<win_iocp_io_service>::context ctx(this);
+
+ return do_one(false, ec);
+}
+
+void win_iocp_io_service::stop()
+{
+ if (::InterlockedExchange(&stopped_, 1) == 0)
+ {
+ if (!::PostQueuedCompletionStatus(iocp_.handle, 0, 0, 0))
+ {
+ DWORD last_error = ::GetLastError();
+ boost::system::error_code ec(last_error,
+ boost::asio::error::get_system_category());
+ boost::asio::detail::throw_error(ec, "pqcs");
+ }
+ }
+}
+
+void win_iocp_io_service::post_deferred_completion(win_iocp_operation* op)
+{
+ // Flag the operation as ready.
+ op->ready_ = 1;
+
+ // Enqueue the operation on the I/O completion port.
+ if (!::PostQueuedCompletionStatus(iocp_.handle,
+ 0, overlapped_contains_result, op))
+ {
+ // Out of resources. Put on completed queue instead.
+ mutex::scoped_lock lock(dispatch_mutex_);
+ completed_ops_.push(op);
+ ::InterlockedExchange(&dispatch_required_, 1);
+ }
+}
+
+void win_iocp_io_service::post_deferred_completions(
+ op_queue<win_iocp_operation>& ops)
+{
+ while (win_iocp_operation* op = ops.front())
+ {
+ ops.pop();
+
+ // Flag the operation as ready.
+ op->ready_ = 1;
+
+ // Enqueue the operation on the I/O completion port.
+ if (!::PostQueuedCompletionStatus(iocp_.handle,
+ 0, overlapped_contains_result, op))
+ {
+ // Out of resources. Put on completed queue instead.
+ mutex::scoped_lock lock(dispatch_mutex_);
+ completed_ops_.push(op);
+ completed_ops_.push(ops);
+ ::InterlockedExchange(&dispatch_required_, 1);
+ }
+ }
+}
+
+void win_iocp_io_service::abandon_operations(
+ op_queue<win_iocp_operation>& ops)
+{
+ while (win_iocp_operation* op = ops.front())
+ {
+ ops.pop();
+ ::InterlockedDecrement(&outstanding_work_);
+ op->destroy();
+ }
+}
+
+void win_iocp_io_service::on_pending(win_iocp_operation* op)
+{
+ if (::InterlockedCompareExchange(&op->ready_, 1, 0) == 1)
+ {
+ // Enqueue the operation on the I/O completion port.
+ if (!::PostQueuedCompletionStatus(iocp_.handle,
+ 0, overlapped_contains_result, op))
+ {
+ // Out of resources. Put on completed queue instead.
+ mutex::scoped_lock lock(dispatch_mutex_);
+ completed_ops_.push(op);
+ ::InterlockedExchange(&dispatch_required_, 1);
+ }
+ }
+}
+
+void win_iocp_io_service::on_completion(win_iocp_operation* op,
+ DWORD last_error, DWORD bytes_transferred)
+{
+ // Flag that the operation is ready for invocation.
+ op->ready_ = 1;
+
+ // Store results in the OVERLAPPED structure.
+ op->Internal = reinterpret_cast<ulong_ptr_t>(
+ &boost::asio::error::get_system_category());
+ op->Offset = last_error;
+ op->OffsetHigh = bytes_transferred;
+
+ // Enqueue the operation on the I/O completion port.
+ if (!::PostQueuedCompletionStatus(iocp_.handle,
+ 0, overlapped_contains_result, op))
+ {
+ // Out of resources. Put on completed queue instead.
+ mutex::scoped_lock lock(dispatch_mutex_);
+ completed_ops_.push(op);
+ ::InterlockedExchange(&dispatch_required_, 1);
+ }
+}
+
+void win_iocp_io_service::on_completion(win_iocp_operation* op,
+ const boost::system::error_code& ec, DWORD bytes_transferred)
+{
+ // Flag that the operation is ready for invocation.
+ op->ready_ = 1;
+
+ // Store results in the OVERLAPPED structure.
+ op->Internal = reinterpret_cast<ulong_ptr_t>(&ec.category());
+ op->Offset = ec.value();
+ op->OffsetHigh = bytes_transferred;
+
+ // Enqueue the operation on the I/O completion port.
+ if (!::PostQueuedCompletionStatus(iocp_.handle,
+ 0, overlapped_contains_result, op))
+ {
+ // Out of resources. Put on completed queue instead.
+ mutex::scoped_lock lock(dispatch_mutex_);
+ completed_ops_.push(op);
+ ::InterlockedExchange(&dispatch_required_, 1);
+ }
+}
+
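+// Process at most one completed operation or handler. Returns 1 if a handler
+// was executed, and 0 if the service has been stopped, an error occurred, or
+// (when polling) no handler was ready to run.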
+size_t win_iocp_io_service::do_one(bool block, boost::system::error_code& ec)
+{
+ for (;;)
+ {
+ // Try to acquire responsibility for dispatching timers and completed ops.
+ if (::InterlockedCompareExchange(&dispatch_required_, 0, 1) == 1)
+ {
+ mutex::scoped_lock lock(dispatch_mutex_);
+
+ // Dispatch pending timers and operations.
+ op_queue<win_iocp_operation> ops;
+ ops.push(completed_ops_);
+ timer_queues_.get_ready_timers(ops);
+ post_deferred_completions(ops);
+ update_timeout();
+ }
+
+ // Get the next operation from the queue.
+ DWORD bytes_transferred = 0;
+ dword_ptr_t completion_key = 0;
+ LPOVERLAPPED overlapped = 0;
+ ::SetLastError(0);
+ BOOL ok = ::GetQueuedCompletionStatus(iocp_.handle, &bytes_transferred,
+ &completion_key, &overlapped, block ? gqcs_timeout : 0);
+ DWORD last_error = ::GetLastError();
+
+ if (overlapped)
+ {
+ win_iocp_operation* op = static_cast<win_iocp_operation*>(overlapped);
+ boost::system::error_code result_ec(last_error,
+ boost::asio::error::get_system_category());
+
+ // We may have been passed the last_error and bytes_transferred in the
+ // OVERLAPPED structure itself.
+ if (completion_key == overlapped_contains_result)
+ {
+ result_ec = boost::system::error_code(static_cast<int>(op->Offset),
+ *reinterpret_cast<boost::system::error_category*>(op->Internal));
+ bytes_transferred = op->OffsetHigh;
+ }
+
+ // Otherwise ensure any result has been saved into the OVERLAPPED
+ // structure.
+ else
+ {
+ op->Internal = reinterpret_cast<ulong_ptr_t>(&result_ec.category());
+ op->Offset = result_ec.value();
+ op->OffsetHigh = bytes_transferred;
+ }
+
+ // Dispatch the operation only if ready. The operation may not be ready
+ // if the initiating function (e.g. a call to WSARecv) has not yet
+ // returned. This is because the initiating function still wants access
+ // to the operation's OVERLAPPED structure.
+ if (::InterlockedCompareExchange(&op->ready_, 1, 0) == 1)
+ {
+ // Ensure the count of outstanding work is decremented on block exit.
+ work_finished_on_block_exit on_exit = { this };
+ (void)on_exit;
+
+ op->complete(*this, result_ec, bytes_transferred);
+ ec = boost::system::error_code();
+ return 1;
+ }
+ }
+ else if (!ok)
+ {
+ if (last_error != WAIT_TIMEOUT)
+ {
+ ec = boost::system::error_code(last_error,
+ boost::asio::error::get_system_category());
+ return 0;
+ }
+
+ // If we're not polling we need to keep going until we get a real handler.
+ if (block)
+ continue;
+
+ ec = boost::system::error_code();
+ return 0;
+ }
+ else if (completion_key == wake_for_dispatch)
+ {
+ // We have been woken up to try to acquire responsibility for dispatching
+ // timers and completed operations.
+ }
+ else
+ {
+ // The stopped_ flag is always checked to ensure that any leftover
+ // interrupts from a previous run invocation are ignored.
+ if (::InterlockedExchangeAdd(&stopped_, 0) != 0)
+ {
+ // Wake up next thread that is blocked on GetQueuedCompletionStatus.
+ if (!::PostQueuedCompletionStatus(iocp_.handle, 0, 0, 0))
+ {
+ last_error = ::GetLastError();
+ ec = boost::system::error_code(last_error,
+ boost::asio::error::get_system_category());
+ return 0;
+ }
+
+ ec = boost::system::error_code();
+ return 0;
+ }
+ }
+ }
+}
+
+void win_iocp_io_service::do_add_timer_queue(timer_queue_base& queue)
+{
+ mutex::scoped_lock lock(dispatch_mutex_);
+
+ timer_queues_.insert(&queue);
+
+ if (!waitable_timer_.handle)
+ {
+ waitable_timer_.handle = ::CreateWaitableTimer(0, FALSE, 0);
+ if (waitable_timer_.handle == 0)
+ {
+ DWORD last_error = ::GetLastError();
+ boost::system::error_code ec(last_error,
+ boost::asio::error::get_system_category());
+ boost::asio::detail::throw_error(ec, "timer");
+ }
+
+ LARGE_INTEGER timeout;
+ timeout.QuadPart = -max_timeout_usec;
+ timeout.QuadPart *= 10;
+ ::SetWaitableTimer(waitable_timer_.handle,
+ &timeout, max_timeout_msec, 0, 0, FALSE);
+ }
+
+ if (!timer_thread_.get())
+ {
+ timer_thread_function thread_function = { this };
+ timer_thread_.reset(new thread(thread_function, 65536));
+ }
+}
+
+void win_iocp_io_service::do_remove_timer_queue(timer_queue_base& queue)
+{
+ mutex::scoped_lock lock(dispatch_mutex_);
+
+ timer_queues_.erase(&queue);
+}
+
+void win_iocp_io_service::update_timeout()
+{
+ if (timer_thread_.get())
+ {
+ // There's no point updating the waitable timer if the new timeout period
+ // exceeds the maximum timeout. In that case, we might as well wait for the
+ // existing period of the timer to expire.
+ long timeout_usec = timer_queues_.wait_duration_usec(max_timeout_usec);
+ if (timeout_usec < max_timeout_usec)
+ {
+ LARGE_INTEGER timeout;
+ timeout.QuadPart = -timeout_usec;
+ timeout.QuadPart *= 10;
+ ::SetWaitableTimer(waitable_timer_.handle,
+ &timeout, max_timeout_msec, 0, 0, FALSE);
+ }
+ }
+}
+
+} // namespace detail
+} // namespace asio
+} // namespace boost
+
+#include <boost/asio/detail/pop_options.hpp>
+
+#endif // defined(BOOST_ASIO_HAS_IOCP)
+
+#endif // BOOST_ASIO_DETAIL_IMPL_WIN_IOCP_IO_SERVICE_IPP
diff --git a/boost/asio/detail/impl/win_iocp_serial_port_service.ipp b/boost/asio/detail/impl/win_iocp_serial_port_service.ipp
new file mode 100644
index 0000000000..e98ad87361
--- /dev/null
+++ b/boost/asio/detail/impl/win_iocp_serial_port_service.ipp
@@ -0,0 +1,182 @@
+//
+// detail/impl/win_iocp_serial_port_service.ipp
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+//
+// Copyright (c) 2003-2012 Christopher M. Kohlhoff (chris at kohlhoff dot com)
+// Copyright (c) 2008 Rep Invariant Systems, Inc. (info@repinvariant.com)
+//
+// Distributed under the Boost Software License, Version 1.0. (See accompanying
+// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+//
+
+#ifndef BOOST_ASIO_DETAIL_IMPL_WIN_IOCP_SERIAL_PORT_SERVICE_IPP
+#define BOOST_ASIO_DETAIL_IMPL_WIN_IOCP_SERIAL_PORT_SERVICE_IPP
+
+#if defined(_MSC_VER) && (_MSC_VER >= 1200)
+# pragma once
+#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
+
+#include <boost/asio/detail/config.hpp>
+
+#if defined(BOOST_ASIO_HAS_IOCP) && defined(BOOST_ASIO_HAS_SERIAL_PORT)
+
+#include <cstring>
+#include <boost/asio/detail/win_iocp_serial_port_service.hpp>
+
+#include <boost/asio/detail/push_options.hpp>
+
+namespace boost {
+namespace asio {
+namespace detail {
+
+win_iocp_serial_port_service::win_iocp_serial_port_service(
+ boost::asio::io_service& io_service)
+ : handle_service_(io_service)
+{
+}
+
+void win_iocp_serial_port_service::shutdown_service()
+{
+}
+
+boost::system::error_code win_iocp_serial_port_service::open(
+ win_iocp_serial_port_service::implementation_type& impl,
+ const std::string& device, boost::system::error_code& ec)
+{
+ if (is_open(impl))
+ {
+ ec = boost::asio::error::already_open;
+ return ec;
+ }
+
+ // For convenience, add a leading \\.\ sequence if not already present.
+ std::string name = (device[0] == '\\') ? device : "\\\\.\\" + device;
+
+ // Open a handle to the serial port.
+ ::HANDLE handle = ::CreateFileA(name.c_str(),
+ GENERIC_READ | GENERIC_WRITE, 0, 0,
+ OPEN_EXISTING, FILE_FLAG_OVERLAPPED, 0);
+ if (handle == INVALID_HANDLE_VALUE)
+ {
+ DWORD last_error = ::GetLastError();
+ ec = boost::system::error_code(last_error,
+ boost::asio::error::get_system_category());
+ return ec;
+ }
+
+ // Determine the initial serial port parameters.
+ using namespace std; // For memset.
+ ::DCB dcb;
+ memset(&dcb, 0, sizeof(DCB));
+ dcb.DCBlength = sizeof(DCB);
+ if (!::GetCommState(handle, &dcb))
+ {
+ DWORD last_error = ::GetLastError();
+ ::CloseHandle(handle);
+ ec = boost::system::error_code(last_error,
+ boost::asio::error::get_system_category());
+ return ec;
+ }
+
+ // Set some default serial port parameters. This implementation does not
+ // support changing these, so they might as well be in a known state.
+ dcb.fBinary = TRUE; // Win32 only supports binary mode.
+ dcb.fDsrSensitivity = FALSE;
+ dcb.fNull = FALSE; // Do not ignore NULL characters.
+ dcb.fAbortOnError = FALSE; // Ignore serial framing errors.
+ if (!::SetCommState(handle, &dcb))
+ {
+ DWORD last_error = ::GetLastError();
+ ::CloseHandle(handle);
+ ec = boost::system::error_code(last_error,
+ boost::asio::error::get_system_category());
+ return ec;
+ }
+
+ // Set up timeouts so that the serial port will behave similarly to a
+ // network socket. Reads wait for at least one byte, then return with
+ // whatever they have. Writes return once everything is out the door.
+ ::COMMTIMEOUTS timeouts;
+ timeouts.ReadIntervalTimeout = 1;
+ timeouts.ReadTotalTimeoutMultiplier = 0;
+ timeouts.ReadTotalTimeoutConstant = 0;
+ timeouts.WriteTotalTimeoutMultiplier = 0;
+ timeouts.WriteTotalTimeoutConstant = 0;
+ if (!::SetCommTimeouts(handle, &timeouts))
+ {
+ DWORD last_error = ::GetLastError();
+ ::CloseHandle(handle);
+ ec = boost::system::error_code(last_error,
+ boost::asio::error::get_system_category());
+ return ec;
+ }
+
+ // We're done. Take ownership of the serial port handle.
+ if (handle_service_.assign(impl, handle, ec))
+ ::CloseHandle(handle);
+ return ec;
+}
+
+boost::system::error_code win_iocp_serial_port_service::do_set_option(
+ win_iocp_serial_port_service::implementation_type& impl,
+ win_iocp_serial_port_service::store_function_type store,
+ const void* option, boost::system::error_code& ec)
+{
+  using namespace std; // For memset.
+
+ ::DCB dcb;
+ memset(&dcb, 0, sizeof(DCB));
+ dcb.DCBlength = sizeof(DCB);
+ if (!::GetCommState(handle_service_.native_handle(impl), &dcb))
+ {
+ DWORD last_error = ::GetLastError();
+ ec = boost::system::error_code(last_error,
+ boost::asio::error::get_system_category());
+ return ec;
+ }
+
+ if (store(option, dcb, ec))
+ return ec;
+
+ if (!::SetCommState(handle_service_.native_handle(impl), &dcb))
+ {
+ DWORD last_error = ::GetLastError();
+ ec = boost::system::error_code(last_error,
+ boost::asio::error::get_system_category());
+ return ec;
+ }
+
+ ec = boost::system::error_code();
+ return ec;
+}
+
+boost::system::error_code win_iocp_serial_port_service::do_get_option(
+ const win_iocp_serial_port_service::implementation_type& impl,
+ win_iocp_serial_port_service::load_function_type load,
+ void* option, boost::system::error_code& ec) const
+{
+ using namespace std; // For memset.
+
+ ::DCB dcb;
+ memset(&dcb, 0, sizeof(DCB));
+ dcb.DCBlength = sizeof(DCB);
+ if (!::GetCommState(handle_service_.native_handle(impl), &dcb))
+ {
+ DWORD last_error = ::GetLastError();
+ ec = boost::system::error_code(last_error,
+ boost::asio::error::get_system_category());
+ return ec;
+ }
+
+ return load(option, dcb, ec);
+}
+
+} // namespace detail
+} // namespace asio
+} // namespace boost
+
+#include <boost/asio/detail/pop_options.hpp>
+
+#endif // defined(BOOST_ASIO_HAS_IOCP) && defined(BOOST_ASIO_HAS_SERIAL_PORT)
+
+#endif // BOOST_ASIO_DETAIL_IMPL_WIN_IOCP_SERIAL_PORT_SERVICE_IPP
diff --git a/boost/asio/detail/impl/win_iocp_socket_service_base.ipp b/boost/asio/detail/impl/win_iocp_socket_service_base.ipp
new file mode 100644
index 0000000000..0466e33cb6
--- /dev/null
+++ b/boost/asio/detail/impl/win_iocp_socket_service_base.ipp
@@ -0,0 +1,657 @@
+//
+// detail/impl/win_iocp_socket_service_base.ipp
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+//
+// Copyright (c) 2003-2012 Christopher M. Kohlhoff (chris at kohlhoff dot com)
+//
+// Distributed under the Boost Software License, Version 1.0. (See accompanying
+// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+//
+
+#ifndef BOOST_ASIO_DETAIL_IMPL_WIN_IOCP_SOCKET_SERVICE_BASE_IPP
+#define BOOST_ASIO_DETAIL_IMPL_WIN_IOCP_SOCKET_SERVICE_BASE_IPP
+
+#if defined(_MSC_VER) && (_MSC_VER >= 1200)
+# pragma once
+#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
+
+#include <boost/asio/detail/config.hpp>
+
+#if defined(BOOST_ASIO_HAS_IOCP)
+
+#include <boost/asio/detail/win_iocp_socket_service_base.hpp>
+
+#include <boost/asio/detail/push_options.hpp>
+
+namespace boost {
+namespace asio {
+namespace detail {
+
+win_iocp_socket_service_base::win_iocp_socket_service_base(
+ boost::asio::io_service& io_service)
+ : io_service_(io_service),
+ iocp_service_(use_service<win_iocp_io_service>(io_service)),
+ reactor_(0),
+ mutex_(),
+ impl_list_(0)
+{
+}
+
+void win_iocp_socket_service_base::shutdown_service()
+{
+ // Close all implementations, causing all operations to complete.
+ boost::asio::detail::mutex::scoped_lock lock(mutex_);
+ base_implementation_type* impl = impl_list_;
+ while (impl)
+ {
+ boost::system::error_code ignored_ec;
+ close_for_destruction(*impl);
+ impl = impl->next_;
+ }
+}
+
+void win_iocp_socket_service_base::construct(
+ win_iocp_socket_service_base::base_implementation_type& impl)
+{
+ impl.socket_ = invalid_socket;
+ impl.state_ = 0;
+ impl.cancel_token_.reset();
+#if defined(BOOST_ASIO_ENABLE_CANCELIO)
+ impl.safe_cancellation_thread_id_ = 0;
+#endif // defined(BOOST_ASIO_ENABLE_CANCELIO)
+
+ // Insert implementation into linked list of all implementations.
+ boost::asio::detail::mutex::scoped_lock lock(mutex_);
+ impl.next_ = impl_list_;
+ impl.prev_ = 0;
+ if (impl_list_)
+ impl_list_->prev_ = &impl;
+ impl_list_ = &impl;
+}
+
+void win_iocp_socket_service_base::base_move_construct(
+ win_iocp_socket_service_base::base_implementation_type& impl,
+ win_iocp_socket_service_base::base_implementation_type& other_impl)
+{
+ impl.socket_ = other_impl.socket_;
+ other_impl.socket_ = invalid_socket;
+
+ impl.state_ = other_impl.state_;
+ other_impl.state_ = 0;
+
+ impl.cancel_token_ = other_impl.cancel_token_;
+ other_impl.cancel_token_.reset();
+
+#if defined(BOOST_ASIO_ENABLE_CANCELIO)
+ impl.safe_cancellation_thread_id_ = other_impl.safe_cancellation_thread_id_;
+ other_impl.safe_cancellation_thread_id_ = 0;
+#endif // defined(BOOST_ASIO_ENABLE_CANCELIO)
+
+ // Insert implementation into linked list of all implementations.
+ boost::asio::detail::mutex::scoped_lock lock(mutex_);
+ impl.next_ = impl_list_;
+ impl.prev_ = 0;
+ if (impl_list_)
+ impl_list_->prev_ = &impl;
+ impl_list_ = &impl;
+}
+
+void win_iocp_socket_service_base::base_move_assign(
+ win_iocp_socket_service_base::base_implementation_type& impl,
+ win_iocp_socket_service_base& other_service,
+ win_iocp_socket_service_base::base_implementation_type& other_impl)
+{
+ close_for_destruction(impl);
+
+ if (this != &other_service)
+ {
+ // Remove implementation from linked list of all implementations.
+ boost::asio::detail::mutex::scoped_lock lock(mutex_);
+ if (impl_list_ == &impl)
+ impl_list_ = impl.next_;
+ if (impl.prev_)
+ impl.prev_->next_ = impl.next_;
+ if (impl.next_)
+ impl.next_->prev_= impl.prev_;
+ impl.next_ = 0;
+ impl.prev_ = 0;
+ }
+
+ impl.socket_ = other_impl.socket_;
+ other_impl.socket_ = invalid_socket;
+
+ impl.state_ = other_impl.state_;
+ other_impl.state_ = 0;
+
+ impl.cancel_token_ = other_impl.cancel_token_;
+ other_impl.cancel_token_.reset();
+
+#if defined(BOOST_ASIO_ENABLE_CANCELIO)
+ impl.safe_cancellation_thread_id_ = other_impl.safe_cancellation_thread_id_;
+ other_impl.safe_cancellation_thread_id_ = 0;
+#endif // defined(BOOST_ASIO_ENABLE_CANCELIO)
+
+ if (this != &other_service)
+ {
+ // Insert implementation into linked list of all implementations.
+ boost::asio::detail::mutex::scoped_lock lock(other_service.mutex_);
+ impl.next_ = other_service.impl_list_;
+ impl.prev_ = 0;
+ if (other_service.impl_list_)
+ other_service.impl_list_->prev_ = &impl;
+ other_service.impl_list_ = &impl;
+ }
+}
+
+void win_iocp_socket_service_base::destroy(
+ win_iocp_socket_service_base::base_implementation_type& impl)
+{
+ close_for_destruction(impl);
+
+ // Remove implementation from linked list of all implementations.
+ boost::asio::detail::mutex::scoped_lock lock(mutex_);
+ if (impl_list_ == &impl)
+ impl_list_ = impl.next_;
+ if (impl.prev_)
+ impl.prev_->next_ = impl.next_;
+ if (impl.next_)
+ impl.next_->prev_= impl.prev_;
+ impl.next_ = 0;
+ impl.prev_ = 0;
+}
+
+boost::system::error_code win_iocp_socket_service_base::close(
+ win_iocp_socket_service_base::base_implementation_type& impl,
+ boost::system::error_code& ec)
+{
+ if (is_open(impl))
+ {
+ BOOST_ASIO_HANDLER_OPERATION(("socket", &impl, "close"));
+
+ // Check if the reactor was created, in which case we need to close the
+ // socket on the reactor as well to cancel any operations that might be
+ // running there.
+ reactor* r = static_cast<reactor*>(
+ interlocked_compare_exchange_pointer(
+ reinterpret_cast<void**>(&reactor_), 0, 0));
+ if (r)
+ r->deregister_descriptor(impl.socket_, impl.reactor_data_, true);
+ }
+
+ socket_ops::close(impl.socket_, impl.state_, false, ec);
+
+ impl.socket_ = invalid_socket;
+ impl.state_ = 0;
+ impl.cancel_token_.reset();
+#if defined(BOOST_ASIO_ENABLE_CANCELIO)
+ impl.safe_cancellation_thread_id_ = 0;
+#endif // defined(BOOST_ASIO_ENABLE_CANCELIO)
+
+ return ec;
+}
+
+boost::system::error_code win_iocp_socket_service_base::cancel(
+ win_iocp_socket_service_base::base_implementation_type& impl,
+ boost::system::error_code& ec)
+{
+ if (!is_open(impl))
+ {
+ ec = boost::asio::error::bad_descriptor;
+ return ec;
+ }
+
+ BOOST_ASIO_HANDLER_OPERATION(("socket", &impl, "cancel"));
+
+ if (FARPROC cancel_io_ex_ptr = ::GetProcAddress(
+ ::GetModuleHandleA("KERNEL32"), "CancelIoEx"))
+ {
+    // The running version of Windows provides CancelIoEx, which supports
+    // cancellation of operations from any thread.
+ typedef BOOL (WINAPI* cancel_io_ex_t)(HANDLE, LPOVERLAPPED);
+ cancel_io_ex_t cancel_io_ex = (cancel_io_ex_t)cancel_io_ex_ptr;
+ socket_type sock = impl.socket_;
+ HANDLE sock_as_handle = reinterpret_cast<HANDLE>(sock);
+ if (!cancel_io_ex(sock_as_handle, 0))
+ {
+ DWORD last_error = ::GetLastError();
+ if (last_error == ERROR_NOT_FOUND)
+ {
+ // ERROR_NOT_FOUND means that there were no operations to be
+ // cancelled. We swallow this error to match the behaviour on other
+ // platforms.
+ ec = boost::system::error_code();
+ }
+ else
+ {
+ ec = boost::system::error_code(last_error,
+ boost::asio::error::get_system_category());
+ }
+ }
+ else
+ {
+ ec = boost::system::error_code();
+ }
+ }
+#if defined(BOOST_ASIO_ENABLE_CANCELIO)
+ else if (impl.safe_cancellation_thread_id_ == 0)
+ {
+ // No operations have been started, so there's nothing to cancel.
+ ec = boost::system::error_code();
+ }
+ else if (impl.safe_cancellation_thread_id_ == ::GetCurrentThreadId())
+ {
+ // Asynchronous operations have been started from the current thread only,
+ // so it is safe to try to cancel them using CancelIo.
+ socket_type sock = impl.socket_;
+ HANDLE sock_as_handle = reinterpret_cast<HANDLE>(sock);
+ if (!::CancelIo(sock_as_handle))
+ {
+ DWORD last_error = ::GetLastError();
+ ec = boost::system::error_code(last_error,
+ boost::asio::error::get_system_category());
+ }
+ else
+ {
+ ec = boost::system::error_code();
+ }
+ }
+ else
+ {
+ // Asynchronous operations have been started from more than one thread,
+ // so cancellation is not safe.
+ ec = boost::asio::error::operation_not_supported;
+ }
+#else // defined(BOOST_ASIO_ENABLE_CANCELIO)
+ else
+ {
+ // Cancellation is not supported as CancelIo may not be used.
+ ec = boost::asio::error::operation_not_supported;
+ }
+#endif // defined(BOOST_ASIO_ENABLE_CANCELIO)
+
+ // Cancel any operations started via the reactor.
+ if (!ec)
+ {
+ reactor* r = static_cast<reactor*>(
+ interlocked_compare_exchange_pointer(
+ reinterpret_cast<void**>(&reactor_), 0, 0));
+ if (r)
+ r->cancel_ops(impl.socket_, impl.reactor_data_);
+ }
+
+ return ec;
+}
+
+boost::system::error_code win_iocp_socket_service_base::do_open(
+ win_iocp_socket_service_base::base_implementation_type& impl,
+ int family, int type, int protocol, boost::system::error_code& ec)
+{
+ if (is_open(impl))
+ {
+ ec = boost::asio::error::already_open;
+ return ec;
+ }
+
+ socket_holder sock(socket_ops::socket(family, type, protocol, ec));
+ if (sock.get() == invalid_socket)
+ return ec;
+
+ HANDLE sock_as_handle = reinterpret_cast<HANDLE>(sock.get());
+ if (iocp_service_.register_handle(sock_as_handle, ec))
+ return ec;
+
+ impl.socket_ = sock.release();
+ switch (type)
+ {
+ case SOCK_STREAM: impl.state_ = socket_ops::stream_oriented; break;
+ case SOCK_DGRAM: impl.state_ = socket_ops::datagram_oriented; break;
+ default: impl.state_ = 0; break;
+ }
+ impl.cancel_token_.reset(static_cast<void*>(0), socket_ops::noop_deleter());
+ ec = boost::system::error_code();
+ return ec;
+}
+
+boost::system::error_code win_iocp_socket_service_base::do_assign(
+ win_iocp_socket_service_base::base_implementation_type& impl,
+ int type, socket_type native_socket, boost::system::error_code& ec)
+{
+ if (is_open(impl))
+ {
+ ec = boost::asio::error::already_open;
+ return ec;
+ }
+
+ HANDLE sock_as_handle = reinterpret_cast<HANDLE>(native_socket);
+ if (iocp_service_.register_handle(sock_as_handle, ec))
+ return ec;
+
+ impl.socket_ = native_socket;
+ switch (type)
+ {
+ case SOCK_STREAM: impl.state_ = socket_ops::stream_oriented; break;
+ case SOCK_DGRAM: impl.state_ = socket_ops::datagram_oriented; break;
+ default: impl.state_ = 0; break;
+ }
+ impl.cancel_token_.reset(static_cast<void*>(0), socket_ops::noop_deleter());
+ ec = boost::system::error_code();
+ return ec;
+}
+
+void win_iocp_socket_service_base::start_send_op(
+ win_iocp_socket_service_base::base_implementation_type& impl,
+ WSABUF* buffers, std::size_t buffer_count,
+ socket_base::message_flags flags, bool noop, operation* op)
+{
+ update_cancellation_thread_id(impl);
+ iocp_service_.work_started();
+
+ if (noop)
+ iocp_service_.on_completion(op);
+ else if (!is_open(impl))
+ iocp_service_.on_completion(op, boost::asio::error::bad_descriptor);
+ else
+ {
+ DWORD bytes_transferred = 0;
+ int result = ::WSASend(impl.socket_, buffers,
+ static_cast<DWORD>(buffer_count), &bytes_transferred, flags, op, 0);
+ DWORD last_error = ::WSAGetLastError();
+ if (last_error == ERROR_PORT_UNREACHABLE)
+ last_error = WSAECONNREFUSED;
+ if (result != 0 && last_error != WSA_IO_PENDING)
+ iocp_service_.on_completion(op, last_error, bytes_transferred);
+ else
+ iocp_service_.on_pending(op);
+ }
+}
+
+void win_iocp_socket_service_base::start_send_to_op(
+ win_iocp_socket_service_base::base_implementation_type& impl,
+ WSABUF* buffers, std::size_t buffer_count,
+ const socket_addr_type* addr, int addrlen,
+ socket_base::message_flags flags, operation* op)
+{
+ update_cancellation_thread_id(impl);
+ iocp_service_.work_started();
+
+ if (!is_open(impl))
+ iocp_service_.on_completion(op, boost::asio::error::bad_descriptor);
+ else
+ {
+ DWORD bytes_transferred = 0;
+ int result = ::WSASendTo(impl.socket_, buffers,
+ static_cast<DWORD>(buffer_count),
+ &bytes_transferred, flags, addr, addrlen, op, 0);
+ DWORD last_error = ::WSAGetLastError();
+ if (last_error == ERROR_PORT_UNREACHABLE)
+ last_error = WSAECONNREFUSED;
+ if (result != 0 && last_error != WSA_IO_PENDING)
+ iocp_service_.on_completion(op, last_error, bytes_transferred);
+ else
+ iocp_service_.on_pending(op);
+ }
+}
+
+void win_iocp_socket_service_base::start_receive_op(
+ win_iocp_socket_service_base::base_implementation_type& impl,
+ WSABUF* buffers, std::size_t buffer_count,
+ socket_base::message_flags flags, bool noop, operation* op)
+{
+ update_cancellation_thread_id(impl);
+ iocp_service_.work_started();
+
+ if (noop)
+ iocp_service_.on_completion(op);
+ else if (!is_open(impl))
+ iocp_service_.on_completion(op, boost::asio::error::bad_descriptor);
+ else
+ {
+ DWORD bytes_transferred = 0;
+ DWORD recv_flags = flags;
+ int result = ::WSARecv(impl.socket_, buffers,
+ static_cast<DWORD>(buffer_count),
+ &bytes_transferred, &recv_flags, op, 0);
+ DWORD last_error = ::WSAGetLastError();
+ if (last_error == ERROR_NETNAME_DELETED)
+ last_error = WSAECONNRESET;
+ else if (last_error == ERROR_PORT_UNREACHABLE)
+ last_error = WSAECONNREFUSED;
+ if (result != 0 && last_error != WSA_IO_PENDING)
+ iocp_service_.on_completion(op, last_error, bytes_transferred);
+ else
+ iocp_service_.on_pending(op);
+ }
+}
+
+void win_iocp_socket_service_base::start_null_buffers_receive_op(
+ win_iocp_socket_service_base::base_implementation_type& impl,
+ socket_base::message_flags flags, reactor_op* op)
+{
+ if ((impl.state_ & socket_ops::stream_oriented) != 0)
+ {
+ // For stream sockets on Windows, we may issue a 0-byte overlapped
+ // WSARecv to wait until there is data available on the socket.
+ ::WSABUF buf = { 0, 0 };
+ start_receive_op(impl, &buf, 1, flags, false, op);
+ }
+ else
+ {
+ start_reactor_op(impl,
+ (flags & socket_base::message_out_of_band)
+ ? reactor::except_op : reactor::read_op,
+ op);
+ }
+}
+
+void win_iocp_socket_service_base::start_receive_from_op(
+ win_iocp_socket_service_base::base_implementation_type& impl,
+ WSABUF* buffers, std::size_t buffer_count, socket_addr_type* addr,
+ socket_base::message_flags flags, int* addrlen, operation* op)
+{
+ update_cancellation_thread_id(impl);
+ iocp_service_.work_started();
+
+ if (!is_open(impl))
+ iocp_service_.on_completion(op, boost::asio::error::bad_descriptor);
+ else
+ {
+ DWORD bytes_transferred = 0;
+ DWORD recv_flags = flags;
+ int result = ::WSARecvFrom(impl.socket_, buffers,
+ static_cast<DWORD>(buffer_count),
+ &bytes_transferred, &recv_flags, addr, addrlen, op, 0);
+ DWORD last_error = ::WSAGetLastError();
+ if (last_error == ERROR_PORT_UNREACHABLE)
+ last_error = WSAECONNREFUSED;
+ if (result != 0 && last_error != WSA_IO_PENDING)
+ iocp_service_.on_completion(op, last_error, bytes_transferred);
+ else
+ iocp_service_.on_pending(op);
+ }
+}
+
+void win_iocp_socket_service_base::start_accept_op(
+ win_iocp_socket_service_base::base_implementation_type& impl,
+ bool peer_is_open, socket_holder& new_socket, int family, int type,
+ int protocol, void* output_buffer, DWORD address_length, operation* op)
+{
+ update_cancellation_thread_id(impl);
+ iocp_service_.work_started();
+
+ if (!is_open(impl))
+ iocp_service_.on_completion(op, boost::asio::error::bad_descriptor);
+ else if (peer_is_open)
+ iocp_service_.on_completion(op, boost::asio::error::already_open);
+ else
+ {
+ boost::system::error_code ec;
+ new_socket.reset(socket_ops::socket(family, type, protocol, ec));
+ if (new_socket.get() == invalid_socket)
+ iocp_service_.on_completion(op, ec);
+ else
+ {
+ DWORD bytes_read = 0;
+ BOOL result = ::AcceptEx(impl.socket_, new_socket.get(), output_buffer,
+ 0, address_length, address_length, &bytes_read, op);
+ DWORD last_error = ::WSAGetLastError();
+ if (!result && last_error != WSA_IO_PENDING)
+ iocp_service_.on_completion(op, last_error);
+ else
+ iocp_service_.on_pending(op);
+ }
+ }
+}
+
+void win_iocp_socket_service_base::restart_accept_op(
+ socket_type s, socket_holder& new_socket, int family, int type,
+ int protocol, void* output_buffer, DWORD address_length, operation* op)
+{
+ new_socket.reset();
+ iocp_service_.work_started();
+
+ boost::system::error_code ec;
+ new_socket.reset(socket_ops::socket(family, type, protocol, ec));
+ if (new_socket.get() == invalid_socket)
+ iocp_service_.on_completion(op, ec);
+ else
+ {
+ DWORD bytes_read = 0;
+ BOOL result = ::AcceptEx(s, new_socket.get(), output_buffer,
+ 0, address_length, address_length, &bytes_read, op);
+ DWORD last_error = ::WSAGetLastError();
+ if (!result && last_error != WSA_IO_PENDING)
+ iocp_service_.on_completion(op, last_error);
+ else
+ iocp_service_.on_pending(op);
+ }
+}
+
+void win_iocp_socket_service_base::start_reactor_op(
+ win_iocp_socket_service_base::base_implementation_type& impl,
+ int op_type, reactor_op* op)
+{
+ reactor& r = get_reactor();
+ update_cancellation_thread_id(impl);
+
+ if (is_open(impl))
+ {
+ r.start_op(op_type, impl.socket_, impl.reactor_data_, op, false);
+ return;
+ }
+ else
+ op->ec_ = boost::asio::error::bad_descriptor;
+
+ iocp_service_.post_immediate_completion(op);
+}
+
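+// Connect operations are not performed through the completion port. The
+// socket is switched to non-blocking mode, the connect is initiated, and the
+// reactor is used to detect when the connection attempt has completed.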
+void win_iocp_socket_service_base::start_connect_op(
+ win_iocp_socket_service_base::base_implementation_type& impl,
+ reactor_op* op, const socket_addr_type* addr, std::size_t addrlen)
+{
+ reactor& r = get_reactor();
+ update_cancellation_thread_id(impl);
+
+ if ((impl.state_ & socket_ops::non_blocking) != 0
+ || socket_ops::set_internal_non_blocking(
+ impl.socket_, impl.state_, true, op->ec_))
+ {
+ if (socket_ops::connect(impl.socket_, addr, addrlen, op->ec_) != 0)
+ {
+ if (op->ec_ == boost::asio::error::in_progress
+ || op->ec_ == boost::asio::error::would_block)
+ {
+ op->ec_ = boost::system::error_code();
+ r.start_op(reactor::connect_op, impl.socket_,
+ impl.reactor_data_, op, false);
+ return;
+ }
+ }
+ }
+
+ r.post_immediate_completion(op);
+}
+
+void win_iocp_socket_service_base::close_for_destruction(
+ win_iocp_socket_service_base::base_implementation_type& impl)
+{
+ if (is_open(impl))
+ {
+ BOOST_ASIO_HANDLER_OPERATION(("socket", &impl, "close"));
+
+ // Check if the reactor was created, in which case we need to close the
+ // socket on the reactor as well to cancel any operations that might be
+ // running there.
+ reactor* r = static_cast<reactor*>(
+ interlocked_compare_exchange_pointer(
+ reinterpret_cast<void**>(&reactor_), 0, 0));
+ if (r)
+ r->deregister_descriptor(impl.socket_, impl.reactor_data_, true);
+ }
+
+ boost::system::error_code ignored_ec;
+ socket_ops::close(impl.socket_, impl.state_, true, ignored_ec);
+ impl.socket_ = invalid_socket;
+ impl.state_ = 0;
+ impl.cancel_token_.reset();
+#if defined(BOOST_ASIO_ENABLE_CANCELIO)
+ impl.safe_cancellation_thread_id_ = 0;
+#endif // defined(BOOST_ASIO_ENABLE_CANCELIO)
+}
+
+void win_iocp_socket_service_base::update_cancellation_thread_id(
+ win_iocp_socket_service_base::base_implementation_type& impl)
+{
+#if defined(BOOST_ASIO_ENABLE_CANCELIO)
+ if (impl.safe_cancellation_thread_id_ == 0)
+ impl.safe_cancellation_thread_id_ = ::GetCurrentThreadId();
+ else if (impl.safe_cancellation_thread_id_ != ::GetCurrentThreadId())
+ impl.safe_cancellation_thread_id_ = ~DWORD(0);
+#else // defined(BOOST_ASIO_ENABLE_CANCELIO)
+ (void)impl;
+#endif // defined(BOOST_ASIO_ENABLE_CANCELIO)
+}
+
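+// Obtain the reactor used for operations that cannot be performed through
+// the completion port, creating and caching it on first use.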
+reactor& win_iocp_socket_service_base::get_reactor()
+{
+ reactor* r = static_cast<reactor*>(
+ interlocked_compare_exchange_pointer(
+ reinterpret_cast<void**>(&reactor_), 0, 0));
+ if (!r)
+ {
+ r = &(use_service<reactor>(io_service_));
+ interlocked_exchange_pointer(reinterpret_cast<void**>(&reactor_), r);
+ }
+ return *r;
+}
+
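+// On x86 the pointer-sized interlocked operations below are implemented in
+// terms of the 32-bit variants; elsewhere the native pointer functions are
+// used.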
+void* win_iocp_socket_service_base::interlocked_compare_exchange_pointer(
+ void** dest, void* exch, void* cmp)
+{
+#if defined(_M_IX86)
+ return reinterpret_cast<void*>(InterlockedCompareExchange(
+ reinterpret_cast<PLONG>(dest), reinterpret_cast<LONG>(exch),
+ reinterpret_cast<LONG>(cmp)));
+#else
+ return InterlockedCompareExchangePointer(dest, exch, cmp);
+#endif
+}
+
+void* win_iocp_socket_service_base::interlocked_exchange_pointer(
+ void** dest, void* val)
+{
+#if defined(_M_IX86)
+ return reinterpret_cast<void*>(InterlockedExchange(
+ reinterpret_cast<PLONG>(dest), reinterpret_cast<LONG>(val)));
+#else
+ return InterlockedExchangePointer(dest, val);
+#endif
+}
+
+} // namespace detail
+} // namespace asio
+} // namespace boost
+
+#include <boost/asio/detail/pop_options.hpp>
+
+#endif // defined(BOOST_ASIO_HAS_IOCP)
+
+#endif // BOOST_ASIO_DETAIL_IMPL_WIN_IOCP_SOCKET_SERVICE_BASE_IPP
diff --git a/boost/asio/detail/impl/win_mutex.ipp b/boost/asio/detail/impl/win_mutex.ipp
new file mode 100644
index 0000000000..05a7492178
--- /dev/null
+++ b/boost/asio/detail/impl/win_mutex.ipp
@@ -0,0 +1,80 @@
+//
+// detail/impl/win_mutex.ipp
+// ~~~~~~~~~~~~~~~~~~~~~~~~~
+//
+// Copyright (c) 2003-2012 Christopher M. Kohlhoff (chris at kohlhoff dot com)
+//
+// Distributed under the Boost Software License, Version 1.0. (See accompanying
+// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+//
+
+#ifndef BOOST_ASIO_DETAIL_IMPL_WIN_MUTEX_IPP
+#define BOOST_ASIO_DETAIL_IMPL_WIN_MUTEX_IPP
+
+#if defined(_MSC_VER) && (_MSC_VER >= 1200)
+# pragma once
+#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
+
+#include <boost/asio/detail/config.hpp>
+
+#if defined(BOOST_WINDOWS)
+
+#include <boost/asio/detail/throw_error.hpp>
+#include <boost/asio/detail/win_mutex.hpp>
+#include <boost/asio/error.hpp>
+
+#include <boost/asio/detail/push_options.hpp>
+
+namespace boost {
+namespace asio {
+namespace detail {
+
+win_mutex::win_mutex()
+{
+ int error = do_init();
+ boost::system::error_code ec(error,
+ boost::asio::error::get_system_category());
+ boost::asio::detail::throw_error(ec, "mutex");
+}
+
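+// Initialise the critical section, translating a STATUS_NO_MEMORY structured
+// exception into ERROR_OUTOFMEMORY where structured exception handling is
+// available.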
+int win_mutex::do_init()
+{
+#if defined(__MINGW32__)
+ // Not sure if MinGW supports structured exception handling, so for now
+ // we'll just call the Windows API and hope.
+# if defined(UNDER_CE)
+ ::InitializeCriticalSection(&crit_section_);
+# else
+ if (!::InitializeCriticalSectionAndSpinCount(&crit_section_, 0x80000000))
+ return ::GetLastError();
+# endif
+ return 0;
+#else
+ __try
+ {
+# if defined(UNDER_CE)
+ ::InitializeCriticalSection(&crit_section_);
+# else
+ if (!::InitializeCriticalSectionAndSpinCount(&crit_section_, 0x80000000))
+ return ::GetLastError();
+# endif
+ }
+ __except(GetExceptionCode() == STATUS_NO_MEMORY
+ ? EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH)
+ {
+ return ERROR_OUTOFMEMORY;
+ }
+
+ return 0;
+#endif
+}
+
+} // namespace detail
+} // namespace asio
+} // namespace boost
+
+#include <boost/asio/detail/pop_options.hpp>
+
+#endif // defined(BOOST_WINDOWS)
+
+#endif // BOOST_ASIO_DETAIL_IMPL_WIN_MUTEX_IPP
diff --git a/boost/asio/detail/impl/win_object_handle_service.ipp b/boost/asio/detail/impl/win_object_handle_service.ipp
new file mode 100644
index 0000000000..d91ccac10d
--- /dev/null
+++ b/boost/asio/detail/impl/win_object_handle_service.ipp
@@ -0,0 +1,446 @@
+//
+// detail/impl/win_object_handle_service.ipp
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+//
+// Copyright (c) 2003-2012 Christopher M. Kohlhoff (chris at kohlhoff dot com)
+// Copyright (c) 2011 Boris Schaeling (boris@highscore.de)
+//
+// Distributed under the Boost Software License, Version 1.0. (See accompanying
+// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+//
+
+#ifndef BOOST_ASIO_DETAIL_IMPL_WIN_OBJECT_HANDLE_SERVICE_IPP
+#define BOOST_ASIO_DETAIL_IMPL_WIN_OBJECT_HANDLE_SERVICE_IPP
+
+#if defined(_MSC_VER) && (_MSC_VER >= 1200)
+# pragma once
+#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
+
+#include <boost/asio/detail/config.hpp>
+
+#if defined(BOOST_ASIO_HAS_WINDOWS_OBJECT_HANDLE)
+
+#include <boost/asio/detail/win_object_handle_service.hpp>
+
+#include <boost/asio/detail/push_options.hpp>
+
+namespace boost {
+namespace asio {
+namespace detail {
+
+win_object_handle_service::win_object_handle_service(
+ boost::asio::io_service& io_service)
+ : io_service_(boost::asio::use_service<io_service_impl>(io_service)),
+ mutex_(),
+ impl_list_(0),
+ shutdown_(false)
+{
+}
+
+void win_object_handle_service::shutdown_service()
+{
+ mutex::scoped_lock lock(mutex_);
+
+ // Setting this flag to true prevents new objects from being registered, and
+ // new asynchronous wait operations from being started. We only need to worry
+ // about cleaning up the operations that are currently in progress.
+ shutdown_ = true;
+
+ op_queue<operation> ops;
+ for (implementation_type* impl = impl_list_; impl; impl = impl->next_)
+ ops.push(impl->op_queue_);
+
+ lock.unlock();
+
+ io_service_.abandon_operations(ops);
+}
+
+void win_object_handle_service::construct(
+ win_object_handle_service::implementation_type& impl)
+{
+ impl.handle_ = INVALID_HANDLE_VALUE;
+ impl.wait_handle_ = INVALID_HANDLE_VALUE;
+ impl.owner_ = this;
+
+ // Insert implementation into linked list of all implementations.
+ mutex::scoped_lock lock(mutex_);
+ if (!shutdown_)
+ {
+ impl.next_ = impl_list_;
+ impl.prev_ = 0;
+ if (impl_list_)
+ impl_list_->prev_ = &impl;
+ impl_list_ = &impl;
+ }
+}
+
+void win_object_handle_service::move_construct(
+ win_object_handle_service::implementation_type& impl,
+ win_object_handle_service::implementation_type& other_impl)
+{
+ mutex::scoped_lock lock(mutex_);
+
+ // Insert implementation into linked list of all implementations.
+ if (!shutdown_)
+ {
+ impl.next_ = impl_list_;
+ impl.prev_ = 0;
+ if (impl_list_)
+ impl_list_->prev_ = &impl;
+ impl_list_ = &impl;
+ }
+
+ impl.handle_ = other_impl.handle_;
+ other_impl.handle_ = INVALID_HANDLE_VALUE;
+ impl.wait_handle_ = other_impl.wait_handle_;
+ other_impl.wait_handle_ = INVALID_HANDLE_VALUE;
+ impl.op_queue_.push(other_impl.op_queue_);
+ impl.owner_ = this;
+
+ // We must not hold the lock while calling UnregisterWaitEx. This is because
+ // the registered callback function might be invoked while we are waiting for
+ // UnregisterWaitEx to complete.
+ lock.unlock();
+
+ if (impl.wait_handle_ != INVALID_HANDLE_VALUE)
+ ::UnregisterWaitEx(impl.wait_handle_, INVALID_HANDLE_VALUE);
+
+ if (!impl.op_queue_.empty())
+ register_wait_callback(impl, lock);
+}
+
+void win_object_handle_service::move_assign(
+ win_object_handle_service::implementation_type& impl,
+ win_object_handle_service& other_service,
+ win_object_handle_service::implementation_type& other_impl)
+{
+ boost::system::error_code ignored_ec;
+ close(impl, ignored_ec);
+
+ mutex::scoped_lock lock(mutex_);
+
+ if (this != &other_service)
+ {
+ // Remove implementation from linked list of all implementations.
+ if (impl_list_ == &impl)
+ impl_list_ = impl.next_;
+ if (impl.prev_)
+ impl.prev_->next_ = impl.next_;
+ if (impl.next_)
+ impl.next_->prev_= impl.prev_;
+ impl.next_ = 0;
+ impl.prev_ = 0;
+ }
+
+ impl.handle_ = other_impl.handle_;
+ other_impl.handle_ = INVALID_HANDLE_VALUE;
+ impl.wait_handle_ = other_impl.wait_handle_;
+ other_impl.wait_handle_ = INVALID_HANDLE_VALUE;
+ impl.op_queue_.push(other_impl.op_queue_);
+ impl.owner_ = this;
+
+ if (this != &other_service)
+ {
+ // Insert implementation into linked list of all implementations.
+ impl.next_ = other_service.impl_list_;
+ impl.prev_ = 0;
+ if (other_service.impl_list_)
+ other_service.impl_list_->prev_ = &impl;
+ other_service.impl_list_ = &impl;
+ }
+
+ // We must not hold the lock while calling UnregisterWaitEx. This is because
+ // the registered callback function might be invoked while we are waiting for
+ // UnregisterWaitEx to complete.
+ lock.unlock();
+
+ if (impl.wait_handle_ != INVALID_HANDLE_VALUE)
+ ::UnregisterWaitEx(impl.wait_handle_, INVALID_HANDLE_VALUE);
+
+ if (!impl.op_queue_.empty())
+ register_wait_callback(impl, lock);
+}
+
+void win_object_handle_service::destroy(
+ win_object_handle_service::implementation_type& impl)
+{
+ mutex::scoped_lock lock(mutex_);
+
+ // Remove implementation from linked list of all implementations.
+ if (impl_list_ == &impl)
+ impl_list_ = impl.next_;
+ if (impl.prev_)
+ impl.prev_->next_ = impl.next_;
+ if (impl.next_)
+ impl.next_->prev_= impl.prev_;
+ impl.next_ = 0;
+ impl.prev_ = 0;
+
+ if (is_open(impl))
+ {
+ BOOST_ASIO_HANDLER_OPERATION(("object_handle", &impl, "close"));
+
+ HANDLE wait_handle = impl.wait_handle_;
+ impl.wait_handle_ = INVALID_HANDLE_VALUE;
+
+ op_queue<operation> ops;
+ while (wait_op* op = impl.op_queue_.front())
+ {
+ op->ec_ = boost::asio::error::operation_aborted;
+ impl.op_queue_.pop();
+ ops.push(op);
+ }
+
+ // We must not hold the lock while calling UnregisterWaitEx. This is
+ // because the registered callback function might be invoked while we are
+ // waiting for UnregisterWaitEx to complete.
+ lock.unlock();
+
+ if (wait_handle != INVALID_HANDLE_VALUE)
+ ::UnregisterWaitEx(wait_handle, INVALID_HANDLE_VALUE);
+
+ ::CloseHandle(impl.handle_);
+ impl.handle_ = INVALID_HANDLE_VALUE;
+
+ io_service_.post_deferred_completions(ops);
+ }
+}
+
+boost::system::error_code win_object_handle_service::assign(
+ win_object_handle_service::implementation_type& impl,
+ const native_handle_type& handle, boost::system::error_code& ec)
+{
+ if (is_open(impl))
+ {
+ ec = boost::asio::error::already_open;
+ return ec;
+ }
+
+ impl.handle_ = handle;
+ ec = boost::system::error_code();
+ return ec;
+}
+
+boost::system::error_code win_object_handle_service::close(
+ win_object_handle_service::implementation_type& impl,
+ boost::system::error_code& ec)
+{
+ if (is_open(impl))
+ {
+ BOOST_ASIO_HANDLER_OPERATION(("object_handle", &impl, "close"));
+
+ mutex::scoped_lock lock(mutex_);
+
+ HANDLE wait_handle = impl.wait_handle_;
+ impl.wait_handle_ = INVALID_HANDLE_VALUE;
+
+ op_queue<operation> completed_ops;
+ while (wait_op* op = impl.op_queue_.front())
+ {
+ impl.op_queue_.pop();
+ op->ec_ = boost::asio::error::operation_aborted;
+ completed_ops.push(op);
+ }
+
+ // We must not hold the lock while calling UnregisterWaitEx. This is
+ // because the registered callback function might be invoked while we are
+ // waiting for UnregisterWaitEx to complete.
+ lock.unlock();
+
+ if (wait_handle != INVALID_HANDLE_VALUE)
+ ::UnregisterWaitEx(wait_handle, INVALID_HANDLE_VALUE);
+
+ if (::CloseHandle(impl.handle_))
+ {
+ impl.handle_ = INVALID_HANDLE_VALUE;
+ ec = boost::system::error_code();
+ }
+ else
+ {
+ DWORD last_error = ::GetLastError();
+ ec = boost::system::error_code(last_error,
+ boost::asio::error::get_system_category());
+ }
+
+ io_service_.post_deferred_completions(completed_ops);
+ }
+ else
+ {
+ ec = boost::system::error_code();
+ }
+
+ return ec;
+}
+
+boost::system::error_code win_object_handle_service::cancel(
+ win_object_handle_service::implementation_type& impl,
+ boost::system::error_code& ec)
+{
+ if (is_open(impl))
+ {
+ BOOST_ASIO_HANDLER_OPERATION(("object_handle", &impl, "cancel"));
+
+ mutex::scoped_lock lock(mutex_);
+
+ HANDLE wait_handle = impl.wait_handle_;
+ impl.wait_handle_ = INVALID_HANDLE_VALUE;
+
+ op_queue<operation> completed_ops;
+ while (wait_op* op = impl.op_queue_.front())
+ {
+ op->ec_ = boost::asio::error::operation_aborted;
+ impl.op_queue_.pop();
+ completed_ops.push(op);
+ }
+
+ // We must not hold the lock while calling UnregisterWaitEx. This is
+ // because the registered callback function might be invoked while we are
+ // waiting for UnregisterWaitEx to complete.
+ lock.unlock();
+
+ if (wait_handle != INVALID_HANDLE_VALUE)
+ ::UnregisterWaitEx(wait_handle, INVALID_HANDLE_VALUE);
+
+ ec = boost::system::error_code();
+
+ io_service_.post_deferred_completions(completed_ops);
+ }
+ else
+ {
+ ec = boost::asio::error::bad_descriptor;
+ }
+
+ return ec;
+}
+
+void win_object_handle_service::wait(
+ win_object_handle_service::implementation_type& impl,
+ boost::system::error_code& ec)
+{
+ switch (::WaitForSingleObject(impl.handle_, INFINITE))
+ {
+ case WAIT_FAILED:
+ {
+ DWORD last_error = ::GetLastError();
+ ec = boost::system::error_code(last_error,
+ boost::asio::error::get_system_category());
+ break;
+ }
+ case WAIT_OBJECT_0:
+ case WAIT_ABANDONED:
+ default:
+ ec = boost::system::error_code();
+ break;
+ }
+}
+
+void win_object_handle_service::start_wait_op(
+ win_object_handle_service::implementation_type& impl, wait_op* op)
+{
+ io_service_.work_started();
+
+ if (is_open(impl))
+ {
+ mutex::scoped_lock lock(mutex_);
+
+ if (!shutdown_)
+ {
+ impl.op_queue_.push(op);
+
+ // Only the first operation to be queued gets to register a wait callback.
+ // Subsequent operations have to wait for the first to finish.
+ if (impl.op_queue_.front() == op)
+ register_wait_callback(impl, lock);
+ }
+ else
+ {
+ lock.unlock();
+ io_service_.post_deferred_completion(op);
+ }
+ }
+ else
+ {
+ op->ec_ = boost::asio::error::bad_descriptor;
+ io_service_.post_deferred_completion(op);
+ }
+}
+
+void win_object_handle_service::register_wait_callback(
+ win_object_handle_service::implementation_type& impl,
+ mutex::scoped_lock& lock)
+{
+ lock.lock();
+
+ if (!RegisterWaitForSingleObject(&impl.wait_handle_,
+ impl.handle_, &win_object_handle_service::wait_callback,
+ &impl, INFINITE, WT_EXECUTEONLYONCE))
+ {
+ DWORD last_error = ::GetLastError();
+ boost::system::error_code ec(last_error,
+ boost::asio::error::get_system_category());
+
+ op_queue<operation> completed_ops;
+ while (wait_op* op = impl.op_queue_.front())
+ {
+ op->ec_ = ec;
+ impl.op_queue_.pop();
+ completed_ops.push(op);
+ }
+
+ lock.unlock();
+ io_service_.post_deferred_completions(completed_ops);
+ }
+}
+
+void win_object_handle_service::wait_callback(PVOID param, BOOLEAN)
+{
+ implementation_type* impl = static_cast<implementation_type*>(param);
+ mutex::scoped_lock lock(impl->owner_->mutex_);
+
+ if (impl->wait_handle_ != INVALID_HANDLE_VALUE)
+ {
+ ::UnregisterWaitEx(impl->wait_handle_, NULL);
+ impl->wait_handle_ = INVALID_HANDLE_VALUE;
+ }
+
+ if (wait_op* op = impl->op_queue_.front())
+ {
+ op_queue<operation> completed_ops;
+
+ op->ec_ = boost::system::error_code();
+ impl->op_queue_.pop();
+ completed_ops.push(op);
+
+ if (!impl->op_queue_.empty())
+ {
+ if (!RegisterWaitForSingleObject(&impl->wait_handle_,
+ impl->handle_, &win_object_handle_service::wait_callback,
+ param, INFINITE, WT_EXECUTEONLYONCE))
+ {
+ DWORD last_error = ::GetLastError();
+ boost::system::error_code ec(last_error,
+ boost::asio::error::get_system_category());
+
+ while (wait_op* op = impl->op_queue_.front())
+ {
+ op->ec_ = ec;
+ impl->op_queue_.pop();
+ completed_ops.push(op);
+ }
+ }
+ }
+
+ lock.unlock();
+ impl->owner_->io_service_.post_deferred_completions(completed_ops);
+ }
+}
+
+} // namespace detail
+} // namespace asio
+} // namespace boost
+
+#include <boost/asio/detail/pop_options.hpp>
+
+#endif // defined(BOOST_ASIO_HAS_WINDOWS_OBJECT_HANDLE)
+
+#endif // BOOST_ASIO_DETAIL_IMPL_WIN_OBJECT_HANDLE_SERVICE_IPP
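The service above leans on two documented Win32 behaviours: RegisterWaitForSingleObject
delivers its callback on a thread-pool thread, and UnregisterWaitEx called with
INVALID_HANDLE_VALUE as the completion event blocks until any in-flight callback has
returned, which is why close(), cancel() and destroy() drop the mutex before calling it.
A minimal standalone sketch of that pattern, with illustrative names (g_event,
on_signalled) that are not part of the imported file:

#include <windows.h>
#include <cstdio>

static HANDLE g_event = 0;

// Runs on a Win32 thread-pool thread once the event is signalled.
static void CALLBACK on_signalled(PVOID param, BOOLEAN /*timed_out*/)
{
  std::printf("event signalled, context=%p\n", param);
}

int main()
{
  g_event = ::CreateEventW(0, TRUE, FALSE, 0);

  HANDLE wait_handle = 0;
  ::RegisterWaitForSingleObject(&wait_handle, g_event,
      &on_signalled, 0, INFINITE, WT_EXECUTEONLYONCE);

  ::SetEvent(g_event);
  ::Sleep(100); // toy example only: give the callback a chance to fire

  // Blocking unregister: returns only once any running callback has completed,
  // so it must never be called while holding a lock the callback also takes.
  ::UnregisterWaitEx(wait_handle, INVALID_HANDLE_VALUE);
  ::CloseHandle(g_event);
}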
diff --git a/boost/asio/detail/impl/win_static_mutex.ipp b/boost/asio/detail/impl/win_static_mutex.ipp
new file mode 100644
index 0000000000..3ec104da2b
--- /dev/null
+++ b/boost/asio/detail/impl/win_static_mutex.ipp
@@ -0,0 +1,120 @@
+//
+// detail/impl/win_static_mutex.ipp
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+//
+// Copyright (c) 2003-2012 Christopher M. Kohlhoff (chris at kohlhoff dot com)
+//
+// Distributed under the Boost Software License, Version 1.0. (See accompanying
+// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+//
+
+#ifndef BOOST_ASIO_DETAIL_IMPL_WIN_STATIC_MUTEX_IPP
+#define BOOST_ASIO_DETAIL_IMPL_WIN_STATIC_MUTEX_IPP
+
+#if defined(_MSC_VER) && (_MSC_VER >= 1200)
+# pragma once
+#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
+
+#include <boost/asio/detail/config.hpp>
+
+#if defined(BOOST_WINDOWS)
+
+#include <cstdio>
+#include <boost/asio/detail/throw_error.hpp>
+#include <boost/asio/detail/win_static_mutex.hpp>
+#include <boost/asio/error.hpp>
+
+#include <boost/asio/detail/push_options.hpp>
+
+namespace boost {
+namespace asio {
+namespace detail {
+
+void win_static_mutex::init()
+{
+ int error = do_init();
+ boost::system::error_code ec(error,
+ boost::asio::error::get_system_category());
+ boost::asio::detail::throw_error(ec, "static_mutex");
+}
+
+int win_static_mutex::do_init()
+{
+ using namespace std; // For swprintf.
+ wchar_t mutex_name[128];
+#if BOOST_WORKAROUND(BOOST_MSVC, >= 1400) && !defined(UNDER_CE)
+ swprintf_s(mutex_name, 128,
+#else // BOOST_WORKAROUND(BOOST_MSVC, >= 1400) && !defined(UNDER_CE)
+ swprintf(mutex_name,
+#endif // BOOST_WORKAROUND(BOOST_MSVC, >= 1400) && !defined(UNDER_CE)
+ L"asio-58CCDC44-6264-4842-90C2-F3C545CB8AA7-%u-%p",
+ static_cast<unsigned int>(::GetCurrentProcessId()), this);
+
+ HANDLE mutex = ::CreateMutexW(0, TRUE, mutex_name);
+ DWORD last_error = ::GetLastError();
+ if (mutex == 0)
+ return last_error;
+
+ if (last_error == ERROR_ALREADY_EXISTS)
+ ::WaitForSingleObject(mutex, INFINITE);
+
+ if (initialised_)
+ {
+ ::ReleaseMutex(mutex);
+ ::CloseHandle(mutex);
+ return 0;
+ }
+
+#if defined(__MINGW32__)
+ // Not sure if MinGW supports structured exception handling, so for now
+ // we'll just call the Windows API and hope.
+# if defined(UNDER_CE)
+ ::InitializeCriticalSection(&crit_section_);
+# else
+ if (!::InitializeCriticalSectionAndSpinCount(&crit_section_, 0x80000000))
+ {
+ last_error = ::GetLastError();
+ ::ReleaseMutex(mutex);
+ ::CloseHandle(mutex);
+ return last_error;
+ }
+# endif
+#else
+ __try
+ {
+# if defined(UNDER_CE)
+ ::InitializeCriticalSection(&crit_section_);
+# else
+ if (!::InitializeCriticalSectionAndSpinCount(&crit_section_, 0x80000000))
+ {
+ last_error = ::GetLastError();
+ ::ReleaseMutex(mutex);
+ ::CloseHandle(mutex);
+ return last_error;
+ }
+# endif
+ }
+ __except(GetExceptionCode() == STATUS_NO_MEMORY
+ ? EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH)
+ {
+ ::ReleaseMutex(mutex);
+ ::CloseHandle(mutex);
+ return ERROR_OUTOFMEMORY;
+ }
+#endif
+
+ initialised_ = true;
+ ::ReleaseMutex(mutex);
+ ::CloseHandle(mutex);
+ return 0;
+}
+
+} // namespace detail
+} // namespace asio
+} // namespace boost
+
+#include <boost/asio/detail/pop_options.hpp>
+
+#endif // defined(BOOST_WINDOWS)
+
+#endif // BOOST_ASIO_DETAIL_IMPL_WIN_STATIC_MUTEX_IPP
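do_init() above serialises first-time initialisation with a named Win32 mutex: the
creating thread gets immediate ownership (bInitialOwner is TRUE), later callers see
ERROR_ALREADY_EXISTS and block in WaitForSingleObject until the creator releases the
mutex, after which they find initialised_ already set and return. A stripped-down
sketch of the same protocol, assuming a fixed mutex name rather than the
per-process/per-object name the real code builds with swprintf (init_once,
g_initialised and g_crit_section are illustrative names only):

#include <windows.h>
#include <cstdio>

static bool g_initialised = false;
static CRITICAL_SECTION g_crit_section;

static int init_once()
{
  // First creator owns the mutex immediately; everyone else must wait for it.
  HANDLE mutex = ::CreateMutexW(0, TRUE, L"example-once-init");
  DWORD last_error = ::GetLastError();
  if (mutex == 0)
    return static_cast<int>(last_error);

  if (last_error == ERROR_ALREADY_EXISTS)
    ::WaitForSingleObject(mutex, INFINITE);

  if (!g_initialised)
  {
    ::InitializeCriticalSection(&g_crit_section);
    g_initialised = true;
  }

  ::ReleaseMutex(mutex);
  ::CloseHandle(mutex);
  return 0;
}

int main()
{
  std::printf("init_once returned %d\n", init_once());
}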
diff --git a/boost/asio/detail/impl/win_thread.ipp b/boost/asio/detail/impl/win_thread.ipp
new file mode 100644
index 0000000000..744990d73e
--- /dev/null
+++ b/boost/asio/detail/impl/win_thread.ipp
@@ -0,0 +1,141 @@
+//
+// detail/impl/win_thread.ipp
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~
+//
+// Copyright (c) 2003-2012 Christopher M. Kohlhoff (chris at kohlhoff dot com)
+//
+// Distributed under the Boost Software License, Version 1.0. (See accompanying
+// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+//
+
+#ifndef BOOST_ASIO_DETAIL_IMPL_WIN_THREAD_IPP
+#define BOOST_ASIO_DETAIL_IMPL_WIN_THREAD_IPP
+
+#if defined(_MSC_VER) && (_MSC_VER >= 1200)
+# pragma once
+#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
+
+#include <boost/asio/detail/config.hpp>
+
+#if defined(BOOST_WINDOWS) && !defined(UNDER_CE)
+
+#include <process.h>
+#include <boost/asio/detail/throw_error.hpp>
+#include <boost/asio/detail/win_thread.hpp>
+#include <boost/asio/error.hpp>
+
+#include <boost/asio/detail/push_options.hpp>
+
+namespace boost {
+namespace asio {
+namespace detail {
+
+win_thread::~win_thread()
+{
+ ::CloseHandle(thread_);
+
+ // The exit_event_ handle is deliberately allowed to leak here since it
+ // is an error for the owner of an internal thread not to join() it.
+}
+
+void win_thread::join()
+{
+ HANDLE handles[2] = { exit_event_, thread_ };
+ ::WaitForMultipleObjects(2, handles, FALSE, INFINITE);
+ ::CloseHandle(exit_event_);
+ if (terminate_threads())
+ {
+ ::TerminateThread(thread_, 0);
+ }
+ else
+ {
+ ::QueueUserAPC(apc_function, thread_, 0);
+ ::WaitForSingleObject(thread_, INFINITE);
+ }
+}
+
+void win_thread::start_thread(func_base* arg, unsigned int stack_size)
+{
+ ::HANDLE entry_event = 0;
+ arg->entry_event_ = entry_event = ::CreateEvent(0, true, false, 0);
+ if (!entry_event)
+ {
+ DWORD last_error = ::GetLastError();
+ delete arg;
+ boost::system::error_code ec(last_error,
+ boost::asio::error::get_system_category());
+ boost::asio::detail::throw_error(ec, "thread.entry_event");
+ }
+
+ arg->exit_event_ = exit_event_ = ::CreateEvent(0, true, false, 0);
+ if (!exit_event_)
+ {
+ DWORD last_error = ::GetLastError();
+ delete arg;
+ boost::system::error_code ec(last_error,
+ boost::asio::error::get_system_category());
+ boost::asio::detail::throw_error(ec, "thread.exit_event");
+ }
+
+ unsigned int thread_id = 0;
+ thread_ = reinterpret_cast<HANDLE>(::_beginthreadex(0,
+ stack_size, win_thread_function, arg, 0, &thread_id));
+ if (!thread_)
+ {
+ DWORD last_error = ::GetLastError();
+ delete arg;
+ if (entry_event)
+ ::CloseHandle(entry_event);
+ if (exit_event_)
+ ::CloseHandle(exit_event_);
+ boost::system::error_code ec(last_error,
+ boost::asio::error::get_system_category());
+ boost::asio::detail::throw_error(ec, "thread");
+ }
+
+ if (entry_event)
+ {
+ ::WaitForSingleObject(entry_event, INFINITE);
+ ::CloseHandle(entry_event);
+ }
+}
+
+unsigned int __stdcall win_thread_function(void* arg)
+{
+ win_thread::auto_func_base_ptr func = {
+ static_cast<win_thread::func_base*>(arg) };
+
+ ::SetEvent(func.ptr->entry_event_);
+
+ func.ptr->run();
+
+ // Signal that the thread has finished its work, but rather than returning,
+ // go to sleep to put the thread into a well-known state. If the thread is
+ // being joined during global object destruction then it may be killed using
+ // TerminateThread (to avoid a deadlock in DllMain). Otherwise, the SleepEx
+ // call will be interrupted using QueueUserAPC and the thread will shut down
+ // cleanly.
+ HANDLE exit_event = func.ptr->exit_event_;
+ delete func.ptr;
+ func.ptr = 0;
+ ::SetEvent(exit_event);
+ ::SleepEx(INFINITE, TRUE);
+
+ return 0;
+}
+
+#if defined(WINVER) && (WINVER < 0x0500)
+void __stdcall apc_function(ULONG) {}
+#else
+void __stdcall apc_function(ULONG_PTR) {}
+#endif
+
+} // namespace detail
+} // namespace asio
+} // namespace boost
+
+#include <boost/asio/detail/pop_options.hpp>
+
+#endif // defined(BOOST_WINDOWS) && !defined(UNDER_CE)
+
+#endif // BOOST_ASIO_DETAIL_IMPL_WIN_THREAD_IPP
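join() and win_thread_function() above form a small handshake: the worker signals
exit_event_ and then parks itself in an alertable SleepEx, so the joining thread can
either terminate it safely (the DllMain case) or wake it with QueueUserAPC for a clean
return. A self-contained sketch of the QueueUserAPC path, assuming illustrative names
(worker, wake_apc, g_exit_event) that do not appear in the imported file:

#include <windows.h>
#include <process.h>
#include <cstdio>

static HANDLE g_exit_event = 0;

// An empty APC is enough: its only job is to interrupt the alertable sleep.
static void CALLBACK wake_apc(ULONG_PTR) {}

static unsigned int __stdcall worker(void*)
{
  std::printf("worker: work done, parking in alertable sleep\n");
  ::SetEvent(g_exit_event);   // tell the owner the work is finished
  ::SleepEx(INFINITE, TRUE);  // returns once an APC has been executed
  return 0;
}

int main()
{
  g_exit_event = ::CreateEventW(0, TRUE, FALSE, 0);

  unsigned int id = 0;
  HANDLE thread = reinterpret_cast<HANDLE>(
      ::_beginthreadex(0, 0, &worker, 0, 0, &id));

  ::WaitForSingleObject(g_exit_event, INFINITE); // wait for the work to finish
  ::QueueUserAPC(&wake_apc, thread, 0);          // wake the sleeping worker
  ::WaitForSingleObject(thread, INFINITE);       // worker now exits normally

  ::CloseHandle(thread);
  ::CloseHandle(g_exit_event);
}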
diff --git a/boost/asio/detail/impl/win_tss_ptr.ipp b/boost/asio/detail/impl/win_tss_ptr.ipp
new file mode 100644
index 0000000000..9da761c46c
--- /dev/null
+++ b/boost/asio/detail/impl/win_tss_ptr.ipp
@@ -0,0 +1,59 @@
+//
+// detail/impl/win_tss_ptr.ipp
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~
+//
+// Copyright (c) 2003-2012 Christopher M. Kohlhoff (chris at kohlhoff dot com)
+//
+// Distributed under the Boost Software License, Version 1.0. (See accompanying
+// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+//
+
+#ifndef BOOST_ASIO_DETAIL_IMPL_WIN_TSS_PTR_IPP
+#define BOOST_ASIO_DETAIL_IMPL_WIN_TSS_PTR_IPP
+
+#if defined(_MSC_VER) && (_MSC_VER >= 1200)
+# pragma once
+#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
+
+#include <boost/asio/detail/config.hpp>
+
+#if defined(BOOST_WINDOWS)
+
+#include <boost/asio/detail/throw_error.hpp>
+#include <boost/asio/detail/win_tss_ptr.hpp>
+#include <boost/asio/error.hpp>
+
+#include <boost/asio/detail/push_options.hpp>
+
+namespace boost {
+namespace asio {
+namespace detail {
+
+DWORD win_tss_ptr_create()
+{
+#if defined(UNDER_CE)
+ enum { out_of_indexes = 0xFFFFFFFF };
+#else
+ enum { out_of_indexes = TLS_OUT_OF_INDEXES };
+#endif
+
+ DWORD tss_key = ::TlsAlloc();
+ if (tss_key == out_of_indexes)
+ {
+ DWORD last_error = ::GetLastError();
+ boost::system::error_code ec(last_error,
+ boost::asio::error::get_system_category());
+ boost::asio::detail::throw_error(ec, "tss");
+ }
+ return tss_key;
+}
+
+} // namespace detail
+} // namespace asio
+} // namespace boost
+
+#include <boost/asio/detail/pop_options.hpp>
+
+#endif // defined(BOOST_WINDOWS)
+
+#endif // BOOST_ASIO_DETAIL_IMPL_WIN_TSS_PTR_IPP
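win_tss_ptr_create() above only allocates the thread-local storage slot; reading and
writing the per-thread pointer happens elsewhere through TlsGetValue and TlsSetValue.
A minimal sketch of how such a slot behaves (the names key and value are illustrative):

#include <windows.h>
#include <cstdio>

int main()
{
  DWORD key = ::TlsAlloc();
  if (key == TLS_OUT_OF_INDEXES)
    return 1;

  int value = 42;
  ::TlsSetValue(key, &value); // store a pointer private to this thread

  int* read_back = static_cast<int*>(::TlsGetValue(key));
  std::printf("slot holds %d\n", *read_back);

  ::TlsFree(key);
  return 0;
}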
diff --git a/boost/asio/detail/impl/winsock_init.ipp b/boost/asio/detail/impl/winsock_init.ipp
new file mode 100644
index 0000000000..8916934134
--- /dev/null
+++ b/boost/asio/detail/impl/winsock_init.ipp
@@ -0,0 +1,71 @@
+//
+// detail/impl/winsock_init.ipp
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+//
+// Copyright (c) 2003-2012 Christopher M. Kohlhoff (chris at kohlhoff dot com)
+//
+// Distributed under the Boost Software License, Version 1.0. (See accompanying
+// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+//
+
+#ifndef BOOST_ASIO_DETAIL_IMPL_WINSOCK_INIT_IPP
+#define BOOST_ASIO_DETAIL_IMPL_WINSOCK_INIT_IPP
+
+#if defined(_MSC_VER) && (_MSC_VER >= 1200)
+# pragma once
+#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
+
+#include <boost/asio/detail/config.hpp>
+
+#if defined(BOOST_WINDOWS) || defined(__CYGWIN__)
+
+#include <boost/asio/detail/socket_types.hpp>
+#include <boost/asio/detail/winsock_init.hpp>
+#include <boost/asio/detail/throw_error.hpp>
+#include <boost/asio/error.hpp>
+
+#include <boost/asio/detail/push_options.hpp>
+
+namespace boost {
+namespace asio {
+namespace detail {
+
+void winsock_init_base::startup(data& d,
+ unsigned char major, unsigned char minor)
+{
+ if (::InterlockedIncrement(&d.init_count_) == 1)
+ {
+ WSADATA wsa_data;
+ long result = ::WSAStartup(MAKEWORD(major, minor), &wsa_data);
+ ::InterlockedExchange(&d.result_, result);
+ }
+}
+
+void winsock_init_base::cleanup(data& d)
+{
+ if (::InterlockedDecrement(&d.init_count_) == 0)
+ {
+ ::WSACleanup();
+ }
+}
+
+void winsock_init_base::throw_on_error(data& d)
+{
+ long result = ::InterlockedExchangeAdd(&d.result_, 0);
+ if (result != 0)
+ {
+ boost::system::error_code ec(result,
+ boost::asio::error::get_system_category());
+ boost::asio::detail::throw_error(ec, "winsock");
+ }
+}
+
+} // namespace detail
+} // namespace asio
+} // namespace boost
+
+#include <boost/asio/detail/pop_options.hpp>
+
+#endif // defined(BOOST_WINDOWS) || defined(__CYGWIN__)
+
+#endif // BOOST_ASIO_DETAIL_IMPL_WINSOCK_INIT_IPP
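startup() and cleanup() above reference-count Winsock initialisation: the first
increment performs WSAStartup and records its result with an interlocked exchange, the
last decrement performs WSACleanup, and throw_on_error() reads the stored result back.
A compilable sketch of the same counting scheme outside asio, with illustrative names
(net_acquire, net_release, g_count, g_result):

#include <winsock2.h>
#include <windows.h>
#include <cstdio>
#pragma comment(lib, "ws2_32.lib") // MSVC: link against Winsock

static LONG g_count = 0;
static LONG g_result = 0;

static void net_acquire()
{
  // Only the transition 0 -> 1 pays for WSAStartup.
  if (::InterlockedIncrement(&g_count) == 1)
  {
    WSADATA wsa_data;
    ::InterlockedExchange(&g_result, ::WSAStartup(MAKEWORD(2, 2), &wsa_data));
  }
}

static void net_release()
{
  // Only the transition 1 -> 0 tears Winsock down again.
  if (::InterlockedDecrement(&g_count) == 0)
    ::WSACleanup();
}

int main()
{
  net_acquire();
  std::printf("WSAStartup result: %ld\n", g_result);
  net_release();
}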