// <experimental/io_service> -*- C++ -*-

// Copyright (C) 2015-2019 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library.  This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.

// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
// <http://www.gnu.org/licenses/>.

/** @file experimental/io_service
 *  This is a TS C++ Library header.
 */

#ifndef _GLIBCXX_EXPERIMENTAL_IO_SERVICE
#define _GLIBCXX_EXPERIMENTAL_IO_SERVICE 1

#pragma GCC system_header

#if __cplusplus >= 201402L

#include <algorithm>
#include <atomic>
#include <cerrno>
#include <chrono>
#include <forward_list>
#include <functional>
#include <memory>
#include <mutex>
#include <queue>
#include <system_error>
#include <thread>
#include <vector>
#include <experimental/netfwd>
#include <experimental/executor>
#if _GLIBCXX_HAVE_UNISTD_H
# include <unistd.h>
#endif
#ifdef _GLIBCXX_HAVE_POLL_H
# include <poll.h>
#endif
#ifdef _GLIBCXX_HAVE_FCNTL_H
# include <fcntl.h>
#endif

namespace std _GLIBCXX_VISIBILITY(default)
{
_GLIBCXX_BEGIN_NAMESPACE_VERSION
namespace experimental
{
namespace net
{
inline namespace v1
{

  /**
   * @ingroup networking
   * @{
   */

  class __socket_impl;

  /// An ExecutionContext for I/O operations.
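  ///
  /// A minimal usage sketch (illustrative only; assumes the companion
  /// header <experimental/timer> for net::system_timer):
  /// @code
  /// namespace net = std::experimental::net;
  /// net::io_context ctx;
  /// net::system_timer timer(ctx, std::chrono::seconds(1));
  /// timer.async_wait([](std::error_code ec) { /* timer expired */ });
  /// ctx.run(); // blocks until no outstanding work remains
  /// @endcode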
  class io_context : public execution_context
  {
  public:
    // types:

    /// An executor for an io_context.
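    ///
    /// A brief sketch of obtaining and using the executor (illustrative
    /// only; the free function net::post comes from
    /// <experimental/executor>):
    /// @code
    /// auto ex = ctx.get_executor();
    /// net::post(ex, []{ /* run by a thread calling ctx.run() */ });
    /// @endcode
    /// N.B. the post/defer members below are incomplete in this
    /// implementation (see the TODO comments).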
    class executor_type
    {
    public:
      // construct / copy / destroy:

      executor_type(const executor_type& __other) noexcept = default;
      executor_type(executor_type&& __other) noexcept = default;

      executor_type& operator=(const executor_type& __other) noexcept = default;
      executor_type& operator=(executor_type&& __other) noexcept = default;

      // executor operations:

      bool running_in_this_thread() const noexcept
      {
	lock_guard<mutex> __lock(_M_ctx->_M_mtx);
	auto __end = _M_ctx->_M_call_stack.end();
	return std::find(_M_ctx->_M_call_stack.begin(), __end,
			 this_thread::get_id()) != __end;
      }

      io_context& context() const noexcept { return *_M_ctx; }

      void on_work_started() const noexcept { ++_M_ctx->_M_work_count; }
      void on_work_finished() const noexcept { --_M_ctx->_M_work_count; }

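      // dispatch() runs the function immediately when the calling thread
      // is already running this context, otherwise it falls back to
      // post(); defer() is implemented as post() here.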
      template<typename _Func, typename _ProtoAllocator>
	void
	dispatch(_Func&& __f, const _ProtoAllocator& __a) const
	{
	  if (running_in_this_thread())
	    decay_t<_Func>{std::forward<_Func>(__f)}();
	  else
	    post(std::forward<_Func>(__f), __a);
	}

      template<typename _Func, typename _ProtoAllocator>
	void
	post(_Func&& __f, const _ProtoAllocator& __a) const
	{
	  lock_guard<mutex> __lock(_M_ctx->_M_mtx);
	  // TODO (re-use functionality in system_context)
	  _M_ctx->_M_reactor._M_notify();
	}

      template<typename _Func, typename _ProtoAllocator>
	void
	defer(_Func&& __f, const _ProtoAllocator& __a) const
	{ post(std::forward<_Func>(__f), __a); }

    private:
      friend io_context;

      explicit
      executor_type(io_context& __ctx) : _M_ctx(std::addressof(__ctx)) { }

      io_context* _M_ctx;
    };

    using count_type = size_t;

    // construct / copy / destroy:

    io_context() : _M_work_count(0) { }

    explicit
    io_context(int __concurrency_hint) : _M_work_count(0) { }

    io_context(const io_context&) = delete;
    io_context& operator=(const io_context&) = delete;

    // io_context operations:

    executor_type get_executor() noexcept { return executor_type(*this); }

    count_type
    run()
    {
      count_type __n = 0;
      while (run_one())
	if (__n != numeric_limits<count_type>::max())
	  ++__n;
      return __n;
    }

    template<typename _Rep, typename _Period>
      count_type
      run_for(const chrono::duration<_Rep, _Period>& __rel_time)
      { return run_until(chrono::steady_clock::now() + __rel_time); }

    template<typename _Clock, typename _Duration>
      count_type
      run_until(const chrono::time_point<_Clock, _Duration>& __abs_time)
      {
	count_type __n = 0;
	while (run_one_until(__abs_time))
	  if (__n != numeric_limits<count_type>::max())
	    ++__n;
	return __n;
      }

    count_type
    run_one()
    { return _M_do_one(chrono::milliseconds{-1}); }

    template<typename _Rep, typename _Period>
      count_type
      run_one_for(const chrono::duration<_Rep, _Period>& __rel_time)
      { return run_one_until(chrono::steady_clock::now() + __rel_time); }

    template<typename _Clock, typename _Duration>
      count_type
      run_one_until(const chrono::time_point<_Clock, _Duration>& __abs_time)
      {
	auto __now = _Clock::now();
	while (__now < __abs_time)
	  {
	    using namespace std::chrono;
	    auto __ms = duration_cast<milliseconds>(__abs_time - __now);
	    if (_M_do_one(__ms))
	      return 1;
	    __now = _Clock::now();
	  }
	return 0;
      }

    count_type
    poll()
    {
      count_type __n = 0;
      while (poll_one())
	if (__n != numeric_limits<count_type>::max())
	  ++__n;
      return __n;
    }

    count_type
    poll_one()
    { return _M_do_one(chrono::milliseconds{0}); }

    void stop()
    {
      lock_guard<mutex> __lock(_M_mtx);
      _M_stopped = true;
      _M_reactor._M_notify();
    }

    bool stopped() const noexcept
    {
      lock_guard<mutex> __lock(_M_mtx);
      return _M_stopped;
    }

    void restart()
    {
      _M_stopped = false;
    }

  private:

    template<typename _Clock, typename _WaitTraits>
      friend class basic_waitable_timer;

    friend __socket_impl;

    template<typename _Protocol>
      friend class __basic_socket_impl;

    template<typename _Protocol>
      friend class basic_socket;

    template<typename _Protocol>
      friend class basic_datagram_socket;

    template<typename _Protocol>
      friend class basic_stream_socket;

    template<typename _Protocol>
      friend class basic_socket_acceptor;

    count_type
    _M_outstanding_work() const
    { return _M_work_count + !_M_ops.empty(); }

    struct __timer_queue_base : execution_context::service
    {
      // return milliseconds until next timer expires, or milliseconds::max()
      virtual chrono::milliseconds _M_next() const = 0;
      virtual bool run_one() = 0;

    protected:
      explicit
      __timer_queue_base(execution_context& __ctx) : service(__ctx)
      {
	auto& __ioc = static_cast<io_context&>(__ctx);
	lock_guard<mutex> __lock(__ioc._M_mtx);
	__ioc._M_timers.push_back(this);
      }

      mutable mutex	_M_qmtx;
    };

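    // A per-timer-type service, obtained via use_service, that stores
    // pending waits in a priority queue keyed on expiry time.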
    template<typename _Timer, typename _Key = typename _Timer::_Key>
      struct __timer_queue : __timer_queue_base
      {
	using key_type = __timer_queue;

	explicit
	__timer_queue(execution_context& __ctx) : __timer_queue_base(__ctx)
	{ }

	void shutdown() noexcept { }

	io_context& context() noexcept
	{ return static_cast<io_context&>(service::context()); }

	// Start an asynchronous wait.
	void
	push(const _Timer& __t, function<void(error_code)> __h)
	{
	  context().get_executor().on_work_started();
	  lock_guard<mutex> __lock(_M_qmtx);
	  _M_queue.emplace(__t, _M_next_id++, std::move(__h));
	  // no need to notify reactor unless this timer went to the front?
	}

	// Cancel all outstanding waits for __t
	size_t
	cancel(const _Timer& __t)
	{
	  lock_guard<mutex> __lock(_M_qmtx);
	  size_t __count = 0;
	  auto __last = _M_queue.end();
	  for (auto __it = _M_queue.begin(), __end = __last; __it != __end;
	      ++__it)
	    {
	      if (__it->_M_key == __t._M_key.get())
		{
		  __it->cancel();
		  __last = __it;
		  ++__count;
		}
	    }
	  if (__count)
	    _M_queue._M_sort_to(__last);
	  return __count;
	}

	// Cancel oldest outstanding wait for __t
	bool
	cancel_one(const _Timer& __t)
	{
	  lock_guard<mutex> __lock(_M_qmtx);
	  const auto __end = _M_queue.end();
	  auto __oldest = __end;
	  for (auto __it = _M_queue.begin(); __it != __end; ++__it)
	    if (__it->_M_key == __t._M_key.get())
	      if (__oldest == __end || __it->_M_id < __oldest->_M_id)
		__oldest = __it;
	  if (__oldest == __end)
	    return false;
	  __oldest->cancel();
	  _M_queue._M_sort_to(__oldest);
	  return true;
	}

	chrono::milliseconds
	_M_next() const override
	{
	  typename _Timer::time_point __exp;
	  {
	    lock_guard<mutex> __lock(_M_qmtx);
	    if (_M_queue.empty())
	      return chrono::milliseconds::max();  // no pending timers
	    if (_M_queue.top()._M_key == nullptr)
	      return chrono::milliseconds::zero(); // cancelled, run now
	    __exp = _M_queue.top()._M_expiry;
	  }
	  auto __dur = _Timer::traits_type::to_wait_duration(__exp);
	  if (__dur < __dur.zero())
	    __dur = __dur.zero();
	  return chrono::duration_cast<chrono::milliseconds>(__dur);
	}

      private:

	bool run_one() override
	{
	  auto __now = _Timer::clock_type::now();
	  function<void(error_code)> __h;
	  error_code __ec;
	  {
	    lock_guard<mutex> __lock(_M_qmtx);

	    if (_M_queue.top()._M_key == nullptr) // cancelled
	      {
		__h = std::move(_M_queue.top()._M_h);
		__ec = std::make_error_code(errc::operation_canceled);
		_M_queue.pop();
	      }
	    else if (_M_queue.top()._M_expiry <= _Timer::clock_type::now())
	      {
		__h = std::move(_M_queue.top()._M_h);
		_M_queue.pop();
	      }
	  }
	  if (__h)
	    {
	      __h(__ec);
	      context().get_executor().on_work_finished();
	      return true;
	    }
	  return false;
	}

	using __timer_id_type = uint64_t;

	struct __pending_timer
	{
	  __pending_timer(const _Timer& __t, uint64_t __id,
			  function<void(error_code)> __h)
	  : _M_expiry(__t.expiry()), _M_key(__t._M_key.get()), _M_id(__id),
	    _M_h(std::move(__h))
	  { }

	  typename _Timer::time_point _M_expiry;
	  _Key* _M_key;
	  __timer_id_type _M_id;
	  function<void(error_code)> _M_h;

	  void cancel() { _M_expiry = _M_expiry.min(); _M_key = nullptr; }

	  bool
	  operator<(const __pending_timer& __rhs) const
	  { return _M_expiry < __rhs._M_expiry; }
	};

	struct __queue : priority_queue<__pending_timer>
	{
	  using iterator =
	    typename priority_queue<__pending_timer>::container_type::iterator;

	  // expose begin/end/erase for direct access to underlying container
	  iterator begin() { return this->c.begin(); }
	  iterator end() { return this->c.end(); }
	  iterator erase(iterator __it) { return this->c.erase(__it); }

	  void
	  _M_sort_to(iterator __it)
	  { std::stable_sort(this->c.begin(), ++__it); }
	};

	__queue	_M_queue;
	__timer_id_type _M_next_id = 0;
      };

    template<typename _Timer, typename _CompletionHandler>
      void
      async_wait(const _Timer& __timer, _CompletionHandler&& __h)
      {
	auto& __queue = use_service<__timer_queue<_Timer>>(*this);
	__queue.push(__timer, std::move(__h));
	_M_reactor._M_notify();
      }

    // Cancel all wait operations initiated by __timer.
    template<typename _Timer>
      size_t
      cancel(const _Timer& __timer)
      {
	if (!has_service<__timer_queue<_Timer>>(*this))
	  return 0;

	auto __c = use_service<__timer_queue<_Timer>>(*this).cancel(__timer);
	if (__c != 0)
	  _M_reactor._M_notify();
	return __c;
      }

    // Cancel the oldest wait operation initiated by __timer.
    template<typename _Timer>
      size_t
      cancel_one(const _Timer& __timer)
      {
	if (!has_service<__timer_queue<_Timer>>(*this))
	  return 0;

	if (use_service<__timer_queue<_Timer>>(*this).cancel_one(__timer))
	  {
	    _M_reactor._M_notify();
	    return 1;
	  }
	return 0;
      }

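    // Register a handler to be invoked once __fd is ready for the
    // poll(2) events in __w (e.g. POLLIN or POLLOUT).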
    template<typename _Op>
      void
      async_wait(int __fd, int __w, _Op&& __op)
      {
	lock_guard<mutex> __lock(_M_mtx);
	// TODO need push_back, use std::list not std::forward_list
	auto __tail = _M_ops.before_begin(), __it = _M_ops.begin();
	while (__it != _M_ops.end())
	  {
	    ++__it;
	    ++__tail;
	  }
	using __type = __async_operation_impl<_Op>;
	_M_ops.emplace_after(__tail,
			     make_unique<__type>(std::move(__op), __fd, __w));
	_M_reactor._M_fd_interest(__fd, __w);
      }

    void _M_add_fd(int __fd) { _M_reactor._M_add_fd(__fd); }
    void _M_remove_fd(int __fd) { _M_reactor._M_remove_fd(__fd); }

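    // Cancel every outstanding operation on __fd.  Cancelled operations
    // are spliced to the front of _M_ops so that _M_do_one() finds them
    // first and runs their handlers with errc::operation_canceled.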
    void cancel(int __fd, error_code&)
    {
      lock_guard<mutex> __lock(_M_mtx);
      const auto __end = _M_ops.end();
      auto __it = _M_ops.begin();
      auto __prev = _M_ops.before_begin();
      while (__it != __end && (*__it)->_M_is_cancelled())
	{
	  ++__it;
	  ++__prev;
	}
      auto __cancelled = __prev;
      while (__it != __end)
	{
	  if ((*__it)->_M_fd == __fd)
	    {
	      (*__it)->cancel();
	      ++__it;
	      _M_ops.splice_after(__cancelled, _M_ops, __prev);
	      ++__cancelled;
	    }
	  else
	    {
	      ++__it;
	      ++__prev;
	    }
	}
      _M_reactor._M_not_interested(__fd);
    }

    struct __async_operation
    {
      __async_operation(int __fd, int __ev) : _M_fd(__fd), _M_ev(__ev) { }

      virtual ~__async_operation() = default;

      int _M_fd;
      short _M_ev;

      void cancel() { _M_fd = -1; }
      bool _M_is_cancelled() const { return _M_fd == -1; }
      virtual void run(io_context&) = 0;
    };

    template<typename _Op>
      struct __async_operation_impl : __async_operation
      {
	__async_operation_impl(_Op&& __op, int __fd, int __ev)
	: __async_operation{__fd, __ev}, _M_op(std::move(__op)) { }

	_Op _M_op;

	void run(io_context& __ctx)
	{
	  if (_M_is_cancelled())
	    _M_op(std::make_error_code(errc::operation_canceled));
	  else
	    _M_op(error_code{});
	}
      };

    atomic<count_type>		_M_work_count;
    mutable mutex		_M_mtx;
    queue<function<void()>>	_M_op;
    bool			_M_stopped = false;

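    // RAII helper: registers the calling thread on _M_call_stack for the
    // duration of _M_do_one(), and stops the context when the last
    // runner leaves with no outstanding work.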
    struct __monitor
    {
      __monitor(io_context& __c) : _M_ctx(__c)
      {
	lock_guard<mutex> __lock(_M_ctx._M_mtx);
	_M_ctx._M_call_stack.push_back(this_thread::get_id());
      }

      ~__monitor()
      {
	lock_guard<mutex> __lock(_M_ctx._M_mtx);
	_M_ctx._M_call_stack.pop_back();
	if (_M_ctx._M_outstanding_work() == 0)
	  {
	    _M_ctx._M_stopped = true;
	    _M_ctx._M_reactor._M_notify();
	  }
      }

      __monitor(__monitor&&) = delete;

      io_context& _M_ctx;
    };

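    // Run at most one ready timer or I/O completion handler.  A negative
    // __timeout blocks indefinitely (poll(2) semantics), zero polls
    // without blocking, and a positive value bounds the wait.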
    bool
    _M_do_one(chrono::milliseconds __timeout)
    {
      const bool __block = __timeout != chrono::milliseconds::zero();

      __reactor::__fdvec __fds;

      __monitor __mon{*this};

      __timer_queue_base* __timerq = nullptr;
      unique_ptr<__async_operation> __async_op;

      while (true)
	{
	  if (__timerq)
	    {
	      if (__timerq->run_one())
		return true;
	      else
		__timerq = nullptr;
	    }

	  if (__async_op)
	    {
	      __async_op->run(*this);
	      // TODO need to unregister __async_op
	      return true;
	    }

	  chrono::milliseconds __ms{0};

	  {
	    lock_guard<mutex> __lock(_M_mtx);

	    if (_M_stopped)
	      return false;

	    // find first timer with something to do
	    for (auto __q : _M_timers)
	      {
		auto __next = __q->_M_next();
		if (__next == __next.zero())  // ready to run immediately
		  {
		    __timerq = __q;
		    __ms = __next;
		    break;
		  }
		else if (__next != __next.max() && __block
		    && (__next < __ms || __timerq == nullptr))
		  {
		    __timerq = __q;
		    __ms = __next;
		  }
	      }

	    if (__timerq && __ms == __ms.zero())
	      continue;  // restart loop to run a timer immediately

	    if (!_M_ops.empty() && _M_ops.front()->_M_is_cancelled())
	      {
		_M_ops.front().swap(__async_op);
		_M_ops.pop_front();
		continue;
	      }

	    // TODO run any posted items

	    if (__block)
	      {
		if (__timerq == nullptr)
		  __ms = __timeout;
		else if (__ms.zero() <= __timeout && __timeout < __ms)
		  __ms = __timeout;
		else if (__ms.count() > numeric_limits<int>::max())
		  __ms = chrono::milliseconds{numeric_limits<int>::max()};
	      }
	    // else __ms == 0 and poll() will return immediately

	  }

	  auto __res = _M_reactor.wait(__fds, __ms);

	  if (__res == __reactor::_S_retry)
	    continue;

	  if (__res == __reactor::_S_timeout)
	    {
	      if (__timerq == nullptr)
		return false;
	      else
		continue;  // timed out, so restart loop and process the timer
	    }

	  __timerq = nullptr;

	  if (__fds.empty()) // nothing to do
	    return false;

	  lock_guard<mutex> __lock(_M_mtx);
	  for (auto __it = _M_ops.begin(), __end = _M_ops.end(),
	      __prev = _M_ops.before_begin(); __it != __end; ++__it, ++__prev)
	    {
	      auto& __op = **__it;
	      auto __pos = std::lower_bound(__fds.begin(), __fds.end(),
		  __op._M_fd,
		  [](const auto& __p, int __fd) { return __p.fd < __fd; });
	      if (__pos != __fds.end() && __pos->fd == __op._M_fd
		  && __pos->revents & __op._M_ev)
		{
		  __it->swap(__async_op);
		  _M_ops.erase_after(__prev);
		  break;  // restart loop and run op
		}
	    }
	}
    }

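    // poll(2)-based reactor using the classic self-pipe trick: other
    // threads write a byte to _M_notify_wr to interrupt a blocked
    // poll() call so the event loop can re-examine its state.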
    struct __reactor
    {
      __reactor() : _M_fds(1)
      {
	int __pipe[2];
	if (::pipe(__pipe) == -1)
	  __throw_system_error(errno);
	if (::fcntl(__pipe[0], F_SETFL, O_NONBLOCK) == -1
	    || ::fcntl(__pipe[1], F_SETFL, O_NONBLOCK) == -1)
	  {
	    int __e = errno;
	    ::close(__pipe[0]);
	    ::close(__pipe[1]);
	    __throw_system_error(__e);
	  }
	_M_fds.back().events	= POLLIN;
	_M_fds.back().fd	= __pipe[0];
	_M_notify_wr		= __pipe[1];
      }

      ~__reactor()
      {
	::close(_M_fds.back().fd);
	::close(_M_notify_wr);
      }

      // write a notification byte to the pipe (ignoring errors)
      void _M_notify()
      {
	int __n;
	do {
	  __n = ::write(_M_notify_wr, "", 1);
	} while (__n == -1 && errno == EINTR);
      }

      // read all notification bytes from the pipe
      void _M_on_notify()
      {
	// Drain the pipe.
	char __buf[64];
	ssize_t __n;
	do {
	  __n = ::read(_M_fds.back().fd, __buf, sizeof(__buf));
	} while (__n != -1 || errno == EINTR);
      }

      void
      _M_add_fd(int __fd)
      {
	auto __pos = _M_lower_bound(__fd);
	if (__pos->fd == __fd)
	  __throw_system_error((int)errc::invalid_argument);
	_M_fds.insert(__pos, __fdvec::value_type{})->fd = __fd;
	_M_notify();
      }

      void
      _M_remove_fd(int __fd)
      {
	auto __pos = _M_lower_bound(__fd);
	if (__pos->fd == __fd)
	  _M_fds.erase(__pos);
	// else bug!
	_M_notify();
      }

      void
      _M_fd_interest(int __fd, int __w)
      {
	auto __pos = _M_lower_bound(__fd);
	if (__pos->fd == __fd)
	  __pos->events |= __w;
	// else bug!
	_M_notify();
      }

      void
      _M_not_interested(int __fd)
      {
	auto __pos = _M_lower_bound(__fd);
	if (__pos->fd == __fd)
	  __pos->events = 0;
	_M_notify();
      }

# ifdef _GLIBCXX_HAVE_POLL_H
      using __fdvec = vector<::pollfd>;

      // Find first element p such that !(p.fd < __fd)
      // N.B. always returns a dereferencable iterator.
      __fdvec::iterator
      _M_lower_bound(int __fd)
      {
	return std::lower_bound(_M_fds.begin(), _M_fds.end() - 1,
	    __fd, [](const auto& __p, int __fd) { return __p.fd < __fd; });
      }

      enum __status { _S_retry, _S_timeout, _S_ok, _S_error };

      __status
      wait(__fdvec& __fds, chrono::milliseconds __timeout)
      {
	// XXX not thread-safe!
	__fds = _M_fds;  // take snapshot to pass to poll()

	int __res = ::poll(__fds.data(), __fds.size(), __timeout.count());

	if (__res == -1)
	  {
	    __fds.clear();
	    if (errno == EINTR)
	      return _S_retry;
	    return _S_error; // XXX ???
	  }
	else if (__res == 0)
	  {
	    __fds.clear();
	    return _S_timeout;
	  }
	else if (__fds.back().revents != 0) // something changed, restart
	  {
	    __fds.clear();
	    _M_on_notify();
	    return _S_retry;
	  }

	auto __part = std::stable_partition(__fds.begin(), __fds.end() - 1,
	      [](const __fdvec::value_type& __p) { return __p.revents != 0; });
	__fds.erase(__part, __fds.end());

	return _S_ok;
      }

      __fdvec _M_fds;	// _M_fds.back() is the read end of the self-pipe
#endif
      int _M_notify_wr;	// write end of the self-pipe
    };

    __reactor _M_reactor;

    vector<__timer_queue_base*>			_M_timers;
    forward_list<unique_ptr<__async_operation>>	_M_ops;

    vector<thread::id>	_M_call_stack;
  };

  inline bool
  operator==(const io_context::executor_type& __a,
	     const io_context::executor_type& __b) noexcept
  {
    // https://github.com/chriskohlhoff/asio-tr2/issues/201
    using executor_type = io_context::executor_type;
    return std::addressof(executor_type(__a).context())
      == std::addressof(executor_type(__b).context());
  }

  inline bool
  operator!=(const io_context::executor_type& __a,
	     const io_context::executor_type& __b) noexcept
  { return !(__a == __b); }

  template<> struct is_executor<io_context::executor_type> : true_type {};

  /// @}

} // namespace v1
} // namespace net
} // namespace experimental
_GLIBCXX_END_NAMESPACE_VERSION
} // namespace std

#endif // C++14

#endif // _GLIBCXX_EXPERIMENTAL_IO_SERVICE