1// <mutex> -*- C++ -*-
2
3// Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2013
4// Free Software Foundation, Inc.
5//
6// This file is part of the GNU ISO C++ Library.  This library is free
7// software; you can redistribute it and/or modify it under the
8// terms of the GNU General Public License as published by the
9// Free Software Foundation; either version 3, or (at your option)
10// any later version.
11
12// This library is distributed in the hope that it will be useful,
13// but WITHOUT ANY WARRANTY; without even the implied warranty of
14// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15// GNU General Public License for more details.
16
17// Under Section 7 of GPL version 3, you are granted additional
18// permissions described in the GCC Runtime Library Exception, version
19// 3.1, as published by the Free Software Foundation.
20
21// You should have received a copy of the GNU General Public License and
22// a copy of the GCC Runtime Library Exception along with this program;
23// see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
24// <http://www.gnu.org/licenses/>.
25
26/** @file include/mutex
27 *  This is a Standard C++ Library header.
28 */
29
30#ifndef _GLIBCXX_MUTEX
31#define _GLIBCXX_MUTEX 1
32
33#pragma GCC system_header
34
35#ifndef __GXX_EXPERIMENTAL_CXX0X__
36# include <bits/c++0x_warning.h>
37#else
38
39#include <tuple>
40#include <chrono>
41#include <exception>
42#include <type_traits>
43#include <functional>
44#include <system_error>
45#include <bits/functexcept.h>
46#include <bits/gthr.h>
47#include <bits/move.h> // for std::swap
48
49#ifdef _GLIBCXX_USE_C99_STDINT_TR1
50
51namespace std _GLIBCXX_VISIBILITY(default)
52{
53_GLIBCXX_BEGIN_NAMESPACE_VERSION
54
55#ifdef _GLIBCXX_HAS_GTHREADS
  // Common base class for std::mutex and std::timed_mutex.
  // Owns the raw gthreads mutex and is responsible for its
  // initialization and destruction; locking operations are provided by
  // the derived classes.
  class __mutex_base
  {
  protected:
    typedef __gthread_mutex_t			__native_type;

#ifdef __GTHREAD_MUTEX_INIT
    // The native mutex supports static initialization: no runtime
    // construction or destruction is needed, so the constructor can be
    // constexpr and no destructor is declared.
    __native_type  _M_mutex = __GTHREAD_MUTEX_INIT;

    constexpr __mutex_base() noexcept = default;
#else
    // No static initializer available: initialize and destroy the
    // native mutex explicitly.
    __native_type  _M_mutex;

    __mutex_base() noexcept
    {
      // XXX EAGAIN, ENOMEM, EPERM, EBUSY(may), EINVAL(may)
      __GTHREAD_MUTEX_INIT_FUNCTION(&_M_mutex);
    }

    ~__mutex_base() noexcept { __gthread_mutex_destroy(&_M_mutex); }
#endif

    // Mutexes are neither copyable nor assignable.
    __mutex_base(const __mutex_base&) = delete;
    __mutex_base& operator=(const __mutex_base&) = delete;
  };
81
  // Common base class for std::recursive_mutex and std::timed_recursive_mutex.
  // Owns the raw gthreads recursive mutex and handles its
  // initialization and destruction; locking is left to derived classes.
  class __recursive_mutex_base
  {
  protected:
    typedef __gthread_recursive_mutex_t		__native_type;

    // Non-copyable, non-assignable.
    __recursive_mutex_base(const __recursive_mutex_base&) = delete;
    __recursive_mutex_base& operator=(const __recursive_mutex_base&) = delete;

#ifdef __GTHREAD_RECURSIVE_MUTEX_INIT
    // Static initialization available: nothing to construct or destroy.
    __native_type  _M_mutex = __GTHREAD_RECURSIVE_MUTEX_INIT;

    __recursive_mutex_base() = default;
#else
    __native_type  _M_mutex;

    __recursive_mutex_base()
    {
      // XXX EAGAIN, ENOMEM, EPERM, EBUSY(may), EINVAL(may)
      __GTHREAD_RECURSIVE_MUTEX_INIT_FUNCTION(&_M_mutex);
    }

    ~__recursive_mutex_base()
    { _S_destroy(&_M_mutex); }

  private:
    // FIXME: gthreads doesn't define __gthread_recursive_mutex_destroy
    // so we need to obtain a __gthread_mutex_t to destroy.
    // The overloads below use SFINAE on the layout of the native
    // recursive mutex type to pick the right teardown strategy; exactly
    // one of them is viable for any given gthreads port.

    // matches when there's only one mutex type, i.e. the recursive
    // mutex type is the plain mutex type and can be destroyed directly
    template<typename _Rm>
      static
      typename enable_if<is_same<_Rm, __gthread_mutex_t>::value, void>::type
      _S_destroy(_Rm* __mx)
      { __gthread_mutex_destroy(__mx); }

    // matches a recursive mutex with a member 'actual' holding the
    // underlying plain mutex (e.g. gthr-posix95.h)
    template<typename _Rm>
      static typename enable_if<(bool)sizeof(&_Rm::actual), void>::type
      _S_destroy(_Rm* __mx)
      { __gthread_mutex_destroy(&__mx->actual); }

    // matches a gthr-win32.h recursive mutex: build a temporary plain
    // mutex sharing the recursive mutex's state and destroy that
    template<typename _Rm>
      static typename enable_if<(bool)sizeof(&_Rm::sema), void>::type
      _S_destroy(_Rm* __mx)
      {
        __gthread_mutex_t __tmp;
        _S_destroy_win32(&__tmp, __mx);
      }

    // Copies the win32 mutex members into __mx and destroys it.
    template<typename _Mx, typename _Rm>
      static void
      _S_destroy_win32(_Mx* __mx, _Rm const* __rmx)
      {
        __mx->counter = __rmx->counter;
        __mx->sema = __rmx->sema;
        __gthread_mutex_destroy(__mx);
      }
#endif
  };
143
144  /**
145   * @defgroup mutexes Mutexes
146   * @ingroup concurrency
147   *
148   * Classes for mutex support.
149   * @{
150   */
151
  /// The standard, non-recursive mutex.  A thread that already holds
  /// the lock must not call lock() or try_lock() again.
  class mutex : private __mutex_base
  {
  public:
    typedef __native_type* 			native_handle_type;

#ifdef __GTHREAD_MUTEX_INIT
    // constexpr only when the native mutex is statically initializable.
    constexpr
#endif
    mutex() noexcept = default;
    ~mutex() = default;

    mutex(const mutex&) = delete;
    mutex& operator=(const mutex&) = delete;

    // Blocks until the mutex is acquired; throws std::system_error with
    // the native error code on failure.
    void
    lock()
    {
      int __e = __gthread_mutex_lock(&_M_mutex);

      // EINVAL, EAGAIN, EBUSY, EINVAL, EDEADLK(may)
      if (__e)
	__throw_system_error(__e);
    }

    // Returns true if the mutex was acquired without blocking.
    bool
    try_lock() noexcept
    {
      // XXX EINVAL, EAGAIN, EBUSY
      return !__gthread_mutex_trylock(&_M_mutex);
    }

    // Releases the mutex; errors from the native call are ignored.
    void
    unlock()
    {
      // XXX EINVAL, EAGAIN, EPERM
      __gthread_mutex_unlock(&_M_mutex);
    }

    // Exposes the underlying gthreads mutex object.
    native_handle_type
    native_handle()
    { return &_M_mutex; }
  };
195
  /// recursive_mutex: like mutex, but the owning thread may lock it
  /// multiple times, with one unlock() required per lock().
  class recursive_mutex : private __recursive_mutex_base
  {
  public:
    typedef __native_type* 			native_handle_type;

    recursive_mutex() = default;
    ~recursive_mutex() = default;

    recursive_mutex(const recursive_mutex&) = delete;
    recursive_mutex& operator=(const recursive_mutex&) = delete;

    // Blocks until the mutex is acquired (or the recursion count is
    // incremented); throws std::system_error on failure.
    void
    lock()
    {
      int __e = __gthread_recursive_mutex_lock(&_M_mutex);

      // EINVAL, EAGAIN, EBUSY, EINVAL, EDEADLK(may)
      if (__e)
	__throw_system_error(__e);
    }

    // Returns true if the mutex was acquired without blocking.
    bool
    try_lock() noexcept
    {
      // XXX EINVAL, EAGAIN, EBUSY
      return !__gthread_recursive_mutex_trylock(&_M_mutex);
    }

    // Releases one level of ownership; native errors are ignored.
    void
    unlock()
    {
      // XXX EINVAL, EAGAIN, EBUSY
      __gthread_recursive_mutex_unlock(&_M_mutex);
    }

    // Exposes the underlying gthreads recursive mutex object.
    native_handle_type
    native_handle()
    { return &_M_mutex; }
  };
236
237#if _GTHREAD_USE_MUTEX_TIMEDLOCK
  /// timed_mutex: a non-recursive mutex that also supports timed lock
  /// attempts (try_lock_for / try_lock_until).
  class timed_mutex : private __mutex_base
  {
    // Clock used to turn a relative timeout into an absolute deadline.
#ifdef _GLIBCXX_USE_CLOCK_MONOTONIC
    typedef chrono::steady_clock 	  	__clock_t;
#else
    typedef chrono::high_resolution_clock 	__clock_t;
#endif

  public:
    typedef __native_type* 		  	native_handle_type;

    timed_mutex() = default;
    ~timed_mutex() = default;

    timed_mutex(const timed_mutex&) = delete;
    timed_mutex& operator=(const timed_mutex&) = delete;

    // Blocks until the mutex is acquired; throws std::system_error on
    // failure.
    void
    lock()
    {
      int __e = __gthread_mutex_lock(&_M_mutex);

      // EINVAL, EAGAIN, EBUSY, EINVAL, EDEADLK(may)
      if (__e)
	__throw_system_error(__e);
    }

    // Returns true if the mutex was acquired without blocking.
    bool
    try_lock() noexcept
    {
      // XXX EINVAL, EAGAIN, EBUSY
      return !__gthread_mutex_trylock(&_M_mutex);
    }

    // Tries to lock, giving up after the relative timeout __rtime.
    // Dispatches on whether __rtime needs rounding up (see the private
    // overloads below).
    template <class _Rep, class _Period>
      bool
      try_lock_for(const chrono::duration<_Rep, _Period>& __rtime)
      { return __try_lock_for_impl(__rtime); }

    // Tries to lock, giving up at the absolute time __atime.
    template <class _Clock, class _Duration>
      bool
      try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime)
      {
	// Split the absolute time into whole seconds plus the
	// nanosecond remainder, the layout __gthread_time_t expects.
	chrono::time_point<_Clock, chrono::seconds> __s =
	  chrono::time_point_cast<chrono::seconds>(__atime);

	chrono::nanoseconds __ns =
	  chrono::duration_cast<chrono::nanoseconds>(__atime - __s);

	__gthread_time_t __ts = {
	  static_cast<std::time_t>(__s.time_since_epoch().count()),
	  static_cast<long>(__ns.count())
	};

	// NOTE(review): the epoch count is passed straight through, so
	// _Clock is presumably expected to share its epoch with the
	// clock __gthread_mutex_timedlock measures against — confirm
	// for user-defined clocks.
	return !__gthread_mutex_timedlock(&_M_mutex, &__ts);
      }

    // Releases the mutex; native errors are ignored.
    void
    unlock()
    {
      // XXX EINVAL, EAGAIN, EBUSY
      __gthread_mutex_unlock(&_M_mutex);
    }

    // Exposes the underlying gthreads mutex object.
    native_handle_type
    native_handle()
    { return &_M_mutex; }

  private:
    // Selected when the clock tick is at least as fine as _Period:
    // duration_cast loses nothing, so no rounding adjustment is needed.
    template<typename _Rep, typename _Period>
      typename enable_if<
	ratio_less_equal<__clock_t::period, _Period>::value, bool>::type
      __try_lock_for_impl(const chrono::duration<_Rep, _Period>& __rtime)
      {
	__clock_t::time_point __atime = __clock_t::now()
	  + chrono::duration_cast<__clock_t::duration>(__rtime);

	return try_lock_until(__atime);
      }

    // Selected when the clock tick is coarser than _Period:
    // duration_cast truncates, so the pre-increment adds one clock tick
    // to round the timeout up rather than timing out early.
    template <typename _Rep, typename _Period>
      typename enable_if<
	!ratio_less_equal<__clock_t::period, _Period>::value, bool>::type
      __try_lock_for_impl(const chrono::duration<_Rep, _Period>& __rtime)
      {
	__clock_t::time_point __atime = __clock_t::now()
	  + ++chrono::duration_cast<__clock_t::duration>(__rtime);

	return try_lock_until(__atime);
      }
  };
330
  /// recursive_timed_mutex: a recursive mutex that also supports timed
  /// lock attempts (try_lock_for / try_lock_until).
  class recursive_timed_mutex : private __recursive_mutex_base
  {
    // Clock used to turn a relative timeout into an absolute deadline.
#ifdef _GLIBCXX_USE_CLOCK_MONOTONIC
    typedef chrono::steady_clock 		__clock_t;
#else
    typedef chrono::high_resolution_clock 	__clock_t;
#endif

  public:
    typedef __native_type* 			native_handle_type;

    recursive_timed_mutex() = default;
    ~recursive_timed_mutex() = default;

    recursive_timed_mutex(const recursive_timed_mutex&) = delete;
    recursive_timed_mutex& operator=(const recursive_timed_mutex&) = delete;

    // Blocks until the mutex is acquired (or the recursion count is
    // incremented); throws std::system_error on failure.
    void
    lock()
    {
      int __e = __gthread_recursive_mutex_lock(&_M_mutex);

      // EINVAL, EAGAIN, EBUSY, EINVAL, EDEADLK(may)
      if (__e)
	__throw_system_error(__e);
    }

    // Returns true if the mutex was acquired without blocking.
    bool
    try_lock() noexcept
    {
      // XXX EINVAL, EAGAIN, EBUSY
      return !__gthread_recursive_mutex_trylock(&_M_mutex);
    }

    // Tries to lock, giving up after the relative timeout __rtime.
    template <class _Rep, class _Period>
      bool
      try_lock_for(const chrono::duration<_Rep, _Period>& __rtime)
      { return __try_lock_for_impl(__rtime); }

    // Tries to lock, giving up at the absolute time __atime.
    template <class _Clock, class _Duration>
      bool
      try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime)
      {
	// Split the absolute time into whole seconds plus the
	// nanosecond remainder, the layout __gthread_time_t expects.
	chrono::time_point<_Clock, chrono::seconds>  __s =
	  chrono::time_point_cast<chrono::seconds>(__atime);

	chrono::nanoseconds __ns =
	  chrono::duration_cast<chrono::nanoseconds>(__atime - __s);

	__gthread_time_t __ts = {
	  static_cast<std::time_t>(__s.time_since_epoch().count()),
	  static_cast<long>(__ns.count())
	};

	// NOTE(review): as in timed_mutex, _Clock is presumably
	// expected to share its epoch with the native clock — confirm
	// for user-defined clocks.
	return !__gthread_recursive_mutex_timedlock(&_M_mutex, &__ts);
      }

    // Releases one level of ownership; native errors are ignored.
    void
    unlock()
    {
      // XXX EINVAL, EAGAIN, EBUSY
      __gthread_recursive_mutex_unlock(&_M_mutex);
    }

    // Exposes the underlying gthreads recursive mutex object.
    native_handle_type
    native_handle()
    { return &_M_mutex; }

  private:
    // Selected when the clock tick is at least as fine as _Period:
    // duration_cast loses nothing, so no rounding adjustment is needed.
    template<typename _Rep, typename _Period>
      typename enable_if<
	ratio_less_equal<__clock_t::period, _Period>::value, bool>::type
      __try_lock_for_impl(const chrono::duration<_Rep, _Period>& __rtime)
      {
	__clock_t::time_point __atime = __clock_t::now()
	  + chrono::duration_cast<__clock_t::duration>(__rtime);

	return try_lock_until(__atime);
      }

    // Selected when the clock tick is coarser than _Period:
    // duration_cast truncates, so the pre-increment adds one clock tick
    // to round the timeout up rather than timing out early.
    template <typename _Rep, typename _Period>
      typename enable_if<
	!ratio_less_equal<__clock_t::period, _Period>::value, bool>::type
      __try_lock_for_impl(const chrono::duration<_Rep, _Period>& __rtime)
      {
	__clock_t::time_point __atime = __clock_t::now()
	  + ++chrono::duration_cast<__clock_t::duration>(__rtime);

	return try_lock_until(__atime);
      }
  };
423#endif
424#endif // _GLIBCXX_HAS_GTHREADS
425
  /// Tag type: do not acquire ownership of the mutex.
  struct defer_lock_t { };

  /// Tag type: try to acquire ownership of the mutex without blocking.
  struct try_to_lock_t { };

  /// Tag type: assume the calling thread has already obtained mutex
  /// ownership and manage it.
  struct adopt_lock_t { };

  /// Tag constants passed to lock_guard/unique_lock constructors to
  /// select a locking strategy.
  constexpr defer_lock_t	defer_lock { };
  constexpr try_to_lock_t	try_to_lock { };
  constexpr adopt_lock_t	adopt_lock { };
439
440  /// @brief  Scoped lock idiom.
441  // Acquire the mutex here with a constructor call, then release with
442  // the destructor call in accordance with RAII style.
443  template<typename _Mutex>
444    class lock_guard
445    {
446    public:
447      typedef _Mutex mutex_type;
448
449      explicit lock_guard(mutex_type& __m) : _M_device(__m)
450      { _M_device.lock(); }
451
452      lock_guard(mutex_type& __m, adopt_lock_t) : _M_device(__m)
453      { } // calling thread owns mutex
454
455      ~lock_guard()
456      { _M_device.unlock(); }
457
458      lock_guard(const lock_guard&) = delete;
459      lock_guard& operator=(const lock_guard&) = delete;
460
461    private:
462      mutex_type&  _M_device;
463    };
464
  /// unique_lock: a movable mutex ownership wrapper.  Unlike
  /// lock_guard it can be constructed without locking, can unlock and
  /// re-lock, and tracks whether it currently owns the mutex.
  template<typename _Mutex>
    class unique_lock
    {
    public:
      typedef _Mutex mutex_type;

      // Default: associated with no mutex, owning nothing.
      unique_lock() noexcept
      : _M_device(0), _M_owns(false)
      { }

      // Locks __m immediately (may block, may throw).
      explicit unique_lock(mutex_type& __m)
      : _M_device(&__m), _M_owns(false)
      {
	lock();
	_M_owns = true;
      }

      // Associates with __m without locking it.
      unique_lock(mutex_type& __m, defer_lock_t) noexcept
      : _M_device(&__m), _M_owns(false)
      { }

      // Attempts a non-blocking lock; owns_lock() reports the result.
      unique_lock(mutex_type& __m, try_to_lock_t)
      : _M_device(&__m), _M_owns(_M_device->try_lock())
      { }

      // Adopts a mutex the calling thread has already locked.
      unique_lock(mutex_type& __m, adopt_lock_t)
      : _M_device(&__m), _M_owns(true)
      {
	// XXX calling thread owns mutex
      }

      // Tries to lock until the absolute time __atime.
      template<typename _Clock, typename _Duration>
	unique_lock(mutex_type& __m,
		    const chrono::time_point<_Clock, _Duration>& __atime)
	: _M_device(&__m), _M_owns(_M_device->try_lock_until(__atime))
	{ }

      // Tries to lock for the relative timeout __rtime.
      template<typename _Rep, typename _Period>
	unique_lock(mutex_type& __m,
		    const chrono::duration<_Rep, _Period>& __rtime)
	: _M_device(&__m), _M_owns(_M_device->try_lock_for(__rtime))
	{ }

      // Unlocks only if this object currently owns the mutex.
      ~unique_lock()
      {
	if (_M_owns)
	  unlock();
      }

      unique_lock(const unique_lock&) = delete;
      unique_lock& operator=(const unique_lock&) = delete;

      // Move construction transfers the mutex and ownership flag,
      // leaving __u empty.
      unique_lock(unique_lock&& __u) noexcept
      : _M_device(__u._M_device), _M_owns(__u._M_owns)
      {
	__u._M_device = 0;
	__u._M_owns = false;
      }

      // Move assignment: release any held lock, then take over __u's
      // state via a temporary.  The explicit resets of __u afterwards
      // restate what the move constructor already did.
      unique_lock& operator=(unique_lock&& __u) noexcept
      {
	if(_M_owns)
	  unlock();

	unique_lock(std::move(__u)).swap(*this);

	__u._M_device = 0;
	__u._M_owns = false;

	return *this;
      }

      // Locks the associated mutex.  Throws system_error with
      // operation_not_permitted if there is no mutex, or
      // resource_deadlock_would_occur if this lock already owns it.
      void
      lock()
      {
	if (!_M_device)
	  __throw_system_error(int(errc::operation_not_permitted));
	else if (_M_owns)
	  __throw_system_error(int(errc::resource_deadlock_would_occur));
	else
	  {
	    _M_device->lock();
	    _M_owns = true;
	  }
      }

      // Non-blocking lock attempt; same error conditions as lock().
      bool
      try_lock()
      {
	if (!_M_device)
	  __throw_system_error(int(errc::operation_not_permitted));
	else if (_M_owns)
	  __throw_system_error(int(errc::resource_deadlock_would_occur));
	else
	  {
	    _M_owns = _M_device->try_lock();
	    return _M_owns;
	  }
      }

      // Timed lock attempt up to the absolute time __atime; same error
      // conditions as lock().
      template<typename _Clock, typename _Duration>
	bool
	try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime)
	{
	  if (!_M_device)
	    __throw_system_error(int(errc::operation_not_permitted));
	  else if (_M_owns)
	    __throw_system_error(int(errc::resource_deadlock_would_occur));
	  else
	    {
	      _M_owns = _M_device->try_lock_until(__atime);
	      return _M_owns;
	    }
	}

      // Timed lock attempt for the relative timeout __rtime; same error
      // conditions as lock().
      template<typename _Rep, typename _Period>
	bool
	try_lock_for(const chrono::duration<_Rep, _Period>& __rtime)
	{
	  if (!_M_device)
	    __throw_system_error(int(errc::operation_not_permitted));
	  else if (_M_owns)
	    __throw_system_error(int(errc::resource_deadlock_would_occur));
	  else
	    {
	      _M_owns = _M_device->try_lock_for(__rtime);
	      return _M_owns;
	    }
	 }

      // Unlocks the mutex.  Throws operation_not_permitted if this
      // lock does not own it.
      void
      unlock()
      {
	if (!_M_owns)
	  __throw_system_error(int(errc::operation_not_permitted));
	else if (_M_device)
	  {
	    _M_device->unlock();
	    _M_owns = false;
	  }
      }

      // Exchanges state (mutex pointer and ownership flag) with __u.
      void
      swap(unique_lock& __u) noexcept
      {
	std::swap(_M_device, __u._M_device);
	std::swap(_M_owns, __u._M_owns);
      }

      // Disassociates from the mutex without unlocking it, returning
      // the mutex pointer; the caller becomes responsible for it.
      mutex_type*
      release() noexcept
      {
	mutex_type* __ret = _M_device;
	_M_device = 0;
	_M_owns = false;
	return __ret;
      }

      // True if this lock currently owns its mutex.
      bool
      owns_lock() const noexcept
      { return _M_owns; }

      explicit operator bool() const noexcept
      { return owns_lock(); }

      // Returns the associated mutex (or null), without affecting
      // ownership.
      mutex_type*
      mutex() const noexcept
      { return _M_device; }

    private:
      mutex_type*	_M_device;  // associated mutex, may be null
      bool		_M_owns; // XXX use atomic_bool
    };
639
  /// Overload of std::swap for unique_lock objects: delegates to the
  /// member swap, exchanging mutex pointers and ownership flags.
  template<typename _Mutex>
    inline void
    swap(unique_lock<_Mutex>& __x, unique_lock<_Mutex>& __y) noexcept
    { __x.swap(__y); }
645
  // Unlocks the elements of a lock tuple from index _Idx down to 0,
  // recursing towards the terminating <-1> specialization.
  template<int _Idx>
    struct __unlock_impl
    {
      template<typename... _Lock>
	static void
	__do_unlock(tuple<_Lock&...>& __locks)
	{
	  std::get<_Idx>(__locks).unlock();
	  __unlock_impl<_Idx - 1>::__do_unlock(__locks);
	}
    };
657
  // Terminating specialization: no locks left to unlock.
  template<>
    struct __unlock_impl<-1>
    {
      template<typename... _Lock>
	static void
	__do_unlock(tuple<_Lock&...>&)
	{ }
    };
666
  // Attempts a non-blocking lock of __l, returning the result wrapped
  // in a unique_lock so failure or an exception releases nothing extra.
  template<typename _Lock>
    unique_lock<_Lock>
    __try_to_lock(_Lock& __l)
    { return unique_lock<_Lock>(__l, try_to_lock); }
671
  // Recursively try-locks the tuple elements starting at index _Idx.
  // On return, __idx == -1 means every lock was acquired and remains
  // held; otherwise __idx is the index of the first lock that failed,
  // and the unique_lock guards have already released the earlier ones.
  template<int _Idx, bool _Continue = true>
    struct __try_lock_impl
    {
      template<typename... _Lock>
	static void
	__do_try_lock(tuple<_Lock&...>& __locks, int& __idx)
	{
          // Record our position first, so __idx is meaningful even if
          // try_lock throws below.
          __idx = _Idx;
          auto __lock = __try_to_lock(std::get<_Idx>(__locks));
          if (__lock.owns_lock())
            {
              // _Idx + 2 < sizeof... is false exactly when _Idx + 1 is
              // the last element, selecting the non-recursing
              // specialization below.
              __try_lock_impl<_Idx + 1, _Idx + 2 < sizeof...(_Lock)>::
                __do_try_lock(__locks, __idx);
              if (__idx == -1)
                __lock.release();  // overall success: keep this lock held
            }
          // On failure __lock's destructor unlocks this element.
	}
    };
690
  // Specialization for the last tuple element: no further recursion.
  // Sets __idx to -1 and keeps the lock on success; leaves __idx at
  // _Idx (and lets the guard unlock) on failure.
  template<int _Idx>
    struct __try_lock_impl<_Idx, false>
    {
      template<typename... _Lock>
	static void
	__do_try_lock(tuple<_Lock&...>& __locks, int& __idx)
	{
          __idx = _Idx;
          auto __lock = __try_to_lock(std::get<_Idx>(__locks));
          if (__lock.owns_lock())
            {
              __idx = -1;
              __lock.release();
            }
	}
    };
707
708  /** @brief Generic try_lock.
709   *  @param __l1 Meets Mutex requirements (try_lock() may throw).
710   *  @param __l2 Meets Mutex requirements (try_lock() may throw).
711   *  @param __l3 Meets Mutex requirements (try_lock() may throw).
712   *  @return Returns -1 if all try_lock() calls return true. Otherwise returns
713   *          a 0-based index corresponding to the argument that returned false.
714   *  @post Either all arguments are locked, or none will be.
715   *
716   *  Sequentially calls try_lock() on each argument.
717   */
  template<typename _Lock1, typename _Lock2, typename... _Lock3>
    int
    try_lock(_Lock1& __l1, _Lock2& __l2, _Lock3&... __l3)
    {
      // __idx is assigned by __do_try_lock before any try_lock() call
      // that could throw, so it is well-defined on every path.
      int __idx;
      auto __locks = std::tie(__l1, __l2, __l3...);
      __try
      { __try_lock_impl<0>::__do_try_lock(__locks, __idx); }
      __catch(...)
      { }  // Deliberate: an exception counts as failure at index __idx;
           // the unique_lock guards have already released earlier locks.
      return __idx;
    }
730
731  /** @brief Generic lock.
732   *  @param __l1 Meets Mutex requirements (try_lock() may throw).
733   *  @param __l2 Meets Mutex requirements (try_lock() may throw).
734   *  @param __l3 Meets Mutex requirements (try_lock() may throw).
735   *  @throw An exception thrown by an argument's lock() or try_lock() member.
736   *  @post All arguments are locked.
737   *
738   *  All arguments are locked via a sequence of calls to lock(), try_lock()
739   *  and unlock().  If the call exits via an exception any locks that were
740   *  obtained will be released.
741   */
  template<typename _L1, typename _L2, typename ..._L3>
    void
    lock(_L1& __l1, _L2& __l2, _L3&... __l3)
    {
      // Block on the first mutex, then try-lock the rest without
      // blocking.  On any failure everything acquired so far is
      // released (via unique_lock destructors) and the whole sequence
      // is retried, so we never block while holding a lock — this
      // avoids deadlock, though it may spin under heavy contention.
      while (true)
        {
          unique_lock<_L1> __first(__l1);
          int __idx;
          auto __locks = std::tie(__l2, __l3...);
          // Second template argument is false when __l2 is the only
          // remaining lock, selecting the terminating specialization.
          __try_lock_impl<0, sizeof...(_L3)>::__do_try_lock(__locks, __idx);
          if (__idx == -1)
            {
              // All locks acquired: keep them locked past return.
              __first.release();
              return;
            }
        }
    }
759
760#ifdef _GLIBCXX_HAS_GTHREADS
  /// once_flag: per-call-site state used by call_once to guarantee a
  /// callable runs exactly once, even across concurrent callers.
  struct once_flag
  {
  private:
    typedef __gthread_once_t __native_type;
    // Native once-control object, statically initialized.
    __native_type  _M_once = __GTHREAD_ONCE_INIT;

  public:
    /// Constructor
    constexpr once_flag() noexcept = default;

    /// Deleted copy constructor
    once_flag(const once_flag&) = delete;
    /// Deleted assignment operator
    once_flag& operator=(const once_flag&) = delete;

    // call_once needs direct access to _M_once.
    template<typename _Callable, typename... _Args>
      friend void
      call_once(once_flag& __once, _Callable&& __f, _Args&&... __args);
  };
781
#ifdef _GLIBCXX_HAVE_TLS
  // With TLS, call_once publishes the pending callable to __once_proxy
  // through these thread-local slots (defined in the library).
  extern __thread void* __once_callable;
  extern __thread void (*__once_call)();

  // Trampoline stored in __once_call: recovers the callable's static
  // type and invokes the object that __once_callable points to.
  template<typename _Callable>
    inline void
    __once_call_impl()
    {
      (*(_Callable*)__once_callable)();
    }
#else
  // Without TLS the pending callable is handed over through a single
  // global function object, serialized by the mutex from
  // __get_once_mutex().
  extern function<void()> __once_functor;

  extern void
  __set_once_functor_lock_ptr(unique_lock<mutex>*);

  extern mutex&
  __get_once_mutex();
#endif

  // Library-internal callback passed to __gthread_once; dispatches to
  // the callable published above.
  extern "C" void __once_proxy(void);
803
  /// call_once: invoke __f(__args...) exactly once for the given
  /// once_flag, even when called concurrently from several threads.
  /// Throws system_error if the native once primitive fails.
  template<typename _Callable, typename... _Args>
    void
    call_once(once_flag& __once, _Callable&& __f, _Args&&... __args)
    {
#ifdef _GLIBCXX_HAVE_TLS
      // Publish the bound call through the thread-local slots read by
      // __once_proxy; __once_call_impl restores its static type.
      auto __bound_functor = std::__bind_simple(std::forward<_Callable>(__f),
          std::forward<_Args>(__args)...);
      __once_callable = &__bound_functor;
      __once_call = &__once_call_impl<decltype(__bound_functor)>;
#else
      // No TLS: serialize concurrent call_once invocations with the
      // global once mutex and pass the callable through the shared
      // __once_functor object.
      unique_lock<mutex> __functor_lock(__get_once_mutex());
      auto __callable = std::__bind_simple(std::forward<_Callable>(__f),
          std::forward<_Args>(__args)...);
      __once_functor = [&]() { __callable(); };
      __set_once_functor_lock_ptr(&__functor_lock);
#endif

      // Runs __once_proxy at most once per flag; returns an error code.
      int __e = __gthread_once(&(__once._M_once), &__once_proxy);

#ifndef _GLIBCXX_HAVE_TLS
      // NOTE(review): __set_once_functor_lock_ptr appears to let the
      // library take over the lock; if we still own it here, clear the
      // stored pointer — confirm against the definition in mutex.cc.
      if (__functor_lock)
        __set_once_functor_lock_ptr(0);
#endif

      if (__e)
	__throw_system_error(__e);
    }
832#endif // _GLIBCXX_HAS_GTHREADS
833
834  // @} group mutexes
835_GLIBCXX_END_NAMESPACE_VERSION
836} // namespace
837
838#endif // _GLIBCXX_USE_C99_STDINT_TR1
839
840#endif // __GXX_EXPERIMENTAL_CXX0X__
841
842#endif // _GLIBCXX_MUTEX
843