// -*- C++ -*- header.

// Copyright (C) 2008-2018 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library.  This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.

// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
// <http://www.gnu.org/licenses/>.

/** @file include/atomic
 *  This is a Standard C++ Library header.
 */

// Based on "C++ Atomic Types and Operations" by Hans Boehm and Lawrence Crowl.
// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2427.html

#ifndef _GLIBCXX_ATOMIC
#define _GLIBCXX_ATOMIC 1

#pragma GCC system_header

#if __cplusplus < 201103L
# include <bits/c++0x_warning.h>
#else

#include <bits/atomic_base.h>
#include <bits/move.h>

namespace std _GLIBCXX_VISIBILITY(default)
{
_GLIBCXX_BEGIN_NAMESPACE_VERSION

  /**
   * @addtogroup atomics
   * @{
   */

#if __cplusplus > 201402L
# define __cpp_lib_atomic_is_always_lock_free 201603
#endif
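
  // Illustrative sketch (not part of this header's interface): user code
  // can detect the is_always_lock_free trait via the feature-test macro
  // defined above, e.g.
  //
  //   #ifdef __cpp_lib_atomic_is_always_lock_free
  //     static_assert(std::atomic<int>::is_always_lock_free,
  //                   "lock-free std::atomic<int> assumed");
  //   #endif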

  template<typename _Tp>
    struct atomic;

  /// atomic<bool>
  // NB: No operators or fetch-operations for this type.
  template<>
  struct atomic<bool>
  {
  private:
    __atomic_base<bool>	_M_base;

  public:
    atomic() noexcept = default;
    ~atomic() noexcept = default;
    atomic(const atomic&) = delete;
    atomic& operator=(const atomic&) = delete;
    atomic& operator=(const atomic&) volatile = delete;

    constexpr atomic(bool __i) noexcept : _M_base(__i) { }

    bool
    operator=(bool __i) noexcept
    { return _M_base.operator=(__i); }

    bool
    operator=(bool __i) volatile noexcept
    { return _M_base.operator=(__i); }

    operator bool() const noexcept
    { return _M_base.load(); }

    operator bool() const volatile noexcept
    { return _M_base.load(); }

    bool
    is_lock_free() const noexcept { return _M_base.is_lock_free(); }

    bool
    is_lock_free() const volatile noexcept { return _M_base.is_lock_free(); }

#if __cplusplus > 201402L
    static constexpr bool is_always_lock_free = ATOMIC_BOOL_LOCK_FREE == 2;
#endif

    void
    store(bool __i, memory_order __m = memory_order_seq_cst) noexcept
    { _M_base.store(__i, __m); }

    void
    store(bool __i, memory_order __m = memory_order_seq_cst) volatile noexcept
    { _M_base.store(__i, __m); }

    bool
    load(memory_order __m = memory_order_seq_cst) const noexcept
    { return _M_base.load(__m); }

    bool
    load(memory_order __m = memory_order_seq_cst) const volatile noexcept
    { return _M_base.load(__m); }

    bool
    exchange(bool __i, memory_order __m = memory_order_seq_cst) noexcept
    { return _M_base.exchange(__i, __m); }

    bool
    exchange(bool __i,
	     memory_order __m = memory_order_seq_cst) volatile noexcept
    { return _M_base.exchange(__i, __m); }

    bool
    compare_exchange_weak(bool& __i1, bool __i2, memory_order __m1,
			  memory_order __m2) noexcept
    { return _M_base.compare_exchange_weak(__i1, __i2, __m1, __m2); }

    bool
    compare_exchange_weak(bool& __i1, bool __i2, memory_order __m1,
			  memory_order __m2) volatile noexcept
    { return _M_base.compare_exchange_weak(__i1, __i2, __m1, __m2); }

    bool
    compare_exchange_weak(bool& __i1, bool __i2,
			  memory_order __m = memory_order_seq_cst) noexcept
    { return _M_base.compare_exchange_weak(__i1, __i2, __m); }

    bool
    compare_exchange_weak(bool& __i1, bool __i2,
		     memory_order __m = memory_order_seq_cst) volatile noexcept
    { return _M_base.compare_exchange_weak(__i1, __i2, __m); }

    bool
    compare_exchange_strong(bool& __i1, bool __i2, memory_order __m1,
			    memory_order __m2) noexcept
    { return _M_base.compare_exchange_strong(__i1, __i2, __m1, __m2); }

    bool
    compare_exchange_strong(bool& __i1, bool __i2, memory_order __m1,
			    memory_order __m2) volatile noexcept
    { return _M_base.compare_exchange_strong(__i1, __i2, __m1, __m2); }

    bool
    compare_exchange_strong(bool& __i1, bool __i2,
			    memory_order __m = memory_order_seq_cst) noexcept
    { return _M_base.compare_exchange_strong(__i1, __i2, __m); }

    bool
    compare_exchange_strong(bool& __i1, bool __i2,
		    memory_order __m = memory_order_seq_cst) volatile noexcept
    { return _M_base.compare_exchange_strong(__i1, __i2, __m); }
  };

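  // Illustrative usage sketch for atomic<bool> (assumes a C++11 program
  // that includes <atomic>; the name `ready' is an example only):
  //
  //   std::atomic<bool> ready(false);
  //   // producer thread:
  //   ready.store(true, std::memory_order_release);
  //   // consumer thread:
  //   while (!ready.load(std::memory_order_acquire))
  //     { /* spin or yield */ }
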

  /**
   *  @brief Generic atomic type, primary class template.
   *
   *  @tparam _Tp  Type to be made atomic, must be trivially copyable.
   */
  template<typename _Tp>
    struct atomic
    {
    private:
      // Align 1/2/4/8/16-byte types to at least their size.
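      // (sizeof(_Tp) & (sizeof(_Tp) - 1)) is non-zero exactly when
      // sizeof(_Tp) is not a power of two, so only power-of-two sizes up
      // to 16 bytes get this extra minimum; other types keep just their
      // natural alignment.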
      static constexpr int _S_min_alignment
	= (sizeof(_Tp) & (sizeof(_Tp) - 1)) || sizeof(_Tp) > 16
	? 0 : sizeof(_Tp);

      static constexpr int _S_alignment
        = _S_min_alignment > alignof(_Tp) ? _S_min_alignment : alignof(_Tp);

      alignas(_S_alignment) _Tp _M_i;

      static_assert(__is_trivially_copyable(_Tp),
		    "std::atomic requires a trivially copyable type");

      static_assert(sizeof(_Tp) > 0,
		    "Incomplete or zero-sized types are not supported");

    public:
      atomic() noexcept = default;
      ~atomic() noexcept = default;
      atomic(const atomic&) = delete;
      atomic& operator=(const atomic&) = delete;
      atomic& operator=(const atomic&) volatile = delete;

      constexpr atomic(_Tp __i) noexcept : _M_i(__i) { }

      operator _Tp() const noexcept
      { return load(); }

      operator _Tp() const volatile noexcept
      { return load(); }

      _Tp
      operator=(_Tp __i) noexcept
      { store(__i); return __i; }

      _Tp
      operator=(_Tp __i) volatile noexcept
      { store(__i); return __i; }

      bool
      is_lock_free() const noexcept
      {
	// Produce a fake, minimally aligned pointer.
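	// The value -__alignof(_M_i) is a multiple of __alignof(_M_i) and
	// of nothing larger, so the built-in judges lock-freedom from the
	// size and that alignment without needing the object's real address.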
	return __atomic_is_lock_free(sizeof(_M_i),
	    reinterpret_cast<void *>(-__alignof(_M_i)));
      }

      bool
      is_lock_free() const volatile noexcept
      {
	// Produce a fake, minimally aligned pointer.
	return __atomic_is_lock_free(sizeof(_M_i),
	    reinterpret_cast<void *>(-__alignof(_M_i)));
      }

#if __cplusplus > 201402L
      static constexpr bool is_always_lock_free
	= __atomic_always_lock_free(sizeof(_M_i), 0);
#endif

      void
      store(_Tp __i, memory_order __m = memory_order_seq_cst) noexcept
      { __atomic_store(std::__addressof(_M_i), std::__addressof(__i), __m); }

      void
      store(_Tp __i, memory_order __m = memory_order_seq_cst) volatile noexcept
      { __atomic_store(std::__addressof(_M_i), std::__addressof(__i), __m); }

      _Tp
      load(memory_order __m = memory_order_seq_cst) const noexcept
      {
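	// _Tp need not be default constructible, so read into suitably
	// aligned raw storage rather than into a value-initialized _Tp.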
	alignas(_Tp) unsigned char __buf[sizeof(_Tp)];
	_Tp* __ptr = reinterpret_cast<_Tp*>(__buf);
	__atomic_load(std::__addressof(_M_i), __ptr, __m);
	return *__ptr;
      }

      _Tp
      load(memory_order __m = memory_order_seq_cst) const volatile noexcept
      {
        alignas(_Tp) unsigned char __buf[sizeof(_Tp)];
	_Tp* __ptr = reinterpret_cast<_Tp*>(__buf);
	__atomic_load(std::__addressof(_M_i), __ptr, __m);
	return *__ptr;
      }

      _Tp
      exchange(_Tp __i, memory_order __m = memory_order_seq_cst) noexcept
      {
        alignas(_Tp) unsigned char __buf[sizeof(_Tp)];
	_Tp* __ptr = reinterpret_cast<_Tp*>(__buf);
	__atomic_exchange(std::__addressof(_M_i), std::__addressof(__i),
			  __ptr, __m);
	return *__ptr;
      }

      _Tp
      exchange(_Tp __i,
	       memory_order __m = memory_order_seq_cst) volatile noexcept
      {
        alignas(_Tp) unsigned char __buf[sizeof(_Tp)];
	_Tp* __ptr = reinterpret_cast<_Tp*>(__buf);
	__atomic_exchange(std::__addressof(_M_i), std::__addressof(__i),
			  __ptr, __m);
	return *__ptr;
      }

      bool
      compare_exchange_weak(_Tp& __e, _Tp __i, memory_order __s,
			    memory_order __f) noexcept
      {
	return __atomic_compare_exchange(std::__addressof(_M_i),
					 std::__addressof(__e),
					 std::__addressof(__i),
					 true, __s, __f);
      }

      bool
      compare_exchange_weak(_Tp& __e, _Tp __i, memory_order __s,
			    memory_order __f) volatile noexcept
      {
	return __atomic_compare_exchange(std::__addressof(_M_i),
					 std::__addressof(__e),
					 std::__addressof(__i),
					 true, __s, __f);
      }

      bool
      compare_exchange_weak(_Tp& __e, _Tp __i,
			    memory_order __m = memory_order_seq_cst) noexcept
      { return compare_exchange_weak(__e, __i, __m,
                                     __cmpexch_failure_order(__m)); }

      bool
      compare_exchange_weak(_Tp& __e, _Tp __i,
		     memory_order __m = memory_order_seq_cst) volatile noexcept
      { return compare_exchange_weak(__e, __i, __m,
                                     __cmpexch_failure_order(__m)); }

      bool
      compare_exchange_strong(_Tp& __e, _Tp __i, memory_order __s,
			      memory_order __f) noexcept
      {
	return __atomic_compare_exchange(std::__addressof(_M_i),
					 std::__addressof(__e),
					 std::__addressof(__i),
					 false, __s, __f);
      }

      bool
      compare_exchange_strong(_Tp& __e, _Tp __i, memory_order __s,
			      memory_order __f) volatile noexcept
      {
	return __atomic_compare_exchange(std::__addressof(_M_i),
					 std::__addressof(__e),
					 std::__addressof(__i),
					 false, __s, __f);
      }

      bool
      compare_exchange_strong(_Tp& __e, _Tp __i,
			       memory_order __m = memory_order_seq_cst) noexcept
      { return compare_exchange_strong(__e, __i, __m,
                                       __cmpexch_failure_order(__m)); }

      bool
      compare_exchange_strong(_Tp& __e, _Tp __i,
		     memory_order __m = memory_order_seq_cst) volatile noexcept
      { return compare_exchange_strong(__e, __i, __m,
                                       __cmpexch_failure_order(__m)); }
    };

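  // Illustrative usage sketch for the primary template (assumes <atomic>
  // is included; `Point' is an example trivially copyable type, not part
  // of this header):
  //
  //   struct Point { int x, y; };
  //   std::atomic<Point> p(Point{0, 0});
  //   Point expected = p.load();
  //   while (!p.compare_exchange_weak(expected,
  //                                   Point{expected.x + 1, expected.y}))
  //     { /* expected now holds the current value; retry */ }
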

  /// Partial specialization for pointer types.
  template<typename _Tp>
    struct atomic<_Tp*>
    {
      typedef _Tp* 			__pointer_type;
      typedef __atomic_base<_Tp*>	__base_type;
      __base_type			_M_b;

      atomic() noexcept = default;
      ~atomic() noexcept = default;
      atomic(const atomic&) = delete;
      atomic& operator=(const atomic&) = delete;
      atomic& operator=(const atomic&) volatile = delete;

      constexpr atomic(__pointer_type __p) noexcept : _M_b(__p) { }

      operator __pointer_type() const noexcept
      { return __pointer_type(_M_b); }

      operator __pointer_type() const volatile noexcept
      { return __pointer_type(_M_b); }

      __pointer_type
      operator=(__pointer_type __p) noexcept
      { return _M_b.operator=(__p); }

      __pointer_type
      operator=(__pointer_type __p) volatile noexcept
      { return _M_b.operator=(__p); }

      __pointer_type
      operator++(int) noexcept
      { return _M_b++; }

      __pointer_type
      operator++(int) volatile noexcept
      { return _M_b++; }

      __pointer_type
      operator--(int) noexcept
      { return _M_b--; }

      __pointer_type
      operator--(int) volatile noexcept
      { return _M_b--; }

      __pointer_type
      operator++() noexcept
      { return ++_M_b; }

      __pointer_type
      operator++() volatile noexcept
      { return ++_M_b; }

      __pointer_type
      operator--() noexcept
      { return --_M_b; }

      __pointer_type
      operator--() volatile noexcept
      { return --_M_b; }

      __pointer_type
      operator+=(ptrdiff_t __d) noexcept
      { return _M_b.operator+=(__d); }

      __pointer_type
      operator+=(ptrdiff_t __d) volatile noexcept
      { return _M_b.operator+=(__d); }

      __pointer_type
      operator-=(ptrdiff_t __d) noexcept
      { return _M_b.operator-=(__d); }

      __pointer_type
      operator-=(ptrdiff_t __d) volatile noexcept
      { return _M_b.operator-=(__d); }

      bool
      is_lock_free() const noexcept
      { return _M_b.is_lock_free(); }

      bool
      is_lock_free() const volatile noexcept
      { return _M_b.is_lock_free(); }

#if __cplusplus > 201402L
    static constexpr bool is_always_lock_free = ATOMIC_POINTER_LOCK_FREE == 2;
#endif

      void
      store(__pointer_type __p,
	    memory_order __m = memory_order_seq_cst) noexcept
      { return _M_b.store(__p, __m); }

      void
      store(__pointer_type __p,
	    memory_order __m = memory_order_seq_cst) volatile noexcept
      { return _M_b.store(__p, __m); }

      __pointer_type
      load(memory_order __m = memory_order_seq_cst) const noexcept
      { return _M_b.load(__m); }

      __pointer_type
      load(memory_order __m = memory_order_seq_cst) const volatile noexcept
      { return _M_b.load(__m); }

      __pointer_type
      exchange(__pointer_type __p,
	       memory_order __m = memory_order_seq_cst) noexcept
      { return _M_b.exchange(__p, __m); }

      __pointer_type
      exchange(__pointer_type __p,
	       memory_order __m = memory_order_seq_cst) volatile noexcept
      { return _M_b.exchange(__p, __m); }

      bool
      compare_exchange_weak(__pointer_type& __p1, __pointer_type __p2,
			    memory_order __m1, memory_order __m2) noexcept
      { return _M_b.compare_exchange_strong(__p1, __p2, __m1, __m2); }

      bool
      compare_exchange_weak(__pointer_type& __p1, __pointer_type __p2,
			    memory_order __m1,
			    memory_order __m2) volatile noexcept
      { return _M_b.compare_exchange_strong(__p1, __p2, __m1, __m2); }

      bool
      compare_exchange_weak(__pointer_type& __p1, __pointer_type __p2,
			    memory_order __m = memory_order_seq_cst) noexcept
      {
	return compare_exchange_weak(__p1, __p2, __m,
				     __cmpexch_failure_order(__m));
      }

      bool
      compare_exchange_weak(__pointer_type& __p1, __pointer_type __p2,
		    memory_order __m = memory_order_seq_cst) volatile noexcept
      {
	return compare_exchange_weak(__p1, __p2, __m,
				     __cmpexch_failure_order(__m));
      }

      bool
      compare_exchange_strong(__pointer_type& __p1, __pointer_type __p2,
			      memory_order __m1, memory_order __m2) noexcept
      { return _M_b.compare_exchange_strong(__p1, __p2, __m1, __m2); }

      bool
      compare_exchange_strong(__pointer_type& __p1, __pointer_type __p2,
			      memory_order __m1,
			      memory_order __m2) volatile noexcept
      { return _M_b.compare_exchange_strong(__p1, __p2, __m1, __m2); }

      bool
      compare_exchange_strong(__pointer_type& __p1, __pointer_type __p2,
			      memory_order __m = memory_order_seq_cst) noexcept
      {
	return _M_b.compare_exchange_strong(__p1, __p2, __m,
					    __cmpexch_failure_order(__m));
      }

      bool
      compare_exchange_strong(__pointer_type& __p1, __pointer_type __p2,
		    memory_order __m = memory_order_seq_cst) volatile noexcept
      {
	return _M_b.compare_exchange_strong(__p1, __p2, __m,
					    __cmpexch_failure_order(__m));
      }

      __pointer_type
      fetch_add(ptrdiff_t __d,
		memory_order __m = memory_order_seq_cst) noexcept
      { return _M_b.fetch_add(__d, __m); }

      __pointer_type
      fetch_add(ptrdiff_t __d,
		memory_order __m = memory_order_seq_cst) volatile noexcept
      { return _M_b.fetch_add(__d, __m); }

      __pointer_type
      fetch_sub(ptrdiff_t __d,
		memory_order __m = memory_order_seq_cst) noexcept
      { return _M_b.fetch_sub(__d, __m); }

      __pointer_type
      fetch_sub(ptrdiff_t __d,
		memory_order __m = memory_order_seq_cst) volatile noexcept
      { return _M_b.fetch_sub(__d, __m); }
    };

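  // Illustrative usage sketch for the pointer specialization (assumes
  // <atomic> is included; `buf' and `cursor' are example names only):
  //
  //   static int buf[16];
  //   std::atomic<int*> cursor(buf);
  //   int* slot = cursor.fetch_add(1);   // claim the next element atomically
  //   *slot = 42;
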

  /// Explicit specialization for char.
  template<>
    struct atomic<char> : __atomic_base<char>
    {
      typedef char 			__integral_type;
      typedef __atomic_base<char> 	__base_type;

      atomic() noexcept = default;
      ~atomic() noexcept = default;
      atomic(const atomic&) = delete;
      atomic& operator=(const atomic&) = delete;
      atomic& operator=(const atomic&) volatile = delete;

      constexpr atomic(__integral_type __i) noexcept : __base_type(__i) { }

      using __base_type::operator __integral_type;
      using __base_type::operator=;

#if __cplusplus > 201402L
    static constexpr bool is_always_lock_free = ATOMIC_CHAR_LOCK_FREE == 2;
#endif
    };

  /// Explicit specialization for signed char.
  template<>
    struct atomic<signed char> : __atomic_base<signed char>
    {
      typedef signed char 		__integral_type;
      typedef __atomic_base<signed char> 	__base_type;

      atomic() noexcept = default;
      ~atomic() noexcept = default;
      atomic(const atomic&) = delete;
      atomic& operator=(const atomic&) = delete;
      atomic& operator=(const atomic&) volatile = delete;

      constexpr atomic(__integral_type __i) noexcept : __base_type(__i) { }

      using __base_type::operator __integral_type;
      using __base_type::operator=;

#if __cplusplus > 201402L
    static constexpr bool is_always_lock_free = ATOMIC_CHAR_LOCK_FREE == 2;
#endif
    };

  /// Explicit specialization for unsigned char.
  template<>
    struct atomic<unsigned char> : __atomic_base<unsigned char>
    {
      typedef unsigned char 		__integral_type;
      typedef __atomic_base<unsigned char> 	__base_type;

      atomic() noexcept = default;
      ~atomic() noexcept = default;
      atomic(const atomic&) = delete;
      atomic& operator=(const atomic&) = delete;
      atomic& operator=(const atomic&) volatile = delete;

      constexpr atomic(__integral_type __i) noexcept : __base_type(__i) { }

      using __base_type::operator __integral_type;
      using __base_type::operator=;

#if __cplusplus > 201402L
    static constexpr bool is_always_lock_free = ATOMIC_CHAR_LOCK_FREE == 2;
#endif
    };

  /// Explicit specialization for short.
  template<>
    struct atomic<short> : __atomic_base<short>
    {
      typedef short 			__integral_type;
      typedef __atomic_base<short> 		__base_type;

      atomic() noexcept = default;
      ~atomic() noexcept = default;
      atomic(const atomic&) = delete;
      atomic& operator=(const atomic&) = delete;
      atomic& operator=(const atomic&) volatile = delete;

      constexpr atomic(__integral_type __i) noexcept : __base_type(__i) { }

      using __base_type::operator __integral_type;
      using __base_type::operator=;

#if __cplusplus > 201402L
    static constexpr bool is_always_lock_free = ATOMIC_SHORT_LOCK_FREE == 2;
#endif
    };

  /// Explicit specialization for unsigned short.
  template<>
    struct atomic<unsigned short> : __atomic_base<unsigned short>
    {
      typedef unsigned short 	      	__integral_type;
      typedef __atomic_base<unsigned short> 		__base_type;

      atomic() noexcept = default;
      ~atomic() noexcept = default;
      atomic(const atomic&) = delete;
      atomic& operator=(const atomic&) = delete;
      atomic& operator=(const atomic&) volatile = delete;

      constexpr atomic(__integral_type __i) noexcept : __base_type(__i) { }

      using __base_type::operator __integral_type;
      using __base_type::operator=;

#if __cplusplus > 201402L
    static constexpr bool is_always_lock_free = ATOMIC_SHORT_LOCK_FREE == 2;
#endif
    };

  /// Explicit specialization for int.
  template<>
    struct atomic<int> : __atomic_base<int>
    {
      typedef int 			__integral_type;
      typedef __atomic_base<int> 		__base_type;

      atomic() noexcept = default;
      ~atomic() noexcept = default;
      atomic(const atomic&) = delete;
      atomic& operator=(const atomic&) = delete;
      atomic& operator=(const atomic&) volatile = delete;

      constexpr atomic(__integral_type __i) noexcept : __base_type(__i) { }

      using __base_type::operator __integral_type;
      using __base_type::operator=;

#if __cplusplus > 201402L
    static constexpr bool is_always_lock_free = ATOMIC_INT_LOCK_FREE == 2;
#endif
    };

  /// Explicit specialization for unsigned int.
  template<>
    struct atomic<unsigned int> : __atomic_base<unsigned int>
    {
      typedef unsigned int		__integral_type;
      typedef __atomic_base<unsigned int> 	__base_type;

      atomic() noexcept = default;
      ~atomic() noexcept = default;
      atomic(const atomic&) = delete;
      atomic& operator=(const atomic&) = delete;
      atomic& operator=(const atomic&) volatile = delete;

      constexpr atomic(__integral_type __i) noexcept : __base_type(__i) { }

      using __base_type::operator __integral_type;
      using __base_type::operator=;

#if __cplusplus > 201402L
    static constexpr bool is_always_lock_free = ATOMIC_INT_LOCK_FREE == 2;
#endif
    };

  /// Explicit specialization for long.
  template<>
    struct atomic<long> : __atomic_base<long>
    {
      typedef long 			__integral_type;
      typedef __atomic_base<long> 	__base_type;

      atomic() noexcept = default;
      ~atomic() noexcept = default;
      atomic(const atomic&) = delete;
      atomic& operator=(const atomic&) = delete;
      atomic& operator=(const atomic&) volatile = delete;

      constexpr atomic(__integral_type __i) noexcept : __base_type(__i) { }

      using __base_type::operator __integral_type;
      using __base_type::operator=;

#if __cplusplus > 201402L
    static constexpr bool is_always_lock_free = ATOMIC_LONG_LOCK_FREE == 2;
#endif
    };

  /// Explicit specialization for unsigned long.
  template<>
    struct atomic<unsigned long> : __atomic_base<unsigned long>
    {
      typedef unsigned long 		__integral_type;
      typedef __atomic_base<unsigned long> 	__base_type;

      atomic() noexcept = default;
      ~atomic() noexcept = default;
      atomic(const atomic&) = delete;
      atomic& operator=(const atomic&) = delete;
      atomic& operator=(const atomic&) volatile = delete;

      constexpr atomic(__integral_type __i) noexcept : __base_type(__i) { }

      using __base_type::operator __integral_type;
      using __base_type::operator=;

#if __cplusplus > 201402L
    static constexpr bool is_always_lock_free = ATOMIC_LONG_LOCK_FREE == 2;
#endif
    };

  /// Explicit specialization for long long.
  template<>
    struct atomic<long long> : __atomic_base<long long>
    {
      typedef long long 		__integral_type;
      typedef __atomic_base<long long> 		__base_type;

      atomic() noexcept = default;
      ~atomic() noexcept = default;
      atomic(const atomic&) = delete;
      atomic& operator=(const atomic&) = delete;
      atomic& operator=(const atomic&) volatile = delete;

      constexpr atomic(__integral_type __i) noexcept : __base_type(__i) { }

      using __base_type::operator __integral_type;
      using __base_type::operator=;

#if __cplusplus > 201402L
    static constexpr bool is_always_lock_free = ATOMIC_LLONG_LOCK_FREE == 2;
#endif
    };

  /// Explicit specialization for unsigned long long.
  template<>
    struct atomic<unsigned long long> : __atomic_base<unsigned long long>
    {
      typedef unsigned long long       	__integral_type;
      typedef __atomic_base<unsigned long long> 	__base_type;

      atomic() noexcept = default;
      ~atomic() noexcept = default;
      atomic(const atomic&) = delete;
      atomic& operator=(const atomic&) = delete;
      atomic& operator=(const atomic&) volatile = delete;

      constexpr atomic(__integral_type __i) noexcept : __base_type(__i) { }

      using __base_type::operator __integral_type;
      using __base_type::operator=;

#if __cplusplus > 201402L
    static constexpr bool is_always_lock_free = ATOMIC_LLONG_LOCK_FREE == 2;
#endif
    };

  /// Explicit specialization for wchar_t.
  template<>
    struct atomic<wchar_t> : __atomic_base<wchar_t>
    {
      typedef wchar_t 			__integral_type;
      typedef __atomic_base<wchar_t> 	__base_type;

      atomic() noexcept = default;
      ~atomic() noexcept = default;
      atomic(const atomic&) = delete;
      atomic& operator=(const atomic&) = delete;
      atomic& operator=(const atomic&) volatile = delete;

      constexpr atomic(__integral_type __i) noexcept : __base_type(__i) { }

      using __base_type::operator __integral_type;
      using __base_type::operator=;

#if __cplusplus > 201402L
    static constexpr bool is_always_lock_free = ATOMIC_WCHAR_T_LOCK_FREE == 2;
#endif
    };

  /// Explicit specialization for char16_t.
  template<>
    struct atomic<char16_t> : __atomic_base<char16_t>
    {
      typedef char16_t 			__integral_type;
      typedef __atomic_base<char16_t> 	__base_type;

      atomic() noexcept = default;
      ~atomic() noexcept = default;
      atomic(const atomic&) = delete;
      atomic& operator=(const atomic&) = delete;
      atomic& operator=(const atomic&) volatile = delete;

      constexpr atomic(__integral_type __i) noexcept : __base_type(__i) { }

      using __base_type::operator __integral_type;
      using __base_type::operator=;

#if __cplusplus > 201402L
    static constexpr bool is_always_lock_free = ATOMIC_CHAR16_T_LOCK_FREE == 2;
#endif
    };

  /// Explicit specialization for char32_t.
  template<>
    struct atomic<char32_t> : __atomic_base<char32_t>
    {
      typedef char32_t 			__integral_type;
      typedef __atomic_base<char32_t> 	__base_type;

      atomic() noexcept = default;
      ~atomic() noexcept = default;
      atomic(const atomic&) = delete;
      atomic& operator=(const atomic&) = delete;
      atomic& operator=(const atomic&) volatile = delete;

      constexpr atomic(__integral_type __i) noexcept : __base_type(__i) { }

      using __base_type::operator __integral_type;
      using __base_type::operator=;

#if __cplusplus > 201402L
    static constexpr bool is_always_lock_free = ATOMIC_CHAR32_T_LOCK_FREE == 2;
#endif
    };


  /// atomic_bool
  typedef atomic<bool>			atomic_bool;

  /// atomic_char
  typedef atomic<char>			atomic_char;

  /// atomic_schar
  typedef atomic<signed char>		atomic_schar;

  /// atomic_uchar
  typedef atomic<unsigned char>		atomic_uchar;

  /// atomic_short
  typedef atomic<short>			atomic_short;

  /// atomic_ushort
  typedef atomic<unsigned short>	atomic_ushort;

  /// atomic_int
  typedef atomic<int>			atomic_int;

  /// atomic_uint
  typedef atomic<unsigned int>		atomic_uint;

  /// atomic_long
  typedef atomic<long>			atomic_long;

  /// atomic_ulong
  typedef atomic<unsigned long>		atomic_ulong;

  /// atomic_llong
  typedef atomic<long long>		atomic_llong;

  /// atomic_ullong
  typedef atomic<unsigned long long>	atomic_ullong;

  /// atomic_wchar_t
  typedef atomic<wchar_t>		atomic_wchar_t;

  /// atomic_char16_t
  typedef atomic<char16_t>		atomic_char16_t;

  /// atomic_char32_t
  typedef atomic<char32_t>		atomic_char32_t;

#ifdef _GLIBCXX_USE_C99_STDINT_TR1
  // _GLIBCXX_RESOLVE_LIB_DEFECTS
  // 2441. Exact-width atomic typedefs should be provided

  /// atomic_int8_t
  typedef atomic<int8_t>		atomic_int8_t;

  /// atomic_uint8_t
  typedef atomic<uint8_t>		atomic_uint8_t;

  /// atomic_int16_t
  typedef atomic<int16_t>		atomic_int16_t;

  /// atomic_uint16_t
  typedef atomic<uint16_t>		atomic_uint16_t;

  /// atomic_int32_t
  typedef atomic<int32_t>		atomic_int32_t;

  /// atomic_uint32_t
  typedef atomic<uint32_t>		atomic_uint32_t;

  /// atomic_int64_t
  typedef atomic<int64_t>		atomic_int64_t;

  /// atomic_uint64_t
  typedef atomic<uint64_t>		atomic_uint64_t;


  /// atomic_int_least8_t
  typedef atomic<int_least8_t>		atomic_int_least8_t;

  /// atomic_uint_least8_t
  typedef atomic<uint_least8_t>		atomic_uint_least8_t;

  /// atomic_int_least16_t
  typedef atomic<int_least16_t>		atomic_int_least16_t;

  /// atomic_uint_least16_t
  typedef atomic<uint_least16_t>	atomic_uint_least16_t;

  /// atomic_int_least32_t
  typedef atomic<int_least32_t>		atomic_int_least32_t;

  /// atomic_uint_least32_t
  typedef atomic<uint_least32_t>	atomic_uint_least32_t;

  /// atomic_int_least64_t
  typedef atomic<int_least64_t>		atomic_int_least64_t;

  /// atomic_uint_least64_t
  typedef atomic<uint_least64_t>	atomic_uint_least64_t;


  /// atomic_int_fast8_t
  typedef atomic<int_fast8_t>		atomic_int_fast8_t;

  /// atomic_uint_fast8_t
  typedef atomic<uint_fast8_t>		atomic_uint_fast8_t;

  /// atomic_int_fast16_t
  typedef atomic<int_fast16_t>		atomic_int_fast16_t;

  /// atomic_uint_fast16_t
  typedef atomic<uint_fast16_t>		atomic_uint_fast16_t;

  /// atomic_int_fast32_t
  typedef atomic<int_fast32_t>		atomic_int_fast32_t;

  /// atomic_uint_fast32_t
  typedef atomic<uint_fast32_t>		atomic_uint_fast32_t;

  /// atomic_int_fast64_t
  typedef atomic<int_fast64_t>		atomic_int_fast64_t;

  /// atomic_uint_fast64_t
  typedef atomic<uint_fast64_t>		atomic_uint_fast64_t;
#endif


  /// atomic_intptr_t
  typedef atomic<intptr_t>		atomic_intptr_t;

  /// atomic_uintptr_t
  typedef atomic<uintptr_t>		atomic_uintptr_t;

  /// atomic_size_t
  typedef atomic<size_t>		atomic_size_t;

  /// atomic_ptrdiff_t
  typedef atomic<ptrdiff_t>		atomic_ptrdiff_t;

#ifdef _GLIBCXX_USE_C99_STDINT_TR1
  /// atomic_intmax_t
  typedef atomic<intmax_t>		atomic_intmax_t;

  /// atomic_uintmax_t
  typedef atomic<uintmax_t>		atomic_uintmax_t;
#endif

  // Function definitions, atomic_flag operations.
  inline bool
  atomic_flag_test_and_set_explicit(atomic_flag* __a,
				    memory_order __m) noexcept
  { return __a->test_and_set(__m); }

  inline bool
  atomic_flag_test_and_set_explicit(volatile atomic_flag* __a,
				    memory_order __m) noexcept
  { return __a->test_and_set(__m); }

  inline void
  atomic_flag_clear_explicit(atomic_flag* __a, memory_order __m) noexcept
  { __a->clear(__m); }

  inline void
  atomic_flag_clear_explicit(volatile atomic_flag* __a,
			     memory_order __m) noexcept
  { __a->clear(__m); }

  inline bool
  atomic_flag_test_and_set(atomic_flag* __a) noexcept
  { return atomic_flag_test_and_set_explicit(__a, memory_order_seq_cst); }

  inline bool
  atomic_flag_test_and_set(volatile atomic_flag* __a) noexcept
  { return atomic_flag_test_and_set_explicit(__a, memory_order_seq_cst); }

  inline void
  atomic_flag_clear(atomic_flag* __a) noexcept
  { atomic_flag_clear_explicit(__a, memory_order_seq_cst); }

  inline void
  atomic_flag_clear(volatile atomic_flag* __a) noexcept
  { atomic_flag_clear_explicit(__a, memory_order_seq_cst); }

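  // Illustrative sketch of the atomic_flag free functions used as a simple
  // spin lock (assumes <atomic> is included; `lock' is an example name, not
  // part of this header):
  //
  //   std::atomic_flag lock = ATOMIC_FLAG_INIT;
  //   while (std::atomic_flag_test_and_set_explicit(&lock,
  //                                                 std::memory_order_acquire))
  //     { /* busy-wait */ }
  //   // ... critical section ...
  //   std::atomic_flag_clear_explicit(&lock, std::memory_order_release);
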

  // Function templates generally applicable to atomic types.
  template<typename _ITp>
    inline bool
    atomic_is_lock_free(const atomic<_ITp>* __a) noexcept
    { return __a->is_lock_free(); }

  template<typename _ITp>
    inline bool
    atomic_is_lock_free(const volatile atomic<_ITp>* __a) noexcept
    { return __a->is_lock_free(); }

  template<typename _ITp>
    inline void
    atomic_init(atomic<_ITp>* __a, _ITp __i) noexcept
    { __a->store(__i, memory_order_relaxed); }

  template<typename _ITp>
    inline void
    atomic_init(volatile atomic<_ITp>* __a, _ITp __i) noexcept
    { __a->store(__i, memory_order_relaxed); }

  template<typename _ITp>
    inline void
    atomic_store_explicit(atomic<_ITp>* __a, _ITp __i,
			  memory_order __m) noexcept
    { __a->store(__i, __m); }

  template<typename _ITp>
    inline void
    atomic_store_explicit(volatile atomic<_ITp>* __a, _ITp __i,
			  memory_order __m) noexcept
    { __a->store(__i, __m); }

  template<typename _ITp>
    inline _ITp
    atomic_load_explicit(const atomic<_ITp>* __a, memory_order __m) noexcept
    { return __a->load(__m); }

  template<typename _ITp>
    inline _ITp
    atomic_load_explicit(const volatile atomic<_ITp>* __a,
			 memory_order __m) noexcept
    { return __a->load(__m); }

  template<typename _ITp>
    inline _ITp
    atomic_exchange_explicit(atomic<_ITp>* __a, _ITp __i,
			     memory_order __m) noexcept
    { return __a->exchange(__i, __m); }

  template<typename _ITp>
    inline _ITp
    atomic_exchange_explicit(volatile atomic<_ITp>* __a, _ITp __i,
			     memory_order __m) noexcept
    { return __a->exchange(__i, __m); }

  template<typename _ITp>
    inline bool
    atomic_compare_exchange_weak_explicit(atomic<_ITp>* __a,
					  _ITp* __i1, _ITp __i2,
					  memory_order __m1,
					  memory_order __m2) noexcept
    { return __a->compare_exchange_weak(*__i1, __i2, __m1, __m2); }

  template<typename _ITp>
    inline bool
    atomic_compare_exchange_weak_explicit(volatile atomic<_ITp>* __a,
					  _ITp* __i1, _ITp __i2,
					  memory_order __m1,
					  memory_order __m2) noexcept
    { return __a->compare_exchange_weak(*__i1, __i2, __m1, __m2); }

  template<typename _ITp>
    inline bool
    atomic_compare_exchange_strong_explicit(atomic<_ITp>* __a,
					    _ITp* __i1, _ITp __i2,
					    memory_order __m1,
					    memory_order __m2) noexcept
    { return __a->compare_exchange_strong(*__i1, __i2, __m1, __m2); }

  template<typename _ITp>
    inline bool
    atomic_compare_exchange_strong_explicit(volatile atomic<_ITp>* __a,
					    _ITp* __i1, _ITp __i2,
					    memory_order __m1,
					    memory_order __m2) noexcept
    { return __a->compare_exchange_strong(*__i1, __i2, __m1, __m2); }


  template<typename _ITp>
    inline void
    atomic_store(atomic<_ITp>* __a, _ITp __i) noexcept
    { atomic_store_explicit(__a, __i, memory_order_seq_cst); }

  template<typename _ITp>
    inline void
    atomic_store(volatile atomic<_ITp>* __a, _ITp __i) noexcept
    { atomic_store_explicit(__a, __i, memory_order_seq_cst); }

  template<typename _ITp>
    inline _ITp
    atomic_load(const atomic<_ITp>* __a) noexcept
    { return atomic_load_explicit(__a, memory_order_seq_cst); }

  template<typename _ITp>
    inline _ITp
    atomic_load(const volatile atomic<_ITp>* __a) noexcept
    { return atomic_load_explicit(__a, memory_order_seq_cst); }

  template<typename _ITp>
    inline _ITp
    atomic_exchange(atomic<_ITp>* __a, _ITp __i) noexcept
    { return atomic_exchange_explicit(__a, __i, memory_order_seq_cst); }

  template<typename _ITp>
    inline _ITp
    atomic_exchange(volatile atomic<_ITp>* __a, _ITp __i) noexcept
    { return atomic_exchange_explicit(__a, __i, memory_order_seq_cst); }

  template<typename _ITp>
    inline bool
    atomic_compare_exchange_weak(atomic<_ITp>* __a,
				 _ITp* __i1, _ITp __i2) noexcept
    {
      return atomic_compare_exchange_weak_explicit(__a, __i1, __i2,
						   memory_order_seq_cst,
						   memory_order_seq_cst);
    }

  template<typename _ITp>
    inline bool
    atomic_compare_exchange_weak(volatile atomic<_ITp>* __a,
				 _ITp* __i1, _ITp __i2) noexcept
    {
      return atomic_compare_exchange_weak_explicit(__a, __i1, __i2,
						   memory_order_seq_cst,
						   memory_order_seq_cst);
    }

  template<typename _ITp>
    inline bool
    atomic_compare_exchange_strong(atomic<_ITp>* __a,
				   _ITp* __i1, _ITp __i2) noexcept
    {
      return atomic_compare_exchange_strong_explicit(__a, __i1, __i2,
						     memory_order_seq_cst,
						     memory_order_seq_cst);
    }

  template<typename _ITp>
    inline bool
    atomic_compare_exchange_strong(volatile atomic<_ITp>* __a,
				   _ITp* __i1, _ITp __i2) noexcept
    {
      return atomic_compare_exchange_strong_explicit(__a, __i1, __i2,
						     memory_order_seq_cst,
						     memory_order_seq_cst);
    }

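  // Illustrative sketch of the generic free functions above (assumes
  // <atomic> is included; `counter' is an example object):
  //
  //   std::atomic<int> counter(0);
  //   std::atomic_store(&counter, 5);
  //   int expected = 5;
  //   bool done = std::atomic_compare_exchange_strong(&counter, &expected, 6);
  //   int value = std::atomic_load(&counter);   // 6 if done, else current
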
  // Function templates for atomic_integral operations only, using
  // __atomic_base. Template argument should be constrained to
  // integral types as specified in the standard, excluding address
  // types.
  template<typename _ITp>
    inline _ITp
    atomic_fetch_add_explicit(__atomic_base<_ITp>* __a, _ITp __i,
			      memory_order __m) noexcept
    { return __a->fetch_add(__i, __m); }

  template<typename _ITp>
    inline _ITp
    atomic_fetch_add_explicit(volatile __atomic_base<_ITp>* __a, _ITp __i,
			      memory_order __m) noexcept
    { return __a->fetch_add(__i, __m); }

  template<typename _ITp>
    inline _ITp
    atomic_fetch_sub_explicit(__atomic_base<_ITp>* __a, _ITp __i,
			      memory_order __m) noexcept
    { return __a->fetch_sub(__i, __m); }

  template<typename _ITp>
    inline _ITp
    atomic_fetch_sub_explicit(volatile __atomic_base<_ITp>* __a, _ITp __i,
			      memory_order __m) noexcept
    { return __a->fetch_sub(__i, __m); }

  template<typename _ITp>
    inline _ITp
    atomic_fetch_and_explicit(__atomic_base<_ITp>* __a, _ITp __i,
			      memory_order __m) noexcept
    { return __a->fetch_and(__i, __m); }

  template<typename _ITp>
    inline _ITp
    atomic_fetch_and_explicit(volatile __atomic_base<_ITp>* __a, _ITp __i,
			      memory_order __m) noexcept
    { return __a->fetch_and(__i, __m); }

  template<typename _ITp>
    inline _ITp
    atomic_fetch_or_explicit(__atomic_base<_ITp>* __a, _ITp __i,
			     memory_order __m) noexcept
    { return __a->fetch_or(__i, __m); }

  template<typename _ITp>
    inline _ITp
    atomic_fetch_or_explicit(volatile __atomic_base<_ITp>* __a, _ITp __i,
			     memory_order __m) noexcept
    { return __a->fetch_or(__i, __m); }

  template<typename _ITp>
    inline _ITp
    atomic_fetch_xor_explicit(__atomic_base<_ITp>* __a, _ITp __i,
			      memory_order __m) noexcept
    { return __a->fetch_xor(__i, __m); }

  template<typename _ITp>
    inline _ITp
    atomic_fetch_xor_explicit(volatile __atomic_base<_ITp>* __a, _ITp __i,
			      memory_order __m) noexcept
    { return __a->fetch_xor(__i, __m); }

  template<typename _ITp>
    inline _ITp
    atomic_fetch_add(__atomic_base<_ITp>* __a, _ITp __i) noexcept
    { return atomic_fetch_add_explicit(__a, __i, memory_order_seq_cst); }

  template<typename _ITp>
    inline _ITp
    atomic_fetch_add(volatile __atomic_base<_ITp>* __a, _ITp __i) noexcept
    { return atomic_fetch_add_explicit(__a, __i, memory_order_seq_cst); }

  template<typename _ITp>
    inline _ITp
    atomic_fetch_sub(__atomic_base<_ITp>* __a, _ITp __i) noexcept
    { return atomic_fetch_sub_explicit(__a, __i, memory_order_seq_cst); }

  template<typename _ITp>
    inline _ITp
    atomic_fetch_sub(volatile __atomic_base<_ITp>* __a, _ITp __i) noexcept
    { return atomic_fetch_sub_explicit(__a, __i, memory_order_seq_cst); }

  template<typename _ITp>
    inline _ITp
    atomic_fetch_and(__atomic_base<_ITp>* __a, _ITp __i) noexcept
    { return atomic_fetch_and_explicit(__a, __i, memory_order_seq_cst); }

  template<typename _ITp>
    inline _ITp
    atomic_fetch_and(volatile __atomic_base<_ITp>* __a, _ITp __i) noexcept
    { return atomic_fetch_and_explicit(__a, __i, memory_order_seq_cst); }

  template<typename _ITp>
    inline _ITp
    atomic_fetch_or(__atomic_base<_ITp>* __a, _ITp __i) noexcept
    { return atomic_fetch_or_explicit(__a, __i, memory_order_seq_cst); }

  template<typename _ITp>
    inline _ITp
    atomic_fetch_or(volatile __atomic_base<_ITp>* __a, _ITp __i) noexcept
    { return atomic_fetch_or_explicit(__a, __i, memory_order_seq_cst); }

  template<typename _ITp>
    inline _ITp
    atomic_fetch_xor(__atomic_base<_ITp>* __a, _ITp __i) noexcept
    { return atomic_fetch_xor_explicit(__a, __i, memory_order_seq_cst); }

  template<typename _ITp>
    inline _ITp
    atomic_fetch_xor(volatile __atomic_base<_ITp>* __a, _ITp __i) noexcept
    { return atomic_fetch_xor_explicit(__a, __i, memory_order_seq_cst); }

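  // Illustrative sketch of the integral fetch operations (assumes <atomic>
  // is included; `hits' is an example object):
  //
  //   std::atomic<unsigned> hits(0);
  //   std::atomic_fetch_add(&hits, 1u);                   // hits == 1
  //   unsigned old = std::atomic_fetch_or(&hits, 0x10u);  // old == 1
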

  // Partial specializations for pointers.
  template<typename _ITp>
    inline _ITp*
    atomic_fetch_add_explicit(atomic<_ITp*>* __a, ptrdiff_t __d,
			      memory_order __m) noexcept
    { return __a->fetch_add(__d, __m); }

  template<typename _ITp>
    inline _ITp*
    atomic_fetch_add_explicit(volatile atomic<_ITp*>* __a, ptrdiff_t __d,
			      memory_order __m) noexcept
    { return __a->fetch_add(__d, __m); }

  template<typename _ITp>
    inline _ITp*
    atomic_fetch_add(volatile atomic<_ITp*>* __a, ptrdiff_t __d) noexcept
    { return __a->fetch_add(__d); }

  template<typename _ITp>
    inline _ITp*
    atomic_fetch_add(atomic<_ITp*>* __a, ptrdiff_t __d) noexcept
    { return __a->fetch_add(__d); }

  template<typename _ITp>
    inline _ITp*
    atomic_fetch_sub_explicit(volatile atomic<_ITp*>* __a,
			      ptrdiff_t __d, memory_order __m) noexcept
    { return __a->fetch_sub(__d, __m); }

  template<typename _ITp>
    inline _ITp*
    atomic_fetch_sub_explicit(atomic<_ITp*>* __a, ptrdiff_t __d,
			      memory_order __m) noexcept
    { return __a->fetch_sub(__d, __m); }

  template<typename _ITp>
    inline _ITp*
    atomic_fetch_sub(volatile atomic<_ITp*>* __a, ptrdiff_t __d) noexcept
    { return __a->fetch_sub(__d); }

  template<typename _ITp>
    inline _ITp*
    atomic_fetch_sub(atomic<_ITp*>* __a, ptrdiff_t __d) noexcept
    { return __a->fetch_sub(__d); }
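
  // Illustrative sketch of the pointer overloads above (assumes <atomic>
  // is included; `data' and `tail' are example names):
  //
  //   static double data[8];
  //   std::atomic<double*> tail(data);
  //   double* prev = std::atomic_fetch_add(&tail, 2);   // prev == data
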
  // @} group atomics

_GLIBCXX_END_NAMESPACE_VERSION
} // namespace

#endif // C++11

#endif // _GLIBCXX_ATOMIC