// Copyright 2017 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// -----------------------------------------------------------------------------
// File: call_once.h
// -----------------------------------------------------------------------------
//
// This header file provides an Abseil version of `std::call_once` for invoking
// a given function at most once, across all threads. This Abseil version is
// faster than the C++11 version and incorporates the C++17 argument-passing
// fix, so that (for example) non-const references may be passed to the invoked
// function.

#ifndef ABSL_BASE_CALL_ONCE_H_
#define ABSL_BASE_CALL_ONCE_H_

#include <algorithm>
#include <atomic>
#include <cstdint>
#include <type_traits>
#include <utility>

#include "absl/base/internal/invoke.h"
#include "absl/base/internal/low_level_scheduling.h"
#include "absl/base/internal/raw_logging.h"
#include "absl/base/internal/scheduling_mode.h"
#include "absl/base/internal/spinlock_wait.h"
#include "absl/base/macros.h"
#include "absl/base/optimization.h"
#include "absl/base/port.h"

namespace absl {
ABSL_NAMESPACE_BEGIN

class once_flag;

namespace base_internal {
std::atomic<uint32_t>* ControlWord(absl::once_flag* flag);
}  // namespace base_internal
// call_once()
//
// For all invocations using a given `once_flag`, invokes a given `fn` exactly
// once across all threads. The first call to `call_once()` with a particular
// `once_flag` argument (that does not throw an exception) will run the
// specified function with the provided `args`; other calls with the same
// `once_flag` argument will not run the function, but will wait
// for the provided function to finish running (if it is still running).
//
// This provides a safe, simple, and fast mechanism for one-time initialization
// in a multi-threaded process.
//
// Example:
//
// class MyInitClass {
//  public:
//  ...
//  mutable absl::once_flag once_;
//
//  MyInitClass* init() const {
//    absl::call_once(once_, &MyInitClass::Init, this);
//    return ptr_;
//  }
// };
//
template <typename Callable, typename... Args>
void call_once(absl::once_flag& flag, Callable&& fn, Args&&... args);
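
// As an informal illustration of the argument-passing behavior noted in the
// file comment (the names below are hypothetical, not part of this API), a
// non-const reference argument may be passed directly, without `std::ref`:
//
//   void InitCounter(int& counter) { counter = 42; }
//
//   int counter = 0;
//   absl::once_flag counter_once;
//
//   void EnsureCounter() {
//     absl::call_once(counter_once, InitCounter, counter);  // binds `int&`
//   }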

// once_flag
//
// Objects of this type are used to distinguish calls to `call_once()` and
// ensure the provided function is only invoked once across all threads. This
// type is not copyable or movable. However, it has a `constexpr`
// constructor, and is safe to use as a namespace-scoped global variable.
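//
// Example:
//
//   // Illustrative sketch only; `settings_once` and `EnsureSettingsLoaded()`
//   // are hypothetical names, not part of this API.
//   namespace {
//   absl::once_flag settings_once;  // constexpr-constructed; OK as a global
//
//   void EnsureSettingsLoaded() {
//     absl::call_once(settings_once, [] { /* one-time setup */ });
//   }
//   }  // namespace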
class once_flag {
 public:
  constexpr once_flag() : control_(0) {}
  once_flag(const once_flag&) = delete;
  once_flag& operator=(const once_flag&) = delete;

 private:
  friend std::atomic<uint32_t>* base_internal::ControlWord(once_flag* flag);
  std::atomic<uint32_t> control_;
};

//------------------------------------------------------------------------------
// End of public interfaces.
// Implementation details follow.
//------------------------------------------------------------------------------

namespace base_internal {

// Like call_once, but uses KERNEL_ONLY scheduling. Intended to be used to
// initialize entities used by the scheduler implementation.
template <typename Callable, typename... Args>
void LowLevelCallOnce(absl::once_flag* flag, Callable&& fn, Args&&... args);

// Disables rescheduling for as long as this object is in scope when the
// scheduling mode is non-cooperative. Has no effect for cooperative
// scheduling modes.
class SchedulingHelper {
 public:
  explicit SchedulingHelper(base_internal::SchedulingMode mode) : mode_(mode) {
    if (mode_ == base_internal::SCHEDULE_KERNEL_ONLY) {
      guard_result_ = base_internal::SchedulingGuard::DisableRescheduling();
    }
  }

  ~SchedulingHelper() {
    if (mode_ == base_internal::SCHEDULE_KERNEL_ONLY) {
      base_internal::SchedulingGuard::EnableRescheduling(guard_result_);
    }
  }

 private:
  base_internal::SchedulingMode mode_;
  bool guard_result_;
};

// Bit patterns for call_once state machine values.  Internal implementation
// detail, not for use by clients.
//
// The bit patterns are arbitrarily chosen from unlikely values, to aid in
// debugging.  However, kOnceInit must be 0, so that a zero-initialized
// once_flag will be valid for immediate use.
enum {
  kOnceInit = 0,
  kOnceRunning = 0x65C2937B,
  kOnceWaiter = 0x05A308D2,
  // A very small constant is chosen for kOnceDone so that it fits in a single
  // compare-with-immediate instruction on the most common ISAs.  This is
  // verified for x86, POWER, and ARM.
  kOnceDone = 221,    // Random Number
};
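
// Informal sketch of the resulting state machine (the transition table in
// CallOnceImpl() below is authoritative):
//
//   kOnceInit ---first caller's CAS---> kOnceRunning ----store----> kOnceDone
//   kOnceRunning --a later caller waits--> kOnceWaiter --store + wake--> kOnceDone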

template <typename Callable, typename... Args>
ABSL_ATTRIBUTE_NOINLINE
void CallOnceImpl(std::atomic<uint32_t>* control,
                  base_internal::SchedulingMode scheduling_mode, Callable&& fn,
                  Args&&... args) {
#ifndef NDEBUG
  {
    uint32_t old_control = control->load(std::memory_order_relaxed);
    if (old_control != kOnceInit &&
        old_control != kOnceRunning &&
        old_control != kOnceWaiter &&
        old_control != kOnceDone) {
      ABSL_RAW_LOG(FATAL, "Unexpected value for control word: 0x%lx",
                   static_cast<unsigned long>(old_control));  // NOLINT
    }
  }
#endif  // NDEBUG
  static const base_internal::SpinLockWaitTransition trans[] = {
      {kOnceInit, kOnceRunning, true},
      {kOnceRunning, kOnceWaiter, false},
      {kOnceDone, kOnceDone, true}};

  // Must do this before potentially modifying control word's state.
  base_internal::SchedulingHelper maybe_disable_scheduling(scheduling_mode);
  // Short-circuit the simplest case to avoid procedure call overhead.
  // The base_internal::SpinLockWait() call returns either kOnceInit or
  // kOnceDone. If it returns kOnceDone, it must have loaded the control word
  // with std::memory_order_acquire and seen a value of kOnceDone.
  uint32_t old_control = kOnceInit;
  if (control->compare_exchange_strong(old_control, kOnceRunning,
                                       std::memory_order_relaxed) ||
      base_internal::SpinLockWait(control, ABSL_ARRAYSIZE(trans), trans,
                                  scheduling_mode) == kOnceInit) {
    base_internal::Invoke(std::forward<Callable>(fn),
                          std::forward<Args>(args)...);
    // The call to SpinLockWake below is an optimization, because the waiter
    // in SpinLockWait is waiting with a short timeout. The atomic load/store
    // sequence is slightly faster than an atomic exchange:
    //   old_control = control->exchange(base_internal::kOnceDone,
    //                                   std::memory_order_release);
    // We opt for a slightly faster case when there are no waiters, in spite
    // of longer tail latency when there are waiters.
    old_control = control->load(std::memory_order_relaxed);
    control->store(base_internal::kOnceDone, std::memory_order_release);
    if (old_control == base_internal::kOnceWaiter) {
      base_internal::SpinLockWake(control, true);
    }
  }  // else *control is already kOnceDone
}

inline std::atomic<uint32_t>* ControlWord(once_flag* flag) {
  return &flag->control_;
}

template <typename Callable, typename... Args>
void LowLevelCallOnce(absl::once_flag* flag, Callable&& fn, Args&&... args) {
  std::atomic<uint32_t>* once = base_internal::ControlWord(flag);
  uint32_t s = once->load(std::memory_order_acquire);
  if (ABSL_PREDICT_FALSE(s != base_internal::kOnceDone)) {
    base_internal::CallOnceImpl(once, base_internal::SCHEDULE_KERNEL_ONLY,
                                std::forward<Callable>(fn),
                                std::forward<Args>(args)...);
  }
}

}  // namespace base_internal

template <typename Callable, typename... Args>
void call_once(absl::once_flag& flag, Callable&& fn, Args&&... args) {
  std::atomic<uint32_t>* once = base_internal::ControlWord(&flag);
  uint32_t s = once->load(std::memory_order_acquire);
  if (ABSL_PREDICT_FALSE(s != base_internal::kOnceDone)) {
    base_internal::CallOnceImpl(
        once, base_internal::SCHEDULE_COOPERATIVE_AND_KERNEL,
        std::forward<Callable>(fn), std::forward<Args>(args)...);
  }
}

ABSL_NAMESPACE_END
}  // namespace absl

#endif  // ABSL_BASE_CALL_ONCE_H_