1 // Copyright 2016 Amanieu d'Antras
2 //
3 // Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
4 // http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
5 // http://opensource.org/licenses/MIT>, at your option. This file may not be
6 // copied, modified, or distributed except according to those terms.
7 
8 #[cfg(feature = "nightly")]
9 use std::sync::atomic::{AtomicU8, ATOMIC_U8_INIT, Ordering, fence};
10 #[cfg(feature = "nightly")]
11 type U8 = u8;
12 #[cfg(not(feature = "nightly"))]
13 use stable::{AtomicU8, ATOMIC_U8_INIT, Ordering, fence};
14 #[cfg(not(feature = "nightly"))]
15 type U8 = usize;
16 use std::mem;
17 use parking_lot_core::{self, SpinWait, DEFAULT_PARK_TOKEN, DEFAULT_UNPARK_TOKEN};
18 use util::UncheckedOptionExt;
19 
// Bit flags packed into the single atomic byte inside `Once`.
const DONE_BIT: U8 = 1; // a closure has completed successfully
const POISON_BIT: U8 = 2; // a closure panicked while running
const LOCKED_BIT: U8 = 4; // a thread is currently executing a closure
const PARKED_BIT: U8 = 8; // one or more threads are parked waiting on this `Once`
24 
/// Current state of a `Once`.
#[derive(Copy, Clone, Eq, PartialEq, Debug)]
pub enum OnceState {
    /// A closure has not been executed yet
    New,

    /// A closure was executed but panicked.
    Poisoned,

    /// A thread is currently executing a closure.
    InProgress,

    /// A closure has completed successfully.
    Done,
}
40 
41 impl OnceState {
42     /// Returns whether the associated `Once` has been poisoned.
43     ///
44     /// Once an initalization routine for a `Once` has panicked it will forever
45     /// indicate to future forced initialization routines that it is poisoned.
46     #[inline]
poisoned(&self) -> bool47     pub fn poisoned(&self) -> bool {
48         match *self {
49             OnceState::Poisoned => true,
50             _ => false,
51         }
52     }
53 
54     /// Returns whether the associated `Once` has successfullly executed a
55     /// closure.
56     #[inline]
done(&self) -> bool57     pub fn done(&self) -> bool {
58         match *self {
59             OnceState::Done => true,
60             _ => false,
61         }
62     }
63 }
64 
/// A synchronization primitive which can be used to run a one-time
/// initialization. Useful for one-time initialization for globals, FFI or
/// related functionality.
///
/// # Differences from the standard library `Once`
///
/// - Only requires 1 byte of space, instead of 1 word.
/// - Not required to be `'static`.
/// - Relaxed memory barriers in the fast path, which can significantly improve
///   performance on some architectures.
/// - Efficient handling of micro-contention using adaptive spinning.
///
/// # Examples
///
/// ```
/// use parking_lot::{Once, ONCE_INIT};
///
/// static START: Once = ONCE_INIT;
///
/// START.call_once(|| {
///     // run initialization here
/// });
/// ```
// The single byte packs the DONE/POISON/LOCKED/PARKED bit flags defined above.
pub struct Once(AtomicU8);
89 
/// Initialization value for static `Once` values.
// All bits zero: no closure has run, no thread holds the lock or is parked.
pub const ONCE_INIT: Once = Once(ATOMIC_U8_INIT);
92 
impl Once {
    /// Creates a new `Once` value.
    #[cfg(feature = "nightly")]
    #[inline]
    pub const fn new() -> Once {
        Once(AtomicU8::new(0))
    }

    /// Creates a new `Once` value.
    // Non-const fallback: `const fn` requires the nightly compiler here.
    #[cfg(not(feature = "nightly"))]
    #[inline]
    pub fn new() -> Once {
        Once(AtomicU8::new(0))
    }

    /// Returns the current state of this `Once`.
    #[inline]
    pub fn state(&self) -> OnceState {
        // Acquire so that if we observe DONE_BIT, the writes made by the
        // initialization closure are also visible to us.
        let state = self.0.load(Ordering::Acquire);
        if state & DONE_BIT != 0 {
            OnceState::Done
        } else if state & LOCKED_BIT != 0 {
            OnceState::InProgress
        } else if state & POISON_BIT != 0 {
            OnceState::Poisoned
        } else {
            OnceState::New
        }
    }

    /// Performs an initialization routine once and only once. The given closure
    /// will be executed if this is the first time `call_once` has been called,
    /// and otherwise the routine will *not* be invoked.
    ///
    /// This method will block the calling thread if another initialization
    /// routine is currently running.
    ///
    /// When this function returns, it is guaranteed that some initialization
    /// has run and completed (it may not be the closure specified). It is also
    /// guaranteed that any memory writes performed by the executed closure can
    /// be reliably observed by other threads at this point (there is a
    /// happens-before relation between the closure and code executing after the
    /// return).
    ///
    /// # Examples
    ///
    /// ```
    /// use parking_lot::{Once, ONCE_INIT};
    ///
    /// static mut VAL: usize = 0;
    /// static INIT: Once = ONCE_INIT;
    ///
    /// // Accessing a `static mut` is unsafe much of the time, but if we do so
    /// // in a synchronized fashion (e.g. write once or read all) then we're
    /// // good to go!
    /// //
    /// // This function will only call `expensive_computation` once, and will
    /// // otherwise always return the value returned from the first invocation.
    /// fn get_cached_val() -> usize {
    ///     unsafe {
    ///         INIT.call_once(|| {
    ///             VAL = expensive_computation();
    ///         });
    ///         VAL
    ///     }
    /// }
    ///
    /// fn expensive_computation() -> usize {
    ///     // ...
    /// # 2
    /// }
    /// ```
    ///
    /// # Panics
    ///
    /// The closure `f` will only be executed once if this is called
    /// concurrently amongst many threads. If that closure panics, however, then
    /// it will *poison* this `Once` instance, causing all future invocations of
    /// `call_once` to also panic.
    #[inline]
    pub fn call_once<F>(&self, f: F)
    where
        F: FnOnce(),
    {
        // Fast path: exactly DONE_BIT means a closure finished and no other
        // flags are set, so there is nothing left to do.
        if self.0.load(Ordering::Acquire) == DONE_BIT {
            return;
        }

        // Wrap the FnOnce in an Option so the slow path can take an &mut FnMut
        // (avoiding the allocation a boxed FnOnce would need). The slow path
        // guarantees the closure is invoked at most once, so the unchecked
        // unwrap of `take()` cannot observe None.
        let mut f = Some(f);
        self.call_once_slow(false, &mut |_| unsafe { f.take().unchecked_unwrap()() });
    }

    /// Performs the same function as `call_once` except ignores poisoning.
    ///
    /// If this `Once` has been poisoned (some initialization panicked) then
    /// this function will continue to attempt to call initialization functions
    /// until one of them doesn't panic.
    ///
    /// The closure `f` is yielded a structure which can be used to query the
    /// state of this `Once` (whether initialization has previously panicked or
    /// not).
    #[inline]
    pub fn call_once_force<F>(&self, f: F)
    where
        F: FnOnce(OnceState),
    {
        // Same fast path and FnOnce-to-FnMut wrapping as `call_once` above.
        if self.0.load(Ordering::Acquire) == DONE_BIT {
            return;
        }

        let mut f = Some(f);
        self.call_once_slow(true, &mut |state| unsafe {
            f.take().unchecked_unwrap()(state)
        });
    }

    // This is a non-generic function to reduce the monomorphization cost of
    // using `call_once` (this isn't exactly a trivial or small implementation).
    //
    // Additionally, this is tagged with `#[cold]` as it should indeed be cold
    // and it helps let LLVM know that calls to this function should be off the
    // fast path. Essentially, this should help generate more straight line code
    // in LLVM.
    //
    // Finally, this takes an `FnMut` instead of a `FnOnce` because there's
    // currently no way to take an `FnOnce` and call it via virtual dispatch
    // without some allocation overhead.
    //
    // `ignore_poison` distinguishes `call_once_force` (true) from `call_once`
    // (false, panics on a poisoned `Once`).
    #[cold]
    #[inline(never)]
    fn call_once_slow(&self, ignore_poison: bool, f: &mut FnMut(OnceState)) {
        let mut spinwait = SpinWait::new();
        let mut state = self.0.load(Ordering::Relaxed);
        loop {
            // If another thread called the closure, we're done
            if state & DONE_BIT != 0 {
                // An acquire fence is needed here since we didn't load the
                // state with Ordering::Acquire.
                fence(Ordering::Acquire);
                return;
            }

            // If the state has been poisoned and we aren't forcing, then panic
            if state & POISON_BIT != 0 && !ignore_poison {
                // Need the fence here as well for the same reason
                fence(Ordering::Acquire);
                panic!("Once instance has previously been poisoned");
            }

            // Grab the lock if it isn't locked, even if there is a queue on it.
            // We also clear the poison bit since we are going to try running
            // the closure again.
            if state & LOCKED_BIT == 0 {
                match self.0.compare_exchange_weak(
                    state,
                    (state | LOCKED_BIT) & !POISON_BIT,
                    Ordering::Acquire,
                    Ordering::Relaxed,
                ) {
                    Ok(_) => break,
                    Err(x) => state = x,
                }
                continue;
            }

            // If there is no queue, try spinning a few times
            if state & PARKED_BIT == 0 && spinwait.spin() {
                state = self.0.load(Ordering::Relaxed);
                continue;
            }

            // Set the parked bit
            if state & PARKED_BIT == 0 {
                if let Err(x) = self.0.compare_exchange_weak(
                    state,
                    state | PARKED_BIT,
                    Ordering::Relaxed,
                    Ordering::Relaxed,
                )
                {
                    state = x;
                    continue;
                }
            }

            // Park our thread until we are woken up by the thread that owns the
            // lock.
            unsafe {
                let addr = self as *const _ as usize;
                // Only park if the state still shows a locked `Once` with a
                // parked queue; otherwise the owner may already have finished
                // and we would miss the wakeup.
                let validate = || self.0.load(Ordering::Relaxed) == LOCKED_BIT | PARKED_BIT;
                let before_sleep = || {};
                // No timeout is passed (None below), so a timeout can't occur.
                let timed_out = |_, _| unreachable!();
                parking_lot_core::park(
                    addr,
                    validate,
                    before_sleep,
                    timed_out,
                    DEFAULT_PARK_TOKEN,
                    None,
                );
            }

            // Loop back and check if the done bit was set
            spinwait.reset();
            state = self.0.load(Ordering::Relaxed);
        }

        // Drop-based guard: if the closure below panics, unwinding runs this
        // destructor, which poisons the `Once` and releases any waiters.
        struct PanicGuard<'a>(&'a Once);
        impl<'a> Drop for PanicGuard<'a> {
            fn drop(&mut self) {
                // Mark the state as poisoned, unlock it and unpark all threads.
                let once = self.0;
                let state = once.0.swap(POISON_BIT, Ordering::Release);
                if state & PARKED_BIT != 0 {
                    unsafe {
                        let addr = once as *const _ as usize;
                        parking_lot_core::unpark_all(addr, DEFAULT_UNPARK_TOKEN);
                    }
                }
            }
        }

        // At this point we have the lock, so run the closure. Make sure we
        // properly clean up if the closure panics.
        let guard = PanicGuard(self);
        // `state` was captured before we took the lock; POISON_BIT set there
        // means a previous run panicked (only reachable when forcing).
        let once_state = if state & POISON_BIT != 0 {
            OnceState::Poisoned
        } else {
            OnceState::New
        };
        f(once_state);
        // The closure completed: disarm the poison guard.
        mem::forget(guard);

        // Now unlock the state, set the done bit and unpark all threads.
        // Release ordering publishes the closure's writes to threads that
        // subsequently observe DONE_BIT with an acquire load/fence.
        let state = self.0.swap(DONE_BIT, Ordering::Release);
        if state & PARKED_BIT != 0 {
            unsafe {
                let addr = self as *const _ as usize;
                parking_lot_core::unpark_all(addr, DEFAULT_UNPARK_TOKEN);
            }
        }
    }
}
335 
336 impl Default for Once {
337     #[inline]
default() -> Once338     fn default() -> Once {
339         Once::new()
340     }
341 }
342 
#[cfg(test)]
mod tests {
    #[cfg(feature = "nightly")]
    use std::panic;
    use std::sync::mpsc::channel;
    use std::thread;
    use {Once, ONCE_INIT};

    // The closure must run exactly once; the second `call_once` is a no-op.
    #[test]
    fn smoke_once() {
        static O: Once = ONCE_INIT;
        let mut a = 0;
        O.call_once(|| a += 1);
        assert_eq!(a, 1);
        O.call_once(|| a += 1);
        assert_eq!(a, 1);
    }

    // Many threads race on the same `Once`; exactly one closure runs and every
    // thread observes its side effect afterwards.
    #[test]
    fn stampede_once() {
        static O: Once = ONCE_INIT;
        static mut RUN: bool = false;

        let (tx, rx) = channel();
        for _ in 0..10 {
            let tx = tx.clone();
            thread::spawn(move || {
                // Yield a few times to increase the chance of real contention.
                for _ in 0..4 {
                    thread::yield_now()
                }
                unsafe {
                    O.call_once(|| {
                        assert!(!RUN);
                        RUN = true;
                    });
                    assert!(RUN);
                }
                tx.send(()).unwrap();
            });
        }

        unsafe {
            O.call_once(|| {
                assert!(!RUN);
                RUN = true;
            });
            assert!(RUN);
        }

        // Wait for all 10 spawned threads to finish.
        for _ in 0..10 {
            rx.recv().unwrap();
        }
    }

    #[cfg(feature = "nightly")]
    #[test]
    fn poison_bad() {
        static O: Once = ONCE_INIT;

        // poison the once
        let t = panic::catch_unwind(|| { O.call_once(|| panic!()); });
        assert!(t.is_err());

        // poisoning propagates
        let t = panic::catch_unwind(|| { O.call_once(|| {}); });
        assert!(t.is_err());

        // we can subvert poisoning, however
        let mut called = false;
        O.call_once_force(|p| {
            called = true;
            assert!(p.poisoned())
        });
        assert!(called);

        // once any success happens, we stop propagating the poison
        O.call_once(|| {});
    }

    #[cfg(feature = "nightly")]
    #[test]
    fn wait_for_force_to_finish() {
        static O: Once = ONCE_INIT;

        // poison the once
        let t = panic::catch_unwind(|| { O.call_once(|| panic!()); });
        assert!(t.is_err());

        // make sure someone's waiting inside the once via a force
        // (tx1/rx1 signals "inside the closure"; tx2/rx2 releases it)
        let (tx1, rx1) = channel();
        let (tx2, rx2) = channel();
        let t1 = thread::spawn(move || {
            O.call_once_force(|p| {
                assert!(p.poisoned());
                tx1.send(()).unwrap();
                rx2.recv().unwrap();
            });
        });

        rx1.recv().unwrap();

        // put another waiter on the once; its closure must never run because
        // the forced closure above will complete successfully.
        let t2 = thread::spawn(|| {
            let mut called = false;
            O.call_once(|| { called = true; });
            assert!(!called);
        });

        tx2.send(()).unwrap();

        assert!(t1.join().is_ok());
        assert!(t2.join().is_ok());

    }
}
458