1 /**
2 * The atomic module provides basic support for lock-free
3 * concurrent programming.
4 *
5 * Copyright: Copyright Sean Kelly 2005 - 2016.
6 * License: $(LINK2 http://www.boost.org/LICENSE_1_0.txt, Boost License 1.0)
7 * Authors: Sean Kelly, Alex Rønne Petersen, Manu Evans
8 * Source: $(DRUNTIMESRC core/_atomic.d)
9 */
10
11 module core.atomic;
12
13 import core.internal.atomic;
14 import core.internal.attributes : betterC;
15 import core.internal.traits : hasUnsharedIndirections;
16
17 /**
18 * Specifies the memory ordering semantics of an atomic operation.
19 *
20 * See_Also:
21 * $(HTTP en.cppreference.com/w/cpp/atomic/memory_order)
22 */
enum MemoryOrder
{
    // NOTE: The numeric values line up with C++11 `std::memory_order`
    // (relaxed=0, acquire=2, release=3, acq_rel=4, seq_cst=5); value 1
    // (C++ `memory_order_consume`) has no counterpart here.
    /**
     * Not sequenced.
     * Corresponds to $(LINK2 https://llvm.org/docs/Atomics.html#monotonic, LLVM AtomicOrdering.Monotonic)
     * and C++11/C11 `memory_order_relaxed`.
     */
    raw = 0,
    /**
     * Hoist-load + hoist-store barrier.
     * Corresponds to $(LINK2 https://llvm.org/docs/Atomics.html#acquire, LLVM AtomicOrdering.Acquire)
     * and C++11/C11 `memory_order_acquire`.
     */
    acq = 2,
    /**
     * Sink-load + sink-store barrier.
     * Corresponds to $(LINK2 https://llvm.org/docs/Atomics.html#release, LLVM AtomicOrdering.Release)
     * and C++11/C11 `memory_order_release`.
     */
    rel = 3,
    /**
     * Acquire + release barrier.
     * Corresponds to $(LINK2 https://llvm.org/docs/Atomics.html#acquirerelease, LLVM AtomicOrdering.AcquireRelease)
     * and C++11/C11 `memory_order_acq_rel`.
     */
    acq_rel = 4,
    /**
     * Fully sequenced (acquire + release). Corresponds to
     * $(LINK2 https://llvm.org/docs/Atomics.html#sequentiallyconsistent, LLVM AtomicOrdering.SequentiallyConsistent)
     * and C++11/C11 `memory_order_seq_cst`.
     */
    seq = 5,
}
56
57 /**
58 * Loads 'val' from memory and returns it. The memory barrier specified
59 * by 'ms' is applied to the operation, which is fully sequenced by
60 * default. Valid memory orders are MemoryOrder.raw, MemoryOrder.acq,
61 * and MemoryOrder.seq.
62 *
63 * Params:
64 * val = The target variable.
65 *
66 * Returns:
67 * The value of 'val'.
68 */
T atomicLoad(MemoryOrder ms = MemoryOrder.seq, T)(ref return scope const T val) pure nothrow @nogc @trusted
    if (!is(T == shared U, U) && !is(T == shared inout U, U) && !is(T == shared const U, U))
{
    static if (__traits(isFloating, T))
    {
        // Floating-point values are loaded through a same-size unsigned
        // integer view (IntForFloat) and the bits reinterpreted back,
        // since the underlying atomic primitives operate on integers.
        alias IntTy = IntForFloat!T;
        IntTy r = core.internal.atomic.atomicLoad!ms(cast(IntTy*)&val);
        return *cast(T*)&r;
    }
    else
        // Cast strips `const` only; the referenced memory is not mutated.
        return core.internal.atomic.atomicLoad!ms(cast(T*)&val);
}
81
82 /// Ditto
T atomicLoad(MemoryOrder ms = MemoryOrder.seq, T)(ref return scope shared const T val) pure nothrow @nogc @trusted
    if (!hasUnsharedIndirections!T)
{
    // The template constraint already rejects any T with unshared
    // indirections, so the former in-body `static assert` (and its local
    // import of hasUnsharedIndirections) duplicated that condition and
    // could never fire; both have been removed as dead code.
    // Forward to the unshared overload: the copy produced by an atomic
    // load is thread-local, so stripping head-`shared` is safe here.
    return atomicLoad!ms(*cast(T*)&val);
}
91
92 /// Ditto
TailShared!T atomicLoad(MemoryOrder ms = MemoryOrder.seq, T)(ref shared const T val) pure nothrow @nogc @trusted
    if (hasUnsharedIndirections!T)
{
    // HACK: DEPRECATE THIS FUNCTION, IT IS INVALID TO DO ATOMIC LOAD OF SHARED CLASS
    // this is here because code exists in the wild that does this...
    // The returned copy is head-unshared (TailShared!T); its indirections
    // remain `shared`, which is why the blanket cast is only a "tolerated"
    // escape hatch rather than a sound operation.
    return core.internal.atomic.atomicLoad!ms(cast(TailShared!T*)&val);
}
101
102 /**
103 * Writes 'newval' into 'val'. The memory barrier specified by 'ms' is
104 * applied to the operation, which is fully sequenced by default.
105 * Valid memory orders are MemoryOrder.raw, MemoryOrder.rel, and
106 * MemoryOrder.seq.
107 *
108 * Params:
109 * val = The target variable.
110 * newval = The value to store.
111 */
void atomicStore(MemoryOrder ms = MemoryOrder.seq, T, V)(ref T val, V newval) pure nothrow @nogc @trusted
    if (!is(T == shared) && !is(V == shared))
{
    import core.internal.traits : hasElaborateCopyConstructor;
    static assert (!hasElaborateCopyConstructor!T, "`T` may not have an elaborate copy: atomic operations override regular copying semantics.");

    // Perform any implicit V -> T conversion up front so a single,
    // correctly-typed value is handed to the intrinsic.
    T converted = newval;

    static if (!__traits(isFloating, T))
    {
        core.internal.atomic.atomicStore!ms(&val, converted);
    }
    else
    {
        // Floating-point stores go through a same-size integer view of
        // the bit pattern.
        alias Bits = IntForFloat!T;
        core.internal.atomic.atomicStore!ms(cast(Bits*)&val, *cast(Bits*)&converted);
    }
}
129
130 /// Ditto
void atomicStore(MemoryOrder ms = MemoryOrder.seq, T, V)(ref shared T val, V newval) pure nothrow @nogc @trusted
    if (!is(T == class))
{
    // Determine the head-unshared view of the argument so the unshared
    // overload can be reused; if the argument is not `shared`, copying it
    // into shared storage must not smuggle in unshared indirections.
    static if (is (V == shared U, U))
        alias Thunk = U;
    else
    {
        import core.internal.traits : hasUnsharedIndirections;
        static assert(!hasUnsharedIndirections!V, "Copying argument `" ~ V.stringof ~ " newval` to `" ~ shared(T).stringof ~ " here` would violate shared.");
        alias Thunk = V;
    }
    atomicStore!ms(*cast(T*)&val, *cast(Thunk*)&newval);
}
144
145 /// Ditto
void atomicStore(MemoryOrder ms = MemoryOrder.seq, T, V)(ref shared T val, shared V newval) pure nothrow @nogc @trusted
    if (is(T == class))
{
    static assert (is (V : T), "Can't assign `newval` of type `shared " ~ V.stringof ~ "` to `shared " ~ T.stringof ~ "`.");

    // A class reference is stored as a plain pointer-sized value; the
    // casts strip `shared` for the intrinsic only.
    core.internal.atomic.atomicStore!ms(cast(T*)&val, cast(V)newval);
}
153
154 /**
155 * Atomically adds `mod` to the value referenced by `val` and returns the value `val` held previously.
156 * This operation is both lock-free and atomic.
157 *
158 * Params:
159 * val = Reference to the value to modify.
160 * mod = The value to add.
161 *
162 * Returns:
163 * The value held previously by `val`.
164 */
T atomicFetchAdd(MemoryOrder ms = MemoryOrder.seq, T)(ref return scope T val, size_t mod) pure nothrow @nogc @trusted
    if ((__traits(isIntegral, T) || is(T == U*, U)) && !is(T == shared))
in (atomicValueIsProperlyAligned(val))
{
    static if (is(T == U*, U))
        // Pointer targets get pointer arithmetic: the addend is scaled by
        // the pointee size, and the add is done on the pointer's bits.
        return cast(T)core.internal.atomic.atomicFetchAdd!ms(cast(size_t*)&val, mod * U.sizeof);
    else
        return core.internal.atomic.atomicFetchAdd!ms(&val, cast(T)mod);
}
174
175 /// Ditto
T atomicFetchAdd(MemoryOrder ms = MemoryOrder.seq, T)(ref return scope shared T val, size_t mod) pure nothrow @nogc @trusted
    if (__traits(isIntegral, T) || is(T == U*, U))
in (atomicValueIsProperlyAligned(val))
{
    // Head-`shared` forwarder: the atomic intrinsic itself provides the
    // synchronization, so stripping the qualifier for the call is safe.
    return atomicFetchAdd!ms(*cast(T*)&val, mod);
}
182
183 /**
184 * Atomically subtracts `mod` from the value referenced by `val` and returns the value `val` held previously.
185 * This operation is both lock-free and atomic.
186 *
187 * Params:
188 * val = Reference to the value to modify.
189 * mod = The value to subtract.
190 *
191 * Returns:
192 * The value held previously by `val`.
193 */
T atomicFetchSub(MemoryOrder ms = MemoryOrder.seq, T)(ref return scope T val, size_t mod) pure nothrow @nogc @trusted
    if ((__traits(isIntegral, T) || is(T == U*, U)) && !is(T == shared))
in (atomicValueIsProperlyAligned(val))
{
    static if (is(T == U*, U))
        // Pointer targets: scale the subtrahend by the pointee size
        // (mirrors atomicFetchAdd above).
        return cast(T)core.internal.atomic.atomicFetchSub!ms(cast(size_t*)&val, mod * U.sizeof);
    else
        return core.internal.atomic.atomicFetchSub!ms(&val, cast(T)mod);
}
203
204 /// Ditto
T atomicFetchSub(MemoryOrder ms = MemoryOrder.seq, T)(ref return scope shared T val, size_t mod) pure nothrow @nogc @trusted
    if (__traits(isIntegral, T) || is(T == U*, U))
in (atomicValueIsProperlyAligned(val))
{
    // Head-`shared` forwarder; see the atomicFetchAdd overload above.
    return atomicFetchSub!ms(*cast(T*)&val, mod);
}
211
212 /**
213 * Exchange `exchangeWith` with the memory referenced by `here`.
214 * This operation is both lock-free and atomic.
215 *
216 * Params:
217 * here = The address of the destination variable.
218 * exchangeWith = The value to exchange.
219 *
220 * Returns:
221 * The value held previously by `here`.
222 */
T atomicExchange(MemoryOrder ms = MemoryOrder.seq,T,V)(T* here, V exchangeWith) pure nothrow @nogc @trusted
    if (!is(T == shared) && !is(V == shared))
in (atomicPtrIsProperlyAligned(here), "Argument `here` is not properly aligned")
{
    // resolve implicit conversions
    T arg = exchangeWith;

    static if (__traits(isFloating, T))
    {
        // Exchange the float's bit pattern through a same-size integer.
        alias IntTy = IntForFloat!T;
        IntTy r = core.internal.atomic.atomicExchange!ms(cast(IntTy*)here, *cast(IntTy*)&arg);
        // FIX: reinterpret through `T*`, not `shared(T)*` — this overload
        // is constrained to unshared T, so the former `shared` qualifier
        // was spurious (it only compiled because scalar shared(T) -> T
        // converts implicitly).
        return *cast(T*)&r;
    }
    else
        return core.internal.atomic.atomicExchange!ms(here, arg);
}
239
240 /// Ditto
TailShared!T atomicExchange(MemoryOrder ms = MemoryOrder.seq,T,V)(shared(T)* here, V exchangeWith) pure nothrow @nogc @trusted
    if (!is(T == class) && !is(T == interface))
in (atomicPtrIsProperlyAligned(here), "Argument `here` is not properly aligned")
{
    // Strip head-`shared` from the argument so the unshared overload can
    // be reused; copying in unshared indirections is rejected at compile
    // time.
    static if (is (V == shared U, U))
        alias Thunk = U;
    else
    {
        import core.internal.traits : hasUnsharedIndirections;
        static assert(!hasUnsharedIndirections!V, "Copying `exchangeWith` of type `" ~ V.stringof ~ "` to `" ~ shared(T).stringof ~ "` would violate shared.");
        alias Thunk = V;
    }
    return atomicExchange!ms(cast(T*)here, *cast(Thunk*)&exchangeWith);
}
255
256 /// Ditto
shared(T) atomicExchange(MemoryOrder ms = MemoryOrder.seq,T,V)(shared(T)* here, shared(V) exchangeWith) pure nothrow @nogc @trusted
    if (is(T == class) || is(T == interface))
in (atomicPtrIsProperlyAligned(here), "Argument `here` is not properly aligned")
{
    static assert (is (V : T), "Can't assign `exchangeWith` of type `" ~ shared(V).stringof ~ "` to `" ~ shared(T).stringof ~ "`.");

    // Class/interface references are swapped as plain pointers; `shared`
    // is re-applied to the returned previous reference.
    return cast(shared)core.internal.atomic.atomicExchange!ms(cast(T*)here, cast(V)exchangeWith);
}
265
266 /**
267 * Performs either compare-and-set or compare-and-swap (or exchange).
268 *
269 * There are two categories of overloads in this template:
270 * The first category does a simple compare-and-set.
271 * The comparison value (`ifThis`) is treated as an rvalue.
272 *
273 * The second category does a compare-and-swap (a.k.a. compare-and-exchange),
274 * and expects `ifThis` to be a pointer type, where the previous value
275 * of `here` will be written.
276 *
277 * This operation is both lock-free and atomic.
278 *
279 * Params:
280 * here = The address of the destination variable.
281 * writeThis = The value to store.
282 * ifThis = The comparison value.
283 *
284 * Returns:
285 * true if the store occurred, false if not.
286 */
template cas(MemoryOrder succ = MemoryOrder.seq, MemoryOrder fail = MemoryOrder.seq)
{
    /// Compare-and-set for non-shared values
    bool cas(T, V1, V2)(T* here, V1 ifThis, V2 writeThis) pure nothrow @nogc @trusted
        if (!is(T == shared) && is(T : V1))
    in (atomicPtrIsProperlyAligned(here), "Argument `here` is not properly aligned")
    {
        // resolve implicit conversions
        T arg1 = ifThis;
        T arg2 = writeThis;

        static if (__traits(isFloating, T))
        {
            // Compare/swap floats through their integer bit patterns.
            alias IntTy = IntForFloat!T;
            return atomicCompareExchangeStrongNoResult!(succ, fail)(
                cast(IntTy*)here, *cast(IntTy*)&arg1, *cast(IntTy*)&arg2);
        }
        else
            return atomicCompareExchangeStrongNoResult!(succ, fail)(here, arg1, arg2);
    }

    /// Compare-and-set for shared value type
    bool cas(T, V1, V2)(shared(T)* here, V1 ifThis, V2 writeThis) pure nothrow @nogc @trusted
        if (!is(T == class) && (is(T : V1) || is(shared T : V1)))
    in (atomicPtrIsProperlyAligned(here), "Argument `here` is not properly aligned")
    {
        // Strip head-`shared` from the argument types to reuse the
        // unshared overload; unshared indirections may not be written in.
        static if (is (V1 == shared U1, U1))
            alias Thunk1 = U1;
        else
            alias Thunk1 = V1;
        static if (is (V2 == shared U2, U2))
            alias Thunk2 = U2;
        else
        {
            import core.internal.traits : hasUnsharedIndirections;
            static assert(!hasUnsharedIndirections!V2,
                          "Copying `" ~ V2.stringof ~ "* writeThis` to `" ~
                          shared(T).stringof ~ "* here` would violate shared.");
            alias Thunk2 = V2;
        }
        return cas(cast(T*)here, *cast(Thunk1*)&ifThis, *cast(Thunk2*)&writeThis);
    }

    /// Compare-and-set for `shared` reference type (`class`)
    bool cas(T, V1, V2)(shared(T)* here, shared(V1) ifThis, shared(V2) writeThis)
        pure nothrow @nogc @trusted
        if (is(T == class))
    in (atomicPtrIsProperlyAligned(here), "Argument `here` is not properly aligned")
    {
        // Class references compare-and-swap as plain pointers.
        return atomicCompareExchangeStrongNoResult!(succ, fail)(
            cast(T*)here, cast(V1)ifThis, cast(V2)writeThis);
    }

    /// Compare-and-exchange for non-`shared` types
    bool cas(T, V)(T* here, T* ifThis, V writeThis) pure nothrow @nogc @trusted
        if (!is(T == shared) && !is(V == shared))
    in (atomicPtrIsProperlyAligned(here), "Argument `here` is not properly aligned")
    {
        // resolve implicit conversions
        T arg1 = writeThis;

        static if (__traits(isFloating, T))
        {
            alias IntTy = IntForFloat!T;
            // FIX: reinterpret the CONVERTED value `arg1`, not the raw
            // argument `writeThis`. When V != T (e.g. a double or int
            // passed for a float target), reading V's bytes as IntTy
            // would swap in a garbage bit pattern.
            return atomicCompareExchangeStrong!(succ, fail)(
                cast(IntTy*)here, cast(IntTy*)ifThis, *cast(IntTy*)&arg1);
        }
        else
            return atomicCompareExchangeStrong!(succ, fail)(here, ifThis, arg1);
    }

    /// Compare and exchange for mixed-`shared`ness types
    bool cas(T, V1, V2)(shared(T)* here, V1* ifThis, V2 writeThis) pure nothrow @nogc @trusted
        if (!is(T == class) && (is(T : V1) || is(shared T : V1)))
    in (atomicPtrIsProperlyAligned(here), "Argument `here` is not properly aligned")
    {
        // `ifThis` receives the previous value of `here` on failure, so
        // copying OUT to an unshared V1 is checked as well as copying IN.
        static if (is (V1 == shared U1, U1))
            alias Thunk1 = U1;
        else
        {
            import core.internal.traits : hasUnsharedIndirections;
            static assert(!hasUnsharedIndirections!V1,
                          "Copying `" ~ shared(T).stringof ~ "* here` to `" ~
                          V1.stringof ~ "* ifThis` would violate shared.");
            alias Thunk1 = V1;
        }
        static if (is (V2 == shared U2, U2))
            alias Thunk2 = U2;
        else
        {
            import core.internal.traits : hasUnsharedIndirections;
            static assert(!hasUnsharedIndirections!V2,
                          "Copying `" ~ V2.stringof ~ "* writeThis` to `" ~
                          shared(T).stringof ~ "* here` would violate shared.");
            alias Thunk2 = V2;
        }
        static assert (is(T : Thunk1),
            "Mismatching types for `here` and `ifThis`: `" ~
            shared(T).stringof ~ "` and `" ~ V1.stringof ~ "`.");
        return cas(cast(T*)here, cast(Thunk1*)ifThis, *cast(Thunk2*)&writeThis);
    }

    /// Compare-and-exchange for `class`
    bool cas(T, V)(shared(T)* here, shared(T)* ifThis, shared(V) writeThis)
        pure nothrow @nogc @trusted
        if (is(T == class))
    in (atomicPtrIsProperlyAligned(here), "Argument `here` is not properly aligned")
    {
        return atomicCompareExchangeStrong!(succ, fail)(
            cast(T*)here, cast(T*)ifThis, cast(V)writeThis);
    }
}
399
400 /**
401 * Stores 'writeThis' to the memory referenced by 'here' if the value
402 * referenced by 'here' is equal to 'ifThis'.
403 * The 'weak' version of cas may spuriously fail. It is recommended to
404 * use `casWeak` only when `cas` would be used in a loop.
405 * This operation is both
406 * lock-free and atomic.
407 *
408 * Params:
409 * here = The address of the destination variable.
410 * writeThis = The value to store.
411 * ifThis = The comparison value.
412 *
413 * Returns:
414 * true if the store occurred, false if not.
415 */
bool casWeak(MemoryOrder succ = MemoryOrder.seq,MemoryOrder fail = MemoryOrder.seq,T,V1,V2)(T* here, V1 ifThis, V2 writeThis) pure nothrow @nogc @trusted
    if (!is(T == shared) && is(T : V1))
in (atomicPtrIsProperlyAligned(here), "Argument `here` is not properly aligned")
{
    // resolve implicit conversions
    T arg1 = ifThis;
    T arg2 = writeThis;

    static if (__traits(isFloating, T))
    {
        // Compare/swap floats through their integer bit patterns.
        alias IntTy = IntForFloat!T;
        return atomicCompareExchangeWeakNoResult!(succ, fail)(cast(IntTy*)here, *cast(IntTy*)&arg1, *cast(IntTy*)&arg2);
    }
    else
        return atomicCompareExchangeWeakNoResult!(succ, fail)(here, arg1, arg2);
}
432
433 /// Ditto
bool casWeak(MemoryOrder succ = MemoryOrder.seq,MemoryOrder fail = MemoryOrder.seq,T,V1,V2)(shared(T)* here, V1 ifThis, V2 writeThis) pure nothrow @nogc @trusted
    if (!is(T == class) && (is(T : V1) || is(shared T : V1)))
in (atomicPtrIsProperlyAligned(here), "Argument `here` is not properly aligned")
{
    // Strip head-`shared` from the argument types so the unshared
    // overload can be reused; writing in unshared indirections is
    // rejected at compile time.
    static if (is (V1 == shared U1, U1))
        alias Thunk1 = U1;
    else
        alias Thunk1 = V1;
    static if (is (V2 == shared U2, U2))
        alias Thunk2 = U2;
    else
    {
        import core.internal.traits : hasUnsharedIndirections;
        static assert(!hasUnsharedIndirections!V2, "Copying `" ~ V2.stringof ~ "* writeThis` to `" ~ shared(T).stringof ~ "* here` would violate shared.");
        alias Thunk2 = V2;
    }
    return casWeak!(succ, fail)(cast(T*)here, *cast(Thunk1*)&ifThis, *cast(Thunk2*)&writeThis);
}
452
453 /// Ditto
bool casWeak(MemoryOrder succ = MemoryOrder.seq,MemoryOrder fail = MemoryOrder.seq,T,V1,V2)(shared(T)* here, shared(V1) ifThis, shared(V2) writeThis) pure nothrow @nogc @trusted
    if (is(T == class))
in (atomicPtrIsProperlyAligned(here), "Argument `here` is not properly aligned")
{
    // Class references compare-and-swap as plain pointers.
    return atomicCompareExchangeWeakNoResult!(succ, fail)(cast(T*)here, cast(V1)ifThis, cast(V2)writeThis);
}
460
461 /**
462 * Stores 'writeThis' to the memory referenced by 'here' if the value
463 * referenced by 'here' is equal to the value referenced by 'ifThis'.
464 * The prior value referenced by 'here' is written to `ifThis` and
465 * returned to the user.
466 * The 'weak' version of cas may spuriously fail. It is recommended to
467 * use `casWeak` only when `cas` would be used in a loop.
468 * This operation is both lock-free and atomic.
469 *
470 * Params:
471 * here = The address of the destination variable.
472 * writeThis = The value to store.
473 * ifThis = The address of the value to compare, and receives the prior value of `here` as output.
474 *
475 * Returns:
476 * true if the store occurred, false if not.
477 */
bool casWeak(MemoryOrder succ = MemoryOrder.seq,MemoryOrder fail = MemoryOrder.seq,T,V)(T* here, T* ifThis, V writeThis) pure nothrow @nogc @trusted
    if (!is(T == shared S, S) && !is(V == shared U, U))
in (atomicPtrIsProperlyAligned(here), "Argument `here` is not properly aligned")
{
    // resolve implicit conversions
    T arg1 = writeThis;

    static if (__traits(isFloating, T))
    {
        alias IntTy = IntForFloat!T;
        // FIX: reinterpret the CONVERTED value `arg1`, not the raw
        // argument `writeThis`. When V != T, reading V's bytes as IntTy
        // would write the wrong bit pattern (mirrors the fix in `cas`).
        return atomicCompareExchangeWeak!(succ, fail)(cast(IntTy*)here, cast(IntTy*)ifThis, *cast(IntTy*)&arg1);
    }
    else
        return atomicCompareExchangeWeak!(succ, fail)(here, ifThis, arg1);
}
493
494 /// Ditto
bool casWeak(MemoryOrder succ = MemoryOrder.seq,MemoryOrder fail = MemoryOrder.seq,T,V1,V2)(shared(T)* here, V1* ifThis, V2 writeThis) pure nothrow @nogc @trusted
    if (!is(T == class) && (is(T : V1) || is(shared T : V1)))
in (atomicPtrIsProperlyAligned(here), "Argument `here` is not properly aligned")
{
    // `ifThis` receives the previous value of `here` on failure, so both
    // directions of copying are checked against `shared` violations.
    static if (is (V1 == shared U1, U1))
        alias Thunk1 = U1;
    else
    {
        import core.internal.traits : hasUnsharedIndirections;
        static assert(!hasUnsharedIndirections!V1, "Copying `" ~ shared(T).stringof ~ "* here` to `" ~ V1.stringof ~ "* ifThis` would violate shared.");
        alias Thunk1 = V1;
    }
    static if (is (V2 == shared U2, U2))
        alias Thunk2 = U2;
    else
    {
        import core.internal.traits : hasUnsharedIndirections;
        static assert(!hasUnsharedIndirections!V2, "Copying `" ~ V2.stringof ~ "* writeThis` to `" ~ shared(T).stringof ~ "* here` would violate shared.");
        alias Thunk2 = V2;
    }
    static assert (is(T : Thunk1), "Mismatching types for `here` and `ifThis`: `" ~ shared(T).stringof ~ "` and `" ~ V1.stringof ~ "`.");
    return casWeak!(succ, fail)(cast(T*)here, cast(Thunk1*)ifThis, *cast(Thunk2*)&writeThis);
}
518
519 /// Ditto
bool casWeak(MemoryOrder succ = MemoryOrder.seq,MemoryOrder fail = MemoryOrder.seq,T,V)(shared(T)* here, shared(T)* ifThis, shared(V) writeThis) pure nothrow @nogc @trusted
    if (is(T == class))
in (atomicPtrIsProperlyAligned(here), "Argument `here` is not properly aligned")
{
    // Class references compare-and-exchange as plain pointers; `ifThis`
    // receives the previous reference on failure.
    return atomicCompareExchangeWeak!(succ, fail)(cast(T*)here, cast(T*)ifThis, cast(V)writeThis);
}
526
527 /**
528 * Inserts a full load/store memory fence (on platforms that need it). This ensures
529 * that all loads and stores before a call to this function are executed before any
530 * loads and stores after the call.
531 */
void atomicFence(MemoryOrder order = MemoryOrder.seq)() pure nothrow @nogc @safe
{
    // Delegates to the platform-specific fence in core.internal.atomic.
    core.internal.atomic.atomicFence!order();
}
536
537 /**
538 * Gives a hint to the processor that the calling thread is in a 'spin-wait' loop,
539 * allowing to more efficiently allocate resources.
540 */
pause()541 void pause() pure nothrow @nogc @safe
542 {
543 core.internal.atomic.pause();
544 }
545
546 /**
547 * Performs the binary operation 'op' on val using 'mod' as the modifier.
548 *
549 * Params:
550 * val = The target variable.
551 * mod = The modifier to apply.
552 *
553 * Returns:
554 * The result of the operation.
555 */
TailShared!T atomicOp(string op, T, V1)(ref shared T val, V1 mod) pure nothrow @nogc @safe
    if (__traits(compiles, mixin("*cast(T*)&val" ~ op ~ "mod")))
in (atomicValueIsProperlyAligned(val))
{
    // binary operators
    //
    // +    -   *   /   %   ^^  &
    // |    ^   <<  >>  >>> ~   in
    // ==   !=  <   <=  >   >=
    static if (op == "+" || op == "-" || op == "*" || op == "/" ||
               op == "%" || op == "^^" || op == "&" || op == "|" ||
               op == "^" || op == "<<" || op == ">>" || op == ">>>" ||
               op == "~" || // skip "in"
               op == "==" || op == "!=" || op == "<" || op == "<=" ||
               op == ">" || op == ">=")
    {
        // Only the LOAD is atomic here: the operator is applied to a
        // thread-local copy and the result is returned, never stored back.
        T get = atomicLoad!(MemoryOrder.raw, T)(val);
        mixin("return get " ~ op ~ " mod;");
    }
    else
    // assignment operators
    //
    // +=   -=  *=  /=  %=  ^^= &=
    // |=   ^=  <<= >>= >>>= ~=
    //
    // Fast path: word-sized-or-smaller integral += / -= map directly onto
    // the hardware fetch-add/fetch-sub primitives.
    static if (op == "+=" && __traits(isIntegral, T) && __traits(isIntegral, V1) && T.sizeof <= size_t.sizeof && V1.sizeof <= size_t.sizeof)
    {
        return cast(T)(atomicFetchAdd(val, mod) + mod);
    }
    else static if (op == "-=" && __traits(isIntegral, T) && __traits(isIntegral, V1) && T.sizeof <= size_t.sizeof && V1.sizeof <= size_t.sizeof)
    {
        return cast(T)(atomicFetchSub(val, mod) - mod);
    }
    else static if (op == "+=" || op == "-=" || op == "*=" || op == "/=" ||
                    op == "%=" || op == "^^=" || op == "&=" || op == "|=" ||
                    op == "^=" || op == "<<=" || op == ">>=" || op == ">>>=") // skip "~="
    {
        // General read-modify-write: retry with a weak CAS until no other
        // thread has modified `val` between the load and the swap.
        T set, get = atomicLoad!(MemoryOrder.raw, T)(val);
        do
        {
            set = get;
            mixin("set " ~ op ~ " mod;");
        } while (!casWeakByRef(val, get, set));
        return set;
    }
    else
    {
        static assert(false, "Operation not supported.");
    }
}
605
606
version(D_InlineAsm_X86)607 version (D_InlineAsm_X86)
608 {
609 enum has64BitXCHG = false;
610 enum has64BitCAS = true;
611 enum has128BitCAS = false;
612 }
version(D_InlineAsm_X86_64)613 else version (D_InlineAsm_X86_64)
614 {
615 enum has64BitXCHG = true;
616 enum has64BitCAS = true;
617 enum has128BitCAS = true;
618 }
version(GNU)619 else version (GNU)
620 {
621 import gcc.config;
622 enum has64BitCAS = GNU_Have_64Bit_Atomics;
623 enum has64BitXCHG = GNU_Have_64Bit_Atomics;
624 enum has128BitCAS = GNU_Have_LibAtomic;
625 }
626 else
627 {
628 enum has64BitXCHG = false;
629 enum has64BitCAS = false;
630 enum has128BitCAS = false;
631 }
632
633 private
634 {
atomicValueIsProperlyAligned(T)635 bool atomicValueIsProperlyAligned(T)(ref T val) pure nothrow @nogc @trusted
636 {
637 return atomicPtrIsProperlyAligned(&val);
638 }
639
atomicPtrIsProperlyAligned(T)640 bool atomicPtrIsProperlyAligned(T)(T* ptr) pure nothrow @nogc @safe
641 {
642 // NOTE: Strictly speaking, the x86 supports atomic operations on
643 // unaligned values. However, this is far slower than the
644 // common case, so such behavior should be prohibited.
645 static if (T.sizeof > size_t.sizeof)
646 {
647 version (X86)
648 {
649 // cmpxchg8b only requires 4-bytes alignment
650 return cast(size_t)ptr % size_t.sizeof == 0;
651 }
652 else
653 {
654 // e.g., x86_64 cmpxchg16b requires 16-bytes alignment
655 return cast(size_t)ptr % T.sizeof == 0;
656 }
657 }
658 else
659 {
660 return cast(size_t)ptr % T.sizeof == 0;
661 }
662 }
663
    // Maps a floating-point type to the unsigned integer type of the same
    // size, so atomics can operate on the raw bit pattern.
    template IntForFloat(F)
        if (__traits(isFloating, F))
    {
        static if (F.sizeof == 4)
            alias IntForFloat = uint;
        else static if (F.sizeof == 8)
            alias IntForFloat = ulong;
        else
            // `real` (x87 80-bit) and other exotic widths are unsupported.
            static assert (false, "Invalid floating point type: " ~ F.stringof ~ ", only support `float` and `double`.");
    }
674
675 template IntForStruct(S)
676 if (is(S == struct))
677 {
678 static if (S.sizeof == 1)
679 alias IntForFloat = ubyte;
680 else static if (F.sizeof == 2)
681 alias IntForFloat = ushort;
682 else static if (F.sizeof == 4)
683 alias IntForFloat = uint;
684 else static if (F.sizeof == 8)
685 alias IntForFloat = ulong;
686 else static if (F.sizeof == 16)
687 alias IntForFloat = ulong[2]; // TODO: what's the best type here? slice/delegates pass in registers...
688 else
689 static assert (ValidateStruct!S);
690 }
691
    // Compile-time validation that S is usable with atomic operations:
    // its size must be a power of two no larger than two machine words,
    // and its assignment must be bitwise (no elaborate opAssign).
    template ValidateStruct(S)
        if (is(S == struct))
    {
        import core.internal.traits : hasElaborateAssign;

        // `(x & (x-1)) == 0` checks that x is a power of 2.
        static assert (S.sizeof <= size_t.sizeof * 2
                       && (S.sizeof & (S.sizeof - 1)) == 0,
                       S.stringof ~ " has invalid size for atomic operations.");
        static assert (!hasElaborateAssign!S, S.stringof ~ " may not have an elaborate assignment when used with atomic operations.");

        enum ValidateStruct = true;
    }
705
706 // TODO: it'd be nice if we had @trusted scopes; we could remove this...
casWeakByRef(T,V1,V2)707 bool casWeakByRef(T,V1,V2)(ref T value, ref V1 ifThis, V2 writeThis) pure nothrow @nogc @trusted
708 {
709 return casWeak(&value, &ifThis, writeThis);
710 }
711
712 /* Construct a type with a shared tail, and if possible with an unshared
713 head. */
    // Unshared entry point: qualify with `shared` and defer to the
    // implementation below.
    template TailShared(U) if (!is(U == shared))
    {
        alias TailShared = .TailShared!(shared U);
    }
    // Implementation: given shared S, produce a type whose head is
    // unshared where possible.  Scalars strip `shared` entirely; structs
    // get a generated wrapper whose property getters cast each field to
    // its TailShared type; everything else stays fully shared.
    template TailShared(S) if (is(S == shared))
    {
        // Get the unshared variant of S.
        static if (is(S U == shared U)) {}
        else static assert(false, "Should never be triggered. The `static " ~
            "if` declares `U` as the unshared version of the shared type " ~
            "`S`. `S` is explicitly declared as shared, so getting `U` " ~
            "should always work.");

        static if (is(S : U))
            alias TailShared = U;
        else static if (is(S == struct))
        {
            enum implName = () {
                /* Start with "_impl". If S has a field with that name, append
                underscores until the clash is resolved. */
                string name = "_impl";
                string[] fieldNames;
                static foreach (alias field; S.tupleof)
                {
                    fieldNames ~= __traits(identifier, field);
                }
                static bool canFind(string[] haystack, string needle)
                {
                    foreach (candidate; haystack)
                    {
                        if (candidate == needle) return true;
                    }
                    return false;
                }
                while (canFind(fieldNames, name)) name ~= "_";
                return name;
            } ();
            struct TailShared
            {
                static foreach (i, alias field; S.tupleof)
                {
                    /* On @trusted: This is casting the field from shared(Foo)
                    to TailShared!Foo. The cast is safe because the field has
                    been loaded and is not shared anymore. */
                    mixin("
                        @trusted @property
                        ref " ~ __traits(identifier, field) ~ "()
                        {
                            alias R = TailShared!(typeof(field));
                            return * cast(R*) &" ~ implName ~ ".tupleof[i];
                        }
                    ");
                }
                mixin("
                    S " ~ implName ~ ";
                    alias " ~ implName ~ " this;
                ");
            }
        }
        else
            alias TailShared = S;
    }
    // Compile-time checks covering each branch of TailShared.
    @safe unittest
    {
        // No tail (no indirections) -> fully unshared.

        static assert(is(TailShared!int == int));
        static assert(is(TailShared!(shared int) == int));

        static struct NoIndir { int i; }
        static assert(is(TailShared!NoIndir == NoIndir));
        static assert(is(TailShared!(shared NoIndir) == NoIndir));

        // Tail can be independently shared or is already -> tail-shared.

        static assert(is(TailShared!(int*) == shared(int)*));
        static assert(is(TailShared!(shared int*) == shared(int)*));
        static assert(is(TailShared!(shared(int)*) == shared(int)*));

        static assert(is(TailShared!(int[]) == shared(int)[]));
        static assert(is(TailShared!(shared int[]) == shared(int)[]));
        static assert(is(TailShared!(shared(int)[]) == shared(int)[]));

        static struct S1 { shared int* p; }
        static assert(is(TailShared!S1 == S1));
        static assert(is(TailShared!(shared S1) == S1));

        static struct S2 { shared(int)* p; }
        static assert(is(TailShared!S2 == S2));
        static assert(is(TailShared!(shared S2) == S2));

        // Tail follows shared-ness of head -> fully shared.

        static class C { int i; }
        static assert(is(TailShared!C == shared C));
        static assert(is(TailShared!(shared C) == shared C));

        /* However, structs get a wrapper that has getters which cast to
        TailShared. */

        // S3 also exercises the "_impl" name-clash resolution.
        static struct S3 { int* p; int _impl; int _impl_; int _impl__; }
        static assert(!is(TailShared!S3 : S3));
        static assert(is(TailShared!S3 : shared S3));
        static assert(is(TailShared!(shared S3) == TailShared!S3));

        static struct S4 { shared(int)** p; }
        static assert(!is(TailShared!S4 : S4));
        static assert(is(TailShared!S4 : shared S4));
        static assert(is(TailShared!(shared S4) == TailShared!S4));
    }
824 }
825
826
827 ////////////////////////////////////////////////////////////////////////////////
828 // Unit Tests
829 ////////////////////////////////////////////////////////////////////////////////
830
831
832 version (CoreUnittest)
833 {
    // "DW" = double-word: whether a CAS of twice the native word size is
    // available on this target.
    version (D_LP64)
    {
        enum hasDWCAS = has128BitCAS;
    }
    else
    {
        enum hasDWCAS = has64BitCAS;
    }
842
    // Exercises atomicExchange for T: swap `val` into a null-initialized
    // shared slot and verify the previous value comes back.
    void testXCHG(T)(T val) pure nothrow @nogc @trusted
    in
    {
        assert(val !is T.init);
    }
    do
    {
        T base = cast(T)null;
        shared(T) atom = cast(shared(T))null;

        assert(base !is val, T.stringof);
        assert(atom is base, T.stringof);

        assert(atomicExchange(&atom, val) is base, T.stringof);
        assert(atom is val, T.stringof);
    }
859
    // Exercises both cas flavors for T: the compare-and-set form (value
    // comparand) and the compare-and-exchange form (pointer comparand,
    // which receives the previous value on failure).
    void testCAS(T)(T val) pure nothrow @nogc @trusted
    in
    {
        assert(val !is T.init);
    }
    do
    {
        T base = cast(T)null;
        shared(T) atom = cast(shared(T))null;

        assert(base !is val, T.stringof);
        assert(atom is base, T.stringof);

        // compare-and-set: succeeds once, then fails with a stale comparand.
        assert(cas(&atom, base, val), T.stringof);
        assert(atom is val, T.stringof);
        assert(!cas(&atom, base, base), T.stringof);
        assert(atom is val, T.stringof);

        atom = cast(shared(T))null;

        // compare-and-exchange: `arg` keeps its value on success...
        shared(T) arg = base;
        assert(cas(&atom, &arg, val), T.stringof);
        assert(arg is base, T.stringof);
        assert(atom is val, T.stringof);

        // ...and receives the current value of `atom` on failure.
        arg = base;
        assert(!cas(&atom, &arg, base), T.stringof);
        assert(arg is val, T.stringof);
        assert(atom is val, T.stringof);
    }
890
    // Round-trips `val` through atomicStore/atomicLoad at the given
    // memory order and verifies the value survives.
    void testLoadStore(MemoryOrder ms = MemoryOrder.seq, T)(T val = T.init + 1) pure nothrow @nogc @trusted
    {
        T base = cast(T) 0;
        shared(T) atom = cast(T) 0;

        assert(base !is val);
        assert(atom is base);
        atomicStore!(ms)(atom, val);
        base = atomicLoad!(ms)(atom);

        assert(base is val, T.stringof);
        assert(atom is val);
    }
904
905
    // Runs the full helper suite for one type; exchange is skipped for
    // 8-byte types on targets without 64-bit XCHG support.
    void testType(T)(T val = T.init + 1) pure nothrow @nogc @safe
    {
        static if (T.sizeof < 8 || has64BitXCHG)
            testXCHG!(T)(val);
        testCAS!(T)(val);
        testLoadStore!(MemoryOrder.seq, T)(val);
        testLoadStore!(MemoryOrder.raw, T)(val);
    }
914
915 @betterC @safe pure nothrow unittest
916 {
917 testType!(bool)();
918
919 testType!(byte)();
920 testType!(ubyte)();
921
922 testType!(short)();
923 testType!(ushort)();
924
925 testType!(int)();
926 testType!(uint)();
927 }
928
    @safe pure nothrow unittest
    {

        // Pointer to shared data: exercises exchange, CAS, and load/store.
        testType!(shared int*)();

        // Reference types (interface/class) only go through exchange and
        // CAS here; testType's `T.init + 1` default does not apply to them.
        static interface Inter {}
        static class KlassImpl : Inter {}
        testXCHG!(shared Inter)(new shared(KlassImpl));
        testCAS!(shared Inter)(new shared(KlassImpl));

        static class Klass {}
        testXCHG!(shared Klass)(new shared(Klass));
        testCAS!(shared Klass)(new shared(Klass));

        testXCHG!(shared int)(42);

        testType!(float)(0.1f);

        // 64-bit scalars require native 64-bit CAS, which not every
        // 32-bit target provides.
        static if (has64BitCAS)
        {
            testType!(double)(0.1);
            testType!(long)();
            testType!(ulong)();
        }
        // Double-word (128-bit) CAS, where the hardware provides it.
        static if (has128BitCAS)
        {
            () @trusted
            {
                align(16) struct Big { long a, b; }

                shared(Big) atom;
                shared(Big) base;
                shared(Big) arg;
                shared(Big) val = Big(1, 2);

                // Value-comparand overload.
                assert(cas(&atom, arg, val), Big.stringof);
                assert(atom is val, Big.stringof);
                assert(!cas(&atom, arg, val), Big.stringof);
                assert(atom is val, Big.stringof);

                atom = Big();
                // Pointer-comparand overload: failure writes the observed
                // value back through `arg`.
                assert(cas(&atom, &arg, val), Big.stringof);
                assert(arg is base, Big.stringof);
                assert(atom is val, Big.stringof);

                arg = Big();
                assert(!cas(&atom, &arg, base), Big.stringof);
                assert(arg is val, Big.stringof);
                assert(atom is val, Big.stringof);
            }();
        }

        shared(size_t) i;

        // Integral read-modify-write round trip.
        atomicOp!"+="(i, cast(size_t) 1);
        assert(i == 1);

        atomicOp!"-="(i, cast(size_t) 1);
        assert(i == 0);

        // Floating-point atomicOp: compare against a tolerance window
        // rather than exact equality.
        shared float f = 0.1f;
        atomicOp!"+="(f, 0.1f);
        assert(f > 0.1999f && f < 0.2001f);

        static if (has64BitCAS)
        {
            shared double d = 0.1;
            atomicOp!"+="(d, 0.1);
            assert(d > 0.1999 && d < 0.2001);
        }
    }
1000
    @betterC pure nothrow unittest
    {
        static if (has128BitCAS)
        {
            // Two machine words stored, loaded, and CASed as one unit.
            struct DoubleValue
            {
                long value1;
                long value2;
            }

            align(16) shared DoubleValue a;
            atomicStore(a, DoubleValue(1,2));
            assert(a.value1 == 1 && a.value2 ==2);

            // NOTE(review): a strong cas with a matching comparand should
            // succeed on the first attempt; the loop is defensive.
            while (!cas(&a, DoubleValue(1,2), DoubleValue(3,4))){}
            assert(a.value1 == 3 && a.value2 ==4);

            align(16) DoubleValue b = atomicLoad(a);
            assert(b.value1 == 3 && b.value2 ==4);
        }

        // Typical DWCAS use case: a generation counter paired with a
        // pointer (ABA mitigation on lock-free lists).
        static if (hasDWCAS)
        {
            static struct List { size_t gen; List* next; }
            shared(List) head;
            assert(cas(&head, shared(List)(0, null), shared(List)(1, cast(List*)1)));
            assert(head.gen == 1);
            assert(cast(size_t)head.next == 1);
        }

        // https://issues.dlang.org/show_bug.cgi?id=20629
        static struct Struct
        {
            uint a, b;
        }
        shared Struct s1 = Struct(1, 2);
        atomicStore(s1, Struct(3, 4));
        assert(cast(uint) s1.a == 3);
        assert(cast(uint) s1.b == 4);
    }
1041
    // https://issues.dlang.org/show_bug.cgi?id=20844
    static if (hasDWCAS)
    {
        debug: // tests CAS in-contract

        pure nothrow unittest
        {
            import core.exception : AssertError;

            // A target that straddles the required 16-byte alignment must
            // be rejected by cas's in-contract (checked in debug builds).
            align(16) shared ubyte[2 * size_t.sizeof + 1] data;
            auto misalignedPointer = cast(size_t[2]*) &data[1];
            size_t[2] x;

            try
                cas(misalignedPointer, x, x);
            catch (AssertError)
                return;

            assert(0, "should have failed");
        }
    }
1063
1064 @betterC pure nothrow @nogc @safe unittest
1065 {
1066 int a;
1067 if (casWeak!(MemoryOrder.acq_rel, MemoryOrder.raw)(&a, 0, 4))
1068 assert(a == 4);
1069 }
1070
1071 @betterC pure nothrow unittest
1072 {
1073 static struct S { int val; }
1074 auto s = shared(S)(1);
1075
1076 shared(S*) ptr;
1077
1078 // head unshared
1079 shared(S)* ifThis = null;
1080 shared(S)* writeThis = &s;
1081 assert(ptr is null);
1082 assert(cas(&ptr, ifThis, writeThis));
1083 assert(ptr is writeThis);
1084
1085 // head shared
1086 shared(S*) ifThis2 = writeThis;
1087 shared(S*) writeThis2 = null;
1088 assert(cas(&ptr, ifThis2, writeThis2));
1089 assert(ptr is null);
1090 }
1091
    unittest
    {
        import core.thread;

        // Use heap memory to ensure an optimizing
        // compiler doesn't put things in registers.
        uint* x = new uint();
        bool* f = new bool();
        uint* r = new uint();

        auto thr = new Thread(()
        {
            // Spin until the writer raises the flag.
            while (!*f)
            {
            }

            // Pairs with the writer's fence: everything written before the
            // flag was raised must now be visible to this thread.
            atomicFence();

            *r = *x;
        });

        thr.start();

        *x = 42;

        // Publish the data before raising the flag.
        atomicFence();

        *f = true;

        atomicFence();

        thr.join();

        assert(*r == 42);
    }
1127
1128 // === atomicFetchAdd and atomicFetchSub operations ====
    @betterC pure nothrow @nogc @safe unittest
    {
        // atomicOp!"+=" must handle every integral width and return the
        // updated value.
        shared ubyte u8 = 1;
        shared ushort u16 = 2;
        shared uint u32 = 3;
        shared byte i8 = 5;
        shared short i16 = 6;
        shared int i32 = 7;

        assert(atomicOp!"+="(u8, 8) == 9);
        assert(atomicOp!"+="(u16, 8) == 10);
        assert(atomicOp!"+="(u32, 8) == 11);
        assert(atomicOp!"+="(i8, 8) == 13);
        assert(atomicOp!"+="(i16, 8) == 14);
        assert(atomicOp!"+="(i32, 8) == 15);
        // 64-bit operands only on 64-bit targets.
        version (D_LP64)
        {
            shared ulong u64 = 4;
            shared long i64 = 8;
            assert(atomicOp!"+="(u64, 8) == 12);
            assert(atomicOp!"+="(i64, 8) == 16);
        }
    }
1152
1153 @betterC pure nothrow @nogc unittest
1154 {
1155 byte[10] byteArray = [1, 3, 5, 7, 9, 11, 13, 15, 17, 19];
1156 ulong[10] ulongArray = [2, 4, 6, 8, 10, 12, 14, 16, 19, 20];
1157
1158 {
1159 auto array = byteArray;
1160 byte* ptr = &array[0];
1161 byte* prevPtr = atomicFetchAdd(ptr, 3);
1162 assert(prevPtr == &array[0]);
1163 assert(*prevPtr == 1);
1164 assert(*ptr == 7);
1165 }
1166 {
1167 auto array = ulongArray;
1168 ulong* ptr = &array[0];
1169 ulong* prevPtr = atomicFetchAdd(ptr, 3);
1170 assert(prevPtr == &array[0]);
1171 assert(*prevPtr == 2);
1172 assert(*ptr == 8);
1173 }
1174 }
1175
    @betterC pure nothrow @nogc @safe unittest
    {
        // atomicOp!"-=" must handle every integral width and return the
        // updated value.
        shared ubyte u8 = 1;
        shared ushort u16 = 2;
        shared uint u32 = 3;
        shared byte i8 = 5;
        shared short i16 = 6;
        shared int i32 = 7;

        assert(atomicOp!"-="(u8, 1) == 0);
        assert(atomicOp!"-="(u16, 1) == 1);
        assert(atomicOp!"-="(u32, 1) == 2);
        assert(atomicOp!"-="(i8, 1) == 4);
        assert(atomicOp!"-="(i16, 1) == 5);
        assert(atomicOp!"-="(i32, 1) == 6);
        // 64-bit operands only on 64-bit targets.
        version (D_LP64)
        {
            shared ulong u64 = 4;
            shared long i64 = 8;
            assert(atomicOp!"-="(u64, 1) == 3);
            assert(atomicOp!"-="(i64, 1) == 7);
        }
    }
1199
1200 @betterC pure nothrow @nogc unittest
1201 {
1202 byte[10] byteArray = [1, 3, 5, 7, 9, 11, 13, 15, 17, 19];
1203 ulong[10] ulongArray = [2, 4, 6, 8, 10, 12, 14, 16, 19, 20];
1204
1205 {
1206 auto array = byteArray;
1207 byte* ptr = &array[5];
1208 byte* prevPtr = atomicFetchSub(ptr, 4);
1209 assert(prevPtr == &array[5]);
1210 assert(*prevPtr == 11);
1211 assert(*ptr == 3); // https://issues.dlang.org/show_bug.cgi?id=21578
1212 }
1213 {
1214 auto array = ulongArray;
1215 ulong* ptr = &array[5];
1216 ulong* prevPtr = atomicFetchSub(ptr, 4);
1217 assert(prevPtr == &array[5]);
1218 assert(*prevPtr == 12);
1219 assert(*ptr == 4); // https://issues.dlang.org/show_bug.cgi?id=21578
1220 }
1221 }
1222
1223 @betterC pure nothrow @nogc @safe unittest // issue 16651
1224 {
1225 shared ulong a = 2;
1226 uint b = 1;
1227 atomicOp!"-="(a, b);
1228 assert(a == 1);
1229
1230 shared uint c = 2;
1231 ubyte d = 1;
1232 atomicOp!"-="(c, d);
1233 assert(c == 1);
1234 }
1235
    pure nothrow @safe unittest // issue 16230
    {
        // atomicLoad must strip `shared` from the head of the result type
        // while keeping everything reachable through indirections shared.
        shared int i;
        static assert(is(typeof(atomicLoad(i)) == int));

        shared int* p;
        static assert(is(typeof(atomicLoad(p)) == shared(int)*));

        shared int[] a;
        static if (__traits(compiles, atomicLoad(a)))
        {
            static assert(is(typeof(atomicLoad(a)) == shared(int)[]));
        }

        // Struct with an indirection: the field's target stays shared.
        static struct S { int* _impl; }
        shared S s;
        static assert(is(typeof(atomicLoad(s)) : shared S));
        static assert(is(typeof(atomicLoad(s)._impl) == shared(int)*));
        auto u = atomicLoad(s);
        assert(u._impl is null);
        u._impl = new shared int(42);
        assert(atomicLoad(*u._impl) == 42);

        // Nested aggregates follow the same head/tail rule.
        static struct S2 { S s; }
        shared S2 s2;
        static assert(is(typeof(atomicLoad(s2).s) == TailShared!S));

        static struct S3 { size_t head; int* tail; }
        shared S3 s3;
        static if (__traits(compiles, atomicLoad(s3)))
        {
            static assert(is(typeof(atomicLoad(s3).head) == size_t));
            static assert(is(typeof(atomicLoad(s3).tail) == shared(int)*));
        }

        // Classes are reference types: the loaded reference stays shared.
        static class C { int i; }
        shared C c;
        static assert(is(typeof(atomicLoad(c)) == shared C));

        // No indirections at all: the result is entirely unshared.
        static struct NoIndirections { int i; }
        shared NoIndirections n;
        static assert(is(typeof(atomicLoad(n)) == NoIndirections));
    }
1279 }
1280