1 /**
2  * The atomic module provides basic support for lock-free
3  * concurrent programming.
4  *
5  * $(NOTE Use the `-preview=nosharedaccess` compiler flag to detect
6  * unsafe individual read or write operations on shared data.)
7  *
8  * Copyright: Copyright Sean Kelly 2005 - 2016.
9  * License:   $(LINK2 http://www.boost.org/LICENSE_1_0.txt, Boost License 1.0)
10  * Authors:   Sean Kelly, Alex Rønne Petersen, Manu Evans
11  * Source:    $(DRUNTIMESRC core/_atomic.d)
12  */
13 
14 module core.atomic;
15 
16 ///
17 @safe unittest
18 {
19     int y = 2;
20     shared int x = y; // OK
21 
    //x++; // read-modify-write error
23     x.atomicOp!"+="(1); // OK
24     //y = x; // read error with preview flag
25     y = x.atomicLoad(); // OK
26     assert(y == 3);
27     //x = 5; // write error with preview flag
28     x.atomicStore(5); // OK
29     assert(x.atomicLoad() == 5);
30 }
31 
32 import core.internal.atomic;
33 import core.internal.attributes : betterC;
34 import core.internal.traits : hasUnsharedIndirections;
35 
36 /**
37  * Specifies the memory ordering semantics of an atomic operation.
38  *
39  * See_Also:
40  *     $(HTTP en.cppreference.com/w/cpp/atomic/memory_order)
41  */
42 enum MemoryOrder
43 {
44     /**
45      * Not sequenced.
46      * Corresponds to $(LINK2 https://llvm.org/docs/Atomics.html#monotonic, LLVM AtomicOrdering.Monotonic)
47      * and C++11/C11 `memory_order_relaxed`.
48      */
49     raw = 0,
50     /**
51      * Hoist-load + hoist-store barrier.
52      * Corresponds to $(LINK2 https://llvm.org/docs/Atomics.html#acquire, LLVM AtomicOrdering.Acquire)
53      * and C++11/C11 `memory_order_acquire`.
54      */
55     acq = 2,
56     /**
57      * Sink-load + sink-store barrier.
58      * Corresponds to $(LINK2 https://llvm.org/docs/Atomics.html#release, LLVM AtomicOrdering.Release)
59      * and C++11/C11 `memory_order_release`.
60      */
61     rel = 3,
62     /**
63      * Acquire + release barrier.
64      * Corresponds to $(LINK2 https://llvm.org/docs/Atomics.html#acquirerelease, LLVM AtomicOrdering.AcquireRelease)
65      * and C++11/C11 `memory_order_acq_rel`.
66      */
67     acq_rel = 4,
68     /**
69      * Fully sequenced (acquire + release). Corresponds to
70      * $(LINK2 https://llvm.org/docs/Atomics.html#sequentiallyconsistent, LLVM AtomicOrdering.SequentiallyConsistent)
71      * and C++11/C11 `memory_order_seq_cst`.
72      */
73     seq = 5,
74 }
75 
76 /**
77  * Loads 'val' from memory and returns it.  The memory barrier specified
78  * by 'ms' is applied to the operation, which is fully sequenced by
79  * default.  Valid memory orders are MemoryOrder.raw, MemoryOrder.acq,
80  * and MemoryOrder.seq.
81  *
82  * Params:
83  *  val = The target variable.
84  *
85  * Returns:
86  *  The value of 'val'.
87  */
88 T atomicLoad(MemoryOrder ms = MemoryOrder.seq, T)(auto ref return scope const T val) pure nothrow @nogc @trusted
89     if (!is(T == shared U, U) && !is(T == shared inout U, U) && !is(T == shared const U, U))
90 {
91     static if (__traits(isFloating, T))
92     {
93         alias IntTy = IntForFloat!T;
94         IntTy r = core.internal.atomic.atomicLoad!ms(cast(IntTy*)&val);
95         return *cast(T*)&r;
96     }
97     else
98         return core.internal.atomic.atomicLoad!ms(cast(T*)&val);
99 }
100 
101 /// Ditto
102 T atomicLoad(MemoryOrder ms = MemoryOrder.seq, T)(auto ref return scope shared const T val) pure nothrow @nogc @trusted
103     if (!hasUnsharedIndirections!T)
104 {
105     import core.internal.traits : hasUnsharedIndirections;
106     static assert(!hasUnsharedIndirections!T, "Copying `" ~ shared(const(T)).stringof ~ "` would violate shared.");
107 
108     return atomicLoad!ms(*cast(T*)&val);
109 }
110 
111 /// Ditto
112 TailShared!T atomicLoad(MemoryOrder ms = MemoryOrder.seq, T)(auto ref shared const T val) pure nothrow @nogc @trusted
113     if (hasUnsharedIndirections!T)
114 {
115     // HACK: DEPRECATE THIS FUNCTION, IT IS INVALID TO DO ATOMIC LOAD OF SHARED CLASS
116     // this is here because code exists in the wild that does this...
117 
118     return core.internal.atomic.atomicLoad!ms(cast(TailShared!T*)&val);
119 }
120 
121 /**
122  * Writes 'newval' into 'val'.  The memory barrier specified by 'ms' is
123  * applied to the operation, which is fully sequenced by default.
124  * Valid memory orders are MemoryOrder.raw, MemoryOrder.rel, and
125  * MemoryOrder.seq.
126  *
127  * Params:
128  *  val    = The target variable.
129  *  newval = The value to store.
130  */
131 void atomicStore(MemoryOrder ms = MemoryOrder.seq, T, V)(ref T val, V newval) pure nothrow @nogc @trusted
132     if (!is(T == shared) && !is(V == shared))
133 {
134     import core.internal.traits : hasElaborateCopyConstructor;
135     static assert (!hasElaborateCopyConstructor!T, "`T` may not have an elaborate copy: atomic operations override regular copying semantics.");
136 
137     // resolve implicit conversions
138     T arg = newval;
139 
140     static if (__traits(isFloating, T))
141     {
142         alias IntTy = IntForFloat!T;
143         core.internal.atomic.atomicStore!ms(cast(IntTy*)&val, *cast(IntTy*)&arg);
144     }
145     else
146         core.internal.atomic.atomicStore!ms(&val, arg);
147 }
148 
149 /// Ditto
150 void atomicStore(MemoryOrder ms = MemoryOrder.seq, T, V)(ref shared T val, V newval) pure nothrow @nogc @trusted
151     if (!is(T == class))
152 {
153     static if (is (V == shared U, U))
154         alias Thunk = U;
155     else
156     {
157         import core.internal.traits : hasUnsharedIndirections;
158         static assert(!hasUnsharedIndirections!V, "Copying argument `" ~ V.stringof ~ " newval` to `" ~ shared(T).stringof ~ " here` would violate shared.");
159         alias Thunk = V;
160     }
161     atomicStore!ms(*cast(T*)&val, *cast(Thunk*)&newval);
162 }
163 
164 /// Ditto
165 void atomicStore(MemoryOrder ms = MemoryOrder.seq, T, V)(ref shared T val, auto ref shared V newval) pure nothrow @nogc @trusted
166     if (is(T == class))
167 {
168     static assert (is (V : T), "Can't assign `newval` of type `shared " ~ V.stringof ~ "` to `shared " ~ T.stringof ~ "`.");
169 
170     core.internal.atomic.atomicStore!ms(cast(T*)&val, cast(V)newval);
171 }
172 
173 /**
174  * Atomically adds `mod` to the value referenced by `val` and returns the value `val` held previously.
175  * This operation is both lock-free and atomic.
176  *
177  * Params:
178  *  val = Reference to the value to modify.
179  *  mod = The value to add.
180  *
181  * Returns:
182  *  The value held previously by `val`.
183  */
184 T atomicFetchAdd(MemoryOrder ms = MemoryOrder.seq, T)(ref return scope T val, size_t mod) pure nothrow @nogc @trusted
185     if ((__traits(isIntegral, T) || is(T == U*, U)) && !is(T == shared))
186 in (atomicValueIsProperlyAligned(val))
187 {
188     static if (is(T == U*, U))
189         return cast(T)core.internal.atomic.atomicFetchAdd!ms(cast(size_t*)&val, mod * U.sizeof);
190     else
191         return core.internal.atomic.atomicFetchAdd!ms(&val, cast(T)mod);
192 }
193 
194 /// Ditto
195 T atomicFetchAdd(MemoryOrder ms = MemoryOrder.seq, T)(ref return scope shared T val, size_t mod) pure nothrow @nogc @trusted
196     if (__traits(isIntegral, T) || is(T == U*, U))
197 in (atomicValueIsProperlyAligned(val))
198 {
199     return atomicFetchAdd!ms(*cast(T*)&val, mod);
200 }
201 
202 /**
203  * Atomically subtracts `mod` from the value referenced by `val` and returns the value `val` held previously.
204  * This operation is both lock-free and atomic.
205  *
206  * Params:
207  *  val = Reference to the value to modify.
208  *  mod = The value to subtract.
209  *
210  * Returns:
211  *  The value held previously by `val`.
212  */
213 T atomicFetchSub(MemoryOrder ms = MemoryOrder.seq, T)(ref return scope T val, size_t mod) pure nothrow @nogc @trusted
214     if ((__traits(isIntegral, T) || is(T == U*, U)) && !is(T == shared))
215 in (atomicValueIsProperlyAligned(val))
216 {
217     static if (is(T == U*, U))
218         return cast(T)core.internal.atomic.atomicFetchSub!ms(cast(size_t*)&val, mod * U.sizeof);
219     else
220         return core.internal.atomic.atomicFetchSub!ms(&val, cast(T)mod);
221 }
222 
223 /// Ditto
224 T atomicFetchSub(MemoryOrder ms = MemoryOrder.seq, T)(ref return scope shared T val, size_t mod) pure nothrow @nogc @trusted
225     if (__traits(isIntegral, T) || is(T == U*, U))
226 in (atomicValueIsProperlyAligned(val))
227 {
228     return atomicFetchSub!ms(*cast(T*)&val, mod);
229 }
230 
231 /**
232  * Exchange `exchangeWith` with the memory referenced by `here`.
233  * This operation is both lock-free and atomic.
234  *
235  * Params:
236  *  here         = The address of the destination variable.
237  *  exchangeWith = The value to exchange.
238  *
239  * Returns:
240  *  The value held previously by `here`.
241  */
242 T atomicExchange(MemoryOrder ms = MemoryOrder.seq,T,V)(T* here, V exchangeWith) pure nothrow @nogc @trusted
243     if (!is(T == shared) && !is(V == shared))
244 in (atomicPtrIsProperlyAligned(here), "Argument `here` is not properly aligned")
245 {
246     // resolve implicit conversions
247     T arg = exchangeWith;
248 
249     static if (__traits(isFloating, T))
250     {
251         alias IntTy = IntForFloat!T;
252         IntTy r = core.internal.atomic.atomicExchange!ms(cast(IntTy*)here, *cast(IntTy*)&arg);
        return *cast(T*)&r;
254     }
255     else
256         return core.internal.atomic.atomicExchange!ms(here, arg);
257 }
258 
259 /// Ditto
260 TailShared!T atomicExchange(MemoryOrder ms = MemoryOrder.seq,T,V)(shared(T)* here, V exchangeWith) pure nothrow @nogc @trusted
261     if (!is(T == class) && !is(T == interface))
262 in (atomicPtrIsProperlyAligned(here), "Argument `here` is not properly aligned")
263 {
264     static if (is (V == shared U, U))
265         alias Thunk = U;
266     else
267     {
268         import core.internal.traits : hasUnsharedIndirections;
269         static assert(!hasUnsharedIndirections!V, "Copying `exchangeWith` of type `" ~ V.stringof ~ "` to `" ~ shared(T).stringof ~ "` would violate shared.");
270         alias Thunk = V;
271     }
272     return atomicExchange!ms(cast(T*)here, *cast(Thunk*)&exchangeWith);
273 }
274 
275 /// Ditto
276 shared(T) atomicExchange(MemoryOrder ms = MemoryOrder.seq,T,V)(shared(T)* here, shared(V) exchangeWith) pure nothrow @nogc @trusted
277     if (is(T == class) || is(T == interface))
278 in (atomicPtrIsProperlyAligned(here), "Argument `here` is not properly aligned")
279 {
280     static assert (is (V : T), "Can't assign `exchangeWith` of type `" ~ shared(V).stringof ~ "` to `" ~ shared(T).stringof ~ "`.");
281 
282     return cast(shared)core.internal.atomic.atomicExchange!ms(cast(T*)here, cast(V)exchangeWith);
283 }
284 
285 /**
286  * Performs either compare-and-set or compare-and-swap (or exchange).
287  *
288  * There are two categories of overloads in this template:
289  * The first category does a simple compare-and-set.
290  * The comparison value (`ifThis`) is treated as an rvalue.
291  *
 * The second category does a compare-and-swap (a.k.a. compare-and-exchange),
 * and expects `ifThis` to be a pointer type, to which the previous value
 * of `here` will be written.
295  *
296  * This operation is both lock-free and atomic.
297  *
298  * Params:
299  *  here      = The address of the destination variable.
300  *  writeThis = The value to store.
301  *  ifThis    = The comparison value.
302  *
303  * Returns:
304  *  true if the store occurred, false if not.
305  */
306 template cas(MemoryOrder succ = MemoryOrder.seq, MemoryOrder fail = MemoryOrder.seq)
307 {
308     /// Compare-and-set for non-shared values
309     bool cas(T, V1, V2)(T* here, V1 ifThis, V2 writeThis) pure nothrow @nogc @trusted
310     if (!is(T == shared) && is(T : V1))
311     in (atomicPtrIsProperlyAligned(here), "Argument `here` is not properly aligned")
312     {
313         // resolve implicit conversions
314         const T arg1 = ifThis;
315         T arg2 = writeThis;
316 
317         static if (__traits(isFloating, T))
318         {
319             alias IntTy = IntForFloat!T;
320             return atomicCompareExchangeStrongNoResult!(succ, fail)(
321                 cast(IntTy*)here, *cast(IntTy*)&arg1, *cast(IntTy*)&arg2);
322         }
323         else
324             return atomicCompareExchangeStrongNoResult!(succ, fail)(here, arg1, arg2);
325     }
326 
327     /// Compare-and-set for shared value type
328     bool cas(T, V1, V2)(shared(T)* here, V1 ifThis, V2 writeThis) pure nothrow @nogc @trusted
329     if (!is(T == class) && (is(T : V1) || is(shared T : V1)))
330     in (atomicPtrIsProperlyAligned(here), "Argument `here` is not properly aligned")
331     {
332         static if (is (V1 == shared U1, U1))
333             alias Thunk1 = U1;
334         else
335             alias Thunk1 = V1;
336         static if (is (V2 == shared U2, U2))
337             alias Thunk2 = U2;
338         else
339         {
340             import core.internal.traits : hasUnsharedIndirections;
            static assert(!hasUnsharedIndirections!V2,
                          "Copying `" ~ V2.stringof ~ " writeThis` to `" ~
                          shared(T).stringof ~ "* here` would violate shared.");
344             alias Thunk2 = V2;
345         }
346         return cas(cast(T*)here, *cast(Thunk1*)&ifThis, *cast(Thunk2*)&writeThis);
347     }
348 
349     /// Compare-and-set for `shared` reference type (`class`)
350     bool cas(T, V1, V2)(shared(T)* here, shared(V1) ifThis, shared(V2) writeThis)
351     pure nothrow @nogc @trusted
352     if (is(T == class))
353     in (atomicPtrIsProperlyAligned(here), "Argument `here` is not properly aligned")
354     {
355         return atomicCompareExchangeStrongNoResult!(succ, fail)(
356             cast(T*)here, cast(V1)ifThis, cast(V2)writeThis);
357     }
358 
359     /// Compare-and-exchange for non-`shared` types
360     bool cas(T, V)(T* here, T* ifThis, V writeThis) pure nothrow @nogc @trusted
361     if (!is(T == shared) && !is(V == shared))
362     in (atomicPtrIsProperlyAligned(here), "Argument `here` is not properly aligned")
363     {
        // resolve implicit conversions
        T arg1 = writeThis;

        static if (__traits(isFloating, T))
        {
            alias IntTy = IntForFloat!T;
            return atomicCompareExchangeStrong!(succ, fail)(
                cast(IntTy*)here, cast(IntTy*)ifThis, *cast(IntTy*)&arg1);
        }
        else
            return atomicCompareExchangeStrong!(succ, fail)(here, ifThis, arg1);
375     }
376 
377     /// Compare and exchange for mixed-`shared`ness types
378     bool cas(T, V1, V2)(shared(T)* here, V1* ifThis, V2 writeThis) pure nothrow @nogc @trusted
379     if (!is(T == class) && (is(T : V1) || is(shared T : V1)))
380     in (atomicPtrIsProperlyAligned(here), "Argument `here` is not properly aligned")
381     {
382         static if (is (V1 == shared U1, U1))
383             alias Thunk1 = U1;
384         else
385         {
386             import core.internal.traits : hasUnsharedIndirections;
387             static assert(!hasUnsharedIndirections!V1,
388                           "Copying `" ~ shared(T).stringof ~ "* here` to `" ~
389                           V1.stringof ~ "* ifThis` would violate shared.");
390             alias Thunk1 = V1;
391         }
392         static if (is (V2 == shared U2, U2))
393             alias Thunk2 = U2;
394         else
395         {
396             import core.internal.traits : hasUnsharedIndirections;
            static assert(!hasUnsharedIndirections!V2,
                          "Copying `" ~ V2.stringof ~ " writeThis` to `" ~
                          shared(T).stringof ~ "* here` would violate shared.");
400             alias Thunk2 = V2;
401         }
402         static assert (is(T : Thunk1),
403                        "Mismatching types for `here` and `ifThis`: `" ~
404                        shared(T).stringof ~ "` and `" ~ V1.stringof ~ "`.");
405         return cas(cast(T*)here, cast(Thunk1*)ifThis, *cast(Thunk2*)&writeThis);
406     }
407 
408     /// Compare-and-exchange for `class`
409     bool cas(T, V)(shared(T)* here, shared(T)* ifThis, shared(V) writeThis)
410     pure nothrow @nogc @trusted
411     if (is(T == class))
412     in (atomicPtrIsProperlyAligned(here), "Argument `here` is not properly aligned")
413     {
414         return atomicCompareExchangeStrong!(succ, fail)(
415             cast(T*)here, cast(T*)ifThis, cast(V)writeThis);
416     }
417 }
418 
419 /**
420 * Stores 'writeThis' to the memory referenced by 'here' if the value
421 * referenced by 'here' is equal to 'ifThis'.
422 * The 'weak' version of cas may spuriously fail. It is recommended to
423 * use `casWeak` only when `cas` would be used in a loop.
424 * This operation is both
425 * lock-free and atomic.
426 *
427 * Params:
428 *  here      = The address of the destination variable.
429 *  writeThis = The value to store.
430 *  ifThis    = The comparison value.
431 *
432 * Returns:
433 *  true if the store occurred, false if not.
434 */
435 bool casWeak(MemoryOrder succ = MemoryOrder.seq,MemoryOrder fail = MemoryOrder.seq,T,V1,V2)(T* here, V1 ifThis, V2 writeThis) pure nothrow @nogc @trusted
436     if (!is(T == shared) && is(T : V1))
437 in (atomicPtrIsProperlyAligned(here), "Argument `here` is not properly aligned")
438 {
439     // resolve implicit conversions
440     T arg1 = ifThis;
441     T arg2 = writeThis;
442 
443     static if (__traits(isFloating, T))
444     {
445         alias IntTy = IntForFloat!T;
446         return atomicCompareExchangeWeakNoResult!(succ, fail)(cast(IntTy*)here, *cast(IntTy*)&arg1, *cast(IntTy*)&arg2);
447     }
448     else
449         return atomicCompareExchangeWeakNoResult!(succ, fail)(here, arg1, arg2);
450 }
451 
452 /// Ditto
453 bool casWeak(MemoryOrder succ = MemoryOrder.seq,MemoryOrder fail = MemoryOrder.seq,T,V1,V2)(shared(T)* here, V1 ifThis, V2 writeThis) pure nothrow @nogc @trusted
454     if (!is(T == class) && (is(T : V1) || is(shared T : V1)))
455 in (atomicPtrIsProperlyAligned(here), "Argument `here` is not properly aligned")
456 {
457     static if (is (V1 == shared U1, U1))
458         alias Thunk1 = U1;
459     else
460         alias Thunk1 = V1;
461     static if (is (V2 == shared U2, U2))
462         alias Thunk2 = U2;
463     else
464     {
465         import core.internal.traits : hasUnsharedIndirections;
        static assert(!hasUnsharedIndirections!V2, "Copying `" ~ V2.stringof ~ " writeThis` to `" ~ shared(T).stringof ~ "* here` would violate shared.");
467         alias Thunk2 = V2;
468     }
469     return casWeak!(succ, fail)(cast(T*)here, *cast(Thunk1*)&ifThis, *cast(Thunk2*)&writeThis);
470 }
471 
472 /// Ditto
473 bool casWeak(MemoryOrder succ = MemoryOrder.seq,MemoryOrder fail = MemoryOrder.seq,T,V1,V2)(shared(T)* here, shared(V1) ifThis, shared(V2) writeThis) pure nothrow @nogc @trusted
474     if (is(T == class))
475 in (atomicPtrIsProperlyAligned(here), "Argument `here` is not properly aligned")
476 {
477     return atomicCompareExchangeWeakNoResult!(succ, fail)(cast(T*)here, cast(V1)ifThis, cast(V2)writeThis);
478 }
479 
480 /**
481 * Stores 'writeThis' to the memory referenced by 'here' if the value
482 * referenced by 'here' is equal to the value referenced by 'ifThis'.
483 * The prior value referenced by 'here' is written to `ifThis` and
484 * returned to the user.
485 * The 'weak' version of cas may spuriously fail. It is recommended to
486 * use `casWeak` only when `cas` would be used in a loop.
487 * This operation is both lock-free and atomic.
488 *
489 * Params:
490 *  here      = The address of the destination variable.
491 *  writeThis = The value to store.
492 *  ifThis    = The address of the value to compare, and receives the prior value of `here` as output.
493 *
494 * Returns:
495 *  true if the store occurred, false if not.
496 */
497 bool casWeak(MemoryOrder succ = MemoryOrder.seq,MemoryOrder fail = MemoryOrder.seq,T,V)(T* here, T* ifThis, V writeThis) pure nothrow @nogc @trusted
498     if (!is(T == shared S, S) && !is(V == shared U, U))
499 in (atomicPtrIsProperlyAligned(here), "Argument `here` is not properly aligned")
500 {
    // resolve implicit conversions
    T arg1 = writeThis;

    static if (__traits(isFloating, T))
    {
        alias IntTy = IntForFloat!T;
        return atomicCompareExchangeWeak!(succ, fail)(cast(IntTy*)here, cast(IntTy*)ifThis, *cast(IntTy*)&arg1);
    }
    else
        return atomicCompareExchangeWeak!(succ, fail)(here, ifThis, arg1);
511 }
512 
513 /// Ditto
514 bool casWeak(MemoryOrder succ = MemoryOrder.seq,MemoryOrder fail = MemoryOrder.seq,T,V1,V2)(shared(T)* here, V1* ifThis, V2 writeThis) pure nothrow @nogc @trusted
515     if (!is(T == class) && (is(T : V1) || is(shared T : V1)))
516 in (atomicPtrIsProperlyAligned(here), "Argument `here` is not properly aligned")
517 {
518     static if (is (V1 == shared U1, U1))
519         alias Thunk1 = U1;
520     else
521     {
522         import core.internal.traits : hasUnsharedIndirections;
523         static assert(!hasUnsharedIndirections!V1, "Copying `" ~ shared(T).stringof ~ "* here` to `" ~ V1.stringof ~ "* ifThis` would violate shared.");
524         alias Thunk1 = V1;
525     }
526     static if (is (V2 == shared U2, U2))
527         alias Thunk2 = U2;
528     else
529     {
530         import core.internal.traits : hasUnsharedIndirections;
        static assert(!hasUnsharedIndirections!V2, "Copying `" ~ V2.stringof ~ " writeThis` to `" ~ shared(T).stringof ~ "* here` would violate shared.");
532         alias Thunk2 = V2;
533     }
534     static assert (is(T : Thunk1), "Mismatching types for `here` and `ifThis`: `" ~ shared(T).stringof ~ "` and `" ~ V1.stringof ~ "`.");
535     return casWeak!(succ, fail)(cast(T*)here, cast(Thunk1*)ifThis, *cast(Thunk2*)&writeThis);
536 }
537 
538 /// Ditto
539 bool casWeak(MemoryOrder succ = MemoryOrder.seq,MemoryOrder fail = MemoryOrder.seq,T,V)(shared(T)* here, shared(T)* ifThis, shared(V) writeThis) pure nothrow @nogc @trusted
540     if (is(T == class))
541 in (atomicPtrIsProperlyAligned(here), "Argument `here` is not properly aligned")
542 {
543     return atomicCompareExchangeWeak!(succ, fail)(cast(T*)here, cast(T*)ifThis, cast(V)writeThis);
544 }
545 
546 /**
547  * Inserts a full load/store memory fence (on platforms that need it). This ensures
548  * that all loads and stores before a call to this function are executed before any
549  * loads and stores after the call.
550  */
551 void atomicFence(MemoryOrder order = MemoryOrder.seq)() pure nothrow @nogc @safe
552 {
553     core.internal.atomic.atomicFence!order();
554 }
555 
556 /**
557  * Gives a hint to the processor that the calling thread is in a 'spin-wait' loop,
558  * allowing to more efficiently allocate resources.
559  */
560 void pause() pure nothrow @nogc @safe
561 {
562     core.internal.atomic.pause();
563 }
564 
565 /**
566  * Performs the binary operation 'op' on val using 'mod' as the modifier.
567  *
568  * Params:
569  *  val = The target variable.
570  *  mod = The modifier to apply.
571  *
572  * Returns:
573  *  The result of the operation.
574  */
575 TailShared!T atomicOp(string op, T, V1)(ref shared T val, V1 mod) pure nothrow @nogc @safe
576     if (__traits(compiles, mixin("*cast(T*)&val" ~ op ~ "mod")))
577 in (atomicValueIsProperlyAligned(val))
578 {
579     // binary operators
580     //
581     // +    -   *   /   %   ^^  &
582     // |    ^   <<  >>  >>> ~   in
583     // ==   !=  <   <=  >   >=
584     static if (op == "+"  || op == "-"  || op == "*"  || op == "/"   ||
585                 op == "%"  || op == "^^" || op == "&"  || op == "|"   ||
586                 op == "^"  || op == "<<" || op == ">>" || op == ">>>" ||
587                 op == "~"  || // skip "in"
588                 op == "==" || op == "!=" || op == "<"  || op == "<="  ||
589                 op == ">"  || op == ">=")
590     {
591         T get = atomicLoad!(MemoryOrder.raw, T)(val);
592         mixin("return get " ~ op ~ " mod;");
593     }
594     else
595     // assignment operators
596     //
597     // +=   -=  *=  /=  %=  ^^= &=
598     // |=   ^=  <<= >>= >>>=    ~=
599     static if (op == "+=" && __traits(isIntegral, T) && __traits(isIntegral, V1) && T.sizeof <= size_t.sizeof && V1.sizeof <= size_t.sizeof)
600     {
601         return cast(T)(atomicFetchAdd(val, mod) + mod);
602     }
603     else static if (op == "-=" && __traits(isIntegral, T) && __traits(isIntegral, V1) && T.sizeof <= size_t.sizeof && V1.sizeof <= size_t.sizeof)
604     {
605         return cast(T)(atomicFetchSub(val, mod) - mod);
606     }
607     else static if (op == "+=" || op == "-="  || op == "*="  || op == "/=" ||
608                 op == "%=" || op == "^^=" || op == "&="  || op == "|=" ||
609                 op == "^=" || op == "<<=" || op == ">>=" || op == ">>>=") // skip "~="
610     {
611         T set, get = atomicLoad!(MemoryOrder.raw, T)(val);
612         do
613         {
614             set = get;
615             mixin("set " ~ op ~ " mod;");
616         } while (!casWeakByRef(val, get, set));
617         return set;
618     }
619     else
620     {
621         static assert(false, "Operation not supported.");
622     }
623 }
624 
625 
626 version (D_InlineAsm_X86)
627 {
628     enum has64BitXCHG = false;
629     enum has64BitCAS = true;
630     enum has128BitCAS = false;
631 }
632 else version (D_InlineAsm_X86_64)
633 {
634     enum has64BitXCHG = true;
635     enum has64BitCAS = true;
636     enum has128BitCAS = true;
637 }
638 else version (GNU)
639 {
640     import gcc.config;
641     enum has64BitCAS = GNU_Have_64Bit_Atomics;
642     enum has64BitXCHG = GNU_Have_64Bit_Atomics;
643     enum has128BitCAS = GNU_Have_LibAtomic;
644 }
645 else
646 {
647     enum has64BitXCHG = false;
648     enum has64BitCAS = false;
649     enum has128BitCAS = false;
650 }
651 
652 private
653 {
654     bool atomicValueIsProperlyAligned(T)(ref T val) pure nothrow @nogc @trusted
655     {
656         return atomicPtrIsProperlyAligned(&val);
657     }
658 
659     bool atomicPtrIsProperlyAligned(T)(T* ptr) pure nothrow @nogc @safe
660     {
661         // NOTE: Strictly speaking, the x86 supports atomic operations on
662         //       unaligned values.  However, this is far slower than the
663         //       common case, so such behavior should be prohibited.
664         static if (T.sizeof > size_t.sizeof)
665         {
666             version (X86)
667             {
                // cmpxchg8b only requires 4-byte alignment
669                 return cast(size_t)ptr % size_t.sizeof == 0;
670             }
671             else
672             {
                // e.g., x86_64 cmpxchg16b requires 16-byte alignment
674                 return cast(size_t)ptr % T.sizeof == 0;
675             }
676         }
677         else
678         {
679             return cast(size_t)ptr % T.sizeof == 0;
680         }
681     }
682 
683     template IntForFloat(F)
684         if (__traits(isFloating, F))
685     {
686         static if (F.sizeof == 4)
687             alias IntForFloat = uint;
688         else static if (F.sizeof == 8)
689             alias IntForFloat = ulong;
690         else
            static assert (false, "Invalid floating point type: " ~ F.stringof ~ "; only `float` and `double` are supported.");
692     }
693 
    template IntForStruct(S)
        if (is(S == struct))
    {
        static if (S.sizeof == 1)
            alias IntForStruct = ubyte;
        else static if (S.sizeof == 2)
            alias IntForStruct = ushort;
        else static if (S.sizeof == 4)
            alias IntForStruct = uint;
        else static if (S.sizeof == 8)
            alias IntForStruct = ulong;
        else static if (S.sizeof == 16)
            alias IntForStruct = ulong[2]; // TODO: what's the best type here? slice/delegates pass in registers...
        else
            static assert (ValidateStruct!S);
    }
710 
711     template ValidateStruct(S)
712         if (is(S == struct))
713     {
714         import core.internal.traits : hasElaborateAssign;
715 
716         // `(x & (x-1)) == 0` checks that x is a power of 2.
717         static assert (S.sizeof <= size_t.sizeof * 2
718             && (S.sizeof & (S.sizeof - 1)) == 0,
719             S.stringof ~ " has invalid size for atomic operations.");
720         static assert (!hasElaborateAssign!S, S.stringof ~ " may not have an elaborate assignment when used with atomic operations.");
721 
722         enum ValidateStruct = true;
723     }
724 
725     // TODO: it'd be nice if we had @trusted scopes; we could remove this...
726     bool casWeakByRef(T,V1,V2)(ref T value, ref V1 ifThis, V2 writeThis) pure nothrow @nogc @trusted
727     {
728         return casWeak(&value, &ifThis, writeThis);
729     }
730 
731     /* Construct a type with a shared tail, and if possible with an unshared
732     head. */
733     template TailShared(U) if (!is(U == shared))
734     {
735         alias TailShared = .TailShared!(shared U);
736     }
737     template TailShared(S) if (is(S == shared))
738     {
739         // Get the unshared variant of S.
740         static if (is(S U == shared U)) {}
741         else static assert(false, "Should never be triggered. The `static " ~
742             "if` declares `U` as the unshared version of the shared type " ~
743             "`S`. `S` is explicitly declared as shared, so getting `U` " ~
744             "should always work.");
745 
746         static if (is(S : U))
747             alias TailShared = U;
748         else static if (is(S == struct))
749         {
750             enum implName = () {
751                 /* Start with "_impl". If S has a field with that name, append
752                 underscores until the clash is resolved. */
753                 string name = "_impl";
754                 string[] fieldNames;
755                 static foreach (alias field; S.tupleof)
756                 {
757                     fieldNames ~= __traits(identifier, field);
758                 }
759                 static bool canFind(string[] haystack, string needle)
760                 {
761                     foreach (candidate; haystack)
762                     {
763                         if (candidate == needle) return true;
764                     }
765                     return false;
766                 }
767                 while (canFind(fieldNames, name)) name ~= "_";
768                 return name;
769             } ();
770             struct TailShared
771             {
772                 static foreach (i, alias field; S.tupleof)
773                 {
774                     /* On @trusted: This is casting the field from shared(Foo)
775                     to TailShared!Foo. The cast is safe because the field has
776                     been loaded and is not shared anymore. */
777                     mixin("
778                         @trusted @property
779                         ref " ~ __traits(identifier, field) ~ "()
780                         {
781                             alias R = TailShared!(typeof(field));
782                             return * cast(R*) &" ~ implName ~ ".tupleof[i];
783                         }
784                     ");
785                 }
786                 mixin("
787                     S " ~ implName ~ ";
788                     alias " ~ implName ~ " this;
789                 ");
790             }
791         }
792         else
793             alias TailShared = S;
794     }
795     @safe unittest
796     {
797         // No tail (no indirections) -> fully unshared.
798 
799         static assert(is(TailShared!int == int));
800         static assert(is(TailShared!(shared int) == int));
801 
802         static struct NoIndir { int i; }
803         static assert(is(TailShared!NoIndir == NoIndir));
804         static assert(is(TailShared!(shared NoIndir) == NoIndir));
805 
806         // Tail can be independently shared or is already -> tail-shared.
807 
808         static assert(is(TailShared!(int*) == shared(int)*));
809         static assert(is(TailShared!(shared int*) == shared(int)*));
810         static assert(is(TailShared!(shared(int)*) == shared(int)*));
811 
812         static assert(is(TailShared!(int[]) == shared(int)[]));
813         static assert(is(TailShared!(shared int[]) == shared(int)[]));
814         static assert(is(TailShared!(shared(int)[]) == shared(int)[]));
815 
816         static struct S1 { shared int* p; }
817         static assert(is(TailShared!S1 == S1));
818         static assert(is(TailShared!(shared S1) == S1));
819 
820         static struct S2 { shared(int)* p; }
821         static assert(is(TailShared!S2 == S2));
822         static assert(is(TailShared!(shared S2) == S2));
823 
824         // Tail follows shared-ness of head -> fully shared.
825 
826         static class C { int i; }
827         static assert(is(TailShared!C == shared C));
828         static assert(is(TailShared!(shared C) == shared C));
829 
830         /* However, structs get a wrapper that has getters which cast to
831         TailShared. */
832 
833         static struct S3 { int* p; int _impl; int _impl_; int _impl__; }
834         static assert(!is(TailShared!S3 : S3));
835         static assert(is(TailShared!S3 : shared S3));
836         static assert(is(TailShared!(shared S3) == TailShared!S3));
837 
838         static struct S4 { shared(int)** p; }
839         static assert(!is(TailShared!S4 : S4));
840         static assert(is(TailShared!S4 : shared S4));
841         static assert(is(TailShared!(shared S4) == TailShared!S4));
842     }
843 }
844 
845 
846 ////////////////////////////////////////////////////////////////////////////////
847 // Unit Tests
848 ////////////////////////////////////////////////////////////////////////////////
849 
850 
851 version (CoreUnittest)
852 {
853     version (D_LP64)
854     {
855         enum hasDWCAS = has128BitCAS;
856     }
857     else
858     {
859         enum hasDWCAS = has64BitCAS;
860     }
861 
862     void testXCHG(T)(T val) pure nothrow @nogc @trusted
863     in
864     {
865         assert(val !is T.init);
866     }
867     do
868     {
869         T         base = cast(T)null;
870         shared(T) atom = cast(shared(T))null;
871 
872         assert(base !is val, T.stringof);
873         assert(atom is base, T.stringof);
874 
875         assert(atomicExchange(&atom, val) is base, T.stringof);
876         assert(atom is val, T.stringof);
877     }
878 
879     void testCAS(T)(T val) pure nothrow @nogc @trusted
880     in
881     {
882         assert(val !is T.init);
883     }
884     do
885     {
886         T         base = cast(T)null;
887         shared(T) atom = cast(shared(T))null;
888 
889         assert(base !is val, T.stringof);
890         assert(atom is base, T.stringof);
891 
892         assert(cas(&atom, base, val), T.stringof);
893         assert(atom is val, T.stringof);
894         assert(!cas(&atom, base, base), T.stringof);
895         assert(atom is val, T.stringof);
896 
897         atom = cast(shared(T))null;
898 
899         shared(T) arg = base;
900         assert(cas(&atom, &arg, val), T.stringof);
901         assert(arg is base, T.stringof);
902         assert(atom is val, T.stringof);
903 
904         arg = base;
905         assert(!cas(&atom, &arg, base), T.stringof);
906         assert(arg is val, T.stringof);
907         assert(atom is val, T.stringof);
908     }
909 
910     void testLoadStore(MemoryOrder ms = MemoryOrder.seq, T)(T val = T.init + 1) pure nothrow @nogc @trusted
911     {
912         T         base = cast(T) 0;
913         shared(T) atom = cast(T) 0;
914 
915         assert(base !is val);
916         assert(atom is base);
917         atomicStore!(ms)(atom, val);
918         base = atomicLoad!(ms)(atom);
919 
920         assert(base is val, T.stringof);
921         assert(atom is val);
922     }
923 
924 
925     void testType(T)(T val = T.init + 1) pure nothrow @nogc @safe
926     {
927         static if (T.sizeof < 8 || has64BitXCHG)
928             testXCHG!(T)(val);
929         testCAS!(T)(val);
930         testLoadStore!(MemoryOrder.seq, T)(val);
931         testLoadStore!(MemoryOrder.raw, T)(val);
932     }
933 
934     @betterC @safe pure nothrow unittest
935     {
936         testType!(bool)();
937 
938         testType!(byte)();
939         testType!(ubyte)();
940 
941         testType!(short)();
942         testType!(ushort)();
943 
944         testType!(int)();
945         testType!(uint)();
946     }
947 
948     @safe pure nothrow unittest
949     {
950 
951         testType!(shared int*)();
952 
953         static interface Inter {}
954         static class KlassImpl : Inter {}
955         testXCHG!(shared Inter)(new shared(KlassImpl));
956         testCAS!(shared Inter)(new shared(KlassImpl));
957 
958         static class Klass {}
959         testXCHG!(shared Klass)(new shared(Klass));
960         testCAS!(shared Klass)(new shared(Klass));
961 
962         testXCHG!(shared int)(42);
963 
964         testType!(float)(0.1f);
965 
966         static if (has64BitCAS)
967         {
968             testType!(double)(0.1);
969             testType!(long)();
970             testType!(ulong)();
971         }
972         static if (has128BitCAS)
973         {
974             () @trusted
975             {
976                 align(16) struct Big { long a, b; }
977 
978                 shared(Big) atom;
979                 shared(Big) base;
980                 shared(Big) arg;
981                 shared(Big) val = Big(1, 2);
982 
983                 assert(cas(&atom, arg, val), Big.stringof);
984                 assert(atom is val, Big.stringof);
985                 assert(!cas(&atom, arg, val), Big.stringof);
986                 assert(atom is val, Big.stringof);
987 
988                 atom = Big();
989                 assert(cas(&atom, &arg, val), Big.stringof);
990                 assert(arg is base, Big.stringof);
991                 assert(atom is val, Big.stringof);
992 
993                 arg = Big();
994                 assert(!cas(&atom, &arg, base), Big.stringof);
995                 assert(arg is val, Big.stringof);
996                 assert(atom is val, Big.stringof);
997             }();
998         }
999 
1000         shared(size_t) i;
1001 
1002         atomicOp!"+="(i, cast(size_t) 1);
1003         assert(i == 1);
1004 
1005         atomicOp!"-="(i, cast(size_t) 1);
1006         assert(i == 0);
1007 
1008         shared float f = 0.1f;
1009         atomicOp!"+="(f, 0.1f);
1010         assert(f > 0.1999f && f < 0.2001f);
1011 
1012         static if (has64BitCAS)
1013         {
1014             shared double d = 0.1;
1015             atomicOp!"+="(d, 0.1);
1016             assert(d > 0.1999 && d < 0.2001);
1017         }
1018     }
1019 
1020     @betterC pure nothrow unittest
1021     {
1022         static if (has128BitCAS)
1023         {
1024             struct DoubleValue
1025             {
1026                 long value1;
1027                 long value2;
1028             }
1029 
1030             align(16) shared DoubleValue a;
            atomicStore(a, DoubleValue(1, 2));
            assert(a.value1 == 1 && a.value2 == 2);

            while (!cas(&a, DoubleValue(1, 2), DoubleValue(3, 4))) {}
            assert(a.value1 == 3 && a.value2 == 4);

            align(16) DoubleValue b = atomicLoad(a);
            assert(b.value1 == 3 && b.value2 == 4);
1039         }
1040 
1041         static if (hasDWCAS)
1042         {
1043             static struct List { size_t gen; List* next; }
1044             shared(List) head;
1045             assert(cas(&head, shared(List)(0, null), shared(List)(1, cast(List*)1)));
1046             assert(head.gen == 1);
1047             assert(cast(size_t)head.next == 1);
1048         }
1049 
1050         // https://issues.dlang.org/show_bug.cgi?id=20629
1051         static struct Struct
1052         {
1053             uint a, b;
1054         }
1055         shared Struct s1 = Struct(1, 2);
1056         atomicStore(s1, Struct(3, 4));
1057         assert(cast(uint) s1.a == 3);
1058         assert(cast(uint) s1.b == 4);
1059     }
1060 
1061     // https://issues.dlang.org/show_bug.cgi?id=20844
1062     static if (hasDWCAS)
1063     {
1064         debug: // tests CAS in-contract
1065 
1066         pure nothrow unittest
1067         {
1068             import core.exception : AssertError;
1069 
1070             align(16) shared ubyte[2 * size_t.sizeof + 1] data;
1071             auto misalignedPointer = cast(size_t[2]*) &data[1];
1072             size_t[2] x;
1073 
1074             try
1075                 cas(misalignedPointer, x, x);
1076             catch (AssertError)
1077                 return;
1078 
1079             assert(0, "should have failed");
1080         }
1081     }
1082 
1083     @betterC pure nothrow @nogc @safe unittest
1084     {
1085         int a;
1086         if (casWeak!(MemoryOrder.acq_rel, MemoryOrder.raw)(&a, 0, 4))
1087             assert(a == 4);
1088     }
1089 
1090     @betterC pure nothrow unittest
1091     {
1092         static struct S { int val; }
1093         auto s = shared(S)(1);
1094 
1095         shared(S*) ptr;
1096 
1097         // head unshared
1098         shared(S)* ifThis = null;
1099         shared(S)* writeThis = &s;
1100         assert(ptr is null);
1101         assert(cas(&ptr, ifThis, writeThis));
1102         assert(ptr is writeThis);
1103 
1104         // head shared
1105         shared(S*) ifThis2 = writeThis;
1106         shared(S*) writeThis2 = null;
1107         assert(cas(&ptr, ifThis2, writeThis2));
1108         assert(ptr is null);
1109     }
1110 
1111     // === atomicFetchAdd and atomicFetchSub operations ====
1112     @betterC pure nothrow @nogc @safe unittest
1113     {
1114         shared ubyte u8 = 1;
1115         shared ushort u16 = 2;
1116         shared uint u32 = 3;
1117         shared byte i8 = 5;
1118         shared short i16 = 6;
1119         shared int i32 = 7;
1120 
1121         assert(atomicOp!"+="(u8, 8) == 9);
1122         assert(atomicOp!"+="(u16, 8) == 10);
1123         assert(atomicOp!"+="(u32, 8) == 11);
1124         assert(atomicOp!"+="(i8, 8) == 13);
1125         assert(atomicOp!"+="(i16, 8) == 14);
1126         assert(atomicOp!"+="(i32, 8) == 15);
1127         version (D_LP64)
1128         {
1129             shared ulong u64 = 4;
1130             shared long i64 = 8;
1131             assert(atomicOp!"+="(u64, 8) == 12);
1132             assert(atomicOp!"+="(i64, 8) == 16);
1133         }
1134     }
1135 
1136     @betterC pure nothrow @nogc unittest
1137     {
1138         byte[10] byteArray = [1, 3, 5, 7, 9, 11, 13, 15, 17, 19];
1139         ulong[10] ulongArray = [2, 4, 6, 8, 10, 12, 14, 16, 19, 20];
1140 
1141         {
1142             auto array = byteArray;
1143             byte* ptr = &array[0];
1144             byte* prevPtr = atomicFetchAdd(ptr, 3);
1145             assert(prevPtr == &array[0]);
1146             assert(*prevPtr == 1);
1147             assert(*ptr == 7);
1148         }
1149         {
1150             auto array = ulongArray;
1151             ulong* ptr = &array[0];
1152             ulong* prevPtr = atomicFetchAdd(ptr, 3);
1153             assert(prevPtr == &array[0]);
1154             assert(*prevPtr == 2);
1155             assert(*ptr == 8);
1156         }
1157     }
1158 
1159     @betterC pure nothrow @nogc @safe unittest
1160     {
1161         shared ubyte u8 = 1;
1162         shared ushort u16 = 2;
1163         shared uint u32 = 3;
1164         shared byte i8 = 5;
1165         shared short i16 = 6;
1166         shared int i32 = 7;
1167 
1168         assert(atomicOp!"-="(u8, 1) == 0);
1169         assert(atomicOp!"-="(u16, 1) == 1);
1170         assert(atomicOp!"-="(u32, 1) == 2);
1171         assert(atomicOp!"-="(i8, 1) == 4);
1172         assert(atomicOp!"-="(i16, 1) == 5);
1173         assert(atomicOp!"-="(i32, 1) == 6);
1174         version (D_LP64)
1175         {
1176             shared ulong u64 = 4;
1177             shared long i64 = 8;
1178             assert(atomicOp!"-="(u64, 1) == 3);
1179             assert(atomicOp!"-="(i64, 1) == 7);
1180         }
1181     }
1182 
1183     @betterC pure nothrow @nogc unittest
1184     {
1185         byte[10] byteArray = [1, 3, 5, 7, 9, 11, 13, 15, 17, 19];
1186         ulong[10] ulongArray = [2, 4, 6, 8, 10, 12, 14, 16, 19, 20];
1187 
1188         {
1189             auto array = byteArray;
1190             byte* ptr = &array[5];
1191             byte* prevPtr = atomicFetchSub(ptr, 4);
1192             assert(prevPtr == &array[5]);
1193             assert(*prevPtr == 11);
1194             assert(*ptr == 3); // https://issues.dlang.org/show_bug.cgi?id=21578
1195         }
1196         {
1197             auto array = ulongArray;
1198             ulong* ptr = &array[5];
1199             ulong* prevPtr = atomicFetchSub(ptr, 4);
1200             assert(prevPtr == &array[5]);
1201             assert(*prevPtr == 12);
1202             assert(*ptr == 4); // https://issues.dlang.org/show_bug.cgi?id=21578
1203         }
1204     }
1205 
1206     @betterC pure nothrow @nogc @safe unittest // https://issues.dlang.org/show_bug.cgi?id=16651
1207     {
1208         shared ulong a = 2;
1209         uint b = 1;
1210         atomicOp!"-="(a, b);
1211         assert(a == 1);
1212 
1213         shared uint c = 2;
1214         ubyte d = 1;
1215         atomicOp!"-="(c, d);
1216         assert(c == 1);
1217     }
1218 
1219     pure nothrow @safe unittest // https://issues.dlang.org/show_bug.cgi?id=16230
1220     {
1221         shared int i;
1222         static assert(is(typeof(atomicLoad(i)) == int));
1223 
1224         shared int* p;
1225         static assert(is(typeof(atomicLoad(p)) == shared(int)*));
1226 
1227         shared int[] a;
1228         static if (__traits(compiles, atomicLoad(a)))
1229         {
1230             static assert(is(typeof(atomicLoad(a)) == shared(int)[]));
1231         }
1232 
1233         static struct S { int* _impl; }
1234         shared S s;
1235         static assert(is(typeof(atomicLoad(s)) : shared S));
1236         static assert(is(typeof(atomicLoad(s)._impl) == shared(int)*));
1237         auto u = atomicLoad(s);
1238         assert(u._impl is null);
1239         u._impl = new shared int(42);
1240         assert(atomicLoad(*u._impl) == 42);
1241 
1242         static struct S2 { S s; }
1243         shared S2 s2;
1244         static assert(is(typeof(atomicLoad(s2).s) == TailShared!S));
1245 
1246         static struct S3 { size_t head; int* tail; }
1247         shared S3 s3;
1248         static if (__traits(compiles, atomicLoad(s3)))
1249         {
1250             static assert(is(typeof(atomicLoad(s3).head) == size_t));
1251             static assert(is(typeof(atomicLoad(s3).tail) == shared(int)*));
1252         }
1253 
1254         static class C { int i; }
1255         shared C c;
1256         static assert(is(typeof(atomicLoad(c)) == shared C));
1257 
1258         static struct NoIndirections { int i; }
1259         shared NoIndirections n;
1260         static assert(is(typeof(atomicLoad(n)) == NoIndirections));
1261     }
1262 
1263     unittest // Issue 21631
1264     {
1265         shared uint si1 = 45;
1266         shared uint si2 = 38;
1267         shared uint* psi = &si1;
1268 
1269         assert((&psi).cas(cast(const) psi, &si2));
1270     }
1271 }