1 /****************************************************************************
2 **
3 **  This file is part of GAP, a system for computational discrete algebra.
4 **
5 **  Copyright of GAP belongs to its developers, whose names are too numerous
6 **  to list here. Please refer to the COPYRIGHT file for details.
7 **
8 **  SPDX-License-Identifier: GPL-2.0-or-later
9 */
10 
11 #ifndef GAP_AOBJECTS_H
12 #define GAP_AOBJECTS_H
13 
14 #include "objects.h"
15 #include "hpc/atomic.h"
16 
17 #ifndef HPCGAP
18 #error This header is only meant to be used with HPC-GAP
19 #endif
20 
21 StructInitInfo *InitInfoAObjects(void);
22 Obj NewAtomicRecord(UInt capacity);
23 Obj SetARecordField(Obj record, UInt field, Obj obj);
24 Obj GetARecordField(Obj record, UInt field);
25 Obj ElmARecord(Obj record, UInt rnam);
26 Int IsbARecord(Obj record, UInt rnam);
27 void AssARecord(Obj record, UInt rnam, Obj value);
28 void UnbARecord(Obj record, UInt rnam);
29 
30 void AssTLRecord(Obj record, UInt field, Obj obj);
31 Obj GetTLRecordField(Obj record, UInt field);
32 Obj FromAtomicRecord(Obj record);
33 void SetTLDefault(Obj record, UInt rnam, Obj value);
34 void SetTLConstructor(Obj record, UInt rnam, Obj func);
35 
36 Obj NewAtomicList(UInt tnum, UInt capacity);
37 Obj FromAtomicList(Obj list);
38 UInt AddAList(Obj list, Obj obj);
39 void AssAList(Obj list, Int pos, Obj obj);
40 Obj ElmAList(Obj list, Int pos);
41 Obj Elm0AList(Obj list, Int pos);
42 Obj LengthAList(Obj list);
43 
44 
45 /*****************************************************************************
46 **
47 *F  CompareAndSwapObj(<addr>, <old>, <new_>)
48 **
49 **  Atomically compare *<addr> with <old> and exchange for <new_>.
50 **
51 **  The function implements the usual compare-and-swap semantics for
52 **  objects. It atomically does the following:
53 **
54 **    (1) Compare *<addr> with <old>.
55 **    (2) Exchange *<addr> with <new_> if the comparison succeeded.
56 **
57 **  It returns a non-zero value if the comparison in (1) succeeded, zero
58 **  otherwise.
59 **  markuspf: renamed new to new_ for compatibility with C++ packages.
60 */
61 
CompareAndSwapObj(Obj * addr,Obj old,Obj new_)62 EXPORT_INLINE int CompareAndSwapObj(Obj *addr, Obj old, Obj new_) {
63 #ifndef WARD_ENABLED
64   return COMPARE_AND_SWAP((AtomicUInt *) addr,
65     (AtomicUInt) old, (AtomicUInt) new_);
66 #endif
67 }
68 
69 /*****************************************************************************
70 **
71 *F  ATOMIC_SET_ELM_PLIST(<list>, <index>, <value>)
72 *F  ATOMIC_SET_ELM_PLIST_ONCE(<list>, <index>, <value>)
73 *F  ATOMIC_ELM_PLIST(<list>, <index>)
74 **
75 **  Set or access plain lists atomically. The plain lists must be of fixed
76 **  size and not be resized concurrently with such operations. The functions
77 **  assume that <index> is in the range 1..LEN_PLIST(<list>).
78 **
79 **  <value> must be an atomic or immutable object or access to it must be
80 **  properly regulated by locks.
81 **
82 **  'ATOMIC_ELM_PLIST' and 'ATOMIC_SET_ELM_PLIST' read and write plain lists,
83 **  annotated with memory barriers that ensure that concurrent threads do
84 **  not read objects that have not been fully initialized.
85 **
**  'ATOMIC_SET_ELM_PLIST_ONCE' assigns a value similar to
**  'ATOMIC_SET_ELM_PLIST',
87 **  but only if <list>[<index>] is currently unbound. If that value has
88 **  been bound already, it will return the existing value; otherwise it
89 **  assigns <value> and returns it.
90 **
91 **  Canonical usage to read or initialize the field of a plist is as
92 **  follows:
93 **
94 **    obj = ATOMIC_ELM_PLIST(list, index);
95 **    if (!obj) {
96 **       obj = ...;
**       obj = ATOMIC_SET_ELM_PLIST_ONCE(list, index, obj);
98 **    }
99 **
100 **  This construction ensures that while <obj> may be calculated more
101 **  than once, all threads will share the same value; furthermore,
**  reading an already initialized value is generally very cheap,
103 **  incurring the cost of a read, a read barrier, and a branch (which,
**  after initialization, will generally be predicted correctly by branch
105 **  prediction logic).
106 */
107 
108 
ATOMIC_SET_ELM_PLIST(Obj list,UInt index,Obj value)109 EXPORT_INLINE void ATOMIC_SET_ELM_PLIST(Obj list, UInt index, Obj value) {
110 #ifndef WARD_ENABLED
111   Obj *contents = ADDR_OBJ(list);
112   MEMBAR_WRITE(); /* ensure that contents[index] becomes visible to
113                    * other threads only after value has become visible,
114                    * too.
115                    */
116   contents[index] = value;
117 #endif
118 }
119 
ATOMIC_SET_ELM_PLIST_ONCE(Obj list,UInt index,Obj value)120 EXPORT_INLINE Obj ATOMIC_SET_ELM_PLIST_ONCE(Obj list, UInt index, Obj value) {
121 #ifndef WARD_ENABLED
122   Obj *contents = ADDR_OBJ(list);
123   Obj result;
124   for (;;) {
125     result = contents[index];
126     if (result) {
127       MEMBAR_READ(); /* matching memory barrier. */
128       return result;
129     }
130     if (COMPARE_AND_SWAP((AtomicUInt *)(contents+index),
131       (AtomicUInt) 0, (AtomicUInt) value)) {
132       /* no extra memory barrier needed, a full barrier is implicit in the
133        * COMPARE_AND_SWAP() call.
134        */
135       return value;
136     }
137   }
138 #endif
139 }
140 
ATOMIC_ELM_PLIST(Obj list,UInt index)141 EXPORT_INLINE Obj ATOMIC_ELM_PLIST(Obj list, UInt index) {
142 #ifndef WARD_ENABLED
143   const Obj *contents = CONST_ADDR_OBJ(list);
144   Obj result;
145   result = contents[index];
146   MEMBAR_READ(); /* matching memory barrier. */
147   return result;
148 #endif
149 }
150 
151 #endif // GAP_AOBJECTS_H
152