1 /****************************************************************************
2 **
3 **  This file is part of GAP, a system for computational discrete algebra.
4 **
5 **  Copyright of GAP belongs to its developers, whose names are too numerous
6 **  to list here. Please refer to the COPYRIGHT file for details.
7 **
8 **  SPDX-License-Identifier: GPL-2.0-or-later
9 **
10 **  This file implements atomic lists, atomic records and thread-local records.
11 */
12 
13 #include "hpc/aobjects.h"
14 
15 #include "hpc/guards.h"
16 #include "hpc/thread.h"
17 #include "hpc/traverse.h"
18 
19 #include "ariths.h"
20 #include "bool.h"
21 #include "calls.h"
22 #include "error.h"
23 #include "fibhash.h"
24 #include "gapstate.h"
25 #include "gaputils.h"
26 #include "gvars.h"
27 #include "io.h"
28 #include "lists.h"
29 #include "modules.h"
30 #include "objects.h"
31 #include "plist.h"
32 #include "precord.h"
33 #include "records.h"
34 #include "stringobj.h"
35 
36 
37 static Obj TYPE_ALIST;
38 static Obj TYPE_AREC;
39 static Obj TYPE_TLREC;
40 
41 #define ALIST_LEN(x) ((x) >> 2)
42 #define ALIST_POL(x) ((x) & 3)
43 #define CHANGE_ALIST_LEN(x, y) (((x) & 3) | ((y) << 2))
44 #define CHANGE_ALIST_POL(x, y) (((x) & ~3) | (y))
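/* The first word of an atomic list bag packs the update policy into the low
 * two bits and the list length into the remaining bits.  For illustration:
 *   CHANGE_ALIST_LEN(ALIST_RW, 5)   ==  (5 << 2) | ALIST_RW
 *   ALIST_LEN((5 << 2) | ALIST_RW)  ==  5
 * (ALIST_RW and friends are defined in the enum just below.)
 */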
45 
46 typedef enum  {
47     ALIST_RW = 0,
48     ALIST_W1 = 1,
49     ALIST_WX = 2,
50 } AtomicListPolicy;
51 
52 typedef enum {
53     AREC_RW = 1,
54     AREC_W1 = 0,
55     AREC_WX = -1,
56 } AtomicRecordPolicy;
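/* Policy semantics, as implemented by the assignment code below:
 *   ALIST_RW / AREC_RW:  assignments may overwrite existing entries.
 *   ALIST_W1 / AREC_W1:  write-once; the first assignment wins and later
 *                        assignments are silently ignored.
 *   ALIST_WX / AREC_WX:  strict write-once; rebinding an already bound entry
 *                        is an error (for records the store returns 0 and the
 *                        caller reports the error).
 * These correspond to MakeReadWriteAtomic, MakeWriteOnceAtomic and
 * MakeStrictWriteOnceAtomic further down in this file.
 */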
57 
58 typedef union AtomicObj
59 {
60   AtomicUInt atom;
61   Obj obj;
62 } AtomicObj;
63 
64 #define ADDR_ATOM(bag) ((AtomicObj *)(ADDR_OBJ(bag)))
65 #define CONST_ADDR_ATOM(bag) ((const AtomicObj *)(CONST_ADDR_OBJ(bag)))
66 
67 #ifndef WARD_ENABLED
68 
69 static UInt UsageCap[sizeof(UInt)*8];
70 
71 static Obj TypeAList(Obj obj)
72 {
73   Obj result;
74   const Obj *addr = CONST_ADDR_OBJ(obj);
75   MEMBAR_READ();
76   result = addr[1];
77   return result != NULL ? result : TYPE_ALIST;
78 }
79 
80 static Obj TypeARecord(Obj obj)
81 {
82   Obj result;
83   MEMBAR_READ();
84   result = CONST_ADDR_OBJ(obj)[0];
85   return result != NULL ? result : TYPE_AREC;
86 }
87 
88 static Obj TypeTLRecord(Obj obj)
89 {
90   return TYPE_TLREC;
91 }
92 
93 static void SetTypeAList(Obj obj, Obj kind)
94 {
95   switch (TNUM_OBJ(obj)) {
96     case T_ALIST:
97     case T_FIXALIST:
98       HashLock(obj);
99       ADDR_OBJ(obj)[1] = kind;
100       CHANGED_BAG(obj);
101       RetypeBag(obj, T_APOSOBJ);
102       HashUnlock(obj);
103       break;
104     case T_APOSOBJ:
105       HashLock(obj);
106       ADDR_OBJ(obj)[1] = kind;
107       CHANGED_BAG(obj);
108       HashUnlock(obj);
109       break;
110   }
111   MEMBAR_WRITE();
112 }
113 
114 static void SetTypeARecord(Obj obj, Obj kind)
115 {
116   ADDR_OBJ(obj)[0] = kind;
117   CHANGED_BAG(obj);
118   RetypeBag(obj, T_ACOMOBJ);
119   MEMBAR_WRITE();
120 }
121 
122 
123 static void ArgumentError(const char *message)
124 {
125   ErrorQuit(message, 0, 0);
126 }
127 
128 Obj NewAtomicList(UInt tnum, UInt capacity)
129 {
130     Obj result = NewBag(tnum, sizeof(AtomicObj) * (capacity + 2));
131     MEMBAR_WRITE();
132     return result;
133 }
134 
135 static Obj NewAtomicListInit(UInt tnum, UInt len, Obj init)
136 {
137     Obj         result = NewAtomicList(tnum, len);
138     AtomicObj * data = ADDR_ATOM(result);
139     data->atom = CHANGE_ALIST_LEN(ALIST_RW, len);
140     for (UInt i = 1; i <= len; i++)
141         data[i + 1].obj = init;
142     CHANGED_BAG(result);
143     MEMBAR_WRITE();    // Should not be necessary, but better be safe.
144     return result;
145 }
146 
147 static Obj NewAtomicListFrom(UInt tnum, Obj list)
148 {
149     UInt        len = LEN_LIST(list);
150     Obj         result = NewAtomicList(tnum, len);
151     AtomicObj * data = ADDR_ATOM(result);
152     data->atom = CHANGE_ALIST_LEN(ALIST_RW, len);
153     for (UInt i = 1; i <= len; i++)
154         data[i + 1].obj = ELM0_LIST(list, i);
155     CHANGED_BAG(result);
156     MEMBAR_WRITE();    // Should not be necessary, but better be safe.
157     return result;
158 }
159 
160 static Obj FuncAtomicList(Obj self, Obj args)
161 {
162   Obj init;
163   Int len;
164   switch (LEN_PLIST(args)) {
165   case 0:
166       return NewAtomicList(T_ALIST, 0);
167   case 1:
168       init = ELM_PLIST(args, 1);
169       if (IS_LIST(init)) {
170           return NewAtomicListFrom(T_ALIST, init);
171       }
172       else if (IS_INTOBJ(init) && INT_INTOBJ(init) >= 0) {
173           len = INT_INTOBJ(init);
174           return NewAtomicListInit(T_ALIST, len, 0);
175       }
176       else {
177           ArgumentError(
178               "AtomicList: Argument must be list or a non-negative integer");
179       }
180     case 2:
181         init = ELM_PLIST(args, 1);
182         len = IS_INTOBJ(init) ? INT_INTOBJ(init) : -1;
183         if (len < 0)
184             ArgumentError(
185                 "AtomicList: First argument must be a non-negative integer");
186         init = ELM_PLIST(args, 2);
187         return NewAtomicListInit(T_ALIST, len, init);
188     default:
189       ArgumentError("AtomicList: Too many arguments");
190   }
191   return (Obj)0; /* flow control hint */
192 }
193 
194 static Obj FuncFixedAtomicList(Obj self, Obj args)
195 {
196   Obj init;
197   Int len;
198   switch (LEN_PLIST(args)) {
199   case 0:
200       return NewAtomicList(T_FIXALIST, 0);
201   case 1:
202       init = ELM_PLIST(args, 1);
203       if (IS_LIST(init)) {
204           return NewAtomicListFrom(T_FIXALIST, init);
205       }
206       else if (IS_INTOBJ(init) && INT_INTOBJ(init) >= 0) {
207           len = INT_INTOBJ(init);
208           return NewAtomicListInit(T_FIXALIST, len, 0);
209       }
210       else {
211           ArgumentError("FixedAtomicList: Argument must be list or a "
212                         "non-negative integer");
213       }
214     case 2:
215         init = ELM_PLIST(args, 1);
216         len = IS_INTOBJ(init) ? INT_INTOBJ(init) : -1;
217         if (len < 0)
218             ArgumentError("FixedAtomicList: First argument must be a "
219                           "non-negative integer");
220         init = ELM_PLIST(args, 2);
221         return NewAtomicListInit(T_FIXALIST, len, init);
222     default:
223       ArgumentError("FixedAtomicList: Too many arguments");
224   }
225   return (Obj)0; /* flow control hint */
226 }
227 
228 static Obj FuncMakeFixedAtomicList(Obj self, Obj list) {
229   switch (TNUM_OBJ(list)) {
230     case T_ALIST:
231     case T_FIXALIST:
232       HashLock(list);
233       switch (TNUM_OBJ(list)) {
234         case T_ALIST:
235         case T_FIXALIST:
236           RetypeBag(list, T_FIXALIST);
237           HashUnlock(list);
238           return list;
239         default:
240           HashUnlock(list);
241           ArgumentError("MakeFixedAtomicList: Argument must be atomic list");
242           return (Obj) 0; /* flow control hint */
243       }
244       HashUnlock(list);
245       break;
246     default:
247       ArgumentError("MakeFixedAtomicList: Argument must be atomic list");
248   }
249   return (Obj) 0; /* flow control hint */
250 }
251 
252 static Obj FuncIS_ATOMIC_RECORD (Obj self, Obj obj)
253 {
254         return (TNUM_OBJ(obj) == T_AREC) ? True : False;
255 }
256 
257 static Obj FuncIS_ATOMIC_LIST (Obj self, Obj obj)
258 {
259         return (TNUM_OBJ(obj) == T_ALIST) ? True : False;
260 }
261 
262 static Obj FuncIS_FIXED_ATOMIC_LIST (Obj self, Obj obj)
263 {
264         return (TNUM_OBJ(obj) == T_FIXALIST) ? True : False;
265 }
266 
267 
268 static Obj FuncGET_ATOMIC_LIST(Obj self, Obj list, Obj index)
269 {
270   UInt n;
271   UInt len;
272   const AtomicObj *addr;
273   if (TNUM_OBJ(list) != T_ALIST && TNUM_OBJ(list) != T_FIXALIST)
274     ArgumentError("GET_ATOMIC_LIST: First argument must be an atomic list");
275   addr = CONST_ADDR_ATOM(list);
276   len = ALIST_LEN((UInt) addr[0].atom);
277   if (!IS_INTOBJ(index))
278     ArgumentError("GET_ATOMIC_LIST: Second argument must be an integer");
279   n = INT_INTOBJ(index);
280   if (n <= 0 || n > len)
281     ArgumentError("GET_ATOMIC_LIST: Index out of range");
282   MEMBAR_READ(); /* read barrier */
283   return addr[n+1].obj;
284 }
285 
286 // If list[index] is bound then return it, else return 'value'.
287 // The reason this function exists is that it is not thread-safe to
288 // check if an index in a list is bound before reading it, as it
289 // could be unbound before the actual reading is performed.
290 static Obj ElmDefAList(Obj list, Int n, Obj value)
291 {
292     UInt        len;
293     const AtomicObj * addr;
294     Obj         val;
295 
296     GAP_ASSERT(TNUM_OBJ(list) == T_ALIST || TNUM_OBJ(list) == T_FIXALIST);
297     GAP_ASSERT(n > 0);
298     addr = CONST_ADDR_ATOM(list);
299     len = ALIST_LEN((UInt)addr[0].atom);
300 
301     if (n <= 0 || n > len) {
302         val = 0;
303     }
304     else {
305         MEMBAR_READ();
306         val = addr[n + 1].obj;
307     }
308 
309     if (val == 0) {
310         return value;
311     }
312     else {
313         return val;
314     }
315 }
316 
317 static Obj FuncSET_ATOMIC_LIST(Obj self, Obj list, Obj index, Obj value)
318 {
319   UInt n;
320   UInt len;
321   AtomicObj *addr;
322   if (TNUM_OBJ(list) != T_ALIST && TNUM_OBJ(list) != T_FIXALIST)
323     ArgumentError("SET_ATOMIC_LIST: First argument must be an atomic list");
324   addr = ADDR_ATOM(list);
325   len = ALIST_LEN((UInt) addr[0].atom);
326   if (!IS_INTOBJ(index))
327     ArgumentError("SET_ATOMIC_LIST: Second argument must be an integer");
328   n = INT_INTOBJ(index);
329   if (n <= 0 || n > len)
330     ArgumentError("SET_ATOMIC_LIST: Index out of range");
331   addr[n+1].obj = value;
332   CHANGED_BAG(list);
333   MEMBAR_WRITE(); /* write barrier */
334   return (Obj) 0;
335 }
336 
337 static Obj AtomicCompareSwapAList(Obj list, Int index, Obj old, Obj new);
338 
339 // Given atomic list 'list', assign list[index] the value 'new', if list[index]
340 // is currently assigned 'old'. This operation is performed atomically.
341 static Obj FuncCOMPARE_AND_SWAP(Obj self, Obj list, Obj index, Obj old, Obj new)
342 {
343     Int         len;
344     AtomicObj   aold, anew;
345     AtomicObj * addr;
346     Obj         result;
347 
348     if (!IS_INTOBJ(index))
349         ArgumentError("COMPARE_AND_SWAP: Second argument must be an integer");
350     UInt n = INT_INTOBJ(index);
351 
352     switch (TNUM_OBJ(list)) {
353     case T_FIXALIST:
354     case T_APOSOBJ:
355       break;
356     case T_ALIST:
357         return AtomicCompareSwapAList(list, n, old, new);
358     default:
359       ArgumentError("COMPARE_AND_SWAP: First argument must an atomic list");
360   }
361   addr = ADDR_ATOM(list);
362   len = ALIST_LEN((UInt)addr[0].atom);
363 
364   if (n <= 0 || n > len)
365     ArgumentError("COMPARE_AND_SWAP: Index out of range");
366   aold.obj = old;
367   anew.obj = new;
368   result = COMPARE_AND_SWAP(&(addr[n+1].atom), aold.atom, anew.atom) ?
369     True : False;
370   if (result == True)
371     CHANGED_BAG(list);
372   return result;
373 }
374 
375 // Similar to COMPARE_AND_SWAP, but assigns list[index] the value 'new'
376 // if list[index] is currently unbound
377 static Obj FuncATOMIC_BIND(Obj self, Obj list, Obj index, Obj new)
378 {
379     return FuncCOMPARE_AND_SWAP(self, list, index, 0, new);
380 }
381 
382 // Similar to COMPARE_AND_SWAP, but unbinds list[index] if list[index]
383 // is currently assigned 'old'
384 static Obj FuncATOMIC_UNBIND(Obj self, Obj list, Obj index, Obj old)
385 {
386     return FuncCOMPARE_AND_SWAP(self, list, index, old, 0);
387 }
388 
389 static Obj FuncATOMIC_ADDITION(Obj self, Obj list, Obj index, Obj inc)
390 {
391   UInt n;
392   UInt len;
393   AtomicObj aold, anew, *addr;
394   switch (TNUM_OBJ(list)) {
395     case T_FIXALIST:
396     case T_APOSOBJ:
397       break;
398     default:
399       ArgumentError("ATOMIC_ADDITION: First argument must be a fixed atomic list");
400   }
401   addr = ADDR_ATOM(list);
402   len = ALIST_LEN((UInt) addr[0].atom);
403   if (!IS_INTOBJ(index))
404     ArgumentError("ATOMIC_ADDITION: Second argument must be an integer");
405   n = INT_INTOBJ(index);
406   if (n <= 0 || n > len)
407     ArgumentError("ATOMIC_ADDITION: Index out of range");
408   if (!IS_INTOBJ(inc))
409     ArgumentError("ATOMIC_ADDITION: increment is not an integer");
410   do
411   {
412     aold = addr[n+1];
413     if (!IS_INTOBJ(aold.obj))
414       ArgumentError("ATOMIC_ADDITION: list element is not an integer");
415     anew.obj = INTOBJ_INT(INT_INTOBJ(aold.obj) + INT_INTOBJ(inc));
416   } while (!COMPARE_AND_SWAP(&(addr[n+1].atom), aold.atom, anew.atom));
417   return anew.obj;
418 }
419 
420 
421 static Obj FuncAddAtomicList(Obj self, Obj list, Obj obj)
422 {
423   if (TNUM_OBJ(list) != T_ALIST)
424     ArgumentError("AddAtomicList: First argument must be an atomic list");
425   return INTOBJ_INT(AddAList(list, obj));
426 }
427 
428 Obj FromAtomicList(Obj list)
429 {
430   Obj result;
431   const AtomicObj *data;
432   UInt i, len;
433   data = CONST_ADDR_ATOM(list);
434   len = ALIST_LEN((UInt) (data++->atom));
435   result = NEW_PLIST(T_PLIST, len);
436   SET_LEN_PLIST(result, len);
437   MEMBAR_READ();
438   for (i=1; i<=len; i++)
439     SET_ELM_PLIST(result, i, data[i].obj);
440   CHANGED_BAG(result);
441   return result;
442 }
443 
444 static Obj FuncFromAtomicList(Obj self, Obj list)
445 {
446   if (TNUM_OBJ(list) != T_FIXALIST && TNUM_OBJ(list) != T_ALIST)
447     ArgumentError("FromAtomicList: First argument must be an atomic list");
448   return FromAtomicList(list);
449 }
450 
451 static void MarkAtomicList(Bag bag)
452 {
453   UInt len;
454   const AtomicObj *ptr, *ptrend;
455   ptr = CONST_ADDR_ATOM(bag);
456   len = ALIST_LEN((UInt)(ptr++->atom));
457   ptrend = ptr + len + 1;
458   while (ptr < ptrend)
459     MarkBag(ptr++->obj);
460 }
461 
462 /* T_AREC_INNER substructure:
463  * ADDR_OBJ(rec)[0] == capacity, must be a power of 2.
464  * ADDR_OBJ(rec)[1] == log2(capacity).
465  * ADDR_OBJ(rec)[2] == estimated size (occupied slots).
466  * ADDR_OBJ(rec)[3] == update policy.
467  * ADDR_OBJ(rec)[4..] == hash table of pairs of objects
468  */
469 enum {
470     AR_CAP  = 0,
471     AR_BITS = 1,
472     AR_SIZE = 2,
473     AR_POL  = 3,
474     AR_DATA = 4,
475 };
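/* Each hash table slot occupies two AtomicObj entries:
 *   data[AR_DATA + 2*i]     -- the record name (rnam) as an integer key
 *   data[AR_DATA + 2*i + 1] -- the associated value
 * A key whose value is 'Undefined' marks a field that has been unbound.
 * Lookup uses Fibonacci hashing (FibHash) with linear probing; AR_CAP is
 * always a power of two so probes can wrap around cheaply.
 */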
476 
477 /* T_TLREC_INNER substructure:
478  * ADDR_OBJ(rec)[0] == number of subrecords
479  * ADDR_OBJ(rec)[1] == default values
480  * ADDR_OBJ(rec)[2] == constructors
481  * ADDR_OBJ(rec)[3..] == table of per-thread subrecords
482  */
483 enum {
484   TLR_SIZE         = 0,
485   TLR_DEFAULTS     = 1,
486   TLR_CONSTRUCTORS = 2,
487   TLR_DATA         = 3,
488 };
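/* Thread-local records keep one plain record per thread in the TLR_DATA
 * table, indexed by the thread ID; ExpandTLRecord grows the table on demand.
 * When a thread reads a field it does not yet have, the value is either
 * deep-copied from the TLR_DEFAULTS record or produced by calling the
 * corresponding constructor function from TLR_CONSTRUCTORS (see
 * GetTLRecordField below).
 */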
489 
490 static Obj GetTLInner(Obj obj)
491 {
492   Obj contents = CONST_ADDR_ATOM(obj)->obj;
493   MEMBAR_READ(); /* read barrier */
494   return contents;
495 }
496 
497 static void MarkTLRecord(Bag bag)
498 {
499   MarkBag(GetTLInner(bag));
500 }
501 
502 
503 static void MarkAtomicRecord(Bag bag)
504 {
505   MarkBag(GetTLInner(bag));
506 }
507 
508 static void MarkAtomicRecord2(Bag bag)
509 {
510   const AtomicObj *p = CONST_ADDR_ATOM(bag);
511   UInt cap = p->atom;
512   p += 5;
513   while (cap) {
514     MarkBag(p->obj);
515     p += 2;
516     cap--;
517   }
518 }
519 
520 static void ExpandTLRecord(Obj obj)
521 {
522   AtomicObj contents, newcontents;
523   do {
524     contents = *CONST_ADDR_ATOM(obj);
525     const Obj *table = CONST_ADDR_OBJ(contents.obj);
526     UInt thread = TLS(threadID);
527     if (thread < (UInt)*table)
528       return;
529     newcontents.obj = NewBag(T_TLREC_INNER, sizeof(Obj) * (thread+TLR_DATA+1));
530     Obj *newtable = ADDR_OBJ(newcontents.obj);
531     newtable[TLR_SIZE] = (Obj)(thread+1);
532     newtable[TLR_DEFAULTS] = table[TLR_DEFAULTS];
533     newtable[TLR_CONSTRUCTORS] = table[TLR_CONSTRUCTORS];
534     memcpy(newtable + TLR_DATA, table + TLR_DATA,
535       (UInt)table[TLR_SIZE] * sizeof(Obj));
536   } while (!COMPARE_AND_SWAP(&(ADDR_ATOM(obj)->atom),
537     contents.atom, newcontents.atom));
538   CHANGED_BAG(obj);
539   CHANGED_BAG(newcontents.obj);
540 }
541 
542 static void PrintAtomicList(Obj obj)
543 {
544 
545   if (TNUM_OBJ(obj) == T_FIXALIST)
546     Pr("<fixed atomic list of size %d>",
547       ALIST_LEN((UInt)(CONST_ADDR_OBJ(obj)[0])), 0L);
548   else
549     Pr("<atomic list of size %d>", ALIST_LEN((UInt)(CONST_ADDR_OBJ(obj)[0])), 0L);
550 }
551 
552 static inline Obj ARecordObj(Obj record)
553 {
554   return CONST_ADDR_OBJ(record)[1];
555 }
556 
557 static inline AtomicObj* ARecordTable(Obj record)
558 {
559   return ADDR_ATOM(ARecordObj(record));
560 }
561 
562 static void PrintAtomicRecord(Obj record)
563 {
564   UInt cap, size;
565   HashLock(record);
566   AtomicObj *table = ARecordTable(record);
567   cap = table[AR_CAP].atom;
568   size = table[AR_SIZE].atom;
569   HashUnlock(record);
570   Pr("<atomic record %d/%d full>", size, cap);
571 }
572 
573 static void PrintTLRecord(Obj obj)
574 {
575   Obj contents = GetTLInner(obj);
576   const Obj *table = CONST_ADDR_OBJ(contents);
577   Obj record = 0;
578   Obj defrec = table[TLR_DEFAULTS];
579   int comma = 0;
580   AtomicObj *deftable;
581   int i;
582   if (TLS(threadID) < (UInt)table[TLR_SIZE]) {
583     record = table[TLR_DATA+TLS(threadID)];
584   }
585   Pr("%2>rec( %2>", 0L, 0L);
586   if (record) {
587     for (i = 1; i <= LEN_PREC(record); i++) {
588       Obj val = GET_ELM_PREC(record, i);
589       Pr("%H", (Int)NAME_RNAM(labs(GET_RNAM_PREC(record, i))), 0L);
590       Pr ("%< := %>", 0L, 0L);
591       if (val)
592         PrintObj(val);
593       else
594         Pr("<undefined>", 0L, 0L);
595       if (i < LEN_PREC(record))
596         Pr("%2<, %2>", 0L, 0L);
597       else
598         comma = 1;
599     }
600   }
601   HashLockShared(defrec);
602   deftable = ARecordTable(defrec);
603   for (i = 0; i < deftable[AR_CAP].atom; i++) {
604     UInt key = deftable[AR_DATA+2*i].atom;
605     Obj value = deftable[AR_DATA+2*i+1].obj;
606     if (key && (!record || !PositionPRec(record, key, 0))) {
607       if (comma)
608         Pr("%2<, %2>", 0L, 0L);
609       Pr("%H", (Int)(NAME_RNAM(key)), 0L);
610       Pr ("%< := %>", 0L, 0L);
611       PrintObj(CopyTraversed(value));
612       comma = 1;
613     }
614   }
615   HashUnlockShared(defrec);
616   Pr(" %4<)", 0L, 0L);
617 }
618 
619 
620 Obj GetARecordField(Obj record, UInt field)
621 {
622   AtomicObj *table = ARecordTable(record);
623   AtomicObj *data = table + AR_DATA;
624   UInt cap, bits, hash, n;
625   /* We need a memory barrier to ensure that we see fields that
626    * were updated before the table pointer was updated; there is
627    * a matching write barrier in the set operation. */
628   MEMBAR_READ();
629   cap = table[AR_CAP].atom;
630   bits = table[AR_BITS].atom;
631   hash = FibHash(field, bits);
632   n = cap;
633   while (n-- > 0)
634   {
635     UInt key = data[hash*2].atom;
636     if (key == field)
637     {
638       Obj result;
639       MEMBAR_READ(); /* memory barrier */
640       result = data[hash*2+1].obj;
641       if (result != Undefined)
642         return result;
643     }
644     if (!key)
645       return (Obj) 0;
646     hash++;
647     if (hash == cap)
648       hash = 0;
649   }
650   return (Obj) 0;
651 }
652 
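/* ARecordFastInsert assumes it has exclusive access to 'table': it is only
 * called on freshly allocated tables (NewAtomicRecordFrom) or while the
 * record's exclusive hash lock is held during a resize (SetARecordField),
 * so plain, non-atomic updates of AR_SIZE and the key slots suffice here.
 */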
653 static UInt ARecordFastInsert(AtomicObj *table, AtomicUInt field)
654 {
655   AtomicObj *data = table + AR_DATA;
656   UInt cap = table[AR_CAP].atom;
657   UInt bits = table[AR_BITS].atom;
658   UInt hash = FibHash(field, bits);
659   for (;;)
660   {
661     AtomicUInt key;
662     key = data[hash*2].atom;
663     if (!key)
664     {
665       table[AR_SIZE].atom++; /* increase size */
666       data[hash*2].atom = field;
667       return hash;
668     }
669     if (key == field)
670       return hash;
671     hash++;
672     if (hash == cap)
673       hash = 0;
674   }
675 }
676 
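/* SetARecordField proceeds in three stages: (1) under a shared lock, try to
 * find the field and update it in place according to the policy; (2) still
 * under the shared lock, claim a free slot if the table is below its usage
 * cap; (3) otherwise drop the shared lock, take the exclusive lock, and
 * rebuild the table at twice the capacity before inserting.  It returns the
 * value now stored for the field, or 0 if the policy forbids the update.
 */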
677 Obj SetARecordField(Obj record, UInt field, Obj obj)
678 {
679   AtomicObj *table, *data, *newtable, *newdata;
680   Obj inner, result;
681   UInt cap, bits, hash, i, n, size;
682   AtomicRecordPolicy policy;
683   int have_room;
684   HashLockShared(record);
685   inner = ARecordObj(record);
686   table = ADDR_ATOM(inner);
687   data = table + AR_DATA;
688   cap = table[AR_CAP].atom;
689   bits = table[AR_BITS].atom;
690   policy = table[AR_POL].atom;
691   hash = FibHash(field, bits);
692   n = cap;
693   /* case 1: key exists, we can replace it */
694   while (n-- > 0)
695   {
696     UInt key = data[hash*2].atom;
697     if (!key)
698       break;
699     if (key == field)
700     {
701       MEMBAR_FULL(); /* memory barrier */
702       if (policy == AREC_WX) {
703         HashUnlockShared(record);
704         return 0;
705       }
706       else if (policy == AREC_RW) {
707         AtomicObj old;
708         AtomicObj new;
709         new.obj = obj;
710         do {
711           old = data[hash*2+1];
712         } while (!COMPARE_AND_SWAP(&data[hash*2+1].atom,
713                   old.atom, new.atom));
714         CHANGED_BAG(inner);
715         HashUnlockShared(record);
716         return obj;
717       } else { // AREC_W1
718         do {
719           result = data[hash*2+1].obj;
720         } while (!result);
721         CHANGED_BAG(inner);
722         HashUnlockShared(record);
723         return result;
724       }
725     }
726     hash++;
727     if (hash == cap)
728       hash = 0;
729   }
730   do {
731     size = table[AR_SIZE].atom + 1;
732     have_room = (size <= UsageCap[bits]);
733   } while (have_room && !COMPARE_AND_SWAP(&table[AR_SIZE].atom,
734                          size-1, size));
735   /* we're guaranteed to have a non-full table for the insertion step */
736   /* if have_room is true */
737   if (have_room) for (;;) { /* hash iteration loop */
738     AtomicObj old = data[hash*2];
739     if (old.atom == field) {
740       /* we don't actually need a new entry, so revert the size update */
741       do {
742         size = table[AR_SIZE].atom;
743       } while (!COMPARE_AND_SWAP(&table[AR_SIZE].atom, size, size-1));
744       /* continue below */
745     } else if (!old.atom) {
746       AtomicObj new;
747       new.atom = field;
748       if (!COMPARE_AND_SWAP(&data[hash*2].atom, old.atom, new.atom))
749         continue;
750       /* else continue below */
751     } else {
752       hash++;
753       if (hash == cap)
754         hash = 0;
755       continue;
756     }
757     MEMBAR_FULL(); /* memory barrier */
758     for (;;) { /* CAS loop */
759       old = data[hash*2+1];
760       if (old.obj) {
761         if (policy == AREC_WX) {
762           result = 0;
763           break;
764         }
765         else if (policy == AREC_RW) {
766           AtomicObj new;
767           new.obj = obj;
768           if (COMPARE_AND_SWAP(&data[hash*2+1].atom,
769               old.atom, new.atom)) {
770             result = obj;
771             break;
772           }
773         } else {
774           result = old.obj;
775           break;
776         }
777       } else {
778         AtomicObj new;
779         new.obj = obj;
780         if (COMPARE_AND_SWAP(&data[hash*2+1].atom,
781             old.atom, new.atom)) {
782           result = obj;
783           break;
784         }
785       }
786     } /* end CAS loop */
787     CHANGED_BAG(inner);
788     HashUnlockShared(record);
789     return result;
790   } /* end hash iteration loop */
791   /* have_room is false at this point */
792   HashUnlockShared(record);
793   HashLock(record);
794   inner = NewBag(T_AREC_INNER, sizeof(AtomicObj) * (AR_DATA + cap * 2 * 2));
795   newtable = ADDR_ATOM(inner);
796   newdata = newtable + AR_DATA;
797   newtable[AR_CAP].atom = cap * 2;
798   newtable[AR_BITS].atom = bits+1;
799   newtable[AR_SIZE].atom = 0; /* size */
800   newtable[AR_POL] = table[AR_POL]; /* policy */
801   for (i=0; i<cap; i++) {
802     UInt key = data[2*i].atom;
803     Obj value = data[2*i+1].obj;
804     if (key && value != Undefined) {
805       n = ARecordFastInsert(newtable, key);
806       newdata[2*n+1].obj = value;
807     }
808   }
809   n = ARecordFastInsert(newtable, field);
810   if (newdata[2*n+1].obj)
811   {
812     if (policy == AREC_WX)
813       result = (Obj) 0;
814     else {
815       if (policy == AREC_RW)
816         newdata[2*n+1].obj = result = obj;
817       else
818         result = newdata[2*n+1].obj;
819     }
820   }
821   else
822     newdata[2*n+1].obj = result = obj;
823   MEMBAR_WRITE(); /* memory barrier */
824   ADDR_OBJ(record)[1] = inner;
825   CHANGED_BAG(inner);
826   CHANGED_BAG(record);
827   HashUnlock(record);
828   return result;
829 }
830 
831 Obj FromAtomicRecord(Obj record)
832 {
833   Obj result;
834   AtomicObj *table, *data;
835   UInt cap, i;
836   table = ARecordTable(record);
837   data = table + AR_DATA;
838   MEMBAR_READ(); /* memory barrier */
839   cap = table[AR_CAP].atom;
840   result = NEW_PREC(0);
841   for (i=0; i<cap; i++)
842   {
843     UInt key;
844     Obj value;
845     key = data[2*i].atom;
846     MEMBAR_READ();
847     value = data[2*i+1].obj;
848     if (key && value && value != Undefined)
849       AssPRec(result, key, value);
850   }
851   return result;
852 }
853 
854 static Obj FuncFromAtomicRecord(Obj self, Obj record)
855 {
856   if (TNUM_OBJ(record) != T_AREC)
857     ArgumentError("FromAtomicRecord: First argument must be an atomic record");
858   return FromAtomicRecord(record);
859 }
860 
861 static Obj FuncFromAtomicComObj(Obj self, Obj comobj)
862 {
863   if (TNUM_OBJ(comobj) != T_ACOMOBJ)
864     ArgumentError("FromAtomicComObj: First argument must be an atomic record");
865   return FromAtomicRecord(comobj);
866 }
867 
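/* NewAtomicRecord rounds the requested capacity up to a power of two.  The
 * occupancy threshold that triggers resizing comes from UsageCap[bits],
 * which is presumably filled in during module initialisation (outside this
 * excerpt).
 */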
868 Obj NewAtomicRecord(UInt capacity)
869 {
870   Obj arec, result;
871   AtomicObj *table;
872   UInt bits = 1;
873   while (capacity > (1 << bits))
874     bits++;
875   capacity = 1 << bits;
876   arec = NewBag(T_AREC_INNER, sizeof(AtomicObj) * (AR_DATA+2*capacity));
877   table = ADDR_ATOM(arec);
878   result = NewBag(T_AREC, 2*sizeof(Obj));
879   table[AR_CAP].atom = capacity;
880   table[AR_BITS].atom = bits;
881   table[AR_SIZE].atom = 0;
882   table[AR_POL].atom = AREC_RW;
883   ADDR_OBJ(result)[1] = arec;
884   CHANGED_BAG(arec);
885   CHANGED_BAG(result);
886   return result;
887 }
888 
889 static Obj NewAtomicRecordFrom(Obj precord)
890 {
891   Obj result;
892   AtomicObj *table;
893   UInt i, pos, len = LEN_PREC(precord);
894   result = NewAtomicRecord(len);
895   table = ARecordTable(result);
896   for (i=1; i<=len; i++) {
897     Int field = GET_RNAM_PREC(precord, i);
898     if (field < 0)
899       field = -field;
900     pos = ARecordFastInsert(table, field);
901     table[AR_DATA+2*pos+1].obj = GET_ELM_PREC(precord, i);
902   }
903   CHANGED_BAG(ARecordObj(result));
904   CHANGED_BAG(result);
905   MEMBAR_WRITE();
906   return result;
907 }
908 
909 static void SetARecordUpdatePolicy(Obj record, AtomicRecordPolicy policy)
910 {
911   AtomicObj *table = ARecordTable(record);
912   table[AR_POL].atom = policy;
913 }
914 
915 static AtomicRecordPolicy GetARecordUpdatePolicy(Obj record)
916 {
917   AtomicObj *table = ARecordTable(record);
918   return table[AR_POL].atom;
919 }
920 
921 Obj ElmARecord(Obj record, UInt rnam)
922 {
923     Obj result = GetARecordField(record, rnam);
924     if (!result)
925         ErrorMayQuit(
926             "Record: '<atomic record>.%g' must have an assigned value",
927             (UInt)NAME_RNAM(rnam), 0);
928     return result;
929 }
930 
931 void AssARecord(Obj record, UInt rnam, Obj value)
932 {
933     Obj result = SetARecordField(record, rnam, value);
934     if (!result)
935         ErrorMayQuit(
936             "Record: '<atomic record>.%g' already has an assigned value",
937             (UInt)NAME_RNAM(rnam), 0);
938 }
939 
940 void UnbARecord(Obj record, UInt rnam) {
941    SetARecordField(record, rnam, Undefined);
942 }
943 
944 Int IsbARecord(Obj record, UInt rnam)
945 {
946   return GetARecordField(record, rnam) != (Obj) 0;
947 }
948 
949 static Obj ShallowCopyARecord(Obj obj)
950 {
951   Obj copy, inner, innerCopy;
952   HashLock(obj);
953   copy = NewBag(TNUM_BAG(obj), SIZE_BAG(obj));
954   memcpy(ADDR_OBJ(copy), CONST_ADDR_OBJ(obj), SIZE_BAG(obj));
955   inner = CONST_ADDR_OBJ(obj)[1];
956   innerCopy = NewBag(TNUM_BAG(inner), SIZE_BAG(inner));
957   memcpy(ADDR_OBJ(innerCopy), CONST_ADDR_OBJ(inner), SIZE_BAG(inner));
958   ADDR_OBJ(copy)[1] = innerCopy;
959   HashUnlock(obj);
960   CHANGED_BAG(innerCopy);
961   CHANGED_BAG(copy);
962   return copy;
963 }
964 
965 static void UpdateThreadRecord(Obj record, Obj tlrecord)
966 {
967   Obj inner;
968   do {
969     inner = GetTLInner(record);
970     ADDR_OBJ(inner)[TLR_DATA+TLS(threadID)] = tlrecord;
971     MEMBAR_FULL(); /* memory barrier */
972   } while (inner != GetTLInner(record));
973   if (tlrecord) {
974     if (TLS(tlRecords))
975       AssPlist(TLS(tlRecords), LEN_PLIST(TLS(tlRecords))+1, record);
976     else {
977       TLS(tlRecords) = NEW_PLIST(T_PLIST, 1);
978       SET_LEN_PLIST(TLS(tlRecords), 1);
979       SET_ELM_PLIST(TLS(tlRecords), 1, record);
980       CHANGED_BAG(TLS(tlRecords));
981     }
982   }
983 }
984 
985 Obj GetTLRecordField(Obj record, UInt rnam)
986 {
987   Obj contents, *table;
988   Obj tlrecord;
989   UInt pos;
990   Region *savedRegion = TLS(currentRegion);
991   TLS(currentRegion) = TLS(threadRegion);
992   ExpandTLRecord(record);
993   contents = GetTLInner(record);
994   table = ADDR_OBJ(contents);
995   tlrecord = table[TLR_DATA+TLS(threadID)];
996   if (!tlrecord || !(pos = PositionPRec(tlrecord, rnam, 1))) {
997     Obj result;
998     Obj defrec = table[TLR_DEFAULTS];
999     result = GetARecordField(defrec, rnam);
1000     if (result) {
1001       result = CopyTraversed(result);
1002       if (!tlrecord) {
1003         tlrecord = NEW_PREC(0);
1004         UpdateThreadRecord(record, tlrecord);
1005       }
1006       AssPRec(tlrecord, rnam, result);
1007       TLS(currentRegion) = savedRegion;
1008       return result;
1009     } else {
1010       Obj func;
1011       Obj constructors = table[TLR_CONSTRUCTORS];
1012       func = GetARecordField(constructors, rnam);
1013       if (!tlrecord) {
1014         tlrecord = NEW_PREC(0);
1015         UpdateThreadRecord(record, tlrecord);
1016       }
1017       if (func) {
1018         if (NARG_FUNC(func) == 0)
1019           result = CALL_0ARGS(func);
1020         else
1021           result = CALL_1ARGS(func, record);
1022         TLS(currentRegion) = savedRegion;
1023         if (!result) {
1024           pos = PositionPRec(tlrecord, rnam, 1);
1025           if (!pos)
1026             return 0;
1027           return GET_ELM_PREC(tlrecord, pos);
1028         }
1029         AssPRec(tlrecord, rnam, result);
1030         return result;
1031       }
1032       TLS(currentRegion) = savedRegion;
1033       return 0;
1034     }
1035   }
1036   TLS(currentRegion) = savedRegion;
1037   return GET_ELM_PREC(tlrecord, pos);
1038 }
1039 
1040 static Obj ElmTLRecord(Obj record, UInt rnam)
1041 {
1042     Obj result = GetTLRecordField(record, rnam);
1043     if (!result)
1044         ErrorMayQuit(
1045             "Record: '<thread-local record>.%g' must have an assigned value",
1046             (UInt)NAME_RNAM(rnam), 0);
1047     return result;
1048 }
1049 
1050 void AssTLRecord(Obj record, UInt rnam, Obj value)
1051 {
1052   Obj contents, *table;
1053   Obj tlrecord;
1054   ExpandTLRecord(record);
1055   contents = GetTLInner(record);
1056   table = ADDR_OBJ(contents);
1057   tlrecord = table[TLR_DATA+TLS(threadID)];
1058   if (!tlrecord) {
1059     tlrecord = NEW_PREC(0);
1060     UpdateThreadRecord(record, tlrecord);
1061   }
1062   AssPRec(tlrecord, rnam, value);
1063 }
1064 
1065 static void UnbTLRecord(Obj record, UInt rnam)
1066 {
1067   Obj contents, *table;
1068   Obj tlrecord;
1069   ExpandTLRecord(record);
1070   contents = GetTLInner(record);
1071   table = ADDR_OBJ(contents);
1072   tlrecord = table[TLR_DATA+TLS(threadID)];
1073   if (!tlrecord) {
1074     tlrecord = NEW_PREC(0);
1075     UpdateThreadRecord(record, tlrecord);
1076   }
1077   UnbPRec(tlrecord, rnam);
1078 }
1079 
1080 
1081 static Int IsbTLRecord(Obj record, UInt rnam)
1082 {
1083   return GetTLRecordField(record, rnam) != (Obj) 0;
1084 }
1085 
1086 static Obj FuncAtomicRecord(Obj self, Obj args)
1087 {
1088   Obj arg;
1089   switch (LEN_PLIST(args)) {
1090     case 0:
1091       return NewAtomicRecord(8);
1092     case 1:
1093       arg = ELM_PLIST(args, 1);
1094       if (IS_POS_INTOBJ(arg)) {
1095         return NewAtomicRecord(INT_INTOBJ(arg));
1096       }
1097       if (IS_PREC(arg)) {
1098           return NewAtomicRecordFrom(arg);
1099       }
1100       ArgumentError("AtomicRecord: argument must be a positive small integer or a record");
1101     default:
1102       ArgumentError("AtomicRecord: takes one optional argument");
1103       return (Obj) 0;
1104   }
1105 }
1106 
1107 static Obj FuncGET_ATOMIC_RECORD(Obj self, Obj record, Obj field, Obj def)
1108 {
1109   UInt fieldname;
1110   Obj result;
1111   if (TNUM_OBJ(record) != T_AREC)
1112     ArgumentError("GET_ATOMIC_RECORD: First argument must be an atomic record");
1113   RequireStringRep("GET_ATOMIC_RECORD", field);
1114   fieldname = RNamName(CONST_CSTR_STRING(field));
1115   result = GetARecordField(record, fieldname);
1116   return result ? result : def;
1117 }
1118 
1119 static Obj FuncSET_ATOMIC_RECORD(Obj self, Obj record, Obj field, Obj value)
1120 {
1121   UInt fieldname;
1122   Obj result;
1123   if (TNUM_OBJ(record) != T_AREC)
1124     ArgumentError("SET_ATOMIC_RECORD: First argument must be an atomic record");
1125   RequireStringRep("SET_ATOMIC_RECORD", field);
1126   fieldname = RNamName(CONST_CSTR_STRING(field));
1127   result = SetARecordField(record, fieldname, value);
1128   if (!result)
1129     ErrorQuit("SET_ATOMIC_RECORD: Field '%s' already exists",
1130       (UInt) CONST_CSTR_STRING(field), 0L);
1131   return result;
1132 }
1133 
1134 static Obj FuncUNBIND_ATOMIC_RECORD(Obj self, Obj record, Obj field)
1135 {
1136   UInt fieldname;
1137   Obj exists;
1138   if (TNUM_OBJ(record) != T_AREC)
1139     ArgumentError("UNBIND_ATOMIC_RECORD: First argument must be an atomic record");
1140   RequireStringRep("UNBIND_ATOMIC_RECORD", field);
1141   fieldname = RNamName(CONST_CSTR_STRING(field));
1142   if (GetARecordUpdatePolicy(record) != AREC_RW)
1143     ErrorQuit("UNBIND_ATOMIC_RECORD: Record elements cannot be changed",
1144       (UInt) CONST_CSTR_STRING(field), 0L);
1145   exists = GetARecordField(record, fieldname);
1146   if (exists)
1147     SetARecordField(record, fieldname, (Obj) 0);
1148   return (Obj) 0;
1149 }
1150 
1151 static Obj CreateTLDefaults(Obj defrec) {
1152   Region *saved_region = TLS(currentRegion);
1153   Obj result;
1154   UInt i;
1155   TLS(currentRegion) = LimboRegion;
1156   result = NewBag(T_PREC, SIZE_BAG(defrec));
1157   memcpy(ADDR_OBJ(result), CONST_ADDR_OBJ(defrec), SIZE_BAG(defrec));
1158   for (i = 1; i <= LEN_PREC(defrec); i++) {
1159     SET_ELM_PREC(result, i,
1160       CopyReachableObjectsFrom(GET_ELM_PREC(result, i), 0, 1, 0));
1161   }
1162   CHANGED_BAG(result);
1163   TLS(currentRegion) = saved_region;
1164   return NewAtomicRecordFrom(result);
1165 }
1166 
1167 static Obj NewTLRecord(Obj defaults, Obj constructors) {
1168   Obj result = NewBag(T_TLREC, sizeof(AtomicObj));
1169   Obj inner = NewBag(T_TLREC_INNER, sizeof(Obj) * TLR_DATA);
1170   ADDR_OBJ(inner)[TLR_SIZE] = 0;
1171   ADDR_OBJ(inner)[TLR_DEFAULTS] = CreateTLDefaults(defaults);
1172   WriteGuard(constructors);
1173   SET_REGION(constructors, LimboRegion);
1174   MEMBAR_WRITE();
1175   ADDR_OBJ(inner)[TLR_CONSTRUCTORS] = NewAtomicRecordFrom(constructors);
1176   ((AtomicObj *)(ADDR_OBJ(result)))->obj = inner;
1177   CHANGED_BAG(result);
1178   return result;
1179 }
1180 
1181 void SetTLDefault(Obj record, UInt rnam, Obj value) {
1182   Obj inner = GetTLInner(record);
1183   SetARecordField(ADDR_OBJ(inner)[TLR_DEFAULTS],
1184     rnam, CopyReachableObjectsFrom(value, 0, 1, 0));
1185 }
1186 
1187 void SetTLConstructor(Obj record, UInt rnam, Obj func) {
1188   Obj inner = GetTLInner(record);
1189   SetARecordField(ADDR_OBJ(inner)[TLR_CONSTRUCTORS],
1190     rnam, func);
1191 }
1192 
1193 
1194 static int OnlyConstructors(Obj precord) {
1195   UInt i, len;
1196   len = LEN_PREC(precord);
1197   for (i=1; i<=len; i++) {
1198     Obj elm = GET_ELM_PREC(precord, i);
1199     if (TNUM_OBJ(elm) != T_FUNCTION || (Int) NARG_FUNC(elm) != 0)
1200       return 0;
1201   }
1202   return 1;
1203 }
1204 
1205 static Obj FuncThreadLocalRecord(Obj self, Obj args)
1206 {
1207   switch (LEN_PLIST(args)) {
1208     case 0:
1209       return NewTLRecord(NEW_PREC(0), NEW_PREC(0));
1210     case 1:
1211       if (TNUM_OBJ(ELM_PLIST(args, 1)) != T_PREC)
1212         ArgumentError("ThreadLocalRecord: First argument must be a record");
1213       return NewTLRecord(ELM_PLIST(args, 1), NEW_PREC(0));
1214     case 2:
1215       if (TNUM_OBJ(ELM_PLIST(args, 1)) != T_PREC)
1216         ArgumentError("ThreadLocalRecord: First argument must be a record");
1217       if (TNUM_OBJ(ELM_PLIST(args, 2)) != T_PREC ||
1218           !OnlyConstructors(ELM_PLIST(args, 2)))
1219         ArgumentError("ThreadLocalRecord: Second argument must be a record containing parameterless functions");
1220       return NewTLRecord(ELM_PLIST(args, 1), ELM_PLIST(args, 2));
1221     default:
1222       ArgumentError("ThreadLocalRecord: Too many arguments");
1223       return (Obj) 0; /* flow control hint */
1224   }
1225 }
1226 
1227 static Obj FuncSetTLDefault(Obj self, Obj record, Obj name, Obj value)
1228 {
1229   if (TNUM_OBJ(record) != T_TLREC)
1230     ArgumentError("SetTLDefault: First argument must be a thread-local record");
1231   if (!IS_STRING(name) && !IS_INTOBJ(name))
1232     ArgumentError("SetTLDefault: Second argument must be a string or integer");
1233   SetTLDefault(record, RNamObj(name), value);
1234   return (Obj) 0;
1235 }
1236 
1237 static Obj FuncSetTLConstructor(Obj self, Obj record, Obj name, Obj function)
1238 {
1239   if (TNUM_OBJ(record) != T_TLREC)
1240     ArgumentError("SetTLConstructor: First argument must be a thread-local record");
1241   if (!IS_STRING(name) && !IS_INTOBJ(name))
1242     ArgumentError("SetTLConstructor: Second argument must be a string or integer");
1243   RequireFunction("SetTLConstructor", function);
1244   SetTLConstructor(record, RNamObj(name), function);
1245   return (Obj) 0;
1246 }
1247 
1248 static Int LenListAList(Obj list)
1249 {
1250   MEMBAR_READ();
1251   return (Int)(ALIST_LEN((UInt)CONST_ADDR_ATOM(list)[0].atom));
1252 }
1253 
1254 Obj LengthAList(Obj list)
1255 {
1256   MEMBAR_READ();
1257   return INTOBJ_INT(ALIST_LEN((UInt)CONST_ADDR_ATOM(list)[0].atom));
1258 }
1259 
1260 Obj Elm0AList(Obj list, Int pos)
1261 {
1262   const AtomicObj *addr = CONST_ADDR_ATOM(list);
1263   UInt len;
1264   MEMBAR_READ();
1265   len = ALIST_LEN((UInt) addr[0].atom);
1266   if (pos < 1 || pos > len)
1267     return 0;
1268   MEMBAR_READ();
1269   return addr[1+pos].obj;
1270 }
1271 
1272 Obj ElmAList(Obj list, Int pos)
1273 {
1274   const AtomicObj *addr = CONST_ADDR_ATOM(list);
1275   UInt len;
1276   MEMBAR_READ();
1277   len = ALIST_LEN((UInt)addr[0].atom);
1278   Obj result;
1279   if (pos < 1 || pos > len) {
1280       ErrorMayQuit(
1281           "Atomic List Element: <pos>=%d is an invalid index for <list>",
1282           (Int)pos, 0);
1283   }
1284 
1285   result = addr[1 + pos].obj;
1286   if (!result)
1287       ErrorMayQuit(
1288           "Atomic List Element: <list>[%d] must have an assigned value",
1289           (Int)pos, 0);
1290 
1291   MEMBAR_READ();
1292   return result;
1293 }
1294 
1295 static Int IsbAList(Obj list, Int pos) {
1296   const AtomicObj *addr = CONST_ADDR_ATOM(list);
1297   UInt len;
1298   MEMBAR_READ();
1299   len = ALIST_LEN((UInt) addr[0].atom);
1300   return pos >= 1 && pos <= len && addr[1+pos].obj;
1301 }
1302 
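/* AssFixAList assigns into a fixed-size atomic list: the position must
 * already lie within the fixed length, and the store honours the policy bits
 * in the header word (overwrite, write-once, or strict write-once).
 */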
1303 static void AssFixAList(Obj list, Int pos, Obj obj)
1304 {
1305   UInt pol = (UInt)CONST_ADDR_ATOM(list)[0].atom;
1306   UInt len = ALIST_LEN(pol);
1307   if (pos < 1 || pos > len) {
1308       ErrorMayQuit(
1309           "Atomic List Element: <pos>=%d is an invalid index for <list>",
1310           (Int)pos, 0);
1311   }
1312   switch (ALIST_POL(pol)) {
1313     case ALIST_RW:
1314       ADDR_ATOM(list)[1+pos].obj = obj;
1315       break;
1316     case ALIST_W1:
1317       COMPARE_AND_SWAP(&ADDR_ATOM(list)[1+pos].atom,
1318         (AtomicUInt) 0, (AtomicUInt) obj);
1319       break;
1320     case ALIST_WX:
1321       if (!COMPARE_AND_SWAP(&ADDR_ATOM(list)[1+pos].atom,
1322         (AtomicUInt) 0, (AtomicUInt) obj)) {
1323         ErrorQuit("Atomic List Assignment: <list>[%d] already has an assigned value", pos, (Int) 0);
1324       }
1325       break;
1326   }
1327   CHANGED_BAG(list);
1328   MEMBAR_WRITE();
1329 }
1330 
1331 // Ensure the capacity of atomic list 'list' is at least 'pos'.
1332 // Raises an error if 'list' has fixed length and 'pos' is greater
1333 // than the existing length.
1334 // If this function returns, then the code has a (possibly shared)
1335 // HashLock on the list, which must be released by the caller.
1336 static void EnlargeAList(Obj list, Int pos)
1337 {
1338     HashLockShared(list);
1339     AtomicObj * addr = ADDR_ATOM(list);
1340     UInt        pol = (UInt)addr[0].atom;
1341     UInt        len = ALIST_LEN(pol);
1342     if (pos > len) {
1343         HashUnlockShared(list);
1344         HashLock(list);
1345         addr = ADDR_ATOM(list);
1346         pol = (UInt)addr[0].atom;
1347         len = ALIST_LEN(pol);
1348     }
1349     if (pos > len) {
1350         if (TNUM_OBJ(list) != T_ALIST) {
1351             HashUnlock(list);
1352             ErrorQuit(
1353                 "Atomic List Assignment: extending fixed size atomic list",
1354                 0L, 0L);
1355             return; /* flow control hint */
1356         }
1357         addr = ADDR_ATOM(list);
1358         if (pos > SIZE_BAG(list) / sizeof(AtomicObj) - 2) {
1359             Obj  newlist;
1360             UInt newlen = len;
1361             do {
1362                 newlen = newlen * 3 / 2 + 1;
1363             } while (pos > newlen);
1364             newlist = NewBag(T_ALIST, sizeof(AtomicObj) * (2 + newlen));
1365             memcpy(PTR_BAG(newlist), PTR_BAG(list),
1366                    sizeof(AtomicObj) * (2 + len));
1367             addr = ADDR_ATOM(newlist);
1368             addr[0].atom = CHANGE_ALIST_LEN(pol, pos);
1369             MEMBAR_WRITE();
1370             /* TODO: Won't work with GASMAN */
1371             SET_PTR_BAG(list, PTR_BAG(newlist));
1372             MEMBAR_WRITE();
1373         }
1374         else {
1375             addr[0].atom = CHANGE_ALIST_LEN(pol, pos);
1376             MEMBAR_WRITE();
1377         }
1378     }
1379 }
1380 
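/* AssAList handles assignment to a growable atomic list (T_ALIST): it first
 * ensures the list is long enough via EnlargeAList, which returns with the
 * hash lock held, then performs the policy-dependent store and releases the
 * lock.
 */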
1381 void AssAList(Obj list, Int pos, Obj obj)
1382 {
1383   if (pos < 1) {
1384     ErrorQuit(
1385         "Atomic List Element: <pos>=%d is an invalid index for <list>",
1386         (Int) pos, 0L);
1387     return; /* flow control hint */
1388   }
1389 
1390   EnlargeAList(list, pos);
1391 
1392   AtomicObj * addr = ADDR_ATOM(list);
1393   UInt        pol = (UInt)addr[0].atom;
1394 
1395   switch (ALIST_POL(pol)) {
1396     case ALIST_RW:
1397       ADDR_ATOM(list)[1+pos].obj = obj;
1398       break;
1399     case ALIST_W1:
1400       COMPARE_AND_SWAP(&ADDR_ATOM(list)[1+pos].atom,
1401         (AtomicUInt) 0, (AtomicUInt) obj);
1402       break;
1403     case ALIST_WX:
1404       if (!COMPARE_AND_SWAP(&ADDR_ATOM(list)[1+pos].atom,
1405         (AtomicUInt) 0, (AtomicUInt) obj)) {
1406         HashUnlock(list);
1407         ErrorQuit("Atomic List Assignment: <list>[%d] already has an assigned value", pos, (Int) 0);
1408       }
1409       break;
1410   }
1411   CHANGED_BAG(list);
1412   MEMBAR_WRITE();
1413   HashUnlock(list);
1414 }
1415 
1416 static Obj AtomicCompareSwapAList(Obj list, Int pos, Obj old, Obj new)
1417 {
1418     if (pos < 1) {
1419         ErrorQuit(
1420             "Atomic List Element: <pos>=%d is an invalid index for <list>",
1421             (Int)pos, 0L);
1422         return False; /* flow control hint */
1423     }
1424 
1425     EnlargeAList(list, pos);
1426 
1427     UInt swap = COMPARE_AND_SWAP(&ADDR_ATOM(list)[1 + pos].atom,
1428                                  (AtomicUInt)old, (AtomicUInt) new);
1429 
1430     if (!swap) {
1431         HashUnlock(list);
1432         return False;
1433     }
1434     else {
1435         CHANGED_BAG(list);
1436         MEMBAR_WRITE();
1437         HashUnlock(list);
1438         return True;
1439     }
1440 }
1441 
1442 UInt AddAList(Obj list, Obj obj)
1443 {
1444   AtomicObj *addr;
1445   UInt len, newlen, pol;
1446   HashLock(list);
1447   if (TNUM_OBJ(list) != T_ALIST) {
1448     HashUnlock(list);
1449     ErrorQuit("Atomic List Assignment: extending fixed size atomic list",
1450       0L, 0L);
1451     return 0; /* flow control hint */
1452   }
1453   addr = ADDR_ATOM(list);
1454   pol = (UInt)addr[0].atom;
1455   len = ALIST_LEN(pol);
1456   if (len + 1 > SIZE_BAG(list)/sizeof(AtomicObj) - 2) {
1457     Obj newlist;
1458     newlen = len * 3 / 2 + 1;
1459     newlist = NewBag(T_ALIST, sizeof(AtomicObj) * ( 2 + newlen));
1460     memcpy(PTR_BAG(newlist), PTR_BAG(list), sizeof(AtomicObj)*(2+len));
1461     addr = ADDR_ATOM(newlist);
1462     addr[0].atom = CHANGE_ALIST_LEN(pol, len + 1);
1463     MEMBAR_WRITE();
1464     SET_PTR_BAG(list, PTR_BAG(newlist));
1465     MEMBAR_WRITE();
1466   } else {
1467     addr[0].atom = CHANGE_ALIST_LEN(pol, len + 1);
1468     MEMBAR_WRITE();
1469   }
1470   switch (ALIST_POL(pol)) {
1471     case ALIST_RW:
1472       ADDR_ATOM(list)[2+len].obj = obj;
1473       break;
1474     case ALIST_W1:
1475       COMPARE_AND_SWAP(&ADDR_ATOM(list)[2+len].atom,
1476         (AtomicUInt) 0, (AtomicUInt) obj);
1477       break;
1478     case ALIST_WX:
1479       if (!COMPARE_AND_SWAP(&ADDR_ATOM(list)[2+len].atom,
1480         (AtomicUInt) 0, (AtomicUInt) obj)) {
1481         HashUnlock(list);
1482         ErrorQuit("Atomic List Assignment: <list>[%d] already has an assigned value", len+1, (Int) 0);
1483       }
1484       break;
1485   }
1486   CHANGED_BAG(list);
1487   MEMBAR_WRITE();
1488   HashUnlock(list);
1489   return len+1;
1490 }
1491 
1492 static void UnbAList(Obj list, Int pos)
1493 {
1494   AtomicObj *addr;
1495   UInt len, pol;
1496   HashLockShared(list);
1497   addr = ADDR_ATOM(list);
1498   pol = (UInt)addr[0].atom;
1499   len = ALIST_LEN(pol);
1500   if (ALIST_POL(pol) != ALIST_RW) {
1501     HashUnlockShared(list);
1502     ErrorQuit("Atomic List Unbind: list is in write-once mode", (Int) 0, (Int) 0);
1503   }
1504   if (pos >= 1 && pos <= len) {
1505     addr[1+pos].obj = 0;
1506     MEMBAR_WRITE();
1507   }
1508   HashUnlockShared(list);
1509 }
1510 
1511 static Int InitAObjectsState(void)
1512 {
1513     TLS(tlRecords) = (Obj)0;
1514     return 0;
1515 }
1516 
1517 static Int DestroyAObjectsState(void)
1518 {
1519     Obj  records;
1520     UInt i, len;
1521     records = TLS(tlRecords);
1522     if (records) {
1523         len = LEN_PLIST(records);
1524         for (i = 1; i <= len; i++)
1525             UpdateThreadRecord(ELM_PLIST(records, i), (Obj)0);
1526     }
1527     return 0;
1528 }
1529 
1530 #endif /* WARD_ENABLED */
1531 
1532 static Obj MakeAtomic(Obj obj) {
1533   if (IS_LIST(obj))
1534       return NewAtomicListFrom(T_ALIST, obj);
1535   else if (TNUM_OBJ(obj) == T_PREC)
1536     return NewAtomicRecordFrom(obj);
1537   else
1538     return (Obj) 0;
1539 }
1540 
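/* The three MakeXXXAtomic functions below switch the update policy of an
 * existing atomic list or record (taking the hash lock for lists), and first
 * convert plain lists and records into atomic ones via MakeAtomic.
 */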
1541 static Obj FuncMakeWriteOnceAtomic(Obj self, Obj obj) {
1542   switch (TNUM_OBJ(obj)) {
1543     case T_ALIST:
1544     case T_FIXALIST:
1545     case T_APOSOBJ:
1546       HashLock(obj);
1547       ADDR_ATOM(obj)[0].atom =
1548         CHANGE_ALIST_POL(CONST_ADDR_ATOM(obj)[0].atom, ALIST_W1);
1549       HashUnlock(obj);
1550       break;
1551     case T_AREC:
1552     case T_ACOMOBJ:
1553       SetARecordUpdatePolicy(obj, AREC_W1);
1554       break;
1555     default:
1556       obj = MakeAtomic(obj);
1557       if (obj)
1558         return FuncMakeWriteOnceAtomic(self, obj);
1559       ArgumentError("MakeWriteOnceAtomic: argument not an atomic object, list, or record");
1560   }
1561   return obj;
1562 }
1563 
1564 static Obj FuncMakeReadWriteAtomic(Obj self, Obj obj) {
1565   switch (TNUM_OBJ(obj)) {
1566     case T_ALIST:
1567     case T_FIXALIST:
1568     case T_APOSOBJ:
1569       HashLock(obj);
1570       ADDR_ATOM(obj)[0].atom =
1571         CHANGE_ALIST_POL(CONST_ADDR_ATOM(obj)[0].atom, ALIST_RW);
1572       HashUnlock(obj);
1573       break;
1574     case T_AREC:
1575     case T_ACOMOBJ:
1576       SetARecordUpdatePolicy(obj, AREC_RW);
1577       break;
1578     default:
1579       obj = MakeAtomic(obj);
1580       if (obj)
1581         return FuncMakeReadWriteAtomic(self, obj);
1582       ArgumentError("MakeReadWriteAtomic: argument not an atomic object, list, or record");
1583   }
1584   return obj;
1585 }
1586 
1587 static Obj FuncMakeStrictWriteOnceAtomic(Obj self, Obj obj) {
1588   switch (TNUM_OBJ(obj)) {
1589     case T_ALIST:
1590     case T_FIXALIST:
1591     case T_APOSOBJ:
1592       HashLock(obj);
1593       ADDR_ATOM(obj)[0].atom =
1594         CHANGE_ALIST_POL(CONST_ADDR_ATOM(obj)[0].atom, ALIST_WX);
1595       HashUnlock(obj);
1596       break;
1597     case T_AREC:
1598     case T_ACOMOBJ:
1599       SetARecordUpdatePolicy(obj, AREC_WX);
1600       break;
1601     default:
1602       obj = MakeAtomic(obj);
1603       if (obj)
1604         return FuncMakeStrictWriteOnceAtomic(self, obj);
1605       ArgumentError("MakeStrictWriteOnceAtomic: argument not an atomic object, list, or record");
1606   }
1607   return obj;
1608 }
1609 
1610 
1611 #define FuncError(message)  ErrorQuit("%s: %s", (Int)currFuncName, (Int)message)
1612 
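/* BindOnce(obj, index, &new, eval, name) binds obj![index] to *new only if
 * that position is currently unbound (or bound to Fail); otherwise the
 * existing value is returned and *new is discarded.  With eval != 0, *new is
 * a parameterless function that is called to compute the value lazily, as
 * used by BindOnceExpr and TestBindOnceExpr.
 */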
1613 static Obj BindOncePosObj(Obj obj, Obj index, Obj *new, int eval, const char *currFuncName) {
1614   Int n;
1615   Bag *contents;
1616   Bag result;
1617   n = GetPositiveSmallInt(currFuncName, index);
1618   ReadGuard(obj);
1619 #ifndef WARD_ENABLED
1620   contents = PTR_BAG(obj);
1621   MEMBAR_READ();
1622   if (SIZE_BAG_CONTENTS(contents) / sizeof(Bag) <= n) {
1623     HashLock(obj);
1624     /* resize bag */
1625     if (SIZE_BAG(obj) / sizeof(Bag) <= n) {
1626       /* can't use ResizeBag() directly because of guards. */
1627       /* therefore we create a faux master pointer in the public region. */
1628       UInt *mptr[2];
1629       mptr[0] = (UInt *)contents;
1630       mptr[1] = 0;
1631       ResizeBag(mptr, sizeof(Bag) * (n+1));
1632       MEMBAR_WRITE();
1633       SET_PTR_BAG(obj, (void *)(mptr[0]));
1634     }
1635     /* reread contents pointer */
1636     HashUnlock(obj);
1637     contents = PTR_BAG(obj);
1638     MEMBAR_READ();
1639   }
1640   /* already bound? */
1641   result = (Bag)(contents[n]);
1642   if (result && result != Fail)
1643     return result;
1644   if (eval)
1645     *new = CALL_0ARGS(*new);
1646   HashLockShared(obj);
1647   contents = PTR_BAG(obj);
1648   MEMBAR_READ();
1649   for (;;) {
1650     result = (Bag)(contents[n]);
1651     if (result && result != Fail)
1652       break;
1653     if (COMPARE_AND_SWAP((AtomicUInt*)(contents+n),
1654       (AtomicUInt) result, (AtomicUInt) *new))
1655       break;
1656   }
1657   CHANGED_BAG(obj);
1658   HashUnlockShared(obj);
1659   return result == Fail ? (Obj) 0 : result;
1660 #endif
1661 }
1662 
1663 static Obj BindOnceAPosObj(Obj obj, Obj index, Obj *new, int eval, const char *currFuncName) {
1664   UInt n;
1665   UInt len;
1666   AtomicObj anew;
1667   AtomicObj *addr;
1668   Obj result;
1669   /* atomic positional objects aren't resizable. */
1670   addr = ADDR_ATOM(obj);
1671   MEMBAR_READ();
1672   len = ALIST_LEN(addr[0].atom);
1673   n = GetSmallInt(currFuncName, index);
1674   if (n <= 0 || n > len)
1675     FuncError("Index out of range");
1676   result = addr[n+1].obj;
1677   if (result && result != Fail)
1678     return result;
1679   anew.obj = *new;
1680   if (eval)
1681     *new = CALL_0ARGS(*new);
1682   for (;;) {
1683     result = addr[n+1].obj;
1684     if (result && result != Fail) {
1685       break;
1686     }
1687     if (COMPARE_AND_SWAP(&(addr[n+1].atom), (AtomicUInt) result, anew.atom))
1688       break;
1689   }
1690   CHANGED_BAG(obj);
1691   return result == Fail ? (Obj) 0 : result;
1692 }
1693 
1694 
1695 static Obj BindOnceComObj(Obj obj, Obj index, Obj *new, int eval, const char *currFuncName) {
1696   FuncError("not yet implemented");
1697   return (Obj) 0;
1698 }
1699 
1700 
1701 static Obj BindOnceAComObj(Obj obj, Obj index, Obj *new, int eval, const char *currFuncName) {
1702   FuncError("not yet implemented");
1703   return (Obj) 0;
1704 }
1705 
1706 
1707 static Obj BindOnce(Obj obj, Obj index, Obj *new, int eval, const char *currFuncName) {
1708   switch (TNUM_OBJ(obj)) {
1709     case T_POSOBJ:
1710       return BindOncePosObj(obj, index, new, eval, currFuncName);
1711     case T_APOSOBJ:
1712       return BindOnceAPosObj(obj, index, new, eval, currFuncName);
1713     case T_COMOBJ:
1714       return BindOnceComObj(obj, index, new, eval, currFuncName);
1715     case T_ACOMOBJ:
1716       return BindOnceAComObj(obj, index, new, eval, currFuncName);
1717     default:
1718       FuncError("first argument must be a positional or component object");
1719       return (Obj) 0; /* flow control hint */
1720   }
1721 }
1722 
FuncBindOnce(Obj self,Obj obj,Obj index,Obj new)1723 static Obj FuncBindOnce(Obj self, Obj obj, Obj index, Obj new) {
1724   Obj result;
1725   result = BindOnce(obj, index, &new, 0, "BindOnce");
1726   return result ? result : new;
1727 }
1728 
FuncStrictBindOnce(Obj self,Obj obj,Obj index,Obj new)1729 static Obj FuncStrictBindOnce(Obj self, Obj obj, Obj index, Obj new) {
1730   Obj result;
1731   result = BindOnce(obj, index, &new, 0, "StrictBindOnce");
1732   if (result)
1733     ErrorQuit("StrictBindOnce: Element already initialized", 0L, 0L);
1734   return result;
1735 }
1736 
FuncTestBindOnce(Obj self,Obj obj,Obj index,Obj new)1737 static Obj FuncTestBindOnce(Obj self, Obj obj, Obj index, Obj new) {
1738   Obj result;
1739   result = BindOnce(obj, index, &new, 0, "TestBindOnce");
1740   return result ? False : True;
1741 }
1742 
FuncBindOnceExpr(Obj self,Obj obj,Obj index,Obj new)1743 static Obj FuncBindOnceExpr(Obj self, Obj obj, Obj index, Obj new) {
1744   Obj result;
1745   result = BindOnce(obj, index, &new, 1, "BindOnceExpr");
1746   return result ? result : new;
1747 }
1748 
FuncTestBindOnceExpr(Obj self,Obj obj,Obj index,Obj new)1749 static Obj FuncTestBindOnceExpr(Obj self, Obj obj, Obj index, Obj new) {
1750   Obj result;
1751   result = BindOnce(obj, index, &new, 1, "TestBindOnceExpr");
1752   return result ? False : True;
1753 }
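
/* Illustrative GAP-level usage of the wrappers above (a sketch only; the
** variable <apos> is assumed to be an atomic positional object created
** elsewhere):
**
**   BindOnce(apos, 1, 42);          # binds position 1 and returns 42
**   BindOnce(apos, 1, 99);          # already bound, still returns 42
**   TestBindOnce(apos, 2, 17);      # true iff this call did the binding
**   BindOnceExpr(apos, 3, {} -> 7); # the constructor is only called if
**                                   # position 3 appears unbound
*/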


/****************************************************************************
**
*F * * * * * * * * * * * * * initialize module * * * * * * * * * * * * * * *
*/


/****************************************************************************
**
*V  BagNames  . . . . . . . . . . . . . . . . . . . . . . . list of bag names
*/
static StructBagNames BagNames[] = {
    { T_ALIST, "atomic list" },
    { T_FIXALIST, "fixed atomic list" },
    { T_APOSOBJ, "atomic positional object" },
    { T_AREC, "atomic record" },
    { T_ACOMOBJ, "atomic component object" },
    { T_TLREC, "thread-local record" },
    { -1,    "" }
};


/****************************************************************************
**
*V  GVarFuncs . . . . . . . . . . . . . . . . . . list of functions to export
*/

static StructGVarFunc GVarFuncs[] = {

    GVAR_FUNC(AtomicList, -1, "list|count, obj"),
    GVAR_FUNC(FixedAtomicList, -1, "list|count, obj"),
    GVAR_FUNC(MakeFixedAtomicList, 1, "list"),
    GVAR_FUNC(FromAtomicList, 1, "list"),
    GVAR_FUNC(AddAtomicList, 2, "list, obj"),
    GVAR_FUNC(GET_ATOMIC_LIST, 2, "list, index"),
    GVAR_FUNC(SET_ATOMIC_LIST, 3, "list, index, value"),
    GVAR_FUNC(COMPARE_AND_SWAP, 4, "list, index, old, new"),
    GVAR_FUNC(ATOMIC_BIND, 3, "list, index, new"),
    GVAR_FUNC(ATOMIC_UNBIND, 3, "list, index, old"),

    GVAR_FUNC(ATOMIC_ADDITION, 3, "list, index, inc"),
    GVAR_FUNC(AtomicRecord, -1, "[capacity]"),
    GVAR_FUNC(IS_ATOMIC_LIST, 1, "object"),
    GVAR_FUNC(IS_FIXED_ATOMIC_LIST, 1, "object"),
    GVAR_FUNC(IS_ATOMIC_RECORD, 1, "object"),
    GVAR_FUNC(GET_ATOMIC_RECORD, 3, "record, field, default"),
    GVAR_FUNC(SET_ATOMIC_RECORD, 3, "record, field, value"),
    GVAR_FUNC(UNBIND_ATOMIC_RECORD, 2, "record, field"),
    GVAR_FUNC(FromAtomicRecord, 1, "record"),
    GVAR_FUNC(FromAtomicComObj, 1, "record"),
    GVAR_FUNC(ThreadLocalRecord, -1, "record [, record]"),
    GVAR_FUNC(SetTLDefault, 3, "threadLocalRecord, name, value"),
    GVAR_FUNC(SetTLConstructor, 3, "threadLocalRecord, name, function"),
    GVAR_FUNC(MakeWriteOnceAtomic, 1, "obj"),
    GVAR_FUNC(MakeReadWriteAtomic, 1, "obj"),
    GVAR_FUNC(MakeStrictWriteOnceAtomic, 1, "obj"),
    GVAR_FUNC(BindOnce, 3, "obj, index, value"),
    GVAR_FUNC(StrictBindOnce, 3, "obj, index, value"),
    GVAR_FUNC(TestBindOnce, 3, "obj, index, value"),
    GVAR_FUNC(BindOnceExpr, 3, "obj, index, func"),
    GVAR_FUNC(TestBindOnceExpr, 3, "obj, index, func"),
    { 0, 0, 0, 0, 0 }

};
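
/* A rough GAP-level sketch of some of the functions exported above (argument
** lists follow the table; the user-facing calling conventions are defined at
** the GAP library level, so treat this as illustration only):
**
**   l := AtomicList(10, 0);;        # atomic list of length 10, default 0
**   SET_ATOMIC_LIST(l, 3, "x");     # store a value at position 3
**   GET_ATOMIC_LIST(l, 3);          # -> "x"
**   r := AtomicRecord();;           # atomic record (optionally pass a capacity)
**   SET_ATOMIC_RECORD(r, "a", 1);   # bind field "a"
**   GET_ATOMIC_RECORD(r, "b", fail);# -> fail (default for an unbound field)
*/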

// Forbid comparison and copying of atomic objects, because they
// cannot be done in a thread-safe manner
static Int AtomicRecordErrorNoCompare(Obj arg1, Obj arg2)
{
    ErrorQuit("atomic records cannot be compared with other records", 0, 0);
    // Make compiler happy
    return 0;
}

static Int AtomicListErrorNoCompare(Obj arg1, Obj arg2)
{
    ErrorQuit("atomic lists cannot be compared with other lists", 0, 0);
    // Make compiler happy
    return 0;
}

static Obj AtomicErrorNoShallowCopy(Obj arg1)
{
    ErrorQuit("atomic objects cannot be copied", 0, 0);
    // Make compiler happy
    return 0;
}

#if !defined(USE_THREADSAFE_COPYING)
static Obj AtomicErrorNoCopy(Obj arg1, Int arg2)
{
    ErrorQuit("atomic objects cannot be copied", 0, 0);
    // Make compiler happy
    return 0;
}
#endif

/****************************************************************************
**
*F  InitKernel( <module> )  . . . . . . . . initialise kernel data structures
*/
static Int InitKernel (
    StructInitInfo *    module )
{
  UInt i;
  /* compute UsageCap */
  for (i=0; i<=3; i++)
    UsageCap[i] = (1<<i)-1;
  UsageCap[4] = 13;
  UsageCap[5] = 24;
  UsageCap[6] = 48;
  UsageCap[7] = 96;
  for (i=8; i<sizeof(UInt)*8; i++)
    UsageCap[i] = (1<<i)/3 * 2;
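  /* UsageCap[i] caps the number of usable entries for an internal table with
     2^i slots: all but one slot for very small tables, fixed values for
     sizes 16 to 128, and roughly two thirds of the capacity beyond that. */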

  // set the bag type names (for error messages and debugging)
  InitBagNamesFromTable(BagNames);

  /* install the kind methods */
  TypeObjFuncs[ T_ALIST ] = TypeAList;
  TypeObjFuncs[ T_FIXALIST ] = TypeAList;
  TypeObjFuncs[ T_APOSOBJ ] = TypeAList;
  TypeObjFuncs[ T_AREC ] = TypeARecord;
  TypeObjFuncs[ T_ACOMOBJ ] = TypeARecord;
  TypeObjFuncs[ T_TLREC ] = TypeTLRecord;
  SetTypeObjFuncs[ T_ALIST ] = SetTypeAList;
  SetTypeObjFuncs[ T_FIXALIST ] = SetTypeAList;
  SetTypeObjFuncs[ T_APOSOBJ ] = SetTypeAList;
  SetTypeObjFuncs[ T_AREC ] = SetTypeARecord;
  SetTypeObjFuncs[ T_ACOMOBJ ] = SetTypeARecord;
  /* install global variables */
  InitCopyGVar("TYPE_ALIST", &TYPE_ALIST);
  InitCopyGVar("TYPE_AREC", &TYPE_AREC);
  InitCopyGVar("TYPE_TLREC", &TYPE_TLREC);
  /* install mark functions */
  InitMarkFuncBags(T_ALIST, MarkAtomicList);
  InitMarkFuncBags(T_FIXALIST, MarkAtomicList);
  InitMarkFuncBags(T_APOSOBJ, MarkAtomicList);
  InitMarkFuncBags(T_AREC, MarkAtomicRecord);
  InitMarkFuncBags(T_ACOMOBJ, MarkAtomicRecord);
  InitMarkFuncBags(T_AREC_INNER, MarkAtomicRecord2);
  InitMarkFuncBags(T_TLREC, MarkTLRecord);
  /* install print functions */
  PrintObjFuncs[ T_ALIST ] = PrintAtomicList;
  PrintObjFuncs[ T_FIXALIST ] = PrintAtomicList;
  PrintObjFuncs[ T_AREC ] = PrintAtomicRecord;
  PrintObjFuncs[ T_TLREC ] = PrintTLRecord;
  /* install mutability functions */
  IsMutableObjFuncs [ T_ALIST ] = AlwaysYes;
  IsMutableObjFuncs [ T_FIXALIST ] = AlwaysYes;
  IsMutableObjFuncs [ T_AREC ] = AlwaysYes;
  /* mutability for T_ACOMOBJ and T_APOSOBJ is set in objects.c */
  MakeBagTypePublic(T_ALIST);
  MakeBagTypePublic(T_FIXALIST);
  MakeBagTypePublic(T_APOSOBJ);
  MakeBagTypePublic(T_AREC);
  MakeBagTypePublic(T_ACOMOBJ);
  MakeBagTypePublic(T_AREC_INNER);
  MakeBagTypePublic(T_TLREC);
  MakeBagTypePublic(T_TLREC_INNER);
  /* install list functions */

  for (UInt type = T_FIXALIST; type <= T_ALIST; type++) {
      IsListFuncs[type] = AlwaysYes;
      IsSmallListFuncs[type] = AlwaysYes;
      LenListFuncs[type] = LenListAList;
      LengthFuncs[type] = LengthAList;
      Elm0ListFuncs[type] = Elm0AList;
      ElmDefListFuncs[type] = ElmDefAList;
      Elm0vListFuncs[type] = Elm0AList;
      ElmListFuncs[type] = ElmAList;
      ElmvListFuncs[type] = ElmAList;
      ElmwListFuncs[type] = ElmAList;
      UnbListFuncs[type] = UnbAList;
      IsbListFuncs[type] = IsbAList;
  }

  AssListFuncs[T_FIXALIST] = AssFixAList;
  AssListFuncs[T_ALIST] = AssAList;


  /* AsssListFuncs[T_ALIST] = AsssAList; */
  /* install record functions */
  ElmRecFuncs[ T_AREC ] = ElmARecord;
  IsbRecFuncs[ T_AREC ] = IsbARecord;
  AssRecFuncs[ T_AREC ] = AssARecord;
  ShallowCopyObjFuncs[ T_AREC ] = ShallowCopyARecord;
  IsRecFuncs[ T_AREC ] = AlwaysYes;
  UnbRecFuncs[ T_AREC ] = UnbARecord;
  IsRecFuncs[ T_ACOMOBJ ] = AlwaysNo;
  ElmRecFuncs[ T_TLREC ] = ElmTLRecord;
  IsbRecFuncs[ T_TLREC ] = IsbTLRecord;
  AssRecFuncs[ T_TLREC ] = AssTLRecord;
  IsRecFuncs[ T_TLREC ] = AlwaysYes;
  UnbRecFuncs[ T_TLREC ] = UnbTLRecord;

  // Forbid various operations on atomic lists and records that we cannot
  // perform thread-safely.

  // Ensure that atomic objects cannot be copied
  for (UInt type = FIRST_ATOMIC_TNUM; type <= LAST_ATOMIC_TNUM; type++) {
      ShallowCopyObjFuncs[type] = AtomicErrorNoShallowCopy;
#if !defined(USE_THREADSAFE_COPYING)
      CopyObjFuncs[type] = AtomicErrorNoCopy;
      // Do not error on CleanObj, just leave it as a no-op
#endif // !defined(USE_THREADSAFE_COPYING)
  }


  // Ensure atomic lists can't be compared with other lists
  for (UInt type = FIRST_ATOMIC_LIST_TNUM; type <= LAST_ATOMIC_LIST_TNUM;
       type++) {
      for (UInt t2 = FIRST_LIST_TNUM; t2 <= LAST_LIST_TNUM; ++t2) {
          EqFuncs[type][t2] = AtomicListErrorNoCompare;
          EqFuncs[t2][type] = AtomicListErrorNoCompare;
          LtFuncs[type][t2] = AtomicListErrorNoCompare;
          LtFuncs[t2][type] = AtomicListErrorNoCompare;
      }
      for (UInt t2 = FIRST_ATOMIC_LIST_TNUM; t2 <= LAST_ATOMIC_LIST_TNUM;
           ++t2) {
          EqFuncs[type][t2] = AtomicListErrorNoCompare;
          EqFuncs[t2][type] = AtomicListErrorNoCompare;
          LtFuncs[type][t2] = AtomicListErrorNoCompare;
          LtFuncs[t2][type] = AtomicListErrorNoCompare;
      }
  }

  // Ensure atomic records can't be compared with other records
  for (UInt type = FIRST_ATOMIC_RECORD_TNUM; type <= LAST_ATOMIC_RECORD_TNUM;
       type++) {
      for (UInt t2 = FIRST_RECORD_TNUM; t2 <= LAST_RECORD_TNUM; ++t2) {
          EqFuncs[type][t2] = AtomicRecordErrorNoCompare;
          EqFuncs[t2][type] = AtomicRecordErrorNoCompare;
          LtFuncs[type][t2] = AtomicRecordErrorNoCompare;
          LtFuncs[t2][type] = AtomicRecordErrorNoCompare;
      }
      for (UInt t2 = FIRST_ATOMIC_RECORD_TNUM; t2 <= LAST_ATOMIC_RECORD_TNUM;
           ++t2) {
          EqFuncs[type][t2] = AtomicRecordErrorNoCompare;
          EqFuncs[t2][type] = AtomicRecordErrorNoCompare;
          LtFuncs[type][t2] = AtomicRecordErrorNoCompare;
          LtFuncs[t2][type] = AtomicRecordErrorNoCompare;
      }
  }
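
  /* With the handlers installed above, a GAP-level comparison such as the
     following (illustrative only) is rejected by AtomicListErrorNoCompare:

       AtomicList([1,2,3]) = [1,2,3];
       # Error, atomic lists cannot be compared with other lists
  */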

  /* return success                                                      */
  return 0;
}


/****************************************************************************
**
*F  InitLibrary( <module> ) . . . . . . .  initialise library data structures
*/
static Int InitLibrary (
    StructInitInfo *    module )
{
    /* init filters and functions                                          */
    InitGVarFuncsFromTable( GVarFuncs );

    /* return success                                                      */
    return 0;
}

/****************************************************************************
**
*F  InitInfoAObjects() . . . . . . . . . . . . . . . table of init functions
*/
static StructInitInfo module = {
    // init struct using C99 designated initializers; for a full list of
    // fields, please refer to the definition of StructInitInfo
    .type = MODULE_BUILTIN,
    .name = "aobjects",
    .initKernel = InitKernel,
    .initLibrary = InitLibrary,
    .initModuleState = InitAObjectsState,
    .destroyModuleState = DestroyAObjectsState,
};

StructInitInfo * InitInfoAObjects ( void )
{
    return &module;
}