/****************************************************************************
**
**  This file is part of GAP, a system for computational discrete algebra.
**
**  Copyright of GAP belongs to its developers, whose names are too numerous
**  to list here. Please refer to the COPYRIGHT file for details.
**
**  SPDX-License-Identifier: GPL-2.0-or-later
**
**  This file contains integer related functions which are independent of the
**  large integer representation in use. See integer.c for other things.
*/

#include "intfuncs.h"

#include "bool.h"
#include "calls.h"
#include "error.h"
#include "integer.h"
#include "lists.h"
#include "modules.h"
#include "plist.h"
#include "precord.h"
#include "records.h"
#include "stringobj.h"


/****************************************************************************
**
** * * * * * * * "Mersenne twister" random numbers  * * * * * * * * * * * * *
**
**  Part of this code for fast generation of 32 bit pseudo random numbers with
**  a period of length 2^19937-1 and a 623-dimensional equidistribution is
**  taken from:
**          http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/emt.html
**  (Also look in Wikipedia for "Mersenne twister".)
**  We use the file mt19937ar.c, version 2002/1/26.
*/

/****************************************************************************
**
*F  InitRandomMT( <initstr> )
**
**  Returns a string that can be used as the data structure of a new MT
**  random number generator. <initstr> can be an arbitrary string used as
**  the seed.
*/
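
// Illustrative GAP-level use (a sketch, not normative): the state returned
// by InitRandomMT is a plain string of 4*626 = 2504 bytes, so for example
//     gap> state := InitRandomMT("any seed string");;
//     gap> Length(state);
//     2504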
#define MATRIX_A 0x9908b0dfUL   /* constant vector a */
#define UPPER_MASK 0x80000000UL /* most significant w-r bits */
#define LOWER_MASK 0x7fffffffUL /* least significant r bits */

static void initGRMT(UInt4 * mt, UInt4 s)
{
    UInt4 mti;
    mt[0]= s & 0xffffffffUL;
    for (mti=1; mti<624; mti++) {
        mt[mti] =
            (1812433253UL * (mt[mti-1] ^ (mt[mti-1] >> 30)) + mti);
        mt[mti] &= 0xffffffffUL;
    }
    /* store mti as last entry of mt[] */
    mt[624] = mti;
}

// Read s[pos], returning 0 if pos is past the end of the array
static inline UChar checkedReadChar(const UChar * s, UInt4 pos, UInt4 len)
{
    if (pos < len)
        return s[pos];
    else
        return 0;
}

/* to read a seed string independently of endianness */
static inline UInt4 uint4frombytes(const UChar * s, UInt4 pos, UInt4 len)
{
  UInt4 res;
  res = checkedReadChar(s, pos + 3, len);
  res <<= 8;
  res += checkedReadChar(s, pos + 2, len);
  res <<= 8;
  res += checkedReadChar(s, pos + 1, len);
  res <<= 8;
  res += checkedReadChar(s, pos + 0, len);
  return res;
}
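
// For example, for the seed bytes "abcd" (0x61 0x62 0x63 0x64),
// uint4frombytes(s, 0, 4) yields 0x64636261 on any platform, and positions
// past the end of the string read as zero via checkedReadChar.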

static Obj FuncInitRandomMT(Obj self, Obj initstr)
{
  Obj str;
  const UChar *init_key;
  UInt4 *mt, key_length, byte_key_length, i, j, k, N = 624;

  /* check the seed, given as string */
  RequireStringRep("InitRandomMT", initstr);

  /* store an array of 624 UInt4, one UInt4 as the counter "mti", and an
     endianness marker */
  str = NEW_STRING(4*626);
  SET_LEN_STRING(str, 4*626);
  mt = (UInt4 *)(ADDR_OBJ(str) + 1);
  /* here the counter mti is set to 624 */
  initGRMT(mt, 19650218UL);
  i=1; j=0;
  /* Do not set these up until all garbage collection is done   */
  init_key = CONST_CHARS_STRING(initstr);
  byte_key_length = GET_LEN_STRING(initstr);
  key_length = byte_key_length / 4;
  k = (N>key_length ? N : key_length);
  for (; k; k--) {
      mt[i] = (mt[i] ^ ((mt[i - 1] ^ (mt[i - 1] >> 30)) * 1664525UL)) +
              uint4frombytes(init_key, 4 * j, byte_key_length) + j;
      mt[i] &= 0xffffffffUL;
      i++; j++;
      if (i>=N) { mt[0] = mt[N-1]; i=1; }
      if (4 * j >= byte_key_length) j=0;
  }
  for (k=N-1; k; k--) {
      mt[i] = (mt[i] ^ ((mt[i-1] ^ (mt[i-1] >> 30)) * 1566083941UL)) - i;
      mt[i] &= 0xffffffffUL;
      i++;
      if (i>=N) { mt[0] = mt[N-1]; i=1; }
  }
  mt[0] = 0x80000000UL;
  /* gives string "1234" in little endian as marker */
  mt[625] = 875770417UL;
  return str;
}


/*  internal, generates a random number on the [0,0xffffffff] interval;
**  argument <mt> is a pointer to a string generated by InitRandomMT
**  (the first 4*624 bytes are the random numbers, the last 4 bytes contain
**  a counter)
*/
UInt4 nextrandMT_int32(UInt4* mt)
{
    UInt4 mti, y, N=624, M=397;
    static UInt4 mag01[2]={0x0UL, MATRIX_A};

    mti = mt[624];
    if (mti >= N) {
        int kk;

        for (kk=0;kk<N-M;kk++) {
            y = (mt[kk]&UPPER_MASK)|(mt[kk+1]&LOWER_MASK);
            mt[kk] = mt[kk+M] ^ (y >> 1) ^ mag01[y & 0x1UL];
        }
        for (;kk<N-1;kk++) {
            y = (mt[kk]&UPPER_MASK)|(mt[kk+1]&LOWER_MASK);
            mt[kk] = mt[kk+(M-N)] ^ (y >> 1) ^ mag01[y & 0x1UL];
        }
        y = (mt[N-1]&UPPER_MASK)|(mt[0]&LOWER_MASK);
        mt[N-1] = mt[M-1] ^ (y >> 1) ^ mag01[y & 0x1UL];

        mti = 0;
    }

    y = mt[mti++];
    mt[624] = mti;

    /* Tempering */
    y ^= (y >> 11);
    y ^= (y << 7) & 0x9d2c5680UL;
    y ^= (y << 15) & 0xefc60000UL;
    y ^= (y >> 18);

    return y;
}
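
// Illustrative sketch (kept under '#if 0', so never compiled): how the two
// functions above fit together when used standalone. The state is 625 UInt4
// words: 624 words of Mersenne twister state plus the counter "mti" stored
// in the final entry, exactly as initGRMT lays it out.
#if 0
static UInt4 exampleDrawMT(void)
{
    UInt4 state[625];
    initGRMT(state, 4357UL);          /* seed from an arbitrary UInt4 */
    return nextrandMT_int32(state);   /* uniform on [0, 2^32-1] */
}
#endif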


//-----------------------------------------------------------------------------
// MurmurHash3 was written by Austin Appleby, and is placed in the public
// domain. The author hereby disclaims copyright to this source code.

// Note - The x86 and x64 versions do _not_ produce the same results, as the
// algorithms are optimized for their respective platforms. You can still
// compile and run any of them on any platform, but your performance with the
// non-native version will be less than optimal.

/* Minor modifications to get it to compile in C rather than C++ and to
   integrate with GAP. SL */


#define FORCE_INLINE static inline

#ifndef SYS_IS_64_BIT

//-----------------------------------------------------------------------------
// Platform-specific functions and macros

static inline uint32_t rotl32 ( uint32_t x, int8_t r )
{
  return (x << r) | (x >> (32 - r));
}
#define ROTL32(x,y)     rotl32(x,y)

//-----------------------------------------------------------------------------
// Block read - if your platform needs to do endian-swapping or can only
// handle aligned reads, do the conversion here
//
// As in the 64 bit version below, the pointer may be unaligned, so we read
// via memcpy and let the compiler turn it into a plain load where that is
// safe.

FORCE_INLINE uint32_t getblock4 ( const uint32_t * p, int i )
{
  uint32_t val;
  memcpy(&val, p + i, sizeof(uint32_t));
  return val;
}

//-----------------------------------------------------------------------------
// Finalization mix - force all bits of a hash block to avalanche

FORCE_INLINE uint32_t fmix4 ( uint32_t h )
{
  h ^= h >> 16;
  h *= 0x85ebca6b;
  h ^= h >> 13;
  h *= 0xc2b2ae35;
  h ^= h >> 16;

  return h;
}

//-----------------------------------------------------------------------------

void MurmurHash3_x86_32 ( const void * key, int len,
                          UInt4 seed, void * out )
{
  const uint8_t * data = (const uint8_t*)key;
  const int nblocks = len / 4;

  uint32_t h1 = seed;

  uint32_t c1 = 0xcc9e2d51;
  uint32_t c2 = 0x1b873593;

  //----------
  // body

  const uint32_t * blocks = (const uint32_t *)(data + nblocks*4);

  int i;
  for(i = -nblocks; i; i++)
  {
    uint32_t k1 = getblock4(blocks,i);

    k1 *= c1;
    k1 = ROTL32(k1,15);
    k1 *= c2;

    h1 ^= k1;
    h1 = ROTL32(h1,13);
    h1 = h1*5+0xe6546b64;
  }

  //----------
  // tail

  const uint8_t * tail = (const uint8_t*)(data + nblocks*4);

  uint32_t k1 = 0;

  switch(len & 3)
  {
  case 3: k1 ^= tail[2] << 16;
  case 2: k1 ^= tail[1] << 8;
  case 1: k1 ^= tail[0];
          k1 *= c1; k1 = ROTL32(k1,15); k1 *= c2; h1 ^= k1;
  };

  //----------
  // finalization

  h1 ^= len;

  h1 = fmix4(h1);

  *(uint32_t*)out = h1;
}

#else

//-----------------------------------------------------------------------------
// Platform-specific functions and macros

static inline uint64_t rotl64 ( uint64_t x, int8_t r )
{
  return (x << r) | (x >> (64 - r));
}

#define ROTL64(x,y)     rotl64(x,y)


#define BIG_CONSTANT(x) (x##LLU)


//-----------------------------------------------------------------------------
// Block read - if your platform needs to do endian-swapping or can only
// handle aligned reads, do the conversion here
//
// The pointer p may not be aligned, which means that directly reading it can
// incur a major performance penalty or even trigger a segfault on certain
// architectures (e.g. ARM, SPARC). Thus we use memcpy here, with the implicit
// hope that on archs which don't need this, the compiler will optimize it back
// into a direct copy (verified to happen with GCC and clang on x86_64)

FORCE_INLINE uint64_t getblock8 ( const uint64_t * p, int i )
{
  uint64_t val;
  memcpy(&val, p + i, sizeof(uint64_t));
  return val;
}

//-----------------------------------------------------------------------------
// Finalization mix - force all bits of a hash block to avalanche

FORCE_INLINE uint64_t fmix8 ( uint64_t k )
{
  k ^= k >> 33;
  k *= BIG_CONSTANT(0xff51afd7ed558ccd);
  k ^= k >> 33;
  k *= BIG_CONSTANT(0xc4ceb9fe1a85ec53);
  k ^= k >> 33;

  return k;
}

void MurmurHash3_x64_128 ( const void * key, const int len,
                           const UInt4 seed, void * out )
{
  const int nblocks = len / 16;

  uint64_t h1 = seed;
  uint64_t h2 = seed;

  uint64_t c1 = BIG_CONSTANT(0x87c37b91114253d5);
  uint64_t c2 = BIG_CONSTANT(0x4cf5ad432745937f);

  //----------
  // body

  const uint64_t * blocks = (const uint64_t *)key;

  int i;
  for(i = 0; i < nblocks; i++)
  {
    uint64_t k1 = getblock8(blocks,i*2+0);
    uint64_t k2 = getblock8(blocks,i*2+1);

    k1 *= c1; k1  = ROTL64(k1,31); k1 *= c2; h1 ^= k1;

    h1 = ROTL64(h1,27); h1 += h2; h1 = h1*5+0x52dce729;

    k2 *= c2; k2  = ROTL64(k2,33); k2 *= c1; h2 ^= k2;

    h2 = ROTL64(h2,31); h2 += h1; h2 = h2*5+0x38495ab5;
  }

  //----------
  // tail

  const uint8_t * tail = (const uint8_t*)key + nblocks*16;

  uint64_t k1 = 0;
  uint64_t k2 = 0;

  switch(len & 15)
  {
  case 15: k2 ^= (uint64_t)(tail[14]) << 48;
  case 14: k2 ^= (uint64_t)(tail[13]) << 40;
  case 13: k2 ^= (uint64_t)(tail[12]) << 32;
  case 12: k2 ^= (uint64_t)(tail[11]) << 24;
  case 11: k2 ^= (uint64_t)(tail[10]) << 16;
  case 10: k2 ^= (uint64_t)(tail[ 9]) << 8;
  case  9: k2 ^= (uint64_t)(tail[ 8]) << 0;
           k2 *= c2; k2  = ROTL64(k2,33); k2 *= c1; h2 ^= k2;

  case  8: k1 ^= (uint64_t)(tail[ 7]) << 56;
  case  7: k1 ^= (uint64_t)(tail[ 6]) << 48;
  case  6: k1 ^= (uint64_t)(tail[ 5]) << 40;
  case  5: k1 ^= (uint64_t)(tail[ 4]) << 32;
  case  4: k1 ^= (uint64_t)(tail[ 3]) << 24;
  case  3: k1 ^= (uint64_t)(tail[ 2]) << 16;
  case  2: k1 ^= (uint64_t)(tail[ 1]) << 8;
  case  1: k1 ^= (uint64_t)(tail[ 0]) << 0;
           k1 *= c1; k1  = ROTL64(k1,31); k1 *= c2; h1 ^= k1;
  };

  //----------
  // finalization

  h1 ^= len; h2 ^= len;

  h1 += h2;
  h2 += h1;

  h1 = fmix8(h1);
  h2 = fmix8(h2);

  h1 += h2;
  h2 += h1;

  ((uint64_t*)out)[0] = h1;
  ((uint64_t*)out)[1] = h2;
}
#endif


/****************************************************************************
**
*F  FuncHASHKEY_BAG(<self>,<obj>,<seed>,<offset>,<maxlen>)
**
**  'FuncHASHKEY_BAG' implements the internal function 'HASHKEY_BAG'.
**
**  'HASHKEY_BAG( <obj>, <seed>, <offset>, <maxlen> )'
**
**  takes a non-immediate object and a small integer <seed> and computes a
**  hash value for the contents of the bag from these. For this to be usable
**  in algorithms, we need that objects of this kind are stored uniquely
**  internally.
**  Both <offset> and <maxlen>, the maximum number of bytes to process, are
**  measured in bytes. The values passed to these parameters might depend on
**  the word length of the computer.
**  A <maxlen> value of -1 indicates infinity.
*/
static Obj
FuncHASHKEY_BAG(Obj self, Obj obj, Obj seed, Obj offset, Obj maxlen)
{
  Int n;
  if ( IS_INTOBJ(obj) )
    return obj;

  if ( IS_FFE(obj) ) {
    /* We must be careful here, as different FFEs can represent equal
       values (e.g. 0*Z(2^2) and 0*Z(2) compare as equal). Thus, we cannot
       simply use the bit pattern of obj to compute a hash, as a well-defined
       hash function must satisfy the implication
          obj1 = obj2 => HASH(obj1) = HASH(obj2)
       There are different ways to do this for FFEs, with different trade-offs.
       Instead of making an arbitrary choice here, let's just refuse to
       compute a hash here, and require the caller to provide a custom hash
       function tailored to their needs.
     */
    ErrorMayQuit("HASHKEY_BAG: <obj> must not be an FFE", 0, 0);
  }

  /* check the arguments                                                 */
  Int s = GetSmallInt("HASHKEY_BAG", seed);

  Int offs = GetSmallInt("HASHKEY_BAG", offset);
  if (offs < 0 || offs > SIZE_OBJ(obj)) {
      ErrorMayQuit("HASHKEY_BAG: <offset> must be non-negative and at most "
                   "the bag size",
                   0, 0);
  }

  /* maximal number of bytes to read */
  Int imaxlen = GetSmallInt("HASHKEY_BAG", maxlen);

  n=SIZE_OBJ(obj)-offs;

  if (n > imaxlen && imaxlen != -1) {
      n = imaxlen;
  }

  return INTOBJ_INT(HASHKEY_BAG_NC(obj, (UInt4)s, offs, (int)n));
}
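
// Illustrative GAP-level call (a sketch; the value depends on the word size
// and on the internal representation of <obj>): hash all bytes of a string
// bag, with seed 26, no offset, and no length limit:
//     gap> HASHKEY_BAG("abc", 26, 0, -1);;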

Int HASHKEY_MEM_NC(const void * ptr, UInt4 seed, Int read)
{
#ifdef SYS_IS_64_BIT
    UInt8 hashout[2];
    MurmurHash3_x64_128(ptr, read, seed, (void *)hashout);
    return hashout[0] % (1UL << 60);
#else
    UInt4 hashout;
    MurmurHash3_x86_32(ptr, read, seed, (void *)&hashout);
    return hashout % (1UL << 28);
#endif
}

Int HASHKEY_BAG_NC(Obj obj, UInt4 seed, Int skip, int read)
{
    return HASHKEY_MEM_NC((const UChar *)CONST_ADDR_OBJ(obj) + skip, seed,
                          read);
}

Int HASHKEY_WHOLE_BAG_NC(Obj obj, UInt4 seed)
{
    return HASHKEY_BAG_NC(obj, seed, 0, SIZE_OBJ(obj));
}
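
// Illustrative sketch (kept under '#if 0', so never compiled): the _MEM_
// variant hashes any plain memory range, of which HASHKEY_BAG_NC and
// HASHKEY_WHOLE_BAG_NC above are the bag-content special cases.
#if 0
static Int exampleHashBuffer(void)
{
    const char buf[] = "hello, world";
    /* hash every byte except the trailing NUL byte, with seed 0 */
    return HASHKEY_MEM_NC(buf, 0, sizeof(buf) - 1);
}
#endif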


/****************************************************************************
**
*F  SmallInt Bitfield operations
**
**  The goal here is to set up a division of the usable bits in a small
**  integer into fields which can be accessed very quickly from GAP level and
**  quickly and conveniently from C. The purpose is to allow implementation
**  of data structures that divide up the bits within a word without having
**  to make them entirely opaque to the GAP level or ridiculously slow.
**
**  The API is defined in lib/bitfields.gd and works by providing the user
**  with a collection of functions to get and set fields and assemble an
**  entire word.
**
**  These functions are constructed here and have special handlers. The
**  information the handlers need about the size and position of the
**  bitfields is stored in special fields added after the regular function
**  bag fields, and is referred to as MASK_BITFIELD_FUNC and
**  OFFSET_BITFIELD_FUNC.
**
**  For fields of size 1 we also offer Boolean setters and getters which
**  accept and return True for 1 and False for 0. This makes for much nicer
**  code on the GAP side.
*/
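
// Worked example of the layout (illustrative): for widths [2, 3] the fields
// occupy bits 0-1 and 2-4 of the word, so the generated functions store
//     mask = 0x03, offset = 0   for the first field, and
//     mask = 0x1c, offset = 2   for the second.
// A field is read as (x & mask) >> offset and written as
// (x & ~mask) | (val << offset), exactly as in the handlers below.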
typedef struct {
    FuncBag f;

    Obj mask;
    Obj offset;
} BitfieldFuncBag;

static inline const BitfieldFuncBag * CBFB(Obj func)
{
    return (const BitfieldFuncBag *)CONST_ADDR_OBJ(func);
}

static inline BitfieldFuncBag * BFB(Obj func)
{
    return (BitfieldFuncBag *)ADDR_OBJ(func);
}

static inline UInt MASK_BITFIELD_FUNC(Obj func)
{
    GAP_ASSERT(TNUM_OBJ(func) == T_FUNCTION);
    GAP_ASSERT(SIZE_OBJ(func) == sizeof(BitfieldFuncBag));
    return UInt_ObjInt(CBFB(func)->mask);
}

static inline void SET_MASK_BITFIELD_FUNC(Obj func, UInt mask)
{
    GAP_ASSERT(TNUM_OBJ(func) == T_FUNCTION);
    GAP_ASSERT(SIZE_OBJ(func) == sizeof(BitfieldFuncBag));
    BFB(func)->mask = ObjInt_UInt(mask);
}

static inline UInt OFFSET_BITFIELD_FUNC(Obj func)
{
    GAP_ASSERT(TNUM_OBJ(func) == T_FUNCTION);
    GAP_ASSERT(SIZE_OBJ(func) == sizeof(BitfieldFuncBag));
    return UInt_ObjInt(CBFB(func)->offset);
}

static inline void SET_OFFSET_BITFIELD_FUNC(Obj func, UInt offset)
{
    GAP_ASSERT(TNUM_OBJ(func) == T_FUNCTION);
    GAP_ASSERT(SIZE_OBJ(func) == sizeof(BitfieldFuncBag));
    BFB(func)->offset = ObjInt_UInt(offset);
}

static Obj DoFieldGetter(Obj self, Obj data)
{
    UInt x = GetSmallInt("Field getter", data);
    UInt mask = MASK_BITFIELD_FUNC(self);
    UInt offset = OFFSET_BITFIELD_FUNC(self);
    return INTOBJ_INT((x & mask) >> offset);
}

static Obj DoFieldSetter(Obj self, Obj data, Obj val)
{
    UInt x = GetSmallInt("Field Setter", data);
    UInt y = GetSmallInt("Field Setter", val);
    UInt mask = MASK_BITFIELD_FUNC(self);
    UInt offset = OFFSET_BITFIELD_FUNC(self);
    return INTOBJ_INT((x & ~mask) | (y << offset));
}

static Obj DoBooleanFieldGetter(Obj self, Obj data)
{
    UInt x = GetSmallInt("Boolean Field getter", data);
    UInt mask = MASK_BITFIELD_FUNC(self);
    return (x & mask) ? True : False;
}

static Obj DoBooleanFieldSetter(Obj self, Obj data, Obj val)
{
    UInt x = GetSmallInt("Boolean Field Setter", data);
    RequireTrueOrFalse("Boolean Field Setter", val);
    UInt mask = MASK_BITFIELD_FUNC(self);
    if (val == True)
        x |= mask;
    else if (val == False)
        x &= ~mask;
    return INTOBJ_INT(x);
}


static Obj FuncBUILD_BITFIELDS(Obj self, Obj args)
{
    GAP_ASSERT(IS_PLIST(args));
    GAP_ASSERT(LEN_PLIST(args) >= 1 && ELM_PLIST(args, 1));
    Obj widths = ELM_PLIST(args, 1);
    if (!IS_LIST(widths))
        ErrorMayQuit("Fields builder: first argument must be list of widths",
                     0, 0);
    UInt nfields = LEN_LIST(widths);
    if (LEN_PLIST(args) != nfields + 1)
        ErrorMayQuit(
            "Fields builder: number of values must match number of widths", 0,
            0);
    UInt x = 0;
    UInt i;
    for (i = nfields; i > 0; i--) {
        GAP_ASSERT(ISB_LIST(widths, i));
        Obj y = ELM_LIST(widths, i);
        x <<= INT_INTOBJ(y);
        GAP_ASSERT(ELM_PLIST(args, i + 1));
        Obj z = ELM_PLIST(args, i + 1);
        if (!IS_INTOBJ(z))
            ErrorMayQuit("Fields builder: values must be small integers", 0,
                         0);
        GAP_ASSERT(INT_INTOBJ(z) < (1 << INT_INTOBJ(y)));
        x |= INT_INTOBJ(z);
    }
    return INTOBJ_INT(x);
}
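
// For example (illustrative): widths [2, 3] with values [3, 5] pack, from
// the least significant bits upwards, into
//     x = 3 | (5 << 2) = 23.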


static Obj FuncMAKE_BITFIELDS(Obj self, Obj widths)
{
    if (!IS_LIST(widths))
        ErrorMayQuit("MAKE_BITFIELDS: widths must be a list", 0, 0);
    UInt nfields = LEN_LIST(widths);
    UInt starts[nfields + 1];
    starts[0] = 0;
    for (UInt i = 1; i <= nfields; i++) {
        Obj o = ELM_LIST(widths, i);
        if (!IS_INTOBJ(o))
            ErrorMayQuit("MAKE_BITFIELDS: widths must be small integers", 0,
                         0);
        UInt width = INT_INTOBJ(o);
        starts[i] = starts[i - 1] + width;
    }
    if (starts[nfields] > 8 * sizeof(UInt))
        ErrorMayQuit("MAKE_BITFIELDS: total widths too large", 0, 0);

    Obj nameSetter = MakeImmString("<field setter>");
    Obj nameGetter = MakeImmString("<field getter>");
    Obj nameBSetter = MakeImmString("<boolean field setter>");
    Obj nameBGetter = MakeImmString("<boolean field getter>");
    Obj dataArgs = ArgStringToList("data");
    Obj dataValArgs = ArgStringToList("data, val");

    Obj  setters = NEW_PLIST_IMM(T_PLIST_DENSE, nfields);
    Obj  getters = NEW_PLIST_IMM(T_PLIST_DENSE, nfields);
    Obj  bsetters = NEW_PLIST_IMM(T_PLIST, nfields);
    UInt bslen = 0;
    Obj  bgetters = NEW_PLIST_IMM(T_PLIST, nfields);
    for (UInt i = 1; i <= nfields; i++) {
        /* mask = 2^starts[i] - 2^starts[i-1], computed in unsigned
           arithmetic so that a field ending at the top bit of the word
           does not shift out of range */
        UInt high = starts[i] < 8 * sizeof(UInt) ? (UInt)1 << starts[i] : 0;
        UInt mask = high - ((UInt)1 << starts[i - 1]);
        Obj  s = NewFunctionT(T_FUNCTION, sizeof(BitfieldFuncBag), nameSetter,
                             2, dataValArgs, DoFieldSetter);
        SET_MASK_BITFIELD_FUNC(s, mask);
        SET_OFFSET_BITFIELD_FUNC(s, starts[i - 1]);
        SET_ELM_PLIST(setters, i, s);
        CHANGED_BAG(setters);
        Obj g = NewFunctionT(T_FUNCTION, sizeof(BitfieldFuncBag), nameGetter,
                             1, dataArgs, DoFieldGetter);
        SET_MASK_BITFIELD_FUNC(g, mask);
        SET_OFFSET_BITFIELD_FUNC(g, starts[i - 1]);
        SET_ELM_PLIST(getters, i, g);
        CHANGED_BAG(getters);
        if (starts[i] - starts[i - 1] == 1) {
            s = NewFunctionT(T_FUNCTION, sizeof(BitfieldFuncBag), nameBSetter,
                             2, dataValArgs, DoBooleanFieldSetter);
            SET_MASK_BITFIELD_FUNC(s, mask);
            SET_OFFSET_BITFIELD_FUNC(s, starts[i - 1]);
            SET_ELM_PLIST(bsetters, i, s);
            CHANGED_BAG(bsetters);
            bslen = i;
            g = NewFunctionT(T_FUNCTION, sizeof(BitfieldFuncBag), nameBGetter,
                             1, dataArgs, DoBooleanFieldGetter);
            SET_MASK_BITFIELD_FUNC(g, mask);
            SET_OFFSET_BITFIELD_FUNC(g, starts[i - 1]);
            SET_ELM_PLIST(bgetters, i, g);
            CHANGED_BAG(bgetters);
        }
    }

    SET_LEN_PLIST(setters, nfields);
    SET_LEN_PLIST(getters, nfields);
    SET_LEN_PLIST(bsetters, bslen);
    SET_LEN_PLIST(bgetters, bslen);

    Obj ms = NEW_PREC(5);
    AssPRec(ms, RNamName("widths"), CopyObj(widths, 0));
    AssPRec(ms, RNamName("getters"), getters);
    AssPRec(ms, RNamName("setters"), setters);
    if (bslen > 0) {
        AssPRec(ms, RNamName("booleanGetters"), bgetters);
        AssPRec(ms, RNamName("booleanSetters"), bsetters);
    }
    SortPRecRNam(ms, 0);
    MakeImmutableNoRecurse(ms);
    return ms;
}
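
// Illustrative GAP-level session (a sketch; MAKE_BITFIELDS is normally
// reached through the MakeBitfields wrapper from lib/bitfields.gd):
//     gap> bf := MAKE_BITFIELDS(2, 3);;   # widths are collected into a list
//     gap> x := BUILD_BITFIELDS([2, 3], 3, 5);
//     23
//     gap> bf.getters[2](x);   # (23 & 0x1c) >> 2
//     5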


/****************************************************************************
**
*F * * * * * * * * * * * * * initialize module * * * * * * * * * * * * * * *
*/


/****************************************************************************
**
*V  GVarFuncs . . . . . . . . . . . . . . . . . . list of functions to export
*/
static StructGVarFunc GVarFuncs[] = {

    GVAR_FUNC(HASHKEY_BAG, 4, "obj, seed, offset, maxlen"),
    GVAR_FUNC(InitRandomMT, 1, "initstr"),
    GVAR_FUNC(MAKE_BITFIELDS, -1, "widths"),
    GVAR_FUNC(BUILD_BITFIELDS, -2, "widths, vals"),
    { 0, 0, 0, 0, 0 }

};


/****************************************************************************
**
*F  InitKernel( <module> )  . . . . . . . . initialise kernel data structures
*/
static Int InitKernel (
    StructInitInfo *    module )
{

    InitHandlerFunc(DoFieldSetter, "field-setter");
    InitHandlerFunc(DoFieldGetter, "field-getter");
    InitHandlerFunc(DoBooleanFieldSetter, "boolean-field-setter");
    InitHandlerFunc(DoBooleanFieldGetter, "boolean-field-getter");

    /* init filters and functions                                          */
    InitHdlrFuncsFromTable( GVarFuncs );

    return 0;
}


/****************************************************************************
**
*F  InitLibrary( <module> ) . . . . . . .  initialise library data structures
*/
static Int InitLibrary (
    StructInitInfo *    module )
{
    /* init filters and functions                                          */
    InitGVarFuncsFromTable( GVarFuncs );

    /* return success                                                      */
    return 0;
}


/****************************************************************************
**
*F  InitInfoIntFuncs()  . . . . . . . . . . . . . .  table of init functions
*/
static StructInitInfo module = {
    // init struct using C99 designated initializers; for a full list of
    // fields, please refer to the definition of StructInitInfo
    .type = MODULE_BUILTIN,
    .name = "intfuncs",
    .initKernel = InitKernel,
    .initLibrary = InitLibrary,
};

StructInitInfo * InitInfoIntFuncs ( void )
{
    return &module;
}