/****************************************************************************
**
**  This file is part of GAP, a system for computational discrete algebra.
**
**  Copyright of GAP belongs to its developers, whose names are too numerous
**  to list here. Please refer to the COPYRIGHT file for details.
**
**  SPDX-License-Identifier: GPL-2.0-or-later
**
**  This file contains integer related functions which are independent of the
**  large integer representation in use. See integer.c for other things.
*/

#include "intfuncs.h"

#include "bool.h"
#include "calls.h"
#include "error.h"
#include "integer.h"
#include "lists.h"
#include "modules.h"
#include "plist.h"
#include "precord.h"
#include "records.h"
#include "stringobj.h"

/****************************************************************************
**
** * * * * * * * "Mersenne twister" random numbers * * * * * * * * * * * * *
**
**  Part of this code for fast generation of 32 bit pseudo random numbers with
**  a period of length 2^19937-1 and a 623-dimensional equidistribution is
**  taken from:
**          http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/emt.html
**  (Also look in Wikipedia for "Mersenne twister".)
**  We use the file mt19937ar.c, version 2002/1/26.
*/

/****************************************************************************
**
*F  InitRandomMT( <initstr> )
**
**  Returns a string that can be used as data structure of a new MT random
**  number generator. <initstr> can be an arbitrary string as seed.
*/ #define MATRIX_A 0x9908b0dfUL /* constant vector a */ #define UPPER_MASK 0x80000000UL /* most significant w-r bits */ #define LOWER_MASK 0x7fffffffUL /* least significant r bits */ static void initGRMT(UInt4 * mt, UInt4 s) { UInt4 mti; mt[0]= s & 0xffffffffUL; for (mti=1; mti<624; mti++) { mt[mti] = (1812433253UL * (mt[mti-1] ^ (mt[mti-1] >> 30)) + mti); mt[mti] &= 0xffffffffUL; } /* store mti as last entry of mt[] */ mt[624] = mti; } // Read s[pos], returning 0 if pos is past the error of the array static inline UChar checkedReadChar(const UChar * s, UInt4 pos, UInt4 len) { if (pos < len) return s[pos]; else return 0; } /* to read a seed string independently of endianness */ static inline UInt4 uint4frombytes(const UChar * s, UInt4 pos, UInt4 len) { UInt4 res; res = checkedReadChar(s, pos + 3, len); res <<= 8; res += checkedReadChar(s, pos + 2, len); res <<= 8; res += checkedReadChar(s, pos + 1, len); res <<= 8; res += checkedReadChar(s, pos + 0, len); return res; } static Obj FuncInitRandomMT(Obj self, Obj initstr) { Obj str; const UChar *init_key; UInt4 *mt, key_length, byte_key_length, i, j, k, N = 624; /* check the seed, given as string */ RequireStringRep("InitRandomMT", initstr); /* store array of 624 UInt4 and one UInt4 as counter "mti" and an endianness marker */ str = NEW_STRING(4*626); SET_LEN_STRING(str, 4*626); mt = (UInt4 *)(ADDR_OBJ(str) + 1); /* here the counter mti is set to 624 */ initGRMT(mt, 19650218UL); i=1; j=0; /* Do not set these up until all garbage collection is done */ init_key = CONST_CHARS_STRING(initstr); byte_key_length = GET_LEN_STRING(initstr); key_length = byte_key_length / 4; k = (N>key_length ? 
N : key_length); for (; k; k--) { mt[i] = (mt[i] ^ ((mt[i - 1] ^ (mt[i - 1] >> 30)) * 1664525UL)) + uint4frombytes(init_key, 4 * j, byte_key_length) + j; mt[i] &= 0xffffffffUL; i++; j++; if (i>=N) { mt[0] = mt[N-1]; i=1; } if (4 * j >= byte_key_length) j=0; } for (k=N-1; k; k--) { mt[i] = (mt[i] ^ ((mt[i-1] ^ (mt[i-1] >> 30)) * 1566083941UL)) - i; mt[i] &= 0xffffffffUL; i++; if (i>=N) { mt[0] = mt[N-1]; i=1; } } mt[0] = 0x80000000UL; /* gives string "1234" in little endian as marker */ mt[625] = 875770417UL; return str; } /* internal, generates a random number on [0,0xffffffff]-interval ** argument is pointer to a string generated by InitRandomMT ** (the first 4*624 bytes are the random numbers, the last 4 bytes contain ** a counter) */ UInt4 nextrandMT_int32(UInt4* mt) { UInt4 mti, y, N=624, M=397; static UInt4 mag01[2]={0x0UL, MATRIX_A}; mti = mt[624]; if (mti >= N) { int kk; for (kk=0;kk> 1) ^ mag01[y & 0x1UL]; } for (;kk> 1) ^ mag01[y & 0x1UL]; } y = (mt[N-1]&UPPER_MASK)|(mt[0]&LOWER_MASK); mt[N-1] = mt[M-1] ^ (y >> 1) ^ mag01[y & 0x1UL]; mti = 0; } y = mt[mti++]; mt[624] = mti; /* Tempering */ y ^= (y >> 11); y ^= (y << 7) & 0x9d2c5680UL; y ^= (y << 15) & 0xefc60000UL; y ^= (y >> 18); return y; } //----------------------------------------------------------------------------- // MurmurHash3 was written by Austin Appleby, and is placed in the public // domain. The author hereby disclaims copyright to this source code. // Note - The x86 and x64 versions do _not_ produce the same results, as the // algorithms are optimized for their respective platforms. You can still // compile and run any of them on any platform, but your performance with the // non-native version will be less than optimal. //----------------------------------------------------------------------------- // MurmurHash3 was written by Austin Appleby, and is placed in the public // domain. The author hereby disclaims copyright to this source code. 
/* Minor modifications to get it to compile in C rather than C++ and integrate with GAP SL*/ #define FORCE_INLINE static inline #ifndef SYS_IS_64_BIT //----------------------------------------------------------------------------- // Platform-specific functions and macros static inline uint32_t rotl32 ( uint32_t x, int8_t r ) { return (x << r) | (x >> (32 - r)); } #define ROTL32(x,y) rotl32(x,y) //----------------------------------------------------------------------------- // Block read - if your platform needs to do endian-swapping or can only // handle aligned reads, do the conversion here FORCE_INLINE uint32_t getblock4 ( const uint32_t * p, int i ) { return p[i]; } //----------------------------------------------------------------------------- // Finalization mix - force all bits of a hash block to avalanche FORCE_INLINE uint32_t fmix4 ( uint32_t h ) { h ^= h >> 16; h *= 0x85ebca6b; h ^= h >> 13; h *= 0xc2b2ae35; h ^= h >> 16; return h; } //----------------------------------------------------------------------------- void MurmurHash3_x86_32 ( const void * key, int len, UInt4 seed, void * out ) { const uint8_t * data = (const uint8_t*)key; const int nblocks = len / 4; uint32_t h1 = seed; uint32_t c1 = 0xcc9e2d51; uint32_t c2 = 0x1b873593; //---------- // body const uint32_t * blocks = (const uint32_t *)(data + nblocks*4); int i; for(i = -nblocks; i; i++) { uint32_t k1 = getblock4(blocks,i); k1 *= c1; k1 = ROTL32(k1,15); k1 *= c2; h1 ^= k1; h1 = ROTL32(h1,13); h1 = h1*5+0xe6546b64; } //---------- // tail const uint8_t * tail = (const uint8_t*)(data + nblocks*4); uint32_t k1 = 0; switch(len & 3) { case 3: k1 ^= tail[2] << 16; case 2: k1 ^= tail[1] << 8; case 1: k1 ^= tail[0]; k1 *= c1; k1 = ROTL32(k1,16); k1 *= c2; h1 ^= k1; }; //---------- // finalization h1 ^= len; h1 = fmix4(h1); *(uint32_t*)out = h1; } #else //----------------------------------------------------------------------------- // Platform-specific functions and macros static inline uint64_t rotl64 ( 
uint64_t x, int8_t r ) { return (x << r) | (x >> (64 - r)); } #define ROTL64(x,y) rotl64(x,y) #define BIG_CONSTANT(x) (x##LLU) //----------------------------------------------------------------------------- // Block read - if your platform needs to do endian-swapping or can only // handle aligned reads, do the conversion here // // The pointer p may not be aligned, which means that directly reading it can // incur a major performance penalty or even trigger a segfault on certain // architectures (e.g. ARM, SPARC). Thus we use memcpy here, with the implicit // hope that on archs which don't need this, the compiler will optimize it back // into a direct copy (verified to happen with GCC and clang on x86_64) FORCE_INLINE uint64_t getblock8 ( const uint64_t * p, int i ) { uint64_t val; memcpy(&val, p + i, sizeof(uint64_t)); return val; } //----------------------------------------------------------------------------- // Finalization mix - force all bits of a hash block to avalanche FORCE_INLINE uint64_t fmix8 ( uint64_t k ) { k ^= k >> 33; k *= BIG_CONSTANT(0xff51afd7ed558ccd); k ^= k >> 33; k *= BIG_CONSTANT(0xc4ceb9fe1a85ec53); k ^= k >> 33; return k; } void MurmurHash3_x64_128 ( const void * key, const int len, const UInt4 seed, void * out ) { const int nblocks = len / 16; uint64_t h1 = seed; uint64_t h2 = seed; uint64_t c1 = BIG_CONSTANT(0x87c37b91114253d5); uint64_t c2 = BIG_CONSTANT(0x4cf5ad432745937f); //---------- // body const uint64_t * blocks = (const uint64_t *)key; int i; for(i = 0; i < nblocks; i++) { uint64_t k1 = getblock8(blocks,i*2+0); uint64_t k2 = getblock8(blocks,i*2+1); k1 *= c1; k1 = ROTL64(k1,31); k1 *= c2; h1 ^= k1; h1 = ROTL64(h1,27); h1 += h2; h1 = h1*5+0x52dce729; k2 *= c2; k2 = ROTL64(k2,33); k2 *= c1; h2 ^= k2; h2 = ROTL64(h2,31); h2 += h1; h2 = h2*5+0x38495ab5; } //---------- // tail const uint8_t * tail = (const uint8_t*)key + nblocks*16; uint64_t k1 = 0; uint64_t k2 = 0; switch(len & 15) { case 15: k2 ^= (uint64_t)(tail[14]) << 48; case 
14: k2 ^= (uint64_t)(tail[13]) << 40; case 13: k2 ^= (uint64_t)(tail[12]) << 32; case 12: k2 ^= (uint64_t)(tail[11]) << 24; case 11: k2 ^= (uint64_t)(tail[10]) << 16; case 10: k2 ^= (uint64_t)(tail[ 9]) << 8; case 9: k2 ^= (uint64_t)(tail[ 8]) << 0; k2 *= c2; k2 = ROTL64(k2,33); k2 *= c1; h2 ^= k2; case 8: k1 ^= (uint64_t)(tail[ 7]) << 56; case 7: k1 ^= (uint64_t)(tail[ 6]) << 48; case 6: k1 ^= (uint64_t)(tail[ 5]) << 40; case 5: k1 ^= (uint64_t)(tail[ 4]) << 32; case 4: k1 ^= (uint64_t)(tail[ 3]) << 24; case 3: k1 ^= (uint64_t)(tail[ 2]) << 16; case 2: k1 ^= (uint64_t)(tail[ 1]) << 8; case 1: k1 ^= (uint64_t)(tail[ 0]) << 0; k1 *= c1; k1 = ROTL64(k1,31); k1 *= c2; h1 ^= k1; }; //---------- // finalization h1 ^= len; h2 ^= len; h1 += h2; h2 += h1; h1 = fmix8(h1); h2 = fmix8(h2); h1 += h2; h2 += h1; ((uint64_t*)out)[0] = h1; ((uint64_t*)out)[1] = h2; } #endif /**************************************************************************** ** *F FuncHASHKEY_BAG(,,,,) ** ** 'FuncHASHKEY_BAG' implements the internal function 'HASHKEY_BAG'. ** ** 'HASHKEY_BAG( , , , )' ** ** takes a non-immediate object and a small integer and computes a ** hash value for the contents of the bag from these. For this to be usable ** in algorithms, we need that objects of this kind are stored uniquely ** internally. ** The offset and the maximum number of bytes to process both count in ** bytes. The values passed to these parameters might depend on the word ** length of the computer. ** A value of -1 indicates infinity. */ static Obj FuncHASHKEY_BAG(Obj self, Obj obj, Obj seed, Obj offset, Obj maxlen) { Int n; if ( IS_INTOBJ(obj) ) return obj; if ( IS_FFE(obj) ) { /* We must be careful here, as different FFEs can represent equal values (e.g. 0*Z(2^2) and 0*Z(2) compare as equal). 
Thus, we cannot simply use the bit pattern of obj to compute a hash, as a well-defined hash function must satisfy the implication obj1 = obj2 => HASH(obj1) = HASH(obj2) There are different ways to do this for FFEs, with different trade-offs. Instead of making an arbitrary choice here, let's just refuse to compute a hash here, and require the caller to provide a custom hash function tailored to their needs. */ ErrorMayQuit("HASHKEY_BAG: must not be an FFE", 0, 0); } /* check the arguments */ Int s = GetSmallInt("HASHKEY_BAG", seed); Int offs = GetSmallInt("HASHKEY_BAG", offset); if (offs < 0 || offs > SIZE_OBJ(obj)) { ErrorMayQuit("HashKeyBag: must be non-negative and less than " "the bag size", 0, 0); } /* maximal number of bytes to read */ Int imaxlen = GetSmallInt("HASHKEY_BAG", maxlen); n=SIZE_OBJ(obj)-offs; if (n > imaxlen && imaxlen != -1) { n = imaxlen; } return INTOBJ_INT(HASHKEY_BAG_NC(obj, (UInt4)s, offs, (int)n)); } Int HASHKEY_MEM_NC(const void * ptr, UInt4 seed, Int read) { #ifdef SYS_IS_64_BIT UInt8 hashout[2]; MurmurHash3_x64_128(ptr, read, seed, (void *)hashout); return hashout[0] % (1UL << 60); #else UInt4 hashout; MurmurHash3_x86_32(ptr, read, seed, (void *)&hashout); return hashout % (1UL << 28); #endif } Int HASHKEY_BAG_NC(Obj obj, UInt4 seed, Int skip, int read) { return HASHKEY_MEM_NC((const UChar *)CONST_ADDR_OBJ(obj) + skip, seed, read); } Int HASHKEY_WHOLE_BAG_NC(Obj obj, UInt4 seed) { return HASHKEY_BAG_NC(obj, seed, 0, SIZE_OBJ(obj)); } /**************************************************************************** ** *F SmallInt Bitfield operations ** ** The goal here it to set up a division of the usable bits in a small ** integer into fields which can be accessed very quickly from GAP level and ** quickly and conveniently from C. The purpose is to allow implementation ** of data structures that divide up the bits within a word without having ** to make them entirely opaque to the GAP level or ridiculously slow. 
** ** The API is defined in lib/bitfields.gd and works by providing the user ** with a collection of functions to get and set fields and assemble an ** entire word. ** ** These functions are constructed here and have special handlers. The ** information the handlers need about the size and position of the ** bitfields are stored in special fields added after the regular function ** bag fields, and are referred to as MASK_BITFIELD_FUNC and ** OFFSET_BITFIELD_FUNC. ** ** For fields of size 1 we also offer Boolean setters and getters which ** accept and return True for 1 and False for 0. This makes for much nicer ** code on the GAP side. */ typedef struct { FuncBag f; Obj mask; Obj offset; } BitfieldFuncBag; static inline const BitfieldFuncBag * CBFB(Obj func) { return (const BitfieldFuncBag *)CONST_ADDR_OBJ(func); } static inline BitfieldFuncBag * BFB(Obj func) { return (BitfieldFuncBag *)ADDR_OBJ(func); } static inline UInt MASK_BITFIELD_FUNC(Obj func) { GAP_ASSERT(TNUM_OBJ(func) == T_FUNCTION); GAP_ASSERT(SIZE_OBJ(func) == sizeof(BitfieldFuncBag)); return UInt_ObjInt(CBFB(func)->mask); } static inline void SET_MASK_BITFIELD_FUNC(Obj func, UInt mask) { GAP_ASSERT(TNUM_OBJ(func) == T_FUNCTION); GAP_ASSERT(SIZE_OBJ(func) == sizeof(BitfieldFuncBag)); BFB(func)->mask = ObjInt_UInt(mask); } static inline UInt OFFSET_BITFIELD_FUNC(Obj func) { GAP_ASSERT(TNUM_OBJ(func) == T_FUNCTION); GAP_ASSERT(SIZE_OBJ(func) == sizeof(BitfieldFuncBag)); return UInt_ObjInt(CBFB(func)->offset); } static inline void SET_OFFFSET_BITFIELD_FUNC(Obj func, UInt offset) { GAP_ASSERT(TNUM_OBJ(func) == T_FUNCTION); GAP_ASSERT(SIZE_OBJ(func) == sizeof(BitfieldFuncBag)); BFB(func)->offset = ObjInt_UInt(offset); } static Obj DoFieldGetter(Obj self, Obj data) { UInt x = GetSmallInt("Field getter", data); UInt mask = MASK_BITFIELD_FUNC(self); UInt offset = OFFSET_BITFIELD_FUNC(self); return INTOBJ_INT((x & mask) >> offset); } static Obj DoFieldSetter(Obj self, Obj data, Obj val) { UInt x = 
GetSmallInt("Field Setter", data); UInt y = GetSmallInt("Field Setter", val); UInt mask = MASK_BITFIELD_FUNC(self); UInt offset = OFFSET_BITFIELD_FUNC(self); return INTOBJ_INT((x & ~mask) | (y << offset)); } static Obj DoBooleanFieldGetter(Obj self, Obj data) { UInt x = GetSmallInt("Boolean Field getter", data); UInt mask = MASK_BITFIELD_FUNC(self); return (x & mask) ? True : False; } static Obj DoBooleanFieldSetter(Obj self, Obj data, Obj val) { UInt x = GetSmallInt("Boolean Field Setter", data); RequireTrueOrFalse("Boolean Field Setter", val); UInt mask = MASK_BITFIELD_FUNC(self); if (val == True) x |= mask; else if (val == False) x &= ~mask; return INTOBJ_INT(x); } static Obj FuncBUILD_BITFIELDS(Obj self, Obj args) { GAP_ASSERT(IS_PLIST(args)); GAP_ASSERT(LEN_PLIST(args) >= 1 && ELM_PLIST(args, 1)); Obj widths = ELM_PLIST(args, 1); if (!IS_LIST(widths)) ErrorMayQuit("Fields builder: first argument must be list of widths", 0, 0); UInt nfields = LEN_LIST(widths); if (LEN_PLIST(args) != nfields + 1) ErrorMayQuit( "Fields builder: number of values must match number of widths", 0, 0); UInt x = 0; UInt i; for (i = nfields; i > 0; i--) { GAP_ASSERT(ISB_LIST(widths, i)); Obj y = ELM_LIST(widths, i); x <<= INT_INTOBJ(y); GAP_ASSERT(ELM_PLIST(args, i + 1)); Obj z = ELM_PLIST(args, i + 1); if (!IS_INTOBJ(z)) ErrorMayQuit("Fields builder: values must be small integers", 0, 0); GAP_ASSERT(INT_INTOBJ(z) < (1 << INT_INTOBJ(y))); x |= INT_INTOBJ(z); } return INTOBJ_INT(x); } static Obj FuncMAKE_BITFIELDS(Obj self, Obj widths) { if (!IS_LIST(widths)) ErrorMayQuit("MAKE_BITFIELDS: widths must be a list", 0, 0); UInt nfields = LEN_LIST(widths); UInt starts[nfields + 1]; starts[0] = 0; for (UInt i = 1; i <= nfields; i++) { Obj o = ELM_LIST(widths, i); if (!IS_INTOBJ(o)) ErrorMayQuit("MAKE_BITFIELDS: widths must be small integers", 0, 0); UInt width = INT_INTOBJ(o); starts[i] = starts[i - 1] + width; } if (starts[nfields] > 8 * sizeof(UInt)) ErrorMayQuit("MAKE_BITFIELDS: total 
widths too large", 0, 0); Obj nameSetter = MakeImmString(""); Obj nameGetter = MakeImmString(""); Obj nameBSetter = MakeImmString(""); Obj nameBGetter = MakeImmString(""); Obj dataArgs = ArgStringToList("data"); Obj dataValArgs = ArgStringToList("data, val"); Obj setters = NEW_PLIST_IMM(T_PLIST_DENSE, nfields); Obj getters = NEW_PLIST_IMM(T_PLIST_DENSE, nfields); Obj bsetters = NEW_PLIST_IMM(T_PLIST, nfields); UInt bslen = 0; Obj bgetters = NEW_PLIST_IMM(T_PLIST, nfields); for (UInt i = 1; i <= nfields; i++) { UInt mask = (1L << starts[i]) - (1L << starts[i - 1]); Obj s = NewFunctionT(T_FUNCTION, sizeof(BitfieldFuncBag), nameSetter, 2, dataValArgs, DoFieldSetter); SET_MASK_BITFIELD_FUNC(s, mask); SET_OFFFSET_BITFIELD_FUNC(s, starts[i - 1]); SET_ELM_PLIST(setters, i, s); CHANGED_BAG(setters); Obj g = NewFunctionT(T_FUNCTION, sizeof(BitfieldFuncBag), nameGetter, 1, dataArgs, DoFieldGetter); SET_MASK_BITFIELD_FUNC(g, mask); SET_OFFFSET_BITFIELD_FUNC(g, starts[i - 1]); SET_ELM_PLIST(getters, i, g); CHANGED_BAG(getters); if (starts[i] - starts[i - 1] == 1) { s = NewFunctionT(T_FUNCTION, sizeof(BitfieldFuncBag), nameBSetter, 2, dataValArgs, DoBooleanFieldSetter); SET_MASK_BITFIELD_FUNC(s, mask); SET_OFFFSET_BITFIELD_FUNC(s, starts[i - 1]); SET_ELM_PLIST(bsetters, i, s); CHANGED_BAG(bsetters); bslen = i; g = NewFunctionT(T_FUNCTION, sizeof(BitfieldFuncBag), nameBGetter, 1, dataArgs, DoBooleanFieldGetter); SET_MASK_BITFIELD_FUNC(g, mask); SET_OFFFSET_BITFIELD_FUNC(g, starts[i - 1]); SET_ELM_PLIST(bgetters, i, g); CHANGED_BAG(bgetters); } } SET_LEN_PLIST(setters, nfields); SET_LEN_PLIST(getters, nfields); SET_LEN_PLIST(bsetters, bslen); SET_LEN_PLIST(bgetters, bslen); Obj ms = NEW_PREC(5); AssPRec(ms, RNamName("widths"), CopyObj(widths, 0)); AssPRec(ms, RNamName("getters"), getters); AssPRec(ms, RNamName("setters"), setters); if (bslen > 0) { AssPRec(ms, RNamName("booleanGetters"), bgetters); AssPRec(ms, RNamName("booleanSetters"), bsetters); } SortPRecRNam(ms, 0); 
MakeImmutableNoRecurse(ms); return ms; } /**************************************************************************** ** *F * * * * * * * * * * * * * initialize module * * * * * * * * * * * * * * * */ /**************************************************************************** ** *V GVarFuncs . . . . . . . . . . . . . . . . . . list of functions to export */ static StructGVarFunc GVarFuncs[] = { GVAR_FUNC(HASHKEY_BAG, 4, "obj, seed, offset, maxlen"), GVAR_FUNC(InitRandomMT, 1, "initstr"), GVAR_FUNC(MAKE_BITFIELDS, -1, "widths"), GVAR_FUNC(BUILD_BITFIELDS, -2, "widths, vals"), { 0, 0, 0, 0, 0 } }; /**************************************************************************** ** *F InitKernel( ) . . . . . . . . initialise kernel data structures */ static Int InitKernel ( StructInitInfo * module ) { InitHandlerFunc(DoFieldSetter, "field-setter"); InitHandlerFunc(DoFieldGetter, "field-getter"); InitHandlerFunc(DoBooleanFieldSetter, "boolean-field-setter"); InitHandlerFunc(DoBooleanFieldGetter, "boolean-field-getter"); /* init filters and functions */ InitHdlrFuncsFromTable( GVarFuncs ); return 0; } /**************************************************************************** ** *F InitLibrary( ) . . . . . . . initialise library data structures */ static Int InitLibrary ( StructInitInfo * module ) { /* init filters and functions */ InitGVarFuncsFromTable( GVarFuncs ); /* return success */ return 0; } /**************************************************************************** ** *F InitInfoIntFuncs() . . . . . . . . . . . . . . . . . . table of init functions */ static StructInitInfo module = { // init struct using C99 designated initializers; for a full list of // fields, please refer to the definition of StructInitInfo .type = MODULE_BUILTIN, .name = "intfuncs", .initKernel = InitKernel, .initLibrary = InitLibrary, }; StructInitInfo * InitInfoIntFuncs ( void ) { return &module; }