/*  Part of SWI-Prolog

    Author:        Jan Wielemaker
    E-mail:        J.Wielemaker@vu.nl
    WWW:           http://www.swi-prolog.org
    Copyright (c)  2008-2020, University of Amsterdam
			      CWI, Amsterdam
    All rights reserved.

    Redistribution and use in source and binary forms, with or without
    modification, are permitted provided that the following conditions
    are met:

    1. Redistributions of source code must retain the above copyright
       notice, this list of conditions and the following disclaimer.

    2. Redistributions in binary form must reproduce the above copyright
       notice, this list of conditions and the following disclaimer in
       the documentation and/or other materials provided with the
       distribution.

    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
    "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
    LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
    FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
    COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
    INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
    BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
    LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
    CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
    LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
    ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
    POSSIBILITY OF SUCH DAMAGE.
*/

#ifndef PL_INLINE_H_INCLUDED
#define PL_INLINE_H_INCLUDED
#undef LD
#define LD LOCAL_LD

#ifdef __WINDOWS__
  #include <windows.h>
  #undef small
  #include <intrin.h>
  #ifdef _MSC_VER
    #if SIZEOF_VOIDP == 8
      #pragma intrinsic(_BitScanReverse64)
    #endif
    #pragma intrinsic(_BitScanReverse)
  #endif
#endif

		 /*******************************
		 *	 LOCK-FREE SUPPORT	*
		 *******************************/

#ifdef _MSC_VER
#define LL(x) x ## i64
#else
#define LL(x) x ## LL
#endif

#ifdef _MSC_VER				/* Windows MSVC version */

/* MSB(0) = undefined
   MSB(1) = 0
   MSB(2) = 1
   ...
*/

#define HAVE_MSB 1
static inline int
MSB(size_t i)
{ unsigned long index;
#if SIZEOF_VOIDP == 8
  unsigned __int64 mask = i;
  _BitScanReverse64(&index, mask);
#else
  unsigned long mask = i;
  _BitScanReverse(&index, mask);
#endif

  return index;
}
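
/* Note: _BitScanReverse()/_BitScanReverse64() return 0 and leave `index`
   undefined when the argument is 0, which is why MSB(0) is undefined
   here. */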

#if SIZEOF_VOIDP == 8
#define HAVE_MSB64 1
static inline int
MSB64(int64_t i)
{ unsigned long index;
  _BitScanReverse64(&index, i);
  return index;
}
#endif

#define MEMORY_ACQUIRE() MemoryBarrier()
#define MEMORY_RELEASE() MemoryBarrier()
#define MEMORY_BARRIER() MemoryBarrier()

static inline size_t
__builtin_popcount(size_t sz)
{
#if SIZEOF_VOIDP == 4
  return __popcnt(sz);
#else
  return __popcnt64(sz);
#endif
}

#endif /*_MSC_VER*/

#if !defined(HAVE_MSB) && defined(HAVE__BUILTIN_CLZ)
#if SIZEOF_VOIDP == SIZEOF_LONG
#define MSB(i) ((int)sizeof(long)*8-1-__builtin_clzl(i)) /* GCC builtin */
#define HAVE_MSB 1
#elif SIZEOF_VOIDP == SIZEOF_LONG_LONG
#define MSB(i) ((int)sizeof(long long)*8-1-__builtin_clzll(i)) /* GCC builtin */
#define HAVE_MSB 1
#endif
#define HAVE_MSB64 1
#define MSB64(i) ((int)sizeof(long long)*8-1-__builtin_clzll(i))
#endif
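
/* For example, on a 64-bit build MSB((size_t)0x10) == 4 and
   MSB((size_t)1<<40) == 40.  As with the MSVC version above, MSB(0) is
   undefined because __builtin_clz*(0) is undefined. */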

#ifdef HAVE_GCC_ATOMIC
#define MEMORY_ACQUIRE()	__atomic_thread_fence(__ATOMIC_ACQUIRE)
#define MEMORY_RELEASE()	__atomic_thread_fence(__ATOMIC_RELEASE)
#define MEMORY_BARRIER()	__atomic_thread_fence(__ATOMIC_SEQ_CST)
#endif

#ifdef O_PLMT
#define ATOMIC_ADD(ptr, v)	__atomic_add_fetch(ptr, v, __ATOMIC_SEQ_CST)
#define ATOMIC_SUB(ptr, v)	__atomic_sub_fetch(ptr, v, __ATOMIC_SEQ_CST)
#define ATOMIC_INC(ptr)		ATOMIC_ADD(ptr, 1) /* ++(*ptr) */
#define ATOMIC_DEC(ptr)		ATOMIC_SUB(ptr, 1) /* --(*ptr) */
#define ATOMIC_OR(ptr, v)	__atomic_fetch_or(ptr, v, __ATOMIC_SEQ_CST)
#define ATOMIC_AND(ptr, v)	__atomic_fetch_and(ptr, v, __ATOMIC_SEQ_CST)

#define __COMPARE_AND_SWAP(at, from, to) \
	__atomic_compare_exchange_n(at, &(from), to, FALSE, \
				    __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)

static inline int
COMPARE_AND_SWAP_PTR(void *at, void *from, void *to)
{ void **ptr = at;

  return __COMPARE_AND_SWAP(ptr, from, to);
}

static inline int
COMPARE_AND_SWAP_INT64(int64_t *at, int64_t from, int64_t to)
{ return __COMPARE_AND_SWAP(at, from, to);
}

static inline int
COMPARE_AND_SWAP_UINT64(uint64_t *at, uint64_t from, uint64_t to)
{ return __COMPARE_AND_SWAP(at, from, to);
}

static inline int
COMPARE_AND_SWAP_INT(int *at, int from, int to)
{ return __COMPARE_AND_SWAP(at, from, to);
}

static inline int
COMPARE_AND_SWAP_UINT(unsigned int *at, unsigned int from, unsigned int to)
{ return __COMPARE_AND_SWAP(at, from, to);
}

static inline int
COMPARE_AND_SWAP_SIZE(size_t *at, size_t from, size_t to)
{ return __COMPARE_AND_SWAP(at, from, to);
}

static inline int
COMPARE_AND_SWAP_WORD(word *at, word from, word to)
{ return __COMPARE_AND_SWAP(at, from, to);
}
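
/* Each wrapper returns TRUE iff *at still held `from` and was atomically
   replaced by `to`.  A typical (hypothetical) retry loop would look like:

     size_t old, new;
     do
     { old = counter;			// `counter` is an example variable
       new = old + 1;
     } while ( !COMPARE_AND_SWAP_SIZE(&counter, old, new) );
*/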

#else
#define ATOMIC_ADD(ptr, v)		(*ptr += v)
#define ATOMIC_SUB(ptr, v)		(*ptr -= v)
#define ATOMIC_INC(ptr)			(++(*ptr))
#define ATOMIC_DEC(ptr)			(--(*ptr))
#define ATOMIC_OR(ptr, v)		(*ptr |= v)
#define ATOMIC_AND(ptr, v)		(*ptr &= v)
#define COMPARE_AND_SWAP(ptr,o,n)	(*ptr == o ? (*ptr = n), 1 : 0)
#define COMPARE_AND_SWAP_PTR(ptr,o,n)	COMPARE_AND_SWAP(ptr,o,n)
#define COMPARE_AND_SWAP_INT64(ptr,o,n)	COMPARE_AND_SWAP(ptr,o,n)
#define COMPARE_AND_SWAP_UINT64(ptr,o,n) COMPARE_AND_SWAP(ptr,o,n)
#define COMPARE_AND_SWAP_INT(ptr,o,n)	COMPARE_AND_SWAP(ptr,o,n)
#define COMPARE_AND_SWAP_UINT(ptr,o,n)	COMPARE_AND_SWAP(ptr,o,n)
#define COMPARE_AND_SWAP_SIZE(ptr,o,n)	COMPARE_AND_SWAP(ptr,o,n)
#define COMPARE_AND_SWAP_WORD(ptr,o,n)	COMPARE_AND_SWAP(ptr,o,n)
#endif

#ifndef HAVE_MSB
#define HAVE_MSB 1
static inline int
MSB(size_t i)
{ int j = 0;

#if SIZEOF_VOIDP == 8
  if (i >= 0x100000000) {i >>= 32; j += 32;}
#endif
  if (i >=     0x10000) {i >>= 16; j += 16;}
  if (i >=       0x100) {i >>=  8; j +=  8;}
  if (i >=        0x10) {i >>=  4; j +=  4;}
  if (i >=         0x4) {i >>=  2; j +=  2;}
  if (i >=         0x2) j++;

  return j;
}
#endif
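
/* This portable fallback finds the most significant set bit by a binary
   search over halves of the word.  For example, MSB(0x500): 0x500 < 0x10000,
   0x500 >= 0x100 so shift by 8 (j=8, i=0x5), 0x5 >= 0x4 so shift by 2
   (j=10, i=0x1), giving 10 -- the index of the top bit of 0x500. */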


#ifndef HAVE_MSB64
#define HAVE_MSB64 1
static inline int
MSB64(int64_t i)
{ int j = 0;

  if (i >= LL(0x100000000)) {i >>= 32; j += 32;}
  if (i >=     LL(0x10000)) {i >>= 16; j += 16;}
  if (i >=       LL(0x100)) {i >>=  8; j +=  8;}
  if (i >=        LL(0x10)) {i >>=  4; j +=  4;}
  if (i >=         LL(0x4)) {i >>=  2; j +=  2;}
  if (i >=         LL(0x2)) j++;

  return j;
}
#endif


#ifndef MEMORY_BARRIER
#define MEMORY_BARRIER() (void)0
#define MEMORY_ACQUIRE() (void)0
#define MEMORY_RELEASE() (void)0
#endif

		 /*******************************
		 *	 ATOMS/FUNCTORS		*
		 *******************************/

static inline void
initAtoms(void)
{ if ( !likely(GD->atoms.initialised) )
    do_init_atoms();
}

static inline Atom
fetchAtomArray(size_t index)
{ int idx = MSB(index);

  return &GD->atoms.array.blocks[idx][index];
}


static inline FunctorDef
fetchFunctorArray(size_t index)
{ int idx = MSB(index);

  return GD->functors.array.blocks[idx][index];
}
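
/* Atoms and functors live in arrays built from blocks that double in size.
   The block holding a given index is blocks[MSB(index)]; each block pointer
   is (presumably) biased at allocation time so that the global index can be
   used to subscript it directly, avoiding a subtraction here. */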

static inline void
pushVolatileAtom__LD(atom_t a ARG_LD)
{ LD->atoms.unregistering = a;
  if ( GD->atoms.gc_active )
    markAtom(a);
}

#define pushVolatileAtom(a) pushVolatileAtom__LD(a PASS_LD)


		 /*******************************
		 *	     BITVECTOR		*
		 *******************************/

typedef unsigned int bitv_chunk;
typedef struct bit_vector
{ size_t size;
  bitv_chunk chunk[1];				/* bits */
} bit_vector;
#define BITSPERE (sizeof(bitv_chunk)*8)

#ifndef offset
#define offset(s, f) ((size_t)(&((struct s *)NULL)->f))
#endif

static inline size_t
sizeof_bitvector(size_t bits)
{ return offset(bit_vector, chunk[(bits+BITSPERE-1)/BITSPERE]);
}

static inline void
init_bitvector(bit_vector *v, size_t bits)
{ size_t bytes = offset(bit_vector, chunk[(bits+BITSPERE-1)/BITSPERE]);

  memset(v, 0, bytes);
  v->size = bits;
}

static inline bit_vector *
new_bitvector(size_t size)
{ size_t bytes = offset(bit_vector, chunk[(size+BITSPERE-1)/BITSPERE]);
  bit_vector *v = allocHeapOrHalt(bytes);

  memset(v, 0, bytes);
  v->size = size;
  return v;
}

static inline void
free_bitvector(bit_vector *v)
{ size_t bytes = offset(bit_vector, chunk[(v->size+BITSPERE-1)/BITSPERE]);

  freeHeap(v, bytes);
}

static inline void
clear_bitvector(bit_vector *v)
{ size_t chunks = (v->size+BITSPERE-1)/BITSPERE;

  memset(v->chunk, 0, chunks*sizeof(bitv_chunk));
}

static inline void
setall_bitvector(bit_vector *v)
{ size_t chunks = (v->size+BITSPERE-1)/BITSPERE;

  memset(v->chunk, 0xff, chunks*sizeof(bitv_chunk));
}

static inline void
set_bit(bit_vector *v, size_t which)
{ size_t e = which/BITSPERE;
  size_t b = which%BITSPERE;

  v->chunk[e] |= ((bitv_chunk)1<<b);
}

static inline void
clear_bit(bit_vector *v, size_t which)
{ size_t e = which/BITSPERE;
  size_t b = which%BITSPERE;

  v->chunk[e] &= ~((bitv_chunk)1<<b);
}

static inline int
true_bit(bit_vector *v, size_t which)
{ size_t e = which/BITSPERE;
  size_t b = which%BITSPERE;

  return (v->chunk[e]&((bitv_chunk)1<<b)) != 0;
}

static inline size_t
popcount_bitvector(const bit_vector *v)
{ const bitv_chunk *p = v->chunk;
  int cnt = (int)(v->size+BITSPERE-1)/BITSPERE;
  size_t bits = 0;

  while( cnt-- > 0 )
    bits += __builtin_popcount(*p++);

  return bits;
}
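
/* Sketch of typical use of this bit-vector API (names local to the example):

     bit_vector *v = new_bitvector(256);	// all bits initially 0
     set_bit(v, 42);
     if ( true_bit(v, 42) )			// -> TRUE
       clear_bit(v, 42);
     size_t n = popcount_bitvector(v);		// number of set bits
     free_bitvector(v);
*/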


		 /*******************************
		 *	     MISC STUFF		*
		 *******************************/

static int	  same_type_numbers(Number n1, Number n2) WUNUSED;
static Definition lookupDefinition(functor_t f, Module m) WUNUSED;

static inline int
same_type_numbers(Number n1, Number n2)
{ if ( n1->type == n2->type )
    return TRUE;
  return make_same_type_numbers(n1, n2);
}


static inline Definition
lookupDefinition(functor_t f, Module m)
{ Procedure proc = lookupProcedure(f, m);

  return proc ? proc->definition : NULL;
}


/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Mark() sets LD->mark_bar, indicating that any assignment above this value
need not be trailed.

Note that the local stack is always _above_ the global stack.
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */

static inline void
Trail__LD(Word p, word v ARG_LD)
{ DEBUG(CHK_SECURE, assert(tTop+1 <= tMax));

  if ( (void*)p >= (void*)lBase || p < LD->mark_bar )
    (tTop++)->address = p;
  *p = v;
}
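
/* A binding is trailed unconditionally when the cell lives on the local
   stack (p >= lBase; the local stack sits above the global stack).
   Global-stack cells are trailed only when they lie below the mark bar
   (p < LD->mark_bar): cells created after the most recent mark are
   reclaimed on backtracking anyway, so trailing them is unnecessary. */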


static inline void
bindConst__LD(Word p, word c ARG_LD)
{ DEBUG(0, assert(hasGlobalSpace(0)));

#ifdef O_ATTVAR
  if ( isVar(*p) )
  { *p = (c);
    if ( (void*)p >= (void*)lBase || p < LD->mark_bar )
      (tTop++)->address = p;
  } else
  { assignAttVar(p, &(c) PASS_LD);
  }
#else
  *p = (c);
  if ( (void*)p >= (void*)lBase || p < LD->mark_bar )
    (tTop++)->address = p;
#endif
}


static inline word
consPtr__LD(void *p, word ts ARG_LD)
{ uintptr_t v = (uintptr_t) p;

  v -= LD->bases[ts&STG_MASK];
  DEBUG(CHK_SECURE, assert(v < MAXTAGGEDPTR && !(v&0x3)));
  return (v<<5)|ts;
}
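
/* consPtr__LD() turns a pointer into a tagged word: the base of the storage
   area selected by the STG_* bits of `ts` is subtracted and the resulting
   offset is shifted left by 5 to make room for the tag and storage bits in
   `ts` (hence the assertion that the offset fits and is 4-byte aligned). */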


#if ALIGNOF_DOUBLE != ALIGNOF_VOIDP
static inline double
valFloat__LD(word w ARG_LD)
{ Word p = valIndirectP(w);
  double d;

  memcpy(&d, p, sizeof(d));
  return d;
}
#endif

static inline int
is_signalled(ARG1_LD)
{ return HAS_LD && unlikely((LD->signal.pending[0]|LD->signal.pending[1]) != 0);
}

static inline void
register_attvar(Word gp ARG_LD)
{ if ( LD->attvar.attvars )
  { *gp = makeRefG(LD->attvar.attvars);
    DEBUG(MSG_ATTVAR_LINK,
	  Sdprintf("Linking %p -> %p\n", gp, LD->attvar.attvars));
  } else
  { DEBUG(MSG_ATTVAR_LINK,
	  Sdprintf("Attvar chain head at %p\n", gp));
    setVar(*gp);
  }

  LD->attvar.attvars = gp;
}

static inline int
visibleClause__LD(Clause cl, gen_t gen ARG_LD)
{ return VISIBLE_CLAUSE(cl, gen);
}

static inline int
visibleClauseCNT__LD(Clause cl, gen_t gen ARG_LD)
{ if ( likely(visibleClause__LD(cl, gen PASS_LD)) )
    return TRUE;
  LD->clauses.erased_skipped++;
  return FALSE;
}

#ifdef ATOMIC_GENERATION_HACK
/* Work around the lack of 64-bit atomic operations.  These are designed
   to be safe provided a read and an increment complete before other
   threads have incremented the generation another 4G (2^32) times.
*/

static inline gen_t
global_generation(void)
{ gen_t g;
  gen_t last;

  do
  { last = GD->_last_generation;
    g = (gen_t)GD->_generation.gen_u<<32 | GD->_generation.gen_l;
  } while ( unlikely(g < last) );

  if ( unlikely(last != g) )
    GD->_last_generation = g;

  return g;
}

static inline gen_t
next_global_generation(void)
{ uint32_t u = GD->_generation.gen_u;
  uint32_t l;

  if ( unlikely((l=ATOMIC_INC(&GD->_generation.gen_l)) == 0) )
    u = ATOMIC_INC(&GD->_generation.gen_u);

  return (gen_t)u<<32|l;
}
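
/* The generation is split into two 32-bit halves that are updated with
   32-bit atomics: next_global_generation() bumps the low word and, when it
   wraps to 0, bumps the high word as well.  global_generation() re-reads
   until the composed 64-bit value is no smaller than the last value it
   handed out, so a reader never observes the counter going backwards while
   the two halves are being updated. */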

#else /*ATOMIC_GENERATION_HACK*/

static inline gen_t
global_generation(void)
{ return GD->_generation;
}

static inline gen_t
next_global_generation(void)
{ return ATOMIC_INC(&GD->_generation);
}

#endif /*ATOMIC_GENERATION_HACK*/

/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
We must ensure that cleanDefinition() does not remove clauses that are
valid for the generation in the frame.  This means that
pl_garbage_collect_clauses() must either pick up the generation from this
frame using markPredicatesInEnvironments(), or the start generation of
pl_garbage_collect_clauses() must be older than what is stored in this
frame.  The loop below ensures that if CGC has been running between
calling global_generation() and storing the generation in our frame, our
generation is updated and thus no harm is done.
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */

static inline void
setGenerationFrame__LD(LocalFrame fr ARG_LD)
{
#ifdef O_LOGICAL_UPDATE
  gen_t gen;

  do
  { gen = global_generation();
    setGenerationFrameVal(fr, gen);
  } while(gen != global_generation());
#endif
}

static inline int
ensureLocalSpace__LD(size_t bytes ARG_LD)
{ int rc;

  if ( likely(addPointer(lTop, bytes) <= (void*)lMax) )
    return TRUE;

  if ( (rc=growLocalSpace__LD(bytes, ALLOW_SHIFT PASS_LD)) == TRUE )
    return TRUE;

  return raiseStackOverflow(rc);
}

static inline int
ensureStackSpace__LD(size_t gcells, size_t tcells, int flags ARG_LD)
{ gcells += BIND_GLOBAL_SPACE;
  tcells += BIND_TRAIL_SPACE;

  if ( likely(gTop+gcells <= gMax) && likely(tTop+tcells <= tMax) )
    return TRUE;

  return f_ensureStackSpace__LD(gcells, tcells, flags PASS_LD);
}


		 /*******************************
		 *	      THREADS		*
		 *******************************/

#ifdef O_PLMT
static inline PL_local_data_t *
acquire_ldata__LD(PL_thread_info_t *info ARG_LD)
{ PL_local_data_t *ld = info->thread_data;
  LD->thread.info->access.ldata = ld;
  if ( ld && ld->magic == LD_MAGIC )
    return ld;
  LD->thread.info->access.ldata = NULL;
  return NULL;
}
#endif


		 /*******************************
		 *     POINTER <-> PROLOG INT	*
		 *******************************/

/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Pointers are not a special type in Prolog.  Instead, they are represented
as integers.  The functions below convert a pointer such that it can
normally be expressed as a tagged integer: on 32-bit systems the value is
divided by 4 and its low 2 bits (normally 0) are rotated to the top; on
64-bit systems the pointer is used as-is.  intToPointer() performs the
inverse operation.
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */

static inline uintptr_t
pointerToInt(void *ptr)
{
#if SIZEOF_VOIDP == 8
  return (uintptr_t) ptr;
#else
  uintptr_t p   = (uintptr_t) ptr;
  uintptr_t low = p & 0x3L;

  p >>= 2;
  p |= low<<(sizeof(uintptr_t)*8-2);

  return p;
#endif
}


static inline void *
intToPointer(uintptr_t p)
{
#if SIZEOF_VOIDP == 8
  return (void*) p;
#else
  uintptr_t low = p >> (sizeof(uintptr_t)*8-2);

  p <<= 2;
  p |= low;

  return (void *) p;
#endif
}
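
/* For example, on a (hypothetical) 32-bit system the aligned pointer
   0x20000008 has low bits 00, so pointerToInt() yields 0x08000002
   (0x20000008 >> 2) and intToPointer() shifts it back to 0x20000008.
   Because the low two bits of an aligned pointer are 0, the result
   usually fits in a tagged Prolog integer. */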

#endif /*PL_INLINE_H_INCLUDED*/