1 /* alloc.c
2 * Copyright 1984-2017 Cisco Systems, Inc.
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #include "system.h"
18 #include "popcount.h"
19
20 /* locally defined functions */
21 static void maybe_queue_fire_collector(thread_gc *tgc);
22
S_alloc_init()23 void S_alloc_init() {
24 ISPC s; IGEN g; UINT i;
25
26 if (S_boot_time) {
27 ptr tc = TO_PTR(S_G.thread_context);
28
29 GCDATA(tc) = TO_PTR(&S_G.main_thread_gc);
30 S_G.main_thread_gc.tc = tc;
31
32 /* reset the allocation tables */
33 for (g = 0; g <= static_generation; g++) {
34 S_G.bytes_of_generation[g] = 0;
35 for (s = 0; s <= max_real_space; s++) {
36 S_G.main_thread_gc.base_loc[g][s] = FIX(0);
37 S_G.main_thread_gc.next_loc[g][s] = FIX(0);
38 S_G.main_thread_gc.bytes_left[g][s] = 0;
39 S_G.main_thread_gc.sweep_next[g][s] = NULL;
40 S_G.bytes_of_space[g][s] = 0;
41 }
42 }
43
44 /* initialize the dirty-segment lists. */
45 for (i = 0; i < DIRTY_SEGMENT_LISTS; i += 1) {
46 S_G.dirty_segments[i] = NULL;
47 }
48
49 S_G.collect_trip_bytes = default_collect_trip_bytes;
50 S_G.g0_bytes_after_last_gc = 0;
51
52 /* set to final value in prim.c when known */
53 S_protect(&S_G.nonprocedure_code);
54 S_G.nonprocedure_code = FIX(0);
55
56 S_protect(&S_G.null_vector);
57 find_room(tc, space_new, 0, type_typed_object, size_vector(0), S_G.null_vector);
58 VECTTYPE(S_G.null_vector) = (0 << vector_length_offset) | type_vector;
59
60 S_protect(&S_G.null_fxvector);
61 find_room(tc, space_new, 0, type_typed_object, size_fxvector(0), S_G.null_fxvector);
62 FXVECTOR_TYPE(S_G.null_fxvector) = (0 << fxvector_length_offset) | type_fxvector;
63
64 S_protect(&S_G.null_flvector);
65 find_room(tc, space_new, 0, type_typed_object, size_flvector(0), S_G.null_flvector);
66 FXVECTOR_TYPE(S_G.null_flvector) = (0 << flvector_length_offset) | type_flvector;
67
68 S_protect(&S_G.null_bytevector);
69 find_room(tc, space_new, 0, type_typed_object, size_bytevector(0), S_G.null_bytevector);
70 BYTEVECTOR_TYPE(S_G.null_bytevector) = (0 << bytevector_length_offset) | type_bytevector;
71
72 S_protect(&S_G.null_string);
73 find_room(tc, space_new, 0, type_typed_object, size_string(0), S_G.null_string);
74 STRTYPE(S_G.null_string) = (0 << string_length_offset) | type_string;
75
76 S_protect(&S_G.null_immutable_vector);
77 find_room(tc, space_new, 0, type_typed_object, size_vector(0), S_G.null_immutable_vector);
78 VECTTYPE(S_G.null_immutable_vector) = (0 << vector_length_offset) | type_vector | vector_immutable_flag;
79
80 S_protect(&S_G.null_immutable_bytevector);
81 find_room(tc, space_new, 0, type_typed_object, size_bytevector(0), S_G.null_immutable_bytevector);
82 BYTEVECTOR_TYPE(S_G.null_immutable_bytevector) = (0 << bytevector_length_offset) | type_bytevector | bytevector_immutable_flag;
83
84 S_protect(&S_G.null_immutable_string);
85 find_room(tc, space_new, 0, type_typed_object, size_string(0), S_G.null_immutable_string);
86 STRTYPE(S_G.null_immutable_string) = (0 << string_length_offset) | type_string | string_immutable_flag;
87
88 S_protect(&S_G.zero_length_bignum);
89 S_G.zero_length_bignum = S_bignum(tc, 0, 0);
90 }
91 }
92
/* Register *p as a GC root: *p is reset to nil and its address recorded
   in the protected array, so the collector traces and relocates it.
   NOTE(review): the bound check uses '>' and then stores at index
   protect_next, which assumes S_G.protected has room for at least
   max_protected + 1 entries -- confirm against its declaration. */
void S_protect(p) ptr *p; {
  if (S_G.protect_next > max_protected)
    S_error_abort("max_protected constant too small");
  *p = snil;
  S_G.protected[S_G.protect_next++] = p;
}
99
/* Install a Scheme stack of at least n bytes in thread context tc,
   reusing a cached stack when one is large enough, otherwise
   allocating a fresh one.  Also sets the stack size, ESP, and SFP. */
void S_reset_scheme_stack(tc, n) ptr tc; iptr n; {
  ptr *x; iptr m;

  /* we allow less than one_shot_headroom here for no truly justifiable
     reason */
  n = ptr_align(n + (one_shot_headroom >> 1));

  x = &STACKCACHE(tc);
  for (;;) {
    if (*x == snil) {
      /* cache exhausted: allocate a new stack of at least the default size */
      if (n < default_stack_size) n = default_stack_size;
      /* stacks are untyped objects */
      find_room(tc, space_new, 0, type_untyped, n, SCHEMESTACK(tc));
      break;
    }
    if ((m = CACHEDSTACKSIZE(*x)) >= n) {
      n = m;
      SCHEMESTACK(tc) = *x;
/* if we decide to leave KEEPSMALLPUPPIES undefined permanently, we should
   rewrite this code to remove the indirect on x */
/* #define KEEPSMALLPUPPIES */
#ifdef KEEPSMALLPUPPIES
      *x = CACHEDSTACKLINK(*x);
#else
      /* drop the smaller cached stacks that preceded the one we took */
      STACKCACHE(tc) = CACHEDSTACKLINK(*x);
#endif
      break;
    }
    x = &CACHEDSTACKLINK(*x);
  }
  SCHEMESTACKSIZE(tc) = n;
  ESP(tc) = (ptr)((uptr)SCHEMESTACK(tc) + n - stack_slop);
  SFP(tc) = (ptr)SCHEMESTACK(tc);
}
134
/* Return, as a Scheme unsigned, the bytes currently allocated in
   generation xg and space xs.  A negative fixnum for xg means all
   generations (including static); a negative fixnum for xs means all
   real spaces.  Takes both the tc and allocation mutexes. */
ptr S_compute_bytes_allocated(xg, xs) ptr xg; ptr xs; {
  ptr tc = get_thread_context();
  ISPC s, smax, smin; IGEN g, gmax, gmin;
  uptr n;

  tc_mutex_acquire();
  alloc_mutex_acquire();

  gmin = (IGEN)UNFIX(xg);
  if (gmin < 0) {
    gmin = 0;
    gmax = static_generation;
  } else if (gmin == S_G.new_max_nonstatic_generation) {
    /* include virtual inhabitants too */
    gmax = S_G.max_nonstatic_generation;
  } else {
    gmax = gmin;
  }

  smin = (ISPC)(UNFIX(xs));
  smax = smin < 0 ? max_real_space : smin;
  smin = smin < 0 ? 0 : smin;

  n = 0;

  g = gmin;
  while (g <= gmax) {
    n += S_G.bytesof[g][countof_phantom];
    for (s = smin; s <= smax; s++) {
      ptr next_loc;
      /* add in bytes previously recorded */
      n += S_G.bytes_of_space[g][s];
      /* add in bytes in active segments */
      next_loc = THREAD_GC(tc)->next_loc[g][s];
      if (next_loc != FIX(0))
        n += (uptr)next_loc - (uptr)THREAD_GC(tc)->base_loc[g][s];
      if (s == space_data) {
        /* don't count space used for bitmasks */
        n -= S_G.bitmask_overhead[g];
      }
    }
    /* after the last nonstatic generation, hop to the static generation */
    if (g == S_G.max_nonstatic_generation)
      g = static_generation;
    else
      g += 1;
  }

  /* subtract off bytes not allocated */
  if (gmin == 0 && smin <= space_new && space_new <= smax)
    n -= (uptr)REAL_EAP(tc) - (uptr)AP(tc);

  alloc_mutex_release();
  tc_mutex_release();

  return Sunsigned(n);
}
191
/* Return, as a Scheme unsigned, the running count of finalized bytes. */
ptr S_bytes_finalized() {
  return Sunsigned(S_G.bytes_finalized);
}
195
196 /* called with alloc mutex */
/* Queue a collection request on tgc when generation-0 growth (including
   phantom bytes) since the last GC reaches the collect-trip threshold. */
static void maybe_queue_fire_collector(thread_gc *tgc) {
  if ((S_G.bytes_of_generation[0] + S_G.bytesof[0][countof_phantom]) - S_G.g0_bytes_after_last_gc >= S_G.collect_trip_bytes)
    tgc->queued_fire = 1;
}
201
/* Fire a previously queued collection request, but only when not in the
   middle of an allocation and the mutex ownership state permits it. */
void S_maybe_fire_collector(thread_gc *tgc) {
  if ((tgc->during_alloc == 0) && (!IS_ALLOC_MUTEX_OWNER() || IS_TC_MUTEX_OWNER())) {
    if (tgc->queued_fire) {
      tgc->queued_fire = 0;
      S_fire_collector();
    }
  }
}
210
211 /* allocation mutex must be held (or single-threaded guaranteed because collecting) */
/* Close off the partial allocation region ending at `old`: credit its
   bytes to the space/generation totals, lay down an end marker, and
   push its segment onto the sweep list for a possible in-progress GC.
   No-op when base_loc is 0 (no active region). */
static void close_off_segment(thread_gc *tgc, ptr old, ptr base_loc, ptr sweep_loc, ISPC s, IGEN g)
{
  if (base_loc) {
    seginfo *si;
    uptr bytes = (uptr)old - (uptr)base_loc;

    /* increment bytes_allocated by the closed-off partial segment */
    S_G.bytes_of_space[g][s] += bytes;
    S_G.bytes_of_generation[g] += bytes;

    /* lay down an end-of-segment marker */
    *(ptr*)TO_VOIDP(old) = forward_marker;

    /* in case this is during a GC, add to sweep list */
    si = SegInfo(addr_get_segment(base_loc));
    si->sweep_start = sweep_loc;
#if defined(WRITE_XOR_EXECUTE_CODE)
    si->sweep_bytes = bytes;
#endif
    si->sweep_next = tgc->sweep_next[g][s];
    tgc->sweep_next[g][s] = si;
  }
}
235
/* Allocate a fresh block of segments for space s, generation g, close
   off the current partial region (whose allocation pointer is `old`),
   and return a pointer to n bytes at the start of the new block.
   Takes the allocation mutex; may queue/fire a collection request. */
ptr S_find_more_gc_room(thread_gc *tgc, ISPC s, IGEN g, iptr n, ptr old) {
  iptr nsegs, seg;
  ptr new;
  iptr new_bytes;

  alloc_mutex_acquire();

  close_off_segment(tgc, old, tgc->base_loc[g][s], tgc->sweep_loc[g][s], s, g);

  /* bracket the allocation so a queued fire isn't raised re-entrantly */
  tgc->during_alloc += 1;

  /* round the request (plus tail padding) up to whole segments */
  nsegs = (uptr)(n + allocation_segment_tail_padding + bytes_per_segment - 1) >> segment_offset_bits;

  /* block requests to minimize fragmentation and improve cache locality */
  if (s == space_code && nsegs < 16) nsegs = 16;

  seg = S_find_segments(tgc, s, g, nsegs);
  new = build_ptr(seg, 0);

  new_bytes = nsegs * bytes_per_segment;

  tgc->base_loc[g][s] = new;
  tgc->sweep_loc[g][s] = new;
  tgc->bytes_left[g][s] = (new_bytes - n) - allocation_segment_tail_padding;
  tgc->next_loc[g][s] = (ptr)((uptr)new + n);

#if defined(WRITE_XOR_EXECUTE_CODE)
  if (s == space_code) {
    /* Ensure allocated code segments are writable. The caller should
       already have bracketed the writes with calls to start and stop
       so there is no need for a stop here. */
    S_thread_start_code_write(tgc->tc, 0, 1, NULL, 0);
  }
#endif

  /* only the outermost allocation level queues a collection request */
  if (tgc->during_alloc == 1) maybe_queue_fire_collector(tgc);

  tgc->during_alloc -= 1;

  alloc_mutex_release();
  S_maybe_fire_collector(tgc);

  return new;
}
280
281 /* allocation mutex must be held (or single-threaded guaranteed because collecting) */
/* Close off tc's active region for space s, generation g, and clear all
   of the thread-local allocation state for that space/generation. */
void S_close_off_thread_local_segment(ptr tc, ISPC s, IGEN g) {
  thread_gc *tgc = THREAD_GC(tc);

  close_off_segment(tgc, tgc->next_loc[g][s], tgc->base_loc[g][s], tgc->sweep_loc[g][s], s, g);

  tgc->base_loc[g][s] = (ptr)0;
  tgc->bytes_left[g][s] = 0;
  tgc->next_loc[g][s] = (ptr)0;
  tgc->sweep_loc[g][s] = (ptr)0;
  tgc->sweep_next[g][s] = NULL;
}
293
294 /* S_reset_allocation_pointer is always called with allocation mutex
295 (or single-threaded guaranteed because collecting) */
296 /* We always allocate exactly one segment for the allocation area, since
297 we can get into hot water with formerly locked objects, specifically
298 symbols and impure records, that cross segment boundaries. This allows
299 us to maintain the invariant that no object crosses a segment boundary
300 unless it starts on a segment boundary (and is thus at least one
301 segment long). NB. This invariant does not apply to code objects
302 since we grab large blocks of segments for them.
303 */
304
/* Give tc a fresh one-segment generation-0 allocation area and set
   AP/EAP/REAL_EAP accordingly (see the invariant comment above). */
void S_reset_allocation_pointer(tc) ptr tc; {
  iptr seg;
  thread_gc *tgc = THREAD_GC(tc);

  /* bracket so the queued fire is only raised at the outermost level */
  tgc->during_alloc += 1;

  seg = S_find_segments(tgc, space_new, 0, 1);

  /* NB: if allocate_segments didn't already ensure we don't use the last segment
     of memory, we'd have to reject it here so cp2-alloc can avoid a carry check for
     small allocation requests, using something like this:

     if (seg == (((uptr)1 << (ptr_bits - segment_offset_bits)) - 1))
       seg = S_find_segments(THREAD_GC(tc), space_new, 0, 1);
  */

  S_G.bytes_of_space[0][space_new] += bytes_per_segment;
  S_G.bytes_of_generation[0] += bytes_per_segment;

  if (tgc->during_alloc == 1) maybe_queue_fire_collector(THREAD_GC(tc));

  AP(tc) = build_ptr(seg, 0);
  REAL_EAP(tc) = EAP(tc) = (ptr)((uptr)AP(tc) + bytes_per_segment);

  tgc->during_alloc -= 1;
}
331
/* Record that the card containing ppp has a pointer to generation to_g,
   during a GC that uses marking.  Coalesces with the most recent entry
   when it is for the same card; otherwise pushes a new dirtycardinfo. */
void S_record_new_dirty_card(thread_gc *tgc, ptr *ppp, IGEN to_g) {
  uptr card = (uptr)TO_PTR(ppp) >> card_offset_bits;
  dirtycardinfo *ndc;

  alloc_mutex_acquire();
  ndc = S_G.new_dirty_cards;
  if (ndc != NULL && ndc->card == card) {
    /* same card as the newest record: just keep the youngest target */
    if (to_g < ndc->youngest) ndc->youngest = to_g;
  } else {
    dirtycardinfo *next = ndc;
    find_gc_room_voidp(tgc, space_new, 0, ptr_align(sizeof(dirtycardinfo)), ndc);
    ndc->card = card;
    ndc->youngest = to_g;
    ndc->next = next;
    S_G.new_dirty_cards = ndc;
  }
  alloc_mutex_release();
}
350
351 /* allocation mutex must be held (or only one thread due to call by collector) */
/* Move segment si to the DirtySegments(from_g, to_g) list when to_g is
   younger than its current min_dirty_byte, unlinking it from any list
   it is presently on (0xff means "on no list"). */
FORCEINLINE void mark_segment_dirty(seginfo *si, IGEN from_g, IGEN to_g) {
  IGEN old_to_g = si->min_dirty_byte;
  if (to_g < old_to_g) {
    seginfo **pointer_to_first, *oldfirst;
    if (old_to_g != 0xff) {
      seginfo *next = si->dirty_next, **prev = si->dirty_prev;
      /* presently on some other list, so remove */
      *prev = next;
      if (next != NULL) next->dirty_prev = prev;
    }
    /* push si on the front of the target list */
    oldfirst = *(pointer_to_first = &DirtySegments(from_g, to_g));
    *pointer_to_first = si;
    si->dirty_prev = pointer_to_first;
    si->dirty_next = oldfirst;
    if (oldfirst != NULL) oldfirst->dirty_prev = &si->dirty_next;
    si->min_dirty_byte = to_g;
  }
}
370
/* Software write barrier: store x at loc and record the card/segment as
   dirty when x is not a fixnum.  During a marking GC (use_marks) the
   write is recorded as a new dirty card instead. */
void S_dirty_set(ptr *loc, ptr x) {
  *loc = x;
  if (!Sfixnump(x)) {
    seginfo *si = SegInfo(addr_get_segment(TO_PTR(loc)));
    if (si->use_marks) {
      /* GC must be in progress */
      if (!FIXMEDIATE(x)) {
        seginfo *t_si = SegInfo(ptr_get_segment(x));
        /* only an old-to-young pointer needs recording */
        if (t_si->generation < si->generation)
          S_record_new_dirty_card(THREAD_GC(get_thread_context()), loc, t_si->generation);
      }
    } else {
      IGEN from_g = si->generation;
      if (from_g != 0) {
        alloc_mutex_acquire();
        /* mark the card holding loc as dirty down to generation 0 */
        si->dirty_bytes[((uptr)TO_PTR(loc) >> card_offset_bits) & ((1 << segment_card_offset_bits) - 1)] = 0;
        mark_segment_dirty(si, from_g, 0);
        alloc_mutex_release();
      }
    }
  }
}
393
394 /* only called by GC, so no other thread is running */
/* Mark `card` dirty down to generation to_g, updating its segment's
   dirty-byte entry and dirty-list membership when to_g is younger. */
void S_mark_card_dirty(uptr card, IGEN to_g) {
  uptr loc = card << card_offset_bits;
  uptr seg = addr_get_segment(loc);
  seginfo *si = SegInfo(seg);
  uptr cardno = card & ((1 << segment_card_offset_bits) - 1);
  if (to_g < si->dirty_bytes[cardno]) {
    si->dirty_bytes[cardno] = to_g;
    mark_segment_dirty(si, si->generation, to_g);
  }
}
405
/* scan remembered set from P to ENDP, transferring to dirty vector;
   allocation mutex must be held */
void S_scan_dirty(ptr *p, ptr *endp) {
  uptr this, last;

  last = 0;

  while (p < endp) {
    ptr loc = *p;
   /* whether building s directory or running UXLB code, the most
      common situations are that *loc is a fixnum, this == last, or loc
      is in generation 0. the generated code no longer adds elements
      to the remembered set if the RHS val is a fixnum. the other
      checks we do here. we don't bother looking for *loc being an
      immediate or outside the heap, nor for the generation of *loc
      being the same or older than the generation of loc, since these
      don't seem to weed out many dirty writes, and we don't want to
      waste time here on fruitless memory reads and comparisons */
    if ((this = (uptr)loc >> card_offset_bits) != last) {
      seginfo *si = SegInfo(addr_get_segment(loc));
      IGEN from_g = si->generation;
      if (from_g != 0) {
        si->dirty_bytes[((uptr)loc >> card_offset_bits) & ((1 << segment_card_offset_bits) - 1)] = 0;
        /* the segment dirty-list update is needed at most once per segment */
        if (this >> segment_card_offset_bits != last >> segment_card_offset_bits) mark_segment_dirty(si, from_g, 0);
      }
      last = this;
    }
    p += 1;
  }
}
436
437 /* S_scan_remembered_set is called from generated machine code when there
438 * is insufficient room for a remembered set addition.
439 */
440
/* Flush the remembered-set entries sitting between EAP and REAL_EAP
   into the dirty vector, then either restore the full allocation area
   or, when too little room remains, start a fresh one. */
void S_scan_remembered_set() {
  ptr tc = get_thread_context();
  uptr ap, eap, real_eap;

  alloc_mutex_acquire();

  ap = (uptr)AP(tc);
  eap = (uptr)EAP(tc);
  real_eap = (uptr)REAL_EAP(tc);

  S_scan_dirty(TO_VOIDP(eap), TO_VOIDP(real_eap));
  eap = real_eap;

  if (eap - ap > alloc_waste_maximum) {
    AP(tc) = (ptr)ap;
    EAP(tc) = (ptr)eap;
  } else {
    /* discard the tail as waste and grab a fresh allocation segment */
    uptr bytes = eap - ap;
    S_G.bytes_of_space[0][space_new] -= bytes;
    S_G.bytes_of_generation[0] -= bytes;
    S_reset_allocation_pointer(tc);
  }

  alloc_mutex_release();
  S_maybe_fire_collector(THREAD_GC(tc));
}
467
/* S_get_more_room is called from generated machine code when there is
 * insufficient room for an allocation. ap has already been incremented
 * by the size of the object and xp is a (typed) pointer to the value of
 * ap before the allocation attempt. xp must be set to a new object of
 * the appropriate type and size.
 */
474
void S_get_more_room() {
  ptr tc = get_thread_context();
  ptr xp; uptr ap, type, size;

  xp = XP(tc);
  /* recover the type tag and the untyped allocation address from xp */
  type = TYPEBITS(xp);
  if ((type_untyped != 0) && (type == 0)) type = type_untyped;
  ap = (uptr)UNTYPE(xp, type);
  /* AP was already bumped past the object, so the difference is its size */
  size = (uptr)((iptr)AP(tc) - (iptr)ap);

  XP(tc) = S_get_more_room_help(tc, ap, type, size);
}
487
/* Complete an allocation of `size` bytes with tag `type` at untyped
   address `ap` after the in-line allocator ran out of room.  First
   reclaims the remembered-set area (EAP..REAL_EAP); the object is then
   carved from the recovered area, from a fresh allocation segment, or
   via find_room, whichever fits.  Returns the typed object pointer. */
ptr S_get_more_room_help(ptr tc, uptr ap, uptr type, uptr size) {
  ptr x; uptr eap, real_eap;

  eap = (uptr)EAP(tc);
  real_eap = (uptr)REAL_EAP(tc);

  alloc_mutex_acquire();

  S_scan_dirty(TO_VOIDP(eap), TO_VOIDP(real_eap));
  eap = real_eap;

  if (eap - ap >= size) {
    /* recovered area is big enough: allocate in place */
    x = TYPE(ap, type);
    ap += size;
    if (eap - ap > alloc_waste_maximum) {
      AP(tc) = (ptr)ap;
      EAP(tc) = (ptr)eap;
    } else {
      /* too little left to keep; write it off and get a fresh segment */
      uptr bytes = eap - ap;
      S_G.bytes_of_space[0][space_new] -= bytes;
      S_G.bytes_of_generation[0] -= bytes;
      S_reset_allocation_pointer(tc);
    }
  } else if (eap - ap > alloc_waste_maximum) {
    /* keep the sizable leftover area; allocate the object elsewhere */
    AP(tc) = (ptr)ap;
    EAP(tc) = (ptr)eap;
    find_room(tc, space_new, 0, type, size, x);
  } else {
    /* leftover is waste: drop it, get a fresh segment, and retry there */
    uptr bytes = eap - ap;
    S_G.bytes_of_space[0][space_new] -= bytes;
    S_G.bytes_of_generation[0] -= bytes;
    S_reset_allocation_pointer(tc);
    ap = (uptr)AP(tc);
    if (size + alloc_waste_maximum <= (uptr)EAP(tc) - ap) {
      x = TYPE(ap, type);
      AP(tc) = (ptr)(ap + size);
    } else {
      find_room(tc, space_new, 0, type, size, x);
    }
  }

  alloc_mutex_release();
  S_maybe_fire_collector(THREAD_GC(tc));

  return x;
}
534
S_list_bits_ref(p)535 ptr S_list_bits_ref(p) ptr p; {
536 seginfo *si = SegInfo(ptr_get_segment(p));
537
538 if (si->list_bits) {
539 int bit_pos = (segment_bitmap_index(p) & 0x7);
540 return FIX((si->list_bits[segment_bitmap_byte(p)] >> bit_pos) & list_bits_mask);
541 } else
542 return FIX(0);
543 }
544
/* Set the given list bits for p, lazily allocating the segment's
   list-bits bitmap on first use. */
void S_list_bits_set(p, bits) ptr p; iptr bits; {
  seginfo *si = SegInfo(ptr_get_segment(p));

  /* This function includes potential races when writing list bits.
     If a race loses bits, that's ok, as long as it's unlikely. */

  if (!si->list_bits) {
    void *list_bits;
    ptr tc = get_thread_context();

    /* allocate the bitmap in the same generation as the segment */
    if (si->generation == 0)
      newspace_find_room_voidp(tc, ptr_align(segment_bitmap_bytes), list_bits);
    else
      find_room_voidp(tc, space_data, si->generation, ptr_align(segment_bitmap_bytes), list_bits);

    memset(list_bits, 0, segment_bitmap_bytes);

    /* A store fence is needed here to make sure `list_bits` is zeroed
       for everyone who sees it. On x86, TSO takes care of that
       ordering already. */
    STORE_FENCE();

    /* beware: racy write here */
    si->list_bits = list_bits;
  }

  /* beware: racy read+write here */
  si->list_bits[segment_bitmap_byte(p)] |= segment_bitmap_bits(p, bits);
}
574
S_cons_in(tc,s,g,car,cdr)575 ptr S_cons_in(tc, s, g, car, cdr) ptr tc; ISPC s; IGEN g; ptr car, cdr; {
576 ptr p;
577
578 find_room(tc, s, g, type_pair, size_pair, p);
579 INITCAR(p) = car;
580 INITCDR(p) = cdr;
581 return p;
582 }
583
Scons(car,cdr)584 ptr Scons(car, cdr) ptr car, cdr; {
585 ptr tc = get_thread_context();
586 ptr p;
587
588 newspace_find_room(tc, type_pair, size_pair, p);
589 INITCAR(p) = car;
590 INITCDR(p) = cdr;
591 return p;
592 }
593
/* Allocate an ephemeron pair in generation gen, with its GC-internal
   prev-ref/next links cleared. */
ptr S_ephemeron_cons_in(gen, car, cdr) IGEN gen; ptr car, cdr; {
  ptr p;
  ptr tc = get_thread_context();

  find_room(tc, space_ephemeron, gen, type_pair, size_ephemeron, p);
  INITCAR(p) = car;
  INITCDR(p) = cdr;
  EPHEMERONPREVREF(p) = 0;
  EPHEMERONNEXT(p) = 0;

  return p;
}
606
S_box2(ref,immobile)607 ptr S_box2(ref, immobile) ptr ref; IBOOL immobile; {
608 ptr tc = get_thread_context();
609 ptr p;
610
611 if (immobile)
612 find_room(tc, space_immobile_impure, 0, type_typed_object, size_box, p);
613 else
614 newspace_find_room(tc, type_typed_object, size_box, p);
615 BOXTYPE(p) = type_box;
616 INITBOXREF(p) = ref;
617 return p;
618 }
619
/* Public constructor: allocate an ordinary (movable) box holding ref. */
ptr Sbox(ref) ptr ref; {
  return S_box2(ref, 0);
}
623
/* Allocate a fresh symbol with the given name, unbound value, empty
   property lists, and no hash code. */
ptr S_symbol(name) ptr name; {
  ptr tc = get_thread_context();
  ptr p;

  newspace_find_room(tc, type_symbol, size_symbol, p);
  /* changes here should be reflected in the oblist collection code in gc.c */
  INITSYMVAL(p) = sunbound;
  INITSYMCODE(p,S_G.nonprocedure_code);
  INITSYMPLIST(p) = snil;
  INITSYMSPLIST(p) = snil;
  INITSYMNAME(p) = name;
  INITSYMHASH(p) = Sfalse;
  return p;
}
638
S_rational(n,d)639 ptr S_rational(n, d) ptr n, d; {
640 if (d == FIX(1)) return n;
641 else {
642 ptr tc = get_thread_context();
643 ptr p;
644
645 newspace_find_room(tc, type_typed_object, size_ratnum, p);
646 RATTYPE(p) = type_ratnum;
647 RATNUM(p) = n;
648 RATDEN(p) = d;
649 return p;
650 }
651 }
652
/* Allocate a tlc (hash-table linked cell) holding keyval, its owning
   hash table ht, and the next cell in the bucket chain. */
ptr S_tlc(ptr keyval, ptr ht, ptr next) {
  ptr tc = get_thread_context();
  ptr p;

  newspace_find_room(tc, type_typed_object, size_tlc, p);
  TLCTYPE(p) = type_tlc;
  INITTLCKEYVAL(p) = keyval;
  INITTLCHT(p) = ht;
  INITTLCNEXT(p) = next;
  return p;
}
664
/* Allocate an n-element vector in space s, generation g; n == 0 yields
   the canonical empty vector.  Contents are uninitialized. */
ptr S_vector_in(tc, s, g, n) ptr tc; ISPC s; IGEN g; iptr n; {
  ptr p; iptr d;

  if (n == 0) return S_G.null_vector;

  if ((uptr)n >= maximum_vector_length)
    S_error("", "invalid vector size request");

  d = size_vector(n);
  find_room(tc, s, g, type_typed_object, d, p);
  VECTTYPE(p) = (n << vector_length_offset) | type_vector;
  return p;
}
678
S_vector(n)679 ptr S_vector(n) iptr n; {
680 ptr tc;
681 ptr p; iptr d;
682
683 if (n == 0) return S_G.null_vector;
684
685 if ((uptr)n >= maximum_vector_length)
686 S_error("", "invalid vector size request");
687
688 tc = get_thread_context();
689
690 d = size_vector(n);
691 newspace_find_room(tc, type_typed_object, d, p);
692 VECTTYPE(p) = (n << vector_length_offset) | type_vector;
693 return p;
694 }
695
/* Allocate an n-element generation-0 fxvector; n == 0 yields the
   canonical empty fxvector.  Contents are uninitialized. */
ptr S_fxvector(n) iptr n; {
  ptr tc;
  ptr p; iptr d;

  if (n == 0) return S_G.null_fxvector;

  if ((uptr)n > (uptr)maximum_fxvector_length)
    S_error("", "invalid fxvector size request");

  tc = get_thread_context();

  d = size_fxvector(n);
  newspace_find_room(tc, type_typed_object, d, p);
  FXVECTOR_TYPE(p) = (n << fxvector_length_offset) | type_fxvector;
  return p;
}
712
/* Allocate an n-element generation-0 flvector; n == 0 yields the
   canonical empty flvector.  Contents are uninitialized. */
ptr S_flvector(n) iptr n; {
  ptr tc;
  ptr p; iptr d;

  if (n == 0) return S_G.null_flvector;

  if ((uptr)n > (uptr)maximum_flvector_length)
    S_error("", "invalid flvector size request");

  tc = get_thread_context();

  d = size_flvector(n);
  newspace_find_room(tc, type_typed_object, d, p);
  FLVECTOR_TYPE(p) = (n << flvector_length_offset) | type_flvector;
  return p;
}
729
/* Allocate an n-byte generation-0 bytevector in the default space. */
ptr S_bytevector(n) iptr n; {
  return S_bytevector2(get_thread_context(), n, space_new);
}
733
/* Allocate an n-byte bytevector in space spc (newspace fast path when
   spc is space_new); n == 0 yields the canonical empty bytevector. */
ptr S_bytevector2(tc, n, spc) ptr tc; iptr n; ISPC spc; {
  ptr p; iptr d;

  if (n == 0) return S_G.null_bytevector;

  if ((uptr)n > (uptr)maximum_bytevector_length)
    S_error("", "invalid bytevector size request");

  d = size_bytevector(n);
  if (spc != space_new)
    find_room(tc, spc, 0, type_typed_object, d, p);
  else
    newspace_find_room(tc, type_typed_object, d, p);
  BYTEVECTOR_TYPE(p) = (n << bytevector_length_offset) | type_bytevector;
  return p;
}
750
/* Allocate a fresh zero-length immutable vector. */
ptr S_null_immutable_vector() {
  ptr tc = get_thread_context();
  ptr v;
  find_room(tc, space_new, 0, type_typed_object, size_vector(0), v);
  VECTTYPE(v) = (0 << vector_length_offset) | type_vector | vector_immutable_flag;
  return v;
}
758
/* Allocate a fresh zero-length immutable bytevector.
   NOTE(review): the header is written via VECTTYPE rather than
   BYTEVECTOR_TYPE (used elsewhere, e.g. in S_bytevector2); presumably
   both macros access the same type-word field -- confirm. */
ptr S_null_immutable_bytevector() {
  ptr tc = get_thread_context();
  ptr v;
  find_room(tc, space_new, 0, type_typed_object, size_bytevector(0), v);
  VECTTYPE(v) = (0 << bytevector_length_offset) | type_bytevector | bytevector_immutable_flag;
  return v;
}
766
/* Allocate a fresh zero-length immutable string.
   NOTE(review): header written via VECTTYPE rather than STRTYPE (used
   elsewhere for strings); presumably both access the same type-word
   field -- confirm. */
ptr S_null_immutable_string() {
  ptr tc = get_thread_context();
  ptr v;
  find_room(tc, space_new, 0, type_typed_object, size_string(0), v);
  VECTTYPE(v) = (0 << string_length_offset) | type_string | string_immutable_flag;
  return v;
}
774
/* Allocate a stencil vector whose field count is the population count
   of mask.  Fields are uninitialized. */
ptr S_stencil_vector(mask) uptr mask; {
  ptr tc;
  ptr p; iptr d;
  iptr n = Spopcount(mask);

  tc = get_thread_context();

  d = size_stencil_vector(n);
  newspace_find_room(tc, type_typed_object, d, p);
  VECTTYPE(p) = (mask << stencil_vector_mask_offset) | type_stencil_vector;
  return p;
}
787
/* Allocate an uninitialized record of n bytes (n includes the header;
   caller fills in the rtd and fields). */
ptr S_record(n) iptr n; {
  ptr tc = get_thread_context();
  ptr p;

  newspace_find_room(tc, type_typed_object, n, p);
  return p;
}
795
/* Return the record type descriptor (rtd) of record instance r. */
ptr Srecord_type(ptr r) {
  return RECORDINSTTYPE(r);
}
799
/* Return the parent rtd of record type rtd. */
ptr Srecord_type_parent(ptr rtd) {
  return rtd_parent(rtd);
}
803
/* Return the instance size (in bytes) recorded in record type rtd. */
uptr Srecord_type_size(ptr rtd) {
  return UNFIX(RECORDDESCSIZE(rtd));
}
807
/* Nonzero when rtd's pointer mask is -1, i.e. all fields are pointers. */
int Srecord_type_uniformp(ptr rtd) {
  return RECORDDESCPM(rtd) == FIX(-1);
}
811
/* Allocate a closure for code object cod with n free-variable slots
   (slots are uninitialized). */
ptr S_closure(cod, n) ptr cod; iptr n; {
  ptr tc = get_thread_context();
  ptr p; iptr d;

  d = size_closure(n);
  newspace_find_room(tc, type_closure, d, p);
  CLOSENTRY(p) = cod;
  return p;
}
821
/* Allocate a continuation object in space s, generation g, filling in
   the nuate entry, saved stack (with total and current lengths), link
   to the next continuation, return address, winders, and attachments. */
ptr S_mkcontinuation(s, g, nuate, stack, length, clength, link, ret, winders, attachments)
    ISPC s; IGEN g; ptr nuate; ptr stack; iptr length; iptr clength; ptr link;
    ptr ret; ptr winders; ptr attachments; {
  ptr p;
  ptr tc = get_thread_context();

  find_room(tc, s, g, type_closure, size_continuation, p);
  CLOSENTRY(p) = nuate;
  CONTSTACK(p) = stack;
  CONTLENGTH(p) = length;
  CONTCLENGTH(p) = clength;
  CONTLINK(p) = link;
  CONTRET(p) = ret;
  CONTWINDERS(p) = winders;
  CONTATTACHMENTS(p) = attachments;
  return p;
}
839
Sflonum(x)840 ptr Sflonum(x) double x; {
841 ptr tc = get_thread_context();
842 ptr p;
843
844 newspace_find_room(tc, type_flonum, size_flonum, p);
845 FLODAT(p) = x;
846 return p;
847 }
848
/* Allocate an inexact complex number with real part rp and imaginary
   part ip. */
ptr S_inexactnum(rp, ip) double rp, ip; {
  ptr tc = get_thread_context();
  ptr p;

  newspace_find_room(tc, type_typed_object, size_inexactnum, p);
  INEXACTNUM_TYPE(p) = type_inexactnum;
  INEXACTNUM_REAL_PART(p) = rp;
  INEXACTNUM_IMAG_PART(p) = ip;
  return p;
}
859
/* Allocate a Scheme thread object wrapping thread context tc. */
ptr S_thread(tc) ptr tc; {
  ptr p;

  find_room(tc, space_new, 0, type_typed_object, size_thread, p);
  TYPEFIELD(p) = (ptr)type_thread;
  THREADTC(p) = (uptr)tc;
  return p;
}
868
/* Allocate an exact complex number with real part a and imaginary
   part b. */
ptr S_exactnum(a, b) ptr a, b; {
  ptr tc = get_thread_context();
  ptr p;

  newspace_find_room(tc, type_typed_object, size_exactnum, p);
  EXACTNUM_TYPE(p) = type_exactnum;
  EXACTNUM_REAL_PART(p) = a;
  EXACTNUM_IMAG_PART(p) = b;
  return p;
}
879
880 /* S_string returns a new string of length n. If s is not NULL, it is
881 * copied into the new string. If n < 0, then s must be non-NULL,
882 * and the length of s (by strlen) determines the length of the string */
S_string(s,n)883 ptr S_string(s, n) const char *s; iptr n; {
884 ptr tc;
885 ptr p; iptr d;
886 iptr i;
887
888 if (n < 0) n = strlen(s);
889
890 if (n == 0) return S_G.null_string;
891
892 if ((uptr)n > (uptr)maximum_string_length)
893 S_error("", "invalid string size request");
894
895 tc = get_thread_context();
896
897 d = size_string(n);
898 newspace_find_room(tc, type_typed_object, d, p);
899 STRTYPE(p) = (n << string_length_offset) | type_string;
900
901 /* fill the string with valid characters */
902 i = 0;
903
904 /* first copy input string, if any */
905 if (s != (char *)NULL) {
906 while (i != n && *s != 0) {
907 Sstring_set(p, i, *s++);
908 i += 1;
909 }
910 }
911
912 /* fill remaining slots with nul */
913 while (i != n) {
914 Sstring_set(p, i, 0);
915 i += 1;
916 }
917
918 return p;
919 }
920
/* Decode the UTF-8 byte sequence s of length n (n < 0 means use
   strlen) into a fresh Scheme string.  Two passes: the first counts
   code points, the second decodes; any ill-formed sequence, overlong
   encoding, surrogate, or out-of-range value becomes U+FFFD. */
ptr Sstring_utf8(s, n) const char *s; iptr n; {
  const char* u8;
  iptr cc, d, i, n8;
  ptr p, tc;

  if (n < 0) n = strlen(s);

  if (n == 0) return S_G.null_string;

  /* determine code point count cc */
  u8 = s;
  n8 = n;
  cc = 0;
  while (n8 > 0) {
    unsigned char b1 = *(const unsigned char*)u8++;
    n8--;
    cc++;
    if ((b1 & 0x80) == 0)
      /* one-byte (ASCII) sequence */
      ;
    else if ((b1 & 0x40) == 0)
      /* stray continuation byte: counts as one (replacement) character */
      ;
    else if ((b1 & 0x20) == 0) {
      /* two-byte sequence: consume one continuation byte if present */
      if ((n8 >= 1) && ((*u8 & 0xc0) == 0x80)) {
        u8++;
        n8--;
      }
    } else if ((b1 & 0x10) == 0) {
      /* three-byte sequence: consume up to two continuation bytes */
      if ((n8 >= 1) && ((*u8 & 0xc0) == 0x80)) {
        u8++;
        n8--;
        if ((n8 >= 1) && ((*u8 & 0xc0) == 0x80)) {
          u8++;
          n8--;
        }
      }
    } else if ((b1 & 0x08) == 0) {
      /* four-byte sequence: consume up to three continuation bytes */
      if ((n8 >= 1) && ((*u8 & 0xc0) == 0x80)) {
        u8++;
        n8--;
        if ((n8 >= 1) && ((*u8 & 0xc0) == 0x80)) {
          u8++;
          n8--;
          if ((n8 >= 1) && ((*u8 & 0xc0) == 0x80)) {
            u8++;
            n8--;
          }
        }
      }
    }
  }

  if ((uptr)cc > (uptr)maximum_string_length)
    S_error("", "invalid string size request");

  tc = get_thread_context();
  d = size_string(cc);
  newspace_find_room(tc, type_typed_object, d, p);
  STRTYPE(p) = (cc << string_length_offset) | type_string;

  /* fill the string; c defaults to U+FFFD (replacement character) */
  u8 = s;
  n8 = n;
  i = 0;
  while (n8 > 0) {
    unsigned char b1 = *u8++;
    int c = 0xfffd;
    n8--;
    if ((b1 & 0x80) == 0)
      c = b1;
    else if ((b1 & 0x40) == 0)
      /* stray continuation byte: keep the replacement character */
      ;
    else if ((b1 & 0x20) == 0) {
      unsigned char b2;
      if ((n8 >= 1) && (((b2 = *u8) & 0xc0) == 0x80)) {
        int x = ((b1 & 0x1f) << 6) | (b2 & 0x3f);
        u8++;
        n8--;
        /* reject overlong two-byte encodings (< 0x80) */
        if (x >= 0x80)
          c = x;
      }
    } else if ((b1 & 0x10) == 0) {
      unsigned char b2;
      if ((n8 >= 1) && (((b2 = *u8) & 0xc0) == 0x80)) {
        unsigned char b3;
        u8++;
        n8--;
        if ((n8 >= 1) && (((b3 = *u8) & 0xc0) == 0x80)) {
          int x = ((b1 & 0x0f) << 12) | ((b2 & 0x3f) << 6) | (b3 & 0x3f);
          u8++;
          n8--;
          /* reject overlong encodings and surrogate code points */
          if ((x >= 0x800) && ((x < 0xd800) || (x > 0xdfff)))
            c = x;
        }
      }
    } else if ((b1 & 0x08) == 0) {
      unsigned char b2;
      if ((n8 >= 1) && (((b2 = *u8) & 0xc0) == 0x80)) {
        unsigned char b3;
        u8++;
        n8--;
        if ((n8 >= 1) && (((b3 = *u8) & 0xc0) == 0x80)) {
          unsigned char b4;
          u8++;
          n8--;
          if ((n8 >= 1) && (((b4 = *u8) & 0xc0) == 0x80)) {
            int x = ((b1 & 0x07) << 18) | ((b2 & 0x3f) << 12) | ((b3 & 0x3f) << 6) | (b4 & 0x3f);
            u8++;
            n8--;
            /* reject overlong encodings and values beyond U+10FFFF */
            if ((x >= 0x10000) && (x <= 0x10ffff))
              c = x;
          }
        }
      }
    }
    Sstring_set(p, i++, c);
  }
  return p;
}
1039
/* Allocate an n-bigit bignum with the given sign; digits are
   uninitialized. */
ptr S_bignum(tc, n, sign) ptr tc; iptr n; IBOOL sign; {
  ptr p; iptr d;

  if ((uptr)n > (uptr)maximum_bignum_length)
    S_error("", "invalid bignum size request");

  /* for anything that allocates bignums, make sure scheduling fuel is consumed */
  USE_TRAP_FUEL(tc, n);

  d = size_bignum(n);
  newspace_find_room(tc, type_typed_object, d, p);
  BIGTYPE(p) = (uptr)n << bignum_length_offset | sign << bignum_sign_offset | type_bignum;
  return p;
}
1054
/* Allocate a code object of n instruction bytes with the given code
   subtype, registering the pending code modification. */
ptr S_code(tc, type, n) ptr tc; iptr type, n; {
  ptr p; iptr d;

  d = size_code(n);
  find_room(tc, space_code, 0, type_typed_object, d, p);
  CODETYPE(p) = type;
  CODELEN(p) = n;
  /* we record the code modification here, even though we haven't
     even started modifying the code yet, since we always create
     and fill the code object within a critical section. */
  S_record_code_mod(tc, (uptr)TO_PTR(&CODEIT(p,0)), (uptr)n);
  return p;
}
1068
/* Allocate an n-entry relocation table (an untyped object). */
ptr S_relocation_table(n) iptr n; {
  ptr tc = get_thread_context();
  ptr p; iptr d;

  d = size_reloc_table(n);
  newspace_find_room(tc, type_untyped, d, p);
  RELOCSIZE(p) = n;
  return p;
}
1078
/* Allocate a generation-0 weak pair (car held weakly). */
ptr S_weak_cons(ptr car, ptr cdr) {
  ptr tc = get_thread_context();
  return S_cons_in(tc, space_weakpair, 0, car, cdr);
}
1083
/* Allocate a phantom bytevector accounting for sz bytes of external
   memory; the accounting itself is applied by the adjust call. */
ptr S_phantom_bytevector(sz) uptr sz; {
  ptr tc = get_thread_context();
  ptr p;

  newspace_find_room(tc, type_typed_object, size_phantom, p);

  PHANTOMTYPE(p) = type_phantom;
  PHANTOMLEN(p) = 0;

  S_phantom_bytevector_adjust(p, sz);

  return p;
}
1097
/* Change the accounted size of phantom bytevector ph to new_sz,
   adjusting the per-generation phantom byte counts and the process
   memory-bytes statistic by the delta. */
void S_phantom_bytevector_adjust(ph, new_sz) ptr ph; uptr new_sz; {
  uptr old_sz = PHANTOMLEN(ph);
  seginfo *si;
  IGEN g;

  tc_mutex_acquire();

  /* charge the delta to the generation ph currently resides in */
  si = SegInfo(ptr_get_segment(ph));
  g = si->generation;

  S_G.bytesof[g][countof_phantom] += (new_sz - old_sz);
  S_adjustmembytes(new_sz - old_sz);
  PHANTOMLEN(ph) = new_sz;

  tc_mutex_release();
}
1114