1 /**************************************************************************/
2 /* */
3 /* OCaml */
4 /* */
5 /* Damien Doligez, projet Para, INRIA Rocquencourt */
6 /* */
7 /* Copyright 1996 Institut National de Recherche en Informatique et */
8 /* en Automatique. */
9 /* */
10 /* All rights reserved. This file is distributed under the terms of */
11 /* the GNU Lesser General Public License version 2.1, with the */
12 /* special exception on linking described in the file LICENSE. */
13 /* */
14 /**************************************************************************/
15
16 #define CAML_INTERNALS
17
18 #define FREELIST_DEBUG 0
19 #if FREELIST_DEBUG
20 #include <stdio.h>
21 #endif
22
23 #include <string.h>
24
25 #include "caml/config.h"
26 #include "caml/freelist.h"
27 #include "caml/gc.h"
28 #include "caml/gc_ctrl.h"
29 #include "caml/memory.h"
30 #include "caml/major_gc.h"
31 #include "caml/misc.h"
32 #include "caml/mlvalues.h"
33
34 /* The free-list is kept sorted by increasing addresses.
35 This makes the merging of adjacent free blocks possible.
36 (See [caml_fl_merge_block].)
37 */
38
/* A free list block is a [value] (integer representing a pointer to the
   first word after the block's header). The end of the list is NULL. */
#define Val_NULL ((value) NULL)

/* The sentinel can be located anywhere in memory, but it must not be
   adjacent to any heap object. */
static struct {
  value filler1; /* Make sure the sentinel is never adjacent to any block. */
  header_t h;
  value first_field;
  value filler2; /* Make sure the sentinel is never adjacent to any block. */
} sentinel = {0, Make_header (0, 0, Caml_blue), Val_NULL, 0};

/* Pseudo-block used as the fixed head of the free list; its single field
   links to the first real free block. */
#define Fl_head (Val_bp (&(sentinel.first_field)))
static value fl_prev = Fl_head;  /* Current allocation pointer. */
static value fl_last = Val_NULL; /* Last block in the list.  Only valid
                                    just after [caml_fl_allocate] returns NULL. */
value caml_fl_merge = Fl_head;   /* Current insertion pointer.  Managed
                                    jointly with [sweep_slice]. */
asize_t caml_fl_cur_wsz = 0;     /* Number of words in the free list,
                                    including headers but not fragments. */

/* First-fit policy acceleration table: [flp[i]] is the predecessor of the
   i-th "record-size" block (strictly larger than all blocks before it).
   [beyond] marks where the scan that built the table stopped. */
#define FLP_MAX 1000
static value flp [FLP_MAX];
static int flp_size = 0;
static value beyond = Val_NULL;

/* Field 0 of a free block holds the link to the next free block. */
#define Next(b) (Field (b, 0))

#define Policy_next_fit 0
#define Policy_first_fit 1
uintnat caml_allocation_policy = Policy_next_fit;
#define policy caml_allocation_policy
72
73 #ifdef DEBUG
fl_check(void)74 static void fl_check (void)
75 {
76 value cur, prev;
77 int prev_found = 0, flp_found = 0, merge_found = 0;
78 uintnat size_found = 0;
79 int sz = 0;
80
81 prev = Fl_head;
82 cur = Next (prev);
83 while (cur != Val_NULL){
84 size_found += Whsize_bp (cur);
85 Assert (Is_in_heap (cur));
86 if (cur == fl_prev) prev_found = 1;
87 if (policy == Policy_first_fit && Wosize_bp (cur) > sz){
88 sz = Wosize_bp (cur);
89 if (flp_found < flp_size){
90 Assert (Next (flp[flp_found]) == cur);
91 ++ flp_found;
92 }else{
93 Assert (beyond == Val_NULL || cur >= Next (beyond));
94 }
95 }
96 if (cur == caml_fl_merge) merge_found = 1;
97 prev = cur;
98 cur = Next (prev);
99 }
100 if (policy == Policy_next_fit) Assert (prev_found || fl_prev == Fl_head);
101 if (policy == Policy_first_fit) Assert (flp_found == flp_size);
102 Assert (merge_found || caml_fl_merge == Fl_head);
103 Assert (size_found == caml_fl_cur_wsz);
104 }
105
106 #endif
107
108 /* [allocate_block] is called by [caml_fl_allocate]. Given a suitable free
109 block and the requested size, it allocates a new block from the free
110 block. There are three cases:
111 0. The free block has the requested size. Detach the block from the
112 free-list and return it.
113 1. The free block is 1 word longer than the requested size. Detach
114 the block from the free list. The remaining word cannot be linked:
115 turn it into an empty block (header only), and return the rest.
116 2. The free block is large enough. Split it in two and return the right
117 block.
118 In all cases, the allocated block is right-justified in the free block:
119 it is located in the high-address words of the free block, so that
120 the linking of the free-list does not change in case 2.
121 */
/* Carve a block of [wh_sz] words (including header) out of the free block
   [cur], whose predecessor in the free list is [prev].  [flpi] is the index
   of the [flp] entry that led to [cur] (first-fit policy only; ignored
   otherwise).  See the case analysis in the comment above. */
static header_t *allocate_block (mlsize_t wh_sz, int flpi, value prev,
                                 value cur)
{
  header_t h = Hd_bp (cur);
  Assert (Whsize_hd (h) >= wh_sz);
  if (Wosize_hd (h) < wh_sz + 1){  /* Cases 0 and 1. */
    /* The whole free block is consumed: unlink it. */
    caml_fl_cur_wsz -= Whsize_hd (h);
    Next (prev) = Next (cur);
    Assert (Is_in_heap (Next (prev)) || Next (prev) == Val_NULL);
    /* The merge pointer must never point at a block that left the list. */
    if (caml_fl_merge == cur) caml_fl_merge = prev;
#ifdef DEBUG
    fl_last = Val_NULL;
#endif
      /* In case 1, the following creates the empty block correctly.
         In case 0, it gives an invalid header to the block.  The function
         calling [caml_fl_allocate] will overwrite it. */
    Hd_op (cur) = Make_header (0, 0, Caml_white);
    if (policy == Policy_first_fit){
      /* Keep the flp table consistent with the removal of [cur]. */
      if (flpi + 1 < flp_size && flp[flpi + 1] == cur){
        flp[flpi + 1] = prev;
      }else if (flpi == flp_size - 1){
        beyond = (prev == Fl_head) ? Val_NULL : prev;
        -- flp_size;
      }
    }
  }else{  /* Case 2. */
    /* Split: shrink [cur] in place (it keeps its list position) and hand
       out its high-address words. */
    caml_fl_cur_wsz -= wh_sz;
    Hd_op (cur) = Make_header (Wosize_hd (h) - wh_sz, 0, Caml_blue);
  }
  if (policy == Policy_next_fit) fl_prev = prev;
  /* Right-justified: the allocated words are at the end of the free block. */
  return (header_t *) &Field (cur, Wosize_hd (h) - wh_sz);
}
154
#ifdef CAML_INSTR
/* Histogram of allocation requests by size in words:
   slots 1-9 count exact sizes, slots 10-18 count decades 10-99,
   slot 19 counts everything >= 100.  Slot 0 is unused. */
static uintnat instr_size [20] =
  {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
static char *instr_name [20] = {
  NULL,
  "alloc01@",
  "alloc02@",
  "alloc03@",
  "alloc04@",
  "alloc05@",
  "alloc06@",
  "alloc07@",
  "alloc08@",
  "alloc09@",
  "alloc10-19@",
  "alloc20-29@",
  "alloc30-39@",
  "alloc40-49@",
  "alloc50-59@",
  "alloc60-69@",
  "alloc70-79@",
  "alloc80-89@",
  "alloc90-99@",
  "alloc_large@",
};
uintnat caml_instr_alloc_jump = 0;
/* number of pointers followed to allocate from the free list */
#endif /*CAML_INSTR*/
183
184 /* [caml_fl_allocate] does not set the header of the newly allocated block.
185 The calling function must do it before any GC function gets called.
186 [caml_fl_allocate] returns a head pointer.
187 */
/* Find a free block of at least [wo_sz] words and allocate from it.
   Returns a pointer to the header of the allocated block, or NULL if no
   suitable block exists (in which case [fl_last] is left pointing at the
   last block of the list, for use by [caml_fl_add_blocks]).
   The caller must set the returned block's header before any GC. */
header_t *caml_fl_allocate (mlsize_t wo_sz)
{
  value cur = Val_NULL, prev;
  header_t *result;
  int i;
  mlsize_t sz, prevsz;
  Assert (sizeof (char *) == sizeof (value));
  Assert (wo_sz >= 1);
#ifdef CAML_INSTR
  /* Record the request in the size histogram (see [instr_size]). */
  if (wo_sz < 10){
    ++instr_size[wo_sz];
  }else if (wo_sz < 100){
    ++instr_size[wo_sz/10 + 9];
  }else{
    ++instr_size[19];
  }
#endif /* CAML_INSTR */

  switch (policy){
  case Policy_next_fit:
    Assert (fl_prev != Val_NULL);
    /* Search from [fl_prev] to the end of the list. */
    prev = fl_prev;
    cur = Next (prev);
    while (cur != Val_NULL){ Assert (Is_in_heap (cur));
      if (Wosize_bp (cur) >= wo_sz){
        return allocate_block (Whsize_wosize (wo_sz), 0, prev, cur);
      }
      prev = cur;
      cur = Next (prev);
#ifdef CAML_INSTR
      ++ caml_instr_alloc_jump;
#endif
    }
    fl_last = prev;
    /* Search from the start of the list to [fl_prev]. */
    prev = Fl_head;
    cur = Next (prev);
    while (prev != fl_prev){
      if (Wosize_bp (cur) >= wo_sz){
        return allocate_block (Whsize_wosize (wo_sz), 0, prev, cur);
      }
      prev = cur;
      cur = Next (prev);
#ifdef CAML_INSTR
      ++ caml_instr_alloc_jump;
#endif
    }
    /* No suitable block was found. */
    return NULL;
    break;

  case Policy_first_fit: {
    /* Search in the flp array: each entry's successor is strictly larger
       than all blocks before it, so the first large-enough entry is the
       first fit in the whole scanned prefix. */
    for (i = 0; i < flp_size; i++){
      sz = Wosize_bp (Next (flp[i]));
      if (sz >= wo_sz){
#if FREELIST_DEBUG
        if (i > 5) fprintf (stderr, "FLP: found at %d  size=%d\n", i, wo_sz);
#endif
        result = allocate_block (Whsize_wosize (wo_sz), i, flp[i],
                                 Next (flp[i]));
        goto update_flp;
      }
    }
    /* Extend the flp array: resume scanning the list where the table
       left off ([beyond] if set, else after the last table entry). */
    if (flp_size == 0){
      prev = Fl_head;
      prevsz = 0;
    }else{
      prev = Next (flp[flp_size - 1]);
      prevsz = Wosize_bp (prev);
      if (beyond != Val_NULL) prev = beyond;
    }
    while (flp_size < FLP_MAX){
      cur = Next (prev);
      if (cur == Val_NULL){
        fl_last = prev;
        beyond = (prev == Fl_head) ? Val_NULL : prev;
        return NULL;
      }else{
        sz = Wosize_bp (cur);
        if (sz > prevsz){
          /* New record size: record its predecessor in the table. */
          flp[flp_size] = prev;
          ++ flp_size;
          if (sz >= wo_sz){
            beyond = cur;
            i = flp_size - 1;
#if FREELIST_DEBUG
            if (flp_size > 5){
              fprintf (stderr, "FLP: extended to %d\n", flp_size);
            }
#endif
            result = allocate_block (Whsize_wosize (wo_sz), flp_size - 1, prev,
                                     cur);
            goto update_flp;
          }
          prevsz = sz;
        }
      }
      prev = cur;
    }
    beyond = cur;

    /* The flp table is full.  Do a slow first-fit search. */
#if FREELIST_DEBUG
    fprintf (stderr, "FLP: table is full -- slow first-fit\n");
#endif
    if (beyond != Val_NULL){
      prev = beyond;
    }else{
      prev = flp[flp_size - 1];
    }
    prevsz = Wosize_bp (Next (flp[FLP_MAX-1]));
    Assert (prevsz < wo_sz);
    cur = Next (prev);
    while (cur != Val_NULL){
      Assert (Is_in_heap (cur));
      sz = Wosize_bp (cur);
      if (sz < prevsz){
        beyond = cur;
      }else if (sz >= wo_sz){
        return allocate_block (Whsize_wosize (wo_sz), flp_size, prev, cur);
      }
      prev = cur;
      cur = Next (prev);
    }
    fl_last = prev;
    return NULL;

  update_flp: /* (i, sz) */
    /* The block at [i] was removed or reduced.  Update the table. */
    Assert (0 <= i && i < flp_size + 1);
    if (i < flp_size){
      if (i > 0){
        prevsz = Wosize_bp (Next (flp[i-1]));
      }else{
        prevsz = 0;
      }
      if (i == flp_size - 1){
        /* Last entry: either it is no longer a record size (drop it and
           remember where to resume scanning) or it still is (rescan from
           scratch beyond it). */
        if (Wosize_bp (Next (flp[i])) <= prevsz){
          beyond = Next (flp[i]);
          -- flp_size;
        }else{
          beyond = Val_NULL;
        }
      }else{
        /* Middle entry: rescan the segment between entries [i] and [i+1]
           for new record sizes, collect them in [buf], then splice [buf]
           into the table in place of entry [i]. */
        value buf [FLP_MAX];
        int j = 0;
        mlsize_t oldsz = sz;

        prev = flp[i];
        while (prev != flp[i+1]){
          cur = Next (prev);
          sz = Wosize_bp (cur);
          if (sz > prevsz){
            buf[j++] = prev;
            prevsz = sz;
            if (sz >= oldsz){
              Assert (sz == oldsz);
              break;
            }
          }
          prev = cur;
        }
#if FREELIST_DEBUG
        if (j > 2) fprintf (stderr, "FLP: update; buf size = %d\n", j);
#endif
        if (FLP_MAX >= flp_size + j - 1){
          /* The replacement fits: shift the tail and copy [buf] in. */
          if (j != 1){
            memmove (&flp[i+j], &flp[i+1], sizeof (value) * (flp_size-i-1));
          }
          if (j > 0) memmove (&flp[i], &buf[0], sizeof (value) * j);
          flp_size += j - 1;
        }else{
          /* The table would overflow: keep what fits and truncate,
             remembering the resume point in [beyond]. */
          if (FLP_MAX > i + j){
            if (j != 1){
              memmove (&flp[i+j], &flp[i+1], sizeof (value) * (FLP_MAX-i-j));
            }
            if (j > 0) memmove (&flp[i], &buf[0], sizeof (value) * j);
          }else{
            if (i != FLP_MAX){
              memmove (&flp[i], &buf[0], sizeof (value) * (FLP_MAX - i));
            }
          }
          flp_size = FLP_MAX - 1;
          beyond = Next (flp[FLP_MAX - 1]);
        }
      }
    }
    return result;
  }
  break;

  default:
    Assert (0);   /* unknown policy */
    break;
  }
  return NULL;  /* NOT REACHED */
}
388
/* Location of the last fragment seen by the sweeping code.
   This is a pointer to the first word after the fragment, which is
   the header of the next block.
   Note that [last_fragment] doesn't point to the fragment itself,
   but to the block after it.  NULL when no fragment is pending
   (reset at the start of each merge phase by [caml_fl_init_merge]).
*/
static header_t *last_fragment;
396
caml_fl_init_merge(void)397 void caml_fl_init_merge (void)
398 {
399 #ifdef CAML_INSTR
400 int i;
401 for (i = 1; i < 20; i++){
402 CAML_INSTR_INT (instr_name[i], instr_size[i]);
403 instr_size[i] = 0;
404 }
405 #endif /* CAML_INSTR */
406 last_fragment = NULL;
407 caml_fl_merge = Fl_head;
408 #ifdef DEBUG
409 fl_check ();
410 #endif
411 }
412
/* Invalidate the first-fit cache for every entry located at or after
   [changed]: the list structure there is about to change (or just did),
   so the cached predecessors can no longer be trusted. */
static void truncate_flp (value changed)
{
  if (changed == Fl_head){
    /* The whole list is affected: drop the entire cache. */
    flp_size = 0;
    beyond = Val_NULL;
    return;
  }
  while (flp_size > 0 && Next (flp[flp_size - 1]) >= changed) -- flp_size;
  if (beyond >= changed) beyond = Val_NULL;
}
424
425 /* This is called by caml_compact_heap. */
caml_fl_reset(void)426 void caml_fl_reset (void)
427 {
428 Next (Fl_head) = Val_NULL;
429 switch (policy){
430 case Policy_next_fit:
431 fl_prev = Fl_head;
432 break;
433 case Policy_first_fit:
434 truncate_flp (Fl_head);
435 break;
436 default:
437 Assert (0);
438 break;
439 }
440 caml_fl_cur_wsz = 0;
441 caml_fl_init_merge ();
442 }
443
444 /* [caml_fl_merge_block] returns the head pointer of the next block after [bp],
445 because merging blocks may change the size of [bp]. */
/* Insert the swept block [bp] into the free list at the current merge
   point, coalescing it with a pending fragment, with the following free
   block, and/or with the preceding free block when they are adjacent.
   Returns the head pointer of the next block after [bp], because merging
   blocks may change the size of [bp]. */
header_t *caml_fl_merge_block (value bp)
{
  value prev, cur;
  header_t *adj;
  header_t hd = Hd_val (bp);
  mlsize_t prev_wosz;

  caml_fl_cur_wsz += Whsize_hd (hd);

#ifdef DEBUG
  caml_set_fields (bp, 0, Debug_free_major);
#endif
  prev = caml_fl_merge;
  cur = Next (prev);
  /* The sweep code makes sure that this is the right place to insert
     this block: */
  Assert (prev < bp || prev == Fl_head);
  Assert (cur > bp || cur == Val_NULL);

  /* The list around the insertion point is about to change: drop the
     affected part of the first-fit cache. */
  if (policy == Policy_first_fit) truncate_flp (prev);

  /* If [last_fragment] and [bp] are adjacent, merge them. */
  if (last_fragment == Hp_bp (bp)){
    mlsize_t bp_whsz = Whsize_val (bp);
    if (bp_whsz <= Max_wosize){
      /* Absorb the fragment: [bp] moves down one header's worth and its
         size (in words) grows by one. */
      hd = Make_header (bp_whsz, 0, Caml_white);
      bp = (value) last_fragment;
      Hd_val (bp) = hd;
      /* The fragment's words re-enter the free-list accounting. */
      caml_fl_cur_wsz += Whsize_wosize (0);
    }
  }

  /* If [bp] and [cur] are adjacent, remove [cur] from the free-list
     and merge them. */
  adj = (header_t *) &Field (bp, Wosize_hd (hd));
  if (adj == Hp_val (cur)){
    value next_cur = Next (cur);
    mlsize_t cur_whsz = Whsize_val (cur);

    if (Wosize_hd (hd) + cur_whsz <= Max_wosize){
      Next (prev) = next_cur;
      /* The allocation pointer must not point at the removed block. */
      if (policy == Policy_next_fit && fl_prev == cur) fl_prev = prev;
      hd = Make_header (Wosize_hd (hd) + cur_whsz, 0, Caml_blue);
      Hd_val (bp) = hd;
      adj = (header_t *) &Field (bp, Wosize_hd (hd));
#ifdef DEBUG
      fl_last = Val_NULL;
      Next (cur) = (value) Debug_free_major;
      Hd_val (cur) = Debug_free_major;
#endif
      cur = next_cur;
    }
  }
  /* If [prev] and [bp] are adjacent merge them, else insert [bp] into
     the free-list if it is big enough. */
  prev_wosz = Wosize_val (prev);
  if ((header_t *) &Field (prev, prev_wosz) == Hp_val (bp)
      && prev_wosz + Whsize_hd (hd) < Max_wosize){
    /* Grow [prev] in place; [bp] disappears as a separate block. */
    Hd_val (prev) = Make_header (prev_wosz + Whsize_hd (hd), 0,Caml_blue);
#ifdef DEBUG
    Hd_val (bp) = Debug_free_major;
#endif
    Assert (caml_fl_merge == prev);
  }else if (Wosize_hd (hd) != 0){
    /* Link [bp] between [prev] and [cur]; it becomes the new merge point. */
    Hd_val (bp) = Bluehd_hd (hd);
    Next (bp) = cur;
    Next (prev) = bp;
    caml_fl_merge = bp;
  }else{
    /* This is a fragment.  Leave it in white but remember it for eventual
       merging with the next block. */
    last_fragment = (header_t *) bp;
    /* Fragments are excluded from the free-list word count. */
    caml_fl_cur_wsz -= Whsize_wosize (0);
  }
  return adj;
}
522
523 /* This is a heap extension. We have to insert it in the right place
524 in the free-list.
525 [caml_fl_add_blocks] can only be called right after a call to
526 [caml_fl_allocate] that returned Val_NULL.
527 Most of the heap extensions are expected to be at the end of the
528 free list. (This depends on the implementation of [malloc].)
529
530 [bp] must point to a list of blocks chained by their field 0,
531 terminated by Val_NULL, and field 1 of the first block must point to
532 the last block.
533 */
/* Add a chain of fresh blocks (a heap extension) to the free list,
   keeping the list sorted by address.  See the contract in the comment
   above: [bp] is a Val_NULL-terminated chain linked by field 0, whose
   field 1 points at the chain's last block, and this must be called right
   after [caml_fl_allocate] returned NULL (so [fl_last] is valid). */
void caml_fl_add_blocks (value bp)
{
  Assert (fl_last != Val_NULL);
  Assert (Next (fl_last) == Val_NULL);
  caml_fl_cur_wsz += Whsize_bp (bp);

  if (bp > fl_last){
    /* Common case: the extension goes at the end of the list. */
    Next (fl_last) = bp;
    if (fl_last == caml_fl_merge && (char *) bp < caml_gc_sweep_hp){
      caml_fl_merge = Field (bp, 1);
    }
    if (policy == Policy_first_fit && flp_size < FLP_MAX){
      flp [flp_size++] = fl_last;
    }
  }else{
    /* General case: walk from the head to find the insertion point. */
    value cur, prev;

    prev = Fl_head;
    cur = Next (prev);
    while (cur != Val_NULL && cur < bp){
      Assert (prev < bp || prev == Fl_head);
      /* XXX TODO: extend flp on the fly */
      prev = cur;
      cur = Next (prev);
    }
    Assert (prev < bp || prev == Fl_head);
    Assert (cur > bp || cur == Val_NULL);
    /* Splice the whole chain between [prev] and [cur]. */
    Next (Field (bp, 1)) = cur;
    Next (prev) = bp;
    /* When inserting blocks between [caml_fl_merge] and [caml_gc_sweep_hp],
       we must advance [caml_fl_merge] to the new block, so that [caml_fl_merge]
       is always the last free-list block before [caml_gc_sweep_hp]. */
    if (prev == caml_fl_merge && (char *) bp < caml_gc_sweep_hp){
      caml_fl_merge = Field (bp, 1);
    }
    if (policy == Policy_first_fit) truncate_flp (bp);
  }
}
571
572 /* Cut a block of memory into Max_wosize pieces, give them headers,
573 and optionally merge them into the free list.
574 arguments:
575 p: pointer to the first word of the block
576 size: size of the block (in words)
577 do_merge: 1 -> do merge; 0 -> do not merge
578 color: which color to give to the pieces; if [do_merge] is 1, this
579 is overridden by the merge code, but we have historically used
580 [Caml_white].
581 */
caml_make_free_blocks(value * p,mlsize_t size,int do_merge,int color)582 void caml_make_free_blocks (value *p, mlsize_t size, int do_merge, int color)
583 {
584 mlsize_t sz;
585
586 while (size > 0){
587 if (size > Whsize_wosize (Max_wosize)){
588 sz = Whsize_wosize (Max_wosize);
589 }else{
590 sz = size;
591 }
592 *(header_t *)p =
593 Make_header (Wosize_whsize (sz), 0, color);
594 if (do_merge) caml_fl_merge_block (Val_hp (p));
595 size -= sz;
596 p += sz;
597 }
598 }
599
/* Select the allocation policy and reset the corresponding search state.
   An unrecognized value leaves the current policy unchanged. */
void caml_set_allocation_policy (uintnat p)
{
  if (p == Policy_next_fit){
    fl_prev = Fl_head;
    policy = p;
  }else if (p == Policy_first_fit){
    flp_size = 0;
    beyond = Val_NULL;
    policy = p;
  }
  /* else: ignore the request. */
}
616