/* Memory management data structures, part 2: Heap */

/* -------------------------- Specification ---------------------------- */

#ifdef SPVW_PAGES
/* Pages is a collection of pages.
 typedef ... Pages; */
#endif

/* Heap is a collection of pages, together with some management information.
 typedef ... Heap;

 Iteration through all pages of a heap.
 map_heap(heap,pagevar,statement); */

/* -------------------------- Implementation --------------------------- */

#ifdef SPVW_PAGES

typedef Page* Pages;

typedef struct {
  Pages inuse;      /* the currently used pages */
  /* _Page reserve; - a reserve-page ?? */
  /* heap for objects of fixed length: */
  Pages lastused;   /* a cache for the last used page */
  uintM misaligned; /* a misalignment that must be applied to all objects */
} Heap;

#define map_heap(heap,pagevar,statement)                \
  { AVL_map((heap).inuse,pagevar,statement); }

#endif

#ifdef SPVW_BLOCKS

typedef Page Pages;

#ifdef GENERATIONAL_GC
/* For each physical memory page of the old generation we record which
 pointers to objects of the new generation this page contains, so that we
 do not have to access the page itself.
 As long as the page is not written to, this information stays up to date.
 But once the page has been written to, we must regenerate this information
 at the next GC. Additionally, this should be done without accessing the
 page before it or after it. */
typedef struct {
  gcv_object_t* p;    /* address of the pointer, within an old object */
  gcv_object_t o;     /* o = *p, pointer to a new object */
} old_new_pointer_t;
typedef struct {
  /* traversal of the pointers in the page requires the following:
     continuation of the last object on the previous page: */
  gcv_object_t* continued_addr;
  uintC continued_count;
  /* the first object that begins at this page (or later): */
  aint firstobject;
  /* the cache of pointers to objects of the new generation: */
  int protection; /* PROT_NONE : only the cache is valid.
                     PROT_READ : both page and cache are valid.
                     PROT_READ_WRITE : only the page is valid. */
  uintL cache_size;         /* number of cached pointers */
  old_new_pointer_t* cache; /* cache of all pointers into the new generation */
#if defined(MULTITHREAD)
  /* during fault handling, only a single thread at a time may change this page cache */
  spinlock_t cache_lock;
#endif
} physpage_state_t;

#if defined(MULTITHREAD)
/* during GEN1 of the GC the highest bit of cache_size marks whether the
   protection of the page should be preserved (because it contains
   pinned objects).
   Used only during GEN1 of the GC!!! Never set outside GC. */
 #define physpage_pinned_mask  ((uintL)(bit(intLsize-1)))
 #define physpage_pin_marked(p) ((p)->cache_size & physpage_pinned_mask)
 #define physpage_pin_mark(p) ((p)->cache_size |= physpage_pinned_mask)
#endif /* MULTITHREAD & GENERATIONAL_GC */
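
/* A minimal illustrative sketch (the helper name physpage_walk_cache and the
   callback signature are assumptions, not part of the original code): visit
   every cached old->new pointer of a physical page without touching the page
   itself, which is what allows the page to stay PROT_NONE during GC. */
static void physpage_walk_cache (physpage_state_t* physpage,
                                 void (*visit) (gcv_object_t* ptr, gcv_object_t obj)) {
  var uintL count = physpage->cache_size;
  var old_new_pointer_t* entry = physpage->cache;
 #if defined(MULTITHREAD)
  count &= ~physpage_pinned_mask; /* strip the GEN1 pin mark (see above) */
 #endif
  for (; count > 0; count--, entry++)
    visit(entry->p,entry->o);
}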
#endif /* GENERATIONAL_GC */

typedef struct {
  Pages pages;
  #if defined(SPVW_PURE_BLOCKS) || (defined(SPVW_MIXED_BLOCKS) && defined(TRIVIALMAP_MEMORY))
  aint heap_limit;
  #if !defined(SPVW_MIXED_BLOCKS_OPPOSITE) /* SPVW_PURE_BLOCKS || SPVW_MIXED_BLOCKS_STAGGERED */
  aint heap_hardlimit;
  #endif
  #endif
  #ifdef GENERATIONAL_GC
  aint heap_gen0_start;
  aint heap_gen0_end;
  aint heap_gen1_start;
  physpage_state_t* physpages;
  #endif
  #ifdef MULTITHREAD
  /* a list of heap holes (simple vectors) is kept here so that we can reuse
     these heap areas for regular allocations */
  aint holes_list;
  #endif
} Heap;

#ifdef MULTITHREAD
/* Heap holes are "filled" with dummy simple bit vectors. We reuse larger holes
 (> mmap_pagesize) by placing the structure below in sbvector->data, thus
 forming a linked list of such holes. */
typedef struct {
  uintM hh_size; /* size in bytes of the hole */
  aint hh_next;  /* pointer to the next hole */
} heap_hole;
/* the smallest hole size that we will consider for reuse; holes smaller
   than this are ignored */
#define MIN_HOLE_SIZE_FOR_REUSE mmap_pagesize
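
/* A minimal illustrative sketch (the function name heap_find_hole is
   hypothetical, and it assumes that holes_list and hh_next hold the addresses
   of the heap_hole records themselves and that 0 terminates the list): find
   the first hole large enough for `need` bytes. */
static heap_hole* heap_find_hole (Heap* heap, uintM need) {
  var aint h = heap->holes_list;
  while (h != 0) {
    var heap_hole* hole = (heap_hole*)h;
    if (hole->hh_size >= need)
      return hole;
    h = hole->hh_next;
  }
  return NULL;
}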
#endif

#define heap_start  pages.page_start
#define heap_end    pages.page_end
#if defined(SPVW_PURE_BLOCKS) || (defined(SPVW_MIXED_BLOCKS) && defined(TRIVIALMAP_MEMORY))
/* always: heap_start <= heap_end <= heap_limit. */
#if defined(SPVW_MIXED_BLOCKS_OPPOSITE)
/* resp. heap_limit <= heap_start <= heap_end. */
#endif
/* the memory between heap_start and heap_end is occupied,
 the memory between heap_end (resp. heap_start) and heap_limit is free.
 heap_limit is enlarged (resp. reduced), if necessary.
 heap_limit is divisible by physpagesize if heap_start < heap_end. If
 heap_start == heap_end, heap_limit may be equal to heap_end and thus
 == varobjects_misaligned mod physpagesize. */
#if !defined(SPVW_MIXED_BLOCKS_OPPOSITE)
/* heap_hardlimit is the biggest resp. smallest admissible value of heap_limit. */
#endif
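
/* Illustrative sketch only (heap_ordering_ok is a hypothetical helper, not
   part of the original code): the ordering invariants above, expressed as a
   predicate. It accepts both growth directions, since under
   SPVW_MIXED_BLOCKS_OPPOSITE one of the two heaps grows downwards. */
static int heap_ordering_ok (Heap* heap) {
  return ((heap->heap_start <= heap->heap_end)
          && (heap->heap_end <= heap->heap_limit))
      || ((heap->heap_limit <= heap->heap_start)
          && (heap->heap_start <= heap->heap_end));
}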
#else  /* defined(SPVW_MIXED_BLOCKS) && !defined(TRIVIALMAP_MEMORY) */
/* always: heap_start <= heap_end.
 the memory between heap_start and heap_end is occupied. */
#endif
#ifdef GENERATIONAL_GC
#ifndef SPVW_MIXED_BLOCKS_OPPOSITE
/* the generation 0 (older generation) begins at     heap_gen0_start,
                                       reaches until heap_gen0_end.
   the generation 1 (newer generation) begins at     heap_gen1_start,
                                       reaches until heap_end.
 heap_gen0_start and heap_gen1_start are divisible by physpagesize or
 (for mem.varobjects) == varobjects_misaligned mod physpagesize.
 Between heap_gen0_end and heap_gen1_start is a gap of
 less than a page.
 heap_start is either = heap_gen0_start or = heap_gen1_start. */
#else
/* the generation 0 (older generation) begins at     heap_gen0_start,
                                       reaches until heap_gen0_end.
 For mem.varobjects:
   generation 1 (newer generation)   begins at     heap_gen1_start,
                                     reaches until heap_end.
   heap_gen0_start and heap_gen1_start are divisible by physpagesize or
   == varobjects_misaligned mod physpagesize.
   Between heap_gen0_end and heap_gen1_start is a gap of
   less than a page.
   heap_start is either = heap_gen0_start or = heap_gen1_start.
 For mem.conses: */
    #define heap_gen1_end  heap_gen1_start
/*   generation 1 (newer generation) begins at     heap_start,
                                     reaches until heap_gen1_end.
   heap_gen1_end and heap_gen0_end are divisible by physpagesize.
   Between heap_gen1_end and heap_gen0_start is a gap of
   less than a page.
   heap_end is either = heap_gen1_end or = heap_gen0_end. */
#endif
/* the status of address addr (heap_gen0_start <= addr < heap_gen0_end) is
 given by physpages[(addr>>physpageshift)-(heap_gen0_start>>physpageshift)].
 physpages = NULL is possible if there was not sufficient space! */
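
/* A minimal illustrative sketch (the helper name heap_physpage_of is
   hypothetical; physpageshift is assumed to be defined elsewhere): map an
   old-generation address to its physpage_state_t using the formula above. */
static physpage_state_t* heap_physpage_of (Heap* heap, aint addr) {
  if (heap->physpages == NULL)
    return NULL; /* no per-page status available (allocation failed) */
  return &heap->physpages[(addr>>physpageshift)
                          - (heap->heap_gen0_start>>physpageshift)];
}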
#endif

#define map_heap(heap,pagevar,statement)                \
  { var Page* pagevar = &(heap).pages; statement; }

#endif
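
/* A minimal usage sketch of map_heap (illustration only; heap_used_bytes is a
   hypothetical helper, and page_start/page_end are assumed to be fields of
   Page, as the heap_start/heap_end aliases above suggest): sum the occupied
   bytes over all pages of a heap. */
static uintM heap_used_bytes (Heap* heap) {
  var uintM total = 0;
  map_heap(*heap,page,{ total += (uintM)(page->page_end - page->page_start); });
  return total;
}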