/*
 * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 *
 */

#include "private/gc_pmark.h"   /* includes gc_priv.h */

#ifdef GC_GCJ_SUPPORT

/*
 * This is an allocator interface tuned for gcj (the GNU static
 * java compiler).
 *
 * Each allocated object has a pointer in its first word to a vtable,
 * which for our purposes is simply a structure describing the type of
 * the object.
 * This descriptor structure contains a GC marking descriptor at offset
 * MARK_DESCR_OFFSET.
 *
 * It is hoped that this interface may also be useful for other systems,
 * possibly with some tuning of the constants.  But the immediate goal
 * is to get better gcj performance.
 *
 * We assume:
 *  1) Counting on explicit initialization of this interface is OK;
 *  2) FASTLOCK is not a significant win.
 */
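
/*
 * Illustrative sketch, not part of the collector: a minimal client-side
 * layout matching the assumptions above.  The type names below
 * ("example_vtable", "example_object") are hypothetical; what matters is
 * that the first word of every object points to a type structure that
 * stores a GC mark descriptor at offset MARK_DESCR_OFFSET.
 *
 *   struct example_vtable {
 *     // ... MARK_DESCR_OFFSET bytes of client type data ...
 *     GC_word mark_descriptor;        // consulted by the marker
 *     // ... further client type information ...
 *   };
 *
 *   struct example_object {
 *     struct example_vtable *vtable;  // must occupy the first word
 *     // ... object fields ...
 *   };
 */
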
38
39 #include "gc_gcj.h"
40 #include "private/dbg_mlc.h"
41
42 #ifdef GC_ASSERTIONS
43 GC_INNER /* variable is also used in thread_local_alloc.c */
44 #else
45 STATIC
46 #endif
47 GC_bool GC_gcj_malloc_initialized = FALSE;
48
49 int GC_gcj_kind = 0; /* Object kind for objects with descriptors */
50 /* in "vtable". */
51 int GC_gcj_debug_kind = 0;
52 /* The kind of objects that is always marked */
53 /* with a mark proc call. */
54
55 GC_INNER ptr_t * GC_gcjobjfreelist = NULL;
56
57 STATIC ptr_t * GC_gcjdebugobjfreelist = NULL;
58
GC_gcj_fake_mark_proc(word * addr GC_ATTR_UNUSED,struct GC_ms_entry * mark_stack_ptr,struct GC_ms_entry * mark_stack_limit GC_ATTR_UNUSED,word env GC_ATTR_UNUSED)59 STATIC struct GC_ms_entry * GC_gcj_fake_mark_proc(word * addr GC_ATTR_UNUSED,
60 struct GC_ms_entry *mark_stack_ptr,
61 struct GC_ms_entry * mark_stack_limit GC_ATTR_UNUSED,
62 word env GC_ATTR_UNUSED)
63 {
64 ABORT_RET("No client gcj mark proc is specified");
65 return mark_stack_ptr;
66 }

/* Caller does not hold allocation lock. */
GC_API void GC_CALL GC_init_gcj_malloc(int mp_index,
                                       void * /* really GC_mark_proc */ mp)
{
    GC_bool ignore_gcj_info;
    DCL_LOCK_STATE;

    if (mp == 0)        /* In case GC_DS_PROC is unused. */
        mp = (void *)(word)GC_gcj_fake_mark_proc;

    GC_init();  /* In case it's not already done. */
    LOCK();
    if (GC_gcj_malloc_initialized) {
        UNLOCK();
        return;
    }
    GC_gcj_malloc_initialized = TRUE;
#   ifdef GC_IGNORE_GCJ_INFO
        /* This is useful for debugging on platforms with missing getenv(). */
        ignore_gcj_info = 1;
#   else
        ignore_gcj_info = (0 != GETENV("GC_IGNORE_GCJ_INFO"));
#   endif
    if (ignore_gcj_info) {
        GC_COND_LOG_PRINTF("Gcj-style type information is disabled!\n");
    }
    /* Validate the index before it is used to access GC_mark_procs. */
    if ((unsigned)mp_index >= GC_n_mark_procs)
        ABORT("GC_init_gcj_malloc: bad index");
    GC_ASSERT(GC_mark_procs[mp_index] == (GC_mark_proc)0); /* unused */
    GC_mark_procs[mp_index] = (GC_mark_proc)(word)mp;
    /* Set up object kind gcj-style indirect descriptor. */
    GC_gcjobjfreelist = (ptr_t *)GC_new_free_list_inner();
    if (ignore_gcj_info) {
        /* Use a simple length-based descriptor, thus forcing a fully  */
        /* conservative scan.                                          */
        GC_gcj_kind = GC_new_kind_inner((void **)GC_gcjobjfreelist,
                                        /* 0 | */ GC_DS_LENGTH,
                                        TRUE, TRUE);
    } else {
        GC_gcj_kind = GC_new_kind_inner(
                          (void **)GC_gcjobjfreelist,
                          (((word)(-(signed_word)MARK_DESCR_OFFSET
                                   - GC_INDIR_PER_OBJ_BIAS))
                           | GC_DS_PER_OBJECT),
                          FALSE, TRUE);
    }
    /* Set up object kind for objects that require mark proc call. */
    if (ignore_gcj_info) {
        GC_gcj_debug_kind = GC_gcj_kind;
        GC_gcjdebugobjfreelist = GC_gcjobjfreelist;
    } else {
        GC_gcjdebugobjfreelist = (ptr_t *)GC_new_free_list_inner();
        GC_gcj_debug_kind = GC_new_kind_inner(
                                (void **)GC_gcjdebugobjfreelist,
                                GC_MAKE_PROC(mp_index,
                                             1 /* allocated with debug info */),
                                FALSE, TRUE);
    }
    UNLOCK();
}
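
/*
 * Illustrative sketch, not part of the collector: a hypothetical client
 * initialization call.  "example_mark_proc" and the index 0 are placeholder
 * assumptions; a real client reserves an unused slot in GC_mark_procs and
 * passes its own GC_mark_proc, or passes 0 for mp if GC_DS_PROC descriptors
 * are never used (the fake mark proc above then guards against misuse).
 *
 *   struct GC_ms_entry *example_mark_proc(GC_word *addr,
 *                                  struct GC_ms_entry *mark_stack_ptr,
 *                                  struct GC_ms_entry *mark_stack_limit,
 *                                  GC_word env);
 *
 *   void client_gc_setup(void)
 *   {
 *     GC_INIT();
 *     GC_init_gcj_malloc(0, (void *)example_mark_proc);
 *   }
 */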

#define GENERAL_MALLOC_INNER(lb,k) \
    GC_clear_stack(GC_generic_malloc_inner(lb, k))

#define GENERAL_MALLOC_INNER_IOP(lb,k) \
    GC_clear_stack(GC_generic_malloc_inner_ignore_off_page(lb, k))

/* We need a mechanism to release the lock and invoke finalizers.      */
/* We don't really have an opportunity to do this on a rarely executed */
/* path on which the lock is not held.  Thus we check at a             */
/* rarely executed point at which it is safe to release the lock.      */
/* We do this even where we could just call GC_INVOKE_FINALIZERS,      */
/* since it's probably cheaper and certainly more uniform.             */
/* FIXME - Consider doing the same elsewhere?                          */
static void maybe_finalize(void)
{
    static word last_finalized_no = 0;
    DCL_LOCK_STATE;

    if (GC_gc_no == last_finalized_no ||
        !EXPECT(GC_is_initialized, TRUE)) return;
    UNLOCK();
    GC_INVOKE_FINALIZERS();
    LOCK();
    last_finalized_no = GC_gc_no;
}

/* Allocate an object, clear it, and store the pointer to the          */
/* type structure (vtable in gcj).                                     */
/* This adds a byte at the end of the object if GC_malloc would.       */
#ifdef THREAD_LOCAL_ALLOC
  GC_INNER void * GC_core_gcj_malloc(size_t lb,
                                     void * ptr_to_struct_containing_descr)
#else
  GC_API GC_ATTR_MALLOC void * GC_CALL GC_gcj_malloc(size_t lb,
                                     void * ptr_to_struct_containing_descr)
#endif
{
    ptr_t op;
    ptr_t * opp;
    word lg;
    DCL_LOCK_STATE;

    GC_DBG_COLLECT_AT_MALLOC(lb);
    if (SMALL_OBJ(lb)) {
        lg = GC_size_map[lb];
        opp = &(GC_gcjobjfreelist[lg]);
        LOCK();
        op = *opp;
        if (EXPECT(op == 0, FALSE)) {
            maybe_finalize();
            op = (ptr_t)GENERAL_MALLOC_INNER((word)lb, GC_gcj_kind);
            if (0 == op) {
                GC_oom_func oom_fn = GC_oom_fn;
                UNLOCK();
                return((*oom_fn)(lb));
            }
        } else {
            *opp = obj_link(op);
            GC_bytes_allocd += GRANULES_TO_BYTES(lg);
        }
        *(void **)op = ptr_to_struct_containing_descr;
        GC_ASSERT(((void **)op)[1] == 0);
        UNLOCK();
    } else {
        LOCK();
        maybe_finalize();
        op = (ptr_t)GENERAL_MALLOC_INNER((word)lb, GC_gcj_kind);
        if (0 == op) {
            GC_oom_func oom_fn = GC_oom_fn;
            UNLOCK();
            return((*oom_fn)(lb));
        }
        *(void **)op = ptr_to_struct_containing_descr;
        UNLOCK();
    }
    return((void *) op);
}
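
/*
 * Illustrative sketch, not part of the collector: a hypothetical allocation
 * using the layout sketched near the top of this file.  The second argument
 * is stored into the first word of the new object; the remaining words are
 * returned cleared, which is what the GC_ASSERT on the second word above
 * relies on.
 *
 *   struct example_object *obj = (struct example_object *)
 *       GC_gcj_malloc(sizeof(struct example_object), &example_vtable);
 *   // obj->vtable now equals &example_vtable.
 */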

/* Similar to GC_gcj_malloc, but add debug info.  This is allocated    */
/* with GC_gcj_debug_kind.                                             */
GC_API GC_ATTR_MALLOC void * GC_CALL GC_debug_gcj_malloc(size_t lb,
                void * ptr_to_struct_containing_descr, GC_EXTRA_PARAMS)
{
    void * result;
    DCL_LOCK_STATE;

    /* We're careful to avoid extra calls, which could          */
    /* confuse the backtrace.                                   */
    LOCK();
    maybe_finalize();
    result = GC_generic_malloc_inner(lb + DEBUG_BYTES, GC_gcj_debug_kind);
    if (result == 0) {
        GC_oom_func oom_fn = GC_oom_fn;
        UNLOCK();
        GC_err_printf("GC_debug_gcj_malloc(%lu, %p) returning NULL (%s:%d)\n",
                      (unsigned long)lb, ptr_to_struct_containing_descr, s, i);
        return((*oom_fn)(lb));
    }
    *((void **)((ptr_t)result + sizeof(oh))) = ptr_to_struct_containing_descr;
    UNLOCK();
    if (!GC_debugging_started) {
        GC_start_debugging();
    }
    ADD_CALL_CHAIN(result, ra);
    return (GC_store_debug_info(result, (word)lb, s, i));
}
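
/*
 * Illustrative usage note, hedged: clients normally do not call
 * GC_debug_gcj_malloc directly; the GC_GCJ_MALLOC(sz, vtable) macro in
 * gc_gcj.h is expected to expand to this debug variant (supplying GC_EXTRAS
 * for the call site) when GC_DEBUG is defined, and to GC_gcj_malloc
 * otherwise, e.g.:
 *
 *   void *p = GC_GCJ_MALLOC(sizeof(struct example_object), &example_vtable);
 */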

/* There is no THREAD_LOCAL_ALLOC for GC_gcj_malloc_ignore_off_page(). */
GC_API GC_ATTR_MALLOC void * GC_CALL GC_gcj_malloc_ignore_off_page(size_t lb,
                                void * ptr_to_struct_containing_descr)
{
    ptr_t op;
    ptr_t * opp;
    word lg;
    DCL_LOCK_STATE;

    GC_DBG_COLLECT_AT_MALLOC(lb);
    if (SMALL_OBJ(lb)) {
        lg = GC_size_map[lb];
        opp = &(GC_gcjobjfreelist[lg]);
        LOCK();
        op = *opp;
        if (EXPECT(0 == op, FALSE)) {
            maybe_finalize();
            op = (ptr_t)GENERAL_MALLOC_INNER_IOP(lb, GC_gcj_kind);
            if (0 == op) {
                GC_oom_func oom_fn = GC_oom_fn;
                UNLOCK();
                return((*oom_fn)(lb));
            }
        } else {
            *opp = obj_link(op);
            GC_bytes_allocd += GRANULES_TO_BYTES(lg);
        }
    } else {
        LOCK();
        maybe_finalize();
        op = (ptr_t)GENERAL_MALLOC_INNER_IOP(lb, GC_gcj_kind);
        if (0 == op) {
            GC_oom_func oom_fn = GC_oom_fn;
            UNLOCK();
            return((*oom_fn)(lb));
        }
    }
    *(void **)op = ptr_to_struct_containing_descr;
    UNLOCK();
    return((void *) op);
}

#endif /* GC_GCJ_SUPPORT */