/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=8 sw=4 et tw=78:
 *
 * ***** BEGIN LICENSE BLOCK *****
 * Version: MPL 1.1/GPL 2.0/LGPL 2.1
 *
 * The contents of this file are subject to the Mozilla Public License Version
 * 1.1 (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 * http://www.mozilla.org/MPL/
 *
 * Software distributed under the License is distributed on an "AS IS" basis,
 * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
 * for the specific language governing rights and limitations under the
 * License.
 *
 * The Original Code is Mozilla Communicator client code, released
 * March 31, 1998.
 *
 * The Initial Developer of the Original Code is
 * Netscape Communications Corporation.
 * Portions created by the Initial Developer are Copyright (C) 1998
 * the Initial Developer. All Rights Reserved.
 *
 * Contributor(s):
 *
 * Alternatively, the contents of this file may be used under the terms of
 * either of the GNU General Public License Version 2 or later (the "GPL"),
 * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
 * in which case the provisions of the GPL or the LGPL are applicable instead
 * of those above. If you wish to allow use of your version of this file only
 * under the terms of either the GPL or the LGPL, and not to allow others to
 * use your version of this file under the terms of the MPL, indicate your
 * decision by deleting the provisions above and replace them with the notice
 * and other provisions required by the GPL or the LGPL. If you do not delete
 * the provisions above, a recipient may use your version of this file under
 * the terms of any one of the MPL, the GPL or the LGPL.
 *
 * ***** END LICENSE BLOCK ***** */

/*
 * JS Mark-and-Sweep Garbage Collector.
 *
 * This GC allocates fixed-sized things with sizes up to GC_NBYTES_MAX (see
 * jsgc.h). It allocates from a special GC arena pool with each arena allocated
 * using malloc. It uses an ideally parallel array of flag bytes to hold the
 * mark bit, finalizer type index, etc.
 *
 * XXX swizzle page to freelist for better locality of reference
 */
#include "jsstddef.h"
#include <stdlib.h>     /* for free */
#include <string.h>     /* for memset used when DEBUG */
#include "jstypes.h"
#include "jsutil.h" /* Added by JSIFY */
#include "jshash.h" /* Added by JSIFY */
#include "jsapi.h"
#include "jsatom.h"
#include "jsbit.h"
#include "jsclist.h"
#include "jscntxt.h"
#include "jsconfig.h"
#include "jsdbgapi.h"
#include "jsexn.h"
#include "jsfun.h"
#include "jsgc.h"
#include "jsinterp.h"
#include "jsiter.h"
#include "jslock.h"
#include "jsnum.h"
#include "jsobj.h"
#include "jsscope.h"
#include "jsscript.h"
#include "jsstr.h"

#if JS_HAS_XML_SUPPORT
#include "jsxml.h"
#endif

/*
 * GC arena sizing depends on amortizing arena overhead using a large number
 * of things per arena, and on the thing/flags ratio of 8:1 on most platforms.
 *
 * On 64-bit platforms, we would have half as many things per arena because
 * pointers are twice as big, so we double the bytes for things per arena.
 * This preserves the 1024-byte flags sub-arena size, which relates to the
 * GC_PAGE_SIZE (see below for why).
 */
#if JS_BYTES_PER_WORD == 8
# define GC_THINGS_SHIFT 14     /* 16KB for things on Alpha, etc. */
#else
# define GC_THINGS_SHIFT 13     /* 8KB for things on most platforms */
#endif
#define GC_THINGS_SIZE  JS_BIT(GC_THINGS_SHIFT)
#define GC_FLAGS_SIZE   (GC_THINGS_SIZE / sizeof(JSGCThing))

/*
 * A GC arena contains one flag byte for each thing in its heap, and supports
 * O(1) lookup of a flag given its thing's address.
 *
 * To implement this, we take advantage of the thing/flags numerology: given
 * the 8K bytes worth of GC-things, there are 1K flag bytes. Within each 9K
 * allocation for things+flags there are always 8 consecutive 1K-pages each
 * aligned on a 1K boundary. We use these pages to allocate things and the
 * remaining 1K of space before and after the aligned pages to store flags.
 * If we are really lucky and things+flags starts on a 1K boundary, then
 * flags would consist of a single 1K chunk that comes after 8K of things.
 * Otherwise there are 2 chunks of flags, one before and one after things.
 *
 * To be able to find the flag byte for a particular thing, we put a
 * JSGCPageInfo record at the beginning of each 1K-aligned page to hold that
 * page's offset from the beginning of things+flags allocation and we allocate
 * things after this record. Thus for each thing |thing_address & ~1023|
 * gives the address of a JSGCPageInfo record from which we read page_offset.
 * Due to page alignment
 *  (page_offset & ~1023) + (thing_address & 1023)
 * gives thing_offset from the beginning of 8K paged things. We then divide
 * thing_offset by sizeof(JSGCThing) to get thing_index.
 *
 * Now |page_address - page_offset| is things+flags arena_address and
 * (page_offset & 1023) is the offset of the first page from the start of
 * things+flags area. Thus if
 *  thing_index < (page_offset & 1023)
 * then
 *  allocation_start_address + thing_index < address_of_the_first_page
 * and we use
 *  allocation_start_address + thing_index
 * as the address to store thing's flags. If
 *  thing_index >= (page_offset & 1023),
 * then we use the chunk of flags that comes after the pages with things
 * and calculate the address for the flag byte as
 *  address_of_the_first_page + 8K + (thing_index - (page_offset & 1023))
 * which is just
 *  allocation_start_address + thing_index + 8K.
 *
 * When we allocate things with size equal to sizeof(JSGCThing), the overhead
 * of this scheme for 32-bit platforms is (8+8*(8+1))/(8+9K) or 0.87%
 * (assuming 4 bytes for each JSGCArena header, and 8 bytes for each
 * JSGCThing and JSGCPageInfo). When thing_size > 8, the scheme wastes the
 * flag byte for each extra 8 bytes beyond sizeof(JSGCThing) in thing_size
 * and the overhead is close to 1/8 or 12.5%.
 * FIXME: How can we avoid this overhead?
 *
 * Here's some ASCII art showing an arena:
 *
 *   split or the first 1K-aligned address.
 *     |
 *     V
 *  +--+-------+-------+-------+-------+-------+-------+-------+-------+-----+
 *  |fB|  tp0  |  tp1  |  tp2  |  tp3  |  tp4  |  tp5  |  tp6  |  tp7  | fA  |
 *  +--+-------+-------+-------+-------+-------+-------+-------+-------+-----+
 *              ^                                 ^
 *  tI ---------+                                 |
 *  tJ -------------------------------------------+
 *
 *  - fB are the "before split" flags, fA are the "after split" flags
 *  - tp0-tp7 are the 8 thing pages
 *  - thing tI points into tp1, whose flags are below the split, in fB
 *  - thing tJ points into tp5, clearly above the split
 *
 * In general, one of the thing pages will have some of its things' flags on
 * the low side of the split, and the rest of its things' flags on the high
 * side.  All the other pages have flags only below or only above.
 *
 * (If we need to implement card-marking for an incremental GC write barrier,
 * we can replace the word-sized offsetInArena in JSGCPageInfo by a pair of
 * uint8 card_mark and uint16 offsetInArena fields, as the offset cannot
 * exceed GC_THINGS_SIZE. This would give an extremely efficient write
 * barrier: when mutating an object obj, just store a 1 byte at
 * (uint8 *) ((jsuword)obj & ~1023) on 32-bit platforms.)
 */
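
/*
 * Worked example of the lookup above (illustrative only; 32-bit sizes,
 * sizeof(JSGCThing) == 8, and the base address are assumptions). Suppose
 * things+flags starts at base = 0x20008. The first 1K-aligned page is then
 * 0x20400, so the "before split" flags fB occupy the 1016 bytes at
 * 0x20008..0x203FF, the thing pages tp0-tp7 occupy 0x20400..0x223FF, and
 * the 8 "after split" flag bytes fA sit at 0x22400..0x22407. Every page
 * stores offsetInArena = page_address - base, so tp0 stores 0x3F8 and
 * (page_offset & 1023) == 1016 for all pages. For a thing at 0x20410
 * (inside tp0, just past its 8-byte JSGCPageInfo):
 *
 *  thing_offset = (0x3F8 & ~1023) + (0x20410 & 1023) = 16
 *  thing_index  = 16 / sizeof(JSGCThing) = 2
 *
 * Since 2 < 1016 the flag byte lives below the split at base + 2 = 0x2000A,
 * exactly what js_GetGCThingFlags computes below.
 */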
#define GC_PAGE_SHIFT   10
#define GC_PAGE_MASK    ((jsuword) JS_BITMASK(GC_PAGE_SHIFT))
#define GC_PAGE_SIZE    JS_BIT(GC_PAGE_SHIFT)
#define GC_PAGE_COUNT   (1 << (GC_THINGS_SHIFT - GC_PAGE_SHIFT))

typedef struct JSGCPageInfo {
    jsuword     offsetInArena;          /* offset from the arena start */
    jsuword     unscannedBitmap;        /* bitset for fast search of marked
                                           but not yet scanned GC things */
} JSGCPageInfo;

struct JSGCArena {
    JSGCArenaList       *list;          /* allocation list for the arena */
    JSGCArena           *prev;          /* link field for allocation list */
    JSGCArena           *prevUnscanned; /* link field for the list of arenas
                                           with marked but not yet scanned
                                           things */
    jsuword             unscannedPages; /* bitset for fast search of pages
                                           with marked but not yet scanned
                                           things */
    uint8               base[1];        /* things+flags allocation area */
};

#define GC_ARENA_SIZE                                                         \
    (offsetof(JSGCArena, base) + GC_THINGS_SIZE + GC_FLAGS_SIZE)
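
/*
 * For concreteness (an illustrative assumption, not a guarantee): with
 * 4-byte words and no struct padding, offsetof(JSGCArena, base) is 16, so
 * GC_ARENA_SIZE is 16 + 8192 + 1024 = 9232 bytes, the "things+flags plus a
 * small header" figure discussed above.
 */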

#define FIRST_THING_PAGE(a)                                                   \
    (((jsuword)(a)->base + GC_FLAGS_SIZE - 1) & ~GC_PAGE_MASK)

#define PAGE_TO_ARENA(pi)                                                     \
    ((JSGCArena *)((jsuword)(pi) - (pi)->offsetInArena                        \
                   - offsetof(JSGCArena, base)))

#define PAGE_INDEX(pi)                                                        \
    ((size_t)((pi)->offsetInArena >> GC_PAGE_SHIFT))

#define THING_TO_PAGE(thing)                                                  \
    ((JSGCPageInfo *)((jsuword)(thing) & ~GC_PAGE_MASK))

/*
 * Given a thing size n, return the size of the gap from the page start before
 * the first thing.  We know that any n not a power of two packs from
 * the end of the page leaving at least enough room for one JSGCPageInfo, but
 * not for another thing, at the front of the page (JS_ASSERTs below insist
 * on this).
 *
 * This works because all allocations are a multiple of sizeof(JSGCThing) ==
 * sizeof(JSGCPageInfo) in size.
 */
#define PAGE_THING_GAP(n) (((n) & ((n) - 1)) ? (GC_PAGE_SIZE % (n)) : (n))
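
/*
 * For example (illustrative only): when n == 16, a power of two, the gap is
 * n itself, so things start 16 bytes into the page, leaving room for the
 * 8-byte JSGCPageInfo; when n == 24, the gap is GC_PAGE_SIZE % 24 == 16,
 * again at least sizeof(JSGCPageInfo) but less than another thing.
 */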

#ifdef JS_THREADSAFE
/*
 * The maximum number of things to put on the local free list by taking
 * several things from the global free list or from the tail of the last
 * allocated arena to amortize the cost of rt->gcLock.
 *
 * We use 8 based on benchmarks from bug 312238.
 */
#define MAX_THREAD_LOCAL_THINGS 8

#endif

JS_STATIC_ASSERT(sizeof(JSGCThing) == sizeof(JSGCPageInfo));
JS_STATIC_ASSERT(sizeof(JSGCThing) >= sizeof(JSObject));
JS_STATIC_ASSERT(sizeof(JSGCThing) >= sizeof(JSString));
JS_STATIC_ASSERT(sizeof(JSGCThing) >= sizeof(jsdouble));
JS_STATIC_ASSERT(GC_FLAGS_SIZE >= GC_PAGE_SIZE);
JS_STATIC_ASSERT(sizeof(JSStackHeader) >= 2 * sizeof(jsval));

/*
 * JSPtrTable capacity growth descriptor. The table grows by powers of two
 * starting from capacity JSPtrTableInfo.minCapacity, but switching to linear
 * growth when capacity reaches JSPtrTableInfo.linearGrowthThreshold.
 */
typedef struct JSPtrTableInfo {
    uint16      minCapacity;
    uint16      linearGrowthThreshold;
} JSPtrTableInfo;

#define GC_ITERATOR_TABLE_MIN     4
#define GC_ITERATOR_TABLE_LINEAR  1024

static const JSPtrTableInfo iteratorTableInfo = {
    GC_ITERATOR_TABLE_MIN,
    GC_ITERATOR_TABLE_LINEAR
};

/* Calculate table capacity based on the current value of JSPtrTable.count. */
static size_t
PtrTableCapacity(size_t count, const JSPtrTableInfo *info)
{
    size_t linear, log, capacity;

    linear = info->linearGrowthThreshold;
    JS_ASSERT(info->minCapacity <= linear);

    if (count == 0) {
        capacity = 0;
    } else if (count < linear) {
        log = JS_CEILING_LOG2W(count);
        JS_ASSERT(log != JS_BITS_PER_WORD);
        capacity = (size_t)1 << log;
        if (capacity < info->minCapacity)
            capacity = info->minCapacity;
    } else {
        capacity = JS_ROUNDUP(count, linear);
    }

    JS_ASSERT(capacity >= count);
    return capacity;
}
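
/*
 * A minimal sanity-check sketch of the growth policy above, using the
 * iterator table descriptor. Illustrative only, hence not compiled; the
 * expected values follow directly from PtrTableCapacity.
 */
#if 0
static void
PtrTableCapacityExamples(void)
{
    /* Power-of-two growth below the linear threshold, clamped to the min. */
    JS_ASSERT(PtrTableCapacity(0, &iteratorTableInfo) == 0);
    JS_ASSERT(PtrTableCapacity(3, &iteratorTableInfo) == 4);
    JS_ASSERT(PtrTableCapacity(100, &iteratorTableInfo) == 128);

    /* Linear growth at and above the threshold: round up to 1024. */
    JS_ASSERT(PtrTableCapacity(1024, &iteratorTableInfo) == 1024);
    JS_ASSERT(PtrTableCapacity(1500, &iteratorTableInfo) == 2048);
}
#endif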

static void
FreePtrTable(JSPtrTable *table, const JSPtrTableInfo *info)
{
    if (table->array) {
        JS_ASSERT(table->count > 0);
        free(table->array);
        table->array = NULL;
        table->count = 0;
    }
    JS_ASSERT(table->count == 0);
}

static JSBool
AddToPtrTable(JSContext *cx, JSPtrTable *table, const JSPtrTableInfo *info,
              void *ptr)
{
    size_t count, capacity;
    void **array;

    count = table->count;
    capacity = PtrTableCapacity(count, info);

    if (count == capacity) {
        if (capacity < info->minCapacity) {
            JS_ASSERT(capacity == 0);
            JS_ASSERT(!table->array);
            capacity = info->minCapacity;
        } else {
            /*
             * Simplify overflow detection by assuming that a pointer is
             * bigger than a byte.
             */
            JS_STATIC_ASSERT(2 <= sizeof table->array[0]);
            capacity = (capacity < info->linearGrowthThreshold)
                       ? 2 * capacity
                       : capacity + info->linearGrowthThreshold;
            if (capacity > (size_t)-1 / sizeof table->array[0])
                goto bad;
        }
        array = (void **) realloc(table->array,
                                  capacity * sizeof table->array[0]);
        if (!array)
            goto bad;
#ifdef DEBUG
        memset(array + count, JS_FREE_PATTERN,
               (capacity - count) * sizeof table->array[0]);
#endif
        table->array = array;
    }

    table->array[count] = ptr;
    table->count = count + 1;

    return JS_TRUE;

  bad:
    JS_ReportOutOfMemory(cx);
    return JS_FALSE;
}

static void
ShrinkPtrTable(JSPtrTable *table, const JSPtrTableInfo *info,
               size_t newCount)
{
    size_t oldCapacity, capacity;
    void **array;

    JS_ASSERT(newCount <= table->count);
    if (newCount == table->count)
        return;

    oldCapacity = PtrTableCapacity(table->count, info);
    table->count = newCount;
    capacity = PtrTableCapacity(newCount, info);

    if (oldCapacity != capacity) {
        array = table->array;
        JS_ASSERT(array);
        if (capacity == 0) {
            free(array);
            table->array = NULL;
            return;
        }
        array = (void **) realloc(array, capacity * sizeof array[0]);
        if (array)
            table->array = array;
    }
#ifdef DEBUG
    memset(table->array + newCount, JS_FREE_PATTERN,
           (capacity - newCount) * sizeof table->array[0]);
#endif
}

#ifdef JS_GCMETER
# define METER(x) x
#else
# define METER(x) ((void) 0)
#endif

static JSBool
NewGCArena(JSRuntime *rt, JSGCArenaList *arenaList)
{
    JSGCArena *a;
    jsuword offset;
    JSGCPageInfo *pi;
    uint32 *bytesptr;

    /* Check if we are allowed and can allocate a new arena. */
    if (rt->gcBytes >= rt->gcMaxBytes)
        return JS_FALSE;
    a = (JSGCArena *)malloc(GC_ARENA_SIZE);
    if (!a)
        return JS_FALSE;

    /* Initialize the JSGCPageInfo records at the start of every thing page. */
    offset = (GC_PAGE_SIZE - ((jsuword)a->base & GC_PAGE_MASK)) & GC_PAGE_MASK;
    JS_ASSERT((jsuword)a->base + offset == FIRST_THING_PAGE(a));
    do {
        pi = (JSGCPageInfo *) (a->base + offset);
        pi->offsetInArena = offset;
        pi->unscannedBitmap = 0;
        offset += GC_PAGE_SIZE;
    } while (offset < GC_THINGS_SIZE);

    METER(++arenaList->stats.narenas);
    METER(arenaList->stats.maxarenas
          = JS_MAX(arenaList->stats.maxarenas, arenaList->stats.narenas));

    a->list = arenaList;
    a->prev = arenaList->last;
    a->prevUnscanned = NULL;
    a->unscannedPages = 0;
    arenaList->last = a;
    arenaList->lastLimit = 0;

    bytesptr = (arenaList == &rt->gcArenaList[0])
               ? &rt->gcBytes
               : &rt->gcPrivateBytes;
    *bytesptr += GC_ARENA_SIZE;

    return JS_TRUE;
}

static void
DestroyGCArena(JSRuntime *rt, JSGCArenaList *arenaList, JSGCArena **ap)
{
    JSGCArena *a;
    uint32 *bytesptr;

    a = *ap;
    JS_ASSERT(a);
    bytesptr = (arenaList == &rt->gcArenaList[0])
               ? &rt->gcBytes
               : &rt->gcPrivateBytes;
    JS_ASSERT(*bytesptr >= GC_ARENA_SIZE);
    *bytesptr -= GC_ARENA_SIZE;
    METER(rt->gcStats.afree++);
    METER(--arenaList->stats.narenas);
    if (a == arenaList->last)
        arenaList->lastLimit = (uint16)(a->prev ? GC_THINGS_SIZE : 0);
    *ap = a->prev;

#ifdef DEBUG
    memset(a, JS_FREE_PATTERN, GC_ARENA_SIZE);
#endif
    free(a);
}

static void
InitGCArenaLists(JSRuntime *rt)
{
    uintN i, thingSize;
    JSGCArenaList *arenaList;

    for (i = 0; i < GC_NUM_FREELISTS; i++) {
        arenaList = &rt->gcArenaList[i];
        thingSize = GC_FREELIST_NBYTES(i);
        JS_ASSERT((size_t)(uint16)thingSize == thingSize);
        arenaList->last = NULL;
        arenaList->lastLimit = 0;
        arenaList->thingSize = (uint16)thingSize;
        arenaList->freeList = NULL;
        METER(memset(&arenaList->stats, 0, sizeof arenaList->stats));
    }
}

static void
FinishGCArenaLists(JSRuntime *rt)
{
    uintN i;
    JSGCArenaList *arenaList;

    for (i = 0; i < GC_NUM_FREELISTS; i++) {
        arenaList = &rt->gcArenaList[i];
        while (arenaList->last)
            DestroyGCArena(rt, arenaList, &arenaList->last);
        arenaList->freeList = NULL;
    }
}

uint8 *
js_GetGCThingFlags(void *thing)
{
    JSGCPageInfo *pi;
    jsuword offsetInArena, thingIndex;

    pi = THING_TO_PAGE(thing);
    offsetInArena = pi->offsetInArena;
    JS_ASSERT(offsetInArena < GC_THINGS_SIZE);
    thingIndex = ((offsetInArena & ~GC_PAGE_MASK) |
                  ((jsuword)thing & GC_PAGE_MASK)) / sizeof(JSGCThing);
    JS_ASSERT(thingIndex < GC_PAGE_SIZE);
    if (thingIndex >= (offsetInArena & GC_PAGE_MASK))
        thingIndex += GC_THINGS_SIZE;
    return (uint8 *)pi - offsetInArena + thingIndex;
}

JSRuntime*
js_GetGCStringRuntime(JSString *str)
{
    JSGCPageInfo *pi;
    JSGCArenaList *list;

    pi = THING_TO_PAGE(str);
    list = PAGE_TO_ARENA(pi)->list;

    JS_ASSERT(list->thingSize == sizeof(JSGCThing));
    JS_ASSERT(GC_FREELIST_INDEX(sizeof(JSGCThing)) == 0);

    return (JSRuntime *)((uint8 *)list - offsetof(JSRuntime, gcArenaList));
}

JSBool
js_IsAboutToBeFinalized(JSContext *cx, void *thing)
{
    uint8 flags = *js_GetGCThingFlags(thing);

    return !(flags & (GCF_MARK | GCF_LOCK | GCF_FINAL));
}

typedef void (*GCFinalizeOp)(JSContext *cx, JSGCThing *thing);

#ifndef DEBUG
# define js_FinalizeDouble       NULL
#endif

#if !JS_HAS_XML_SUPPORT
# define js_FinalizeXMLNamespace NULL
# define js_FinalizeXMLQName     NULL
# define js_FinalizeXML          NULL
#endif

static GCFinalizeOp gc_finalizers[GCX_NTYPES] = {
    (GCFinalizeOp) js_FinalizeObject,           /* GCX_OBJECT */
    (GCFinalizeOp) js_FinalizeString,           /* GCX_STRING */
    (GCFinalizeOp) js_FinalizeDouble,           /* GCX_DOUBLE */
    (GCFinalizeOp) js_FinalizeString,           /* GCX_MUTABLE_STRING */
    NULL,                                       /* GCX_PRIVATE */
    (GCFinalizeOp) js_FinalizeXMLNamespace,     /* GCX_NAMESPACE */
    (GCFinalizeOp) js_FinalizeXMLQName,         /* GCX_QNAME */
    (GCFinalizeOp) js_FinalizeXML,              /* GCX_XML */
    NULL,                                       /* GCX_EXTERNAL_STRING */
    NULL,
    NULL,
    NULL,
    NULL,
    NULL,
    NULL,
    NULL
};

#ifdef GC_MARK_DEBUG
static const char newborn_external_string[] = "newborn external string";

static const char *gc_typenames[GCX_NTYPES] = {
    "newborn object",
    "newborn string",
    "newborn double",
    "newborn mutable string",
    "newborn private",
    "newborn Namespace",
    "newborn QName",
    "newborn XML",
    newborn_external_string,
    newborn_external_string,
    newborn_external_string,
    newborn_external_string,
    newborn_external_string,
    newborn_external_string,
    newborn_external_string,
    newborn_external_string
};
#endif

intN
js_ChangeExternalStringFinalizer(JSStringFinalizeOp oldop,
                                 JSStringFinalizeOp newop)
{
    uintN i;

    for (i = GCX_EXTERNAL_STRING; i < GCX_NTYPES; i++) {
        if (gc_finalizers[i] == (GCFinalizeOp) oldop) {
            gc_finalizers[i] = (GCFinalizeOp) newop;
            return (intN) i;
        }
    }
    return -1;
}
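
/*
 * A sketch of how an embedding claims an external string type via the
 * function above (illustrative only, hence not compiled; my_free_chars is a
 * hypothetical embedder routine, not part of this file).
 */
#if 0
static void
my_finalize_string(JSContext *cx, JSString *str)
{
    /* The embedding owns the characters of its external strings. */
    my_free_chars(JSSTRING_CHARS(str));
}

static intN
my_reserve_string_type(void)
{
    /*
     * Passing NULL for oldop claims the first unused slot in the
     * GCX_EXTERNAL_STRING..GCX_NTYPES-1 range; -1 means no slot is free.
     */
    return js_ChangeExternalStringFinalizer(NULL, my_finalize_string);
}
#endif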

/* This is compatible with JSDHashEntryStub. */
typedef struct JSGCRootHashEntry {
    JSDHashEntryHdr hdr;
    void            *root;
    const char      *name;
} JSGCRootHashEntry;

/* Initial size of the gcRootsHash table (SWAG, small enough to amortize). */
#define GC_ROOTS_SIZE   256
#define GC_FINALIZE_LEN 1024

JSBool
js_InitGC(JSRuntime *rt, uint32 maxbytes)
{
    InitGCArenaLists(rt);
    if (!JS_DHashTableInit(&rt->gcRootsHash, JS_DHashGetStubOps(), NULL,
                           sizeof(JSGCRootHashEntry), GC_ROOTS_SIZE)) {
        rt->gcRootsHash.ops = NULL;
        return JS_FALSE;
    }
    rt->gcLocksHash = NULL;     /* create lazily */

    /*
     * Separate gcMaxMallocBytes from gcMaxBytes but initialize to maxbytes
     * for default backward API compatibility.
     */
    rt->gcMaxBytes = rt->gcMaxMallocBytes = maxbytes;

    return JS_TRUE;
}

#ifdef JS_GCMETER
JS_FRIEND_API(void)
js_DumpGCStats(JSRuntime *rt, FILE *fp)
{
    uintN i;
    size_t totalThings, totalMaxThings, totalBytes;

    fprintf(fp, "\nGC allocation statistics:\n");

#define UL(x)       ((unsigned long)(x))
#define ULSTAT(x)   UL(rt->gcStats.x)
    totalThings = 0;
    totalMaxThings = 0;
    totalBytes = 0;
    for (i = 0; i < GC_NUM_FREELISTS; i++) {
        JSGCArenaList *list = &rt->gcArenaList[i];
        JSGCArenaStats *stats = &list->stats;
        if (stats->maxarenas == 0) {
            fprintf(fp, "ARENA LIST %u (thing size %lu): NEVER USED\n",
                    i, UL(GC_FREELIST_NBYTES(i)));
            continue;
        }
        fprintf(fp, "ARENA LIST %u (thing size %lu):\n",
                i, UL(GC_FREELIST_NBYTES(i)));
        fprintf(fp, "                     arenas: %lu\n", UL(stats->narenas));
        fprintf(fp, "                 max arenas: %lu\n", UL(stats->maxarenas));
        fprintf(fp, "                     things: %lu\n", UL(stats->nthings));
        fprintf(fp, "                 max things: %lu\n", UL(stats->maxthings));
        fprintf(fp, "                  free list: %lu\n", UL(stats->freelen));
        fprintf(fp, "          free list density: %.1f%%\n",
                stats->narenas == 0
                ? 0.0
                : (100.0 * list->thingSize * (jsdouble)stats->freelen /
                   (GC_THINGS_SIZE * (jsdouble)stats->narenas)));
        fprintf(fp, "  average free list density: %.1f%%\n",
                stats->totalarenas == 0
                ? 0.0
                : (100.0 * list->thingSize * (jsdouble)stats->totalfreelen /
                   (GC_THINGS_SIZE * (jsdouble)stats->totalarenas)));
        fprintf(fp, "                   recycles: %lu\n", UL(stats->recycle));
        fprintf(fp, "        recycle/alloc ratio: %.2f\n",
                (jsdouble)stats->recycle /
                (jsdouble)(stats->totalnew - stats->recycle));
        totalThings += stats->nthings;
        totalMaxThings += stats->maxthings;
        totalBytes += GC_FREELIST_NBYTES(i) * stats->nthings;
    }
    fprintf(fp, "TOTAL STATS:\n");
    fprintf(fp, "     public bytes allocated: %lu\n", UL(rt->gcBytes));
    fprintf(fp, "    private bytes allocated: %lu\n", UL(rt->gcPrivateBytes));
    fprintf(fp, "             alloc attempts: %lu\n", ULSTAT(alloc));
#ifdef JS_THREADSAFE
    fprintf(fp, "        alloc without locks: %lu\n", ULSTAT(localalloc));
#endif
    fprintf(fp, "            total GC things: %lu\n", UL(totalThings));
    fprintf(fp, "        max total GC things: %lu\n", UL(totalMaxThings));
    fprintf(fp, "             GC things size: %lu\n", UL(totalBytes));
    fprintf(fp, "allocation retries after GC: %lu\n", ULSTAT(retry));
    fprintf(fp, "        allocation failures: %lu\n", ULSTAT(fail));
    fprintf(fp, "         things born locked: %lu\n", ULSTAT(lockborn));
    fprintf(fp, "           valid lock calls: %lu\n", ULSTAT(lock));
    fprintf(fp, "         valid unlock calls: %lu\n", ULSTAT(unlock));
    fprintf(fp, "       mark recursion depth: %lu\n", ULSTAT(depth));
    fprintf(fp, "     maximum mark recursion: %lu\n", ULSTAT(maxdepth));
    fprintf(fp, "     mark C recursion depth: %lu\n", ULSTAT(cdepth));
    fprintf(fp, "   maximum mark C recursion: %lu\n", ULSTAT(maxcdepth));
    fprintf(fp, "      delayed scan bag adds: %lu\n", ULSTAT(unscanned));
#ifdef DEBUG
    fprintf(fp, "  max delayed scan bag size: %lu\n", ULSTAT(maxunscanned));
#endif
    fprintf(fp, "   maximum GC nesting level: %lu\n", ULSTAT(maxlevel));
    fprintf(fp, "potentially useful GC calls: %lu\n", ULSTAT(poke));
    fprintf(fp, "           useless GC calls: %lu\n", ULSTAT(nopoke));
    fprintf(fp, "  thing arenas freed so far: %lu\n", ULSTAT(afree));
    fprintf(fp, "     stack segments scanned: %lu\n", ULSTAT(stackseg));
    fprintf(fp, "stack segment slots scanned: %lu\n", ULSTAT(segslots));
    fprintf(fp, "reachable closeable objects: %lu\n", ULSTAT(nclose));
    fprintf(fp, "    max reachable closeable: %lu\n", ULSTAT(maxnclose));
    fprintf(fp, "      scheduled close hooks: %lu\n", ULSTAT(closelater));
    fprintf(fp, "  max scheduled close hooks: %lu\n", ULSTAT(maxcloselater));
#undef UL
#undef ULSTAT

#ifdef JS_ARENAMETER
    JS_DumpArenaStats(fp);
#endif
}
#endif

#ifdef DEBUG
static void
CheckLeakedRoots(JSRuntime *rt);
#endif

void
js_FinishGC(JSRuntime *rt)
{
#ifdef JS_ARENAMETER
    JS_DumpArenaStats(stdout);
#endif
#ifdef JS_GCMETER
    js_DumpGCStats(rt, stdout);
#endif

    FreePtrTable(&rt->gcIteratorTable, &iteratorTableInfo);
#if JS_HAS_GENERATORS
    rt->gcCloseState.reachableList = NULL;
    METER(rt->gcStats.nclose = 0);
    rt->gcCloseState.todoQueue = NULL;
#endif
    FinishGCArenaLists(rt);

    if (rt->gcRootsHash.ops) {
#ifdef DEBUG
        CheckLeakedRoots(rt);
#endif
        JS_DHashTableFinish(&rt->gcRootsHash);
        rt->gcRootsHash.ops = NULL;
    }
    if (rt->gcLocksHash) {
        JS_DHashTableDestroy(rt->gcLocksHash);
        rt->gcLocksHash = NULL;
    }
}

JSBool
js_AddRoot(JSContext *cx, void *rp, const char *name)
{
    JSBool ok = js_AddRootRT(cx->runtime, rp, name);
    if (!ok)
        JS_ReportOutOfMemory(cx);
    return ok;
}

JSBool
js_AddRootRT(JSRuntime *rt, void *rp, const char *name)
{
    JSBool ok;
    JSGCRootHashEntry *rhe;

    /*
     * Due to the long-standing, but now removed, use of rt->gcLock across the
     * bulk of js_GC, API users have come to depend on JS_AddRoot etc. locking
     * properly with a racing GC, without calling JS_AddRoot from a request.
     * We have to preserve API compatibility here, now that we avoid holding
     * rt->gcLock across the mark phase (including the root hashtable mark).
     *
     * If the GC is running and we're called on another thread, wait for this
     * GC activation to finish.  We can safely wait here (in the case where we
     * are called within a request on another thread's context) without fear
     * of deadlock because the GC doesn't set rt->gcRunning until after it has
     * waited for all active requests to end.
     */
    JS_LOCK_GC(rt);
#ifdef JS_THREADSAFE
    JS_ASSERT(!rt->gcRunning || rt->gcLevel > 0);
    if (rt->gcRunning && rt->gcThread->id != js_CurrentThreadId()) {
        do {
            JS_AWAIT_GC_DONE(rt);
        } while (rt->gcLevel > 0);
    }
#endif
    rhe = (JSGCRootHashEntry *) JS_DHashTableOperate(&rt->gcRootsHash, rp,
                                                     JS_DHASH_ADD);
    if (rhe) {
        rhe->root = rp;
        rhe->name = name;
        ok = JS_TRUE;
    } else {
        ok = JS_FALSE;
    }
    JS_UNLOCK_GC(rt);
    return ok;
}

JSBool
js_RemoveRoot(JSRuntime *rt, void *rp)
{
    /*
     * Due to the JS_RemoveRootRT API, we may be called outside of a request.
     * Same synchronization drill as above in js_AddRoot.
     */
    JS_LOCK_GC(rt);
#ifdef JS_THREADSAFE
    JS_ASSERT(!rt->gcRunning || rt->gcLevel > 0);
    if (rt->gcRunning && rt->gcThread->id != js_CurrentThreadId()) {
        do {
            JS_AWAIT_GC_DONE(rt);
        } while (rt->gcLevel > 0);
    }
#endif
    (void) JS_DHashTableOperate(&rt->gcRootsHash, rp, JS_DHASH_REMOVE);
    rt->gcPoke = JS_TRUE;
    JS_UNLOCK_GC(rt);
    return JS_TRUE;
}
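
/*
 * Typical embedder-side usage of the two functions above (a sketch,
 * illustrative only and not compiled; DoSomethingWith is hypothetical):
 * root a jsval held in embedder memory for as long as it is reachable only
 * from there.
 */
#if 0
static JSBool
my_use_rooted_value(JSContext *cx, JSObject *obj)
{
    jsval v = OBJECT_TO_JSVAL(obj);
    JSBool ok;

    if (!js_AddRoot(cx, &v, "my_use_rooted_value.v"))
        return JS_FALSE;
    ok = DoSomethingWith(cx, v);    /* may trigger GC; v stays live */
    js_RemoveRoot(cx->runtime, &v);
    return ok;
}
#endif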

#ifdef DEBUG

JS_STATIC_DLL_CALLBACK(JSDHashOperator)
js_root_printer(JSDHashTable *table, JSDHashEntryHdr *hdr, uint32 i, void *arg)
{
    uint32 *leakedroots = (uint32 *)arg;
    JSGCRootHashEntry *rhe = (JSGCRootHashEntry *)hdr;

    (*leakedroots)++;
    fprintf(stderr,
            "JS engine warning: leaking GC root \'%s\' at %p\n",
            rhe->name ? (char *)rhe->name : "", rhe->root);

    return JS_DHASH_NEXT;
}

static void
CheckLeakedRoots(JSRuntime *rt)
{
    uint32 leakedroots = 0;

    /* Warn (but don't assert) debug builds of any remaining roots. */
    JS_DHashTableEnumerate(&rt->gcRootsHash, js_root_printer,
                           &leakedroots);
    if (leakedroots > 0) {
        if (leakedroots == 1) {
            fprintf(stderr,
"JS engine warning: 1 GC root remains after destroying the JSRuntime.\n"
"                   This root may point to freed memory. Objects reachable\n"
"                   through it have not been finalized.\n");
        } else {
            fprintf(stderr,
"JS engine warning: %lu GC roots remain after destroying the JSRuntime.\n"
"                   These roots may point to freed memory. Objects reachable\n"
"                   through them have not been finalized.\n",
                        (unsigned long) leakedroots);
        }
    }
}

typedef struct NamedRootDumpArgs {
    void (*dump)(const char *name, void *rp, void *data);
    void *data;
} NamedRootDumpArgs;

JS_STATIC_DLL_CALLBACK(JSDHashOperator)
js_named_root_dumper(JSDHashTable *table, JSDHashEntryHdr *hdr, uint32 number,
                     void *arg)
{
    NamedRootDumpArgs *args = (NamedRootDumpArgs *) arg;
    JSGCRootHashEntry *rhe = (JSGCRootHashEntry *)hdr;

    if (rhe->name)
        args->dump(rhe->name, rhe->root, args->data);
    return JS_DHASH_NEXT;
}

void
js_DumpNamedRoots(JSRuntime *rt,
                  void (*dump)(const char *name, void *rp, void *data),
                  void *data)
{
    NamedRootDumpArgs args;

    args.dump = dump;
    args.data = data;
    JS_DHashTableEnumerate(&rt->gcRootsHash, js_named_root_dumper, &args);
}

#endif /* DEBUG */

typedef struct GCRootMapArgs {
    JSGCRootMapFun map;
    void *data;
} GCRootMapArgs;

JS_STATIC_DLL_CALLBACK(JSDHashOperator)
js_gcroot_mapper(JSDHashTable *table, JSDHashEntryHdr *hdr, uint32 number,
                 void *arg)
{
    GCRootMapArgs *args = (GCRootMapArgs *) arg;
    JSGCRootHashEntry *rhe = (JSGCRootHashEntry *)hdr;
    intN mapflags;
    JSDHashOperator op;

    mapflags = args->map(rhe->root, rhe->name, args->data);

#if JS_MAP_GCROOT_NEXT == JS_DHASH_NEXT &&                                    \
    JS_MAP_GCROOT_STOP == JS_DHASH_STOP &&                                    \
    JS_MAP_GCROOT_REMOVE == JS_DHASH_REMOVE
    op = (JSDHashOperator)mapflags;
#else
    op = JS_DHASH_NEXT;
    if (mapflags & JS_MAP_GCROOT_STOP)
        op |= JS_DHASH_STOP;
    if (mapflags & JS_MAP_GCROOT_REMOVE)
        op |= JS_DHASH_REMOVE;
#endif

    return op;
}

uint32
js_MapGCRoots(JSRuntime *rt, JSGCRootMapFun map, void *data)
{
    GCRootMapArgs args;
    uint32 rv;

    args.map = map;
    args.data = data;
    JS_LOCK_GC(rt);
    rv = JS_DHashTableEnumerate(&rt->gcRootsHash, js_gcroot_mapper, &args);
    JS_UNLOCK_GC(rt);
    return rv;
}
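
/*
 * A sketch of a JSGCRootMapFun consumer for the function above (illustrative
 * only, hence not compiled): count the registered roots without mutating the
 * table.
 */
#if 0
static intN
count_one_root(void *rp, const char *name, void *data)
{
    ++*(uint32 *)data;
    return JS_MAP_GCROOT_NEXT;
}

static uint32
my_count_roots(JSRuntime *rt)
{
    uint32 n = 0;

    (void) js_MapGCRoots(rt, count_one_root, &n);
    return n;
}
#endif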

JSBool
js_RegisterCloseableIterator(JSContext *cx, JSObject *obj)
{
    JSRuntime *rt;
    JSBool ok;

    rt = cx->runtime;
    JS_ASSERT(!rt->gcRunning);

    JS_LOCK_GC(rt);
    ok = AddToPtrTable(cx, &rt->gcIteratorTable, &iteratorTableInfo, obj);
    JS_UNLOCK_GC(rt);
    return ok;
}

static void
CloseIteratorStates(JSContext *cx)
{
    JSRuntime *rt;
    size_t count, newCount, i;
    void **array;
    JSObject *obj;

    rt = cx->runtime;
    count = rt->gcIteratorTable.count;
    array = rt->gcIteratorTable.array;

    newCount = 0;
    for (i = 0; i != count; ++i) {
        obj = (JSObject *)array[i];
        if (js_IsAboutToBeFinalized(cx, obj))
            js_CloseIteratorState(cx, obj);
        else
            array[newCount++] = obj;
    }
    ShrinkPtrTable(&rt->gcIteratorTable, &iteratorTableInfo, newCount);
}

#if JS_HAS_GENERATORS

void
js_RegisterGenerator(JSContext *cx, JSGenerator *gen)
{
    JSRuntime *rt;

    rt = cx->runtime;
    JS_ASSERT(!rt->gcRunning);
    JS_ASSERT(rt->state != JSRTS_LANDING);
    JS_ASSERT(gen->state == JSGEN_NEWBORN);

    JS_LOCK_GC(rt);
    gen->next = rt->gcCloseState.reachableList;
    rt->gcCloseState.reachableList = gen;
    METER(rt->gcStats.nclose++);
    METER(rt->gcStats.maxnclose = JS_MAX(rt->gcStats.maxnclose,
                                         rt->gcStats.nclose));
    JS_UNLOCK_GC(rt);
}

/*
 * We do not run close hooks when the parent scope of the generator instance
 * becomes unreachable to prevent denial-of-service and resource leakage from
 * misbehaved generators.
 *
 * Called from the GC.
 */
static JSBool
CanScheduleCloseHook(JSGenerator *gen)
{
    JSObject *parent;
    JSBool canSchedule;

    /* Avoid OBJ_GET_PARENT overhead as we are in GC. */
    parent = JSVAL_TO_OBJECT(gen->obj->slots[JSSLOT_PARENT]);
    canSchedule = *js_GetGCThingFlags(parent) & GCF_MARK;
#ifdef DEBUG_igor
    if (!canSchedule) {
        fprintf(stderr, "GEN: Kill without schedule, gen=%p parent=%p\n",
                (void *)gen, (void *)parent);
    }
#endif
    return canSchedule;
}

/*
 * Check if we should delay execution of the close hook.
 *
 * Called outside GC or any locks.
 *
 * XXX The current implementation is a hack that embeds the knowledge of the
 * browser embedding pending the resolution of bug 352788. In the browser we
 * must not close any generators that came from a page that is currently in
 * the browser history. We detect that using the fact that, in the browser,
 * the scope is in history if scope->outerObject->innerObject != scope.
 */
static JSBool
ShouldDeferCloseHook(JSContext *cx, JSGenerator *gen, JSBool *defer)
{
    JSObject *parent, *obj;
    JSClass *clasp;
    JSExtendedClass *xclasp;

    /*
     * This is called outside any locks, so use thread-safe macros to access
     * the parent and classes.
     */
    *defer = JS_FALSE;
    parent = OBJ_GET_PARENT(cx, gen->obj);
    clasp = OBJ_GET_CLASS(cx, parent);
    if (clasp->flags & JSCLASS_IS_EXTENDED) {
        xclasp = (JSExtendedClass *)clasp;
        if (xclasp->outerObject) {
            obj = xclasp->outerObject(cx, parent);
            if (!obj)
                return JS_FALSE;
            OBJ_TO_INNER_OBJECT(cx, obj);
            if (!obj)
                return JS_FALSE;
            *defer = obj != parent;
        }
    }
#ifdef DEBUG_igor
    if (*defer) {
        fprintf(stderr, "GEN: deferring, gen=%p parent=%p\n",
                (void *)gen, (void *)parent);
    }
#endif
    return JS_TRUE;
}

/*
 * Find all unreachable generators and move them from
 * rt->gcCloseState.reachableList to the todo queue, to execute their close
 * hooks after the GC cycle completes. To ensure liveness during the sweep
 * phase we mark all generators we are going to close later.
 */
static void
FindAndMarkObjectsToClose(JSContext *cx, JSGCInvocationKind gckind,
                          JSGenerator **todoQueueTail)
{
    JSRuntime *rt;
    JSGenerator *todo, **genp, *gen;

    rt = cx->runtime;
    todo = NULL;
    genp = &rt->gcCloseState.reachableList;
    while ((gen = *genp) != NULL) {
        if (*js_GetGCThingFlags(gen->obj) & GCF_MARK) {
            genp = &gen->next;
        } else {
            /* Generator must not be executing when it becomes unreachable. */
            JS_ASSERT(gen->state == JSGEN_NEWBORN ||
                      gen->state == JSGEN_OPEN ||
                      gen->state == JSGEN_CLOSED);

            *genp = gen->next;
            if (gen->state == JSGEN_OPEN &&
                js_FindFinallyHandler(gen->frame.script, gen->frame.pc) &&
                CanScheduleCloseHook(gen)) {
                /*
                 * Generator yielded inside a try with a finally block.
                 * Schedule it for closing.
                 *
                 * We keep generators that yielded outside try-with-finally
                 * with gen->state == JSGEN_OPEN. The finalizer must deal with
                 * open generators as we may skip the close hooks, see below.
                 */
                gen->next = NULL;
                *todoQueueTail = gen;
                todoQueueTail = &gen->next;
                if (!todo)
                    todo = gen;
                METER(JS_ASSERT(rt->gcStats.nclose));
                METER(rt->gcStats.nclose--);
                METER(rt->gcStats.closelater++);
                METER(rt->gcStats.maxcloselater
                      = JS_MAX(rt->gcStats.maxcloselater,
                               rt->gcStats.closelater));
            }
        }
    }

    if (gckind == GC_LAST_CONTEXT) {
        /*
         * Remove scheduled hooks on shutdown as it is too late to run them:
         * we do not allow execution of arbitrary scripts at this point.
         */
        rt->gcCloseState.todoQueue = NULL;
    } else {
        /*
         * Mark just-found unreachable generators *after* we scan the global
         * list to prevent a generator that refers to other unreachable
         * generators from keeping them on gcCloseState.reachableList.
         */
        for (gen = todo; gen; gen = gen->next)
            GC_MARK(cx, gen->obj, "newly scheduled generator");
    }
}

/*
 * Mark unreachable generators already scheduled to close and return the tail
 * pointer to JSGCCloseState.todoQueue.
 */
static JSGenerator **
MarkScheduledGenerators(JSContext *cx)
{
    JSRuntime *rt;
    JSGenerator **genp, *gen;

    rt = cx->runtime;
    genp = &rt->gcCloseState.todoQueue;
    while ((gen = *genp) != NULL) {
        if (CanScheduleCloseHook(gen)) {
            GC_MARK(cx, gen->obj, "scheduled generator");
            genp = &gen->next;
        } else {
            /* Discard the generator from the list if its schedule is over. */
            *genp = gen->next;
            METER(JS_ASSERT(rt->gcStats.closelater > 0));
            METER(rt->gcStats.closelater--);
        }
    }
    return genp;
}

#ifdef JS_THREADSAFE
# define GC_RUNNING_CLOSE_HOOKS_PTR(cx)                                       \
    (&(cx)->thread->gcRunningCloseHooks)
#else
# define GC_RUNNING_CLOSE_HOOKS_PTR(cx)                                       \
    (&(cx)->runtime->gcCloseState.runningCloseHook)
#endif

typedef struct JSTempCloseList {
    JSTempValueRooter   tvr;
    JSGenerator         *head;
} JSTempCloseList;

JS_STATIC_DLL_CALLBACK(void)
mark_temp_close_list(JSContext *cx, JSTempValueRooter *tvr)
{
    JSTempCloseList *list = (JSTempCloseList *)tvr;
    JSGenerator *gen;

    for (gen = list->head; gen; gen = gen->next)
        GC_MARK(cx, gen->obj, "temp list generator");
}

#define JS_PUSH_TEMP_CLOSE_LIST(cx, tempList)                                 \
    JS_PUSH_TEMP_ROOT_MARKER(cx, mark_temp_close_list, &(tempList)->tvr)

#define JS_POP_TEMP_CLOSE_LIST(cx, tempList)                                  \
    JS_BEGIN_MACRO                                                            \
        JS_ASSERT((tempList)->tvr.u.marker == mark_temp_close_list);          \
        JS_POP_TEMP_ROOT(cx, &(tempList)->tvr);                               \
    JS_END_MACRO

JSBool
js_RunCloseHooks(JSContext *cx)
{
    JSRuntime *rt;
    JSTempCloseList tempList;
    JSStackFrame *fp;
    JSGenerator **genp, *gen;
    JSBool ok, defer;
#ifdef JS_GCMETER
    uint32 deferCount;
#endif

    rt = cx->runtime;

    /*
     * It is OK to access todoQueue outside the lock here. When many threads
     * update the todo list, accessing some older value of todoQueue in the
     * worst case just delays the execution of close hooks.
     */
    if (!rt->gcCloseState.todoQueue)
        return JS_TRUE;

    /*
     * To prevent an infinite loop when a close hook creates more objects
     * with close hooks and then triggers GC, we ignore recursive invocations
     * of js_RunCloseHooks and limit the number of hooks to execute to the
     * initial size of the list.
     */
    if (*GC_RUNNING_CLOSE_HOOKS_PTR(cx))
        return JS_TRUE;

    *GC_RUNNING_CLOSE_HOOKS_PTR(cx) = JS_TRUE;

    JS_LOCK_GC(rt);
    tempList.head = rt->gcCloseState.todoQueue;
    JS_PUSH_TEMP_CLOSE_LIST(cx, &tempList);
    rt->gcCloseState.todoQueue = NULL;
    METER(rt->gcStats.closelater = 0);
    rt->gcPoke = JS_TRUE;
    JS_UNLOCK_GC(rt);

    /*
     * Set aside cx->fp since we do not want a close hook using caller or
     * other means to backtrace into whatever stack might be active when
     * running the hook. We store the current frame on the dormant list to
     * protect against GC that the hook can trigger.
     */
    fp = cx->fp;
    if (fp) {
        JS_ASSERT(!fp->dormantNext);
        fp->dormantNext = cx->dormantFrameChain;
        cx->dormantFrameChain = fp;
    }
    cx->fp = NULL;

    genp = &tempList.head;
    ok = JS_TRUE;
    METER(deferCount = 0);
    while ((gen = *genp) != NULL) {
        ok = ShouldDeferCloseHook(cx, gen, &defer);
        if (!ok) {
            /* Quit ASAP, discarding the hook. */
            *genp = gen->next;
            break;
        }
        if (defer) {
            genp = &gen->next;
            METER(deferCount++);
            continue;
        }
        ok = js_CloseGeneratorObject(cx, gen);

        /*
         * Unlink the generator after closing it to make sure it always stays
         * rooted through tempList.
         */
        *genp = gen->next;

        if (cx->throwing) {
            /*
             * Report the exception thrown by the close hook and continue to
             * execute the rest of the hooks.
             */
            if (!js_ReportUncaughtException(cx))
                JS_ClearPendingException(cx);
            ok = JS_TRUE;
        } else if (!ok) {
            /*
             * Assume this is a stop signal from the branch callback or
             * other quit ASAP condition. Break execution until the next
             * invocation of js_RunCloseHooks.
             */
            break;
        }
    }

    cx->fp = fp;
    if (fp) {
        JS_ASSERT(cx->dormantFrameChain == fp);
        cx->dormantFrameChain = fp->dormantNext;
        fp->dormantNext = NULL;
    }

    if (tempList.head) {
        /*
         * Some close hooks were not yet executed; put them back into the
         * scheduled list.
         */
        while ((gen = *genp) != NULL) {
            genp = &gen->next;
            METER(deferCount++);
        }

        /* Now genp is a pointer to the tail of tempList. */
        JS_LOCK_GC(rt);
        *genp = rt->gcCloseState.todoQueue;
        rt->gcCloseState.todoQueue = tempList.head;
        METER(rt->gcStats.closelater += deferCount);
        METER(rt->gcStats.maxcloselater
              = JS_MAX(rt->gcStats.maxcloselater, rt->gcStats.closelater));
        JS_UNLOCK_GC(rt);
    }

    JS_POP_TEMP_CLOSE_LIST(cx, &tempList);
    *GC_RUNNING_CLOSE_HOOKS_PTR(cx) = JS_FALSE;

    return ok;
}

#endif /* JS_HAS_GENERATORS */

#if defined(DEBUG_brendan) || defined(DEBUG_timeless)
#define DEBUG_gchist
#endif

#ifdef DEBUG_gchist
#define NGCHIST 64

static struct GCHist {
    JSBool      lastDitch;
    JSGCThing   *freeList;
} gchist[NGCHIST];

unsigned gchpos;
#endif

void *
js_NewGCThing(JSContext *cx, uintN flags, size_t nbytes)
{
    JSRuntime *rt;
    uintN flindex;
    JSBool doGC;
    JSGCThing *thing;
    uint8 *flagp, *firstPage;
    JSGCArenaList *arenaList;
    jsuword offset;
    JSGCArena *a;
    JSLocalRootStack *lrs;
#ifdef JS_THREADSAFE
    JSBool gcLocked;
    uintN localMallocBytes;
    JSGCThing **flbase, **lastptr;
    JSGCThing *tmpthing;
    uint8 *tmpflagp;
    uintN maxFreeThings;         /* max to take from the global free list */
    METER(size_t nfree);
#endif

    rt = cx->runtime;
    METER(rt->gcStats.alloc++);        /* this is not thread-safe */
    nbytes = JS_ROUNDUP(nbytes, sizeof(JSGCThing));
    flindex = GC_FREELIST_INDEX(nbytes);

#ifdef JS_THREADSAFE
    gcLocked = JS_FALSE;
    JS_ASSERT(cx->thread);
    flbase = cx->thread->gcFreeLists;
    JS_ASSERT(flbase);
    thing = flbase[flindex];
    localMallocBytes = cx->thread->gcMallocBytes;
    if (thing && rt->gcMaxMallocBytes - rt->gcMallocBytes > localMallocBytes) {
        flagp = thing->flagp;
        flbase[flindex] = thing->next;
        METER(rt->gcStats.localalloc++);  /* this is not thread-safe */
        goto success;
    }

    JS_LOCK_GC(rt);
    gcLocked = JS_TRUE;

    /* Transfer thread-local counter to global one. */
    if (localMallocBytes != 0) {
        cx->thread->gcMallocBytes = 0;
        if (rt->gcMaxMallocBytes - rt->gcMallocBytes < localMallocBytes)
            rt->gcMallocBytes = rt->gcMaxMallocBytes;
        else
            rt->gcMallocBytes += localMallocBytes;
    }
#endif
    JS_ASSERT(!rt->gcRunning);
    if (rt->gcRunning) {
        METER(rt->gcStats.finalfail++);
        JS_UNLOCK_GC(rt);
        return NULL;
    }

#ifdef TOO_MUCH_GC
#ifdef WAY_TOO_MUCH_GC
    rt->gcPoke = JS_TRUE;
#endif
    doGC = JS_TRUE;
#else
    doGC = (rt->gcMallocBytes >= rt->gcMaxMallocBytes);
#endif
1408 
1409     arenaList = &rt->gcArenaList[flindex];
1410     for (;;) {
1411         if (doGC) {
1412             /*
1413              * Keep rt->gcLock across the call into js_GC so we don't starve
1414              * and lose to racing threads who deplete the heap just after
1415              * js_GC has replenished it (or has synchronized with a racing
1416              * GC that collected a bunch of garbage).  This unfair scheduling
1417              * can happen on certain operating systems. For the gory details,
1418              * see bug 162779 at https://bugzilla.mozilla.org/.
1419              */
1420             js_GC(cx, GC_LAST_DITCH);
1421             METER(rt->gcStats.retry++);
1422         }
1423 
1424         /* Try to get thing from the free list. */
1425         thing = arenaList->freeList;
1426         if (thing) {
1427             arenaList->freeList = thing->next;
1428             flagp = thing->flagp;
1429             JS_ASSERT(*flagp & GCF_FINAL);
1430             METER(arenaList->stats.freelen--);
1431             METER(arenaList->stats.recycle++);
1432 
1433 #ifdef JS_THREADSAFE
1434             /*
1435              * Refill the local free list by taking several things from the
1436              * global free list unless we are still at rt->gcMaxMallocBytes
1437              * barrier or the free list is already populated. The former
1438              * happens when GC is canceled due to !gcCallback(cx, JSGC_BEGIN)
1439              * or no gcPoke. The latter is caused via allocating new things
1440              * in gcCallback(cx, JSGC_END).
1441              */
1442             if (rt->gcMallocBytes >= rt->gcMaxMallocBytes || flbase[flindex])
1443                 break;
1444             tmpthing = arenaList->freeList;
1445             if (tmpthing) {
1446                 maxFreeThings = MAX_THREAD_LOCAL_THINGS;
1447                 do {
1448                     if (!tmpthing->next)
1449                         break;
1450                     tmpthing = tmpthing->next;
1451                 } while (--maxFreeThings != 0);
1452 
1453                 flbase[flindex] = arenaList->freeList;
1454                 arenaList->freeList = tmpthing->next;
1455                 tmpthing->next = NULL;
1456             }
1457 #endif
1458             break;
1459         }
1460 
1461         /* Allocate from the tail of last arena or from new arena if we can. */
1462         if ((arenaList->last && arenaList->lastLimit != GC_THINGS_SIZE) ||
1463             NewGCArena(rt, arenaList)) {
1464 
1465             offset = arenaList->lastLimit;
1466             if ((offset & GC_PAGE_MASK) == 0) {
1467                 /*
1468                  * Skip JSGCPageInfo record located at GC_PAGE_SIZE boundary.
1469                  */
1470                 offset += PAGE_THING_GAP(nbytes);
1471             }
1472             JS_ASSERT(offset + nbytes <= GC_THINGS_SIZE);
1473             arenaList->lastLimit = (uint16)(offset + nbytes);
1474             a = arenaList->last;
1475             firstPage = (uint8 *)FIRST_THING_PAGE(a);
1476             thing = (JSGCThing *)(firstPage + offset);
1477             flagp = a->base + offset / sizeof(JSGCThing);
1478             if (flagp >= firstPage)
1479                 flagp += GC_THINGS_SIZE;
1480             METER(++arenaList->stats.nthings);
1481             METER(arenaList->stats.maxthings =
1482                   JS_MAX(arenaList->stats.nthings,
1483                          arenaList->stats.maxthings));
1484 
1485 #ifdef JS_THREADSAFE
1486             /*
1487              * Refill the local free list by taking free things from the last
1488              * arena. Prefer to order free things by ascending address in the
1489              * (unscientific) hope of better cache locality.
1490              */
1491             if (rt->gcMallocBytes >= rt->gcMaxMallocBytes || flbase[flindex])
1492                 break;
1493             METER(nfree = 0);
1494             lastptr = &flbase[flindex];
1495             maxFreeThings = MAX_THREAD_LOCAL_THINGS;
1496             for (offset = arenaList->lastLimit;
1497                  offset != GC_THINGS_SIZE && maxFreeThings-- != 0;
1498                  offset += nbytes) {
1499                 if ((offset & GC_PAGE_MASK) == 0)
1500                     offset += PAGE_THING_GAP(nbytes);
1501                 JS_ASSERT(offset + nbytes <= GC_THINGS_SIZE);
1502                 tmpflagp = a->base + offset / sizeof(JSGCThing);
1503                 if (tmpflagp >= firstPage)
1504                     tmpflagp += GC_THINGS_SIZE;
1505 
1506                 tmpthing = (JSGCThing *)(firstPage + offset);
1507                 tmpthing->flagp = tmpflagp;
1508                 *tmpflagp = GCF_FINAL;    /* signifying that thing is free */
1509 
1510                 *lastptr = tmpthing;
1511                 lastptr = &tmpthing->next;
1512                 METER(++nfree);
1513             }
1514             arenaList->lastLimit = offset;
1515             *lastptr = NULL;
1516             METER(arenaList->stats.freelen += nfree);
1517 #endif
1518             break;
1519         }
1520 
1521         /* Consider doing a "last ditch" GC unless we have already tried one. */
1522         if (doGC)
1523             goto fail;
1524         rt->gcPoke = JS_TRUE;
1525         doGC = JS_TRUE;
1526     }
1527 
1528     /* We successfully allocated the thing. */
1529 #ifdef JS_THREADSAFE
1530   success:
1531 #endif
1532     lrs = cx->localRootStack;
1533     if (lrs) {
1534         /*
1535          * If we're in a local root scope, don't set newborn[type] at all, to
1536          * avoid entraining garbage from it for an unbounded amount of time
1537          * on this context.  A caller will leave the local root scope and pop
1538          * this reference, allowing thing to be GC'd if it has no other refs.
1539          * See JS_EnterLocalRootScope and related APIs.
1540          */
1541         if (js_PushLocalRoot(cx, lrs, (jsval) thing) < 0) {
1542             /*
1543              * When we fail for a thing allocated through the tail of the last
1544              * arena, the thing's flag byte is not initialized. So to prevent
1545              * the GC from accessing the uninitialized flags during
1546              * finalization, we always mark the thing as final. See bug 337407.
1547              */
1548             *flagp = GCF_FINAL;
1549             goto fail;
1550         }
1551     } else {
1552         /*
1553          * No local root scope, so we're stuck with the old, fragile model of
1554          * depending on a pigeon-hole newborn per type per context.
1555          */
1556         cx->weakRoots.newborn[flags & GCF_TYPEMASK] = thing;
1557     }
1558 
1559     /* We can't fail now, so update flags and rt->gc{,Private}Bytes. */
1560     *flagp = (uint8)flags;
1561 
1562     /*
1563      * Clear thing before unlocking in case a GC run is about to scan it,
1564      * finding it via newborn[].
1565      */
1566     thing->next = NULL;
1567     thing->flagp = NULL;
1568 #ifdef DEBUG_gchist
1569     gchist[gchpos].lastDitch = doGC;
1570     gchist[gchpos].freeList = rt->gcArenaList[flindex].freeList;
1571     if (++gchpos == NGCHIST)
1572         gchpos = 0;
1573 #endif
1574     METER(if (flags & GCF_LOCK) rt->gcStats.lockborn++);
1575     METER(++rt->gcArenaList[flindex].stats.totalnew);
1576 #ifdef JS_THREADSAFE
1577     if (gcLocked)
1578         JS_UNLOCK_GC(rt);
1579 #endif
1580     return thing;
1581 
1582 fail:
1583 #ifdef JS_THREADSAFE
1584     if (gcLocked)
1585         JS_UNLOCK_GC(rt);
1586 #endif
1587     METER(rt->gcStats.fail++);
1588     JS_ReportOutOfMemory(cx);
1589     return NULL;
1590 }
1591 
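/*
 * Editor's sketch (not from the original source): a minimal caller of
 * js_NewGCThing, modeled on the js_NewDouble pattern.  The expected
 * discipline is to check for NULL (a failed last-ditch GC reports OOM
 * above) and to initialize the new thing before anything else can
 * allocate.  The function name is hypothetical.
 */
#if 0
static jsdouble *
NewDoubleSketch(JSContext *cx, jsdouble d)
{
    jsdouble *dp;

    dp = (jsdouble *) js_NewGCThing(cx, GCX_DOUBLE, sizeof(jsdouble));
    if (!dp)
        return NULL;        /* js_NewGCThing reported out-of-memory */
    *dp = d;                /* initialize before the next allocation */
    return dp;
}
#endif
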
1592 JSBool
1593 js_LockGCThing(JSContext *cx, void *thing)
1594 {
1595     JSBool ok = js_LockGCThingRT(cx->runtime, thing);
1596     if (!ok)
1597         JS_ReportOutOfMemory(cx);
1598     return ok;
1599 }
1600 
1601 /*
1602  * Deep GC-things can't be locked just by setting the GCF_LOCK bit, because
1603  * their descendants must be marked by the GC.  To find them during the mark
1604  * phase, they are added to rt->gcLocksHash, which is created lazily.
1605  *
1606  * NB: we depend on the order of GC-thing type indexes here!
1607  */
1608 #define GC_TYPE_IS_STRING(t)    ((t) == GCX_STRING ||                         \
1609                                  (t) >= GCX_EXTERNAL_STRING)
1610 #define GC_TYPE_IS_XML(t)       ((unsigned)((t) - GCX_NAMESPACE) <=           \
1611                                  (unsigned)(GCX_XML - GCX_NAMESPACE))
1612 #define GC_TYPE_IS_DEEP(t)      ((t) == GCX_OBJECT || GC_TYPE_IS_XML(t))
1613 
1614 #define IS_DEEP_STRING(t,o)     (GC_TYPE_IS_STRING(t) &&                      \
1615                                  JSSTRING_IS_DEPENDENT((JSString *)(o)))
1616 
1617 #define GC_THING_IS_DEEP(t,o)   (GC_TYPE_IS_DEEP(t) || IS_DEEP_STRING(t, o))
1618 
1619 /* This is compatible with JSDHashEntryStub. */
1620 typedef struct JSGCLockHashEntry {
1621     JSDHashEntryHdr hdr;
1622     const JSGCThing *thing;
1623     uint32          count;
1624 } JSGCLockHashEntry;
1625 
1626 JSBool
1627 js_LockGCThingRT(JSRuntime *rt, void *thing)
1628 {
1629     JSBool ok, deep;
1630     uint8 *flagp;
1631     uintN flags, lock, type;
1632     JSGCLockHashEntry *lhe;
1633 
1634     ok = JS_TRUE;
1635     if (!thing)
1636         return ok;
1637 
1638     flagp = js_GetGCThingFlags(thing);
1639 
1640     JS_LOCK_GC(rt);
1641     flags = *flagp;
1642     lock = (flags & GCF_LOCK);
1643     type = (flags & GCF_TYPEMASK);
1644     deep = GC_THING_IS_DEEP(type, thing);
1645 
1646     /*
1647      * Avoid adding a rt->gcLocksHash entry for shallow things until someone
1648      * nests a lock -- then start such an entry with a count of 2, not 1.
1649      */
1650     if (lock || deep) {
1651         if (!rt->gcLocksHash) {
1652             rt->gcLocksHash =
1653                 JS_NewDHashTable(JS_DHashGetStubOps(), NULL,
1654                                  sizeof(JSGCLockHashEntry),
1655                                  GC_ROOTS_SIZE);
1656             if (!rt->gcLocksHash) {
1657                 ok = JS_FALSE;
1658                 goto done;
1659             }
1660         } else if (lock == 0) {
1661 #ifdef DEBUG
1662             JSDHashEntryHdr *hdr =
1663                 JS_DHashTableOperate(rt->gcLocksHash, thing,
1664                                      JS_DHASH_LOOKUP);
1665             JS_ASSERT(JS_DHASH_ENTRY_IS_FREE(hdr));
1666 #endif
1667         }
1668 
1669         lhe = (JSGCLockHashEntry *)
1670             JS_DHashTableOperate(rt->gcLocksHash, thing, JS_DHASH_ADD);
1671         if (!lhe) {
1672             ok = JS_FALSE;
1673             goto done;
1674         }
1675         if (!lhe->thing) {
1676             lhe->thing = thing;
1677             lhe->count = deep ? 1 : 2;
1678         } else {
1679             JS_ASSERT(lhe->count >= 1);
1680             lhe->count++;
1681         }
1682     }
1683 
1684     *flagp = (uint8)(flags | GCF_LOCK);
1685     METER(rt->gcStats.lock++);
1686     ok = JS_TRUE;
1687 done:
1688     JS_UNLOCK_GC(rt);
1689     return ok;
1690 }
1691 
1692 JSBool
1693 js_UnlockGCThingRT(JSRuntime *rt, void *thing)
1694 {
1695     uint8 *flagp, flags;
1696     JSGCLockHashEntry *lhe;
1697 
1698     if (!thing)
1699         return JS_TRUE;
1700 
1701     flagp = js_GetGCThingFlags(thing);
1702     JS_LOCK_GC(rt);
1703     flags = *flagp;
1704 
1705     if (flags & GCF_LOCK) {
1706         if (!rt->gcLocksHash ||
1707             (lhe = (JSGCLockHashEntry *)
1708                    JS_DHashTableOperate(rt->gcLocksHash, thing,
1709                                         JS_DHASH_LOOKUP),
1710              JS_DHASH_ENTRY_IS_FREE(&lhe->hdr))) {
1711             /* Shallow GC-thing with an implicit lock count of 1. */
1712             JS_ASSERT(!GC_THING_IS_DEEP(flags & GCF_TYPEMASK, thing));
1713         } else {
1714             /* Basis or nested unlock of a deep thing, or nested of shallow. */
1715             if (--lhe->count != 0)
1716                 goto out;
1717             JS_DHashTableOperate(rt->gcLocksHash, thing, JS_DHASH_REMOVE);
1718         }
1719         *flagp = (uint8)(flags & ~GCF_LOCK);
1720     }
1721 
1722     rt->gcPoke = JS_TRUE;
1723 out:
1724     METER(rt->gcStats.unlock++);
1725     JS_UNLOCK_GC(rt);
1726     return JS_TRUE;
1727 }
1728 
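/*
 * Editor's sketch (not part of the original source): locks taken through
 * js_LockGCThing/js_LockGCThingRT nest, so each successful lock must be
 * balanced by exactly one unlock.  The helper below is hypothetical.
 */
#if 0
extern JSBool DoSomethingThatCanGC(JSContext *cx, JSObject *obj);

static JSBool
WithPinnedObject(JSContext *cx, JSObject *obj)
{
    JSBool ok;

    if (!js_LockGCThing(cx, obj))           /* reports OOM on failure */
        return JS_FALSE;
    ok = DoSomethingThatCanGC(cx, obj);     /* obj survives any GC here */
    js_UnlockGCThingRT(cx->runtime, obj);
    return ok;
}
#endif
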
1729 #ifdef GC_MARK_DEBUG
1730 
1731 #include <stdio.h>
1732 #include "jsprf.h"
1733 
1734 typedef struct GCMarkNode GCMarkNode;
1735 
1736 struct GCMarkNode {
1737     void        *thing;
1738     const char  *name;
1739     GCMarkNode  *next;
1740     GCMarkNode  *prev;
1741 };
1742 
1743 JS_FRIEND_DATA(FILE *) js_DumpGCHeap;
1744 JS_EXPORT_DATA(void *) js_LiveThingToFind;
1745 
1746 #ifdef HAVE_XPCONNECT
1747 #include "dump_xpc.h"
1748 #endif
1749 
1750 static void
1751 GetObjSlotName(JSScope *scope, JSObject *obj, uint32 slot, char *buf,
1752                size_t bufsize)
1753 {
1754     jsval nval;
1755     JSScopeProperty *sprop;
1756     JSClass *clasp;
1757     uint32 key;
1758     const char *slotname;
1759 
1760     if (!scope) {
1761         JS_snprintf(buf, bufsize, "**UNKNOWN OBJECT MAP ENTRY**");
1762         return;
1763     }
1764 
1765     sprop = SCOPE_LAST_PROP(scope);
1766     while (sprop && sprop->slot != slot)
1767         sprop = sprop->parent;
1768 
1769     if (!sprop) {
1770         switch (slot) {
1771           case JSSLOT_PROTO:
1772             JS_snprintf(buf, bufsize, "__proto__");
1773             break;
1774           case JSSLOT_PARENT:
1775             JS_snprintf(buf, bufsize, "__parent__");
1776             break;
1777           default:
1778             slotname = NULL;
1779             clasp = LOCKED_OBJ_GET_CLASS(obj);
1780             if (clasp->flags & JSCLASS_IS_GLOBAL) {
1781                 key = slot - JSSLOT_START(clasp);
1782 #define JS_PROTO(name,code,init) \
1783     if ((code) == key) { slotname = js_##name##_str; goto found; }
1784 #include "jsproto.tbl"
1785 #undef JS_PROTO
1786             }
1787           found:
1788             if (slotname)
1789                 JS_snprintf(buf, bufsize, "CLASS_OBJECT(%s)", slotname);
1790             else
1791                 JS_snprintf(buf, bufsize, "**UNKNOWN SLOT %ld**", (long)slot);
1792             break;
1793         }
1794     } else {
1795         nval = ID_TO_VALUE(sprop->id);
1796         if (JSVAL_IS_INT(nval)) {
1797             JS_snprintf(buf, bufsize, "%ld", (long)JSVAL_TO_INT(nval));
1798         } else if (JSVAL_IS_STRING(nval)) {
1799             JS_snprintf(buf, bufsize, "%s",
1800                         JS_GetStringBytes(JSVAL_TO_STRING(nval)));
1801         } else {
1802             JS_snprintf(buf, bufsize, "**FINALIZED ATOM KEY**");
1803         }
1804     }
1805 }
1806 
1807 static const char *
1808 gc_object_class_name(void* thing)
1809 {
1810     uint8 *flagp = js_GetGCThingFlags(thing);
1811     const char *className = "";
1812     static char depbuf[32];
1813 
1814     switch (*flagp & GCF_TYPEMASK) {
1815       case GCX_OBJECT: {
1816         JSObject  *obj = (JSObject *)thing;
1817         JSClass   *clasp = JSVAL_TO_PRIVATE(obj->slots[JSSLOT_CLASS]);
1818         className = clasp->name;
1819 #ifdef HAVE_XPCONNECT
1820         if (clasp->flags & JSCLASS_PRIVATE_IS_NSISUPPORTS) {
1821             jsval privateValue = obj->slots[JSSLOT_PRIVATE];
1822 
1823             JS_ASSERT(clasp->flags & JSCLASS_HAS_PRIVATE);
1824             if (!JSVAL_IS_VOID(privateValue)) {
1825                 void  *privateThing = JSVAL_TO_PRIVATE(privateValue);
1826                 const char *xpcClassName = GetXPCObjectClassName(privateThing);
1827 
1828                 if (xpcClassName)
1829                     className = xpcClassName;
1830             }
1831         }
1832 #endif
1833         break;
1834       }
1835 
1836       case GCX_STRING:
1837       case GCX_MUTABLE_STRING: {
1838         JSString *str = (JSString *)thing;
1839         if (JSSTRING_IS_DEPENDENT(str)) {
1840             JS_snprintf(depbuf, sizeof depbuf, "start:%u, length:%u",
1841                         JSSTRDEP_START(str), JSSTRDEP_LENGTH(str));
1842             className = depbuf;
1843         } else {
1844             className = "string";
1845         }
1846         break;
1847       }
1848 
1849       case GCX_DOUBLE:
1850         className = "double";
1851         break;
1852     }
1853 
1854     return className;
1855 }
1856 
1857 static void
1858 gc_dump_thing(JSContext *cx, JSGCThing *thing, FILE *fp)
1859 {
1860     GCMarkNode *prev = (GCMarkNode *)cx->gcCurrentMarkNode;
1861     GCMarkNode *next = NULL;
1862     char *path = NULL;
1863 
1864     while (prev) {
1865         next = prev;
1866         prev = prev->prev;
1867     }
1868     while (next) {
1869         uint8 nextFlags = *js_GetGCThingFlags(next->thing);
1870         if ((nextFlags & GCF_TYPEMASK) == GCX_OBJECT) {
1871             path = JS_sprintf_append(path, "%s(%s @ 0x%08p).",
1872                                      next->name,
1873                                      gc_object_class_name(next->thing),
1874                                      (JSObject*)next->thing);
1875         } else {
1876             path = JS_sprintf_append(path, "%s(%s).",
1877                                      next->name,
1878                                      gc_object_class_name(next->thing));
1879         }
1880         next = next->next;
1881     }
1882     if (!path)
1883         return;
1884 
1885     fprintf(fp, "%08lx ", (long)thing);
1886     switch (*js_GetGCThingFlags(thing) & GCF_TYPEMASK) {
1887       case GCX_OBJECT:
1888       {
1889         JSObject  *obj = (JSObject *)thing;
1890         jsval     privateValue = obj->slots[JSSLOT_PRIVATE];
1891         void      *privateThing = JSVAL_IS_VOID(privateValue)
1892                                   ? NULL
1893                                   : JSVAL_TO_PRIVATE(privateValue);
1894         const char *className = gc_object_class_name(thing);
1895         fprintf(fp, "object %8p %s", privateThing, className);
1896         break;
1897       }
1898 #if JS_HAS_XML_SUPPORT
1899       case GCX_NAMESPACE:
1900       {
1901         JSXMLNamespace *ns = (JSXMLNamespace *)thing;
1902         fprintf(fp, "namespace %s:%s",
1903                 JS_GetStringBytes(ns->prefix), JS_GetStringBytes(ns->uri));
1904         break;
1905       }
1906       case GCX_QNAME:
1907       {
1908         JSXMLQName *qn = (JSXMLQName *)thing;
1909         fprintf(fp, "qname %s(%s):%s",
1910                 JS_GetStringBytes(qn->prefix), JS_GetStringBytes(qn->uri),
1911                 JS_GetStringBytes(qn->localName));
1912         break;
1913       }
1914       case GCX_XML:
1915       {
1916         extern const char *js_xml_class_str[];
1917         JSXML *xml = (JSXML *)thing;
1918         fprintf(fp, "xml %8p %s", xml, js_xml_class_str[xml->xml_class]);
1919         break;
1920       }
1921 #endif
1922       case GCX_DOUBLE:
1923         fprintf(fp, "double %g", *(jsdouble *)thing);
1924         break;
1925       case GCX_PRIVATE:
1926         fprintf(fp, "private %8p", (void *)thing);
1927         break;
1928       default:
1929         fprintf(fp, "string %s", JS_GetStringBytes((JSString *)thing));
1930         break;
1931     }
1932     fprintf(fp, " via %s\n", path);
1933     free(path);
1934 }
1935 
1936 void
1937 js_MarkNamedGCThing(JSContext *cx, void *thing, const char *name)
1938 {
1939     GCMarkNode markNode;
1940 
1941     if (!thing)
1942         return;
1943 
1944     markNode.thing = thing;
1945     markNode.name  = name;
1946     markNode.next  = NULL;
1947     markNode.prev  = (GCMarkNode *)cx->gcCurrentMarkNode;
1948     if (markNode.prev)
1949         markNode.prev->next = &markNode;
1950     cx->gcCurrentMarkNode = &markNode;
1951 
1952     if (thing == js_LiveThingToFind) {
1953         /*
1954          * Dump js_LiveThingToFind each time we reach it during the marking
1955          * phase of GC to print all live references to the thing.
1956          */
1957         gc_dump_thing(cx, thing, stderr);
1958     }
1959 
1960     js_MarkGCThing(cx, thing);
1961 
1962     if (markNode.prev)
1963         markNode.prev->next = NULL;
1964     cx->gcCurrentMarkNode = markNode.prev;
1965 }
1966 
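/*
 * Editor's note (sketch, not original source): to find out why a thing
 * stays alive, set js_LiveThingToFind from a debugger or embedding code
 * and force a collection; each time the marker reaches the thing,
 * gc_dump_thing above prints the path of live references to stderr:
 *
 *   js_LiveThingToFind = suspectThing;    (address of the leaking thing)
 *   JS_GC(cx);                            (dumps every marking path to it)
 */
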
1967 #endif /* GC_MARK_DEBUG */
1968 
1969 static void
1970 gc_mark_atom_key_thing(void *thing, void *arg)
1971 {
1972     JSContext *cx = (JSContext *) arg;
1973 
1974     GC_MARK(cx, thing, "atom");
1975 }
1976 
1977 void
1978 js_MarkAtom(JSContext *cx, JSAtom *atom)
1979 {
1980     jsval key;
1981 
1982     if (atom->flags & ATOM_MARK)
1983         return;
1984     atom->flags |= ATOM_MARK;
1985     key = ATOM_KEY(atom);
1986     if (JSVAL_IS_GCTHING(key)) {
1987 #ifdef GC_MARK_DEBUG
1988         char name[32];
1989 
1990         if (JSVAL_IS_STRING(key)) {
1991             JS_snprintf(name, sizeof name, "'%s'",
1992                         JS_GetStringBytes(JSVAL_TO_STRING(key)));
1993         } else {
1994             JS_snprintf(name, sizeof name, "<%x>", key);
1995         }
1996 #endif
1997         GC_MARK(cx, JSVAL_TO_GCTHING(key), name);
1998     }
1999     if (atom->flags & ATOM_HIDDEN)
2000         js_MarkAtom(cx, atom->entry.value);
2001 }
2002 
2003 static void
2004 AddThingToUnscannedBag(JSRuntime *rt, void *thing, uint8 *flagp);
2005 
2006 static void
2007 MarkGCThingChildren(JSContext *cx, void *thing, uint8 *flagp,
2008                     JSBool shouldCheckRecursion)
2009 {
2010     JSRuntime *rt;
2011     JSObject *obj;
2012     jsval v, *vp, *end;
2013     void *next_thing;
2014     uint8 *next_flagp;
2015     JSString *str;
2016 #ifdef JS_GCMETER
2017     uint32 tailCallNesting;
2018 #endif
2019 #ifdef GC_MARK_DEBUG
2020     JSScope *scope;
2021     char name[32];
2022 #endif
2023 
2024     /*
2025      * With JS_GC_ASSUME_LOW_C_STACK defined, the mark phase of the GC
2026      * always uses the non-recursive code that would otherwise be invoked
2027      * only under a low C stack condition.
2028      */
2029 #ifdef JS_GC_ASSUME_LOW_C_STACK
2030 # define RECURSION_TOO_DEEP() shouldCheckRecursion
2031 #else
2032     int stackDummy;
2033 # define RECURSION_TOO_DEEP() (shouldCheckRecursion &&                        \
2034                                !JS_CHECK_STACK_SIZE(cx, stackDummy))
2035 #endif
2036 
2037     rt = cx->runtime;
2038     METER(tailCallNesting = 0);
2039     METER(if (++rt->gcStats.cdepth > rt->gcStats.maxcdepth)
2040               rt->gcStats.maxcdepth = rt->gcStats.cdepth);
2041 
2042 #ifndef GC_MARK_DEBUG
2043   start:
2044 #endif
2045     JS_ASSERT(flagp);
2046     JS_ASSERT(*flagp & GCF_MARK); /* the caller must already mark the thing */
2047     METER(if (++rt->gcStats.depth > rt->gcStats.maxdepth)
2048               rt->gcStats.maxdepth = rt->gcStats.depth);
2049 #ifdef GC_MARK_DEBUG
2050     if (js_DumpGCHeap)
2051         gc_dump_thing(cx, thing, js_DumpGCHeap);
2052 #endif
2053 
2054     switch (*flagp & GCF_TYPEMASK) {
2055       case GCX_OBJECT:
2056         if (RECURSION_TOO_DEEP())
2057             goto add_to_unscanned_bag;
2058         /* If obj->slots is null, obj must be a newborn. */
2059         obj = (JSObject *) thing;
2060         vp = obj->slots;
2061         if (!vp)
2062             break;
2063 
2064         /* Mark slots if they are small enough to be GC-allocated. */
2065         if ((vp[-1] + 1) * sizeof(jsval) <= GC_NBYTES_MAX)
2066             GC_MARK(cx, vp - 1, "slots");
2067 
2068         /* Set up local variables to loop over unmarked things. */
2069         end = vp + ((obj->map->ops->mark)
2070                     ? obj->map->ops->mark(cx, obj, NULL)
2071                     : JS_MIN(obj->map->freeslot, obj->map->nslots));
2072         thing = NULL;
2073         flagp = NULL;
2074 #ifdef GC_MARK_DEBUG
2075         scope = OBJ_IS_NATIVE(obj) ? OBJ_SCOPE(obj) : NULL;
2076 #endif
2077         for (; vp != end; ++vp) {
2078             v = *vp;
2079             if (!JSVAL_IS_GCTHING(v) || v == JSVAL_NULL)
2080                 continue;
2081             next_thing = JSVAL_TO_GCTHING(v);
2082             if (next_thing == thing)
2083                 continue;
2084             next_flagp = js_GetGCThingFlags(next_thing);
2085             if (*next_flagp & GCF_MARK)
2086                 continue;
2087             JS_ASSERT(*next_flagp != GCF_FINAL);
2088             if (thing) {
2089 #ifdef GC_MARK_DEBUG
2090                 GC_MARK(cx, thing, name);
2091 #else
2092                 *flagp |= GCF_MARK;
2093                 MarkGCThingChildren(cx, thing, flagp, JS_TRUE);
2094 #endif
2095                 if (*next_flagp & GCF_MARK) {
2096                     /*
2097                      * This happens when a recursive MarkGCThingChildren call
2098                      * marks the thing whose flags *next_flagp refers to.
2099                      */
2100                     thing = NULL;
2101                     continue;
2102                 }
2103             }
2104 #ifdef GC_MARK_DEBUG
2105             GetObjSlotName(scope, obj, vp - obj->slots, name, sizeof name);
2106 #endif
2107             thing = next_thing;
2108             flagp = next_flagp;
2109         }
2110         if (thing) {
2111             /*
2112              * thing came from the last unmarked GC-thing slot and we
2113              * can optimize tail recursion.
2114              *
2115              * Since we already know that there is enough C stack space,
2116              * we clear shouldCheckRecursion to avoid extra checking in
2117              * RECURSION_TOO_DEEP.
2118              */
2119             shouldCheckRecursion = JS_FALSE;
2120             goto on_tail_recursion;
2121         }
2122         break;
2123 
2124 #ifdef DEBUG
2125       case GCX_STRING:
2126         str = (JSString *)thing;
2127         JS_ASSERT(!JSSTRING_IS_DEPENDENT(str));
2128         break;
2129 #endif
2130 
2131       case GCX_MUTABLE_STRING:
2132         str = (JSString *)thing;
2133         if (!JSSTRING_IS_DEPENDENT(str))
2134             break;
2135         thing = JSSTRDEP_BASE(str);
2136         flagp = js_GetGCThingFlags(thing);
2137         if (*flagp & GCF_MARK)
2138             break;
2139 #ifdef GC_MARK_DEBUG
2140         strcpy(name, "base");
2141 #endif
2142         /* Fall through to the code that deals with tail recursion. */
2143 
2144       on_tail_recursion:
2145 #ifdef GC_MARK_DEBUG
2146         /*
2147          * Do not eliminate C recursion when debugging to allow
2148          * js_MarkNamedGCThing to build a full dump of live GC
2149          * things.
2150          */
2151         GC_MARK(cx, thing, name);
2152         break;
2153 #else
2154         /* Eliminate tail recursion for the last unmarked child. */
2155         JS_ASSERT(*flagp != GCF_FINAL);
2156         METER(++tailCallNesting);
2157         *flagp |= GCF_MARK;
2158         goto start;
2159 #endif
2160 
2161 #if JS_HAS_XML_SUPPORT
2162       case GCX_NAMESPACE:
2163         if (RECURSION_TOO_DEEP())
2164             goto add_to_unscanned_bag;
2165         js_MarkXMLNamespace(cx, (JSXMLNamespace *)thing);
2166         break;
2167 
2168       case GCX_QNAME:
2169         if (RECURSION_TOO_DEEP())
2170             goto add_to_unscanned_bag;
2171         js_MarkXMLQName(cx, (JSXMLQName *)thing);
2172         break;
2173 
2174       case GCX_XML:
2175         if (RECURSION_TOO_DEEP())
2176             goto add_to_unscanned_bag;
2177         js_MarkXML(cx, (JSXML *)thing);
2178         break;
2179 #endif
2180       add_to_unscanned_bag:
2181         AddThingToUnscannedBag(cx->runtime, thing, flagp);
2182         break;
2183     }
2184 
2185 #undef RECURSION_TOO_DEEP
2186 
2187     METER(rt->gcStats.depth -= 1 + tailCallNesting);
2188     METER(rt->gcStats.cdepth--);
2189 }
2190 
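/*
 * Editor's note (not part of the original source): the goto-start tail
 * call above means that marking a long chain of dependent strings, where
 * each base reference is the next link, consumes O(1) C stack rather
 * than one native frame per link.
 */
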
2191 /*
2192  * We avoid PAGE_THING_GAP inside this macro so the calculation of
2193  * thingsPerUnscannedChunk can be optimized when thingSize is a power of two.
2194  */
2195 #define GET_GAP_AND_CHUNK_SPAN(thingSize, thingsPerUnscannedChunk, pageGap)   \
2196     JS_BEGIN_MACRO                                                            \
2197         if (0 == ((thingSize) & ((thingSize) - 1))) {                         \
2198             pageGap = (thingSize);                                            \
2199             thingsPerUnscannedChunk = ((GC_PAGE_SIZE / (thingSize))           \
2200                                        + JS_BITS_PER_WORD - 1)                \
2201                                       >> JS_BITS_PER_WORD_LOG2;               \
2202         } else {                                                              \
2203             pageGap = GC_PAGE_SIZE % (thingSize);                             \
2204             thingsPerUnscannedChunk = JS_HOWMANY(GC_PAGE_SIZE / (thingSize),  \
2205                                                  JS_BITS_PER_WORD);           \
2206         }                                                                     \
2207     JS_END_MACRO
2208 
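/*
 * Editor's worked example (not part of the original source), assuming
 * GC_PAGE_SIZE is 1024 and 32-bit words.  For thingSize 16, a power of
 * two, the macro yields pageGap = 16 and thingsPerUnscannedChunk =
 * (1024/16 + 31) >> 5 = 2, so a page's 64 things map onto 32 bitmap
 * bits.  For thingSize 24 it yields pageGap = 1024 % 24 = 16 and
 * thingsPerUnscannedChunk = JS_HOWMANY(42, 32) = 2, covering the page's
 * 42 things with 21 bits.
 */
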
2209 static void
2210 AddThingToUnscannedBag(JSRuntime *rt, void *thing, uint8 *flagp)
2211 {
2212     JSGCPageInfo *pi;
2213     JSGCArena *arena;
2214     size_t thingSize;
2215     size_t thingsPerUnscannedChunk;
2216     size_t pageGap;
2217     size_t chunkIndex;
2218     jsuword bit;
2219 
2220     /* Things from delayed scanning bag are marked as GCF_MARK | GCF_FINAL. */
2221     JS_ASSERT((*flagp & (GCF_MARK | GCF_FINAL)) == GCF_MARK);
2222     *flagp |= GCF_FINAL;
2223 
2224     METER(rt->gcStats.unscanned++);
2225 #ifdef DEBUG
2226     ++rt->gcUnscannedBagSize;
2227     METER(if (rt->gcUnscannedBagSize > rt->gcStats.maxunscanned)
2228               rt->gcStats.maxunscanned = rt->gcUnscannedBagSize);
2229 #endif
2230 
2231     pi = THING_TO_PAGE(thing);
2232     arena = PAGE_TO_ARENA(pi);
2233     thingSize = arena->list->thingSize;
2234     GET_GAP_AND_CHUNK_SPAN(thingSize, thingsPerUnscannedChunk, pageGap);
2235     chunkIndex = (((jsuword)thing & GC_PAGE_MASK) - pageGap) /
2236                  (thingSize * thingsPerUnscannedChunk);
2237     JS_ASSERT(chunkIndex < JS_BITS_PER_WORD);
2238     bit = (jsuword)1 << chunkIndex;
2239     if (pi->unscannedBitmap != 0) {
2240         JS_ASSERT(rt->gcUnscannedArenaStackTop);
2241         if (thingsPerUnscannedChunk != 1) {
2242             if (pi->unscannedBitmap & bit) {
2243                 /* Chunk already contains things to scan later. */
2244                 return;
2245             }
2246         } else {
2247             /*
2248              * The chunk must not contain things to scan later if there is
2249              * only one thing per chunk.
2250              */
2251             JS_ASSERT(!(pi->unscannedBitmap & bit));
2252         }
2253         pi->unscannedBitmap |= bit;
2254         JS_ASSERT(arena->unscannedPages & ((size_t)1 << PAGE_INDEX(pi)));
2255     } else {
2256         /*
2257          * The thing is the first unscanned thing in the page; set the bit
2258          * corresponding to this page in arena->unscannedPages.
2259          */
2260         pi->unscannedBitmap = bit;
2261         JS_ASSERT(PAGE_INDEX(pi) < JS_BITS_PER_WORD);
2262         bit = (jsuword)1 << PAGE_INDEX(pi);
2263         JS_ASSERT(!(arena->unscannedPages & bit));
2264         if (arena->unscannedPages != 0) {
2265             arena->unscannedPages |= bit;
2266             JS_ASSERT(arena->prevUnscanned);
2267             JS_ASSERT(rt->gcUnscannedArenaStackTop);
2268         } else {
2269             /*
2270              * The thing is the first unscanned thing in the whole arena;
2271              * push the arena on the stack of unscanned arenas unless it
2272              * has already been pushed. We detect that via the prevUnscanned
2273              * field, which is NULL only for arenas not yet pushed. To ensure
2274              * that prevUnscanned != NULL even when the stack contains a
2275              * single element, we make prevUnscanned of the bottom arena
2276              * point to the arena itself.
2277              *
2278              * See comments in ScanDelayedChildren.
2279              */
2280             arena->unscannedPages = bit;
2281             if (!arena->prevUnscanned) {
2282                 if (!rt->gcUnscannedArenaStackTop) {
2283                     /* Stack was empty, mark the arena as bottom element. */
2284                     arena->prevUnscanned = arena;
2285                 } else {
2286                     JS_ASSERT(rt->gcUnscannedArenaStackTop->prevUnscanned);
2287                     arena->prevUnscanned = rt->gcUnscannedArenaStackTop;
2288                 }
2289                 rt->gcUnscannedArenaStackTop = arena;
2290             }
2291         }
2292     }
2293     JS_ASSERT(rt->gcUnscannedArenaStackTop);
2294 }
2295 
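/*
 * Editor's worked example (same assumptions as above; not part of the
 * original source): delaying a 16-byte thing at page offset 0x130 gives
 * chunkIndex = (0x130 - 16) / (16 * 2) = 9, so bit 9 is set in
 * pi->unscannedBitmap; if this is the page's first delayed thing, the
 * page's bit is also set in arena->unscannedPages, and the arena is
 * pushed unless it is already on the unscanned stack.
 */
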
2296 static void
2297 ScanDelayedChildren(JSContext *cx)
2298 {
2299     JSRuntime *rt;
2300     JSGCArena *arena;
2301     size_t thingSize;
2302     size_t thingsPerUnscannedChunk;
2303     size_t pageGap;
2304     size_t pageIndex;
2305     JSGCPageInfo *pi;
2306     size_t chunkIndex;
2307     size_t thingOffset, thingLimit;
2308     JSGCThing *thing;
2309     uint8 *flagp;
2310     JSGCArena *prevArena;
2311 
2312     rt = cx->runtime;
2313     arena = rt->gcUnscannedArenaStackTop;
2314     if (!arena) {
2315         JS_ASSERT(rt->gcUnscannedBagSize == 0);
2316         return;
2317     }
2318 
2319   init_size:
2320     thingSize = arena->list->thingSize;
2321     GET_GAP_AND_CHUNK_SPAN(thingSize, thingsPerUnscannedChunk, pageGap);
2322     for (;;) {
2323         /*
2324          * The following assert verifies that the current arena belongs to
2325          * the unscanned stack, since AddThingToUnscannedBag ensures that
2326          * even the bottom arena's prevUnscanned is non-null, pointing to itself.
2327          */
2328         JS_ASSERT(arena->prevUnscanned);
2329         JS_ASSERT(rt->gcUnscannedArenaStackTop->prevUnscanned);
2330         while (arena->unscannedPages != 0) {
2331             pageIndex = JS_FLOOR_LOG2W(arena->unscannedPages);
2332             JS_ASSERT(pageIndex < GC_PAGE_COUNT);
2333             pi = (JSGCPageInfo *)(FIRST_THING_PAGE(arena) +
2334                                   pageIndex * GC_PAGE_SIZE);
2335             JS_ASSERT(pi->unscannedBitmap);
2336             chunkIndex = JS_FLOOR_LOG2W(pi->unscannedBitmap);
2337             pi->unscannedBitmap &= ~((jsuword)1 << chunkIndex);
2338             if (pi->unscannedBitmap == 0)
2339                 arena->unscannedPages &= ~((jsuword)1 << pageIndex);
2340             thingOffset = (pageGap
2341                            + chunkIndex * thingsPerUnscannedChunk * thingSize);
2342             JS_ASSERT(thingOffset >= sizeof(JSGCPageInfo));
2343             thingLimit = thingOffset + thingsPerUnscannedChunk * thingSize;
2344             if (thingsPerUnscannedChunk != 1) {
2345                 /*
2346                  * For the last chunk, thingLimit can go beyond the last
2347                  * allocated thing, as the real limit can fall inside the chunk.
2348                  */
2349                 if (arena->list->last == arena &&
2350                     arena->list->lastLimit < (pageIndex * GC_PAGE_SIZE +
2351                                               thingLimit)) {
2352                     thingLimit = (arena->list->lastLimit -
2353                                   pageIndex * GC_PAGE_SIZE);
2354                 } else if (thingLimit > GC_PAGE_SIZE) {
2355                     thingLimit = GC_PAGE_SIZE;
2356                 }
2357                 JS_ASSERT(thingLimit > thingOffset);
2358             }
2359             JS_ASSERT(arena->list->last != arena ||
2360                       arena->list->lastLimit >= (pageIndex * GC_PAGE_SIZE +
2361                                                  thingLimit));
2362             JS_ASSERT(thingLimit <= GC_PAGE_SIZE);
2363 
2364             for (; thingOffset != thingLimit; thingOffset += thingSize) {
2365                 /*
2366                  * XXX: inline js_GetGCThingFlags() to use already available
2367                  * pi.
2368                  */
2369                 thing = (void *)((jsuword)pi + thingOffset);
2370                 flagp = js_GetGCThingFlags(thing);
2371                 if (thingsPerUnscannedChunk != 1) {
2372                     /*
2373                      * Skip free or already scanned things that share the chunk
2374                      * with unscanned ones.
2375                      */
2376                     if ((*flagp & (GCF_MARK|GCF_FINAL)) != (GCF_MARK|GCF_FINAL))
2377                         continue;
2378                 }
2379                 JS_ASSERT((*flagp & (GCF_MARK|GCF_FINAL))
2380                               == (GCF_MARK|GCF_FINAL));
2381                 *flagp &= ~GCF_FINAL;
2382 #ifdef DEBUG
2383                 JS_ASSERT(rt->gcUnscannedBagSize != 0);
2384                 --rt->gcUnscannedBagSize;
2385 
2386                 /*
2387                  * Check that the GC thing type is consistent with the
2388                  * types of things that can be put into the unscanned bag.
2389                  */
2390                 switch (*flagp & GCF_TYPEMASK) {
2391                   case GCX_OBJECT:
2392 # if JS_HAS_XML_SUPPORT
2393                   case GCX_NAMESPACE:
2394                   case GCX_QNAME:
2395                   case GCX_XML:
2396 # endif
2397                     break;
2398                   default:
2399                     JS_ASSERT(0);
2400                 }
2401 #endif
2402                 MarkGCThingChildren(cx, thing, flagp, JS_FALSE);
2403             }
2404         }
2405         /*
2406          * We have finished scanning the arena, but we can pop it from
2407          * the stack only if it is the stack's top.
2408          *
2409          * When the above call to MarkGCThingChildren invokes
2410          * AddThingToUnscannedBag and the latter pushes new arenas onto
2411          * the stack, we have to skip popping this arena until it becomes
2412          * the top of the stack again.
2413          */
2414         if (arena == rt->gcUnscannedArenaStackTop) {
2415             prevArena = arena->prevUnscanned;
2416             arena->prevUnscanned = NULL;
2417             if (arena == prevArena) {
2418                 /*
2419                  * prevUnscanned points to itself and we reached the bottom
2420                  * of the stack.
2421                  */
2422                 break;
2423             }
2424             rt->gcUnscannedArenaStackTop = arena = prevArena;
2425         } else {
2426             arena = rt->gcUnscannedArenaStackTop;
2427         }
2428         if (arena->list->thingSize != thingSize)
2429             goto init_size;
2430     }
2431     JS_ASSERT(rt->gcUnscannedArenaStackTop);
2432     JS_ASSERT(!rt->gcUnscannedArenaStackTop->prevUnscanned);
2433     rt->gcUnscannedArenaStackTop = NULL;
2434     JS_ASSERT(rt->gcUnscannedBagSize == 0);
2435 }
2436 
2437 void
2438 js_MarkGCThing(JSContext *cx, void *thing)
2439 {
2440     uint8 *flagp;
2441 
2442     if (!thing)
2443         return;
2444 
2445     flagp = js_GetGCThingFlags(thing);
2446     JS_ASSERT(*flagp != GCF_FINAL);
2447     if (*flagp & GCF_MARK)
2448         return;
2449     *flagp |= GCF_MARK;
2450 
2451     if (!cx->insideGCMarkCallback) {
2452         MarkGCThingChildren(cx, thing, flagp, JS_TRUE);
2453     } else {
2454         /*
2455          * For API compatibility we allow the callback to assume that
2456          * after it calls js_MarkGCThing for the last time, the callback
2457          * can start to finalize its own objects that are only referenced
2458          * by unmarked GC things.
2459          *
2460          * Since we do not know which call from inside the callback is the
2461          * last, we ensure that the unscanned bag is always empty when we
2462          * return to the callback and all marked things are scanned.
2463          *
2464          * As an optimization we do not check for the stack size here and
2465          * pass JS_FALSE as the last argument to MarkGCThingChildren.
2466          * Otherwise, with a low C stack, the thing would be pushed into
2467          * the bag just to be fed to MarkGCThingChildren from inside
2468          * ScanDelayedChildren.
2469          */
2470         cx->insideGCMarkCallback = JS_FALSE;
2471         MarkGCThingChildren(cx, thing, flagp, JS_FALSE);
2472         ScanDelayedChildren(cx);
2473         cx->insideGCMarkCallback = JS_TRUE;
2474     }
2475 }
2476 
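/*
 * Editor's sketch (not part of the original source): the
 * insideGCMarkCallback path above exists for GC callbacks that mark
 * extra things at JSGC_MARK_END, as in this hypothetical embedding hook
 * built on the public JS_MarkGCThing wrapper.
 */
#if 0
static void *myEmbeddingRoot;   /* hypothetical embedding-held thing */

static JSBool
MyGCCallback(JSContext *cx, JSGCStatus status)
{
    if (status == JSGC_MARK_END && myEmbeddingRoot)
        JS_MarkGCThing(cx, myEmbeddingRoot, "embedding root", NULL);
    return JS_TRUE;
}
#endif
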
2477 JS_STATIC_DLL_CALLBACK(JSDHashOperator)
2478 gc_root_marker(JSDHashTable *table, JSDHashEntryHdr *hdr, uint32 num, void *arg)
2479 {
2480     JSGCRootHashEntry *rhe = (JSGCRootHashEntry *)hdr;
2481     jsval *rp = (jsval *)rhe->root;
2482     jsval v = *rp;
2483 
2484     /* Ignore null object and scalar values. */
2485     if (!JSVAL_IS_NULL(v) && JSVAL_IS_GCTHING(v)) {
2486         JSContext *cx = (JSContext *)arg;
2487 #ifdef DEBUG
2488         JSBool root_points_to_gcArenaList = JS_FALSE;
2489         jsuword thing = (jsuword) JSVAL_TO_GCTHING(v);
2490         uintN i;
2491         JSGCArenaList *arenaList;
2492         JSGCArena *a;
2493         size_t limit;
2494 
2495         for (i = 0; i < GC_NUM_FREELISTS; i++) {
2496             arenaList = &cx->runtime->gcArenaList[i];
2497             limit = arenaList->lastLimit;
2498             for (a = arenaList->last; a; a = a->prev) {
2499                 if (thing - FIRST_THING_PAGE(a) < limit) {
2500                     root_points_to_gcArenaList = JS_TRUE;
2501                     break;
2502                 }
2503                 limit = GC_THINGS_SIZE;
2504             }
2505         }
2506         if (!root_points_to_gcArenaList && rhe->name) {
2507             fprintf(stderr,
2508 "JS API usage error: the address passed to JS_AddNamedRoot currently holds an\n"
2509 "invalid jsval.  This is usually caused by a missing call to JS_RemoveRoot.\n"
2510 "The root's name is \"%s\".\n",
2511                     rhe->name);
2512         }
2513         JS_ASSERT(root_points_to_gcArenaList);
2514 #endif
2515 
2516         GC_MARK(cx, JSVAL_TO_GCTHING(v), rhe->name ? rhe->name : "root");
2517     }
2518     return JS_DHASH_NEXT;
2519 }
2520 
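/*
 * Editor's sketch (not part of the original source): the rooting
 * discipline that keeps the debug check above quiet -- every
 * JS_AddNamedRoot of a jsval slot must be matched by a JS_RemoveRoot
 * before the slot's storage goes away.  The helper is hypothetical.
 */
#if 0
extern JSBool ComputeSomeValue(JSContext *cx, jsval *vp);

static JSBool
UseRootedValue(JSContext *cx)
{
    jsval v = JSVAL_NULL;
    JSBool ok;

    if (!JS_AddNamedRoot(cx, &v, "UseRootedValue.v"))
        return JS_FALSE;
    ok = ComputeSomeValue(cx, &v);      /* may allocate and trigger GC */
    JS_RemoveRoot(cx, &v);              /* unroot before &v goes away */
    return ok;
}
#endif
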
2521 JS_STATIC_DLL_CALLBACK(JSDHashOperator)
2522 gc_lock_marker(JSDHashTable *table, JSDHashEntryHdr *hdr, uint32 num, void *arg)
2523 {
2524     JSGCLockHashEntry *lhe = (JSGCLockHashEntry *)hdr;
2525     void *thing = (void *)lhe->thing;
2526     JSContext *cx = (JSContext *)arg;
2527 
2528     GC_MARK(cx, thing, "locked object");
2529     return JS_DHASH_NEXT;
2530 }
2531 
2532 #define GC_MARK_JSVALS(cx, len, vec, name)                                    \
2533     JS_BEGIN_MACRO                                                            \
2534         jsval _v, *_vp, *_end;                                                \
2535                                                                               \
2536         for (_vp = vec, _end = _vp + len; _vp < _end; _vp++) {                \
2537             _v = *_vp;                                                        \
2538             if (JSVAL_IS_GCTHING(_v))                                         \
2539                 GC_MARK(cx, JSVAL_TO_GCTHING(_v), name);                      \
2540         }                                                                     \
2541     JS_END_MACRO
2542 
2543 void
2544 js_MarkStackFrame(JSContext *cx, JSStackFrame *fp)
2545 {
2546     uintN depth, nslots;
2547 
2548     if (fp->callobj)
2549         GC_MARK(cx, fp->callobj, "call object");
2550     if (fp->argsobj)
2551         GC_MARK(cx, fp->argsobj, "arguments object");
2552     if (fp->varobj)
2553         GC_MARK(cx, fp->varobj, "variables object");
2554     if (fp->script) {
2555         js_MarkScript(cx, fp->script);
2556         if (fp->spbase) {
2557             /*
2558              * Don't mark what has not been pushed yet, or what has been
2559              * popped already.
2560              */
2561             depth = fp->script->depth;
2562             nslots = (JS_UPTRDIFF(fp->sp, fp->spbase)
2563                       < depth * sizeof(jsval))
2564                      ? (uintN)(fp->sp - fp->spbase)
2565                      : depth;
2566             GC_MARK_JSVALS(cx, nslots, fp->spbase, "operand");
2567         }
2568     }
2569 
2570     /* Allow for primitive this parameter due to JSFUN_THISP_* flags. */
2571     JS_ASSERT(JSVAL_IS_OBJECT((jsval)fp->thisp) ||
2572               (fp->fun && JSFUN_THISP_FLAGS(fp->fun->flags)));
2573     if (JSVAL_IS_GCTHING((jsval)fp->thisp))
2574         GC_MARK(cx, JSVAL_TO_GCTHING((jsval)fp->thisp), "this");
2575 
2576     /*
2577      * Mark fp->argv, even though in the common case it will be marked via our
2578      * caller's frame, or via a JSStackHeader if fp was pushed by an external
2579      * invocation.
2580      *
2581      * The hard case is when there is not enough contiguous space in the stack
2582      * arena for actual, missing formal, and local root (JSFunctionSpec.extra)
2583      * slots.  In this case, fp->argv points to new space in a new arena, and
2584      * marking the caller's operand stack, or an external caller's allocated
2585      * stack tracked by a JSStackHeader, will not mark all the values stored
2586      * and addressable via fp->argv.
2587      *
2588      * So in summary, solely for the hard case of moving argv due to missing
2589      * formals and extra roots, we must mark actuals, missing formals, and any
2590      * local roots arrayed at fp->argv here.
2591      *
2592      * It would be good to avoid redundant marking of the same reference, in
2593      * the case where fp->argv does point into caller-allocated space tracked
2594      * by fp->down->spbase or cx->stackHeaders.  This would allow callbacks
2595      * such as the forthcoming rt->gcThingCallback (bug 333078) to compute JS
2596      * reference counts.  So this comment deserves a FIXME bug to cite.
2597      */
2598     if (fp->argv) {
2599         nslots = fp->argc;
2600         if (fp->fun) {
2601             if (fp->fun->nargs > nslots)
2602                 nslots = fp->fun->nargs;
2603             if (!FUN_INTERPRETED(fp->fun))
2604                 nslots += fp->fun->u.n.extra;
2605         }
2606         GC_MARK_JSVALS(cx, nslots + 2, fp->argv - 2, "arg");
2607     }
2608     if (JSVAL_IS_GCTHING(fp->rval))
2609         GC_MARK(cx, JSVAL_TO_GCTHING(fp->rval), "rval");
2610     if (fp->vars)
2611         GC_MARK_JSVALS(cx, fp->nvars, fp->vars, "var");
2612     GC_MARK(cx, fp->scopeChain, "scope chain");
2613     if (fp->sharpArray)
2614         GC_MARK(cx, fp->sharpArray, "sharp array");
2615 
2616     if (fp->xmlNamespace)
2617         GC_MARK(cx, fp->xmlNamespace, "xmlNamespace");
2618 }
2619 
2620 static void
2621 MarkWeakRoots(JSContext *cx, JSWeakRoots *wr)
2622 {
2623     uintN i;
2624     void *thing;
2625 
2626     for (i = 0; i < GCX_NTYPES; i++)
2627         GC_MARK(cx, wr->newborn[i], gc_typenames[i]);
2628     if (wr->lastAtom)
2629         GC_MARK_ATOM(cx, wr->lastAtom);
2630     if (JSVAL_IS_GCTHING(wr->lastInternalResult)) {
2631         thing = JSVAL_TO_GCTHING(wr->lastInternalResult);
2632         if (thing)
2633             GC_MARK(cx, thing, "lastInternalResult");
2634     }
2635 }
2636 
2637 /*
2638  * When gckind is GC_LAST_DITCH, it indicates a call from js_NewGCThing with
2639  * rt->gcLock already held, and the lock must remain held on return.
2640  */
2641 void
2642 js_GC(JSContext *cx, JSGCInvocationKind gckind)
2643 {
2644     JSRuntime *rt;
2645     JSBool keepAtoms;
2646     uintN i, type;
2647     JSContext *iter, *acx;
2648 #if JS_HAS_GENERATORS
2649     JSGenerator **genTodoTail;
2650 #endif
2651     JSStackFrame *fp, *chain;
2652     JSStackHeader *sh;
2653     JSTempValueRooter *tvr;
2654     size_t nbytes, limit, offset;
2655     JSGCArena *a, **ap;
2656     uint8 flags, *flagp, *firstPage;
2657     JSGCThing *thing, *freeList;
2658     JSGCArenaList *arenaList;
2659     GCFinalizeOp finalizer;
2660     JSBool allClear;
2661 #ifdef JS_THREADSAFE
2662     uint32 requestDebit;
2663 #endif
2664 
2665     rt = cx->runtime;
2666 #ifdef JS_THREADSAFE
2667     /* Avoid deadlock. */
2668     JS_ASSERT(!JS_IS_RUNTIME_LOCKED(rt));
2669 #endif
2670 
2671     if (gckind == GC_LAST_DITCH) {
2672         /* The last ditch GC preserves all atoms and weak roots. */
2673         keepAtoms = JS_TRUE;
2674     } else {
2675         JS_CLEAR_WEAK_ROOTS(&cx->weakRoots);
2676         rt->gcPoke = JS_TRUE;
2677 
2678         /* Keep atoms when a suspended compile is running on another context. */
2679         keepAtoms = (rt->gcKeepAtoms != 0);
2680     }
2681 
2682     /*
2683      * Don't collect garbage if the runtime isn't up, and cx is not the last
2684      * context in the runtime.  The last context must force a GC, and nothing
2685      * should suppress that final collection or there may be shutdown leaks,
2686      * or runtime bloat until the next context is created.
2687      */
2688     if (rt->state != JSRTS_UP && gckind != GC_LAST_CONTEXT)
2689         return;
2690 
2691   restart_after_callback:
2692     /*
2693      * Let the API user decide to defer a GC if it wants to (unless this
2694      * is the last context).  Invoke the callback regardless.
2695      */
2696     if (rt->gcCallback &&
2697         !rt->gcCallback(cx, JSGC_BEGIN) &&
2698         gckind != GC_LAST_CONTEXT) {
2699         return;
2700     }
2701 
2702     /* Lock out other GC allocator and collector invocations. */
2703     if (gckind != GC_LAST_DITCH)
2704         JS_LOCK_GC(rt);
2705 
2706     /* Do nothing if no mutator has executed since the last GC. */
2707     if (!rt->gcPoke) {
2708         METER(rt->gcStats.nopoke++);
2709         if (gckind != GC_LAST_DITCH)
2710             JS_UNLOCK_GC(rt);
2711         return;
2712     }
2713     METER(rt->gcStats.poke++);
2714     rt->gcPoke = JS_FALSE;
2715 
2716 #ifdef JS_THREADSAFE
2717     JS_ASSERT(cx->thread->id == js_CurrentThreadId());
2718 
2719     /* Bump gcLevel and return rather than nest on this thread. */
2720     if (rt->gcThread == cx->thread) {
2721         JS_ASSERT(rt->gcLevel > 0);
2722         rt->gcLevel++;
2723         METER(if (rt->gcLevel > rt->gcStats.maxlevel)
2724                   rt->gcStats.maxlevel = rt->gcLevel);
2725         if (gckind != GC_LAST_DITCH)
2726             JS_UNLOCK_GC(rt);
2727         return;
2728     }
2729 
2730     /*
2731      * If we're in one or more requests (possibly on more than one context)
2732      * running on the current thread, indicate, temporarily, that all these
2733      * requests are inactive.  If cx->thread is NULL, then cx is not using
2734      * the request model, and does not contribute to rt->requestCount.
2735      */
2736     requestDebit = 0;
2737     if (cx->thread) {
2738         JSCList *head, *link;
2739 
2740         /*
2741          * Check all contexts on cx->thread->contextList for active requests,
2742          * counting each such context against requestDebit.
2743          */
2744         head = &cx->thread->contextList;
2745         for (link = head->next; link != head; link = link->next) {
2746             acx = CX_FROM_THREAD_LINKS(link);
2747             JS_ASSERT(acx->thread == cx->thread);
2748             if (acx->requestDepth)
2749                 requestDebit++;
2750         }
2751     } else {
2752         /*
2753          * We assert, but check anyway, in case someone is misusing the API.
2754          * Avoiding the loop over all of rt's contexts is a win in the event
2755          * that the GC runs only on request-less contexts with null threads,
2756          * in a special thread such as might be used by the UI/DOM/Layout
2757          * "mozilla" or "main" thread in Mozilla-the-browser.
2758          */
2759         JS_ASSERT(cx->requestDepth == 0);
2760         if (cx->requestDepth)
2761             requestDebit = 1;
2762     }
2763     if (requestDebit) {
2764         JS_ASSERT(requestDebit <= rt->requestCount);
2765         rt->requestCount -= requestDebit;
2766         if (rt->requestCount == 0)
2767             JS_NOTIFY_REQUEST_DONE(rt);
2768     }
2769 
2770     /* If another thread is already in GC, don't attempt GC; wait instead. */
2771     if (rt->gcLevel > 0) {
2772         /* Bump gcLevel to restart the current GC, so it finds new garbage. */
2773         rt->gcLevel++;
2774         METER(if (rt->gcLevel > rt->gcStats.maxlevel)
2775                   rt->gcStats.maxlevel = rt->gcLevel);
2776 
2777         /* Wait for the other thread to finish, then resume our request. */
2778         while (rt->gcLevel > 0)
2779             JS_AWAIT_GC_DONE(rt);
2780         if (requestDebit)
2781             rt->requestCount += requestDebit;
2782         if (gckind != GC_LAST_DITCH)
2783             JS_UNLOCK_GC(rt);
2784         return;
2785     }
2786 
2787     /* No other thread is in GC, so indicate that we're now in GC. */
2788     rt->gcLevel = 1;
2789     rt->gcThread = cx->thread;
2790 
2791     /* Wait for all other requests to finish. */
2792     while (rt->requestCount > 0)
2793         JS_AWAIT_REQUEST_DONE(rt);
2794 
2795 #else  /* !JS_THREADSAFE */
2796 
2797     /* Bump gcLevel and return rather than nest; the outer gc will restart. */
2798     rt->gcLevel++;
2799     METER(if (rt->gcLevel > rt->gcStats.maxlevel)
2800               rt->gcStats.maxlevel = rt->gcLevel);
2801     if (rt->gcLevel > 1)
2802         return;
2803 
2804 #endif /* !JS_THREADSAFE */
2805 
2806     /*
2807      * Set rt->gcRunning here within the GC lock, and after waiting for any
2808      * active requests to end, so that new requests that try to JS_AddRoot,
2809      * JS_RemoveRoot, or JS_RemoveRootRT block in JS_BeginRequest waiting for
2810      * rt->gcLevel to drop to zero, while request-less calls to the *Root*
2811      * APIs block in js_AddRoot or js_RemoveRoot (see above in this file),
2812      * waiting for GC to finish.
2813      */
2814     rt->gcRunning = JS_TRUE;
2815     JS_UNLOCK_GC(rt);
2816 
2817     /* Reset malloc counter. */
2818     rt->gcMallocBytes = 0;
2819 
2820     /* Drop atoms held by the property cache, and clear property weak links. */
2821     js_DisablePropertyCache(cx);
2822     js_FlushPropertyCache(cx);
2823 #ifdef DEBUG_scopemeters
2824   { extern void js_DumpScopeMeters(JSRuntime *rt);
2825     js_DumpScopeMeters(rt);
2826   }
2827 #endif
2828 
2829 #ifdef JS_THREADSAFE
2830     /*
2831      * Set all thread local freelists to NULL. We may visit a thread's
2832      * freelist more than once. To avoid redundant clearing we unroll the
2833      * current thread's step.
2834      *
2835      * Also, in case a JSScript wrapped within an object was finalized, we
2836      * null acx->thread->gsnCache.script and finish the cache's hashtable.
2837      * Note that js_DestroyScript, called from script_finalize, will have
2838      * already cleared cx->thread->gsnCache above during finalization, so we
2839      * don't have to here.
2840      */
2841     memset(cx->thread->gcFreeLists, 0, sizeof cx->thread->gcFreeLists);
2842     iter = NULL;
2843     while ((acx = js_ContextIterator(rt, JS_FALSE, &iter)) != NULL) {
2844         if (!acx->thread || acx->thread == cx->thread)
2845             continue;
2846         memset(acx->thread->gcFreeLists, 0, sizeof acx->thread->gcFreeLists);
2847         GSN_CACHE_CLEAR(&acx->thread->gsnCache);
2848     }
2849 #else
2850     /* The thread-unsafe case just has to clear the runtime's GSN cache. */
2851     GSN_CACHE_CLEAR(&rt->gsnCache);
2852 #endif
2853 
2854 restart:
2855     rt->gcNumber++;
2856     JS_ASSERT(!rt->gcUnscannedArenaStackTop);
2857     JS_ASSERT(rt->gcUnscannedBagSize == 0);
2858 
2859     /*
2860      * Mark phase.
2861      */
2862     JS_DHashTableEnumerate(&rt->gcRootsHash, gc_root_marker, cx);
2863     if (rt->gcLocksHash)
2864         JS_DHashTableEnumerate(rt->gcLocksHash, gc_lock_marker, cx);
2865     js_MarkAtomState(&rt->atomState, keepAtoms, gc_mark_atom_key_thing, cx);
2866     js_MarkWatchPoints(cx);
2867     js_MarkScriptFilenames(rt, keepAtoms);
2868     js_MarkNativeIteratorStates(cx);
2869 
2870 #if JS_HAS_GENERATORS
2871     genTodoTail = MarkScheduledGenerators(cx);
2872     JS_ASSERT(!*genTodoTail);
2873 #endif
2874 
2875     iter = NULL;
2876     while ((acx = js_ContextIterator(rt, JS_TRUE, &iter)) != NULL) {
2877         /*
2878          * Iterate frame chain and dormant chains. Temporarily tack current
2879          * frame onto the head of the dormant list to ease iteration.
2880          *
2881          * (NB: see comment on this whole "dormant" thing in js_Execute.)
2882          */
2883         chain = acx->fp;
2884         if (chain) {
2885             JS_ASSERT(!chain->dormantNext);
2886             chain->dormantNext = acx->dormantFrameChain;
2887         } else {
2888             chain = acx->dormantFrameChain;
2889         }
2890 
2891         for (fp = chain; fp; fp = chain = chain->dormantNext) {
2892             do {
2893                 js_MarkStackFrame(cx, fp);
2894             } while ((fp = fp->down) != NULL);
2895         }
2896 
2897         /* Clean up temporary "dormant" linkage. */
2898         if (acx->fp)
2899             acx->fp->dormantNext = NULL;
2900 
2901         /* Mark other roots-by-definition in acx. */
2902         GC_MARK(cx, acx->globalObject, "global object");
2903         MarkWeakRoots(cx, &acx->weakRoots);
2904         if (acx->throwing) {
2905             if (JSVAL_IS_GCTHING(acx->exception))
2906                 GC_MARK(cx, JSVAL_TO_GCTHING(acx->exception), "exception");
2907         } else {
2908             /* Avoid keeping GC-ed junk stored in JSContext.exception. */
2909             acx->exception = JSVAL_NULL;
2910         }
2911 #if JS_HAS_LVALUE_RETURN
2912         if (acx->rval2set && JSVAL_IS_GCTHING(acx->rval2))
2913             GC_MARK(cx, JSVAL_TO_GCTHING(acx->rval2), "rval2");
2914 #endif
2915 
2916         for (sh = acx->stackHeaders; sh; sh = sh->down) {
2917             METER(rt->gcStats.stackseg++);
2918             METER(rt->gcStats.segslots += sh->nslots);
2919             GC_MARK_JSVALS(cx, sh->nslots, JS_STACK_SEGMENT(sh), "stack");
2920         }
2921 
2922         if (acx->localRootStack)
2923             js_MarkLocalRoots(cx, acx->localRootStack);
2924 
        for (tvr = acx->tempValueRooters; tvr; tvr = tvr->down) {
            switch (tvr->count) {
              case JSTVU_SINGLE:
                if (JSVAL_IS_GCTHING(tvr->u.value)) {
                    GC_MARK(cx, JSVAL_TO_GCTHING(tvr->u.value),
                            "tvr->u.value");
                }
                break;
              case JSTVU_MARKER:
                tvr->u.marker(cx, tvr);
                break;
              case JSTVU_SPROP:
                MARK_SCOPE_PROPERTY(cx, tvr->u.sprop);
                break;
              case JSTVU_WEAK_ROOTS:
                MarkWeakRoots(cx, tvr->u.weakRoots);
                break;
              default:
                JS_ASSERT(tvr->count >= 0);
                GC_MARK_JSVALS(cx, tvr->count, tvr->u.array, "tvr->u.array");
            }
        }

        if (acx->sharpObjectMap.depth > 0)
            js_GCMarkSharpMap(cx, &acx->sharpObjectMap);
    }

#ifdef DUMP_CALL_TABLE
    js_DumpCallTable(cx);
#endif

    /*
     * Mark children of things that caused too-deep recursion during the
     * marking phase above.
     */
    ScanDelayedChildren(cx);

#if JS_HAS_GENERATORS
    /*
     * Close phase: the search-and-mark part. See the comments in
     * FindAndMarkObjectsToClose for details.
     */
    FindAndMarkObjectsToClose(cx, gckind, genTodoTail);

    /*
     * Mark children of things that caused too-deep recursion during the
     * just-completed marking part of the close phase.
     */
    ScanDelayedChildren(cx);
#endif

    JS_ASSERT(!cx->insideGCMarkCallback);
    if (rt->gcCallback) {
        cx->insideGCMarkCallback = JS_TRUE;
        (void) rt->gcCallback(cx, JSGC_MARK_END);
        JS_ASSERT(cx->insideGCMarkCallback);
        cx->insideGCMarkCallback = JS_FALSE;
    }
    JS_ASSERT(rt->gcUnscannedBagSize == 0);

    /* Finalize iterator states before the objects they iterate over. */
    CloseIteratorStates(cx);

    /*
     * Sweep phase.
     *
     * Finalize as we sweep, outside of rt->gcLock but with rt->gcRunning set
     * so that any attempt to allocate a GC-thing from a finalizer will fail,
     * rather than nest badly and leave the unmarked newborn to be swept.
     *
     * Finalize smaller objects before larger, to guarantee finalization of
     * GC-allocated obj->slots after obj.  See FreeSlots in jsobj.c.
     */
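    /*
     * A sketch of the per-arena walk below, inferred from the code and the
     * layout comments at the top of this file:
     *
     *   - offset steps through the arena one thing (nbytes) at a time and
     *     skips the JSGCPageInfo gap whenever it crosses a GC page boundary.
     *   - The flag byte for the thing at offset is computed as
     *         flagp = a->base + offset / sizeof(JSGCThing);
     *     when that address would land inside the thing pages themselves,
     *     the real flags live GC_THINGS_SIZE bytes higher.
     *   - Marked things survive and have their mark bit cleared for the
     *     next cycle; unmarked, unlocked things are finalized and flagged
     *     free with GCF_FINAL.
     */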
    for (i = 0; i < GC_NUM_FREELISTS; i++) {
        arenaList = &rt->gcArenaList[i];
        nbytes = GC_FREELIST_NBYTES(i);
        limit = arenaList->lastLimit;
        for (a = arenaList->last; a; a = a->prev) {
            JS_ASSERT(!a->prevUnscanned);
            JS_ASSERT(a->unscannedPages == 0);
            firstPage = (uint8 *) FIRST_THING_PAGE(a);
            for (offset = 0; offset != limit; offset += nbytes) {
                if ((offset & GC_PAGE_MASK) == 0) {
                    JS_ASSERT(((JSGCPageInfo *)(firstPage + offset))->
                              unscannedBitmap == 0);
                    offset += PAGE_THING_GAP(nbytes);
                }
                JS_ASSERT(offset < limit);
                flagp = a->base + offset / sizeof(JSGCThing);
                if (flagp >= firstPage)
                    flagp += GC_THINGS_SIZE;
                flags = *flagp;
                if (flags & GCF_MARK) {
                    *flagp &= ~GCF_MARK;
                } else if (!(flags & (GCF_LOCK | GCF_FINAL))) {
                    /* Call the finalizer with GCF_FINAL ORed into flags. */
                    type = flags & GCF_TYPEMASK;
                    finalizer = gc_finalizers[type];
                    if (finalizer) {
                        thing = (JSGCThing *)(firstPage + offset);
                        *flagp = (uint8)(flags | GCF_FINAL);
                        if (type >= GCX_EXTERNAL_STRING)
                            js_PurgeDeflatedStringCache(rt, (JSString *)thing);
                        finalizer(cx, thing);
                    }

                    /* Set flags to GCF_FINAL, signifying the thing is free. */
                    *flagp = GCF_FINAL;
                }
            }
            limit = GC_THINGS_SIZE;
        }
    }

    /*
     * Sweep the runtime's property tree after finalizing objects, in case
     * any had watchpoints referencing tree nodes.  Then sweep atoms, which
     * may be referenced from dead property ids.
     */
    js_SweepScopeProperties(rt);
    js_SweepAtomState(&rt->atomState);

    /*
     * Sweep script filenames after sweeping functions in the generic loop
     * above. This way, when a scripted function's finalizer destroys the
     * script and calls rt->destroyScriptHook, the hook can still access the
     * script's filename. See bug 323267.
     */
    js_SweepScriptFilenames(rt);

    /*
     * Free phase.
     * Free any unused arenas and rebuild the JSGCThing freelist.
     */
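    /*
     * Outline of the rebuild below (a descriptive note, not new behavior):
     * arenas are walked from last to first, pushing each free thing onto a
     * freshly built free list. If an arena turns out to contain only free
     * things (allClear), its portion of the new list is discarded and the
     * arena itself is destroyed; otherwise the list head is committed to
     * arenaList->freeList and the walk moves on to a->prev.
     */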
    for (i = 0; i < GC_NUM_FREELISTS; i++) {
        arenaList = &rt->gcArenaList[i];
        ap = &arenaList->last;
        a = *ap;
        if (!a)
            continue;

        allClear = JS_TRUE;
        arenaList->freeList = NULL;
        freeList = NULL;
        METER(arenaList->stats.nthings = 0);
        METER(arenaList->stats.freelen = 0);

        nbytes = GC_FREELIST_NBYTES(i);
        limit = arenaList->lastLimit;
        do {
            METER(size_t nfree = 0);
            firstPage = (uint8 *) FIRST_THING_PAGE(a);
            for (offset = 0; offset != limit; offset += nbytes) {
                if ((offset & GC_PAGE_MASK) == 0)
                    offset += PAGE_THING_GAP(nbytes);
                JS_ASSERT(offset < limit);
                flagp = a->base + offset / sizeof(JSGCThing);
                if (flagp >= firstPage)
                    flagp += GC_THINGS_SIZE;

                if (*flagp != GCF_FINAL) {
                    allClear = JS_FALSE;
                    METER(++arenaList->stats.nthings);
                } else {
                    thing = (JSGCThing *)(firstPage + offset);
                    thing->flagp = flagp;
                    thing->next = freeList;
                    freeList = thing;
                    METER(++nfree);
                }
            }
            if (allClear) {
                /*
                 * Forget the free-list head just assembled for this arena
                 * and destroy the arena itself.
                 */
                freeList = arenaList->freeList;
                DestroyGCArena(rt, arenaList, ap);
            } else {
                allClear = JS_TRUE;
                arenaList->freeList = freeList;
                ap = &a->prev;
                METER(arenaList->stats.freelen += nfree);
                METER(arenaList->stats.totalfreelen += nfree);
                METER(++arenaList->stats.totalarenas);
            }
            limit = GC_THINGS_SIZE;
        } while ((a = *ap) != NULL);
    }

    if (rt->gcCallback)
        (void) rt->gcCallback(cx, JSGC_FINALIZE_END);
#ifdef DEBUG_srcnotesize
  { extern void DumpSrcNoteSizeHist();
    DumpSrcNoteSizeHist();
    printf("GC HEAP SIZE %lu (%lu)\n",
           (unsigned long)rt->gcBytes, (unsigned long)rt->gcPrivateBytes);
  }
#endif

    JS_LOCK_GC(rt);

    /*
     * We want to restart GC if js_GC was called recursively or if any of the
     * finalizers called js_RemoveRoot or js_UnlockGCThingRT.
     */
    if (rt->gcLevel > 1 || rt->gcPoke) {
        rt->gcLevel = 1;
        rt->gcPoke = JS_FALSE;
        JS_UNLOCK_GC(rt);
        goto restart;
    }
    js_EnablePropertyCache(cx);
    rt->gcLevel = 0;
    rt->gcLastBytes = rt->gcBytes;
    rt->gcRunning = JS_FALSE;

#ifdef JS_THREADSAFE
    /* If we were invoked during a request, pay back the temporary debit. */
    if (requestDebit)
        rt->requestCount += requestDebit;
    rt->gcThread = NULL;
    JS_NOTIFY_GC_DONE(rt);

    /*
     * Unlock unless gckind is GC_LAST_DITCH, which requires the GC lock to
     * be held on return.
     */
    if (gckind != GC_LAST_DITCH)
        JS_UNLOCK_GC(rt);
#endif

    /* Execute the JSGC_END callback outside the lock. */
    if (rt->gcCallback) {
        JSWeakRoots savedWeakRoots;
        JSTempValueRooter tvr;

        if (gckind == GC_LAST_DITCH) {
            /*
             * We allow the JSGC_END callback to force a full GC or allocate
             * new GC things, so we must protect the weak roots from being
             * collected or overwritten.
             */
            savedWeakRoots = cx->weakRoots;
            JS_PUSH_TEMP_ROOT_WEAK_COPY(cx, &savedWeakRoots, &tvr);
            JS_KEEP_ATOMS(rt);
            JS_UNLOCK_GC(rt);
        }

        (void) rt->gcCallback(cx, JSGC_END);

        if (gckind == GC_LAST_DITCH) {
            JS_LOCK_GC(rt);
            JS_UNKEEP_ATOMS(rt);
            JS_POP_TEMP_ROOT(cx, &tvr);
        } else if (gckind == GC_LAST_CONTEXT && rt->gcPoke) {
            /*
             * On shutdown, iterate until the JSGC_END callback stops
             * creating garbage.
             */
            goto restart_after_callback;
        }
    }
}

void
js_UpdateMallocCounter(JSContext *cx, size_t nbytes)
{
    uint32 *pbytes, bytes;

#ifdef JS_THREADSAFE
    pbytes = &cx->thread->gcMallocBytes;
#else
    pbytes = &cx->runtime->gcMallocBytes;
#endif
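    /*
     * The update below is a saturating add: (uint32)-1 is UINT32_MAX, so
     * the test asks whether bytes + nbytes would reach or exceed that
     * ceiling without actually overflowing. E.g. with bytes = 0xFFFFFFF0
     * and nbytes = 0x20, UINT32_MAX - bytes is 0x0F <= 0x20, so the counter
     * clamps to UINT32_MAX instead of wrapping around.
     */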
    bytes = *pbytes;
    *pbytes = ((uint32)-1 - bytes <= nbytes) ? (uint32)-1 : bytes + nbytes;
}
3202