/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 *
 * ***** BEGIN LICENSE BLOCK *****
 * Version: MPL 1.1/GPL 2.0/LGPL 2.1
 *
 * The contents of this file are subject to the Mozilla Public License Version
 * 1.1 (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 * http://www.mozilla.org/MPL/
 *
 * Software distributed under the License is distributed on an "AS IS" basis,
 * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
 * for the specific language governing rights and limitations under the
 * License.
 *
 * The Original Code is Mozilla Communicator client code, released
 * March 31, 1998.
 *
 * The Initial Developer of the Original Code is
 * Netscape Communications Corporation.
 * Portions created by the Initial Developer are Copyright (C) 1998
 * the Initial Developer. All Rights Reserved.
 *
 * Contributor(s):
 *
 * Alternatively, the contents of this file may be used under the terms of
 * either of the GNU General Public License Version 2 or later (the "GPL"),
 * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
 * in which case the provisions of the GPL or the LGPL are applicable instead
 * of those above. If you wish to allow use of your version of this file only
 * under the terms of either the GPL or the LGPL, and not to allow others to
 * use your version of this file under the terms of the MPL, indicate your
 * decision by deleting the provisions above and replace them with the notice
 * and other provisions required by the GPL or the LGPL. If you do not delete
 * the provisions above, a recipient may use your version of this file under
 * the terms of any one of the MPL, the GPL or the LGPL.
 *
 * ***** END LICENSE BLOCK ***** */

#ifdef JS_THREADSAFE

/*
 * JS locking stubs.
 */
#include "jsstddef.h"
#include <stdlib.h>
#include "jspubtd.h"
#include "jsutil.h" /* Added by JSIFY */
#include "jstypes.h"
#include "jsbit.h"
#include "jscntxt.h"
#include "jsdtoa.h"
#include "jsgc.h"
#include "jslock.h"
#include "jsscope.h"
#include "jsstr.h"

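/*
 * ReadWord is an identity macro: on all supported platforms an aligned
 * word load is presumed atomic, so a thin lock's owner word can be
 * snapshotted without any barrier or volatile qualifier.
 */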
#define ReadWord(W) (W)

#ifndef NSPR_LOCK

#include <memory.h>

static PRLock **global_locks;
static uint32 global_lock_count = 1;
static uint32 global_locks_log2 = 0;
static uint32 global_locks_mask = 0;

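/*
 * Hash a thin lock address into the global-lock table: the >> 2 discards
 * low bits that are zero for word-aligned pointers, and the mask (set up
 * in js_SetupLocks as a power of two minus one) selects a bucket.
 */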
#define GLOBAL_LOCK_INDEX(id)   (((uint32)(id) >> 2) & global_locks_mask)

static void
js_LockGlobal(void *id)
{
    uint32 i = GLOBAL_LOCK_INDEX(id);
    PR_Lock(global_locks[i]);
}

static void
js_UnlockGlobal(void *id)
{
    uint32 i = GLOBAL_LOCK_INDEX(id);
    PR_Unlock(global_locks[i]);
}

/* Exclude Alpha NT. */
#if defined(_WIN32) && defined(_M_IX86)
#pragma warning( disable : 4035 )

static JS_INLINE int
js_CompareAndSwap(jsword *w, jsword ov, jsword nv)
{
    __asm {
        mov eax, ov
        mov ecx, nv
        mov ebx, w
        lock cmpxchg [ebx], ecx
        sete al
        and eax, 1h
    }
}
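
/*
 * The __asm block above "falls off the end" with its result in EAX, the
 * x86 return register; warning C4035 (no return value) is disabled above
 * precisely to permit this idiom.
 */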

#elif defined(__GNUC__) && defined(__i386__)

/* Note: This fails on 386 cpus, cmpxchgl is a >= 486 instruction */
static JS_INLINE int
js_CompareAndSwap(jsword *w, jsword ov, jsword nv)
{
    unsigned int res;

    __asm__ __volatile__ (
                          "lock\n"
                          "cmpxchgl %2, (%1)\n"
                          "sete %%al\n"
                          "andl $1, %%eax\n"
                          : "=a" (res)
                          : "r" (w), "r" (nv), "a" (ov)
                          : "cc", "memory");
    return (int)res;
}

#elif (defined(__USLC__) || defined(_SCO_DS)) && defined(i386)

/* Note: This fails on 386 cpus, cmpxchgl is a >= 486 instruction */

asm int
js_CompareAndSwap(jsword *w, jsword ov, jsword nv)
{
%ureg w, nv;
	movl	ov,%eax
	lock
	cmpxchgl nv,(w)
	sete	%al
	andl	$1,%eax
%ureg w;  mem ov, nv;
	movl	ov,%eax
	movl	nv,%ecx
	lock
	cmpxchgl %ecx,(w)
	sete	%al
	andl	$1,%eax
%ureg nv;
	movl	ov,%eax
	movl	w,%edx
	lock
	cmpxchgl nv,(%edx)
	sete	%al
	andl	$1,%eax
%mem w, ov, nv;
	movl	ov,%eax
	movl	nv,%ecx
	movl	w,%edx
	lock
	cmpxchgl %ecx,(%edx)
	sete	%al
	andl	$1,%eax
}
#pragma asm full_optimization js_CompareAndSwap

#elif defined(SOLARIS) && defined(sparc) && defined(ULTRA_SPARC)

static JS_INLINE int
js_CompareAndSwap(jsword *w, jsword ov, jsword nv)
{
#if defined(__GNUC__)
    unsigned int res;
    JS_ASSERT(ov != nv);
    asm volatile ("\
stbar\n\
cas [%1],%2,%3\n\
cmp %2,%3\n\
be,a 1f\n\
mov 1,%0\n\
mov 0,%0\n\
1:"
                  : "=r" (res)
                  : "r" (w), "r" (ov), "r" (nv));
    return (int)res;
#else /* !__GNUC__ */
    extern int compare_and_swap(jsword*, jsword, jsword);
    JS_ASSERT(ov != nv);
    return compare_and_swap(w, ov, nv);
#endif
}

#elif defined(AIX)

#include <sys/atomic_op.h>
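
/*
 * AIX's _check_lock(word, old, new) conditionally stores new and returns
 * FALSE when the word matched old (i.e., on success), so its result is
 * negated below to match js_CompareAndSwap's true-on-success contract.
 */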
static JS_INLINE int
js_CompareAndSwap(jsword *w, jsword ov, jsword nv)
{
    return !_check_lock((atomic_p)w, ov, nv);
}

#else

#error "Define NSPR_LOCK if your platform lacks a compare-and-swap instruction."

#endif /* arch-tests */

#endif /* !NSPR_LOCK */
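
/*
 * For reference only, not compiled: on a C11 compiler, the per-architecture
 * variants above could in principle collapse into one portable routine such
 * as the sketch below.  The name js_CompareAndSwapC11 and the pointer cast
 * are illustrative assumptions, not part of this engine.
 */
#if 0
#include <stdatomic.h>

static JS_INLINE int
js_CompareAndSwapC11(jsword *w, jsword ov, jsword nv)
{
    /* Strong CAS: succeeds iff *w == ov.  On failure ov is overwritten
       with the observed value, which is harmless for a by-value copy. */
    return atomic_compare_exchange_strong((_Atomic jsword *)w, &ov, nv);
}
#endif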

void
js_InitLock(JSThinLock *tl)
{
#ifdef NSPR_LOCK
    tl->owner = 0;
    tl->fat = (JSFatLock*)JS_NEW_LOCK();
#else
    memset(tl, 0, sizeof(JSThinLock));
#endif
}

void
js_FinishLock(JSThinLock *tl)
{
#ifdef NSPR_LOCK
    tl->owner = 0xdeadbeef;
    if (tl->fat)
        JS_DESTROY_LOCK(((JSLock*)tl->fat));
#else
    JS_ASSERT(tl->owner == 0);
    JS_ASSERT(tl->fat == NULL);
#endif
}

#ifndef NSPR_LOCK
static void js_Dequeue(JSThinLock *);
#endif

#ifdef DEBUG_SCOPE_COUNT

#include <stdio.h>
#include "jsdhash.h"

static FILE *logfp;
static JSDHashTable logtbl;

typedef struct logentry {
    JSDHashEntryStub stub;
    char             op;
    const char       *file;
    int              line;
} logentry;

static void
logit(JSScope *scope, char op, const char *file, int line)
{
    logentry *entry;

    if (!logfp) {
        logfp = fopen("/tmp/scope.log", "w");
        if (!logfp)
            return;
        setvbuf(logfp, NULL, _IONBF, 0);
    }
    fprintf(logfp, "%p %c %s %d\n", (void *) scope, op, file, line);

    if (!logtbl.entryStore &&
        !JS_DHashTableInit(&logtbl, JS_DHashGetStubOps(), NULL,
                           sizeof(logentry), 100)) {
        return;
    }
    entry = (logentry *) JS_DHashTableOperate(&logtbl, scope, JS_DHASH_ADD);
    if (!entry)
        return;
    entry->stub.key = scope;
    entry->op = op;
    entry->file = file;
    entry->line = line;
}

void
js_unlog_scope(JSScope *scope)
{
    if (!logtbl.entryStore)
        return;
    (void) JS_DHashTableOperate(&logtbl, scope, JS_DHASH_REMOVE);
}

# define LOGIT(scope,op) logit(scope, op, __FILE__, __LINE__)

#else

# define LOGIT(scope,op) /* nothing */

#endif /* DEBUG_SCOPE_COUNT */

/*
 * Return true if cx is scope's ownercx, or owns any scope along the chain
 * of scopes that successive owner contexts are waiting to share.  That
 * condition implies deadlock in ClaimScope if cx's thread were to wait to
 * share scope.
 *
 * (i) rt->gcLock held
 */
static JSBool
WillDeadlock(JSScope *scope, JSContext *cx)
{
    JSContext *ownercx;

    do {
        ownercx = scope->ownercx;
        if (ownercx == cx) {
            JS_RUNTIME_METER(cx->runtime, deadlocksAvoided);
            return JS_TRUE;
        }
    } while (ownercx && (scope = ownercx->scopeToShare) != NULL);
    return JS_FALSE;
}

/*
 * Make scope multi-threaded, i.e. share its ownership among contexts in rt
 * using a "thin" or (if necessary due to contention) "fat" lock.  Called only
 * from ClaimScope, immediately below, when we detect deadlock were we to wait
 * for scope's lock, because its ownercx is waiting on a scope owned by the
 * calling cx.
 *
 * (i) rt->gcLock held
 */
static void
ShareScope(JSRuntime *rt, JSScope *scope)
{
    JSScope **todop;

    if (scope->u.link) {
        for (todop = &rt->scopeSharingTodo; *todop != scope;
             todop = &(*todop)->u.link) {
            JS_ASSERT(*todop != NO_SCOPE_SHARING_TODO);
        }
        *todop = scope->u.link;
        scope->u.link = NULL;       /* null u.link for sanity ASAP */
        JS_NOTIFY_ALL_CONDVAR(rt->scopeSharingDone);
    }
    js_InitLock(&scope->lock);
    if (scope == rt->setSlotScope) {
        /*
         * Nesting locks on another thread that's using scope->ownercx: give
         * the held lock a reentrancy count of 1 and set its lock.owner field
         * directly (no compare-and-swap needed while scope->ownercx is still
         * non-null).  See below in ClaimScope, before the ShareScope call,
         * for more on why this is necessary.
         *
         * If NSPR_LOCK is defined, we cannot deadlock holding rt->gcLock and
         * acquiring scope->lock.fat here, against another thread holding that
         * fat lock and trying to grab rt->gcLock.  This is because no other
         * thread can attempt to acquire scope->lock.fat until scope->ownercx
         * is null *and* our thread has released rt->gcLock, which interlocks
         * scope->ownercx's transition to null against tests of that member
         * in ClaimScope.
         */
        scope->lock.owner = CX_THINLOCK_ID(scope->ownercx);
#ifdef NSPR_LOCK
        JS_ACQUIRE_LOCK((JSLock*)scope->lock.fat);
#endif
        scope->u.count = 1;
    } else {
        scope->u.count = 0;
    }
    js_FinishSharingScope(rt, scope);
}

/*
 * js_FinishSharingScope is the tail part of ShareScope, split out to become a
 * subroutine of JS_EndRequest too.  The bulk of the work here involves making
 * mutable strings in the scope's object's slots be immutable.  We have to do
 * this because such strings will soon be available to multiple threads, so
 * their buffers can't be realloc'd any longer in js_ConcatStrings, and their
 * members can't be modified by js_ConcatStrings, js_MinimizeDependentStrings,
 * or js_UndependString.
 *
 * The last bit of work done by js_FinishSharingScope nulls scope->ownercx and
 * updates rt->sharedScopes.
 */
#define MAKE_STRING_IMMUTABLE(rt, v, vp)                                      \
    JS_BEGIN_MACRO                                                            \
        JSString *str_ = JSVAL_TO_STRING(v);                                  \
        uint8 *flagp_ = js_GetGCThingFlags(str_);                             \
        if (*flagp_ & GCF_MUTABLE) {                                          \
            if (JSSTRING_IS_DEPENDENT(str_) &&                                \
                !js_UndependString(NULL, str_)) {                             \
                JS_RUNTIME_METER(rt, badUndependStrings);                     \
                *vp = JSVAL_VOID;                                             \
            } else {                                                          \
                *flagp_ &= ~GCF_MUTABLE;                                      \
            }                                                                 \
        }                                                                     \
    JS_END_MACRO

void
js_FinishSharingScope(JSRuntime *rt, JSScope *scope)
{
    JSObject *obj;
    uint32 nslots;
    jsval v, *vp, *end;

    obj = scope->object;
    nslots = JS_MIN(obj->map->freeslot, obj->map->nslots);
    for (vp = obj->slots, end = vp + nslots; vp < end; vp++) {
        v = *vp;
        if (JSVAL_IS_STRING(v))
            MAKE_STRING_IMMUTABLE(rt, v, vp);
    }

    scope->ownercx = NULL;  /* NB: set last, after lock init */
    JS_RUNTIME_METER(rt, sharedScopes);
}

/*
 * Given a scope with apparently non-null ownercx different from cx, try to
 * set ownercx to cx, claiming exclusive (single-threaded) ownership of scope.
 * If we claim ownership, return true.  Otherwise, we wait for ownercx to be
 * set to null (indicating that scope is multi-threaded); or if waiting would
 * deadlock, we set ownercx to null ourselves via ShareScope.  In any case,
 * once ownercx is null we return false.
 */
static JSBool
ClaimScope(JSScope *scope, JSContext *cx)
{
    JSRuntime *rt;
    JSContext *ownercx;
    jsrefcount saveDepth;
    PRStatus stat;

    rt = cx->runtime;
    JS_RUNTIME_METER(rt, claimAttempts);
    JS_LOCK_GC(rt);

    /* Reload in case ownercx went away while we blocked on the lock. */
    while ((ownercx = scope->ownercx) != NULL) {
        /*
         * Avoid selflock if ownercx is dead, or is not running a request, or
         * has the same thread as cx.  Set scope->ownercx to cx so that the
         * matching JS_UNLOCK_SCOPE or JS_UNLOCK_OBJ macro call will take the
         * fast path around the corresponding js_UnlockScope or js_UnlockObj
         * function call.
         *
         * If scope->u.link is non-null, scope has already been inserted on
         * the rt->scopeSharingTodo list, because another thread's context
         * already wanted to lock scope while ownercx was running a request.
         * We can't claim any scope whose u.link is non-null at this point,
         * even if ownercx->requestDepth is 0 (see below where we suspend our
         * request before waiting on rt->scopeSharingDone).
         */
        if (!scope->u.link &&
            (!js_ValidContextPointer(rt, ownercx) ||
             !ownercx->requestDepth ||
             ownercx->thread == cx->thread)) {
            JS_ASSERT(scope->u.count == 0);
            scope->ownercx = cx;
            JS_UNLOCK_GC(rt);
            JS_RUNTIME_METER(rt, claimedScopes);
            return JS_TRUE;
        }

        /*
         * Avoid deadlock if scope's owner context is waiting on a scope that
         * we own, by revoking scope's ownership.  This approach to deadlock
         * avoidance works because the engine never nests scope locks, except
         * for the notable case of js_SetProtoOrParent (see jsobj.c).
         *
         * If cx could hold locks on ownercx->scopeToShare, or if ownercx
         * could hold locks on scope, we would need to keep reentrancy counts
         * for all such "flyweight" (ownercx != NULL) locks, so that control
         * would unwind properly once these locks became "thin" or "fat".
         * Apart from the js_SetProtoOrParent exception, the engine promotes
         * a scope from exclusive to shared access only when locking, never
         * when holding or unlocking.
         *
         * If ownercx's thread is calling js_SetProtoOrParent, trying to lock
         * the inner scope (the scope of the object being set as the prototype
         * of the outer object), ShareScope will find the outer object's scope
         * at rt->setSlotScope.  If it's the same as scope, we give it a lock
         * held by ownercx's thread with reentrancy count of 1, then we return
         * here and break.  After that we unwind to js_[GS]etSlotThreadSafe or
         * js_LockScope (our caller), where we wait on the newly-fattened lock
         * until ownercx's thread unwinds from js_SetProtoOrParent.
         *
         * Avoid deadlock before any of this scope/context cycle detection if
         * cx is on the active GC's thread, because in that case, no requests
         * will run until the GC completes.  Any scope wanted by the GC (from
         * a finalizer) that can't be claimed must be slated for sharing.
         */
        if (rt->gcThread == cx->thread ||
            (ownercx->scopeToShare &&
             WillDeadlock(ownercx->scopeToShare, cx))) {
            ShareScope(rt, scope);
            break;
        }

        /*
         * Thanks to the non-zero NO_SCOPE_SHARING_TODO link terminator, we
         * can decide whether scope is on rt->scopeSharingTodo with a single
         * non-null test, and avoid double-insertion bugs.
         */
        if (!scope->u.link) {
            scope->u.link = rt->scopeSharingTodo;
            rt->scopeSharingTodo = scope;
            js_HoldObjectMap(cx, &scope->map);
        }

        /*
         * Inline JS_SuspendRequest before we wait on rt->scopeSharingDone,
         * saving and clearing cx->requestDepth so we don't deadlock if the
         * GC needs to run on ownercx.
         *
         * Unlike JS_SuspendRequest and JS_EndRequest, we must take care not
         * to decrement rt->requestCount if cx is active on the GC's thread,
         * because the GC has already reduced rt->requestCount to exclude all
         * such contexts.
         */
        saveDepth = cx->requestDepth;
        if (saveDepth) {
            cx->requestDepth = 0;
            if (rt->gcThread != cx->thread) {
                JS_ASSERT(rt->requestCount > 0);
                rt->requestCount--;
                if (rt->requestCount == 0)
                    JS_NOTIFY_REQUEST_DONE(rt);
            }
        }

        /*
         * We know that some other thread's context owns scope, which is now
         * linked onto rt->scopeSharingTodo, awaiting the end of that other
         * thread's request.  So it is safe to wait on rt->scopeSharingDone.
         */
        cx->scopeToShare = scope;
        stat = PR_WaitCondVar(rt->scopeSharingDone, PR_INTERVAL_NO_TIMEOUT);
        JS_ASSERT(stat != PR_FAILURE);

        /*
         * Inline JS_ResumeRequest after waiting on rt->scopeSharingDone,
         * restoring cx->requestDepth.  Same note as above for the inlined,
         * specialized JS_SuspendRequest code: beware rt->gcThread.
         */
        if (saveDepth) {
            if (rt->gcThread != cx->thread) {
                while (rt->gcLevel > 0)
                    JS_AWAIT_GC_DONE(rt);
                rt->requestCount++;
            }
            cx->requestDepth = saveDepth;
        }

        /*
         * Don't clear cx->scopeToShare until after we're through waiting on
         * all condition variables protected by rt->gcLock -- that includes
         * rt->scopeSharingDone *and* rt->gcDone (hidden in JS_AWAIT_GC_DONE,
         * in the inlined JS_ResumeRequest code immediately above).
         *
         * Otherwise, the GC could easily deadlock with another thread that
         * owns a scope wanted by a finalizer.  By keeping cx->scopeToShare
         * set till here, we ensure that such deadlocks are detected, which
         * results in the finalized object's scope being shared (it must, of
         * course, have other, live objects sharing it).
         */
        cx->scopeToShare = NULL;
    }

    JS_UNLOCK_GC(rt);
    return JS_FALSE;
}

/* Exported to js.c, which calls it via OBJ_GET_* and JSVAL_IS_* macros. */
JS_FRIEND_API(jsval)
js_GetSlotThreadSafe(JSContext *cx, JSObject *obj, uint32 slot)
{
    jsval v;
    JSScope *scope;
#ifndef NSPR_LOCK
    JSThinLock *tl;
    jsword me;
#endif

    /*
     * We handle non-native objects via JSObjectOps.getRequiredSlot, treating
     * all slots starting from 0 as required slots.  A property definition or
     * some prior arrangement must have allocated slot.
     *
     * Note once again (see jspubtd.h, before JSGetRequiredSlotOp's typedef)
     * the crucial distinction between a |required slot number| that's passed
     * to the get/setRequiredSlot JSObjectOps, and a |reserved slot index|
     * passed to the JS_Get/SetReservedSlot APIs.
     */
    if (!OBJ_IS_NATIVE(obj))
        return OBJ_GET_REQUIRED_SLOT(cx, obj, slot);

    /*
     * Native object locking is inlined here to optimize the single-threaded
     * and contention-free multi-threaded cases.
     */
    scope = OBJ_SCOPE(obj);
    JS_ASSERT(scope->ownercx != cx);
    JS_ASSERT(obj->slots && slot < obj->map->freeslot);

    /*
     * Avoid locking if called from the GC (see GC_AWARE_GET_SLOT in jsobj.h).
     * Also avoid locking an object owning a sealed scope.  If neither of those
     * special cases applies, try to claim scope's flyweight lock from whatever
     * context may have had it in an earlier request.
     */
    if (CX_THREAD_IS_RUNNING_GC(cx) ||
        (SCOPE_IS_SEALED(scope) && scope->object == obj) ||
        (scope->ownercx && ClaimScope(scope, cx))) {
        return obj->slots[slot];
    }

#ifndef NSPR_LOCK
    tl = &scope->lock;
    me = CX_THINLOCK_ID(cx);
    JS_ASSERT(CURRENT_THREAD_IS_ME(me));
    if (js_CompareAndSwap(&tl->owner, 0, me)) {
        /*
         * Got the lock with one compare-and-swap.  Even so, someone else may
         * have mutated obj so it now has its own scope and lock, which would
         * require either a restart from the top of this routine, or a thin
         * lock release followed by fat lock acquisition.
         */
        if (scope == OBJ_SCOPE(obj)) {
            v = obj->slots[slot];
            if (!js_CompareAndSwap(&tl->owner, me, 0)) {
                /* Assert that scope locks never revert to flyweight. */
                JS_ASSERT(scope->ownercx != cx);
                LOGIT(scope, '1');
                scope->u.count = 1;
                js_UnlockObj(cx, obj);
            }
            return v;
        }
        if (!js_CompareAndSwap(&tl->owner, me, 0))
            js_Dequeue(tl);
    }
    else if (Thin_RemoveWait(ReadWord(tl->owner)) == me) {
        return obj->slots[slot];
    }
#endif

    js_LockObj(cx, obj);
    v = obj->slots[slot];

    /*
     * Test whether cx took ownership of obj's scope during js_LockObj.
     *
     * This does not mean that a given scope reverted to flyweight from "thin"
     * or "fat" -- it does mean that obj's map pointer changed due to another
     * thread setting a property, requiring obj to cease sharing a prototype
     * object's scope (whose lock was not flyweight, else we wouldn't be here
     * in the first place!).
     */
    scope = OBJ_SCOPE(obj);
    if (scope->ownercx != cx)
        js_UnlockScope(cx, scope);
    return v;
}

void
js_SetSlotThreadSafe(JSContext *cx, JSObject *obj, uint32 slot, jsval v)
{
    JSScope *scope;
#ifndef NSPR_LOCK
    JSThinLock *tl;
    jsword me;
#endif

    /* Any string stored in a thread-safe object must be immutable. */
    if (JSVAL_IS_STRING(v))
        MAKE_STRING_IMMUTABLE(cx->runtime, v, &v);

    /*
     * We handle non-native objects via JSObjectOps.setRequiredSlot, as above
     * for the Get case.
     */
    if (!OBJ_IS_NATIVE(obj)) {
        OBJ_SET_REQUIRED_SLOT(cx, obj, slot, v);
        return;
    }

    /*
     * Native object locking is inlined here to optimize the single-threaded
     * and contention-free multi-threaded cases.
     */
    scope = OBJ_SCOPE(obj);
    JS_ASSERT(scope->ownercx != cx);
    JS_ASSERT(obj->slots && slot < obj->map->freeslot);

    /*
     * Avoid locking if called from the GC (see GC_AWARE_GET_SLOT in jsobj.h).
     * Also avoid locking an object owning a sealed scope.  If neither of those
     * special cases applies, try to claim scope's flyweight lock from whatever
     * context may have had it in an earlier request.
     */
    if (CX_THREAD_IS_RUNNING_GC(cx) ||
        (SCOPE_IS_SEALED(scope) && scope->object == obj) ||
        (scope->ownercx && ClaimScope(scope, cx))) {
        obj->slots[slot] = v;
        return;
    }

#ifndef NSPR_LOCK
    tl = &scope->lock;
    me = CX_THINLOCK_ID(cx);
    JS_ASSERT(CURRENT_THREAD_IS_ME(me));
    if (js_CompareAndSwap(&tl->owner, 0, me)) {
        if (scope == OBJ_SCOPE(obj)) {
            obj->slots[slot] = v;
            if (!js_CompareAndSwap(&tl->owner, me, 0)) {
                /* Assert that scope locks never revert to flyweight. */
                JS_ASSERT(scope->ownercx != cx);
                LOGIT(scope, '1');
                scope->u.count = 1;
                js_UnlockObj(cx, obj);
            }
            return;
        }
        if (!js_CompareAndSwap(&tl->owner, me, 0))
            js_Dequeue(tl);
    }
    else if (Thin_RemoveWait(ReadWord(tl->owner)) == me) {
        obj->slots[slot] = v;
        return;
    }
#endif

    js_LockObj(cx, obj);
    obj->slots[slot] = v;

    /*
     * Same drill as above, in js_GetSlotThreadSafe.  Note that we cannot
     * assume obj has its own mutable scope (where scope->object == obj) yet,
     * because OBJ_SET_SLOT is called for the "universal", common slots such
     * as JSSLOT_PROTO and JSSLOT_PARENT, without a prior js_GetMutableScope.
     * See also the JSPROP_SHARED attribute and its usage.
     */
    scope = OBJ_SCOPE(obj);
    if (scope->ownercx != cx)
        js_UnlockScope(cx, scope);
}

#ifndef NSPR_LOCK

static JSFatLock *
NewFatlock(void)
{
    JSFatLock *fl = (JSFatLock *)malloc(sizeof(JSFatLock)); /* for now */
    if (!fl)
        return NULL;
    fl->susp = 0;
    fl->next = NULL;
    fl->prevp = NULL;
    fl->slock = PR_NewLock();
    fl->svar = PR_NewCondVar(fl->slock);
    return fl;
}

static void
DestroyFatlock(JSFatLock *fl)
{
    PR_DestroyLock(fl->slock);
    PR_DestroyCondVar(fl->svar);
    free(fl);
}

static JSFatLock *
ListOfFatlocks(int listc)
{
    JSFatLock *m;
    JSFatLock *m0;
    int i;

    JS_ASSERT(listc > 0);
    m0 = m = NewFatlock();
    for (i = 1; m && i < listc; i++) {
        m->next = NewFatlock();
        m = m->next;
    }
    return m0;
}

static void
DeleteListOfFatlocks(JSFatLock *m)
{
    JSFatLock *m0;
    for (; m; m = m0) {
        m0 = m->next;
        DestroyFatlock(m);
    }
}

static JSFatLockTable *fl_list_table = NULL;
static uint32          fl_list_table_len = 0;
static uint32          fl_list_chunk_len = 0;

static JSFatLock *
GetFatlock(void *id)
{
    JSFatLock *m;

    uint32 i = GLOBAL_LOCK_INDEX(id);
    if (fl_list_table[i].free == NULL) {
#ifdef DEBUG
        if (fl_list_table[i].taken)
            printf("Ran out of fat locks!\n");
#endif
        fl_list_table[i].free = ListOfFatlocks(fl_list_chunk_len);
    }
    m = fl_list_table[i].free;
    fl_list_table[i].free = m->next;
    m->susp = 0;
    m->next = fl_list_table[i].taken;
    m->prevp = &fl_list_table[i].taken;
    if (fl_list_table[i].taken)
        fl_list_table[i].taken->prevp = &m->next;
    fl_list_table[i].taken = m;
    return m;
}

static void
PutFatlock(JSFatLock *m, void *id)
{
    uint32 i;
    if (m == NULL)
        return;

    /* Unlink m from fl_list_table[i].taken. */
    *m->prevp = m->next;
    if (m->next)
        m->next->prevp = m->prevp;

    /* Insert m in fl_list_table[i].free. */
    i = GLOBAL_LOCK_INDEX(id);
    m->next = fl_list_table[i].free;
    fl_list_table[i].free = m;
}

#endif /* !NSPR_LOCK */

JSBool
js_SetupLocks(int listc, int globc)
{
#ifndef NSPR_LOCK
    uint32 i;

    if (global_locks)
        return JS_TRUE;
#ifdef DEBUG
    if (listc > 10000 || listc < 0) /* listc == fat lock list chunk length */
        printf("Bad number %d in js_SetupLocks()!\n", listc);
    if (globc > 100 || globc < 0)   /* globc == number of global locks */
        printf("Bad number %d in js_SetupLocks()!\n", globc);
#endif
    global_locks_log2 = JS_CeilingLog2(globc);
    global_locks_mask = JS_BITMASK(global_locks_log2);
    global_lock_count = JS_BIT(global_locks_log2);
    global_locks = (PRLock **) malloc(global_lock_count * sizeof(PRLock*));
    if (!global_locks)
        return JS_FALSE;
    for (i = 0; i < global_lock_count; i++) {
        global_locks[i] = PR_NewLock();
        if (!global_locks[i]) {
            global_lock_count = i;
            js_CleanupLocks();
            return JS_FALSE;
        }
    }
    fl_list_table = (JSFatLockTable *) malloc(global_lock_count *
                                              sizeof(JSFatLockTable));
    if (!fl_list_table) {
        js_CleanupLocks();
        return JS_FALSE;
    }
    fl_list_table_len = global_lock_count;
    for (i = 0; i < global_lock_count; i++)
        fl_list_table[i].free = fl_list_table[i].taken = NULL;
    fl_list_chunk_len = listc;
#endif /* !NSPR_LOCK */
    return JS_TRUE;
}

void
js_CleanupLocks(void)
{
#ifndef NSPR_LOCK
    uint32 i;

    if (global_locks) {
        for (i = 0; i < global_lock_count; i++)
            PR_DestroyLock(global_locks[i]);
        free(global_locks);
        global_locks = NULL;
        global_lock_count = 1;
        global_locks_log2 = 0;
        global_locks_mask = 0;
    }
    if (fl_list_table) {
        for (i = 0; i < fl_list_table_len; i++) {
            DeleteListOfFatlocks(fl_list_table[i].free);
            fl_list_table[i].free = NULL;
            DeleteListOfFatlocks(fl_list_table[i].taken);
            fl_list_table[i].taken = NULL;
        }
        free(fl_list_table);
        fl_list_table = NULL;
        fl_list_table_len = 0;
    }
#endif /* !NSPR_LOCK */
}
#ifndef NSPR_LOCK

/*
 * Fast locking and unlocking is implemented by delaying the allocation of a
 * system lock (fat lock) until contention.  As long as a locking thread A
 * runs uncontended, the lock is represented solely by storing A's identity in
 * the object being locked.
 *
 * If another thread B tries to lock the object currently locked by A, B is
 * enqueued into a fat lock structure (which might have to be allocated and
 * pointed to by the object), and suspended using NSPR condition variables
 * (wait).  A wait bit (Bacon bit) is set in the lock word of the object,
 * signalling to A that when releasing the lock, B must be dequeued and
 * notified.
 *
 * The basic operation of the locking primitives (js_Lock, js_Unlock,
 * js_Enqueue, and js_Dequeue) is compare-and-swap.  Hence, when locking into
 * the word pointed at by p, compare-and-swap(p, 0, A) success implies that p
 * is unlocked.  Similarly, when unlocking p, if compare-and-swap(p, A, 0)
 * succeeds this implies that p is uncontended (no one is waiting because the
 * wait bit is not set).
 *
 * When dequeueing, the lock is released, and one of the threads suspended on
 * the lock is notified.  If other threads still are waiting, the wait bit is
 * kept (in js_Enqueue), and if not, the fat lock is deallocated.
 *
 * The functions js_Enqueue, js_Dequeue, js_SuspendThread, and js_ResumeThread
 * are serialized using a global lock.  For scalability, a hashtable of global
 * locks is used, which is indexed by hashing the thin lock pointer (see
 * GLOBAL_LOCK_INDEX above).
 */
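
/*
 * Illustrative sketch only, not compiled: one plausible reading of the
 * thin-lock word layout described above.  The real Thin_SetWait,
 * Thin_RemoveWait, and Thin_GetWait macros live in jslock.h; the bit
 * position chosen here is an assumption for exposition.
 */
#if 0
#define SKETCH_WAIT_BIT      ((jsword)1)              /* hypothetical Bacon bit */
#define Sketch_SetWait(w)    ((w) | SKETCH_WAIT_BIT)  /* flag "waiters exist" */
#define Sketch_RemoveWait(w) ((w) & ~SKETCH_WAIT_BIT) /* recover owner id */
#define Sketch_GetWait(w)    ((w) & SKETCH_WAIT_BIT)  /* non-zero if contended */
#endif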

/*
 * Invariants:
 * (i)  global lock is held
 * (ii) fl->susp >= 0
 */
static int
js_SuspendThread(JSThinLock *tl)
{
    JSFatLock *fl;
    PRStatus stat;

    if (tl->fat == NULL)
        fl = tl->fat = GetFatlock(tl);
    else
        fl = tl->fat;
    JS_ASSERT(fl->susp >= 0);
    fl->susp++;
    PR_Lock(fl->slock);
    js_UnlockGlobal(tl);
    stat = PR_WaitCondVar(fl->svar, PR_INTERVAL_NO_TIMEOUT);
    JS_ASSERT(stat != PR_FAILURE);
    PR_Unlock(fl->slock);
    js_LockGlobal(tl);
    fl->susp--;
    if (fl->susp == 0) {
        PutFatlock(fl, tl);
        tl->fat = NULL;
    }
    return tl->fat == NULL;
}

/*
 * (i)  global lock is held
 * (ii) fl->susp > 0
 */
static void
js_ResumeThread(JSThinLock *tl)
{
    JSFatLock *fl = tl->fat;
    PRStatus stat;

    JS_ASSERT(fl != NULL);
    JS_ASSERT(fl->susp > 0);
    PR_Lock(fl->slock);
    js_UnlockGlobal(tl);
    stat = PR_NotifyCondVar(fl->svar);
    JS_ASSERT(stat != PR_FAILURE);
    PR_Unlock(fl->slock);
}

static void
js_Enqueue(JSThinLock *tl, jsword me)
{
    jsword o, n;

    js_LockGlobal(tl);
    for (;;) {
        o = ReadWord(tl->owner);
        n = Thin_SetWait(o);
        if (o != 0 && js_CompareAndSwap(&tl->owner, o, n)) {
            if (js_SuspendThread(tl))
                me = Thin_RemoveWait(me);
            else
                me = Thin_SetWait(me);
        }
        else if (js_CompareAndSwap(&tl->owner, 0, me)) {
            js_UnlockGlobal(tl);
            return;
        }
    }
}

static void
js_Dequeue(JSThinLock *tl)
{
    jsword o;

    js_LockGlobal(tl);
    o = ReadWord(tl->owner);
    JS_ASSERT(Thin_GetWait(o) != 0);
    JS_ASSERT(tl->fat != NULL);
    if (!js_CompareAndSwap(&tl->owner, o, 0)) /* release it */
        JS_ASSERT(0);
    js_ResumeThread(tl);
}

JS_INLINE void
js_Lock(JSThinLock *tl, jsword me)
{
    JS_ASSERT(CURRENT_THREAD_IS_ME(me));
    if (js_CompareAndSwap(&tl->owner, 0, me))
        return;
    if (Thin_RemoveWait(ReadWord(tl->owner)) != me)
        js_Enqueue(tl, me);
#ifdef DEBUG
    else
        JS_ASSERT(0);
#endif
}

JS_INLINE void
js_Unlock(JSThinLock *tl, jsword me)
{
    JS_ASSERT(CURRENT_THREAD_IS_ME(me));

    /*
     * Only me can hold the lock; no compare-and-swap atomic operation is
     * needed for this common case.
     */
    if (tl->owner == me) {
        tl->owner = 0;
        return;
    }
    JS_ASSERT(Thin_GetWait(tl->owner));
    if (Thin_RemoveWait(ReadWord(tl->owner)) == me)
        js_Dequeue(tl);
#ifdef DEBUG
    else
        JS_ASSERT(0);   /* unbalanced unlock */
#endif
}
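
/*
 * Illustrative sketch only, not compiled: callers reach js_Lock and
 * js_Unlock through the JS_LOCK0/JS_UNLOCK0 macros in jslock.h, which on
 * this path reduce to paired calls like the following.
 */
#if 0
static void
ExampleCriticalSection(JSContext *cx, JSThinLock *tl)
{
    jsword me = CX_THINLOCK_ID(cx);

    js_Lock(tl, me);    /* one CAS attempt, then enqueue on contention */
    /* ... touch state guarded by tl ... */
    js_Unlock(tl, me);  /* fast path stores 0; dequeues a waiter if any */
}
#endif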

#endif /* !NSPR_LOCK */

void
js_LockRuntime(JSRuntime *rt)
{
    PR_Lock(rt->rtLock);
#ifdef DEBUG
    rt->rtLockOwner = js_CurrentThreadId();
#endif
}

void
js_UnlockRuntime(JSRuntime *rt)
{
#ifdef DEBUG
    rt->rtLockOwner = 0;
#endif
    PR_Unlock(rt->rtLock);
}

void
js_LockScope(JSContext *cx, JSScope *scope)
{
    jsword me = CX_THINLOCK_ID(cx);

    JS_ASSERT(CURRENT_THREAD_IS_ME(me));
    JS_ASSERT(scope->ownercx != cx);
    if (CX_THREAD_IS_RUNNING_GC(cx))
        return;
    if (scope->ownercx && ClaimScope(scope, cx))
        return;

    if (Thin_RemoveWait(ReadWord(scope->lock.owner)) == me) {
        JS_ASSERT(scope->u.count > 0);
        LOGIT(scope, '+');
        scope->u.count++;
    } else {
        JSThinLock *tl = &scope->lock;
        JS_LOCK0(tl, me);
        JS_ASSERT(scope->u.count == 0);
        LOGIT(scope, '1');
        scope->u.count = 1;
    }
}

void
js_UnlockScope(JSContext *cx, JSScope *scope)
{
    jsword me = CX_THINLOCK_ID(cx);

    /* We hope compilers use me instead of reloading cx->thread in the macro. */
    if (CX_THREAD_IS_RUNNING_GC(cx))
        return;
    if (cx->lockedSealedScope == scope) {
        cx->lockedSealedScope = NULL;
        return;
    }

    /*
     * If scope->ownercx is not null, it's likely that two contexts that are
     * not using requests have nested locks for scope.  The first context, cx
     * here, claimed scope; the second, scope->ownercx here, re-claimed it
     * because the first was not in a request, or was on the same thread.  We
     * don't want to keep track of such nesting, because it penalizes the
     * common non-nested case.  Instead of asserting here and silently coping,
     * we simply re-claim scope for cx and return.
     *
     * See http://bugzilla.mozilla.org/show_bug.cgi?id=229200 for a real world
     * case where an asymmetric thread model (Mozilla's main thread is known
     * to be the only thread that runs the GC) combined with multiple contexts
     * per thread has led to such request-less nesting.
     */
    if (scope->ownercx) {
        JS_ASSERT(scope->u.count == 0);
        JS_ASSERT(scope->lock.owner == 0);
        scope->ownercx = cx;
        return;
    }

    JS_ASSERT(scope->u.count > 0);
    if (Thin_RemoveWait(ReadWord(scope->lock.owner)) != me) {
        JS_ASSERT(0);   /* unbalanced unlock */
        return;
    }
    LOGIT(scope, '-');
    if (--scope->u.count == 0) {
        JSThinLock *tl = &scope->lock;
        JS_UNLOCK0(tl, me);
    }
}

/*
 * NB: oldscope may be null if our caller is js_GetMutableScope and it just
 * dropped the last reference to oldscope.
 */
void
js_TransferScopeLock(JSContext *cx, JSScope *oldscope, JSScope *newscope)
{
    jsword me;
    JSThinLock *tl;

    JS_ASSERT(JS_IS_SCOPE_LOCKED(cx, newscope));

    /*
     * If the last reference to oldscope went away, newscope needs no lock
     * state update.
     */
    if (!oldscope)
        return;
    JS_ASSERT(JS_IS_SCOPE_LOCKED(cx, oldscope));

    /*
     * Special case in js_LockScope and js_UnlockScope for the GC calling
     * code that locks, unlocks, or mutates.  Nothing to do in these cases,
     * because oldscope and newscope were "locked" by the GC thread, so
     * neither was actually locked.
     */
    if (CX_THREAD_IS_RUNNING_GC(cx))
        return;

    /*
     * Special case in js_LockObj and js_UnlockScope for locking the sealed
     * scope of an object that owns that scope (the prototype or mutated obj
     * for which OBJ_SCOPE(obj)->object == obj), and unlocking it.
     */
    JS_ASSERT(cx->lockedSealedScope != newscope);
    if (cx->lockedSealedScope == oldscope) {
        JS_ASSERT(newscope->ownercx == cx ||
                  (!newscope->ownercx && newscope->u.count == 1));
        cx->lockedSealedScope = NULL;
        return;
    }

    /*
     * If oldscope is single-threaded, there's nothing to do.
     */
    if (oldscope->ownercx) {
        JS_ASSERT(oldscope->ownercx == cx);
        JS_ASSERT(newscope->ownercx == cx ||
                  (!newscope->ownercx && newscope->u.count == 1));
        return;
    }

    /*
     * We transfer oldscope->u.count only if newscope is not single-threaded.
     * Flow unwinds from here through some number of JS_UNLOCK_SCOPE and/or
     * JS_UNLOCK_OBJ macro calls, which will decrement newscope->u.count only
     * if they find newscope->ownercx != cx.
     */
    if (newscope->ownercx != cx) {
        JS_ASSERT(!newscope->ownercx);
        newscope->u.count = oldscope->u.count;
    }

    /*
     * Reset oldscope's lock state so that it is completely unlocked.
     */
    LOGIT(oldscope, '0');
    oldscope->u.count = 0;
    tl = &oldscope->lock;
    me = CX_THINLOCK_ID(cx);
    JS_UNLOCK0(tl, me);
}

void
js_LockObj(JSContext *cx, JSObject *obj)
{
    JSScope *scope;

    JS_ASSERT(OBJ_IS_NATIVE(obj));

    /*
     * We must test whether the GC is calling and return without mutating any
     * state, especially cx->lockedSealedScope.  Note asymmetry with respect to
     * js_UnlockObj, which is a thin layer on top of js_UnlockScope.
     */
    if (CX_THREAD_IS_RUNNING_GC(cx))
        return;

    for (;;) {
        scope = OBJ_SCOPE(obj);
        if (SCOPE_IS_SEALED(scope) && scope->object == obj &&
            !cx->lockedSealedScope) {
            cx->lockedSealedScope = scope;
            return;
        }

        js_LockScope(cx, scope);

        /* If obj still has this scope, we're done. */
        if (scope == OBJ_SCOPE(obj))
            return;

        /* Lost a race with a mutator; retry with obj's new scope. */
        js_UnlockScope(cx, scope);
    }
}

void
js_UnlockObj(JSContext *cx, JSObject *obj)
{
    JS_ASSERT(OBJ_IS_NATIVE(obj));
    js_UnlockScope(cx, OBJ_SCOPE(obj));
}

#ifdef DEBUG

JSBool
js_IsRuntimeLocked(JSRuntime *rt)
{
    return js_CurrentThreadId() == rt->rtLockOwner;
}

JSBool
js_IsObjLocked(JSContext *cx, JSObject *obj)
{
    JSScope *scope = OBJ_SCOPE(obj);

    return MAP_IS_NATIVE(&scope->map) && js_IsScopeLocked(cx, scope);
}

JSBool
js_IsScopeLocked(JSContext *cx, JSScope *scope)
{
    /* Special case: the GC locking any object's scope, see js_LockScope. */
    if (CX_THREAD_IS_RUNNING_GC(cx))
        return JS_TRUE;

    /* Special case: locked object owning a sealed scope, see js_LockObj. */
    if (cx->lockedSealedScope == scope)
        return JS_TRUE;

    /*
     * General case: the scope is either exclusively owned (by cx), or it has
     * a thin or fat lock to cope with shared (concurrent) ownership.
     */
    if (scope->ownercx) {
        JS_ASSERT(scope->ownercx == cx || scope->ownercx->thread == cx->thread);
        return JS_TRUE;
    }
    return js_CurrentThreadId() ==
           ((JSThread *)Thin_RemoveWait(ReadWord(scope->lock.owner)))->id;
}

#endif /* DEBUG */
#endif /* JS_THREADSAFE */