1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
2 /* vim: set ts=8 sts=2 et sw=2 tw=80: */
3 /* This Source Code Form is subject to the terms of the Mozilla Public
4 * License, v. 2.0. If a copy of the MPL was not distributed with this
5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
6
7 #include "nsThread.h"
8
9 #include "base/message_loop.h"
10 #include "base/platform_thread.h"
11
12 // Chromium's logging can sometimes leak through...
13 #ifdef LOG
14 # undef LOG
15 #endif
16
17 #include "mozilla/ReentrantMonitor.h"
18 #include "nsMemoryPressure.h"
19 #include "nsThreadManager.h"
20 #include "nsIClassInfoImpl.h"
21 #include "nsCOMPtr.h"
22 #include "nsQueryObject.h"
23 #include "pratom.h"
24 #include "mozilla/BackgroundHangMonitor.h"
25 #include "mozilla/CycleCollectedJSContext.h"
26 #include "mozilla/Logging.h"
27 #include "nsIObserverService.h"
28 #include "mozilla/IOInterposer.h"
29 #include "mozilla/ipc/MessageChannel.h"
30 #include "mozilla/ipc/BackgroundChild.h"
31 #include "mozilla/Preferences.h"
32 #include "mozilla/SchedulerGroup.h"
33 #include "mozilla/Services.h"
34 #include "nsXPCOMPrivate.h"
35 #include "mozilla/ChaosMode.h"
36 #include "mozilla/SchedulerGroup.h"
37 #include "mozilla/Telemetry.h"
38 #include "mozilla/TimeStamp.h"
39 #include "mozilla/Unused.h"
40 #include "mozilla/dom/ScriptSettings.h"
41 #include "nsThreadSyncDispatch.h"
42 #include "nsServiceManagerUtils.h"
43 #include "GeckoProfiler.h"
44 #ifdef MOZ_GECKO_PROFILER
45 # include "ProfilerMarkerPayload.h"
46 #endif
47 #include "InputEventStatistics.h"
48 #include "ThreadEventQueue.h"
49 #include "ThreadEventTarget.h"
50 #include "ThreadDelay.h"
51
52 #include <limits>
53
54 #ifdef XP_LINUX
55 # ifdef __GLIBC__
56 # include <gnu/libc-version.h>
57 # endif
58 # include <sys/mman.h>
59 # include <sys/time.h>
60 # include <sys/resource.h>
61 # include <sched.h>
62 # include <stdio.h>
63 #endif
64
65 #ifdef XP_WIN
66 # include "mozilla/DynamicallyLinkedFunctionPtr.h"
67
68 # include <winbase.h>
69
70 using GetCurrentThreadStackLimitsFn = void(WINAPI*)(PULONG_PTR LowLimit,
71 PULONG_PTR HighLimit);
72 #endif
73
74 #define HAVE_UALARM \
75 _BSD_SOURCE || \
76 (_XOPEN_SOURCE >= 500 || _XOPEN_SOURCE && _XOPEN_SOURCE_EXTENDED) && \
77 !(_POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700)
78
79 #if defined(XP_LINUX) && !defined(ANDROID) && defined(_GNU_SOURCE)
80 # define HAVE_SCHED_SETAFFINITY
81 #endif
82
83 #ifdef XP_MACOSX
84 # include <mach/mach.h>
85 # include <mach/thread_policy.h>
86 #endif
87
88 #ifdef MOZ_CANARY
89 # include <unistd.h>
90 # include <execinfo.h>
91 # include <signal.h>
92 # include <fcntl.h>
93 # include "nsXULAppAPI.h"
94 #endif
95
96 #if defined(NS_FUNCTION_TIMER) && defined(_MSC_VER)
97 # include "nsTimerImpl.h"
98 # include "mozilla/StackWalk.h"
99 #endif
100 #ifdef NS_FUNCTION_TIMER
101 # include "nsCRT.h"
102 #endif
103
104 #ifdef MOZ_TASK_TRACER
105 # include "GeckoTaskTracer.h"
106 # include "TracedTaskCommon.h"
107 using namespace mozilla::tasktracer;
108 #endif
109
using namespace mozilla;

// Per-thread TLS initialization hook; defined outside this file and invoked
// from nsThread::InitCommon() for every thread that enters the XPCOM world.
extern void InitThreadLocalVariables();

static LazyLogModule sThreadLog("nsThread");
#ifdef LOG
#  undef LOG
#endif
#define LOG(args) MOZ_LOG(sThreadLog, mozilla::LogLevel::Debug, args)

NS_DECL_CI_INTERFACE_GETTER(nsThread)

// Shared scratch buffer holding the name of the runnable currently executing
// on the main thread (see kRunnableNameBufSize in nsThread.h).
Array<char, nsThread::kRunnableNameBufSize> nsThread::sMainThreadRunnableName;

// Counters for live nsThreads; both are guarded by ThreadListMutex().
uint32_t nsThread::sActiveThreads;
uint32_t nsThread::sMaxActiveThreads;

#ifdef EARLY_BETA_OR_EARLIER
// Cap on per-thread wakeup telemetry accumulation (early-beta builds only).
const uint32_t kTelemetryWakeupCountLimit = 100;
#endif
130
//-----------------------------------------------------------------------------
// Because we do not have our own nsIFactory, we have to implement nsIClassInfo
// somewhat manually.

// Singleton nsIClassInfo for nsThread. It is stack/statically allocated and
// never reference-counted (see the stubbed AddRef/Release below), hence
// NS_DECL_ISUPPORTS_INHERITED rather than a real mRefCnt.
class nsThreadClassInfo : public nsIClassInfo {
 public:
  NS_DECL_ISUPPORTS_INHERITED  // no mRefCnt
  NS_DECL_NSICLASSINFO

  nsThreadClassInfo() = default;
};
142
// The singleton is statically allocated, so refcounting is a no-op: AddRef
// and Release return dummy non-zero counts and never free anything.
NS_IMETHODIMP_(MozExternalRefCountType)
nsThreadClassInfo::AddRef() { return 2; }
NS_IMETHODIMP_(MozExternalRefCountType)
nsThreadClassInfo::Release() { return 1; }
NS_IMPL_QUERY_INTERFACE(nsThreadClassInfo, nsIClassInfo)

// Returns the set of IIDs implemented by nsThread (generated by
// NS_IMPL_CI_INTERFACE_GETTER below).
NS_IMETHODIMP
nsThreadClassInfo::GetInterfaces(nsTArray<nsIID>& aArray) {
  return NS_CI_INTERFACE_GETTER_NAME(nsThread)(aArray);
}

// nsThread has no scriptable helper.
NS_IMETHODIMP
nsThreadClassInfo::GetScriptableHelper(nsIXPCScriptable** aResult) {
  *aResult = nullptr;
  return NS_OK;
}

// No contract ID: report a void string.
NS_IMETHODIMP
nsThreadClassInfo::GetContractID(nsACString& aResult) {
  aResult.SetIsVoid(true);
  return NS_OK;
}

// No human-readable class description either.
NS_IMETHODIMP
nsThreadClassInfo::GetClassDescription(nsACString& aResult) {
  aResult.SetIsVoid(true);
  return NS_OK;
}

// No CID is exposed for nsThread.
NS_IMETHODIMP
nsThreadClassInfo::GetClassID(nsCID** aResult) {
  *aResult = nullptr;
  return NS_OK;
}

// nsThread instances may be used from multiple threads.
NS_IMETHODIMP
nsThreadClassInfo::GetFlags(uint32_t* aResult) {
  *aResult = THREADSAFE;
  return NS_OK;
}

NS_IMETHODIMP
nsThreadClassInfo::GetClassIDNoAlloc(nsCID* aResult) {
  return NS_ERROR_NOT_AVAILABLE;
}
188
//-----------------------------------------------------------------------------

NS_IMPL_ADDREF(nsThread)
NS_IMPL_RELEASE(nsThread)
NS_INTERFACE_MAP_BEGIN(nsThread)
  NS_INTERFACE_MAP_ENTRY(nsIThread)
  NS_INTERFACE_MAP_ENTRY(nsIThreadInternal)
  NS_INTERFACE_MAP_ENTRY(nsIEventTarget)
  NS_INTERFACE_MAP_ENTRY(nsISerialEventTarget)
  NS_INTERFACE_MAP_ENTRY(nsISupportsPriority)
  NS_INTERFACE_MAP_ENTRY_AMBIGUOUS(nsISupports, nsIThread)
  // Hand-rolled entry: nsIClassInfo is served by the non-refcounted
  // singleton declared above rather than by nsThread itself.
  if (aIID.Equals(NS_GET_IID(nsIClassInfo))) {
    static nsThreadClassInfo sThreadClassInfo;
    foundInterface = static_cast<nsIClassInfo*>(&sThreadClassInfo);
  } else
NS_INTERFACE_MAP_END
NS_IMPL_CI_INTERFACE_GETTER(nsThread, nsIThread, nsIThreadInternal,
                            nsIEventTarget, nsISupportsPriority)
207
208 //-----------------------------------------------------------------------------
209
// Handshake runnable used during nsThread::Init(): the spawning thread blocks
// in Wait() until the new thread has executed Run(), proving that the new
// thread is up and processing events.
class nsThreadStartupEvent final : public Runnable {
 public:
  nsThreadStartupEvent()
      : Runnable("nsThreadStartupEvent"),
        mMon("nsThreadStartupEvent.mMon"),
        mInitialized(false) {}

  // This method does not return until the thread startup object is in the
  // completion state.
  void Wait() {
    ReentrantMonitorAutoEnter mon(mMon);
    // Loop to guard against spurious monitor wakeups.
    while (!mInitialized) {
      mon.Wait();
    }
  }

 private:
  ~nsThreadStartupEvent() = default;

  // Runs on the newly spawned thread; flips the flag and wakes the waiter.
  NS_IMETHOD Run() override {
    ReentrantMonitorAutoEnter mon(mMon);
    mInitialized = true;
    mon.Notify();
    return NS_OK;
  }

  ReentrantMonitor mMon;
  bool mInitialized;  // guarded by mMon
};
239 //-----------------------------------------------------------------------------
240
// Comparator used to find a raw nsThreadShutdownContext* inside the array of
// owning UniquePtrs (compares by pointer identity).
bool nsThread::ShutdownContextsComp::Equals(
    const ShutdownContexts::elem_type& a,
    const ShutdownContexts::elem_type::Pointer b) const {
  return a.get() == b;
}
246
// This event is responsible for notifying nsThread::Shutdown that it is time
// to call PR_JoinThread. It implements nsICancelableRunnable so that it can
// run on a DOM Worker thread (where all events must implement
// nsICancelableRunnable.)
class nsThreadShutdownAckEvent : public CancelableRunnable {
 public:
  explicit nsThreadShutdownAckEvent(NotNull<nsThreadShutdownContext*> aCtx)
      : CancelableRunnable("nsThreadShutdownAckEvent"),
        mShutdownContext(aCtx) {}
  // Runs on the joining thread; tells it the terminating thread is done.
  NS_IMETHOD Run() override {
    mShutdownContext->mTerminatingThread->ShutdownComplete(mShutdownContext);
    return NS_OK;
  }
  // Even when cancelled, the ack must still be delivered or Shutdown() would
  // spin forever waiting for it.
  nsresult Cancel() override { return Run(); }

 private:
  virtual ~nsThreadShutdownAckEvent() = default;

  NotNull<nsThreadShutdownContext*> mShutdownContext;
};
267
// This event is responsible for setting mShutdownContext
// (it runs on the thread being shut down, then quits its MessageLoop so
// ThreadFunc can proceed to the drain-and-ack phase).
class nsThreadShutdownEvent : public Runnable {
 public:
  nsThreadShutdownEvent(NotNull<nsThread*> aThr,
                        NotNull<nsThreadShutdownContext*> aCtx)
      : Runnable("nsThreadShutdownEvent"),
        mThread(aThr),
        mShutdownContext(aCtx) {}
  NS_IMETHOD Run() override {
    mThread->mShutdownContext = mShutdownContext;
    MessageLoop::current()->Quit();
    return NS_OK;
  }

 private:
  // Strong reference keeps the nsThread alive until this event has run.
  NotNull<RefPtr<nsThread>> mThread;
  NotNull<nsThreadShutdownContext*> mShutdownContext;
};
286
287 //-----------------------------------------------------------------------------
288
// Pins (or hints) the *current* thread to the given CPU, using whatever the
// platform offers: hard affinity on Linux, affinity-set tags on macOS, and an
// "ideal processor" hint on Windows. No-op elsewhere.
static void SetThreadAffinity(unsigned int cpu) {
#ifdef HAVE_SCHED_SETAFFINITY
  cpu_set_t cpus;
  CPU_ZERO(&cpus);
  CPU_SET(cpu, &cpus);
  sched_setaffinity(0, sizeof(cpus), &cpus);
  // Don't assert sched_setaffinity's return value because it intermittently (?)
  // fails with EINVAL on Linux x64 try runs.
#elif defined(XP_MACOSX)
  // OS X does not provide APIs to pin threads to specific processors, but you
  // can tag threads as belonging to the same "affinity set" and the OS will try
  // to run them on the same processor. To run threads on different processors,
  // tag them as belonging to different affinity sets. Tag 0, the default, means
  // "no affinity" so let's pretend each CPU has its own tag `cpu+1`.
  thread_affinity_policy_data_t policy;
  policy.affinity_tag = cpu + 1;
  MOZ_ALWAYS_TRUE(thread_policy_set(mach_thread_self(), THREAD_AFFINITY_POLICY,
                                    &policy.affinity_tag, 1) == KERN_SUCCESS);
#elif defined(XP_WIN)
  MOZ_ALWAYS_TRUE(SetThreadIdealProcessor(GetCurrentThread(), cpu) !=
                  (DWORD)-1);
#endif
}
312
// When ChaosMode thread-scheduling is active, randomizes the current thread's
// priority and (half the time) pins it to CPU 0 so threads contend, to shake
// out scheduling-order bugs.
static void SetupCurrentThreadForChaosMode() {
  if (!ChaosMode::isActive(ChaosFeature::ThreadScheduling)) {
    return;
  }

#ifdef XP_LINUX
  // PR_SetThreadPriority doesn't really work since priorities >
  // PR_PRIORITY_NORMAL can't be set by non-root users. Instead we'll just use
  // setpriority(2) to set random 'nice values'. In regular Linux this is only
  // a dynamic adjustment so it still doesn't really do what we want, but tools
  // like 'rr' can be more aggressive about honoring these values.
  // Some of these calls may fail due to trying to lower the priority
  // (e.g. something may have already called setpriority() for this thread).
  // This makes it hard to have non-main threads with higher priority than the
  // main thread, but that's hard to fix. Tools like rr can choose to honor the
  // requested values anyway.
  // Use just 4 priorities so there's a reasonable chance of any two threads
  // having equal priority.
  setpriority(PRIO_PROCESS, 0, ChaosMode::randomUint32LessThan(4));
#else
  // We should set the affinity here but NSPR doesn't provide a way to expose
  // it.
  uint32_t priority = ChaosMode::randomUint32LessThan(PR_PRIORITY_LAST + 1);
  PR_SetThreadPriority(PR_GetCurrentThread(), PRThreadPriority(priority));
#endif

  // Force half the threads to CPU 0 so they compete for CPU
  if (ChaosMode::randomUint32LessThan(2)) {
    SetThreadAffinity(0);
  }
}
344
namespace {

// Stack-allocated argument bundle passed from nsThread::Init() to ThreadFunc.
// ThreadFunc must finish reading it before Init() returns (see the startup
// event handshake), since it points into Init()'s stack frame.
struct ThreadInitData {
  nsThread* thread;
  const nsACString& name;
};

}  // namespace
353
// Lazily-constructed mutex guarding ThreadList(), sActiveThreads and
// sMaxActiveThreads. OffTheBooks so it is deliberately leaked at shutdown.
/* static */ mozilla::OffTheBooksMutex& nsThread::ThreadListMutex() {
  static OffTheBooksMutex sMutex("nsThread::ThreadListMutex");
  return sMutex;
}

// Lazily-constructed intrusive list of all live nsThreads.
/* static */ LinkedList<nsThread>& nsThread::ThreadList() {
  static LinkedList<nsThread> sList;
  return sList;
}

// Unlinks every thread from the list (does not destroy the threads).
/* static */
void nsThread::ClearThreadList() {
  OffTheBooksMutexAutoLock mal(ThreadListMutex());
  while (ThreadList().popFirst()) {
  }
}

/* static */
nsThreadEnumerator nsThread::Enumerate() { return {}; }

// High-water mark of concurrently active nsThreads.
/* static */
uint32_t nsThread::MaxActiveThreads() {
  OffTheBooksMutexAutoLock mal(ThreadListMutex());
  return sMaxActiveThreads;
}
379
// Registers this thread in the global list and updates the active-thread
// counters. Called once from InitCommon().
void nsThread::AddToThreadList() {
  OffTheBooksMutexAutoLock mal(ThreadListMutex());
  MOZ_ASSERT(!isInList());

  sActiveThreads++;
  sMaxActiveThreads = std::max(sActiveThreads, sMaxActiveThreads);

  ThreadList().insertBack(this);
}

// Removes this thread from the global list if it is still there. Safe to call
// more than once (shutdown paths and the destructor both call it).
void nsThread::MaybeRemoveFromThreadList() {
  OffTheBooksMutexAutoLock mal(ThreadListMutex());
  if (isInList()) {
    sActiveThreads--;
    removeFrom(ThreadList());
  }
}
397
/*static*/
// Entry point of every nsThread-spawned OS thread (handed to PR_CreateThread
// by Init()). Owns the thread's whole lifecycle: registration, the startup
// handshake, the event loop, the shutdown drain, and the final ack/teardown.
// Takes over the reference that Init() added via NS_ADDREF_THIS().
void nsThread::ThreadFunc(void* aArg) {
  using mozilla::ipc::BackgroundChild;

  ThreadInitData* initData = static_cast<ThreadInitData*>(aArg);
  nsThread* self = initData->thread;  // strong reference

  MOZ_ASSERT(self->mEventTarget);
  MOZ_ASSERT(self->mEvents);

  self->mThread = PR_GetCurrentThread();
  self->mEventTarget->SetCurrentThread();
  SetupCurrentThreadForChaosMode();

  if (!initData->name.IsEmpty()) {
    NS_SetCurrentThreadName(initData->name.BeginReading());
  }

  self->InitCommon();

  // Inform the ThreadManager
  nsThreadManager::get().RegisterCurrentThread(*self);

  mozilla::IOInterposer::RegisterCurrentThread();

  // This must come after the call to nsThreadManager::RegisterCurrentThread(),
  // because that call is needed to properly set up this thread as an nsThread,
  // which profiler_register_thread() requires. See bug 1347007.
  if (!initData->name.IsEmpty()) {
    PROFILER_REGISTER_THREAD(initData->name.BeginReading());
  }

  // Wait for and process startup event
  nsCOMPtr<nsIRunnable> event = self->mEvents->GetEvent(true, nullptr);
  MOZ_ASSERT(event);

  // initData points into Init()'s stack frame, which may unwind as soon as
  // the startup event below runs; drop the pointer first.
  initData = nullptr;  // clear before unblocking nsThread::Init

  event->Run();  // unblocks nsThread::Init
  event = nullptr;

  {
    // Scope for MessageLoop.
    MessageLoop loop(MessageLoop::TYPE_MOZILLA_NONMAINTHREAD, self);

    // Now, process incoming events...
    loop.Run();

    BackgroundChild::CloseForCurrentThread();

    // NB: The main thread does not shut down here!  It shuts down via
    // nsThreadManager::Shutdown.

    // Do NS_ProcessPendingEvents but with special handling to set
    // mEventsAreDoomed atomically with the removal of the last event. The key
    // invariant here is that we will never permit PutEvent to succeed if the
    // event would be left in the queue after our final call to
    // NS_ProcessPendingEvents. We also have to keep processing events as long
    // as we have outstanding mRequestedShutdownContexts.
    while (true) {
      // Check and see if we're waiting on any threads.
      self->WaitForAllAsynchronousShutdowns();

      if (self->mEvents->ShutdownIfNoPendingEvents()) {
        break;
      }
      NS_ProcessPendingEvents(self);
    }
  }

  mozilla::IOInterposer::UnregisterCurrentThread();

  // Inform the threadmanager that this thread is going away
  nsThreadManager::get().UnregisterCurrentThread(*self);

  PROFILER_UNREGISTER_THREAD();

  // Dispatch shutdown ACK
  // mShutdownContext was set by nsThreadShutdownEvent before the loop quit.
  NotNull<nsThreadShutdownContext*> context =
      WrapNotNull(self->mShutdownContext);
  MOZ_ASSERT(context->mTerminatingThread == self);
  event = do_QueryObject(new nsThreadShutdownAckEvent(context));
  if (context->mIsMainThreadJoining) {
    SchedulerGroup::Dispatch(TaskCategory::Other, event.forget());
  } else {
    context->mJoiningThread->Dispatch(event, NS_DISPATCH_NORMAL);
  }

  // Release any observer of the thread here.
  self->SetObserver(nullptr);

#ifdef MOZ_TASK_TRACER
  FreeTraceInfo();
#endif

  // The PRThread will be deleted in PR_JoinThread(), so clear references.
  self->mThread = nullptr;
  self->mEventTarget->ClearCurrentThread();
  NS_RELEASE(self);
}
498
// Per-thread initialization shared by spawned threads (ThreadFunc) and
// adopted threads (InitCurrentThread): records the thread id, probes the
// stack base/size for memory reporting, initializes TLS, and registers the
// thread in the global list.
void nsThread::InitCommon() {
  mThreadId = uint32_t(PlatformThread::CurrentId());

  {
#if defined(XP_LINUX)
    pthread_attr_t attr;
    pthread_attr_init(&attr);
    pthread_getattr_np(pthread_self(), &attr);

    size_t stackSize;
    pthread_attr_getstack(&attr, &mStackBase, &stackSize);

    // Glibc prior to 2.27 reports the stack size and base including the guard
    // region, so we need to compensate for it to get accurate accounting.
    // Also, this behavior difference isn't guarded by a versioned symbol, so we
    // actually need to check the runtime glibc version, not the version we were
    // compiled against.
    // (GNU statement-expression: evaluated once, cached in a static.)
    static bool sAdjustForGuardSize = ({
#  ifdef __GLIBC__
      unsigned major, minor;
      sscanf(gnu_get_libc_version(), "%u.%u", &major, &minor) < 2 ||
          major < 2 || (major == 2 && minor < 27);
#  else
      false;
#  endif
    });
    if (sAdjustForGuardSize) {
      size_t guardSize;
      pthread_attr_getguardsize(&attr, &guardSize);

      // Note: This assumes that the stack grows down, as is the case on all of
      // our tier 1 platforms. On platforms where the stack grows up, the
      // mStackBase adjustment is unnecessary, but doesn't cause any harm other
      // than under-counting stack memory usage by one page.
      mStackBase = reinterpret_cast<char*>(mStackBase) + guardSize;
      stackSize -= guardSize;
    }

    mStackSize = stackSize;

    // This is a bit of a hack.
    //
    // We really do want the NOHUGEPAGE flag on our thread stacks, since we
    // don't expect any of them to need anywhere near 2MB of space. But setting
    // it here is too late to have an effect, since the first stack page has
    // already been faulted in existence, and NSPR doesn't give us a way to set
    // it beforehand.
    //
    // What this does get us, however, is a different set of VM flags on our
    // thread stacks compared to normal heap memory. Which makes the Linux
    // kernel report them as separate regions, even when they are adjacent to
    // heap memory. This allows us to accurately track the actual memory
    // consumption of our allocated stacks.
    madvise(mStackBase, stackSize, MADV_NOHUGEPAGE);

    pthread_attr_destroy(&attr);
#elif defined(XP_WIN)
    // GetCurrentThreadStackLimits is only available on Windows 8+; resolve it
    // dynamically and skip stack accounting on older systems.
    static const StaticDynamicallyLinkedFunctionPtr<
        GetCurrentThreadStackLimitsFn>
        sGetStackLimits(L"kernel32.dll", "GetCurrentThreadStackLimits");

    if (sGetStackLimits) {
      ULONG_PTR stackBottom, stackTop;
      sGetStackLimits(&stackBottom, &stackTop);
      mStackBase = reinterpret_cast<void*>(stackBottom);
      mStackSize = stackTop - stackBottom;
    }
#endif
  }

  InitThreadLocalVariables();
  AddToThreadList();
}
572
//-----------------------------------------------------------------------------

#ifdef MOZ_CANARY
// File descriptor the canary alarm handler writes its backtrace to.
// -1 (the default) means the canary is disabled.
int sCanaryOutputFD = -1;
#endif
578
// Primary constructor: wires the thread to its event queue and creates the
// ThreadEventTarget used for dispatch. aStackSize of 0 lets NSPR pick the
// platform default.
nsThread::nsThread(NotNull<SynchronizedEventQueue*> aQueue,
                   MainThreadFlag aMainThread, uint32_t aStackSize)
    : mEvents(aQueue.get()),
      mEventTarget(
          new ThreadEventTarget(mEvents.get(), aMainThread == MAIN_THREAD)),
      mShutdownContext(nullptr),
      mScriptObserver(nullptr),
      mStackSize(aStackSize),
      mNestedEventLoopDepth(0),
      mShutdownRequired(false),
      mPriority(PRIORITY_NORMAL),
      mIsMainThread(aMainThread == MAIN_THREAD),
      mUseHangMonitor(aMainThread == MAIN_THREAD),
      mIsAPoolThreadFree(nullptr),
      mCanInvokeJS(false),
#ifdef EARLY_BETA_OR_EARLIER
      mLastWakeupCheckTime(TimeStamp::Now()),
#endif
      mPerformanceCounterState(mNestedEventLoopDepth, mIsMainThread) {
}
599
// Queue-less constructor: produces an nsThread with no event target, which
// therefore cannot dispatch events. Never used for the main thread.
nsThread::nsThread()
    : mEvents(nullptr),
      mEventTarget(nullptr),
      mShutdownContext(nullptr),
      mScriptObserver(nullptr),
      mStackSize(0),
      mNestedEventLoopDepth(0),
      mShutdownRequired(false),
      mPriority(PRIORITY_NORMAL),
      mIsMainThread(false),
      mUseHangMonitor(false),
      mCanInvokeJS(false),
#ifdef EARLY_BETA_OR_EARLIER
      mLastWakeupCheckTime(TimeStamp::Now()),
#endif
      mPerformanceCounterState(mNestedEventLoopDepth, mIsMainThread) {
  MOZ_ASSERT(!NS_IsMainThread());
}
618
nsThread::~nsThread() {
  NS_ASSERTION(mRequestedShutdownContexts.IsEmpty(),
               "shouldn't be waiting on other threads to shutdown");

  MaybeRemoveFromThreadList();

#ifdef DEBUG
  // We deliberately leak these so they can be tracked by the leak checker.
  // If you're having nsThreadShutdownContext leaks, you can set:
  //   XPCOM_MEM_LOG_CLASSES=nsThreadShutdownContext
  // during a test run and that will at least tell you what thread is
  // requesting shutdown on another, which can be helpful for diagnosing
  // the leak.
  for (size_t i = 0; i < mRequestedShutdownContexts.Length(); ++i) {
    Unused << mRequestedShutdownContexts[i].release();
  }
#endif
}
637
// Spawns the underlying PRThread and blocks until it is fully set up (see
// nsThreadStartupEvent). The self-reference added here is released by
// ThreadFunc when the thread exits.
nsresult nsThread::Init(const nsACString& aName) {
  MOZ_ASSERT(mEvents);
  MOZ_ASSERT(mEventTarget);

  // spawn thread and wait until it is fully setup
  RefPtr<nsThreadStartupEvent> startup = new nsThreadStartupEvent();

  NS_ADDREF_THIS();

  mShutdownRequired = true;

  // Lives on this stack frame; ThreadFunc stops using it before Wait()
  // returns, so passing its address is safe.
  ThreadInitData initData = {this, aName};

  // ThreadFunc is responsible for setting mThread
  if (!PR_CreateThread(PR_USER_THREAD, ThreadFunc, &initData,
                       PR_PRIORITY_NORMAL, PR_GLOBAL_THREAD, PR_JOINABLE_THREAD,
                       mStackSize)) {
    NS_RELEASE_THIS();
    return NS_ERROR_OUT_OF_MEMORY;
  }

  // ThreadFunc will wait for this event to be run before it tries to access
  // mThread. By delaying insertion of this event into the queue, we ensure
  // that mThread is set properly.
  {
    mEvents->PutEvent(do_AddRef(startup),
                      EventQueuePriority::Normal);  // retain a reference
  }

  // Wait for thread to call ThreadManager::SetupCurrentThread, which completes
  // initialization of ThreadFunc.
  startup->Wait();
  return NS_OK;
}
672
// Adopts the calling OS thread as this nsThread (no new thread is spawned).
nsresult nsThread::InitCurrentThread() {
  mThread = PR_GetCurrentThread();
  SetupCurrentThreadForChaosMode();
  InitCommon();

  nsThreadManager::get().RegisterCurrentThread(*this);
  return NS_OK;
}
681
//-----------------------------------------------------------------------------
// nsIEventTarget

// Script-callable dispatch: takes a raw runnable and forwards it, addref'd,
// to the event target. Fails if this thread has no event target (see the
// queue-less constructor).
NS_IMETHODIMP
nsThread::DispatchFromScript(nsIRunnable* aEvent, uint32_t aFlags) {
  MOZ_ASSERT(mEventTarget);
  NS_ENSURE_TRUE(mEventTarget, NS_ERROR_NOT_IMPLEMENTED);

  nsCOMPtr<nsIRunnable> event(aEvent);
  return mEventTarget->Dispatch(event.forget(), aFlags);
}

NS_IMETHODIMP
nsThread::Dispatch(already_AddRefed<nsIRunnable> aEvent, uint32_t aFlags) {
  MOZ_ASSERT(mEventTarget);
  NS_ENSURE_TRUE(mEventTarget, NS_ERROR_NOT_IMPLEMENTED);

  // The runnable pointer is intentionally not logged (ownership already moved
  // into aEvent).
  LOG(("THRD(%p) Dispatch [%p %x]\n", this, /* XXX aEvent */ nullptr, aFlags));

  return mEventTarget->Dispatch(std::move(aEvent), aFlags);
}

NS_IMETHODIMP
nsThread::DelayedDispatch(already_AddRefed<nsIRunnable> aEvent,
                          uint32_t aDelayMs) {
  MOZ_ASSERT(mEventTarget);
  NS_ENSURE_TRUE(mEventTarget, NS_ERROR_NOT_IMPLEMENTED);

  return mEventTarget->DelayedDispatch(std::move(aEvent), aDelayMs);
}
712
713 NS_IMETHODIMP
GetRunningEventDelay(TimeDuration * aDelay,TimeStamp * aStart)714 nsThread::GetRunningEventDelay(TimeDuration* aDelay, TimeStamp* aStart) {
715 if (mIsAPoolThreadFree && *mIsAPoolThreadFree) {
716 // if there are unstarted threads in the pool, a new event to the
717 // pool would not be delayed at all (beyond thread start time)
718 *aDelay = TimeDuration();
719 *aStart = TimeStamp();
720 } else {
721 *aDelay = mLastEventDelay;
722 *aStart = mLastEventStart;
723 }
724 return NS_OK;
725 }
726
// Records the queue delay / start time of the event about to run; read back
// by GetRunningEventDelay.
NS_IMETHODIMP
nsThread::SetRunningEventDelay(TimeDuration aDelay, TimeStamp aStart) {
  mLastEventDelay = aDelay;
  mLastEventStart = aStart;
  return NS_OK;
}

NS_IMETHODIMP
nsThread::IsOnCurrentThread(bool* aResult) {
  if (mEventTarget) {
    return mEventTarget->IsOnCurrentThread(aResult);
  }
  // No event target (queue-less nsThread): compare PRThreads directly.
  *aResult = PR_GetCurrentThread() == mThread;
  return NS_OK;
}
742
NS_IMETHODIMP_(bool)
nsThread::IsOnCurrentThreadInfallible() {
  // This method is only going to be called if `mThread` is null, which
  // only happens when the thread has exited the event loop. Therefore, when
  // we are called, we can never be on this thread.
  return false;
}
750
//-----------------------------------------------------------------------------
// nsIThread

// Returns the underlying PRThread, or an error if the thread has already
// exited (mThread is cleared at the end of ThreadFunc).
NS_IMETHODIMP
nsThread::GetPRThread(PRThread** aResult) {
  PRThread* thread = mThread;  // atomic load
  *aResult = thread;
  return thread ? NS_OK : NS_ERROR_NOT_AVAILABLE;
}

NS_IMETHODIMP
nsThread::GetCanInvokeJS(bool* aResult) {
  *aResult = mCanInvokeJS;
  return NS_OK;
}

NS_IMETHODIMP
nsThread::SetCanInvokeJS(bool aCanInvokeJS) {
  mCanInvokeJS = aCanInvokeJS;
  return NS_OK;
}

NS_IMETHODIMP
nsThread::GetLastLongTaskEnd(TimeStamp* _retval) {
  *_retval = mPerformanceCounterState.LastLongTaskEnd();
  return NS_OK;
}

NS_IMETHODIMP
nsThread::GetLastLongNonIdleTaskEnd(TimeStamp* _retval) {
  *_retval = mPerformanceCounterState.LastLongNonIdleTaskEnd();
  return NS_OK;
}

// No-op outside early-beta builds, where wakeup telemetry is not collected.
NS_IMETHODIMP
nsThread::SetNameForWakeupTelemetry(const nsACString& aName) {
#ifdef EARLY_BETA_OR_EARLIER
  mNameForWakeupTelemetry = aName;
#endif
  return NS_OK;
}

// Fire-and-forget shutdown; the shutdown context is handled internally and
// the ack is processed whenever the joining thread next runs events.
NS_IMETHODIMP
nsThread::AsyncShutdown() {
  LOG(("THRD(%p) async shutdown\n", this));

  ShutdownInternal(/* aSync = */ false);
  return NS_OK;
}
800
// Common implementation behind Shutdown() and AsyncShutdown(). Allocates the
// shutdown context (owned by the *calling* thread's
// mRequestedShutdownContexts) and posts the quit event to the target thread.
// Returns null if shutdown was already requested or this is misused from the
// target thread itself.
nsThreadShutdownContext* nsThread::ShutdownInternal(bool aSync) {
  MOZ_ASSERT(mEvents);
  MOZ_ASSERT(mEventTarget);
  MOZ_ASSERT(mThread != PR_GetCurrentThread());
  // Assert in debug, but also bail gracefully in release builds.
  if (NS_WARN_IF(mThread == PR_GetCurrentThread())) {
    return nullptr;
  }

  // Prevent multiple calls to this method.
  if (!mShutdownRequired.compareExchange(true, false)) {
    return nullptr;
  }
  MOZ_ASSERT(mThread);

  MaybeRemoveFromThreadList();

  NotNull<nsThread*> currentThread =
      WrapNotNull(nsThreadManager::get().GetCurrentThread());

  MOZ_DIAGNOSTIC_ASSERT(currentThread->EventQueue(),
                        "Shutdown() may only be called from an XPCOM thread");

  // Allocate a shutdown context and store a strong ref.
  auto context =
      new nsThreadShutdownContext(WrapNotNull(this), currentThread, aSync);
  Unused << *currentThread->mRequestedShutdownContexts.EmplaceBack(context);

  // Set mShutdownContext and wake up the thread in case it is waiting for
  // events to process.
  nsCOMPtr<nsIRunnable> event =
      new nsThreadShutdownEvent(WrapNotNull(this), WrapNotNull(context));
  // XXXroc What if posting the event fails due to OOM?
  mEvents->PutEvent(event.forget(), EventQueuePriority::Normal);

  // We could still end up with other events being added after the shutdown
  // task, but that's okay because we process pending events in ThreadFunc
  // after setting mShutdownContext just before exiting.
  return context;
}
840
// Runs on the joining thread when the shutdown ack arrives (or directly from
// Shutdown()). First call in a sync shutdown merely clears the flag so the
// SpinEventLoopUntil in Shutdown() can unwind; the second call joins the
// terminated PRThread and releases the context.
void nsThread::ShutdownComplete(NotNull<nsThreadShutdownContext*> aContext) {
  MOZ_ASSERT(mEvents);
  MOZ_ASSERT(mEventTarget);
  MOZ_ASSERT(aContext->mTerminatingThread == this);

  MaybeRemoveFromThreadList();

  if (aContext->mAwaitingShutdownAck) {
    // We're in a synchronous shutdown, so tell whatever is up the stack that
    // we're done and unwind the stack so it can call us again.
    aContext->mAwaitingShutdownAck = false;
    return;
  }

  // Now, it should be safe to join without fear of dead-locking.
  PR_JoinThread(aContext->mTerminatingPRThread);
  MOZ_ASSERT(!mThread);

#ifdef DEBUG
  nsCOMPtr<nsIThreadObserver> obs = mEvents->GetObserver();
  MOZ_ASSERT(!obs, "Should have been cleared at shutdown!");
#endif

  // Delete aContext.
  // aContext might not be in mRequestedShutdownContexts if it belongs to a
  // thread that was leaked by calling nsIThreadPool::ShutdownWithTimeout.
  aContext->mJoiningThread->mRequestedShutdownContexts.RemoveElement(
      aContext, ShutdownContextsComp{});
}
870
// Spins this thread's event loop until every shutdown this thread requested
// of other threads has been acknowledged and cleaned up.
void nsThread::WaitForAllAsynchronousShutdowns() {
  // This is the motivating example for why SpinEventLoop has the template
  // parameter we are providing here.
  SpinEventLoopUntil<ProcessFailureBehavior::IgnoreAndContinue>(
      [&]() { return mRequestedShutdownContexts.IsEmpty(); }, this);
}
877
// Synchronous shutdown: requests termination, then spins the caller's event
// loop until the target thread acks, and finally joins it. Must not be
// called while holding locks the target thread might need.
NS_IMETHODIMP
nsThread::Shutdown() {
  LOG(("THRD(%p) sync shutdown\n", this));

  nsThreadShutdownContext* maybeContext = ShutdownInternal(/* aSync = */ true);
  if (!maybeContext) {
    return NS_OK;  // The thread has already shut down.
  }

  NotNull<nsThreadShutdownContext*> context = WrapNotNull(maybeContext);

  // Process events on the current thread until we receive a shutdown ACK.
  // Allows waiting; ensure no locks are held that would deadlock us!
  SpinEventLoopUntil([&, context]() { return !context->mAwaitingShutdownAck; },
                     context->mJoiningThread);

  // Second ShutdownComplete call: performs the PR_JoinThread and cleanup.
  ShutdownComplete(context);

  return NS_OK;
}
898
// Both queries below may only be asked from the thread itself; the queue
// state is meaningless (racy) from any other thread.
NS_IMETHODIMP
nsThread::HasPendingEvents(bool* aResult) {
  if (NS_WARN_IF(PR_GetCurrentThread() != mThread)) {
    return NS_ERROR_NOT_SAME_THREAD;
  }

  *aResult = mEvents->HasPendingEvent();
  return NS_OK;
}

NS_IMETHODIMP
nsThread::HasPendingHighPriorityEvents(bool* aResult) {
  if (NS_WARN_IF(PR_GetCurrentThread() != mThread)) {
    return NS_ERROR_NOT_SAME_THREAD;
  }

  *aResult = mEvents->HasPendingHighPriorityEvents();
  return NS_OK;
}
918
// Places an event directly into a specific priority queue (e.g. Idle),
// bypassing the normal dispatch path. Fails if the queue has already been
// shut down and will therefore never run the event.
NS_IMETHODIMP
nsThread::DispatchToQueue(already_AddRefed<nsIRunnable> aEvent,
                          EventQueuePriority aQueue) {
  nsCOMPtr<nsIRunnable> event = aEvent;

  if (NS_WARN_IF(!event)) {
    return NS_ERROR_INVALID_ARG;
  }

  if (!mEvents->PutEvent(event.forget(), aQueue)) {
    NS_WARNING(
        "An idle event was posted to a thread that will never run it "
        "(rejected)");
    return NS_ERROR_UNEXPECTED;
  }

  return NS_OK;
}
937
938 #ifdef MOZ_CANARY
939 void canary_alarm_handler(int signum);
940
// RAII watchdog for a single event: arms a one-shot alarm when constructed
// and disarms it on destruction. If the event outlives the alarm, the
// SIGALRM handler (canary_alarm_handler) dumps a backtrace to
// sCanaryOutputFD.
class Canary {
  // XXX ToDo: support nested loops
 public:
  // Arm a 15ms (15000us) one-shot alarm for this event.
  Canary() {
    if (sCanaryOutputFD > 0 && EventLatencyIsImportant()) {
      signal(SIGALRM, canary_alarm_handler);
      ualarm(15000, 0);
    }
  }

  // Disarm the alarm: the event finished in time.
  // NOTE(review): the constructor gates on sCanaryOutputFD > 0 but this
  // checks != 0 — confirm which sentinel an unset FD uses.
  ~Canary() {
    if (sCanaryOutputFD != 0 && EventLatencyIsImportant()) {
      ualarm(0, 0);
    }
  }

  // Latency is only worth watching on the parent process's main thread.
  static bool EventLatencyIsImportant() {
    return NS_IsMainThread() && XRE_IsParentProcess();
  }
};
961
// SIGALRM handler fired when an event exceeds the canary deadline: emits a
// message plus a backtrace to sCanaryOutputFD. Only async-signal-safe calls
// (write, backtrace, backtrace_symbols_fd) are used here.
void canary_alarm_handler(int signum) {
  void* array[30];
  const char msg[29] = "event took too long to run:\n";
  // use write to be safe in the signal handler
  // NOTE(review): sizeof(msg) includes the trailing NUL, so 29 bytes are
  // written — confirm the terminator is wanted in the output stream.
  write(sCanaryOutputFD, msg, sizeof(msg));
  backtrace_symbols_fd(array, backtrace(array, 30), sCanaryOutputFD);
}
969
970 #endif
971
// Invokes |func_| with argument list |params_| on every nsIThreadObserver in
// |observers_|. Uses a ForwardIterator so the observer array may be mutated
// (observers added/removed) while the notification is in progress.
#define NOTIFY_EVENT_OBSERVERS(observers_, func_, params_)                \
  do {                                                                    \
    if (!observers_.IsEmpty()) {                                          \
      nsTObserverArray<nsCOMPtr<nsIThreadObserver>>::ForwardIterator iter_( \
          observers_);                                                    \
      nsCOMPtr<nsIThreadObserver> obs_;                                   \
      while (iter_.HasMore()) {                                           \
        obs_ = iter_.GetNext();                                           \
        obs_->func_ params_;                                              \
      }                                                                   \
    }                                                                     \
  } while (0)
984
985 #ifdef MOZ_COLLECTING_RUNNABLE_TELEMETRY
GetLabeledRunnableName(nsIRunnable * aEvent,nsACString & aName,EventQueuePriority aPriority)986 static bool GetLabeledRunnableName(nsIRunnable* aEvent, nsACString& aName,
987 EventQueuePriority aPriority) {
988 bool labeled = false;
989 if (RefPtr<SchedulerGroup::Runnable> groupRunnable = do_QueryObject(aEvent)) {
990 labeled = true;
991 MOZ_ALWAYS_TRUE(NS_SUCCEEDED(groupRunnable->GetName(aName)));
992 } else if (nsCOMPtr<nsINamed> named = do_QueryInterface(aEvent)) {
993 MOZ_ALWAYS_TRUE(NS_SUCCEEDED(named->GetName(aName)));
994 } else {
995 aName.AssignLiteral("non-nsINamed runnable");
996 }
997 if (aName.IsEmpty()) {
998 aName.AssignLiteral("anonymous runnable");
999 }
1000
1001 if (!labeled && aPriority > EventQueuePriority::Input) {
1002 aName.AppendLiteral("(unlabeled)");
1003 }
1004
1005 return labeled;
1006 }
1007 #endif
1008
GetPerformanceCounter(nsIRunnable * aEvent) const1009 mozilla::PerformanceCounter* nsThread::GetPerformanceCounter(
1010 nsIRunnable* aEvent) const {
1011 RefPtr<SchedulerGroup::Runnable> docRunnable = do_QueryObject(aEvent);
1012 if (docRunnable) {
1013 mozilla::dom::DocGroup* docGroup = docRunnable->DocGroup();
1014 if (docGroup) {
1015 return docGroup->GetPerformanceCounter();
1016 }
1017 }
1018 return nullptr;
1019 }
1020
ShallowSizeOfIncludingThis(mozilla::MallocSizeOf aMallocSizeOf) const1021 size_t nsThread::ShallowSizeOfIncludingThis(
1022 mozilla::MallocSizeOf aMallocSizeOf) const {
1023 size_t n = 0;
1024 if (mShutdownContext) {
1025 n += aMallocSizeOf(mShutdownContext);
1026 }
1027 n += mRequestedShutdownContexts.ShallowSizeOfExcludingThis(aMallocSizeOf);
1028 return aMallocSizeOf(this) + aMallocSizeOf(mThread) + n;
1029 }
1030
SizeOfEventQueues(mozilla::MallocSizeOf aMallocSizeOf) const1031 size_t nsThread::SizeOfEventQueues(mozilla::MallocSizeOf aMallocSizeOf) const {
1032 size_t n = 0;
1033 if (mEventTarget) {
1034 // The size of mEvents is reported by mEventTarget.
1035 n += mEventTarget->SizeOfIncludingThis(aMallocSizeOf);
1036 }
1037 return n;
1038 }
1039
// Total memory report: shallow members plus the event-queue machinery.
size_t nsThread::SizeOfIncludingThis(
    mozilla::MallocSizeOf aMallocSizeOf) const {
  return ShallowSizeOfIncludingThis(aMallocSizeOf) +
         SizeOfEventQueues(aMallocSizeOf);
}
1045
// Runs at most one event from this thread's queue. Must be called on this
// thread. |aMayWait| permits blocking until an event arrives (subject to
// shutdown state); |*aResult| is set to true iff an event actually ran.
// Returns NS_ERROR_UNEXPECTED when asked to wait while shutting down with an
// empty queue, so toplevel loops exit.
NS_IMETHODIMP
nsThread::ProcessNextEvent(bool aMayWait, bool* aResult) {
  MOZ_ASSERT(mEvents);
  NS_ENSURE_TRUE(mEvents, NS_ERROR_NOT_IMPLEMENTED);

  LOG(("THRD(%p) ProcessNextEvent [%u %u]\n", this, aMayWait,
       mNestedEventLoopDepth));

  if (NS_WARN_IF(PR_GetCurrentThread() != mThread)) {
    return NS_ERROR_NOT_SAME_THREAD;
  }

  // The toplevel event loop normally blocks waiting for the next event, but
  // if we're trying to shut this thread down, we must exit the event loop when
  // the event queue is empty.
  // This only applies to the toplevel event loop! Nested event loops (e.g.
  // during sync dispatch) are waiting for some state change and must be able
  // to block even if something has requested shutdown of the thread. Otherwise
  // we'll just busywait as we endlessly look for an event, fail to find one,
  // and repeat the nested event loop since its state change hasn't happened
  // yet.
  bool reallyWait = aMayWait && (mNestedEventLoopDepth > 0 || !ShuttingDown());

  // In local execution mode, only the pushed local queue is drained; run the
  // event directly, skipping all observer/telemetry bookkeeping below.
  if (mIsInLocalExecutionMode) {
    EventQueuePriority priority;
    if (nsCOMPtr<nsIRunnable> event =
            mEvents->GetEvent(reallyWait, &priority)) {
      *aResult = true;
      LogRunnable::Run log(event);
      event->Run();
      event = nullptr;
    } else {
      *aResult = false;
    }
    return NS_OK;
  }

  Maybe<dom::AutoNoJSAPI> noJSAPI;

  if (mUseHangMonitor && reallyWait) {
    BackgroundHangMonitor().NotifyWait();
  }

  if (mIsMainThread) {
    DoMainThreadSpecificProcessing();
  }

  ++mNestedEventLoopDepth;

  // We only want to create an AutoNoJSAPI on threads that actually do DOM stuff
  // (including workers). Those are exactly the threads that have an
  // mScriptObserver.
  bool callScriptObserver = !!mScriptObserver;
  if (callScriptObserver) {
    noJSAPI.emplace();
    mScriptObserver->BeforeProcessTask(reallyWait);
  }

#ifdef EARLY_BETA_OR_EARLIER
  // Need to capture mayWaitForWakeup state before OnProcessNextEvent,
  // since on the main thread OnProcessNextEvent ends up waiting for the new
  // events.
  bool mayWaitForWakeup = reallyWait && !mEvents->HasPendingEvent();
#endif

  // Notify the thread observer and then the per-queue event observers, in
  // that order; the After* notifications below run in the reverse order.
  nsCOMPtr<nsIThreadObserver> obs = mEvents->GetObserverOnThread();
  if (obs) {
    obs->OnProcessNextEvent(this, reallyWait);
  }

  NOTIFY_EVENT_OBSERVERS(EventQueue()->EventObservers(), OnProcessNextEvent,
                         (this, reallyWait));

#ifdef MOZ_CANARY
  Canary canary;
#endif
  nsresult rv = NS_OK;

  {
    // Scope for |event| to make sure that its destructor fires while
    // mNestedEventLoopDepth has been incremented, since that destructor can
    // also do work.
    EventQueuePriority priority;
    nsCOMPtr<nsIRunnable> event =
        mEvents->GetEvent(reallyWait, &priority, &mLastEventDelay);

    *aResult = (event.get() != nullptr);

    if (event) {
#ifdef EARLY_BETA_OR_EARLIER
      // Wakeup telemetry: every kTelemetryWakeupCountLimit wakeups, report
      // the elapsed interval keyed by thread name.
      if (mayWaitForWakeup && mThread) {
        ++mWakeupCount;
        if (mWakeupCount == kTelemetryWakeupCountLimit) {
          TimeStamp now = TimeStamp::Now();
          double ms = (now - mLastWakeupCheckTime).ToMilliseconds();
          if (ms < 0) {
            ms = 0;
          }
          const char* name = !mNameForWakeupTelemetry.IsEmpty()
                                 ? mNameForWakeupTelemetry.get()
                                 : PR_GetThreadName(mThread);
          if (!name) {
            name = mIsMainThread ? "MainThread" : "(nameless thread)";
          }
          nsDependentCString key(name);
          Telemetry::Accumulate(Telemetry::THREAD_WAKEUP, key,
                                static_cast<uint32_t>(ms));
          mLastWakeupCheckTime = now;
          mWakeupCount = 0;
        }
      }
#endif

      LOG(("THRD(%p) running [%p]\n", this, event.get()));

      LogRunnable::Run log(event);

      // Delay event processing to encourage whoever dispatched this event
      // to run.
      DelayForChaosMode(ChaosFeature::TaskRunning, 1000);

      mozilla::TimeStamp now = mozilla::TimeStamp::Now();

      if (mUseHangMonitor) {
        BackgroundHangMonitor().NotifyActivity();
      }

#ifdef MOZ_COLLECTING_RUNNABLE_TELEMETRY
      // If we're on the main thread, we want to record our current runnable's
      // name in a static so that BHR can record it.
      Array<char, kRunnableNameBufSize> restoreRunnableName;
      restoreRunnableName[0] = '\0';
      // Restore the previous runnable name when this event finishes, even if
      // it exits via an early return or nested event loop unwinding.
      auto clear = MakeScopeExit([&] {
        if (mIsMainThread) {
          MOZ_ASSERT(NS_IsMainThread());
          sMainThreadRunnableName = restoreRunnableName;
        }
      });
      if (mIsMainThread) {
        nsAutoCString name;
        GetLabeledRunnableName(event, name, priority);

        MOZ_ASSERT(NS_IsMainThread());
        restoreRunnableName = sMainThreadRunnableName;

        // Copy the name into sMainThreadRunnableName's buffer, and append a
        // terminating null.
        uint32_t length = std::min((uint32_t)kRunnableNameBufSize - 1,
                                   (uint32_t)name.Length());
        memcpy(sMainThreadRunnableName.begin(), name.BeginReading(), length);
        sMainThreadRunnableName[length] = '\0';
      }
#endif
      // Input events get their execution duration tracked separately.
      Maybe<AutoTimeDurationHelper> timeDurationHelper;
      if (priority == EventQueuePriority::Input) {
        timeDurationHelper.emplace();
      }

      PerformanceCounterState::Snapshot snapshot =
          mPerformanceCounterState.RunnableWillRun(
              GetPerformanceCounter(event), now,
              priority == EventQueuePriority::Idle);

      mLastEventStart = now;

      event->Run();

      mEvents->DidRunEvent();

      mPerformanceCounterState.RunnableDidRun(std::move(snapshot));

      // To cover the event's destructor code inside the LogRunnable span.
      event = nullptr;
    } else {
      mLastEventDelay = TimeDuration();
      mLastEventStart = TimeStamp();
      if (aMayWait) {
        MOZ_ASSERT(ShuttingDown(),
                   "This should only happen when shutting down");
        rv = NS_ERROR_UNEXPECTED;
      }
    }
  }

  NOTIFY_EVENT_OBSERVERS(EventQueue()->EventObservers(), AfterProcessNextEvent,
                         (this, *aResult));

  if (obs) {
    obs->AfterProcessNextEvent(this, *aResult);
  }

  if (callScriptObserver) {
    if (mScriptObserver) {
      mScriptObserver->AfterProcessTask(mNestedEventLoopDepth);
    }
    noJSAPI.reset();
  }

  --mNestedEventLoopDepth;

  return rv;
}
1248
1249 //-----------------------------------------------------------------------------
1250 // nsISupportsPriority
1251
NS_IMETHODIMP
nsThread::GetPriority(int32_t* aPriority) {
  // Returns the last value passed to SetPriority (an nsISupportsPriority
  // constant), not the live NSPR thread priority.
  *aPriority = mPriority;
  return NS_OK;
}
1257
1258 NS_IMETHODIMP
SetPriority(int32_t aPriority)1259 nsThread::SetPriority(int32_t aPriority) {
1260 if (NS_WARN_IF(!mThread)) {
1261 return NS_ERROR_NOT_INITIALIZED;
1262 }
1263
1264 // NSPR defines the following four thread priorities:
1265 // PR_PRIORITY_LOW
1266 // PR_PRIORITY_NORMAL
1267 // PR_PRIORITY_HIGH
1268 // PR_PRIORITY_URGENT
1269 // We map the priority values defined on nsISupportsPriority to these values.
1270
1271 mPriority = aPriority;
1272
1273 PRThreadPriority pri;
1274 if (mPriority <= PRIORITY_HIGHEST) {
1275 pri = PR_PRIORITY_URGENT;
1276 } else if (mPriority < PRIORITY_NORMAL) {
1277 pri = PR_PRIORITY_HIGH;
1278 } else if (mPriority > PRIORITY_NORMAL) {
1279 pri = PR_PRIORITY_LOW;
1280 } else {
1281 pri = PR_PRIORITY_NORMAL;
1282 }
1283 // If chaos mode is active, retain the randomly chosen priority
1284 if (!ChaosMode::isActive(ChaosFeature::ThreadScheduling)) {
1285 PR_SetThreadPriority(mThread, pri);
1286 }
1287
1288 return NS_OK;
1289 }
1290
NS_IMETHODIMP
nsThread::AdjustPriority(int32_t aDelta) {
  // Relative change: applies aDelta on top of the current stored priority.
  return SetPriority(mPriority + aDelta);
}
1295
1296 //-----------------------------------------------------------------------------
1297 // nsIThreadInternal
1298
1299 NS_IMETHODIMP
GetObserver(nsIThreadObserver ** aObs)1300 nsThread::GetObserver(nsIThreadObserver** aObs) {
1301 MOZ_ASSERT(mEvents);
1302 NS_ENSURE_TRUE(mEvents, NS_ERROR_NOT_IMPLEMENTED);
1303
1304 nsCOMPtr<nsIThreadObserver> obs = mEvents->GetObserver();
1305 obs.forget(aObs);
1306 return NS_OK;
1307 }
1308
1309 NS_IMETHODIMP
SetObserver(nsIThreadObserver * aObs)1310 nsThread::SetObserver(nsIThreadObserver* aObs) {
1311 MOZ_ASSERT(mEvents);
1312 NS_ENSURE_TRUE(mEvents, NS_ERROR_NOT_IMPLEMENTED);
1313
1314 if (NS_WARN_IF(PR_GetCurrentThread() != mThread)) {
1315 return NS_ERROR_NOT_SAME_THREAD;
1316 }
1317
1318 mEvents->SetObserver(aObs);
1319 return NS_OK;
1320 }
1321
// Depth of nested ProcessNextEvent calls currently on this thread's stack.
// Must be queried from the thread itself.
uint32_t nsThread::RecursionDepth() const {
  MOZ_ASSERT(PR_GetCurrentThread() == mThread);
  return mNestedEventLoopDepth;
}
1326
1327 NS_IMETHODIMP
AddObserver(nsIThreadObserver * aObserver)1328 nsThread::AddObserver(nsIThreadObserver* aObserver) {
1329 MOZ_ASSERT(mEvents);
1330 NS_ENSURE_TRUE(mEvents, NS_ERROR_NOT_IMPLEMENTED);
1331
1332 if (NS_WARN_IF(!aObserver)) {
1333 return NS_ERROR_INVALID_ARG;
1334 }
1335 if (NS_WARN_IF(PR_GetCurrentThread() != mThread)) {
1336 return NS_ERROR_NOT_SAME_THREAD;
1337 }
1338
1339 EventQueue()->AddObserver(aObserver);
1340
1341 return NS_OK;
1342 }
1343
1344 NS_IMETHODIMP
RemoveObserver(nsIThreadObserver * aObserver)1345 nsThread::RemoveObserver(nsIThreadObserver* aObserver) {
1346 MOZ_ASSERT(mEvents);
1347 NS_ENSURE_TRUE(mEvents, NS_ERROR_NOT_IMPLEMENTED);
1348
1349 if (NS_WARN_IF(PR_GetCurrentThread() != mThread)) {
1350 return NS_ERROR_NOT_SAME_THREAD;
1351 }
1352
1353 EventQueue()->RemoveObserver(aObserver);
1354
1355 return NS_OK;
1356 }
1357
SetScriptObserver(mozilla::CycleCollectedJSContext * aScriptObserver)1358 void nsThread::SetScriptObserver(
1359 mozilla::CycleCollectedJSContext* aScriptObserver) {
1360 if (!aScriptObserver) {
1361 mScriptObserver = nullptr;
1362 return;
1363 }
1364
1365 MOZ_ASSERT(!mScriptObserver);
1366 mScriptObserver = aScriptObserver;
1367 }
1368
// Main-thread-only housekeeping run before processing each event: cancels
// pending CPOWs and relays any queued memory-pressure state as an observer
// notification.
void nsThread::DoMainThreadSpecificProcessing() const {
  MOZ_ASSERT(mIsMainThread);

  ipc::CancelCPOWs();

  // Fire a memory pressure notification, if one is pending.
  if (!ShuttingDown()) {
    MemoryPressureState mpPending = NS_GetPendingMemoryPressure();
    if (mpPending != MemPressure_None) {
      nsCOMPtr<nsIObserverService> os = services::GetObserverService();

      if (os) {
        // Stopping maps to "memory-pressure-stop"; New vs. ongoing low-memory
        // states both map to "memory-pressure" with a distinguishing payload.
        if (mpPending == MemPressure_Stopping) {
          os->NotifyObservers(nullptr, "memory-pressure-stop", nullptr);
        } else {
          os->NotifyObservers(nullptr, "memory-pressure",
                              mpPending == MemPressure_New
                                  ? u"low-memory"
                                  : u"low-memory-ongoing");
        }
      } else {
        NS_WARNING("Can't get observer service!");
      }
    }
  }
}
1395
NS_IMETHODIMP
nsThread::GetEventTarget(nsIEventTarget** aEventTarget) {
  // The thread is its own event target; hand out an owning reference.
  nsCOMPtr<nsIEventTarget> target = this;
  target.forget(aEventTarget);
  return NS_OK;
}
1402
// nsThread dispatches events to itself, so it serves as its own (serial)
// event target. These return non-owning pointers.
nsIEventTarget* nsThread::EventTarget() { return this; }

nsISerialEventTarget* nsThread::SerialEventTarget() { return this; }
1406
// Begins "local execution" mode: returns a record from which an
// nsLocalExecutionGuard pushes a temporary event queue, so that only events
// dispatched to that queue run. Must be called on this thread, and local
// execution must not already be active (release-asserted).
nsLocalExecutionRecord nsThread::EnterLocalExecution() {
  MOZ_RELEASE_ASSERT(!mIsInLocalExecutionMode);
  MOZ_ASSERT(IsOnCurrentThread());
  MOZ_ASSERT(EventQueue());
  return nsLocalExecutionRecord(*EventQueue(), mIsInLocalExecutionMode);
}
1413
// Pushes a fresh event queue onto the thread's queue stack and flips the
// thread's local-execution flag on. Member-init order is load-bearing:
// mEventQueueStack must be bound before PushEventQueue() is invoked for
// mLocalEventTarget.
nsLocalExecutionGuard::nsLocalExecutionGuard(
    nsLocalExecutionRecord&& aLocalExecutionRecord)
    : mEventQueueStack(aLocalExecutionRecord.mEventQueueStack),
      mLocalEventTarget(mEventQueueStack.PushEventQueue()),
      mLocalExecutionFlag(aLocalExecutionRecord.mLocalExecutionFlag) {
  MOZ_ASSERT(mLocalEventTarget);
  MOZ_ASSERT(!mLocalExecutionFlag);
  mLocalExecutionFlag = true;
}
1423
// Ends local execution: clears the thread's flag and pops the temporary
// event queue pushed by the constructor.
nsLocalExecutionGuard::~nsLocalExecutionGuard() {
  MOZ_ASSERT(mLocalExecutionFlag);
  mLocalExecutionFlag = false;
  mEventQueueStack.PopEventQueue(mLocalEventTarget);
}
1429
1430 namespace mozilla {
// Called just before a runnable executes. Flushes time accrued by any outer
// (nested) runnable, records which counter the new runnable bills to, and
// returns a Snapshot of the outer runnable's state for RunnableDidRun() to
// restore.
PerformanceCounterState::Snapshot PerformanceCounterState::RunnableWillRun(
    PerformanceCounter* aCounter, TimeStamp aNow, bool aIsIdleRunnable) {
  if (IsNestedRunnable()) {
    // Flush out any accumulated time that should be accounted to the
    // current runnable before we start running a nested runnable.
    MaybeReportAccumulatedTime(aNow);
  }

  // Capture the outer runnable's accounting state before overwriting it.
  Snapshot snapshot(mCurrentEventLoopDepth, mCurrentPerformanceCounter,
                    mCurrentRunnableIsIdleRunnable);

  mCurrentEventLoopDepth = mNestedEventLoopDepth;
  mCurrentPerformanceCounter = aCounter;
  mCurrentRunnableIsIdleRunnable = aIsIdleRunnable;
  mCurrentTimeSliceStart = aNow;

  return snapshot;
}
1449
// Called after a runnable finishes. Reports the time it accumulated, then
// restores the outer runnable's accounting state from the snapshot taken by
// RunnableWillRun().
void PerformanceCounterState::RunnableDidRun(Snapshot&& aSnapshot) {
  // First thing: Restore our mCurrentEventLoopDepth so we can use
  // IsNestedRunnable().
  mCurrentEventLoopDepth = aSnapshot.mOldEventLoopDepth;

  // We may not need the current timestamp; don't bother computing it if we
  // don't.
  TimeStamp now;
  if (mCurrentPerformanceCounter || mIsMainThread || IsNestedRunnable()) {
    now = TimeStamp::Now();
  }
  if (mCurrentPerformanceCounter || mIsMainThread) {
    MaybeReportAccumulatedTime(now);
  }

  // And now restore the rest of our state.
  mCurrentPerformanceCounter = std::move(aSnapshot.mOldPerformanceCounter);
  mCurrentRunnableIsIdleRunnable = aSnapshot.mOldIsIdleRunnable;
  if (IsNestedRunnable()) {
    // Reset mCurrentTimeSliceStart to right now, so our parent runnable's next
    // slice can be properly accounted for.
    mCurrentTimeSliceStart = now;
  } else {
    // We are done at the outermost level; we are no longer in a timeslice.
    mCurrentTimeSliceStart = TimeStamp();
  }
}
1477
// Bills the elapsed time of the current timeslice (since
// mCurrentTimeSliceStart) to the current performance counter, and on the
// main thread records long-task markers/timestamps. Must only be called
// while a timeslice is in progress.
void PerformanceCounterState::MaybeReportAccumulatedTime(TimeStamp aNow) {
  MOZ_ASSERT(mCurrentTimeSliceStart,
             "How did we get here if we're not in a timeslice?");

  if (!mCurrentPerformanceCounter && !mIsMainThread) {
    // No one cares about this timeslice.
    return;
  }

  TimeDuration duration = aNow - mCurrentTimeSliceStart;
  if (mCurrentPerformanceCounter) {
    mCurrentPerformanceCounter->IncrementExecutionDuration(
        duration.ToMicroseconds());
  }

  // Long tasks only matter on the main thread.
  if (mIsMainThread && duration.ToMilliseconds() > LONGTASK_BUSY_WINDOW_MS) {
    // Idle events (gc...) don't *really* count here
    if (!mCurrentRunnableIsIdleRunnable) {
      mLastLongNonIdleTaskEnd = aNow;
    }
    mLastLongTaskEnd = aNow;

#ifdef MOZ_GECKO_PROFILER
    // Emit a profiler marker spanning the long timeslice.
    if (profiler_thread_is_being_profiled()) {
      PROFILER_ADD_MARKER_WITH_PAYLOAD(
          mCurrentRunnableIsIdleRunnable ? "LongIdleTask" : "LongTask", OTHER,
          LongTaskMarkerPayload, (mCurrentTimeSliceStart, aNow));
    }
#endif
  }
}
1510
1511 } // namespace mozilla
1512