/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include <ctype.h>
#include <errno.h>
#include <limits.h>
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#if !defined(MOZ_PROFILING)
#  error "DMD requires MOZ_PROFILING"
#endif

#ifdef XP_WIN
#  include <windows.h>
#  include <process.h>
#else
#  include <pthread.h>
#  include <sys/types.h>
#  include <unistd.h>
#endif

#ifdef ANDROID
#  include <android/log.h>
#endif

#include "nscore.h"

#include "mozilla/Assertions.h"
#include "mozilla/FastBernoulliTrial.h"
#include "mozilla/HashFunctions.h"
#include "mozilla/HashTable.h"
#include "mozilla/IntegerPrintfMacros.h"
#include "mozilla/JSONWriter.h"
#include "mozilla/Likely.h"
#include "mozilla/MemoryReporting.h"
#include "mozilla/PodOperations.h"
#include "mozilla/StackWalk.h"
#include "mozilla/ThreadLocal.h"

// CodeAddressService is defined entirely in the header, so this does not make
// DMD depend on XPCOM's object file.
#include "CodeAddressService.h"

// replace_malloc.h needs to be included before replace_malloc_bridge.h,
// which DMD.h includes, so DMD.h needs to be included after replace_malloc.h.
#include "replace_malloc.h"
#include "DMD.h"

namespace mozilla {
namespace dmd {

class DMDBridge : public ReplaceMallocBridge {
  virtual DMDFuncs* GetDMDFuncs() override;
};

static DMDBridge* gDMDBridge;
static DMDFuncs gDMDFuncs;

DMDFuncs* DMDBridge::GetDMDFuncs() { return &gDMDFuncs; }

MOZ_FORMAT_PRINTF(1, 2)
inline void StatusMsg(const char* aFmt, ...) {
  va_list ap;
  va_start(ap, aFmt);
  gDMDFuncs.StatusMsg(aFmt, ap);
  va_end(ap);
}

//---------------------------------------------------------------------------
// Utilities
//---------------------------------------------------------------------------

#ifndef DISALLOW_COPY_AND_ASSIGN
#  define DISALLOW_COPY_AND_ASSIGN(T) \
    T(const T&);                      \
    void operator=(const T&)
#endif

static malloc_table_t gMallocTable;

// This provides infallible allocations (they abort on OOM).  We use it for all
// of DMD's own allocations, which fall into the following three cases.
//
// - Direct allocations (the easy case).
//
// - Indirect allocations in mozilla::{Vector,HashSet,HashMap} -- this class
//   serves as their AllocPolicy.
//
// - Other indirect allocations (e.g. MozStackWalk) -- see the comments on
//   Thread::mBlockIntercepts and in replace_malloc for how these work.
//
// It would be nice if we could use the InfallibleAllocPolicy from mozalloc,
// but DMD cannot use mozalloc.
//
class InfallibleAllocPolicy {
  static void ExitOnFailure(const void* aP);

 public:
  template <typename T>
  static T* maybe_pod_malloc(size_t aNumElems) {
    if (aNumElems & mozilla::tl::MulOverflowMask<sizeof(T)>::value)
      return nullptr;
    return (T*)gMallocTable.malloc(aNumElems * sizeof(T));
  }

  template <typename T>
  static T* maybe_pod_calloc(size_t aNumElems) {
    return (T*)gMallocTable.calloc(aNumElems, sizeof(T));
  }

  template <typename T>
  static T* maybe_pod_realloc(T* aPtr, size_t aOldSize, size_t aNewSize) {
    if (aNewSize & mozilla::tl::MulOverflowMask<sizeof(T)>::value)
      return nullptr;
    return (T*)gMallocTable.realloc(aPtr, aNewSize * sizeof(T));
  }

  static void* malloc_(size_t aSize) {
    void* p = gMallocTable.malloc(aSize);
    ExitOnFailure(p);
    return p;
  }

  template <typename T>
  static T* pod_malloc(size_t aNumElems) {
    T* p = maybe_pod_malloc<T>(aNumElems);
    ExitOnFailure(p);
    return p;
  }

  static void* calloc_(size_t aCount, size_t aSize) {
    void* p = gMallocTable.calloc(aCount, aSize);
    ExitOnFailure(p);
    return p;
  }

  template <typename T>
  static T* pod_calloc(size_t aNumElems) {
    T* p = maybe_pod_calloc<T>(aNumElems);
    ExitOnFailure(p);
    return p;
  }

  static void* realloc_(void* aPtr, size_t aNewSize) {
    void* p = gMallocTable.realloc(aPtr, aNewSize);
    ExitOnFailure(p);
    return p;
  }

  template <typename T>
  static T* pod_realloc(T* aPtr, size_t aOldSize, size_t aNewSize) {
    T* p = maybe_pod_realloc(aPtr, aOldSize, aNewSize);
    ExitOnFailure(p);
    return p;
  }

  static void* memalign_(size_t aAlignment, size_t aSize) {
    void* p = gMallocTable.memalign(aAlignment, aSize);
    ExitOnFailure(p);
    return p;
  }

  template <typename T>
  static void free_(T* aPtr, size_t aSize = 0) {
    gMallocTable.free(aPtr);
  }

  static char* strdup_(const char* aStr) {
    char* s = (char*)InfallibleAllocPolicy::malloc_(strlen(aStr) + 1);
    strcpy(s, aStr);
    return s;
  }

  template <class T>
  static T* new_() {
    void* mem = malloc_(sizeof(T));
    return new (mem) T;
  }

  template <class T, typename P1>
  static T* new_(const P1& aP1) {
    void* mem = malloc_(sizeof(T));
    return new (mem) T(aP1);
  }

  template <class T>
  static void delete_(T* aPtr) {
    if (aPtr) {
      aPtr->~T();
      InfallibleAllocPolicy::free_(aPtr);
    }
  }

  static void reportAllocOverflow() { ExitOnFailure(nullptr); }
  bool checkSimulatedOOM() const { return true; }
};

// This is only needed because of the |const void*| vs |void*| arg mismatch.
static size_t MallocSizeOf(const void* aPtr) {
  return gMallocTable.malloc_usable_size(const_cast<void*>(aPtr));
}

void DMDFuncs::StatusMsg(const char* aFmt, va_list aAp) {
#ifdef ANDROID
  __android_log_vprint(ANDROID_LOG_INFO, "DMD", aFmt, aAp);
#else
  // The +64 is easily enough for the "DMD[<pid>] " prefix and the NUL.
  char* fmt = (char*)InfallibleAllocPolicy::malloc_(strlen(aFmt) + 64);
  sprintf(fmt, "DMD[%d] %s", getpid(), aFmt);
  vfprintf(stderr, fmt, aAp);
  InfallibleAllocPolicy::free_(fmt);
#endif
}

/* static */
void InfallibleAllocPolicy::ExitOnFailure(const void* aP) {
  if (!aP) {
    MOZ_CRASH("DMD out of memory; aborting");
  }
}

static double Percent(size_t part, size_t whole) {
  return (whole == 0) ? 0 : 100 * (double)part / whole;
}

// Commifies the number.
static char* Show(size_t n, char* buf, size_t buflen) {
  int nc = 0, i = 0, lasti = buflen - 2;
  buf[lasti + 1] = '\0';
  if (n == 0) {
    buf[lasti - i] = '0';
    i++;
  } else {
    while (n > 0) {
      if (((i - nc) % 3) == 0 && i != 0) {
        buf[lasti - i] = ',';
        i++;
        nc++;
      }
      buf[lasti - i] = static_cast<char>((n % 10) + '0');
      i++;
      n /= 10;
    }
  }
  int firstCharIndex = lasti - i + 1;

  MOZ_ASSERT(firstCharIndex >= 0);
  return &buf[firstCharIndex];
}
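
// For example, Show(1234567, buf, buflen) returns a pointer into |buf|
// holding "1,234,567". The string is built backwards from the end of |buf|,
// so the returned pointer is usually not |buf| itself.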

//---------------------------------------------------------------------------
// Options (Part 1)
//---------------------------------------------------------------------------

class Options {
  template <typename T>
  struct NumOption {
    const T mDefault;
    const T mMax;
    T mActual;
    NumOption(T aDefault, T aMax)
        : mDefault(aDefault), mMax(aMax), mActual(aDefault) {}
  };

  // DMD has several modes. These modes affect what data is recorded and
  // written to the output file, and the written data affects the
  // post-processing that dmd.py can do.
  //
  // Users specify the mode as soon as DMD starts. This leads to minimal
  // memory usage and log file size. It has the disadvantage that it is
  // inflexible -- if you want to change modes you have to re-run DMD. But in
  // practice changing modes seems to be rare, so it's not much of a problem.
  //
  // An alternative possibility would be to always record and output *all* the
  // information needed for all modes. This would let you choose the mode when
  // running dmd.py, and so you could do multiple kinds of profiling on a
  // single DMD run. But if you are only interested in one of the simpler
  // modes, you'd pay the price of (a) increased memory usage and (b) *very*
  // large log files.
  //
  // Finally, another alternative possibility would be to do mode selection
  // partly at DMD startup or recording, and then partly in dmd.py. This would
  // give some extra flexibility at moderate memory and file size cost. But
  // certain mode pairs wouldn't work, which would be confusing.
  //
  enum class Mode {
    // For each live block, this mode outputs: size (usable and slop) and
    // (possibly) an allocation stack. This mode is good for live heap
    // profiling.
    Live,

    // Like "Live", but for each live block it also outputs: zero or more
    // report stacks. This mode is good for identifying where memory reporters
    // should be added. This is the default mode.
    DarkMatter,

    // Like "Live", but also outputs the same data for dead blocks. This mode
    // does cumulative heap profiling, which is good for identifying where
    // large amounts of short-lived allocations ("heap churn") occur.
    Cumulative,

    // Like "Live", but this mode also outputs for each live block the address
    // of the block and the values contained in the blocks. This mode is
    // useful for investigating leaks, by helping to figure out which blocks
    // refer to other blocks. This mode force-enables full-stacks coverage
    // (i.e. --stacks=full).
    Scan
  };

  // With full stacks, every heap block gets a stack trace recorded for it.
  // This is complete but slow.
  //
  // With partial stacks, not all heap blocks will get a stack trace recorded.
  // A Bernoulli trial (see mfbt/FastBernoulliTrial.h for details) is performed
  // for each heap block to decide if it gets one. Because bigger heap blocks
  // are more likely to get a stack trace, even though most heap *blocks* won't
  // get a stack trace, most heap *bytes* will.
  enum class Stacks { Full, Partial };

  char* mDMDEnvVar;  // a saved copy, for later printing

  Mode mMode;
  Stacks mStacks;
  bool mShowDumpStats;

  void BadArg(const char* aArg);
  static const char* ValueIfMatch(const char* aArg, const char* aOptionName);
  static bool GetLong(const char* aArg, const char* aOptionName, long aMin,
                      long aMax, long* aValue);
  static bool GetBool(const char* aArg, const char* aOptionName, bool* aValue);

 public:
  explicit Options(const char* aDMDEnvVar);

  bool IsLiveMode() const { return mMode == Mode::Live; }
  bool IsDarkMatterMode() const { return mMode == Mode::DarkMatter; }
  bool IsCumulativeMode() const { return mMode == Mode::Cumulative; }
  bool IsScanMode() const { return mMode == Mode::Scan; }

  const char* ModeString() const;

  const char* DMDEnvVar() const { return mDMDEnvVar; }

  bool DoFullStacks() const { return mStacks == Stacks::Full; }
  bool ShowDumpStats() const { return mShowDumpStats; }
};
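
// For example, using the option spellings parsed in Options::Options below:
//
//   DMD="--mode=cumulative --stacks=partial --show-dump-stats=yes"
//
// Setting DMD=1 enables DMD with the default options.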

static Options* gOptions;

//---------------------------------------------------------------------------
// The global lock
//---------------------------------------------------------------------------

// MutexBase implements the platform-specific parts of a mutex.

#ifdef XP_WIN

class MutexBase {
  CRITICAL_SECTION mCS;

  DISALLOW_COPY_AND_ASSIGN(MutexBase);

 public:
  MutexBase() { InitializeCriticalSection(&mCS); }
  ~MutexBase() { DeleteCriticalSection(&mCS); }

  void Lock() { EnterCriticalSection(&mCS); }
  void Unlock() { LeaveCriticalSection(&mCS); }
};

#else

class MutexBase {
  pthread_mutex_t mMutex;

  MutexBase(const MutexBase&) = delete;

  const MutexBase& operator=(const MutexBase&) = delete;

 public:
  MutexBase() { pthread_mutex_init(&mMutex, nullptr); }

  void Lock() { pthread_mutex_lock(&mMutex); }
  void Unlock() { pthread_mutex_unlock(&mMutex); }
};

#endif

class Mutex : private MutexBase {
  bool mIsLocked;

  Mutex(const Mutex&) = delete;

  const Mutex& operator=(const Mutex&) = delete;

 public:
  Mutex() : mIsLocked(false) {}

  void Lock() {
    MutexBase::Lock();
    MOZ_ASSERT(!mIsLocked);
    mIsLocked = true;
  }

  void Unlock() {
    MOZ_ASSERT(mIsLocked);
    mIsLocked = false;
    MutexBase::Unlock();
  }

  bool IsLocked() { return mIsLocked; }
};

// This lock must be held while manipulating global state such as
// gStackTraceTable, gLiveBlockTable, gDeadBlockTable. Note that gOptions is
// *not* protected by this lock because it is only written to by Options(),
// which is only invoked at start-up and in ResetEverything(), which is only
// used by SmokeDMD.cpp.
static Mutex* gStateLock = nullptr;

class AutoLockState {
  AutoLockState(const AutoLockState&) = delete;

  const AutoLockState& operator=(const AutoLockState&) = delete;

 public:
  AutoLockState() { gStateLock->Lock(); }
  ~AutoLockState() { gStateLock->Unlock(); }
};

class AutoUnlockState {
  AutoUnlockState(const AutoUnlockState&) = delete;

  const AutoUnlockState& operator=(const AutoUnlockState&) = delete;

 public:
  AutoUnlockState() { gStateLock->Unlock(); }
  ~AutoUnlockState() { gStateLock->Lock(); }
};
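
// A minimal usage sketch (not from any particular call site): hold the state
// lock while touching the global tables, and use AutoUnlockState to release
// it temporarily around code that must not run under the lock.
//
//   {
//     AutoLockState lock;        // gStateLock->Lock()
//     // ... read/write gLiveBlockTable ...
//     {
//       AutoUnlockState unlock;  // temporarily gStateLock->Unlock()
//       // ... e.g. walk the stack, which may take other locks ...
//     }                          // re-locked here
//   }                            // unlocked here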

//---------------------------------------------------------------------------
// Per-thread blocking of intercepts
//---------------------------------------------------------------------------

// On MacOS, the first __thread/thread_local access calls malloc, which leads
// to an infinite loop. So we use pthread-based TLS instead, which somehow
// doesn't have this problem.
#if !defined(XP_DARWIN)
#  define DMD_THREAD_LOCAL(T) MOZ_THREAD_LOCAL(T)
#else
#  define DMD_THREAD_LOCAL(T) \
    detail::ThreadLocal<T, detail::ThreadLocalKeyStorage>
#endif

class Thread {
  // Required for allocation via InfallibleAllocPolicy::new_.
  friend class InfallibleAllocPolicy;

  // When true, this blocks intercepts, which allows malloc interception
  // functions to themselves call malloc.  (Nb: for direct calls to malloc we
  // can just use InfallibleAllocPolicy::{malloc_,new_}, but we sometimes
  // indirectly call vanilla malloc via functions like MozStackWalk.)
  bool mBlockIntercepts;

  Thread() : mBlockIntercepts(false) {}

  Thread(const Thread&) = delete;

  const Thread& operator=(const Thread&) = delete;

  static DMD_THREAD_LOCAL(Thread*) tlsThread;

 public:
  static void Init() {
    if (!tlsThread.init()) {
      MOZ_CRASH();
    }
  }

  static Thread* Fetch() {
    Thread* t = tlsThread.get();
    if (MOZ_UNLIKELY(!t)) {
      // This memory is never freed, even if the thread dies. It's a leak, but
      // only a tiny one.
      t = InfallibleAllocPolicy::new_<Thread>();
      tlsThread.set(t);
    }

    return t;
  }

  bool BlockIntercepts() {
    MOZ_ASSERT(!mBlockIntercepts);
    return mBlockIntercepts = true;
  }

  bool UnblockIntercepts() {
    MOZ_ASSERT(mBlockIntercepts);
    return mBlockIntercepts = false;
  }

  bool InterceptsAreBlocked() const { return mBlockIntercepts; }
};

DMD_THREAD_LOCAL(Thread*) Thread::tlsThread;

// An object of this class must be created (on the stack) before running any
// code that might allocate.
class AutoBlockIntercepts {
  Thread* const mT;

  AutoBlockIntercepts(const AutoBlockIntercepts&) = delete;

  const AutoBlockIntercepts& operator=(const AutoBlockIntercepts&) = delete;

 public:
  explicit AutoBlockIntercepts(Thread* aT) : mT(aT) { mT->BlockIntercepts(); }
  ~AutoBlockIntercepts() {
    MOZ_ASSERT(mT->InterceptsAreBlocked());
    mT->UnblockIntercepts();
  }
};

//---------------------------------------------------------------------------
// Location service
//---------------------------------------------------------------------------

struct DescribeCodeAddressLock {
  static void Unlock() { gStateLock->Unlock(); }
  static void Lock() { gStateLock->Lock(); }
  static bool IsLocked() { return gStateLock->IsLocked(); }
};

typedef CodeAddressService<InfallibleAllocPolicy, DescribeCodeAddressLock>
    CodeAddressService;

//---------------------------------------------------------------------------
// Stack traces
//---------------------------------------------------------------------------

class StackTrace {
 public:
  static const uint32_t MaxFrames = 24;

 private:
  uint32_t mLength;             // The number of PCs.
  const void* mPcs[MaxFrames];  // The PCs themselves.

 public:
  StackTrace() : mLength(0) {}
  StackTrace(const StackTrace& aOther) : mLength(aOther.mLength) {
    PodCopy(mPcs, aOther.mPcs, mLength);
  }

  uint32_t Length() const { return mLength; }
  const void* Pc(uint32_t i) const {
    MOZ_ASSERT(i < mLength);
    return mPcs[i];
  }

  uint32_t Size() const { return mLength * sizeof(mPcs[0]); }

  // The stack trace returned by this function is interned in gStackTraceTable,
  // and so is immortal and unmovable.
  static const StackTrace* Get(Thread* aT);

  // Hash policy.

  typedef StackTrace* Lookup;

  static mozilla::HashNumber hash(const StackTrace* const& aSt) {
    return mozilla::HashBytes(aSt->mPcs, aSt->Size());
  }

  static bool match(const StackTrace* const& aA, const StackTrace* const& aB) {
    return aA->mLength == aB->mLength &&
           memcmp(aA->mPcs, aB->mPcs, aA->Size()) == 0;
  }

 private:
  static void StackWalkCallback(uint32_t aFrameNumber, void* aPc, void* aSp,
                                void* aClosure) {
    StackTrace* st = (StackTrace*)aClosure;
    MOZ_ASSERT(st->mLength < MaxFrames);
    st->mPcs[st->mLength] = aPc;
    st->mLength++;
    MOZ_ASSERT(st->mLength == aFrameNumber);
  }
};

typedef mozilla::HashSet<StackTrace*, StackTrace, InfallibleAllocPolicy>
    StackTraceTable;
static StackTraceTable* gStackTraceTable = nullptr;

typedef mozilla::HashSet<const StackTrace*,
                         mozilla::DefaultHasher<const StackTrace*>,
                         InfallibleAllocPolicy>
    StackTraceSet;

typedef mozilla::HashSet<const void*, mozilla::DefaultHasher<const void*>,
                         InfallibleAllocPolicy>
    PointerSet;
typedef mozilla::HashMap<const void*, uint32_t,
                         mozilla::DefaultHasher<const void*>,
                         InfallibleAllocPolicy>
    PointerIdMap;

// We won't GC the stack trace table until it exceeds this many elements.
static uint32_t gGCStackTraceTableWhenSizeExceeds = 4 * 1024;

/* static */ const StackTrace* StackTrace::Get(Thread* aT) {
  MOZ_ASSERT(gStateLock->IsLocked());
  MOZ_ASSERT(aT->InterceptsAreBlocked());

  // On Windows, MozStackWalk can acquire a lock from the shared library
  // loader.  Another thread might call malloc while holding that lock (when
  // loading a shared library).  So we can't be in gStateLock during the call
  // to MozStackWalk.  For details, see
  // https://bugzilla.mozilla.org/show_bug.cgi?id=374829#c8
  // On Linux, something similar can happen;  see bug 824340.
  // So let's just release it on all platforms.
  StackTrace tmp;
  {
    AutoUnlockState unlock;
    // In each of the following cases, skipFrames is chosen so that the
    // first frame in each stack trace is a replace_* function (or as close as
    // possible, given the vagaries of inlining on different platforms).
#if defined(XP_WIN) && defined(_M_IX86)
    // This avoids MozStackWalk(), which causes unusably slow startup on Win32
    // when it is called during static initialization (see bug 1241684).
    //
    // This code is cribbed from the Gecko Profiler, which also uses
    // FramePointerStackWalk() on Win32: Registers::SyncPopulate() for the
    // frame pointer, and GetStackTop() for the stack end.
    CONTEXT context;
    RtlCaptureContext(&context);
    void** fp = reinterpret_cast<void**>(context.Ebp);

    PNT_TIB pTib = reinterpret_cast<PNT_TIB>(NtCurrentTeb());
    void* stackEnd = static_cast<void*>(pTib->StackBase);
    FramePointerStackWalk(StackWalkCallback, MaxFrames, &tmp, fp, stackEnd);
#elif defined(XP_MACOSX)
    // This avoids MozStackWalk(), which has become unusably slow on Mac due to
    // changes in libunwind.
    //
    // This code is cribbed from the Gecko Profiler, which also uses
    // FramePointerStackWalk() on Mac: Registers::SyncPopulate() for the frame
    // pointer, and GetStackTop() for the stack end.
    void** fp;
#  if defined(__x86_64__)
    asm(
        // Dereference %rbp to get previous %rbp
        "movq (%%rbp), %0\n\t"
        : "=r"(fp));
#  else
    asm("ldr %0, [x29]\n\t" : "=r"(fp));
#  endif
    void* stackEnd = pthread_get_stackaddr_np(pthread_self());
    FramePointerStackWalk(StackWalkCallback, MaxFrames, &tmp, fp, stackEnd);
#else
    MozStackWalk(StackWalkCallback, nullptr, MaxFrames, &tmp);
#endif
  }

  StackTraceTable::AddPtr p = gStackTraceTable->lookupForAdd(&tmp);
  if (!p) {
    StackTrace* stnew = InfallibleAllocPolicy::new_<StackTrace>(tmp);
    MOZ_ALWAYS_TRUE(gStackTraceTable->add(p, stnew));
  }
  return *p;
}

//---------------------------------------------------------------------------
// Heap blocks
//---------------------------------------------------------------------------

// This class combines a 2-byte-aligned pointer (i.e. one whose bottom bit
// is zero) with a 1-bit tag.
//
// |T| is the pointer type, e.g. |int*|, not the pointed-to type.  This makes
// it easier to have const pointers, e.g. |TaggedPtr<const int*>|.
template <typename T>
class TaggedPtr {
  union {
    T mPtr;
    uintptr_t mUint;
  };

  static const uintptr_t kTagMask = uintptr_t(0x1);
  static const uintptr_t kPtrMask = ~kTagMask;

  static bool IsTwoByteAligned(T aPtr) {
    return (uintptr_t(aPtr) & kTagMask) == 0;
  }

 public:
  TaggedPtr() : mPtr(nullptr) {}

  TaggedPtr(T aPtr, bool aBool) : mPtr(aPtr) {
    MOZ_ASSERT(IsTwoByteAligned(aPtr));
    uintptr_t tag = uintptr_t(aBool);
    MOZ_ASSERT(tag <= kTagMask);
    mUint |= (tag & kTagMask);
  }

  void Set(T aPtr, bool aBool) {
    MOZ_ASSERT(IsTwoByteAligned(aPtr));
    mPtr = aPtr;
    uintptr_t tag = uintptr_t(aBool);
    MOZ_ASSERT(tag <= kTagMask);
    mUint |= (tag & kTagMask);
  }

  T Ptr() const { return reinterpret_cast<T>(mUint & kPtrMask); }

  bool Tag() const { return bool(mUint & kTagMask); }
};
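
// For example (a sketch, not from a real call site):
//
//   TaggedPtr<const StackTrace*> p(st, /* aBool */ true);
//   p.Ptr();  // == st
//   p.Tag();  // == true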

// A live heap block. Stores both basic data and data about reports, if we're
// in DarkMatter mode.
class LiveBlock {
  const void* mPtr;
  const size_t mReqSize;  // size requested

  // The stack trace where this block was allocated, or nullptr if we didn't
  // record one.
  const StackTrace* const mAllocStackTrace;

  // This array has two elements because we record at most two reports of a
  // block.
  // - Ptr: |mReportStackTrace| - stack trace where this block was reported.
  //   nullptr if not reported.
  // - Tag bit 0: |mReportedOnAlloc| - was the block reported immediately on
  //   allocation?  If so, DMD must not clear the report at the end of
  //   Analyze(). Only relevant if |mReportStackTrace| is non-nullptr.
  //
  // |mPtr| is used as the key in LiveBlockTable, so it's ok for this member
  // to be |mutable|.
  //
  // Only used in DarkMatter mode.
  mutable TaggedPtr<const StackTrace*> mReportStackTrace_mReportedOnAlloc[2];

 public:
  LiveBlock(const void* aPtr, size_t aReqSize,
            const StackTrace* aAllocStackTrace)
      : mPtr(aPtr),
        mReqSize(aReqSize),
        mAllocStackTrace(aAllocStackTrace),
        mReportStackTrace_mReportedOnAlloc()  // all fields get zeroed
  {}

  const void* Address() const { return mPtr; }

  size_t ReqSize() const { return mReqSize; }

  size_t SlopSize() const { return MallocSizeOf(mPtr) - mReqSize; }

  const StackTrace* AllocStackTrace() const { return mAllocStackTrace; }

  const StackTrace* ReportStackTrace1() const {
    MOZ_ASSERT(gOptions->IsDarkMatterMode());
    return mReportStackTrace_mReportedOnAlloc[0].Ptr();
  }

  const StackTrace* ReportStackTrace2() const {
    MOZ_ASSERT(gOptions->IsDarkMatterMode());
    return mReportStackTrace_mReportedOnAlloc[1].Ptr();
  }

  bool ReportedOnAlloc1() const {
    MOZ_ASSERT(gOptions->IsDarkMatterMode());
    return mReportStackTrace_mReportedOnAlloc[0].Tag();
  }

  bool ReportedOnAlloc2() const {
    MOZ_ASSERT(gOptions->IsDarkMatterMode());
    return mReportStackTrace_mReportedOnAlloc[1].Tag();
  }

  void AddStackTracesToTable(StackTraceSet& aStackTraces) const {
    if (AllocStackTrace()) {
      MOZ_ALWAYS_TRUE(aStackTraces.put(AllocStackTrace()));
    }
    if (gOptions->IsDarkMatterMode()) {
      if (ReportStackTrace1()) {
        MOZ_ALWAYS_TRUE(aStackTraces.put(ReportStackTrace1()));
      }
      if (ReportStackTrace2()) {
        MOZ_ALWAYS_TRUE(aStackTraces.put(ReportStackTrace2()));
      }
    }
  }

  uint32_t NumReports() const {
    MOZ_ASSERT(gOptions->IsDarkMatterMode());
    if (ReportStackTrace2()) {
      MOZ_ASSERT(ReportStackTrace1());
      return 2;
    }
    if (ReportStackTrace1()) {
      return 1;
    }
    return 0;
  }

  // This is |const| thanks to the |mutable| fields above.
  void Report(Thread* aT, bool aReportedOnAlloc) const {
    MOZ_ASSERT(gOptions->IsDarkMatterMode());
    // We don't bother recording reports after the 2nd one.
    uint32_t numReports = NumReports();
    if (numReports < 2) {
      mReportStackTrace_mReportedOnAlloc[numReports].Set(StackTrace::Get(aT),
                                                         aReportedOnAlloc);
    }
  }

  void UnreportIfNotReportedOnAlloc() const {
    MOZ_ASSERT(gOptions->IsDarkMatterMode());
    if (!ReportedOnAlloc1() && !ReportedOnAlloc2()) {
      mReportStackTrace_mReportedOnAlloc[0].Set(nullptr, 0);
      mReportStackTrace_mReportedOnAlloc[1].Set(nullptr, 0);

    } else if (!ReportedOnAlloc1() && ReportedOnAlloc2()) {
      // Shift the 2nd report down to the 1st one.
      mReportStackTrace_mReportedOnAlloc[0] =
          mReportStackTrace_mReportedOnAlloc[1];
      mReportStackTrace_mReportedOnAlloc[1].Set(nullptr, 0);

    } else if (ReportedOnAlloc1() && !ReportedOnAlloc2()) {
      mReportStackTrace_mReportedOnAlloc[1].Set(nullptr, 0);
    }
  }

  // Hash policy.

  typedef const void* Lookup;

  static mozilla::HashNumber hash(const void* const& aPtr) {
    return mozilla::HashGeneric(aPtr);
  }

  static bool match(const LiveBlock& aB, const void* const& aPtr) {
    return aB.mPtr == aPtr;
  }
};

// A table of live blocks where the lookup key is the block address.
typedef mozilla::HashSet<LiveBlock, LiveBlock, InfallibleAllocPolicy>
    LiveBlockTable;
static LiveBlockTable* gLiveBlockTable = nullptr;

class AggregatedLiveBlockHashPolicy {
 public:
  typedef const LiveBlock* const Lookup;

  static mozilla::HashNumber hash(const LiveBlock* const& aB) {
    return gOptions->IsDarkMatterMode()
               ? mozilla::HashGeneric(
                     aB->ReqSize(), aB->SlopSize(), aB->AllocStackTrace(),
                     aB->ReportedOnAlloc1(), aB->ReportedOnAlloc2())
               : mozilla::HashGeneric(aB->ReqSize(), aB->SlopSize(),
                                      aB->AllocStackTrace());
  }

  static bool match(const LiveBlock* const& aA, const LiveBlock* const& aB) {
    return gOptions->IsDarkMatterMode()
               ? aA->ReqSize() == aB->ReqSize() &&
                     aA->SlopSize() == aB->SlopSize() &&
                     aA->AllocStackTrace() == aB->AllocStackTrace() &&
                     aA->ReportStackTrace1() == aB->ReportStackTrace1() &&
                     aA->ReportStackTrace2() == aB->ReportStackTrace2()
               : aA->ReqSize() == aB->ReqSize() &&
                     aA->SlopSize() == aB->SlopSize() &&
                     aA->AllocStackTrace() == aB->AllocStackTrace();
  }
};

// A table of live blocks where the lookup key is everything but the block
// address. For aggregating similar live blocks at output time.
typedef mozilla::HashMap<const LiveBlock*, size_t,
                         AggregatedLiveBlockHashPolicy, InfallibleAllocPolicy>
    AggregatedLiveBlockTable;

// A freed heap block.
class DeadBlock {
  const size_t mReqSize;   // size requested
  const size_t mSlopSize;  // slop above size requested

  // The stack trace where this block was allocated.
  const StackTrace* const mAllocStackTrace;

 public:
  DeadBlock() : mReqSize(0), mSlopSize(0), mAllocStackTrace(nullptr) {}

  explicit DeadBlock(const LiveBlock& aLb)
      : mReqSize(aLb.ReqSize()),
        mSlopSize(aLb.SlopSize()),
        mAllocStackTrace(aLb.AllocStackTrace()) {}

  ~DeadBlock() {}

  size_t ReqSize() const { return mReqSize; }
  size_t SlopSize() const { return mSlopSize; }

  const StackTrace* AllocStackTrace() const { return mAllocStackTrace; }

  void AddStackTracesToTable(StackTraceSet& aStackTraces) const {
    if (AllocStackTrace()) {
      MOZ_ALWAYS_TRUE(aStackTraces.put(AllocStackTrace()));
    }
  }

  // Hash policy.

  typedef DeadBlock Lookup;

  static mozilla::HashNumber hash(const DeadBlock& aB) {
    return mozilla::HashGeneric(aB.ReqSize(), aB.SlopSize(),
                                aB.AllocStackTrace());
  }

  static bool match(const DeadBlock& aA, const DeadBlock& aB) {
    return aA.ReqSize() == aB.ReqSize() && aA.SlopSize() == aB.SlopSize() &&
           aA.AllocStackTrace() == aB.AllocStackTrace();
  }
};

// For each unique DeadBlock value we store a count of how many actual dead
// blocks have that value.
typedef mozilla::HashMap<DeadBlock, size_t, DeadBlock, InfallibleAllocPolicy>
    DeadBlockTable;
static DeadBlockTable* gDeadBlockTable = nullptr;

// Add the dead block to the dead block table, if that's appropriate.
void MaybeAddToDeadBlockTable(const DeadBlock& aDb) {
  if (gOptions->IsCumulativeMode() && aDb.AllocStackTrace()) {
    AutoLockState lock;
    if (DeadBlockTable::AddPtr p = gDeadBlockTable->lookupForAdd(aDb)) {
      p->value() += 1;
    } else {
      MOZ_ALWAYS_TRUE(gDeadBlockTable->add(p, aDb, 1));
    }
  }
}

// Add a pointer to each live stack trace into the given StackTraceSet.  (A
// stack trace is live if it's used by one of the live blocks.)
static void GatherUsedStackTraces(StackTraceSet& aStackTraces) {
  MOZ_ASSERT(gStateLock->IsLocked());
  MOZ_ASSERT(Thread::Fetch()->InterceptsAreBlocked());

  aStackTraces.clear();
  MOZ_ALWAYS_TRUE(aStackTraces.reserve(512));

  for (auto iter = gLiveBlockTable->iter(); !iter.done(); iter.next()) {
    iter.get().AddStackTracesToTable(aStackTraces);
  }

  for (auto iter = gDeadBlockTable->iter(); !iter.done(); iter.next()) {
    iter.get().key().AddStackTracesToTable(aStackTraces);
  }
}

// Delete stack traces that we aren't using, and compact our hashtable.
static void GCStackTraces() {
  MOZ_ASSERT(gStateLock->IsLocked());
  MOZ_ASSERT(Thread::Fetch()->InterceptsAreBlocked());

  StackTraceSet usedStackTraces;
  GatherUsedStackTraces(usedStackTraces);

  // Delete all unused stack traces from gStackTraceTable.  The ModIterator
  // destructor will automatically rehash and compact the table.
  for (auto iter = gStackTraceTable->modIter(); !iter.done(); iter.next()) {
    StackTrace* const& st = iter.get();
    if (!usedStackTraces.has(st)) {
      iter.remove();
      InfallibleAllocPolicy::delete_(st);
    }
  }

  // Schedule a GC when we have twice as many stack traces as we had right
  // after this GC finished.
  gGCStackTraceTableWhenSizeExceeds = 2 * gStackTraceTable->count();
}

//---------------------------------------------------------------------------
// malloc/free callbacks
//---------------------------------------------------------------------------

static FastBernoulliTrial* gBernoulli;

// In testing, a probability of 0.003 resulted in ~25% of heap blocks getting
// a stack trace and ~80% of heap bytes getting a stack trace. (This is
// possible because big heap blocks are more likely to get a stack trace.)
//
// We deliberately choose not to give the user control over this probability
// (other than effectively setting it to 1 via --stacks=full) because it's
// quite inscrutable and generally the user just wants "faster and imprecise"
// or "slower and precise".
//
// The random number seeds are arbitrary and were obtained from random.org. If
// you change them you'll need to change the tests as well, because their
// expected output is based on the particular sequence of trial results that we
// get with these seeds.
static void ResetBernoulli() {
  new (gBernoulli)
      FastBernoulliTrial(0.003, 0x8e26eeee166bc8ca, 0x56820f304a9c9ae0);
}
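
// A note on how the trial is used (see AllocCallback below): for a block with
// |n| usable bytes, gBernoulli->trial(n) behaves like |n| single per-byte
// trials, succeeding if any of them succeed, which is why bigger blocks are
// more likely to get a stack trace. (A sketch of the semantics; see
// mfbt/FastBernoulliTrial.h for the authoritative description.)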

static void AllocCallback(void* aPtr, size_t aReqSize, Thread* aT) {
  if (!aPtr) {
    return;
  }

  AutoLockState lock;
  AutoBlockIntercepts block(aT);

  size_t actualSize = gMallocTable.malloc_usable_size(aPtr);

  // We may or may not record the allocation stack trace, depending on the
  // options and the outcome of a Bernoulli trial.
  bool getTrace = gOptions->DoFullStacks() || gBernoulli->trial(actualSize);
  LiveBlock b(aPtr, aReqSize, getTrace ? StackTrace::Get(aT) : nullptr);
  LiveBlockTable::AddPtr p = gLiveBlockTable->lookupForAdd(aPtr);
  if (!p) {
    // Most common case: there wasn't a record already.
    MOZ_ALWAYS_TRUE(gLiveBlockTable->add(p, b));
  } else {
    // Edge-case: there was a record for the same address. We'll assume the
    // allocator is not giving out a pointer to an existing allocation, so
    // this means the previously recorded allocation was freed while we were
    // blocking interceptions. This can happen while processing the data in
    // e.g. AnalyzeImpl.
    if (gOptions->IsCumulativeMode()) {
      // Copy it out so it can be added to the dead block list later.
      DeadBlock db(*p);
      MaybeAddToDeadBlockTable(db);
    }
    gLiveBlockTable->remove(p);
    MOZ_ALWAYS_TRUE(gLiveBlockTable->putNew(aPtr, b));
  }
}

static void FreeCallback(void* aPtr, Thread* aT, DeadBlock* aDeadBlock) {
  if (!aPtr) {
    return;
  }

  AutoLockState lock;
  AutoBlockIntercepts block(aT);

  if (LiveBlockTable::Ptr lb = gLiveBlockTable->lookup(aPtr)) {
    if (gOptions->IsCumulativeMode()) {
      // Copy it out so it can be added to the dead block list later.
      new (aDeadBlock) DeadBlock(*lb);
    }
    gLiveBlockTable->remove(lb);
  } else {
    // We have no record of the block. It must be a bogus pointer, or one that
    // DMD wasn't able to see allocated. This should be extremely rare.
  }

  if (gStackTraceTable->count() > gGCStackTraceTableWhenSizeExceeds) {
    GCStackTraces();
  }
}

//---------------------------------------------------------------------------
// malloc/free interception
//---------------------------------------------------------------------------

static bool Init(malloc_table_t* aMallocTable);

}  // namespace dmd
}  // namespace mozilla

static void* replace_malloc(size_t aSize) {
  using namespace mozilla::dmd;

  Thread* t = Thread::Fetch();
  if (t->InterceptsAreBlocked()) {
    // Intercepts are blocked, which means this must be a call to malloc
    // triggered indirectly by DMD (e.g. via MozStackWalk).  Be infallible.
    return InfallibleAllocPolicy::malloc_(aSize);
  }

  // This must be a call to malloc from outside DMD.  Intercept it.
  void* ptr = gMallocTable.malloc(aSize);
  AllocCallback(ptr, aSize, t);
  return ptr;
}

static void* replace_calloc(size_t aCount, size_t aSize) {
  using namespace mozilla::dmd;

  Thread* t = Thread::Fetch();
  if (t->InterceptsAreBlocked()) {
    return InfallibleAllocPolicy::calloc_(aCount, aSize);
  }

  // |aCount * aSize| could overflow, but if that happens then
  // |gMallocTable.calloc()| will return nullptr and |AllocCallback()| will
  // return immediately without using the overflowed value.
  void* ptr = gMallocTable.calloc(aCount, aSize);
  AllocCallback(ptr, aCount * aSize, t);
  return ptr;
}

static void* replace_realloc(void* aOldPtr, size_t aSize) {
  using namespace mozilla::dmd;

  Thread* t = Thread::Fetch();
  if (t->InterceptsAreBlocked()) {
    return InfallibleAllocPolicy::realloc_(aOldPtr, aSize);
  }

  // If |aOldPtr| is nullptr, the call is equivalent to |malloc(aSize)|.
  if (!aOldPtr) {
    return replace_malloc(aSize);
  }

  // Be very careful here!  Must remove the block from the table before doing
  // the realloc to avoid races, just like in replace_free().
  // Nb: This does an unnecessary hashtable remove+add if the block doesn't
  // move, but doing better isn't worth the effort.
  DeadBlock db;
  FreeCallback(aOldPtr, t, &db);
  void* ptr = gMallocTable.realloc(aOldPtr, aSize);
  if (ptr) {
    AllocCallback(ptr, aSize, t);
    MaybeAddToDeadBlockTable(db);
  } else {
    // If realloc fails, we undo the prior operations by re-inserting the old
    // pointer into the live block table. We don't have to do anything with the
    // dead block list because the dead block hasn't yet been inserted. The
    // block will end up looking like it was allocated for the first time here,
    // which is untrue, and the slop bytes will be zero, which may be untrue.
    // But this case is rare and doing better isn't worth the effort.
    AllocCallback(aOldPtr, gMallocTable.malloc_usable_size(aOldPtr), t);
  }
  return ptr;
}

static void* replace_memalign(size_t aAlignment, size_t aSize) {
  using namespace mozilla::dmd;

  Thread* t = Thread::Fetch();
  if (t->InterceptsAreBlocked()) {
    return InfallibleAllocPolicy::memalign_(aAlignment, aSize);
  }

  void* ptr = gMallocTable.memalign(aAlignment, aSize);
  AllocCallback(ptr, aSize, t);
  return ptr;
}

static void replace_free(void* aPtr) {
  using namespace mozilla::dmd;

  Thread* t = Thread::Fetch();
  if (t->InterceptsAreBlocked()) {
    return InfallibleAllocPolicy::free_(aPtr);
  }

  // Do the actual free after updating the table.  Otherwise, another thread
  // could call malloc and get the freed block and update the table, and then
  // our update here would remove the newly-malloc'd block.
  DeadBlock db;
  FreeCallback(aPtr, t, &db);
  MaybeAddToDeadBlockTable(db);
  gMallocTable.free(aPtr);
}

void replace_init(malloc_table_t* aMallocTable, ReplaceMallocBridge** aBridge) {
  if (mozilla::dmd::Init(aMallocTable)) {
#define MALLOC_FUNCS MALLOC_FUNCS_MALLOC_BASE
#define MALLOC_DECL(name, ...) aMallocTable->name = replace_##name;
#include "malloc_decls.h"
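    // The #include above expands MALLOC_DECL once per base allocation
    // function; e.g. (a sketch of the expansion):
    //   aMallocTable->malloc = replace_malloc;
    //   aMallocTable->free = replace_free;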
    *aBridge = mozilla::dmd::gDMDBridge;
  }
}

namespace mozilla {
namespace dmd {

//---------------------------------------------------------------------------
// Options (Part 2)
//---------------------------------------------------------------------------

// Given an |aOptionName| like "foo", succeed if |aArg| has the form "foo=blah"
// (where "blah" is non-empty) and return the pointer to "blah".  |aArg| can
// have leading space chars (but not other whitespace).
const char* Options::ValueIfMatch(const char* aArg, const char* aOptionName) {
  MOZ_ASSERT(!isspace(*aArg));  // any leading whitespace should not remain
  size_t optionLen = strlen(aOptionName);
  if (strncmp(aArg, aOptionName, optionLen) == 0 && aArg[optionLen] == '=' &&
      aArg[optionLen + 1]) {
    return aArg + optionLen + 1;
  }
  return nullptr;
}

// Extracts a |long| value for an option from an argument.  It must be within
// the range |aMin..aMax| (inclusive).
bool Options::GetLong(const char* aArg, const char* aOptionName, long aMin,
                      long aMax, long* aValue) {
  if (const char* optionValue = ValueIfMatch(aArg, aOptionName)) {
    char* endPtr;
    *aValue = strtol(optionValue, &endPtr, /* base */ 10);
    if (!*endPtr && aMin <= *aValue && *aValue <= aMax && *aValue != LONG_MIN &&
        *aValue != LONG_MAX) {
      return true;
    }
  }
  return false;
}

// Extracts a |bool| value for an option -- encoded as "yes" or "no" -- from an
// argument.
bool Options::GetBool(const char* aArg, const char* aOptionName, bool* aValue) {
  if (const char* optionValue = ValueIfMatch(aArg, aOptionName)) {
    if (strcmp(optionValue, "yes") == 0) {
      *aValue = true;
      return true;
    }
    if (strcmp(optionValue, "no") == 0) {
      *aValue = false;
      return true;
    }
  }
  return false;
}

Options::Options(const char* aDMDEnvVar)
    : mDMDEnvVar(aDMDEnvVar ? InfallibleAllocPolicy::strdup_(aDMDEnvVar)
                            : nullptr),
      mMode(Mode::DarkMatter),
      mStacks(Stacks::Partial),
      mShowDumpStats(false) {
  char* e = mDMDEnvVar;
  if (e && strcmp(e, "1") != 0) {
    bool isEnd = false;
    while (!isEnd) {
      // Consume leading whitespace.
      while (isspace(*e)) {
        e++;
      }

      // Save the start of the arg.
      const char* arg = e;

      // Find the first char after the arg, and temporarily change it to '\0'
      // to isolate the arg.
      while (!isspace(*e) && *e != '\0') {
        e++;
      }
      char replacedChar = *e;
      isEnd = replacedChar == '\0';
      *e = '\0';

      // Handle arg
      bool myBool;
      if (strcmp(arg, "--mode=live") == 0) {
        mMode = Mode::Live;
      } else if (strcmp(arg, "--mode=dark-matter") == 0) {
        mMode = Mode::DarkMatter;
      } else if (strcmp(arg, "--mode=cumulative") == 0) {
        mMode = Mode::Cumulative;
      } else if (strcmp(arg, "--mode=scan") == 0) {
        mMode = Mode::Scan;

      } else if (strcmp(arg, "--stacks=full") == 0) {
        mStacks = Stacks::Full;
      } else if (strcmp(arg, "--stacks=partial") == 0) {
        mStacks = Stacks::Partial;

      } else if (GetBool(arg, "--show-dump-stats", &myBool)) {
        mShowDumpStats = myBool;

      } else if (strcmp(arg, "") == 0) {
        // This can only happen if there is trailing whitespace.  Ignore.
        MOZ_ASSERT(isEnd);

      } else {
        BadArg(arg);
      }

      // Undo the temporary isolation.
      *e = replacedChar;
    }
  }

  if (mMode == Mode::Scan) {
    mStacks = Stacks::Full;
  }
}

void Options::BadArg(const char* aArg) {
  StatusMsg("\n");
  StatusMsg("Bad entry in the $DMD environment variable: '%s'.\n", aArg);
  StatusMsg("See the output of |mach help run| for the allowed options.\n");
  exit(1);
}

const char* Options::ModeString() const {
  switch (mMode) {
    case Mode::Live:
      return "live";
    case Mode::DarkMatter:
      return "dark-matter";
    case Mode::Cumulative:
      return "cumulative";
    case Mode::Scan:
      return "scan";
    default:
      MOZ_ASSERT(false);
      return "(unknown DMD mode)";
  }
}

//---------------------------------------------------------------------------
// DMD start-up
//---------------------------------------------------------------------------

#ifndef XP_WIN
static void prefork() {
  if (gStateLock) {
    gStateLock->Lock();
  }
}

static void postfork() {
  if (gStateLock) {
    gStateLock->Unlock();
  }
}
#endif

// WARNING: this function runs *very* early -- before all static initializers
// have run.  For this reason, non-scalar globals such as gStateLock and
// gStackTraceTable are allocated dynamically (so we can guarantee their
// construction in this function) rather than statically.
static bool Init(malloc_table_t* aMallocTable) {
  // DMD is controlled by the |DMD| environment variable.
  const char* e = getenv("DMD");

  if (!e) {
    return false;
  }
  // Initialize the function table first, because StatusMsg uses
  // InfallibleAllocPolicy::malloc_, which uses it.
  gMallocTable = *aMallocTable;

  StatusMsg("$DMD = '%s'\n", e);

  gDMDBridge = InfallibleAllocPolicy::new_<DMDBridge>();

#ifndef XP_WIN
  // Avoid deadlocks when forking by acquiring our state lock prior to forking
  // and releasing it after forking. See |LogAlloc|'s |replace_init| for
  // in-depth details.
  //
  // Note: This must run after attempting an allocation so as to give the
  // system malloc a chance to insert its own atfork handler.
  pthread_atfork(prefork, postfork, postfork);
#endif
  // Parse $DMD env var.
  gOptions = InfallibleAllocPolicy::new_<Options>(e);

  gStateLock = InfallibleAllocPolicy::new_<Mutex>();

  gBernoulli = (FastBernoulliTrial*)InfallibleAllocPolicy::malloc_(
      sizeof(FastBernoulliTrial));
  ResetBernoulli();

  Thread::Init();

  {
    AutoLockState lock;

    gStackTraceTable = InfallibleAllocPolicy::new_<StackTraceTable>(8192);
    gLiveBlockTable = InfallibleAllocPolicy::new_<LiveBlockTable>(8192);

    // Create this even if the mode isn't Cumulative (albeit with a small
    // size), in case the mode is changed later on (as is done by SmokeDMD.cpp,
    // for example).
    size_t tableSize = gOptions->IsCumulativeMode() ? 8192 : 4;
    gDeadBlockTable = InfallibleAllocPolicy::new_<DeadBlockTable>(tableSize);
  }

  return true;
}

//---------------------------------------------------------------------------
// Block reporting and unreporting
//---------------------------------------------------------------------------

static void ReportHelper(const void* aPtr, bool aReportedOnAlloc) {
  if (!gOptions->IsDarkMatterMode() || !aPtr) {
    return;
  }

  Thread* t = Thread::Fetch();

  AutoBlockIntercepts block(t);
  AutoLockState lock;

  if (LiveBlockTable::Ptr p = gLiveBlockTable->lookup(aPtr)) {
    p->Report(t, aReportedOnAlloc);
  } else {
    // We have no record of the block. It must be a bogus pointer. This should
    // be extremely rare because Report() is almost always called in
    // conjunction with a malloc_size_of-style function. Print a message so
    // that we get some feedback.
    StatusMsg("Unknown pointer %p\n", aPtr);
  }
}

void DMDFuncs::Report(const void* aPtr) {
  ReportHelper(aPtr, /* onAlloc */ false);
}

void DMDFuncs::ReportOnAlloc(const void* aPtr) {
  ReportHelper(aPtr, /* onAlloc */ true);
}

//---------------------------------------------------------------------------
// DMD output
//---------------------------------------------------------------------------

// The version number of the output format. Increment this if you make
// backwards-incompatible changes to the format. See DMD.h for the version
// history.
static const int kOutputVersionNumber = 5;

// Note that, unlike most SizeOf* functions, this function does not take a
// |mozilla::MallocSizeOf| argument.  That's because those arguments are
// primarily to help DMD track heap blocks... but DMD deliberately doesn't
// track heap blocks it allocated for itself!
//
// SizeOfInternal should be called while you're holding the state lock and
// while intercepts are blocked; SizeOf acquires the lock and blocks
// intercepts.
1450 
SizeOfInternal(Sizes * aSizes)1451 static void SizeOfInternal(Sizes* aSizes) {
1452   MOZ_ASSERT(gStateLock->IsLocked());
1453   MOZ_ASSERT(Thread::Fetch()->InterceptsAreBlocked());
1454 
1455   aSizes->Clear();
1456 
1457   StackTraceSet usedStackTraces;
1458   GatherUsedStackTraces(usedStackTraces);
1459 
1460   for (auto iter = gStackTraceTable->iter(); !iter.done(); iter.next()) {
1461     StackTrace* const& st = iter.get();
1462 
1463     if (usedStackTraces.has(st)) {
1464       aSizes->mStackTracesUsed += MallocSizeOf(st);
1465     } else {
1466       aSizes->mStackTracesUnused += MallocSizeOf(st);
1467     }
1468   }
1469 
1470   aSizes->mStackTraceTable =
1471       gStackTraceTable->shallowSizeOfIncludingThis(MallocSizeOf);
1472 
1473   aSizes->mLiveBlockTable =
1474       gLiveBlockTable->shallowSizeOfIncludingThis(MallocSizeOf);
1475 
1476   aSizes->mDeadBlockTable =
1477       gDeadBlockTable->shallowSizeOfIncludingThis(MallocSizeOf);
1478 }
1479 
void DMDFuncs::SizeOf(Sizes* aSizes) {
  aSizes->Clear();

  AutoBlockIntercepts block(Thread::Fetch());
  AutoLockState lock;
  SizeOfInternal(aSizes);
}

void DMDFuncs::ClearReports() {
  if (!gOptions->IsDarkMatterMode()) {
    return;
  }

  AutoLockState lock;

  // Unreport all blocks that were marked reported by a memory reporter. This
  // excludes those that were reported on allocation, because they need to keep
  // their reported marking.
  for (auto iter = gLiveBlockTable->iter(); !iter.done(); iter.next()) {
    iter.get().UnreportIfNotReportedOnAlloc();
  }
}

class ToIdStringConverter final {
 public:
  ToIdStringConverter() : mIdMap(512), mNextId(0) {}

  // Converts a pointer to a unique ID. Reuses the existing ID for the pointer
  // if it's been seen before.
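  // For example, the first three distinct pointers passed in are mapped to
  // "A", "B" and "C" (see Base32() below); passing the first pointer again
  // returns "A".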
  const char* ToIdString(const void* aPtr) {
    uint32_t id;
    PointerIdMap::AddPtr p = mIdMap.lookupForAdd(aPtr);
    if (!p) {
      id = mNextId++;
      MOZ_ALWAYS_TRUE(mIdMap.add(p, aPtr, id));
    } else {
      id = p->value();
    }
    return Base32(id);
  }

  size_t sizeOfExcludingThis(mozilla::MallocSizeOf aMallocSizeOf) const {
    return mIdMap.shallowSizeOfExcludingThis(aMallocSizeOf);
  }

 private:
  // This function converts an integer to base-32. We use base-32 values for
  // indexing into the traceTable and the frameTable, for the following
  // reasons.
  //
  // - Base-32 gives more compact indices than base-16.
  //
  // - 32 is a power of two, which makes the necessary div/mod calculations
  //   fast.
  //
  // - We can (and do) choose non-numeric digits for base-32. When
  //   inspecting/debugging the JSON output, non-numeric indices are easier to
  //   search for than numeric indices.
  //
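  // For example, with the digit set below: Base32(0) == "A",
  // Base32(31) == "f", Base32(32) == "BA", and
  // Base32(0xffffffff) == "Dffffff".
  //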
  char* Base32(uint32_t aN) {
    static const char digits[] = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdef";

    char* b = mIdBuf + kIdBufLen - 1;
    *b = '\0';
    do {
      b--;
      if (b == mIdBuf) {
        MOZ_CRASH("Base32 buffer too small");
      }
      *b = digits[aN % 32];
      aN /= 32;
    } while (aN);

    return b;
  }

  PointerIdMap mIdMap;
  uint32_t mNextId;

  // |mIdBuf| must have space for at least eight chars, which is the space
  // needed to hold 'Dffffff' (including the terminating null char), which is
  // the base-32 representation of 0xffffffff.
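  // (Each base-32 digit encodes five bits, so a 32-bit value needs at most
  // ceil(32/5) = 7 digits, plus one char for the terminating null.)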
  static const size_t kIdBufLen = 16;
  char mIdBuf[kIdBufLen];
};

// Helper class for converting a pointer value to a string.
class ToStringConverter {
 public:
  const char* ToPtrString(const void* aPtr) {
    snprintf(kPtrBuf, sizeof(kPtrBuf) - 1, "%" PRIxPTR, (uintptr_t)aPtr);
    return kPtrBuf;
  }

 private:
  char kPtrBuf[32];
};

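// Writes the block's payload as a "contents" array: one hex string per
// pointer-sized word, so the output can later be scanned for values that
// look like heap pointers. Only called in scan mode.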
static void WriteBlockContents(JSONWriter& aWriter, const LiveBlock& aBlock) {
  size_t numWords = aBlock.ReqSize() / sizeof(uintptr_t*);
  if (numWords == 0) {
    return;
  }

  aWriter.StartArrayProperty("contents", aWriter.SingleLineStyle);
  {
    const uintptr_t** block = (const uintptr_t**)aBlock.Address();
    ToStringConverter sc;
    for (size_t i = 0; i < numWords; ++i) {
      aWriter.StringElement(MakeStringSpan(sc.ToPtrString(block[i])));
    }
  }
  aWriter.EndArray();
}

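// In outline, the JSON emitted below has the following shape (a sketch with
// values elided; see DMD.h for the authoritative format description):
//
//   {
//     "version": 5,
//     "invocation": { "dmdEnvVar": ..., "mode": ... },
//     "blockList": [ { "req": ..., "slop": ..., "alloc": ..., "num": ... } ],
//     "traceTable": { "A": ["B", "C", ...], ... },
//     "frameTable": { "B": "...location...", ... }
//   }
//
// Scan mode adds "addr" and "contents" properties to each live block entry;
// dark matter mode adds a "reps" array to reported blocks.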
static void AnalyzeImpl(UniquePtr<JSONWriteFunc> aWriter) {
  // Some blocks may have been allocated while creating |aWriter|. Those blocks
  // will be freed at the end of this function when |writer| is destroyed. The
  // allocations will have occurred while intercepts were not blocked, so the
  // frees better be as well, otherwise we'll get assertion failures.
  // Therefore, this declaration must precede the AutoBlockIntercepts
  // declaration, to ensure that |writer| is destroyed *after* intercepts are
  // unblocked.
  JSONWriter writer(std::move(aWriter));

  AutoBlockIntercepts block(Thread::Fetch());
  AutoLockState lock;

  // Allocate this on the heap instead of the stack because it's fairly large.
  auto locService = InfallibleAllocPolicy::new_<CodeAddressService>();

  StackTraceSet usedStackTraces(512);
  PointerSet usedPcs(512);

  size_t iscSize;

  static int analysisCount = 1;
  StatusMsg("Dump %d {\n", analysisCount++);

  writer.Start();
  {
    writer.IntProperty("version", kOutputVersionNumber);

    writer.StartObjectProperty("invocation");
    {
      const char* var = gOptions->DMDEnvVar();
      if (var) {
        writer.StringProperty("dmdEnvVar", MakeStringSpan(var));
      } else {
        writer.NullProperty("dmdEnvVar");
      }

      writer.StringProperty("mode", MakeStringSpan(gOptions->ModeString()));
    }
    writer.EndObject();

    StatusMsg("  Constructing the heap block list...\n");

    ToIdStringConverter isc;
    ToStringConverter sc;

    writer.StartArrayProperty("blockList");
    {
      // Lambda that writes out a live block.
      auto writeLiveBlock = [&](const LiveBlock& aB, size_t aNum) {
        aB.AddStackTracesToTable(usedStackTraces);

        MOZ_ASSERT_IF(gOptions->IsScanMode(), aNum == 1);

        writer.StartObjectElement(writer.SingleLineStyle);
        {
          if (gOptions->IsScanMode()) {
            writer.StringProperty("addr",
                                  MakeStringSpan(sc.ToPtrString(aB.Address())));
            WriteBlockContents(writer, aB);
          }
          writer.IntProperty("req", aB.ReqSize());
          if (aB.SlopSize() > 0) {
            writer.IntProperty("slop", aB.SlopSize());
          }

          if (aB.AllocStackTrace()) {
            writer.StringProperty(
                "alloc", MakeStringSpan(isc.ToIdString(aB.AllocStackTrace())));
          }

          if (gOptions->IsDarkMatterMode() && aB.NumReports() > 0) {
            writer.StartArrayProperty("reps");
            {
              if (aB.ReportStackTrace1()) {
                writer.StringElement(
                    MakeStringSpan(isc.ToIdString(aB.ReportStackTrace1())));
              }
              if (aB.ReportStackTrace2()) {
                writer.StringElement(
                    MakeStringSpan(isc.ToIdString(aB.ReportStackTrace2())));
              }
            }
            writer.EndArray();
          }

          if (aNum > 1) {
            writer.IntProperty("num", aNum);
          }
        }
        writer.EndObject();
      };

      // Live blocks.
      if (!gOptions->IsScanMode()) {
        // At this point we typically have many LiveBlocks that differ only in
        // their address. Aggregate them to reduce the size of the output file.
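        // For example, a thousand blocks that are identical apart from their
        // address collapse into one entry that writeLiveBlock() emits with
        // "num": 1000.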
        AggregatedLiveBlockTable agg(8192);
        for (auto iter = gLiveBlockTable->iter(); !iter.done(); iter.next()) {
          const LiveBlock& b = iter.get();
          b.AddStackTracesToTable(usedStackTraces);

          if (AggregatedLiveBlockTable::AddPtr p = agg.lookupForAdd(&b)) {
            p->value() += 1;
          } else {
            MOZ_ALWAYS_TRUE(agg.add(p, &b, 1));
          }
        }

        // Now iterate over the aggregated table.
        for (auto iter = agg.iter(); !iter.done(); iter.next()) {
          const LiveBlock& b = *iter.get().key();
          size_t num = iter.get().value();
          writeLiveBlock(b, num);
        }

      } else {
        // In scan mode we cannot aggregate because we print each live block's
        // address and contents.
        for (auto iter = gLiveBlockTable->iter(); !iter.done(); iter.next()) {
          const LiveBlock& b = iter.get();
          b.AddStackTracesToTable(usedStackTraces);

          writeLiveBlock(b, 1);
        }
      }

      // Dead blocks.
      for (auto iter = gDeadBlockTable->iter(); !iter.done(); iter.next()) {
        const DeadBlock& b = iter.get().key();
        b.AddStackTracesToTable(usedStackTraces);

        size_t num = iter.get().value();
        MOZ_ASSERT(num > 0);

        writer.StartObjectElement(writer.SingleLineStyle);
        {
          writer.IntProperty("req", b.ReqSize());
          if (b.SlopSize() > 0) {
            writer.IntProperty("slop", b.SlopSize());
          }
          if (b.AllocStackTrace()) {
            writer.StringProperty(
                "alloc", MakeStringSpan(isc.ToIdString(b.AllocStackTrace())));
          }

          if (num > 1) {
            writer.IntProperty("num", num);
          }
        }
        writer.EndObject();
      }
    }
    writer.EndArray();

    StatusMsg("  Constructing the stack trace table...\n");

    writer.StartObjectProperty("traceTable");
    {
      for (auto iter = usedStackTraces.iter(); !iter.done(); iter.next()) {
        const StackTrace* const st = iter.get();
        writer.StartArrayProperty(MakeStringSpan(isc.ToIdString(st)),
                                  writer.SingleLineStyle);
        {
          for (uint32_t i = 0; i < st->Length(); i++) {
            const void* pc = st->Pc(i);
            writer.StringElement(MakeStringSpan(isc.ToIdString(pc)));
            MOZ_ALWAYS_TRUE(usedPcs.put(pc));
          }
        }
        writer.EndArray();
      }
    }
    writer.EndObject();

    StatusMsg("  Constructing the stack frame table...\n");

    writer.StartObjectProperty("frameTable");
    {
      static const size_t locBufLen = 1024;
      char locBuf[locBufLen];

      for (auto iter = usedPcs.iter(); !iter.done(); iter.next()) {
        const void* const pc = iter.get();

        // Use 0 for the frame number. See the JSON format description comment
        // in DMD.h to understand why.
        locService->GetLocation(0, pc, locBuf, locBufLen);
        writer.StringProperty(MakeStringSpan(isc.ToIdString(pc)),
                              MakeStringSpan(locBuf));
      }
    }
    writer.EndObject();

    iscSize = isc.sizeOfExcludingThis(MallocSizeOf);
  }
  writer.End();

  if (gOptions->ShowDumpStats()) {
    Sizes sizes;
    SizeOfInternal(&sizes);

    static const size_t kBufLen = 64;
    char buf1[kBufLen];
    char buf2[kBufLen];
    char buf3[kBufLen];

    StatusMsg("  Execution measurements {\n");

    StatusMsg("    Data structures that persist after Dump() ends {\n");

    StatusMsg("      Used stack traces:    %10s bytes\n",
              Show(sizes.mStackTracesUsed, buf1, kBufLen));

    StatusMsg("      Unused stack traces:  %10s bytes\n",
              Show(sizes.mStackTracesUnused, buf1, kBufLen));

    StatusMsg("      Stack trace table:    %10s bytes (%s entries, %s used)\n",
              Show(sizes.mStackTraceTable, buf1, kBufLen),
              Show(gStackTraceTable->capacity(), buf2, kBufLen),
              Show(gStackTraceTable->count(), buf3, kBufLen));

    StatusMsg("      Live block table:     %10s bytes (%s entries, %s used)\n",
              Show(sizes.mLiveBlockTable, buf1, kBufLen),
              Show(gLiveBlockTable->capacity(), buf2, kBufLen),
              Show(gLiveBlockTable->count(), buf3, kBufLen));

    StatusMsg("      Dead block table:     %10s bytes (%s entries, %s used)\n",
              Show(sizes.mDeadBlockTable, buf1, kBufLen),
              Show(gDeadBlockTable->capacity(), buf2, kBufLen),
              Show(gDeadBlockTable->count(), buf3, kBufLen));

    StatusMsg("    }\n");
    StatusMsg("    Data structures that are destroyed after Dump() ends {\n");

    StatusMsg(
        "      Location service:      %10s bytes\n",
        Show(locService->SizeOfIncludingThis(MallocSizeOf), buf1, kBufLen));
    StatusMsg("      Used stack traces set: %10s bytes\n",
              Show(usedStackTraces.shallowSizeOfExcludingThis(MallocSizeOf),
                   buf1, kBufLen));
    StatusMsg(
        "      Used PCs set:          %10s bytes\n",
        Show(usedPcs.shallowSizeOfExcludingThis(MallocSizeOf), buf1, kBufLen));
    StatusMsg("      Pointer ID map:        %10s bytes\n",
              Show(iscSize, buf1, kBufLen));

    StatusMsg("    }\n");
    StatusMsg("    Counts {\n");

    size_t hits = locService->NumCacheHits();
    size_t misses = locService->NumCacheMisses();
    size_t requests = hits + misses;
    StatusMsg("      Location service:    %10s requests\n",
              Show(requests, buf1, kBufLen));

    size_t count = locService->CacheCount();
    size_t capacity = locService->CacheCapacity();
    StatusMsg(
        "      Location service cache:  "
        "%4.1f%% hit rate, %.1f%% occupancy at end\n",
        Percent(hits, requests), Percent(count, capacity));

    StatusMsg("    }\n");
    StatusMsg("  }\n");
  }

  InfallibleAllocPolicy::delete_(locService);

  StatusMsg("}\n");
}

void DMDFuncs::Analyze(UniquePtr<JSONWriteFunc> aWriter) {
  AnalyzeImpl(std::move(aWriter));
  ClearReports();
}

//---------------------------------------------------------------------------
// Testing
//---------------------------------------------------------------------------

void DMDFuncs::ResetEverything(const char* aOptions) {
  AutoLockState lock;

  // Reset options.
  InfallibleAllocPolicy::delete_(gOptions);
  gOptions = InfallibleAllocPolicy::new_<Options>(aOptions);

  // Clear all existing blocks.
  gLiveBlockTable->clear();
  gDeadBlockTable->clear();

  // Reset gBernoulli to a deterministic state. (Its current state depends on
  // all previous trials.)
  ResetBernoulli();
}

}  // namespace dmd
}  // namespace mozilla