1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
2 /* vim: set ts=8 sts=2 et sw=2 tw=80: */
3 /* This Source Code Form is subject to the terms of the Mozilla Public
4  * License, v. 2.0. If a copy of the MPL was not distributed with this
5  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
6 
7 #include "nsMemoryReporterManager.h"
8 
9 #include "nsAtomTable.h"
10 #include "nsCOMPtr.h"
11 #include "nsCOMArray.h"
12 #include "nsPrintfCString.h"
13 #include "nsProxyRelease.h"
14 #include "nsServiceManagerUtils.h"
15 #include "nsITimer.h"
16 #include "nsThreadUtils.h"
17 #include "nsPIDOMWindow.h"
18 #include "nsIObserverService.h"
19 #include "nsIOService.h"
20 #include "nsIGlobalObject.h"
21 #include "nsIXPConnect.h"
22 #ifdef MOZ_GECKO_PROFILER
23 #  include "GeckoProfilerReporter.h"
24 #endif
25 #if defined(XP_UNIX) || defined(MOZ_DMD)
26 #  include "nsMemoryInfoDumper.h"
27 #endif
28 #include "nsNetCID.h"
29 #include "nsThread.h"
30 #include "VRProcessManager.h"
31 #include "mozilla/Attributes.h"
32 #include "mozilla/MemoryReportingProcess.h"
33 #include "mozilla/PodOperations.h"
34 #include "mozilla/Preferences.h"
35 #include "mozilla/RDDProcessManager.h"
36 #include "mozilla/ResultExtensions.h"
37 #include "mozilla/Services.h"
38 #include "mozilla/Telemetry.h"
39 #include "mozilla/UniquePtrExtensions.h"
40 #include "mozilla/dom/MemoryReportTypes.h"
41 #include "mozilla/dom/ContentParent.h"
42 #include "mozilla/gfx/GPUProcessManager.h"
43 #include "mozilla/ipc/FileDescriptorUtils.h"
44 
45 #ifdef XP_WIN
46 #  include "mozilla/MemoryInfo.h"
47 
48 #  include <process.h>
49 #  ifndef getpid
50 #    define getpid _getpid
51 #  endif
52 #else
53 #  include <unistd.h>
54 #endif
55 
56 using namespace mozilla;
57 using namespace dom;
58 
59 #if defined(MOZ_MEMORY)
60 #  define HAVE_JEMALLOC_STATS 1
61 #  include "mozmemory.h"
62 #endif  // MOZ_MEMORY
63 
64 #if defined(XP_LINUX)
65 
66 #  include "mozilla/MemoryMapping.h"
67 
68 #  include <malloc.h>
69 #  include <string.h>
70 #  include <stdlib.h>
71 
GetProcSelfStatmField(int aField,int64_t * aN)72 [[nodiscard]] static nsresult GetProcSelfStatmField(int aField, int64_t* aN) {
73   // There are more than two fields, but we're only interested in the first
74   // two.
75   static const int MAX_FIELD = 2;
76   size_t fields[MAX_FIELD];
77   MOZ_ASSERT(aField < MAX_FIELD, "bad field number");
78   FILE* f = fopen("/proc/self/statm", "r");
79   if (f) {
80     int nread = fscanf(f, "%zu %zu", &fields[0], &fields[1]);
81     fclose(f);
82     if (nread == MAX_FIELD) {
83       *aN = fields[aField] * getpagesize();
84       return NS_OK;
85     }
86   }
87   return NS_ERROR_FAILURE;
88 }
89 
// Computes the USS (unique set size) of process |aPid| by summing the
// Private_Clean and Private_Dirty figures of every mapping reported by
// GetMemoryMappings() (which reads /proc/<pid>/smaps).
[[nodiscard]] static nsresult GetProcSelfSmapsPrivate(int64_t* aN, pid_t aPid) {
  // You might be tempted to calculate USS by subtracting the "shared" value
  // from the "resident" value in /proc/<pid>/statm. But at least on Linux,
  // statm's "shared" value actually counts pages backed by files, which has
  // little to do with whether the pages are actually shared. /proc/self/smaps
  // on the other hand appears to give us the correct information.

  nsTArray<MemoryMapping> mappings(1024);
  MOZ_TRY(GetMemoryMappings(mappings, aPid));

  int64_t amount = 0;
  for (auto& mapping : mappings) {
    amount += mapping.Private_Clean();
    amount += mapping.Private_Dirty();
  }
  *aN = amount;
  return NS_OK;
}
108 
109 #  define HAVE_VSIZE_AND_RESIDENT_REPORTERS 1
// Virtual address space size, in bytes: field 0 of /proc/self/statm.
[[nodiscard]] static nsresult VsizeDistinguishedAmount(int64_t* aN) {
  return GetProcSelfStatmField(0, aN);
}
113 
// Resident set size (RSS), in bytes: field 1 of /proc/self/statm.
[[nodiscard]] static nsresult ResidentDistinguishedAmount(int64_t* aN) {
  return GetProcSelfStatmField(1, aN);
}
117 
// On Linux the "fast" RSS measurement is identical to the regular one;
// reading statm is already cheap.
[[nodiscard]] static nsresult ResidentFastDistinguishedAmount(int64_t* aN) {
  return ResidentDistinguishedAmount(aN);
}
121 
122 #  define HAVE_RESIDENT_UNIQUE_REPORTER 1
// USS of process |aPid|, or of the current process when aPid == 0 (the
// default; the pid is forwarded to the smaps reader).
[[nodiscard]] static nsresult ResidentUniqueDistinguishedAmount(
    int64_t* aN, pid_t aPid = 0) {
  return GetProcSelfSmapsPrivate(aN, aPid);
}
127 
128 #  ifdef HAVE_MALLINFO
129 #    define HAVE_SYSTEM_HEAP_REPORTER 1
// Size in bytes of the system (glibc) malloc heap, as reported by
// mallinfo(). This is distinct from the jemalloc heap measured elsewhere.
[[nodiscard]] static nsresult SystemHeapSize(int64_t* aSizeOut) {
  struct mallinfo info = mallinfo();

  // The documentation in the glibc man page makes it sound like |uordblks|
  // would suffice, but that only gets the small allocations that are put in
  // the brk heap. We need |hblkhd| as well to get the larger allocations
  // that are mmapped.
  //
  // The fields in |struct mallinfo| are all |int|, <sigh>, so it is
  // unreliable if memory usage gets high. However, the system heap size on
  // Linux should usually be zero (so long as jemalloc is enabled) so that
  // shouldn't be a problem. Nonetheless, cast the |int|s to |size_t| before
  // adding them to provide a small amount of extra overflow protection.
  *aSizeOut = size_t(info.hblkhd) + size_t(info.uordblks);
  return NS_OK;
}
146 #  endif
147 
148 #elif defined(__DragonFly__) || defined(__FreeBSD__) || defined(__NetBSD__) || \
149     defined(__OpenBSD__) || defined(__FreeBSD_kernel__)
150 
151 #  include <sys/param.h>
152 #  include <sys/sysctl.h>
153 #  if defined(__DragonFly__) || defined(__FreeBSD__) || \
154       defined(__FreeBSD_kernel__)
155 #    include <sys/user.h>
156 #  endif
157 
158 #  include <unistd.h>
159 
160 #  if defined(__NetBSD__)
161 #    undef KERN_PROC
162 #    define KERN_PROC KERN_PROC2
163 #    define KINFO_PROC struct kinfo_proc2
164 #  else
165 #    define KINFO_PROC struct kinfo_proc
166 #  endif
167 
168 #  if defined(__DragonFly__)
169 #    define KP_SIZE(kp) (kp.kp_vm_map_size)
170 #    define KP_RSS(kp) (kp.kp_vm_rssize * getpagesize())
171 #  elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
172 #    define KP_SIZE(kp) (kp.ki_size)
173 #    define KP_RSS(kp) (kp.ki_rssize * getpagesize())
174 #  elif defined(__NetBSD__)
175 #    define KP_SIZE(kp) (kp.p_vm_msize * getpagesize())
176 #    define KP_RSS(kp) (kp.p_vm_rssize * getpagesize())
177 #  elif defined(__OpenBSD__)
178 #    define KP_SIZE(kp) \
179       ((kp.p_vm_dsize + kp.p_vm_ssize + kp.p_vm_tsize) * getpagesize())
180 #    define KP_RSS(kp) (kp.p_vm_rssize * getpagesize())
181 #  endif
182 
// Fills |aProc| with the kernel's process record for this process, via the
// CTL_KERN/KERN_PROC/KERN_PROC_PID sysctl.
[[nodiscard]] static nsresult GetKinfoProcSelf(KINFO_PROC* aProc) {
#  if defined(__OpenBSD__) && defined(MOZ_SANDBOX)
  // Under the OpenBSD pledge sandbox this sysctl is unavailable; fail
  // immediately (and log) instead of attempting the call.
  static LazyLogModule sPledgeLog("SandboxPledge");
  MOZ_LOG(sPledgeLog, LogLevel::Debug,
          ("%s called when pledged, returning NS_ERROR_FAILURE\n", __func__));
  return NS_ERROR_FAILURE;
#  endif
  int mib[] = {
    CTL_KERN,
    KERN_PROC,
    KERN_PROC_PID,
    getpid(),
#  if defined(__NetBSD__) || defined(__OpenBSD__)
    // NetBSD/OpenBSD's variant also takes the record size and a count.
    sizeof(KINFO_PROC),
    1,
#  endif
  };
  u_int miblen = sizeof(mib) / sizeof(mib[0]);
  size_t size = sizeof(KINFO_PROC);
  if (sysctl(mib, miblen, aProc, &size, nullptr, 0)) {
    return NS_ERROR_FAILURE;
  }
  return NS_OK;
}
207 
208 #  define HAVE_VSIZE_AND_RESIDENT_REPORTERS 1
// Virtual size in bytes, taken from the kernel's process record
// (KP_SIZE expands to the per-OS field, scaled to bytes where needed).
[[nodiscard]] static nsresult VsizeDistinguishedAmount(int64_t* aN) {
  KINFO_PROC proc;
  nsresult rv = GetKinfoProcSelf(&proc);
  if (NS_SUCCEEDED(rv)) {
    *aN = KP_SIZE(proc);
  }
  return rv;
}
217 
// Resident set size in bytes, taken from the kernel's process record
// (KP_RSS converts the per-OS page count to bytes).
[[nodiscard]] static nsresult ResidentDistinguishedAmount(int64_t* aN) {
  KINFO_PROC proc;
  nsresult rv = GetKinfoProcSelf(&proc);
  if (NS_SUCCEEDED(rv)) {
    *aN = KP_RSS(proc);
  }
  return rv;
}
226 
// On the BSDs the "fast" RSS measurement is the same as the regular one.
[[nodiscard]] static nsresult ResidentFastDistinguishedAmount(int64_t* aN) {
  return ResidentDistinguishedAmount(aN);
}
230 
231 #  ifdef __FreeBSD__
232 #    include <libutil.h>
233 #    include <algorithm>
234 
// Walks this process's VM map via libutil's kinfo_getvmmap() and reports:
//  - |aPrss|: total private resident page count (NOTE: in pages, not bytes);
//  - |aMaxreg|: size in bytes of the largest single region.
// Either out-parameter may be null if the caller doesn't need it.
[[nodiscard]] static nsresult GetKinfoVmentrySelf(int64_t* aPrss,
                                                  uint64_t* aMaxreg) {
  int cnt;
  struct kinfo_vmentry* vmmap;
  struct kinfo_vmentry* kve;
  if (!(vmmap = kinfo_getvmmap(getpid(), &cnt))) {
    return NS_ERROR_FAILURE;
  }
  if (aPrss) {
    *aPrss = 0;
  }
  if (aMaxreg) {
    *aMaxreg = 0;
  }

  for (int i = 0; i < cnt; i++) {
    kve = &vmmap[i];
    if (aPrss) {
      *aPrss += kve->kve_private_resident;
    }
    if (aMaxreg) {
      *aMaxreg = std::max(*aMaxreg, kve->kve_end - kve->kve_start);
    }
  }

  // kinfo_getvmmap() allocates the array; the caller must free it.
  free(vmmap);
  return NS_OK;
}
263 
264 #    define HAVE_PRIVATE_REPORTER 1
// Bytes of memory private to this process (private resident pages * page
// size).
[[nodiscard]] static nsresult PrivateDistinguishedAmount(int64_t* aN) {
  int64_t priv;
  nsresult rv = GetKinfoVmentrySelf(&priv, nullptr);
  NS_ENSURE_SUCCESS(rv, rv);
  // GetKinfoVmentrySelf reports a page count; convert to bytes.
  *aN = priv * getpagesize();
  return NS_OK;
}
272 
273 #    define HAVE_VSIZE_MAX_CONTIGUOUS_REPORTER 1
VsizeMaxContiguousDistinguishedAmount(int64_t * aN)274 [[nodiscard]] static nsresult VsizeMaxContiguousDistinguishedAmount(
275     int64_t* aN) {
276   uint64_t biggestRegion;
277   nsresult rv = GetKinfoVmentrySelf(nullptr, &biggestRegion);
278   if (NS_SUCCEEDED(rv)) {
279     *aN = biggestRegion;
280   }
281   return NS_OK;
282 }
283 #  endif  // FreeBSD
284 
285 #elif defined(SOLARIS)
286 
287 #  include <procfs.h>
288 #  include <fcntl.h>
289 #  include <unistd.h>
290 
// Reads every mapping from /proc/self/xmap and accumulates, in bytes:
//  - aVsize:    total mapped size;
//  - aResident: total resident size;
//  - aShared:   resident size of MA_SHARED mappings only.
// All three are left at -1 if the file cannot be read or memory runs out.
static void XMappingIter(int64_t& aVsize, int64_t& aResident,
                         int64_t& aShared) {
  aVsize = -1;
  aResident = -1;
  aShared = -1;
  int mapfd = open("/proc/self/xmap", O_RDONLY);
  struct stat st;
  prxmap_t* prmapp = nullptr;
  if (mapfd >= 0) {
    if (!fstat(mapfd, &st)) {
      int nmap = st.st_size / sizeof(prxmap_t);
      while (1) {
        // stat(2) on /proc/<pid>/xmap returns an incorrect value,
        // prior to the release of Solaris 11.
        // Here is a workaround for it.
        nmap *= 2;
        prmapp = (prxmap_t*)malloc((nmap + 1) * sizeof(prxmap_t));
        if (!prmapp) {
          // out of memory
          break;
        }
        int n = pread(mapfd, prmapp, (nmap + 1) * sizeof(prxmap_t), 0);
        if (n < 0) {
          break;
        }
        if (nmap >= n / sizeof(prxmap_t)) {
          // The buffer held every entry; tally them up and stop retrying.
          aVsize = 0;
          aResident = 0;
          aShared = 0;
          for (int i = 0; i < n / sizeof(prxmap_t); i++) {
            aVsize += prmapp[i].pr_size;
            aResident += prmapp[i].pr_rss * prmapp[i].pr_pagesize;
            if (prmapp[i].pr_mflags & MA_SHARED) {
              aShared += prmapp[i].pr_rss * prmapp[i].pr_pagesize;
            }
          }
          break;
        }
        // Buffer was too small: free it and retry with double the entries.
        free(prmapp);
      }
      // Safe on every exit path: prmapp is either the live buffer or null.
      free(prmapp);
    }
    close(mapfd);
  }
}
336 
337 #  define HAVE_VSIZE_AND_RESIDENT_REPORTERS 1
// Virtual size in bytes, summed over /proc/self/xmap; -1 signals failure.
[[nodiscard]] static nsresult VsizeDistinguishedAmount(int64_t* aN) {
  int64_t vsize, resident, shared;
  XMappingIter(vsize, resident, shared);
  if (vsize == -1) {
    return NS_ERROR_FAILURE;
  }
  *aN = vsize;
  return NS_OK;
}
347 
// Resident set size in bytes, summed over /proc/self/xmap.
[[nodiscard]] static nsresult ResidentDistinguishedAmount(int64_t* aN) {
  int64_t vsize, resident, shared;
  XMappingIter(vsize, resident, shared);
  if (resident == -1) {
    return NS_ERROR_FAILURE;
  }
  *aN = resident;
  return NS_OK;
}
357 
// On Solaris the "fast" RSS measurement is the same as the regular one.
[[nodiscard]] static nsresult ResidentFastDistinguishedAmount(int64_t* aN) {
  return ResidentDistinguishedAmount(aN);
}
361 
362 #  define HAVE_RESIDENT_UNIQUE_REPORTER 1
// USS approximation: resident bytes minus resident bytes of shared
// mappings. (resident and shared are set together, so checking resident
// alone suffices for failure detection.)
[[nodiscard]] static nsresult ResidentUniqueDistinguishedAmount(int64_t* aN) {
  int64_t vsize, resident, shared;
  XMappingIter(vsize, resident, shared);
  if (resident == -1) {
    return NS_ERROR_FAILURE;
  }
  *aN = resident - shared;
  return NS_OK;
}
372 
373 #elif defined(XP_MACOSX)
374 
375 #  include <mach/mach_init.h>
376 #  include <mach/mach_vm.h>
377 #  include <mach/shared_region.h>
378 #  include <mach/task.h>
379 #  include <sys/sysctl.h>
380 
// Fills |aTi| with this task's basic accounting info (virtual_size,
// resident_size, ...). Returns false if task_info() fails.
[[nodiscard]] static bool GetTaskBasicInfo(struct task_basic_info* aTi) {
  mach_msg_type_number_t count = TASK_BASIC_INFO_COUNT;
  kern_return_t kr =
      task_info(mach_task_self(), TASK_BASIC_INFO, (task_info_t)aTi, &count);
  return kr == KERN_SUCCESS;
}
387 
388 // The VSIZE figure on Mac includes huge amounts of shared memory and is always
389 // absurdly high, eg. 2GB+ even at start-up.  But both 'top' and 'ps' report
390 // it, so we might as well too.
391 #  define HAVE_VSIZE_AND_RESIDENT_REPORTERS 1
// Virtual size in bytes, from the task's basic info (see the caveat above
// about Mac VSIZE being inflated by shared memory).
[[nodiscard]] static nsresult VsizeDistinguishedAmount(int64_t* aN) {
  task_basic_info ti;
  if (!GetTaskBasicInfo(&ti)) {
    return NS_ERROR_FAILURE;
  }
  *aN = ti.virtual_size;
  return NS_OK;
}
400 
401 // If we're using jemalloc on Mac, we need to instruct jemalloc to purge the
402 // pages it has madvise(MADV_FREE)'d before we read our RSS in order to get
403 // an accurate result.  The OS will take away MADV_FREE'd pages when there's
404 // memory pressure, so ideally, they shouldn't count against our RSS.
405 //
406 // Purging these pages can take a long time for some users (see bug 789975),
407 // so we provide the option to get the RSS without purging first.
// RSS in bytes. When |aDoPurge| is true and jemalloc stats are available,
// first ask jemalloc to release MADV_FREE'd pages (timed for telemetry) so
// the figure isn't inflated by reclaimable pages.
[[nodiscard]] static nsresult ResidentDistinguishedAmountHelper(int64_t* aN,
                                                                bool aDoPurge) {
#  ifdef HAVE_JEMALLOC_STATS
  if (aDoPurge) {
    Telemetry::AutoTimer<Telemetry::MEMORY_FREE_PURGED_PAGES_MS> timer;
    jemalloc_purge_freed_pages();
  }
#  endif

  task_basic_info ti;
  if (!GetTaskBasicInfo(&ti)) {
    return NS_ERROR_FAILURE;
  }
  *aN = ti.resident_size;
  return NS_OK;
}
424 
// Fast RSS: skip the (potentially slow) jemalloc purge; see bug 789975.
[[nodiscard]] static nsresult ResidentFastDistinguishedAmount(int64_t* aN) {
  return ResidentDistinguishedAmountHelper(aN, /* doPurge = */ false);
}
428 
// Accurate RSS: purge MADV_FREE'd pages first so they don't count.
[[nodiscard]] static nsresult ResidentDistinguishedAmount(int64_t* aN) {
  return ResidentDistinguishedAmountHelper(aN, /* doPurge = */ true);
}
432 
433 #  define HAVE_RESIDENT_UNIQUE_REPORTER 1
434 
InSharedRegion(mach_vm_address_t aAddr,cpu_type_t aType)435 static bool InSharedRegion(mach_vm_address_t aAddr, cpu_type_t aType) {
436   mach_vm_address_t base;
437   mach_vm_address_t size;
438 
439   switch (aType) {
440     case CPU_TYPE_ARM:
441       base = SHARED_REGION_BASE_ARM;
442       size = SHARED_REGION_SIZE_ARM;
443       break;
444     case CPU_TYPE_I386:
445       base = SHARED_REGION_BASE_I386;
446       size = SHARED_REGION_SIZE_I386;
447       break;
448     case CPU_TYPE_X86_64:
449       base = SHARED_REGION_BASE_X86_64;
450       size = SHARED_REGION_SIZE_X86_64;
451       break;
452     default:
453       return false;
454   }
455 
456   return base <= aAddr && aAddr < (base + size);
457 }
458 
// USS for the task named by |aPort| (or the current task when aPort == 0):
// walks the task's VM regions and counts resident pages not shared with
// other processes, then converts pages to bytes.
[[nodiscard]] static nsresult ResidentUniqueDistinguishedAmount(
    int64_t* aN, mach_port_t aPort = 0) {
  if (!aN) {
    return NS_ERROR_FAILURE;
  }

  // The shared-region address range depends on the CPU type this process
  // runs as, so look that up first.
  cpu_type_t cpu_type;
  size_t len = sizeof(cpu_type);
  if (sysctlbyname("sysctl.proc_cputype", &cpu_type, &len, NULL, 0) != 0) {
    return NS_ERROR_FAILURE;
  }

  // Roughly based on libtop_update_vm_regions in
  // http://www.opensource.apple.com/source/top/top-100.1.2/libtop.c
  size_t privatePages = 0;
  mach_vm_size_t size = 0;
  for (mach_vm_address_t addr = MACH_VM_MIN_ADDRESS;; addr += size) {
    vm_region_top_info_data_t info;
    mach_msg_type_number_t infoCount = VM_REGION_TOP_INFO_COUNT;
    mach_port_t objectName;

    kern_return_t kr = mach_vm_region(
        aPort ? aPort : mach_task_self(), &addr, &size, VM_REGION_TOP_INFO,
        reinterpret_cast<vm_region_info_t>(&info), &infoCount, &objectName);
    if (kr == KERN_INVALID_ADDRESS) {
      // Done iterating VM regions.
      break;
    } else if (kr != KERN_SUCCESS) {
      return NS_ERROR_FAILURE;
    }

    // Skip regions in the system shared area unless mapped private there.
    if (InSharedRegion(addr, cpu_type) && info.share_mode != SM_PRIVATE) {
      continue;
    }

    switch (info.share_mode) {
      case SM_LARGE_PAGE:
        // NB: Large pages are not shareable and always resident.
      case SM_PRIVATE:
        privatePages += info.private_pages_resident;
        privatePages += info.shared_pages_resident;
        break;
      case SM_COW:
        privatePages += info.private_pages_resident;
        if (info.ref_count == 1) {
          // Treat copy-on-write pages as private if they only have one
          // reference.
          privatePages += info.shared_pages_resident;
        }
        break;
      case SM_SHARED:
      default:
        break;
    }
  }

  // Fall back to the compile-time PAGE_SIZE if the page-size query fails.
  vm_size_t pageSize;
  if (host_page_size(aPort ? aPort : mach_task_self(), &pageSize) !=
      KERN_SUCCESS) {
    pageSize = PAGE_SIZE;
  }

  *aN = privatePages * pageSize;
  return NS_OK;
}
524 
525 #elif defined(XP_WIN)
526 
527 #  include <windows.h>
528 #  include <psapi.h>
529 #  include <algorithm>
530 
531 #  define HAVE_VSIZE_AND_RESIDENT_REPORTERS 1
// Virtual size in bytes: the portion of this process's virtual address
// space that is in use (total minus available).
[[nodiscard]] static nsresult VsizeDistinguishedAmount(int64_t* aN) {
  MEMORYSTATUSEX s;
  s.dwLength = sizeof(s);

  if (!GlobalMemoryStatusEx(&s)) {
    return NS_ERROR_FAILURE;
  }

  *aN = s.ullTotalVirtual - s.ullAvailVirtual;
  return NS_OK;
}
543 
// Resident set size, in bytes: the process's current working set.
[[nodiscard]] static nsresult ResidentDistinguishedAmount(int64_t* aN) {
  PROCESS_MEMORY_COUNTERS pmc;
  pmc.cb = sizeof(PROCESS_MEMORY_COUNTERS);

  if (!GetProcessMemoryInfo(GetCurrentProcess(), &pmc, sizeof(pmc))) {
    return NS_ERROR_FAILURE;
  }

  *aN = pmc.WorkingSetSize;
  return NS_OK;
}
555 
// On Windows the "fast" RSS measurement is the same as the regular one.
[[nodiscard]] static nsresult ResidentFastDistinguishedAmount(int64_t* aN) {
  return ResidentDistinguishedAmount(aN);
}
559 
560 #  define HAVE_RESIDENT_UNIQUE_REPORTER 1
561 
// USS for |aProcess| (or the current process when null): counts working-set
// pages that are private — or shared but with at most one user — and
// multiplies by the system page size.
[[nodiscard]] static nsresult ResidentUniqueDistinguishedAmount(
    int64_t* aN, HANDLE aProcess = nullptr) {
  // Determine how many entries we need.
  PSAPI_WORKING_SET_INFORMATION tmp;
  DWORD tmpSize = sizeof(tmp);
  memset(&tmp, 0, tmpSize);

  HANDLE proc = aProcess ? aProcess : GetCurrentProcess();
  // Return value deliberately ignored: |tmp| has room for only one entry,
  // so this call serves to populate NumberOfEntries.
  QueryWorkingSet(proc, &tmp, tmpSize);

  // Fudge the size in case new entries are added between calls.
  size_t entries = tmp.NumberOfEntries * 2;

  if (!entries) {
    return NS_ERROR_FAILURE;
  }

  DWORD infoArraySize = tmpSize + (entries * sizeof(PSAPI_WORKING_SET_BLOCK));
  UniqueFreePtr<PSAPI_WORKING_SET_INFORMATION> infoArray(
      static_cast<PSAPI_WORKING_SET_INFORMATION*>(malloc(infoArraySize)));

  if (!infoArray) {
    return NS_ERROR_FAILURE;
  }

  if (!QueryWorkingSet(proc, infoArray.get(), infoArraySize)) {
    return NS_ERROR_FAILURE;
  }

  entries = static_cast<size_t>(infoArray->NumberOfEntries);
  size_t privatePages = 0;
  for (size_t i = 0; i < entries; i++) {
    // Count shared pages that only one process is using as private.
    if (!infoArray->WorkingSetInfo[i].Shared ||
        infoArray->WorkingSetInfo[i].ShareCount <= 1) {
      privatePages++;
    }
  }

  SYSTEM_INFO si;
  GetSystemInfo(&si);

  *aN = privatePages * si.dwPageSize;
  return NS_OK;
}
607 
608 #  define HAVE_VSIZE_MAX_CONTIGUOUS_REPORTER 1
// Size in bytes of the largest contiguous free block of virtual address
// space, found by walking the whole address space with VirtualQuery.
[[nodiscard]] static nsresult VsizeMaxContiguousDistinguishedAmount(
    int64_t* aN) {
  SIZE_T biggestRegion = 0;
  MEMORY_BASIC_INFORMATION vmemInfo = {0};
  for (size_t currentAddress = 0;;) {
    if (!VirtualQuery((LPCVOID)currentAddress, &vmemInfo, sizeof(vmemInfo))) {
      // Something went wrong, just return whatever we've got already.
      break;
    }

    // Only unreserved (free) regions are candidates.
    if (vmemInfo.State == MEM_FREE) {
      biggestRegion = std::max(biggestRegion, vmemInfo.RegionSize);
    }

    SIZE_T lastAddress = currentAddress;
    currentAddress += vmemInfo.RegionSize;

    // If we overflow, we've examined all of the address space.
    if (currentAddress < lastAddress) {
      break;
    }
  }

  *aN = biggestRegion;
  return NS_OK;
}
635 
636 #  define HAVE_PRIVATE_REPORTER 1
// Private commit charge (PrivateUsage) of this process, in bytes.
[[nodiscard]] static nsresult PrivateDistinguishedAmount(int64_t* aN) {
  PROCESS_MEMORY_COUNTERS_EX pmcex;
  pmcex.cb = sizeof(PROCESS_MEMORY_COUNTERS_EX);

  if (!GetProcessMemoryInfo(GetCurrentProcess(),
                            (PPROCESS_MEMORY_COUNTERS)&pmcex, sizeof(pmcex))) {
    return NS_ERROR_FAILURE;
  }

  *aN = pmcex.PrivateUsage;
  return NS_OK;
}
649 
650 #  define HAVE_SYSTEM_HEAP_REPORTER 1
651 // Windows can have multiple separate heaps, but we should not touch non-default
652 // heaps because they may be destroyed at anytime while we hold a handle.  So we
653 // count only the default heap.
// Total bytes of busy (allocated) entries in the default process heap,
// gathered by walking it under HeapLock.
[[nodiscard]] static nsresult SystemHeapSize(int64_t* aSizeOut) {
  HANDLE heap = GetProcessHeap();

  NS_ENSURE_TRUE(HeapLock(heap), NS_ERROR_FAILURE);

  int64_t heapSize = 0;
  PROCESS_HEAP_ENTRY entry;
  entry.lpData = nullptr;
  while (HeapWalk(heap, &entry)) {
    // We don't count entry.cbOverhead, because we just want to measure the
    // space available to the program.
    if (entry.wFlags & PROCESS_HEAP_ENTRY_BUSY) {
      heapSize += entry.cbData;
    }
  }

  // Check this result only after unlocking the heap, so that we don't leave
  // the heap locked if there was an error.
  DWORD lastError = GetLastError();

  // I have no idea how things would proceed if unlocking this heap failed...
  NS_ENSURE_TRUE(HeapUnlock(heap), NS_ERROR_FAILURE);

  NS_ENSURE_TRUE(lastError == ERROR_NO_MORE_ITEMS, NS_ERROR_FAILURE);

  *aSizeOut = heapSize;
  return NS_OK;
}
682 
// Hash key describing one kind of address-space segment: its State, Type
// and Protect values plus whether it was classified as a thread stack.
struct SegmentKind {
  DWORD mState;
  DWORD mType;
  DWORD mProtect;
  int mIsStack;  // 1 if the segment looks like a stack, else 0.
};
689 
// PLDHashTable entry aggregating the count and combined size of all
// segments sharing one SegmentKind.
struct SegmentEntry : public PLDHashEntryHdr {
  // Hashes all four SegmentKind fields together.
  static PLDHashNumber HashKey(const void* aKey) {
    auto kind = static_cast<const SegmentKind*>(aKey);
    return mozilla::HashGeneric(kind->mState, kind->mType, kind->mProtect,
                                kind->mIsStack);
  }

  // Two entries match only if every SegmentKind field is equal.
  static bool MatchEntry(const PLDHashEntryHdr* aEntry, const void* aKey) {
    auto kind = static_cast<const SegmentKind*>(aKey);
    auto entry = static_cast<const SegmentEntry*>(aEntry);
    return kind->mState == entry->mKind.mState &&
           kind->mType == entry->mKind.mType &&
           kind->mProtect == entry->mKind.mProtect &&
           kind->mIsStack == entry->mKind.mIsStack;
  }

  // Copies the key into a fresh entry and zeroes the aggregates.
  static void InitEntry(PLDHashEntryHdr* aEntry, const void* aKey) {
    auto kind = static_cast<const SegmentKind*>(aKey);
    auto entry = static_cast<SegmentEntry*>(aEntry);
    entry->mKind = *kind;
    entry->mCount = 0;
    entry->mSize = 0;
  }

  static const PLDHashTableOps Ops;

  SegmentKind mKind;  // The segment kind.
  uint32_t mCount;    // The number of segments of this kind.
  size_t mSize;       // The combined size of segments of this kind.
};
720 
// Hash-table ops for SegmentEntry; move/clear use the stock stubs.
/* static */ const PLDHashTableOps SegmentEntry::Ops = {
    SegmentEntry::HashKey, SegmentEntry::MatchEntry,
    PLDHashTable::MoveEntryStub, PLDHashTable::ClearEntryStub,
    SegmentEntry::InitEntry};
725 
// Reports a breakdown of the process's entire virtual address space under
// "address-space/..." paths, grouping VirtualQuery segments by their
// State/Type/Protect combination (plus a heuristic stack flag).
class WindowsAddressSpaceReporter final : public nsIMemoryReporter {
  ~WindowsAddressSpaceReporter() {}

 public:
  NS_DECL_ISUPPORTS

  NS_IMETHOD CollectReports(nsIHandleReportCallback* aHandleReport,
                            nsISupports* aData, bool aAnonymize) override {
    // First iterate over all the segments and record how many of each kind
    // there were and their aggregate sizes. We use a hash table for this
    // because there are a couple of dozen different kinds possible.

    PLDHashTable table(&SegmentEntry::Ops, sizeof(SegmentEntry));
    MEMORY_BASIC_INFORMATION info = {0};
    bool isPrevSegStackGuard = false;
    for (size_t currentAddress = 0;;) {
      if (!VirtualQuery((LPCVOID)currentAddress, &info, sizeof(info))) {
        // Something went wrong, just return whatever we've got already.
        break;
      }

      size_t size = info.RegionSize;

      // Note that |type| and |protect| are ignored in some cases.
      DWORD state = info.State;
      DWORD type =
          (state == MEM_RESERVE || state == MEM_COMMIT) ? info.Type : 0;
      DWORD protect = (state == MEM_COMMIT) ? info.Protect : 0;
      // Heuristic: a committed, private, read-write segment immediately
      // after a guard page is most likely a thread stack.
      bool isStack = isPrevSegStackGuard && state == MEM_COMMIT &&
                     type == MEM_PRIVATE && protect == PAGE_READWRITE;

      SegmentKind kind = {state, type, protect, isStack ? 1 : 0};
      auto entry =
          static_cast<SegmentEntry*>(table.Add(&kind, mozilla::fallible));
      if (entry) {
        entry->mCount += 1;
        entry->mSize += size;
      }

      isPrevSegStackGuard = info.State == MEM_COMMIT &&
                            info.Type == MEM_PRIVATE &&
                            info.Protect == (PAGE_READWRITE | PAGE_GUARD);

      size_t lastAddress = currentAddress;
      currentAddress += size;

      // If we overflow, we've examined all of the address space.
      if (currentAddress < lastAddress) {
        break;
      }
    }

    // Then iterate over the hash table and report the details for each segment
    // kind.

    for (auto iter = table.Iter(); !iter.Done(); iter.Next()) {
      // For each range of pages, we consider one or more of its State, Type
      // and Protect values. These are documented at
      // https://msdn.microsoft.com/en-us/library/windows/desktop/aa366775%28v=vs.85%29.aspx
      // (for State and Type) and
      // https://msdn.microsoft.com/en-us/library/windows/desktop/aa366786%28v=vs.85%29.aspx
      // (for Protect).
      //
      // Not all State values have accompanying Type and Protection values.
      bool doType = false;
      bool doProtect = false;

      auto entry = static_cast<const SegmentEntry*>(iter.Get());

      nsCString path("address-space");

      switch (entry->mKind.mState) {
        case MEM_FREE:
          path.AppendLiteral("/free");
          break;

        case MEM_RESERVE:
          path.AppendLiteral("/reserved");
          doType = true;
          break;

        case MEM_COMMIT:
          path.AppendLiteral("/commit");
          doType = true;
          doProtect = true;
          break;

        default:
          // Should be impossible, but handle it just in case.
          path.AppendLiteral("/???");
          break;
      }

      if (doType) {
        switch (entry->mKind.mType) {
          case MEM_IMAGE:
            path.AppendLiteral("/image");
            break;

          case MEM_MAPPED:
            path.AppendLiteral("/mapped");
            break;

          case MEM_PRIVATE:
            path.AppendLiteral("/private");
            break;

          default:
            // Should be impossible, but handle it just in case.
            path.AppendLiteral("/???");
            break;
        }
      }

      if (doProtect) {
        DWORD protect = entry->mKind.mProtect;
        // Basic attributes. Exactly one of these should be set.
        if (protect & PAGE_EXECUTE) {
          path.AppendLiteral("/execute");
        }
        if (protect & PAGE_EXECUTE_READ) {
          path.AppendLiteral("/execute-read");
        }
        if (protect & PAGE_EXECUTE_READWRITE) {
          path.AppendLiteral("/execute-readwrite");
        }
        if (protect & PAGE_EXECUTE_WRITECOPY) {
          path.AppendLiteral("/execute-writecopy");
        }
        if (protect & PAGE_NOACCESS) {
          path.AppendLiteral("/noaccess");
        }
        if (protect & PAGE_READONLY) {
          path.AppendLiteral("/readonly");
        }
        if (protect & PAGE_READWRITE) {
          path.AppendLiteral("/readwrite");
        }
        if (protect & PAGE_WRITECOPY) {
          path.AppendLiteral("/writecopy");
        }

        // Modifiers. At most one of these should be set.
        if (protect & PAGE_GUARD) {
          path.AppendLiteral("+guard");
        }
        if (protect & PAGE_NOCACHE) {
          path.AppendLiteral("+nocache");
        }
        if (protect & PAGE_WRITECOMBINE) {
          path.AppendLiteral("+writecombine");
        }

        // Annotate likely stack segments, too.
        if (entry->mKind.mIsStack) {
          path.AppendLiteral("+stack");
        }
      }

      // Append the segment count.
      path.AppendPrintf("(segments=%u)", entry->mCount);

      aHandleReport->Callback(""_ns, path, KIND_OTHER, UNITS_BYTES,
                              entry->mSize, "From MEMORY_BASIC_INFORMATION."_ns,
                              aData);
    }

    return NS_OK;
  }
};
NS_IMPL_ISUPPORTS(WindowsAddressSpaceReporter, nsIMemoryReporter)
897 
898 #endif  // XP_<PLATFORM>
899 
900 #ifdef HAVE_VSIZE_MAX_CONTIGUOUS_REPORTER
// Memory reporter for the "vsize-max-contiguous" distinguished amount;
// silently reports nothing if the measurement fails.
class VsizeMaxContiguousReporter final : public nsIMemoryReporter {
  ~VsizeMaxContiguousReporter() {}

 public:
  NS_DECL_ISUPPORTS

  NS_IMETHOD CollectReports(nsIHandleReportCallback* aHandleReport,
                            nsISupports* aData, bool aAnonymize) override {
    int64_t amount;
    if (NS_SUCCEEDED(VsizeMaxContiguousDistinguishedAmount(&amount))) {
      MOZ_COLLECT_REPORT(
          "vsize-max-contiguous", KIND_OTHER, UNITS_BYTES, amount,
          "Size of the maximum contiguous block of available virtual memory.");
    }
    return NS_OK;
  }
};
NS_IMPL_ISUPPORTS(VsizeMaxContiguousReporter, nsIMemoryReporter)
919 #endif
920 
921 #ifdef HAVE_PRIVATE_REPORTER
922 class PrivateReporter final : public nsIMemoryReporter {
~PrivateReporter()923   ~PrivateReporter() {}
924 
925  public:
926   NS_DECL_ISUPPORTS
927 
CollectReports(nsIHandleReportCallback * aHandleReport,nsISupports * aData,bool aAnonymize)928   NS_IMETHOD CollectReports(nsIHandleReportCallback* aHandleReport,
929                             nsISupports* aData, bool aAnonymize) override {
930     int64_t amount;
931     if (NS_SUCCEEDED(PrivateDistinguishedAmount(&amount))) {
932       // clang-format off
933       MOZ_COLLECT_REPORT(
934         "private", KIND_OTHER, UNITS_BYTES, amount,
935 "Memory that cannot be shared with other processes, including memory that is "
936 "committed and marked MEM_PRIVATE, data that is not mapped, and executable "
937 "pages that have been written to.");
938       // clang-format on
939     }
940     return NS_OK;
941   }
942 };
943 NS_IMPL_ISUPPORTS(PrivateReporter, nsIMemoryReporter)
944 #endif
945 
946 #ifdef HAVE_VSIZE_AND_RESIDENT_REPORTERS
947 class VsizeReporter final : public nsIMemoryReporter {
948   ~VsizeReporter() = default;
949 
950  public:
951   NS_DECL_ISUPPORTS
952 
CollectReports(nsIHandleReportCallback * aHandleReport,nsISupports * aData,bool aAnonymize)953   NS_IMETHOD CollectReports(nsIHandleReportCallback* aHandleReport,
954                             nsISupports* aData, bool aAnonymize) override {
955     int64_t amount;
956     if (NS_SUCCEEDED(VsizeDistinguishedAmount(&amount))) {
957       // clang-format off
958       MOZ_COLLECT_REPORT(
959         "vsize", KIND_OTHER, UNITS_BYTES, amount,
960 "Memory mapped by the process, including code and data segments, the heap, "
961 "thread stacks, memory explicitly mapped by the process via mmap and similar "
962 "operations, and memory shared with other processes. This is the vsize figure "
963 "as reported by 'top' and 'ps'.  This figure is of limited use on Mac, where "
964 "processes share huge amounts of memory with one another.  But even on other "
965 "operating systems, 'resident' is a much better measure of the memory "
966 "resources used by the process.");
967       // clang-format on
968     }
969     return NS_OK;
970   }
971 };
972 NS_IMPL_ISUPPORTS(VsizeReporter, nsIMemoryReporter)
973 
974 class ResidentReporter final : public nsIMemoryReporter {
975   ~ResidentReporter() = default;
976 
977  public:
978   NS_DECL_ISUPPORTS
979 
CollectReports(nsIHandleReportCallback * aHandleReport,nsISupports * aData,bool aAnonymize)980   NS_IMETHOD CollectReports(nsIHandleReportCallback* aHandleReport,
981                             nsISupports* aData, bool aAnonymize) override {
982     int64_t amount;
983     if (NS_SUCCEEDED(ResidentDistinguishedAmount(&amount))) {
984       // clang-format off
985       MOZ_COLLECT_REPORT(
986         "resident", KIND_OTHER, UNITS_BYTES, amount,
987 "Memory mapped by the process that is present in physical memory, also known "
988 "as the resident set size (RSS).  This is the best single figure to use when "
989 "considering the memory resources used by the process, but it depends both on "
990 "other processes being run and details of the OS kernel and so is best used "
991 "for comparing the memory usage of a single process at different points in "
992 "time.");
993       // clang-format on
994     }
995     return NS_OK;
996   }
997 };
998 NS_IMPL_ISUPPORTS(ResidentReporter, nsIMemoryReporter)
999 
1000 #endif  // HAVE_VSIZE_AND_RESIDENT_REPORTERS
1001 
1002 #ifdef HAVE_RESIDENT_UNIQUE_REPORTER
1003 class ResidentUniqueReporter final : public nsIMemoryReporter {
1004   ~ResidentUniqueReporter() = default;
1005 
1006  public:
1007   NS_DECL_ISUPPORTS
1008 
CollectReports(nsIHandleReportCallback * aHandleReport,nsISupports * aData,bool aAnonymize)1009   NS_IMETHOD CollectReports(nsIHandleReportCallback* aHandleReport,
1010                             nsISupports* aData, bool aAnonymize) override {
1011     int64_t amount = 0;
1012     if (NS_SUCCEEDED(ResidentUniqueDistinguishedAmount(&amount))) {
1013       // clang-format off
1014       MOZ_COLLECT_REPORT(
1015         "resident-unique", KIND_OTHER, UNITS_BYTES, amount,
1016 "Memory mapped by the process that is present in physical memory and not "
1017 "shared with any other processes.  This is also known as the process's unique "
1018 "set size (USS).  This is the amount of RAM we'd expect to be freed if we "
1019 "closed this process.");
1020       // clang-format on
1021     }
1022     return NS_OK;
1023   }
1024 };
1025 NS_IMPL_ISUPPORTS(ResidentUniqueReporter, nsIMemoryReporter)
1026 
1027 #endif  // HAVE_RESIDENT_UNIQUE_REPORTER
1028 
1029 #ifdef HAVE_SYSTEM_HEAP_REPORTER
1030 
1031 class SystemHeapReporter final : public nsIMemoryReporter {
1032   ~SystemHeapReporter() = default;
1033 
1034  public:
1035   NS_DECL_ISUPPORTS
1036 
CollectReports(nsIHandleReportCallback * aHandleReport,nsISupports * aData,bool aAnonymize)1037   NS_IMETHOD CollectReports(nsIHandleReportCallback* aHandleReport,
1038                             nsISupports* aData, bool aAnonymize) override {
1039     int64_t amount;
1040     if (NS_SUCCEEDED(SystemHeapSize(&amount))) {
1041       // clang-format off
1042       MOZ_COLLECT_REPORT(
1043         "system-heap-allocated", KIND_OTHER, UNITS_BYTES, amount,
1044 "Memory used by the system allocator that is currently allocated to the "
1045 "application. This is distinct from the jemalloc heap that Firefox uses for "
1046 "most or all of its heap allocations. Ideally this number is zero, but "
1047 "on some platforms we cannot force every heap allocation through jemalloc.");
1048       // clang-format on
1049     }
1050     return NS_OK;
1051   }
1052 };
NS_IMPL_ISUPPORTS(SystemHeapReporter,nsIMemoryReporter)1053 NS_IMPL_ISUPPORTS(SystemHeapReporter, nsIMemoryReporter)
1054 #endif  // HAVE_SYSTEM_HEAP_REPORTER
1055 
1056 #ifdef XP_UNIX
1057 
1058 #  include <sys/resource.h>
1059 
1060 #  define HAVE_RESIDENT_PEAK_REPORTER 1
1061 
// Returns the peak resident set size of this process in bytes, via
// getrusage().  Fails if getrusage() fails or reports a non-positive value.
[[nodiscard]] static nsresult ResidentPeakDistinguishedAmount(int64_t* aN) {
  struct rusage usage;
  if (0 == getrusage(RUSAGE_SELF, &usage)) {
    // The units for ru_maxrss vary by platform:
    // - Mac: bytes
    // - Solaris: pages? But some sources say it actually always returns 0, so
    //   check for that
    // - Linux, {Net/Open/Free}BSD, DragonFly: KiB
#  ifdef XP_MACOSX
    *aN = usage.ru_maxrss;
#  elif defined(SOLARIS)
    *aN = usage.ru_maxrss * getpagesize();
#  else
    *aN = usage.ru_maxrss * 1024;
#  endif
    // Treat a zero/negative value as failure (see the Solaris note above).
    if (*aN > 0) {
      return NS_OK;
    }
  }
  return NS_ERROR_FAILURE;
}
1083 
1084 class ResidentPeakReporter final : public nsIMemoryReporter {
1085   ~ResidentPeakReporter() = default;
1086 
1087  public:
1088   NS_DECL_ISUPPORTS
1089 
CollectReports(nsIHandleReportCallback * aHandleReport,nsISupports * aData,bool aAnonymize)1090   NS_IMETHOD CollectReports(nsIHandleReportCallback* aHandleReport,
1091                             nsISupports* aData, bool aAnonymize) override {
1092     int64_t amount = 0;
1093     if (NS_SUCCEEDED(ResidentPeakDistinguishedAmount(&amount))) {
1094       MOZ_COLLECT_REPORT(
1095           "resident-peak", KIND_OTHER, UNITS_BYTES, amount,
1096           "The peak 'resident' value for the lifetime of the process.");
1097     }
1098     return NS_OK;
1099   }
1100 };
1101 NS_IMPL_ISUPPORTS(ResidentPeakReporter, nsIMemoryReporter)
1102 
1103 #  define HAVE_PAGE_FAULT_REPORTERS 1
1104 
1105 class PageFaultsSoftReporter final : public nsIMemoryReporter {
1106   ~PageFaultsSoftReporter() = default;
1107 
1108  public:
1109   NS_DECL_ISUPPORTS
1110 
CollectReports(nsIHandleReportCallback * aHandleReport,nsISupports * aData,bool aAnonymize)1111   NS_IMETHOD CollectReports(nsIHandleReportCallback* aHandleReport,
1112                             nsISupports* aData, bool aAnonymize) override {
1113     struct rusage usage;
1114     int err = getrusage(RUSAGE_SELF, &usage);
1115     if (err == 0) {
1116       int64_t amount = usage.ru_minflt;
1117       // clang-format off
1118       MOZ_COLLECT_REPORT(
1119         "page-faults-soft", KIND_OTHER, UNITS_COUNT_CUMULATIVE, amount,
1120 "The number of soft page faults (also known as 'minor page faults') that "
1121 "have occurred since the process started.  A soft page fault occurs when the "
1122 "process tries to access a page which is present in physical memory but is "
1123 "not mapped into the process's address space.  For instance, a process might "
1124 "observe soft page faults when it loads a shared library which is already "
1125 "present in physical memory. A process may experience many thousands of soft "
1126 "page faults even when the machine has plenty of available physical memory, "
1127 "and because the OS services a soft page fault without accessing the disk, "
1128 "they impact performance much less than hard page faults.");
1129       // clang-format on
1130     }
1131     return NS_OK;
1132   }
1133 };
NS_IMPL_ISUPPORTS(PageFaultsSoftReporter,nsIMemoryReporter)1134 NS_IMPL_ISUPPORTS(PageFaultsSoftReporter, nsIMemoryReporter)
1135 
1136 [[nodiscard]] static nsresult PageFaultsHardDistinguishedAmount(
1137     int64_t* aAmount) {
1138   struct rusage usage;
1139   int err = getrusage(RUSAGE_SELF, &usage);
1140   if (err != 0) {
1141     return NS_ERROR_FAILURE;
1142   }
1143   *aAmount = usage.ru_majflt;
1144   return NS_OK;
1145 }
1146 
1147 class PageFaultsHardReporter final : public nsIMemoryReporter {
1148   ~PageFaultsHardReporter() = default;
1149 
1150  public:
1151   NS_DECL_ISUPPORTS
1152 
CollectReports(nsIHandleReportCallback * aHandleReport,nsISupports * aData,bool aAnonymize)1153   NS_IMETHOD CollectReports(nsIHandleReportCallback* aHandleReport,
1154                             nsISupports* aData, bool aAnonymize) override {
1155     int64_t amount = 0;
1156     if (NS_SUCCEEDED(PageFaultsHardDistinguishedAmount(&amount))) {
1157       // clang-format off
1158       MOZ_COLLECT_REPORT(
1159         "page-faults-hard", KIND_OTHER, UNITS_COUNT_CUMULATIVE, amount,
1160 "The number of hard page faults (also known as 'major page faults') that have "
1161 "occurred since the process started.  A hard page fault occurs when a process "
1162 "tries to access a page which is not present in physical memory. The "
1163 "operating system must access the disk in order to fulfill a hard page fault. "
1164 "When memory is plentiful, you should see very few hard page faults. But if "
1165 "the process tries to use more memory than your machine has available, you "
1166 "may see many thousands of hard page faults. Because accessing the disk is up "
1167 "to a million times slower than accessing RAM, the program may run very "
1168 "slowly when it is experiencing more than 100 or so hard page faults a "
1169 "second.");
1170       // clang-format on
1171     }
1172     return NS_OK;
1173   }
1174 };
NS_IMPL_ISUPPORTS(PageFaultsHardReporter,nsIMemoryReporter)1175 NS_IMPL_ISUPPORTS(PageFaultsHardReporter, nsIMemoryReporter)
1176 
1177 #endif  // XP_UNIX
1178 
1179 /**
1180  ** memory reporter implementation for jemalloc and OSX malloc,
1181  ** to obtain info on total memory in use (that we know about,
1182  ** at least -- on OSX, there are sometimes other zones in use).
1183  **/
1184 
1185 #ifdef HAVE_JEMALLOC_STATS
1186 
1187 static size_t HeapOverhead(jemalloc_stats_t* aStats) {
1188   return aStats->waste + aStats->bookkeeping + aStats->page_cache +
1189          aStats->bin_unused;
1190 }
1191 
1192 // This has UNITS_PERCENTAGE, so it is multiplied by 100x *again* on top of the
1193 // 100x for the percentage.
HeapOverheadFraction(jemalloc_stats_t * aStats)1194 static int64_t HeapOverheadFraction(jemalloc_stats_t* aStats) {
1195   size_t heapOverhead = HeapOverhead(aStats);
1196   size_t heapCommitted = aStats->allocated + heapOverhead;
1197   return int64_t(10000 * (heapOverhead / (double)heapCommitted));
1198 }
1199 
1200 class JemallocHeapReporter final : public nsIMemoryReporter {
1201   ~JemallocHeapReporter() = default;
1202 
1203  public:
1204   NS_DECL_ISUPPORTS
1205 
CollectReports(nsIHandleReportCallback * aHandleReport,nsISupports * aData,bool aAnonymize)1206   NS_IMETHOD CollectReports(nsIHandleReportCallback* aHandleReport,
1207                             nsISupports* aData, bool aAnonymize) override {
1208     jemalloc_stats_t stats;
1209     jemalloc_bin_stats_t bin_stats[JEMALLOC_MAX_STATS_BINS];
1210     jemalloc_stats(&stats, bin_stats);
1211 
1212     // clang-format off
1213     MOZ_COLLECT_REPORT(
1214       "heap-committed/allocated", KIND_OTHER, UNITS_BYTES, stats.allocated,
1215 "Memory mapped by the heap allocator that is currently allocated to the "
1216 "application.  This may exceed the amount of memory requested by the "
1217 "application because the allocator regularly rounds up request sizes. (The "
1218 "exact amount requested is not recorded.)");
1219 
1220     MOZ_COLLECT_REPORT(
1221       "heap-allocated", KIND_OTHER, UNITS_BYTES, stats.allocated,
1222 "The same as 'heap-committed/allocated'.");
1223 
1224     // We mark this and the other heap-overhead reporters as KIND_NONHEAP
1225     // because KIND_HEAP memory means "counted in heap-allocated", which
1226     // this is not.
1227     for (auto& bin : bin_stats) {
1228       if (!bin.size) {
1229         continue;
1230       }
1231       nsPrintfCString path("explicit/heap-overhead/bin-unused/bin-%zu",
1232           bin.size);
1233       aHandleReport->Callback(EmptyCString(), path, KIND_NONHEAP, UNITS_BYTES,
1234         bin.bytes_unused,
1235         nsLiteralCString(
1236           "Unused bytes in all runs of all bins for this size class"),
1237         aData);
1238     }
1239 
1240     if (stats.waste > 0) {
1241       MOZ_COLLECT_REPORT(
1242         "explicit/heap-overhead/waste", KIND_NONHEAP, UNITS_BYTES,
1243         stats.waste,
1244 "Committed bytes which do not correspond to an active allocation and which the "
1245 "allocator is not intentionally keeping alive (i.e., not "
1246 "'explicit/heap-overhead/{bookkeeping,page-cache,bin-unused}').");
1247     }
1248 
1249     MOZ_COLLECT_REPORT(
1250       "explicit/heap-overhead/bookkeeping", KIND_NONHEAP, UNITS_BYTES,
1251       stats.bookkeeping,
1252 "Committed bytes which the heap allocator uses for internal data structures.");
1253 
1254     MOZ_COLLECT_REPORT(
1255       "explicit/heap-overhead/page-cache", KIND_NONHEAP, UNITS_BYTES,
1256       stats.page_cache,
1257 "Memory which the allocator could return to the operating system, but hasn't. "
1258 "The allocator keeps this memory around as an optimization, so it doesn't "
1259 "have to ask the OS the next time it needs to fulfill a request. This value "
1260 "is typically not larger than a few megabytes.");
1261 
1262     MOZ_COLLECT_REPORT(
1263       "heap-committed/overhead", KIND_OTHER, UNITS_BYTES,
1264       HeapOverhead(&stats),
1265 "The sum of 'explicit/heap-overhead/*'.");
1266 
1267     MOZ_COLLECT_REPORT(
1268       "heap-mapped", KIND_OTHER, UNITS_BYTES, stats.mapped,
1269 "Amount of memory currently mapped. Includes memory that is uncommitted, i.e. "
1270 "neither in physical memory nor paged to disk.");
1271 
1272     MOZ_COLLECT_REPORT(
1273       "heap-chunksize", KIND_OTHER, UNITS_BYTES, stats.chunksize,
1274       "Size of chunks.");
1275     // clang-format on
1276 
1277     return NS_OK;
1278   }
1279 };
1280 NS_IMPL_ISUPPORTS(JemallocHeapReporter, nsIMemoryReporter)
1281 
1282 #endif  // HAVE_JEMALLOC_STATS
1283 
1284 // Why is this here?  At first glance, you'd think it could be defined and
1285 // registered with nsMemoryReporterManager entirely within nsAtomTable.cpp.
1286 // However, the obvious time to register it is when the table is initialized,
1287 // and that happens before XPCOM components are initialized, which means the
1288 // RegisterStrongMemoryReporter call fails.  So instead we do it here.
class AtomTablesReporter final : public nsIMemoryReporter {
  MOZ_DEFINE_MALLOC_SIZE_OF(MallocSizeOf)

  ~AtomTablesReporter() = default;

 public:
  NS_DECL_ISUPPORTS

  // Reports heap memory used by the global atom table and by dynamic atoms,
  // as measured by NS_AddSizeOfAtoms() (declared in nsAtomTable.h).
  NS_IMETHOD CollectReports(nsIHandleReportCallback* aHandleReport,
                            nsISupports* aData, bool aAnonymize) override {
    AtomsSizes sizes;
    NS_AddSizeOfAtoms(MallocSizeOf, sizes);

    MOZ_COLLECT_REPORT("explicit/atoms/table", KIND_HEAP, UNITS_BYTES,
                       sizes.mTable, "Memory used by the atom table.");

    MOZ_COLLECT_REPORT(
        "explicit/atoms/dynamic-objects-and-chars", KIND_HEAP, UNITS_BYTES,
        sizes.mDynamicAtoms,
        "Memory used by dynamic atom objects and chars (which are stored "
        "at the end of each atom object).");

    return NS_OK;
  }
};
NS_IMPL_ISUPPORTS(AtomTablesReporter, nsIMemoryReporter)
1315 
// Reports per-thread stack memory plus nsThread bookkeeping overhead
// (event queues, wrapper objects, and an estimated kernel-stack cost).
class ThreadsReporter final : public nsIMemoryReporter {
  MOZ_DEFINE_MALLOC_SIZE_OF(MallocSizeOf)
  ~ThreadsReporter() = default;

 public:
  NS_DECL_ISUPPORTS

  NS_IMETHOD CollectReports(nsIHandleReportCallback* aHandleReport,
                            nsISupports* aData, bool aAnonymize) override {
#ifdef XP_LINUX
    // Snapshot /proc-style memory mappings up front so stack regions can be
    // matched against them below.
    nsTArray<MemoryMapping> mappings(1024);
    MOZ_TRY(GetMemoryMappings(mappings));
#endif

    // Enumerating over active threads requires holding a lock, so we collect
    // info on all threads, and then call our reporter callbacks after releasing
    // the lock.
    struct ThreadData {
      nsCString mName;
      uint32_t mThreadId;
      size_t mPrivateSize;
    };
    AutoTArray<ThreadData, 32> threads;

    size_t eventQueueSizes = 0;
    size_t wrapperSizes = 0;
    size_t threadCount = 0;

    for (auto* thread : nsThread::Enumerate()) {
      threadCount++;
      eventQueueSizes += thread->SizeOfEventQueues(MallocSizeOf);
      wrapperSizes += thread->ShallowSizeOfIncludingThis(MallocSizeOf);

      // Threads with no known stack base can't have their stacks measured.
      if (!thread->StackBase()) {
        continue;
      }

#if defined(XP_LINUX)
      int idx = mappings.BinaryIndexOf(thread->StackBase());
      if (idx < 0) {
        continue;
      }
      // Referenced() is the combined size of all pages in the region which have
      // ever been touched, and are therefore consuming memory. For stack
      // regions, these pages are guaranteed to be un-shared unless we fork
      // after creating threads (which we don't).
      size_t privateSize = mappings[idx].Referenced();

      // On Linux, we have to be very careful matching memory regions to thread
      // stacks.
      //
      // To begin with, the kernel only reports VM stats for regions of all
      // adjacent pages with the same flags, protection, and backing file.
      // There's no way to get finer-grained usage information for a subset of
      // those pages.
      //
      // Stack segments always have a guard page at the bottom of the stack
      // (assuming we only support stacks that grow down), so there's no danger
      // of them being merged with other stack regions. At the top, there's no
      // protection page, and no way to allocate one without using pthreads
      // directly and allocating our own stacks. So we get around the problem by
      // adding an extra VM flag (NOHUGEPAGES) to our stack region, which we
      // don't expect to be set on any heap regions. But this is not fool-proof.
      //
      // A second kink is that different C libraries (and different versions
      // thereof) report stack base locations and sizes differently with regard
      // to the guard page. For the libraries that include the guard page in the
      // stack size base pointer, we need to adjust those values to compensate.
      // But it's possible that our logic will get out of sync with library
      // changes, or someone will compile with an unexpected library.
      //
      //
      // The upshot of all of this is that there may be configurations that our
      // special cases don't cover. And if there are, we want to know about it.
      // So assert that total size of the memory region we're reporting actually
      // matches the allocated size of the thread stack.
#  ifndef ANDROID
      MOZ_ASSERT(mappings[idx].Size() == thread->StackSize(),
                 "Mapping region size doesn't match stack allocation size");
#  endif
#elif defined(XP_WIN)
      auto memInfo = MemoryInfo::Get(thread->StackBase(), thread->StackSize());
      size_t privateSize = memInfo.Committed();
#else
      size_t privateSize = thread->StackSize();
      MOZ_ASSERT_UNREACHABLE(
          "Shouldn't have stack base pointer on this "
          "platform");
#endif

      threads.AppendElement(ThreadData{
          nsCString(PR_GetThreadName(thread->GetPRThread())),
          thread->ThreadId(),
          // On Linux, it's possible (but unlikely) that our stack region will
          // have been merged with adjacent heap regions, in which case we'll
          // get combined size information for both. So we take the minimum of
          // the reported private size and the requested stack size to avoid the
          // possibility of majorly over-reporting in that case.
          std::min(privateSize, thread->StackSize()),
      });
    }

    // Lock released; now emit one report per collected thread.
    for (auto& thread : threads) {
      nsPrintfCString path("explicit/threads/stacks/%s (tid=%u)",
                           thread.mName.get(), thread.mThreadId);

      aHandleReport->Callback(
          ""_ns, path, KIND_NONHEAP, UNITS_BYTES, thread.mPrivateSize,
          nsLiteralCString("The sizes of thread stacks which have been "
                           "committed to memory."),
          aData);
    }

    MOZ_COLLECT_REPORT("explicit/threads/overhead/event-queues", KIND_HEAP,
                       UNITS_BYTES, eventQueueSizes,
                       "The sizes of nsThread event queues and observers.");

    MOZ_COLLECT_REPORT("explicit/threads/overhead/wrappers", KIND_HEAP,
                       UNITS_BYTES, wrapperSizes,
                       "The sizes of nsThread/PRThread wrappers.");

#if defined(XP_WIN)
    // Each thread on Windows has a fixed kernel overhead. For 32 bit Windows,
    // that's 12K. For 64 bit, it's 24K.
    //
    // See
    // https://blogs.technet.microsoft.com/markrussinovich/2009/07/05/pushing-the-limits-of-windows-processes-and-threads/
    constexpr size_t kKernelSize = (sizeof(void*) == 8 ? 24 : 12) * 1024;
#elif defined(XP_LINUX)
    // On Linux, kernel stacks are usually 8K. However, on x86, they are
    // allocated virtually, and start out at 4K. They may grow to 8K, but we
    // have no way of knowing which ones do, so all we can do is guess.
#  if defined(__x86_64__) || defined(__i386__)
    constexpr size_t kKernelSize = 4 * 1024;
#  else
    constexpr size_t kKernelSize = 8 * 1024;
#  endif
#elif defined(XP_MACOSX)
    // On Darwin, kernel stacks are 16K:
    //
    // https://books.google.com/books?id=K8vUkpOXhN4C&lpg=PA513&dq=mach%20kernel%20thread%20stack%20size&pg=PA513#v=onepage&q=mach%20kernel%20thread%20stack%20size&f=false
    constexpr size_t kKernelSize = 16 * 1024;
#else
    // Elsewhere, just assume that kernel stacks require at least 8K.
    constexpr size_t kKernelSize = 8 * 1024;
#endif

    MOZ_COLLECT_REPORT("explicit/threads/overhead/kernel", KIND_NONHEAP,
                       UNITS_BYTES, threadCount * kKernelSize,
                       "The total kernel overhead for all active threads.");

    return NS_OK;
  }
};
NS_IMPL_ISUPPORTS(ThreadsReporter, nsIMemoryReporter)
1471 
1472 #ifdef DEBUG
1473 
// Ideally, this would be implemented in BlockingResourceBase.cpp.
// However, this ends up breaking the linking step of various unit tests due
// to adding a new dependency to libdmd for a commonly used feature (mutexes)
// in DMD builds. So instead we do it here.
class DeadlockDetectorReporter final : public nsIMemoryReporter {
  MOZ_DEFINE_MALLOC_SIZE_OF(MallocSizeOf)

  ~DeadlockDetectorReporter() = default;

 public:
  NS_DECL_ISUPPORTS

  // Reports the heap size of BlockingResourceBase's deadlock-detector state
  // (DEBUG builds only; see the comment above this class).
  NS_IMETHOD CollectReports(nsIHandleReportCallback* aHandleReport,
                            nsISupports* aData, bool aAnonymize) override {
    MOZ_COLLECT_REPORT(
        "explicit/deadlock-detector", KIND_HEAP, UNITS_BYTES,
        BlockingResourceBase::SizeOfDeadlockDetector(MallocSizeOf),
        "Memory used by the deadlock detector.");

    return NS_OK;
  }
};
NS_IMPL_ISUPPORTS(DeadlockDetectorReporter, nsIMemoryReporter)
1497 
1498 #endif
1499 
1500 #ifdef MOZ_DMD
1501 
1502 namespace mozilla {
1503 namespace dmd {
1504 
// Reports the memory used by DMD's own bookkeeping structures
// (stack-trace and block tables), as returned by dmd::SizeOf().
class DMDReporter final : public nsIMemoryReporter {
 public:
  NS_DECL_ISUPPORTS

  NS_IMETHOD CollectReports(nsIHandleReportCallback* aHandleReport,
                            nsISupports* aData, bool aAnonymize) override {
    dmd::Sizes sizes;
    dmd::SizeOf(&sizes);

    MOZ_COLLECT_REPORT(
        "explicit/dmd/stack-traces/used", KIND_HEAP, UNITS_BYTES,
        sizes.mStackTracesUsed,
        "Memory used by stack traces which correspond to at least "
        "one heap block DMD is tracking.");

    MOZ_COLLECT_REPORT(
        "explicit/dmd/stack-traces/unused", KIND_HEAP, UNITS_BYTES,
        sizes.mStackTracesUnused,
        "Memory used by stack traces which don't correspond to any heap "
        "blocks DMD is currently tracking.");

    MOZ_COLLECT_REPORT("explicit/dmd/stack-traces/table", KIND_HEAP,
                       UNITS_BYTES, sizes.mStackTraceTable,
                       "Memory used by DMD's stack trace table.");

    MOZ_COLLECT_REPORT("explicit/dmd/live-block-table", KIND_HEAP, UNITS_BYTES,
                       sizes.mLiveBlockTable,
                       "Memory used by DMD's live block table.");

    MOZ_COLLECT_REPORT("explicit/dmd/dead-block-list", KIND_HEAP, UNITS_BYTES,
                       sizes.mDeadBlockTable,
                       "Memory used by DMD's dead block list.");

    return NS_OK;
  }

 private:
  ~DMDReporter() = default;
};
NS_IMPL_ISUPPORTS(DMDReporter, nsIMemoryReporter)
1545 
1546 }  // namespace dmd
1547 }  // namespace mozilla
1548 
1549 #endif  // MOZ_DMD
1550 
1551 /**
1552  ** nsMemoryReporterManager implementation
1553  **/
1554 
NS_IMPL_ISUPPORTS(nsMemoryReporterManager,nsIMemoryReporterManager,nsIMemoryReporter)1555 NS_IMPL_ISUPPORTS(nsMemoryReporterManager, nsIMemoryReporterManager,
1556                   nsIMemoryReporter)
1557 
NS_IMETHODIMP
nsMemoryReporterManager::Init() {
  // Registration must happen on the main thread.
  if (!NS_IsMainThread()) {
    MOZ_CRASH();
  }

  // Under normal circumstances this function is only called once. However,
  // we've (infrequently) seen memory report dumps in crash reports that
  // suggest that this function is sometimes called multiple times. That in
  // turn means that multiple reporters of each kind are registered, which
  // leads to duplicated reports of individual measurements such as "resident",
  // "vsize", etc.
  //
  // It's unclear how these multiple calls can occur. The only plausible theory
  // so far is badly-written extensions, because this function is callable from
  // JS code via nsIMemoryReporter.idl.
  //
  // Whatever the cause, it's a bad thing. So we protect against it with the
  // following check.
  static bool isInited = false;
  if (isInited) {
    NS_WARNING("nsMemoryReporterManager::Init() has already been called!");
    return NS_OK;
  }
  isInited = true;

  // Register the platform-conditional reporters defined earlier in this file.
#ifdef HAVE_JEMALLOC_STATS
  RegisterStrongReporter(new JemallocHeapReporter());
#endif

#ifdef HAVE_VSIZE_AND_RESIDENT_REPORTERS
  RegisterStrongReporter(new VsizeReporter());
  RegisterStrongReporter(new ResidentReporter());
#endif

#ifdef HAVE_VSIZE_MAX_CONTIGUOUS_REPORTER
  RegisterStrongReporter(new VsizeMaxContiguousReporter());
#endif

#ifdef HAVE_RESIDENT_PEAK_REPORTER
  RegisterStrongReporter(new ResidentPeakReporter());
#endif

#ifdef HAVE_RESIDENT_UNIQUE_REPORTER
  RegisterStrongReporter(new ResidentUniqueReporter());
#endif

#ifdef HAVE_PAGE_FAULT_REPORTERS
  RegisterStrongReporter(new PageFaultsSoftReporter());
  RegisterStrongReporter(new PageFaultsHardReporter());
#endif

#ifdef HAVE_PRIVATE_REPORTER
  RegisterStrongReporter(new PrivateReporter());
#endif

#ifdef HAVE_SYSTEM_HEAP_REPORTER
  RegisterStrongReporter(new SystemHeapReporter());
#endif

  // Unconditional reporters.
  RegisterStrongReporter(new AtomTablesReporter());

  RegisterStrongReporter(new ThreadsReporter());

#ifdef DEBUG
  RegisterStrongReporter(new DeadlockDetectorReporter());
#endif

#ifdef MOZ_GECKO_PROFILER
  // We have to register this here rather than in profiler_init() because
  // profiler_init() runs prior to nsMemoryReporterManager's creation.
  RegisterStrongReporter(new GeckoProfilerReporter());
#endif

#ifdef MOZ_DMD
  RegisterStrongReporter(new mozilla::dmd::DMDReporter());
#endif

#ifdef XP_WIN
  RegisterStrongReporter(new WindowsAddressSpaceReporter());
#endif

#ifdef XP_UNIX
  nsMemoryInfoDumper::Initialize();
#endif

  // Report our own memory usage as well.
  RegisterWeakReporter(this);

  return NS_OK;
}
1649 
nsMemoryReporterManager()1650 nsMemoryReporterManager::nsMemoryReporterManager()
1651     : mMutex("nsMemoryReporterManager::mMutex"),
1652       mIsRegistrationBlocked(false),
1653       mStrongReporters(new StrongReportersTable()),
1654       mWeakReporters(new WeakReportersTable()),
1655       mSavedStrongReporters(nullptr),
1656       mSavedWeakReporters(nullptr),
1657       mNextGeneration(1),
1658       mPendingProcessesState(nullptr),
1659       mPendingReportersState(nullptr)
1660 #ifdef HAVE_JEMALLOC_STATS
1661       ,
1662       mThreadPool(do_GetService(NS_STREAMTRANSPORTSERVICE_CONTRACTID))
1663 #endif
1664 {
1665 }
1666 
~nsMemoryReporterManager()1667 nsMemoryReporterManager::~nsMemoryReporterManager() {
1668   delete mStrongReporters;
1669   delete mWeakReporters;
1670   NS_ASSERTION(!mSavedStrongReporters, "failed to restore strong reporters");
1671   NS_ASSERTION(!mSavedWeakReporters, "failed to restore weak reporters");
1672 }
1673 
1674 NS_IMETHODIMP
CollectReports(nsIHandleReportCallback * aHandleReport,nsISupports * aData,bool aAnonymize)1675 nsMemoryReporterManager::CollectReports(nsIHandleReportCallback* aHandleReport,
1676                                         nsISupports* aData, bool aAnonymize) {
1677   size_t n = MallocSizeOf(this);
1678   n += mStrongReporters->ShallowSizeOfIncludingThis(MallocSizeOf);
1679   n += mWeakReporters->ShallowSizeOfIncludingThis(MallocSizeOf);
1680 
1681   MOZ_COLLECT_REPORT("explicit/memory-reporter-manager", KIND_HEAP, UNITS_BYTES,
1682                      n, "Memory used by the memory reporter infrastructure.");
1683 
1684   return NS_OK;
1685 }
1686 
// Verbose logging for the multi-process reporting machinery below.
// Compiled out (expands to nothing) unless
// DEBUG_CHILD_PROCESS_MEMORY_REPORTING is defined.
#ifdef DEBUG_CHILD_PROCESS_MEMORY_REPORTING
#  define MEMORY_REPORTING_LOG(format, ...) \
    printf_stderr("++++ MEMORY REPORTING: " format, ##__VA_ARGS__);
#else
#  define MEMORY_REPORTING_LOG(...)
#endif
1693 
1694 NS_IMETHODIMP
GetReports(nsIHandleReportCallback * aHandleReport,nsISupports * aHandleReportData,nsIFinishReportingCallback * aFinishReporting,nsISupports * aFinishReportingData,bool aAnonymize)1695 nsMemoryReporterManager::GetReports(
1696     nsIHandleReportCallback* aHandleReport, nsISupports* aHandleReportData,
1697     nsIFinishReportingCallback* aFinishReporting,
1698     nsISupports* aFinishReportingData, bool aAnonymize) {
1699   return GetReportsExtended(aHandleReport, aHandleReportData, aFinishReporting,
1700                             aFinishReportingData, aAnonymize,
1701                             /* minimize = */ false,
1702                             /* DMDident = */ u""_ns);
1703 }
1704 
// Full-control entry point for gathering a memory report across all
// processes.  |aMinimize| runs a memory-minimization pass first;
// |aDMDDumpIdent|, when non-empty, requests DMD dumps alongside the report.
NS_IMETHODIMP
nsMemoryReporterManager::GetReportsExtended(
    nsIHandleReportCallback* aHandleReport, nsISupports* aHandleReportData,
    nsIFinishReportingCallback* aFinishReporting,
    nsISupports* aFinishReportingData, bool aAnonymize, bool aMinimize,
    const nsAString& aDMDDumpIdent) {
  nsresult rv;

  // Memory reporters are not necessarily threadsafe, so this function must
  // be called from the main thread.
  if (!NS_IsMainThread()) {
    MOZ_CRASH();
  }

  // Each request gets a fresh generation number so that late child replies
  // belonging to an older request can be recognized and dropped.
  uint32_t generation = mNextGeneration++;

  if (mPendingProcessesState) {
    // A request is in flight.  Don't start another one.  And don't report
    // an error;  just ignore it, and let the in-flight request finish.
    MEMORY_REPORTING_LOG("GetReports (gen=%u, s->gen=%u): abort\n", generation,
                         mPendingProcessesState->mGeneration);
    return NS_OK;
  }

  MEMORY_REPORTING_LOG("GetReports (gen=%u)\n", generation);

  // How many child processes may gather reports at once; clamp to at least
  // 1 so the report can still make progress with a bogus pref value.
  uint32_t concurrency = Preferences::GetUint("memory.report_concurrency", 1);
  MOZ_ASSERT(concurrency >= 1);
  if (concurrency < 1) {
    concurrency = 1;
  }
  mPendingProcessesState = new PendingProcessesState(
      generation, aAnonymize, aMinimize, concurrency, aHandleReport,
      aHandleReportData, aFinishReporting, aFinishReportingData, aDMDDumpIdent);

  if (aMinimize) {
    // Minimize memory usage first, and only then start collecting reports.
    nsCOMPtr<nsIRunnable> callback =
        NewRunnableMethod("nsMemoryReporterManager::StartGettingReports", this,
                          &nsMemoryReporterManager::StartGettingReports);
    rv = MinimizeMemoryUsage(callback);
  } else {
    rv = StartGettingReports();
  }
  return rv;
}
1750 
// Begin the actual collection for the request recorded in
// mPendingProcessesState: gather this process's reports first, queue every
// reporting-capable child/utility process, and arm a timeout so a hung
// child cannot stall the report forever.  Children are started from
// EndProcessReport() as running slots free up (the parent occupies the
// first slot).
nsresult nsMemoryReporterManager::StartGettingReports() {
  PendingProcessesState* s = mPendingProcessesState;
  nsresult rv;

  // Get reports for this process.
  FILE* parentDMDFile = nullptr;
#ifdef MOZ_DMD
  if (!s->mDMDDumpIdent.IsEmpty()) {
    rv = nsMemoryInfoDumper::OpenDMDFile(s->mDMDDumpIdent, getpid(),
                                         &parentDMDFile);
    if (NS_WARN_IF(NS_FAILED(rv))) {
      // Proceed with the memory report as if DMD were disabled.
      parentDMDFile = nullptr;
    }
  }
#endif

  // This is async.
  GetReportsForThisProcessExtended(
      s->mHandleReport, s->mHandleReportData, s->mAnonymize, parentDMDFile,
      s->mFinishReporting, s->mFinishReportingData);

  nsTArray<dom::ContentParent*> childWeakRefs;
  dom::ContentParent::GetAll(childWeakRefs);
  if (!childWeakRefs.IsEmpty()) {
    // Request memory reports from child processes.  This happens
    // after the parent report so that the parent's main thread will
    // be free to process the child reports, instead of causing them
    // to be buffered and consume (possibly scarce) memory.

    for (size_t i = 0; i < childWeakRefs.Length(); ++i) {
      s->mChildrenPending.AppendElement(childWeakRefs[i]);
    }
  }

  // Queue the GPU, RDD, VR, and socket processes too, when they exist.
  if (gfx::GPUProcessManager* gpu = gfx::GPUProcessManager::Get()) {
    if (RefPtr<MemoryReportingProcess> proc = gpu->GetProcessMemoryReporter()) {
      s->mChildrenPending.AppendElement(proc.forget());
    }
  }

  if (RDDProcessManager* rdd = RDDProcessManager::Get()) {
    if (RefPtr<MemoryReportingProcess> proc = rdd->GetProcessMemoryReporter()) {
      s->mChildrenPending.AppendElement(proc.forget());
    }
  }

  if (gfx::VRProcessManager* vr = gfx::VRProcessManager::Get()) {
    if (RefPtr<MemoryReportingProcess> proc = vr->GetProcessMemoryReporter()) {
      s->mChildrenPending.AppendElement(proc.forget());
    }
  }

  if (!mIsRegistrationBlocked && net::gIOService) {
    if (RefPtr<MemoryReportingProcess> proc =
            net::gIOService->GetSocketProcessMemoryReporter()) {
      s->mChildrenPending.AppendElement(proc.forget());
    }
  }

  if (!s->mChildrenPending.IsEmpty()) {
    // Arm the safety-net timeout; if it fires before every child reports,
    // TimeoutCallback() finishes the report with whatever arrived.
    nsCOMPtr<nsITimer> timer;
    rv = NS_NewTimerWithFuncCallback(
        getter_AddRefs(timer), TimeoutCallback, this, kTimeoutLengthMS,
        nsITimer::TYPE_ONE_SHOT,
        "nsMemoryReporterManager::StartGettingReports");
    if (NS_WARN_IF(NS_FAILED(rv))) {
      FinishReporting();
      return rv;
    }

    MOZ_ASSERT(!s->mTimer);
    s->mTimer.swap(timer);
  }

  return NS_OK;
}
1828 
// Queue one reporter's CollectReports() call as a main-thread runnable and
// bump the pending-reporter count.  For synchronous reporters the runnable
// itself calls EndReport() afterwards; async reporters are expected to call
// it themselves when they finish.
void nsMemoryReporterManager::DispatchReporter(
    nsIMemoryReporter* aReporter, bool aIsAsync,
    nsIHandleReportCallback* aHandleReport, nsISupports* aHandleReportData,
    bool aAnonymize) {
  MOZ_ASSERT(mPendingReportersState);

  // Grab refs to everything used in the lambda function.
  RefPtr<nsMemoryReporterManager> self = this;
  nsCOMPtr<nsIMemoryReporter> reporter = aReporter;
  nsCOMPtr<nsIHandleReportCallback> handleReport = aHandleReport;
  nsCOMPtr<nsISupports> handleReportData = aHandleReportData;

  nsCOMPtr<nsIRunnable> event = NS_NewRunnableFunction(
      "nsMemoryReporterManager::DispatchReporter",
      [self, reporter, aIsAsync, handleReport, handleReportData, aAnonymize]() {
        reporter->CollectReports(handleReport, handleReportData, aAnonymize);
        // Synchronous reporters are done as soon as CollectReports returns.
        if (!aIsAsync) {
          self->EndReport();
        }
      });

  NS_DispatchToMainThread(event);
  mPendingReportersState->mReportsPending++;
}
1853 
// Collect reports from every reporter registered in this process.  The
// operation is asynchronous overall: reporters are dispatched as
// main-thread runnables, and completion is tracked through EndReport().
NS_IMETHODIMP
nsMemoryReporterManager::GetReportsForThisProcessExtended(
    nsIHandleReportCallback* aHandleReport, nsISupports* aHandleReportData,
    bool aAnonymize, FILE* aDMDFile,
    nsIFinishReportingCallback* aFinishReporting,
    nsISupports* aFinishReportingData) {
  // Memory reporters are not necessarily threadsafe, so this function must
  // be called from the main thread.
  if (!NS_IsMainThread()) {
    MOZ_CRASH();
  }

  if (NS_WARN_IF(mPendingReportersState)) {
    // Report is already in progress.
    return NS_ERROR_IN_PROGRESS;
  }

#ifdef MOZ_DMD
  if (aDMDFile) {
    // Clear DMD's reportedness state before running the memory
    // reporters, to avoid spurious twice-reported warnings.
    dmd::ClearReports();
  }
#else
  MOZ_ASSERT(!aDMDFile);
#endif

  mPendingReportersState = new PendingReportersState(
      aFinishReporting, aFinishReportingData, aDMDFile);

  {
    // Hold the lock while walking the tables; DispatchReporter only queues
    // runnables, so no reporter actually runs under the lock.
    mozilla::MutexAutoLock autoLock(mMutex);

    for (const auto& entry : *mStrongReporters) {
      DispatchReporter(entry.GetKey(), entry.GetData(), aHandleReport,
                       aHandleReportData, aAnonymize);
    }

    for (const auto& entry : *mWeakReporters) {
      // The weak table stores raw pointers; take a strong ref for the
      // duration of the dispatch.
      nsCOMPtr<nsIMemoryReporter> reporter = entry.GetKey();
      DispatchReporter(reporter, entry.GetData(), aHandleReport,
                       aHandleReportData, aAnonymize);
    }
  }

  return NS_OK;
}
1901 
// Called once per reporter: by the dispatch runnable for synchronous
// reporters, or by the async reporter itself when it finishes.  When the
// last pending reporter completes, finalize the in-process report.
NS_IMETHODIMP
nsMemoryReporterManager::EndReport() {
  if (--mPendingReportersState->mReportsPending == 0) {
#ifdef MOZ_DMD
    // All reporters have run, so a requested DMD dump is now coherent.
    if (mPendingReportersState->mDMDFile) {
      nsMemoryInfoDumper::DumpDMDToFile(mPendingReportersState->mDMDFile);
    }
#endif
    if (mPendingProcessesState) {
      // This is the parent process.
      EndProcessReport(mPendingProcessesState->mGeneration, true);
    } else {
      // Child process: hand control straight to the finish callback.
      mPendingReportersState->mFinishReporting->Callback(
          mPendingReportersState->mFinishReportingData);
    }

    delete mPendingReportersState;
    mPendingReportersState = nullptr;
  }

  return NS_OK;
}
1924 
1925 nsMemoryReporterManager::PendingProcessesState*
GetStateForGeneration(uint32_t aGeneration)1926 nsMemoryReporterManager::GetStateForGeneration(uint32_t aGeneration) {
1927   // Memory reporting only happens on the main thread.
1928   MOZ_RELEASE_ASSERT(NS_IsMainThread());
1929 
1930   PendingProcessesState* s = mPendingProcessesState;
1931 
1932   if (!s) {
1933     // If we reach here, then:
1934     //
1935     // - A child process reported back too late, and no subsequent request
1936     //   is in flight.
1937     //
1938     // So there's nothing to be done.  Just ignore it.
1939     MEMORY_REPORTING_LOG("HandleChildReports: no request in flight (aGen=%u)\n",
1940                          aGeneration);
1941     return nullptr;
1942   }
1943 
1944   if (aGeneration != s->mGeneration) {
1945     // If we reach here, a child process must have reported back, too late,
1946     // while a subsequent (higher-numbered) request is in flight.  Again,
1947     // ignore it.
1948     MOZ_ASSERT(aGeneration < s->mGeneration);
1949     MEMORY_REPORTING_LOG(
1950         "HandleChildReports: gen mismatch (aGen=%u, s->gen=%u)\n", aGeneration,
1951         s->mGeneration);
1952     return nullptr;
1953   }
1954 
1955   return s;
1956 }
1957 
1958 // This function has no return value.  If something goes wrong, there's no
1959 // clear place to report the problem to, but that's ok -- we will end up
1960 // hitting the timeout and executing TimeoutCallback().
HandleChildReport(uint32_t aGeneration,const dom::MemoryReport & aChildReport)1961 void nsMemoryReporterManager::HandleChildReport(
1962     uint32_t aGeneration, const dom::MemoryReport& aChildReport) {
1963   PendingProcessesState* s = GetStateForGeneration(aGeneration);
1964   if (!s) {
1965     return;
1966   }
1967 
1968   // Child reports should have a non-empty process.
1969   MOZ_ASSERT(!aChildReport.process().IsEmpty());
1970 
1971   // If the call fails, ignore and continue.
1972   s->mHandleReport->Callback(aChildReport.process(), aChildReport.path(),
1973                              aChildReport.kind(), aChildReport.units(),
1974                              aChildReport.amount(), aChildReport.desc(),
1975                              s->mHandleReportData);
1976 }
1977 
// Ask one child process to gather its memory report.  Returns false when
// the child is already dead or the IPC send fails, so the caller can skip
// to the next pending child.
/* static */
bool nsMemoryReporterManager::StartChildReport(
    mozilla::MemoryReportingProcess* aChild,
    const PendingProcessesState* aState) {
  if (!aChild->IsAlive()) {
    MEMORY_REPORTING_LOG(
        "StartChildReports (gen=%u): child exited before"
        " its report was started\n",
        aState->mGeneration);
    return false;
  }

  Maybe<mozilla::ipc::FileDescriptor> dmdFileDesc;
#ifdef MOZ_DMD
  if (!aState->mDMDDumpIdent.IsEmpty()) {
    FILE* dmdFile = nullptr;
    nsresult rv = nsMemoryInfoDumper::OpenDMDFile(aState->mDMDDumpIdent,
                                                  aChild->Pid(), &dmdFile);
    if (NS_WARN_IF(NS_FAILED(rv))) {
      // Proceed with the memory report as if DMD were disabled.
      dmdFile = nullptr;
    }
    if (dmdFile) {
      // NOTE(review): FILEToFileDescriptor presumably duplicates the
      // underlying handle, since the FILE is closed immediately — confirm.
      dmdFileDesc = Some(mozilla::ipc::FILEToFileDescriptor(dmdFile));
      fclose(dmdFile);
    }
  }
#endif
  return aChild->SendRequestMemoryReport(
      aState->mGeneration, aState->mAnonymize, aState->mMinimize, dmdFileDesc);
}
2009 
// Bookkeeping for one process finishing its report (or dying mid-report).
// Frees the process's running slot, starts queued children up to the
// concurrency limit, and finishes the whole request once no process is
// left running.
void nsMemoryReporterManager::EndProcessReport(uint32_t aGeneration,
                                               bool aSuccess) {
  PendingProcessesState* s = GetStateForGeneration(aGeneration);
  if (!s) {
    return;
  }

  MOZ_ASSERT(s->mNumProcessesRunning > 0);
  s->mNumProcessesRunning--;
  s->mNumProcessesCompleted++;
  MEMORY_REPORTING_LOG(
      "HandleChildReports (aGen=%u): process %u %s"
      " (%u running, %u pending)\n",
      aGeneration, s->mNumProcessesCompleted,
      aSuccess ? "completed" : "exited during report", s->mNumProcessesRunning,
      static_cast<unsigned>(s->mChildrenPending.Length()));

  // Start pending children up to the concurrency limit.
  while (s->mNumProcessesRunning < s->mConcurrencyLimit &&
         !s->mChildrenPending.IsEmpty()) {
    // Pop last element from s->mChildrenPending
    const RefPtr<MemoryReportingProcess> nextChild =
        s->mChildrenPending.PopLastElement();
    // Start report (if the child is still alive).
    if (StartChildReport(nextChild, s)) {
      ++s->mNumProcessesRunning;
      MEMORY_REPORTING_LOG(
          "HandleChildReports (aGen=%u): started child report"
          " (%u running, %u pending)\n",
          aGeneration, s->mNumProcessesRunning,
          static_cast<unsigned>(s->mChildrenPending.Length()));
    }
  }

  // If all the child processes (if any) have reported, we can cancel
  // the timer (if started) and finish up.  Otherwise, just return.
  if (s->mNumProcessesRunning == 0) {
    MOZ_ASSERT(s->mChildrenPending.IsEmpty());
    if (s->mTimer) {
      s->mTimer->Cancel();
    }
    FinishReporting();
  }
}
2054 
2055 /* static */
TimeoutCallback(nsITimer * aTimer,void * aData)2056 void nsMemoryReporterManager::TimeoutCallback(nsITimer* aTimer, void* aData) {
2057   nsMemoryReporterManager* mgr = static_cast<nsMemoryReporterManager*>(aData);
2058   PendingProcessesState* s = mgr->mPendingProcessesState;
2059 
2060   // Release assert because: if the pointer is null we're about to
2061   // crash regardless of DEBUG, and this way the compiler doesn't
2062   // complain about unused variables.
2063   MOZ_RELEASE_ASSERT(s, "mgr->mPendingProcessesState");
2064   MEMORY_REPORTING_LOG("TimeoutCallback (s->gen=%u; %u running, %u pending)\n",
2065                        s->mGeneration, s->mNumProcessesRunning,
2066                        static_cast<unsigned>(s->mChildrenPending.Length()));
2067 
2068   // We don't bother sending any kind of cancellation message to the child
2069   // processes that haven't reported back.
2070   mgr->FinishReporting();
2071 }
2072 
FinishReporting()2073 nsresult nsMemoryReporterManager::FinishReporting() {
2074   // Memory reporting only happens on the main thread.
2075   if (!NS_IsMainThread()) {
2076     MOZ_CRASH();
2077   }
2078 
2079   MOZ_ASSERT(mPendingProcessesState);
2080   MEMORY_REPORTING_LOG("FinishReporting (s->gen=%u; %u processes reported)\n",
2081                        mPendingProcessesState->mGeneration,
2082                        mPendingProcessesState->mNumProcessesCompleted);
2083 
2084   // Call this before deleting |mPendingProcessesState|.  That way, if
2085   // |mFinishReportData| calls GetReports(), it will silently abort, as
2086   // required.
2087   nsresult rv = mPendingProcessesState->mFinishReporting->Callback(
2088       mPendingProcessesState->mFinishReportingData);
2089 
2090   delete mPendingProcessesState;
2091   mPendingProcessesState = nullptr;
2092   return rv;
2093 }
2094 
// Per-request bookkeeping for a cross-process report: the generation tag,
// the report options, the queue of children still to start, and the
// callbacks to feed with incoming data.
nsMemoryReporterManager::PendingProcessesState::PendingProcessesState(
    uint32_t aGeneration, bool aAnonymize, bool aMinimize,
    uint32_t aConcurrencyLimit, nsIHandleReportCallback* aHandleReport,
    nsISupports* aHandleReportData,
    nsIFinishReportingCallback* aFinishReporting,
    nsISupports* aFinishReportingData, const nsAString& aDMDDumpIdent)
    : mGeneration(aGeneration),
      mAnonymize(aAnonymize),
      mMinimize(aMinimize),
      mChildrenPending(),
      mNumProcessesRunning(1),  // reporting starts with the parent
      mNumProcessesCompleted(0),
      mConcurrencyLimit(aConcurrencyLimit),
      mHandleReport(aHandleReport),
      mHandleReportData(aHandleReportData),
      mFinishReporting(aFinishReporting),
      mFinishReportingData(aFinishReportingData),
      mDMDDumpIdent(aDMDDumpIdent) {}
2113 
CrashIfRefcountIsZero(nsISupports * aObj)2114 static void CrashIfRefcountIsZero(nsISupports* aObj) {
2115   // This will probably crash if the object's refcount is 0.
2116   uint32_t refcnt = NS_ADDREF(aObj);
2117   if (refcnt <= 1) {
2118     MOZ_CRASH("CrashIfRefcountIsZero: refcount is zero");
2119   }
2120   NS_RELEASE(aObj);
2121 }
2122 
// Shared implementation behind all the Register*Reporter methods.
// |aForce| permits registration even while registration is blocked;
// |aStrong| selects the owning (strong) table vs. the raw-pointer (weak)
// table; |aIsAsync| is stored with the reporter and consumed when reports
// are dispatched.
nsresult nsMemoryReporterManager::RegisterReporterHelper(
    nsIMemoryReporter* aReporter, bool aForce, bool aStrong, bool aIsAsync) {
  // This method is thread-safe.
  mozilla::MutexAutoLock autoLock(mMutex);

  if (mIsRegistrationBlocked && !aForce) {
    return NS_ERROR_FAILURE;
  }

  // A reporter may only be registered once, in one table.
  if (mStrongReporters->Contains(aReporter) ||
      mWeakReporters->Contains(aReporter)) {
    return NS_ERROR_FAILURE;
  }

  // If |aStrong| is true, |aReporter| may have a refcnt of 0, so we take
  // a kung fu death grip before calling PutEntry.  Otherwise, if PutEntry
  // addref'ed and released |aReporter| before finally addref'ing it for
  // good, it would free aReporter!  The kung fu death grip could itself be
  // problematic if PutEntry didn't addref |aReporter| (because then when the
  // death grip goes out of scope, we would delete the reporter).  In debug
  // mode, we check that this doesn't happen.
  //
  // If |aStrong| is false, we require that |aReporter| have a non-zero
  // refcnt.
  //
  if (aStrong) {
    nsCOMPtr<nsIMemoryReporter> kungFuDeathGrip = aReporter;
    mStrongReporters->InsertOrUpdate(aReporter, aIsAsync);
    CrashIfRefcountIsZero(aReporter);
  } else {
    CrashIfRefcountIsZero(aReporter);
    nsCOMPtr<nsIXPConnectWrappedJS> jsComponent = do_QueryInterface(aReporter);
    if (jsComponent) {
      // We cannot allow non-native reporters (WrappedJS), since we'll be
      // holding onto a raw pointer, which would point to the wrapper,
      // and that wrapper is likely to go away as soon as this register
      // call finishes.  This would then lead to subsequent crashes in
      // CollectReports().
      return NS_ERROR_XPC_BAD_CONVERT_JS;
    }
    mWeakReporters->InsertOrUpdate(aReporter, aIsAsync);
  }

  return NS_OK;
}
2168 
2169 NS_IMETHODIMP
RegisterStrongReporter(nsIMemoryReporter * aReporter)2170 nsMemoryReporterManager::RegisterStrongReporter(nsIMemoryReporter* aReporter) {
2171   return RegisterReporterHelper(aReporter, /* force = */ false,
2172                                 /* strong = */ true,
2173                                 /* async = */ false);
2174 }
2175 
2176 NS_IMETHODIMP
RegisterStrongAsyncReporter(nsIMemoryReporter * aReporter)2177 nsMemoryReporterManager::RegisterStrongAsyncReporter(
2178     nsIMemoryReporter* aReporter) {
2179   return RegisterReporterHelper(aReporter, /* force = */ false,
2180                                 /* strong = */ true,
2181                                 /* async = */ true);
2182 }
2183 
2184 NS_IMETHODIMP
RegisterWeakReporter(nsIMemoryReporter * aReporter)2185 nsMemoryReporterManager::RegisterWeakReporter(nsIMemoryReporter* aReporter) {
2186   return RegisterReporterHelper(aReporter, /* force = */ false,
2187                                 /* strong = */ false,
2188                                 /* async = */ false);
2189 }
2190 
2191 NS_IMETHODIMP
RegisterWeakAsyncReporter(nsIMemoryReporter * aReporter)2192 nsMemoryReporterManager::RegisterWeakAsyncReporter(
2193     nsIMemoryReporter* aReporter) {
2194   return RegisterReporterHelper(aReporter, /* force = */ false,
2195                                 /* strong = */ false,
2196                                 /* async = */ true);
2197 }
2198 
2199 NS_IMETHODIMP
RegisterStrongReporterEvenIfBlocked(nsIMemoryReporter * aReporter)2200 nsMemoryReporterManager::RegisterStrongReporterEvenIfBlocked(
2201     nsIMemoryReporter* aReporter) {
2202   return RegisterReporterHelper(aReporter, /* force = */ true,
2203                                 /* strong = */ true,
2204                                 /* async = */ false);
2205 }
2206 
2207 NS_IMETHODIMP
UnregisterStrongReporter(nsIMemoryReporter * aReporter)2208 nsMemoryReporterManager::UnregisterStrongReporter(
2209     nsIMemoryReporter* aReporter) {
2210   // This method is thread-safe.
2211   mozilla::MutexAutoLock autoLock(mMutex);
2212 
2213   MOZ_ASSERT(!mWeakReporters->Contains(aReporter));
2214 
2215   if (mStrongReporters->Contains(aReporter)) {
2216     mStrongReporters->Remove(aReporter);
2217     return NS_OK;
2218   }
2219 
2220   // We don't register new reporters when the block is in place, but we do
2221   // unregister existing reporters. This is so we don't keep holding strong
2222   // references that these reporters aren't expecting (which can keep them
2223   // alive longer than intended).
2224   if (mSavedStrongReporters && mSavedStrongReporters->Contains(aReporter)) {
2225     mSavedStrongReporters->Remove(aReporter);
2226     return NS_OK;
2227   }
2228 
2229   return NS_ERROR_FAILURE;
2230 }
2231 
2232 NS_IMETHODIMP
UnregisterWeakReporter(nsIMemoryReporter * aReporter)2233 nsMemoryReporterManager::UnregisterWeakReporter(nsIMemoryReporter* aReporter) {
2234   // This method is thread-safe.
2235   mozilla::MutexAutoLock autoLock(mMutex);
2236 
2237   MOZ_ASSERT(!mStrongReporters->Contains(aReporter));
2238 
2239   if (mWeakReporters->Contains(aReporter)) {
2240     mWeakReporters->Remove(aReporter);
2241     return NS_OK;
2242   }
2243 
2244   // We don't register new reporters when the block is in place, but we do
2245   // unregister existing reporters. This is so we don't keep holding weak
2246   // references that the old reporters aren't expecting (which can end up as
2247   // dangling pointers that lead to use-after-frees).
2248   if (mSavedWeakReporters && mSavedWeakReporters->Contains(aReporter)) {
2249     mSavedWeakReporters->Remove(aReporter);
2250     return NS_OK;
2251   }
2252 
2253   return NS_ERROR_FAILURE;
2254 }
2255 
2256 NS_IMETHODIMP
BlockRegistrationAndHideExistingReporters()2257 nsMemoryReporterManager::BlockRegistrationAndHideExistingReporters() {
2258   // This method is thread-safe.
2259   mozilla::MutexAutoLock autoLock(mMutex);
2260   if (mIsRegistrationBlocked) {
2261     return NS_ERROR_FAILURE;
2262   }
2263   mIsRegistrationBlocked = true;
2264 
2265   // Hide the existing reporters, saving them for later restoration.
2266   MOZ_ASSERT(!mSavedStrongReporters);
2267   MOZ_ASSERT(!mSavedWeakReporters);
2268   mSavedStrongReporters = mStrongReporters;
2269   mSavedWeakReporters = mWeakReporters;
2270   mStrongReporters = new StrongReportersTable();
2271   mWeakReporters = new WeakReportersTable();
2272 
2273   return NS_OK;
2274 }
2275 
2276 NS_IMETHODIMP
UnblockRegistrationAndRestoreOriginalReporters()2277 nsMemoryReporterManager::UnblockRegistrationAndRestoreOriginalReporters() {
2278   // This method is thread-safe.
2279   mozilla::MutexAutoLock autoLock(mMutex);
2280   if (!mIsRegistrationBlocked) {
2281     return NS_ERROR_FAILURE;
2282   }
2283 
2284   // Banish the current reporters, and restore the hidden ones.
2285   delete mStrongReporters;
2286   delete mWeakReporters;
2287   mStrongReporters = mSavedStrongReporters;
2288   mWeakReporters = mSavedWeakReporters;
2289   mSavedStrongReporters = nullptr;
2290   mSavedWeakReporters = nullptr;
2291 
2292   mIsRegistrationBlocked = false;
2293   return NS_OK;
2294 }
2295 
// ---------------------------------------------------------------------------
// "Distinguished amount" getters: single numbers fetched on demand without
// running a full report.  Each returns NS_ERROR_NOT_AVAILABLE (with the
// out-param zeroed) on platforms lacking the underlying measurement.
// ---------------------------------------------------------------------------

NS_IMETHODIMP
nsMemoryReporterManager::GetVsize(int64_t* aVsize) {
#ifdef HAVE_VSIZE_AND_RESIDENT_REPORTERS
  return VsizeDistinguishedAmount(aVsize);
#else
  *aVsize = 0;
  return NS_ERROR_NOT_AVAILABLE;
#endif
}

NS_IMETHODIMP
nsMemoryReporterManager::GetVsizeMaxContiguous(int64_t* aAmount) {
#ifdef HAVE_VSIZE_MAX_CONTIGUOUS_REPORTER
  return VsizeMaxContiguousDistinguishedAmount(aAmount);
#else
  *aAmount = 0;
  return NS_ERROR_NOT_AVAILABLE;
#endif
}

NS_IMETHODIMP
nsMemoryReporterManager::GetResident(int64_t* aAmount) {
#ifdef HAVE_VSIZE_AND_RESIDENT_REPORTERS
  return ResidentDistinguishedAmount(aAmount);
#else
  *aAmount = 0;
  return NS_ERROR_NOT_AVAILABLE;
#endif
}

NS_IMETHODIMP
nsMemoryReporterManager::GetResidentFast(int64_t* aAmount) {
#ifdef HAVE_VSIZE_AND_RESIDENT_REPORTERS
  return ResidentFastDistinguishedAmount(aAmount);
#else
  *aAmount = 0;
  return NS_ERROR_NOT_AVAILABLE;
#endif
}

// Infallible static variant: failures are reported as 0.
/*static*/
int64_t nsMemoryReporterManager::ResidentFast() {
#ifdef HAVE_VSIZE_AND_RESIDENT_REPORTERS
  int64_t amount;
  nsresult rv = ResidentFastDistinguishedAmount(&amount);
  NS_ENSURE_SUCCESS(rv, 0);
  return amount;
#else
  return 0;
#endif
}

NS_IMETHODIMP
nsMemoryReporterManager::GetResidentPeak(int64_t* aAmount) {
#ifdef HAVE_RESIDENT_PEAK_REPORTER
  return ResidentPeakDistinguishedAmount(aAmount);
#else
  *aAmount = 0;
  return NS_ERROR_NOT_AVAILABLE;
#endif
}

// Infallible static variant: failures are reported as 0.
/*static*/
int64_t nsMemoryReporterManager::ResidentPeak() {
#ifdef HAVE_RESIDENT_PEAK_REPORTER
  int64_t amount = 0;
  nsresult rv = ResidentPeakDistinguishedAmount(&amount);
  NS_ENSURE_SUCCESS(rv, 0);
  return amount;
#else
  return 0;
#endif
}

NS_IMETHODIMP
nsMemoryReporterManager::GetResidentUnique(int64_t* aAmount) {
#ifdef HAVE_RESIDENT_UNIQUE_REPORTER
  return ResidentUniqueDistinguishedAmount(aAmount);
#else
  *aAmount = 0;
  return NS_ERROR_NOT_AVAILABLE;
#endif
}
2379 
// Argument type for ResidentUnique(): the native process handle/id on the
// platforms that can measure another process, a dummy int elsewhere.
typedef
#ifdef XP_WIN
    HANDLE
#elif XP_MACOSX
    mach_port_t
#elif XP_LINUX
    pid_t
#else
    int /*dummy type */
#endif
        ResidentUniqueArg;

#if defined(XP_WIN) || defined(XP_MACOSX) || defined(XP_LINUX)

// Infallible: any failure is reported as 0.
/*static*/
int64_t nsMemoryReporterManager::ResidentUnique(ResidentUniqueArg aProcess) {
  int64_t amount = 0;
  nsresult rv = ResidentUniqueDistinguishedAmount(&amount, aProcess);
  NS_ENSURE_SUCCESS(rv, 0);
  return amount;
}

#else

// No per-process measurement on this platform: the argument is ignored and
// the current process is measured instead (or 0 when unsupported).
/*static*/
int64_t nsMemoryReporterManager::ResidentUnique(ResidentUniqueArg) {
#  ifdef HAVE_RESIDENT_UNIQUE_REPORTER
  int64_t amount = 0;
  nsresult rv = ResidentUniqueDistinguishedAmount(&amount);
  NS_ENSURE_SUCCESS(rv, 0);
  return amount;
#  else
  return 0;
#  endif
}

#endif  // XP_{WIN, MACOSX, LINUX, *}
2417 
// jemalloc-derived heap statistics; unavailable when not built with
// MOZ_MEMORY (see HAVE_JEMALLOC_STATS at the top of the file).
NS_IMETHODIMP
nsMemoryReporterManager::GetHeapAllocated(int64_t* aAmount) {
#ifdef HAVE_JEMALLOC_STATS
  jemalloc_stats_t stats;
  jemalloc_stats(&stats);
  *aAmount = stats.allocated;
  return NS_OK;
#else
  *aAmount = 0;
  return NS_ERROR_NOT_AVAILABLE;
#endif
}

// This has UNITS_PERCENTAGE, so it is multiplied by 100x.
NS_IMETHODIMP
nsMemoryReporterManager::GetHeapOverheadFraction(int64_t* aAmount) {
#ifdef HAVE_JEMALLOC_STATS
  jemalloc_stats_t stats;
  jemalloc_stats(&stats);
  *aAmount = HeapOverheadFraction(&stats);
  return NS_OK;
#else
  *aAmount = 0;
  return NS_ERROR_NOT_AVAILABLE;
#endif
}
2444 
GetInfallibleAmount(InfallibleAmountFn aAmountFn,int64_t * aAmount)2445 [[nodiscard]] static nsresult GetInfallibleAmount(InfallibleAmountFn aAmountFn,
2446                                                   int64_t* aAmount) {
2447   if (aAmountFn) {
2448     *aAmount = aAmountFn();
2449     return NS_OK;
2450   }
2451   *aAmount = 0;
2452   return NS_ERROR_NOT_AVAILABLE;
2453 }
2454 
// Delegates to the registered mJSMainRuntimeGCHeap amount function;
// NS_ERROR_NOT_AVAILABLE if none has been registered.
NS_IMETHODIMP
nsMemoryReporterManager::GetJSMainRuntimeGCHeap(int64_t* aAmount) {
  return GetInfallibleAmount(mAmountFns.mJSMainRuntimeGCHeap, aAmount);
}
2459 
// Delegates to the registered mJSMainRuntimeTemporaryPeak amount function;
// NS_ERROR_NOT_AVAILABLE if none has been registered.
NS_IMETHODIMP
nsMemoryReporterManager::GetJSMainRuntimeTemporaryPeak(int64_t* aAmount) {
  return GetInfallibleAmount(mAmountFns.mJSMainRuntimeTemporaryPeak, aAmount);
}
2464 
// Delegates to the registered mJSMainRuntimeCompartmentsSystem amount
// function; NS_ERROR_NOT_AVAILABLE if none has been registered.
NS_IMETHODIMP
nsMemoryReporterManager::GetJSMainRuntimeCompartmentsSystem(int64_t* aAmount) {
  return GetInfallibleAmount(mAmountFns.mJSMainRuntimeCompartmentsSystem,
                             aAmount);
}
2470 
// Delegates to the registered mJSMainRuntimeCompartmentsUser amount function;
// NS_ERROR_NOT_AVAILABLE if none has been registered.
NS_IMETHODIMP
nsMemoryReporterManager::GetJSMainRuntimeCompartmentsUser(int64_t* aAmount) {
  return GetInfallibleAmount(mAmountFns.mJSMainRuntimeCompartmentsUser,
                             aAmount);
}
2476 
// Delegates to the registered mJSMainRuntimeRealmsSystem amount function;
// NS_ERROR_NOT_AVAILABLE if none has been registered.
NS_IMETHODIMP
nsMemoryReporterManager::GetJSMainRuntimeRealmsSystem(int64_t* aAmount) {
  return GetInfallibleAmount(mAmountFns.mJSMainRuntimeRealmsSystem, aAmount);
}
2481 
// Delegates to the registered mJSMainRuntimeRealmsUser amount function;
// NS_ERROR_NOT_AVAILABLE if none has been registered.
NS_IMETHODIMP
nsMemoryReporterManager::GetJSMainRuntimeRealmsUser(int64_t* aAmount) {
  return GetInfallibleAmount(mAmountFns.mJSMainRuntimeRealmsUser, aAmount);
}
2486 
// Delegates to the registered mImagesContentUsedUncompressed amount function;
// NS_ERROR_NOT_AVAILABLE if none has been registered.
NS_IMETHODIMP
nsMemoryReporterManager::GetImagesContentUsedUncompressed(int64_t* aAmount) {
  return GetInfallibleAmount(mAmountFns.mImagesContentUsedUncompressed,
                             aAmount);
}
2492 
// Delegates to the registered mStorageSQLite amount function;
// NS_ERROR_NOT_AVAILABLE if none has been registered.
NS_IMETHODIMP
nsMemoryReporterManager::GetStorageSQLite(int64_t* aAmount) {
  return GetInfallibleAmount(mAmountFns.mStorageSQLite, aAmount);
}
2497 
// Delegates to the registered mLowMemoryEventsPhysical amount function;
// NS_ERROR_NOT_AVAILABLE if none has been registered.
NS_IMETHODIMP
nsMemoryReporterManager::GetLowMemoryEventsPhysical(int64_t* aAmount) {
  return GetInfallibleAmount(mAmountFns.mLowMemoryEventsPhysical, aAmount);
}
2502 
// Delegates to the registered mGhostWindows amount function;
// NS_ERROR_NOT_AVAILABLE if none has been registered.
NS_IMETHODIMP
nsMemoryReporterManager::GetGhostWindows(int64_t* aAmount) {
  return GetInfallibleAmount(mAmountFns.mGhostWindows, aAmount);
}
2507 
// Hard page-fault count for this process, only on platforms that provide
// HAVE_PAGE_FAULT_REPORTERS; otherwise zeroed with NS_ERROR_NOT_AVAILABLE.
NS_IMETHODIMP
nsMemoryReporterManager::GetPageFaultsHard(int64_t* aAmount) {
#ifdef HAVE_PAGE_FAULT_REPORTERS
  return PageFaultsHardDistinguishedAmount(aAmount);
#else
  *aAmount = 0;
  return NS_ERROR_NOT_AVAILABLE;
#endif
}
2517 
2518 NS_IMETHODIMP
GetHasMozMallocUsableSize(bool * aHas)2519 nsMemoryReporterManager::GetHasMozMallocUsableSize(bool* aHas) {
2520   void* p = malloc(16);
2521   if (!p) {
2522     return NS_ERROR_OUT_OF_MEMORY;
2523   }
2524   size_t usable = moz_malloc_usable_size(p);
2525   free(p);
2526   *aHas = !!(usable > 0);
2527   return NS_OK;
2528 }
2529 
// Whether this build was compiled with DMD (dark matter detector) support.
NS_IMETHODIMP
nsMemoryReporterManager::GetIsDMDEnabled(bool* aIsEnabled) {
#ifdef MOZ_DMD
  *aIsEnabled = true;
#else
  *aIsEnabled = false;
#endif
  return NS_OK;
}
2539 
// Whether DMD is both compiled in and currently active at runtime.
NS_IMETHODIMP
nsMemoryReporterManager::GetIsDMDRunning(bool* aIsRunning) {
#ifdef MOZ_DMD
  *aIsRunning = dmd::IsRunning();
#else
  // Can never be running when DMD support isn't compiled in.
  *aIsRunning = false;
#endif
  return NS_OK;
}
2549 
2550 namespace {
2551 
2552 /**
2553  * This runnable lets us implement
2554  * nsIMemoryReporterManager::MinimizeMemoryUsage().  We fire a heap-minimize
2555  * notification, spin the event loop, and repeat this process a few times.
2556  *
2557  * When this sequence finishes, we invoke the callback function passed to the
2558  * runnable's constructor.
2559  */
2560 class MinimizeMemoryUsageRunnable : public Runnable {
2561  public:
MinimizeMemoryUsageRunnable(nsIRunnable * aCallback)2562   explicit MinimizeMemoryUsageRunnable(nsIRunnable* aCallback)
2563       : mozilla::Runnable("MinimizeMemoryUsageRunnable"),
2564         mCallback(aCallback),
2565         mRemainingIters(sNumIters) {}
2566 
Run()2567   NS_IMETHOD Run() override {
2568     nsCOMPtr<nsIObserverService> os = services::GetObserverService();
2569     if (!os) {
2570       return NS_ERROR_FAILURE;
2571     }
2572 
2573     if (mRemainingIters == 0) {
2574       os->NotifyObservers(nullptr, "after-minimize-memory-usage",
2575                           u"MinimizeMemoryUsageRunnable");
2576       if (mCallback) {
2577         mCallback->Run();
2578       }
2579       return NS_OK;
2580     }
2581 
2582     os->NotifyObservers(nullptr, "memory-pressure", u"heap-minimize");
2583     mRemainingIters--;
2584     NS_DispatchToMainThread(this);
2585 
2586     return NS_OK;
2587   }
2588 
2589  private:
2590   // Send sNumIters heap-minimize notifications, spinning the event
2591   // loop after each notification (see bug 610166 comment 12 for an
2592   // explanation), because one notification doesn't cut it.
2593   static const uint32_t sNumIters = 3;
2594 
2595   nsCOMPtr<nsIRunnable> mCallback;
2596   uint32_t mRemainingIters;
2597 };
2598 
2599 }  // namespace
2600 
2601 NS_IMETHODIMP
MinimizeMemoryUsage(nsIRunnable * aCallback)2602 nsMemoryReporterManager::MinimizeMemoryUsage(nsIRunnable* aCallback) {
2603   RefPtr<MinimizeMemoryUsageRunnable> runnable =
2604       new MinimizeMemoryUsageRunnable(aCallback);
2605 
2606   return NS_DispatchToMainThread(runnable);
2607 }
2608 
2609 NS_IMETHODIMP
SizeOfTab(mozIDOMWindowProxy * aTopWindow,int64_t * aJSObjectsSize,int64_t * aJSStringsSize,int64_t * aJSOtherSize,int64_t * aDomSize,int64_t * aStyleSize,int64_t * aOtherSize,int64_t * aTotalSize,double * aJSMilliseconds,double * aNonJSMilliseconds)2610 nsMemoryReporterManager::SizeOfTab(mozIDOMWindowProxy* aTopWindow,
2611                                    int64_t* aJSObjectsSize,
2612                                    int64_t* aJSStringsSize,
2613                                    int64_t* aJSOtherSize, int64_t* aDomSize,
2614                                    int64_t* aStyleSize, int64_t* aOtherSize,
2615                                    int64_t* aTotalSize, double* aJSMilliseconds,
2616                                    double* aNonJSMilliseconds) {
2617   nsCOMPtr<nsIGlobalObject> global = do_QueryInterface(aTopWindow);
2618   auto* piWindow = nsPIDOMWindowOuter::From(aTopWindow);
2619   if (NS_WARN_IF(!global) || NS_WARN_IF(!piWindow)) {
2620     return NS_ERROR_FAILURE;
2621   }
2622 
2623   TimeStamp t1 = TimeStamp::Now();
2624 
2625   // Measure JS memory consumption (and possibly some non-JS consumption, via
2626   // |jsPrivateSize|).
2627   size_t jsObjectsSize, jsStringsSize, jsPrivateSize, jsOtherSize;
2628   nsresult rv = mSizeOfTabFns.mJS(global->GetGlobalJSObject(), &jsObjectsSize,
2629                                   &jsStringsSize, &jsPrivateSize, &jsOtherSize);
2630   if (NS_WARN_IF(NS_FAILED(rv))) {
2631     return rv;
2632   }
2633 
2634   TimeStamp t2 = TimeStamp::Now();
2635 
2636   // Measure non-JS memory consumption.
2637   size_t domSize, styleSize, otherSize;
2638   rv = mSizeOfTabFns.mNonJS(piWindow, &domSize, &styleSize, &otherSize);
2639   if (NS_WARN_IF(NS_FAILED(rv))) {
2640     return rv;
2641   }
2642 
2643   TimeStamp t3 = TimeStamp::Now();
2644 
2645   *aTotalSize = 0;
2646 #define DO(aN, n)       \
2647   {                     \
2648     *aN = (n);          \
2649     *aTotalSize += (n); \
2650   }
2651   DO(aJSObjectsSize, jsObjectsSize);
2652   DO(aJSStringsSize, jsStringsSize);
2653   DO(aJSOtherSize, jsOtherSize);
2654   DO(aDomSize, jsPrivateSize + domSize);
2655   DO(aStyleSize, styleSize);
2656   DO(aOtherSize, otherSize);
2657 #undef DO
2658 
2659   *aJSMilliseconds = (t2 - t1).ToMilliseconds();
2660   *aNonJSMilliseconds = (t3 - t2).ToMilliseconds();
2661 
2662   return NS_OK;
2663 }
2664 
namespace mozilla {

// Fetches (creating on demand) the singleton nsMemoryReporterManager into a
// local RefPtr named |mgr|; bails out of the *enclosing function* with
// NS_ERROR_FAILURE if it is unavailable.  Only usable inside functions that
// return nsresult.
#define GET_MEMORY_REPORTER_MANAGER(mgr)      \
  RefPtr<nsMemoryReporterManager> mgr =       \
      nsMemoryReporterManager::GetOrCreate(); \
  if (!mgr) {                                 \
    return NS_ERROR_FAILURE;                  \
  }
2673 
RegisterStrongMemoryReporter(nsIMemoryReporter * aReporter)2674 nsresult RegisterStrongMemoryReporter(nsIMemoryReporter* aReporter) {
2675   // Hold a strong reference to the argument to make sure it gets released if
2676   // we return early below.
2677   nsCOMPtr<nsIMemoryReporter> reporter = aReporter;
2678   GET_MEMORY_REPORTER_MANAGER(mgr)
2679   return mgr->RegisterStrongReporter(reporter);
2680 }
2681 
RegisterStrongAsyncMemoryReporter(nsIMemoryReporter * aReporter)2682 nsresult RegisterStrongAsyncMemoryReporter(nsIMemoryReporter* aReporter) {
2683   // Hold a strong reference to the argument to make sure it gets released if
2684   // we return early below.
2685   nsCOMPtr<nsIMemoryReporter> reporter = aReporter;
2686   GET_MEMORY_REPORTER_MANAGER(mgr)
2687   return mgr->RegisterStrongAsyncReporter(reporter);
2688 }
2689 
// Weak registration: unlike the Strong variants above, no extra reference to
// |aReporter| is taken here before forwarding to the manager.
nsresult RegisterWeakMemoryReporter(nsIMemoryReporter* aReporter) {
  GET_MEMORY_REPORTER_MANAGER(mgr)
  return mgr->RegisterWeakReporter(aReporter);
}
2694 
// Weak async registration: unlike the Strong variants above, no extra
// reference to |aReporter| is taken here before forwarding to the manager.
nsresult RegisterWeakAsyncMemoryReporter(nsIMemoryReporter* aReporter) {
  GET_MEMORY_REPORTER_MANAGER(mgr)
  return mgr->RegisterWeakAsyncReporter(aReporter);
}
2699 
// Forwards to the manager's strong-reporter unregistration.
nsresult UnregisterStrongMemoryReporter(nsIMemoryReporter* aReporter) {
  GET_MEMORY_REPORTER_MANAGER(mgr)
  return mgr->UnregisterStrongReporter(aReporter);
}
2704 
// Forwards to the manager's weak-reporter unregistration.
nsresult UnregisterWeakMemoryReporter(nsIMemoryReporter* aReporter) {
  GET_MEMORY_REPORTER_MANAGER(mgr)
  return mgr->UnregisterWeakReporter(aReporter);
}
2709 
// Macro for generating functions that register distinguished amount functions
// with the memory reporter manager.
#define DEFINE_REGISTER_DISTINGUISHED_AMOUNT(kind, name)                   \
  nsresult Register##name##DistinguishedAmount(kind##AmountFn aAmountFn) { \
    GET_MEMORY_REPORTER_MANAGER(mgr)                                       \
    mgr->mAmountFns.m##name = aAmountFn;                                   \
    return NS_OK;                                                          \
  }

// Macro for generating functions that unregister distinguished amount
// functions with the memory reporter manager.
#define DEFINE_UNREGISTER_DISTINGUISHED_AMOUNT(name) \
  nsresult Unregister##name##DistinguishedAmount() { \
    GET_MEMORY_REPORTER_MANAGER(mgr)                 \
    mgr->mAmountFns.m##name = nullptr;               \
    return NS_OK;                                    \
  }

// NOTE(review): only some amounts below get a matching Unregister function —
// presumably the others are registered for the process lifetime; confirm
// before relying on that.
DEFINE_REGISTER_DISTINGUISHED_AMOUNT(Infallible, JSMainRuntimeGCHeap)
DEFINE_REGISTER_DISTINGUISHED_AMOUNT(Infallible, JSMainRuntimeTemporaryPeak)
DEFINE_REGISTER_DISTINGUISHED_AMOUNT(Infallible,
                                     JSMainRuntimeCompartmentsSystem)
DEFINE_REGISTER_DISTINGUISHED_AMOUNT(Infallible, JSMainRuntimeCompartmentsUser)
DEFINE_REGISTER_DISTINGUISHED_AMOUNT(Infallible, JSMainRuntimeRealmsSystem)
DEFINE_REGISTER_DISTINGUISHED_AMOUNT(Infallible, JSMainRuntimeRealmsUser)

DEFINE_REGISTER_DISTINGUISHED_AMOUNT(Infallible, ImagesContentUsedUncompressed)
DEFINE_UNREGISTER_DISTINGUISHED_AMOUNT(ImagesContentUsedUncompressed)

DEFINE_REGISTER_DISTINGUISHED_AMOUNT(Infallible, StorageSQLite)
DEFINE_UNREGISTER_DISTINGUISHED_AMOUNT(StorageSQLite)

DEFINE_REGISTER_DISTINGUISHED_AMOUNT(Infallible, LowMemoryEventsPhysical)

DEFINE_REGISTER_DISTINGUISHED_AMOUNT(Infallible, GhostWindows)

#undef DEFINE_REGISTER_DISTINGUISHED_AMOUNT
#undef DEFINE_UNREGISTER_DISTINGUISHED_AMOUNT

// Macro for generating Register{JS,NonJS}SizeOfTab, which install the
// per-tab measurement callbacks stored in mSizeOfTabFns.
#define DEFINE_REGISTER_SIZE_OF_TAB(name)                              \
  nsresult Register##name##SizeOfTab(name##SizeOfTabFn aSizeOfTabFn) { \
    GET_MEMORY_REPORTER_MANAGER(mgr)                                   \
    mgr->mSizeOfTabFns.m##name = aSizeOfTabFn;                         \
    return NS_OK;                                                      \
  }

DEFINE_REGISTER_SIZE_OF_TAB(JS);
DEFINE_REGISTER_SIZE_OF_TAB(NonJS);

#undef DEFINE_REGISTER_SIZE_OF_TAB

#undef GET_MEMORY_REPORTER_MANAGER

}  // namespace mozilla
2764