1 /* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2; -*- */
2 /* This Source Code Form is subject to the terms of the Mozilla Public
3 * License, v. 2.0. If a copy of the MPL was not distributed with this
4 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
5
6 #include "HeapSnapshot.h"
7
8 #include <google/protobuf/io/coded_stream.h>
9 #include <google/protobuf/io/gzip_stream.h>
10 #include <google/protobuf/io/zero_copy_stream_impl_lite.h>
11
12 #include "js/Array.h" // JS::NewArrayObject
13 #include "js/Debug.h"
14 #include "js/TypeDecls.h"
15 #include "js/UbiNodeBreadthFirst.h"
16 #include "js/UbiNodeCensus.h"
17 #include "js/UbiNodeDominatorTree.h"
18 #include "js/UbiNodeShortestPaths.h"
19 #include "mozilla/Attributes.h"
20 #include "mozilla/CycleCollectedJSContext.h"
21 #include "mozilla/devtools/AutoMemMap.h"
22 #include "mozilla/devtools/CoreDump.pb.h"
23 #include "mozilla/devtools/DeserializedNode.h"
24 #include "mozilla/devtools/DominatorTree.h"
25 #include "mozilla/devtools/FileDescriptorOutputStream.h"
26 #include "mozilla/devtools/HeapSnapshotTempFileHelperChild.h"
27 #include "mozilla/devtools/ZeroCopyNSIOutputStream.h"
28 #include "mozilla/dom/ChromeUtils.h"
29 #include "mozilla/dom/ContentChild.h"
30 #include "mozilla/dom/HeapSnapshotBinding.h"
31 #include "mozilla/RangedPtr.h"
32 #include "mozilla/Telemetry.h"
33 #include "mozilla/Unused.h"
34
35 #include "jsapi.h"
36 #include "jsfriendapi.h"
37 #include "js/Object.h" // JS::GetCompartment
38 #include "nsCycleCollectionParticipant.h"
39 #include "nsCRTGlue.h"
40 #include "nsIFile.h"
41 #include "nsIOutputStream.h"
42 #include "nsISupportsImpl.h"
43 #include "nsNetUtil.h"
44 #include "nsPrintfCString.h"
45 #include "prerror.h"
46 #include "prio.h"
47 #include "prtypes.h"
48 #include "SpecialSystemDirectory.h"
49
50 namespace mozilla {
51 namespace devtools {
52
53 using namespace JS;
54 using namespace dom;
55
56 using ::google::protobuf::io::ArrayInputStream;
57 using ::google::protobuf::io::CodedInputStream;
58 using ::google::protobuf::io::GzipInputStream;
59 using ::google::protobuf::io::ZeroCopyInputStream;
60
61 using JS::ubi::AtomOrTwoByteChars;
62 using JS::ubi::ShortestPaths;
63
GetCurrentThreadDebuggerMallocSizeOf()64 MallocSizeOf GetCurrentThreadDebuggerMallocSizeOf() {
65 auto ccjscx = CycleCollectedJSContext::Get();
66 MOZ_ASSERT(ccjscx);
67 auto cx = ccjscx->Context();
68 MOZ_ASSERT(cx);
69 auto mallocSizeOf = JS::dbg::GetDebuggerMallocSizeOf(cx);
70 MOZ_ASSERT(mallocSizeOf);
71 return mallocSizeOf;
72 }
73
74 /*** Cycle Collection Boilerplate *********************************************/
75
// Participate in cycle collection, tracing the one strong reference `mParent`,
// and cache this object's JS wrapper (HeapSnapshot is a wrapper-cached DOM
// object).
NS_IMPL_CYCLE_COLLECTION_WRAPPERCACHE(HeapSnapshot, mParent)

// Standard cycle-collecting refcounting for HeapSnapshot.
NS_IMPL_CYCLE_COLLECTING_ADDREF(HeapSnapshot)
NS_IMPL_CYCLE_COLLECTING_RELEASE(HeapSnapshot)

// QueryInterface map: HeapSnapshot only exposes nsISupports (plus the
// wrapper-cache entry); all real functionality is via the WebIDL binding.
NS_INTERFACE_MAP_BEGIN_CYCLE_COLLECTION(HeapSnapshot)
  NS_WRAPPERCACHE_INTERFACE_MAP_ENTRY
  NS_INTERFACE_MAP_ENTRY(nsISupports)
NS_INTERFACE_MAP_END
85
/* virtual */
// Create the JS reflector for this HeapSnapshot via the generated WebIDL
// binding code.
JSObject* HeapSnapshot::WrapObject(JSContext* aCx, HandleObject aGivenProto) {
  return HeapSnapshot_Binding::Wrap(aCx, this, aGivenProto);
}
90
91 /*** Reading Heap Snapshots ***************************************************/
92
93 /* static */
Create(JSContext * cx,GlobalObject & global,const uint8_t * buffer,uint32_t size,ErrorResult & rv)94 already_AddRefed<HeapSnapshot> HeapSnapshot::Create(JSContext* cx,
95 GlobalObject& global,
96 const uint8_t* buffer,
97 uint32_t size,
98 ErrorResult& rv) {
99 RefPtr<HeapSnapshot> snapshot = new HeapSnapshot(cx, global.GetAsSupports());
100 if (!snapshot->init(cx, buffer, size)) {
101 rv.Throw(NS_ERROR_UNEXPECTED);
102 return nullptr;
103 }
104 return snapshot.forget();
105 }
106
// Parse one length-delimited protobuf message of exactly `sizeOfMessage` bytes
// from `stream` into `message`. Returns false if parsing fails or if the
// message does not consume precisely `sizeOfMessage` bytes.
template <typename MessageType>
static bool parseMessage(ZeroCopyInputStream& stream, uint32_t sizeOfMessage,
                         MessageType& message) {
  // We need to create a new `CodedInputStream` for each message so that the
  // 64MB limit is applied per-message rather than to the whole stream.
  CodedInputStream codedStream(&stream);

  // The protobuf message nesting that core dumps exhibit is dominated by
  // allocation stacks' frames. In the most deeply nested case, each frame has
  // two messages: a StackFrame message and a StackFrame::Data message. These
  // frames are on top of a small constant of other messages. There are a
  // MAX_STACK_DEPTH number of frames, so we multiply this by 3 to make room for
  // the two messages per frame plus some head room for the constant number of
  // non-dominating messages.
  codedStream.SetRecursionLimit(HeapSnapshot::MAX_STACK_DEPTH * 3);

  // Limit reads to this message's bytes; without the limit, the parse would
  // happily consume the rest of the stream.
  auto limit = codedStream.PushLimit(sizeOfMessage);
  if (NS_WARN_IF(!message.ParseFromCodedStream(&codedStream)) ||
      NS_WARN_IF(!codedStream.ConsumedEntireMessage()) ||
      NS_WARN_IF(codedStream.BytesUntilLimit() != 0)) {
    return false;
  }

  codedStream.PopLimit(limit);
  return true;
}
133
// Matcher for a de-duplicated string value from a core dump: it is either an
// inline string that must be interned now, or a numeric back-reference into
// the set of strings already interned.
template <typename CharT, typename InternedStringSet>
struct GetOrInternStringMatcher {
  InternedStringSet& internedStrings;

  explicit GetOrInternStringMatcher(InternedStringSet& strings)
      : internedStrings(strings) {}

  // Inline-string case: copy the message's raw bytes into a freshly allocated
  // CharT buffer, append it to the intern table, and return the interned
  // pointer. Returns nullptr on OOM.
  const CharT* operator()(const std::string* str) {
    MOZ_ASSERT(str);
    // The serialized payload is raw bytes reinterpreted as CharT units, so
    // the character count is the byte length divided by the char width.
    size_t length = str->length() / sizeof(CharT);
    auto tempString = reinterpret_cast<const CharT*>(str->data());

    UniqueFreePtr<CharT[]> owned(NS_xstrndup(tempString, length));
    if (!internedStrings.append(std::move(owned))) return nullptr;

    return internedStrings.back().get();
  }

  // Back-reference case: `ref` indexes a previously interned string. Returns
  // nullptr for an out-of-bounds index (malformed core dump).
  const CharT* operator()(uint64_t ref) {
    if (MOZ_LIKELY(ref < internedStrings.length())) {
      auto& string = internedStrings[ref];
      MOZ_ASSERT(string);
      return string.get();
    }

    return nullptr;
  }
};
162
163 template <
164 // Either char or char16_t.
165 typename CharT,
166 // A reference to either `internedOneByteStrings` or
167 // `internedTwoByteStrings` if CharT is char or char16_t respectively.
168 typename InternedStringSet>
getOrInternString(InternedStringSet & internedStrings,Maybe<StringOrRef> & maybeStrOrRef)169 const CharT* HeapSnapshot::getOrInternString(
170 InternedStringSet& internedStrings, Maybe<StringOrRef>& maybeStrOrRef) {
171 // Incomplete message: has neither a string nor a reference to an already
172 // interned string.
173 if (MOZ_UNLIKELY(maybeStrOrRef.isNothing())) return nullptr;
174
175 GetOrInternStringMatcher<CharT, InternedStringSet> m(internedStrings);
176 return maybeStrOrRef->match(m);
177 }
178
// Get a de-duplicated string as a Maybe<StringOrRef> from the given `msg`:
// prefer the numeric back-reference field if present, otherwise take the
// inline string (by pointer), otherwise Nothing() for an incomplete message.
// This variant is for messages whose string/ref fields don't follow the
// `foo`/`fooref` naming convention.
#define GET_STRING_OR_REF_WITH_PROP_NAMES(msg, strPropertyName,               \
                                          refPropertyName)                    \
  (msg.has_##refPropertyName()   ? Some(StringOrRef(msg.refPropertyName()))   \
   : msg.has_##strPropertyName() ? Some(StringOrRef(&msg.strPropertyName())) \
                                 : Nothing())

// Same as above for the common case where the ref field is named by appending
// "ref" to the string field's name.
#define GET_STRING_OR_REF(msg, property)                         \
  (msg.has_##property##ref() ? Some(StringOrRef(msg.property##ref())) \
   : msg.has_##property()    ? Some(StringOrRef(&msg.property()))     \
                             : Nothing())
190
saveNode(const protobuf::Node & node,NodeIdSet & edgeReferents)191 bool HeapSnapshot::saveNode(const protobuf::Node& node,
192 NodeIdSet& edgeReferents) {
193 // NB: de-duplicated string properties must be read back and interned in the
194 // same order here as they are written and serialized in
195 // `CoreDumpWriter::writeNode` or else indices in references to already
196 // serialized strings will be off.
197
198 if (NS_WARN_IF(!node.has_id())) return false;
199 NodeId id = node.id();
200
201 // NodeIds are derived from pointers (at most 48 bits) and we rely on them
202 // fitting into JS numbers (IEEE 754 doubles, can precisely store 53 bit
203 // integers) despite storing them on disk as 64 bit integers.
204 if (NS_WARN_IF(!JS::Value::isNumberRepresentable(id))) return false;
205
206 // Should only deserialize each node once.
207 if (NS_WARN_IF(nodes.has(id))) return false;
208
209 if (NS_WARN_IF(!JS::ubi::Uint32IsValidCoarseType(node.coarsetype())))
210 return false;
211 auto coarseType = JS::ubi::Uint32ToCoarseType(node.coarsetype());
212
213 Maybe<StringOrRef> typeNameOrRef =
214 GET_STRING_OR_REF_WITH_PROP_NAMES(node, typename_, typenameref);
215 auto typeName =
216 getOrInternString<char16_t>(internedTwoByteStrings, typeNameOrRef);
217 if (NS_WARN_IF(!typeName)) return false;
218
219 if (NS_WARN_IF(!node.has_size())) return false;
220 uint64_t size = node.size();
221
222 auto edgesLength = node.edges_size();
223 DeserializedNode::EdgeVector edges;
224 if (NS_WARN_IF(!edges.reserve(edgesLength))) return false;
225 for (decltype(edgesLength) i = 0; i < edgesLength; i++) {
226 auto& protoEdge = node.edges(i);
227
228 if (NS_WARN_IF(!protoEdge.has_referent())) return false;
229 NodeId referent = protoEdge.referent();
230
231 if (NS_WARN_IF(!edgeReferents.put(referent))) return false;
232
233 const char16_t* edgeName = nullptr;
234 if (protoEdge.EdgeNameOrRef_case() !=
235 protobuf::Edge::EDGENAMEORREF_NOT_SET) {
236 Maybe<StringOrRef> edgeNameOrRef = GET_STRING_OR_REF(protoEdge, name);
237 edgeName =
238 getOrInternString<char16_t>(internedTwoByteStrings, edgeNameOrRef);
239 if (NS_WARN_IF(!edgeName)) return false;
240 }
241
242 edges.infallibleAppend(DeserializedEdge(referent, edgeName));
243 }
244
245 Maybe<StackFrameId> allocationStack;
246 if (node.has_allocationstack()) {
247 StackFrameId id = 0;
248 if (NS_WARN_IF(!saveStackFrame(node.allocationstack(), id))) return false;
249 allocationStack.emplace(id);
250 }
251 MOZ_ASSERT(allocationStack.isSome() == node.has_allocationstack());
252
253 const char* jsObjectClassName = nullptr;
254 if (node.JSObjectClassNameOrRef_case() !=
255 protobuf::Node::JSOBJECTCLASSNAMEORREF_NOT_SET) {
256 Maybe<StringOrRef> clsNameOrRef =
257 GET_STRING_OR_REF(node, jsobjectclassname);
258 jsObjectClassName =
259 getOrInternString<char>(internedOneByteStrings, clsNameOrRef);
260 if (NS_WARN_IF(!jsObjectClassName)) return false;
261 }
262
263 const char* scriptFilename = nullptr;
264 if (node.ScriptFilenameOrRef_case() !=
265 protobuf::Node::SCRIPTFILENAMEORREF_NOT_SET) {
266 Maybe<StringOrRef> scriptFilenameOrRef =
267 GET_STRING_OR_REF(node, scriptfilename);
268 scriptFilename =
269 getOrInternString<char>(internedOneByteStrings, scriptFilenameOrRef);
270 if (NS_WARN_IF(!scriptFilename)) return false;
271 }
272
273 const char16_t* descriptiveTypeName = nullptr;
274 if (node.descriptiveTypeNameOrRef_case() !=
275 protobuf::Node::DESCRIPTIVETYPENAMEORREF_NOT_SET) {
276 Maybe<StringOrRef> descriptiveTypeNameOrRef =
277 GET_STRING_OR_REF(node, descriptivetypename);
278 descriptiveTypeName = getOrInternString<char16_t>(internedTwoByteStrings,
279 descriptiveTypeNameOrRef);
280 if (NS_WARN_IF(!descriptiveTypeName)) return false;
281 }
282
283 if (NS_WARN_IF(!nodes.putNew(
284 id, DeserializedNode(id, coarseType, typeName, size, std::move(edges),
285 allocationStack, jsObjectClassName,
286 scriptFilename, descriptiveTypeName, *this)))) {
287 return false;
288 };
289
290 return true;
291 }
292
saveStackFrame(const protobuf::StackFrame & frame,StackFrameId & outFrameId)293 bool HeapSnapshot::saveStackFrame(const protobuf::StackFrame& frame,
294 StackFrameId& outFrameId) {
295 // NB: de-duplicated string properties must be read in the same order here as
296 // they are written in `CoreDumpWriter::getProtobufStackFrame` or else indices
297 // in references to already serialized strings will be off.
298
299 if (frame.has_ref()) {
300 // We should only get a reference to the previous frame if we have already
301 // seen the previous frame.
302 if (!frames.has(frame.ref())) return false;
303
304 outFrameId = frame.ref();
305 return true;
306 }
307
308 // Incomplete message.
309 if (!frame.has_data()) return false;
310
311 auto data = frame.data();
312
313 if (!data.has_id()) return false;
314 StackFrameId id = data.id();
315
316 // This should be the first and only time we see this frame.
317 if (frames.has(id)) return false;
318
319 if (!data.has_line()) return false;
320 uint32_t line = data.line();
321
322 if (!data.has_column()) return false;
323 uint32_t column = data.column();
324
325 if (!data.has_issystem()) return false;
326 bool isSystem = data.issystem();
327
328 if (!data.has_isselfhosted()) return false;
329 bool isSelfHosted = data.isselfhosted();
330
331 Maybe<StringOrRef> sourceOrRef = GET_STRING_OR_REF(data, source);
332 auto source =
333 getOrInternString<char16_t>(internedTwoByteStrings, sourceOrRef);
334 if (!source) return false;
335
336 const char16_t* functionDisplayName = nullptr;
337 if (data.FunctionDisplayNameOrRef_case() !=
338 protobuf::StackFrame_Data::FUNCTIONDISPLAYNAMEORREF_NOT_SET) {
339 Maybe<StringOrRef> nameOrRef = GET_STRING_OR_REF(data, functiondisplayname);
340 functionDisplayName =
341 getOrInternString<char16_t>(internedTwoByteStrings, nameOrRef);
342 if (!functionDisplayName) return false;
343 }
344
345 Maybe<StackFrameId> parent;
346 if (data.has_parent()) {
347 StackFrameId parentId = 0;
348 if (!saveStackFrame(data.parent(), parentId)) return false;
349 parent = Some(parentId);
350 }
351
352 if (!frames.putNew(id,
353 DeserializedStackFrame(id, parent, line, column, source,
354 functionDisplayName, isSystem,
355 isSelfHosted, *this))) {
356 return false;
357 }
358
359 outFrameId = id;
360 return true;
361 }
362
363 #undef GET_STRING_OR_REF_WITH_PROP_NAMES
364 #undef GET_STRING_OR_REF
365
366 // Because protobuf messages aren't self-delimiting, we serialize each message
367 // preceded by its size in bytes. When deserializing, we read this size and then
368 // limit reading from the stream to the given byte size. If we didn't, then the
369 // first message would consume the entire stream.
readSizeOfNextMessage(ZeroCopyInputStream & stream,uint32_t * sizep)370 static bool readSizeOfNextMessage(ZeroCopyInputStream& stream,
371 uint32_t* sizep) {
372 MOZ_ASSERT(sizep);
373 CodedInputStream codedStream(&stream);
374 return codedStream.ReadVarint32(sizep) && *sizep > 0;
375 }
376
init(JSContext * cx,const uint8_t * buffer,uint32_t size)377 bool HeapSnapshot::init(JSContext* cx, const uint8_t* buffer, uint32_t size) {
378 ArrayInputStream stream(buffer, size);
379 GzipInputStream gzipStream(&stream);
380 uint32_t sizeOfMessage = 0;
381
382 // First is the metadata.
383
384 protobuf::Metadata metadata;
385 if (NS_WARN_IF(!readSizeOfNextMessage(gzipStream, &sizeOfMessage)))
386 return false;
387 if (!parseMessage(gzipStream, sizeOfMessage, metadata)) return false;
388 if (metadata.has_timestamp()) timestamp.emplace(metadata.timestamp());
389
390 // Next is the root node.
391
392 protobuf::Node root;
393 if (NS_WARN_IF(!readSizeOfNextMessage(gzipStream, &sizeOfMessage)))
394 return false;
395 if (!parseMessage(gzipStream, sizeOfMessage, root)) return false;
396
397 // Although the id is optional in the protobuf format for future proofing, we
398 // can't currently do anything without it.
399 if (NS_WARN_IF(!root.has_id())) return false;
400 rootId = root.id();
401
402 // The set of all node ids we've found edges pointing to.
403 NodeIdSet edgeReferents(cx);
404
405 if (NS_WARN_IF(!saveNode(root, edgeReferents))) return false;
406
407 // Finally, the rest of the nodes in the core dump.
408
409 // Test for the end of the stream. The protobuf library gives no way to tell
410 // the difference between an underlying read error and the stream being
411 // done. All we can do is attempt to read the size of the next message and
412 // extrapolate guestimations from the result of that operation.
413 while (readSizeOfNextMessage(gzipStream, &sizeOfMessage)) {
414 protobuf::Node node;
415 if (!parseMessage(gzipStream, sizeOfMessage, node)) return false;
416 if (NS_WARN_IF(!saveNode(node, edgeReferents))) return false;
417 }
418
419 // Check the set of node ids referred to by edges we found and ensure that we
420 // have the node corresponding to each id. If we don't have all of them, it is
421 // unsafe to perform analyses of this heap snapshot.
422 for (auto iter = edgeReferents.iter(); !iter.done(); iter.next()) {
423 if (NS_WARN_IF(!nodes.has(iter.get()))) return false;
424 }
425
426 return true;
427 }
428
429 /*** Heap Snapshot Analyses ***************************************************/
430
TakeCensus(JSContext * cx,JS::HandleObject options,JS::MutableHandleValue rval,ErrorResult & rv)431 void HeapSnapshot::TakeCensus(JSContext* cx, JS::HandleObject options,
432 JS::MutableHandleValue rval, ErrorResult& rv) {
433 JS::ubi::Census census(cx);
434
435 JS::ubi::CountTypePtr rootType;
436 if (NS_WARN_IF(!JS::ubi::ParseCensusOptions(cx, census, options, rootType))) {
437 rv.Throw(NS_ERROR_UNEXPECTED);
438 return;
439 }
440
441 JS::ubi::RootedCount rootCount(cx, rootType->makeCount());
442 if (NS_WARN_IF(!rootCount)) {
443 rv.Throw(NS_ERROR_OUT_OF_MEMORY);
444 return;
445 }
446
447 JS::ubi::CensusHandler handler(census, rootCount,
448 GetCurrentThreadDebuggerMallocSizeOf());
449
450 {
451 JS::AutoCheckCannotGC nogc;
452
453 JS::ubi::CensusTraversal traversal(cx, handler, nogc);
454
455 if (NS_WARN_IF(!traversal.addStart(getRoot()))) {
456 rv.Throw(NS_ERROR_OUT_OF_MEMORY);
457 return;
458 }
459
460 if (NS_WARN_IF(!traversal.traverse())) {
461 rv.Throw(NS_ERROR_UNEXPECTED);
462 return;
463 }
464 }
465
466 if (NS_WARN_IF(!handler.report(cx, rval))) {
467 rv.Throw(NS_ERROR_OUT_OF_MEMORY);
468 return;
469 }
470 }
471
DescribeNode(JSContext * cx,JS::HandleObject breakdown,uint64_t nodeId,JS::MutableHandleValue rval,ErrorResult & rv)472 void HeapSnapshot::DescribeNode(JSContext* cx, JS::HandleObject breakdown,
473 uint64_t nodeId, JS::MutableHandleValue rval,
474 ErrorResult& rv) {
475 MOZ_ASSERT(breakdown);
476 JS::RootedValue breakdownVal(cx, JS::ObjectValue(*breakdown));
477 JS::ubi::CountTypePtr rootType = JS::ubi::ParseBreakdown(cx, breakdownVal);
478 if (NS_WARN_IF(!rootType)) {
479 rv.Throw(NS_ERROR_UNEXPECTED);
480 return;
481 }
482
483 JS::ubi::RootedCount rootCount(cx, rootType->makeCount());
484 if (NS_WARN_IF(!rootCount)) {
485 rv.Throw(NS_ERROR_OUT_OF_MEMORY);
486 return;
487 }
488
489 JS::ubi::Node::Id id(nodeId);
490 Maybe<JS::ubi::Node> node = getNodeById(id);
491 if (NS_WARN_IF(node.isNothing())) {
492 rv.Throw(NS_ERROR_INVALID_ARG);
493 return;
494 }
495
496 MallocSizeOf mallocSizeOf = GetCurrentThreadDebuggerMallocSizeOf();
497 if (NS_WARN_IF(!rootCount->count(mallocSizeOf, *node))) {
498 rv.Throw(NS_ERROR_OUT_OF_MEMORY);
499 return;
500 }
501
502 if (NS_WARN_IF(!rootCount->report(cx, rval))) {
503 rv.Throw(NS_ERROR_OUT_OF_MEMORY);
504 return;
505 }
506 }
507
ComputeDominatorTree(ErrorResult & rv)508 already_AddRefed<DominatorTree> HeapSnapshot::ComputeDominatorTree(
509 ErrorResult& rv) {
510 Maybe<JS::ubi::DominatorTree> maybeTree;
511 {
512 auto ccjscx = CycleCollectedJSContext::Get();
513 MOZ_ASSERT(ccjscx);
514 auto cx = ccjscx->Context();
515 MOZ_ASSERT(cx);
516 JS::AutoCheckCannotGC nogc(cx);
517 maybeTree = JS::ubi::DominatorTree::Create(cx, nogc, getRoot());
518 }
519
520 if (NS_WARN_IF(maybeTree.isNothing())) {
521 rv.Throw(NS_ERROR_OUT_OF_MEMORY);
522 return nullptr;
523 }
524
525 return MakeAndAddRef<DominatorTree>(std::move(*maybeTree), this, mParent);
526 }
527
ComputeShortestPaths(JSContext * cx,uint64_t start,const Sequence<uint64_t> & targets,uint64_t maxNumPaths,JS::MutableHandleObject results,ErrorResult & rv)528 void HeapSnapshot::ComputeShortestPaths(JSContext* cx, uint64_t start,
529 const Sequence<uint64_t>& targets,
530 uint64_t maxNumPaths,
531 JS::MutableHandleObject results,
532 ErrorResult& rv) {
533 // First ensure that our inputs are valid.
534
535 if (NS_WARN_IF(maxNumPaths == 0)) {
536 rv.Throw(NS_ERROR_INVALID_ARG);
537 return;
538 }
539
540 Maybe<JS::ubi::Node> startNode = getNodeById(start);
541 if (NS_WARN_IF(startNode.isNothing())) {
542 rv.Throw(NS_ERROR_INVALID_ARG);
543 return;
544 }
545
546 if (NS_WARN_IF(targets.Length() == 0)) {
547 rv.Throw(NS_ERROR_INVALID_ARG);
548 return;
549 }
550
551 // Aggregate the targets into a set and make sure that they exist in the heap
552 // snapshot.
553
554 JS::ubi::NodeSet targetsSet;
555
556 for (const auto& target : targets) {
557 Maybe<JS::ubi::Node> targetNode = getNodeById(target);
558 if (NS_WARN_IF(targetNode.isNothing())) {
559 rv.Throw(NS_ERROR_INVALID_ARG);
560 return;
561 }
562
563 if (NS_WARN_IF(!targetsSet.put(*targetNode))) {
564 rv.Throw(NS_ERROR_OUT_OF_MEMORY);
565 return;
566 }
567 }
568
569 // Walk the heap graph and find the shortest paths.
570
571 Maybe<ShortestPaths> maybeShortestPaths;
572 {
573 JS::AutoCheckCannotGC nogc(cx);
574 maybeShortestPaths = ShortestPaths::Create(
575 cx, nogc, maxNumPaths, *startNode, std::move(targetsSet));
576 }
577
578 if (NS_WARN_IF(maybeShortestPaths.isNothing())) {
579 rv.Throw(NS_ERROR_OUT_OF_MEMORY);
580 return;
581 }
582
583 auto& shortestPaths = *maybeShortestPaths;
584
585 // Convert the results into a Map object mapping target node IDs to arrays of
586 // paths found.
587
588 RootedObject resultsMap(cx, JS::NewMapObject(cx));
589 if (NS_WARN_IF(!resultsMap)) {
590 rv.Throw(NS_ERROR_OUT_OF_MEMORY);
591 return;
592 }
593
594 for (auto iter = shortestPaths.targetIter(); !iter.done(); iter.next()) {
595 JS::RootedValue key(cx, JS::NumberValue(iter.get().identifier()));
596 JS::RootedValueVector paths(cx);
597
598 bool ok = shortestPaths.forEachPath(iter.get(), [&](JS::ubi::Path& path) {
599 JS::RootedValueVector pathValues(cx);
600
601 for (JS::ubi::BackEdge* edge : path) {
602 JS::RootedObject pathPart(cx, JS_NewPlainObject(cx));
603 if (!pathPart) {
604 return false;
605 }
606
607 JS::RootedValue predecessor(
608 cx, NumberValue(edge->predecessor().identifier()));
609 if (!JS_DefineProperty(cx, pathPart, "predecessor", predecessor,
610 JSPROP_ENUMERATE)) {
611 return false;
612 }
613
614 RootedValue edgeNameVal(cx, NullValue());
615 if (edge->name()) {
616 RootedString edgeName(cx, JS_AtomizeUCString(cx, edge->name().get()));
617 if (!edgeName) {
618 return false;
619 }
620 edgeNameVal = StringValue(edgeName);
621 }
622
623 if (!JS_DefineProperty(cx, pathPart, "edge", edgeNameVal,
624 JSPROP_ENUMERATE)) {
625 return false;
626 }
627
628 if (!pathValues.append(ObjectValue(*pathPart))) {
629 return false;
630 }
631 }
632
633 RootedObject pathObj(cx, JS::NewArrayObject(cx, pathValues));
634 return pathObj && paths.append(ObjectValue(*pathObj));
635 });
636
637 if (NS_WARN_IF(!ok)) {
638 rv.Throw(NS_ERROR_OUT_OF_MEMORY);
639 return;
640 }
641
642 JS::RootedObject pathsArray(cx, JS::NewArrayObject(cx, paths));
643 if (NS_WARN_IF(!pathsArray)) {
644 rv.Throw(NS_ERROR_OUT_OF_MEMORY);
645 return;
646 }
647
648 JS::RootedValue pathsVal(cx, ObjectValue(*pathsArray));
649 if (NS_WARN_IF(!JS::MapSet(cx, resultsMap, key, pathsVal))) {
650 rv.Throw(NS_ERROR_OUT_OF_MEMORY);
651 return;
652 }
653 }
654
655 results.set(resultsMap);
656 }
657
658 /*** Saving Heap Snapshots ****************************************************/
659
660 // If we are only taking a snapshot of the heap affected by the given set of
661 // globals, find the set of compartments the globals are allocated
662 // within. Returns false on OOM failure.
PopulateCompartmentsWithGlobals(CompartmentSet & compartments,HandleObjectVector globals)663 static bool PopulateCompartmentsWithGlobals(CompartmentSet& compartments,
664 HandleObjectVector globals) {
665 unsigned length = globals.length();
666 for (unsigned i = 0; i < length; i++) {
667 if (!compartments.put(JS::GetCompartment(globals[i]))) return false;
668 }
669
670 return true;
671 }
672
673 // Add the given set of globals as explicit roots in the given roots
674 // list. Returns false on OOM failure.
AddGlobalsAsRoots(HandleObjectVector globals,ubi::RootList & roots)675 static bool AddGlobalsAsRoots(HandleObjectVector globals,
676 ubi::RootList& roots) {
677 unsigned length = globals.length();
678 for (unsigned i = 0; i < length; i++) {
679 if (!roots.addRoot(ubi::Node(globals[i].get()), u"heap snapshot global")) {
680 return false;
681 }
682 }
683 return true;
684 }
685
// Choose roots and limits for a traversal, given `boundaries`. Set `roots` to
// the set of nodes within the boundaries that are referred to by nodes
// outside. If `boundaries` does not include all JS compartments, initialize
// `compartments` to the set of included compartments; otherwise, leave
// `compartments` uninitialized. (You can use compartments.initialized() to
// check.)
//
// If `boundaries` is incoherent, or we encounter an error while trying to
// handle it, or we run out of memory, set `rv` appropriately and return
// `false`.
//
// Exactly one of the three boundary properties (runtime, debugger, globals)
// must be passed; `foundBoundaryProperty` enforces the mutual exclusion.
static bool EstablishBoundaries(JSContext* cx, ErrorResult& rv,
                                const HeapSnapshotBoundaries& boundaries,
                                ubi::RootList& roots,
                                CompartmentSet& compartments) {
  MOZ_ASSERT(!roots.initialized());
  MOZ_ASSERT(compartments.empty());

  bool foundBoundaryProperty = false;

  // Boundary 1: the entire runtime. No compartment filter; all roots.
  if (boundaries.mRuntime.WasPassed()) {
    foundBoundaryProperty = true;

    if (!boundaries.mRuntime.Value()) {
      rv.Throw(NS_ERROR_INVALID_ARG);
      return false;
    }

    if (!roots.init()) {
      rv.Throw(NS_ERROR_OUT_OF_MEMORY);
      return false;
    }
  }

  // Boundary 2: everything reachable from a Debugger's debuggee globals.
  if (boundaries.mDebugger.WasPassed()) {
    if (foundBoundaryProperty) {
      rv.Throw(NS_ERROR_INVALID_ARG);
      return false;
    }
    foundBoundaryProperty = true;

    JSObject* dbgObj = boundaries.mDebugger.Value();
    if (!dbgObj || !dbg::IsDebugger(*dbgObj)) {
      rv.Throw(NS_ERROR_INVALID_ARG);
      return false;
    }

    RootedObjectVector globals(cx);
    if (!dbg::GetDebuggeeGlobals(cx, *dbgObj, &globals) ||
        !PopulateCompartmentsWithGlobals(compartments, globals) ||
        !roots.init(compartments) || !AddGlobalsAsRoots(globals, roots)) {
      rv.Throw(NS_ERROR_OUT_OF_MEMORY);
      return false;
    }
  }

  // Boundary 3: everything reachable from an explicit list of globals.
  if (boundaries.mGlobals.WasPassed()) {
    if (foundBoundaryProperty) {
      rv.Throw(NS_ERROR_INVALID_ARG);
      return false;
    }
    foundBoundaryProperty = true;

    uint32_t length = boundaries.mGlobals.Value().Length();
    if (length == 0) {
      rv.Throw(NS_ERROR_INVALID_ARG);
      return false;
    }

    RootedObjectVector globals(cx);
    for (uint32_t i = 0; i < length; i++) {
      JSObject* global = boundaries.mGlobals.Value().ElementAt(i);
      // Every element must actually be a global object.
      if (!JS_IsGlobalObject(global)) {
        rv.Throw(NS_ERROR_INVALID_ARG);
        return false;
      }
      if (!globals.append(global)) {
        rv.Throw(NS_ERROR_OUT_OF_MEMORY);
        return false;
      }
    }

    if (!PopulateCompartmentsWithGlobals(compartments, globals) ||
        !roots.init(compartments) || !AddGlobalsAsRoots(globals, roots)) {
      rv.Throw(NS_ERROR_OUT_OF_MEMORY);
      return false;
    }
  }

  // Passing no boundary at all is invalid.
  if (!foundBoundaryProperty) {
    rv.Throw(NS_ERROR_INVALID_ARG);
    return false;
  }

  MOZ_ASSERT(roots.initialized());
  return true;
}
782
// A variant covering all the various two-byte strings that we can get from the
// ubi::Node API.
class TwoByteString
    : public Variant<JSAtom*, const char16_t*, JS::ubi::EdgeName> {
  using Base = Variant<JSAtom*, const char16_t*, JS::ubi::EdgeName>;

  // Matcher that copies a string's characters into a caller-provided buffer,
  // dispatching on whichever alternative is active. operator() returns the
  // number of characters written.
  struct CopyToBufferMatcher {
    RangedPtr<char16_t> destination;
    size_t maxLength;

    CopyToBufferMatcher(RangedPtr<char16_t> destination, size_t maxLength)
        : destination(destination), maxLength(maxLength) {}

    size_t operator()(JS::ubi::EdgeName& ptr) {
      // Delegate to the raw-pointer overload; a null edge name copies nothing.
      return ptr ? operator()(ptr.get()) : 0;
    }

    size_t operator()(JSAtom* atom) {
      MOZ_ASSERT(atom);
      JS::ubi::AtomOrTwoByteChars s(atom);
      return s.copyToBuffer(destination, maxLength);
    }

    size_t operator()(const char16_t* chars) {
      MOZ_ASSERT(chars);
      JS::ubi::AtomOrTwoByteChars s(chars);
      return s.copyToBuffer(destination, maxLength);
    }
  };

 public:
  template <typename T>
  MOZ_IMPLICIT TwoByteString(T&& rhs) : Base(std::forward<T>(rhs)) {}

  // Move-assignment implemented as destroy + placement-new because one of the
  // alternatives (JS::ubi::EdgeName) is move-only.
  template <typename T>
  TwoByteString& operator=(T&& rhs) {
    MOZ_ASSERT(this != &rhs, "self-move disallowed");
    this->~TwoByteString();
    new (this) TwoByteString(std::forward<T>(rhs));
    return *this;
  }

  TwoByteString(const TwoByteString&) = delete;
  TwoByteString& operator=(const TwoByteString&) = delete;

  // Rewrap the inner value of a JS::ubi::AtomOrTwoByteChars as a TwoByteString.
  static TwoByteString from(JS::ubi::AtomOrTwoByteChars&& s) {
    return s.match([](auto* a) { return TwoByteString(a); });
  }

  // Returns true if the given TwoByteString is non-null, false otherwise.
  bool isNonNull() const {
    return match([](auto& t) { return t != nullptr; });
  }

  // Return the length of the string, 0 if it is null.
  size_t length() const {
    return match(
        [](JSAtom* atom) -> size_t {
          MOZ_ASSERT(atom);
          JS::ubi::AtomOrTwoByteChars s(atom);
          return s.length();
        },
        [](const char16_t* chars) -> size_t {
          MOZ_ASSERT(chars);
          return NS_strlen(chars);
        },
        [](const JS::ubi::EdgeName& ptr) -> size_t {
          MOZ_ASSERT(ptr);
          return NS_strlen(ptr.get());
        });
  }

  // Copy the contents of a TwoByteString into the provided buffer. The buffer
  // is NOT null terminated. The number of characters written is returned.
  size_t copyToBuffer(RangedPtr<char16_t> destination, size_t maxLength) {
    CopyToBufferMatcher m(destination, maxLength);
    return match(m);
  }

  struct HashPolicy;
};
865
// A hashing policy for TwoByteString.
//
// Atoms are pointer hashed and use pointer equality, which means that we
// tolerate some duplication across atoms and the other two types of two-byte
// strings. In practice, we expect the amount of this duplication to be very low
// because each type is generally a different semantic thing in addition to
// having a slightly different representation. For example, the set of edge
// names and the set stack frames' source names naturally tend not to overlap
// very much if at all.
struct TwoByteString::HashPolicy {
  using Lookup = TwoByteString;

  // Hash: atoms by pointer; the two character-pointer alternatives by
  // hashing their character contents.
  static js::HashNumber hash(const Lookup& l) {
    return l.match(
        [](const JSAtom* atom) {
          return js::DefaultHasher<const JSAtom*>::hash(atom);
        },
        [](const char16_t* chars) {
          MOZ_ASSERT(chars);
          auto length = NS_strlen(chars);
          return HashString(chars, length);
        },
        [](const JS::ubi::EdgeName& ptr) {
          const char16_t* chars = ptr.get();
          MOZ_ASSERT(chars);
          auto length = NS_strlen(chars);
          return HashString(chars, length);
        });
  }

  // Equality matcher paired with the hash above: atoms compare by pointer,
  // while char16_t* and EdgeName alternatives compare by character contents
  // (and are considered equal to each other when their contents match).
  struct EqualityMatcher {
    const TwoByteString& rhs;
    explicit EqualityMatcher(const TwoByteString& rhs) : rhs(rhs) {}

    bool operator()(const JSAtom* atom) {
      return rhs.is<JSAtom*>() && rhs.as<JSAtom*>() == atom;
    }

    bool operator()(const char16_t* chars) {
      MOZ_ASSERT(chars);

      const char16_t* rhsChars = nullptr;
      if (rhs.is<const char16_t*>())
        rhsChars = rhs.as<const char16_t*>();
      else if (rhs.is<JS::ubi::EdgeName>())
        rhsChars = rhs.as<JS::ubi::EdgeName>().get();
      else
        return false;
      MOZ_ASSERT(rhsChars);

      // Compare lengths first to avoid reading past either string's end.
      auto length = NS_strlen(chars);
      if (NS_strlen(rhsChars) != length) return false;

      return memcmp(chars, rhsChars, length * sizeof(char16_t)) == 0;
    }

    bool operator()(const JS::ubi::EdgeName& ptr) {
      MOZ_ASSERT(ptr);
      return operator()(ptr.get());
    }
  };

  static bool match(const TwoByteString& k, const Lookup& l) {
    EqualityMatcher eq(l);
    return k.match(eq);
  }

  static void rekey(TwoByteString& k, TwoByteString&& newKey) {
    k = std::move(newKey);
  }
};
937
938 // Returns whether `edge` should be included in a heap snapshot of
939 // `compartments`. The optional `policy` out-param is set to INCLUDE_EDGES
940 // if we want to include the referent's edges, or EXCLUDE_EDGES if we don't
941 // want to include them.
ShouldIncludeEdge(JS::CompartmentSet * compartments,const ubi::Node & origin,const ubi::Edge & edge,CoreDumpWriter::EdgePolicy * policy=nullptr)942 static bool ShouldIncludeEdge(JS::CompartmentSet* compartments,
943 const ubi::Node& origin, const ubi::Edge& edge,
944 CoreDumpWriter::EdgePolicy* policy = nullptr) {
945 if (policy) {
946 *policy = CoreDumpWriter::INCLUDE_EDGES;
947 }
948
949 if (!compartments) {
950 // We aren't targeting a particular set of compartments, so serialize all
951 // the things!
952 return true;
953 }
954
955 // We are targeting a particular set of compartments. If this node is in our
956 // target set, serialize it and all of its edges. If this node is _not_ in our
957 // target set, we also serialize under the assumption that it is a shared
958 // resource being used by something in our target compartments since we
959 // reached it by traversing the heap graph. However, we do not serialize its
960 // outgoing edges and we abandon further traversal from this node.
961 //
962 // If the node does not belong to any compartment, we also serialize its
963 // outgoing edges. This case is relevant for Shapes: they don't belong to a
964 // specific compartment and contain edges to parent/kids Shapes we want to
965 // include. Note that these Shapes may contain pointers into our target
966 // compartment (the Shape's getter/setter JSObjects). However, we do not
967 // serialize nodes in other compartments that are reachable from these
968 // non-compartment nodes.
969
970 JS::Compartment* compartment = edge.referent.compartment();
971
972 if (!compartment || compartments->has(compartment)) {
973 return true;
974 }
975
976 if (policy) {
977 *policy = CoreDumpWriter::EXCLUDE_EDGES;
978 }
979
980 return !!origin.compartment();
981 }
982
// A `CoreDumpWriter` that serializes nodes to protobufs and writes them to the
// given `ZeroCopyOutputStream`.
class MOZ_STACK_CLASS StreamWriter : public CoreDumpWriter {
  using FrameSet = js::HashSet<uint64_t>;
  using TwoByteStringMap =
      js::HashMap<TwoByteString, uint64_t, TwoByteString::HashPolicy>;
  using OneByteStringMap = js::HashMap<const char*, uint64_t>;

  // Used to construct the de-duplication tables below and to compute node
  // sizes, edges, and stack frame properties.
  JSContext* cx;
  // Whether edge names should be serialized along with edges.
  bool wantNames;
  // The set of |JS::ubi::StackFrame::identifier()|s that have already been
  // serialized and written to the core dump.
  FrameSet framesAlreadySerialized;
  // The set of two-byte strings that have already been serialized and written
  // to the core dump. The mapped value is the string's insertion-order index,
  // used as a back-reference by later messages.
  TwoByteStringMap twoByteStringsAlreadySerialized;
  // The set of one-byte strings that have already been serialized and written
  // to the core dump. Same back-reference scheme as above; note the keys are
  // raw pointers under the default hasher, so contents-equal strings at
  // different addresses would be serialized in full again.
  OneByteStringMap oneByteStringsAlreadySerialized;

  // Destination for every serialized message. Borrowed reference.
  ::google::protobuf::io::ZeroCopyOutputStream& stream;

  // If non-null, restricts which edges get serialized (see ShouldIncludeEdge);
  // if null, everything reached is serialized. May be null.
  JS::CompartmentSet* compartments;

  // Write `message` to the stream, prefixed with its varint-encoded byte
  // size. Returns false on stream error.
  bool writeMessage(const ::google::protobuf::MessageLite& message) {
    // We have to create a new CodedOutputStream when writing each message so
    // that the 64MB size limit used by Coded{Output,Input}Stream to prevent
    // integer overflow is enforced per message rather than on the whole stream.
    // NOTE(review): ByteSizeLong()'s value is implicitly narrowed to 32 bits
    // here; presumably safe given the per-message size limit — confirm.
    ::google::protobuf::io::CodedOutputStream codedStream(&stream);
    codedStream.WriteVarint32(message.ByteSizeLong());
    message.SerializeWithCachedSizes(&codedStream);
    return !codedStream.HadError();
  }

  // Attach the full two-byte string or a reference to a two-byte string that
  // has already been serialized to a protobuf message.
  //
  // `setString` receives a heap-allocated std::string holding the raw
  // char16_t bytes (ownership transfers to the protobuf message via
  // set_allocated_*); `setRef` receives the back-reference index of a
  // previously serialized string. Returns false on OOM.
  template <typename SetStringFunction, typename SetRefFunction>
  bool attachTwoByteString(TwoByteString& string, SetStringFunction setString,
                           SetRefFunction setRef) {
    auto ptr = twoByteStringsAlreadySerialized.lookupForAdd(string);
    if (ptr) {
      // Seen before: emit only the back-reference index.
      setRef(ptr->value());
      return true;
    }

    // First occurrence: copy the characters out. The char16_t data is stored
    // as raw bytes inside a std::string sized in bytes, not characters.
    auto length = string.length();
    auto stringData = MakeUnique<std::string>(length * sizeof(char16_t), '\0');
    if (!stringData) return false;

    auto buf = const_cast<char16_t*>(
        reinterpret_cast<const char16_t*>(stringData->data()));
    string.copyToBuffer(RangedPtr<char16_t>(buf, length), length);

    // Indices are assigned in insertion order, so the reader can rebuild the
    // table by numbering strings as it deserializes them. Note `string` is
    // moved into the map here.
    uint64_t ref = twoByteStringsAlreadySerialized.count();
    if (!twoByteStringsAlreadySerialized.add(ptr, std::move(string), ref))
      return false;

    // Ownership of the byte buffer passes to the protobuf message.
    setString(stringData.release());
    return true;
  }

  // Attach the full one-byte string or a reference to a one-byte string that
  // has already been serialized to a protobuf message. Same contract as
  // attachTwoByteString, for plain C strings.
  template <typename SetStringFunction, typename SetRefFunction>
  bool attachOneByteString(const char* string, SetStringFunction setString,
                           SetRefFunction setRef) {
    auto ptr = oneByteStringsAlreadySerialized.lookupForAdd(string);
    if (ptr) {
      setRef(ptr->value());
      return true;
    }

    auto length = strlen(string);
    auto stringData = MakeUnique<std::string>(string, length);
    if (!stringData) return false;

    uint64_t ref = oneByteStringsAlreadySerialized.count();
    if (!oneByteStringsAlreadySerialized.add(ptr, string, ref)) return false;

    setString(stringData.release());
    return true;
  }

  // Serialize `frame` — and, recursively, its parents up to
  // HeapSnapshot::MAX_STACK_DEPTH frames deep — into a protobuf StackFrame.
  // Returns a heap-allocated message whose ownership passes to the caller
  // (intended for set_allocated_*), or nullptr on failure.
  protobuf::StackFrame* getProtobufStackFrame(JS::ubi::StackFrame& frame,
                                              size_t depth = 1) {
    // NB: de-duplicated string properties must be written in the same order
    // here as they are read in `HeapSnapshot::saveStackFrame` or else indices
    // in references to already serialized strings will be off.

    MOZ_ASSERT(frame,
               "null frames should be represented as the lack of a serialized "
               "stack frame");

    auto id = frame.identifier();
    auto protobufStackFrame = MakeUnique<protobuf::StackFrame>();
    if (!protobufStackFrame) return nullptr;

    // Frames already written to the core dump become a bare reference to
    // their id.
    if (framesAlreadySerialized.has(id)) {
      protobufStackFrame->set_ref(id);
      return protobufStackFrame.release();
    }

    auto data = MakeUnique<protobuf::StackFrame_Data>();
    if (!data) return nullptr;

    data->set_id(id);
    data->set_line(frame.line());
    data->set_column(frame.column());
    data->set_issystem(frame.isSystem());
    data->set_isselfhosted(frame.isSelfHosted(cx));

    auto dupeSource = TwoByteString::from(frame.source());
    if (!attachTwoByteString(
            dupeSource,
            [&](std::string* source) { data->set_allocated_source(source); },
            [&](uint64_t ref) { data->set_sourceref(ref); })) {
      return nullptr;
    }

    // The display name may be null; it is simply omitted in that case.
    auto dupeName = TwoByteString::from(frame.functionDisplayName());
    if (dupeName.isNonNull()) {
      if (!attachTwoByteString(
              dupeName,
              [&](std::string* name) {
                data->set_allocated_functiondisplayname(name);
              },
              [&](uint64_t ref) { data->set_functiondisplaynameref(ref); })) {
        return nullptr;
      }
    }

    // Recurse up the parent chain, capping the serialized depth.
    auto parent = frame.parent();
    if (parent && depth < HeapSnapshot::MAX_STACK_DEPTH) {
      auto protobufParent = getProtobufStackFrame(parent, depth + 1);
      if (!protobufParent) return nullptr;
      data->set_allocated_parent(protobufParent);
    }

    protobufStackFrame->set_allocated_data(data.release());

    // Only mark the frame as serialized once everything above has succeeded.
    if (!framesAlreadySerialized.put(id)) return nullptr;

    return protobufStackFrame.release();
  }

 public:
  StreamWriter(JSContext* cx,
               ::google::protobuf::io::ZeroCopyOutputStream& stream,
               bool wantNames, JS::CompartmentSet* compartments)
      : cx(cx),
        wantNames(wantNames),
        framesAlreadySerialized(cx),
        twoByteStringsAlreadySerialized(cx),
        oneByteStringsAlreadySerialized(cx),
        stream(stream),
        compartments(compartments) {}

  ~StreamWriter() override {}

  // Write the snapshot metadata (currently just the timestamp) as one
  // message in the core dump.
  bool writeMetadata(uint64_t timestamp) final {
    protobuf::Metadata metadata;
    metadata.set_timestamp(timestamp);
    return writeMessage(metadata);
  }

  // Serialize `ubiNode` — and, when `includeEdges` asks for it, its outgoing
  // edges — as a single protobuf Node message.
  bool writeNode(const JS::ubi::Node& ubiNode, EdgePolicy includeEdges) final {
    // NB: de-duplicated string properties must be written in the same order
    // here as they are read in `HeapSnapshot::saveNode` or else indices in
    // references to already serialized strings will be off.

    protobuf::Node protobufNode;
    protobufNode.set_id(ubiNode.identifier());

    protobufNode.set_coarsetype(
        JS::ubi::CoarseTypeToUint32(ubiNode.coarseType()));

    auto typeName = TwoByteString(ubiNode.typeName());
    if (NS_WARN_IF(!attachTwoByteString(
            typeName,
            [&](std::string* name) {
              protobufNode.set_allocated_typename_(name);
            },
            [&](uint64_t ref) { protobufNode.set_typenameref(ref); }))) {
      return false;
    }

    mozilla::MallocSizeOf mallocSizeOf = dbg::GetDebuggerMallocSizeOf(cx);
    MOZ_ASSERT(mallocSizeOf);
    protobufNode.set_size(ubiNode.size(mallocSizeOf));

    if (includeEdges) {
      auto edges = ubiNode.edges(cx, wantNames);
      if (NS_WARN_IF(!edges)) return false;

      for (; !edges->empty(); edges->popFront()) {
        ubi::Edge& ubiEdge = edges->front();
        // Skip edges that fall outside the targeted compartments, if any.
        if (!ShouldIncludeEdge(compartments, ubiNode, ubiEdge)) {
          continue;
        }

        protobuf::Edge* protobufEdge = protobufNode.add_edges();
        if (NS_WARN_IF(!protobufEdge)) {
          return false;
        }

        protobufEdge->set_referent(ubiEdge.referent.identifier());

        if (wantNames && ubiEdge.name) {
          // The edge's name is moved, not copied, into the TwoByteString.
          TwoByteString edgeName(std::move(ubiEdge.name));
          if (NS_WARN_IF(!attachTwoByteString(
                  edgeName,
                  [&](std::string* name) {
                    protobufEdge->set_allocated_name(name);
                  },
                  [&](uint64_t ref) { protobufEdge->set_nameref(ref); }))) {
            return false;
          }
        }
      }
    }

    // The remaining properties are optional and only present for some kinds
    // of node.
    if (ubiNode.hasAllocationStack()) {
      auto ubiStackFrame = ubiNode.allocationStack();
      auto protoStackFrame = getProtobufStackFrame(ubiStackFrame);
      if (NS_WARN_IF(!protoStackFrame)) return false;
      protobufNode.set_allocated_allocationstack(protoStackFrame);
    }

    if (auto className = ubiNode.jsObjectClassName()) {
      if (NS_WARN_IF(!attachOneByteString(
              className,
              [&](std::string* name) {
                protobufNode.set_allocated_jsobjectclassname(name);
              },
              [&](uint64_t ref) {
                protobufNode.set_jsobjectclassnameref(ref);
              }))) {
        return false;
      }
    }

    if (auto scriptFilename = ubiNode.scriptFilename()) {
      if (NS_WARN_IF(!attachOneByteString(
              scriptFilename,
              [&](std::string* name) {
                protobufNode.set_allocated_scriptfilename(name);
              },
              [&](uint64_t ref) {
                protobufNode.set_scriptfilenameref(ref);
              }))) {
        return false;
      }
    }

    if (ubiNode.descriptiveTypeName()) {
      auto descriptiveTypeName = TwoByteString(ubiNode.descriptiveTypeName());
      if (NS_WARN_IF(!attachTwoByteString(
              descriptiveTypeName,
              [&](std::string* name) {
                protobufNode.set_allocated_descriptivetypename(name);
              },
              [&](uint64_t ref) {
                protobufNode.set_descriptivetypenameref(ref);
              }))) {
        return false;
      }
    }

    return writeMessage(protobufNode);
  }
};
1254
1255 // A JS::ubi::BreadthFirst handler that serializes a snapshot of the heap into a
1256 // core dump.
1257 class MOZ_STACK_CLASS HeapSnapshotHandler {
1258 CoreDumpWriter& writer;
1259 JS::CompartmentSet* compartments;
1260
1261 public:
1262 // For telemetry.
1263 uint32_t nodeCount;
1264 uint32_t edgeCount;
1265
HeapSnapshotHandler(CoreDumpWriter & writer,JS::CompartmentSet * compartments)1266 HeapSnapshotHandler(CoreDumpWriter& writer, JS::CompartmentSet* compartments)
1267 : writer(writer),
1268 compartments(compartments),
1269 nodeCount(0),
1270 edgeCount(0) {}
1271
1272 // JS::ubi::BreadthFirst handler interface.
1273
1274 class NodeData {};
1275 typedef JS::ubi::BreadthFirst<HeapSnapshotHandler> Traversal;
operator ()(Traversal & traversal,JS::ubi::Node origin,const JS::ubi::Edge & edge,NodeData *,bool first)1276 bool operator()(Traversal& traversal, JS::ubi::Node origin,
1277 const JS::ubi::Edge& edge, NodeData*, bool first) {
1278 edgeCount++;
1279
1280 // We're only interested in the first time we reach edge.referent, not in
1281 // every edge arriving at that node. "But, don't we want to serialize every
1282 // edge in the heap graph?" you ask. Don't worry! This edge is still
1283 // serialized into the core dump. Serializing a node also serializes each of
1284 // its edges, and if we are traversing a given edge, we must have already
1285 // visited and serialized the origin node and its edges.
1286 if (!first) return true;
1287
1288 CoreDumpWriter::EdgePolicy policy;
1289 if (!ShouldIncludeEdge(compartments, origin, edge, &policy)) {
1290 // Because ShouldIncludeEdge considers the |origin| node as well, we don't
1291 // want to consider this node 'visited' until we write it to the core
1292 // dump.
1293 traversal.doNotMarkReferentAsVisited();
1294 return true;
1295 }
1296
1297 nodeCount++;
1298
1299 if (policy == CoreDumpWriter::EXCLUDE_EDGES) traversal.abandonReferent();
1300
1301 return writer.writeNode(edge.referent, policy);
1302 }
1303 };
1304
WriteHeapGraph(JSContext * cx,const JS::ubi::Node & node,CoreDumpWriter & writer,bool wantNames,JS::CompartmentSet * compartments,JS::AutoCheckCannotGC & noGC,uint32_t & outNodeCount,uint32_t & outEdgeCount)1305 bool WriteHeapGraph(JSContext* cx, const JS::ubi::Node& node,
1306 CoreDumpWriter& writer, bool wantNames,
1307 JS::CompartmentSet* compartments,
1308 JS::AutoCheckCannotGC& noGC, uint32_t& outNodeCount,
1309 uint32_t& outEdgeCount) {
1310 // Serialize the starting node to the core dump.
1311
1312 if (NS_WARN_IF(!writer.writeNode(node, CoreDumpWriter::INCLUDE_EDGES))) {
1313 return false;
1314 }
1315
1316 // Walk the heap graph starting from the given node and serialize it into the
1317 // core dump.
1318
1319 HeapSnapshotHandler handler(writer, compartments);
1320 HeapSnapshotHandler::Traversal traversal(cx, handler, noGC);
1321 traversal.wantNames = wantNames;
1322
1323 bool ok = traversal.addStartVisited(node) && traversal.traverse();
1324
1325 if (ok) {
1326 outNodeCount = handler.nodeCount;
1327 outEdgeCount = handler.edgeCount;
1328 }
1329
1330 return ok;
1331 }
1332
msSinceProcessCreation(const TimeStamp & now)1333 static unsigned long msSinceProcessCreation(const TimeStamp& now) {
1334 auto duration = now - TimeStamp::ProcessCreation();
1335 return (unsigned long)duration.ToMilliseconds();
1336 }
1337
/* static */
already_AddRefed<nsIFile> HeapSnapshot::CreateUniqueCoreDumpFile(
    ErrorResult& rv, const TimeStamp& now, nsAString& outFilePath,
    nsAString& outSnapshotId) {
  // Create a unique "<TmpD>/<ms-since-process-creation>.fxsnapshot" temp
  // file, returning it along with its full path and its snapshot id (the
  // file's base name). Only the parent process may touch the filesystem
  // here; child processes go through IPDL instead (see
  // getCoreDumpOutputStream below).
  MOZ_RELEASE_ASSERT(XRE_IsParentProcess());
  nsCOMPtr<nsIFile> file;
  rv = GetSpecialSystemDirectory(OS_TemporaryDirectory, getter_AddRefs(file));
  if (NS_WARN_IF(rv.Failed())) return nullptr;

  nsAutoString tempPath;
  rv = file->GetPath(tempPath);
  if (NS_WARN_IF(rv.Failed())) return nullptr;

  auto ms = msSinceProcessCreation(now);
  rv = file->AppendNative(nsPrintfCString("%lu.fxsnapshot", ms));
  if (NS_WARN_IF(rv.Failed())) return nullptr;

  // CreateUnique may adjust the leaf name to avoid clobbering an existing
  // file, so the final path is re-read below.
  rv = file->CreateUnique(nsIFile::NORMAL_FILE_TYPE, 0666);
  if (NS_WARN_IF(rv.Failed())) return nullptr;

  rv = file->GetPath(outFilePath);
  if (NS_WARN_IF(rv.Failed())) return nullptr;

  // The snapshot ID must be computed in the process that created the
  // temp file, because TmpD may not be the same in all processes.
  //
  // The id is the base name without its extension: skip the "<tempPath>/"
  // prefix (tempPath.Length() + 1 chars) and drop the trailing
  // ".fxsnapshot". sizeof(".fxsnapshot") counts the NUL terminator, which
  // exactly cancels the +1 for the path separator in the length computation.
  outSnapshotId.Assign(Substring(
      outFilePath, tempPath.Length() + 1,
      outFilePath.Length() - tempPath.Length() - sizeof(".fxsnapshot")));

  return file.forget();
}
1369
1370 // Deletion policy for cleaning up PHeapSnapshotTempFileHelperChild pointers.
1371 class DeleteHeapSnapshotTempFileHelperChild {
1372 public:
DeleteHeapSnapshotTempFileHelperChild()1373 constexpr DeleteHeapSnapshotTempFileHelperChild() {}
1374
operator ()(PHeapSnapshotTempFileHelperChild * ptr) const1375 void operator()(PHeapSnapshotTempFileHelperChild* ptr) const {
1376 Unused << NS_WARN_IF(!HeapSnapshotTempFileHelperChild::Send__delete__(ptr));
1377 }
1378 };
1379
// A UniquePtr alias to automatically manage PHeapSnapshotTempFileHelperChild
// pointers. Destruction runs the custom deletion policy above, which sends
// the actor's IPDL __delete__ message instead of calling `delete`.
using UniqueHeapSnapshotTempFileHelperChild =
    UniquePtr<PHeapSnapshotTempFileHelperChild,
              DeleteHeapSnapshotTempFileHelperChild>;
1385
1386 // Get an nsIOutputStream that we can write the heap snapshot to. In non-e10s
1387 // and in the e10s parent process, open a file directly and create an output
1388 // stream for it. In e10s child processes, we are sandboxed without access to
1389 // the filesystem. Use IPDL to request a file descriptor from the parent
1390 // process.
getCoreDumpOutputStream(ErrorResult & rv,TimeStamp & start,nsAString & outFilePath,nsAString & outSnapshotId)1391 static already_AddRefed<nsIOutputStream> getCoreDumpOutputStream(
1392 ErrorResult& rv, TimeStamp& start, nsAString& outFilePath,
1393 nsAString& outSnapshotId) {
1394 if (XRE_IsParentProcess()) {
1395 // Create the file and open the output stream directly.
1396
1397 nsCOMPtr<nsIFile> file = HeapSnapshot::CreateUniqueCoreDumpFile(
1398 rv, start, outFilePath, outSnapshotId);
1399 if (NS_WARN_IF(rv.Failed())) return nullptr;
1400
1401 nsCOMPtr<nsIOutputStream> outputStream;
1402 rv = NS_NewLocalFileOutputStream(getter_AddRefs(outputStream), file,
1403 PR_WRONLY, -1, 0);
1404 if (NS_WARN_IF(rv.Failed())) return nullptr;
1405
1406 return outputStream.forget();
1407 }
1408 // Request a file descriptor from the parent process over IPDL.
1409
1410 auto cc = ContentChild::GetSingleton();
1411 if (!cc) {
1412 rv.Throw(NS_ERROR_UNEXPECTED);
1413 return nullptr;
1414 }
1415
1416 UniqueHeapSnapshotTempFileHelperChild helper(
1417 cc->SendPHeapSnapshotTempFileHelperConstructor());
1418 if (NS_WARN_IF(!helper)) {
1419 rv.Throw(NS_ERROR_UNEXPECTED);
1420 return nullptr;
1421 }
1422
1423 OpenHeapSnapshotTempFileResponse response;
1424 if (!helper->SendOpenHeapSnapshotTempFile(&response)) {
1425 rv.Throw(NS_ERROR_UNEXPECTED);
1426 return nullptr;
1427 }
1428 if (response.type() == OpenHeapSnapshotTempFileResponse::Tnsresult) {
1429 rv.Throw(response.get_nsresult());
1430 return nullptr;
1431 }
1432
1433 auto opened = response.get_OpenedFile();
1434 outFilePath = opened.path();
1435 outSnapshotId = opened.snapshotId();
1436 nsCOMPtr<nsIOutputStream> outputStream =
1437 FileDescriptorOutputStream::Create(opened.descriptor());
1438 if (NS_WARN_IF(!outputStream)) {
1439 rv.Throw(NS_ERROR_UNEXPECTED);
1440 return nullptr;
1441 }
1442
1443 return outputStream.forget();
1444 }
1445
1446 } // namespace devtools
1447
1448 namespace dom {
1449
1450 using namespace JS;
1451 using namespace devtools;
1452
/* static */
void ChromeUtils::SaveHeapSnapshotShared(
    GlobalObject& global, const HeapSnapshotBoundaries& boundaries,
    nsAString& outFilePath, nsAString& outSnapshotId, ErrorResult& rv) {
  auto start = TimeStamp::Now();

  bool wantNames = true;
  CompartmentSet compartments;
  uint32_t nodeCount = 0;
  uint32_t edgeCount = 0;

  // Open the core dump temp file (directly in the parent process, over IPDL
  // from child processes), reporting its path and snapshot id to the caller.
  nsCOMPtr<nsIOutputStream> outputStream =
      getCoreDumpOutputStream(rv, start, outFilePath, outSnapshotId);
  if (NS_WARN_IF(rv.Failed())) return;

  // Protobuf messages are gzip-compressed on their way into the file.
  ZeroCopyNSIOutputStream zeroCopyStream(outputStream);
  ::google::protobuf::io::GzipOutputStream gzipStream(&zeroCopyStream);

  JSContext* cx = global.Context();

  {
    // Scope of the AutoCheckCannotGC token used for the heap traversal; it
    // ends before the telemetry calls below.
    Maybe<AutoCheckCannotGC> maybeNoGC;
    ubi::RootList rootList(cx, maybeNoGC, wantNames);
    if (!EstablishBoundaries(cx, rv, boundaries, rootList, compartments))
      return;

    // A null compartment set means "serialize everything".
    StreamWriter writer(cx, gzipStream, wantNames,
                        !compartments.empty() ? &compartments : nullptr);

    // Boundary setup is expected to have initialized the no-GC token.
    MOZ_ASSERT(maybeNoGC.isSome());
    ubi::Node roots(&rootList);

    // Serialize the initial heap snapshot metadata to the core dump.
    if (!writer.writeMetadata(PR_Now()) ||
        // Serialize the heap graph to the core dump, starting from our list of
        // roots.
        !WriteHeapGraph(cx, roots, writer, wantNames,
                        !compartments.empty() ? &compartments : nullptr,
                        maybeNoGC.ref(), nodeCount, edgeCount)) {
      // Prefer the underlying stream's error code when the output stream
      // failed; otherwise report a generic failure.
      rv.Throw(zeroCopyStream.failed() ? zeroCopyStream.result()
                                       : NS_ERROR_UNEXPECTED);
      return;
    }
  }

  Telemetry::AccumulateTimeDelta(Telemetry::DEVTOOLS_SAVE_HEAP_SNAPSHOT_MS,
                                 start);
  Telemetry::Accumulate(Telemetry::DEVTOOLS_HEAP_SNAPSHOT_NODE_COUNT,
                        nodeCount);
  Telemetry::Accumulate(Telemetry::DEVTOOLS_HEAP_SNAPSHOT_EDGE_COUNT,
                        edgeCount);
}
1505
1506 /* static */
GetObjectNodeId(GlobalObject & global,JS::HandleObject val)1507 uint64_t ChromeUtils::GetObjectNodeId(GlobalObject& global,
1508 JS::HandleObject val) {
1509 JS::RootedObject obj(global.Context(), val);
1510
1511 JS::ubi::Node node(obj);
1512 return node.identifier();
1513 }
1514
1515 /* static */
SaveHeapSnapshot(GlobalObject & global,const HeapSnapshotBoundaries & boundaries,nsAString & outFilePath,ErrorResult & rv)1516 void ChromeUtils::SaveHeapSnapshot(GlobalObject& global,
1517 const HeapSnapshotBoundaries& boundaries,
1518 nsAString& outFilePath, ErrorResult& rv) {
1519 nsAutoString snapshotId;
1520 SaveHeapSnapshotShared(global, boundaries, outFilePath, snapshotId, rv);
1521 }
1522
1523 /* static */
SaveHeapSnapshotGetId(GlobalObject & global,const HeapSnapshotBoundaries & boundaries,nsAString & outSnapshotId,ErrorResult & rv)1524 void ChromeUtils::SaveHeapSnapshotGetId(
1525 GlobalObject& global, const HeapSnapshotBoundaries& boundaries,
1526 nsAString& outSnapshotId, ErrorResult& rv) {
1527 nsAutoString filePath;
1528 SaveHeapSnapshotShared(global, boundaries, filePath, outSnapshotId, rv);
1529 }
1530
1531 /* static */
ReadHeapSnapshot(GlobalObject & global,const nsAString & filePath,ErrorResult & rv)1532 already_AddRefed<HeapSnapshot> ChromeUtils::ReadHeapSnapshot(
1533 GlobalObject& global, const nsAString& filePath, ErrorResult& rv) {
1534 auto start = TimeStamp::Now();
1535
1536 UniquePtr<char[]> path(ToNewCString(filePath, mozilla::fallible));
1537 if (!path) {
1538 rv.Throw(NS_ERROR_OUT_OF_MEMORY);
1539 return nullptr;
1540 }
1541
1542 AutoMemMap mm;
1543 rv = mm.init(path.get());
1544 if (rv.Failed()) return nullptr;
1545
1546 RefPtr<HeapSnapshot> snapshot = HeapSnapshot::Create(
1547 global.Context(), global, reinterpret_cast<const uint8_t*>(mm.address()),
1548 mm.size(), rv);
1549
1550 if (!rv.Failed())
1551 Telemetry::AccumulateTimeDelta(Telemetry::DEVTOOLS_READ_HEAP_SNAPSHOT_MS,
1552 start);
1553
1554 return snapshot.forget();
1555 }
1556
1557 } // namespace dom
1558 } // namespace mozilla
1559