1 // Copyright 2015 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "base/trace_event/process_memory_dump.h"
6
7 #include <stddef.h>
8
9 #include "base/memory/aligned_memory.h"
10 #include "base/memory/ptr_util.h"
11 #include "base/memory/shared_memory_tracker.h"
12 #include "base/memory/writable_shared_memory_region.h"
13 #include "base/process/process_metrics.h"
14 #include "base/trace_event/memory_allocator_dump_guid.h"
15 #include "base/trace_event/memory_infra_background_allowlist.h"
16 #include "base/trace_event/trace_log.h"
17 #include "base/trace_event/traced_value.h"
18 #include "build/build_config.h"
19 #include "testing/gtest/include/gtest/gtest.h"
20
21 #if defined(OS_WIN)
22 #include <windows.h>
23 #include "winbase.h"
24 #elif defined(OS_POSIX) || defined(OS_FUCHSIA)
25 #include <sys/mman.h>
26 #endif
27
28 #if defined(OS_IOS)
29 #include "base/ios/ios_util.h"
30 #endif
31
32 namespace base {
33 namespace trace_event {
34
35 namespace {
36
// Dump args shared by most tests below; DETAILED level so no background-mode
// name filtering is applied.
const MemoryDumpArgs kDetailedDumpArgs = {MemoryDumpLevelOfDetail::DETAILED};
// Allowlist installed by BackgroundModeTest. "0x?" appears to act as a hex
// wildcard component (see the 0xA1b2 / 0xaB cases in that test); the array
// must be nullptr-terminated.
const char* const kTestDumpNameWhitelist[] = {
    "Whitelisted/TestName", "Whitelisted/TestName_0x?",
    "Whitelisted/0x?/TestName", "Whitelisted/0x?", nullptr};
41
// Maps |size| bytes of private read/write memory, returning its base address.
// NOTE(review): unlike Unmap() below, there is no #else/#error branch here, so
// on an unsupported platform this non-void function falls off the end —
// confirm whether a trailing #error should be added for consistency.
void* Map(size_t size) {
#if defined(OS_WIN)
  return ::VirtualAlloc(nullptr, size, MEM_RESERVE | MEM_COMMIT,
                        PAGE_READWRITE);
#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
  return ::mmap(nullptr, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON,
                0, 0);
#endif
}
51
// Releases a region previously returned by Map(). |size| is only needed by
// the POSIX path; on Windows VirtualFree with MEM_DECOMMIT takes size 0.
void Unmap(void* addr, size_t size) {
#if defined(OS_WIN)
  ::VirtualFree(addr, 0, MEM_DECOMMIT);
#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
  ::munmap(addr, size);
#else
#error This architecture is not (yet) supported.
#endif
}
61
62 } // namespace
63
// Move-constructing a ProcessMemoryDump must transfer its allocator dumps,
// ownership edges and dump args to the destination.
TEST(ProcessMemoryDumpTest, MoveConstructor) {
  ProcessMemoryDump source(kDetailedDumpArgs);
  source.CreateAllocatorDump("mad1");
  source.CreateAllocatorDump("mad2");
  source.AddOwnershipEdge(MemoryAllocatorDumpGuid(42),
                          MemoryAllocatorDumpGuid(4242));

  ProcessMemoryDump moved(std::move(source));

  EXPECT_EQ(1u, moved.allocator_dumps().count("mad1"));
  EXPECT_EQ(1u, moved.allocator_dumps().count("mad2"));
  EXPECT_EQ(MemoryDumpLevelOfDetail::DETAILED,
            moved.dump_args().level_of_detail);
  EXPECT_EQ(1u, moved.allocator_dumps_edges().size());

  // Serializing a moved-to dump must not crash.
  auto serialized = std::make_unique<TracedValue>();
  moved.SerializeAllocatorDumpsInto(serialized.get());
}
83
// Move-assignment must replace the destination's entire state (dumps, edges,
// dump args) with the source's, discarding anything previously created on the
// destination.
TEST(ProcessMemoryDumpTest, MoveAssignment) {
  ProcessMemoryDump pmd1 = ProcessMemoryDump(kDetailedDumpArgs);
  pmd1.CreateAllocatorDump("mad1");
  pmd1.CreateAllocatorDump("mad2");
  pmd1.AddOwnershipEdge(MemoryAllocatorDumpGuid(42),
                        MemoryAllocatorDumpGuid(4242));

  ProcessMemoryDump pmd2({MemoryDumpLevelOfDetail::BACKGROUND});
  pmd2.CreateAllocatorDump("malloc");

  pmd2 = std::move(pmd1);
  EXPECT_EQ(1u, pmd2.allocator_dumps().count("mad1"));
  EXPECT_EQ(1u, pmd2.allocator_dumps().count("mad2"));
  // The "malloc" dump created on |pmd2| before the assignment must have been
  // dropped by the move (checking a never-created name alone would be
  // vacuous).
  EXPECT_EQ(0u, pmd2.allocator_dumps().count("malloc"));
  EXPECT_EQ(0u, pmd2.allocator_dumps().count("mad3"));
  EXPECT_EQ(MemoryDumpLevelOfDetail::DETAILED,
            pmd2.dump_args().level_of_detail);
  EXPECT_EQ(1u, pmd2.allocator_dumps_edges().size());

  // Check that calling serialization routines doesn't cause a crash.
  auto traced_value = std::make_unique<TracedValue>();
  pmd2.SerializeAllocatorDumpsInto(traced_value.get());
}
106
// Clear() must empty all dumps/edges while leaving the ProcessMemoryDump in a
// reusable state: creation, lookup and serialization must keep working.
TEST(ProcessMemoryDumpTest, Clear) {
  // make_unique (not bare new) per modern idiom, matching the rest of the
  // file.
  auto pmd1 = std::make_unique<ProcessMemoryDump>(kDetailedDumpArgs);
  pmd1->CreateAllocatorDump("mad1");
  pmd1->CreateAllocatorDump("mad2");
  ASSERT_FALSE(pmd1->allocator_dumps().empty());

  pmd1->AddOwnershipEdge(MemoryAllocatorDumpGuid(42),
                         MemoryAllocatorDumpGuid(4242));

  MemoryAllocatorDumpGuid shared_mad_guid1(1);
  MemoryAllocatorDumpGuid shared_mad_guid2(2);
  pmd1->CreateSharedGlobalAllocatorDump(shared_mad_guid1);
  pmd1->CreateSharedGlobalAllocatorDump(shared_mad_guid2);

  pmd1->Clear();
  ASSERT_TRUE(pmd1->allocator_dumps().empty());
  ASSERT_TRUE(pmd1->allocator_dumps_edges().empty());
  ASSERT_EQ(nullptr, pmd1->GetAllocatorDump("mad1"));
  ASSERT_EQ(nullptr, pmd1->GetAllocatorDump("mad2"));
  ASSERT_EQ(nullptr, pmd1->GetSharedGlobalAllocatorDump(shared_mad_guid1));
  ASSERT_EQ(nullptr, pmd1->GetSharedGlobalAllocatorDump(shared_mad_guid2));

  // Check that calling serialization routines doesn't cause a crash.
  auto traced_value = std::make_unique<TracedValue>();
  pmd1->SerializeAllocatorDumpsInto(traced_value.get());

  // Check that the pmd can be reused and behaves as expected.
  auto* mad1 = pmd1->CreateAllocatorDump("mad1");
  auto* mad3 = pmd1->CreateAllocatorDump("mad3");
  auto* shared_mad1 = pmd1->CreateSharedGlobalAllocatorDump(shared_mad_guid1);
  auto* shared_mad2 =
      pmd1->CreateWeakSharedGlobalAllocatorDump(shared_mad_guid2);
  ASSERT_EQ(4u, pmd1->allocator_dumps().size());
  ASSERT_EQ(mad1, pmd1->GetAllocatorDump("mad1"));
  ASSERT_EQ(nullptr, pmd1->GetAllocatorDump("mad2"));
  ASSERT_EQ(mad3, pmd1->GetAllocatorDump("mad3"));
  ASSERT_EQ(shared_mad1, pmd1->GetSharedGlobalAllocatorDump(shared_mad_guid1));
  ASSERT_EQ(MemoryAllocatorDump::Flags::DEFAULT, shared_mad1->flags());
  ASSERT_EQ(shared_mad2, pmd1->GetSharedGlobalAllocatorDump(shared_mad_guid2));
  ASSERT_EQ(MemoryAllocatorDump::Flags::WEAK, shared_mad2->flags());

  traced_value = std::make_unique<TracedValue>();
  pmd1->SerializeAllocatorDumpsInto(traced_value.get());

  pmd1.reset();
}
154
// TakeAllDumpsFrom() must move all dumps/edges (including shared global and
// weak dumps) from |pmd2| into |pmd1|, leaving |pmd2| empty but still usable,
// and transferring memory ownership of the moved MADs.
TEST(ProcessMemoryDumpTest, TakeAllDumpsFrom) {
  auto traced_value = std::make_unique<TracedValue>();
  std::unordered_map<AllocationContext, AllocationMetrics> metrics_by_context;
  metrics_by_context[AllocationContext()] = {1, 1};
  TraceEventMemoryOverhead overhead;

  auto pmd1 = std::make_unique<ProcessMemoryDump>(kDetailedDumpArgs);
  auto* mad1_1 = pmd1->CreateAllocatorDump("pmd1/mad1");
  auto* mad1_2 = pmd1->CreateAllocatorDump("pmd1/mad2");
  pmd1->AddOwnershipEdge(mad1_1->guid(), mad1_2->guid());
  pmd1->DumpHeapUsage(metrics_by_context, overhead, "pmd1/heap_dump1");
  pmd1->DumpHeapUsage(metrics_by_context, overhead, "pmd1/heap_dump2");

  auto pmd2 = std::make_unique<ProcessMemoryDump>(kDetailedDumpArgs);
  auto* mad2_1 = pmd2->CreateAllocatorDump("pmd2/mad1");
  auto* mad2_2 = pmd2->CreateAllocatorDump("pmd2/mad2");
  pmd2->AddOwnershipEdge(mad2_1->guid(), mad2_2->guid());
  pmd2->DumpHeapUsage(metrics_by_context, overhead, "pmd2/heap_dump1");
  pmd2->DumpHeapUsage(metrics_by_context, overhead, "pmd2/heap_dump2");

  MemoryAllocatorDumpGuid shared_mad_guid1(1);
  MemoryAllocatorDumpGuid shared_mad_guid2(2);
  auto* shared_mad1 = pmd2->CreateSharedGlobalAllocatorDump(shared_mad_guid1);
  auto* shared_mad2 =
      pmd2->CreateWeakSharedGlobalAllocatorDump(shared_mad_guid2);

  pmd1->TakeAllDumpsFrom(pmd2.get());

  // Make sure that pmd2 is empty but still usable after it has been emptied.
  ASSERT_TRUE(pmd2->allocator_dumps().empty());
  ASSERT_TRUE(pmd2->allocator_dumps_edges().empty());
  pmd2->CreateAllocatorDump("pmd2/this_mad_stays_with_pmd2");
  ASSERT_EQ(1u, pmd2->allocator_dumps().size());
  ASSERT_EQ(1u, pmd2->allocator_dumps().count("pmd2/this_mad_stays_with_pmd2"));
  pmd2->AddOwnershipEdge(MemoryAllocatorDumpGuid(42),
                         MemoryAllocatorDumpGuid(4242));

  // Check that calling serialization routines doesn't cause a crash.
  pmd2->SerializeAllocatorDumpsInto(traced_value.get());

  // Free the |pmd2| to check that the memory ownership of the two MAD(s)
  // has been transferred to |pmd1|.
  pmd2.reset();

  // Now check that |pmd1| has been effectively merged.
  ASSERT_EQ(6u, pmd1->allocator_dumps().size());
  ASSERT_EQ(1u, pmd1->allocator_dumps().count("pmd1/mad1"));
  ASSERT_EQ(1u, pmd1->allocator_dumps().count("pmd1/mad2"));
  ASSERT_EQ(1u, pmd1->allocator_dumps().count("pmd2/mad1"));
  // Fixed: previously this re-checked "pmd1/mad2" (a duplicate of the line
  // above), so the merge of pmd2's second dump was never verified.
  ASSERT_EQ(1u, pmd1->allocator_dumps().count("pmd2/mad2"));
  ASSERT_EQ(2u, pmd1->allocator_dumps_edges().size());
  ASSERT_EQ(shared_mad1, pmd1->GetSharedGlobalAllocatorDump(shared_mad_guid1));
  ASSERT_EQ(shared_mad2, pmd1->GetSharedGlobalAllocatorDump(shared_mad_guid2));
  ASSERT_TRUE(MemoryAllocatorDump::Flags::WEAK & shared_mad2->flags());

  // Check that calling serialization routines doesn't cause a crash.
  traced_value = std::make_unique<TracedValue>();
  pmd1->SerializeAllocatorDumpsInto(traced_value.get());

  pmd1.reset();
}
218
// Overridable ownership edges must be replaceable by later AddOwnershipEdge()
// calls, while non-overridable edges must win over later overridable ones.
TEST(ProcessMemoryDumpTest, OverrideOwnershipEdge) {
  auto pmd = std::make_unique<ProcessMemoryDump>(kDetailedDumpArgs);

  auto* shm_dump1 = pmd->CreateAllocatorDump("shared_mem/seg1");
  auto* shm_dump2 = pmd->CreateAllocatorDump("shared_mem/seg2");
  auto* shm_dump3 = pmd->CreateAllocatorDump("shared_mem/seg3");
  auto* shm_dump4 = pmd->CreateAllocatorDump("shared_mem/seg4");

  // Set up a mix of overridable and non-overridable edges between the child
  // dumps and the shared_mem segments (plus one to a shared global dump).
  auto* child1_dump = pmd->CreateAllocatorDump("shared_mem/child/seg1");
  pmd->AddOverridableOwnershipEdge(child1_dump->guid(), shm_dump1->guid(),
                                   0 /* importance */);
  auto* child2_dump = pmd->CreateAllocatorDump("shared_mem/child/seg2");
  pmd->AddOwnershipEdge(child2_dump->guid(), shm_dump2->guid(),
                        3 /* importance */);
  MemoryAllocatorDumpGuid shared_mad_guid(1);
  pmd->CreateSharedGlobalAllocatorDump(shared_mad_guid);
  pmd->AddOverridableOwnershipEdge(shm_dump3->guid(), shared_mad_guid,
                                   0 /* importance */);
  auto* child4_dump = pmd->CreateAllocatorDump("shared_mem/child/seg4");
  pmd->AddOverridableOwnershipEdge(child4_dump->guid(), shm_dump4->guid(),
                                   4 /* importance */);

  const ProcessMemoryDump::AllocatorDumpEdgesMap& edges =
      pmd->allocator_dumps_edges();
  EXPECT_EQ(4u, edges.size());
  EXPECT_EQ(shm_dump1->guid(), edges.find(child1_dump->guid())->second.target);
  EXPECT_EQ(0, edges.find(child1_dump->guid())->second.importance);
  EXPECT_TRUE(edges.find(child1_dump->guid())->second.overridable);
  EXPECT_EQ(shm_dump2->guid(), edges.find(child2_dump->guid())->second.target);
  EXPECT_EQ(3, edges.find(child2_dump->guid())->second.importance);
  EXPECT_FALSE(edges.find(child2_dump->guid())->second.overridable);
  EXPECT_EQ(shared_mad_guid, edges.find(shm_dump3->guid())->second.target);
  EXPECT_EQ(0, edges.find(shm_dump3->guid())->second.importance);
  EXPECT_TRUE(edges.find(shm_dump3->guid())->second.overridable);
  EXPECT_EQ(shm_dump4->guid(), edges.find(child4_dump->guid())->second.target);
  EXPECT_EQ(4, edges.find(child4_dump->guid())->second.importance);
  EXPECT_TRUE(edges.find(child4_dump->guid())->second.overridable);

  // These should override old edges:
  pmd->AddOwnershipEdge(child1_dump->guid(), shm_dump1->guid(),
                        1 /* importance */);
  pmd->AddOwnershipEdge(shm_dump3->guid(), shared_mad_guid, 2 /* importance */);
  // This should not change the old edges.
  pmd->AddOverridableOwnershipEdge(child2_dump->guid(), shm_dump2->guid(),
                                   0 /* importance */);
  pmd->AddOwnershipEdge(child4_dump->guid(), shm_dump4->guid(),
                        0 /* importance */);

  EXPECT_EQ(4u, edges.size());
  EXPECT_EQ(shm_dump1->guid(), edges.find(child1_dump->guid())->second.target);
  EXPECT_EQ(1, edges.find(child1_dump->guid())->second.importance);
  EXPECT_FALSE(edges.find(child1_dump->guid())->second.overridable);
  EXPECT_EQ(shm_dump2->guid(), edges.find(child2_dump->guid())->second.target);
  EXPECT_EQ(3, edges.find(child2_dump->guid())->second.importance);
  EXPECT_FALSE(edges.find(child2_dump->guid())->second.overridable);
  EXPECT_EQ(shared_mad_guid, edges.find(shm_dump3->guid())->second.target);
  EXPECT_EQ(2, edges.find(shm_dump3->guid())->second.importance);
  EXPECT_FALSE(edges.find(shm_dump3->guid())->second.overridable);
  EXPECT_EQ(shm_dump4->guid(), edges.find(child4_dump->guid())->second.target);
  EXPECT_EQ(4, edges.find(child4_dump->guid())->second.importance);
  EXPECT_FALSE(edges.find(child4_dump->guid())->second.overridable);
}
284
// AddSuballocation() must create anonymous child dumps under the owner dump
// ("<owner>/__<guid>") plus the ownership edges from the suballocations to
// those anonymous children.
TEST(ProcessMemoryDumpTest, Suballocations) {
  auto pmd = std::make_unique<ProcessMemoryDump>(kDetailedDumpArgs);
  const std::string allocator_dump_name = "fakealloc/allocated_objects";
  pmd->CreateAllocatorDump(allocator_dump_name);

  // Create one allocation with an auto-assigned guid and mark it as a
  // suballocation of "fakealloc/allocated_objects".
  auto* pic1_dump = pmd->CreateAllocatorDump("picturemanager/picture1");
  pmd->AddSuballocation(pic1_dump->guid(), allocator_dump_name);

  // Same here, but this time create an allocation with an explicit guid.
  auto* pic2_dump = pmd->CreateAllocatorDump("picturemanager/picture2",
                                             MemoryAllocatorDumpGuid(0x42));
  pmd->AddSuballocation(pic2_dump->guid(), allocator_dump_name);

  // Now check that AddSuballocation() has created anonymous child dumps under
  // "fakealloc/allocated_objects".
  auto anon_node_1_it = pmd->allocator_dumps().find(
      allocator_dump_name + "/__" + pic1_dump->guid().ToString());
  ASSERT_NE(pmd->allocator_dumps().end(), anon_node_1_it);

  auto anon_node_2_it =
      pmd->allocator_dumps().find(allocator_dump_name + "/__42");
  ASSERT_NE(pmd->allocator_dumps().end(), anon_node_2_it);

  // Finally check that AddSuballocation() has created also the
  // edges between the pictures and the anonymous allocator child dumps.
  bool found_edge[2]{false, false};
  for (const auto& e : pmd->allocator_dumps_edges()) {
    found_edge[0] |= (e.first == pic1_dump->guid() &&
                      e.second.target == anon_node_1_it->second->guid());
    found_edge[1] |= (e.first == pic2_dump->guid() &&
                      e.second.target == anon_node_2_it->second->guid());
  }
  ASSERT_TRUE(found_edge[0]);
  ASSERT_TRUE(found_edge[1]);

  // Check that calling serialization routines doesn't cause a crash.
  auto traced_value = std::make_unique<TracedValue>();
  pmd->SerializeAllocatorDumpsInto(traced_value.get());

  pmd.reset();
}
329
// Repeated shared-global-dump creation with the same guid must return the same
// MAD; a strong (non-weak) creation upgrades the WEAK flag to DEFAULT, and a
// later weak creation must not downgrade it back.
TEST(ProcessMemoryDumpTest, GlobalAllocatorDumpTest) {
  auto pmd = std::make_unique<ProcessMemoryDump>(kDetailedDumpArgs);
  MemoryAllocatorDumpGuid shared_mad_guid(1);
  auto* shared_mad1 = pmd->CreateWeakSharedGlobalAllocatorDump(shared_mad_guid);
  ASSERT_EQ(shared_mad_guid, shared_mad1->guid());
  ASSERT_EQ(MemoryAllocatorDump::Flags::WEAK, shared_mad1->flags());

  auto* shared_mad2 = pmd->GetSharedGlobalAllocatorDump(shared_mad_guid);
  ASSERT_EQ(shared_mad1, shared_mad2);
  ASSERT_EQ(MemoryAllocatorDump::Flags::WEAK, shared_mad1->flags());

  auto* shared_mad3 = pmd->CreateWeakSharedGlobalAllocatorDump(shared_mad_guid);
  ASSERT_EQ(shared_mad1, shared_mad3);
  ASSERT_EQ(MemoryAllocatorDump::Flags::WEAK, shared_mad1->flags());

  // A strong creation upgrades the existing weak dump to DEFAULT.
  auto* shared_mad4 = pmd->CreateSharedGlobalAllocatorDump(shared_mad_guid);
  ASSERT_EQ(shared_mad1, shared_mad4);
  ASSERT_EQ(MemoryAllocatorDump::Flags::DEFAULT, shared_mad1->flags());

  // A subsequent weak creation must not downgrade it back to WEAK.
  auto* shared_mad5 = pmd->CreateWeakSharedGlobalAllocatorDump(shared_mad_guid);
  ASSERT_EQ(shared_mad1, shared_mad5);
  ASSERT_EQ(MemoryAllocatorDump::Flags::DEFAULT, shared_mad1->flags());
}
354
// CreateSharedMemoryOwnershipEdge() must add the client->local edge and
// override the pre-existing overridable local->global edge with the given
// importance.
TEST(ProcessMemoryDumpTest, SharedMemoryOwnershipTest) {
  auto pmd = std::make_unique<ProcessMemoryDump>(kDetailedDumpArgs);
  const ProcessMemoryDump::AllocatorDumpEdgesMap& edges =
      pmd->allocator_dumps_edges();

  auto* client_dump2 = pmd->CreateAllocatorDump("discardable/segment2");
  auto shm_token2 = UnguessableToken::Create();
  MemoryAllocatorDumpGuid shm_local_guid2 =
      pmd->GetDumpId(SharedMemoryTracker::GetDumpNameForTracing(shm_token2));
  MemoryAllocatorDumpGuid shm_global_guid2 =
      SharedMemoryTracker::GetGlobalDumpIdForTracing(shm_token2);
  pmd->AddOverridableOwnershipEdge(shm_local_guid2, shm_global_guid2,
                                   0 /* importance */);

  pmd->CreateSharedMemoryOwnershipEdge(client_dump2->guid(), shm_token2,
                                       1 /* importance */);
  EXPECT_EQ(2u, edges.size());

  // The overridable local->global edge must now be non-overridable with the
  // importance passed to CreateSharedMemoryOwnershipEdge().
  EXPECT_EQ(shm_global_guid2, edges.find(shm_local_guid2)->second.target);
  EXPECT_EQ(1, edges.find(shm_local_guid2)->second.importance);
  EXPECT_FALSE(edges.find(shm_local_guid2)->second.overridable);
  EXPECT_EQ(shm_local_guid2, edges.find(client_dump2->guid())->second.target);
  EXPECT_EQ(1, edges.find(client_dump2->guid())->second.importance);
  EXPECT_FALSE(edges.find(client_dump2->guid())->second.overridable);
}
381
// In BACKGROUND mode, dump names not in the allowlist must be redirected to
// the black-hole MAD, while allowlisted names and global dumps work normally.
TEST(ProcessMemoryDumpTest, BackgroundModeTest) {
  MemoryDumpArgs background_args = {MemoryDumpLevelOfDetail::BACKGROUND};
  auto pmd = std::make_unique<ProcessMemoryDump>(background_args);
  ProcessMemoryDump::is_black_hole_non_fatal_for_testing_ = true;
  SetAllocatorDumpNameAllowlistForTesting(kTestDumpNameWhitelist);
  MemoryAllocatorDump* black_hole_mad = pmd->GetBlackHoleMad();

  // GetAllocatorDump works for uncreated dumps.
  EXPECT_EQ(nullptr, pmd->GetAllocatorDump("NotWhitelisted/TestName"));
  EXPECT_EQ(nullptr, pmd->GetAllocatorDump("Whitelisted/TestName"));

  // Invalid dump names.
  EXPECT_EQ(black_hole_mad,
            pmd->CreateAllocatorDump("NotWhitelisted/TestName"));
  EXPECT_EQ(black_hole_mad, pmd->CreateAllocatorDump("TestName"));
  EXPECT_EQ(black_hole_mad, pmd->CreateAllocatorDump("Whitelisted/Test"));
  EXPECT_EQ(black_hole_mad,
            pmd->CreateAllocatorDump("Not/Whitelisted/TestName"));
  EXPECT_EQ(black_hole_mad,
            pmd->CreateAllocatorDump("Whitelisted/TestName/Google"));
  EXPECT_EQ(black_hole_mad,
            pmd->CreateAllocatorDump("Whitelisted/TestName/0x1a2Google"));
  EXPECT_EQ(black_hole_mad,
            pmd->CreateAllocatorDump("Whitelisted/TestName/__12/Google"));

  // Suballocations of non-allowlisted dumps must be dropped entirely.
  MemoryAllocatorDumpGuid guid(1);
  pmd->AddSuballocation(guid, "malloc/allocated_objects");
  EXPECT_EQ(0u, pmd->allocator_dumps_edges_.size());
  EXPECT_EQ(0u, pmd->allocator_dumps_.size());

  // Global dumps.
  EXPECT_NE(black_hole_mad, pmd->CreateSharedGlobalAllocatorDump(guid));
  EXPECT_NE(black_hole_mad, pmd->CreateWeakSharedGlobalAllocatorDump(guid));
  EXPECT_NE(black_hole_mad, pmd->GetSharedGlobalAllocatorDump(guid));

  // Valid dump names.
  EXPECT_NE(black_hole_mad, pmd->CreateAllocatorDump("Whitelisted/TestName"));
  EXPECT_NE(black_hole_mad,
            pmd->CreateAllocatorDump("Whitelisted/TestName_0xA1b2"));
  EXPECT_NE(black_hole_mad,
            pmd->CreateAllocatorDump("Whitelisted/0xaB/TestName"));

  // GetAllocatorDump is consistent.
  EXPECT_EQ(nullptr, pmd->GetAllocatorDump("NotWhitelisted/TestName"));
  EXPECT_NE(black_hole_mad, pmd->GetAllocatorDump("Whitelisted/TestName"));

  // Test whitelisted entries.
  ASSERT_TRUE(IsMemoryAllocatorDumpNameInAllowlist("Whitelisted/TestName"));

  // Global dumps should be whitelisted.
  ASSERT_TRUE(IsMemoryAllocatorDumpNameInAllowlist("global/13456"));

  // Global dumps with non-guids should not be.
  ASSERT_FALSE(IsMemoryAllocatorDumpNameInAllowlist("global/random"));

  // Random names should not.
  ASSERT_FALSE(IsMemoryAllocatorDumpNameInAllowlist("NotWhitelisted/TestName"));

  // Check hex processing.
  ASSERT_TRUE(IsMemoryAllocatorDumpNameInAllowlist("Whitelisted/0xA1b2"));
}
445
// Dump guids must be a function of (process token, dump name): equal for the
// same token+name, distinct across tokens, and GetDumpId() must agree with
// the guid of a created dump.
TEST(ProcessMemoryDumpTest, GuidsTest) {
  MemoryDumpArgs dump_args = {MemoryDumpLevelOfDetail::DETAILED};

  const auto token_a = UnguessableToken::Create();
  const auto token_b = UnguessableToken::Create();

  // Two PMDs with the same process token and the same dump name.
  ProcessMemoryDump pmd1(dump_args);
  pmd1.set_process_token_for_testing(token_a);
  MemoryAllocatorDump* dump_same_1 = pmd1.CreateAllocatorDump("foo");

  ProcessMemoryDump pmd2(dump_args);
  pmd2.set_process_token_for_testing(token_a);
  MemoryAllocatorDump* dump_same_2 = pmd2.CreateAllocatorDump("foo");

  // No explicit token: the PMD gets a random one.
  ProcessMemoryDump pmd3(dump_args);
  MemoryAllocatorDump* dump_random = pmd3.CreateAllocatorDump("foo");

  // A different process token must yield a different guid for the same name.
  ProcessMemoryDump pmd4(dump_args);
  pmd4.set_process_token_for_testing(token_b);
  MemoryAllocatorDump* dump_other = pmd4.CreateAllocatorDump("foo");

  ASSERT_EQ(dump_same_1->guid(), dump_same_2->guid());

  ASSERT_NE(dump_same_2->guid(), dump_random->guid());
  ASSERT_NE(dump_random->guid(), dump_other->guid());
  ASSERT_NE(dump_other->guid(), dump_same_2->guid());

  ASSERT_EQ(dump_same_1->guid(), pmd1.GetDumpId("foo"));
}
478
479 #if defined(COUNT_RESIDENT_BYTES_SUPPORTED)
480 #if defined(OS_FUCHSIA)
481 // TODO(crbug.com/851760): Counting resident bytes is not supported on Fuchsia.
482 #define MAYBE_CountResidentBytes DISABLED_CountResidentBytes
483 #else
484 #define MAYBE_CountResidentBytes CountResidentBytes
485 #endif
// Dirtied private mappings must be reported as fully resident, both for a
// small (few pages) and a large (> 8 MiB) region.
TEST(ProcessMemoryDumpTest, MAYBE_CountResidentBytes) {
  const size_t page_size = ProcessMemoryDump::GetSystemPageSize();

  // A handful of dirtied pages should be entirely resident.
  const size_t small_size = 5 * page_size;
  void* small_region = Map(small_size);
  memset(small_region, 0, small_size);
  size_t small_resident =
      ProcessMemoryDump::CountResidentBytes(small_region, small_size);
  ASSERT_EQ(small_resident, small_size);
  Unmap(small_region, small_size);

  // Same for a large segment (> 8 MiB), which exercises the chunked path.
  const size_t kVeryLargeMemorySize = 15 * 1024 * 1024;
  void* large_region = Map(kVeryLargeMemorySize);
  memset(large_region, 0, kVeryLargeMemorySize);
  size_t large_resident =
      ProcessMemoryDump::CountResidentBytes(large_region, kVeryLargeMemorySize);
  ASSERT_EQ(large_resident, kVeryLargeMemorySize);
  Unmap(large_region, kVeryLargeMemorySize);
}
506
507 #if defined(OS_FUCHSIA)
508 // TODO(crbug.com/851760): Counting resident bytes is not supported on Fuchsia.
509 #define MAYBE_CountResidentBytesInSharedMemory \
510 DISABLED_CountResidentBytesInSharedMemory
511 #else
512 #define MAYBE_CountResidentBytesInSharedMemory CountResidentBytesInSharedMemory
513 #endif
// Fully-dirtied shared memory mappings of various sizes must be reported as
// resident via CountResidentBytesInSharedMemory().
TEST(ProcessMemoryDumpTest, MAYBE_CountResidentBytesInSharedMemory) {
#if defined(OS_IOS)
  // TODO(crbug.com/748410): Reenable this test.
  if (!base::ios::IsRunningOnIOS10OrLater()) {
    return;
  }
#endif

  const size_t page_size = ProcessMemoryDump::GetSystemPageSize();

  // A few dirtied pages should be entirely resident.
  {
    const size_t kDirtyMemorySize = 5 * page_size;
    auto region = base::WritableSharedMemoryRegion::Create(kDirtyMemorySize);
    base::WritableSharedMemoryMapping mapping = region.Map();
    memset(mapping.memory(), 0, kDirtyMemorySize);
    base::Optional<size_t> resident =
        ProcessMemoryDump::CountResidentBytesInSharedMemory(
            mapping.memory(), mapping.mapped_size());
    ASSERT_TRUE(resident.has_value());
    ASSERT_EQ(resident.value(), kDirtyMemorySize);
  }

  // A large segment (> 8 MiB) should behave the same.
  {
    const size_t kVeryLargeMemorySize = 15 * 1024 * 1024;
    auto region =
        base::WritableSharedMemoryRegion::Create(kVeryLargeMemorySize);
    base::WritableSharedMemoryMapping mapping = region.Map();
    memset(mapping.memory(), 0, kVeryLargeMemorySize);
    base::Optional<size_t> resident =
        ProcessMemoryDump::CountResidentBytesInSharedMemory(
            mapping.memory(), mapping.mapped_size());
    ASSERT_TRUE(resident.has_value());
    ASSERT_EQ(resident.value(), kVeryLargeMemorySize);
  }

  // A mid-size segment, fully dirtied, should also be fully resident.
  {
    const size_t kTouchedMemorySize = 7 * 1024 * 1024;
    auto region = base::WritableSharedMemoryRegion::Create(kTouchedMemorySize);
    base::WritableSharedMemoryMapping mapping = region.Map();
    memset(mapping.memory(), 0, kTouchedMemorySize);
    base::Optional<size_t> resident =
        ProcessMemoryDump::CountResidentBytesInSharedMemory(
            mapping.memory(), mapping.mapped_size());
    ASSERT_TRUE(resident.has_value());
    ASSERT_EQ(resident.value(), kTouchedMemorySize);
  }
}
564 #endif // defined(COUNT_RESIDENT_BYTES_SUPPORTED)
565
566 } // namespace trace_event
567 } // namespace base
568