// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/metrics/persistent_histogram_allocator.h"

#include <limits>
#include <utility>

#include "base/atomicops.h"
#include "base/files/file_path.h"
#include "base/files/file_util.h"
#include "base/files/important_file_writer.h"
#include "base/files/memory_mapped_file.h"
#include "base/lazy_instance.h"
#include "base/logging.h"
#include "base/memory/ptr_util.h"
#include "base/memory/shared_memory_mapping.h"
#include "base/memory/writable_shared_memory_region.h"
#include "base/metrics/histogram.h"
#include "base/metrics/histogram_base.h"
#include "base/metrics/histogram_samples.h"
#include "base/metrics/metrics_hashes.h"
#include "base/metrics/persistent_sample_map.h"
#include "base/metrics/sparse_histogram.h"
#include "base/metrics/statistics_recorder.h"
#include "base/numerics/safe_conversions.h"
#include "base/pickle.h"
#include "base/process/process_handle.h"
#include "base/strings/string_number_conversions.h"
#include "base/strings/string_split.h"
#include "base/strings/stringprintf.h"
#include "base/synchronization/lock.h"

namespace base {

namespace {

// Type identifiers used when storing in persistent memory so they can be
// identified during extraction; the first 4 bytes of the SHA1 of the name
// are used as a unique integer. A "version number" is added to the base
// so that, if the structure of that object changes, stored older versions
// will be safely ignored.
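// For example, kTypeIdRangesArray below is 0xBCEA225A (the first 32 bits of
// SHA1("RangesArray")) plus 1 for version 1 of the structure.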
enum : uint32_t {
  kTypeIdRangesArray = 0xBCEA225A + 1,  // SHA1(RangesArray) v1
  kTypeIdCountsArray = 0x53215530 + 1,  // SHA1(CountsArray) v1
};

// The current globally-active persistent allocator for all new histograms.
// The object held here will obviously not be destructed at process exit
// but that's best since PersistentMemoryAllocator objects (that underlie
// GlobalHistogramAllocator objects) are explicitly forbidden from doing
// anything essential at exit anyway because they depend on data managed
// elsewhere that could be destructed first. An AtomicWord is used instead
// of std::atomic because the latter can create global ctors and dtors.
subtle::AtomicWord g_histogram_allocator = 0;

// Take an array of range boundaries and create a proper BucketRanges object
// which is returned to the caller. A return of nullptr indicates that the
// passed boundaries are invalid.
std::unique_ptr<BucketRanges> CreateRangesFromData(
    HistogramBase::Sample* ranges_data,
    uint32_t ranges_checksum,
    size_t count) {
  // To avoid racy destruction at shutdown, the following may be leaked.
  std::unique_ptr<BucketRanges> ranges(new BucketRanges(count));
  DCHECK_EQ(count, ranges->size());
  for (size_t i = 0; i < count; ++i) {
    if (i > 0 && ranges_data[i] <= ranges_data[i - 1])
      return nullptr;
    ranges->set_range(i, ranges_data[i]);
  }

  ranges->ResetChecksum();
  if (ranges->checksum() != ranges_checksum)
    return nullptr;

  return ranges;
}

// Calculate the number of bytes required to store all of a histogram's
// "counts". This will return zero (0) if |bucket_count| is not valid.
size_t CalculateRequiredCountsBytes(size_t bucket_count) {
  // 2 because each "sample count" also requires a backup "logged count"
  // used for calculating the delta during snapshot operations.
  const size_t kBytesPerBucket = 2 * sizeof(HistogramBase::AtomicCount);
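  // Illustrative example (not from the code): with the typical 4-byte
  // AtomicCount, a 50-bucket histogram needs 50 * 2 * 4 = 400 bytes.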

  // If the |bucket_count| is such that it would overflow the return type,
  // perhaps as the result of a malicious actor, then return zero to
  // indicate the problem to the caller.
  if (bucket_count > std::numeric_limits<size_t>::max() / kBytesPerBucket)
    return 0;

  return bucket_count * kBytesPerBucket;
}

}  // namespace

const Feature kPersistentHistogramsFeature{
  "PersistentHistograms", FEATURE_DISABLED_BY_DEFAULT
};

PersistentSparseHistogramDataManager::PersistentSparseHistogramDataManager(
    PersistentMemoryAllocator* allocator)
    : allocator_(allocator), record_iterator_(allocator) {}

PersistentSparseHistogramDataManager::~PersistentSparseHistogramDataManager() =
    default;

PersistentSampleMapRecords*
PersistentSparseHistogramDataManager::UseSampleMapRecords(uint64_t id,
                                                          const void* user) {
  base::AutoLock auto_lock(lock_);
  return GetSampleMapRecordsWhileLocked(id)->Acquire(user);
}

PersistentSampleMapRecords*
PersistentSparseHistogramDataManager::GetSampleMapRecordsWhileLocked(
    uint64_t id) {
  auto found = sample_records_.find(id);
  if (found != sample_records_.end())
    return found->second.get();

  std::unique_ptr<PersistentSampleMapRecords>& samples = sample_records_[id];
  samples = std::make_unique<PersistentSampleMapRecords>(this, id);
  return samples.get();
}

bool PersistentSparseHistogramDataManager::LoadRecords(
    PersistentSampleMapRecords* sample_map_records) {
  // DataManager must be locked in order to access the found_ field of any
  // PersistentSampleMapRecords object.
  base::AutoLock auto_lock(lock_);
  bool found = false;

  // If there are already "found" entries for the passed object, move them.
  if (!sample_map_records->found_.empty()) {
    sample_map_records->records_.reserve(sample_map_records->records_.size() +
                                         sample_map_records->found_.size());
    sample_map_records->records_.insert(sample_map_records->records_.end(),
                                        sample_map_records->found_.begin(),
                                        sample_map_records->found_.end());
    sample_map_records->found_.clear();
    found = true;
  }

  // Acquiring a lock is a semi-expensive operation so load some records with
  // each call. More than this number may be loaded if it takes longer to
  // find at least one matching record for the passed object.
  const int kMinimumNumberToLoad = 10;
  const uint64_t match_id = sample_map_records->sample_map_id_;

  // Loop while no entry is found OR we haven't yet loaded the minimum number.
  // This will continue reading even after a match is found.
  for (int count = 0; !found || count < kMinimumNumberToLoad; ++count) {
    // Get the next sample-record. The iterator will always resume from where
    // it left off even if it previously had nothing further to return.
    uint64_t found_id;
    PersistentMemoryAllocator::Reference ref =
        PersistentSampleMap::GetNextPersistentRecord(record_iterator_,
                                                     &found_id);

    // Stop immediately if there are none.
    if (!ref)
      break;

    // The sample-record could be for any sparse histogram. Add the reference
    // to the appropriate collection for later use.
    if (found_id == match_id) {
      sample_map_records->records_.push_back(ref);
      found = true;
    } else {
      PersistentSampleMapRecords* samples =
          GetSampleMapRecordsWhileLocked(found_id);
      DCHECK(samples);
      samples->found_.push_back(ref);
    }
  }

  return found;
}

PersistentSampleMapRecords::PersistentSampleMapRecords(
    PersistentSparseHistogramDataManager* data_manager,
    uint64_t sample_map_id)
    : data_manager_(data_manager), sample_map_id_(sample_map_id) {}

PersistentSampleMapRecords::~PersistentSampleMapRecords() = default;

PersistentSampleMapRecords* PersistentSampleMapRecords::Acquire(
    const void* user) {
  DCHECK(!user_);
  user_ = user;
  seen_ = 0;
  return this;
}

void PersistentSampleMapRecords::Release(const void* user) {
  DCHECK_EQ(user_, user);
  user_ = nullptr;
}

PersistentMemoryAllocator::Reference PersistentSampleMapRecords::GetNext() {
  DCHECK(user_);

  // If there are no unseen records, lock and swap in all the found ones.
  if (records_.size() == seen_) {
    if (!data_manager_->LoadRecords(this))
      return 0;  // i.e. kReferenceNull; no more records exist.
  }

  // Return the next record. Records *must* be returned in the same order
  // they are found in the persistent memory in order to ensure that all
  // objects using this data always have the same state. Race conditions
  // can cause duplicate records so using the "first found" is the only
  // guarantee that all objects always access the same one.
  DCHECK_LT(seen_, records_.size());
  return records_[seen_++];
}

PersistentMemoryAllocator::Reference PersistentSampleMapRecords::CreateNew(
    HistogramBase::Sample value) {
  return PersistentSampleMap::CreatePersistentRecord(data_manager_->allocator_,
                                                     sample_map_id_, value);
}

// This data will be held in persistent memory in order for processes to
// locate and use histograms created elsewhere.
struct PersistentHistogramAllocator::PersistentHistogramData {
  // SHA1(Histogram): Increment this if structure changes!
  static constexpr uint32_t kPersistentTypeId = 0xF1645910 + 3;

  // Expected size for 32/64-bit check.
  static constexpr size_t kExpectedInstanceSize =
      40 + 2 * HistogramSamples::Metadata::kExpectedInstanceSize;
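  // (The 40 bytes cover the eight 4-byte fields below plus the 8-byte |name|
  // placeholder at the end of the structure.)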

  int32_t histogram_type;
  int32_t flags;
  int32_t minimum;
  int32_t maximum;
  uint32_t bucket_count;
  PersistentMemoryAllocator::Reference ranges_ref;
  uint32_t ranges_checksum;
  subtle::Atomic32 counts_ref;  // PersistentMemoryAllocator::Reference
  HistogramSamples::Metadata samples_metadata;
  HistogramSamples::Metadata logged_metadata;

  // Space for the histogram name will be added during the actual allocation
  // request. This must be the last field of the structure. A zero-size array
  // or a "flexible" array would be preferred but is not (yet) valid C++.
  char name[sizeof(uint64_t)];  // Force 64-bit alignment on 32-bit builds.
};

PersistentHistogramAllocator::Iterator::Iterator(
    PersistentHistogramAllocator* allocator)
    : allocator_(allocator), memory_iter_(allocator->memory_allocator()) {}

std::unique_ptr<HistogramBase>
PersistentHistogramAllocator::Iterator::GetNextWithIgnore(Reference ignore) {
  PersistentMemoryAllocator::Reference ref;
  while ((ref = memory_iter_.GetNextOfType<PersistentHistogramData>()) != 0) {
    if (ref != ignore)
      return allocator_->GetHistogram(ref);
  }
  return nullptr;
}

PersistentHistogramAllocator::PersistentHistogramAllocator(
    std::unique_ptr<PersistentMemoryAllocator> memory)
    : memory_allocator_(std::move(memory)),
      sparse_histogram_data_manager_(memory_allocator_.get()) {}

PersistentHistogramAllocator::~PersistentHistogramAllocator() = default;

std::unique_ptr<HistogramBase> PersistentHistogramAllocator::GetHistogram(
    Reference ref) {
  // Unfortunately, the histogram "pickle" methods cannot be used as part of
  // the persistence because the deserialization methods always create local
  // count data (while these must reference the persistent counts) and always
  // add it to the local list of known histograms (while these may be simple
  // references to histograms in other processes).
  PersistentHistogramData* data =
      memory_allocator_->GetAsObject<PersistentHistogramData>(ref);
  const size_t length = memory_allocator_->GetAllocSize(ref);

  // Check that metadata is reasonable: name is null-terminated and non-empty,
  // ID fields have been loaded with a hash of the name (0 is considered
  // unset/invalid).
  if (!data || data->name[0] == '\0' ||
      reinterpret_cast<char*>(data)[length - 1] != '\0' ||
      data->samples_metadata.id == 0 || data->logged_metadata.id == 0 ||
      // Note: Sparse histograms use |id + 1| in |logged_metadata|.
      (data->logged_metadata.id != data->samples_metadata.id &&
       data->logged_metadata.id != data->samples_metadata.id + 1) ||
      // Most non-matching values happen due to truncated names. Ideally, we
      // could just verify the name length based on the overall alloc length,
      // but that doesn't work because the allocated block may have been
      // aligned to the next boundary value.
      HashMetricName(data->name) != data->samples_metadata.id) {
    return nullptr;
  }
  return CreateHistogram(data);
}

std::unique_ptr<HistogramBase> PersistentHistogramAllocator::AllocateHistogram(
    HistogramType histogram_type,
    const std::string& name,
    int minimum,
    int maximum,
    const BucketRanges* bucket_ranges,
    int32_t flags,
    Reference* ref_ptr) {
  // If the allocator is corrupt, don't waste time trying anything else.
  // This also allows differentiating on the dashboard between allocations
  // failed due to a corrupt allocator and the number of process instances
  // with one, the latter being indicated by "newly corrupt", below.
  if (memory_allocator_->IsCorrupt())
    return nullptr;

  // Create the metadata necessary for a persistent sparse histogram. This
  // is done first because it is a small subset of what is required for
  // other histograms. The type is "under construction" so that a crash
  // during the datafill doesn't leave a bad record around that could cause
  // confusion by another process trying to read it. It will be corrected
  // once histogram construction is complete.
  PersistentHistogramData* histogram_data =
      memory_allocator_->New<PersistentHistogramData>(
          offsetof(PersistentHistogramData, name) + name.length() + 1);
  if (histogram_data) {
    memcpy(histogram_data->name, name.c_str(), name.size() + 1);
    histogram_data->histogram_type = histogram_type;
    histogram_data->flags = flags | HistogramBase::kIsPersistent;
  }

  // Create the remaining metadata necessary for regular histograms.
  if (histogram_type != SPARSE_HISTOGRAM) {
    size_t bucket_count = bucket_ranges->bucket_count();
    size_t counts_bytes = CalculateRequiredCountsBytes(bucket_count);
    if (counts_bytes == 0) {
      // |bucket_count| was out-of-range.
      return nullptr;
    }

    // Since the StatisticsRecorder keeps a global collection of BucketRanges
    // objects for re-use, it would be dangerous for one to hold a reference
    // from a persistent allocator that is not the global one (which is
    // permanent once set). If this stops being the case, this check can
    // become an "if" condition beside "!ranges_ref" below and before
    // set_persistent_reference() farther down.
    DCHECK_EQ(this, GlobalHistogramAllocator::Get());

    // Re-use an existing BucketRanges persistent allocation if one is known;
    // otherwise, create one.
    PersistentMemoryAllocator::Reference ranges_ref =
        bucket_ranges->persistent_reference();
    if (!ranges_ref) {
      size_t ranges_count = bucket_count + 1;
      size_t ranges_bytes = ranges_count * sizeof(HistogramBase::Sample);
      ranges_ref =
          memory_allocator_->Allocate(ranges_bytes, kTypeIdRangesArray);
      if (ranges_ref) {
        HistogramBase::Sample* ranges_data =
            memory_allocator_->GetAsArray<HistogramBase::Sample>(
                ranges_ref, kTypeIdRangesArray, ranges_count);
        if (ranges_data) {
          for (size_t i = 0; i < bucket_ranges->size(); ++i)
            ranges_data[i] = bucket_ranges->range(i);
          bucket_ranges->set_persistent_reference(ranges_ref);
        } else {
          // This should never happen but be tolerant if it does.
          ranges_ref = PersistentMemoryAllocator::kReferenceNull;
        }
      }
    } else {
      DCHECK_EQ(kTypeIdRangesArray, memory_allocator_->GetType(ranges_ref));
    }

    // Only continue here if all allocations were successful. If they weren't,
    // there is no way to free the space but that's not really a problem since
    // the allocations only fail because the space is full or corrupt and so
    // any future attempts will also fail.
    if (ranges_ref && histogram_data) {
      histogram_data->minimum = minimum;
      histogram_data->maximum = maximum;
      // |bucket_count| must fit within 32-bits or the allocation of the counts
      // array would have failed for being too large; the allocator supports
      // less than 4GB total size.
      histogram_data->bucket_count = static_cast<uint32_t>(bucket_count);
      histogram_data->ranges_ref = ranges_ref;
      histogram_data->ranges_checksum = bucket_ranges->checksum();
    } else {
      histogram_data = nullptr;  // Clear this for proper handling below.
    }
  }

  if (histogram_data) {
    // Create the histogram using resources in persistent memory. This ends up
    // resolving the "ref" values stored in histogram_data instead of just
    // using what is already known above but avoids duplicating the switch
    // statement here and serves as a double-check that everything is
    // correct before committing the new histogram to persistent space.
    std::unique_ptr<HistogramBase> histogram = CreateHistogram(histogram_data);
    DCHECK(histogram);
    DCHECK_NE(0U, histogram_data->samples_metadata.id);
    DCHECK_NE(0U, histogram_data->logged_metadata.id);

    PersistentMemoryAllocator::Reference histogram_ref =
        memory_allocator_->GetAsReference(histogram_data);
    if (ref_ptr != nullptr)
      *ref_ptr = histogram_ref;

    // By storing the reference within the allocator to this histogram, the
    // next import (which will happen before the next histogram creation)
    // will know to skip it.
    // See also the comment in ImportHistogramsToStatisticsRecorder().
    subtle::NoBarrier_Store(&last_created_, histogram_ref);
    return histogram;
  }

  return nullptr;
}

void PersistentHistogramAllocator::FinalizeHistogram(Reference ref,
                                                     bool registered) {
  if (registered) {
    // If the created persistent histogram was registered then it needs to
    // be marked as "iterable" in order to be found by other processes. This
    // happens only after the histogram is fully formed so it's impossible for
    // code iterating through the allocator to read a partially created record.
    memory_allocator_->MakeIterable(ref);
  } else {
    // If it wasn't registered then a race condition must have caused two to
    // be created. The allocator does not support releasing the acquired memory
    // so just change the type to be empty.
    memory_allocator_->ChangeType(ref, 0,
                                  PersistentHistogramData::kPersistentTypeId,
                                  /*clear=*/false);
  }
}

void PersistentHistogramAllocator::MergeHistogramDeltaToStatisticsRecorder(
    HistogramBase* histogram) {
  DCHECK(histogram);

  HistogramBase* existing = GetOrCreateStatisticsRecorderHistogram(histogram);
  if (!existing) {
    // The above should never fail but if it does, no real harm is done.
    // The data won't be merged but it also won't be recorded as merged
    // so a future try, if successful, will get what was missed. If it
    // continues to fail, some metric data will be lost but that is better
    // than crashing.
    return;
  }

  // Merge the delta from the passed object to the one in the SR.
  existing->AddSamples(*histogram->SnapshotDelta());
}

void PersistentHistogramAllocator::MergeHistogramFinalDeltaToStatisticsRecorder(
    const HistogramBase* histogram) {
  DCHECK(histogram);

  HistogramBase* existing = GetOrCreateStatisticsRecorderHistogram(histogram);
  if (!existing) {
    // The above should never fail but if it does, no real harm is done.
    // Some metric data will be lost but that is better than crashing.
    return;
  }

  // Merge the delta from the passed object to the one in the SR.
  existing->AddSamples(*histogram->SnapshotFinalDelta());
}

PersistentSampleMapRecords* PersistentHistogramAllocator::UseSampleMapRecords(
    uint64_t id,
    const void* user) {
  return sparse_histogram_data_manager_.UseSampleMapRecords(id, user);
}

void PersistentHistogramAllocator::CreateTrackingHistograms(StringPiece name) {
  memory_allocator_->CreateTrackingHistograms(name);
}

void PersistentHistogramAllocator::UpdateTrackingHistograms() {
  memory_allocator_->UpdateTrackingHistograms();
}

void PersistentHistogramAllocator::ClearLastCreatedReferenceForTesting() {
  subtle::NoBarrier_Store(&last_created_, 0);
}

std::unique_ptr<HistogramBase> PersistentHistogramAllocator::CreateHistogram(
    PersistentHistogramData* histogram_data_ptr) {
  if (!histogram_data_ptr)
    return nullptr;

  // Sparse histograms are quite different so handle them as a special case.
  if (histogram_data_ptr->histogram_type == SPARSE_HISTOGRAM) {
    std::unique_ptr<HistogramBase> histogram =
        SparseHistogram::PersistentCreate(this, histogram_data_ptr->name,
                                          &histogram_data_ptr->samples_metadata,
                                          &histogram_data_ptr->logged_metadata);
    DCHECK(histogram);
    histogram->SetFlags(histogram_data_ptr->flags);
    return histogram;
  }

  // Copy the configuration fields from histogram_data_ptr to local storage
  // because anything in persistent memory cannot be trusted as it could be
  // changed at any moment by a malicious actor that shares access. The local
  // values are validated below and then used to create the histogram, knowing
  // they haven't changed between validation and use.
  int32_t histogram_type = histogram_data_ptr->histogram_type;
  int32_t histogram_flags = histogram_data_ptr->flags;
  int32_t histogram_minimum = histogram_data_ptr->minimum;
  int32_t histogram_maximum = histogram_data_ptr->maximum;
  uint32_t histogram_bucket_count = histogram_data_ptr->bucket_count;
  uint32_t histogram_ranges_ref = histogram_data_ptr->ranges_ref;
  uint32_t histogram_ranges_checksum = histogram_data_ptr->ranges_checksum;

  HistogramBase::Sample* ranges_data =
      memory_allocator_->GetAsArray<HistogramBase::Sample>(
          histogram_ranges_ref, kTypeIdRangesArray,
          PersistentMemoryAllocator::kSizeAny);

  const uint32_t max_buckets =
      std::numeric_limits<uint32_t>::max() / sizeof(HistogramBase::Sample);
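  // |max_buckets| bounds the bucket count so that the corresponding ranges
  // array (one Sample per bucket boundary) cannot overflow a 32-bit size.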
  size_t required_bytes =
      (histogram_bucket_count + 1) * sizeof(HistogramBase::Sample);
  size_t allocated_bytes =
      memory_allocator_->GetAllocSize(histogram_ranges_ref);
  if (!ranges_data || histogram_bucket_count < 2 ||
      histogram_bucket_count >= max_buckets ||
      allocated_bytes < required_bytes) {
    return nullptr;
  }

  std::unique_ptr<const BucketRanges> created_ranges = CreateRangesFromData(
      ranges_data, histogram_ranges_checksum, histogram_bucket_count + 1);
  if (!created_ranges)
    return nullptr;
  const BucketRanges* ranges =
      StatisticsRecorder::RegisterOrDeleteDuplicateRanges(
          created_ranges.release());

  size_t counts_bytes = CalculateRequiredCountsBytes(histogram_bucket_count);
  PersistentMemoryAllocator::Reference counts_ref =
      subtle::Acquire_Load(&histogram_data_ptr->counts_ref);
  if (counts_bytes == 0 ||
      (counts_ref != 0 &&
       memory_allocator_->GetAllocSize(counts_ref) < counts_bytes)) {
    return nullptr;
  }

  // The "counts" data (including both samples and logged samples) is a delayed
  // persistent allocation meaning that though its size and storage for a
  // reference is defined, no space is reserved until actually needed. When
  // it is needed, memory will be allocated from the persistent segment and
  // a reference to it stored at the passed address. Other threads can then
  // notice the valid reference and access the same data.
  DelayedPersistentAllocation counts_data(memory_allocator_.get(),
                                          &histogram_data_ptr->counts_ref,
                                          kTypeIdCountsArray, counts_bytes, 0);

  // A second delayed allocation is defined using the same reference storage
  // location as the first so the allocation of one will automatically be found
  // by the other. Within the block, the first half of the space is for "counts"
  // and the second half is for "logged counts".
  DelayedPersistentAllocation logged_data(
      memory_allocator_.get(), &histogram_data_ptr->counts_ref,
      kTypeIdCountsArray, counts_bytes, counts_bytes / 2,
      /*make_iterable=*/false);
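  // The shared block is therefore laid out as:
  //   [ sample counts: counts_bytes / 2 | logged counts: counts_bytes / 2 ]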

  // Create the right type of histogram.
  const char* name = histogram_data_ptr->name;
  std::unique_ptr<HistogramBase> histogram;
  switch (histogram_type) {
    case HISTOGRAM:
      histogram = Histogram::PersistentCreate(
          name, histogram_minimum, histogram_maximum, ranges, counts_data,
          logged_data, &histogram_data_ptr->samples_metadata,
          &histogram_data_ptr->logged_metadata);
      DCHECK(histogram);
      break;
    case LINEAR_HISTOGRAM:
      histogram = LinearHistogram::PersistentCreate(
          name, histogram_minimum, histogram_maximum, ranges, counts_data,
          logged_data, &histogram_data_ptr->samples_metadata,
          &histogram_data_ptr->logged_metadata);
      DCHECK(histogram);
      break;
    case BOOLEAN_HISTOGRAM:
      histogram = BooleanHistogram::PersistentCreate(
          name, ranges, counts_data, logged_data,
          &histogram_data_ptr->samples_metadata,
          &histogram_data_ptr->logged_metadata);
      DCHECK(histogram);
      break;
    case CUSTOM_HISTOGRAM:
      histogram = CustomHistogram::PersistentCreate(
          name, ranges, counts_data, logged_data,
          &histogram_data_ptr->samples_metadata,
          &histogram_data_ptr->logged_metadata);
      DCHECK(histogram);
      break;
    default:
      return nullptr;
  }

  if (histogram) {
    DCHECK_EQ(histogram_type, histogram->GetHistogramType());
    histogram->SetFlags(histogram_flags);
  }

  return histogram;
}

HistogramBase*
PersistentHistogramAllocator::GetOrCreateStatisticsRecorderHistogram(
    const HistogramBase* histogram) {
  // This should never be called on the global histogram allocator as objects
  // created there are already within the global statistics recorder.
  DCHECK_NE(GlobalHistogramAllocator::Get(), this);
  DCHECK(histogram);

  HistogramBase* existing =
      StatisticsRecorder::FindHistogram(histogram->histogram_name());
  if (existing)
    return existing;

  // Adding the passed histogram to the SR would cause a problem if the
  // allocator that holds it eventually goes away. Instead, create a new
  // one from a serialized version. Deserialization calls the appropriate
  // FactoryGet() which will create the histogram in the global persistent-
  // histogram allocator if such is set.
  base::Pickle pickle;
  histogram->SerializeInfo(&pickle);
  PickleIterator iter(pickle);
  existing = DeserializeHistogramInfo(&iter);
  if (!existing)
    return nullptr;

  // Make sure there is no "serialization" flag set.
  DCHECK_EQ(0, existing->flags() & HistogramBase::kIPCSerializationSourceFlag);
  // Record the newly created histogram in the SR.
  return StatisticsRecorder::RegisterOrDeleteDuplicate(existing);
}

GlobalHistogramAllocator::~GlobalHistogramAllocator() = default;

// static
void GlobalHistogramAllocator::CreateWithPersistentMemory(
    void* base,
    size_t size,
    size_t page_size,
    uint64_t id,
    StringPiece name) {
  Set(WrapUnique(
      new GlobalHistogramAllocator(std::make_unique<PersistentMemoryAllocator>(
          base, size, page_size, id, name, false))));
}

// static
void GlobalHistogramAllocator::CreateWithLocalMemory(
    size_t size,
    uint64_t id,
    StringPiece name) {
  Set(WrapUnique(new GlobalHistogramAllocator(
      std::make_unique<LocalPersistentMemoryAllocator>(size, id, name))));
}

#if !defined(OS_NACL)
// static
bool GlobalHistogramAllocator::CreateWithFile(
    const FilePath& file_path,
    size_t size,
    uint64_t id,
    StringPiece name) {
  bool exists = PathExists(file_path);
  File file(
      file_path, File::FLAG_OPEN_ALWAYS | File::FLAG_SHARE_DELETE |
                 File::FLAG_READ | File::FLAG_WRITE);

  std::unique_ptr<MemoryMappedFile> mmfile(new MemoryMappedFile());
  bool success = false;
  if (exists) {
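    // An existing file's current length supersedes the requested |size|.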
    size = saturated_cast<size_t>(file.GetLength());
    success = mmfile->Initialize(std::move(file), MemoryMappedFile::READ_WRITE);
  } else {
    success = mmfile->Initialize(std::move(file), {0, size},
                                 MemoryMappedFile::READ_WRITE_EXTEND);
  }
  if (!success ||
      !FilePersistentMemoryAllocator::IsFileAcceptable(*mmfile, true)) {
    return false;
  }

  Set(WrapUnique(new GlobalHistogramAllocator(
      std::make_unique<FilePersistentMemoryAllocator>(std::move(mmfile), size,
                                                      id, name, false))));
  Get()->SetPersistentLocation(file_path);
  return true;
}

// static
bool GlobalHistogramAllocator::CreateWithActiveFile(const FilePath& base_path,
                                                    const FilePath& active_path,
                                                    const FilePath& spare_path,
                                                    size_t size,
                                                    uint64_t id,
                                                    StringPiece name) {
  // Old "active" becomes "base".
  if (!base::ReplaceFile(active_path, base_path, nullptr))
    base::DeleteFile(base_path);
  if (base::PathExists(active_path))
    return false;

  // Move any "spare" into "active". Okay to continue if file doesn't exist.
  if (!spare_path.empty())
    base::ReplaceFile(spare_path, active_path, nullptr);

  return base::GlobalHistogramAllocator::CreateWithFile(active_path, size, id,
                                                        name);
}

// static
bool GlobalHistogramAllocator::CreateWithActiveFileInDir(const FilePath& dir,
                                                         size_t size,
                                                         uint64_t id,
                                                         StringPiece name) {
  FilePath base_path, active_path, spare_path;
  ConstructFilePaths(dir, name, &base_path, &active_path, &spare_path);
  return CreateWithActiveFile(base_path, active_path, spare_path, size, id,
                              name);
}

// static
FilePath GlobalHistogramAllocator::ConstructFilePath(const FilePath& dir,
                                                     StringPiece name) {
  return dir.AppendASCII(name).AddExtension(
      PersistentMemoryAllocator::kFileExtension);
}

// static
FilePath GlobalHistogramAllocator::ConstructFilePathForUploadDir(
    const FilePath& dir,
    StringPiece name,
    base::Time stamp,
    ProcessId pid) {
  return ConstructFilePath(
      dir,
      StringPrintf("%.*s-%lX-%lX", static_cast<int>(name.length()), name.data(),
                   static_cast<long>(stamp.ToTimeT()), static_cast<long>(pid)));
}

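// Note: this parses the "<name>-<hex stamp>-<hex pid>" filename form produced
// by ConstructFilePathForUploadDir() above (e.g. a hypothetical
// "BrowserMetrics-5BDE5810-1A2B" before the file extension is appended).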
// static
bool GlobalHistogramAllocator::ParseFilePath(const FilePath& path,
                                             std::string* out_name,
                                             Time* out_stamp,
                                             ProcessId* out_pid) {
  std::string filename = path.BaseName().AsUTF8Unsafe();
  std::vector<base::StringPiece> parts = base::SplitStringPiece(
      filename, "-.", base::KEEP_WHITESPACE, base::SPLIT_WANT_ALL);
  if (parts.size() != 4)
    return false;

  if (out_name)
    *out_name = parts[0].as_string();

  if (out_stamp) {
    int64_t stamp;
    if (!HexStringToInt64(parts[1], &stamp))
      return false;
    *out_stamp = Time::FromTimeT(static_cast<time_t>(stamp));
  }

  if (out_pid) {
    int64_t pid;
    if (!HexStringToInt64(parts[2], &pid))
      return false;
    *out_pid = static_cast<ProcessId>(pid);
  }

  return true;
}

// static
void GlobalHistogramAllocator::ConstructFilePaths(const FilePath& dir,
                                                  StringPiece name,
                                                  FilePath* out_base_path,
                                                  FilePath* out_active_path,
                                                  FilePath* out_spare_path) {
  if (out_base_path)
    *out_base_path = ConstructFilePath(dir, name);

  if (out_active_path) {
    *out_active_path =
        ConstructFilePath(dir, name.as_string().append("-active"));
  }

  if (out_spare_path) {
    *out_spare_path = ConstructFilePath(dir, name.as_string().append("-spare"));
  }
}

// static
void GlobalHistogramAllocator::ConstructFilePathsForUploadDir(
    const FilePath& active_dir,
    const FilePath& upload_dir,
    const std::string& name,
    FilePath* out_upload_path,
    FilePath* out_active_path,
    FilePath* out_spare_path) {
  if (out_upload_path) {
    *out_upload_path = ConstructFilePathForUploadDir(
        upload_dir, name, Time::Now(), GetCurrentProcId());
  }

  if (out_active_path) {
    *out_active_path =
        ConstructFilePath(active_dir, name + std::string("-active"));
  }

  if (out_spare_path) {
    *out_spare_path =
        ConstructFilePath(active_dir, name + std::string("-spare"));
  }
}

// static
bool GlobalHistogramAllocator::CreateSpareFile(const FilePath& spare_path,
                                               size_t size) {
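  // Build the spare under a temporary name and swap it into place only once
  // it is fully created, so a concurrent reader never observes a partial file.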
  FilePath temp_spare_path = spare_path.AddExtension(FILE_PATH_LITERAL(".tmp"));
  bool success;
  {
    File spare_file(temp_spare_path, File::FLAG_CREATE_ALWAYS |
                                         File::FLAG_READ | File::FLAG_WRITE);
    success = spare_file.IsValid();

    if (success) {
      MemoryMappedFile mmfile;
      success = mmfile.Initialize(std::move(spare_file), {0, size},
                                  MemoryMappedFile::READ_WRITE_EXTEND);
    }
  }

  if (success)
    success = ReplaceFile(temp_spare_path, spare_path, nullptr);

  if (!success)
    DeleteFile(temp_spare_path);

  return success;
}

// static
bool GlobalHistogramAllocator::CreateSpareFileInDir(const FilePath& dir,
                                                    size_t size,
                                                    StringPiece name) {
  FilePath spare_path;
  ConstructFilePaths(dir, name, nullptr, nullptr, &spare_path);
  return CreateSpareFile(spare_path, size);
}
#endif  // !defined(OS_NACL)

// static
void GlobalHistogramAllocator::CreateWithSharedMemoryRegion(
    const WritableSharedMemoryRegion& region) {
  base::WritableSharedMemoryMapping mapping = region.Map();
  if (!mapping.IsValid() ||
      !WritableSharedPersistentMemoryAllocator::IsSharedMemoryAcceptable(
          mapping)) {
    return;
  }

  Set(WrapUnique(new GlobalHistogramAllocator(
      std::make_unique<WritableSharedPersistentMemoryAllocator>(
          std::move(mapping), 0, StringPiece()))));
}

// static
void GlobalHistogramAllocator::Set(
    std::unique_ptr<GlobalHistogramAllocator> allocator) {
  // Releasing or changing an allocator is extremely dangerous because it
  // likely has histograms stored within it. If the backing memory is also
  // released, future accesses to those histograms will seg-fault.
  CHECK(!subtle::NoBarrier_Load(&g_histogram_allocator));
  subtle::Release_Store(&g_histogram_allocator,
                        reinterpret_cast<uintptr_t>(allocator.release()));
  size_t existing = StatisticsRecorder::GetHistogramCount();

  DVLOG_IF(1, existing)
      << existing << " histograms were created before persistence was enabled.";
}

// static
GlobalHistogramAllocator* GlobalHistogramAllocator::Get() {
  return reinterpret_cast<GlobalHistogramAllocator*>(
      subtle::Acquire_Load(&g_histogram_allocator));
}

// static
std::unique_ptr<GlobalHistogramAllocator>
GlobalHistogramAllocator::ReleaseForTesting() {
  GlobalHistogramAllocator* histogram_allocator = Get();
  if (!histogram_allocator)
    return nullptr;
  PersistentMemoryAllocator* memory_allocator =
      histogram_allocator->memory_allocator();

  // Before releasing the memory, it's necessary to have the Statistics-
  // Recorder forget about the histograms contained therein; otherwise,
  // some operations will try to access them and the released memory.
  PersistentMemoryAllocator::Iterator iter(memory_allocator);
  const PersistentHistogramData* data;
  while ((data = iter.GetNextOfObject<PersistentHistogramData>()) != nullptr) {
    StatisticsRecorder::ForgetHistogramForTesting(data->name);
  }

  subtle::Release_Store(&g_histogram_allocator, 0);
  return WrapUnique(histogram_allocator);
}

void GlobalHistogramAllocator::SetPersistentLocation(const FilePath& location) {
  persistent_location_ = location;
}

const FilePath& GlobalHistogramAllocator::GetPersistentLocation() const {
  return persistent_location_;
}

bool GlobalHistogramAllocator::WriteToPersistentLocation() {
#if defined(OS_NACL)
  // NACL doesn't support file operations, including ImportantFileWriter.
  NOTREACHED();
  return false;
#else
  // Stop if no destination is set.
  if (persistent_location_.empty()) {
    NOTREACHED() << "Could not write \"" << Name() << "\" persistent histograms"
                 << " to file because no location was set.";
    return false;
  }

  StringPiece contents(static_cast<const char*>(data()), used());
  if (!ImportantFileWriter::WriteFileAtomically(persistent_location_,
                                                contents)) {
    LOG(ERROR) << "Could not write \"" << Name() << "\" persistent histograms"
               << " to file: " << persistent_location_.value();
    return false;
  }

  return true;
#endif
}

void GlobalHistogramAllocator::DeletePersistentLocation() {
  memory_allocator()->SetMemoryState(PersistentMemoryAllocator::MEMORY_DELETED);

#if defined(OS_NACL)
  NOTREACHED();
#else
  if (persistent_location_.empty())
    return;

  // Open (with delete) and then immediately close the file by going out of
  // scope. This is the only cross-platform safe way to delete a file that may
  // be open elsewhere. Open handles will continue to operate normally but
  // new opens will not be possible.
  File file(persistent_location_,
            File::FLAG_OPEN | File::FLAG_READ | File::FLAG_DELETE_ON_CLOSE);
#endif
}

GlobalHistogramAllocator::GlobalHistogramAllocator(
    std::unique_ptr<PersistentMemoryAllocator> memory)
    : PersistentHistogramAllocator(std::move(memory)),
      import_iterator_(this) {
}

void GlobalHistogramAllocator::ImportHistogramsToStatisticsRecorder() {
  // Skip the import if it's the histogram that was last created. Should a
  // race condition cause the "last created" to be overwritten before it
  // is recognized here then the histogram will be created and be ignored
  // when it is detected as a duplicate by the statistics-recorder. This
  // simple check reduces the time of creating persistent histograms by
  // about 40%.
  Reference record_to_ignore = last_created();

  // There is no lock on this because the iterator is lock-free while still
  // guaranteed to return each entry only once. The StatisticsRecorder
  // has its own lock so the Register operation is safe.
  while (true) {
    std::unique_ptr<HistogramBase> histogram =
        import_iterator_.GetNextWithIgnore(record_to_ignore);
    if (!histogram)
      break;
    StatisticsRecorder::RegisterOrDeleteDuplicate(histogram.release());
  }
}

}  // namespace base