// Copyright 2020 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/memory/shared_memory_security_policy.h"

#include <algorithm>
#include <atomic>

#include "base/bits.h"
#include "base/numerics/checked_math.h"
#include "base/optional.h"
#include "base/process/process_metrics.h"
#include "build/build_config.h"

namespace base {

namespace {

// Note: pointers are 32 bits on all architectures in NaCl. See
// https://bugs.chromium.org/p/nativeclient/issues/detail?id=1162
#if defined(ARCH_CPU_32_BITS) || defined(OS_NACL)
// No effective limit on 32-bit, since there simply isn't enough address space
// for ASLR to be particularly effective.
constexpr size_t kTotalMappedSizeLimit = -1;
#elif defined(ARCH_CPU_64_BITS)
// 32 GB of mappings ought to be enough for anybody.
constexpr size_t kTotalMappedSizeLimit = 32ULL * 1024 * 1024 * 1024;
#endif

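// Running total, in bytes, of the page-aligned sizes of all reservations
// acquired (and not yet released) in this process.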
static std::atomic_size_t total_mapped_size_;

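// Rounds |size| up to the platform's mapping granularity. For illustration:
// with a 4 KiB page size, sizes 1 and 4096 both round to 4096, while a size
// within one page of SIZE_MAX wraps the unsigned arithmetic in bits::Align()
// and is rejected below.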
base::Optional<size_t> AlignWithPageSize(size_t size) {
#if defined(OS_WIN)
  // TODO(crbug.com/210609): Matches alignment requirements defined in
  // platform_shared_memory_region_win.cc:PlatformSharedMemoryRegion::Create.
  // Remove this when NaCl is gone.
  static const size_t kSectionSize = 65536;
  const size_t page_size = std::max(kSectionSize, GetPageSize());
#else
  const size_t page_size = GetPageSize();
#endif  // defined(OS_WIN)
  size_t rounded_size = bits::Align(size, page_size);

  // Fail on overflow.
  if (rounded_size < size)
    return base::nullopt;

  return rounded_size;
}

}  // namespace

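// Sketch of the intended acquire/release pairing (hypothetical caller; the
// real call sites live in the shared memory mapping code):
//
//   if (!SharedMemorySecurityPolicy::AcquireReservationForMapping(size))
//     return false;  // Over budget; refuse to map.
//   // ... map the region ...
//   // When the mapping is destroyed, release the same |size|:
//   SharedMemorySecurityPolicy::ReleaseReservationForMapping(size);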
// static
bool SharedMemorySecurityPolicy::AcquireReservationForMapping(size_t size) {
  size_t previous_mapped_size =
      total_mapped_size_.load(std::memory_order_relaxed);
  size_t total_mapped_size;

  base::Optional<size_t> page_aligned_size = AlignWithPageSize(size);

  if (!page_aligned_size)
    return false;

  // Relaxed memory ordering is all that's needed, since atomicity alone is
  // required. If the value is stale, compare_exchange_weak() will fail and
  // the loop will retry the operation with an updated total mapped size.
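  // compare_exchange_weak() can also fail spuriously; in a retry loop like
  // this one, a spurious failure merely costs one extra iteration.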
  do {
    if (!CheckAdd(previous_mapped_size, *page_aligned_size)
             .AssignIfValid(&total_mapped_size)) {
      return false;
    }
    if (total_mapped_size >= kTotalMappedSizeLimit)
      return false;
  } while (!total_mapped_size_.compare_exchange_weak(
      previous_mapped_size, total_mapped_size, std::memory_order_relaxed,
      std::memory_order_relaxed));

  return true;
}

// static
void SharedMemorySecurityPolicy::ReleaseReservationForMapping(size_t size) {
  // Note #1: relaxed memory ordering is sufficient since atomicity is all
  // that's required.
  // Note #2: aligning |size| to the page size should never overflow here,
  // since this is only called after AcquireReservationForMapping() returned
  // true for the same size.
  base::Optional<size_t> page_aligned_size = AlignWithPageSize(size);
  total_mapped_size_.fetch_sub(*page_aligned_size, std::memory_order_relaxed);
}

}  // namespace base