1 // Copyright 2019 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include <errno.h>
6 #include <inttypes.h>
7 #include <sys/mman.h>
8 #include <sys/types.h>
9 #include <sys/utsname.h>
10
11 #include <atomic>
12
13 #include "base/atomicops.h"
14 #include "base/bits.h"
15 #include "base/callback.h"
16 #include "base/logging.h"
17 #include "base/memory/madv_free_discardable_memory_allocator_posix.h"
18 #include "base/memory/madv_free_discardable_memory_posix.h"
19 #include "base/process/process_metrics.h"
20 #include "base/strings/string_number_conversions.h"
21 #include "base/strings/stringprintf.h"
22 #include "base/tracing_buildflags.h"
23
24 #if BUILDFLAG(ENABLE_BASE_TRACING)
25 #include "base/trace_event/memory_allocator_dump.h" // no-presubmit-check
26 #include "base/trace_event/memory_dump_manager.h" // no-presubmit-check
27 #endif // BUILDFLAG(ENABLE_BASE_TRACING)
28
29 #if defined(ADDRESS_SANITIZER)
30 #include <sanitizer/asan_interface.h>
31 #endif // defined(ADDRESS_SANITIZER)
32
33 namespace {
34
35 constexpr intptr_t kPageMagicCookie = 1;
36
AllocatePages(size_t size_in_pages)37 void* AllocatePages(size_t size_in_pages) {
38 void* data = mmap(nullptr, size_in_pages * base::GetPageSize(),
39 PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
40 PCHECK(data != MAP_FAILED);
41 return data;
42 }
43
44 // Checks if the system supports usage of MADV_FREE as a backing for discardable
45 // memory.
ProbePlatformMadvFreeSupport()46 base::MadvFreeSupport ProbePlatformMadvFreeSupport() {
47 // Note: If the compiling system does not have headers for Linux 4.5+, then
48 // the MADV_FREE define will not exist and the probe will default to
49 // unsupported, regardless of whether the target system actually supports
50 // MADV_FREE.
51 #if !defined(OS_APPLE) && defined(MADV_FREE)
52 uint8_t* dummy_page = static_cast<uint8_t*>(AllocatePages(1));
53 dummy_page[0] = 1;
54
55 base::MadvFreeSupport support = base::MadvFreeSupport::kUnsupported;
56
57 // Check if the MADV_FREE advice value exists.
58 int retval = madvise(dummy_page, base::GetPageSize(), MADV_FREE);
59 if (!retval) {
60 // For Linux 4.5 to 4.12, MADV_FREE on a swapless system will lead to memory
61 // being immediately discarded. Verify that the memory was not discarded.
62 if (dummy_page[0]) {
63 support = base::MadvFreeSupport::kSupported;
64 }
65 }
66 PCHECK(!munmap(dummy_page, base::GetPageSize()));
67 return support;
68 #else
69 return base::MadvFreeSupport::kUnsupported;
70 #endif
71 }
72
73 } // namespace
74
75 namespace base {
76
// Allocates a page-aligned anonymous mapping large enough to hold
// |size_in_bytes| and charges that size to |allocator_byte_count|, the
// byte counter shared with the owning allocator.
MadvFreeDiscardableMemoryPosix::MadvFreeDiscardableMemoryPosix(
    size_t size_in_bytes,
    std::atomic<size_t>* allocator_byte_count)
    : size_in_bytes_(size_in_bytes),
      // Number of pages needed to cover |size_in_bytes_|, rounded up.
      allocated_pages_((size_in_bytes_ + base::GetPageSize() - 1) /
                       base::GetPageSize()),
      allocator_byte_count_(allocator_byte_count),
      // One saved first-word slot per page; written by UnlockPage() and
      // consumed by LockPage(). Sized with the same rounded-up page count.
      page_first_word_((size_in_bytes_ + base::GetPageSize() - 1) /
                       base::GetPageSize()) {
  data_ = AllocatePages(allocated_pages_);
  (*allocator_byte_count_) += size_in_bytes_;
}
89
~MadvFreeDiscardableMemoryPosix()90 MadvFreeDiscardableMemoryPosix::~MadvFreeDiscardableMemoryPosix() {
91 if (Deallocate()) {
92 DVLOG(1) << "Region evicted during destructor with " << allocated_pages_
93 << " pages";
94 }
95 }
96
Lock()97 bool MadvFreeDiscardableMemoryPosix::Lock() {
98 DFAKE_SCOPED_LOCK(thread_collision_warner_);
99 DCHECK(!is_locked_);
100 // Locking fails if the memory has been deallocated.
101 if (!data_)
102 return false;
103
104 #if defined(ADDRESS_SANITIZER)
105 // We need to unpoison here since locking pages writes to them.
106 // Note that even if locking fails, we want to unpoison anyways after
107 // deallocation.
108 ASAN_UNPOISON_MEMORY_REGION(data_, allocated_pages_ * base::GetPageSize());
109 #endif // defined(ADDRESS_SANITIZER)
110
111 size_t page_index;
112 for (page_index = 0; page_index < allocated_pages_; ++page_index) {
113 if (!LockPage(page_index))
114 break;
115 }
116
117 if (page_index < allocated_pages_) {
118 DVLOG(1) << "Region eviction discovered during lock with "
119 << allocated_pages_ << " pages";
120 Deallocate();
121 return false;
122 }
123 DCHECK(IsResident());
124
125 is_locked_ = true;
126 return true;
127 }
128
Unlock()129 void MadvFreeDiscardableMemoryPosix::Unlock() {
130 DFAKE_SCOPED_LOCK(thread_collision_warner_);
131 DCHECK(is_locked_);
132 DCHECK(data_ != nullptr);
133
134 for (size_t page_index = 0; page_index < allocated_pages_; ++page_index) {
135 UnlockPage(page_index);
136 }
137
138 #ifdef MADV_FREE
139 if (!keep_memory_for_testing_) {
140 int retval =
141 madvise(data_, allocated_pages_ * base::GetPageSize(), MADV_FREE);
142 DPCHECK(!retval);
143 }
144 #endif
145
146 #if defined(ADDRESS_SANITIZER)
147 ASAN_POISON_MEMORY_REGION(data_, allocated_pages_ * base::GetPageSize());
148 #endif // defined(ADDRESS_SANITIZER)
149
150 is_locked_ = false;
151 }
152
// Returns the start of the region. Only valid while the region is locked and
// still mapped (both enforced by DCHECKs).
void* MadvFreeDiscardableMemoryPosix::data() const {
  DFAKE_SCOPED_LOCK(thread_collision_warner_);
  DCHECK(is_locked_);
  DCHECK(data_ != nullptr);

  return data_;
}
160
LockPage(size_t page_index)161 bool MadvFreeDiscardableMemoryPosix::LockPage(size_t page_index) {
162 // We require the byte-level representation of std::atomic<intptr_t> to be
163 // equivalent to that of an intptr_t. Since std::atomic<intptr_t> has standard
164 // layout, having equal size is sufficient but not necessary for them to have
165 // the same byte-level representation.
166 static_assert(sizeof(intptr_t) == sizeof(std::atomic<intptr_t>),
167 "Incompatible layout of std::atomic.");
168 DCHECK(std::atomic<intptr_t>{}.is_lock_free());
169 std::atomic<intptr_t>* page_as_atomic =
170 reinterpret_cast<std::atomic<intptr_t>*>(
171 static_cast<uint8_t*>(data_) + page_index * base::GetPageSize());
172
173 intptr_t expected = kPageMagicCookie;
174
175 // Recall that we set the first word of the page to |kPageMagicCookie|
176 // (non-zero) during unlocking. Thus, if the value has changed, the page has
177 // been discarded. Restore the page's original first word from before
178 // unlocking only if the page has not been discarded.
179 if (!std::atomic_compare_exchange_strong_explicit(
180 page_as_atomic, &expected,
181 static_cast<intptr_t>(page_first_word_[page_index]),
182 std::memory_order_relaxed, std::memory_order_relaxed)) {
183 return false;
184 }
185
186 return true;
187 }
188
// Prepares a single page for potential discard: saves its first word and
// replaces it with a sentinel so a later LockPage() can detect eviction.
void MadvFreeDiscardableMemoryPosix::UnlockPage(size_t page_index) {
  DCHECK(std::atomic<intptr_t>{}.is_lock_free());

  std::atomic<intptr_t>* page_as_atomic =
      reinterpret_cast<std::atomic<intptr_t>*>(
          static_cast<uint8_t*>(data_) + page_index * base::GetPageSize());

  // Store the first word of the page for use during locking (LockPage()
  // restores it after confirming the page was not discarded).
  page_first_word_[page_index].store(*page_as_atomic,
                                     std::memory_order_relaxed);
  // Store a non-zero value into the first word of the page, so we can tell when
  // the page is discarded during locking.
  page_as_atomic->store(kPageMagicCookie, std::memory_order_relaxed);
}
203
DiscardPage(size_t page_index)204 void MadvFreeDiscardableMemoryPosix::DiscardPage(size_t page_index) {
205 DFAKE_SCOPED_LOCK(thread_collision_warner_);
206 DCHECK(!is_locked_);
207 DCHECK(page_index < allocated_pages_);
208 int retval =
209 madvise(static_cast<uint8_t*>(data_) + base::GetPageSize() * page_index,
210 base::GetPageSize(), MADV_DONTNEED);
211 DPCHECK(!retval);
212 }
213
// Test-only accessor for the current lock state.
bool MadvFreeDiscardableMemoryPosix::IsLockedForTesting() const {
  DFAKE_SCOPED_LOCK(thread_collision_warner_);
  return is_locked_;
}
218
DiscardForTesting()219 void MadvFreeDiscardableMemoryPosix::DiscardForTesting() {
220 DFAKE_SCOPED_LOCK(thread_collision_warner_);
221 DCHECK(!is_locked_);
222 int retval =
223 madvise(data_, base::GetPageSize() * allocated_pages_, MADV_DONTNEED);
224 DPCHECK(!retval);
225 }
226
// Emits tracing dumps describing this segment: a per-segment allocator dump
// (sizes, waste, locked/discarded state) plus a caller-named dump that is
// registered as a suballocation of it. Returns the caller-named dump, or
// nullptr (after NOTREACHED) when base tracing is compiled out.
trace_event::MemoryAllocatorDump*
MadvFreeDiscardableMemoryPosix::CreateMemoryAllocatorDump(
    const char* name,
    trace_event::ProcessMemoryDump* pmd) const {
#if BUILDFLAG(ENABLE_BASE_TRACING)
  DFAKE_SCOPED_LOCK(thread_collision_warner_);

  using base::trace_event::MemoryAllocatorDump;
  // Segment dumps are keyed by this object's address so they are unique
  // per instance.
  std::string allocator_dump_name = base::StringPrintf(
      "discardable/segment_0x%" PRIXPTR, reinterpret_cast<uintptr_t>(this));

  MemoryAllocatorDump* allocator_dump =
      pmd->CreateAllocatorDump(allocator_dump_name);

  bool is_discarded = IsDiscarded();

  MemoryAllocatorDump* dump = pmd->CreateAllocatorDump(name);
  // Requested (caller-visible) size; reported as 0 once the contents have
  // been discarded. The effective_size is the amount of unused space as a
  // result of being page-aligned.
  dump->AddScalar(MemoryAllocatorDump::kNameSize,
                  MemoryAllocatorDump::kUnitsBytes,
                  is_discarded ? 0U : static_cast<uint64_t>(size_in_bytes_));

  // Full page-aligned footprint of the segment.
  allocator_dump->AddScalar(
      MemoryAllocatorDump::kNameSize, MemoryAllocatorDump::kUnitsBytes,
      is_discarded
          ? 0U
          : static_cast<uint64_t>(allocated_pages_ * base::GetPageSize()));
  allocator_dump->AddScalar(MemoryAllocatorDump::kNameObjectCount,
                            MemoryAllocatorDump::kUnitsObjects, 1U);
  // Bytes lost to rounding the requested size up to whole pages.
  allocator_dump->AddScalar(
      "wasted_size", MemoryAllocatorDump::kUnitsBytes,
      static_cast<uint64_t>(allocated_pages_ * base::GetPageSize() -
                            size_in_bytes_));
  allocator_dump->AddScalar("locked_size", MemoryAllocatorDump::kUnitsBytes,
                            is_locked_ ? size_in_bytes_ : 0U);
  allocator_dump->AddScalar("page_count", MemoryAllocatorDump::kUnitsObjects,
                            static_cast<uint64_t>(allocated_pages_));

  // The amount of space that is discarded, but not unmapped (i.e. the memory
  // was discarded while unlocked, but the pages are still mapped in memory
  // since Deallocate() has not been called yet). This instance is discarded if
  // it is unlocked and not all pages are resident in memory.
  allocator_dump->AddScalar(
      "discarded_size", MemoryAllocatorDump::kUnitsBytes,
      is_discarded ? allocated_pages_ * base::GetPageSize() : 0U);

  pmd->AddSuballocation(dump->guid(), allocator_dump_name);
  return dump;
#else   // BUILDFLAG(ENABLE_BASE_TRACING)
  NOTREACHED();
  return nullptr;
#endif  // BUILDFLAG(ENABLE_BASE_TRACING)
}
281
// Returns true while the backing mapping exists (i.e. Deallocate() has not
// run yet).
bool MadvFreeDiscardableMemoryPosix::IsValid() const {
  DFAKE_SCOPED_RECURSIVE_LOCK(thread_collision_warner_);
  return data_ != nullptr;
}
286
// Test-only: when |keep_memory| is true, Unlock() skips the MADV_FREE advice
// so tests can inspect the region's contents after unlocking.
void MadvFreeDiscardableMemoryPosix::SetKeepMemoryForTesting(bool keep_memory) {
  DFAKE_SCOPED_LOCK(thread_collision_warner_);
  DCHECK(is_locked_);
  keep_memory_for_testing_ = keep_memory;
}
292
IsResident() const293 bool MadvFreeDiscardableMemoryPosix::IsResident() const {
294 DFAKE_SCOPED_RECURSIVE_LOCK(thread_collision_warner_);
295 #if defined(OS_APPLE) || defined(OS_BSD)
296 std::vector<char> vec(allocated_pages_);
297 #else
298 std::vector<unsigned char> vec(allocated_pages_);
299 #endif
300
301 int retval =
302 mincore(data_, allocated_pages_ * base::GetPageSize(), vec.data());
303 DPCHECK(retval == 0 || errno == EAGAIN);
304
305 for (size_t i = 0; i < allocated_pages_; ++i) {
306 if (!(vec[i] & 1))
307 return false;
308 }
309 return true;
310 }
311
// A region counts as discarded when it is unlocked and at least one of its
// pages is no longer resident.
bool MadvFreeDiscardableMemoryPosix::IsDiscarded() const {
  return !is_locked_ && !IsResident();
}
315
Deallocate()316 bool MadvFreeDiscardableMemoryPosix::Deallocate() {
317 DFAKE_SCOPED_RECURSIVE_LOCK(thread_collision_warner_);
318 if (data_) {
319 #if defined(ADDRESS_SANITIZER)
320 ASAN_UNPOISON_MEMORY_REGION(data_, allocated_pages_ * base::GetPageSize());
321 #endif // defined(ADDRESS_SANITIZER)
322
323 int retval = munmap(data_, allocated_pages_ * base::GetPageSize());
324 PCHECK(!retval);
325 data_ = nullptr;
326 (*allocator_byte_count_) -= size_in_bytes_;
327 return true;
328 }
329 return false;
330 }
331
GetMadvFreeSupport()332 MadvFreeSupport GetMadvFreeSupport() {
333 static MadvFreeSupport kMadvFreeSupport = ProbePlatformMadvFreeSupport();
334 return kMadvFreeSupport;
335 }
336
337 } // namespace base
338