// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/memory/discardable_shared_memory.h"

#include <stdint.h>

#include <algorithm>

#include "base/atomicops.h"
#include "base/bits.h"
#include "base/feature_list.h"
#include "base/logging.h"
#include "base/memory/discardable_memory.h"
#include "base/memory/discardable_memory_internal.h"
#include "base/memory/shared_memory_tracker.h"
#include "base/numerics/safe_math.h"
#include "base/process/process_metrics.h"
#include "base/tracing_buildflags.h"
#include "build/build_config.h"

#if defined(OS_POSIX) && !defined(OS_NACL)
// For madvise() which is available on all POSIX compatible systems.
#include <sys/mman.h>
#endif

#if defined(OS_ANDROID)
#include "third_party/ashmem/ashmem.h"
#endif

#if defined(OS_WIN)
#include <windows.h>
#include "base/win/windows_version.h"
#endif

#if defined(OS_FUCHSIA)
#include <lib/zx/vmar.h>
#include <zircon/types.h>
#include "base/fuchsia/fuchsia_logging.h"
#endif

#if BUILDFLAG(ENABLE_BASE_TRACING)
#include "base/trace_event/memory_allocator_dump.h"  // no-presubmit-check
#include "base/trace_event/process_memory_dump.h"    // no-presubmit-check
#endif  // BUILDFLAG(ENABLE_BASE_TRACING)

namespace base {
namespace {

// Use a machine-sized pointer as atomic type. It will use the Atomic32 or
// Atomic64 routines, depending on the architecture.
typedef intptr_t AtomicType;
typedef uintptr_t UAtomicType;

// Template specialization for timestamp serialization/deserialization. This
// is used to serialize timestamps using Unix time on systems where AtomicType
// does not have enough precision to contain a timestamp in the standard
// serialized format.
template <int>
Time TimeFromWireFormat(int64_t value);
template <int>
int64_t TimeToWireFormat(Time time);

// Serialize to Unix time when using 4-byte wire format.
// Note: On 19 January 2038, this will cease to work.
template <>
Time ALLOW_UNUSED_TYPE TimeFromWireFormat<4>(int64_t value) {
  return value ? Time::UnixEpoch() + TimeDelta::FromSeconds(value) : Time();
}
template <>
int64_t ALLOW_UNUSED_TYPE TimeToWireFormat<4>(Time time) {
  return time > Time::UnixEpoch() ? (time - Time::UnixEpoch()).InSeconds() : 0;
}
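// Worked example: with the 4-byte wire format, Unix time 1000000000
// (2001-09-09 01:46:40 UTC) serializes to 1000000000 and deserializes back
// exactly; any sub-second precision is truncated by InSeconds() above.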

// Standard serialization format when using 8-byte wire format.
template <>
Time ALLOW_UNUSED_TYPE TimeFromWireFormat<8>(int64_t value) {
  return Time::FromInternalValue(value);
}
template <>
int64_t ALLOW_UNUSED_TYPE TimeToWireFormat<8>(Time time) {
  return time.ToInternalValue();
}

struct SharedState {
  enum LockState { UNLOCKED = 0, LOCKED = 1 };

  explicit SharedState(AtomicType ivalue) { value.i = ivalue; }
  SharedState(LockState lock_state, Time timestamp) {
    int64_t wire_timestamp = TimeToWireFormat<sizeof(AtomicType)>(timestamp);
    DCHECK_GE(wire_timestamp, 0);
    DCHECK_EQ(lock_state & ~1, 0);
    value.u = (static_cast<UAtomicType>(wire_timestamp) << 1) | lock_state;
  }

  LockState GetLockState() const { return static_cast<LockState>(value.u & 1); }

  Time GetTimestamp() const {
    return TimeFromWireFormat<sizeof(AtomicType)>(value.u >> 1);
  }

  // Bit 1: Lock state. Bit is set when locked.
  // Bit 2..sizeof(AtomicType)*8: Usage timestamp. NULL time when locked or
  // purged.
  union {
    AtomicType i;
    UAtomicType u;
  } value;
};
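// Encoding example: SharedState(SharedState::LOCKED, Time()) packs a zero
// wire timestamp with the lock bit set, so value.u == 1, while an UNLOCKED
// state with a non-null timestamp always yields an even value.u > 1. The
// union provides a signed view (|i|) for the atomic routines and an unsigned
// view (|u|) for the shift and mask operations above.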

// Shared state is stored at offset 0 in shared memory segments.
SharedState* SharedStateFromSharedMemory(
    const WritableSharedMemoryMapping& shared_memory) {
  DCHECK(shared_memory.IsValid());
  return static_cast<SharedState*>(shared_memory.memory());
}

// Round up |size| to a multiple of page size.
size_t AlignToPageSize(size_t size) {
  return bits::Align(size, base::GetPageSize());
}
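// For example, with 4 KiB pages, AlignToPageSize(1) == 4096,
// AlignToPageSize(4096) == 4096 and AlignToPageSize(0) == 0.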

#if defined(OS_ANDROID)
bool UseAshmemUnpinningForDiscardableMemory() {
  if (!ashmem_device_is_supported())
    return false;

  // If we are participating in the discardable memory backing trial, only
  // enable ashmem unpinning when we are in the corresponding trial group.
  if (base::DiscardableMemoryBackingFieldTrialIsEnabled()) {
    return base::GetDiscardableMemoryBackingFieldTrialGroup() ==
           base::DiscardableMemoryTrialGroup::kAshmem;
  }
  return true;
}
#endif  // defined(OS_ANDROID)

}  // namespace

DiscardableSharedMemory::DiscardableSharedMemory()
    : mapped_size_(0), locked_page_count_(0) {
}

DiscardableSharedMemory::DiscardableSharedMemory(
    UnsafeSharedMemoryRegion shared_memory_region)
    : shared_memory_region_(std::move(shared_memory_region)),
      mapped_size_(0),
      locked_page_count_(0) {}

DiscardableSharedMemory::~DiscardableSharedMemory() = default;

bool DiscardableSharedMemory::CreateAndMap(size_t size) {
  CheckedNumeric<size_t> checked_size = size;
  checked_size += AlignToPageSize(sizeof(SharedState));
  if (!checked_size.IsValid())
    return false;

  shared_memory_region_ =
      UnsafeSharedMemoryRegion::Create(checked_size.ValueOrDie());

  if (!shared_memory_region_.IsValid())
    return false;

  shared_memory_mapping_ = shared_memory_region_.Map();
  if (!shared_memory_mapping_.IsValid())
    return false;

  mapped_size_ = shared_memory_mapping_.mapped_size() -
                 AlignToPageSize(sizeof(SharedState));

  locked_page_count_ = AlignToPageSize(mapped_size_) / base::GetPageSize();
#if DCHECK_IS_ON()
  for (size_t page = 0; page < locked_page_count_; ++page)
    locked_pages_.insert(page);
#endif

  DCHECK(last_known_usage_.is_null());
  SharedState new_state(SharedState::LOCKED, Time());
  subtle::Release_Store(
      &SharedStateFromSharedMemory(shared_memory_mapping_)->value.i,
      new_state.value.i);
  return true;
}
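// On success the mapping created above is laid out as:
//   [SharedState | padding up to one page | mapped_size_ bytes of user data]
// and memory() returns a pointer just past the page-aligned header.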

bool DiscardableSharedMemory::Map(size_t size) {
  DCHECK(!shared_memory_mapping_.IsValid());
  if (shared_memory_mapping_.IsValid())
    return false;

  shared_memory_mapping_ = shared_memory_region_.MapAt(
      0, AlignToPageSize(sizeof(SharedState)) + size);
  if (!shared_memory_mapping_.IsValid())
    return false;

  mapped_size_ = shared_memory_mapping_.mapped_size() -
                 AlignToPageSize(sizeof(SharedState));

  locked_page_count_ = AlignToPageSize(mapped_size_) / base::GetPageSize();
#if DCHECK_IS_ON()
  for (size_t page = 0; page < locked_page_count_; ++page)
    locked_pages_.insert(page);
#endif

  return true;
}

bool DiscardableSharedMemory::Unmap() {
  if (!shared_memory_mapping_.IsValid())
    return false;

  shared_memory_mapping_ = WritableSharedMemoryMapping();
  locked_page_count_ = 0;
#if DCHECK_IS_ON()
  locked_pages_.clear();
#endif
  mapped_size_ = 0;
  return true;
}

DiscardableSharedMemory::LockResult DiscardableSharedMemory::Lock(
    size_t offset, size_t length) {
  DCHECK_EQ(AlignToPageSize(offset), offset);
  DCHECK_EQ(AlignToPageSize(length), length);

  // Calls to this function must be synchronized properly.
  DFAKE_SCOPED_LOCK(thread_collision_warner_);

  DCHECK(shared_memory_mapping_.IsValid());

  // We need to successfully acquire the platform independent lock before
  // individual pages can be locked.
  if (!locked_page_count_) {
    // Return FAILED when the instance has been purged or was not initialized
    // properly, which we detect by checking if |last_known_usage_| is NULL.
    if (last_known_usage_.is_null())
      return FAILED;

    SharedState old_state(SharedState::UNLOCKED, last_known_usage_);
    SharedState new_state(SharedState::LOCKED, Time());
    SharedState result(subtle::Acquire_CompareAndSwap(
        &SharedStateFromSharedMemory(shared_memory_mapping_)->value.i,
        old_state.value.i, new_state.value.i));
    if (result.value.u != old_state.value.u) {
      // Update |last_known_usage_| in case the above CAS failed because of
      // an incorrect timestamp.
      last_known_usage_ = result.GetTimestamp();
      return FAILED;
    }
  }

  // Zero for length means "everything onward".
  if (!length)
    length = AlignToPageSize(mapped_size_) - offset;

  size_t start = offset / base::GetPageSize();
  size_t end = start + length / base::GetPageSize();
  DCHECK_LE(start, end);
  DCHECK_LE(end, AlignToPageSize(mapped_size_) / base::GetPageSize());

  // Add pages to |locked_page_count_|.
  // Note: Locking a page that is already locked is an error.
  locked_page_count_ += end - start;
#if DCHECK_IS_ON()
  // Detect incorrect usage by keeping track of exactly what pages are locked.
  for (auto page = start; page < end; ++page) {
    auto result = locked_pages_.insert(page);
    DCHECK(result.second);
  }
  DCHECK_EQ(locked_pages_.size(), locked_page_count_);
#endif

  // Always behave as if memory was purged when trying to lock a 0 byte segment.
  if (!length)
    return PURGED;

#if defined(OS_ANDROID)
  // Ensure that the platform won't discard the required pages.
  return LockPages(shared_memory_region_,
                   AlignToPageSize(sizeof(SharedState)) + offset, length);
#elif defined(OS_APPLE)
  // On macOS, there is no mechanism to lock pages. However, we do need to call
  // madvise(MADV_FREE_REUSE) in order to correctly update accounting for memory
  // footprint via task_info().
  //
  // Note that calling madvise(MADV_FREE_REUSE) on regions that haven't had
  // madvise(MADV_FREE_REUSABLE) called on them has no effect.
  //
  // Note that the corresponding call to MADV_FREE_REUSABLE is in Purge(), since
  // that's where the memory is actually released, rather than Unlock(), which
  // is a no-op on macOS.
  //
  // For more information, see
  // https://bugs.chromium.org/p/chromium/issues/detail?id=823915.
  madvise(static_cast<char*>(shared_memory_mapping_.memory()) +
              AlignToPageSize(sizeof(SharedState)),
          AlignToPageSize(mapped_size_), MADV_FREE_REUSE);
  return DiscardableSharedMemory::SUCCESS;
#else
  return DiscardableSharedMemory::SUCCESS;
#endif
}
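// Sketch of the locking contract from the caller's perspective (hypothetical
// client code, assuming a fully constructed instance):
//
//   DiscardableSharedMemory memory;
//   CHECK(memory.CreateAndMap(size));  // Pages start out locked.
//   memory.Unlock(0, 0);               // Allow the platform to discard them.
//   ...
//   if (memory.Lock(0, 0) != DiscardableSharedMemory::SUCCESS) {
//     // PURGED: contents were discarded and must be regenerated.
//     // FAILED: the lock could not be acquired, e.g. because the segment
//     // was purged and |last_known_usage_| no longer matches.
//   }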

void DiscardableSharedMemory::Unlock(size_t offset, size_t length) {
  DCHECK_EQ(AlignToPageSize(offset), offset);
  DCHECK_EQ(AlignToPageSize(length), length);

  // Calls to this function must be synchronized properly.
  DFAKE_SCOPED_LOCK(thread_collision_warner_);

  // Passing zero for |length| means "everything onward". Note that |length| may
  // still be zero after this calculation, e.g. if |mapped_size_| is zero.
  if (!length)
    length = AlignToPageSize(mapped_size_) - offset;

  DCHECK(shared_memory_mapping_.IsValid());

  // Allow the pages to be discarded by the platform, if supported.
  UnlockPages(shared_memory_region_,
              AlignToPageSize(sizeof(SharedState)) + offset, length);

  size_t start = offset / base::GetPageSize();
  size_t end = start + length / base::GetPageSize();
  DCHECK_LE(start, end);
  DCHECK_LE(end, AlignToPageSize(mapped_size_) / base::GetPageSize());

  // Remove pages from |locked_page_count_|.
  // Note: Unlocking a page that is not locked is an error.
  DCHECK_GE(locked_page_count_, end - start);
  locked_page_count_ -= end - start;
#if DCHECK_IS_ON()
  // Detect incorrect usage by keeping track of exactly what pages are locked.
  for (auto page = start; page < end; ++page) {
    auto erased_count = locked_pages_.erase(page);
    DCHECK_EQ(1u, erased_count);
  }
  DCHECK_EQ(locked_pages_.size(), locked_page_count_);
#endif

  // Early out and avoid releasing the platform independent lock if some pages
  // are still locked.
  if (locked_page_count_)
    return;

  Time current_time = Now();
  DCHECK(!current_time.is_null());

  SharedState old_state(SharedState::LOCKED, Time());
  SharedState new_state(SharedState::UNLOCKED, current_time);
  // Note: timestamp cannot be NULL as that is a unique value used when
  // locked or purged.
  DCHECK(!new_state.GetTimestamp().is_null());
  // Timestamp precision should at least be accurate to the second.
  DCHECK_EQ((new_state.GetTimestamp() - Time::UnixEpoch()).InSeconds(),
            (current_time - Time::UnixEpoch()).InSeconds());
  SharedState result(subtle::Release_CompareAndSwap(
      &SharedStateFromSharedMemory(shared_memory_mapping_)->value.i,
      old_state.value.i, new_state.value.i));

  DCHECK_EQ(old_state.value.u, result.value.u);

  last_known_usage_ = current_time;
}

void* DiscardableSharedMemory::memory() const {
  return static_cast<uint8_t*>(shared_memory_mapping_.memory()) +
         AlignToPageSize(sizeof(SharedState));
}

bool DiscardableSharedMemory::Purge(Time current_time) {
  // Calls to this function must be synchronized properly.
  DFAKE_SCOPED_LOCK(thread_collision_warner_);
  DCHECK(shared_memory_mapping_.IsValid());

  SharedState old_state(SharedState::UNLOCKED, last_known_usage_);
  SharedState new_state(SharedState::UNLOCKED, Time());
  SharedState result(subtle::Acquire_CompareAndSwap(
      &SharedStateFromSharedMemory(shared_memory_mapping_)->value.i,
      old_state.value.i, new_state.value.i));

  // Update |last_known_usage_| to |current_time| if the memory is locked. This
  // allows the caller to determine if purging failed because last known usage
  // was incorrect or memory was locked. In the second case, the caller should
  // most likely wait for some amount of time before attempting to purge
  // the memory again.
  if (result.value.u != old_state.value.u) {
    last_known_usage_ = result.GetLockState() == SharedState::LOCKED
                            ? current_time
                            : result.GetTimestamp();
    return false;
  }

// The next section will release as many resources as possible from within the
// purging process, until the client process notices the purge and releases its
// own references.
// Note: this memory will not be accessed again. The segment will be freed
// asynchronously at a later time, so just do the best we can immediately.
#if defined(OS_POSIX) && !defined(OS_NACL)
// Linux and Android provide MADV_REMOVE, which is preferred as it has a
// behavior that can be verified in tests. Other POSIX flavors (macOS, BSDs)
// provide MADV_FREE, which has the same result but purges the memory lazily.
#if defined(OS_LINUX) || defined(OS_CHROMEOS) || defined(OS_ANDROID)
#define MADV_PURGE_ARGUMENT MADV_REMOVE
#elif defined(OS_APPLE)
// MADV_FREE_REUSABLE is similar to MADV_FREE, but also marks the pages with the
// reusable bit, which allows both Activity Monitor and memory-infra to
// correctly track the pages.
#define MADV_PURGE_ARGUMENT MADV_FREE_REUSABLE
#else
#define MADV_PURGE_ARGUMENT MADV_FREE
#endif
  // Advise the kernel to remove resources associated with purged pages.
  // Subsequent accesses of memory pages will succeed, but might result in
  // zero-fill-on-demand pages.
  if (madvise(static_cast<char*>(shared_memory_mapping_.memory()) +
                  AlignToPageSize(sizeof(SharedState)),
              AlignToPageSize(mapped_size_), MADV_PURGE_ARGUMENT)) {
    DPLOG(ERROR) << "madvise() failed";
  }
#elif defined(OS_WIN)
  // On Windows, discarded pages are not returned to the system immediately and
  // not guaranteed to be zeroed when returned to the application.
  using DiscardVirtualMemoryFunction =
      DWORD(WINAPI*)(PVOID virtualAddress, SIZE_T size);
  static DiscardVirtualMemoryFunction discard_virtual_memory =
      reinterpret_cast<DiscardVirtualMemoryFunction>(GetProcAddress(
          GetModuleHandle(L"Kernel32.dll"), "DiscardVirtualMemory"));

  char* address = static_cast<char*>(shared_memory_mapping_.memory()) +
                  AlignToPageSize(sizeof(SharedState));
  size_t length = AlignToPageSize(mapped_size_);

  // Use DiscardVirtualMemory when available because it releases faster than
  // MEM_RESET.
  DWORD ret = ERROR_NOT_SUPPORTED;
  if (discard_virtual_memory) {
    ret = discard_virtual_memory(address, length);
  }

  // DiscardVirtualMemory is buggy in Win10 SP0, so fall back to MEM_RESET on
  // failure.
  if (ret != ERROR_SUCCESS) {
    void* ptr = VirtualAlloc(address, length, MEM_RESET, PAGE_READWRITE);
    CHECK(ptr);
  }
#elif defined(OS_FUCHSIA)
  // De-commit via our VMAR, rather than relying on the VMO handle, since the
  // handle may have been closed after the memory was mapped into this process.
  uint64_t address_int = reinterpret_cast<uint64_t>(
      static_cast<char*>(shared_memory_mapping_.memory()) +
      AlignToPageSize(sizeof(SharedState)));
  zx_status_t status = zx::vmar::root_self()->op_range(
      ZX_VMO_OP_DECOMMIT, address_int, AlignToPageSize(mapped_size_), nullptr,
      0);
  ZX_DCHECK(status == ZX_OK, status) << "zx_vmo_op_range(ZX_VMO_OP_DECOMMIT)";
#endif  // defined(OS_FUCHSIA)

  last_known_usage_ = Time();
  return true;
}

bool DiscardableSharedMemory::IsMemoryResident() const {
  DCHECK(shared_memory_mapping_.IsValid());

  SharedState result(subtle::NoBarrier_Load(
      &SharedStateFromSharedMemory(shared_memory_mapping_)->value.i));

  return result.GetLockState() == SharedState::LOCKED ||
         !result.GetTimestamp().is_null();
}

bool DiscardableSharedMemory::IsMemoryLocked() const {
  DCHECK(shared_memory_mapping_.IsValid());

  SharedState result(subtle::NoBarrier_Load(
      &SharedStateFromSharedMemory(shared_memory_mapping_)->value.i));

  return result.GetLockState() == SharedState::LOCKED;
}

void DiscardableSharedMemory::Close() {
  shared_memory_region_ = UnsafeSharedMemoryRegion();
}

void DiscardableSharedMemory::CreateSharedMemoryOwnershipEdge(
    trace_event::MemoryAllocatorDump* local_segment_dump,
    trace_event::ProcessMemoryDump* pmd,
    bool is_owned) const {
// Memory dumps are only supported when tracing support is enabled.
#if BUILDFLAG(ENABLE_BASE_TRACING)
  auto* shared_memory_dump = SharedMemoryTracker::GetOrCreateSharedMemoryDump(
      shared_memory_mapping_, pmd);
  // TODO(ssid): Clean this up with a new API to inherit the size of the parent
  // dump once we send the full PMD and calculate sizes inside chrome,
  // crbug.com/704203.
  size_t resident_size = shared_memory_dump->GetSizeInternal();
  local_segment_dump->AddScalar(trace_event::MemoryAllocatorDump::kNameSize,
                                trace_event::MemoryAllocatorDump::kUnitsBytes,
                                resident_size);

  // By creating an edge with a higher |importance| (w.r.t. non-owned dumps),
  // the tracing UI will attribute the effective size of the segment to the
  // client instead of the manager.
  // TODO(ssid): Define better constants in MemoryAllocatorDump for importance
  // values, crbug.com/754793.
  const int kImportance = is_owned ? 2 : 0;
  auto shared_memory_guid = shared_memory_mapping_.guid();
  local_segment_dump->AddString("id", "hash", shared_memory_guid.ToString());

  // Owned discardable segments, which are allocated by the client process,
  // could have been cleared by the discardable manager. So the segment need not
  // exist in memory, and weak dumps are created to indicate to the UI that the
  // dump should exist only if the manager also created the global dump edge.
  if (is_owned) {
    pmd->CreateWeakSharedMemoryOwnershipEdge(local_segment_dump->guid(),
                                             shared_memory_guid, kImportance);
  } else {
    pmd->CreateSharedMemoryOwnershipEdge(local_segment_dump->guid(),
                                         shared_memory_guid, kImportance);
  }
#endif  // BUILDFLAG(ENABLE_BASE_TRACING)
}

// static
DiscardableSharedMemory::LockResult DiscardableSharedMemory::LockPages(
    const UnsafeSharedMemoryRegion& region,
    size_t offset,
    size_t length) {
#if defined(OS_ANDROID)
  if (region.IsValid()) {
    if (UseAshmemUnpinningForDiscardableMemory()) {
      int pin_result =
          ashmem_pin_region(region.GetPlatformHandle(), offset, length);
      if (pin_result == ASHMEM_WAS_PURGED)
        return PURGED;
      if (pin_result < 0)
        return FAILED;
    }
  }
#endif
  return SUCCESS;
}

// static
void DiscardableSharedMemory::UnlockPages(
    const UnsafeSharedMemoryRegion& region,
    size_t offset,
    size_t length) {
#if defined(OS_ANDROID)
  if (region.IsValid()) {
    if (UseAshmemUnpinningForDiscardableMemory()) {
      int unpin_result =
          ashmem_unpin_region(region.GetPlatformHandle(), offset, length);
      DCHECK_EQ(0, unpin_result);
    }
  }
#endif
}

Time DiscardableSharedMemory::Now() const {
  return Time::Now();
}

#if defined(OS_ANDROID)
// static
bool DiscardableSharedMemory::IsAshmemDeviceSupportedForTesting() {
  return UseAshmemUnpinningForDiscardableMemory();
}
#endif

}  // namespace base