// Protocol Buffers - Google's data interchange format
// Copyright 2008 Google Inc. All rights reserved.
// https://developers.google.com/protocol-buffers/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include <google/protobuf/arena.h>

#include <algorithm>
#include <atomic>
#include <cstddef>
#include <cstdint>
#include <limits>
#include <typeinfo>

#include <google/protobuf/arena_impl.h>

#include <google/protobuf/stubs/mutex.h>
#ifdef ADDRESS_SANITIZER
#include <sanitizer/asan_interface.h>
#endif  // ADDRESS_SANITIZER

#include <google/protobuf/port_def.inc>

namespace google {
namespace protobuf {
namespace internal {

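// Allocates the memory for a new block. Block sizes double on each
// allocation, starting at the policy's start_block_size and capped at
// max_block_size, and are always large enough to hold min_bytes plus the
// block header.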
static SerialArena::Memory AllocateMemory(const AllocationPolicy* policy_ptr,
                                          size_t last_size, size_t min_bytes) {
  AllocationPolicy policy;  // default policy
  if (policy_ptr) policy = *policy_ptr;
  size_t size;
  if (last_size != 0) {
    // Double the current block size, up to a limit.
    auto max_size = policy.max_block_size;
    size = std::min(2 * last_size, max_size);
  } else {
    size = policy.start_block_size;
  }
  // Verify that min_bytes + kBlockHeaderSize won't overflow.
  GOOGLE_CHECK_LE(min_bytes,
                  std::numeric_limits<size_t>::max() - SerialArena::kBlockHeaderSize);
  size = std::max(size, SerialArena::kBlockHeaderSize + min_bytes);

  void* mem;
  if (policy.block_alloc == nullptr) {
    mem = ::operator new(size);
  } else {
    mem = policy.block_alloc(size);
  }
  return {mem, size};
}

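// Function object that returns a block to the underlying allocator (the
// policy's block_dealloc hook if set, otherwise ::operator delete) and adds
// the number of bytes released to *space_allocated_.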
class GetDeallocator {
 public:
  GetDeallocator(const AllocationPolicy* policy, size_t* space_allocated)
      : dealloc_(policy ? policy->block_dealloc : nullptr),
        space_allocated_(space_allocated) {}

  void operator()(SerialArena::Memory mem) const {
#ifdef ADDRESS_SANITIZER
    // This memory was provided by the underlying allocator as unpoisoned,
    // so return it in an unpoisoned state.
    ASAN_UNPOISON_MEMORY_REGION(mem.ptr, mem.size);
#endif  // ADDRESS_SANITIZER
    if (dealloc_) {
      dealloc_(mem.ptr, mem.size);
    } else {
#if defined(__GXX_DELETE_WITH_SIZE__) || defined(__cpp_sized_deallocation)
      ::operator delete(mem.ptr, mem.size);
#else
      ::operator delete(mem.ptr);
#endif
    }
    *space_allocated_ += mem.size;
  }

 private:
  void (*dealloc_)(void*, size_t);
  size_t* space_allocated_;
};

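// A SerialArena lives inside its first block: the Block header sits at the
// start of the memory, immediately followed by the SerialArena object, so
// the first usable byte (ptr_) begins past both headers.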
SerialArena::SerialArena(Block* b, void* owner) : space_allocated_(b->size) {
  owner_ = owner;
  head_ = b;
  ptr_ = b->Pointer(kBlockHeaderSize + ThreadSafeArena::kSerialArenaSize);
  limit_ = b->Pointer(b->size & static_cast<size_t>(-8));
}

SerialArena* SerialArena::New(Memory mem, void* owner) {
  GOOGLE_DCHECK_LE(kBlockHeaderSize + ThreadSafeArena::kSerialArenaSize, mem.size);

  auto b = new (mem.ptr) Block{nullptr, mem.size};
  return new (b->Pointer(kBlockHeaderSize)) SerialArena(b, owner);
}

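// Frees all blocks except the initial one (the tail of the list, since new
// blocks are prepended at head_). The initial block embeds this SerialArena
// itself, so it is returned to the caller to deallocate.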
template <typename Deallocator>
SerialArena::Memory SerialArena::Free(Deallocator deallocator) {
  Block* b = head_;
  Memory mem = {b, b->size};
  while (b->next) {
    b = b->next;  // We must first advance before deleting this block
    deallocator(mem);
    mem = {b, b->size};
  }
  return mem;
}

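// Out-of-line slow paths: allocate a fresh block sized for the request,
// then retry the inline fast path, which is now guaranteed to succeed.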
PROTOBUF_NOINLINE
std::pair<void*, SerialArena::CleanupNode*>
SerialArena::AllocateAlignedWithCleanupFallback(
    size_t n, const AllocationPolicy* policy) {
  AllocateNewBlock(n + kCleanupSize, policy);
  return AllocateAlignedWithCleanup(n, policy);
}

PROTOBUF_NOINLINE
void* SerialArena::AllocateAlignedFallback(size_t n,
                                           const AllocationPolicy* policy) {
  AllocateNewBlock(n, policy);
  return AllocateAligned(n, policy);
}

void SerialArena::AllocateNewBlock(size_t n, const AllocationPolicy* policy) {
  // Sync limit to block
  head_->start = reinterpret_cast<CleanupNode*>(limit_);

  // Record how much was used in this block.
  space_used_ += ptr_ - head_->Pointer(kBlockHeaderSize);

  auto mem = AllocateMemory(policy, head_->size, n);
  // We don't want to emit an expensive RMW instruction that requires
  // exclusive access to a cacheline. Hence we write it in terms of a
  // regular add.
  auto relaxed = std::memory_order_relaxed;
  space_allocated_.store(space_allocated_.load(relaxed) + mem.size, relaxed);
  head_ = new (mem.ptr) Block{head_, mem.size};
  ptr_ = head_->Pointer(kBlockHeaderSize);
  limit_ = head_->Pointer(head_->size);

#ifdef ADDRESS_SANITIZER
  ASAN_POISON_MEMORY_REGION(ptr_, limit_ - ptr_);
#endif  // ADDRESS_SANITIZER
}

uint64 SerialArena::SpaceUsed() const {
  uint64 space_used = ptr_ - head_->Pointer(kBlockHeaderSize);
  space_used += space_used_;
  // Remove the overhead of the SerialArena itself.
  space_used -= ThreadSafeArena::kSerialArenaSize;
  return space_used;
}

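// Runs every cleanup function recorded in this arena. Each block's cleanup
// nodes occupy the range from b->start to the block's end rounded down to
// 8 bytes, so we walk that range in every block of the chain.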
void SerialArena::CleanupList() {
  Block* b = head_;
  b->start = reinterpret_cast<CleanupNode*>(limit_);
  do {
    auto* limit = reinterpret_cast<CleanupNode*>(
        b->Pointer(b->size & static_cast<size_t>(-8)));
    auto it = b->start;
    auto num = limit - it;
    if (num > 0) {
      for (; it < limit; it++) {
        it->cleanup(it->elem);
      }
    }
    b = b->next;
  } while (b);
}

ThreadSafeArena::CacheAlignedLifecycleIdGenerator
    ThreadSafeArena::lifecycle_id_generator_;
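// Per-thread cache of the most recently used SerialArena. Three variants
// follow: a heap-backed ThreadLocalStorage for platforms without
// thread_local support, a function-local thread_local for DLL builds, and a
// plain thread_local variable otherwise.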
#if defined(GOOGLE_PROTOBUF_NO_THREADLOCAL)
ThreadSafeArena::ThreadCache& ThreadSafeArena::thread_cache() {
  static internal::ThreadLocalStorage<ThreadCache>* thread_cache_ =
      new internal::ThreadLocalStorage<ThreadCache>();
  return *thread_cache_->Get();
}
#elif defined(PROTOBUF_USE_DLLS)
ThreadSafeArena::ThreadCache& ThreadSafeArena::thread_cache() {
  static PROTOBUF_THREAD_LOCAL ThreadCache thread_cache_ = {
      0, static_cast<LifecycleIdAtomic>(-1), nullptr};
  return thread_cache_;
}
#else
PROTOBUF_THREAD_LOCAL ThreadSafeArena::ThreadCache
    ThreadSafeArena::thread_cache_ = {0, static_cast<LifecycleIdAtomic>(-1),
                                      nullptr};
#endif

void ThreadSafeArena::InitializeFrom(void* mem, size_t size) {
  GOOGLE_DCHECK_EQ(reinterpret_cast<uintptr_t>(mem) & 7, 0u);
  Init(false);

  // Ignore the initial block if it is too small.
  if (mem != nullptr && size >= kBlockHeaderSize + kSerialArenaSize) {
    alloc_policy_ |= kUserOwnedInitialBlock;
    SetInitialBlock(mem, size);
  }
}

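// The AllocationPolicy is copied into the arena's first block, and
// alloc_policy_ becomes a tagged pointer to that copy: the low bits hold
// flags such as kUserOwnedInitialBlock, the remaining bits the policy's
// address.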
void ThreadSafeArena::InitializeWithPolicy(void* mem, size_t size,
                                           bool record_allocs,
                                           AllocationPolicy policy) {
  GOOGLE_DCHECK_EQ(reinterpret_cast<uintptr_t>(mem) & 7, 0u);

  Init(record_allocs);

  // Ignore the initial block if it is too small. The minimum size includes
  // the AllocationPolicy, so that the policy can be stored in the first
  // block.
  constexpr size_t kAPSize = internal::AlignUpTo8(sizeof(AllocationPolicy));
  constexpr size_t kMinimumSize = kBlockHeaderSize + kSerialArenaSize + kAPSize;
  if (mem != nullptr && size >= kMinimumSize) {
    alloc_policy_ = kUserOwnedInitialBlock;
  } else {
    alloc_policy_ = 0;
    auto tmp = AllocateMemory(&policy, 0, kMinimumSize);
    mem = tmp.ptr;
    size = tmp.size;
  }
  SetInitialBlock(mem, size);

  auto sa = threads_.load(std::memory_order_relaxed);
  // We ensured enough space above, so this cannot fail.
  void* p;
  if (!sa || !sa->MaybeAllocateAligned(kAPSize, &p)) {
    GOOGLE_LOG(FATAL) << "MaybeAllocateAligned cannot fail here.";
    return;
  }
  new (p) AllocationPolicy{policy};
  alloc_policy_ |= reinterpret_cast<intptr_t>(p);
}

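// Assigns this arena a fresh lifecycle id and stores the record-allocs flag
// in its low bit; ids advance in steps of two, leaving bit 0 free for the
// tag.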
void ThreadSafeArena::Init(bool record_allocs) {
  ThreadCache& tc = thread_cache();
  auto id = tc.next_lifecycle_id;
  // We increment lifecycle_ids by multiples of two so we can use bit 0 as
  // a tag.
  constexpr uint64 kDelta = 2;
  constexpr uint64 kInc = ThreadCache::kPerThreadIds * kDelta;
  if (PROTOBUF_PREDICT_FALSE((id & (kInc - 1)) == 0)) {
    constexpr auto relaxed = std::memory_order_relaxed;
    // On platforms that don't support uint64 atomics we cannot afford to
    // increment by large intervals and expect uniqueness due to wrapping,
    // hence we only add by 1.
    id = lifecycle_id_generator_.id.fetch_add(1, relaxed) * kInc;
  }
  tc.next_lifecycle_id = id + kDelta;
  tag_and_id_ = id | (record_allocs ? kRecordAllocs : 0);
  hint_.store(nullptr, std::memory_order_relaxed);
  threads_.store(nullptr, std::memory_order_relaxed);
}

void ThreadSafeArena::SetInitialBlock(void* mem, size_t size) {
  SerialArena* serial = SerialArena::New({mem, size}, &thread_cache());
  serial->set_next(nullptr);
  threads_.store(serial, std::memory_order_relaxed);
  CacheSerialArena(serial);
}

ThreadSafeArena::~ThreadSafeArena() {
  // Have to do this in a first pass, because some of the destructors might
  // refer to memory in other blocks.
  CleanupList();

  size_t space_allocated = 0;
  auto mem = Free(&space_allocated);

  // The policy is about to be destroyed along with its block, so save the
  // metrics collector first.
  auto p = AllocPolicy();
  ArenaMetricsCollector* collector = p ? p->metrics_collector : nullptr;

  if (alloc_policy_ & kUserOwnedInitialBlock) {
    space_allocated += mem.size;
  } else {
    GetDeallocator(AllocPolicy(), &space_allocated)(mem);
  }

  if (collector) collector->OnDestroy(space_allocated);
}

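// Frees the blocks of every SerialArena in the thread list, except for the
// final memory region, which is returned to the caller: the destructor and
// Reset() decide whether to free it, since it may be a user-owned initial
// block.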
SerialArena::Memory ThreadSafeArena::Free(size_t* space_allocated) {
  SerialArena::Memory mem = {nullptr, 0};
  auto deallocator = GetDeallocator(AllocPolicy(), space_allocated);
  PerSerialArena([deallocator, &mem](SerialArena* a) {
    if (mem.ptr) deallocator(mem);
    mem = a->Free(deallocator);
  });
  return mem;
}

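// Destroys all arena-owned objects and frees all blocks, then reinitializes
// the arena, reusing the initial block (and re-embedding the saved policy)
// where possible. Returns the total number of bytes that had been allocated.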
uint64 ThreadSafeArena::Reset() {
  // Have to do this in a first pass, because some of the destructors might
  // refer to memory in other blocks.
  CleanupList();

  // Discard all blocks except the special block (if present).
  size_t space_allocated = 0;
  auto mem = Free(&space_allocated);

  if (AllocPolicy()) {
    auto saved_policy = *AllocPolicy();
    if (alloc_policy_ & kUserOwnedInitialBlock) {
      space_allocated += mem.size;
    } else {
      GetDeallocator(AllocPolicy(), &space_allocated)(mem);
      mem.ptr = nullptr;
      mem.size = 0;
    }
    ArenaMetricsCollector* collector = saved_policy.metrics_collector;
    if (collector) collector->OnReset(space_allocated);
    InitializeWithPolicy(mem.ptr, mem.size, ShouldRecordAlloc(), saved_policy);
  } else {
    // No allocation policy.
    if (alloc_policy_ & kUserOwnedInitialBlock) {
      space_allocated += mem.size;
      InitializeFrom(mem.ptr, mem.size);
    } else {
      GetDeallocator(AllocPolicy(), &space_allocated)(mem);
      Init(false);
    }
  }

  return space_allocated;
}

std::pair<void*, SerialArena::CleanupNode*>
ThreadSafeArena::AllocateAlignedWithCleanup(size_t n,
                                            const std::type_info* type) {
  SerialArena* arena;
  if (PROTOBUF_PREDICT_TRUE(GetSerialArenaFast(tag_and_id_, &arena))) {
    return arena->AllocateAlignedWithCleanup(n, AllocPolicy());
  } else {
    return AllocateAlignedWithCleanupFallback(n, type);
  }
}

void ThreadSafeArena::AddCleanup(void* elem, void (*cleanup)(void*)) {
  SerialArena* arena;
  if (PROTOBUF_PREDICT_TRUE(GetSerialArenaFast(LifeCycleId(), &arena))) {
    arena->AddCleanup(elem, cleanup, AllocPolicy());
  } else {
    AddCleanupFallback(elem, cleanup);
  }
}

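// The fallback paths double as the allocation-sampling hook: when
// ShouldRecordAlloc() is set, allocations are funneled through them so the
// requested type and size can be reported via RecordAlloc().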
PROTOBUF_NOINLINE
void* ThreadSafeArena::AllocateAlignedFallback(size_t n,
                                               const std::type_info* type) {
  if (ShouldRecordAlloc()) {
    RecordAlloc(type, n);
    SerialArena* arena;
    if (PROTOBUF_PREDICT_TRUE(GetSerialArenaFast(LifeCycleId(), &arena))) {
      return arena->AllocateAligned(n, AllocPolicy());
    }
  }
  return GetSerialArenaFallback(&thread_cache())
      ->AllocateAligned(n, AllocPolicy());
}

PROTOBUF_NOINLINE
std::pair<void*, SerialArena::CleanupNode*>
ThreadSafeArena::AllocateAlignedWithCleanupFallback(
    size_t n, const std::type_info* type) {
  if (ShouldRecordAlloc()) {
    RecordAlloc(type, n);
    SerialArena* arena;
    if (GetSerialArenaFast(LifeCycleId(), &arena)) {
      return arena->AllocateAlignedWithCleanup(n, AllocPolicy());
    }
  }
  return GetSerialArenaFallback(&thread_cache())
      ->AllocateAlignedWithCleanup(n, AllocPolicy());
}

PROTOBUF_NOINLINE
void ThreadSafeArena::AddCleanupFallback(void* elem, void (*cleanup)(void*)) {
  GetSerialArenaFallback(&thread_cache())
      ->AddCleanup(elem, cleanup, AllocPolicy());
}

uint64 ThreadSafeArena::SpaceAllocated() const {
  SerialArena* serial = threads_.load(std::memory_order_acquire);
  uint64 res = 0;
  for (; serial; serial = serial->next()) {
    res += serial->SpaceAllocated();
  }
  return res;
}

uint64 ThreadSafeArena::SpaceUsed() const {
  SerialArena* serial = threads_.load(std::memory_order_acquire);
  uint64 space_used = 0;
  for (; serial; serial = serial->next()) {
    space_used += serial->SpaceUsed();
  }
  return space_used - (AllocPolicy() ? sizeof(AllocationPolicy) : 0);
}

void ThreadSafeArena::CleanupList() {
  PerSerialArena([](SerialArena* a) { a->CleanupList(); });
}

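// Slow path of GetSerialArena: scan the linked list for the SerialArena
// owned by `me` and, if none exists, create one and push it onto threads_
// with a lock-free compare-exchange loop.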
PROTOBUF_NOINLINE
SerialArena* ThreadSafeArena::GetSerialArenaFallback(void* me) {
  // Look for this SerialArena in our linked list.
  SerialArena* serial = threads_.load(std::memory_order_acquire);
  for (; serial; serial = serial->next()) {
    if (serial->owner() == me) {
      break;
    }
  }

  if (!serial) {
    // This thread doesn't have any SerialArena, which also means it doesn't
    // have any blocks yet. So we'll allocate its first block now.
    serial = SerialArena::New(
        AllocateMemory(AllocPolicy(), 0, kSerialArenaSize), me);

    SerialArena* head = threads_.load(std::memory_order_relaxed);
    do {
      serial->set_next(head);
    } while (!threads_.compare_exchange_weak(
        head, serial, std::memory_order_release, std::memory_order_relaxed));
  }

  CacheSerialArena(serial);
  return serial;
}

}  // namespace internal

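// Hot allocation entry points. PROTOBUF_FUNC_ALIGN(32) places each function
// on a 32-byte boundary, presumably to keep these short, frequently called
// routines friendly to instruction fetch and branch prediction.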
PROTOBUF_FUNC_ALIGN(32)
void* Arena::AllocateAlignedNoHook(size_t n) {
  return impl_.AllocateAligned(n, nullptr);
}

PROTOBUF_FUNC_ALIGN(32)
void* Arena::AllocateAlignedWithHook(size_t n, const std::type_info* type) {
  return impl_.AllocateAligned(n, type);
}

PROTOBUF_FUNC_ALIGN(32)
std::pair<void*, internal::SerialArena::CleanupNode*>
Arena::AllocateAlignedWithCleanup(size_t n, const std::type_info* type) {
  return impl_.AllocateAlignedWithCleanup(n, type);
}

}  // namespace protobuf
}  // namespace google

#include <google/protobuf/port_undef.inc>