//===-- asan_thread.cpp ---------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// Thread-related code.
//===----------------------------------------------------------------------===//
#include "asan_allocator.h"
#include "asan_interceptors.h"
#include "asan_poisoning.h"
#include "asan_stack.h"
#include "asan_thread.h"
#include "asan_mapping.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_tls_get_addr.h"
#include "lsan/lsan_common.h"

namespace __asan {

// AsanThreadContext implementation.

void AsanThreadContext::OnCreated(void *arg) {
  CreateThreadContextArgs *args = static_cast<CreateThreadContextArgs*>(arg);
  if (args->stack)
    stack_id = StackDepotPut(*args->stack);
  thread = args->thread;
  thread->set_context(this);
}

void AsanThreadContext::OnFinished() {
  // Drop the link to the AsanThread object.
  thread = nullptr;
}

// MIPS requires aligned addresses.
static ALIGNED(16) char thread_registry_placeholder[sizeof(ThreadRegistry)];
static ThreadRegistry *asan_thread_registry;

static BlockingMutex mu_for_thread_context(LINKER_INITIALIZED);
static LowLevelAllocator allocator_for_thread_context;

static ThreadContextBase *GetAsanThreadContext(u32 tid) {
  BlockingMutexLock lock(&mu_for_thread_context);
  return new(allocator_for_thread_context) AsanThreadContext(tid);
}

ThreadRegistry &asanThreadRegistry() {
  static bool initialized;
  // Don't worry about thread safety - this should only be called when there
  // is a single thread.
  if (!initialized) {
    // Never reuse ASan threads: we store a pointer to the AsanThreadContext
    // in TSD and can't reliably tell when no more TSD destructors will
    // be called. It would be wrong to reuse an AsanThreadContext for another
    // thread before all TSD destructors have been called for it.
    asan_thread_registry = new(thread_registry_placeholder) ThreadRegistry(
        GetAsanThreadContext, kMaxNumberOfThreads, kMaxNumberOfThreads);
    initialized = true;
  }
  return *asan_thread_registry;
}

AsanThreadContext *GetThreadContextByTidLocked(u32 tid) {
  return static_cast<AsanThreadContext *>(
      asanThreadRegistry().GetThreadLocked(tid));
}

// AsanThread implementation.

AsanThread *AsanThread::Create(thread_callback_t start_routine, void *arg,
                               u32 parent_tid, StackTrace *stack,
                               bool detached) {
  uptr PageSize = GetPageSizeCached();
  uptr size = RoundUpTo(sizeof(AsanThread), PageSize);
  AsanThread *thread = (AsanThread*)MmapOrDie(size, __func__);
  thread->start_routine_ = start_routine;
  thread->arg_ = arg;
  AsanThreadContext::CreateThreadContextArgs args = {thread, stack};
  asanThreadRegistry().CreateThread(*reinterpret_cast<uptr *>(thread), detached,
                                    parent_tid, &args);

  return thread;
}

void AsanThread::TSDDtor(void *tsd) {
  AsanThreadContext *context = (AsanThreadContext*)tsd;
  VReport(1, "T%d TSDDtor\n", context->tid);
  if (context->thread)
    context->thread->Destroy();
}

void AsanThread::Destroy() {
  int tid = this->tid();
  VReport(1, "T%d exited\n", tid);

  malloc_storage().CommitBack();
  if (common_flags()->use_sigaltstack) UnsetAlternateSignalStack();
  asanThreadRegistry().FinishThread(tid);
  FlushToDeadThreadStats(&stats_);
  // We also clear the shadow on thread destruction because
  // some code may still be executing in later TSD destructors
  // and we don't want it to have any poisoned stack.
  ClearShadowForThreadStackAndTLS();
  DeleteFakeStack(tid);
  uptr size = RoundUpTo(sizeof(AsanThread), GetPageSizeCached());
  UnmapOrDie(this, size);
  DTLS_Destroy();
}

void AsanThread::StartSwitchFiber(FakeStack **fake_stack_save, uptr bottom,
                                  uptr size) {
  if (atomic_load(&stack_switching_, memory_order_relaxed)) {
    Report("ERROR: starting fiber switch while in fiber switch\n");
    Die();
  }

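  // Record the target stack bounds before publishing stack_switching_, so a
  // reader that observes the flag (GetStackBounds uses an acquire load) also
  // sees consistent next_stack_* values.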
  next_stack_bottom_ = bottom;
  next_stack_top_ = bottom + size;
  atomic_store(&stack_switching_, 1, memory_order_release);

  FakeStack *current_fake_stack = fake_stack_;
  if (fake_stack_save)
    *fake_stack_save = fake_stack_;
  fake_stack_ = nullptr;
  SetTLSFakeStack(nullptr);
  // If fake_stack_save is null, the fiber is dying; destroy its fake stack.
  if (!fake_stack_save && current_fake_stack)
    current_fake_stack->Destroy(this->tid());
}

void AsanThread::FinishSwitchFiber(FakeStack *fake_stack_save,
                                   uptr *bottom_old,
                                   uptr *size_old) {
  if (!atomic_load(&stack_switching_, memory_order_relaxed)) {
    Report("ERROR: finishing a fiber switch that has not started\n");
    Die();
  }

  if (fake_stack_save) {
    SetTLSFakeStack(fake_stack_save);
    fake_stack_ = fake_stack_save;
  }

  if (bottom_old)
    *bottom_old = stack_bottom_;
  if (size_old)
    *size_old = stack_top_ - stack_bottom_;
  stack_bottom_ = next_stack_bottom_;
  stack_top_ = next_stack_top_;
  atomic_store(&stack_switching_, 0, memory_order_release);
  next_stack_top_ = 0;
  next_stack_bottom_ = 0;
}

inline AsanThread::StackBounds AsanThread::GetStackBounds() const {
  if (!atomic_load(&stack_switching_, memory_order_acquire)) {
    // Make sure the stack bounds are fully initialized.
    if (stack_bottom_ >= stack_top_) return {0, 0};
    return {stack_bottom_, stack_top_};
  }
  char local;
  const uptr cur_stack = (uptr)&local;
  // Note: need to check the next stack first, because FinishSwitchFiber
  // may be in the process of overwriting stack_top_/bottom_. But in that case
  // we are already on the next stack.
  if (cur_stack >= next_stack_bottom_ && cur_stack < next_stack_top_)
    return {next_stack_bottom_, next_stack_top_};
  return {stack_bottom_, stack_top_};
}

uptr AsanThread::stack_top() {
  return GetStackBounds().top;
}

uptr AsanThread::stack_bottom() {
  return GetStackBounds().bottom;
}

uptr AsanThread::stack_size() {
  const auto bounds = GetStackBounds();
  return bounds.top - bounds.bottom;
}

// We want to create the FakeStack lazily on the first use, but not earlier
// than the stack size is known; the procedure also has to be async-signal
// safe.
FakeStack *AsanThread::AsyncSignalSafeLazyInitFakeStack() {
  uptr stack_size = this->stack_size();
  if (stack_size == 0)  // stack_size is not yet available, don't use FakeStack.
    return nullptr;
  uptr old_val = 0;
  // fake_stack_ has 3 states:
  // 0   -- not initialized
  // 1   -- being initialized
  // ptr -- initialized
  // This CAS checks if the state was 0 and, if so, changes it to state 1;
  // if that succeeds, it initializes the pointer.
  if (atomic_compare_exchange_strong(
          reinterpret_cast<atomic_uintptr_t *>(&fake_stack_), &old_val, 1UL,
          memory_order_relaxed)) {
    uptr stack_size_log = Log2(RoundUpToPowerOfTwo(stack_size));
    CHECK_LE(flags()->min_uar_stack_size_log, flags()->max_uar_stack_size_log);
    stack_size_log =
        Min(stack_size_log, static_cast<uptr>(flags()->max_uar_stack_size_log));
    stack_size_log =
        Max(stack_size_log, static_cast<uptr>(flags()->min_uar_stack_size_log));
    fake_stack_ = FakeStack::Create(stack_size_log);
    DCHECK_EQ(GetCurrentThread(), this);
    SetTLSFakeStack(fake_stack_);
    return fake_stack_;
  }
  return nullptr;
}

void AsanThread::Init(const InitOptions *options) {
  DCHECK_NE(tid(), ThreadRegistry::kUnknownTid);
  next_stack_top_ = next_stack_bottom_ = 0;
  atomic_store(&stack_switching_, false, memory_order_release);
  CHECK_EQ(this->stack_size(), 0U);
  SetThreadStackAndTls(options);
  if (stack_top_ != stack_bottom_) {
    CHECK_GT(this->stack_size(), 0U);
    CHECK(AddrIsInMem(stack_bottom_));
    CHECK(AddrIsInMem(stack_top_ - 1));
  }
  ClearShadowForThreadStackAndTLS();
  fake_stack_ = nullptr;
  if (__asan_option_detect_stack_use_after_return &&
      tid() == GetCurrentTidOrInvalid()) {
    // AsyncSignalSafeLazyInitFakeStack makes use of threadlocals and must be
    // called from the context of the thread it is initializing, not its
    // parent. Most platforms call AsanThread::Init on the newly-spawned
    // thread, but Fuchsia calls it from the parent thread; the tid check above
    // then fails and we avoid calling AsyncSignalSafeLazyInitFakeStack here.
    // In that case it will be called by the new thread when it first attempts
    // to access the fake stack.
    AsyncSignalSafeLazyInitFakeStack();
  }
  int local = 0;
  VReport(1, "T%d: stack [%p,%p) size 0x%zx; local=%p\n", tid(),
          (void *)stack_bottom_, (void *)stack_top_, stack_top_ - stack_bottom_,
          &local);
}

// Fuchsia and RTEMS don't use ThreadStart.
// asan_fuchsia.cpp/asan_rtems.cpp define CreateMainThread and
// SetThreadStackAndTls.
#if !SANITIZER_FUCHSIA && !SANITIZER_RTEMS

thread_return_t AsanThread::ThreadStart(tid_t os_id) {
  Init();
  asanThreadRegistry().StartThread(tid(), os_id, ThreadType::Regular, nullptr);

  if (common_flags()->use_sigaltstack) SetAlternateSignalStack();

  if (!start_routine_) {
    // start_routine_ == 0 if we're on the main thread or on one of the
    // OS X libdispatch worker threads. But nobody is supposed to call
    // ThreadStart() for the worker threads.
    CHECK_EQ(tid(), 0);
    return 0;
  }

  thread_return_t res = start_routine_(arg_);

  // On POSIX systems we defer this to the TSD destructor. LSan will consider
  // the thread's memory as non-live from the moment we call Destroy(), even
  // though that memory might contain pointers to heap objects which will be
  // cleaned up by a user-defined TSD destructor. Thus, calling Destroy()
  // before the TSD destructors have run might cause false positives in LSan.
  if (!SANITIZER_POSIX)
    this->Destroy();

  return res;
}

AsanThread *CreateMainThread() {
  AsanThread *main_thread = AsanThread::Create(
      /* start_routine */ nullptr, /* arg */ nullptr, /* parent_tid */ 0,
      /* stack */ nullptr, /* detached */ true);
  SetCurrentThread(main_thread);
  main_thread->ThreadStart(internal_getpid());
  return main_thread;
}

// This implementation doesn't use the argument, which is just passed down
// from the caller of Init (see above). It's only there to support
// OS-specific implementations that need more information passed through.
void AsanThread::SetThreadStackAndTls(const InitOptions *options) {
  DCHECK_EQ(options, nullptr);
  uptr tls_size = 0;
  uptr stack_size = 0;
  GetThreadStackAndTls(tid() == 0, &stack_bottom_, &stack_size, &tls_begin_,
                       &tls_size);
  stack_top_ = stack_bottom_ + stack_size;
  tls_end_ = tls_begin_ + tls_size;
  dtls_ = DTLS_Get();

  if (stack_top_ != stack_bottom_) {
    int local;
    CHECK(AddrIsInStack((uptr)&local));
  }
}

#endif  // !SANITIZER_FUCHSIA && !SANITIZER_RTEMS

void AsanThread::ClearShadowForThreadStackAndTLS() {
  if (stack_top_ != stack_bottom_)
    PoisonShadow(stack_bottom_, stack_top_ - stack_bottom_, 0);
  if (tls_begin_ != tls_end_) {
    uptr tls_begin_aligned = RoundDownTo(tls_begin_, SHADOW_GRANULARITY);
    uptr tls_end_aligned = RoundUpTo(tls_end_, SHADOW_GRANULARITY);
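    // tls_end_ need not be granule-aligned: unpoison the whole granules and
    // encode the number of addressable bytes in the trailing partial granule.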
    FastPoisonShadowPartialRightRedzone(tls_begin_aligned,
                                        tls_end_ - tls_begin_aligned,
                                        tls_end_aligned - tls_end_, 0);
  }
}

bool AsanThread::GetStackFrameAccessByAddr(uptr addr,
                                           StackFrameAccess *access) {
  if (stack_top_ == stack_bottom_)
    return false;

  uptr bottom = 0;
  if (AddrIsInStack(addr)) {
    bottom = stack_bottom();
  } else if (has_fake_stack()) {
    bottom = fake_stack()->AddrIsInFakeStack(addr);
    CHECK(bottom);
    access->offset = addr - bottom;
    access->frame_pc = ((uptr*)bottom)[2];
    access->frame_descr = (const char *)((uptr*)bottom)[1];
    return true;
  }
  uptr aligned_addr = RoundDownTo(addr, SANITIZER_WORDSIZE / 8);  // align addr.
  uptr mem_ptr = RoundDownTo(aligned_addr, SHADOW_GRANULARITY);
  u8 *shadow_ptr = (u8*)MemToShadow(aligned_addr);
  u8 *shadow_bottom = (u8*)MemToShadow(bottom);

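  // Walk the shadow downwards until we reach the left redzone of the frame
  // that contains addr.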
  while (shadow_ptr >= shadow_bottom &&
         *shadow_ptr != kAsanStackLeftRedzoneMagic) {
    shadow_ptr--;
    mem_ptr -= SHADOW_GRANULARITY;
  }

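  // Now skip over the left redzone itself; the frame header (magic, frame
  // description, pc) is stored at its lowest granule.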
  while (shadow_ptr >= shadow_bottom &&
         *shadow_ptr == kAsanStackLeftRedzoneMagic) {
    shadow_ptr--;
    mem_ptr -= SHADOW_GRANULARITY;
  }

  if (shadow_ptr < shadow_bottom) {
    return false;
  }

  uptr* ptr = (uptr*)(mem_ptr + SHADOW_GRANULARITY);
  CHECK(ptr[0] == kCurrentStackFrameMagic);
  access->offset = addr - (uptr)ptr;
  access->frame_pc = ptr[2];
  access->frame_descr = (const char*)ptr[1];
  return true;
}

uptr AsanThread::GetStackVariableShadowStart(uptr addr) {
  uptr bottom = 0;
  if (AddrIsInStack(addr)) {
    bottom = stack_bottom();
  } else if (has_fake_stack()) {
    bottom = fake_stack()->AddrIsInFakeStack(addr);
    if (bottom == 0) {
      return 0;
    }
  } else {
    return 0;
  }

  uptr aligned_addr = RoundDownTo(addr, SANITIZER_WORDSIZE / 8);  // align addr.
  u8 *shadow_ptr = (u8*)MemToShadow(aligned_addr);
  u8 *shadow_bottom = (u8*)MemToShadow(bottom);

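  // Scan the shadow downwards until we hit any stack redzone byte; the
  // variable's shadow region starts just above it.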
  while (shadow_ptr >= shadow_bottom &&
         (*shadow_ptr != kAsanStackLeftRedzoneMagic &&
          *shadow_ptr != kAsanStackMidRedzoneMagic &&
          *shadow_ptr != kAsanStackRightRedzoneMagic))
    shadow_ptr--;

  return (uptr)shadow_ptr + 1;
}

bool AsanThread::AddrIsInStack(uptr addr) {
  const auto bounds = GetStackBounds();
  return addr >= bounds.bottom && addr < bounds.top;
}

static bool ThreadStackContainsAddress(ThreadContextBase *tctx_base,
                                       void *addr) {
  AsanThreadContext *tctx = static_cast<AsanThreadContext*>(tctx_base);
  AsanThread *t = tctx->thread;
  if (!t) return false;
  if (t->AddrIsInStack((uptr)addr)) return true;
  if (t->has_fake_stack() && t->fake_stack()->AddrIsInFakeStack((uptr)addr))
    return true;
  return false;
}

AsanThread *GetCurrentThread() {
  if (SANITIZER_RTEMS && !asan_inited)
    return nullptr;

  AsanThreadContext *context =
      reinterpret_cast<AsanThreadContext *>(AsanTSDGet());
  if (!context) {
    if (SANITIZER_ANDROID) {
      // On Android, the libc constructor is called _after_ asan_init, and it
      // cleans up TSD. Try to figure out if this is still the main thread by
      // the stack address. We are not entirely sure that we have the correct
      // main thread limits, so only do this magic on Android, and only if the
      // found thread is the main thread.
      AsanThreadContext *tctx = GetThreadContextByTidLocked(0);
      if (tctx && ThreadStackContainsAddress(tctx, &context)) {
        SetCurrentThread(tctx->thread);
        return tctx->thread;
      }
    }
    return nullptr;
  }
  return context->thread;
}

void SetCurrentThread(AsanThread *t) {
  CHECK(t->context());
  VReport(2, "SetCurrentThread: %p for thread %p\n", t->context(),
          (void *)GetThreadSelf());
  // Make sure we do not reset the current AsanThread.
  CHECK_EQ(0, AsanTSDGet());
  AsanTSDSet(t->context());
  CHECK_EQ(t->context(), AsanTSDGet());
}

u32 GetCurrentTidOrInvalid() {
  AsanThread *t = GetCurrentThread();
  return t ? t->tid() : kInvalidTid;
}

AsanThread *FindThreadByStackAddress(uptr addr) {
  asanThreadRegistry().CheckLocked();
  AsanThreadContext *tctx = static_cast<AsanThreadContext *>(
      asanThreadRegistry().FindThreadContextLocked(ThreadStackContainsAddress,
                                                   (void *)addr));
  return tctx ? tctx->thread : nullptr;
}

void EnsureMainThreadIDIsCorrect() {
  AsanThreadContext *context =
      reinterpret_cast<AsanThreadContext *>(AsanTSDGet());
  if (context && (context->tid == 0))
    context->os_id = GetTid();
}

__asan::AsanThread *GetAsanThreadByOsIDLocked(tid_t os_id) {
  __asan::AsanThreadContext *context = static_cast<__asan::AsanThreadContext *>(
      __asan::asanThreadRegistry().FindThreadContextByOsIDLocked(os_id));
  if (!context) return nullptr;
  return context->thread;
}
} // namespace __asan

// --- Implementation of LSan-specific functions --- {{{1
namespace __lsan {
bool GetThreadRangesLocked(tid_t os_id, uptr *stack_begin, uptr *stack_end,
                           uptr *tls_begin, uptr *tls_end, uptr *cache_begin,
                           uptr *cache_end, DTLS **dtls) {
  __asan::AsanThread *t = __asan::GetAsanThreadByOsIDLocked(os_id);
  if (!t) return false;
  *stack_begin = t->stack_bottom();
  *stack_end = t->stack_top();
  *tls_begin = t->tls_begin();
  *tls_end = t->tls_end();
  // ASan doesn't keep allocator caches in TLS, so these are unused.
  *cache_begin = 0;
  *cache_end = 0;
  *dtls = t->dtls();
  return true;
}

void GetAllThreadAllocatorCachesLocked(InternalMmapVector<uptr> *caches) {}

void ForEachExtraStackRange(tid_t os_id, RangeIteratorCallback callback,
                            void *arg) {
  __asan::AsanThread *t = __asan::GetAsanThreadByOsIDLocked(os_id);
  if (t && t->has_fake_stack())
    t->fake_stack()->ForEachFakeFrame(callback, arg);
}

void LockThreadRegistry() {
  __asan::asanThreadRegistry().Lock();
}

void UnlockThreadRegistry() {
  __asan::asanThreadRegistry().Unlock();
}

ThreadRegistry *GetThreadRegistryLocked() {
  __asan::asanThreadRegistry().CheckLocked();
  return &__asan::asanThreadRegistry();
}

void EnsureMainThreadIDIsCorrect() {
  __asan::EnsureMainThreadIDIsCorrect();
}
} // namespace __lsan

// ---------------------- Interface ---------------- {{{1
using namespace __asan;

extern "C" {
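// The fiber-switching entry points below are the public interface declared
// and documented in <sanitizer/common_interface_defs.h>.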
SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_start_switch_fiber(void **fakestacksave, const void *bottom,
                                    uptr size) {
  AsanThread *t = GetCurrentThread();
  if (!t) {
    VReport(1, "__asan_start_switch_fiber called from unknown thread\n");
    return;
  }
  t->StartSwitchFiber((FakeStack**)fakestacksave, (uptr)bottom, size);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_finish_switch_fiber(void* fakestack,
                                     const void **bottom_old,
                                     uptr *size_old) {
  AsanThread *t = GetCurrentThread();
  if (!t) {
    VReport(1, "__asan_finish_switch_fiber called from unknown thread\n");
    return;
  }
  t->FinishSwitchFiber((FakeStack*)fakestack,
                       (uptr*)bottom_old,
                       (uptr*)size_old);
}
}