/**
 * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
 * SPDX-License-Identifier: Apache-2.0.
 */

#include <aws/common/atomics.h>
#include <aws/common/byte_buf.h>
#include <aws/common/hash_table.h>
#include <aws/common/logging.h>
#include <aws/common/mutex.h>
#include <aws/common/priority_queue.h>
#include <aws/common/string.h>
#include <aws/common/system_info.h>
#include <aws/common/time.h>

/* describes a single live allocation.
 * allocated by aws_default_allocator() */
struct alloc_info {
    size_t size;
    time_t time;
    uint64_t stack; /* hash of stack frame pointers */
};

/* Using a flexible array member is the C99 compliant way to have the frames immediately follow the header.
 *
 * MSVC doesn't know this for some reason so we need to use a pragma to make
 * it happy.
 */
#ifdef _MSC_VER
#    pragma warning(push)
#    pragma warning(disable : 4200) /* nonstandard extension used: zero-sized array in struct/union */
#endif

/* one of these is stored per unique stack
 * allocated by aws_default_allocator() */
struct stack_trace {
    size_t depth;         /* length of frames[] */
    void *const frames[]; /* rest of frames are allocated after */
};

#ifdef _MSC_VER
#    pragma warning(pop)
#endif

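/* For reference, a minimal sketch of how a struct with a flexible array member is allocated and
 * filled in one block (`depth` and `captured_frames` are illustrative placeholders, not part of
 * this file; s_alloc_tracer_track() below follows this same pattern with the real data):
 *
 *     struct stack_trace *trace = aws_mem_calloc(
 *         aws_default_allocator(), 1, sizeof(struct stack_trace) + depth * sizeof(void *));
 *     memcpy((void **)&trace->frames[0], captured_frames, depth * sizeof(void *));
 *     trace->depth = depth;
 */
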
/* Tracking structure, used as the allocator impl.
 * This structure, and all of its bookkeeping data structures, are created with the aws_default_allocator().
 * This is not customizable because it's too expensive for every little allocation to store
 * a pointer back to its original allocator. */
struct alloc_tracer {
    struct aws_allocator *traced_allocator; /* underlying allocator */
    enum aws_mem_trace_level level;         /* level to trace at */
    size_t frames_per_stack;                /* how many frames to keep per stack */
    struct aws_atomic_var allocated;        /* bytes currently allocated */
    struct aws_mutex mutex;                 /* protects everything below */
    struct aws_hash_table allocs;           /* live allocations, maps address -> alloc_info */
    struct aws_hash_table stacks;           /* unique stack traces, maps hash -> stack_trace */
};

/* number of frames to skip in call stacks (s_alloc_tracer_track, and the vtable function) */
#define FRAMES_TO_SKIP 2

static void *s_trace_mem_acquire(struct aws_allocator *allocator, size_t size);
static void s_trace_mem_release(struct aws_allocator *allocator, void *ptr);
static void *s_trace_mem_realloc(struct aws_allocator *allocator, void *old_ptr, size_t old_size, size_t new_size);
static void *s_trace_mem_calloc(struct aws_allocator *allocator, size_t num, size_t size);

static struct aws_allocator s_trace_allocator = {
    .mem_acquire = s_trace_mem_acquire,
    .mem_release = s_trace_mem_release,
    .mem_realloc = s_trace_mem_realloc,
    .mem_calloc = s_trace_mem_calloc,
};

/* for the hash table, to destroy elements */
static void s_destroy_alloc(void *data) {
    struct alloc_info *alloc = data;
    aws_mem_release(aws_default_allocator(), alloc);
}

static void s_destroy_stacktrace(void *data) {
    struct stack_trace *stack = data;
    aws_mem_release(aws_default_allocator(), stack);
}

static void s_alloc_tracer_init(
    struct alloc_tracer *tracer,
    struct aws_allocator *traced_allocator,
    enum aws_mem_trace_level level,
    size_t frames_per_stack) {

    void *stack[1];
    if (!aws_backtrace(stack, 1)) {
        /* clamp level if tracing isn't available */
        level = level > AWS_MEMTRACE_BYTES ? AWS_MEMTRACE_BYTES : level;
    }

    tracer->traced_allocator = traced_allocator;
    tracer->level = level;

    if (tracer->level >= AWS_MEMTRACE_BYTES) {
        aws_atomic_init_int(&tracer->allocated, 0);
        AWS_FATAL_ASSERT(AWS_OP_SUCCESS == aws_mutex_init(&tracer->mutex));
        AWS_FATAL_ASSERT(
            AWS_OP_SUCCESS ==
            aws_hash_table_init(
                &tracer->allocs, aws_default_allocator(), 1024, aws_hash_ptr, aws_ptr_eq, NULL, s_destroy_alloc));
    }

    if (tracer->level == AWS_MEMTRACE_STACKS) {
        if (frames_per_stack > 128) {
            frames_per_stack = 128;
        }
        tracer->frames_per_stack = (frames_per_stack) ? frames_per_stack : 8;
        AWS_FATAL_ASSERT(
            AWS_OP_SUCCESS ==
            aws_hash_table_init(
                &tracer->stacks, aws_default_allocator(), 1024, aws_hash_ptr, aws_ptr_eq, NULL, s_destroy_stacktrace));
    }
}

static void s_alloc_tracer_track(struct alloc_tracer *tracer, void *ptr, size_t size) {
    if (tracer->level == AWS_MEMTRACE_NONE) {
        return;
    }

    aws_atomic_fetch_add(&tracer->allocated, size);

    struct alloc_info *alloc = aws_mem_calloc(aws_default_allocator(), 1, sizeof(struct alloc_info));
    AWS_FATAL_ASSERT(alloc);
    alloc->size = size;
    alloc->time = time(NULL);

    if (tracer->level == AWS_MEMTRACE_STACKS) {
        /* capture stack frames, skip 2 for this function and the allocation vtable function */
        AWS_VARIABLE_LENGTH_ARRAY(void *, stack_frames, (FRAMES_TO_SKIP + tracer->frames_per_stack));
        size_t stack_depth = aws_backtrace(stack_frames, FRAMES_TO_SKIP + tracer->frames_per_stack);
        if (stack_depth) {
            /* hash the stack pointers */
            struct aws_byte_cursor stack_cursor =
                aws_byte_cursor_from_array(stack_frames, stack_depth * sizeof(void *));
            uint64_t stack_id = aws_hash_byte_cursor_ptr(&stack_cursor);
            alloc->stack = stack_id; /* associate the stack with the alloc */

            aws_mutex_lock(&tracer->mutex);
            struct aws_hash_element *item = NULL;
            int was_created = 0;
            AWS_FATAL_ASSERT(
                AWS_OP_SUCCESS ==
                aws_hash_table_create(&tracer->stacks, (void *)(uintptr_t)stack_id, &item, &was_created));
            /* If this is a new stack, save it to the hash */
            if (was_created) {
                struct stack_trace *stack = aws_mem_calloc(
                    aws_default_allocator(),
                    1,
                    sizeof(struct stack_trace) + (sizeof(void *) * tracer->frames_per_stack));
                AWS_FATAL_ASSERT(stack);
                memcpy(
                    (void **)&stack->frames[0],
                    &stack_frames[FRAMES_TO_SKIP],
                    (stack_depth - FRAMES_TO_SKIP) * sizeof(void *));
                stack->depth = stack_depth - FRAMES_TO_SKIP;
                item->value = stack;
            }
            aws_mutex_unlock(&tracer->mutex);
        }
    }

    aws_mutex_lock(&tracer->mutex);
    AWS_FATAL_ASSERT(AWS_OP_SUCCESS == aws_hash_table_put(&tracer->allocs, ptr, alloc, NULL));
    aws_mutex_unlock(&tracer->mutex);
}

static void s_alloc_tracer_untrack(struct alloc_tracer *tracer, void *ptr) {
    if (tracer->level == AWS_MEMTRACE_NONE) {
        return;
    }

    aws_mutex_lock(&tracer->mutex);
    struct aws_hash_element *item;
    AWS_FATAL_ASSERT(AWS_OP_SUCCESS == aws_hash_table_find(&tracer->allocs, ptr, &item));
    /* because the tracer can be installed at any time, it is possible for an allocation to not
     * be tracked. Therefore, we make sure the find succeeds, but then check the returned
     * value */
    if (item) {
        AWS_FATAL_ASSERT(item->key == ptr && item->value);
        struct alloc_info *alloc = item->value;
        aws_atomic_fetch_sub(&tracer->allocated, alloc->size);
        s_destroy_alloc(item->value);
        AWS_FATAL_ASSERT(AWS_OP_SUCCESS == aws_hash_table_remove_element(&tracer->allocs, item));
    }
    aws_mutex_unlock(&tracer->mutex);
}

/* used only to resolve stacks -> trace, count, size at dump time */
struct stack_metadata {
    struct aws_string *trace;
    size_t count;
    size_t size;
};

static int s_collect_stack_trace(void *context, struct aws_hash_element *item) {
    struct alloc_tracer *tracer = context;
    struct aws_hash_table *all_stacks = &tracer->stacks;
    struct stack_metadata *stack_info = item->value;
    struct aws_hash_element *stack_item = NULL;
    AWS_FATAL_ASSERT(AWS_OP_SUCCESS == aws_hash_table_find(all_stacks, item->key, &stack_item));
    AWS_FATAL_ASSERT(stack_item);
    struct stack_trace *stack = stack_item->value;
    void *const *stack_frames = &stack->frames[0];

    /* convert the frame pointers to symbols, and concat into a buffer */
    char buf[4096] = {0};
    struct aws_byte_buf stacktrace = aws_byte_buf_from_empty_array(buf, AWS_ARRAY_SIZE(buf));
    struct aws_byte_cursor newline = aws_byte_cursor_from_c_str("\n");
    char **symbols = aws_backtrace_symbols(stack_frames, stack->depth);
    for (size_t idx = 0; idx < stack->depth; ++idx) {
        if (idx > 0) {
            aws_byte_buf_append(&stacktrace, &newline);
        }
        const char *caller = symbols[idx];
        if (!caller || !caller[0]) {
            break;
        }
        struct aws_byte_cursor cursor = aws_byte_cursor_from_c_str(caller);
        aws_byte_buf_append(&stacktrace, &cursor);
    }
    aws_mem_release(aws_default_allocator(), symbols);
    /* record the resultant buffer as a string */
    stack_info->trace = aws_string_new_from_array(aws_default_allocator(), stacktrace.buffer, stacktrace.len);
    AWS_FATAL_ASSERT(stack_info->trace);
    aws_byte_buf_clean_up(&stacktrace);
    return AWS_COMMON_HASH_TABLE_ITER_CONTINUE;
}

static int s_stack_info_compare_size(const void *a, const void *b) {
    const struct stack_metadata *stack_a = *(const struct stack_metadata **)a;
    const struct stack_metadata *stack_b = *(const struct stack_metadata **)b;
    return stack_b->size > stack_a->size;
}

static int s_stack_info_compare_count(const void *a, const void *b) {
    const struct stack_metadata *stack_a = *(const struct stack_metadata **)a;
    const struct stack_metadata *stack_b = *(const struct stack_metadata **)b;
    return stack_b->count > stack_a->count;
}

static void s_stack_info_destroy(void *data) {
    struct stack_metadata *stack = data;
    struct aws_allocator *allocator = stack->trace->allocator;
    aws_string_destroy(stack->trace);
    aws_mem_release(allocator, stack);
}

/* tally up count/size per stack from all allocs */
static int s_collect_stack_stats(void *context, struct aws_hash_element *item) {
    struct aws_hash_table *stack_info = context;
    struct alloc_info *alloc = item->value;
    struct aws_hash_element *stack_item = NULL;
    int was_created = 0;
    AWS_FATAL_ASSERT(
        AWS_OP_SUCCESS ==
        aws_hash_table_create(stack_info, (void *)(uintptr_t)alloc->stack, &stack_item, &was_created));
    if (was_created) {
        stack_item->value = aws_mem_calloc(aws_default_allocator(), 1, sizeof(struct stack_metadata));
        AWS_FATAL_ASSERT(stack_item->value);
    }
    struct stack_metadata *stack = stack_item->value;
    stack->count++;
    stack->size += alloc->size;
    return AWS_COMMON_HASH_TABLE_ITER_CONTINUE;
}

static int s_insert_stacks(void *context, struct aws_hash_element *item) {
    struct aws_priority_queue *pq = context;
    struct stack_metadata *stack = item->value;
    AWS_FATAL_ASSERT(AWS_OP_SUCCESS == aws_priority_queue_push(pq, &stack));
    return AWS_COMMON_HASH_TABLE_ITER_CONTINUE;
}

static int s_insert_allocs(void *context, struct aws_hash_element *item) {
    struct aws_priority_queue *allocs = context;
    struct alloc_info *alloc = item->value;
    AWS_FATAL_ASSERT(AWS_OP_SUCCESS == aws_priority_queue_push(allocs, &alloc));
    return AWS_COMMON_HASH_TABLE_ITER_CONTINUE;
}

static int s_alloc_compare(const void *a, const void *b) {
    const struct alloc_info *alloc_a = *(const struct alloc_info **)a;
    const struct alloc_info *alloc_b = *(const struct alloc_info **)b;
    return alloc_a->time > alloc_b->time;
}

void aws_mem_tracer_dump(struct aws_allocator *trace_allocator) {
    struct alloc_tracer *tracer = trace_allocator->impl;
    if (tracer->level == AWS_MEMTRACE_NONE || aws_atomic_load_int(&tracer->allocated) == 0) {
        return;
    }

    aws_mutex_lock(&tracer->mutex);

    size_t num_allocs = aws_hash_table_get_entry_count(&tracer->allocs);
    AWS_LOGF_TRACE(
        AWS_LS_COMMON_MEMTRACE, "################################################################################\n");
    AWS_LOGF_TRACE(
        AWS_LS_COMMON_MEMTRACE, "#  BEGIN MEMTRACE DUMP                                                         #\n");
    AWS_LOGF_TRACE(
        AWS_LS_COMMON_MEMTRACE, "################################################################################\n");
    AWS_LOGF_TRACE(
        AWS_LS_COMMON_MEMTRACE,
        "tracer: %zu bytes still allocated in %zu allocations\n",
        aws_atomic_load_int(&tracer->allocated),
        num_allocs);

    /* convert stacks from pointers -> symbols */
    struct aws_hash_table stack_info;
    AWS_ZERO_STRUCT(stack_info);
    if (tracer->level == AWS_MEMTRACE_STACKS) {
        AWS_FATAL_ASSERT(
            AWS_OP_SUCCESS ==
            aws_hash_table_init(
                &stack_info, aws_default_allocator(), 64, aws_hash_ptr, aws_ptr_eq, NULL, s_stack_info_destroy));
        /* collect active stacks, tally up sizes and counts */
        aws_hash_table_foreach(&tracer->allocs, s_collect_stack_stats, &stack_info);
        /* collect stack traces for active stacks */
        aws_hash_table_foreach(&stack_info, s_collect_stack_trace, tracer);
    }

    /* sort allocs by time */
    struct aws_priority_queue allocs;
    AWS_FATAL_ASSERT(
        AWS_OP_SUCCESS ==
        aws_priority_queue_init_dynamic(
            &allocs, aws_default_allocator(), num_allocs, sizeof(struct alloc_info *), s_alloc_compare));
    aws_hash_table_foreach(&tracer->allocs, s_insert_allocs, &allocs);
    /* dump allocs by time */
    AWS_LOGF_TRACE(
        AWS_LS_COMMON_MEMTRACE, "################################################################################\n");
    AWS_LOGF_TRACE(AWS_LS_COMMON_MEMTRACE, "Leaks in order of allocation:\n");
    AWS_LOGF_TRACE(
        AWS_LS_COMMON_MEMTRACE, "################################################################################\n");
    while (aws_priority_queue_size(&allocs)) {
        struct alloc_info *alloc = NULL;
        aws_priority_queue_pop(&allocs, &alloc);
        AWS_LOGF_TRACE(AWS_LS_COMMON_MEMTRACE, "ALLOC %zu bytes\n", alloc->size);
        if (alloc->stack) {
            struct aws_hash_element *item = NULL;
            AWS_FATAL_ASSERT(
                AWS_OP_SUCCESS == aws_hash_table_find(&stack_info, (void *)(uintptr_t)alloc->stack, &item));
            struct stack_metadata *stack = item->value;
            AWS_LOGF_TRACE(AWS_LS_COMMON_MEMTRACE, "  stacktrace:\n%s\n", (const char *)aws_string_bytes(stack->trace));
        }
    }

    aws_priority_queue_clean_up(&allocs);

    if (tracer->level == AWS_MEMTRACE_STACKS) {
        size_t num_stacks = aws_hash_table_get_entry_count(&stack_info);
        /* sort stacks by total size leaked */
        struct aws_priority_queue stacks_by_size;
        AWS_FATAL_ASSERT(
            AWS_OP_SUCCESS == aws_priority_queue_init_dynamic(
                                  &stacks_by_size,
                                  aws_default_allocator(),
                                  num_stacks,
                                  sizeof(struct stack_metadata *),
                                  s_stack_info_compare_size));
        aws_hash_table_foreach(&stack_info, s_insert_stacks, &stacks_by_size);
        AWS_LOGF_TRACE(
            AWS_LS_COMMON_MEMTRACE,
            "################################################################################\n");
        AWS_LOGF_TRACE(AWS_LS_COMMON_MEMTRACE, "Stacks by bytes leaked:\n");
        AWS_LOGF_TRACE(
            AWS_LS_COMMON_MEMTRACE,
            "################################################################################\n");
        while (aws_priority_queue_size(&stacks_by_size) > 0) {
            struct stack_metadata *stack = NULL;
            aws_priority_queue_pop(&stacks_by_size, &stack);
            AWS_LOGF_TRACE(AWS_LS_COMMON_MEMTRACE, "%zu bytes in %zu allocations:\n", stack->size, stack->count);
            AWS_LOGF_TRACE(AWS_LS_COMMON_MEMTRACE, "%s\n", (const char *)aws_string_bytes(stack->trace));
        }
        aws_priority_queue_clean_up(&stacks_by_size);

        /* sort stacks by number of leaks */
        struct aws_priority_queue stacks_by_count;
        AWS_FATAL_ASSERT(
            AWS_OP_SUCCESS == aws_priority_queue_init_dynamic(
                                  &stacks_by_count,
                                  aws_default_allocator(),
                                  num_stacks,
                                  sizeof(struct stack_metadata *),
                                  s_stack_info_compare_count));
        AWS_LOGF_TRACE(
            AWS_LS_COMMON_MEMTRACE,
            "################################################################################\n");
        AWS_LOGF_TRACE(AWS_LS_COMMON_MEMTRACE, "Stacks by number of leaks:\n");
        AWS_LOGF_TRACE(
            AWS_LS_COMMON_MEMTRACE,
            "################################################################################\n");
        aws_hash_table_foreach(&stack_info, s_insert_stacks, &stacks_by_count);
        while (aws_priority_queue_size(&stacks_by_count) > 0) {
            struct stack_metadata *stack = NULL;
            aws_priority_queue_pop(&stacks_by_count, &stack);
            AWS_LOGF_TRACE(AWS_LS_COMMON_MEMTRACE, "%zu allocations leaking %zu bytes:\n", stack->count, stack->size);
            AWS_LOGF_TRACE(AWS_LS_COMMON_MEMTRACE, "%s\n", (const char *)aws_string_bytes(stack->trace));
        }
        aws_priority_queue_clean_up(&stacks_by_count);
        aws_hash_table_clean_up(&stack_info);
    }

    AWS_LOGF_TRACE(
        AWS_LS_COMMON_MEMTRACE, "################################################################################\n");
    AWS_LOGF_TRACE(
        AWS_LS_COMMON_MEMTRACE, "#  END MEMTRACE DUMP                                                           #\n");
    AWS_LOGF_TRACE(
        AWS_LS_COMMON_MEMTRACE, "################################################################################\n");

    aws_mutex_unlock(&tracer->mutex);
}

static void *s_trace_mem_acquire(struct aws_allocator *allocator, size_t size) {
    struct alloc_tracer *tracer = allocator->impl;
    void *ptr = aws_mem_acquire(tracer->traced_allocator, size);
    if (ptr) {
        s_alloc_tracer_track(tracer, ptr, size);
    }
    return ptr;
}

static void s_trace_mem_release(struct aws_allocator *allocator, void *ptr) {
    struct alloc_tracer *tracer = allocator->impl;
    s_alloc_tracer_untrack(tracer, ptr);
    aws_mem_release(tracer->traced_allocator, ptr);
}

static void *s_trace_mem_realloc(struct aws_allocator *allocator, void *old_ptr, size_t old_size, size_t new_size) {
    struct alloc_tracer *tracer = allocator->impl;
    void *new_ptr = old_ptr;
    if (aws_mem_realloc(tracer->traced_allocator, &new_ptr, old_size, new_size)) {
        return NULL;
    }

    s_alloc_tracer_untrack(tracer, old_ptr);
    s_alloc_tracer_track(tracer, new_ptr, new_size);

    return new_ptr;
}

static void *s_trace_mem_calloc(struct aws_allocator *allocator, size_t num, size_t size) {
    struct alloc_tracer *tracer = allocator->impl;
    void *ptr = aws_mem_calloc(tracer->traced_allocator, num, size);
    if (ptr) {
        s_alloc_tracer_track(tracer, ptr, num * size);
    }
    return ptr;
}

struct aws_allocator *aws_mem_tracer_new(
    struct aws_allocator *allocator,
    struct aws_allocator *deprecated,
    enum aws_mem_trace_level level,
    size_t frames_per_stack) {

    /* deprecated customizable bookkeeping allocator */
    (void)deprecated;

    struct alloc_tracer *tracer = NULL;
    struct aws_allocator *trace_allocator = NULL;
    aws_mem_acquire_many(
        aws_default_allocator(),
        2,
        &tracer,
        sizeof(struct alloc_tracer),
        &trace_allocator,
        sizeof(struct aws_allocator));

    AWS_FATAL_ASSERT(trace_allocator);
    AWS_FATAL_ASSERT(tracer);

    AWS_ZERO_STRUCT(*trace_allocator);
    AWS_ZERO_STRUCT(*tracer);

    /* copy the template vtable */
    *trace_allocator = s_trace_allocator;
    trace_allocator->impl = tracer;

    s_alloc_tracer_init(tracer, allocator, level, frames_per_stack);
    return trace_allocator;
}
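
/* Typical usage, sketched here for reference (the wrapped allocator and frame depth are just
 * examples, not requirements):
 *
 *     struct aws_allocator *traced =
 *         aws_mem_tracer_new(aws_default_allocator(), NULL, AWS_MEMTRACE_STACKS, 8);
 *
 *     // ... route all allocations through `traced` ...
 *
 *     if (aws_mem_tracer_count(traced) > 0) {
 *         aws_mem_tracer_dump(traced); // logs outstanding allocations (and stacks at AWS_MEMTRACE_STACKS)
 *     }
 *     aws_mem_tracer_destroy(traced); // returns the underlying allocator that was wrapped
 */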

struct aws_allocator *aws_mem_tracer_destroy(struct aws_allocator *trace_allocator) {
    struct alloc_tracer *tracer = trace_allocator->impl;
    struct aws_allocator *allocator = tracer->traced_allocator;

    if (tracer->level != AWS_MEMTRACE_NONE) {
        aws_mutex_lock(&tracer->mutex);
        aws_hash_table_clean_up(&tracer->allocs);
        aws_hash_table_clean_up(&tracer->stacks);
        aws_mutex_unlock(&tracer->mutex);
        aws_mutex_clean_up(&tracer->mutex);
    }

    aws_mem_release(aws_default_allocator(), tracer);
    /* trace_allocator is freed as part of the block tracer was allocated in */

    return allocator;
}

size_t aws_mem_tracer_bytes(struct aws_allocator *trace_allocator) {
    struct alloc_tracer *tracer = trace_allocator->impl;
    if (tracer->level == AWS_MEMTRACE_NONE) {
        return 0;
    }

    return aws_atomic_load_int(&tracer->allocated);
}

size_t aws_mem_tracer_count(struct aws_allocator *trace_allocator) {
    struct alloc_tracer *tracer = trace_allocator->impl;
    if (tracer->level == AWS_MEMTRACE_NONE) {
        return 0;
    }

    aws_mutex_lock(&tracer->mutex);
    size_t count = aws_hash_table_get_entry_count(&tracer->allocs);
    aws_mutex_unlock(&tracer->mutex);
    return count;
}