1 /**
2  * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
3  * SPDX-License-Identifier: Apache-2.0.
4  */
5 
#include <aws/testing/aws_test_harness.h>

#include <aws/common/allocator.h>
#include <aws/common/device_random.h>

#include <inttypes.h>
#include <stdio.h>
#include <string.h>

#include "logging/test_logger.h"
12 
13 #define NUM_ALLOCS 100
s_test_memtrace_count(struct aws_allocator * allocator,void * ctx)14 static int s_test_memtrace_count(struct aws_allocator *allocator, void *ctx) {
15     (void)ctx;
16 
17     struct aws_allocator *tracer = aws_mem_tracer_new(allocator, NULL, AWS_MEMTRACE_BYTES, 0);
18 
19     void *allocs[NUM_ALLOCS] = {0};
20     size_t sizes[NUM_ALLOCS] = {0};
21     size_t total = 0;
22 
23     for (size_t idx = 0; idx < AWS_ARRAY_SIZE(allocs); ++idx) {
24         uint32_t size = 0;
25         aws_device_random_u32(&size);
26         size = (size % 1024) + 1; /* not necessary to allocate a gajillion bytes */
27         allocs[idx] = aws_mem_acquire(tracer, size);
28         sizes[idx] = size;
29         total += size;
30     }
31 
32     ASSERT_UINT_EQUALS(total, aws_mem_tracer_bytes(tracer));
33     ASSERT_UINT_EQUALS(NUM_ALLOCS, aws_mem_tracer_count(tracer));
34 
35     size_t freed = 0;
36     for (size_t idx = 0; idx < AWS_ARRAY_SIZE(allocs); ++idx) {
37         uint32_t roll = 0;
38         aws_device_random_u32(&roll);
39         if (roll % 3 == 0) {
40             aws_mem_release(tracer, allocs[idx]);
41             allocs[idx] = NULL;
42             total -= sizes[idx];
43             ++freed;
44         }
45     }
46 
47     ASSERT_UINT_EQUALS(total, aws_mem_tracer_bytes(tracer));
48     ASSERT_UINT_EQUALS(NUM_ALLOCS - freed, aws_mem_tracer_count(tracer));
49 
50     for (size_t idx = 0; idx < AWS_ARRAY_SIZE(allocs); ++idx) {
51         if (allocs[idx]) {
52             aws_mem_release(tracer, allocs[idx]);
53         }
54     }
55 
56     ASSERT_UINT_EQUALS(0, aws_mem_tracer_bytes(tracer));
57     ASSERT_UINT_EQUALS(0, aws_mem_tracer_count(tracer));
58 
59     struct aws_allocator *original = aws_mem_tracer_destroy(tracer);
60     ASSERT_PTR_EQUALS(allocator, original);
61 
62     return 0;
63 }
AWS_TEST_CASE(test_memtrace_count,s_test_memtrace_count)64 AWS_TEST_CASE(test_memtrace_count, s_test_memtrace_count)
65 
/* Compiler barrier used inside the s_alloc_N helpers below so the otherwise
 * identical function bodies are not merged/folded by the optimizer; each
 * helper must remain a distinct symbol so it shows up as a distinct frame in
 * captured backtraces. On non-GCC/Clang compilers this expands to nothing —
 * best effort only. */
#if defined(__GNUC__) || defined(__clang__)
#    define AWS_PREVENT_OPTIMIZATION __asm__ __volatile__("" ::: "memory")
#else
#    define AWS_PREVENT_OPTIMIZATION
#endif
71 
72 AWS_NO_INLINE void *s_alloc_1(struct aws_allocator *allocator, size_t size) {
73     AWS_PREVENT_OPTIMIZATION;
74     return aws_mem_acquire(allocator, size);
75 }
76 
s_alloc_2(struct aws_allocator * allocator,size_t size)77 AWS_NO_INLINE void *s_alloc_2(struct aws_allocator *allocator, size_t size) {
78     AWS_PREVENT_OPTIMIZATION;
79     return aws_mem_acquire(allocator, size);
80 }
81 
s_alloc_3(struct aws_allocator * allocator,size_t size)82 AWS_NO_INLINE void *s_alloc_3(struct aws_allocator *allocator, size_t size) {
83     AWS_PREVENT_OPTIMIZATION;
84     return aws_mem_acquire(allocator, size);
85 }
86 
s_alloc_4(struct aws_allocator * allocator,size_t size)87 AWS_NO_INLINE void *s_alloc_4(struct aws_allocator *allocator, size_t size) {
88     AWS_PREVENT_OPTIMIZATION;
89     return aws_mem_acquire(allocator, size);
90 }
91 
92 static struct aws_logger s_test_logger;
93 
s_test_memtrace_stacks(struct aws_allocator * allocator,void * ctx)94 static int s_test_memtrace_stacks(struct aws_allocator *allocator, void *ctx) {
95     (void)ctx;
96 
97     /* only bother to run this test if the platform can do a backtrace */
98     void *probe_stack[1];
99     if (!aws_backtrace(probe_stack, 1)) {
100         return 0;
101     }
102 
103     test_logger_init(&s_test_logger, allocator, AWS_LL_TRACE, 0);
104     aws_logger_set(&s_test_logger);
105 
106     struct aws_allocator *tracer = aws_mem_tracer_new(allocator, NULL, AWS_MEMTRACE_STACKS, 8);
107 
108     void *allocs[NUM_ALLOCS] = {0};
109     size_t total = 0;
110 
111     for (size_t idx = 0; idx < AWS_ARRAY_SIZE(allocs); ++idx) {
112         uint32_t size = 0;
113         aws_device_random_u32(&size);
114         size = (size % 1024) + 1; /* not necessary to allocate a gajillion bytes */
115 
116         void *(*allocate)(struct aws_allocator *, size_t) = NULL;
117         switch (idx % 4) {
118             case 0:
119                 allocate = s_alloc_1;
120                 break;
121             case 1:
122                 allocate = s_alloc_2;
123                 break;
124             case 2:
125                 allocate = s_alloc_3;
126                 break;
127             case 3:
128                 allocate = s_alloc_4;
129                 break;
130         }
131 
132         allocs[idx] = allocate(tracer, size);
133         total += size;
134     }
135 
136     ASSERT_UINT_EQUALS(total, aws_mem_tracer_bytes(tracer));
137     ASSERT_UINT_EQUALS(NUM_ALLOCS, aws_mem_tracer_count(tracer));
138     aws_mem_tracer_dump(tracer);
139 
140     /* make sure all of the functions that allocated are found */
141     struct test_logger_impl *test_logger = s_test_logger.p_impl;
142     /* if this is not a debug build, there may not be symbols, so the test cannot
143      * verify if a best effort was made */
144 #if defined(DEBUG_BUILD)
145     /* fprintf(stderr, "%s\n", test_logger->log_buffer.buffer); */
146     char s_alloc_1_addr[32];
147     char s_alloc_2_addr[32];
148     char s_alloc_3_addr[32];
149     char s_alloc_4_addr[32];
150     snprintf(s_alloc_1_addr, AWS_ARRAY_SIZE(s_alloc_1_addr), "0x%tx", (uintptr_t)(void *)s_alloc_1);
151     snprintf(s_alloc_2_addr, AWS_ARRAY_SIZE(s_alloc_2_addr), "0x%tx", (uintptr_t)(void *)s_alloc_2);
152     snprintf(s_alloc_3_addr, AWS_ARRAY_SIZE(s_alloc_3_addr), "0x%tx", (uintptr_t)(void *)s_alloc_3);
153     snprintf(s_alloc_4_addr, AWS_ARRAY_SIZE(s_alloc_4_addr), "0x%tx", (uintptr_t)(void *)s_alloc_4);
154     const char *log_buffer = (const char *)test_logger->log_buffer.buffer;
155     ASSERT_TRUE(strstr(log_buffer, "s_alloc_1") || strstr(log_buffer, s_alloc_1_addr));
156     ASSERT_TRUE(strstr(log_buffer, "s_alloc_2") || strstr(log_buffer, s_alloc_2_addr));
157     ASSERT_TRUE(strstr(log_buffer, "s_alloc_3") || strstr(log_buffer, s_alloc_3_addr));
158     ASSERT_TRUE(strstr(log_buffer, "s_alloc_4") || strstr(log_buffer, s_alloc_4_addr));
159 #endif
160 
161     /* reset log */
162     aws_byte_buf_reset(&test_logger->log_buffer, true);
163 
164     for (size_t idx = 0; idx < AWS_ARRAY_SIZE(allocs); ++idx) {
165         if (allocs[idx]) {
166             aws_mem_release(tracer, allocs[idx]);
167         }
168     }
169 
170     ASSERT_UINT_EQUALS(0, aws_mem_tracer_bytes(tracer));
171     ASSERT_UINT_EQUALS(0, aws_mem_tracer_count(tracer));
172     aws_mem_tracer_dump(tracer);
173 
174     /* Make sure no known allocs are left */
175     ASSERT_UINT_EQUALS(0, test_logger->log_buffer.len);
176 
177     struct aws_allocator *original = aws_mem_tracer_destroy(tracer);
178     ASSERT_PTR_EQUALS(allocator, original);
179 
180     aws_logger_clean_up(&s_test_logger);
181 
182     return 0;
183 }
AWS_TEST_CASE(test_memtrace_stacks,s_test_memtrace_stacks)184 AWS_TEST_CASE(test_memtrace_stacks, s_test_memtrace_stacks)
185 
186 static int s_test_memtrace_none(struct aws_allocator *allocator, void *ctx) {
187     (void)ctx;
188     struct aws_allocator *tracer = aws_mem_tracer_new(allocator, NULL, AWS_MEMTRACE_NONE, 0);
189 
190     void *allocs[NUM_ALLOCS] = {0};
191     size_t total = 0;
192 
193     for (size_t idx = 0; idx < AWS_ARRAY_SIZE(allocs); ++idx) {
194         uint32_t size = 0;
195         aws_device_random_u32(&size);
196         size = (size % 1024) + 1; /* not necessary to allocate a gajillion bytes */
197         allocs[idx] = aws_mem_acquire(tracer, size);
198         total += size;
199     }
200 
201     ASSERT_UINT_EQUALS(0, aws_mem_tracer_bytes(tracer));
202 
203     for (size_t idx = 0; idx < AWS_ARRAY_SIZE(allocs); ++idx) {
204         if (allocs[idx]) {
205             aws_mem_release(tracer, allocs[idx]);
206         }
207     }
208 
209     ASSERT_UINT_EQUALS(0, aws_mem_tracer_bytes(tracer));
210 
211     struct aws_allocator *original = aws_mem_tracer_destroy(tracer);
212     ASSERT_PTR_EQUALS(allocator, original);
213 
214     return 0;
215 }
AWS_TEST_CASE(test_memtrace_none,s_test_memtrace_none)216 AWS_TEST_CASE(test_memtrace_none, s_test_memtrace_none)
217 
218 static int s_test_memtrace_midstream(struct aws_allocator *allocator, void *ctx) {
219     (void)ctx;
220 
221     void *allocs[NUM_ALLOCS] = {0};
222 
223     /* allocate some from the base allocator first */
224     for (size_t idx = 0; idx < AWS_ARRAY_SIZE(allocs) / 4; ++idx) {
225         uint32_t size = 0;
226         aws_device_random_u32(&size);
227         size = (size % 1024) + 1; /* not necessary to allocate a gajillion bytes */
228         allocs[idx] = aws_mem_acquire(allocator, size);
229     }
230 
231     struct aws_allocator *tracer = aws_mem_tracer_new(allocator, NULL, AWS_MEMTRACE_BYTES, 0);
232 
233     /* Now allocate from the tracer, and make sure everything still works */
234     size_t total = 0;
235     size_t tracked_allocs = 0;
236     for (size_t idx = AWS_ARRAY_SIZE(allocs) / 4 + 1; idx < AWS_ARRAY_SIZE(allocs); ++idx) {
237         uint32_t size = 0;
238         aws_device_random_u32(&size);
239         size = (size % 1024) + 1; /* not necessary to allocate a gajillion bytes */
240         allocs[idx] = aws_mem_acquire(tracer, size);
241         total += size;
242         ++tracked_allocs;
243     }
244 
245     ASSERT_UINT_EQUALS(total, aws_mem_tracer_bytes(tracer));
246     ASSERT_UINT_EQUALS(tracked_allocs, aws_mem_tracer_count(tracer));
247 
248     for (size_t idx = 0; idx < AWS_ARRAY_SIZE(allocs); ++idx) {
249         if (allocs[idx]) {
250             aws_mem_release(tracer, allocs[idx]);
251         }
252     }
253 
254     ASSERT_UINT_EQUALS(0, aws_mem_tracer_bytes(tracer));
255     ASSERT_UINT_EQUALS(0, aws_mem_tracer_count(tracer));
256 
257     struct aws_allocator *original = aws_mem_tracer_destroy(tracer);
258     ASSERT_PTR_EQUALS(allocator, original);
259 
260     return 0;
261 }
262 AWS_TEST_CASE(test_memtrace_midstream, s_test_memtrace_midstream)
263