/*
 * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License").
 * You may not use this file except in compliance with the License.
 * A copy of the License is located at
 *
 *  http://aws.amazon.com/apache2.0
 *
 * or in the "license" file accompanying this file. This file is distributed
 * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied. See the License for the specific language governing
 * permissions and limitations under the License.
 */

#define  _DEFAULT_SOURCE 1
#if !defined(__APPLE__) && !defined(__FreeBSD__)
#include <features.h>
#endif

#include <stdint.h>
#include <unistd.h>
#include <stdlib.h>
#include <sys/mman.h>

#include "error/s2n_errno.h"

#include "utils/s2n_blob.h"
#include "utils/s2n_mem.h"
#include "utils/s2n_safety.h"

static uint32_t page_size = 4096;
static bool initialized = false;

static int s2n_mem_init_impl(void);
static int s2n_mem_cleanup_impl(void);
static int s2n_mem_free_no_mlock_impl(void *ptr, uint32_t size);
static int s2n_mem_free_mlock_impl(void *ptr, uint32_t size);
static int s2n_mem_malloc_no_mlock_impl(void **ptr, uint32_t requested, uint32_t *allocated);
static int s2n_mem_malloc_mlock_impl(void **ptr, uint32_t requested, uint32_t *allocated);

static s2n_mem_init_callback s2n_mem_init_cb = s2n_mem_init_impl;
static s2n_mem_cleanup_callback s2n_mem_cleanup_cb = s2n_mem_cleanup_impl;
static s2n_mem_malloc_callback s2n_mem_malloc_cb = s2n_mem_malloc_mlock_impl;
static s2n_mem_free_callback s2n_mem_free_cb = s2n_mem_free_mlock_impl;

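/* Default init implementation: query the system page size and, when S2N_DONT_MLOCK is
 * set or a unit test is running, fall back to the plain (non-mlock) malloc/free callbacks. */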
static int s2n_mem_init_impl(void)
{
    long sysconf_rc = sysconf(_SC_PAGESIZE);

    /* sysconf must not error, and page_size cannot be 0 */
    POSIX_ENSURE(sysconf_rc > 0, S2N_FAILURE);

    /* page_size must be a valid uint32 */
    POSIX_ENSURE(sysconf_rc <= UINT32_MAX, S2N_FAILURE);

    page_size = (uint32_t) sysconf_rc;

    if (getenv("S2N_DONT_MLOCK") || s2n_in_unit_test()) {
        s2n_mem_malloc_cb = s2n_mem_malloc_no_mlock_impl;
        s2n_mem_free_cb = s2n_mem_free_no_mlock_impl;
    }
    return S2N_SUCCESS;
}

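/* Default cleanup implementation: reset the cached page size and revert to the
 * non-mlock malloc/free callbacks. */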
static int s2n_mem_cleanup_impl(void)
{
    page_size = 4096;
    s2n_mem_malloc_cb = s2n_mem_malloc_no_mlock_impl;
    s2n_mem_free_cb = s2n_mem_free_no_mlock_impl;
    return S2N_SUCCESS;
}

static int s2n_mem_free_mlock_impl(void *ptr, uint32_t size)
{
    /* Perform a best-effort `munlock`: ignore any errors during unlocking. */
    munlock(ptr, size);
    free(ptr);
    return S2N_SUCCESS;
}

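/* Free memory that was never locked: a plain free() is sufficient. */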
static int s2n_mem_free_no_mlock_impl(void *ptr, uint32_t size)
{
    free(ptr);

    return S2N_SUCCESS;
}

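/* Default allocator: page-aligned allocation that is excluded from core dumps
 * (where MADV_DONTDUMP is available) and locked into RAM with mlock(). */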
static int s2n_mem_malloc_mlock_impl(void **ptr, uint32_t requested, uint32_t *allocated)
{
    POSIX_ENSURE_REF(ptr);

    /* Page aligned allocation required for mlock */
    uint32_t allocate;

    POSIX_GUARD(s2n_align_to(requested, page_size, &allocate));

    *ptr = NULL;
    POSIX_ENSURE(posix_memalign(ptr, page_size, allocate) == 0, S2N_ERR_ALLOC);
    *allocated = allocate;

/*
** We disable MADV_DONTDUMP when fuzz-testing or using the address sanitizer because
** both need to be able to dump pages in order to map and inspect heap memory.
*/
#if defined(MADV_DONTDUMP) && !defined(S2N_ADDRESS_SANITIZER) && !defined(S2N_FUZZ_TESTING)
    if (madvise(*ptr, *allocated, MADV_DONTDUMP) != 0) {
        POSIX_GUARD(s2n_mem_free_no_mlock_impl(*ptr, *allocated));
        POSIX_BAIL(S2N_ERR_MADVISE);
    }
#endif

    if (mlock(*ptr, *allocated) != 0) {
        /* When mlock fails, no memory will be locked, so we don't use munlock on free */
        POSIX_GUARD(s2n_mem_free_no_mlock_impl(*ptr, *allocated));
        POSIX_BAIL(S2N_ERR_MLOCK);
    }

    POSIX_ENSURE(*ptr != NULL, S2N_ERR_ALLOC);

    return S2N_SUCCESS;
}

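/* Allocator used when locking is disabled: a plain malloc() of exactly the requested size. */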
static int s2n_mem_malloc_no_mlock_impl(void **ptr, uint32_t requested, uint32_t *allocated)
{
    *ptr = malloc(requested);
    POSIX_ENSURE(*ptr != NULL, S2N_ERR_ALLOC);
    *allocated = requested;

    return S2N_SUCCESS;
}

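/* Override the default memory callbacks with application-provided implementations.
 * This must be called before the memory subsystem is initialized. */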
int s2n_mem_set_callbacks(s2n_mem_init_callback mem_init_callback, s2n_mem_cleanup_callback mem_cleanup_callback,
                          s2n_mem_malloc_callback mem_malloc_callback, s2n_mem_free_callback mem_free_callback)
{
    POSIX_ENSURE(!initialized, S2N_ERR_INITIALIZED);

    POSIX_ENSURE_REF(mem_init_callback);
    POSIX_ENSURE_REF(mem_cleanup_callback);
    POSIX_ENSURE_REF(mem_malloc_callback);
    POSIX_ENSURE_REF(mem_free_callback);

    s2n_mem_init_cb = mem_init_callback;
    s2n_mem_cleanup_cb = mem_cleanup_callback;
    s2n_mem_malloc_cb = mem_malloc_callback;
    s2n_mem_free_cb = mem_free_callback;

    return S2N_SUCCESS;
}

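/* Reset *b and allocate a fresh growable blob of `size` bytes. */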
int s2n_alloc(struct s2n_blob *b, uint32_t size)
{
    POSIX_ENSURE(initialized, S2N_ERR_NOT_INITIALIZED);
    POSIX_ENSURE_REF(b);
    const struct s2n_blob temp = {0};
    *b = temp;
    POSIX_GUARD(s2n_realloc(b, size));
    return S2N_SUCCESS;
}

/* A blob is growable if it is either explicitly marked as such, or if it contains no data */
bool s2n_blob_is_growable(const struct s2n_blob* b)
{
    return b && (b->growable || (b->data == NULL && b->size == 0 && b->allocated == 0));
}

/* Tries to realloc the requested bytes.
 * If successful, updates *b.
 * If it fails, *b remains unchanged.
 */
int s2n_realloc(struct s2n_blob *b, uint32_t size)
{
    POSIX_ENSURE(initialized, S2N_ERR_NOT_INITIALIZED);
    POSIX_ENSURE_REF(b);
    POSIX_ENSURE(s2n_blob_is_growable(b), S2N_ERR_RESIZE_STATIC_BLOB);
    if (size == 0) {
        return s2n_free(b);
    }

    /* blob already has space for the request */
    if (size <= b->allocated) {

        if (size < b->size) {
            /* Zero the existing blob memory before we release it */
            struct s2n_blob slice = {0};
            POSIX_GUARD(s2n_blob_slice(b, &slice, size, b->size - size));
            POSIX_GUARD(s2n_blob_zero(&slice));
        }

        b->size = size;
        return S2N_SUCCESS;
    }

    struct s2n_blob new_memory = {.data = NULL, .size = size, .allocated = 0, .growable = 1};
    if (s2n_mem_malloc_cb((void **) &new_memory.data, new_memory.size, &new_memory.allocated) != 0) {
        S2N_ERROR_PRESERVE_ERRNO();
    }

    POSIX_ENSURE(new_memory.allocated >= new_memory.size, S2N_ERR_ALLOC);
    POSIX_ENSURE(new_memory.data != NULL, S2N_ERR_ALLOC);

    if (b->size) {
        POSIX_CHECKED_MEMCPY(new_memory.data, b->data, b->size);
    }

    if (b->allocated) {
        POSIX_GUARD(s2n_free(b));
    }

    *b = new_memory;
    return S2N_SUCCESS;
}

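/* Release a heap pointer by wrapping it in a temporary growable blob and freeing it.
 * *p_data is cleared first, and a NULL pointer is a no-op. */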
int s2n_free_object(uint8_t **p_data, uint32_t size)
{
    POSIX_ENSURE_REF(p_data);

    if (*p_data == NULL) {
        return S2N_SUCCESS;
    }

    POSIX_ENSURE(initialized, S2N_ERR_NOT_INITIALIZED);
    struct s2n_blob b = {.data = *p_data, .allocated = size, .size = size, .growable = 1};

    /* s2n_free() will call free() even if it returns an error (for a growable blob).
    ** This makes sure *p_data is not used after free(). */
    *p_data = NULL;

    return s2n_free(&b);
}

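/* Deep-copy `from` into `to`. `to` must be empty and `from` must contain data. */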
int s2n_dup(struct s2n_blob *from, struct s2n_blob *to)
{
    POSIX_ENSURE(initialized, S2N_ERR_NOT_INITIALIZED);
    POSIX_ENSURE_EQ(to->size, 0);
    POSIX_ENSURE_EQ(to->data, NULL);
    POSIX_ENSURE_NE(from->size, 0);
    POSIX_ENSURE_NE(from->data, NULL);

    POSIX_GUARD(s2n_alloc(to, from->size));

    POSIX_CHECKED_MEMCPY(to->data, from->data, to->size);

    return S2N_SUCCESS;
}

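/* Run the configured init callback and mark the memory subsystem as initialized. */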
int s2n_mem_init(void)
{
    POSIX_GUARD(s2n_mem_init_cb());

    initialized = true;

    return S2N_SUCCESS;
}

bool s2n_mem_is_init(void)
{
    return initialized;
}

uint32_t s2n_mem_get_page_size(void)
{
    return page_size;
}

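/* Run the configured cleanup callback and clear the initialized flag. */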
int s2n_mem_cleanup(void)
{
    POSIX_ENSURE(initialized, S2N_ERR_NOT_INITIALIZED);
    POSIX_GUARD(s2n_mem_cleanup_cb());

    initialized = false;

    return S2N_SUCCESS;
}

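/* Zero a growable blob's memory, release it through the free callback, and reset the blob. */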
int s2n_free(struct s2n_blob *b)
{
    POSIX_PRECONDITION(s2n_blob_validate(b));

    /* To avoid memory leaks, don't exit the function until the memory
       has been freed */
    int zero_rc = s2n_blob_zero(b);

    POSIX_ENSURE(initialized, S2N_ERR_NOT_INITIALIZED);
    POSIX_ENSURE(s2n_blob_is_growable(b), S2N_ERR_FREE_STATIC_BLOB);

    POSIX_GUARD(s2n_mem_free_cb(b->data, b->allocated));

    *b = (struct s2n_blob) {0};

    POSIX_GUARD(zero_rc);

    return S2N_SUCCESS;
}

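/* Zero the blob's contents and, if it owns allocated memory, free it. Static blobs are only wiped. */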
int s2n_blob_zeroize_free(struct s2n_blob *b)
{
    POSIX_ENSURE(initialized, S2N_ERR_NOT_INITIALIZED);
    POSIX_ENSURE_REF(b);

    POSIX_GUARD(s2n_blob_zero(b));
    if (b->allocated) {
        POSIX_GUARD(s2n_free(b));
    }
    return S2N_SUCCESS;
}