1 /* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
2 // vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
3 #ident "$Id$"
4 /*======
5 This file is part of PerconaFT.
6 
7 
8 Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
9 
10     PerconaFT is free software: you can redistribute it and/or modify
11     it under the terms of the GNU General Public License, version 2,
12     as published by the Free Software Foundation.
13 
14     PerconaFT is distributed in the hope that it will be useful,
15     but WITHOUT ANY WARRANTY; without even the implied warranty of
16     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
17     GNU General Public License for more details.
18 
19     You should have received a copy of the GNU General Public License
20     along with PerconaFT.  If not, see <http://www.gnu.org/licenses/>.
21 
22 ----------------------------------------
23 
24     PerconaFT is free software: you can redistribute it and/or modify
25     it under the terms of the GNU Affero General Public License, version 3,
26     as published by the Free Software Foundation.
27 
28     PerconaFT is distributed in the hope that it will be useful,
29     but WITHOUT ANY WARRANTY; without even the implied warranty of
30     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
31     GNU Affero General Public License for more details.
32 
33     You should have received a copy of the GNU Affero General Public License
34     along with PerconaFT.  If not, see <http://www.gnu.org/licenses/>.
35 
36 ----------------------------------------
37 
38    Licensed under the Apache License, Version 2.0 (the "License");
39    you may not use this file except in compliance with the License.
40    You may obtain a copy of the License at
41 
42        http://www.apache.org/licenses/LICENSE-2.0
43 
44    Unless required by applicable law or agreed to in writing, software
45    distributed under the License is distributed on an "AS IS" BASIS,
46    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
47    See the License for the specific language governing permissions and
48    limitations under the License.
49 ======= */
50 
51 #ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
52 
#include <portability/toku_config.h>

#include <toku_portability.h>
#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#if defined(HAVE_MALLOC_H)
# include <malloc.h>
#elif defined(HAVE_SYS_MALLOC_H)
# include <sys/malloc.h>
#endif
#include <dlfcn.h>
#include <toku_race_tools.h>
#include "memory.h"
#include "toku_assert.h"
#include <portability/toku_atomic.h>
69 
// Replacement allocator hooks.  When 0 (NULL), the corresponding os_*
// function is used instead.  Installed via the toku_set_func_* setters below.
static malloc_fun_t  t_malloc  = 0;
static malloc_aligned_fun_t t_malloc_aligned = 0;
static malloc_fun_t  t_xmalloc = 0;
static malloc_aligned_fun_t t_xmalloc_aligned = 0;
static free_fun_t    t_free    = 0;
static realloc_fun_t t_realloc = 0;
static realloc_aligned_fun_t t_realloc_aligned = 0;
static realloc_fun_t t_xrealloc = 0;

// Process-wide allocation statistics, snapshotted by toku_memory_get_status().
static LOCAL_MEMORY_STATUS_S status;
// When nonzero, every allocation/free updates the counters in `status`.
int toku_memory_do_stats = 0;

// Guards the one-time initialization done in toku_memory_startup().
static bool memory_startup_complete = false;
83 
84 int
toku_memory_startup(void)85 toku_memory_startup(void) {
86     if (memory_startup_complete) {
87         return 0;
88     }
89     memory_startup_complete = true;
90 
91     int result = 0;
92 
93     // The ASAN doesn't support mallopt, it simply returns 0 or -1
94     // depending on GCC version.
95 #if defined(HAVE_M_MMAP_THRESHOLD) && !defined(UNDER_ASAN)
96     // initialize libc malloc
97     size_t mmap_threshold = 64 * 1024; // 64K and larger should be malloced with mmap().
98     int success = mallopt(M_MMAP_THRESHOLD, mmap_threshold);
99     if (success) {
100         status.mallocator_version = "libc";
101         status.mmap_threshold = mmap_threshold;
102     } else {
103         result = EINVAL;
104     }
105     assert(result == 0);
106 #else
107     // just a guess
108     status.mallocator_version = "darwin";
109     status.mmap_threshold = 16 * 1024;
110 #endif
111 
112     // jemalloc has a mallctl function, while libc malloc does not.  we can check if jemalloc
113     // is loaded by checking if the mallctl function can be found.  if it can, we call it
114     // to get version and mmap threshold configuration.
115     typedef int (*mallctl_fun_t)(const char *, void *, size_t *, void *, size_t);
116     mallctl_fun_t mallctl_f;
117     mallctl_f = (mallctl_fun_t) dlsym(RTLD_DEFAULT, "mallctl");
118     if (mallctl_f) { // jemalloc is loaded
119         size_t version_length = sizeof status.mallocator_version;
120         result = mallctl_f("version", &status.mallocator_version, &version_length, NULL, 0);
121         assert(result == 0);
122         if (result == 0) {
123             size_t lg_chunk; // log2 of the mmap threshold
124             size_t lg_chunk_length = sizeof lg_chunk;
125             result  = mallctl_f("opt.lg_chunk", &lg_chunk, &lg_chunk_length, NULL, 0);
126             if (result == 0) {
127                 status.mmap_threshold = 1 << lg_chunk;
128             } else {
129                 status.mmap_threshold = 1 << 22;
130                 result = 0;
131             }
132         }
133     }
134 
135     return result;
136 }
137 
// Guards the one-time teardown done in toku_memory_shutdown().
static bool memory_shutdown_complete;
139 
140 void
toku_memory_shutdown(void)141 toku_memory_shutdown(void) {
142     if (memory_shutdown_complete) {
143         return;
144     }
145     memory_shutdown_complete = true;
146 }
147 
148 void
toku_memory_get_status(LOCAL_MEMORY_STATUS s)149 toku_memory_get_status(LOCAL_MEMORY_STATUS s) {
150     *s = status;
151 }
152 
153 // jemalloc's malloc_usable_size does not work with a NULL pointer, so we implement a version that works
154 static size_t
my_malloc_usable_size(void * p)155 my_malloc_usable_size(void *p) {
156     return p == NULL ? 0 : os_malloc_usable_size(p);
157 }
158 
// Note that max_in_use may be slightly off because use of max_in_use is not thread-safe.
// It is not worth the overhead to make it completely accurate, but
// this logic is intended to guarantee that it increases monotonically.
// Note that status.sum_used and status.sum_freed increase monotonically
// and that status.max_in_use is declared volatile.
static inline void
set_max(uint64_t sum_used, uint64_t sum_freed) {
    // Only meaningful when used >= freed; the counters are sampled racily,
    // so a momentary freed > used reading is simply skipped.
    if (sum_used >= sum_freed) {
        uint64_t in_use = sum_used - sum_freed;
        uint64_t old_max;
        // CAS loop: retry while another thread changed max_in_use under us;
        // stop early once the stored maximum is already >= in_use.
        do {
            old_max = status.max_in_use;
        } while (old_max < in_use &&
                 !toku_sync_bool_compare_and_swap(&status.max_in_use, old_max, in_use));
    }
}
175 
176 // Effect: Like toku_memory_footprint, except instead of passing p,
177 //   we pass toku_malloc_usable_size(p).
178 size_t
toku_memory_footprint_given_usable_size(size_t touched,size_t usable)179 toku_memory_footprint_given_usable_size(size_t touched, size_t usable)
180 {
181     size_t pagesize = toku_os_get_pagesize();
182     if (usable >= status.mmap_threshold) {
183         int num_pages = (touched + pagesize) / pagesize;
184         return num_pages * pagesize;
185     }
186     return usable;
187 }
188 
189 // Effect: Return an estimate how how much space an object is using, possibly by
190 //   using toku_malloc_usable_size(p).
191 //   If p is NULL then returns 0.
192 size_t
toku_memory_footprint(void * p,size_t touched)193 toku_memory_footprint(void * p, size_t touched)
194 {
195     if (!p) return 0;
196     return toku_memory_footprint_given_usable_size(touched,
197                                                    my_malloc_usable_size(p));
198 }
199 
200 void *
toku_malloc(size_t size)201 toku_malloc(size_t size) {
202 #if defined(__APPLE__)
203     if (size == 0) {
204         return nullptr;
205     }
206 #endif
207 
208     if (size > status.max_requested_size) {
209         status.max_requested_size = size;
210     }
211     void *p = t_malloc ? t_malloc(size) : os_malloc(size);
212     if (p) {
213         TOKU_ANNOTATE_NEW_MEMORY(p, size); // see #4671 and https://bugs.kde.org/show_bug.cgi?id=297147
214         if (toku_memory_do_stats) {
215             size_t used = my_malloc_usable_size(p);
216             toku_sync_add_and_fetch(&status.malloc_count, 1);
217             toku_sync_add_and_fetch(&status.requested,size);
218             toku_sync_add_and_fetch(&status.used, used);
219             set_max(status.used, status.freed);
220         }
221     } else {
222         toku_sync_add_and_fetch(&status.malloc_fail, 1);
223         status.last_failed_size = size;
224     }
225   return p;
226 }
227 
toku_malloc_aligned(size_t alignment,size_t size)228 void *toku_malloc_aligned(size_t alignment, size_t size) {
229 #if defined(__APPLE__)
230     if (size == 0) {
231         return nullptr;
232     }
233 #endif
234 
235     if (size > status.max_requested_size) {
236         status.max_requested_size = size;
237     }
238     void *p = t_malloc_aligned ? t_malloc_aligned(alignment, size) : os_malloc_aligned(alignment, size);
239     if (p) {
240         TOKU_ANNOTATE_NEW_MEMORY(p, size); // see #4671 and https://bugs.kde.org/show_bug.cgi?id=297147
241         if (toku_memory_do_stats) {
242             size_t used = my_malloc_usable_size(p);
243             toku_sync_add_and_fetch(&status.malloc_count, 1);
244             toku_sync_add_and_fetch(&status.requested,size);
245             toku_sync_add_and_fetch(&status.used, used);
246             set_max(status.used, status.freed);
247         }
248     } else {
249         toku_sync_add_and_fetch(&status.malloc_fail, 1);
250         status.last_failed_size = size;
251     }
252   return p;
253 }
254 
255 void *
toku_calloc(size_t nmemb,size_t size)256 toku_calloc(size_t nmemb, size_t size) {
257     size_t newsize = nmemb * size;
258     void *p = toku_malloc(newsize);
259     if (p) memset(p, 0, newsize);
260     return p;
261 }
262 
263 void *
toku_realloc(void * p,size_t size)264 toku_realloc(void *p, size_t size) {
265 #if defined(__APPLE__)
266     if (size == 0) {
267         if (p != nullptr) {
268             toku_free(p);
269         }
270         return nullptr;
271     }
272 #endif
273 
274     if (size > status.max_requested_size) {
275         status.max_requested_size = size;
276     }
277     size_t used_orig = p ? my_malloc_usable_size(p) : 0;
278     void *q = t_realloc ? t_realloc(p, size) : os_realloc(p, size);
279     if (q) {
280         if (toku_memory_do_stats) {
281             size_t used = my_malloc_usable_size(q);
282             toku_sync_add_and_fetch(&status.realloc_count, 1);
283             toku_sync_add_and_fetch(&status.requested, size);
284             toku_sync_add_and_fetch(&status.used, used);
285             toku_sync_add_and_fetch(&status.freed, used_orig);
286             set_max(status.used, status.freed);
287         }
288     } else {
289         toku_sync_add_and_fetch(&status.realloc_fail, 1);
290         status.last_failed_size = size;
291     }
292     return q;
293 }
294 
toku_realloc_aligned(size_t alignment,void * p,size_t size)295 void *toku_realloc_aligned(size_t alignment, void *p, size_t size) {
296 #if defined(__APPLE__)
297     if (size == 0) {
298         if (p != nullptr) {
299             toku_free(p);
300         }
301         return nullptr;
302     }
303 #endif
304 
305     if (size > status.max_requested_size) {
306         status.max_requested_size = size;
307     }
308     size_t used_orig = p ? my_malloc_usable_size(p) : 0;
309     void *q = t_realloc_aligned ? t_realloc_aligned(alignment, p, size) : os_realloc_aligned(alignment, p, size);
310     if (q) {
311         if (toku_memory_do_stats) {
312             size_t used = my_malloc_usable_size(q);
313             toku_sync_add_and_fetch(&status.realloc_count, 1);
314             toku_sync_add_and_fetch(&status.requested, size);
315             toku_sync_add_and_fetch(&status.used, used);
316             toku_sync_add_and_fetch(&status.freed, used_orig);
317             set_max(status.used, status.freed);
318         }
319     } else {
320         toku_sync_add_and_fetch(&status.realloc_fail, 1);
321         status.last_failed_size = size;
322     }
323     return q;
324 }
325 
326 
327 void *
toku_memdup(const void * v,size_t len)328 toku_memdup(const void *v, size_t len) {
329     void *p = toku_malloc(len);
330     if (p) memcpy(p, v,len);
331     return p;
332 }
333 
334 char *
toku_strdup(const char * s)335 toku_strdup(const char *s) {
336     return (char *) toku_memdup(s, strlen(s)+1);
337 }
338 
toku_strndup(const char * s,size_t n)339 char *toku_strndup(const char *s, size_t n) {
340     size_t s_size = strlen(s);
341     size_t bytes_to_copy = n > s_size ? s_size : n;
342     ++bytes_to_copy;
343     char *result = (char *)toku_memdup(s, bytes_to_copy);
344     result[bytes_to_copy - 1] = 0;
345     return result;
346 }
347 
348 void
toku_free(void * p)349 toku_free(void *p) {
350     if (p) {
351         if (toku_memory_do_stats) {
352             size_t used = my_malloc_usable_size(p);
353             toku_sync_add_and_fetch(&status.free_count, 1);
354             toku_sync_add_and_fetch(&status.freed, used);
355         }
356         if (t_free)
357             t_free(p);
358         else
359             os_free(p);
360     }
361 }
362 
363 void *
toku_xmalloc(size_t size)364 toku_xmalloc(size_t size) {
365 #if defined(__APPLE__)
366     if (size == 0) {
367         return nullptr;
368     }
369 #endif
370 
371     if (size > status.max_requested_size) {
372         status.max_requested_size = size;
373     }
374     void *p = t_xmalloc ? t_xmalloc(size) : os_malloc(size);
375     if (p == NULL) {  // avoid function call in common case
376         status.last_failed_size = size;
377         resource_assert(p);
378     }
379     TOKU_ANNOTATE_NEW_MEMORY(p, size); // see #4671 and https://bugs.kde.org/show_bug.cgi?id=297147
380     if (toku_memory_do_stats) {
381         size_t used = my_malloc_usable_size(p);
382         toku_sync_add_and_fetch(&status.malloc_count, 1);
383         toku_sync_add_and_fetch(&status.requested, size);
384         toku_sync_add_and_fetch(&status.used, used);
385         set_max(status.used, status.freed);
386     }
387     return p;
388 }
389 
toku_xmalloc_aligned(size_t alignment,size_t size)390 void* toku_xmalloc_aligned(size_t alignment, size_t size)
391 // Effect: Perform a malloc(size) with the additional property that the returned pointer is a multiple of ALIGNMENT.
392 //  Fail with a resource_assert if the allocation fails (don't return an error code).
393 // Requires: alignment is a power of two.
394 {
395 #if defined(__APPLE__)
396     if (size == 0) {
397         return nullptr;
398     }
399 #endif
400 
401     if (size > status.max_requested_size) {
402         status.max_requested_size = size;
403     }
404     void *p = t_xmalloc_aligned ? t_xmalloc_aligned(alignment, size) : os_malloc_aligned(alignment,size);
405     if (p == NULL && size != 0) {
406         status.last_failed_size = size;
407         resource_assert(p);
408     }
409     if (toku_memory_do_stats) {
410         size_t used = my_malloc_usable_size(p);
411         toku_sync_add_and_fetch(&status.malloc_count, 1);
412         toku_sync_add_and_fetch(&status.requested, size);
413         toku_sync_add_and_fetch(&status.used, used);
414         set_max(status.used, status.freed);
415     }
416     return p;
417 }
418 
419 void *
toku_xcalloc(size_t nmemb,size_t size)420 toku_xcalloc(size_t nmemb, size_t size) {
421     size_t newsize = nmemb * size;
422     void *vp = toku_xmalloc(newsize);
423     if (vp) memset(vp, 0, newsize);
424     return vp;
425 }
426 
427 void *
toku_xrealloc(void * v,size_t size)428 toku_xrealloc(void *v, size_t size) {
429 #if defined(__APPLE__)
430     if (size == 0) {
431         if (v != nullptr) {
432             toku_free(v);
433         }
434         return nullptr;
435     }
436 #endif
437 
438     if (size > status.max_requested_size) {
439         status.max_requested_size = size;
440     }
441     size_t used_orig = v ? my_malloc_usable_size(v) : 0;
442     void *p = t_xrealloc ? t_xrealloc(v, size) : os_realloc(v, size);
443     if (p == 0) {  // avoid function call in common case
444         status.last_failed_size = size;
445         resource_assert(p);
446     }
447     if (toku_memory_do_stats) {
448         size_t used = my_malloc_usable_size(p);
449         toku_sync_add_and_fetch(&status.realloc_count, 1);
450         toku_sync_add_and_fetch(&status.requested, size);
451         toku_sync_add_and_fetch(&status.used, used);
452         toku_sync_add_and_fetch(&status.freed, used_orig);
453         set_max(status.used, status.freed);
454     }
455     return p;
456 }
457 
458 size_t
toku_malloc_usable_size(void * p)459 toku_malloc_usable_size(void *p) {
460     return my_malloc_usable_size(p);
461 }
462 
463 void *
toku_xmemdup(const void * v,size_t len)464 toku_xmemdup (const void *v, size_t len) {
465     void *p = toku_xmalloc(len);
466     memcpy(p, v, len);
467     return p;
468 }
469 
470 char *
toku_xstrdup(const char * s)471 toku_xstrdup (const char *s) {
472     return (char *) toku_xmemdup(s, strlen(s)+1);
473 }
474 
475 void
toku_set_func_malloc(malloc_fun_t f)476 toku_set_func_malloc(malloc_fun_t f) {
477     t_malloc = f;
478     t_xmalloc = f;
479 }
480 
481 void
toku_set_func_xmalloc_only(malloc_fun_t f)482 toku_set_func_xmalloc_only(malloc_fun_t f) {
483     t_xmalloc = f;
484 }
485 
486 void
toku_set_func_malloc_only(malloc_fun_t f)487 toku_set_func_malloc_only(malloc_fun_t f) {
488     t_malloc = f;
489 }
490 
491 void
toku_set_func_realloc(realloc_fun_t f)492 toku_set_func_realloc(realloc_fun_t f) {
493     t_realloc = f;
494     t_xrealloc = f;
495 }
496 
497 void
toku_set_func_xrealloc_only(realloc_fun_t f)498 toku_set_func_xrealloc_only(realloc_fun_t f) {
499     t_xrealloc = f;
500 }
501 
502 void
toku_set_func_realloc_only(realloc_fun_t f)503 toku_set_func_realloc_only(realloc_fun_t f) {
504     t_realloc = f;
505 
506 }
507 
508 void
toku_set_func_free(free_fun_t f)509 toku_set_func_free(free_fun_t f) {
510     t_free = f;
511 }
512 
#include <toku_race_tools.h>  // NOTE(review): redundant — already included near the top of this file; kept as-is
// Declared with GCC's constructor attribute so it runs automatically at
// program/library load time.
void __attribute__((constructor)) toku_memory_helgrind_ignore(void);
// Tell Helgrind/DRD to ignore data races on the statistics struct: its
// counters are deliberately updated without full synchronization (see the
// comments on set_max above).
void
toku_memory_helgrind_ignore(void) {
    TOKU_VALGRIND_HG_DISABLE_CHECKING(&status, sizeof status);
}
519