/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
#ident "$Id$"
/*======
This file is part of PerconaFT.


Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.

    PerconaFT is free software: you can redistribute it and/or modify
    it under the terms of the GNU General Public License, version 2,
    as published by the Free Software Foundation.

    PerconaFT is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with PerconaFT.  If not, see <http://www.gnu.org/licenses/>.

----------------------------------------

    PerconaFT is free software: you can redistribute it and/or modify
    it under the terms of the GNU Affero General Public License, version 3,
    as published by the Free Software Foundation.

    PerconaFT is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU Affero General Public License for more details.

    You should have received a copy of the GNU Affero General Public License
    along with PerconaFT.  If not, see <http://www.gnu.org/licenses/>.

----------------------------------------

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
======= */

#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."

#include <portability/toku_config.h>

#include <toku_portability.h>
#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#if defined(HAVE_MALLOC_H)
# include <malloc.h>
#elif defined(HAVE_SYS_MALLOC_H)
# include <sys/malloc.h>
#endif
#include <dlfcn.h>
#include <toku_race_tools.h>
#include "memory.h"
#include "toku_assert.h"
#include <portability/toku_atomic.h>

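// Allocator hooks.  When a hook below is non-NULL it overrides the default
// os_malloc/os_realloc/os_free path; hooks are installed with the
// toku_set_func_* functions at the bottom of this file (for example, by a
// caller that wants to route allocations through a custom allocator).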
static malloc_fun_t  t_malloc  = 0;
static malloc_aligned_fun_t t_malloc_aligned = 0;
static malloc_fun_t  t_xmalloc = 0;
static malloc_aligned_fun_t t_xmalloc_aligned = 0;
static free_fun_t    t_free    = 0;
static realloc_fun_t t_realloc = 0;
static realloc_aligned_fun_t t_realloc_aligned = 0;
static realloc_fun_t t_xrealloc = 0;

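// Allocation statistics.  Most counters in `status` are bumped (with atomic
// adds) only when toku_memory_do_stats is nonzero; failure counts and the
// high-water request sizes are recorded unconditionally.  Readers copy the
// struct without taking a lock, so the values they see may be slightly stale.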
static LOCAL_MEMORY_STATUS_S status;
int toku_memory_do_stats = 0;

static bool memory_startup_complete = false;

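// Effect: One-time initialization of the memory layer: record which allocator
//   appears to be in use (libc, jemalloc, or a guess on platforms without
//   mallopt) and its mmap threshold in `status`.  Safe to call more than once;
//   returns 0 on success.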
int
toku_memory_startup(void) {
    if (memory_startup_complete) {
        return 0;
    }
    memory_startup_complete = true;

    int result = 0;

#if defined(HAVE_M_MMAP_THRESHOLD)
    // initialize libc malloc
    size_t mmap_threshold = 64 * 1024; // 64K and larger should be malloced with mmap().
    int success = mallopt(M_MMAP_THRESHOLD, mmap_threshold);
    if (success) {
        status.mallocator_version = "libc";
        status.mmap_threshold = mmap_threshold;
    } else {
        result = EINVAL;
    }
    assert(result == 0);
#else
    // just a guess
    status.mallocator_version = "darwin";
    status.mmap_threshold = 16 * 1024;
#endif

    // jemalloc has a mallctl function, while libc malloc does not.  We can check whether
    // jemalloc is loaded by looking up the mallctl symbol; if it is found, we call it
    // to get the version and the mmap threshold configuration.
    typedef int (*mallctl_fun_t)(const char *, void *, size_t *, void *, size_t);
    mallctl_fun_t mallctl_f;
    mallctl_f = (mallctl_fun_t) dlsym(RTLD_DEFAULT, "mallctl");
    if (mallctl_f) { // jemalloc is loaded
        size_t version_length = sizeof status.mallocator_version;
        result = mallctl_f("version", &status.mallocator_version, &version_length, NULL, 0);
        assert(result == 0);
        if (result == 0) {
            size_t lg_chunk; // log2 of the mmap threshold
            size_t lg_chunk_length = sizeof lg_chunk;
            result = mallctl_f("opt.lg_chunk", &lg_chunk, &lg_chunk_length, NULL, 0);
            if (result == 0) {
                status.mmap_threshold = 1 << lg_chunk;
            } else {
                status.mmap_threshold = 1 << 22;
                result = 0;
            }
        }
    }

    return result;
}

static bool memory_shutdown_complete;

void
toku_memory_shutdown(void) {
    if (memory_shutdown_complete) {
        return;
    }
    memory_shutdown_complete = true;
}

void
toku_memory_get_status(LOCAL_MEMORY_STATUS s) {
    *s = status;
}

// jemalloc's malloc_usable_size does not work with a NULL pointer, so we implement a version that works
static size_t
my_malloc_usable_size(void *p) {
    return p == NULL ? 0 : os_malloc_usable_size(p);
}

// Note that max_in_use may be slightly off because use of max_in_use is not thread-safe.
// It is not worth the overhead to make it completely accurate, but
// this logic is intended to guarantee that it increases monotonically.
// Note that status.used and status.freed increase monotonically
// and that status.max_in_use is declared volatile.
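// The compare-and-swap loop below retries until either this thread publishes a
// larger max_in_use or another thread has already published one at least as large.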
static inline void
set_max(uint64_t sum_used, uint64_t sum_freed) {
    if (sum_used >= sum_freed) {
        uint64_t in_use = sum_used - sum_freed;
        uint64_t old_max;
        do {
            old_max = status.max_in_use;
        } while (old_max < in_use &&
                 !toku_sync_bool_compare_and_swap(&status.max_in_use, old_max, in_use));
    }
}

// Effect: Like toku_memory_footprint, except instead of passing p,
//   we pass toku_malloc_usable_size(p).
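//   For allocations at or above the recorded mmap threshold, only the touched
//   bytes are counted (in whole pages), since untouched pages of an mmapped
//   region need not be backed by physical memory.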
size_t
toku_memory_footprint_given_usable_size(size_t touched, size_t usable)
{
    size_t pagesize = toku_os_get_pagesize();
    if (usable >= status.mmap_threshold) {
        int num_pages = (touched + pagesize) / pagesize;
        return num_pages * pagesize;
    }
    return usable;
}

// Effect: Return an estimate of how much space an object is using, possibly by
//   using toku_malloc_usable_size(p).
//   If p is NULL then returns 0.
size_t
toku_memory_footprint(void * p, size_t touched)
{
    if (!p) return 0;
    return toku_memory_footprint_given_usable_size(touched,
                                                   my_malloc_usable_size(p));
}

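// Effect: Allocate `size` bytes through the installed malloc hook (or os_malloc),
//   updating the allocation statistics when toku_memory_do_stats is set.
//   Returns NULL if the allocation fails.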
void *
toku_malloc(size_t size) {
#if defined(__APPLE__)
    if (size == 0) {
        return nullptr;
    }
#endif

    if (size > status.max_requested_size) {
        status.max_requested_size = size;
    }
    void *p = t_malloc ? t_malloc(size) : os_malloc(size);
    if (p) {
        TOKU_ANNOTATE_NEW_MEMORY(p, size); // see #4671 and https://bugs.kde.org/show_bug.cgi?id=297147
        if (toku_memory_do_stats) {
            size_t used = my_malloc_usable_size(p);
            toku_sync_add_and_fetch(&status.malloc_count, 1);
            toku_sync_add_and_fetch(&status.requested, size);
            toku_sync_add_and_fetch(&status.used, used);
            set_max(status.used, status.freed);
        }
    } else {
        toku_sync_add_and_fetch(&status.malloc_fail, 1);
        status.last_failed_size = size;
    }
    return p;
}


void *toku_malloc_aligned(size_t alignment, size_t size) {
#if defined(__APPLE__)
    if (size == 0) {
        return nullptr;
    }
#endif

    if (size > status.max_requested_size) {
        status.max_requested_size = size;
    }
    void *p = t_malloc_aligned ? t_malloc_aligned(alignment, size) : os_malloc_aligned(alignment, size);
    if (p) {
        TOKU_ANNOTATE_NEW_MEMORY(p, size); // see #4671 and https://bugs.kde.org/show_bug.cgi?id=297147
        if (toku_memory_do_stats) {
            size_t used = my_malloc_usable_size(p);
            toku_sync_add_and_fetch(&status.malloc_count, 1);
            toku_sync_add_and_fetch(&status.requested, size);
            toku_sync_add_and_fetch(&status.used, used);
            set_max(status.used, status.freed);
        }
    } else {
        toku_sync_add_and_fetch(&status.malloc_fail, 1);
        status.last_failed_size = size;
    }
    return p;
}

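// Effect: Like calloc: allocate nmemb * size bytes and zero them.  Implemented
//   as toku_malloc + memset; the nmemb * size product is not checked for overflow.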
void *
toku_calloc(size_t nmemb, size_t size) {
    size_t newsize = nmemb * size;
    void *p = toku_malloc(newsize);
    if (p) memset(p, 0, newsize);
    return p;
}

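// Effect: Reallocate p to `size` bytes through the installed realloc hook (or
//   os_realloc), with the usual realloc contract: on failure NULL is returned
//   and the original block is left intact.  (On macOS a size of 0 frees p and
//   returns nullptr.)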
void *
toku_realloc(void *p, size_t size) {
#if defined(__APPLE__)
    if (size == 0) {
        if (p != nullptr) {
            toku_free(p);
        }
        return nullptr;
    }
#endif

    if (size > status.max_requested_size) {
        status.max_requested_size = size;
    }
    size_t used_orig = p ? my_malloc_usable_size(p) : 0;
    void *q = t_realloc ? t_realloc(p, size) : os_realloc(p, size);
    if (q) {
        if (toku_memory_do_stats) {
            size_t used = my_malloc_usable_size(q);
            toku_sync_add_and_fetch(&status.realloc_count, 1);
            toku_sync_add_and_fetch(&status.requested, size);
            toku_sync_add_and_fetch(&status.used, used);
            toku_sync_add_and_fetch(&status.freed, used_orig);
            set_max(status.used, status.freed);
        }
    } else {
        toku_sync_add_and_fetch(&status.realloc_fail, 1);
        status.last_failed_size = size;
    }
    return q;
}

void *toku_realloc_aligned(size_t alignment, void *p, size_t size) {
#if defined(__APPLE__)
    if (size == 0) {
        if (p != nullptr) {
            toku_free(p);
        }
        return nullptr;
    }
#endif

    if (size > status.max_requested_size) {
        status.max_requested_size = size;
    }
    size_t used_orig = p ? my_malloc_usable_size(p) : 0;
    void *q = t_realloc_aligned ? t_realloc_aligned(alignment, p, size) : os_realloc_aligned(alignment, p, size);
    if (q) {
        if (toku_memory_do_stats) {
            size_t used = my_malloc_usable_size(q);
            toku_sync_add_and_fetch(&status.realloc_count, 1);
            toku_sync_add_and_fetch(&status.requested, size);
            toku_sync_add_and_fetch(&status.used, used);
            toku_sync_add_and_fetch(&status.freed, used_orig);
            set_max(status.used, status.freed);
        }
    } else {
        toku_sync_add_and_fetch(&status.realloc_fail, 1);
        status.last_failed_size = size;
    }
    return q;
}

void *
toku_memdup(const void *v, size_t len) {
    void *p = toku_malloc(len);
    if (p) memcpy(p, v, len);
    return p;
}

char *
toku_strdup(const char *s) {
    return (char *) toku_memdup(s, strlen(s)+1);
}

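// Effect: Like strndup: duplicate at most n bytes of s and NUL-terminate the copy.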
char *toku_strndup(const char *s, size_t n) {
    size_t s_size = strlen(s);
    size_t bytes_to_copy = n > s_size ? s_size : n;
    ++bytes_to_copy;
    char *result = (char *)toku_memdup(s, bytes_to_copy);
    result[bytes_to_copy - 1] = 0;
    return result;
}

void
toku_free(void *p) {
    if (p) {
        if (toku_memory_do_stats) {
            size_t used = my_malloc_usable_size(p);
            toku_sync_add_and_fetch(&status.free_count, 1);
            toku_sync_add_and_fetch(&status.freed, used);
        }
        if (t_free)
            t_free(p);
        else
            os_free(p);
    }
}

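// The toku_x* variants below abort the process (via resource_assert) instead of
// returning NULL when an allocation fails, so callers do not need to check the result.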
void *
toku_xmalloc(size_t size) {
#if defined(__APPLE__)
    if (size == 0) {
        return nullptr;
    }
#endif

    if (size > status.max_requested_size) {
        status.max_requested_size = size;
    }
    void *p = t_xmalloc ? t_xmalloc(size) : os_malloc(size);
    if (p == NULL) {  // avoid function call in common case
        status.last_failed_size = size;
        resource_assert(p);
    }
    TOKU_ANNOTATE_NEW_MEMORY(p, size); // see #4671 and https://bugs.kde.org/show_bug.cgi?id=297147
    if (toku_memory_do_stats) {
        size_t used = my_malloc_usable_size(p);
        toku_sync_add_and_fetch(&status.malloc_count, 1);
        toku_sync_add_and_fetch(&status.requested, size);
        toku_sync_add_and_fetch(&status.used, used);
        set_max(status.used, status.freed);
    }
    return p;
}

void* toku_xmalloc_aligned(size_t alignment, size_t size)
// Effect: Perform a malloc(size) with the additional property that the returned pointer is a multiple of ALIGNMENT.
//  Fail with a resource_assert if the allocation fails (don't return an error code).
// Requires: alignment is a power of two.
{
#if defined(__APPLE__)
    if (size == 0) {
        return nullptr;
    }
#endif

    if (size > status.max_requested_size) {
        status.max_requested_size = size;
    }
    void *p = t_xmalloc_aligned ? t_xmalloc_aligned(alignment, size) : os_malloc_aligned(alignment, size);
    if (p == NULL && size != 0) {
        status.last_failed_size = size;
        resource_assert(p);
    }
    if (toku_memory_do_stats) {
        size_t used = my_malloc_usable_size(p);
        toku_sync_add_and_fetch(&status.malloc_count, 1);
        toku_sync_add_and_fetch(&status.requested, size);
        toku_sync_add_and_fetch(&status.used, used);
        set_max(status.used, status.freed);
    }
    return p;
}

void *
toku_xcalloc(size_t nmemb, size_t size) {
    size_t newsize = nmemb * size;
    void *vp = toku_xmalloc(newsize);
    if (vp) memset(vp, 0, newsize);
    return vp;
}

void *
toku_xrealloc(void *v, size_t size) {
#if defined(__APPLE__)
    if (size == 0) {
        if (v != nullptr) {
            toku_free(v);
        }
        return nullptr;
    }
#endif

    if (size > status.max_requested_size) {
        status.max_requested_size = size;
    }
    size_t used_orig = v ? my_malloc_usable_size(v) : 0;
    void *p = t_xrealloc ? t_xrealloc(v, size) : os_realloc(v, size);
    if (p == 0) {  // avoid function call in common case
        status.last_failed_size = size;
        resource_assert(p);
    }
    if (toku_memory_do_stats) {
        size_t used = my_malloc_usable_size(p);
        toku_sync_add_and_fetch(&status.realloc_count, 1);
        toku_sync_add_and_fetch(&status.requested, size);
        toku_sync_add_and_fetch(&status.used, used);
        toku_sync_add_and_fetch(&status.freed, used_orig);
        set_max(status.used, status.freed);
    }
    return p;
}

size_t
toku_malloc_usable_size(void *p) {
    return my_malloc_usable_size(p);
}

void *
toku_xmemdup (const void *v, size_t len) {
    void *p = toku_xmalloc(len);
    memcpy(p, v, len);
    return p;
}

char *
toku_xstrdup (const char *s) {
    return (char *) toku_xmemdup(s, strlen(s)+1);
}

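// Hook installers.  toku_set_func_malloc and toku_set_func_realloc install the
// function for both the plain and the x* paths; the *_only variants set just one
// of them.  Illustrative sketch (hypothetical caller code) of how a test might
// route allocations through a counting allocator:
//
//     static uint64_t n_mallocs;                      // hypothetical counter
//     static void *counting_malloc(size_t n) {
//         toku_sync_add_and_fetch(&n_mallocs, 1);
//         return malloc(n);
//     }
//     ...
//     toku_set_func_malloc(counting_malloc);          // used by toku_malloc and toku_xmalloc
//     toku_set_func_free(free);                       // pair with libc free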
void
toku_set_func_malloc(malloc_fun_t f) {
    t_malloc = f;
    t_xmalloc = f;
}

void
toku_set_func_xmalloc_only(malloc_fun_t f) {
    t_xmalloc = f;
}

void
toku_set_func_malloc_only(malloc_fun_t f) {
    t_malloc = f;
}

void
toku_set_func_realloc(realloc_fun_t f) {
    t_realloc = f;
    t_xrealloc = f;
}

void
toku_set_func_xrealloc_only(realloc_fun_t f) {
    t_xrealloc = f;
}

void
toku_set_func_realloc_only(realloc_fun_t f) {
    t_realloc = f;
}

void
toku_set_func_free(free_fun_t f) {
    t_free = f;
}

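// The status struct is deliberately updated without full synchronization (see
// set_max and the unlocked max_requested_size / last_failed_size updates), so
// tell Helgrind/DRD not to report races on it.  The constructor attribute makes
// this run at load time, before main().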
#include <toku_race_tools.h>
void __attribute__((constructor)) toku_memory_helgrind_ignore(void);
void
toku_memory_helgrind_ignore(void) {
    TOKU_VALGRIND_HG_DISABLE_CHECKING(&status, sizeof status);
}