/*
 * Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_SERVICES_MALLOCTRACKER_HPP
#define SHARE_SERVICES_MALLOCTRACKER_HPP

#if INCLUDE_NMT

#include "memory/allocation.hpp"
#include "runtime/atomic.hpp"
#include "runtime/threadCritical.hpp"
#include "services/nmtCommon.hpp"
#include "utilities/nativeCallStack.hpp"

/*
 * This class counts memory allocations and deallocations, and records
 * the total allocated size and the number of allocations.
 * The counters are updated atomically.
 */
class MemoryCounter {
 private:
  volatile size_t   _count;
  volatile size_t   _size;

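  // The debug-only peak values below are updated with plain stores (no
  // atomics), so under concurrent updates they are best-effort.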
  DEBUG_ONLY(size_t   _peak_count;)
  DEBUG_ONLY(size_t   _peak_size; )

 public:
  MemoryCounter() : _count(0), _size(0) {
    DEBUG_ONLY(_peak_count = 0;)
    DEBUG_ONLY(_peak_size  = 0;)
  }

  inline void allocate(size_t sz) {
    Atomic::inc(&_count);
    if (sz > 0) {
      Atomic::add(&_size, sz);
      DEBUG_ONLY(_peak_size = MAX2(_peak_size, _size);)
    }
    DEBUG_ONLY(_peak_count = MAX2(_peak_count, _count);)
  }

  inline void deallocate(size_t sz) {
    assert(_count > 0, "Nothing allocated yet");
    assert(_size >= sz, "deallocation > allocated");
    Atomic::dec(&_count);
    if (sz > 0) {
      Atomic::sub(&_size, sz);
    }
  }

  inline void resize(ssize_t sz) {
    if (sz != 0) {
      assert(sz >= 0 || _size >= size_t(-sz), "Must be");
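      // Note: size_t(sz) relies on unsigned wrap-around, so adding it
      // subtracts |sz| when sz is negative.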
      Atomic::add(&_size, size_t(sz));
      DEBUG_ONLY(_peak_size = MAX2(_size, _peak_size);)
    }
  }

  inline size_t count() const { return _count; }
  inline size_t size()  const { return _size;  }
  DEBUG_ONLY(inline size_t peak_count() const { return _peak_count; })
  DEBUG_ONLY(inline size_t peak_size()  const { return _peak_size; })

};

/*
 * Malloc memory used by a particular subsystem.
 * It includes memory acquired through os::malloc()
 * calls and arenas' backing memory.
 */
class MallocMemory {
 private:
  MemoryCounter _malloc;
  MemoryCounter _arena;

 public:
  MallocMemory() { }

  inline void record_malloc(size_t sz) {
    _malloc.allocate(sz);
  }

  inline void record_free(size_t sz) {
    _malloc.deallocate(sz);
  }

  inline void record_new_arena() {
    _arena.allocate(0);
  }

  inline void record_arena_free() {
    _arena.deallocate(0);
  }

  inline void record_arena_size_change(ssize_t sz) {
    _arena.resize(sz);
  }

  inline size_t malloc_size()  const { return _malloc.size(); }
  inline size_t malloc_count() const { return _malloc.count();}
  inline size_t arena_size()   const { return _arena.size();  }
  inline size_t arena_count()  const { return _arena.count(); }

  DEBUG_ONLY(inline const MemoryCounter& malloc_counter() const { return _malloc; })
  DEBUG_ONLY(inline const MemoryCounter& arena_counter()  const { return _arena;  })
};

class MallocMemorySummary;

// A snapshot of malloc'd memory; includes malloc memory
// usage by type and the memory used by the tracking itself.
class MallocMemorySnapshot : public ResourceObj {
  friend class MallocMemorySummary;

 private:
  MallocMemory      _malloc[mt_number_of_types];
  MemoryCounter     _tracking_header;


 public:
  inline MallocMemory*  by_type(MEMFLAGS flags) {
    int index = NMTUtil::flag_to_index(flags);
    return &_malloc[index];
  }

  inline MemoryCounter* malloc_overhead() {
    return &_tracking_header;
  }

  // Total malloc'd memory amount
  size_t total() const;
  // Total malloc'd memory used by arenas
  size_t total_arena() const;

  inline size_t thread_count() const {
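    // by_type() is non-const, so cast away constness for this read-only query.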
    MallocMemorySnapshot* s = const_cast<MallocMemorySnapshot*>(this);
    return s->by_type(mtThreadStack)->malloc_count();
  }

  void copy_to(MallocMemorySnapshot* s) {
    // Need to make sure that mtChunks don't get deallocated while the
    // copy is going on, because their size is adjusted using this
    // buffer in make_adjustment().
    ThreadCritical tc;
    s->_tracking_header = _tracking_header;
    for (int index = 0; index < mt_number_of_types; index ++) {
      s->_malloc[index] = _malloc[index];
    }
  }

  // Make adjustment by subtracting chunks used by arenas
  // from total chunks to get total free chunk size
  void make_adjustment();
};

/*
 * This class is for collecting malloc statistics at summary level
 */
class MallocMemorySummary : AllStatic {
 private:
  // Reserve memory for placement of MallocMemorySnapshot object
  static size_t _snapshot[CALC_OBJ_SIZE_IN_TYPE(MallocMemorySnapshot, size_t)];
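  // initialize() is expected to construct the MallocMemorySnapshot in this
  // buffer (placement new); as_snapshot() below reinterprets the buffer.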

 public:
   static void initialize();

   static inline void record_malloc(size_t size, MEMFLAGS flag) {
     as_snapshot()->by_type(flag)->record_malloc(size);
   }

   static inline void record_free(size_t size, MEMFLAGS flag) {
     as_snapshot()->by_type(flag)->record_free(size);
   }

   static inline void record_new_arena(MEMFLAGS flag) {
     as_snapshot()->by_type(flag)->record_new_arena();
   }

   static inline void record_arena_free(MEMFLAGS flag) {
     as_snapshot()->by_type(flag)->record_arena_free();
   }

   static inline void record_arena_size_change(ssize_t size, MEMFLAGS flag) {
     as_snapshot()->by_type(flag)->record_arena_size_change(size);
   }

   static void snapshot(MallocMemorySnapshot* s) {
     as_snapshot()->copy_to(s);
     s->make_adjustment();
   }

   // Record memory used by malloc tracking header
   static inline void record_new_malloc_header(size_t sz) {
     as_snapshot()->malloc_overhead()->allocate(sz);
   }

   static inline void record_free_malloc_header(size_t sz) {
     as_snapshot()->malloc_overhead()->deallocate(sz);
   }

   // The memory used by malloc tracking headers
   static inline size_t tracking_overhead() {
     return as_snapshot()->malloc_overhead()->size();
   }

  static MallocMemorySnapshot* as_snapshot() {
    return (MallocMemorySnapshot*)_snapshot;
  }
};

/*
 * Malloc tracking header.
 * To satisfy the malloc alignment requirement, NMT uses two machine words for
 * the tracking header, which ensures 8-byte alignment on 32-bit systems and
 * 16-byte alignment on 64-bit systems (product build).
 */

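// Bit-field layout below (adds up to two machine words in both cases):
//   64-bit: _size:64 + _flags:8 + _pos_idx:16 + _bucket_idx:40 = 128 bits
//   32-bit: _size:32 + _flags:8 + _pos_idx:8  + _bucket_idx:16 =  64 bits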
class MallocHeader {
#ifdef _LP64
  size_t           _size      : 64;
  size_t           _flags     : 8;
  size_t           _pos_idx   : 16;
  size_t           _bucket_idx: 40;
#define MAX_MALLOCSITE_TABLE_SIZE right_n_bits(40)
#define MAX_BUCKET_LENGTH         right_n_bits(16)
#else
  size_t           _size      : 32;
  size_t           _flags     : 8;
  size_t           _pos_idx   : 8;
  size_t           _bucket_idx: 16;
#define MAX_MALLOCSITE_TABLE_SIZE  right_n_bits(16)
#define MAX_BUCKET_LENGTH          right_n_bits(8)
#endif  // _LP64

 public:
  MallocHeader(size_t size, MEMFLAGS flags, const NativeCallStack& stack, NMT_TrackingLevel level) {
    assert(sizeof(MallocHeader) == sizeof(void*) * 2,
      "Wrong header size");

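    // In NMT_minimal mode the header space exists but is left unpopulated,
    // and the allocation is not recorded in the summary.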
    if (level == NMT_minimal) {
      return;
    }

    _flags = NMTUtil::flag_to_index(flags);
    set_size(size);
    if (level == NMT_detail) {
      size_t bucket_idx;
      size_t pos_idx;
      if (record_malloc_site(stack, size, &bucket_idx, &pos_idx, flags)) {
        assert(bucket_idx <= MAX_MALLOCSITE_TABLE_SIZE, "Overflow bucket index");
        assert(pos_idx <= MAX_BUCKET_LENGTH, "Overflow bucket position index");
        _bucket_idx = bucket_idx;
        _pos_idx = pos_idx;
      }
    }

    MallocMemorySummary::record_malloc(size, flags);
    MallocMemorySummary::record_new_malloc_header(sizeof(MallocHeader));
  }

  inline size_t   size()  const { return _size; }
  inline MEMFLAGS flags() const { return (MEMFLAGS)_flags; }
  bool get_stack(NativeCallStack& stack) const;

  // Cleanup tracking information before the memory is released.
  void release() const;

 private:
  inline void set_size(size_t size) {
    _size = size;
  }
  bool record_malloc_site(const NativeCallStack& stack, size_t size,
    size_t* bucket_idx, size_t* pos_idx, MEMFLAGS flags) const;
};


// Main class called from MemTracker to track malloc activities
class MallocTracker : AllStatic {
 public:
  // Initialize malloc tracker for specific tracking level
  static bool initialize(NMT_TrackingLevel level);

  static bool transition(NMT_TrackingLevel from, NMT_TrackingLevel to);

  // malloc tracking header size for specific tracking level
  static inline size_t malloc_header_size(NMT_TrackingLevel level) {
    return (level == NMT_off) ? 0 : sizeof(MallocHeader);
  }

  // Parameter name convention:
  // memblock :   the beginning address for user data
  // malloc_base: the beginning address that includes malloc tracking header
  //
  // The relationship:
  // memblock = (char*)malloc_base + sizeof(nmt header)
  //
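  //   malloc_base           memblock
  //   |                     |
  //   v                     v
  //   +---------------------+------------------------+
  //   | MallocHeader        | user data ...          |
  //   +---------------------+------------------------+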

  // Record malloc on specified memory block
  static void* record_malloc(void* malloc_base, size_t size, MEMFLAGS flags,
    const NativeCallStack& stack, NMT_TrackingLevel level);

  // Record free on specified memory block
  static void* record_free(void* memblock);

  // Offset memory address to header address
  static inline void* get_base(void* memblock);
  static inline void* get_base(void* memblock, NMT_TrackingLevel level) {
    if (memblock == NULL || level == NMT_off) return memblock;
    return (char*)memblock - malloc_header_size(level);
  }

  // Get memory size
  static inline size_t get_size(void* memblock) {
    MallocHeader* header = malloc_header(memblock);
    return header->size();
  }

  // Get memory type
  static inline MEMFLAGS get_flags(void* memblock) {
    MallocHeader* header = malloc_header(memblock);
    return header->flags();
  }

  // Get header size
  static inline size_t get_header_size(void* memblock) {
    return (memblock == NULL) ? 0 : sizeof(MallocHeader);
  }

  static inline void record_new_arena(MEMFLAGS flags) {
    MallocMemorySummary::record_new_arena(flags);
  }

  static inline void record_arena_free(MEMFLAGS flags) {
    MallocMemorySummary::record_arena_free(flags);
  }

  static inline void record_arena_size_change(ssize_t size, MEMFLAGS flags) {
    MallocMemorySummary::record_arena_size_change(size, flags);
  }
 private:
  static inline MallocHeader* malloc_header(void *memblock) {
    assert(memblock != NULL, "NULL pointer");
    MallocHeader* header = (MallocHeader*)((char*)memblock - sizeof(MallocHeader));
    return header;
  }
};

#endif // INCLUDE_NMT


#endif // SHARE_SERVICES_MALLOCTRACKER_HPP