1 #ifndef JEMALLOC_INTERNAL_STATS_H
2 #define JEMALLOC_INTERNAL_STATS_H
3 
4 #include "jemalloc/internal/atomic.h"
5 #include "jemalloc/internal/mutex_prof.h"
6 #include "jemalloc/internal/mutex.h"
7 #include "jemalloc/internal/size_classes.h"
8 #include "jemalloc/internal/stats_tsd.h"
9 
/*
 * X-macro list of stats_print option flags.  Columns:
 *   opt          - single character accepted in the opts string
 *   var_name     - suffix used to generate per-option identifiers
 *   default      - value when the flag character is absent
 *   set_value_to - value assigned when the flag character is present
 *
 *  OPTION(opt,		var_name,	default,	set_value_to) */
#define STATS_PRINT_OPTIONS						\
    OPTION('J',		json,		false,		true)		\
    OPTION('g',		general,	true,		false)		\
    OPTION('m',		merged,		config_stats,	false)		\
    OPTION('d',		destroyed,	config_stats,	false)		\
    OPTION('a',		unmerged,	config_stats,	false)		\
    OPTION('b',		bins,		true,		false)		\
    OPTION('l',		large,		true,		false)		\
    OPTION('x',		mutex,		true,		false)
20 
/*
 * Expand the option list into sequential indices, one enumerator
 * stats_print_option_num_<var_name> per option, with
 * stats_print_tot_num_options as the total option count (used below to size
 * opt_stats_print_opts).
 */
enum {
#define OPTION(o, v, d, s) stats_print_option_num_##v,
    STATS_PRINT_OPTIONS
#undef OPTION
    stats_print_tot_num_options
};
27 
/* Options for stats_print. */
extern bool opt_stats_print;
/*
 * Default opts string applied by opt.stats_print: one slot per option
 * character plus the terminating NUL.
 */
extern char opt_stats_print_opts[stats_print_tot_num_options+1];

/*
 * Implements je_malloc_stats_print.  write_cb is the output sink, invoked
 * with cbopaque as its first argument; opts selects output sections via the
 * flag characters defined in STATS_PRINT_OPTIONS above.
 */
void stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
    const char *opts);
35 
36 /*
37  * In those architectures that support 64-bit atomics, we use atomic updates for
38  * our 64-bit values.  Otherwise, we use a plain uint64_t and synchronize
39  * externally.
40  */
41 #ifdef JEMALLOC_ATOMIC_U64
42 typedef atomic_u64_t arena_stats_u64_t;
43 #else
44 /* Must hold the arena stats mutex while reading atomically. */
45 typedef uint64_t arena_stats_u64_t;
46 #endif
47 
/* Per-bin (small size class) allocation statistics. */
typedef struct malloc_bin_stats_s {
	/*
	 * Total number of allocation/deallocation requests served directly by
	 * the bin.  Note that tcache may allocate an object, then recycle it
	 * many times, resulting in many increments to nrequests, but only one
	 * each to nmalloc and ndalloc.
	 */
	uint64_t	nmalloc;
	uint64_t	ndalloc;

	/*
	 * Number of allocation requests that correspond to the size of this
	 * bin.  This includes requests served by tcache, though tcache only
	 * periodically merges into this counter.
	 */
	uint64_t	nrequests;

	/*
	 * Current number of regions of this size class, including regions
	 * currently cached by tcache.
	 */
	size_t		curregs;

	/* Number of tcache fills from this bin. */
	uint64_t	nfills;

	/* Number of tcache flushes to this bin. */
	uint64_t	nflushes;

	/* Total number of slabs created for this bin's size class. */
	uint64_t	nslabs;

	/*
	 * Total number of slabs reused by extracting them from the slabs heap
	 * for this bin's size class.
	 */
	uint64_t	reslabs;

	/* Current number of slabs in this bin. */
	size_t		curslabs;

	/* Contention profiling data for this bin's lock. */
	mutex_prof_data_t mutex_data;
} malloc_bin_stats_t;
91 
/*
 * Per-large-size-class allocation statistics.  Counters use
 * arena_stats_u64_t, so on platforms without 64-bit atomics the arena stats
 * mutex must be held for consistent reads (see arena_stats_u64_t above).
 */
typedef struct malloc_large_stats_s {
	/*
	 * Total number of allocation/deallocation requests served directly by
	 * the arena.
	 */
	arena_stats_u64_t	nmalloc;
	arena_stats_u64_t	ndalloc;

	/*
	 * Number of allocation requests that correspond to this size class.
	 * This includes requests served by tcache, though tcache only
	 * periodically merges into this counter.
	 */
	arena_stats_u64_t	nrequests; /* Partially derived. */

	/* Current number of allocations of this size class. */
	size_t		curlextents; /* Derived. */
} malloc_large_stats_t;
110 
/* Statistics for one decay-based purging policy (dirty or muzzy). */
typedef struct decay_stats_s {
	/* Total number of purge sweeps. */
	arena_stats_u64_t	npurge;
	/* Total number of madvise calls made. */
	arena_stats_u64_t	nmadvise;
	/* Total number of pages purged. */
	arena_stats_u64_t	purged;
} decay_stats_t;
119 
120 /*
121  * Arena stats.  Note that fields marked "derived" are not directly maintained
122  * within the arena code; rather their values are derived during stats merge
123  * requests.
124  */
typedef struct arena_stats_s {
#ifndef JEMALLOC_ATOMIC_U64
	/*
	 * Guards all arena_stats_u64_t fields on platforms lacking 64-bit
	 * atomics (see arena_stats_u64_t above); unneeded otherwise.
	 */
	malloc_mutex_t		mtx;
#endif

	/* Number of bytes currently mapped, excluding retained memory. */
	atomic_zu_t		mapped; /* Partially derived. */

	/*
	 * Number of unused virtual memory bytes currently retained.  Retained
	 * bytes are technically mapped (though always decommitted or purged),
	 * but they are excluded from the mapped statistic (above).
	 */
	atomic_zu_t		retained; /* Derived. */

	/* Purging statistics, one set per decay policy. */
	decay_stats_t		decay_dirty;
	decay_stats_t		decay_muzzy;

	atomic_zu_t		base; /* Derived. */
	atomic_zu_t		internal;
	atomic_zu_t		resident; /* Derived. */

	/* Totals across all large size classes (sums of lstats entries). */
	atomic_zu_t		allocated_large; /* Derived. */
	arena_stats_u64_t	nmalloc_large; /* Derived. */
	arena_stats_u64_t	ndalloc_large; /* Derived. */
	arena_stats_u64_t	nrequests_large; /* Derived. */

	/* Number of bytes cached in tcache associated with this arena. */
	atomic_zu_t		tcache_bytes; /* Derived. */

	/* Contention profiling data, one slot per arena mutex kind. */
	mutex_prof_data_t mutex_prof_data[mutex_prof_num_arena_mutexes];

	/* One element for each large size class. */
	malloc_large_stats_t	lstats[NSIZES - NBINS];

	/* Arena uptime. */
	nstime_t		uptime;
} arena_stats_t;
163 
164 #endif /* JEMALLOC_INTERNAL_STATS_H */
165