#ifndef JEMALLOC_INTERNAL_MUTEX_PROF_H
#define JEMALLOC_INTERNAL_MUTEX_PROF_H

#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/nstime.h"
#include "jemalloc/internal/tsd_types.h"

#define MUTEX_PROF_GLOBAL_MUTEXES					\
    OP(background_thread)						\
    OP(ctl)								\
    OP(prof)

typedef enum {
#define OP(mtx) global_prof_mutex_##mtx,
	MUTEX_PROF_GLOBAL_MUTEXES
#undef OP
	mutex_prof_num_global_mutexes
} mutex_prof_global_ind_t;
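
/*
 * Illustrative expansion (comment only, not compiled): with the OP
 * definition above, the preprocessor generates the equivalent of:
 *
 *	typedef enum {
 *		global_prof_mutex_background_thread,
 *		global_prof_mutex_ctl,
 *		global_prof_mutex_prof,
 *		mutex_prof_num_global_mutexes
 *	} mutex_prof_global_ind_t;
 */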

#define MUTEX_PROF_ARENA_MUTEXES					\
    OP(large)								\
    OP(extent_avail)							\
    OP(extents_dirty)							\
    OP(extents_muzzy)							\
    OP(extents_retained)						\
    OP(decay_dirty)							\
    OP(decay_muzzy)							\
    OP(base)								\
    OP(tcache_list)

typedef enum {
#define OP(mtx) arena_prof_mutex_##mtx,
	MUTEX_PROF_ARENA_MUTEXES
#undef OP
	mutex_prof_num_arena_mutexes
} mutex_prof_arena_ind_t;
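
/*
 * Sketch of intended use (hypothetical caller, not part of this header):
 * the trailing mutex_prof_num_arena_mutexes enumerator makes it easy to
 * iterate over every arena mutex index:
 *
 *	for (unsigned i = 0; i < mutex_prof_num_arena_mutexes; i++) {
 *		read_arena_mutex_stats(arena, (mutex_prof_arena_ind_t)i);
 *	}
 *
 * read_arena_mutex_stats() is an assumed helper, named for illustration.
 */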

/*
 * The fourth parameter is a boolean value that is true for derived rate
 * counters and false for real ones.  The fifth parameter names the base
 * counter that a derived rate is computed from.
 */
#define MUTEX_PROF_UINT64_COUNTERS					\
    OP(num_ops, uint64_t, "n_lock_ops", false, num_ops)		\
    OP(num_ops_ps, uint64_t, "(#/sec)", true, num_ops)			\
    OP(num_wait, uint64_t, "n_waiting", false, num_wait)		\
    OP(num_wait_ps, uint64_t, "(#/sec)", true, num_wait)		\
    OP(num_spin_acq, uint64_t, "n_spin_acq", false, num_spin_acq)	\
    OP(num_spin_acq_ps, uint64_t, "(#/sec)", true, num_spin_acq)	\
    OP(num_owner_switch, uint64_t, "n_owner_switch", false,		\
	num_owner_switch)						\
    OP(num_owner_switch_ps, uint64_t, "(#/sec)", true,			\
	num_owner_switch)						\
    OP(total_wait_time, uint64_t, "total_wait_ns", false,		\
	total_wait_time)						\
    OP(total_wait_time_ps, uint64_t, "(#/sec)", true, total_wait_time)	\
    OP(max_wait_time, uint64_t, "max_wait_ns", false, max_wait_time)

#define MUTEX_PROF_UINT32_COUNTERS					\
    OP(max_num_thds, uint32_t, "max_n_thds", false, max_num_thds)

#define MUTEX_PROF_COUNTERS						\
		MUTEX_PROF_UINT64_COUNTERS				\
		MUTEX_PROF_UINT32_COUNTERS
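
/*
 * Consumers stamp out per-counter code by redefining OP over these
 * lists.  A minimal sketch (the printf body is illustrative only, not
 * how the stats code actually formats output):
 *
 *	#define OP(counter, type, human, derived, base_counter)	\
 *		printf("%s derived=%d\n", human, (int)derived);
 *	MUTEX_PROF_COUNTERS
 *	#undef OP
 */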

#define OP(counter, type, human, derived, base_counter) mutex_counter_##counter,

#define COUNTER_ENUM(counter_list, t)					\
		typedef enum {						\
			counter_list					\
			mutex_prof_num_##t##_counters			\
		} mutex_prof_##t##_counter_ind_t;

COUNTER_ENUM(MUTEX_PROF_UINT64_COUNTERS, uint64_t)
COUNTER_ENUM(MUTEX_PROF_UINT32_COUNTERS, uint32_t)

#undef COUNTER_ENUM
#undef OP
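
/*
 * Illustrative expansion (comment only): COUNTER_ENUM applied to the
 * uint64_t list produces the equivalent of:
 *
 *	typedef enum {
 *		mutex_counter_num_ops,
 *		mutex_counter_num_ops_ps,
 *		...
 *		mutex_counter_max_wait_time,
 *		mutex_prof_num_uint64_t_counters
 *	} mutex_prof_uint64_t_counter_ind_t;
 */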

typedef struct {
	/*
	 * Counters touched on the slow path, i.e. when there is lock
	 * contention.  We update them once we have the lock.
	 */
	/* Total time (in nanoseconds) spent waiting on this mutex. */
	nstime_t		tot_wait_time;
	/* Max time (in nanoseconds) spent on a single lock operation. */
	nstime_t		max_wait_time;
	/* # of times we had to wait for this mutex (after spinning). */
	uint64_t		n_wait_times;
	/* # of times the mutex was acquired through local spinning. */
	uint64_t		n_spin_acquired;
	/* Max # of threads waiting for the mutex at the same time. */
	uint32_t		max_n_thds;
	/* Current # of threads waiting on the lock.  Atomically synced. */
	atomic_u32_t		n_waiting_thds;

	/*
	 * Data touched on the fast path.  These are modified right after we
	 * grab the lock, so they are placed closest to the end (i.e. right
	 * before the lock) so that we have a higher chance of them being on
	 * the same cacheline.
	 */
	/* # of times the mutex holder is different from the previous one. */
	uint64_t		n_owner_switches;
	/* Previous mutex holder, to facilitate n_owner_switches. */
	tsdn_t			*prev_owner;
	/* # of lock() operations in total. */
	uint64_t		n_lock_ops;
} mutex_prof_data_t;
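
/*
 * Example (sketch): n_waiting_thds is read and updated without holding
 * the mutex, so accesses go through the atomic API from atomic.h, e.g.:
 *
 *	uint32_t n = atomic_load_u32(&data->n_waiting_thds,
 *	    ATOMIC_RELAXED);
 */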

#endif /* JEMALLOC_INTERNAL_MUTEX_PROF_H */