1 #ifndef JEMALLOC_INTERNAL_MUTEX_PROF_H
2 #define JEMALLOC_INTERNAL_MUTEX_PROF_H
3 
4 #include "jemalloc/internal/atomic.h"
5 #include "jemalloc/internal/nstime.h"
6 #include "jemalloc/internal/tsd_types.h"
7 
/*
 * X-macro list of the global (non-arena) mutexes whose contention is
 * profiled.  Callers define OP(mtx) before expanding this list to generate
 * enumerators, stats table entries, etc.  Order matters: it fixes the
 * enumerator values in mutex_prof_global_ind_t below.
 */
#define MUTEX_PROF_GLOBAL_MUTEXES					\
    OP(background_thread)						\
    OP(ctl)								\
    OP(prof)
12 
/*
 * Index of each profiled global mutex, generated from the X-macro list
 * above (global_prof_mutex_background_thread, ...).  The trailing
 * mutex_prof_num_global_mutexes enumerator is the total count.
 */
typedef enum {
#define OP(mtx) global_prof_mutex_##mtx,
	MUTEX_PROF_GLOBAL_MUTEXES
#undef OP
	mutex_prof_num_global_mutexes
} mutex_prof_global_ind_t;
19 
/*
 * X-macro list of the per-arena mutexes whose contention is profiled.
 * Same OP(mtx) expansion protocol as MUTEX_PROF_GLOBAL_MUTEXES; order
 * fixes the enumerator values in mutex_prof_arena_ind_t below.
 */
#define MUTEX_PROF_ARENA_MUTEXES					\
    OP(large)								\
    OP(extent_avail)							\
    OP(extents_dirty)							\
    OP(extents_muzzy)							\
    OP(extents_retained)						\
    OP(decay_dirty)							\
    OP(decay_muzzy)							\
    OP(base)								\
    OP(tcache_list)
30 
/*
 * Index of each profiled per-arena mutex, generated from the X-macro list
 * above (arena_prof_mutex_large, ...).  The trailing
 * mutex_prof_num_arena_mutexes enumerator is the total count.
 */
typedef enum {
#define OP(mtx) arena_prof_mutex_##mtx,
	MUTEX_PROF_ARENA_MUTEXES
#undef OP
	mutex_prof_num_arena_mutexes
} mutex_prof_arena_ind_t;
37 
/*
 * X-macro lists of the per-mutex counters: OP(counter, type, human), where
 * "counter" names the enumerator/field, "type" is the counter's C type, and
 * "human" is the string emitted in stats output.  The lists are split by
 * counter width so consumers can generate width-specific accessors; the
 * time counters are reported in nanoseconds.
 */
#define MUTEX_PROF_UINT64_COUNTERS					\
    OP(num_ops, uint64_t, "n_lock_ops")					\
    OP(num_wait, uint64_t, "n_waiting")					\
    OP(num_spin_acq, uint64_t, "n_spin_acq")				\
    OP(num_owner_switch, uint64_t, "n_owner_switch")			\
    OP(total_wait_time, uint64_t, "total_wait_ns")			\
    OP(max_wait_time, uint64_t, "max_wait_ns")

#define MUTEX_PROF_UINT32_COUNTERS					\
    OP(max_num_thds, uint32_t, "max_n_thds")

/* All counters, regardless of width, in a single expandable list. */
#define MUTEX_PROF_COUNTERS						\
		MUTEX_PROF_UINT64_COUNTERS				\
		MUTEX_PROF_UINT32_COUNTERS
52 
/*
 * Generate one index enum per counter width.  Token pasting on the "t"
 * argument yields the type names mutex_prof_uint64_t_counter_ind_t and
 * mutex_prof_uint32_t_counter_ind_t, each with one mutex_counter_*
 * enumerator per counter plus a trailing mutex_prof_num_<t>_counters
 * count.  OP and COUNTER_ENUM are #undef'd afterwards so later list
 * expansions can redefine OP.
 */
#define OP(counter, type, human) mutex_counter_##counter,

#define COUNTER_ENUM(counter_list, t)					\
		typedef enum {						\
			counter_list					\
			mutex_prof_num_##t##_counters			\
		} mutex_prof_##t##_counter_ind_t;

COUNTER_ENUM(MUTEX_PROF_UINT64_COUNTERS, uint64_t)
COUNTER_ENUM(MUTEX_PROF_UINT32_COUNTERS, uint32_t)

#undef COUNTER_ENUM
#undef OP
66 
/*
 * Per-mutex contention statistics.  Field layout is deliberate: slow-path
 * counters come first, fast-path fields last so they are likely to share a
 * cacheline with the mutex that follows this struct in memory.
 */
typedef struct {
	/*
	 * Counters touched on the slow path, i.e. when there is lock
	 * contention.  We update them once we have the lock.
	 */
	/* Total time (in nanoseconds) spent waiting on this mutex. */
	nstime_t		tot_wait_time;
	/* Max time (in nanoseconds) spent on a single lock operation. */
	nstime_t		max_wait_time;
	/* # of times have to wait for this mutex (after spinning). */
	uint64_t		n_wait_times;
	/* # of times acquired the mutex through local spinning. */
	uint64_t		n_spin_acquired;
	/* Max # of threads waiting for the mutex at the same time. */
	uint32_t		max_n_thds;
	/* Current # of threads waiting on the lock.  Atomic synced. */
	atomic_u32_t		n_waiting_thds;

	/*
	 * Data touched on the fast path.  These are modified right after we
	 * grab the lock, so it's placed closest to the end (i.e. right before
	 * the lock) so that we have a higher chance of them being on the same
	 * cacheline.
	 */
	/* # of times the mutex holder is different than the previous one. */
	uint64_t		n_owner_switches;
	/* Previous mutex holder, to facilitate n_owner_switches. */
	tsdn_t			*prev_owner;
	/* # of lock() operations in total. */
	uint64_t		n_lock_ops;
} mutex_prof_data_t;
98 
99 #endif /* JEMALLOC_INTERNAL_MUTEX_PROF_H */
100