#ifndef JEMALLOC_INTERNAL_MUTEX_PROF_H
#define JEMALLOC_INTERNAL_MUTEX_PROF_H

#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/nstime.h"
#include "jemalloc/internal/tsd_types.h"

/*
 * X-macro list of the global (non-arena) mutexes that are profiled.
 * Consumers define a one-argument OP(mutex_name) before expanding it.
 */
#define MUTEX_PROF_GLOBAL_MUTEXES			\
    OP(background_thread)				\
    OP(ctl)						\
    OP(prof)

/* Index enum for the global mutexes; the last enumerator is the count. */
typedef enum {
#define OP(mtx) global_prof_mutex_##mtx,
	MUTEX_PROF_GLOBAL_MUTEXES
#undef OP
	mutex_prof_num_global_mutexes
} mutex_prof_global_ind_t;

/* X-macro list of the per-arena mutexes that are profiled. */
#define MUTEX_PROF_ARENA_MUTEXES			\
    OP(large)						\
    OP(extent_avail)					\
    OP(extents_dirty)					\
    OP(extents_muzzy)					\
    OP(extents_retained)				\
    OP(decay_dirty)					\
    OP(decay_muzzy)					\
    OP(base)						\
    OP(tcache_list)

/* Index enum for the per-arena mutexes; the last enumerator is the count. */
typedef enum {
#define OP(mtx) arena_prof_mutex_##mtx,
	MUTEX_PROF_ARENA_MUTEXES
#undef OP
	mutex_prof_num_arena_mutexes
} mutex_prof_arena_ind_t;

/*
 * Per-mutex counter lists, expanded via
 * OP(counter, type, human_readable_name, derived, base_counter).
 * The fourth parameter is a boolean value that is true for derived rate
 * counters and false for real ones.
 */
#define MUTEX_PROF_UINT64_COUNTERS					\
    OP(num_ops, uint64_t, "n_lock_ops", false, num_ops)			\
    OP(num_ops_ps, uint64_t, "(#/sec)", true, num_ops)			\
    OP(num_wait, uint64_t, "n_waiting", false, num_wait)		\
    OP(num_wait_ps, uint64_t, "(#/sec)", true, num_wait)		\
    OP(num_spin_acq, uint64_t, "n_spin_acq", false, num_spin_acq)	\
    OP(num_spin_acq_ps, uint64_t, "(#/sec)", true, num_spin_acq)	\
    OP(num_owner_switch, uint64_t, "n_owner_switch", false, num_owner_switch) \
    OP(num_owner_switch_ps, uint64_t, "(#/sec)", true, num_owner_switch) \
    OP(total_wait_time, uint64_t, "total_wait_ns", false, total_wait_time) \
    OP(total_wait_time_ps, uint64_t, "(#/sec)", true, total_wait_time)	\
    OP(max_wait_time, uint64_t, "max_wait_ns", false, max_wait_time)

#define MUTEX_PROF_UINT32_COUNTERS					\
    OP(max_num_thds, uint32_t, "max_n_thds", false, max_num_thds)

/* All profiled counters, both widths, in one list. */
#define MUTEX_PROF_COUNTERS						\
	MUTEX_PROF_UINT64_COUNTERS					\
	MUTEX_PROF_UINT32_COUNTERS

#define OP(counter, type, human, derived, base_counter) mutex_counter_##counter,

/* Generate an index enum (plus a trailing count) for a counter list of width t. */
#define COUNTER_ENUM(counter_list, t)					\
	typedef enum {							\
		counter_list						\
		mutex_prof_num_##t##_counters				\
	} mutex_prof_##t##_counter_ind_t;

COUNTER_ENUM(MUTEX_PROF_UINT64_COUNTERS, uint64_t)
COUNTER_ENUM(MUTEX_PROF_UINT32_COUNTERS, uint32_t)
#undef COUNTER_ENUM
#undef OP

/*
 * Profiling data collected for a single mutex.  Field order matters: the
 * fast-path fields are intentionally placed last (see comment below).
 */
typedef struct {
	/*
	 * Counters touched on the slow path, i.e. when there is lock
	 * contention.  We update them once we have the lock.
	 */
	/* Total time (in nanoseconds) spent waiting on this mutex. */
	nstime_t tot_wait_time;
	/* Max time (in nanoseconds) spent on a single lock operation. */
	nstime_t max_wait_time;
	/* # of times have to wait for this mutex (after spinning). */
	uint64_t n_wait_times;
	/* # of times acquired the mutex through local spinning. */
	uint64_t n_spin_acquired;
	/* Max # of threads waiting for the mutex at the same time. */
	uint32_t max_n_thds;
	/* Current # of threads waiting on the lock.  Atomic synced. */
	atomic_u32_t n_waiting_thds;

	/*
	 * Data touched on the fast path.  These are modified right after we
	 * grab the lock, so it's placed closest to the end (i.e. right before
	 * the lock) so that we have a higher chance of them being on the same
	 * cacheline.
	 */
	/* # of times the mutex holder is different than the previous one. */
	uint64_t n_owner_switches;
	/* Previous mutex holder, to facilitate n_owner_switches. */
	tsdn_t *prev_owner;
	/* # of lock() operations in total. */
	uint64_t n_lock_ops;
} mutex_prof_data_t;

#endif /* JEMALLOC_INTERNAL_MUTEX_PROF_H */