1b7eaed25SJason Evans #ifndef JEMALLOC_INTERNAL_TICKER_H
2b7eaed25SJason Evans #define JEMALLOC_INTERNAL_TICKER_H
3df0d881dSJason Evans 
4b7eaed25SJason Evans #include "jemalloc/internal/util.h"
5df0d881dSJason Evans 
6b7eaed25SJason Evans /**
7b7eaed25SJason Evans  * A ticker makes it easy to count-down events until some limit.  You
8b7eaed25SJason Evans  * ticker_init the ticker to trigger every nticks events.  You then notify it
9b7eaed25SJason Evans  * that an event has occurred with calls to ticker_tick (or that nticks events
10b7eaed25SJason Evans  * have occurred with a call to ticker_ticks), which will return true (and reset
11b7eaed25SJason Evans  * the counter) if the countdown hit zero.
12b7eaed25SJason Evans  */
13df0d881dSJason Evans 
typedef struct {
	int32_t tick;   /* Events remaining before the ticker fires. */
	int32_t nticks; /* Reload value restored each time the countdown triggers. */
} ticker_t;
18df0d881dSJason Evans 
19b7eaed25SJason Evans static inline void
ticker_init(ticker_t * ticker,int32_t nticks)20b7eaed25SJason Evans ticker_init(ticker_t *ticker, int32_t nticks) {
21df0d881dSJason Evans 	ticker->tick = nticks;
22df0d881dSJason Evans 	ticker->nticks = nticks;
23df0d881dSJason Evans }
24df0d881dSJason Evans 
25b7eaed25SJason Evans static inline void
ticker_copy(ticker_t * ticker,const ticker_t * other)26b7eaed25SJason Evans ticker_copy(ticker_t *ticker, const ticker_t *other) {
27df0d881dSJason Evans 	*ticker = *other;
28df0d881dSJason Evans }
29df0d881dSJason Evans 
30b7eaed25SJason Evans static inline int32_t
ticker_read(const ticker_t * ticker)31b7eaed25SJason Evans ticker_read(const ticker_t *ticker) {
32b7eaed25SJason Evans 	return ticker->tick;
33df0d881dSJason Evans }
34df0d881dSJason Evans 
350ef50b4eSJason Evans /*
360ef50b4eSJason Evans  * Not intended to be a public API.  Unfortunately, on x86, neither gcc nor
370ef50b4eSJason Evans  * clang seems smart enough to turn
380ef50b4eSJason Evans  *   ticker->tick -= nticks;
390ef50b4eSJason Evans  *   if (unlikely(ticker->tick < 0)) {
400ef50b4eSJason Evans  *     fixup ticker
410ef50b4eSJason Evans  *     return true;
420ef50b4eSJason Evans  *   }
430ef50b4eSJason Evans  *   return false;
440ef50b4eSJason Evans  * into
450ef50b4eSJason Evans  *   subq %nticks_reg, (%ticker_reg)
460ef50b4eSJason Evans  *   js fixup ticker
470ef50b4eSJason Evans  *
480ef50b4eSJason Evans  * unless we force "fixup ticker" out of line.  In that case, gcc gets it right,
490ef50b4eSJason Evans  * but clang now does worse than before.  So, on x86 with gcc, we force it out
500ef50b4eSJason Evans  * of line, but otherwise let the inlining occur.  Ordinarily this wouldn't be
510ef50b4eSJason Evans  * worth the hassle, but this is on the fast path of both malloc and free (via
520ef50b4eSJason Evans  * tcache_event).
530ef50b4eSJason Evans  */
#if defined(__GNUC__) && !defined(__clang__)				\
    && (defined(__x86_64__) || defined(__i386__))
JEMALLOC_NOINLINE
#endif
static bool
ticker_fixup(ticker_t *ticker) {
	/*
	 * Slow path shared by ticker_ticks: reload the countdown and report
	 * that the ticker fired.  Forced out of line on x86 gcc (see the
	 * rationale comment above) so the fast path compiles to a bare
	 * subtract-and-branch.
	 */
	ticker->tick = ticker->nticks;
	return true;
}
630ef50b4eSJason Evans 
static inline bool
ticker_ticks(ticker_t *ticker, int32_t nticks) {
	/*
	 * Record nticks events at once; returns true (and resets via
	 * ticker_fixup) when the countdown crosses below zero.  Kept in
	 * exactly this subtract-then-test shape so compilers can emit the
	 * sub/js sequence described in the comment preceding ticker_fixup —
	 * do not restructure without rechecking codegen.
	 */
	ticker->tick -= nticks;
	if (unlikely(ticker->tick < 0)) {
		return ticker_fixup(ticker);
	}
	return false;
}
72df0d881dSJason Evans 
73b7eaed25SJason Evans static inline bool
ticker_tick(ticker_t * ticker)74b7eaed25SJason Evans ticker_tick(ticker_t *ticker) {
75b7eaed25SJason Evans 	return ticker_ticks(ticker, 1);
76df0d881dSJason Evans }
77df0d881dSJason Evans 
78c5ad8142SEric van Gyzen /*
79c5ad8142SEric van Gyzen  * Try to tick.  If ticker would fire, return true, but rely on
80c5ad8142SEric van Gyzen  * slowpath to reset ticker.
81c5ad8142SEric van Gyzen  */
82c5ad8142SEric van Gyzen static inline bool
ticker_trytick(ticker_t * ticker)83c5ad8142SEric van Gyzen ticker_trytick(ticker_t *ticker) {
84c5ad8142SEric van Gyzen 	--ticker->tick;
85c5ad8142SEric van Gyzen 	if (unlikely(ticker->tick < 0)) {
86c5ad8142SEric van Gyzen 		return true;
87c5ad8142SEric van Gyzen 	}
88c5ad8142SEric van Gyzen 	return false;
89c5ad8142SEric van Gyzen }
90c5ad8142SEric van Gyzen 
91b7eaed25SJason Evans #endif /* JEMALLOC_INTERNAL_TICKER_H */
92