1 /*
2  * Copyright (C) Internet Systems Consortium, Inc. ("ISC")
3  *
4  * This Source Code Form is subject to the terms of the Mozilla Public
5  * License, v. 2.0. If a copy of the MPL was not distributed with this
6  * file, you can obtain one at https://mozilla.org/MPL/2.0/.
7  *
8  * See the COPYRIGHT file distributed with this work for additional
9  * information regarding copyright ownership.
10  */
11 
12 
13 /*! \file */
14 
15 #include <config.h>
16 
17 #include <inttypes.h>
18 #include <string.h>
19 
20 #include <isc/atomic.h>
21 #include <isc/buffer.h>
22 #include <isc/magic.h>
23 #include <isc/mem.h>
24 #include <isc/platform.h>
25 #include <isc/print.h>
26 #include <isc/rwlock.h>
27 #include <isc/stats.h>
28 #include <isc/util.h>
29 
30 #if defined(ISC_PLATFORM_HAVESTDATOMIC)
31 #if defined(__cplusplus)
32 #include <isc/stdatomic.h>
33 #else
34 #include <stdatomic.h>
35 #endif
36 #endif
37 
38 #define ISC_STATS_MAGIC			ISC_MAGIC('S', 't', 'a', 't')
39 #define ISC_STATS_VALID(x)		ISC_MAGIC_VALID(x, ISC_STATS_MAGIC)
40 
41 #if defined(ISC_PLATFORM_HAVESTDATOMIC)
42 /*%
43  * Just use stdatomics
44  */
45 #elif defined(ISC_PLATFORM_HAVEXADDQ) && defined(ISC_PLATFORM_HAVEATOMICSTOREQ)
46 /*%
47  * Local macro confirming presence of 64-bit
48  * increment and store operations, just to make
49  * the later macros simpler
50  */
51 # define ISC_STATS_HAVEATOMICQ 1
52 #else
53 
54 /*%
55  * Only lock the counters if 64-bit atomic operations are
56  * not available but cheap atomic lock operations are.
57  * On a modern 64-bit system this should never be the case.
58  *
59  * Normal locks are too expensive to be used whenever a counter
60  * is updated.
61  */
62 # if ISC_RWLOCK_USEATOMIC
63 #  define ISC_STATS_LOCKCOUNTERS 1
64 # endif /* ISC_RWLOCK_USEATOMIC */
65 
66 /*%
67  * If 64-bit atomic operations are not available but
68  * 32-bit operations are then split the counter into two,
69  * using the atomic operations to try to ensure that any carry
70  * from the low word is correctly carried into the high word.
71  *
72  * Otherwise, just rely on standard 64-bit data types
73  * and operations
74  */
75 # if defined(ISC_PLATFORM_HAVEXADD)
76 #  define ISC_STATS_USEMULTIFIELDS 1
77 # endif /* ISC_PLATFORM_HAVEXADD */
78 #endif /* ISC_PLATFORM_HAVESTDATOMIC */
79 
80 #if ISC_STATS_LOCKCOUNTERS
81 # define MAYBE_RWLOCK(a, b) isc_rwlock_lock(a, b);
82 # define MAYBE_RWUNLOCK(a, b) isc_rwlock_unlock(a, b)
83 #else
84 # define MAYBE_RWLOCK(a, b)
85 # define MAYBE_RWUNLOCK(a, b)
86 #endif
87 
#if ISC_PLATFORM_HAVESTDATOMIC
/* C11 atomics available: each counter is a single 64-bit atomic object. */
typedef atomic_uint_fast64_t isc_stat_t;
#elif ISC_STATS_HAVEATOMICQ
/* Platform-specific 64-bit atomic add/store (isc_atomic_xaddq/storeq). */
typedef uint64_t isc_stat_t;
#elif ISC_STATS_USEMULTIFIELDS
/*
 * Only 32-bit atomics available: split each counter into high and low
 * words and propagate carry/borrow by hand (see incrementcounter()).
 */
typedef struct {
	uint32_t hi;
	uint32_t lo;
} isc_stat_t;
#else
/* No usable atomics at all: plain 64-bit counters, updated unsynchronized. */
typedef uint64_t isc_stat_t;
#endif

struct isc_stats {
	/*% Unlocked */
	unsigned int	magic;		/* ISC_STATS_MAGIC while valid */
	isc_mem_t	*mctx;		/* memory context that owns this object */
	int		ncounters;	/* number of elements in 'counters' */

	isc_mutex_t	lock;
	unsigned int	references; /* locked by lock */

	/*%
	 * Locked by counterlock or unlocked if efficient rwlock is not
	 * available.
	 */
#if ISC_STATS_LOCKCOUNTERS
	isc_rwlock_t	counterlock;
#endif
	isc_stat_t	*counters;	/* array of 'ncounters' counters */
};
119 
static isc_result_t
create_stats(isc_mem_t *mctx, int ncounters, isc_stats_t **statsp) {
	/*
	 * Allocate and initialize a statistics set holding 'ncounters'
	 * zeroed counters from 'mctx'.  On success, store the new object
	 * (with references == 1) in '*statsp' and return ISC_R_SUCCESS.
	 * On failure, release everything acquired so far via the
	 * goto-cleanup chain below and return the error code.
	 */
	isc_stats_t *stats;
	isc_result_t result = ISC_R_SUCCESS;

	REQUIRE(statsp != NULL && *statsp == NULL);

	stats = isc_mem_get(mctx, sizeof(*stats));
	if (stats == NULL)
		return (ISC_R_NOMEMORY);

	result = isc_mutex_init(&stats->lock);
	if (result != ISC_R_SUCCESS)
		goto clean_stats;

	stats->counters = isc_mem_get(mctx, sizeof(isc_stat_t) * ncounters);
	if (stats->counters == NULL) {
		result = ISC_R_NOMEMORY;
		goto clean_mutex;
	}

#if ISC_STATS_LOCKCOUNTERS
	/* Only needed when counters are guarded by a rwlock (no 64-bit atomics). */
	result = isc_rwlock_init(&stats->counterlock, 0, 0);
	if (result != ISC_R_SUCCESS)
		goto clean_counters;
#endif

	stats->references = 1;
	/* All counters start at zero regardless of the isc_stat_t layout. */
	memset(stats->counters, 0, sizeof(isc_stat_t) * ncounters);
	stats->mctx = NULL;
	isc_mem_attach(mctx, &stats->mctx);
	stats->ncounters = ncounters;
	stats->magic = ISC_STATS_MAGIC;

	*statsp = stats;

	return (result);

	/* Error cleanup: each label undoes one acquisition, in reverse order. */
#if ISC_STATS_LOCKCOUNTERS
clean_counters:
	isc_mem_put(mctx, stats->counters, sizeof(isc_stat_t) * ncounters);
#endif

clean_mutex:
	DESTROYLOCK(&stats->lock);

clean_stats:
	isc_mem_put(mctx, stats, sizeof(*stats));

	return (result);
}
171 
172 void
isc_stats_attach(isc_stats_t * stats,isc_stats_t ** statsp)173 isc_stats_attach(isc_stats_t *stats, isc_stats_t **statsp) {
174 	REQUIRE(ISC_STATS_VALID(stats));
175 	REQUIRE(statsp != NULL && *statsp == NULL);
176 
177 	LOCK(&stats->lock);
178 	stats->references++;
179 	UNLOCK(&stats->lock);
180 
181 	*statsp = stats;
182 }
183 
184 void
isc_stats_detach(isc_stats_t ** statsp)185 isc_stats_detach(isc_stats_t **statsp) {
186 	isc_stats_t *stats;
187 
188 	REQUIRE(statsp != NULL && ISC_STATS_VALID(*statsp));
189 
190 	stats = *statsp;
191 	*statsp = NULL;
192 
193 	LOCK(&stats->lock);
194 	stats->references--;
195 
196 	if (stats->references == 0) {
197 		isc_mem_put(stats->mctx, stats->counters,
198 			    sizeof(isc_stat_t) * stats->ncounters);
199 		UNLOCK(&stats->lock);
200 		DESTROYLOCK(&stats->lock);
201 #if ISC_STATS_LOCKCOUNTERS
202 		isc_rwlock_destroy(&stats->counterlock);
203 #endif
204 		isc_mem_putanddetach(&stats->mctx, stats, sizeof(*stats));
205 		return;
206 	}
207 
208 	UNLOCK(&stats->lock);
209 }
210 
211 int
isc_stats_ncounters(isc_stats_t * stats)212 isc_stats_ncounters(isc_stats_t *stats) {
213 	REQUIRE(ISC_STATS_VALID(stats));
214 
215 	return (stats->ncounters);
216 }
217 
218 /*
219  * Inline the code if we can use atomic operations.
220  */
221 #if defined(ISC_PLATFORM_HAVESTDATOMIC) || defined(ISC_STATS_HAVEATOMICQ) || \
222     defined(ISC_STATS_USEMULTIFIELDS)
static inline void
incrementcounter(isc_stats_t *stats, int counter) {
	/*
	 * Atomically add 1 to counter 'counter'.  The variant chosen at
	 * compile time depends on which atomic primitives exist.
	 */
#if ISC_PLATFORM_HAVESTDATOMIC
	/* Relaxed ordering suffices: only the count matters, not ordering. */
	(void)atomic_fetch_add_explicit(&stats->counters[counter], 1,
					memory_order_relaxed);
#elif ISC_STATS_HAVEATOMICQ
	isc_atomic_xaddq((int64_t *)&stats->counters[counter], 1);
#elif ISC_STATS_USEMULTIFIELDS
	/* isc_atomic_xadd() returns the value *before* the addition. */
	int32_t prev = isc_atomic_xadd((int32_t *)&stats->counters[counter].lo, 1);
	/*
	 * If the lower 32-bit field overflows, increment the higher field.
	 * Note that it's *theoretically* possible that the lower field
	 * overlaps again before the higher field is incremented.  It doesn't
	 * matter, however, because we don't read the value until
	 * isc_stats_copy() is called where the whole process is protected
	 * by the write (exclusive) lock.
	 */
	if (prev == (int32_t)0xffffffff) {
		isc_atomic_xadd((int32_t *)&stats->counters[counter].hi, 1);
	}
#endif
}
245 
static inline void
decrementcounter(isc_stats_t *stats, int counter) {
	/*
	 * Atomically subtract 1 from counter 'counter'; mirror image of
	 * incrementcounter() above.
	 */
#if ISC_PLATFORM_HAVESTDATOMIC
	(void)atomic_fetch_sub_explicit(&stats->counters[counter], 1,
					memory_order_relaxed);
#elif ISC_STATS_HAVEATOMICQ
	(void)isc_atomic_xaddq((int64_t *)&stats->counters[counter], -1);
#elif ISC_STATS_USEMULTIFIELDS
	/*
	 * 'prev' is the low word before the subtraction; if it was 0 the
	 * low word underflowed, so borrow from the high word.
	 */
	int32_t prev =
		isc_atomic_xadd((int32_t *)&stats->counters[counter].lo, -1);
	if (prev == 0) {
		(void)isc_atomic_xadd((int32_t *)&stats->counters[counter].hi,
				      -1);
	}
#endif
}
262 
static inline uint64_t
getcounter(isc_stats_t *stats, const int counter) {
	/*
	 * Read the current 64-bit value of counter 'counter'.
	 */
#if ISC_PLATFORM_HAVESTDATOMIC
	return(atomic_load_explicit(&stats->counters[counter],
				    memory_order_relaxed));
#elif ISC_STATS_HAVEATOMICQ
	/* use xaddq(..., 0) as an atomic load */
	return((uint64_t)isc_atomic_xaddq((int64_t *)&stats->counters[counter],
					  0));
#else
	/*
	 * Multi-field case: the hi/lo composite read is not atomic by
	 * itself; callers that need a consistent snapshot take the
	 * counterlock write lock first (see copy_counters()).
	 */
	uint64_t curr_value;
	curr_value = ((uint64_t)stats->counters[counter].hi << 32) |
			stats->counters[counter].lo;
	return (curr_value);
#endif
}
279 
static inline void
setcounter(isc_stats_t *stats,
	   const isc_statscounter_t counter,
	   const uint64_t value)
{
	/*
	 * Overwrite counter 'counter' with 'value'.
	 */
#if ISC_PLATFORM_HAVESTDATOMIC
	atomic_store_explicit(&stats->counters[counter], value,
			      memory_order_relaxed);
#elif ISC_STATS_HAVEATOMICQ
	isc_atomic_storeq((int64_t *)&stats->counters[counter], value);
#else
# if ISC_STATS_USEMULTIFIELDS
	/*
	 * The two 32-bit halves are stored separately, so a concurrent
	 * reader could see a torn value; callers serialize via the
	 * counterlock write lock (see isc_stats_set()).
	 */
	isc_atomic_store((int32_t *)&stats->counters[counter].hi,
			 (uint32_t)((value >> 32) & 0xffffffff));
	isc_atomic_store((int32_t *)&stats->counters[counter].lo,
			 (uint32_t)(value & 0xffffffff));
# endif
#endif
}
299 #else
/*
 * Fallback implementations used when no atomic primitives are available:
 * counters are updated with plain, unsynchronized loads and stores.  The
 * ISC_NO_SANITIZE_THREAD / ISC_NO_SANITIZE_INLINE annotations tell the
 * thread sanitizer these races are tolerated — slightly inaccurate
 * statistics are accepted in this configuration.
 */
ISC_NO_SANITIZE_THREAD static ISC_NO_SANITIZE_INLINE void
incrementcounter(isc_stats_t *stats, int counter) {
	stats->counters[counter]++;
}

ISC_NO_SANITIZE_THREAD static ISC_NO_SANITIZE_INLINE void
decrementcounter(isc_stats_t *stats, int counter) {
	stats->counters[counter]--;
}

ISC_NO_SANITIZE_THREAD static ISC_NO_SANITIZE_INLINE uint64_t
getcounter(isc_stats_t *stats, const int counter) {
	return (stats->counters[counter]);
}

ISC_NO_SANITIZE_THREAD static ISC_NO_SANITIZE_INLINE void
setcounter(isc_stats_t *stats,
	   const isc_statscounter_t counter,
	   const uint64_t value)
{
	stats->counters[counter] = value;
}
322 #endif
323 
324 static void
copy_counters(isc_stats_t * stats,uint64_t * counters)325 copy_counters(isc_stats_t *stats, uint64_t *counters) {
326 	/*
327 	 * We use a "write" lock before "reading" the statistics counters as
328 	 * an exclusive lock.
329 	 */
330 	MAYBE_RWLOCK(&stats->counterlock, isc_rwlocktype_write);
331 	for (isc_statscounter_t counter = 0;
332 	     counter < stats->ncounters;
333 	     counter++)
334 	{
335 		counters[counter] = getcounter(stats, counter);
336 	}
337 	MAYBE_RWUNLOCK(&stats->counterlock, isc_rwlocktype_write);
338 }
339 
340 isc_result_t
isc_stats_create(isc_mem_t * mctx,isc_stats_t ** statsp,int ncounters)341 isc_stats_create(isc_mem_t *mctx, isc_stats_t **statsp, int ncounters) {
342 	REQUIRE(statsp != NULL && *statsp == NULL);
343 
344 	return (create_stats(mctx, ncounters, statsp));
345 }
346 
void
isc_stats_increment(isc_stats_t *stats, isc_statscounter_t counter) {
	/*
	 * Add 1 to counter 'counter'.  Safe to call concurrently from
	 * multiple threads (see locking comment below).
	 */
	REQUIRE(ISC_STATS_VALID(stats));
	REQUIRE(counter < stats->ncounters);

	/*
	 * We use a "read" lock to prevent other threads from reading the
	 * counter while we "writing" a counter field.  The write access itself
	 * is protected by the atomic operation.
	 */
	MAYBE_RWLOCK(&stats->counterlock, isc_rwlocktype_read);
	incrementcounter(stats, (int)counter);
	MAYBE_RWUNLOCK(&stats->counterlock, isc_rwlocktype_read);
}
361 
void
isc_stats_decrement(isc_stats_t *stats, isc_statscounter_t counter) {
	/*
	 * Subtract 1 from counter 'counter'.  Locking rationale matches
	 * isc_stats_increment(): the "read" lock only excludes snapshot
	 * readers; the update itself is atomic.
	 */
	REQUIRE(ISC_STATS_VALID(stats));
	REQUIRE(counter < stats->ncounters);

	MAYBE_RWLOCK(&stats->counterlock, isc_rwlocktype_read);
	decrementcounter(stats, (int)counter);
	MAYBE_RWUNLOCK(&stats->counterlock, isc_rwlocktype_read);
}
371 
372 void
isc_stats_dump(isc_stats_t * stats,isc_stats_dumper_t dump_fn,void * arg,unsigned int options)373 isc_stats_dump(isc_stats_t *stats, isc_stats_dumper_t dump_fn,
374 	       void *arg, unsigned int options)
375 {
376 	REQUIRE(ISC_STATS_VALID(stats));
377 
378 	uint64_t *counters;
379 	bool verbose = ((options & ISC_STATSDUMP_VERBOSE) != 0);
380 
381 	counters = isc_mem_get(stats->mctx,
382 			       sizeof(uint64_t) * stats->ncounters);
383 
384 	copy_counters(stats, counters);
385 
386 	for (isc_statscounter_t counter = 0;
387 	     counter < stats->ncounters;
388 	     counter++)
389 	{
390 		if (!verbose && counters[counter] == 0)
391 		{
392 			continue;
393 		}
394 		dump_fn(counter, counters[counter], arg);
395 	}
396 
397 	isc_mem_put(stats->mctx,
398 		    counters, sizeof(isc_stat_t) * stats->ncounters);
399 }
400 
void
isc_stats_set(isc_stats_t *stats, uint64_t val,
	      isc_statscounter_t counter)
{
	/*
	 * Overwrite counter 'counter' with 'val'.  The exclusive lock
	 * keeps concurrent incrementers (which hold the "read" lock)
	 * from interleaving with the possibly multi-word store.
	 */
	REQUIRE(ISC_STATS_VALID(stats));
	REQUIRE(counter < stats->ncounters);

	/*
	 * We use a "write" lock before "reading" the statistics counters as
	 * an exclusive lock.
	 */
	MAYBE_RWLOCK(&stats->counterlock, isc_rwlocktype_write);
	setcounter(stats, counter, val);
	MAYBE_RWUNLOCK(&stats->counterlock, isc_rwlocktype_write);
}
416 
void
isc_stats_update_if_greater(isc_stats_t *stats,
				 isc_statscounter_t counter,
				 uint64_t value)
{
	/*
	 * Set counter 'counter' to 'value' only if 'value' is greater
	 * than the current stored value (i.e. keep a running maximum).
	 */
	REQUIRE(ISC_STATS_VALID(stats));
	REQUIRE(counter < stats->ncounters);

#if ISC_PLATFORM_HAVESTDATOMIC
	/*
	 * Lock-free compare-and-swap loop: on CAS failure,
	 * atomic_compare_exchange_strong() reloads 'curr_value' with the
	 * freshly observed counter, so we re-test and either give up
	 * (already >= value) or retry the exchange.
	 */
	uint64_t curr_value = atomic_load_explicit(&stats->counters[counter],
						   memory_order_relaxed);
	do {
		if (curr_value >= value) {
			break;
		}

	} while (!atomic_compare_exchange_strong(&stats->counters[counter],
						 &curr_value,
						 value));
#else
	/* No CAS available: make the read-modify-write exclusive instead. */
	MAYBE_RWLOCK(&stats->counterlock, isc_rwlocktype_write);
	uint64_t curr_value = getcounter(stats, counter);
	if (curr_value < value) {
		setcounter(stats, counter, value);
	}
	MAYBE_RWUNLOCK(&stats->counterlock, isc_rwlocktype_write);
#endif
}
445 
uint64_t
isc_stats_get_counter(isc_stats_t *stats, isc_statscounter_t counter)
{
	/*
	 * Return the current value of counter 'counter'.  The "read"
	 * lock is sufficient here because getcounter() itself performs
	 * an atomic (or lock-protected) load.
	 */
	REQUIRE(ISC_STATS_VALID(stats));
	REQUIRE(counter < stats->ncounters);

	MAYBE_RWLOCK(&stats->counterlock, isc_rwlocktype_read);
	uint64_t curr_value = getcounter(stats, counter);
	MAYBE_RWUNLOCK(&stats->counterlock, isc_rwlocktype_read);

	return (curr_value);
}
458