xref: /freebsd/sys/sys/refcount.h (revision e8900461)
1d6fe50b6SJohn Baldwin /*-
2c4e20cadSPedro F. Giffuni  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3c4e20cadSPedro F. Giffuni  *
4d6fe50b6SJohn Baldwin  * Copyright (c) 2005 John Baldwin <jhb@FreeBSD.org>
5d6fe50b6SJohn Baldwin  *
6d6fe50b6SJohn Baldwin  * Redistribution and use in source and binary forms, with or without
7d6fe50b6SJohn Baldwin  * modification, are permitted provided that the following conditions
8d6fe50b6SJohn Baldwin  * are met:
9d6fe50b6SJohn Baldwin  * 1. Redistributions of source code must retain the above copyright
10d6fe50b6SJohn Baldwin  *    notice, this list of conditions and the following disclaimer.
11d6fe50b6SJohn Baldwin  * 2. Redistributions in binary form must reproduce the above copyright
12d6fe50b6SJohn Baldwin  *    notice, this list of conditions and the following disclaimer in the
13d6fe50b6SJohn Baldwin  *    documentation and/or other materials provided with the distribution.
14d6fe50b6SJohn Baldwin  *
15d6fe50b6SJohn Baldwin  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16d6fe50b6SJohn Baldwin  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17d6fe50b6SJohn Baldwin  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18d6fe50b6SJohn Baldwin  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19d6fe50b6SJohn Baldwin  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20d6fe50b6SJohn Baldwin  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21d6fe50b6SJohn Baldwin  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22d6fe50b6SJohn Baldwin  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23d6fe50b6SJohn Baldwin  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24d6fe50b6SJohn Baldwin  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25d6fe50b6SJohn Baldwin  * SUCH DAMAGE.
26d6fe50b6SJohn Baldwin  *
27d6fe50b6SJohn Baldwin  * $FreeBSD$
28d6fe50b6SJohn Baldwin  */
29d6fe50b6SJohn Baldwin 
30d6fe50b6SJohn Baldwin #ifndef __SYS_REFCOUNT_H__
31d6fe50b6SJohn Baldwin #define __SYS_REFCOUNT_H__
32d6fe50b6SJohn Baldwin 
3375f31a5fSDag-Erling Smørgrav #include <machine/atomic.h>
3475f31a5fSDag-Erling Smørgrav 
3575f31a5fSDag-Erling Smørgrav #ifdef _KERNEL
3675f31a5fSDag-Erling Smørgrav #include <sys/systm.h>
3775f31a5fSDag-Erling Smørgrav #else
3810040398SKonstantin Belousov #include <stdbool.h>
3975f31a5fSDag-Erling Smørgrav #define	KASSERT(exp, msg)	/* */
4075f31a5fSDag-Erling Smørgrav #endif
4175f31a5fSDag-Erling Smørgrav 
/*
 * A count with the high bit (bit 31) set is treated as saturated.  The
 * saturation value (3U << 30 == 0xC0000000) sits in the middle of the
 * saturated range so that neither small increments nor decrements move
 * the counter back out of saturation.
 */
#define	REFCOUNT_SATURATED(val)		(((val) & (1U << 31)) != 0)
#define	REFCOUNT_SATURATION_VALUE	(3U << 30)
440b21d894SMark Johnston 
/*
 * Attempt to handle reference count overflow and underflow.  Force the counter
 * to stay at the saturation value so that a counter overflow cannot trigger
 * destruction of the containing object and instead leads to a less harmful
 * memory leak.
 */
static __inline void
_refcount_update_saturated(volatile u_int *count)
{
#ifdef INVARIANTS
	/* Debug kernels treat counter wraparound as a fatal bug. */
	panic("refcount %p wraparound", count);
#else
	/*
	 * Production kernels pin the counter at the saturation value:
	 * the object is leaked rather than freed while still referenced.
	 */
	atomic_store_int(count, REFCOUNT_SATURATION_VALUE);
#endif
}
600b21d894SMark Johnston 
/*
 * Initialize the counter to a caller-supplied value.  The initial value
 * must not already be in the saturated range.
 */
static __inline void
refcount_init(volatile u_int *count, u_int value)
{
	KASSERT(!REFCOUNT_SATURATED(value),
	    ("invalid initial refcount value %u", value));
	atomic_store_int(count, value);
}
68d6fe50b6SJohn Baldwin 
69a67d5408SJeff Roberson static __inline u_int
70e8900461SMark Johnston refcount_load(volatile u_int *count)
71e8900461SMark Johnston {
72e8900461SMark Johnston 	return (atomic_load_int(count));
73e8900461SMark Johnston }
74e8900461SMark Johnston 
/*
 * Acquire one reference.  Returns the pre-increment counter value, as
 * observed by atomic_fetchadd_int().
 */
static __inline u_int
refcount_acquire(volatile u_int *count)
{
	u_int old;

	old = atomic_fetchadd_int(count, 1);
	/* If the count was already saturated, re-pin it (or panic). */
	if (__predict_false(REFCOUNT_SATURATED(old)))
		_refcount_update_saturated(count);

	return (old);
}
86d6fe50b6SJohn Baldwin 
87a67d5408SJeff Roberson static __inline u_int
8833205c60SJeff Roberson refcount_acquiren(volatile u_int *count, u_int n)
8933205c60SJeff Roberson {
9033205c60SJeff Roberson 	u_int old;
9133205c60SJeff Roberson 
9233205c60SJeff Roberson 	KASSERT(n < REFCOUNT_SATURATION_VALUE / 2,
9340617291SHans Petter Selasky 	    ("refcount_acquiren: n=%u too large", n));
9433205c60SJeff Roberson 	old = atomic_fetchadd_int(count, n);
9533205c60SJeff Roberson 	if (__predict_false(REFCOUNT_SATURATED(old)))
9633205c60SJeff Roberson 		_refcount_update_saturated(count);
97a67d5408SJeff Roberson 
98a67d5408SJeff Roberson 	return (old);
9933205c60SJeff Roberson }
10033205c60SJeff Roberson 
/*
 * Acquire one reference, refusing to overflow the counter.  Returns true
 * if a reference was acquired, false if the increment would have pushed
 * the count into the saturated range.
 */
static __inline __result_use_check bool
refcount_acquire_checked(volatile u_int *count)
{
	u_int old;

	old = atomic_load_int(count);
	for (;;) {
		/* Fail rather than saturate the counter. */
		if (__predict_false(REFCOUNT_SATURATED(old + 1)))
			return (false);
		/* atomic_fcmpset_int() refreshes "old" on failure; retry. */
		if (__predict_true(atomic_fcmpset_int(count, &old,
		    old + 1) == 1))
			return (true);
	}
}
115f1cf2b9dSKonstantin Belousov 
/*
 * Acquire a reference only if the count is currently strictly greater
 * than n.  Returns true if a reference was acquired, false otherwise.
 *
 * A saturated count reports success without being modified: the counter
 * is already pinned and the object will be leaked, not destroyed.
 */
static __inline __result_use_check bool
refcount_acquire_if_gt(volatile u_int *count, u_int n)
{
	u_int old;

	old = atomic_load_int(count);
	for (;;) {
		if (old <= n)
			return (false);
		if (__predict_false(REFCOUNT_SATURATED(old)))
			return (true);
		/* atomic_fcmpset_int() refreshes "old" on failure; retry. */
		if (atomic_fcmpset_int(count, &old, old + 1))
			return (true);
	}
}
135f4043145SAndriy Gapon 
13613ff4eb1SKonstantin Belousov static __inline __result_use_check bool
137a67d5408SJeff Roberson refcount_acquire_if_not_zero(volatile u_int *count)
138f4043145SAndriy Gapon {
139f4043145SAndriy Gapon 
140c99d0c58SMark Johnston 	return (refcount_acquire_if_gt(count, 0));
141c99d0c58SMark Johnston }
142c99d0c58SMark Johnston 
/*
 * Release n references.  Returns true if this drops the count to zero,
 * in which case the caller must destroy the object; false otherwise.
 */
static __inline bool
refcount_releasen(volatile u_int *count, u_int n)
{
	u_int old;

	/* A huge n could jump past the saturated range undetected. */
	KASSERT(n < REFCOUNT_SATURATION_VALUE / 2,
	    ("refcount_releasen: n=%u too large", n));

	/*
	 * Release fence: make all prior updates to the object visible
	 * before the count drops.  Pairs with the acquire fence below.
	 */
	atomic_thread_fence_rel();
	old = atomic_fetchadd_int(count, -n);
	/* Underflow (more releases than references) or saturation is a bug. */
	if (__predict_false(old < n || REFCOUNT_SATURATED(old))) {
		_refcount_update_saturated(count);
		return (false);
	}
	if (old > n)
		return (false);

	/*
	 * Last reference.  Signal the user to call the destructor.
	 *
	 * Ensure that the destructor sees all updates. This synchronizes with
	 * release fences from all routines which drop the count.
	 */
	atomic_thread_fence_acq();
	return (true);
}
169c99d0c58SMark Johnston 
170c99d0c58SMark Johnston static __inline bool
171c99d0c58SMark Johnston refcount_release(volatile u_int *count)
172c99d0c58SMark Johnston {
173c99d0c58SMark Johnston 
174c99d0c58SMark Johnston 	return (refcount_releasen(count, 1));
175f4043145SAndriy Gapon }
176f4043145SAndriy Gapon 
/*
 * Template for helpers that conditionally drop one reference.  "cond" may
 * refer to "old" (the currently observed count) and "n" (the caller's
 * threshold).  The decrement is performed with release semantics only
 * while "cond" holds; a saturated count is never modified and the helper
 * reports failure.  No acquire fence is issued here -- callers that can
 * release the last reference must add one themselves.
 */
#define	_refcount_release_if_cond(cond, name)				\
static __inline __result_use_check bool					\
_refcount_release_if_##name(volatile u_int *count, u_int n)		\
{									\
	u_int old;							\
									\
	KASSERT(n > 0, ("%s: zero increment", __func__));		\
	old = atomic_load_int(count);					\
	for (;;) {							\
		if (!(cond))						\
			return (false);					\
		if (__predict_false(REFCOUNT_SATURATED(old)))		\
			return (false);					\
		if (atomic_fcmpset_rel_int(count, &old, old - 1))	\
			return (true);					\
	}								\
}
/* Drop a reference iff the count is strictly greater than n. */
_refcount_release_if_cond(old > n, gt)
/* Drop a reference iff the count is exactly n. */
_refcount_release_if_cond(old == n, eq)
196e8900461SMark Johnston 
19751df5321SJeff Roberson static __inline __result_use_check bool
19851df5321SJeff Roberson refcount_release_if_gt(volatile u_int *count, u_int n)
19951df5321SJeff Roberson {
20051df5321SJeff Roberson 
201e8900461SMark Johnston 	return (_refcount_release_if_gt(count, n));
202e8900461SMark Johnston }
203e8900461SMark Johnston 
/*
 * Drop a reference only if it is the last one (count == 1).  Returns
 * true if the reference was released, in which case the caller must
 * destroy the object.
 */
static __inline __result_use_check bool
refcount_release_if_last(volatile u_int *count)
{

	if (_refcount_release_if_eq(count, 1)) {
		/* See the comment in refcount_releasen(). */
		atomic_thread_fence_acq();
		return (true);
	}
	return (false);
}
21551df5321SJeff Roberson 
216a67d5408SJeff Roberson static __inline __result_use_check bool
217a67d5408SJeff Roberson refcount_release_if_not_last(volatile u_int *count)
218a67d5408SJeff Roberson {
219a67d5408SJeff Roberson 
220e8900461SMark Johnston 	return (_refcount_release_if_gt(count, 1));
221a67d5408SJeff Roberson }
222c99d0c58SMark Johnston 
223d6fe50b6SJohn Baldwin #endif /* !__SYS_REFCOUNT_H__ */
224