/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2005 John Baldwin <jhb@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef __SYS_REFCOUNT_H__
#define __SYS_REFCOUNT_H__

#include <machine/atomic.h>

#ifdef _KERNEL
#include <sys/systm.h>
#else
#include <stdbool.h>
#define	KASSERT(exp, msg)	/* */
#endif

#define	REFCOUNT_SATURATED(val)		(((val) & (1U << 31)) != 0)
#define	REFCOUNT_SATURATION_VALUE	(3U << 30)

/*
 * Attempt to handle reference count overflow and underflow.  Force the counter
 * to stay at the saturation value so that a counter overflow cannot trigger
 * destruction of the containing object and instead leads to a less harmful
 * memory leak.
 */
static __inline void
_refcount_update_saturated(volatile u_int *count)
{
#ifdef INVARIANTS
	panic("refcount %p wraparound", count);
#else
	atomic_store_int(count, REFCOUNT_SATURATION_VALUE);
#endif
}

static __inline void
refcount_init(volatile u_int *count, u_int value)
{
	KASSERT(!REFCOUNT_SATURATED(value),
	    ("invalid initial refcount value %u", value));
	*count = value;
}

static __inline u_int
refcount_acquire(volatile u_int *count)
{
	u_int old;

	old = atomic_fetchadd_int(count, 1);
	if (__predict_false(REFCOUNT_SATURATED(old)))
		_refcount_update_saturated(count);

	return (old);
}

static __inline u_int
refcount_acquiren(volatile u_int *count, u_int n)
{
	u_int old;

	KASSERT(n < REFCOUNT_SATURATION_VALUE / 2,
	    ("refcount_acquiren: n=%u too large", n));
	old = atomic_fetchadd_int(count, n);
	if (__predict_false(REFCOUNT_SATURATED(old)))
		_refcount_update_saturated(count);

	return (old);
}

static __inline __result_use_check bool
refcount_acquire_checked(volatile u_int *count)
{
	u_int lcount;

	for (lcount = *count;;) {
		if (__predict_false(REFCOUNT_SATURATED(lcount + 1)))
			return (false);
		if (__predict_true(atomic_fcmpset_int(count, &lcount,
		    lcount + 1) == 1))
			return (true);
	}
}
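
/*
 * Illustrative sketch: a caller that wants counter overflow reported as an
 * error rather than silently absorbed by the saturation logic can use
 * refcount_acquire_checked().  The "struct obj" type and obj_hold() helper
 * below are hypothetical, and EOVERFLOW assumes <sys/errno.h> is available.
 *
 *	struct obj {
 *		volatile u_int	obj_refs;
 *		...
 *	};
 *
 *	static int
 *	obj_hold(struct obj *o)
 *	{
 *
 *		if (!refcount_acquire_checked(&o->obj_refs))
 *			return (EOVERFLOW);
 *		return (0);
 *	}
 */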

/*
 * This function returns true if the refcount was incremented; otherwise
 * false is returned.
 */
static __inline __result_use_check bool
refcount_acquire_if_gt(volatile u_int *count, u_int n)
{
	u_int old;

	old = *count;
	for (;;) {
		if (old <= n)
			return (false);
		if (__predict_false(REFCOUNT_SATURATED(old)))
			return (true);
		if (atomic_fcmpset_int(count, &old, old + 1))
			return (true);
	}
}

static __inline __result_use_check bool
refcount_acquire_if_not_zero(volatile u_int *count)
{

	return (refcount_acquire_if_gt(count, 0));
}

static __inline bool
refcount_releasen(volatile u_int *count, u_int n)
{
	u_int old;

	KASSERT(n < REFCOUNT_SATURATION_VALUE / 2,
	    ("refcount_releasen: n=%u too large", n));

	atomic_thread_fence_rel();
	old = atomic_fetchadd_int(count, -n);
	if (__predict_false(old < n || REFCOUNT_SATURATED(old))) {
		_refcount_update_saturated(count);
		return (false);
	}
	if (old > n)
		return (false);

	/*
	 * Last reference.  Signal the user to call the destructor.
	 *
	 * Ensure that the destructor sees all updates.  This synchronizes with
	 * release fences from all routines which drop the count.
	 */
	atomic_thread_fence_acq();
	return (true);
}

static __inline bool
refcount_release(volatile u_int *count)
{

	return (refcount_releasen(count, 1));
}

static __inline __result_use_check bool
refcount_release_if_gt(volatile u_int *count, u_int n)
{
	u_int old;

	KASSERT(n > 0,
	    ("refcount_release_if_gt: Use refcount_release for final ref"));
	old = *count;
	for (;;) {
		if (old <= n)
			return (false);
		if (__predict_false(REFCOUNT_SATURATED(old)))
			return (true);
		/*
		 * Paired with acquire fence in refcount_releasen().
		 */
		if (atomic_fcmpset_rel_int(count, &old, old - 1))
			return (true);
	}
}

static __inline __result_use_check bool
refcount_release_if_not_last(volatile u_int *count)
{

	return (refcount_release_if_gt(count, 1));
}

#endif /* !__SYS_REFCOUNT_H__ */
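
/*
 * Illustrative lifecycle sketch: the counter is initialized once before the
 * object becomes visible to other threads, each new reference goes through
 * refcount_acquire(), and the destructor runs only when refcount_release()
 * reports that the last reference was dropped.  The "struct widget" type and
 * the widget_hold()/widget_drop()/widget_free() helpers below are
 * hypothetical.
 *
 *	struct widget {
 *		volatile u_int	w_refs;
 *		...
 *	};
 *
 *	// At creation time, before publishing the object:
 *	//	refcount_init(&w->w_refs, 1);
 *
 *	void
 *	widget_hold(struct widget *w)
 *	{
 *
 *		refcount_acquire(&w->w_refs);
 *	}
 *
 *	void
 *	widget_drop(struct widget *w)
 *	{
 *
 *		if (refcount_release(&w->w_refs))
 *			widget_free(w);
 *	}
 */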