/*	$NetBSD: kref.h,v 1.14 2023/02/24 11:02:06 riastradh Exp $	*/

/*-
 * Copyright (c) 2013 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _LINUX_KREF_H_
#define _LINUX_KREF_H_

#include <sys/types.h>
#include <sys/atomic.h>
#include <sys/systm.h>

#include <linux/atomic.h>
#include <linux/refcount.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>

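/*
 * struct kref
 *
 *	Atomic reference count, compatible with the Linux kref API.
 */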
struct kref {
	unsigned int kr_count;
};

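/*
 * kref_init(kref)
 *
 *	Initialize kref with one reference, using a relaxed atomic
 *	store; the object has not yet been published to other threads.
 */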
static inline void
kref_init(struct kref *kref)
{
	atomic_store_relaxed(&kref->kr_count, 1);
}

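/*
 * kref_get(kref)
 *
 *	Atomically acquire another reference to an object the caller
 *	already holds a reference to.  Asserts that the count did not
 *	start at zero -- a released kref must not be resurrected.
 */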
static inline void
kref_get(struct kref *kref)
{
	const unsigned int count __unused =
	    atomic_inc_uint_nv(&kref->kr_count);

	KASSERTMSG((count > 1), "getting released kref");
}

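/*
 * kref_get_unless_zero(kref)
 *
 *	Try to atomically acquire a reference.  Return true on
 *	success, or false if the count was zero (release already in
 *	progress) or saturated at UINT_MAX.
 */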
static inline bool
kref_get_unless_zero(struct kref *kref)
{
	unsigned count;

	do {
		count = atomic_load_relaxed(&kref->kr_count);
		if ((count == 0) || (count == UINT_MAX))
			return false;
	} while (atomic_cas_uint(&kref->kr_count, count, (count + 1)) !=
	    count);

	return true;
}

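/*
 * kref_sub(kref, count, release)
 *
 *	Atomically release count references at once.  If the count
 *	drops to zero, call release(kref) and return 1; otherwise
 *	return 0.  The membar_release/membar_acquire pair guarantees
 *	that all prior uses of the object happen before release runs.
 */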
static inline int
kref_sub(struct kref *kref, unsigned int count, void (*release)(struct kref *))
{
	unsigned int old, new;

	membar_release();

	do {
		old = atomic_load_relaxed(&kref->kr_count);
		KASSERTMSG((count <= old), "overreleasing kref: %u - %u",
		    old, count);
		new = (old - count);
	} while (atomic_cas_uint(&kref->kr_count, old, new) != old);

	if (new == 0) {
		membar_acquire();
		(*release)(kref);
		return 1;
	}

	return 0;
}

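/*
 * kref_put_lock(kref, release, interlock)
 *
 *	Release one reference.  If it was the last, acquire interlock
 *	before the final decrement and call release(kref) with
 *	interlock held, returning 1; as in the Linux API, release is
 *	then responsible for dropping interlock.  Otherwise return 0.
 */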
static inline int
kref_put_lock(struct kref *kref, void (*release)(struct kref *),
    spinlock_t *interlock)
{
	unsigned int old, new;

	membar_release();

	do {
		old = atomic_load_relaxed(&kref->kr_count);
		KASSERT(old > 0);
		if (old == 1) {
			spin_lock(interlock);
			if (atomic_add_int_nv(&kref->kr_count, -1) == 0) {
				membar_acquire();
				(*release)(kref);
				return 1;
			}
			spin_unlock(interlock);
			return 0;
		}
		new = (old - 1);
	} while (atomic_cas_uint(&kref->kr_count, old, new) != old);

	return 0;
}

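/*
 * kref_put(kref, release)
 *
 *	Release one reference.  If it was the last, call release(kref)
 *	and return 1; otherwise return 0.
 */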
static inline int
kref_put(struct kref *kref, void (*release)(struct kref *))
{

	return kref_sub(kref, 1, release);
}

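/*
 * kref_put_mutex(kref, release, interlock)
 *
 *	Like kref_put_lock, but with a struct mutex interlock instead
 *	of a spin lock: release is called with interlock held and is
 *	responsible for dropping it.
 */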
static inline int
kref_put_mutex(struct kref *kref, void (*release)(struct kref *),
    struct mutex *interlock)
{
	unsigned int old, new;

	membar_release();

	do {
		old = atomic_load_relaxed(&kref->kr_count);
		KASSERT(old > 0);
		if (old == 1) {
			mutex_lock(interlock);
			if (atomic_add_int_nv(&kref->kr_count, -1) == 0) {
				membar_acquire();
				(*release)(kref);
				return 1;
			}
			mutex_unlock(interlock);
			return 0;
		}
		new = (old - 1);
	} while (atomic_cas_uint(&kref->kr_count, old, new) != old);

	return 0;
}

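/*
 * kref_read(kref)
 *
 *	Return a relaxed snapshot of the reference count.  The value
 *	may be stale by the time the caller inspects it, so it is
 *	suitable only for diagnostics and heuristics.
 */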
static inline unsigned
kref_read(const struct kref *kref)
{

	return atomic_load_relaxed(&kref->kr_count);
}

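/*
 * Sketch of typical use.  The struct frob, frob_release, and
 * kmem_free details below are illustrative only, not part of this
 * header:
 *
 *	struct frob {
 *		struct kref	f_ref;
 *		...
 *	};
 *
 *	static void
 *	frob_release(struct kref *kref)
 *	{
 *		struct frob *f = container_of(kref, struct frob, f_ref);
 *
 *		kmem_free(f, sizeof(*f));
 *	}
 *
 *	kref_init(&f->f_ref);			object created, count = 1
 *	kref_get(&f->f_ref);			share with another user
 *	kref_put(&f->f_ref, frob_release);	each user drops its ref
 */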
/*
 * Not native to Linux.  Mostly used for assertions...
 */

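/*
 * kref_referenced_p(kref)
 *
 *	True if kref currently holds at least one reference.  Reads
 *	the count without synchronization, so the answer is meaningful
 *	only for diagnostic assertions.
 */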
static inline bool
kref_referenced_p(struct kref *kref)
{

	return (0 < kref->kr_count);
}

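/*
 * kref_exclusive_p(kref)
 *
 *	True if the caller holds the only reference to kref.  Asserts
 *	that the count is nonzero; like kref_referenced_p, the
 *	unsynchronized read is intended for assertions only.
 */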
static inline bool
kref_exclusive_p(struct kref *kref)
{

	KASSERT(0 < kref->kr_count);
	return (kref->kr_count == 1);
}

#endif  /* _LINUX_KREF_H_ */