1 /*-
2  * SPDX-License-Identifier: BSD-3-Clause
3  *
4  * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
5  * Copyright (c) 2008-2011, by Randall Stewart. All rights reserved.
6  * Copyright (c) 2008-2011, by Michael Tuexen. All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions are met:
10  *
11  * a) Redistributions of source code must retain the above copyright notice,
12  *   this list of conditions and the following disclaimer.
13  *
14  * b) Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in
16  *   the documentation and/or other materials provided with the distribution.
17  *
18  * c) Neither the name of Cisco Systems, Inc. nor the names of its
19  *    contributors may be used to endorse or promote products derived
20  *    from this software without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
24  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32  * THE POSSIBILITY OF SUCH DAMAGE.
33  */
34 #ifndef __sctp_process_lock_h__
35 #define __sctp_process_lock_h__
36 
37 /*
38  * Need to yet define five atomic fuctions or
39  * their equivalant.
40  * - atomic_add_int(&foo, val) - add atomically the value
41  * - atomic_fetchadd_int(&foo, val) - does same as atomic_add_int
42  *				      but value it was is returned.
43  * - atomic_subtract_int(&foo, val) - can be made from atomic_add_int()
44  *
45  * - atomic_cmpset_int(&foo, value, newvalue) - Does a set of newvalue
46  *					        in foo if and only if
47  *					        foo is value. Returns 0
48  *					        on success.
49  */
50 
51 #ifdef SCTP_PER_SOCKET_LOCKING
52 /*
53  * per socket level locking
54  */
55 
/*
 * In per-socket-locking mode the global INP-info lock is a no-op: the
 * single per-socket lock already serializes access.  The two branches of
 * the former __Userspace_os_Windows split were byte-identical, so the
 * platform conditional has been removed.
 */
/* Lock for INFO stuff */
#define SCTP_INP_INFO_LOCK_INIT()
#define SCTP_INP_INFO_RLOCK()
#define SCTP_INP_INFO_RUNLOCK()
#define SCTP_INP_INFO_WLOCK()
#define SCTP_INP_INFO_WUNLOCK()
#define SCTP_INP_INFO_LOCK_DESTROY()
#define SCTP_IPI_COUNT_INIT()
#define SCTP_IPI_COUNT_DESTROY()
76 
/* All endpoint/association locks are no-ops in per-socket-locking mode. */
#define SCTP_TCB_SEND_LOCK_INIT(_tcb)
#define SCTP_TCB_SEND_LOCK_DESTROY(_tcb)
#define SCTP_TCB_SEND_LOCK(_tcb)
#define SCTP_TCB_SEND_UNLOCK(_tcb)

/* Lock for INP */
#define SCTP_INP_LOCK_INIT(_inp)
#define SCTP_INP_LOCK_DESTROY(_inp)

#define SCTP_INP_RLOCK(_inp)
#define SCTP_INP_RUNLOCK(_inp)
#define SCTP_INP_WLOCK(_inp)
/* FIX: parameter was misspelled "_inep"; harmless for an empty macro but
 * corrected for consistency with the other definitions. */
#define SCTP_INP_WUNLOCK(_inp)
#define SCTP_INP_INCR_REF(_inp)
#define SCTP_INP_DECR_REF(_inp)

#define SCTP_ASOC_CREATE_LOCK_INIT(_inp)
#define SCTP_ASOC_CREATE_LOCK_DESTROY(_inp)
#define SCTP_ASOC_CREATE_LOCK(_inp)
#define SCTP_ASOC_CREATE_UNLOCK(_inp)

#define SCTP_INP_READ_INIT(_inp)
#define SCTP_INP_READ_DESTROY(_inp)
#define SCTP_INP_READ_LOCK(_inp)
#define SCTP_INP_READ_UNLOCK(_inp)

/* Lock for TCB */
#define SCTP_TCB_LOCK_INIT(_tcb)
#define SCTP_TCB_LOCK_DESTROY(_tcb)
#define SCTP_TCB_LOCK(_tcb)
/* "Try" always succeeds since there is nothing to contend on. */
#define SCTP_TCB_TRYLOCK(_tcb) 1
#define SCTP_TCB_UNLOCK(_tcb)
#define SCTP_TCB_UNLOCK_IFOWNED(_tcb)
#define SCTP_TCB_LOCK_ASSERT(_tcb)
112 #else
113 /*
114  * per tcb level locking
115  */
116 #define SCTP_IPI_COUNT_INIT()
117 
118 #if defined(__Userspace_os_Windows)
/* Address work-queue lock (Windows critical-section implementation). */
#define SCTP_WQ_ADDR_INIT() \
	InitializeCriticalSection(&SCTP_BASE_INFO(wq_addr_mtx))
#define SCTP_WQ_ADDR_DESTROY() \
	DeleteCriticalSection(&SCTP_BASE_INFO(wq_addr_mtx))
#define SCTP_WQ_ADDR_LOCK() \
	EnterCriticalSection(&SCTP_BASE_INFO(wq_addr_mtx))
#define SCTP_WQ_ADDR_UNLOCK() \
	LeaveCriticalSection(&SCTP_BASE_INFO(wq_addr_mtx))


/* Global endpoint-list lock.  A single critical section backs both the
 * "R" and "W" variants, so there is no reader concurrency here. */
#define SCTP_INP_INFO_LOCK_INIT() \
	InitializeCriticalSection(&SCTP_BASE_INFO(ipi_ep_mtx))
#define SCTP_INP_INFO_LOCK_DESTROY() \
	DeleteCriticalSection(&SCTP_BASE_INFO(ipi_ep_mtx))
#define SCTP_INP_INFO_RLOCK() \
	EnterCriticalSection(&SCTP_BASE_INFO(ipi_ep_mtx))
#define SCTP_INP_INFO_TRYLOCK()	\
	TryEnterCriticalSection(&SCTP_BASE_INFO(ipi_ep_mtx))
#define SCTP_INP_INFO_WLOCK() \
	EnterCriticalSection(&SCTP_BASE_INFO(ipi_ep_mtx))
#define SCTP_INP_INFO_RUNLOCK() \
	LeaveCriticalSection(&SCTP_BASE_INFO(ipi_ep_mtx))
#define SCTP_INP_INFO_WUNLOCK()	\
	LeaveCriticalSection(&SCTP_BASE_INFO(ipi_ep_mtx))

#define SCTP_IP_PKTLOG_INIT() \
	InitializeCriticalSection(&SCTP_BASE_INFO(ipi_pktlog_mtx))
/* FIX: removed the stray space before '(' — with the space this defined an
 * object-like macro named SCTP_IP_PKTLOG_DESTROY expanding to
 * "() DeleteCriticalSection(...)", so SCTP_IP_PKTLOG_DESTROY() at a call
 * site produced invalid code. */
#define SCTP_IP_PKTLOG_DESTROY() \
	DeleteCriticalSection(&SCTP_BASE_INFO(ipi_pktlog_mtx))
#define SCTP_IP_PKTLOG_LOCK() \
	EnterCriticalSection(&SCTP_BASE_INFO(ipi_pktlog_mtx))
#define SCTP_IP_PKTLOG_UNLOCK() \
	LeaveCriticalSection(&SCTP_BASE_INFO(ipi_pktlog_mtx))
152 
153 /*
154  * The INP locks we will use for locking an SCTP endpoint, so for example if
155  * we want to change something at the endpoint level for example random_store
156  * or cookie secrets we lock the INP level.
157  */
/* Per-endpoint read-queue lock (Windows). */
#define SCTP_INP_READ_INIT(_inp) \
	InitializeCriticalSection(&(_inp)->inp_rdata_mtx)
#define SCTP_INP_READ_DESTROY(_inp) \
	DeleteCriticalSection(&(_inp)->inp_rdata_mtx)
#define SCTP_INP_READ_LOCK(_inp) \
	EnterCriticalSection(&(_inp)->inp_rdata_mtx)
#define SCTP_INP_READ_UNLOCK(_inp) \
	LeaveCriticalSection(&(_inp)->inp_rdata_mtx)

#define SCTP_INP_LOCK_INIT(_inp) \
	InitializeCriticalSection(&(_inp)->inp_mtx)
#define SCTP_INP_LOCK_DESTROY(_inp) \
	DeleteCriticalSection(&(_inp)->inp_mtx)
#ifdef SCTP_LOCK_LOGGING
/* FIX: in SCTP_INP_RLOCK, EnterCriticalSection was indented as if it were
 * part of the 'if' body.  It is (and must be) executed unconditionally —
 * exactly as in SCTP_INP_WLOCK below — so the indentation was corrected
 * to match the actual behavior. */
#define SCTP_INP_RLOCK(_inp) do { 						\
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOCK_LOGGING_ENABLE)	\
		sctp_log_lock(_inp, NULL, SCTP_LOG_LOCK_INP);			\
	EnterCriticalSection(&(_inp)->inp_mtx);					\
} while (0)
#define SCTP_INP_WLOCK(_inp) do { 						\
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOCK_LOGGING_ENABLE)	\
		sctp_log_lock(_inp, NULL, SCTP_LOG_LOCK_INP);			\
	EnterCriticalSection(&(_inp)->inp_mtx);					\
} while (0)
#else
#define SCTP_INP_RLOCK(_inp) \
	EnterCriticalSection(&(_inp)->inp_mtx)
#define SCTP_INP_WLOCK(_inp) \
	EnterCriticalSection(&(_inp)->inp_mtx)
#endif

#define SCTP_TCB_SEND_LOCK_INIT(_tcb) \
	InitializeCriticalSection(&(_tcb)->tcb_send_mtx)
#define SCTP_TCB_SEND_LOCK_DESTROY(_tcb) \
	DeleteCriticalSection(&(_tcb)->tcb_send_mtx)
#define SCTP_TCB_SEND_LOCK(_tcb) \
	EnterCriticalSection(&(_tcb)->tcb_send_mtx)
#define SCTP_TCB_SEND_UNLOCK(_tcb) \
	LeaveCriticalSection(&(_tcb)->tcb_send_mtx)
197 
/* Endpoint reference counting via the project-provided atomic_add_int. */
#define SCTP_INP_INCR_REF(_inp) atomic_add_int(&((_inp)->refcount), 1)
#define SCTP_INP_DECR_REF(_inp) atomic_add_int(&((_inp)->refcount), -1)

/* Serializes association creation on an endpoint. */
#define SCTP_ASOC_CREATE_LOCK_INIT(_inp) \
	InitializeCriticalSection(&(_inp)->inp_create_mtx)
#define SCTP_ASOC_CREATE_LOCK_DESTROY(_inp) \
	DeleteCriticalSection(&(_inp)->inp_create_mtx)
#ifdef SCTP_LOCK_LOGGING
#define SCTP_ASOC_CREATE_LOCK(_inp) do {					\
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOCK_LOGGING_ENABLE)	\
		sctp_log_lock(_inp, NULL, SCTP_LOG_LOCK_CREATE);		\
	EnterCriticalSection(&(_inp)->inp_create_mtx);				\
} while (0)
#else
#define SCTP_ASOC_CREATE_LOCK(_inp) \
	EnterCriticalSection(&(_inp)->inp_create_mtx)
#endif

#define SCTP_INP_RUNLOCK(_inp) \
	LeaveCriticalSection(&(_inp)->inp_mtx)
#define SCTP_INP_WUNLOCK(_inp) \
	LeaveCriticalSection(&(_inp)->inp_mtx)
#define SCTP_ASOC_CREATE_UNLOCK(_inp) \
	LeaveCriticalSection(&(_inp)->inp_create_mtx)

/*
 * For the majority of things (once we have found the association) we will
 * lock the actual association mutex. This will protect all the association
 * level queues and streams and such. We will need to lock the socket layer
 * when we stuff data up into the receiving sb_mb. I.e. we will need to do an
 * extra SOCKBUF_LOCK(&so->so_rcv) even though the association is locked.
 */

#define SCTP_TCB_LOCK_INIT(_tcb) \
	InitializeCriticalSection(&(_tcb)->tcb_mtx)
#define SCTP_TCB_LOCK_DESTROY(_tcb) \
	DeleteCriticalSection(&(_tcb)->tcb_mtx)
#ifdef SCTP_LOCK_LOGGING
#define SCTP_TCB_LOCK(_tcb) do {						\
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOCK_LOGGING_ENABLE)	\
		sctp_log_lock(_tcb->sctp_ep, _tcb, SCTP_LOG_LOCK_TCB);		\
	EnterCriticalSection(&(_tcb)->tcb_mtx);					\
} while (0)
#else
#define SCTP_TCB_LOCK(_tcb) \
	EnterCriticalSection(&(_tcb)->tcb_mtx)
#endif
/* TryEnterCriticalSection returns non-zero on success. */
#define SCTP_TCB_TRYLOCK(_tcb) 	((TryEnterCriticalSection(&(_tcb)->tcb_mtx)))
#define SCTP_TCB_UNLOCK(_tcb) \
	LeaveCriticalSection(&(_tcb)->tcb_mtx)
/* No owner tracking available here, so the assertion is a no-op on Windows. */
#define SCTP_TCB_LOCK_ASSERT(_tcb)
249 
250 #else /* all Userspaces except Windows */
/* Address work-queue lock (pthread implementation). */
#define SCTP_WQ_ADDR_INIT() \
	(void)pthread_mutex_init(&SCTP_BASE_INFO(wq_addr_mtx), &SCTP_BASE_VAR(mtx_attr))
#define SCTP_WQ_ADDR_DESTROY() \
	(void)pthread_mutex_destroy(&SCTP_BASE_INFO(wq_addr_mtx))
#ifdef INVARIANTS
/* With INVARIANTS, lock/unlock return codes are checked via KASSERT. */
#define SCTP_WQ_ADDR_LOCK() \
	KASSERT(pthread_mutex_lock(&SCTP_BASE_INFO(wq_addr_mtx)) == 0, ("%s: wq_addr_mtx already locked", __func__))
#define SCTP_WQ_ADDR_UNLOCK() \
	KASSERT(pthread_mutex_unlock(&SCTP_BASE_INFO(wq_addr_mtx)) == 0, ("%s: wq_addr_mtx not locked", __func__))
#else
#define SCTP_WQ_ADDR_LOCK() \
	(void)pthread_mutex_lock(&SCTP_BASE_INFO(wq_addr_mtx))
#define SCTP_WQ_ADDR_UNLOCK() \
	(void)pthread_mutex_unlock(&SCTP_BASE_INFO(wq_addr_mtx))
#endif

/* Global endpoint-list lock.  A single mutex backs both the "R" and "W"
 * variants, so there is no reader concurrency here. */
#define SCTP_INP_INFO_LOCK_INIT() \
	(void)pthread_mutex_init(&SCTP_BASE_INFO(ipi_ep_mtx), &SCTP_BASE_VAR(mtx_attr))
#define SCTP_INP_INFO_LOCK_DESTROY() \
	(void)pthread_mutex_destroy(&SCTP_BASE_INFO(ipi_ep_mtx))
#ifdef INVARIANTS
#define SCTP_INP_INFO_RLOCK() \
	KASSERT(pthread_mutex_lock(&SCTP_BASE_INFO(ipi_ep_mtx)) == 0, ("%s: ipi_ep_mtx already locked", __func__))
#define SCTP_INP_INFO_WLOCK() \
	KASSERT(pthread_mutex_lock(&SCTP_BASE_INFO(ipi_ep_mtx)) == 0, ("%s: ipi_ep_mtx already locked", __func__))
#define SCTP_INP_INFO_RUNLOCK() \
	KASSERT(pthread_mutex_unlock(&SCTP_BASE_INFO(ipi_ep_mtx)) == 0, ("%s: ipi_ep_mtx not locked", __func__))
#define SCTP_INP_INFO_WUNLOCK() \
	KASSERT(pthread_mutex_unlock(&SCTP_BASE_INFO(ipi_ep_mtx)) == 0, ("%s: ipi_ep_mtx not locked", __func__))
#else
#define SCTP_INP_INFO_RLOCK() \
	(void)pthread_mutex_lock(&SCTP_BASE_INFO(ipi_ep_mtx))
#define SCTP_INP_INFO_WLOCK() \
	(void)pthread_mutex_lock(&SCTP_BASE_INFO(ipi_ep_mtx))
#define SCTP_INP_INFO_RUNLOCK() \
	(void)pthread_mutex_unlock(&SCTP_BASE_INFO(ipi_ep_mtx))
#define SCTP_INP_INFO_WUNLOCK() \
	(void)pthread_mutex_unlock(&SCTP_BASE_INFO(ipi_ep_mtx))
#endif
/* pthread_mutex_trylock returns 0 on success, so negate to yield a
 * non-zero "got the lock" result, matching the Windows variant. */
#define SCTP_INP_INFO_TRYLOCK() \
	(!(pthread_mutex_trylock(&SCTP_BASE_INFO(ipi_ep_mtx))))

#define SCTP_IP_PKTLOG_INIT() \
	(void)pthread_mutex_init(&SCTP_BASE_INFO(ipi_pktlog_mtx), &SCTP_BASE_VAR(mtx_attr))
#define SCTP_IP_PKTLOG_DESTROY() \
	(void)pthread_mutex_destroy(&SCTP_BASE_INFO(ipi_pktlog_mtx))
#ifdef INVARIANTS
#define SCTP_IP_PKTLOG_LOCK() \
	KASSERT(pthread_mutex_lock(&SCTP_BASE_INFO(ipi_pktlog_mtx)) == 0, ("%s: ipi_pktlog_mtx already locked", __func__))
#define SCTP_IP_PKTLOG_UNLOCK() \
	KASSERT(pthread_mutex_unlock(&SCTP_BASE_INFO(ipi_pktlog_mtx)) == 0, ("%s: ipi_pktlog_mtx not locked", __func__))
#else
#define SCTP_IP_PKTLOG_LOCK() \
	(void)pthread_mutex_lock(&SCTP_BASE_INFO(ipi_pktlog_mtx))
#define SCTP_IP_PKTLOG_UNLOCK() \
	(void)pthread_mutex_unlock(&SCTP_BASE_INFO(ipi_pktlog_mtx))
#endif


/*
 * The INP locks we will use for locking an SCTP endpoint, so for example if
 * we want to change something at the endpoint level for example random_store
 * or cookie secrets we lock the INP level.
 */
#define SCTP_INP_READ_INIT(_inp) \
	(void)pthread_mutex_init(&(_inp)->inp_rdata_mtx, &SCTP_BASE_VAR(mtx_attr))
#define SCTP_INP_READ_DESTROY(_inp) \
	(void)pthread_mutex_destroy(&(_inp)->inp_rdata_mtx)
#ifdef INVARIANTS
#define SCTP_INP_READ_LOCK(_inp) \
	KASSERT(pthread_mutex_lock(&(_inp)->inp_rdata_mtx) == 0, ("%s: inp_rdata_mtx already locked", __func__))
#define SCTP_INP_READ_UNLOCK(_inp) \
	KASSERT(pthread_mutex_unlock(&(_inp)->inp_rdata_mtx) == 0, ("%s: inp_rdata_mtx not locked", __func__))
#else
#define SCTP_INP_READ_LOCK(_inp) \
	(void)pthread_mutex_lock(&(_inp)->inp_rdata_mtx)
#define SCTP_INP_READ_UNLOCK(_inp) \
	(void)pthread_mutex_unlock(&(_inp)->inp_rdata_mtx)
#endif
330 
#define SCTP_INP_LOCK_INIT(_inp) \
	(void)pthread_mutex_init(&(_inp)->inp_mtx, &SCTP_BASE_VAR(mtx_attr))
#define SCTP_INP_LOCK_DESTROY(_inp) \
	(void)pthread_mutex_destroy(&(_inp)->inp_mtx)
#ifdef INVARIANTS
#ifdef SCTP_LOCK_LOGGING
/* FIX: the KASSERT statements below were missing their terminating ';',
 * and the WLOCK variant was also missing the '\' line continuation before
 * "} while (0)", so this configuration (INVARIANTS + SCTP_LOCK_LOGGING)
 * failed to compile. */
#define SCTP_INP_RLOCK(_inp) do {									\
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOCK_LOGGING_ENABLE)				\
		sctp_log_lock(_inp, NULL, SCTP_LOG_LOCK_INP);						\
	KASSERT(pthread_mutex_lock(&(_inp)->inp_mtx) == 0, ("%s: inp_mtx already locked", __func__));	\
} while (0)
#define SCTP_INP_WLOCK(_inp) do {									\
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOCK_LOGGING_ENABLE)				\
		sctp_log_lock(_inp, NULL, SCTP_LOG_LOCK_INP);						\
	KASSERT(pthread_mutex_lock(&(_inp)->inp_mtx) == 0, ("%s: inp_mtx already locked", __func__));	\
} while (0)
#else
#define SCTP_INP_RLOCK(_inp) \
	KASSERT(pthread_mutex_lock(&(_inp)->inp_mtx) == 0, ("%s: inp_mtx already locked", __func__))
#define SCTP_INP_WLOCK(_inp) \
	KASSERT(pthread_mutex_lock(&(_inp)->inp_mtx) == 0, ("%s: inp_mtx already locked", __func__))
#endif
#define SCTP_INP_RUNLOCK(_inp) \
	KASSERT(pthread_mutex_unlock(&(_inp)->inp_mtx) == 0, ("%s: inp_mtx not locked", __func__))
#define SCTP_INP_WUNLOCK(_inp) \
	KASSERT(pthread_mutex_unlock(&(_inp)->inp_mtx) == 0, ("%s: inp_mtx not locked", __func__))
#else
#ifdef SCTP_LOCK_LOGGING
#define SCTP_INP_RLOCK(_inp) do {						\
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOCK_LOGGING_ENABLE)	\
		sctp_log_lock(_inp, NULL, SCTP_LOG_LOCK_INP);			\
	(void)pthread_mutex_lock(&(_inp)->inp_mtx);				\
} while (0)
#define SCTP_INP_WLOCK(_inp) do {						\
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOCK_LOGGING_ENABLE)	\
		sctp_log_lock(_inp, NULL, SCTP_LOG_LOCK_INP);			\
	(void)pthread_mutex_lock(&(_inp)->inp_mtx);				\
} while (0)
#else
#define SCTP_INP_RLOCK(_inp) \
	(void)pthread_mutex_lock(&(_inp)->inp_mtx)
#define SCTP_INP_WLOCK(_inp) \
	(void)pthread_mutex_lock(&(_inp)->inp_mtx)
#endif
#define SCTP_INP_RUNLOCK(_inp) \
	(void)pthread_mutex_unlock(&(_inp)->inp_mtx)
#define SCTP_INP_WUNLOCK(_inp) \
	(void)pthread_mutex_unlock(&(_inp)->inp_mtx)
#endif
/* Endpoint reference counting via the project-provided atomic_add_int. */
#define SCTP_INP_INCR_REF(_inp) atomic_add_int(&((_inp)->refcount), 1)
#define SCTP_INP_DECR_REF(_inp) atomic_add_int(&((_inp)->refcount), -1)

/* Per-association send-queue lock (pthread implementation). */
#define SCTP_TCB_SEND_LOCK_INIT(_tcb) \
	(void)pthread_mutex_init(&(_tcb)->tcb_send_mtx, &SCTP_BASE_VAR(mtx_attr))
#define SCTP_TCB_SEND_LOCK_DESTROY(_tcb) \
	(void)pthread_mutex_destroy(&(_tcb)->tcb_send_mtx)
#ifdef INVARIANTS
#define SCTP_TCB_SEND_LOCK(_tcb) \
	KASSERT(pthread_mutex_lock(&(_tcb)->tcb_send_mtx) == 0, ("%s: tcb_send_mtx already locked", __func__))
#define SCTP_TCB_SEND_UNLOCK(_tcb) \
	KASSERT(pthread_mutex_unlock(&(_tcb)->tcb_send_mtx) == 0, ("%s: tcb_send_mtx not locked", __func__))
#else
#define SCTP_TCB_SEND_LOCK(_tcb) \
	(void)pthread_mutex_lock(&(_tcb)->tcb_send_mtx)
#define SCTP_TCB_SEND_UNLOCK(_tcb) \
	(void)pthread_mutex_unlock(&(_tcb)->tcb_send_mtx)
#endif
398 
/* Serializes association creation on an endpoint (pthread implementation). */
#define SCTP_ASOC_CREATE_LOCK_INIT(_inp) \
	(void)pthread_mutex_init(&(_inp)->inp_create_mtx, &SCTP_BASE_VAR(mtx_attr))
#define SCTP_ASOC_CREATE_LOCK_DESTROY(_inp) \
	(void)pthread_mutex_destroy(&(_inp)->inp_create_mtx)
#ifdef INVARIANTS
#ifdef SCTP_LOCK_LOGGING
/* FIX: the KASSERT statement was missing its terminating ';' before the
 * '\'-continued "} while (0)", breaking compilation under
 * INVARIANTS + SCTP_LOCK_LOGGING. */
#define SCTP_ASOC_CREATE_LOCK(_inp) do {										\
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOCK_LOGGING_ENABLE)						\
		sctp_log_lock(_inp, NULL, SCTP_LOG_LOCK_CREATE);							\
	KASSERT(pthread_mutex_lock(&(_inp)->inp_create_mtx) == 0, ("%s: inp_create_mtx already locked", __func__));	\
} while (0)
#else
#define SCTP_ASOC_CREATE_LOCK(_inp) \
	KASSERT(pthread_mutex_lock(&(_inp)->inp_create_mtx) == 0, ("%s: inp_create_mtx already locked", __func__))
#endif
#define SCTP_ASOC_CREATE_UNLOCK(_inp) \
	KASSERT(pthread_mutex_unlock(&(_inp)->inp_create_mtx) == 0, ("%s: inp_create_mtx not locked", __func__))
#else
#ifdef SCTP_LOCK_LOGGING
#define SCTP_ASOC_CREATE_LOCK(_inp) do {					\
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOCK_LOGGING_ENABLE)	\
		sctp_log_lock(_inp, NULL, SCTP_LOG_LOCK_CREATE);		\
	(void)pthread_mutex_lock(&(_inp)->inp_create_mtx);			\
} while (0)
#else
#define SCTP_ASOC_CREATE_LOCK(_inp) \
	(void)pthread_mutex_lock(&(_inp)->inp_create_mtx)
#endif
#define SCTP_ASOC_CREATE_UNLOCK(_inp) \
	(void)pthread_mutex_unlock(&(_inp)->inp_create_mtx)
#endif
430 /*
431  * For the majority of things (once we have found the association) we will
432  * lock the actual association mutex. This will protect all the assoiciation
433  * level queues and streams and such. We will need to lock the socket layer
434  * when we stuff data up into the receiving sb_mb. I.e. we will need to do an
435  * extra SOCKBUF_LOCK(&so->so_rcv) even though the association is locked.
436  */
437 
/* Per-association (TCB) lock (pthread implementation). */
#define SCTP_TCB_LOCK_INIT(_tcb) \
	(void)pthread_mutex_init(&(_tcb)->tcb_mtx, &SCTP_BASE_VAR(mtx_attr))
#define SCTP_TCB_LOCK_DESTROY(_tcb) \
	(void)pthread_mutex_destroy(&(_tcb)->tcb_mtx)
#ifdef INVARIANTS
#ifdef SCTP_LOCK_LOGGING
/* FIX: the KASSERT statement was missing its terminating ';' before the
 * '\'-continued "} while (0)", breaking compilation under
 * INVARIANTS + SCTP_LOCK_LOGGING. */
#define SCTP_TCB_LOCK(_tcb) do {									\
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOCK_LOGGING_ENABLE) 				\
		sctp_log_lock(_tcb->sctp_ep, _tcb, SCTP_LOG_LOCK_TCB);					\
	KASSERT(pthread_mutex_lock(&(_tcb)->tcb_mtx) == 0, ("%s: tcb_mtx already locked", __func__));	\
} while (0)
#else
#define SCTP_TCB_LOCK(_tcb) \
	KASSERT(pthread_mutex_lock(&(_tcb)->tcb_mtx) == 0, ("%s: tcb_mtx already locked", __func__))
#endif
#define SCTP_TCB_UNLOCK(_tcb) \
	KASSERT(pthread_mutex_unlock(&(_tcb)->tcb_mtx) == 0, ("%s: tcb_mtx not locked", __func__))
#else
#ifdef SCTP_LOCK_LOGGING
#define SCTP_TCB_LOCK(_tcb) do {						\
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOCK_LOGGING_ENABLE) 	\
		sctp_log_lock(_tcb->sctp_ep, _tcb, SCTP_LOG_LOCK_TCB);		\
	(void)pthread_mutex_lock(&(_tcb)->tcb_mtx);				\
} while (0)
#else
#define SCTP_TCB_LOCK(_tcb) \
	(void)pthread_mutex_lock(&(_tcb)->tcb_mtx)
#endif
#define SCTP_TCB_UNLOCK(_tcb) (void)pthread_mutex_unlock(&(_tcb)->tcb_mtx)
#endif
/* NOTE(review): this assert relies on pthread_mutex_trylock returning EBUSY
 * for an already-held mutex; with a recursive mutex attribute the trylock
 * would succeed (and take the lock) instead — verify mtx_attr's type. */
#define SCTP_TCB_LOCK_ASSERT(_tcb) \
	KASSERT(pthread_mutex_trylock(&(_tcb)->tcb_mtx) == EBUSY, ("%s: tcb_mtx not locked", __func__))
/* Returns non-zero when the lock was acquired (trylock returns 0 on success). */
#define SCTP_TCB_TRYLOCK(_tcb) (!(pthread_mutex_trylock(&(_tcb)->tcb_mtx)))
471 #endif
472 
473 #endif /* SCTP_PER_SOCKET_LOCKING */
474 
475 
476 /*
477  * common locks
478  */
479 
/* copied over to compile */
/* Lock contention cannot be queried with these implementations, so all
 * three report "not contended". */
#define SCTP_INP_LOCK_CONTENDED(_inp) (0) /* Don't know if this is possible */
#define SCTP_INP_READ_CONTENDED(_inp) (0) /* Don't know if this is possible */
#define SCTP_ASOC_CREATE_LOCK_CONTENDED(_inp) (0) /* Don't know if this is possible */

/* socket locks */

#if defined(__Userspace_os_Windows)
/* No owner tracking available for critical sections here: assert is a no-op. */
#define SOCKBUF_LOCK_ASSERT(_so_buf)
#define SOCKBUF_LOCK(_so_buf) \
	EnterCriticalSection(&(_so_buf)->sb_mtx)
#define SOCKBUF_UNLOCK(_so_buf) \
	LeaveCriticalSection(&(_so_buf)->sb_mtx)
#define SOCK_LOCK(_so) \
	SOCKBUF_LOCK(&(_so)->so_rcv)
#define SOCK_UNLOCK(_so) \
	SOCKBUF_UNLOCK(&(_so)->so_rcv)
#else
/* NOTE(review): defined outside the INVARIANTS guard, so when KASSERT is a
 * no-op without INVARIANTS the trylock is never executed — confirm KASSERT's
 * non-INVARIANTS definition discards its arguments. */
#define SOCKBUF_LOCK_ASSERT(_so_buf) \
	KASSERT(pthread_mutex_trylock(SOCKBUF_MTX(_so_buf)) == EBUSY, ("%s: socket buffer not locked", __func__))
#ifdef INVARIANTS
#define SOCKBUF_LOCK(_so_buf) \
	KASSERT(pthread_mutex_lock(SOCKBUF_MTX(_so_buf)) == 0, ("%s: sockbuf_mtx already locked", __func__))
#define SOCKBUF_UNLOCK(_so_buf) \
	KASSERT(pthread_mutex_unlock(SOCKBUF_MTX(_so_buf)) == 0, ("%s: sockbuf_mtx not locked", __func__))
#else
/* FIX: added the (void) casts used by every other non-INVARIANTS pthread
 * lock macro in this file to mark the ignored return value explicitly. */
#define SOCKBUF_LOCK(_so_buf) \
	(void)pthread_mutex_lock(SOCKBUF_MTX(_so_buf))
#define SOCKBUF_UNLOCK(_so_buf) \
	(void)pthread_mutex_unlock(SOCKBUF_MTX(_so_buf))
#endif
#define SOCK_LOCK(_so) \
	SOCKBUF_LOCK(&(_so)->so_rcv)
#define SOCK_UNLOCK(_so) \
	SOCKBUF_UNLOCK(&(_so)->so_rcv)
#endif

/* Statistics logging locks are not needed in this build: all no-ops. */
#define SCTP_STATLOG_INIT_LOCK()
#define SCTP_STATLOG_LOCK()
#define SCTP_STATLOG_UNLOCK()
#define SCTP_STATLOG_DESTROY()
521 
522 #if defined(__Userspace_os_Windows)
/* address list locks */
/* A single critical section backs both the "R" and "W" variants, so there
 * is no reader concurrency here (Windows implementation). */
#define SCTP_IPI_ADDR_INIT() \
	InitializeCriticalSection(&SCTP_BASE_INFO(ipi_addr_mtx))
#define SCTP_IPI_ADDR_DESTROY() \
	DeleteCriticalSection(&SCTP_BASE_INFO(ipi_addr_mtx))
#define SCTP_IPI_ADDR_RLOCK() \
	EnterCriticalSection(&SCTP_BASE_INFO(ipi_addr_mtx))
#define SCTP_IPI_ADDR_RUNLOCK() \
	LeaveCriticalSection(&SCTP_BASE_INFO(ipi_addr_mtx))
#define SCTP_IPI_ADDR_WLOCK() \
	EnterCriticalSection(&SCTP_BASE_INFO(ipi_addr_mtx))
#define SCTP_IPI_ADDR_WUNLOCK() \
	LeaveCriticalSection(&SCTP_BASE_INFO(ipi_addr_mtx))


/* iterator locks */
#define SCTP_ITERATOR_LOCK_INIT() \
	InitializeCriticalSection(&sctp_it_ctl.it_mtx)
#define SCTP_ITERATOR_LOCK_DESTROY() \
	DeleteCriticalSection(&sctp_it_ctl.it_mtx)
#define SCTP_ITERATOR_LOCK() \
		EnterCriticalSection(&sctp_it_ctl.it_mtx)
#define SCTP_ITERATOR_UNLOCK() \
	LeaveCriticalSection(&sctp_it_ctl.it_mtx)

#define SCTP_IPI_ITERATOR_WQ_INIT() \
	InitializeCriticalSection(&sctp_it_ctl.ipi_iterator_wq_mtx)
#define SCTP_IPI_ITERATOR_WQ_DESTROY() \
	DeleteCriticalSection(&sctp_it_ctl.ipi_iterator_wq_mtx)
#define SCTP_IPI_ITERATOR_WQ_LOCK() \
	EnterCriticalSection(&sctp_it_ctl.ipi_iterator_wq_mtx)
#define SCTP_IPI_ITERATOR_WQ_UNLOCK() \
	LeaveCriticalSection(&sctp_it_ctl.ipi_iterator_wq_mtx)
556 
557 #else /* end of __Userspace_os_Windows */
/* address list locks */
/* A single mutex backs both the "R" and "W" variants, so there is no
 * reader concurrency here (pthread implementation). */
#define SCTP_IPI_ADDR_INIT() \
	(void)pthread_mutex_init(&SCTP_BASE_INFO(ipi_addr_mtx), &SCTP_BASE_VAR(mtx_attr))
#define SCTP_IPI_ADDR_DESTROY() \
	(void)pthread_mutex_destroy(&SCTP_BASE_INFO(ipi_addr_mtx))
#ifdef INVARIANTS
/* With INVARIANTS, lock/unlock return codes are checked via KASSERT. */
#define SCTP_IPI_ADDR_RLOCK() \
	KASSERT(pthread_mutex_lock(&SCTP_BASE_INFO(ipi_addr_mtx)) == 0, ("%s: ipi_addr_mtx already locked", __func__))
#define SCTP_IPI_ADDR_RUNLOCK() \
	KASSERT(pthread_mutex_unlock(&SCTP_BASE_INFO(ipi_addr_mtx)) == 0, ("%s: ipi_addr_mtx not locked", __func__))
#define SCTP_IPI_ADDR_WLOCK() \
	KASSERT(pthread_mutex_lock(&SCTP_BASE_INFO(ipi_addr_mtx)) == 0, ("%s: ipi_addr_mtx already locked", __func__))
#define SCTP_IPI_ADDR_WUNLOCK() \
	KASSERT(pthread_mutex_unlock(&SCTP_BASE_INFO(ipi_addr_mtx)) == 0, ("%s: ipi_addr_mtx not locked", __func__))
#else
#define SCTP_IPI_ADDR_RLOCK() \
	(void)pthread_mutex_lock(&SCTP_BASE_INFO(ipi_addr_mtx))
#define SCTP_IPI_ADDR_RUNLOCK() \
	(void)pthread_mutex_unlock(&SCTP_BASE_INFO(ipi_addr_mtx))
#define SCTP_IPI_ADDR_WLOCK() \
	(void)pthread_mutex_lock(&SCTP_BASE_INFO(ipi_addr_mtx))
#define SCTP_IPI_ADDR_WUNLOCK() \
	(void)pthread_mutex_unlock(&SCTP_BASE_INFO(ipi_addr_mtx))
#endif

/* iterator locks */
#define SCTP_ITERATOR_LOCK_INIT() \
	(void)pthread_mutex_init(&sctp_it_ctl.it_mtx, &SCTP_BASE_VAR(mtx_attr))
#define SCTP_ITERATOR_LOCK_DESTROY() \
	(void)pthread_mutex_destroy(&sctp_it_ctl.it_mtx)
#ifdef INVARIANTS
#define SCTP_ITERATOR_LOCK() \
	KASSERT(pthread_mutex_lock(&sctp_it_ctl.it_mtx) == 0, ("%s: it_mtx already locked", __func__))
#define SCTP_ITERATOR_UNLOCK() \
	KASSERT(pthread_mutex_unlock(&sctp_it_ctl.it_mtx) == 0, ("%s: it_mtx not locked", __func__))
#else
#define SCTP_ITERATOR_LOCK() \
	(void)pthread_mutex_lock(&sctp_it_ctl.it_mtx)
#define SCTP_ITERATOR_UNLOCK() \
	(void)pthread_mutex_unlock(&sctp_it_ctl.it_mtx)
#endif

#define SCTP_IPI_ITERATOR_WQ_INIT() \
	(void)pthread_mutex_init(&sctp_it_ctl.ipi_iterator_wq_mtx, &SCTP_BASE_VAR(mtx_attr))
#define SCTP_IPI_ITERATOR_WQ_DESTROY() \
	(void)pthread_mutex_destroy(&sctp_it_ctl.ipi_iterator_wq_mtx)
#ifdef INVARIANTS
#define SCTP_IPI_ITERATOR_WQ_LOCK() \
	KASSERT(pthread_mutex_lock(&sctp_it_ctl.ipi_iterator_wq_mtx) == 0, ("%s: ipi_iterator_wq_mtx already locked", __func__))
#define SCTP_IPI_ITERATOR_WQ_UNLOCK() \
	KASSERT(pthread_mutex_unlock(&sctp_it_ctl.ipi_iterator_wq_mtx) == 0, ("%s: ipi_iterator_wq_mtx not locked", __func__))
#else
#define SCTP_IPI_ITERATOR_WQ_LOCK() \
	(void)pthread_mutex_lock(&sctp_it_ctl.ipi_iterator_wq_mtx)
#define SCTP_IPI_ITERATOR_WQ_UNLOCK() \
	(void)pthread_mutex_unlock(&sctp_it_ctl.ipi_iterator_wq_mtx)
#endif
615 #endif
616 
/*
 * Global object counters (endpoints, associations, local/remote addresses,
 * chunks, read-queue entries, stream-out queue entries), maintained with the
 * project-provided atomic_add_int/atomic_subtract_int.
 */
#define SCTP_INCR_EP_COUNT() \
	atomic_add_int(&SCTP_BASE_INFO(ipi_count_ep), 1)

#define SCTP_DECR_EP_COUNT() \
	atomic_subtract_int(&SCTP_BASE_INFO(ipi_count_ep), 1)

#define SCTP_INCR_ASOC_COUNT() \
	atomic_add_int(&SCTP_BASE_INFO(ipi_count_asoc), 1)

#define SCTP_DECR_ASOC_COUNT() \
	atomic_subtract_int(&SCTP_BASE_INFO(ipi_count_asoc), 1)

#define SCTP_INCR_LADDR_COUNT() \
	atomic_add_int(&SCTP_BASE_INFO(ipi_count_laddr), 1)

#define SCTP_DECR_LADDR_COUNT() \
	atomic_subtract_int(&SCTP_BASE_INFO(ipi_count_laddr), 1)

#define SCTP_INCR_RADDR_COUNT() \
	atomic_add_int(&SCTP_BASE_INFO(ipi_count_raddr), 1)

#define SCTP_DECR_RADDR_COUNT() \
	atomic_subtract_int(&SCTP_BASE_INFO(ipi_count_raddr), 1)

#define SCTP_INCR_CHK_COUNT() \
	atomic_add_int(&SCTP_BASE_INFO(ipi_count_chunk), 1)

#define SCTP_DECR_CHK_COUNT() \
	atomic_subtract_int(&SCTP_BASE_INFO(ipi_count_chunk), 1)

#define SCTP_INCR_READQ_COUNT() \
	atomic_add_int(&SCTP_BASE_INFO(ipi_count_readq), 1)

#define SCTP_DECR_READQ_COUNT() \
	atomic_subtract_int(&SCTP_BASE_INFO(ipi_count_readq), 1)

#define SCTP_INCR_STRMOQ_COUNT() \
	atomic_add_int(&SCTP_BASE_INFO(ipi_count_strmoq), 1)

#define SCTP_DECR_STRMOQ_COUNT() \
	atomic_subtract_int(&SCTP_BASE_INFO(ipi_count_strmoq), 1)
658 
659 #endif
660