1 /*-
2  * SPDX-License-Identifier: BSD-3-Clause
3  *
4  * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
5  * Copyright (c) 2008-2011, by Randall Stewart. All rights reserved.
6  * Copyright (c) 2008-2011, by Michael Tuexen. All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions are met:
10  *
11  * a) Redistributions of source code must retain the above copyright notice,
12  *   this list of conditions and the following disclaimer.
13  *
14  * b) Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in
16  *   the documentation and/or other materials provided with the distribution.
17  *
18  * c) Neither the name of Cisco Systems, Inc. nor the names of its
19  *    contributors may be used to endorse or promote products derived
20  *    from this software without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
24  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32  * THE POSSIBILITY OF SUCH DAMAGE.
33  */
34 #ifndef __sctp_process_lock_h__
35 #define __sctp_process_lock_h__
36 
37 /*
38  * Need to yet define five atomic fuctions or
39  * their equivalant.
40  * - atomic_add_int(&foo, val) - add atomically the value
41  * - atomic_fetchadd_int(&foo, val) - does same as atomic_add_int
42  *				      but value it was is returned.
43  * - atomic_subtract_int(&foo, val) - can be made from atomic_add_int()
44  *
45  * - atomic_cmpset_int(&foo, value, newvalue) - Does a set of newvalue
46  *					        in foo if and only if
47  *					        foo is value. Returns 0
48  *					        on success.
49  */
50 
51 #ifdef SCTP_PER_SOCKET_LOCKING
52 /*
53  * per socket level locking
54  */
55 
/*
 * Under per-socket locking, access is presumably serialized by the
 * socket-level lock, so the INP-info and count lock operations expand
 * to nothing.  The former _WIN32 and non-_WIN32 branches were
 * byte-identical, so the redundant conditional has been removed and
 * the macros are defined once for all platforms.
 */
#define SCTP_INP_INFO_LOCK_INIT()
#define SCTP_INP_INFO_RLOCK()
#define SCTP_INP_INFO_RUNLOCK()
#define SCTP_INP_INFO_WLOCK()
#define SCTP_INP_INFO_WUNLOCK()
#define SCTP_INP_INFO_LOCK_ASSERT()
#define SCTP_INP_INFO_RLOCK_ASSERT()
#define SCTP_INP_INFO_WLOCK_ASSERT()
#define SCTP_INP_INFO_LOCK_DESTROY()
#define SCTP_IPI_COUNT_INIT()
#define SCTP_IPI_COUNT_DESTROY()
82 
/*
 * All per-object lock macros are no-ops in per-socket-locking mode
 * (presumably the socket lock serializes everything -- see the mode's
 * name; confirm against callers).  Note that SCTP_TCB_TRYLOCK() always
 * reports success (1) and the INP refcount macros do nothing here.
 */
#define SCTP_TCB_SEND_LOCK_INIT(_tcb)
#define SCTP_TCB_SEND_LOCK_DESTROY(_tcb)
#define SCTP_TCB_SEND_LOCK(_tcb)
#define SCTP_TCB_SEND_UNLOCK(_tcb)
#define SCTP_TCB_SEND_LOCK_ASSERT(_tcb)

/* Lock for INP */
#define SCTP_INP_LOCK_INIT(_inp)
#define SCTP_INP_LOCK_DESTROY(_inp)

#define SCTP_INP_RLOCK(_inp)
#define SCTP_INP_RUNLOCK(_inp)
#define SCTP_INP_WLOCK(_inp)
#define SCTP_INP_WUNLOCK(_inp)
#define SCTP_INP_RLOCK_ASSERT(_inp)
#define SCTP_INP_WLOCK_ASSERT(_inp)
#define SCTP_INP_INCR_REF(_inp)
#define SCTP_INP_DECR_REF(_inp)

#define SCTP_ASOC_CREATE_LOCK_INIT(_inp)
#define SCTP_ASOC_CREATE_LOCK_DESTROY(_inp)
#define SCTP_ASOC_CREATE_LOCK(_inp)
#define SCTP_ASOC_CREATE_UNLOCK(_inp)

#define SCTP_INP_READ_INIT(_inp)
#define SCTP_INP_READ_DESTROY(_inp)
#define SCTP_INP_READ_LOCK(_inp)
#define SCTP_INP_READ_UNLOCK(_inp)

/* Lock for TCB */
#define SCTP_TCB_LOCK_INIT(_tcb)
#define SCTP_TCB_LOCK_DESTROY(_tcb)
#define SCTP_TCB_LOCK(_tcb)
#define SCTP_TCB_TRYLOCK(_tcb) 1
#define SCTP_TCB_UNLOCK(_tcb)
#define SCTP_TCB_UNLOCK_IFOWNED(_tcb)
#define SCTP_TCB_LOCK_ASSERT(_tcb)
120 
121 #else
122 /*
123  * per tcb level locking
124  */
125 #define SCTP_IPI_COUNT_INIT()
126 
127 #if defined(_WIN32)
/*
 * Address work-queue lock (Win32): a critical section around
 * SCTP_BASE_INFO(wq_addr_mtx).  Critical sections offer no ownership
 * query, so the assert macro expands to nothing.
 */
#define SCTP_WQ_ADDR_INIT() \
	InitializeCriticalSection(&SCTP_BASE_INFO(wq_addr_mtx))
#define SCTP_WQ_ADDR_DESTROY() \
	DeleteCriticalSection(&SCTP_BASE_INFO(wq_addr_mtx))
#define SCTP_WQ_ADDR_LOCK() \
	EnterCriticalSection(&SCTP_BASE_INFO(wq_addr_mtx))
#define SCTP_WQ_ADDR_UNLOCK() \
	LeaveCriticalSection(&SCTP_BASE_INFO(wq_addr_mtx))
#define SCTP_WQ_ADDR_LOCK_ASSERT()
#if WINVER < 0x0600
/*
 * Pre-Vista: SRW locks are unavailable, so the endpoint-info lock
 * falls back to a single critical section.  Read and write lock
 * operations are therefore identical and readers serialize against
 * each other.
 */
#define SCTP_INP_INFO_LOCK_INIT() \
	InitializeCriticalSection(&SCTP_BASE_INFO(ipi_ep_mtx))
#define SCTP_INP_INFO_LOCK_DESTROY() \
	DeleteCriticalSection(&SCTP_BASE_INFO(ipi_ep_mtx))
#define SCTP_INP_INFO_RLOCK() \
	EnterCriticalSection(&SCTP_BASE_INFO(ipi_ep_mtx))
#define SCTP_INP_INFO_TRYLOCK()	\
	TryEnterCriticalSection(&SCTP_BASE_INFO(ipi_ep_mtx))
#define SCTP_INP_INFO_WLOCK() \
	EnterCriticalSection(&SCTP_BASE_INFO(ipi_ep_mtx))
#define SCTP_INP_INFO_RUNLOCK() \
	LeaveCriticalSection(&SCTP_BASE_INFO(ipi_ep_mtx))
#define SCTP_INP_INFO_WUNLOCK()	\
	LeaveCriticalSection(&SCTP_BASE_INFO(ipi_ep_mtx))
#define SCTP_INP_INFO_LOCK_ASSERT()
#define SCTP_INP_INFO_RLOCK_ASSERT()
#define SCTP_INP_INFO_WLOCK_ASSERT()
#else
/*
 * Vista and later: use a slim reader/writer (SRW) lock so multiple
 * readers can hold the endpoint-info lock concurrently.  SRW locks
 * need no explicit destruction, hence the empty LOCK_DESTROY.
 */
#define SCTP_INP_INFO_LOCK_INIT() \
	InitializeSRWLock(&SCTP_BASE_INFO(ipi_ep_mtx))
#define SCTP_INP_INFO_LOCK_DESTROY()
#define SCTP_INP_INFO_RLOCK() \
	AcquireSRWLockShared(&SCTP_BASE_INFO(ipi_ep_mtx))
#define SCTP_INP_INFO_TRYLOCK() \
	TryAcquireSRWLockShared(&SCTP_BASE_INFO(ipi_ep_mtx))
#define SCTP_INP_INFO_WLOCK() \
	AcquireSRWLockExclusive(&SCTP_BASE_INFO(ipi_ep_mtx))
#define SCTP_INP_INFO_RUNLOCK() \
	ReleaseSRWLockShared(&SCTP_BASE_INFO(ipi_ep_mtx))
#define SCTP_INP_INFO_WUNLOCK() \
	ReleaseSRWLockExclusive(&SCTP_BASE_INFO(ipi_ep_mtx))
#define SCTP_INP_INFO_LOCK_ASSERT()
#define SCTP_INP_INFO_RLOCK_ASSERT()
#define SCTP_INP_INFO_WLOCK_ASSERT()
#endif
174 
/* Packet-log lock (Win32): a plain critical section. */
#define SCTP_IP_PKTLOG_INIT() \
	InitializeCriticalSection(&SCTP_BASE_INFO(ipi_pktlog_mtx))
/*
 * Fix: the original read "SCTP_IP_PKTLOG_DESTROY ()" -- the space
 * before the parentheses made this an object-like macro expanding to
 * "() DeleteCriticalSection(...)", so any SCTP_IP_PKTLOG_DESTROY()
 * call site would fail to compile.
 */
#define SCTP_IP_PKTLOG_DESTROY() \
	DeleteCriticalSection(&SCTP_BASE_INFO(ipi_pktlog_mtx))
#define SCTP_IP_PKTLOG_LOCK() \
	EnterCriticalSection(&SCTP_BASE_INFO(ipi_pktlog_mtx))
#define SCTP_IP_PKTLOG_UNLOCK() \
	LeaveCriticalSection(&SCTP_BASE_INFO(ipi_pktlog_mtx))
183 
184 /*
185  * The INP locks we will use for locking an SCTP endpoint, so for example if
186  * we want to change something at the endpoint level for example random_store
187  * or cookie secrets we lock the INP level.
188  */
/* Per-endpoint read-queue lock (protects inp_rdata). */
#define SCTP_INP_READ_INIT(_inp) \
	InitializeCriticalSection(&(_inp)->inp_rdata_mtx)
#define SCTP_INP_READ_DESTROY(_inp) \
	DeleteCriticalSection(&(_inp)->inp_rdata_mtx)
#define SCTP_INP_READ_LOCK(_inp) \
	EnterCriticalSection(&(_inp)->inp_rdata_mtx)
#define SCTP_INP_READ_UNLOCK(_inp) \
	LeaveCriticalSection(&(_inp)->inp_rdata_mtx)

/* Per-endpoint lock; the R and W variants share the one inp_mtx. */
#define SCTP_INP_LOCK_INIT(_inp) \
	InitializeCriticalSection(&(_inp)->inp_mtx)
#define SCTP_INP_LOCK_DESTROY(_inp) \
	DeleteCriticalSection(&(_inp)->inp_mtx)
#ifdef SCTP_LOCK_LOGGING
/*
 * Lock-logging variants: record the attempt via sctp_log_lock() (only
 * when SCTP_LOCK_LOGGING_ENABLE is set in sctp_logging_level), then
 * take the endpoint mutex.  The acquisition is unconditional; only the
 * logging is conditional.  (Fix: the EnterCriticalSection() call in
 * SCTP_INP_RLOCK() was mis-indented so it appeared to be part of the
 * if-body -- compare SCTP_INP_WLOCK() below, which was correct.)
 */
#define SCTP_INP_RLOCK(_inp) do {						\
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOCK_LOGGING_ENABLE)	\
		sctp_log_lock(_inp, NULL, SCTP_LOG_LOCK_INP);			\
	EnterCriticalSection(&(_inp)->inp_mtx);					\
} while (0)
#define SCTP_INP_WLOCK(_inp) do {						\
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOCK_LOGGING_ENABLE)	\
		sctp_log_lock(_inp, NULL, SCTP_LOG_LOCK_INP);			\
	EnterCriticalSection(&(_inp)->inp_mtx);					\
} while (0)
#else
#define SCTP_INP_RLOCK(_inp) \
	EnterCriticalSection(&(_inp)->inp_mtx)
#define SCTP_INP_WLOCK(_inp) \
	EnterCriticalSection(&(_inp)->inp_mtx)
#endif
/* Critical sections offer no ownership query; asserts are no-ops. */
#define SCTP_INP_RLOCK_ASSERT(_tcb)
#define SCTP_INP_WLOCK_ASSERT(_tcb)

/* Per-association send-queue lock. */
#define SCTP_TCB_SEND_LOCK_INIT(_tcb) \
	InitializeCriticalSection(&(_tcb)->tcb_send_mtx)
#define SCTP_TCB_SEND_LOCK_DESTROY(_tcb) \
	DeleteCriticalSection(&(_tcb)->tcb_send_mtx)
#define SCTP_TCB_SEND_LOCK(_tcb) \
	EnterCriticalSection(&(_tcb)->tcb_send_mtx)
#define SCTP_TCB_SEND_UNLOCK(_tcb) \
	LeaveCriticalSection(&(_tcb)->tcb_send_mtx)
#define SCTP_TCB_SEND_LOCK_ASSERT(_tcb)

/* Endpoint reference count: adjusted atomically, no lock required. */
#define SCTP_INP_INCR_REF(_inp) atomic_add_int(&((_inp)->refcount), 1)
#define SCTP_INP_DECR_REF(_inp) atomic_add_int(&((_inp)->refcount), -1)
234 
/* Association-creation lock: serializes asoc setup on one endpoint. */
#define SCTP_ASOC_CREATE_LOCK_INIT(_inp) \
	InitializeCriticalSection(&(_inp)->inp_create_mtx)
#define SCTP_ASOC_CREATE_LOCK_DESTROY(_inp) \
	DeleteCriticalSection(&(_inp)->inp_create_mtx)
#ifdef SCTP_LOCK_LOGGING
/* Optionally log the lock attempt, then acquire unconditionally. */
#define SCTP_ASOC_CREATE_LOCK(_inp) do {					\
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOCK_LOGGING_ENABLE)	\
		sctp_log_lock(_inp, NULL, SCTP_LOG_LOCK_CREATE);		\
	EnterCriticalSection(&(_inp)->inp_create_mtx);				\
} while (0)
#else
#define SCTP_ASOC_CREATE_LOCK(_inp) \
	EnterCriticalSection(&(_inp)->inp_create_mtx)
#endif

#define SCTP_INP_RUNLOCK(_inp) \
	LeaveCriticalSection(&(_inp)->inp_mtx)
#define SCTP_INP_WUNLOCK(_inp) \
	LeaveCriticalSection(&(_inp)->inp_mtx)
#define SCTP_ASOC_CREATE_UNLOCK(_inp) \
	LeaveCriticalSection(&(_inp)->inp_create_mtx)
256 
257 /*
258  * For the majority of things (once we have found the association) we will
259  * lock the actual association mutex. This will protect all the assoiciation
260  * level queues and streams and such. We will need to lock the socket layer
261  * when we stuff data up into the receiving sb_mb. I.e. we will need to do an
262  * extra SOCKBUF_LOCK(&so->so_rcv) even though the association is locked.
263  */
264 
/* Per-association (TCB) lock. */
#define SCTP_TCB_LOCK_INIT(_tcb) \
	InitializeCriticalSection(&(_tcb)->tcb_mtx)
#define SCTP_TCB_LOCK_DESTROY(_tcb) \
	DeleteCriticalSection(&(_tcb)->tcb_mtx)
#ifdef SCTP_LOCK_LOGGING
/* Optionally log the lock attempt, then acquire unconditionally. */
#define SCTP_TCB_LOCK(_tcb) do {						\
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOCK_LOGGING_ENABLE)	\
		sctp_log_lock(_tcb->sctp_ep, _tcb, SCTP_LOG_LOCK_TCB);		\
	EnterCriticalSection(&(_tcb)->tcb_mtx);					\
} while (0)
#else
#define SCTP_TCB_LOCK(_tcb) \
	EnterCriticalSection(&(_tcb)->tcb_mtx)
#endif
/* TryEnterCriticalSection() returns nonzero on success. */
#define SCTP_TCB_TRYLOCK(_tcb) 	((TryEnterCriticalSection(&(_tcb)->tcb_mtx)))
#define SCTP_TCB_UNLOCK(_tcb) \
	LeaveCriticalSection(&(_tcb)->tcb_mtx)
#define SCTP_TCB_LOCK_ASSERT(_tcb)
283 
284 #else /* all Userspaces except Windows */
/* Address work-queue lock (pthread mutex). */
#define SCTP_WQ_ADDR_INIT() \
	(void)pthread_mutex_init(&SCTP_BASE_INFO(wq_addr_mtx), &SCTP_BASE_VAR(mtx_attr))
#define SCTP_WQ_ADDR_DESTROY() \
	(void)pthread_mutex_destroy(&SCTP_BASE_INFO(wq_addr_mtx))
#ifdef INVARIANTS
/* Checked variants: KASSERT that each lock/unlock call succeeds. */
#define SCTP_WQ_ADDR_LOCK() \
	KASSERT(pthread_mutex_lock(&SCTP_BASE_INFO(wq_addr_mtx)) == 0, ("%s:%d: wq_addr_mtx already locked", __FILE__, __LINE__))
#define SCTP_WQ_ADDR_UNLOCK() \
	KASSERT(pthread_mutex_unlock(&SCTP_BASE_INFO(wq_addr_mtx)) == 0, ("%s:%d: wq_addr_mtx not locked", __FILE__, __LINE__))
#else
#define SCTP_WQ_ADDR_LOCK() \
	(void)pthread_mutex_lock(&SCTP_BASE_INFO(wq_addr_mtx))
#define SCTP_WQ_ADDR_UNLOCK() \
	(void)pthread_mutex_unlock(&SCTP_BASE_INFO(wq_addr_mtx))
#endif
/*
 * NOTE(review): this assert relies on pthread_mutex_trylock() returning
 * EBUSY while the mutex is held (by any thread, not necessarily the
 * caller).  If the mutex is actually free, the trylock succeeds and
 * leaves it locked -- tolerable only because the KASSERT then fires.
 * Presumably KASSERT compiles away when INVARIANTS is unset -- confirm.
 */
#define SCTP_WQ_ADDR_LOCK_ASSERT() \
	KASSERT(pthread_mutex_trylock(&SCTP_BASE_INFO(wq_addr_mtx)) == EBUSY, ("%s:%d: wq_addr_mtx not locked", __FILE__, __LINE__))
302 
/* Endpoint-info lock: a pthread reader/writer lock. */
#define SCTP_INP_INFO_LOCK_INIT() \
	(void)pthread_rwlock_init(&SCTP_BASE_INFO(ipi_ep_mtx), &SCTP_BASE_VAR(rwlock_attr))
#define SCTP_INP_INFO_LOCK_DESTROY() \
	(void)pthread_rwlock_destroy(&SCTP_BASE_INFO(ipi_ep_mtx))
#ifdef INVARIANTS
/*
 * Checked variants: KASSERT that every rwlock call succeeds.
 * (Fix: the RLOCK message used "%s%d" where every other assertion
 * message in this file uses "%s:%d".)
 */
#define SCTP_INP_INFO_RLOCK() \
	KASSERT(pthread_rwlock_rdlock(&SCTP_BASE_INFO(ipi_ep_mtx)) == 0, ("%s:%d: ipi_ep_mtx already locked", __FILE__, __LINE__))
#define SCTP_INP_INFO_WLOCK() \
	KASSERT(pthread_rwlock_wrlock(&SCTP_BASE_INFO(ipi_ep_mtx)) == 0, ("%s:%d: ipi_ep_mtx already locked", __FILE__, __LINE__))
#define SCTP_INP_INFO_RUNLOCK() \
	KASSERT(pthread_rwlock_unlock(&SCTP_BASE_INFO(ipi_ep_mtx)) == 0, ("%s:%d: ipi_ep_mtx not locked", __FILE__, __LINE__))
#define SCTP_INP_INFO_WUNLOCK() \
	KASSERT(pthread_rwlock_unlock(&SCTP_BASE_INFO(ipi_ep_mtx)) == 0, ("%s:%d: ipi_ep_mtx not locked", __FILE__, __LINE__))
#else
#define SCTP_INP_INFO_RLOCK() \
	(void)pthread_rwlock_rdlock(&SCTP_BASE_INFO(ipi_ep_mtx))
#define SCTP_INP_INFO_WLOCK() \
	(void)pthread_rwlock_wrlock(&SCTP_BASE_INFO(ipi_ep_mtx))
#define SCTP_INP_INFO_RUNLOCK() \
	(void)pthread_rwlock_unlock(&SCTP_BASE_INFO(ipi_ep_mtx))
#define SCTP_INP_INFO_WUNLOCK() \
	(void)pthread_rwlock_unlock(&SCTP_BASE_INFO(ipi_ep_mtx))
#endif
#define SCTP_INP_INFO_LOCK_ASSERT()
#define SCTP_INP_INFO_RLOCK_ASSERT()
#define SCTP_INP_INFO_WLOCK_ASSERT()
/* Try a shared (read) lock; evaluates to nonzero on success. */
#define SCTP_INP_INFO_TRYLOCK() \
	(!(pthread_rwlock_tryrdlock(&SCTP_BASE_INFO(ipi_ep_mtx))))
331 
/* Packet-log lock (pthread mutex). */
#define SCTP_IP_PKTLOG_INIT() \
	(void)pthread_mutex_init(&SCTP_BASE_INFO(ipi_pktlog_mtx), &SCTP_BASE_VAR(mtx_attr))
#define SCTP_IP_PKTLOG_DESTROY() \
	(void)pthread_mutex_destroy(&SCTP_BASE_INFO(ipi_pktlog_mtx))
#ifdef INVARIANTS
#define SCTP_IP_PKTLOG_LOCK() \
	KASSERT(pthread_mutex_lock(&SCTP_BASE_INFO(ipi_pktlog_mtx)) == 0, ("%s:%d: ipi_pktlog_mtx already locked", __FILE__, __LINE__))
#define SCTP_IP_PKTLOG_UNLOCK() \
	KASSERT(pthread_mutex_unlock(&SCTP_BASE_INFO(ipi_pktlog_mtx)) == 0, ("%s:%d: ipi_pktlog_mtx not locked", __FILE__, __LINE__))
#else
#define SCTP_IP_PKTLOG_LOCK() \
	(void)pthread_mutex_lock(&SCTP_BASE_INFO(ipi_pktlog_mtx))
#define SCTP_IP_PKTLOG_UNLOCK() \
	(void)pthread_mutex_unlock(&SCTP_BASE_INFO(ipi_pktlog_mtx))
#endif
347 
348 
349 /*
350  * The INP locks we will use for locking an SCTP endpoint, so for example if
351  * we want to change something at the endpoint level for example random_store
352  * or cookie secrets we lock the INP level.
353  */
/* Per-endpoint read-queue lock (protects inp_rdata). */
#define SCTP_INP_READ_INIT(_inp) \
	(void)pthread_mutex_init(&(_inp)->inp_rdata_mtx, &SCTP_BASE_VAR(mtx_attr))
#define SCTP_INP_READ_DESTROY(_inp) \
	(void)pthread_mutex_destroy(&(_inp)->inp_rdata_mtx)
#ifdef INVARIANTS
#define SCTP_INP_READ_LOCK(_inp) \
	KASSERT(pthread_mutex_lock(&(_inp)->inp_rdata_mtx) == 0, ("%s:%d: inp_rdata_mtx already locked", __FILE__, __LINE__))
#define SCTP_INP_READ_UNLOCK(_inp) \
	KASSERT(pthread_mutex_unlock(&(_inp)->inp_rdata_mtx) == 0, ("%s:%d: inp_rdata_mtx not locked", __FILE__, __LINE__))
#else
#define SCTP_INP_READ_LOCK(_inp) \
	(void)pthread_mutex_lock(&(_inp)->inp_rdata_mtx)
#define SCTP_INP_READ_UNLOCK(_inp) \
	(void)pthread_mutex_unlock(&(_inp)->inp_rdata_mtx)
#endif
369 
/* Per-endpoint lock; the R and W variants share the one inp_mtx. */
#define SCTP_INP_LOCK_INIT(_inp) \
	(void)pthread_mutex_init(&(_inp)->inp_mtx, &SCTP_BASE_VAR(mtx_attr))
#define SCTP_INP_LOCK_DESTROY(_inp) \
	(void)pthread_mutex_destroy(&(_inp)->inp_mtx)
#ifdef INVARIANTS
#ifdef SCTP_LOCK_LOGGING
/* Optionally log the attempt, then KASSERT the acquisition succeeds. */
#define SCTP_INP_RLOCK(_inp) do {											\
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOCK_LOGGING_ENABLE)						\
		sctp_log_lock(_inp, NULL, SCTP_LOG_LOCK_INP);								\
	KASSERT(pthread_mutex_lock(&(_inp)->inp_mtx) == 0, ("%s:%d: inp_mtx already locked", __FILE__, __LINE__));	\
} while (0)
#define SCTP_INP_WLOCK(_inp) do {											\
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOCK_LOGGING_ENABLE)						\
		sctp_log_lock(_inp, NULL, SCTP_LOG_LOCK_INP);								\
	KASSERT(pthread_mutex_lock(&(_inp)->inp_mtx) == 0, ("%s:%d: inp_mtx already locked", __FILE__, __LINE__));	\
} while (0)
#else
#define SCTP_INP_RLOCK(_inp) \
	KASSERT(pthread_mutex_lock(&(_inp)->inp_mtx) == 0, ("%s:%d: inp_mtx already locked", __FILE__, __LINE__))
#define SCTP_INP_WLOCK(_inp) \
	KASSERT(pthread_mutex_lock(&(_inp)->inp_mtx) == 0, ("%s:%d: inp_mtx already locked", __FILE__, __LINE__))
#endif
#define SCTP_INP_RUNLOCK(_inp) \
	KASSERT(pthread_mutex_unlock(&(_inp)->inp_mtx) == 0, ("%s:%d: inp_mtx not locked", __FILE__, __LINE__))
#define SCTP_INP_WUNLOCK(_inp) \
	KASSERT(pthread_mutex_unlock(&(_inp)->inp_mtx) == 0, ("%s:%d: inp_mtx not locked", __FILE__, __LINE__))
#else
#ifdef SCTP_LOCK_LOGGING
#define SCTP_INP_RLOCK(_inp) do {						\
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOCK_LOGGING_ENABLE)	\
		sctp_log_lock(_inp, NULL, SCTP_LOG_LOCK_INP);			\
	(void)pthread_mutex_lock(&(_inp)->inp_mtx);				\
} while (0)
#define SCTP_INP_WLOCK(_inp) do {						\
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOCK_LOGGING_ENABLE)	\
		sctp_log_lock(_inp, NULL, SCTP_LOG_LOCK_INP);			\
	(void)pthread_mutex_lock(&(_inp)->inp_mtx);				\
} while (0)
#else
#define SCTP_INP_RLOCK(_inp) \
	(void)pthread_mutex_lock(&(_inp)->inp_mtx)
#define SCTP_INP_WLOCK(_inp) \
	(void)pthread_mutex_lock(&(_inp)->inp_mtx)
#endif
#define SCTP_INP_RUNLOCK(_inp) \
	(void)pthread_mutex_unlock(&(_inp)->inp_mtx)
#define SCTP_INP_WUNLOCK(_inp) \
	(void)pthread_mutex_unlock(&(_inp)->inp_mtx)
#endif
/*
 * NOTE(review): these asserts rely on pthread_mutex_trylock() returning
 * EBUSY while the mutex is held by *any* thread (not necessarily the
 * caller); if the mutex is free, the trylock acquires it and the
 * KASSERT then fires.
 */
#define SCTP_INP_RLOCK_ASSERT(_inp) \
	KASSERT(pthread_mutex_trylock(&(_inp)->inp_mtx) == EBUSY, ("%s:%d: inp_mtx not locked", __FILE__, __LINE__))
#define SCTP_INP_WLOCK_ASSERT(_inp) \
	KASSERT(pthread_mutex_trylock(&(_inp)->inp_mtx) == EBUSY, ("%s:%d: inp_mtx not locked", __FILE__, __LINE__))
/* Endpoint reference count: adjusted atomically, no lock required. */
#define SCTP_INP_INCR_REF(_inp) atomic_add_int(&((_inp)->refcount), 1)
#define SCTP_INP_DECR_REF(_inp) atomic_add_int(&((_inp)->refcount), -1)
425 
/* Per-association send-queue lock (pthread mutex). */
#define SCTP_TCB_SEND_LOCK_INIT(_tcb) \
	(void)pthread_mutex_init(&(_tcb)->tcb_send_mtx, &SCTP_BASE_VAR(mtx_attr))
#define SCTP_TCB_SEND_LOCK_DESTROY(_tcb) \
	(void)pthread_mutex_destroy(&(_tcb)->tcb_send_mtx)
#ifdef INVARIANTS
#define SCTP_TCB_SEND_LOCK(_tcb) \
	KASSERT(pthread_mutex_lock(&(_tcb)->tcb_send_mtx) == 0, ("%s:%d: tcb_send_mtx already locked", __FILE__, __LINE__))
#define SCTP_TCB_SEND_UNLOCK(_tcb) \
	KASSERT(pthread_mutex_unlock(&(_tcb)->tcb_send_mtx) == 0, ("%s:%d: tcb_send_mtx not locked", __FILE__, __LINE__))
#else
#define SCTP_TCB_SEND_LOCK(_tcb) \
	(void)pthread_mutex_lock(&(_tcb)->tcb_send_mtx)
#define SCTP_TCB_SEND_UNLOCK(_tcb) \
	(void)pthread_mutex_unlock(&(_tcb)->tcb_send_mtx)
#endif
/* Held-check via trylock == EBUSY (see NOTE at SCTP_WQ_ADDR_LOCK_ASSERT). */
#define SCTP_TCB_SEND_LOCK_ASSERT(_tcb) \
	KASSERT(pthread_mutex_trylock(&(_tcb)->tcb_send_mtx) == EBUSY, ("%s:%d: tcb_send_mtx not locked", __FILE__, __LINE__))
443 
/* Association-creation lock: serializes asoc setup on one endpoint. */
#define SCTP_ASOC_CREATE_LOCK_INIT(_inp) \
	(void)pthread_mutex_init(&(_inp)->inp_create_mtx, &SCTP_BASE_VAR(mtx_attr))
#define SCTP_ASOC_CREATE_LOCK_DESTROY(_inp) \
	(void)pthread_mutex_destroy(&(_inp)->inp_create_mtx)
#ifdef INVARIANTS
#ifdef SCTP_LOCK_LOGGING
/* Optionally log the attempt, then KASSERT the acquisition succeeds. */
#define SCTP_ASOC_CREATE_LOCK(_inp) do {											\
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOCK_LOGGING_ENABLE)							\
		sctp_log_lock(_inp, NULL, SCTP_LOG_LOCK_CREATE);								\
	KASSERT(pthread_mutex_lock(&(_inp)->inp_create_mtx) == 0, ("%s:%d: inp_create_mtx already locked", __FILE__, __LINE__));	\
} while (0)
#else
#define SCTP_ASOC_CREATE_LOCK(_inp) \
	KASSERT(pthread_mutex_lock(&(_inp)->inp_create_mtx) == 0, ("%s:%d: inp_create_mtx already locked", __FILE__, __LINE__))
#endif
#define SCTP_ASOC_CREATE_UNLOCK(_inp) \
	KASSERT(pthread_mutex_unlock(&(_inp)->inp_create_mtx) == 0, ("%s:%d: inp_create_mtx not locked", __FILE__, __LINE__))
#else
#ifdef SCTP_LOCK_LOGGING
#define SCTP_ASOC_CREATE_LOCK(_inp) do {					\
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOCK_LOGGING_ENABLE)	\
		sctp_log_lock(_inp, NULL, SCTP_LOG_LOCK_CREATE);		\
	(void)pthread_mutex_lock(&(_inp)->inp_create_mtx);			\
} while (0)
#else
#define SCTP_ASOC_CREATE_LOCK(_inp) \
	(void)pthread_mutex_lock(&(_inp)->inp_create_mtx)
#endif
#define SCTP_ASOC_CREATE_UNLOCK(_inp) \
	(void)pthread_mutex_unlock(&(_inp)->inp_create_mtx)
#endif
475 /*
476  * For the majority of things (once we have found the association) we will
477  * lock the actual association mutex. This will protect all the assoiciation
478  * level queues and streams and such. We will need to lock the socket layer
479  * when we stuff data up into the receiving sb_mb. I.e. we will need to do an
480  * extra SOCKBUF_LOCK(&so->so_rcv) even though the association is locked.
481  */
482 
/* Per-association (TCB) lock (pthread mutex). */
#define SCTP_TCB_LOCK_INIT(_tcb) \
	(void)pthread_mutex_init(&(_tcb)->tcb_mtx, &SCTP_BASE_VAR(mtx_attr))
#define SCTP_TCB_LOCK_DESTROY(_tcb) \
	(void)pthread_mutex_destroy(&(_tcb)->tcb_mtx)
487 #ifdef INVARIANTS
488 #ifdef SCTP_LOCK_LOGGING
/*
 * Optionally log the attempt, then KASSERT the acquisition succeeds.
 * Fix: the KASSERT line lacked its trailing ';' before the line
 * continuation, which broke compilation whenever both INVARIANTS and
 * SCTP_LOCK_LOGGING were defined (compare the analogous SCTP_INP_RLOCK
 * and SCTP_ASOC_CREATE_LOCK definitions, which have the ';').
 */
#define SCTP_TCB_LOCK(_tcb) do {											\
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOCK_LOGGING_ENABLE) 						\
		sctp_log_lock(_tcb->sctp_ep, _tcb, SCTP_LOG_LOCK_TCB);							\
	KASSERT(pthread_mutex_lock(&(_tcb)->tcb_mtx) == 0, ("%s:%d: tcb_mtx already locked", __FILE__, __LINE__));	\
} while (0)
#else
#define SCTP_TCB_LOCK(_tcb) \
	KASSERT(pthread_mutex_lock(&(_tcb)->tcb_mtx) == 0, ("%s:%d: tcb_mtx already locked", __FILE__, __LINE__))
#endif
#define SCTP_TCB_UNLOCK(_tcb) \
	KASSERT(pthread_mutex_unlock(&(_tcb)->tcb_mtx) == 0, ("%s:%d: tcb_mtx not locked", __FILE__, __LINE__))
#else
#ifdef SCTP_LOCK_LOGGING
#define SCTP_TCB_LOCK(_tcb) do {						\
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOCK_LOGGING_ENABLE) 	\
		sctp_log_lock(_tcb->sctp_ep, _tcb, SCTP_LOG_LOCK_TCB);		\
	(void)pthread_mutex_lock(&(_tcb)->tcb_mtx);				\
} while (0)
#else
#define SCTP_TCB_LOCK(_tcb) \
	(void)pthread_mutex_lock(&(_tcb)->tcb_mtx)
#endif
#define SCTP_TCB_UNLOCK(_tcb) (void)pthread_mutex_unlock(&(_tcb)->tcb_mtx)
#endif
/* Held-check via trylock == EBUSY (see NOTE at SCTP_WQ_ADDR_LOCK_ASSERT). */
#define SCTP_TCB_LOCK_ASSERT(_tcb) \
	KASSERT(pthread_mutex_trylock(&(_tcb)->tcb_mtx) == EBUSY, ("%s:%d: tcb_mtx not locked", __FILE__, __LINE__))
/* Nonzero on success: pthread_mutex_trylock() returns 0 when acquired. */
#define SCTP_TCB_TRYLOCK(_tcb) (!(pthread_mutex_trylock(&(_tcb)->tcb_mtx)))
516 #endif
517 
518 #endif /* SCTP_PER_SOCKET_LOCKING */
519 
520 
521 /*
522  * common locks
523  */
524 
/* Contention probes: not implemented in this port, always report 0. */
#define SCTP_INP_LOCK_CONTENDED(_inp) (0) /* Don't know if this is possible */
#define SCTP_INP_READ_CONTENDED(_inp) (0) /* Don't know if this is possible */
#define SCTP_ASOC_CREATE_LOCK_CONTENDED(_inp) (0) /* Don't know if this is possible */
529 
530 /* socket locks */
531 
#if defined(_WIN32)
/* No held-check possible with critical sections; assert is a no-op. */
#define SOCKBUF_LOCK_ASSERT(_so_buf)
#define SOCKBUF_LOCK(_so_buf) \
	EnterCriticalSection(&(_so_buf)->sb_mtx)
#define SOCKBUF_UNLOCK(_so_buf) \
	LeaveCriticalSection(&(_so_buf)->sb_mtx)
/* The socket lock is the receive-buffer lock. */
#define SOCK_LOCK(_so) \
	SOCKBUF_LOCK(&(_so)->so_rcv)
#define SOCK_UNLOCK(_so) \
	SOCKBUF_UNLOCK(&(_so)->so_rcv)
#else
/*
 * NOTE(review): unlike the other *_ASSERT macros in this file, this one
 * is defined outside an INVARIANTS guard; presumably KASSERT itself
 * expands to nothing when INVARIANTS is unset -- confirm.
 */
#define SOCKBUF_LOCK_ASSERT(_so_buf) \
	KASSERT(pthread_mutex_trylock(SOCKBUF_MTX(_so_buf)) == EBUSY, ("%s:%d: socket buffer not locked", __FILE__, __LINE__))
#ifdef INVARIANTS
#define SOCKBUF_LOCK(_so_buf) \
	KASSERT(pthread_mutex_lock(SOCKBUF_MTX(_so_buf)) == 0, ("%s:%d: sockbuf_mtx already locked", __FILE__, __LINE__))
#define SOCKBUF_UNLOCK(_so_buf) \
	KASSERT(pthread_mutex_unlock(SOCKBUF_MTX(_so_buf)) == 0, ("%s:%d: sockbuf_mtx not locked", __FILE__, __LINE__))
#else
#define SOCKBUF_LOCK(_so_buf) \
	pthread_mutex_lock(SOCKBUF_MTX(_so_buf))
#define SOCKBUF_UNLOCK(_so_buf) \
	pthread_mutex_unlock(SOCKBUF_MTX(_so_buf))
#endif
/* The socket lock is the receive-buffer lock. */
#define SOCK_LOCK(_so) \
	SOCKBUF_LOCK(&(_so)->so_rcv)
#define SOCK_UNLOCK(_so) \
	SOCKBUF_UNLOCK(&(_so)->so_rcv)
#endif
561 
/* Stat-log locking: no-ops in this implementation. */
#define SCTP_STATLOG_INIT_LOCK()
#define SCTP_STATLOG_LOCK()
#define SCTP_STATLOG_UNLOCK()
#define SCTP_STATLOG_DESTROY()
566 
567 #if defined(_WIN32)
568 /* address list locks */
#if WINVER < 0x0600
/*
 * Pre-Vista fallback: one critical section; read and write lock
 * operations are identical, so readers serialize.
 */
#define SCTP_IPI_ADDR_INIT() \
	InitializeCriticalSection(&SCTP_BASE_INFO(ipi_addr_mtx))
#define SCTP_IPI_ADDR_DESTROY() \
	DeleteCriticalSection(&SCTP_BASE_INFO(ipi_addr_mtx))
#define SCTP_IPI_ADDR_RLOCK() \
	EnterCriticalSection(&SCTP_BASE_INFO(ipi_addr_mtx))
#define SCTP_IPI_ADDR_RUNLOCK() \
	LeaveCriticalSection(&SCTP_BASE_INFO(ipi_addr_mtx))
#define SCTP_IPI_ADDR_WLOCK() \
	EnterCriticalSection(&SCTP_BASE_INFO(ipi_addr_mtx))
#define SCTP_IPI_ADDR_WUNLOCK() \
	LeaveCriticalSection(&SCTP_BASE_INFO(ipi_addr_mtx))
#define SCTP_IPI_ADDR_LOCK_ASSERT()
#define SCTP_IPI_ADDR_WLOCK_ASSERT()
#else
/* Vista+: SRW reader/writer lock; SRW locks need no destruction. */
#define SCTP_IPI_ADDR_INIT() \
	InitializeSRWLock(&SCTP_BASE_INFO(ipi_addr_mtx))
#define SCTP_IPI_ADDR_DESTROY()
#define SCTP_IPI_ADDR_RLOCK() \
	AcquireSRWLockShared(&SCTP_BASE_INFO(ipi_addr_mtx))
#define SCTP_IPI_ADDR_RUNLOCK() \
	ReleaseSRWLockShared(&SCTP_BASE_INFO(ipi_addr_mtx))
#define SCTP_IPI_ADDR_WLOCK() \
	AcquireSRWLockExclusive(&SCTP_BASE_INFO(ipi_addr_mtx))
#define SCTP_IPI_ADDR_WUNLOCK() \
	ReleaseSRWLockExclusive(&SCTP_BASE_INFO(ipi_addr_mtx))
#define SCTP_IPI_ADDR_LOCK_ASSERT()
#define SCTP_IPI_ADDR_WLOCK_ASSERT()
#endif
599 
/* iterator locks */
#define SCTP_ITERATOR_LOCK_INIT() \
	InitializeCriticalSection(&sctp_it_ctl.it_mtx)
#define SCTP_ITERATOR_LOCK_DESTROY() \
	DeleteCriticalSection(&sctp_it_ctl.it_mtx)
#define SCTP_ITERATOR_LOCK() \
		EnterCriticalSection(&sctp_it_ctl.it_mtx)
#define SCTP_ITERATOR_UNLOCK() \
	LeaveCriticalSection(&sctp_it_ctl.it_mtx)

/* iterator work-queue lock */
#define SCTP_IPI_ITERATOR_WQ_INIT() \
	InitializeCriticalSection(&sctp_it_ctl.ipi_iterator_wq_mtx)
#define SCTP_IPI_ITERATOR_WQ_DESTROY() \
	DeleteCriticalSection(&sctp_it_ctl.ipi_iterator_wq_mtx)
#define SCTP_IPI_ITERATOR_WQ_LOCK() \
	EnterCriticalSection(&sctp_it_ctl.ipi_iterator_wq_mtx)
#define SCTP_IPI_ITERATOR_WQ_UNLOCK() \
	LeaveCriticalSection(&sctp_it_ctl.ipi_iterator_wq_mtx)
618 
619 #else
/* address list locks (pthread reader/writer lock) */
#define SCTP_IPI_ADDR_INIT() \
	(void)pthread_rwlock_init(&SCTP_BASE_INFO(ipi_addr_mtx), &SCTP_BASE_VAR(rwlock_attr))
#define SCTP_IPI_ADDR_DESTROY() \
	(void)pthread_rwlock_destroy(&SCTP_BASE_INFO(ipi_addr_mtx))
#ifdef INVARIANTS
/* Checked variants: KASSERT that every rwlock call succeeds. */
#define SCTP_IPI_ADDR_RLOCK() \
	KASSERT(pthread_rwlock_rdlock(&SCTP_BASE_INFO(ipi_addr_mtx)) == 0, ("%s:%d: ipi_addr_mtx already locked", __FILE__, __LINE__))
#define SCTP_IPI_ADDR_RUNLOCK() \
	KASSERT(pthread_rwlock_unlock(&SCTP_BASE_INFO(ipi_addr_mtx)) == 0, ("%s:%d: ipi_addr_mtx not locked", __FILE__, __LINE__))
#define SCTP_IPI_ADDR_WLOCK() \
	KASSERT(pthread_rwlock_wrlock(&SCTP_BASE_INFO(ipi_addr_mtx)) == 0, ("%s:%d: ipi_addr_mtx already locked", __FILE__, __LINE__))
#define SCTP_IPI_ADDR_WUNLOCK() \
	KASSERT(pthread_rwlock_unlock(&SCTP_BASE_INFO(ipi_addr_mtx)) == 0, ("%s:%d: ipi_addr_mtx not locked", __FILE__, __LINE__))
#else
#define SCTP_IPI_ADDR_RLOCK() \
	(void)pthread_rwlock_rdlock(&SCTP_BASE_INFO(ipi_addr_mtx))
#define SCTP_IPI_ADDR_RUNLOCK() \
	(void)pthread_rwlock_unlock(&SCTP_BASE_INFO(ipi_addr_mtx))
#define SCTP_IPI_ADDR_WLOCK() \
	(void)pthread_rwlock_wrlock(&SCTP_BASE_INFO(ipi_addr_mtx))
#define SCTP_IPI_ADDR_WUNLOCK() \
	(void)pthread_rwlock_unlock(&SCTP_BASE_INFO(ipi_addr_mtx))
#endif
#define SCTP_IPI_ADDR_LOCK_ASSERT()
#define SCTP_IPI_ADDR_WLOCK_ASSERT()
646 
/* iterator locks (pthread mutexes) */
#define SCTP_ITERATOR_LOCK_INIT() \
	(void)pthread_mutex_init(&sctp_it_ctl.it_mtx, &SCTP_BASE_VAR(mtx_attr))
#define SCTP_ITERATOR_LOCK_DESTROY() \
	(void)pthread_mutex_destroy(&sctp_it_ctl.it_mtx)
#ifdef INVARIANTS
#define SCTP_ITERATOR_LOCK() \
	KASSERT(pthread_mutex_lock(&sctp_it_ctl.it_mtx) == 0, ("%s:%d: it_mtx already locked", __FILE__, __LINE__))
#define SCTP_ITERATOR_UNLOCK() \
	KASSERT(pthread_mutex_unlock(&sctp_it_ctl.it_mtx) == 0, ("%s:%d: it_mtx not locked", __FILE__, __LINE__))
#else
#define SCTP_ITERATOR_LOCK() \
	(void)pthread_mutex_lock(&sctp_it_ctl.it_mtx)
#define SCTP_ITERATOR_UNLOCK() \
	(void)pthread_mutex_unlock(&sctp_it_ctl.it_mtx)
#endif

/* iterator work-queue lock */
#define SCTP_IPI_ITERATOR_WQ_INIT() \
	(void)pthread_mutex_init(&sctp_it_ctl.ipi_iterator_wq_mtx, &SCTP_BASE_VAR(mtx_attr))
#define SCTP_IPI_ITERATOR_WQ_DESTROY() \
	(void)pthread_mutex_destroy(&sctp_it_ctl.ipi_iterator_wq_mtx)
#ifdef INVARIANTS
#define SCTP_IPI_ITERATOR_WQ_LOCK() \
	KASSERT(pthread_mutex_lock(&sctp_it_ctl.ipi_iterator_wq_mtx) == 0, ("%s:%d: ipi_iterator_wq_mtx already locked", __FILE__, __LINE__))
#define SCTP_IPI_ITERATOR_WQ_UNLOCK() \
	KASSERT(pthread_mutex_unlock(&sctp_it_ctl.ipi_iterator_wq_mtx) == 0, ("%s:%d: ipi_iterator_wq_mtx not locked", __FILE__, __LINE__))
#else
#define SCTP_IPI_ITERATOR_WQ_LOCK() \
	(void)pthread_mutex_lock(&sctp_it_ctl.ipi_iterator_wq_mtx)
#define SCTP_IPI_ITERATOR_WQ_UNLOCK() \
	(void)pthread_mutex_unlock(&sctp_it_ctl.ipi_iterator_wq_mtx)
#endif
679 #endif
680 
/*
 * Global object counters: maintained with atomic add/subtract, so no
 * mutex is required.
 */
#define SCTP_INCR_EP_COUNT() \
	atomic_add_int(&SCTP_BASE_INFO(ipi_count_ep), 1)

#define SCTP_DECR_EP_COUNT() \
	atomic_subtract_int(&SCTP_BASE_INFO(ipi_count_ep), 1)

#define SCTP_INCR_ASOC_COUNT() \
	atomic_add_int(&SCTP_BASE_INFO(ipi_count_asoc), 1)

#define SCTP_DECR_ASOC_COUNT() \
	atomic_subtract_int(&SCTP_BASE_INFO(ipi_count_asoc), 1)

#define SCTP_INCR_LADDR_COUNT() \
	atomic_add_int(&SCTP_BASE_INFO(ipi_count_laddr), 1)

#define SCTP_DECR_LADDR_COUNT() \
	atomic_subtract_int(&SCTP_BASE_INFO(ipi_count_laddr), 1)

#define SCTP_INCR_RADDR_COUNT() \
	atomic_add_int(&SCTP_BASE_INFO(ipi_count_raddr), 1)

#define SCTP_DECR_RADDR_COUNT() \
	atomic_subtract_int(&SCTP_BASE_INFO(ipi_count_raddr), 1)

#define SCTP_INCR_CHK_COUNT() \
	atomic_add_int(&SCTP_BASE_INFO(ipi_count_chunk), 1)

#define SCTP_DECR_CHK_COUNT() \
	atomic_subtract_int(&SCTP_BASE_INFO(ipi_count_chunk), 1)

#define SCTP_INCR_READQ_COUNT() \
	atomic_add_int(&SCTP_BASE_INFO(ipi_count_readq), 1)

#define SCTP_DECR_READQ_COUNT() \
	atomic_subtract_int(&SCTP_BASE_INFO(ipi_count_readq), 1)

#define SCTP_INCR_STRMOQ_COUNT() \
	atomic_add_int(&SCTP_BASE_INFO(ipi_count_strmoq), 1)

#define SCTP_DECR_STRMOQ_COUNT() \
	atomic_subtract_int(&SCTP_BASE_INFO(ipi_count_strmoq), 1)
722 
723 #endif
724