/*	$OpenBSD: mutex.h,v 1.9 2018/01/13 15:18:11 mpi Exp $	*/

/*
 * Copyright (c) 2004 Artur Grabowski <art@openbsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
 * THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL  DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _MACHINE_MUTEX_H_
#define _MACHINE_MUTEX_H_

#include <sys/_lock.h>

#define	MUTEX_UNLOCKED	{ 1, 1, 1, 1 }

/* Note: mtx_lock must be 16-byte aligned. */
struct mutex {
#ifdef MULTIPROCESSOR
	volatile int mtx_lock[4];
#endif
	int mtx_wantipl;
	int mtx_oldipl;
	volatile void *mtx_owner;
#ifdef WITNESS
	struct lock_object mtx_lock_obj;
#endif
};
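
/*
 * Background on the layout above: the only atomic primitive on hppa is
 * LDCW (load and clear word), which atomically reads a word and stores
 * zero back, and requires that word to be 16-byte aligned.  Since the
 * structure itself carries no such alignment guarantee, mtx_lock is a
 * four-word array: one of its words is always 16-byte aligned, and the
 * lock code operates on that word.  A nonzero word means "unlocked",
 * hence MUTEX_UNLOCKED being { 1, 1, 1, 1 }.
 *
 * A minimal sketch of LDCW-based acquisition.  This is NOT the kernel's
 * actual implementation (that lives in the hppa lock and mutex sources);
 * the function names and asm constraints here are illustrative:
 *
 *	static volatile int *
 *	mtx_lock_word(struct mutex *mtx)
 *	{
 *		// Pick the first 16-byte-aligned word in mtx_lock.
 *		return (volatile int *)
 *		    (((unsigned long)mtx->mtx_lock + 0xf) & ~0xfUL);
 *	}
 *
 *	static int
 *	try_acquire(volatile int *lock)
 *	{
 *		int old;
 *
 *		// LDCW returns the previous value and leaves zero
 *		// behind; a nonzero result means we took the lock.
 *		__asm volatile ("ldcw 0(%2), %0"
 *		    : "=&r" (old), "+m" (*lock) : "r" (lock));
 *		return (old != 0);
 *	}
 */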

/*
 * To prevent lock ordering problems with the kernel lock, we need to
 * make sure we block all interrupts that can grab the kernel lock.
 * The simplest way to achieve this is to make sure mutexes always
 * raise the interrupt priority level to the highest level that has
 * interrupts that grab the kernel lock.
 */
#ifdef MULTIPROCESSOR
#define __MUTEX_IPL(ipl) \
    (((ipl) > IPL_NONE && (ipl) < IPL_MPFLOOR) ? IPL_MPFLOOR : (ipl))
#ifdef WITNESS
#define MUTEX_INITIALIZER_FLAGS(ipl, name, flags) \
	{ MUTEX_UNLOCKED, __MUTEX_IPL((ipl)), 0, NULL, \
	  MTX_LO_INITIALIZER(name, flags) }
#else /* WITNESS */
#define MUTEX_INITIALIZER_FLAGS(ipl, name, flags) \
	{ MUTEX_UNLOCKED, __MUTEX_IPL((ipl)), 0, NULL }
#endif /* WITNESS */
#else /* MULTIPROCESSOR */
#define __MUTEX_IPL(ipl) (ipl)
#define MUTEX_INITIALIZER_FLAGS(ipl, name, flags) \
	{ __MUTEX_IPL((ipl)), 0, NULL }
#endif /* MULTIPROCESSOR */
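
/*
 * Worked example (a sketch; the IPL names and their relative order come
 * from the machine-dependent interrupt headers): on an MP kernel, a
 * mutex asked to interlock at an IPL below IPL_MPFLOOR is silently
 * raised to IPL_MPFLOOR, so that every interrupt that might grab the
 * kernel lock stays blocked while the mutex is held.  Assuming IPL_BIO
 * sits between IPL_NONE and IPL_MPFLOOR:
 *
 *	struct mutex sc_mtx = MUTEX_INITIALIZER_FLAGS(IPL_BIO, "scmtx", 0);
 *	// sc_mtx.mtx_wantipl is IPL_MPFLOOR, not IPL_BIO.
 */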

void __mtx_init(struct mutex *, int);
#define _mtx_init(mtx, ipl) __mtx_init((mtx), __MUTEX_IPL((ipl)))
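
/*
 * Runtime initialization normally goes through the MI mtx_init() macro
 * in <sys/mutex.h>, which reaches _mtx_init() above and so applies the
 * same IPL floor.  A usage sketch (the variable name is made up):
 *
 *	struct mutex sc_mtx;
 *
 *	mtx_init(&sc_mtx, IPL_VM);	// raised to IPL_MPFLOOR on MP
 *					// if IPL_VM is below the floor
 *	mtx_enter(&sc_mtx);
 *	// ... touch state shared with interrupt handlers ...
 *	mtx_leave(&sc_mtx);
 */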

#ifdef DIAGNOSTIC
#define MUTEX_ASSERT_LOCKED(mtx) do {					\
	if ((mtx)->mtx_owner != curcpu())				\
		panic("mutex %p not held in %s", (mtx), __func__);	\
} while (0)

#define MUTEX_ASSERT_UNLOCKED(mtx) do {					\
	if ((mtx)->mtx_owner == curcpu())				\
		panic("mutex %p held in %s", (mtx), __func__);		\
} while (0)
#else
#define MUTEX_ASSERT_LOCKED(mtx) do { } while (0)
#define MUTEX_ASSERT_UNLOCKED(mtx) do { } while (0)
#endif
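
/*
 * Usage sketch (queue_insert() and its types are illustrative, not part
 * of this header): a routine that requires its caller to hold a mutex
 * can document and enforce that with MUTEX_ASSERT_LOCKED():
 *
 *	void
 *	queue_insert(struct queue *q, struct item *it)
 *	{
 *		MUTEX_ASSERT_LOCKED(&q->q_mtx);
 *		TAILQ_INSERT_TAIL(&q->q_items, it, it_entry);
 *	}
 *
 * Without DIAGNOSTIC, both asserts compile away to nothing.
 */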

#define MUTEX_LOCK_OBJECT(mtx)	(&(mtx)->mtx_lock_obj)
#define MUTEX_OLDIPL(mtx)	(mtx)->mtx_oldipl

#endif /* _MACHINE_MUTEX_H_ */