1 /*
2  *  Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
3  *  Copyright (C) 2007 The Regents of the University of California.
4  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
5  *  Written by Brian Behlendorf <behlendorf1@llnl.gov>.
6  *  UCRL-CODE-235197
7  *
8  *  This file is part of the SPL, Solaris Porting Layer.
9  *  For details, see <http://zfsonlinux.org/>.
10  *
11  *  The SPL is free software; you can redistribute it and/or modify it
12  *  under the terms of the GNU General Public License as published by the
13  *  Free Software Foundation; either version 2 of the License, or (at your
14  *  option) any later version.
15  *
16  *  The SPL is distributed in the hope that it will be useful, but WITHOUT
17  *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
18  *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
19  *  for more details.
20  *
21  *  You should have received a copy of the GNU General Public License along
22  *  with the SPL.  If not, see <http://www.gnu.org/licenses/>.
23  */
24 
25 #ifndef _SPL_MUTEX_H
26 #define	_SPL_MUTEX_H
27 
28 #include <sys/types.h>
29 #include <linux/sched.h>
30 #include <linux/mutex.h>
31 #include <linux/lockdep.h>
32 #include <linux/compiler_compat.h>
33 
/*
 * Solaris mutex types.  Only MUTEX_DEFAULT and MUTEX_NOLOCKDEP are
 * accepted by mutex_init() below; the other values exist for Solaris
 * API compatibility.
 */
typedef enum {
	MUTEX_DEFAULT	= 0,
	MUTEX_SPIN	= 1,
	MUTEX_ADAPTIVE	= 2,
	MUTEX_NOLOCKDEP	= 3	/* disable lockdep tracking for this mutex */
} kmutex_type_t;
40 
/*
 * Solaris-style mutex implemented on top of the native Linux mutex,
 * with explicit owner tracking and an exit-serialization spinlock
 * (see the comment above mutex_exit() below).
 */
typedef struct {
	struct mutex		m_mutex;	/* underlying Linux mutex */
	spinlock_t		m_lock;	/* used for serializing mutex_exit */
	kthread_t		*m_owner;	/* holder thread; NULL when unheld */
#ifdef CONFIG_LOCKDEP
	kmutex_type_t		m_type;	/* type passed to mutex_init() */
#endif /* CONFIG_LOCKDEP */
} kmutex_t;
49 
/* Access the embedded native Linux mutex of a kmutex_t */
#define	MUTEX(mp)		(&((mp)->m_mutex))
51 
52 static inline void
53 spl_mutex_set_owner(kmutex_t *mp)
54 {
55 	mp->m_owner = current;
56 }
57 
58 static inline void
59 spl_mutex_clear_owner(kmutex_t *mp)
60 {
61 	mp->m_owner = NULL;
62 }
63 
/*
 * Owner queries.  mutex_owner() returns the holder kthread_t or NULL.
 * READ_ONCE() forces a fresh, untorn load since m_owner is presumably
 * also read by threads that do not hold the lock — confirm at callers.
 */
#define	mutex_owner(mp)		(READ_ONCE((mp)->m_owner))
#define	mutex_owned(mp)		(mutex_owner(mp) == current)
#define	MUTEX_HELD(mp)		mutex_owned(mp)
#define	MUTEX_NOT_HELD(mp)	(!MUTEX_HELD(mp))
68 
#ifdef CONFIG_LOCKDEP
/*
 * Remember the type a mutex was initialized with so lockdep checking
 * can be suppressed for MUTEX_NOLOCKDEP mutexes.
 *
 * Note: the stray '\' line continuations left over from an earlier
 * macro implementation have been removed; they served no purpose on
 * these inline function definitions and invited breakage.
 */
static inline void
spl_mutex_set_type(kmutex_t *mp, kmutex_type_t type)
{
	mp->m_type = type;
}

/* Disable lockdep around an operation on a MUTEX_NOLOCKDEP mutex. */
static inline void
spl_mutex_lockdep_off_maybe(kmutex_t *mp)
{
	if (mp && mp->m_type == MUTEX_NOLOCKDEP)
		lockdep_off();
}

/* Re-enable lockdep after spl_mutex_lockdep_off_maybe(). */
static inline void
spl_mutex_lockdep_on_maybe(kmutex_t *mp)
{
	if (mp && mp->m_type == MUTEX_NOLOCKDEP)
		lockdep_on();
}
#else  /* CONFIG_LOCKDEP */
#define	spl_mutex_set_type(mp, type)
#define	spl_mutex_lockdep_off_maybe(mp)
#define	spl_mutex_lockdep_on_maybe(mp)
#endif /* CONFIG_LOCKDEP */
92 
93 /*
94  * The following functions must be a #define	and not static inline.
95  * This ensures that the native linux mutex functions (lock/unlock)
96  * will be correctly located in the users code which is important
97  * for the built in kernel lock analysis tools
98  */
/*
 * Initialize a kmutex_t.  'name' and 'ibc' are accepted for Solaris
 * API compatibility; only MUTEX_DEFAULT and MUTEX_NOLOCKDEP types are
 * valid.  The do/while(0) wrapper makes the expansion a single
 * statement, so the macro is safe in unbraced if/else bodies (the
 * former bare-{} form would not compile there).
 */
#undef mutex_init
#define	mutex_init(mp, name, type, ibc)				\
do {								\
	static struct lock_class_key __key;			\
	ASSERT(type == MUTEX_DEFAULT || type == MUTEX_NOLOCKDEP); \
								\
	__mutex_init(MUTEX(mp), (name) ? (#name) : (#mp), &__key); \
	spin_lock_init(&(mp)->m_lock);				\
	spl_mutex_clear_owner(mp);				\
	spl_mutex_set_type(mp, type);				\
} while (0)
110 
/*
 * Destroy a kmutex_t.  Verifies the mutex is unheld; the native Linux
 * mutex needs no explicit teardown.  do/while(0) keeps the expansion
 * a single statement, safe in unbraced if/else bodies.
 */
#undef mutex_destroy
#define	mutex_destroy(mp)					\
do {								\
	VERIFY3P(mutex_owner(mp), ==, NULL);			\
} while (0)
116 
/* BEGIN CSTYLED */
/*
 * Attempt to acquire the mutex without blocking.  Evaluates to 1 on
 * success (owner recorded) or 0 if the lock was already contended,
 * mirroring mutex_trylock().
 */
#define	mutex_tryenter(mp)					\
({								\
	int _acquired_;						\
								\
	spl_mutex_lockdep_off_maybe(mp);			\
	_acquired_ = mutex_trylock(MUTEX(mp));			\
	if (_acquired_ == 1)					\
		spl_mutex_set_owner(mp);			\
	spl_mutex_lockdep_on_maybe(mp);				\
								\
	_acquired_;						\
})
/* END CSTYLED */
130 
/* Lockdep subclass for use with mutex_enter_nested() */
#define	NESTED_SINGLE 1
132 
/*
 * Acquire the mutex with an explicit lockdep subclass.  Asserts the
 * caller does not already hold it (kmutex_t is not recursive).  The
 * subclass is only meaningful under CONFIG_DEBUG_LOCK_ALLOC; otherwise
 * a plain mutex_lock() is issued.  do/while(0) makes the expansion a
 * single statement, safe in unbraced if/else bodies.
 */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
#define	mutex_enter_nested(mp, subclass)			\
do {								\
	ASSERT3P(mutex_owner(mp), !=, current);			\
	spl_mutex_lockdep_off_maybe(mp);			\
	mutex_lock_nested(MUTEX(mp), (subclass));		\
	spl_mutex_lockdep_on_maybe(mp);				\
	spl_mutex_set_owner(mp);				\
} while (0)
#else /* CONFIG_DEBUG_LOCK_ALLOC */
#define	mutex_enter_nested(mp, subclass)			\
do {								\
	ASSERT3P(mutex_owner(mp), !=, current);			\
	spl_mutex_lockdep_off_maybe(mp);			\
	mutex_lock(MUTEX(mp));					\
	spl_mutex_lockdep_on_maybe(mp);				\
	spl_mutex_set_owner(mp);				\
} while (0)
#endif /*  CONFIG_DEBUG_LOCK_ALLOC */
152 
/* Acquire the mutex (blocking); the common case, lockdep subclass 0 */
#define	mutex_enter(mp) mutex_enter_nested((mp), 0)
154 
155 /*
156  * The reason for the spinlock:
157  *
158  * The Linux mutex is designed with a fast-path/slow-path design such that it
159  * does not guarantee serialization upon itself, allowing a race where latter
160  * acquirers finish mutex_unlock before former ones.
161  *
162  * The race renders it unsafe to be used for serializing the freeing of an
163  * object in which the mutex is embedded, where the latter acquirer could go
164  * on to free the object while the former one is still doing mutex_unlock and
165  * causing memory corruption.
166  *
167  * However, there are many places in ZFS where the mutex is used for
168  * serializing object freeing, and the code is shared among other OSes without
169  * this issue. Thus, we need the spinlock to force the serialization on
170  * mutex_exit().
171  *
172  * See http://lwn.net/Articles/575477/ for the information about the race.
173  */
/*
 * Release the mutex.  The statement order is load-bearing: the owner
 * is cleared first, then m_lock serializes mutex_unlock() against a
 * racing mutex_exit() (see the comment above), so 'mp' may be freed
 * by another thread the moment m_lock is dropped.  do/while(0) keeps
 * the expansion a single statement, safe in unbraced if/else bodies.
 */
#define	mutex_exit(mp)						\
do {								\
	spl_mutex_clear_owner(mp);				\
	spin_lock(&(mp)->m_lock);				\
	spl_mutex_lockdep_off_maybe(mp);			\
	mutex_unlock(MUTEX(mp));				\
	spl_mutex_lockdep_on_maybe(mp);				\
	spin_unlock(&(mp)->m_lock);				\
	/* NOTE: do not dereference mp after this point */	\
} while (0)
184 
185 #endif /* _SPL_MUTEX_H */
186