xref: /dragonfly/sys/dev/drm/linux_wwmutex.c (revision a9783bc6)
/*-
 * Copyright (c) 2003-2011 The DragonFly Project.  All rights reserved.
 * Copyright (c) 2015 Michael Neumann <mneumann@ntecs.de>.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com> and
 *    Michael Neumann <mneumann@ntecs.de>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/types.h>
#include <sys/errno.h>
#include <sys/kernel.h>
#include <sys/spinlock.h>
#include <sys/spinlock2.h>

#include <machine/atomic.h>

#include <linux/ww_mutex.h>

void
ww_acquire_init(struct ww_acquire_ctx *ctx, struct ww_class *ww_class)
{
	ctx->stamp = atomic_fetchadd_long(&ww_class->stamp, 1);
	ctx->acquired = 0;
	ctx->ww_class = ww_class;
}

void
ww_acquire_done(struct ww_acquire_ctx *ctx __unused)
{
}

void
ww_acquire_fini(struct ww_acquire_ctx *ctx __unused)
{
}

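/*
 * Example (illustrative only, not part of the compiled code): a minimal
 * sketch of the acquire-context lifecycle as seen by callers of this shim.
 * The names my_class and my_lock are hypothetical.
 *
 *	struct ww_acquire_ctx ctx;
 *
 *	ww_acquire_init(&ctx, &my_class);	// stamp and reset the context
 *	ww_mutex_lock(&my_lock, &ctx);		// acquire under the context
 *	ww_acquire_done(&ctx);			// no-op in this implementation
 *	...
 *	ww_mutex_unlock(&my_lock);
 *	ww_acquire_fini(&ctx);			// no-op in this implementation
 */
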
void
ww_mutex_init(struct ww_mutex *ww, struct ww_class *ww_class)
{
	lockinit(&ww->base, ww_class->name, 0, LK_CANRECURSE);
	ww->ctx = NULL;
	ww->stamp = 0xFFFFFFFFFFFFFFFFLU;
	ww->blocked = 0;
}

void
ww_mutex_destroy(struct ww_mutex *ww)
{
	lockuninit(&ww->base);
}

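/*
 * Example (illustrative only): initializing and tearing down a ww_mutex.
 * The ww_class instance is supplied by the caller (the Linux API declares
 * one with DEFINE_WW_CLASS(); the exact declaration depends on the compat
 * header).  my_class and my_lock are hypothetical names.
 *
 *	struct ww_mutex my_lock;
 *
 *	ww_mutex_init(&my_lock, &my_class);	// wraps lockinit()
 *	...
 *	ww_mutex_destroy(&my_lock);		// wraps lockuninit()
 */
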
/*
 * Optimized lock path.
 *
 * A separate (slow) path is optional as long as we block normally on the
 * initial lock attempt; it is currently not implemented, so the slow
 * argument is ignored.
 */
static __inline
int
__wwlock(struct ww_mutex *ww, struct ww_acquire_ctx *ctx,
	 bool slow __unused, bool intr)
{
	int flags = LK_EXCLUSIVE;
	int error;

	if (intr)
		flags |= LK_PCATCH;

	/*
	 * Normal mutex if ctx is NULL
	 */
	if (ctx == NULL) {
		error = lockmgr(&ww->base, flags);
		if (error)
			error = -EINTR;
		return error;
	}

	/*
	 * A normal blocking lock can be used when ctx->acquired is 0 (no
	 * prior locks are held).  If prior locks are held then we cannot
	 * block here.
	 *
	 * In the non-blocking case set up our tsleep interlock prior to
	 * attempting to acquire the lock.
	 */
	for (;;) {
		if (ctx->acquired != 0) {
			flags |= LK_NOWAIT;
			tsleep_interlock(ww, (intr ? PCATCH : 0));
		}
		error = lockmgr(&ww->base, flags);
		if (error == 0) {
			ww->ctx = ctx;
			ww->stamp = ctx->stamp;
			++ctx->acquired;
			return 0;
		}

		/*
		 * EINTR or ERESTART returns -EINTR.  ENOLCK and EWOULDBLOCK
		 * cannot happen (LK_SLEEPFAIL not set, timeout is not set).
		 */
		if (error != EBUSY)
			return -EINTR;

		/*
		 * We can only get EBUSY here when ctx->acquired is non-zero,
		 * i.e. when LK_NOWAIT was set above.
		 * NOTE: ww->ctx is not MPSAFE.
		 * NOTE: ww->stamp is heuristic; a race is possible.
		 */
		KKASSERT(ctx->acquired > 0);

		/*
		 * Unwind if we aren't the oldest.
		 */
		if (ctx->stamp > ww->stamp)
			return -EDEADLK;

		/*
		 * We have priority over the currently held lock.  Tell
		 * the remote lock holder that we want them to unwind.
		 *
		 * error is zero if woken up
		 *	    EINTR / ERESTART - signal
		 *	    EWOULDBLOCK	     - timeout expired (if not 0)
		 */
		atomic_swap_int(&ww->blocked, 1);
		error = tsleep(ww, PINTERLOCKED | (intr ? PCATCH : 0),
			       ctx->ww_class->name, 0);
		if (intr && (error == EINTR || error == ERESTART))
			return -EINTR;
		/* retry */
	}
}

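/*
 * Worked example of the stamp check above (illustrative only): suppose a
 * context with stamp 5 currently holds the mutex.  A contending context
 * with stamp 9 (younger) sees ctx->stamp > ww->stamp and unwinds with
 * -EDEADLK, while a contending context with stamp 3 (older) sets
 * ww->blocked and sleeps until ww_mutex_unlock() wakes it, then retries.
 */
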
int
ww_mutex_lock(struct ww_mutex *ww, struct ww_acquire_ctx *ctx)
{
	return __wwlock(ww, ctx, 0, 0);
}

int
ww_mutex_lock_slow(struct ww_mutex *ww, struct ww_acquire_ctx *ctx)
{
	return __wwlock(ww, ctx, 1, 0);
}

int
ww_mutex_lock_interruptible(struct ww_mutex *ww, struct ww_acquire_ctx *ctx)
{
	return __wwlock(ww, ctx, 0, 1);
}

int
ww_mutex_lock_slow_interruptible(struct ww_mutex *ww,
				 struct ww_acquire_ctx *ctx)
{
	return __wwlock(ww, ctx, 1, 1);
}

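/*
 * Example (illustrative only): the caller-side backoff pattern the -EDEADLK
 * return is designed for.  On -EDEADLK the caller drops everything it
 * already holds and retries; the Linux API typically re-takes the contended
 * lock with ww_mutex_lock_slow() first, which in this implementation
 * behaves exactly like ww_mutex_lock() (the slow argument is unused).  The
 * names ctx, my_class, lock_a and lock_b are hypothetical.
 *
 *	ww_acquire_init(&ctx, &my_class);
 * retry:
 *	ww_mutex_lock(&lock_a, &ctx);
 *	if (ww_mutex_lock(&lock_b, &ctx) == -EDEADLK) {
 *		ww_mutex_unlock(&lock_a);	// back off: we are younger
 *		goto retry;			// lock_a blocks normally now
 *	}
 *	ww_acquire_done(&ctx);
 *	...
 *	ww_mutex_unlock(&lock_b);
 *	ww_mutex_unlock(&lock_a);
 *	ww_acquire_fini(&ctx);
 */
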
void
ww_mutex_unlock(struct ww_mutex *ww)
{
	struct ww_acquire_ctx *ctx;

	ctx = ww->ctx;
	if (ctx) {
		KKASSERT(ctx->acquired > 0);
		--ctx->acquired;
		ww->ctx = NULL;
		ww->stamp = 0xFFFFFFFFFFFFFFFFLU;
	}
	lockmgr(&ww->base, LK_RELEASE);
	if (atomic_swap_int(&ww->blocked, 0))
		wakeup(ww);
}
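
/*
 * Example (illustrative only): unwinding on a signal.  The interruptible
 * variants return -EINTR when a signal is caught; the caller is expected to
 * release whatever it has already acquired and dispose of the context
 * rather than retry.  my_lock, other_lock and ctx are hypothetical names.
 *
 *	ret = ww_mutex_lock_interruptible(&my_lock, &ctx);
 *	if (ret == -EINTR) {
 *		ww_mutex_unlock(&other_lock);	// drop locks already held
 *		ww_acquire_fini(&ctx);
 *		return ret;
 *	}
 */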