/*
 * Copyright (c) 2015 Michael Neumann <mneumann@ntecs.de>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _LINUX_WW_MUTEX_H_
#define _LINUX_WW_MUTEX_H_

/*
 * A basic, unoptimized implementation of wound/wait mutexes for DragonFly
 * modelled after the Linux API [1].
 *
 * [1]: http://lxr.free-electrons.com/source/include/linux/ww_mutex.h
 */

#include <sys/errno.h>
#include <sys/types.h>
#include <machine/atomic.h>
#include <sys/spinlock.h>
#include <sys/spinlock2.h>
#include <sys/stdbool.h>

struct ww_class {
        volatile u_long stamp;
        const char *name;
};

struct ww_acquire_ctx {
        u_long stamp;
        struct ww_class *ww_class;
};

struct ww_mutex {
        struct spinlock lock;
        volatile int acquired;
        volatile struct ww_acquire_ctx *ctx;
        volatile struct thread *owner;
};

#define DEFINE_WW_CLASS(classname)              \
        struct ww_class classname = {           \
                .stamp = 0,                     \
                .name = #classname              \
        }

static inline void
ww_acquire_init(struct ww_acquire_ctx *ctx, struct ww_class *ww_class) {
        ctx->stamp = atomic_fetchadd_long(&ww_class->stamp, 1);
        ctx->ww_class = ww_class;
}

static inline void
ww_acquire_done(__unused struct ww_acquire_ctx *ctx) {
}

static inline void
ww_acquire_fini(__unused struct ww_acquire_ctx *ctx) {
}

static inline void
ww_mutex_init(struct ww_mutex *lock, struct ww_class *ww_class) {
        spin_init(&lock->lock, ww_class->name);
        lock->acquired = 0;
        lock->ctx = NULL;
        lock->owner = NULL;
}

static inline bool
ww_mutex_is_locked(struct ww_mutex *lock) {
        bool res = false;
        spin_lock(&lock->lock);
        if (lock->acquired > 0) res = true;
        spin_unlock(&lock->lock);
        return res;
}

/*
 * Return 1 if the lock could be acquired, else 0 (contended).
 */
static inline int
ww_mutex_trylock(struct ww_mutex *lock) {
        int res = 1;
        KKASSERT(curthread);

        spin_lock(&lock->lock);
        /*
         * In case no one holds the ww_mutex yet, we acquire it.
         */
        if (lock->acquired == 0) {
                KKASSERT(lock->ctx == NULL);
                lock->acquired += 1;
                lock->owner = curthread;
        }
        /*
         * In case we already hold the ww_mutex, increase the acquisition
         * count.
         */
        else if (lock->owner == curthread) {
                lock->acquired += 1;
        }
        else {
                res = 0;
        }
        spin_unlock(&lock->lock);
        return res;
}
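
/*
 * Illustrative use of ww_mutex_trylock() (`my_lock` is a hypothetical
 * ww_mutex): a successful trylock must be paired with ww_mutex_unlock().
 * Note that the same thread may trylock a ww_mutex it already holds;
 * each successful trylock then needs its own ww_mutex_unlock().
 *
 *      if (ww_mutex_trylock(&my_lock)) {
 *              ... access the protected data ...
 *              ww_mutex_unlock(&my_lock);
 *      }
 */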

/*
 * When `slow` is `true`, the lock operation will always block if the
 * ww_mutex is contended.  It is assumed that the caller does not hold any
 * (ww_mutex) resources when calling the slow path, as holding them could
 * lead to deadlocks.
 *
 * When `intr` is `true`, the ssleep will be interruptible.
 */
static inline int
__ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx, bool slow, bool intr) {
        int err;

        KKASSERT(curthread);

        spin_lock(&lock->lock);
        for (;;) {
                /*
                 * In case no one holds the ww_mutex yet, we acquire it.
                 */
                if (lock->acquired == 0) {
                        KKASSERT(lock->ctx == NULL);
                        lock->acquired += 1;
                        lock->ctx = ctx;
                        lock->owner = curthread;
                        err = 0;
                        break;
                }
                /*
                 * In case we already hold the ww_mutex, simply increase
                 * the acquisition count and return -EALREADY.
                 */
                else if (lock->owner == curthread) {
                        KKASSERT(lock->ctx == ctx);
                        lock->acquired += 1;
                        err = -EALREADY;
                        break;
                }
                /*
                 * This is the contention case, where the ww_mutex is
                 * already held by another context.
                 */
                else {
                        /*
                         * Three cases in which we sleep and then retry:
                         *
                         * - We are in the slow path (first lock to obtain).
                         *
                         * - No context was specified.  We assume a single
                         *   resource, so there is no danger of a deadlock.
                         *
                         * - An `older` process (`ctx`) tries to acquire a
                         *   lock already held by a `younger` process.
                         *   We put the `older` process to sleep until
                         *   the `younger` process gives up all of its
                         *   resources.
                         */
                        if (slow || ctx == NULL || ctx->stamp < lock->ctx->stamp) {
                                int s = ssleep(lock, &lock->lock,
                                               intr ? PCATCH : 0,
                                               ctx ? ctx->ww_class->name : "ww_mutex_lock", 0);
                                if (intr && (s == EINTR || s == ERESTART)) {
                                        /* XXX: Should we handle ERESTART? */
                                        err = -EINTR;
                                        break;
                                }
                        }
                        /*
                         * If a `younger` process tries to acquire a lock
                         * already held by an `older` process, we `wound` it,
                         * i.e. we return -EDEADLK because there is a
                         * potential risk of a deadlock.  The `younger`
                         * process should then give up all of its resources
                         * and try again to acquire the lock in question,
                         * this time in a blocking manner (see the usage
                         * sketch following this function).
                         */
                        else {
                                err = -EDEADLK;
                                break;
                        }
                }

        } /* for */
        spin_unlock(&lock->lock);
        return err;
}
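
/*
 * Usage sketch for the wound/wait protocol (illustrative only; `my_class`,
 * `lock_a` and `lock_b` are hypothetical names).  A caller that needs
 * several ww_mutexes takes them all under one ww_acquire_ctx; when a lock
 * attempt returns -EDEADLK, the caller is the `younger` context and must
 * drop everything it holds, take the contended lock via the slow path and
 * re-acquire the rest.  For brevity this sketch retries only once; a real
 * caller would loop until it holds all the locks it needs.
 *
 *      struct ww_acquire_ctx ctx;
 *      int err;
 *
 *      ww_acquire_init(&ctx, &my_class);
 *      err = ww_mutex_lock(&lock_a, &ctx);
 *      if (err == 0) {
 *              err = ww_mutex_lock(&lock_b, &ctx);
 *              if (err == -EDEADLK) {
 *                      ww_mutex_unlock(&lock_a);
 *                      ww_mutex_lock_slow(&lock_b, &ctx);
 *                      err = ww_mutex_lock(&lock_a, &ctx);
 *                      if (err != 0)
 *                              ww_mutex_unlock(&lock_b);
 *              }
 *      }
 *      if (err == 0) {
 *              ww_acquire_done(&ctx);
 *              ... work on the data protected by lock_a and lock_b ...
 *              ww_mutex_unlock(&lock_b);
 *              ww_mutex_unlock(&lock_a);
 *      }
 *      ww_acquire_fini(&ctx);
 */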

static inline int
ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) {
        return __ww_mutex_lock(lock, ctx, false, false);
}

static inline void
ww_mutex_lock_slow(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) {
        (void)__ww_mutex_lock(lock, ctx, true, false);
}

static inline int
ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) {
        return __ww_mutex_lock(lock, ctx, false, true);
}

static inline int __must_check
ww_mutex_lock_slow_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) {
        return __ww_mutex_lock(lock, ctx, true, true);
}

static inline void
ww_mutex_unlock(struct ww_mutex *lock) {
        spin_lock(&lock->lock);
        KKASSERT(lock->owner == curthread);
        KKASSERT(lock->acquired > 0);

        --lock->acquired;
        if (lock->acquired > 0) {
                spin_unlock(&lock->lock);
                return;
        }

        KKASSERT(lock->acquired == 0);
        lock->ctx = NULL;
        lock->owner = NULL;
        spin_unlock(&lock->lock);
        wakeup(lock);
}

static inline void
ww_mutex_destroy(struct ww_mutex *lock) {
        KKASSERT(lock->acquired == 0);
        KKASSERT(lock->ctx == NULL);
        KKASSERT(lock->owner == NULL);
        spin_uninit(&lock->lock);
}

#endif  /* _LINUX_WW_MUTEX_H_ */