1 /* 2 * Copyright (c) 2005 Jeffrey M. Hsu. All rights reserved. 3 * 4 * This code is derived from software contributed to The DragonFly Project 5 * by Jeffrey M. Hsu. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 3. Neither the name of The DragonFly Project nor the names of its 16 * contributors may be used to endorse or promote products derived 17 * from this software without specific, prior written permission. 18 * 19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 21 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 22 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 23 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 24 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING, 25 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 26 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 27 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 28 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 29 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 30 * SUCH DAMAGE. 
 *
 * $DragonFly: src/sys/sys/spinlock2.h,v 1.4 2005/11/19 17:19:48 dillon Exp $
 */

#ifndef _SYS_SPINLOCK2_H_
#define _SYS_SPINLOCK2_H_

/*
 * Inline spinlock implementation.  On SMP kernels the lock word is
 * acquired with an atomic swap; on UP kernels the spinlock operations
 * collapse to stubs (there is no other cpu to race with, so critical
 * sections alone provide the required exclusion).
 */

#include <sys/thread2.h>
#include <machine/atomic.h>
#include <machine/cpufunc.h>

#ifdef SMP

/*
 * Adjust the per-thread spinlock count used for INVARIANTS sanity
 * checking.  Compiles to nothing when INVARIANTS is not defined.
 */
static __inline void
spin_lock_debug(int count)
{
#ifdef INVARIANTS
	curthread->td_spinlocks += count;
#endif
}

/*
 * Attempt to acquire the spinlock without spinning.  Returns TRUE on
 * success, FALSE if the lock was already held.  atomic_swap_int()
 * yields the previous value of the lock word, so 0 means we got it.
 */
static __inline boolean_t
spin_trylock(struct spinlock *mtx)
{
	if (atomic_swap_int(&mtx->lock, 1) == 0) {
		spin_lock_debug(1);
		return (TRUE);
	}
	return (FALSE);
}

/* Out-of-line slow path taken when the initial swap finds the lock held. */
extern void spin_lock_contested(struct spinlock *mtx);

/*
 * The quick versions should be used only if you are already
 * in a critical section or you know the spinlock will never
 * be used by an hard interrupt or soft interrupt.
 */
static __inline void
spin_lock_quick(struct spinlock *mtx)
{
	spin_lock_debug(1);
	if (atomic_swap_int(&mtx->lock, 1) != 0)
		spin_lock_contested(mtx);	/* slow path */
}

/*
 * Release a spinlock acquired via spin_lock_quick().  cpu_sfence()
 * is issued before clearing the lock word — presumably a store
 * barrier so that writes made inside the critical section become
 * visible before the release, which allows the release itself to be
 * a plain (non-bus-locked) store.  NOTE(review): cpu_sfence()
 * semantics are defined in <machine/cpufunc.h>; confirm there.
 */
static __inline void
spin_unlock_quick(struct spinlock *mtx)
{
	spin_lock_debug(-1);
	cpu_sfence();
	mtx->lock = 0;		/* non-bus-locked lock release */
}

/*
 * Returns non-zero if the spinlock is currently held by anyone.
 */
static __inline boolean_t
spin_is_locked(struct spinlock *mtx)
{
	return (mtx->lock);
}

/*
 * Initialize a spinlock to the unlocked state.
 */
static __inline void
spin_init(struct spinlock *mtx)
{
	mtx->lock = 0;
}

#else /* SMP */

/*
 * UP build: spinlocks degenerate to no-ops.  spin_trylock() always
 * succeeds and spin_is_locked() always reports unlocked.
 */
static __inline boolean_t
spin_trylock(struct spinlock *mtx)
{
	return (TRUE);
}

static __inline boolean_t
spin_is_locked(struct spinlock *mtx)
{
	return (FALSE);
}

static __inline void spin_lock_quick(struct spinlock *mtx) { }
static __inline void spin_unlock_quick(struct spinlock *mtx) { }
static __inline void spin_init(struct spinlock *mtx) { }

#endif /* SMP */

/*
 * The normal spin_lock() API automatically enters and exits a
 * critical section, preventing deadlocks from interrupt preemption
 * if the interrupt thread accesses the same spinlock.
 */
static __inline void
spin_lock(struct spinlock *mtx)
{
	crit_enter_id("spin");
	spin_lock_quick(mtx);
}

/*
 * Release the spinlock and leave the critical section entered by
 * spin_lock().
 */
static __inline void
spin_unlock(struct spinlock *mtx)
{
	spin_unlock_quick(mtx);
	crit_exit_id("spin");
}

#endif /* _SYS_SPINLOCK2_H_ */