/*
 * Copyright (c) 2003 Matthew Dillon, All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. The name of the developer may NOT be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/i386/include/lock.h,v 1.11.2.2 2000/09/30 02:49:34 ps Exp $
 * $DragonFly: src/sys/platform/pc64/include/lock.h,v 1.2 2007/09/23 04:29:31 yanyh Exp $
 */

#ifndef _MACHINE_LOCK_H_
#define _MACHINE_LOCK_H_

#ifndef _CPU_PSL_H_
#include <machine/psl.h>
#endif

/*
 * MP_FREE_LOCK is used by both assembly and C under SMP.
 */
#ifdef SMP
#define MP_FREE_LOCK	0xffffffff	/* value of lock when free */
#endif

#ifdef LOCORE

/*
 * Spinlock assembly support.  Note: eax and ecx can be tromped.  No
 * other register will be.  Note that these routines are sometimes
 * called with (%edx) as the mem argument.
 *
 * Under UP the spinlock routines still serve to disable/restore
 * interrupts.
 */
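/*
 * For reference, a minimal C sketch of what the SMP SPIN_LOCK/SPIN_UNLOCK
 * macros below implement.  This is illustrative pseudocode only, not part
 * of the header; the helper names are modeled on the i386 cpufunc.h and
 * atomic.h conventions and should be treated as assumptions:
 *
 *	static __inline void
 *	spin_lock_sketch(volatile u_int *mem)
 *	{
 *		u_int flags;
 *
 *		flags = read_eflags() | PSL_C;	// PSL_C forces non-zero value
 *		cpu_disable_intr();		// the "cli" step
 *		// atomically install saved flags once the lock word is 0
 *		while (!atomic_cmpset_int(mem, 0, flags))
 *			;			// spin (lock cmpxchgl loop)
 *	}
 *
 *	static __inline void
 *	spin_unlock_sketch(volatile u_int *mem)
 *	{
 *		u_int flags = *mem;		// owner's saved eflags
 *		*mem = 0;			// mark the lock free
 *		write_eflags(flags);		// "popfl": restore intr state
 *	}
 *
 * The lock word thus holds 0 when free and the owner's saved eflags
 * (guaranteed non-zero via PSL_C) while held, which is why SPIN_UNLOCK
 * can restore the interrupt state directly from the lock word.
 */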

#ifdef SMP

#define SPIN_INIT(mem)						\
	movl	$0,mem ;					\

#define SPIN_INIT_NOREG(mem)					\
	SPIN_INIT(mem) ;					\

#define SPIN_LOCK(mem)						\
	pushfl ;						\
	popl	%ecx ;		/* flags */			\
	cli ;							\
	orl	$PSL_C,%ecx ;	/* make sure non-zero */	\
7: ;								\
	movl	$0,%eax ;	/* expected contents of lock */	\
	lock cmpxchgl %ecx,mem ; /* Z=1 (jz) on success */	\
	jnz	7b ;						\

#define SPIN_LOCK_PUSH_REGS					\
	subl	$8,%esp ;					\
	movl	%ecx,(%esp) ;					\
	movl	%eax,4(%esp) ;					\

#define SPIN_LOCK_POP_REGS					\
	movl	(%esp),%ecx ;					\
	movl	4(%esp),%eax ;					\
	addl	$8,%esp ;					\

#define SPIN_LOCK_FRAME_SIZE	8

#define SPIN_LOCK_NOREG(mem)					\
	SPIN_LOCK_PUSH_REGS ;					\
	SPIN_LOCK(mem) ;					\
	SPIN_LOCK_POP_REGS ;					\

#define SPIN_UNLOCK(mem)					\
	pushl	mem ;						\
	movl	$0,mem ;					\
	popfl ;							\

#define SPIN_UNLOCK_PUSH_REGS
#define SPIN_UNLOCK_POP_REGS
#define SPIN_UNLOCK_FRAME_SIZE	0

#define SPIN_UNLOCK_NOREG(mem)					\
	SPIN_UNLOCK(mem) ;					\

#else	/* !SMP */

#define SPIN_LOCK(mem)						\
	pushfl ;						\
	cli ;							\
	orl	$PSL_C,(%esp) ;					\
	popl	mem ;						\

#define SPIN_LOCK_PUSH_REGS
#define SPIN_LOCK_POP_REGS
#define SPIN_LOCK_FRAME_SIZE	0

#define SPIN_UNLOCK(mem)					\
	pushl	mem ;						\
	movl	$0,mem ;					\
	popfl ;							\

#define SPIN_UNLOCK_PUSH_REGS
#define SPIN_UNLOCK_POP_REGS
#define SPIN_UNLOCK_FRAME_SIZE	0

#endif	/* SMP */

#else	/* !LOCORE */

#ifdef _KERNEL

/*
 * Spinlock functions (UP and SMP).  Under UP a spinlock still serves
 * to disable/restore interrupts even if it doesn't spin.
 */
struct spinlock_deprecated {
	volatile int	opaque;
};

typedef struct spinlock_deprecated *spinlock_t;

void	mpintr_lock(void);	/* disables int / spinlock combo */
void	mpintr_unlock(void);
void	com_lock(void);		/* disables int / spinlock combo */
void	com_unlock(void);
void	imen_lock(void);	/* disables int / spinlock combo */
void	imen_unlock(void);
void	clock_lock(void);	/* disables int / spinlock combo */
void	clock_unlock(void);

extern struct spinlock_deprecated smp_rv_spinlock;

void	spin_lock_deprecated(spinlock_t);
void	spin_unlock_deprecated(spinlock_t);

/*
 * Inline version of spinlock routines -- overrides assembly.  Only unlock
 * and init here please.
 */
static __inline void
spin_lock_init(spinlock_t lock)
{
	lock->opaque = 0;
}

#endif	/* _KERNEL */

#if defined(_KERNEL) || defined(_UTHREAD)

/*
 * MP LOCK functions for SMP and UP.  Under UP the MP lock does not exist
 * but we leave a few functions intact as macros for convenience.
 */
#ifdef SMP

void	get_mplock(void);
int	try_mplock(void);
void	rel_mplock(void);
int	cpu_try_mplock(void);
void	cpu_get_initial_mplock(void);

extern u_int	mp_lock;

#define MP_LOCK_HELD()		(mp_lock == mycpu->gd_cpuid)
#define ASSERT_MP_LOCK_HELD(td)	\
	KASSERT(MP_LOCK_HELD(), ("MP_LOCK_HELD(): not held thread %p", td))

static __inline void
cpu_rel_mplock(void)
{
	mp_lock = MP_FREE_LOCK;
}

#else	/* !SMP */

#define get_mplock()
#define try_mplock()		1
#define rel_mplock()
#define ASSERT_MP_LOCK_HELD(td)

#endif	/* SMP */
#endif	/* _KERNEL || _UTHREAD */
#endif	/* !LOCORE */
#endif	/* !_MACHINE_LOCK_H_ */
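
/*
 * Usage sketch for the MP lock API declared above (illustrative only;
 * assumes a kernel thread context on an SMP build, where these are real
 * functions rather than the UP macro stubs):
 *
 *	get_mplock();		// acquire the giant MP lock
 *	// ... access non-MP-safe kernel structures ...
 *	rel_mplock();		// release it
 *
 *	if (try_mplock()) {	// non-blocking attempt, returns 1 on success
 *		// ... critical work ...
 *		rel_mplock();
 *	}
 *
 * ASSERT_MP_LOCK_HELD(td) can be sprinkled into code paths that must only
 * run while the current cpu owns the MP lock.
 */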