/* $NetBSD: lock.h,v 1.8 2001/06/04 21:37:12 ragge Exp $ */

/*
 * Copyright (c) 2000 Ludd, University of Lule}, Sweden.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed at Ludd, University of Lule}.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _VAX_LOCK_H_
#define _VAX_LOCK_H_

/*
 * Simple spin-lock primitives for the VAX port.
 *
 * The lock word is a single int; bit 0 set means "locked" (see the
 * bbssi/bbcci variants below, which operate on bit 0).  The live
 * implementations call the assembler helpers Slocktry/Sunlock --
 * defined elsewhere in the port; TODO confirm location -- via "jsb",
 * passing the lock's address in r1.
 */
typedef __volatile int __cpu_simple_lock_t;

#define __SIMPLELOCK_LOCKED 1
#define __SIMPLELOCK_UNLOCKED 0

/*
 * Initialize a lock to the unlocked state.  This uses the same Sunlock
 * helper as __cpu_simple_unlock: both simply clear the lock word.
 */
static __inline void
__cpu_simple_lock_init(__cpu_simple_lock_t *alp)
{
	__asm__ __volatile ("movl %0,r1;jsb Sunlock"
		: /* No output */
		: "g"(alp)
		: "r1","cc","memory");
#if 0
	/* Disabled alternative: clear bit 0 with the interlocked bbcci insn. */
	__asm__ __volatile ("bbcci $0, %0, 1f;1:"
		: /* No output */
		: "m"(*alp));
#endif
}

/*
 * Try once to acquire the lock.  Slocktry reports its result in r0,
 * which is copied to the return value: nonzero on success, 0 if the
 * lock was already held.
 */
static __inline int
__cpu_simple_lock_try(__cpu_simple_lock_t *alp)
{
	int ret;

	__asm__ __volatile ("movl %1,r1;jsb Slocktry;movl r0,%0"
		: "=&r"(ret)
		: "g"(alp)
		: "r0","r1","cc","memory");
#if 0
	/* Disabled alternative: interlocked branch-on-bit-set-and-set. */
	__asm__ __volatile ("movl $0,%0;bbssi $0,%1,1f;incl %0;1:"
		: "=&r"(ret)
		: "m"(*alp));
#endif

	return ret;
}

/* IPI classes that must still be serviced while spinning for a lock. */
#define VAX_LOCK_CHECKS ((1 << IPI_SEND_CNCHAR) | (1 << IPI_DDB))

/*
 * Spin until the lock is acquired.  While spinning, service any pending
 * console/DDB IPIs at splipi so another CPU that holds the lock and is
 * waiting on us cannot deadlock the system.  Implemented as a macro
 * (statement block), not an inline function.
 */
#define __cpu_simple_lock(alp) \
{ \
	struct cpu_info *__ci = curcpu(); \
 \
	while (__cpu_simple_lock_try(alp) == 0) { \
		int __s; \
 \
		if (__ci->ci_ipimsgs & VAX_LOCK_CHECKS) { \
			__s = splipi(); \
			cpu_handle_ipi(); \
			splx(__s); \
		} \
	} \
}

#if 0
/*
 * Disabled earlier inline-function version of __cpu_simple_lock,
 * kept for reference (note: it checks only IPI_SEND_CNCHAR, not
 * the full VAX_LOCK_CHECKS mask used by the macro above).
 */
static __inline void
__cpu_simple_lock(__cpu_simple_lock_t *alp)
{
	struct cpu_info *ci = curcpu();

	while (__cpu_simple_lock_try(alp) == 0) {
		int s;

		if (ci->ci_ipimsgs & IPI_SEND_CNCHAR) {
			s = splipi();
			cpu_handle_ipi();
			splx(s);
		}
	}

#if 0
	__asm__ __volatile ("movl %0,r1;jsb Slock"
		: /* No output */
		: "g"(alp)
		: "r0","r1","cc","memory");
#endif
#if 0
	__asm__ __volatile ("1:;bbssi $0, %0, 1b"
		: /* No output */
		: "m"(*alp));
#endif
}
#endif

/*
 * Release the lock: Sunlock clears the lock word.
 */
static __inline void
__cpu_simple_unlock(__cpu_simple_lock_t *alp)
{
	__asm__ __volatile ("movl %0,r1;jsb Sunlock"
		: /* No output */
		: "g"(alp)
		: "r1","cc","memory");
#if 0
	/* Disabled alternative: clear bit 0 with the interlocked bbcci insn. */
	__asm__ __volatile ("bbcci $0, %0, 1f;1:"
		: /* No output */
		: "m"(*alp));
#endif
}

#if defined(MULTIPROCESSOR)
/*
 * On the Vax, interprocessor interrupts can come in at device priority
 * level or lower. This can cause some problems while waiting for r/w
 * spinlocks from a high'ish priority level: IPIs that come in will not
 * be processed. This can lead to deadlock.
 *
 * This hook allows IPIs to be processed while a spinlock's interlock
 * is released.
 */
#define SPINLOCK_SPIN_HOOK \
do { \
	struct cpu_info *__ci = curcpu(); \
	int __s; \
 \
	if (__ci->ci_ipimsgs != 0) { \
		/* printf("CPU %lu has IPIs pending\n", \
		   __ci->ci_cpuid); */ \
		__s = splipi(); \
		cpu_handle_ipi(); \
		splx(__s); \
	} \
} while (0)
#endif /* MULTIPROCESSOR */
#endif /* _VAX_LOCK_H_ */