/*	$NetBSD: lock.h,v 1.28 2009/11/25 14:28:50 rmind Exp $	*/

/*-
 * Copyright (c) 1998, 1999, 2000 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Machine-dependent spin lock operations.
 */

#ifndef _ALPHA_LOCK_H_
#define	_ALPHA_LOCK_H_

#ifdef _KERNEL_OPT
#include "opt_multiprocessor.h"
#endif

static __inline int
__SIMPLELOCK_LOCKED_P(__cpu_simple_lock_t *__ptr)
{
	return *__ptr == __SIMPLELOCK_LOCKED;
}

static __inline int
__SIMPLELOCK_UNLOCKED_P(__cpu_simple_lock_t *__ptr)
{
	return *__ptr == __SIMPLELOCK_UNLOCKED;
}

static __inline void
__cpu_simple_lock_clear(__cpu_simple_lock_t *__ptr)
{
	*__ptr = __SIMPLELOCK_UNLOCKED;
}

static __inline void
__cpu_simple_lock_set(__cpu_simple_lock_t *__ptr)
{
	*__ptr = __SIMPLELOCK_LOCKED;
}

static __inline void
__cpu_simple_lock_init(__cpu_simple_lock_t *alp)
{

	__asm volatile(
		"# BEGIN __cpu_simple_lock_init\n"
		"	stl	$31, %0		\n"
		"	mb			\n"
		"	# END __cpu_simple_lock_init"
		: "=m" (*alp));
}

static __inline void
__cpu_simple_lock(__cpu_simple_lock_t *alp)
{
	unsigned long t0;

	/*
	 * Note, if we detect that the lock is held when
	 * we do the initial load-locked, we spin using
	 * a non-locked load to save the coherency logic
	 * some work.
	 */

	__asm volatile(
		"# BEGIN __cpu_simple_lock\n"
		"1:	ldl_l	%0, %3		\n"
		"	bne	%0, 2f		\n"
		"	bis	$31, %2, %0	\n"
		"	stl_c	%0, %1		\n"
		"	beq	%0, 3f		\n"
		"	mb			\n"
		"	br	4f		\n"
		"2:	ldl	%0, %3		\n"
		"	beq	%0, 1b		\n"
		"	br	2b		\n"
		"3:	br	1b		\n"
		"4:				\n"
		"	# END __cpu_simple_lock\n"
		: "=&r" (t0), "=m" (*alp)
		: "i" (__SIMPLELOCK_LOCKED), "m" (*alp)
		: "memory");
}

static __inline int
__cpu_simple_lock_try(__cpu_simple_lock_t *alp)
{
	unsigned long t0, v0;

	__asm volatile(
		"# BEGIN __cpu_simple_lock_try\n"
		"1:	ldl_l	%0, %4		\n"
		"	bne	%0, 2f		\n"
		"	bis	$31, %3, %0	\n"
		"	stl_c	%0, %2		\n"
		"	beq	%0, 3f		\n"
		"	mb			\n"
		"	bis	$31, 1, %1	\n"
		"	br	4f		\n"
		"2:	bis	$31, $31, %1	\n"
		"	br	4f		\n"
		"3:	br	1b		\n"
		"4:				\n"
		"	# END __cpu_simple_lock_try"
		: "=&r" (t0), "=r" (v0), "=m" (*alp)
		: "i" (__SIMPLELOCK_LOCKED), "m" (*alp)
		: "memory");

	return (v0 != 0);
}

static __inline void
__cpu_simple_unlock(__cpu_simple_lock_t *alp)
{

	__asm volatile(
		"# BEGIN __cpu_simple_unlock\n"
		"	mb			\n"
		"	stl	$31, %0		\n"
		"	# END __cpu_simple_unlock"
		: "=m" (*alp));
}

#if defined(MULTIPROCESSOR)
/*
 * On the Alpha, interprocessor interrupts come in at device priority
 * level.  This can cause some problems while waiting for r/w spinlocks
 * from a high'ish priority level: IPIs that come in will not be processed.
 * This can lead to deadlock.
 *
 * This hook allows IPIs to be processed while a spinlock's interlock
 * is released.
 */
#define	SPINLOCK_SPIN_HOOK						\
do {									\
	struct cpu_info *__ci = curcpu();				\
	int __s;							\
									\
	if (__ci->ci_ipis != 0) {					\
		/* printf("CPU %lu has IPIs pending\n",			\
		    __ci->ci_cpuid); */					\
		__s = splhigh();					\
		alpha_ipi_process(__ci, NULL);				\
		splx(__s);						\
	}								\
} while (0)
#define	SPINLOCK_BACKOFF_HOOK	(void)nullop((void *)0)
#endif /* MULTIPROCESSOR */

static __inline void
mb_read(void)
{
	__asm __volatile("mb" : : : "memory");
}

static __inline void
mb_write(void)
{
	/* XXX wmb */
	__asm __volatile("mb" : : : "memory");
}

static __inline void
mb_memory(void)
{
	__asm __volatile("mb" : : : "memory");
}

#endif /* _ALPHA_LOCK_H_ */
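
/*
 * Illustrative sketch (not part of the original header, compiled out
 * below): how the __cpu_simple_lock primitives above are typically
 * combined to protect a shared counter.  The names example_lock,
 * example_count, example_init, example_increment and
 * example_try_increment are hypothetical; only the __cpu_simple_lock*
 * operations are defined in this file, and __cpu_simple_lock_t itself
 * is supplied elsewhere in the machine-dependent headers.
 */
#if 0
static __cpu_simple_lock_t example_lock;
static unsigned long example_count;

static void
example_init(void)
{
	/* Store the unlocked value and issue a memory barrier. */
	__cpu_simple_lock_init(&example_lock);
}

static void
example_increment(void)
{
	/* Spin (ldl_l/stl_c loop) until the lock is acquired. */
	__cpu_simple_lock(&example_lock);
	example_count++;
	/* mb, then store the unlocked value. */
	__cpu_simple_unlock(&example_lock);
}

static int
example_try_increment(void)
{
	/* Single acquisition attempt; non-zero return means we got it. */
	if (__cpu_simple_lock_try(&example_lock) == 0)
		return 0;
	example_count++;
	__cpu_simple_unlock(&example_lock);
	return 1;
}
#endif /* 0 */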