1 /* 2 * Copyright (c) 2005 Jeffrey M. Hsu. All rights reserved. 3 * 4 * This code is derived from software contributed to The DragonFly Project 5 * by Jeffrey M. Hsu. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 3. Neither the name of The DragonFly Project nor the names of its 16 * contributors may be used to endorse or promote products derived 17 * from this software without specific, prior written permission. 18 * 19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 21 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 22 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 23 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 24 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING, 25 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 26 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 27 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 28 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 29 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 30 * SUCH DAMAGE. 31 */ 32 33 #ifndef _SYS_SPINLOCK_H_ 34 #define _SYS_SPINLOCK_H_ 35 36 /* 37 * Note that the spinlock structure is retained whether we are SMP or not, 38 * so structures using embedded spinlocks do not change size for SMP vs UP 39 * builds. 
 *
 * DragonFly spinlocks use a chasing counter.  A core desiring a spinlock
 * does an atomic_fetchadd_int() on countb and then waits for counta to
 * reach its value using MWAIT.  Releasing the spinlock involves an
 * atomic_add_int() on counta.  If no MWAIT is available the core can spin
 * waiting for the value to change which is still represented by a shared+ro
 * cache entry.
 */
struct spinlock {
	int counta;		/* ticket now being served; incremented on release */
	int countb;		/* ticket dispenser; atomic_fetchadd_int() to acquire */
	const char *descr;	/* human-readable lock name for debugging/diagnostics */
};

/*
 * Static initializer: both counters start at 0 (unlocked) and descr is the
 * stringified second argument.  NOTE: the 'head' argument is intentionally
 * unused here; it is accepted so call sites can share an argument list with
 * other lock initializers.
 */
#define SPINLOCK_INITIALIZER(head, d)	{ 0, 0, #d }

/*
 * NOTE(review): these appear to be bit encodings within one of the counter
 * words (presumably counta) — shared-lock flag and an exclusive-waiter
 * count held in the high bits.  Confirm against the spinlock
 * implementation before relying on the exact placement.
 */
#define SPINLOCK_SHARED		0x80000000	/* lock is held shared */
#define SPINLOCK_EXCLWAIT	0x00100000	/* high bits counter */

#endif