/* $NetBSD: mutex.h,v 1.4 2009/07/20 04:41:37 kiyohara Exp $ */

/*-
 * Copyright (c) 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Takayoshi Kochi.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _IA64_MUTEX_H_
#define _IA64_MUTEX_H_

#ifndef __MUTEX_PRIVATE

/*
 * Public (opaque) view of a kernel mutex.  Code outside the mutex
 * implementation sees only padding; the layout must occupy the same
 * storage as the private definition below so that sizeof(struct kmutex)
 * and its alignment agree everywhere.
 */
struct kmutex {
	uintptr_t mtx_pad1;	/* covers mtx_owner */
	uint32_t mtx_pad2[2];	/* covers mtx_ipl + mtx_lock (sizes assumed
				 * to match -- verify against ipl_cookie_t
				 * and __cpu_simple_lock_t on ia64) */
};

#else	/* __MUTEX_PRIVATE */

/*
 * Private view, visible only where __MUTEX_PRIVATE is defined
 * (i.e. to the mutex implementation itself).
 */
struct kmutex {
	volatile uintptr_t mtx_owner;	/* owner/flags word; updated with
					 * MUTEX_CAS() below */
	ipl_cookie_t mtx_ipl;		/* interrupt-priority cookie,
					 * presumably for spin mutexes --
					 * confirm against kern_mutex.c */
	__cpu_simple_lock_t mtx_lock;	/* low-level simple lock */
};


/* XXX when we implement mutex_enter()/mutex_exit(), uncomment this
#define __HAVE_MUTEX_STUBS 1
*/
/* XXX when we implement mutex_spin_enter()/mutex_spin_exit(), uncomment this
#define __HAVE_SPIN_MUTEX_STUBS 1
*/

/*
 * This port uses the CAS-based machine-independent mutex code;
 * MUTEX_CAS() below supplies the primitive it builds on.
 */
#define __HAVE_SIMPLE_MUTEXES 1

/*
 * MUTEX_RECEIVE: no memory barrier required, atomic_cas implies a load fence.
 */
#define MUTEX_RECEIVE(mtx) /* nothing */

/*
 * MUTEX_GIVE: no memory barrier required, as _lock_cas() will take care of it.
 */
#define MUTEX_GIVE(mtx) /* nothing */

/*
 * Compare-and-swap on the owner word.  uintptr_t and unsigned long have
 * the same width on ia64 (LP64), so the cast to unsigned long * is safe.
 */
#define MUTEX_CAS(ptr, old, new) \
	(atomic_cas_ulong((volatile unsigned long *)(ptr), (old), (new)) == (old))

#endif	/* __MUTEX_PRIVATE */

#endif	/* _IA64_MUTEX_H_ */