1 /*-
2  * Copyright (c) 2005 David Xu <davidxu@freebsd.org>
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24  * SUCH DAMAGE.
25  *
26  */
27 
28 #ifndef _THR_DFLY_UMTX_H_
29 #define _THR_DFLY_UMTX_H_
30 
#include <errno.h>	/* EBUSY, used by _thr_umtx_trylock() */
#include <unistd.h>
32 
/*
 * Lock-word states.  A umtx word is 0 when unowned; otherwise it holds
 * the owner's id, possibly with the contested bit noted below.
 */
#define UMTX_LOCKED	1
#define UMTX_CONTESTED	2

/*
 * Spin-wait hint between lock attempts.
 * NOTE(review): "pause" is an x86 instruction — this header appears to be
 * x86-specific; confirm before reusing on other architectures.
 */
#define	cpu_pause()	__asm __volatile("pause":::"memory")

/* Userspace mutex word operated on with atomic ops and kernel sleeps. */
typedef int umtx_t;

/*
 * Slow paths, implemented out of line (see the matching .c file):
 * they enter the kernel to sleep/wake when the fast inline paths
 * below detect contention.  timo == 0 presumably means "wait forever"
 * (the inline lock path passes 0) — confirm against the implementation.
 */
int	__thr_umtx_lock(volatile umtx_t *mtx, int id, int timo);
int	__thr_umtx_timedlock(volatile umtx_t *mtx, int id,
		 const struct timespec *timeout);
void	__thr_umtx_unlock(volatile umtx_t *mtx, int v, int id);
44 
45 static inline void
46 _thr_umtx_init(volatile umtx_t *mtx)
47 {
48 	*mtx = 0;
49 }
50 
51 static inline int
52 _thr_umtx_trylock(volatile umtx_t *mtx, int id)
53 {
54 	if (atomic_cmpset_acq_int(mtx, 0, id))
55 		return (0);
56 	cpu_pause();
57 	if (atomic_cmpset_acq_int(mtx, 0, id))
58 		return (0);
59 	cpu_pause();
60 	if (atomic_cmpset_acq_int(mtx, 0, id))
61 		return (0);
62 	return (EBUSY);
63 }
64 
65 static inline int
66 _thr_umtx_lock(volatile umtx_t *mtx, int id)
67 {
68 	if (atomic_cmpset_acq_int(mtx, 0, id))
69 		return (0);
70 	return (__thr_umtx_lock(mtx, id, 0));
71 }
72 
73 static inline int
74 _thr_umtx_timedlock(volatile umtx_t *mtx, int id,
75     const struct timespec *timeout)
76 {
77 	if (atomic_cmpset_acq_int(mtx, 0, id)) {
78 		return (0);
79 	}
80 	return (__thr_umtx_timedlock(mtx, id, timeout));
81 }
82 
83 static inline void
84 _thr_umtx_unlock(volatile umtx_t *mtx, int id)
85 {
86 	int v;
87 
88 	v = atomic_swap_int(mtx, 0);
89 	if (v != id)
90 		__thr_umtx_unlock(mtx, v, id);
91 }
92 
/*
 * Sleep/wakeup primitives, implemented out of line.
 *
 * _thr_umtx_wait: sleep until *mtx no longer equals exp, the timeout
 * expires, or an interruption occurs (semantics defined by the
 * implementation — confirm against the matching .c file).
 * _thr_umtx_wait_intr: like the above but with no timeout and
 * presumably interruptible by signals — TODO confirm.
 * _thr_umtx_wake: wake up to 'count' threads sleeping on *mtx.
 */
int _thr_umtx_wait(volatile umtx_t *mtx, umtx_t exp,
		   const struct timespec *timeout, int clockid);
int _thr_umtx_wait_intr(volatile umtx_t *mtx, umtx_t exp);
void _thr_umtx_wake(volatile umtx_t *mtx, int count);
97 #endif
98