/*-
 * Copyright (c) 2005 David Xu <davidxu@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#ifndef _THR_DFLY_UMTX_H_
#define _THR_DFLY_UMTX_H_

#include <unistd.h>

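/*
 * Spin-wait hint used between the inline lock retry attempts below
 * (the x86 PAUSE instruction plus a compiler memory barrier).
 */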
#define	cpu_pause()	__asm __volatile("pause":::"memory")

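/*
 * A umtx is a single word of shared state: 0 means unlocked, and the
 * inline fast paths below store the caller-supplied id on acquisition.
 * Contested cases are handed off to the out-of-line slow-path routines
 * declared next.
 */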
typedef int umtx_t;

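/*
 * Out-of-line slow paths, used once the inline compare-and-set attempts
 * fail (typically backed by the kernel's umtx sleep/wakeup facility).
 */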
int	__thr_umtx_lock(volatile umtx_t *mtx, int id, int timo);
int	__thr_umtx_timedlock(volatile umtx_t *mtx, int id,
		 const struct timespec *timeout);
void	__thr_umtx_unlock(volatile umtx_t *mtx, int v, int id);

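/*
 * Initialize a umtx to the unlocked state.
 */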
static inline void
_thr_umtx_init(volatile umtx_t *mtx)
{
	*mtx = 0;
}

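/*
 * Non-blocking acquire: attempt the compare-and-set a few times,
 * separated by cpu_pause(), and return EBUSY if the lock cannot be
 * taken.  When 'temporary' is non-zero, asynchronous signal delivery is
 * blocked with sigblockall() before the attempt; on success it remains
 * blocked until the matching _thr_umtx_unlock(), on failure it is
 * restored here.
 */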
static inline int
_thr_umtx_trylock(volatile umtx_t *mtx, int id, int temporary)
{
	if (temporary)
		sigblockall();
	if (atomic_cmpset_acq_int(mtx, 0, id))
		return (0);
	cpu_pause();
	if (atomic_cmpset_acq_int(mtx, 0, id))
		return (0);
	cpu_pause();
	if (atomic_cmpset_acq_int(mtx, 0, id))
		return (0);
	if (temporary)
		sigunblockall();
	return (EBUSY);
}

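/*
 * Blocking acquire: take the fast path if the umtx is uncontested,
 * otherwise fall back to the __thr_umtx_lock() slow path.  As with
 * trylock, 'temporary' wraps the critical section in
 * sigblockall()/sigunblockall(); the unblock happens in
 * _thr_umtx_unlock() once the lock has been obtained.
 */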
static inline int
_thr_umtx_lock(volatile umtx_t *mtx, int id, int temporary)
{
	int res;

	if (temporary)
		sigblockall();
	if (atomic_cmpset_acq_int(mtx, 0, id))
		return (0);
	res = __thr_umtx_lock(mtx, id, 0);
	if (res && temporary)
		sigunblockall();
	return res;
}

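/*
 * Same as _thr_umtx_lock(), except that the slow path gives up once
 * 'timeout' expires and returns an error instead of blocking
 * indefinitely.
 */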
static inline int
_thr_umtx_timedlock(volatile umtx_t *mtx, int id,
    const struct timespec *timeout, int temporary)
{
	int res;

	if (temporary)
		sigblockall();
	if (atomic_cmpset_acq_int(mtx, 0, id)) {
		return (0);
	}
	res = __thr_umtx_timedlock(mtx, id, timeout);
	if (res && temporary)
		sigunblockall();
	return res;
}

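/*
 * Release the umtx by atomically swapping in 0.  If the previous value
 * is not the owner's id the lock was contested, and the slow path is
 * invoked to handle the contested release.  Passing the same 'temporary'
 * flag used at lock time re-enables signal delivery with sigunblockall().
 */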
static inline void
_thr_umtx_unlock(volatile umtx_t *mtx, int id, int temporary)
{
	int v;

	v = atomic_swap_int(mtx, 0);
	if (v != id)
		__thr_umtx_unlock(mtx, v, id);
	if (temporary)
		sigunblockall();
}

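/*
 * Wait/wake helpers: _thr_umtx_wait() sleeps while *mtx still holds the
 * expected value 'exp' (with an optional timeout against the given clock
 * id), _thr_umtx_wait_intr() is a variant that can be interrupted, and
 * _thr_umtx_wake() wakes up to 'count' waiters sleeping on the umtx.
 */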
int _thr_umtx_wait(volatile umtx_t *mtx, umtx_t exp,
		   const struct timespec *timeout, int clockid);
int _thr_umtx_wait_intr(volatile umtx_t *mtx, umtx_t exp);
void _thr_umtx_wake(volatile umtx_t *mtx, int count);
#endif