1 /*	$NetBSD: atomic.h,v 1.5 2014/12/10 04:38:00 christos Exp $	*/
2 
3 /*
4  * Copyright (C) 2006, 2007, 2009, 2012  Internet Systems Consortium, Inc. ("ISC")
5  *
6  * Permission to use, copy, modify, and/or distribute this software for any
7  * purpose with or without fee is hereby granted, provided that the above
8  * copyright notice and this permission notice appear in all copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
11  * REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
12  * AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
13  * INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
14  * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
15  * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 /* Id: atomic.h,v 1.7 2009/06/24 02:22:50 marka Exp  */
20 
21 #ifndef ISC_ATOMIC_H
22 #define ISC_ATOMIC_H 1
23 
24 #include <isc/platform.h>
25 #include <isc/types.h>
26 
27 #ifdef ISC_PLATFORM_USEGCCASM
/*
 * Atomically add 'val' to the value stored in 'p' and return the value
 * '*p' held immediately before the addition (fetch-and-add semantics).
 *
 * Built from a compare-and-swap retry loop: ar.ccv is loaded with the
 * expected old value and cmpxchg4.acq stores the candidate new value
 * only if '*p' still equals ar.ccv; the value actually found in memory
 * comes back in %0.  If another CPU modified '*p' in between, the loop
 * retries from the freshly observed value.
 *
 * Open issue: can 'fetchadd' make the code faster for some particular values
 * (e.g., 1 and -1)?  (ia64 fetchadd4 only accepts a small set of immediate
 * increments, so it cannot replace this loop in the general case.)
 */
static inline isc_int32_t
#ifdef __GNUC__
__attribute__ ((unused))
#endif
isc_atomic_xadd(isc_int32_t *p, isc_int32_t val)
{
	isc_int32_t prev, swapped;

	/* On failure, restart from the value the CAS actually observed. */
	for (prev = *(volatile isc_int32_t *)p; ; prev = swapped) {
		swapped = prev + val;	/* candidate new value */
		__asm__ volatile(
			/* ar.ccv = value we expect '*p' to contain (prev) */
			"mov ar.ccv=%2;;"
			/* if (*p == ar.ccv) *p = swapped; %0 = old *p;
			 * .acq gives acquire memory ordering */
			"cmpxchg4.acq %0=%4,%3,ar.ccv"
			: "=r" (swapped), "=m" (*p)
			: "r" (prev), "r" (swapped), "m" (*p)
			: "memory");
		/* Old value matched our expectation: the store happened. */
		if (swapped == prev)
			break;
	}

	return (prev);
}
57 
/*
 * Atomically store the 32-bit value 'val' in '*p'.
 *
 * st4.rel is a 4-byte store with release semantics: memory accesses
 * issued before it become visible to other processors no later than the
 * store itself.
 */
static inline void
#ifdef __GNUC__
__attribute__ ((unused))
#endif
isc_atomic_store(isc_int32_t *p, isc_int32_t val)
{
	__asm__ volatile(
		"st4.rel %0=%1"		/* 4-byte store, release ordering */
		: "=m" (*p)
		: "r" (val)
		: "memory"		/* compiler barrier: no caching of '*p' across this */
		);
}
74 
/*
 * Atomically replace the value in 'p' with 'val', if the original value
 * is equal to 'cmpval'.  The original value is returned in any case, so
 * the caller can tell whether the swap took place (return == cmpval) or
 * not.
 */
static inline isc_int32_t
#ifdef __GNUC__
__attribute__ ((unused))
#endif
isc_atomic_cmpxchg(isc_int32_t *p, isc_int32_t cmpval, isc_int32_t val)
{
	isc_int32_t ret;

	__asm__ volatile(
		/* ar.ccv = value we expect '*p' to contain */
		"mov ar.ccv=%2;;"
		/* if (*p == ar.ccv) *p = val; %0 = old *p;
		 * .acq gives acquire memory ordering */
		"cmpxchg4.acq %0=%4,%3,ar.ccv"
		: "=r" (ret), "=m" (*p)
		: "r" (cmpval), "r" (val), "m" (*p)
		: "memory");

	return (ret);
}
97 #else /* !ISC_PLATFORM_USEGCCASM */
98 
99 #error "unsupported compiler.  disable atomic ops by --disable-atomic"
100 
101 #endif
102 #endif /* ISC_ATOMIC_H */
103