/*
 * Copyright (C) Mellanox Technologies Ltd. 2001-2014. ALL RIGHTS RESERVED.
 * Copyright (C) Huawei Technologies Co., Ltd. 2020. ALL RIGHTS RESERVED.
 *
 * See file LICENSE for terms.
 */

#ifndef UCS_SPINLOCK_H
#define UCS_SPINLOCK_H

#include <ucs/type/status.h>
#include <pthread.h>
#include <errno.h>

BEGIN_C_DECLS

/** @file spinlock.h */

/* Spinlock creation modifiers */
enum {
    UCS_SPINLOCK_FLAG_SHARED = UCS_BIT(0) /**< Make spinlock sharable in memory */
};

/**
 * Simple spinlock.
 */
typedef struct ucs_spinlock {
    pthread_spinlock_t lock; /* underlying pthread spinlock */
} ucs_spinlock_t;

/**
 * Reentrant spinlock.
 */
typedef struct ucs_recursive_spinlock {
    ucs_spinlock_t super; /* underlying non-recursive spinlock */
    int            count; /* recursion depth; 0 when unlocked */
    pthread_t      owner; /* holder thread, or UCS_SPINLOCK_OWNER_NULL */
} ucs_recursive_spinlock_t;

/* Sentinel owner value meaning "no thread currently holds the lock".
 * NOTE(review): the (pthread_t)-1 cast assumes an integral pthread_t. */
#define UCS_SPINLOCK_OWNER_NULL ((pthread_t)-1)

ucs_spinlock_init(ucs_spinlock_t * lock,int flags)44 static ucs_status_t ucs_spinlock_init(ucs_spinlock_t *lock, int flags)
45 {
46 int ret, lock_flags;
47
48 if (flags & UCS_SPINLOCK_FLAG_SHARED) {
49 lock_flags = PTHREAD_PROCESS_SHARED;
50 } else {
51 lock_flags = PTHREAD_PROCESS_PRIVATE;
52 }
53
54 ret = pthread_spin_init(&lock->lock, lock_flags);
55 if (ret != 0) {
56 return UCS_ERR_IO_ERROR;
57 }
58
59 return UCS_OK;
60 }
61
62 static inline ucs_status_t
ucs_recursive_spinlock_init(ucs_recursive_spinlock_t * lock,int flags)63 ucs_recursive_spinlock_init(ucs_recursive_spinlock_t* lock, int flags)
64 {
65 lock->count = 0;
66 lock->owner = UCS_SPINLOCK_OWNER_NULL;
67
68 return ucs_spinlock_init(&lock->super, flags);
69 }
70
ucs_spinlock_destroy(ucs_spinlock_t * lock)71 static inline ucs_status_t ucs_spinlock_destroy(ucs_spinlock_t *lock)
72 {
73 int ret;
74
75 ret = pthread_spin_destroy(&lock->lock);
76 if (ret != 0) {
77 if (errno == EBUSY) {
78 return UCS_ERR_BUSY;
79 } else {
80 return UCS_ERR_INVALID_PARAM;
81 }
82 }
83
84 return UCS_OK;
85 }
86
87 static inline ucs_status_t
ucs_recursive_spinlock_destroy(ucs_recursive_spinlock_t * lock)88 ucs_recursive_spinlock_destroy(ucs_recursive_spinlock_t *lock)
89 {
90 if (lock->count != 0) {
91 return UCS_ERR_BUSY;
92 }
93
94 return ucs_spinlock_destroy(&lock->super);
95 }
96
97 static inline int
ucs_recursive_spin_is_owner(ucs_recursive_spinlock_t * lock,pthread_t self)98 ucs_recursive_spin_is_owner(ucs_recursive_spinlock_t *lock, pthread_t self)
99 {
100 return lock->owner == self;
101 }
102
/** Acquire a simple spinlock, busy-waiting until it becomes available. */
static inline void ucs_spin_lock(ucs_spinlock_t *lock)
{
    (void)pthread_spin_lock(&lock->lock);
}

/**
 * Acquire a recursive spinlock. A thread that already holds the lock may
 * re-acquire it; every call must be matched by ucs_recursive_spin_unlock().
 */
static inline void ucs_recursive_spin_lock(ucs_recursive_spinlock_t *lock)
{
    pthread_t me = pthread_self();

    if (!ucs_recursive_spin_is_owner(lock, me)) {
        /* First acquisition by this thread: take the underlying lock */
        ucs_spin_lock(&lock->super);
        lock->owner = me;
    }

    /* Bump recursion depth for both first and nested acquisitions */
    lock->count++;
}

/**
 * Try to acquire a simple spinlock without blocking.
 *
 * @return 1 if the lock was acquired, 0 otherwise.
 */
static inline int ucs_spin_try_lock(ucs_spinlock_t *lock)
{
    return (pthread_spin_trylock(&lock->lock) == 0) ? 1 : 0;
}

/**
 * Try to acquire a recursive spinlock without blocking. Always succeeds for
 * the thread that already holds the lock.
 *
 * @return 1 if the lock was acquired (or re-acquired), 0 otherwise.
 */
static inline int ucs_recursive_spin_trylock(ucs_recursive_spinlock_t *lock)
{
    pthread_t me = pthread_self();

    if (!ucs_recursive_spin_is_owner(lock, me)) {
        /* Not held by this thread - attempt a non-blocking acquire */
        if (ucs_spin_try_lock(&lock->super) == 0) {
            return 0; /* another thread holds the lock */
        }
        lock->owner = me;
    }

    lock->count++;
    return 1;
}

/** Release a simple spinlock previously taken by ucs_spin_lock(). */
static inline void ucs_spin_unlock(ucs_spinlock_t *lock)
{
    (void)pthread_spin_unlock(&lock->lock);
}

/**
 * Release one level of a recursive spinlock; the underlying lock is released
 * only when the recursion depth drops back to zero.
 */
static inline void ucs_recursive_spin_unlock(ucs_recursive_spinlock_t *lock)
{
    if (--lock->count == 0) {
        /* Outermost unlock: clear ownership, then release the real lock */
        lock->owner = UCS_SPINLOCK_OWNER_NULL;
        ucs_spin_unlock(&lock->super);
    }
}

END_C_DECLS

#endif