1
2 /*
3 * Copyright (C) Igor Sysoev
4 * Copyright (C) NGINX, Inc.
5 */
6
7 #include <nxt_main.h>
8
9
10 /*
11 * All modern pthread mutex implementations try to acquire a lock atomically
12 * in userland before going to sleep in the kernel. Some spin on SMP systems
13 * before sleeping.
14 *
15 * In Solaris since version 8 all mutex types spin before sleeping.
16 * The default spin count is 1000. It can be overridden using
17 * _THREAD_ADAPTIVE_SPIN=100 environment variable.
18 *
19 * In MacOSX all mutex types spin to acquire a lock protecting a mutex's
20 * internals. If the mutex is busy, thread calls Mach semaphore_wait().
21 *
22 *
23 * PTHREAD_MUTEX_NORMAL lacks deadlock detection and is the fastest
24 * mutex type.
25 *
26 * Linux: No spinning. The internal name PTHREAD_MUTEX_TIMED_NP
27 * remains from the times when pthread_mutex_timedlock() was
28 * a non-standard extension. Alias name: PTHREAD_MUTEX_FAST_NP.
29 * FreeBSD: No spinning.
30 *
31 *
32 * PTHREAD_MUTEX_ERRORCHECK is usually as fast as PTHREAD_MUTEX_NORMAL
33 * yet has lightweight deadlock detection.
34 *
35 * Linux: No spinning. The internal name: PTHREAD_MUTEX_ERRORCHECK_NP.
36 * FreeBSD: No spinning.
37 *
38 *
39 * PTHREAD_MUTEX_RECURSIVE allows recursive locking.
40 *
41 * Linux: No spinning. The internal name: PTHREAD_MUTEX_RECURSIVE_NP.
42 * FreeBSD: No spinning.
43 *
44 *
45 * PTHREAD_MUTEX_ADAPTIVE_NP spins on SMP systems before sleeping.
46 *
47 * Linux: No deadlock detection. Dynamically changes a spin count
48 * for each mutex from 10 to 100 based on spin count taken
49 * previously.
50 *
51 * FreeBSD: Deadlock detection. The default spin count is 2000.
52 * It can be overridden using LIBPTHREAD_SPINLOOPS environment
53 * variable or by pthread_mutex_setspinloops_np(). If a lock
54 * is still busy, sched_yield() can be called on both UP and
55 * SMP systems. The default yield loop count is zero, but it
56 * can be set by LIBPTHREAD_YIELDLOOPS environment variable or
57 * by pthread_mutex_setyieldloops_np(). sched_yield() moves
58 * a thread to the end of CPU scheduler run queue and this is
59 * cheaper than removing the thread from the queue and sleeping.
60 *
61 * Solaris: No PTHREAD_MUTEX_ADAPTIVE_NP.
62 * MacOSX: No PTHREAD_MUTEX_ADAPTIVE_NP.
63 *
64 *
65 * PTHREAD_MUTEX_ELISION_NP is a Linux extension to elide locks using
66 * Intel Restricted Transactional Memory. It is the most suitable for
67 * rwlock pattern access because it allows simultaneous reads without lock.
68 * Supported since glibc 2.18.
69 *
70 *
71 * PTHREAD_MUTEX_DEFAULT is default mutex type.
72 *
73 * Linux: PTHREAD_MUTEX_NORMAL.
74 * FreeBSD: PTHREAD_MUTEX_ERRORCHECK.
75 * Solaris: PTHREAD_MUTEX_NORMAL.
76 * MacOSX: PTHREAD_MUTEX_NORMAL.
77 */
78
79
80 nxt_int_t
nxt_thread_mutex_create(nxt_thread_mutex_t * mtx)81 nxt_thread_mutex_create(nxt_thread_mutex_t *mtx)
82 {
83 nxt_err_t err;
84 pthread_mutexattr_t attr;
85
86 err = pthread_mutexattr_init(&attr);
87 if (err != 0) {
88 nxt_thread_log_alert("pthread_mutexattr_init() failed %E", err);
89 return NXT_ERROR;
90 }
91
92 err = pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
93 if (err != 0) {
94 nxt_thread_log_alert("pthread_mutexattr_settype"
95 "(PTHREAD_MUTEX_ERRORCHECK) failed %E", err);
96 return NXT_ERROR;
97 }
98
99 err = pthread_mutex_init(mtx, &attr);
100 if (err != 0) {
101 nxt_thread_log_alert("pthread_mutex_init() failed %E", err);
102 return NXT_ERROR;
103 }
104
105 err = pthread_mutexattr_destroy(&attr);
106 if (err != 0) {
107 nxt_thread_log_alert("pthread_mutexattr_destroy() failed %E", err);
108 }
109
110 nxt_thread_log_debug("pthread_mutex_init(%p)", mtx);
111
112 return NXT_OK;
113 }
114
115
116 void
nxt_thread_mutex_destroy(nxt_thread_mutex_t * mtx)117 nxt_thread_mutex_destroy(nxt_thread_mutex_t *mtx)
118 {
119 nxt_err_t err;
120
121 err = pthread_mutex_destroy(mtx);
122 if (nxt_slow_path(err != 0)) {
123 nxt_thread_log_alert("pthread_mutex_destroy() failed %E", err);
124 }
125
126 nxt_thread_log_debug("pthread_mutex_destroy(%p)", mtx);
127 }
128
129
130 nxt_int_t
nxt_thread_mutex_lock(nxt_thread_mutex_t * mtx)131 nxt_thread_mutex_lock(nxt_thread_mutex_t *mtx)
132 {
133 nxt_err_t err;
134
135 nxt_thread_log_debug("pthread_mutex_lock(%p) enter", mtx);
136
137 err = pthread_mutex_lock(mtx);
138 if (nxt_fast_path(err == 0)) {
139 return NXT_OK;
140 }
141
142 nxt_thread_log_alert("pthread_mutex_lock() failed %E", err);
143
144 return NXT_ERROR;
145 }
146
147
148 nxt_bool_t
nxt_thread_mutex_trylock(nxt_thread_mutex_t * mtx)149 nxt_thread_mutex_trylock(nxt_thread_mutex_t *mtx)
150 {
151 nxt_err_t err;
152
153 nxt_thread_debug(thr);
154
155 nxt_log_debug(thr->log, "pthread_mutex_trylock(%p) enter", mtx);
156
157 err = pthread_mutex_trylock(mtx);
158 if (nxt_fast_path(err == 0)) {
159 return 1;
160 }
161
162 if (err == NXT_EBUSY) {
163 nxt_log_debug(thr->log, "pthread_mutex_trylock(%p) failed", mtx);
164
165 } else {
166 nxt_thread_log_alert("pthread_mutex_trylock() failed %E", err);
167 }
168
169 return 0;
170 }
171
172
173 nxt_int_t
nxt_thread_mutex_unlock(nxt_thread_mutex_t * mtx)174 nxt_thread_mutex_unlock(nxt_thread_mutex_t *mtx)
175 {
176 nxt_err_t err;
177 nxt_thread_t *thr;
178
179 err = pthread_mutex_unlock(mtx);
180
181 thr = nxt_thread();
182 nxt_thread_time_update(thr);
183
184 if (nxt_fast_path(err == 0)) {
185 nxt_log_debug(thr->log, "pthread_mutex_unlock(%p) exit", mtx);
186 return NXT_OK;
187 }
188
189 nxt_log_alert(thr->log, "pthread_mutex_unlock() failed %E", err);
190
191 return NXT_ERROR;
192 }
193