/**
 * WinPR: Windows Portable Runtime
 * Synchronization Functions
 *
 * Copyright 2012 Marc-Andre Moreau <marcandre.moreau@gmail.com>
 * Copyright 2013 Norbert Federa <norbert.federa@thincast.com>
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <winpr/tchar.h>
#include <winpr/synch.h>
#include <winpr/sysinfo.h>
#include <winpr/interlocked.h>
#include <winpr/thread.h>

#include "synch.h"

#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif

#if defined(__APPLE__)
#include <mach/task.h>
#include <mach/mach.h>
#include <mach/semaphore.h>
#endif

#ifndef _WIN32

#include "../log.h"
#define TAG WINPR_TAG("synch.critical")

VOID InitializeCriticalSection(LPCRITICAL_SECTION lpCriticalSection)
{
	InitializeCriticalSectionEx(lpCriticalSection, 0, 0);
}

BOOL InitializeCriticalSectionEx(LPCRITICAL_SECTION lpCriticalSection, DWORD dwSpinCount,
                                 DWORD Flags)
{
	/**
	 * See http://msdn.microsoft.com/en-us/library/ff541979(v=vs.85).aspx
	 * - The LockCount field indicates the number of times that any thread has
	 *   called the EnterCriticalSection routine for this critical section,
	 *   minus one. This field starts at -1 for an unlocked critical section.
	 *   Each call of EnterCriticalSection increments this value; each call of
	 *   LeaveCriticalSection decrements it.
	 * - The RecursionCount field indicates the number of times that the owning
	 *   thread has called EnterCriticalSection for this critical section.
	 */
	if (Flags != 0)
	{
		WLog_WARN(TAG, "Flags unimplemented");
	}

	lpCriticalSection->DebugInfo = NULL;
	lpCriticalSection->LockCount = -1;
	lpCriticalSection->SpinCount = 0;
	lpCriticalSection->RecursionCount = 0;
	lpCriticalSection->OwningThread = NULL;
	lpCriticalSection->LockSemaphore = (winpr_sem_t*)malloc(sizeof(winpr_sem_t));

	if (!lpCriticalSection->LockSemaphore)
		return FALSE;

#if defined(__APPLE__)

	if (semaphore_create(mach_task_self(), lpCriticalSection->LockSemaphore, SYNC_POLICY_FIFO, 0) !=
	    KERN_SUCCESS)
		goto out_fail;

#else

	if (sem_init(lpCriticalSection->LockSemaphore, 0, 0) != 0)
		goto out_fail;

#endif
	SetCriticalSectionSpinCount(lpCriticalSection, dwSpinCount);
	return TRUE;
out_fail:
	free(lpCriticalSection->LockSemaphore);
	return FALSE;
}

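/**
 * Illustrative usage sketch (assumed caller code, not part of this module):
 * one plausible way to initialize, use and tear down a critical section with
 * the functions implemented here. The helper names and the spin count of 4000
 * are assumptions for the example, not requirements of this implementation.
 *
 *   static CRITICAL_SECTION g_lock;
 *
 *   BOOL setup(void)
 *   {
 *       // Request a spin count; on single-processor systems it is forced to 0.
 *       if (!InitializeCriticalSectionEx(&g_lock, 4000, 0))
 *           return FALSE; // semaphore allocation or initialization failed
 *       return TRUE;
 *   }
 *
 *   void teardown(void)
 *   {
 *       // Releases the internal semaphore and resets all fields.
 *       DeleteCriticalSection(&g_lock);
 *   }
 */
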
BOOL InitializeCriticalSectionAndSpinCount(LPCRITICAL_SECTION lpCriticalSection, DWORD dwSpinCount)
{
	return InitializeCriticalSectionEx(lpCriticalSection, dwSpinCount, 0);
}

DWORD SetCriticalSectionSpinCount(LPCRITICAL_SECTION lpCriticalSection, DWORD dwSpinCount)
{
#if !defined(WINPR_CRITICAL_SECTION_DISABLE_SPINCOUNT)
	SYSTEM_INFO sysinfo;
	DWORD dwPreviousSpinCount = lpCriticalSection->SpinCount;

	if (dwSpinCount)
	{
		/* Don't spin on uniprocessor systems! */
		GetNativeSystemInfo(&sysinfo);

		if (sysinfo.dwNumberOfProcessors < 2)
			dwSpinCount = 0;
	}

	lpCriticalSection->SpinCount = dwSpinCount;
	return dwPreviousSpinCount;
#else
	return 0;
#endif
}

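/**
 * Illustrative sketch (assumed caller code): adjusting the spin count after
 * initialization. The previous value is returned, so a caller could restore it
 * later. Note that on a uniprocessor system, or when
 * WINPR_CRITICAL_SECTION_DISABLE_SPINCOUNT is defined, the effective spin
 * count remains 0.
 *
 *   CRITICAL_SECTION cs;
 *   InitializeCriticalSectionAndSpinCount(&cs, 0);
 *   DWORD previous = SetCriticalSectionSpinCount(&cs, 1000); // previous == 0 here
 *   SetCriticalSectionSpinCount(&cs, previous);              // restore old value
 *   DeleteCriticalSection(&cs);
 */
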
static VOID _WaitForCriticalSection(LPCRITICAL_SECTION lpCriticalSection)
{
#if defined(__APPLE__)
	semaphore_wait(*((winpr_sem_t*)lpCriticalSection->LockSemaphore));
#else
	sem_wait((winpr_sem_t*)lpCriticalSection->LockSemaphore);
#endif
}

static VOID _UnWaitCriticalSection(LPCRITICAL_SECTION lpCriticalSection)
{
#if defined __APPLE__
	semaphore_signal(*((winpr_sem_t*)lpCriticalSection->LockSemaphore));
#else
	sem_post((winpr_sem_t*)lpCriticalSection->LockSemaphore);
#endif
}

VOID EnterCriticalSection(LPCRITICAL_SECTION lpCriticalSection)
{
#if !defined(WINPR_CRITICAL_SECTION_DISABLE_SPINCOUNT)
	ULONG SpinCount = lpCriticalSection->SpinCount;

	/* If we're lucky, or if the current thread is already the owner, we can return early */
	if (SpinCount && TryEnterCriticalSection(lpCriticalSection))
		return;

	/* Spin the requested number of times, but don't compete with another waiting thread */
	while (SpinCount-- && lpCriticalSection->LockCount < 1)
	{
		/* Atomically try to acquire the lock if the section is free. */
		if (InterlockedCompareExchange(&lpCriticalSection->LockCount, 0, -1) == -1)
		{
			lpCriticalSection->RecursionCount = 1;
			lpCriticalSection->OwningThread = (HANDLE)(ULONG_PTR)GetCurrentThreadId();
			return;
		}

		/* Failed to get the lock. Let the scheduler know that we're spinning. */
		if (sched_yield() != 0)
		{
			/**
			 * On some operating systems sched_yield is a stub.
			 * usleep should at least trigger a context switch if any thread is waiting.
			 * A ThreadYield() would be nice in winpr ...
			 */
			usleep(1);
		}
	}

#endif

	/* First try the fastest possible path to get the lock. */
	if (InterlockedIncrement(&lpCriticalSection->LockCount))
	{
		/* Section is already locked. Check if it is owned by the current thread. */
		if (lpCriticalSection->OwningThread == (HANDLE)(ULONG_PTR)GetCurrentThreadId())
		{
			/* Recursion. No need to wait. */
			lpCriticalSection->RecursionCount++;
			return;
		}

		/* Section is locked by another thread. We have to wait. */
		_WaitForCriticalSection(lpCriticalSection);
	}

	/* We got the lock. Own it ... */
	lpCriticalSection->RecursionCount = 1;
	lpCriticalSection->OwningThread = (HANDLE)(ULONG_PTR)GetCurrentThreadId();
}

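/**
 * Illustrative sketch (assumed caller code): EnterCriticalSection may be
 * called recursively by the owning thread, but every call must be balanced by
 * a matching LeaveCriticalSection so that RecursionCount drops back to 0 and
 * waiting threads get signalled. The helper names below are made up for the
 * example.
 *
 *   static CRITICAL_SECTION listLock; // assumed to be initialized elsewhere
 *
 *   static void remove_item(void* item)
 *   {
 *       EnterCriticalSection(&listLock); // recursion: same owner, no blocking
 *       // ... unlink item ...
 *       LeaveCriticalSection(&listLock);
 *   }
 *
 *   void remove_all(void)
 *   {
 *       EnterCriticalSection(&listLock);
 *       // ... iterate the list and call remove_item() for each element ...
 *       LeaveCriticalSection(&listLock); // last Leave releases the section
 *   }
 */
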
BOOL TryEnterCriticalSection(LPCRITICAL_SECTION lpCriticalSection)
{
	HANDLE current_thread = (HANDLE)(ULONG_PTR)GetCurrentThreadId();

	/* Atomically acquire the lock if the section is free. */
	if (InterlockedCompareExchange(&lpCriticalSection->LockCount, 0, -1) == -1)
	{
		lpCriticalSection->RecursionCount = 1;
		lpCriticalSection->OwningThread = current_thread;
		return TRUE;
	}

	/* Section is already locked. Check if it is owned by the current thread. */
	if (lpCriticalSection->OwningThread == current_thread)
	{
		/* Recursion, return success */
		lpCriticalSection->RecursionCount++;
		InterlockedIncrement(&lpCriticalSection->LockCount);
		return TRUE;
	}

	return FALSE;
}

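/**
 * Illustrative sketch (assumed caller code): the usual try-lock pattern.
 * TryEnterCriticalSection never blocks; on success the section is owned and
 * must be released with LeaveCriticalSection, on failure the caller simply
 * skips the protected work. The variable name is a placeholder.
 *
 *   CRITICAL_SECTION cacheLock; // assumed to be initialized elsewhere
 *
 *   if (TryEnterCriticalSection(&cacheLock))
 *   {
 *       // ... opportunistically refresh the cache ...
 *       LeaveCriticalSection(&cacheLock);
 *   }
 *   else
 *   {
 *       // lock is busy; fall back to the stale data
 *   }
 */
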
VOID LeaveCriticalSection(LPCRITICAL_SECTION lpCriticalSection)
{
	/* Decrement RecursionCount and check if this is the last LeaveCriticalSection call ... */
	if (--lpCriticalSection->RecursionCount < 1)
	{
		/* Last recursion, clear owner, unlock and if there are other waiting threads ... */
		lpCriticalSection->OwningThread = NULL;

		if (InterlockedDecrement(&lpCriticalSection->LockCount) >= 0)
		{
			/* ...signal the semaphore to unblock the next waiting thread */
			_UnWaitCriticalSection(lpCriticalSection);
		}
	}
	else
	{
		InterlockedDecrement(&lpCriticalSection->LockCount);
	}
}

VOID DeleteCriticalSection(LPCRITICAL_SECTION lpCriticalSection)
{
	lpCriticalSection->LockCount = -1;
	lpCriticalSection->SpinCount = 0;
	lpCriticalSection->RecursionCount = 0;
	lpCriticalSection->OwningThread = NULL;

	if (lpCriticalSection->LockSemaphore != NULL)
	{
#if defined __APPLE__
		semaphore_destroy(mach_task_self(), *((winpr_sem_t*)lpCriticalSection->LockSemaphore));
#else
		sem_destroy((winpr_sem_t*)lpCriticalSection->LockSemaphore);
#endif
		free(lpCriticalSection->LockSemaphore);
		lpCriticalSection->LockSemaphore = NULL;
	}
}

#endif