1 // Copyright (c) 2012- PPSSPP Project.
2 
3 // This program is free software: you can redistribute it and/or modify
4 // it under the terms of the GNU General Public License as published by
5 // the Free Software Foundation, version 2.0 or later versions.
6 
7 // This program is distributed in the hope that it will be useful,
8 // but WITHOUT ANY WARRANTY; without even the implied warranty of
9 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
10 // GNU General Public License 2.0 for more details.
11 
12 // A copy of the GPL 2.0 should have been included with the program.
13 // If not, see http://www.gnu.org/licenses/
14 
15 // Official git repository and contact information can be found at
16 // https://github.com/hrydgard/ppsspp and http://www.ppsspp.org/.
17 
18 #include <algorithm>
19 #include <map>
20 #include <unordered_map>
21 #include "Common/Serialize/Serializer.h"
22 #include "Common/Serialize/SerializeFuncs.h"
23 #include "Common/Serialize/SerializeMap.h"
24 #include "Core/MemMapHelpers.h"
25 #include "Core/HLE/HLE.h"
26 #include "Core/MIPS/MIPS.h"
27 #include "Core/CoreTiming.h"
28 #include "Core/Reporting.h"
29 #include "Core/HLE/sceKernel.h"
30 #include "Core/HLE/sceKernelMutex.h"
31 #include "Core/HLE/sceKernelThread.h"
32 #include "Core/HLE/KernelWaitHelpers.h"
33 
34 #define PSP_MUTEX_ATTR_FIFO 0
35 #define PSP_MUTEX_ATTR_PRIORITY 0x100
36 #define PSP_MUTEX_ATTR_ALLOW_RECURSIVE 0x200
37 #define PSP_MUTEX_ATTR_KNOWN (PSP_MUTEX_ATTR_PRIORITY | PSP_MUTEX_ATTR_ALLOW_RECURSIVE)
38 
39 // Not sure about the names of these
40 #define PSP_MUTEX_ERROR_NO_SUCH_MUTEX 0x800201C3
41 #define PSP_MUTEX_ERROR_TRYLOCK_FAILED 0x800201C4
42 #define PSP_MUTEX_ERROR_NOT_LOCKED 0x800201C5
43 #define PSP_MUTEX_ERROR_LOCK_OVERFLOW 0x800201C6
44 #define PSP_MUTEX_ERROR_UNLOCK_UNDERFLOW 0x800201C7
45 #define PSP_MUTEX_ERROR_ALREADY_LOCKED 0x800201C8
46 
47 #define PSP_LWMUTEX_ERROR_NO_SUCH_LWMUTEX 0x800201CA
48 // Note: used only for _600.
49 #define PSP_LWMUTEX_ERROR_TRYLOCK_FAILED 0x800201CB
50 #define PSP_LWMUTEX_ERROR_NOT_LOCKED 0x800201CC
51 #define PSP_LWMUTEX_ERROR_LOCK_OVERFLOW 0x800201CD
52 #define PSP_LWMUTEX_ERROR_UNLOCK_UNDERFLOW 0x800201CE
53 #define PSP_LWMUTEX_ERROR_ALREADY_LOCKED 0x800201CF
54 
// Mirrors the PSP kernel's native mutex info layout. This struct is written
// directly to guest memory by sceKernelReferMutexStatus (Memory::WriteStruct),
// so field order and sizes must not change.
struct NativeMutex
{
	SceSize_le size;
	char name[KERNELOBJECT_MAX_NAME_LENGTH + 1];
	SceUInt_le attr;
	s32_le initialCount;
	// Current recursive lock depth (0 = unlocked).
	s32_le lockLevel;
	// UID of the owning thread, or -1 when unowned.
	SceUID_le lockThread;
	// Not kept up to date.
	s32_le numWaitThreads;
};
66 
// HLE kernel object for a regular PSP mutex: native state plus emulator-side
// bookkeeping (waiting threads and waits paused while callbacks run).
struct PSPMutex : public KernelObject
{
	const char *GetName() override { return nm.name; }
	const char *GetTypeName() override { return GetStaticTypeName(); }
	static const char *GetStaticTypeName() { return "Mutex"; }
	static u32 GetMissingErrorCode() { return PSP_MUTEX_ERROR_NO_SUCH_MUTEX; }
	static int GetStaticIDType() { return SCE_KERNEL_TMID_Mutex; }
	int GetIDType() const override { return SCE_KERNEL_TMID_Mutex; }

	// Save-state serialization. Field order is part of the savestate format.
	void DoState(PointerWrap &p) override
	{
		auto s = p.Section("Mutex", 1);
		if (!s)
			return;

		Do(p, nm);
		SceUID dv = 0;
		Do(p, waitingThreads, dv);
		Do(p, pausedWaits);
	}

	NativeMutex nm;
	// Threads currently blocked trying to lock this mutex.
	std::vector<SceUID> waitingThreads;
	// Key is the callback id it was for, or if no callback, the thread id.
	std::map<SceUID, u64> pausedWaits;
};
93 
94 
// Lightweight mutex work area that lives in guest RAM (the game passes its
// address to sceKernelCreateLwMutex). Layout must match the PSP ABI.
struct NativeLwMutexWorkarea
{
	// Current recursive lock depth (0 = unlocked).
	s32_le lockLevel;
	// Owning thread UID; 0 means unowned in the lw-mutex scheme.
	SceUID_le lockThread;
	u32_le attr;
	s32_le numWaitThreads;
	// UID of the backing LwMutex kernel object; -1 once deleted.
	SceUID_le uid;
	s32_le pad[3];

	// Zero the whole work area (done at creation).
	void init()
	{
		memset(this, 0, sizeof(NativeLwMutexWorkarea));
	}

	// Mark the work area as no longer backed by a kernel object (done at deletion).
	void clear()
	{
		lockLevel = 0;
		lockThread = -1;
		uid = -1;
	}
};
116 
// Native info block for a lightweight mutex kernel object. Points back at the
// guest-memory work area, which holds the authoritative lock state.
struct NativeLwMutex
{
	SceSize_le size;
	char name[KERNELOBJECT_MAX_NAME_LENGTH + 1];
	SceUInt_le attr;
	SceUID_le uid;
	// Guest address of the work area set up by sceKernelCreateLwMutex.
	PSPPointer<NativeLwMutexWorkarea> workarea;
	s32_le initialCount;
	// Not kept up to date.
	s32_le currentCount;
	// Not kept up to date.
	SceUID_le lockThread;
	// Not kept up to date.
	s32_le numWaitThreads;
};
132 
// HLE kernel object backing a lightweight mutex; parallels PSPMutex but the
// actual lock state lives in the guest-memory work area.
struct LwMutex : public KernelObject
{
	const char *GetName() override { return nm.name; }
	const char *GetTypeName() override { return GetStaticTypeName(); }
	static const char *GetStaticTypeName() { return "LwMutex"; }
	static u32 GetMissingErrorCode() { return PSP_LWMUTEX_ERROR_NO_SUCH_LWMUTEX; }
	static int GetStaticIDType() { return SCE_KERNEL_TMID_LwMutex; }
	int GetIDType() const override { return SCE_KERNEL_TMID_LwMutex; }

	// Save-state serialization. Field order is part of the savestate format.
	void DoState(PointerWrap &p) override
	{
		auto s = p.Section("LwMutex", 1);
		if (!s)
			return;

		Do(p, nm);
		SceUID dv = 0;
		Do(p, waitingThreads, dv);
		Do(p, pausedWaits);
	}

	NativeLwMutex nm;
	// Threads currently blocked trying to lock this lw-mutex.
	std::vector<SceUID> waitingThreads;
	// Key is the callback id it was for, or if no callback, the thread id.
	std::map<SceUID, u64> pausedWaits;
};
159 
// CoreTiming event types used to wake threads whose lock wait timed out.
static int mutexWaitTimer = -1;
static int lwMutexWaitTimer = -1;
// Thread -> Mutex locks for thread end.
typedef std::unordered_multimap<SceUID, SceUID> MutexMap;
static MutexMap mutexHeldLocks;

// Callback suspend/resume handlers, registered in __KernelMutexInit().
void __KernelMutexBeginCallback(SceUID threadID, SceUID prevCallbackId);
void __KernelMutexEndCallback(SceUID threadID, SceUID prevCallbackId);
void __KernelLwMutexBeginCallback(SceUID threadID, SceUID prevCallbackId);
void __KernelLwMutexEndCallback(SceUID threadID, SceUID prevCallbackId);
170 
// Module init: registers the timeout events, the thread-end listener, and the
// callback begin/end handlers for both mutex wait types.
void __KernelMutexInit()
{
	mutexWaitTimer = CoreTiming::RegisterEvent("MutexTimeout", __KernelMutexTimeout);
	lwMutexWaitTimer = CoreTiming::RegisterEvent("LwMutexTimeout", __KernelLwMutexTimeout);

	__KernelListenThreadEnd(&__KernelMutexThreadEnd);
	__KernelRegisterWaitTypeFuncs(WAITTYPE_MUTEX, __KernelMutexBeginCallback, __KernelMutexEndCallback);
	__KernelRegisterWaitTypeFuncs(WAITTYPE_LWMUTEX, __KernelLwMutexBeginCallback, __KernelLwMutexEndCallback);
}
180 
// Save-state serialization of module-level state. The event names/handlers
// must be re-registered after loading since function pointers aren't saved.
void __KernelMutexDoState(PointerWrap &p)
{
	auto s = p.Section("sceKernelMutex", 1);
	if (!s)
		return;

	Do(p, mutexWaitTimer);
	CoreTiming::RestoreRegisterEvent(mutexWaitTimer, "MutexTimeout", __KernelMutexTimeout);
	Do(p, lwMutexWaitTimer);
	CoreTiming::RestoreRegisterEvent(lwMutexWaitTimer, "LwMutexTimeout", __KernelLwMutexTimeout);
	Do(p, mutexHeldLocks);
}
193 
__KernelMutexObject()194 KernelObject *__KernelMutexObject()
195 {
196 	return new PSPMutex;
197 }
198 
__KernelLwMutexObject()199 KernelObject *__KernelLwMutexObject()
200 {
201 	return new LwMutex;
202 }
203 
// Module shutdown: forget all thread -> mutex ownership records.
void __KernelMutexShutdown()
{
	mutexHeldLocks.clear();
}
208 
// Records `thread` as the owner of `mutex` at lock depth `count`, and adds the
// pair to mutexHeldLocks so the lock is force-released if the thread ends.
static void __KernelMutexAcquireLock(PSPMutex *mutex, int count, SceUID thread) {
#if defined(_DEBUG)
	// Sanity check: the thread must not already be recorded as holding this mutex.
	auto locked = mutexHeldLocks.equal_range(thread);
	for (MutexMap::iterator iter = locked.first; iter != locked.second; ++iter)
		_dbg_assert_msg_((*iter).second != mutex->GetUID(), "Thread %d / mutex %d wasn't removed from mutexHeldLocks properly.", thread, mutex->GetUID());
#endif

	mutexHeldLocks.insert(std::make_pair(thread, mutex->GetUID()));

	mutex->nm.lockLevel = count;
	mutex->nm.lockThread = thread;
}
221 
__KernelMutexAcquireLock(PSPMutex * mutex,int count)222 static void __KernelMutexAcquireLock(PSPMutex *mutex, int count) {
223 	__KernelMutexAcquireLock(mutex, count, __KernelGetCurThread());
224 }
225 
__KernelMutexEraseLock(PSPMutex * mutex)226 static void __KernelMutexEraseLock(PSPMutex *mutex) {
227 	if (mutex->nm.lockThread != -1)
228 	{
229 		SceUID id = mutex->GetUID();
230 		std::pair<MutexMap::iterator, MutexMap::iterator> locked = mutexHeldLocks.equal_range(mutex->nm.lockThread);
231 		for (MutexMap::iterator iter = locked.first; iter != locked.second; ++iter)
232 		{
233 			if ((*iter).second == id)
234 			{
235 				mutexHeldLocks.erase(iter);
236 				break;
237 			}
238 		}
239 	}
240 	mutex->nm.lockThread = -1;
241 }
242 
__KernelMutexFindPriority(std::vector<SceUID> & waiting)243 static std::vector<SceUID>::iterator __KernelMutexFindPriority(std::vector<SceUID> &waiting)
244 {
245 	_dbg_assert_msg_(!waiting.empty(), "__KernelMutexFindPriority: Trying to find best of no threads.");
246 
247 	std::vector<SceUID>::iterator iter, end, best = waiting.end();
248 	u32 best_prio = 0xFFFFFFFF;
249 	for (iter = waiting.begin(), end = waiting.end(); iter != end; ++iter)
250 	{
251 		u32 iter_prio = __KernelGetThreadPrio(*iter);
252 		if (iter_prio < best_prio)
253 		{
254 			best = iter;
255 			best_prio = iter_prio;
256 		}
257 	}
258 
259 	_dbg_assert_msg_(best != waiting.end(), "__KernelMutexFindPriority: Returning invalid best thread.");
260 	return best;
261 }
262 
// Resumes one thread that was waiting on `mutex`. result == 0 means the thread
// actually takes ownership (at the lock count it asked for); a nonzero result
// (e.g. WAIT_DELETE / WAIT_CANCEL) just wakes it with that error code.
// Returns false if the thread wasn't genuinely waiting on this mutex.
static bool __KernelUnlockMutexForThread(PSPMutex *mutex, SceUID threadID, u32 &error, int result) {
	if (!HLEKernel::VerifyWait(threadID, WAITTYPE_MUTEX, mutex->GetUID()))
		return false;

	// If result is an error code, we're just letting it go.
	if (result == 0)
	{
		int wVal = (int)__KernelGetWaitValue(threadID, error);
		__KernelMutexAcquireLock(mutex, wVal, threadID);
	}

	u32 timeoutPtr = __KernelGetWaitTimeoutPtr(threadID, error);
	if (timeoutPtr != 0 && mutexWaitTimer != -1)
	{
		// Remove any event for this thread, and report back the remaining time.
		s64 cyclesLeft = CoreTiming::UnscheduleEvent(mutexWaitTimer, threadID);
		Memory::Write_U32((u32) cyclesToUs(cyclesLeft), timeoutPtr);
	}

	__KernelResumeThreadFromWait(threadID, result);
	return true;
}
285 
__KernelUnlockMutexForThreadCheck(PSPMutex * mutex,SceUID threadID,u32 & error,int result,bool & wokeThreads)286 static bool __KernelUnlockMutexForThreadCheck(PSPMutex *mutex, SceUID threadID, u32 &error, int result, bool &wokeThreads) {
287 	if (mutex->nm.lockThread == -1 && __KernelUnlockMutexForThread(mutex, threadID, error, 0))
288 		return true;
289 	return false;
290 }
291 
// Suspends a mutex lock wait so a callback can run on the waiting thread.
void __KernelMutexBeginCallback(SceUID threadID, SceUID prevCallbackId)
{
	auto result = HLEKernel::WaitBeginCallback<PSPMutex, WAITTYPE_MUTEX, SceUID>(threadID, prevCallbackId, mutexWaitTimer);
	if (result == HLEKernel::WAIT_CB_SUCCESS)
		DEBUG_LOG(SCEKERNEL, "sceKernelLockMutexCB: Suspending lock wait for callback");
	else
		WARN_LOG_REPORT(SCEKERNEL, "sceKernelLockMutexCB: beginning callback with bad wait id?");
}
300 
// Resumes a mutex lock wait after a callback finishes; may instead grant the
// mutex immediately via __KernelUnlockMutexForThreadCheck if it became free.
void __KernelMutexEndCallback(SceUID threadID, SceUID prevCallbackId)
{
	auto result = HLEKernel::WaitEndCallback<PSPMutex, WAITTYPE_MUTEX, SceUID>(threadID, prevCallbackId, mutexWaitTimer, __KernelUnlockMutexForThreadCheck);
	if (result == HLEKernel::WAIT_CB_RESUMED_WAIT)
		DEBUG_LOG(SCEKERNEL, "sceKernelLockMutexCB: Resuming lock wait for callback");
}
307 
// HLE: creates a mutex, optionally pre-locked by the current thread.
// Returns the new UID on success or an SCE error code. Note the check order
// (name, attr, counts) mirrors the real kernel's behavior.
int sceKernelCreateMutex(const char *name, u32 attr, int initialCount, u32 optionsPtr)
{
	if (!name)
	{
		WARN_LOG_REPORT(SCEKERNEL, "%08x=sceKernelCreateMutex(): invalid name", SCE_KERNEL_ERROR_ERROR);
		return SCE_KERNEL_ERROR_ERROR;
	}
	if (attr & ~0xBFF)
	{
		WARN_LOG_REPORT(SCEKERNEL, "%08x=sceKernelCreateMutex(): invalid attr parameter: %08x", SCE_KERNEL_ERROR_ILLEGAL_ATTR, attr);
		return SCE_KERNEL_ERROR_ILLEGAL_ATTR;
	}

	// A non-recursive mutex can only be created with a count of 0 or 1.
	if (initialCount < 0)
		return SCE_KERNEL_ERROR_ILLEGAL_COUNT;
	if ((attr & PSP_MUTEX_ATTR_ALLOW_RECURSIVE) == 0 && initialCount > 1)
		return SCE_KERNEL_ERROR_ILLEGAL_COUNT;

	PSPMutex *mutex = new PSPMutex();
	SceUID id = kernelObjects.Create(mutex);

	mutex->nm.size = sizeof(mutex->nm);
	strncpy(mutex->nm.name, name, KERNELOBJECT_MAX_NAME_LENGTH);
	mutex->nm.name[KERNELOBJECT_MAX_NAME_LENGTH] = 0;
	mutex->nm.attr = attr;
	mutex->nm.initialCount = initialCount;
	if (initialCount == 0)
	{
		mutex->nm.lockLevel = 0;
		mutex->nm.lockThread = -1;
	}
	else
		// Created pre-locked: the creating thread owns it at initialCount.
		__KernelMutexAcquireLock(mutex, initialCount);

	DEBUG_LOG(SCEKERNEL, "%i=sceKernelCreateMutex(%s, %08x, %d, %08x)", id, name, attr, initialCount, optionsPtr);

	// Options and unknown attr bits are accepted but reported, to catch games
	// relying on behavior we don't emulate.
	if (optionsPtr != 0)
	{
		u32 size = Memory::Read_U32(optionsPtr);
		if (size > 4)
			WARN_LOG_REPORT(SCEKERNEL, "sceKernelCreateMutex(%s) unsupported options parameter, size = %d", name, size);
	}
	if ((attr & ~PSP_MUTEX_ATTR_KNOWN) != 0)
		WARN_LOG_REPORT(SCEKERNEL, "sceKernelCreateMutex(%s) unsupported attr parameter: %08x", name, attr);

	return id;
}
355 
// HLE: deletes a mutex, waking all waiters with SCE_KERNEL_ERROR_WAIT_DELETE.
int sceKernelDeleteMutex(SceUID id)
{
	u32 error;
	PSPMutex *mutex = kernelObjects.Get<PSPMutex>(id, error);
	if (mutex)
	{
		DEBUG_LOG(SCEKERNEL, "sceKernelDeleteMutex(%i)", id);
		bool wokeThreads = false;
		std::vector<SceUID>::iterator iter, end;
		for (iter = mutex->waitingThreads.begin(), end = mutex->waitingThreads.end(); iter != end; ++iter)
			wokeThreads |= __KernelUnlockMutexForThread(mutex, *iter, error, SCE_KERNEL_ERROR_WAIT_DELETE);

		// Drop the ownership record before the object goes away.
		if (mutex->nm.lockThread != -1)
			__KernelMutexEraseLock(mutex);
		mutex->waitingThreads.clear();

		if (wokeThreads)
			hleReSchedule("mutex deleted");

		return kernelObjects.Destroy<PSPMutex>(id);
	}
	else
	{
		DEBUG_LOG(SCEKERNEL, "sceKernelDeleteMutex(%i): invalid mutex", id);
		return error;
	}
}
383 
__KernelLockMutexCheck(PSPMutex * mutex,int count,u32 & error)384 static bool __KernelLockMutexCheck(PSPMutex *mutex, int count, u32 &error) {
385 	if (error)
386 		return false;
387 
388 	const bool mutexIsRecursive = (mutex->nm.attr & PSP_MUTEX_ATTR_ALLOW_RECURSIVE) != 0;
389 
390 	if (count <= 0)
391 		error = SCE_KERNEL_ERROR_ILLEGAL_COUNT;
392 	else if (count > 1 && !mutexIsRecursive)
393 		error = SCE_KERNEL_ERROR_ILLEGAL_COUNT;
394 	// Two positive ints will always overflow to negative.
395 	else if (count + mutex->nm.lockLevel < 0)
396 		error = PSP_MUTEX_ERROR_LOCK_OVERFLOW;
397 	// Only a recursive mutex can re-lock.
398 	else if (mutex->nm.lockThread == __KernelGetCurThread())
399 	{
400 		if (mutexIsRecursive)
401 			return true;
402 
403 		error = PSP_MUTEX_ERROR_ALREADY_LOCKED;
404 	}
405 	// Otherwise it would lock or wait.
406 	else if (mutex->nm.lockLevel == 0)
407 		return true;
408 
409 	return false;
410 }
411 
// Attempts an immediate lock. Returns true if acquired (fresh lock or
// recursive re-lock); false either with `error` set, or with error == 0
// meaning the caller must wait for the owner to release it.
static bool __KernelLockMutex(PSPMutex *mutex, int count, u32 &error) {
	if (!__KernelLockMutexCheck(mutex, count, error))
		return false;

	if (mutex->nm.lockLevel == 0)
	{
		__KernelMutexAcquireLock(mutex, count);
		// Nobody had it locked - no need to block
		return true;
	}

	if (mutex->nm.lockThread == __KernelGetCurThread())
	{
		// __KernelLockMutexCheck() would've returned an error, so this must be recursive.
		mutex->nm.lockLevel += count;
		return true;
	}

	return false;
}
432 
// Releases the mutex and hands it to the next waiter (priority order if the
// mutex has PSP_MUTEX_ATTR_PRIORITY, else FIFO). Returns true if a thread was
// woken; otherwise the mutex is left unowned.
static bool __KernelUnlockMutex(PSPMutex *mutex, u32 &error) {
	__KernelMutexEraseLock(mutex);

	bool wokeThreads = false;
	std::vector<SceUID>::iterator iter;
	// Keep trying waiters until one is actually still waiting (stale entries
	// fail VerifyWait inside __KernelUnlockMutexForThread).
	while (!wokeThreads && !mutex->waitingThreads.empty())
	{
		if ((mutex->nm.attr & PSP_MUTEX_ATTR_PRIORITY) != 0)
			iter = __KernelMutexFindPriority(mutex->waitingThreads);
		else
			iter = mutex->waitingThreads.begin();

		wokeThreads |= __KernelUnlockMutexForThread(mutex, *iter, error, 0);
		mutex->waitingThreads.erase(iter);
	}

	if (!wokeThreads)
		mutex->nm.lockThread = -1;

	return wokeThreads;
}
454 
__KernelMutexTimeout(u64 userdata,int cyclesLate)455 void __KernelMutexTimeout(u64 userdata, int cyclesLate)
456 {
457 	SceUID threadID = (SceUID)userdata;
458 	HLEKernel::WaitExecTimeout<PSPMutex, WAITTYPE_MUTEX>(threadID);
459 }
460 
// Thread-end hook: removes the dying thread from any mutex wait list, then
// force-releases every mutex it still held.
void __KernelMutexThreadEnd(SceUID threadID)
{
	u32 error;

	// If it was waiting on the mutex, it should finish now.
	SceUID waitingMutexID = __KernelGetWaitID(threadID, WAITTYPE_MUTEX, error);
	if (waitingMutexID)
	{
		PSPMutex *mutex = kernelObjects.Get<PSPMutex>(waitingMutexID, error);
		if (mutex)
			HLEKernel::RemoveWaitingThread(mutex->waitingThreads, threadID);
	}

	// Unlock all mutexes the thread had locked.
	std::pair<MutexMap::iterator, MutexMap::iterator> locked = mutexHeldLocks.equal_range(threadID);
	for (MutexMap::iterator iter = locked.first; iter != locked.second; )
	{
		// Need to increment early so erase() doesn't invalidate.
		SceUID mutexID = (*iter++).second;
		PSPMutex *mutex = kernelObjects.Get<PSPMutex>(mutexID, error);

		if (mutex)
		{
			// Drop the whole recursive lock level, not just one count.
			mutex->nm.lockLevel = 0;
			__KernelUnlockMutex(mutex, error);
		}
	}
}
489 
__KernelWaitMutex(PSPMutex * mutex,u32 timeoutPtr)490 static void __KernelWaitMutex(PSPMutex *mutex, u32 timeoutPtr) {
491 	if (timeoutPtr == 0 || mutexWaitTimer == -1)
492 		return;
493 
494 	int micro = (int) Memory::Read_U32(timeoutPtr);
495 
496 	// This happens to be how the hardware seems to time things.
497 	if (micro <= 3)
498 		micro = 25;
499 	else if (micro <= 249)
500 		micro = 250;
501 
502 	// This should call __KernelMutexTimeout() later, unless we cancel it.
503 	CoreTiming::ScheduleEvent(usToCycles(micro), mutexWaitTimer, __KernelGetCurThread());
504 }
505 
// HLE: cancels all waits on a mutex (waking waiters with WAIT_CANCEL) and
// optionally re-establishes the lock at `count` for the current thread.
// count <= 0 leaves the mutex unlocked; numWaitThreadsPtr (if valid) receives
// the number of threads that were waiting.
int sceKernelCancelMutex(SceUID uid, int count, u32 numWaitThreadsPtr)
{
	u32 error;
	PSPMutex *mutex = kernelObjects.Get<PSPMutex>(uid, error);
	if (mutex)
	{
		bool lockable = count <= 0 || __KernelLockMutexCheck(mutex, count, error);
		if (!lockable)
		{
			// May still be okay.  As long as the count/etc. are valid.
			if (error != 0 && error != PSP_MUTEX_ERROR_LOCK_OVERFLOW && error != PSP_MUTEX_ERROR_ALREADY_LOCKED)
			{
				DEBUG_LOG(SCEKERNEL, "sceKernelCancelMutex(%i, %d, %08x): invalid count", uid, count, numWaitThreadsPtr);
				return error;
			}
		}

		DEBUG_LOG(SCEKERNEL, "sceKernelCancelMutex(%i, %d, %08x)", uid, count, numWaitThreadsPtr);

		// Remove threads no longer waiting on this first (so the numWaitThreads value is correct.)
		HLEKernel::CleanupWaitingThreads(WAITTYPE_MUTEX, uid, mutex->waitingThreads);

		if (Memory::IsValidAddress(numWaitThreadsPtr))
			Memory::Write_U32((u32)mutex->waitingThreads.size(), numWaitThreadsPtr);

		bool wokeThreads = false;
		for (auto iter = mutex->waitingThreads.begin(), end = mutex->waitingThreads.end(); iter != end; ++iter)
			wokeThreads |= __KernelUnlockMutexForThread(mutex, *iter, error, SCE_KERNEL_ERROR_WAIT_CANCEL);

		if (mutex->nm.lockThread != -1)
			__KernelMutexEraseLock(mutex);
		mutex->waitingThreads.clear();

		// Re-lock (or fully unlock) per the requested count.
		if (count <= 0)
		{
			mutex->nm.lockLevel = 0;
			mutex->nm.lockThread = -1;
		}
		else
			__KernelMutexAcquireLock(mutex, count);

		if (wokeThreads)
			hleReSchedule("mutex canceled");

		return 0;
	}
	else
	{
		DEBUG_LOG(SCEKERNEL, "sceKernelCancelMutex(%i, %d, %08x)", uid, count, numWaitThreadsPtr);
		return error;
	}
}
558 
559 // int sceKernelLockMutex(SceUID id, int count, int *timeout)
// HLE: locks a mutex, blocking (without callbacks) until available or timeout.
int sceKernelLockMutex(SceUID id, int count, u32 timeoutPtr)
{
	DEBUG_LOG(SCEKERNEL, "sceKernelLockMutex(%i, %i, %08x)", id, count, timeoutPtr);
	u32 error;
	PSPMutex *mutex = kernelObjects.Get<PSPMutex>(id, error);

	if (__KernelLockMutex(mutex, count, error))
		return 0;
	else if (error)
		return error;
	else
	{
		SceUID threadID = __KernelGetCurThread();
		// May be in a tight loop timing out (where we don't remove from waitingThreads yet), don't want to add duplicates.
		if (std::find(mutex->waitingThreads.begin(), mutex->waitingThreads.end(), threadID) == mutex->waitingThreads.end())
			mutex->waitingThreads.push_back(threadID);
		__KernelWaitMutex(mutex, timeoutPtr);
		// Non-callback wait; the requested count rides along as the wait value.
		__KernelWaitCurThread(WAITTYPE_MUTEX, id, count, timeoutPtr, false, "mutex waited");

		// Return value will be overwritten by wait.
		return 0;
	}
}
583 
584 // int sceKernelLockMutexCB(SceUID id, int count, int *timeout)
// HLE: like sceKernelLockMutex but callback-enabled — pending callbacks can
// run during the wait, and may run even when the lock would succeed at once.
int sceKernelLockMutexCB(SceUID id, int count, u32 timeoutPtr)
{
	DEBUG_LOG(SCEKERNEL, "sceKernelLockMutexCB(%i, %i, %08x)", id, count, timeoutPtr);
	u32 error;
	PSPMutex *mutex = kernelObjects.Get<PSPMutex>(id, error);

	if (!__KernelLockMutexCheck(mutex, count, error))
	{
		if (error)
			return error;

		SceUID threadID = __KernelGetCurThread();
		// May be in a tight loop timing out (where we don't remove from waitingThreads yet), don't want to add duplicates.
		if (std::find(mutex->waitingThreads.begin(), mutex->waitingThreads.end(), threadID) == mutex->waitingThreads.end())
			mutex->waitingThreads.push_back(threadID);
		__KernelWaitMutex(mutex, timeoutPtr);
		// Callback-enabled wait.
		__KernelWaitCurThread(WAITTYPE_MUTEX, id, count, timeoutPtr, true, "mutex waited");

		// Return value will be overwritten by wait.
		return 0;
	}
	else
	{
		if (__KernelCurHasReadyCallbacks())
		{
			// Might actually end up having to wait, so set the timeout.
			__KernelWaitMutex(mutex, timeoutPtr);
			__KernelWaitCallbacksCurThread(WAITTYPE_MUTEX, id, count, timeoutPtr);

			// Return value will be written to callback's v0, but... that's probably fine?
		}
		else
			__KernelLockMutex(mutex, count, error);

		return 0;
	}
}
622 
623 // int sceKernelTryLockMutex(SceUID id, int count)
sceKernelTryLockMutex(SceUID id,int count)624 int sceKernelTryLockMutex(SceUID id, int count)
625 {
626 	DEBUG_LOG(SCEKERNEL, "sceKernelTryLockMutex(%i, %i)", id, count);
627 	u32 error;
628 	PSPMutex *mutex = kernelObjects.Get<PSPMutex>(id, error);
629 
630 	if (__KernelLockMutex(mutex, count, error))
631 		return 0;
632 	else if (error)
633 		return error;
634 	else
635 		return PSP_MUTEX_ERROR_TRYLOCK_FAILED;
636 }
637 
638 // int sceKernelUnlockMutex(SceUID id, int count)
// HLE: releases `count` levels of a mutex held by the current thread; waiters
// are woken only when the lock level reaches zero.
int sceKernelUnlockMutex(SceUID id, int count)
{
	DEBUG_LOG(SCEKERNEL, "sceKernelUnlockMutex(%i, %i)", id, count);
	u32 error;
	PSPMutex *mutex = kernelObjects.Get<PSPMutex>(id, error);

	if (error)
		return error;
	if (count <= 0)
		return SCE_KERNEL_ERROR_ILLEGAL_COUNT;
	if ((mutex->nm.attr & PSP_MUTEX_ATTR_ALLOW_RECURSIVE) == 0 && count > 1)
		return SCE_KERNEL_ERROR_ILLEGAL_COUNT;
	// Only the owning thread may unlock, and only while actually locked.
	if (mutex->nm.lockLevel == 0 || mutex->nm.lockThread != __KernelGetCurThread())
		return PSP_MUTEX_ERROR_NOT_LOCKED;
	if (mutex->nm.lockLevel < count)
		return PSP_MUTEX_ERROR_UNLOCK_UNDERFLOW;

	mutex->nm.lockLevel -= count;

	if (mutex->nm.lockLevel == 0)
	{
		if (__KernelUnlockMutex(mutex, error))
			hleReSchedule("mutex unlocked");
	}

	return 0;
}
666 
// HLE: copies the mutex's NativeMutex info block into guest memory at infoAddr.
int sceKernelReferMutexStatus(SceUID id, u32 infoAddr)
{
	u32 error;
	PSPMutex *m = kernelObjects.Get<PSPMutex>(id, error);
	if (!m)
	{
		ERROR_LOG(SCEKERNEL, "sceKernelReferMutexStatus(%i, %08x): invalid mutex id", id, infoAddr);
		return error;
	}

	DEBUG_LOG(SCEKERNEL, "sceKernelReferMutexStatus(%08x, %08x)", id, infoAddr);

	// Should we crash the thread somehow?
	if (!Memory::IsValidAddress(infoAddr))
		return -1;

	// Don't write if the size is 0.  Anything else is A-OK, though, apparently.
	if (Memory::Read_U32(infoAddr) != 0)
	{
		// Prune stale waiters first so numWaitThreads is accurate.
		HLEKernel::CleanupWaitingThreads(WAITTYPE_MUTEX, id, m->waitingThreads);

		m->nm.numWaitThreads = (int) m->waitingThreads.size();
		Memory::WriteStruct(infoAddr, &m->nm);
	}
	return 0;
}
693 
// HLE: creates a lightweight mutex backed by the guest-memory work area at
// workareaPtr. Returns 0 on success (the object UID is stored in the work
// area, not returned). NOTE(review): workareaPtr is not validated here —
// presumably callers always pass a valid address; compare sceKernelDeleteLwMutex.
int sceKernelCreateLwMutex(u32 workareaPtr, const char *name, u32 attr, int initialCount, u32 optionsPtr)
{
	if (!name)
	{
		WARN_LOG_REPORT(SCEKERNEL, "%08x=sceKernelCreateLwMutex(): invalid name", SCE_KERNEL_ERROR_ERROR);
		return SCE_KERNEL_ERROR_ERROR;
	}
	if (attr >= 0x400)
	{
		WARN_LOG_REPORT(SCEKERNEL, "%08x=sceKernelCreateLwMutex(): invalid attr parameter: %08x", SCE_KERNEL_ERROR_ILLEGAL_ATTR, attr);
		return SCE_KERNEL_ERROR_ILLEGAL_ATTR;
	}

	// A non-recursive lw-mutex can only start with a count of 0 or 1.
	if (initialCount < 0)
		return SCE_KERNEL_ERROR_ILLEGAL_COUNT;
	if ((attr & PSP_MUTEX_ATTR_ALLOW_RECURSIVE) == 0 && initialCount > 1)
		return SCE_KERNEL_ERROR_ILLEGAL_COUNT;

	LwMutex *mutex = new LwMutex();
	SceUID id = kernelObjects.Create(mutex);
	mutex->nm.size = sizeof(mutex->nm);
	strncpy(mutex->nm.name, name, KERNELOBJECT_MAX_NAME_LENGTH);
	mutex->nm.name[KERNELOBJECT_MAX_NAME_LENGTH] = 0;
	mutex->nm.attr = attr;
	mutex->nm.uid = id;
	mutex->nm.workarea = workareaPtr;
	mutex->nm.initialCount = initialCount;
	// The lock state itself lives in the guest-memory work area.
	auto workarea = PSPPointer<NativeLwMutexWorkarea>::Create(workareaPtr);
	workarea->init();
	workarea->lockLevel = initialCount;
	if (initialCount == 0)
		workarea->lockThread = 0;
	else
		// Created pre-locked by the current thread.
		workarea->lockThread = __KernelGetCurThread();
	workarea->attr = attr;
	workarea->uid = id;

	DEBUG_LOG(SCEKERNEL, "sceKernelCreateLwMutex(%08x, %s, %08x, %d, %08x)", workareaPtr, name, attr, initialCount, optionsPtr);

	// Options and unknown attr bits are accepted but reported.
	if (optionsPtr != 0)
	{
		u32 size = Memory::Read_U32(optionsPtr);
		if (size > 4)
			WARN_LOG_REPORT(SCEKERNEL, "sceKernelCreateLwMutex(%s) unsupported options parameter, size = %d", name, size);
	}
	if ((attr & ~PSP_MUTEX_ATTR_KNOWN) != 0)
		WARN_LOG_REPORT(SCEKERNEL, "sceKernelCreateLwMutex(%s) unsupported attr parameter: %08x", name, attr);

	return 0;
}
744 
// Resumes one thread waiting on an lw-mutex. result == 0 hands it ownership
// (updating the guest work area); nonzero just wakes it with that error.
// Templated on the workarea accessor so both PSPPointer and raw pointers work.
template <typename T>
bool __KernelUnlockLwMutexForThread(LwMutex *mutex, T workarea, SceUID threadID, u32 &error, int result)
{
	if (!HLEKernel::VerifyWait(threadID, WAITTYPE_LWMUTEX, mutex->GetUID()))
		return false;

	// If result is an error code, we're just letting it go.
	if (result == 0)
	{
		workarea->lockLevel = (int) __KernelGetWaitValue(threadID, error);
		workarea->lockThread = threadID;
	}

	u32 timeoutPtr = __KernelGetWaitTimeoutPtr(threadID, error);
	if (timeoutPtr != 0 && lwMutexWaitTimer != -1)
	{
		// Remove any event for this thread, and report back the remaining time.
		s64 cyclesLeft = CoreTiming::UnscheduleEvent(lwMutexWaitTimer, threadID);
		Memory::Write_U32((u32) cyclesToUs(cyclesLeft), timeoutPtr);
	}

	__KernelResumeThreadFromWait(threadID, result);
	return true;
}
769 
// HLE: deletes the lw-mutex whose UID is stored in the work area, waking all
// waiters with SCE_KERNEL_ERROR_WAIT_DELETE and invalidating the work area.
int sceKernelDeleteLwMutex(u32 workareaPtr)
{
	DEBUG_LOG(SCEKERNEL, "sceKernelDeleteLwMutex(%08x)", workareaPtr);

	if (!workareaPtr || !Memory::IsValidAddress(workareaPtr))
		return SCE_KERNEL_ERROR_ILLEGAL_ADDR;

	auto workarea = PSPPointer<NativeLwMutexWorkarea>::Create(workareaPtr);

	u32 error;
	LwMutex *mutex = kernelObjects.Get<LwMutex>(workarea->uid, error);
	if (mutex)
	{
		bool wokeThreads = false;
		std::vector<SceUID>::iterator iter, end;
		for (iter = mutex->waitingThreads.begin(), end = mutex->waitingThreads.end(); iter != end; ++iter)
			wokeThreads |= __KernelUnlockLwMutexForThread(mutex, workarea, *iter, error, SCE_KERNEL_ERROR_WAIT_DELETE);
		mutex->waitingThreads.clear();

		// Mark the guest work area as no longer backed by a kernel object.
		workarea->clear();

		if (wokeThreads)
			hleReSchedule("lwmutex deleted");

		return kernelObjects.Destroy<LwMutex>(mutex->GetUID());
	}
	else
		return error;
}
799 
__KernelLockLwMutex(NativeLwMutexWorkarea * workarea,int count,u32 & error)800 static bool __KernelLockLwMutex(NativeLwMutexWorkarea *workarea, int count, u32 &error)
801 {
802 	if (!error)
803 	{
804 		if (count <= 0)
805 			error = SCE_KERNEL_ERROR_ILLEGAL_COUNT;
806 		else if (count > 1 && !(workarea->attr & PSP_MUTEX_ATTR_ALLOW_RECURSIVE))
807 			error = SCE_KERNEL_ERROR_ILLEGAL_COUNT;
808 		// Two positive ints will always overflow to negative.
809 		else if (count + workarea->lockLevel < 0)
810 			error = PSP_LWMUTEX_ERROR_LOCK_OVERFLOW;
811 		else if (workarea->uid == -1)
812 			error = PSP_LWMUTEX_ERROR_NO_SUCH_LWMUTEX;
813 	}
814 
815 	if (error)
816 		return false;
817 
818 	if (workarea->lockLevel == 0)
819 	{
820 		if (workarea->lockThread != 0)
821 		{
822 			// Validate that it actually exists so we can return an error if not.
823 			kernelObjects.Get<LwMutex>(workarea->uid, error);
824 			if (error)
825 				return false;
826 		}
827 
828 		workarea->lockLevel = count;
829 		workarea->lockThread = __KernelGetCurThread();
830 		return true;
831 	}
832 
833 	if (workarea->lockThread == __KernelGetCurThread())
834 	{
835 		// Recursive mutex, let's just increase the lock count and keep going
836 		if (workarea->attr & PSP_MUTEX_ATTR_ALLOW_RECURSIVE)
837 		{
838 			workarea->lockLevel += count;
839 			return true;
840 		}
841 		else
842 		{
843 			error = PSP_LWMUTEX_ERROR_ALREADY_LOCKED;
844 			return false;
845 		}
846 	}
847 
848 	return false;
849 }
850 
// Releases the lw-mutex and hands it to the next waiter (priority order if the
// attr requests it, else FIFO). Returns true if a thread was woken; otherwise
// the work area is marked unowned.
template <typename T>
bool __KernelUnlockLwMutex(T workarea, u32 &error)
{
	LwMutex *mutex = kernelObjects.Get<LwMutex>(workarea->uid, error);
	if (error)
	{
		// Backing object is gone; just clear ownership in the work area.
		workarea->lockThread = 0;
		return false;
	}

	bool wokeThreads = false;
	std::vector<SceUID>::iterator iter;
	// Keep trying waiters until one is actually still waiting (stale entries
	// fail VerifyWait inside __KernelUnlockLwMutexForThread).
	while (!wokeThreads && !mutex->waitingThreads.empty())
	{
		if ((mutex->nm.attr & PSP_MUTEX_ATTR_PRIORITY) != 0)
			iter = __KernelMutexFindPriority(mutex->waitingThreads);
		else
			iter = mutex->waitingThreads.begin();

		wokeThreads |= __KernelUnlockLwMutexForThread(mutex, workarea, *iter, error, 0);
		mutex->waitingThreads.erase(iter);
	}

	if (!wokeThreads)
		workarea->lockThread = 0;

	return wokeThreads;
}
879 
__KernelLwMutexTimeout(u64 userdata,int cyclesLate)880 void __KernelLwMutexTimeout(u64 userdata, int cyclesLate)
881 {
882 	SceUID threadID = (SceUID)userdata;
883 	HLEKernel::WaitExecTimeout<LwMutex, WAITTYPE_LWMUTEX>(threadID);
884 }
885 
__KernelWaitLwMutex(LwMutex * mutex,u32 timeoutPtr)886 static void __KernelWaitLwMutex(LwMutex *mutex, u32 timeoutPtr)
887 {
888 	if (timeoutPtr == 0 || lwMutexWaitTimer == -1)
889 		return;
890 
891 	int micro = (int) Memory::Read_U32(timeoutPtr);
892 
893 	// This happens to be how the hardware seems to time things.
894 	if (micro <= 3)
895 		micro = 25;
896 	else if (micro <= 249)
897 		micro = 250;
898 
899 	// This should call __KernelLwMutexTimeout() later, unless we cancel it.
900 	CoreTiming::ScheduleEvent(usToCycles(micro), lwMutexWaitTimer, __KernelGetCurThread());
901 }
902 
__KernelUnlockLwMutexForThreadCheck(LwMutex * mutex,SceUID threadID,u32 & error,int result,bool & wokeThreads)903 static bool __KernelUnlockLwMutexForThreadCheck(LwMutex *mutex, SceUID threadID, u32 &error, int result, bool &wokeThreads)
904 {
905 	if (mutex->nm.lockThread == -1 && __KernelUnlockLwMutexForThread(mutex, mutex->nm.workarea, threadID, error, 0))
906 		return true;
907 	return false;
908 }
909 
__KernelLwMutexBeginCallback(SceUID threadID,SceUID prevCallbackId)910 void __KernelLwMutexBeginCallback(SceUID threadID, SceUID prevCallbackId)
911 {
912 	auto result = HLEKernel::WaitBeginCallback<LwMutex, WAITTYPE_LWMUTEX, SceUID>(threadID, prevCallbackId, lwMutexWaitTimer);
913 	if (result == HLEKernel::WAIT_CB_SUCCESS)
914 		DEBUG_LOG(SCEKERNEL, "sceKernelLockLwMutexCB: Suspending lock wait for callback");
915 	else
916 		WARN_LOG_REPORT(SCEKERNEL, "sceKernelLockLwMutexCB: beginning callback with bad wait id?");
917 }
918 
__KernelLwMutexEndCallback(SceUID threadID,SceUID prevCallbackId)919 void __KernelLwMutexEndCallback(SceUID threadID, SceUID prevCallbackId)
920 {
921 	auto result = HLEKernel::WaitEndCallback<LwMutex, WAITTYPE_LWMUTEX, SceUID>(threadID, prevCallbackId, lwMutexWaitTimer, __KernelUnlockLwMutexForThreadCheck);
922 	if (result == HLEKernel::WAIT_CB_RESUMED_WAIT)
923 		DEBUG_LOG(SCEKERNEL, "sceKernelLockLwMutexCB: Resuming lock wait for callback");
924 }
925 
sceKernelTryLockLwMutex(u32 workareaPtr,int count)926 int sceKernelTryLockLwMutex(u32 workareaPtr, int count)
927 {
928 	DEBUG_LOG(SCEKERNEL, "sceKernelTryLockLwMutex(%08x, %i)", workareaPtr, count);
929 
930 	if (!Memory::IsValidAddress(workareaPtr)) {
931 		ERROR_LOG(SCEKERNEL, "Bad workarea pointer for LwMutex");
932 		return SCE_KERNEL_ERROR_ACCESS_ERROR;
933 	}
934 
935 	auto workarea = PSPPointer<NativeLwMutexWorkarea>::Create(workareaPtr);
936 
937 	u32 error = 0;
938 	if (__KernelLockLwMutex(workarea, count, error))
939 		return 0;
940 	// Unlike sceKernelTryLockLwMutex_600, this always returns the same error.
941 	else if (error)
942 		return PSP_MUTEX_ERROR_TRYLOCK_FAILED;
943 	else
944 		return PSP_MUTEX_ERROR_TRYLOCK_FAILED;
945 }
946 
sceKernelTryLockLwMutex_600(u32 workareaPtr,int count)947 int sceKernelTryLockLwMutex_600(u32 workareaPtr, int count)
948 {
949 	DEBUG_LOG(SCEKERNEL, "sceKernelTryLockLwMutex_600(%08x, %i)", workareaPtr, count);
950 
951 	if (!Memory::IsValidAddress(workareaPtr)) {
952 		ERROR_LOG(SCEKERNEL, "Bad workarea pointer for LwMutex");
953 		return SCE_KERNEL_ERROR_ACCESS_ERROR;
954 	}
955 
956 	auto workarea = PSPPointer<NativeLwMutexWorkarea>::Create(workareaPtr);
957 
958 	u32 error = 0;
959 	if (__KernelLockLwMutex(workarea, count, error))
960 		return 0;
961 	else if (error)
962 		return error;
963 	else
964 		return PSP_LWMUTEX_ERROR_TRYLOCK_FAILED;
965 }
966 
sceKernelLockLwMutex(u32 workareaPtr,int count,u32 timeoutPtr)967 int sceKernelLockLwMutex(u32 workareaPtr, int count, u32 timeoutPtr)
968 {
969 	VERBOSE_LOG(SCEKERNEL, "sceKernelLockLwMutex(%08x, %i, %08x)", workareaPtr, count, timeoutPtr);
970 
971 	if (!Memory::IsValidAddress(workareaPtr)) {
972 		ERROR_LOG(SCEKERNEL, "Bad workarea pointer for LwMutex");
973 		return SCE_KERNEL_ERROR_ACCESS_ERROR;
974 	}
975 
976 	auto workarea = PSPPointer<NativeLwMutexWorkarea>::Create(workareaPtr);
977 
978 	u32 error = 0;
979 	if (__KernelLockLwMutex(workarea, count, error))
980 		return 0;
981 	else if (error)
982 		return error;
983 	else
984 	{
985 		LwMutex *mutex = kernelObjects.Get<LwMutex>(workarea->uid, error);
986 		if (mutex)
987 		{
988 			SceUID threadID = __KernelGetCurThread();
989 			// May be in a tight loop timing out (where we don't remove from waitingThreads yet), don't want to add duplicates.
990 			if (std::find(mutex->waitingThreads.begin(), mutex->waitingThreads.end(), threadID) == mutex->waitingThreads.end())
991 				mutex->waitingThreads.push_back(threadID);
992 			__KernelWaitLwMutex(mutex, timeoutPtr);
993 			__KernelWaitCurThread(WAITTYPE_LWMUTEX, workarea->uid, count, timeoutPtr, false, "lwmutex waited");
994 
995 			// Return value will be overwritten by wait.
996 			return 0;
997 		}
998 		else
999 			return error;
1000 	}
1001 }
1002 
sceKernelLockLwMutexCB(u32 workareaPtr,int count,u32 timeoutPtr)1003 int sceKernelLockLwMutexCB(u32 workareaPtr, int count, u32 timeoutPtr)
1004 {
1005 	VERBOSE_LOG(SCEKERNEL, "sceKernelLockLwMutexCB(%08x, %i, %08x)", workareaPtr, count, timeoutPtr);
1006 
1007 	if (!Memory::IsValidAddress(workareaPtr)) {
1008 		ERROR_LOG(SCEKERNEL, "Bad workarea pointer for LwMutex");
1009 		return SCE_KERNEL_ERROR_ACCESS_ERROR;
1010 	}
1011 
1012 	auto workarea = PSPPointer<NativeLwMutexWorkarea>::Create(workareaPtr);
1013 
1014 	u32 error = 0;
1015 	if (__KernelLockLwMutex(workarea, count, error))
1016 		return 0;
1017 	else if (error)
1018 		return error;
1019 	else
1020 	{
1021 		LwMutex *mutex = kernelObjects.Get<LwMutex>(workarea->uid, error);
1022 		if (mutex)
1023 		{
1024 			SceUID threadID = __KernelGetCurThread();
1025 			// May be in a tight loop timing out (where we don't remove from waitingThreads yet), don't want to add duplicates.
1026 			if (std::find(mutex->waitingThreads.begin(), mutex->waitingThreads.end(), threadID) == mutex->waitingThreads.end())
1027 				mutex->waitingThreads.push_back(threadID);
1028 			__KernelWaitLwMutex(mutex, timeoutPtr);
1029 			__KernelWaitCurThread(WAITTYPE_LWMUTEX, workarea->uid, count, timeoutPtr, true, "lwmutex cb waited");
1030 
1031 			// Return value will be overwritten by wait.
1032 			return 0;
1033 		}
1034 		else
1035 			return error;
1036 	}
1037 }
1038 
sceKernelUnlockLwMutex(u32 workareaPtr,int count)1039 int sceKernelUnlockLwMutex(u32 workareaPtr, int count)
1040 {
1041 	VERBOSE_LOG(SCEKERNEL, "sceKernelUnlockLwMutex(%08x, %i)", workareaPtr, count);
1042 
1043 	if (!Memory::IsValidAddress(workareaPtr)) {
1044 		ERROR_LOG(SCEKERNEL, "Bad workarea pointer for LwMutex");
1045 		return SCE_KERNEL_ERROR_ACCESS_ERROR;
1046 	}
1047 
1048 	auto workarea = PSPPointer<NativeLwMutexWorkarea>::Create(workareaPtr);
1049 
1050 	if (workarea->uid == -1)
1051 		return PSP_LWMUTEX_ERROR_NO_SUCH_LWMUTEX;
1052 	else if (count <= 0)
1053 		return SCE_KERNEL_ERROR_ILLEGAL_COUNT;
1054 	else if ((workarea->attr & PSP_MUTEX_ATTR_ALLOW_RECURSIVE) == 0 && count > 1)
1055 		return SCE_KERNEL_ERROR_ILLEGAL_COUNT;
1056 	else if (workarea->lockLevel == 0 || workarea->lockThread != __KernelGetCurThread())
1057 		return PSP_LWMUTEX_ERROR_NOT_LOCKED;
1058 	else if (workarea->lockLevel < count)
1059 		return PSP_LWMUTEX_ERROR_UNLOCK_UNDERFLOW;
1060 
1061 	workarea->lockLevel -= count;
1062 
1063 	if (workarea->lockLevel == 0)
1064 	{
1065 		u32 error;
1066 		if (__KernelUnlockLwMutex(workarea, error))
1067 			hleReSchedule("lwmutex unlocked");
1068 	}
1069 
1070 	return 0;
1071 }
1072 
__KernelReferLwMutexStatus(SceUID uid,u32 infoPtr)1073 static int __KernelReferLwMutexStatus(SceUID uid, u32 infoPtr)
1074 {
1075 	u32 error;
1076 	LwMutex *m = kernelObjects.Get<LwMutex>(uid, error);
1077 	if (!m)
1078 		return error;
1079 
1080 	// Should we crash the thread somehow?
1081 	if (!Memory::IsValidAddress(infoPtr))
1082 		return -1;
1083 
1084 	if (Memory::Read_U32(infoPtr) != 0)
1085 	{
1086 		auto workarea = m->nm.workarea;
1087 
1088 		HLEKernel::CleanupWaitingThreads(WAITTYPE_LWMUTEX, uid, m->waitingThreads);
1089 
1090 		// Refresh and write
1091 		m->nm.currentCount = workarea->lockLevel;
1092 		m->nm.lockThread = workarea->lockThread == 0 ? SceUID_le(-1) : workarea->lockThread;
1093 		m->nm.numWaitThreads = (int) m->waitingThreads.size();
1094 		Memory::WriteStruct(infoPtr, &m->nm);
1095 	}
1096 	return 0;
1097 }
1098 
sceKernelReferLwMutexStatusByID(SceUID uid,u32 infoPtr)1099 int sceKernelReferLwMutexStatusByID(SceUID uid, u32 infoPtr)
1100 {
1101 	int error = __KernelReferLwMutexStatus(uid, infoPtr);
1102 	if (error >= 0)
1103 	{
1104 		DEBUG_LOG(SCEKERNEL, "sceKernelReferLwMutexStatusByID(%08x, %08x)", uid, infoPtr);
1105 		return error;
1106 	}
1107 	else
1108 	{
1109 		ERROR_LOG(SCEKERNEL, "%08x=sceKernelReferLwMutexStatusByID(%08x, %08x)", error, uid, infoPtr);
1110 		return error;
1111 	}
1112 }
1113 
sceKernelReferLwMutexStatus(u32 workareaPtr,u32 infoPtr)1114 int sceKernelReferLwMutexStatus(u32 workareaPtr, u32 infoPtr)
1115 {
1116 	if (!Memory::IsValidAddress(workareaPtr)) {
1117 		ERROR_LOG(SCEKERNEL, "Bad workarea pointer for LwMutex");
1118 		return SCE_KERNEL_ERROR_ACCESS_ERROR;
1119 	}
1120 
1121 	auto workarea = PSPPointer<NativeLwMutexWorkarea>::Create(workareaPtr);
1122 
1123 	int error = __KernelReferLwMutexStatus(workarea->uid, infoPtr);
1124 	if (error >= 0)
1125 	{
1126 		DEBUG_LOG(SCEKERNEL, "sceKernelReferLwMutexStatus(%08x, %08x)", workareaPtr, infoPtr);
1127 		return error;
1128 	}
1129 	else
1130 	{
1131 		ERROR_LOG(SCEKERNEL, "%08x=sceKernelReferLwMutexStatus(%08x, %08x)", error, workareaPtr, infoPtr);
1132 		return error;
1133 	}
1134 }
1135