1 // Copyright (c) 2012- PPSSPP Project.
2 
3 // This program is free software: you can redistribute it and/or modify
4 // it under the terms of the GNU General Public License as published by
5 // the Free Software Foundation, version 2.0 or later versions.
6 
7 // This program is distributed in the hope that it will be useful,
8 // but WITHOUT ANY WARRANTY; without even the implied warranty of
9 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
10 // GNU General Public License 2.0 for more details.
11 
12 // A copy of the GPL 2.0 should have been included with the program.
13 // If not, see http://www.gnu.org/licenses/
14 
15 // Official git repository and contact information can be found at
16 // https://github.com/hrydgard/ppsspp and http://www.ppsspp.org/.
17 
18 #include <algorithm>
19 #include <list>
20 #include <map>
21 #include <mutex>
22 #include <set>
23 
24 #include "Common/CommonTypes.h"
25 #include "Common/LogManager.h"
26 #include "Common/StringUtils.h"
27 #include "Common/Serialize/Serializer.h"
28 #include "Common/Serialize/SerializeFuncs.h"
29 #include "Common/Serialize/SerializeList.h"
30 #include "Common/Serialize/SerializeMap.h"
31 #include "Core/HLE/HLE.h"
32 #include "Core/HLE/HLETables.h"
33 #include "Core/MIPS/MIPSAnalyst.h"
34 #include "Core/MIPS/MIPSCodeUtils.h"
35 #include "Core/MIPS/MIPS.h"
36 #include "Core/MIPS/MIPSDebugInterface.h"
37 #include "Core/Core.h"
38 #include "Core/CoreTiming.h"
39 #include "Core/MemMapHelpers.h"
40 #include "Core/MIPS/JitCommon/JitCommon.h"
41 #include "Core/Reporting.h"
42 
43 #include "Core/HLE/sceAudio.h"
44 #include "Core/HLE/sceKernel.h"
45 #include "Core/HLE/sceKernelMemory.h"
46 #include "Core/HLE/sceKernelThread.h"
47 #include "Core/HLE/sceKernelModule.h"
48 #include "Core/HLE/sceKernelInterrupt.h"
49 #include "Core/HLE/KernelThreadDebugInterface.h"
50 #include "Core/HLE/KernelWaitHelpers.h"
51 #include "Core/HLE/ThreadQueueList.h"
52 
53 struct WaitTypeNames {
54 	WaitType type;
55 	const char *name;
56 };
57 
58 const WaitTypeNames waitTypeNames[] = {
59 	{ WAITTYPE_NONE,            "None" },
60 	{ WAITTYPE_SLEEP,           "Sleep" },
61 	{ WAITTYPE_DELAY,           "Delay" },
62 	{ WAITTYPE_SEMA,            "Semaphore" },
63 	{ WAITTYPE_EVENTFLAG,       "Event flag", },
64 	{ WAITTYPE_MBX,             "MBX" },
65 	{ WAITTYPE_VPL,             "VPL" },
66 	{ WAITTYPE_FPL,             "FPL" },
67 	{ WAITTYPE_MSGPIPE,         "Message pipe" },
68 	{ WAITTYPE_THREADEND,       "Thread end" },
69 	{ WAITTYPE_AUDIOCHANNEL,    "Audio channel" },
70 	{ WAITTYPE_UMD,             "UMD" },
71 	{ WAITTYPE_VBLANK,          "VBlank" },
72 	{ WAITTYPE_MUTEX,           "Mutex" },
73 	{ WAITTYPE_LWMUTEX,         "LwMutex" },
74 	{ WAITTYPE_CTRL,            "Control" },
75 	{ WAITTYPE_IO,              "IO" },
76 	{ WAITTYPE_GEDRAWSYNC,      "GeDrawSync" },
77 	{ WAITTYPE_GELISTSYNC,      "GeListSync" },
78 	{ WAITTYPE_MODULE,          "Module" },
79 	{ WAITTYPE_HLEDELAY,        "HleDelay" },
80 	{ WAITTYPE_TLSPL,           "TLS" },
81 	{ WAITTYPE_VMEM,            "Volatile Mem" },
82 	{ WAITTYPE_ASYNCIO,         "AsyncIO" },
83 	{ WAITTYPE_MICINPUT,        "Microphone input"},
84 	{ WAITTYPE_NET,             "Network"},
85 	{ WAITTYPE_USB,             "USB" },
86 };
87 
getWaitTypeName(WaitType type)88 const char *getWaitTypeName(WaitType type) {
89 	for (WaitTypeNames info : waitTypeNames) {
90 		if (info.type == type)
91 			return info.name;
92 	}
93 
94 	return "Unknown";
95 }
96 
97 enum ThreadEventType {
98 	THREADEVENT_CREATE = 1,
99 	THREADEVENT_START  = 2,
100 	THREADEVENT_EXIT   = 4,
101 	THREADEVENT_DELETE = 8,
102 	THREADEVENT_SUPPORTED = THREADEVENT_CREATE | THREADEVENT_START | THREADEVENT_EXIT | THREADEVENT_DELETE,
103 };
104 
105 bool __KernelThreadTriggerEvent(bool isKernel, SceUID threadID, ThreadEventType type);
106 
107 enum {
108 	PSP_THREAD_ATTR_KERNEL       = 0x00001000,
109 	PSP_THREAD_ATTR_VFPU         = 0x00004000,
110 	PSP_THREAD_ATTR_SCRATCH_SRAM = 0x00008000, // Save/restore scratch as part of context???
111 	PSP_THREAD_ATTR_NO_FILLSTACK = 0x00100000, // No filling of 0xff.
112 	PSP_THREAD_ATTR_CLEAR_STACK  = 0x00200000, // Clear thread stack when deleted.
113 	PSP_THREAD_ATTR_LOW_STACK    = 0x00400000, // Allocate stack from bottom not top.
114 	PSP_THREAD_ATTR_USER         = 0x80000000,
115 	PSP_THREAD_ATTR_USBWLAN      = 0xa0000000,
116 	PSP_THREAD_ATTR_VSH          = 0xc0000000,
117 
118 	// TODO: Support more, not even sure what all of these mean.
119 	PSP_THREAD_ATTR_USER_MASK    = 0xf8f060ff,
120 	PSP_THREAD_ATTR_USER_ERASE   = 0x78800000,
121 	PSP_THREAD_ATTR_SUPPORTED    = (PSP_THREAD_ATTR_KERNEL | PSP_THREAD_ATTR_VFPU | PSP_THREAD_ATTR_NO_FILLSTACK | PSP_THREAD_ATTR_CLEAR_STACK | PSP_THREAD_ATTR_LOW_STACK | PSP_THREAD_ATTR_USER)
122 };
123 
124 struct NativeCallback
125 {
126 	SceUInt_le size;
127 	char name[32];
128 	SceUID_le threadId;
129 	u32_le entrypoint;
130 	u32_le commonArgument;
131 
132 	s32_le notifyCount;
133 	s32_le notifyArg;
134 };
135 
136 class PSPCallback : public KernelObject {
137 public:
GetName()138 	const char *GetName() override { return nc.name; }
GetTypeName()139 	const char *GetTypeName() override { return GetStaticTypeName(); }
GetStaticTypeName()140 	static const char *GetStaticTypeName() { return "CallBack"; }
141 
GetQuickInfo(char * ptr,int size)142 	void GetQuickInfo(char *ptr, int size) override {
143 		sprintf(ptr, "thread=%i, argument= %08x",
144 			//hackAddress,
145 			nc.threadId,
146 			nc.commonArgument);
147 	}
148 
~PSPCallback()149 	~PSPCallback() {
150 	}
151 
GetMissingErrorCode()152 	static u32 GetMissingErrorCode() { return SCE_KERNEL_ERROR_UNKNOWN_CBID; }
GetStaticIDType()153 	static int GetStaticIDType() { return SCE_KERNEL_TMID_Callback; }
GetIDType() const154 	int GetIDType() const override { return SCE_KERNEL_TMID_Callback; }
155 
DoState(PointerWrap & p)156 	void DoState(PointerWrap &p) override
157 	{
158 		auto s = p.Section("Callback", 1);
159 		if (!s)
160 			return;
161 
162 		Do(p, nc);
163 		// Saved values were moved to mips call, ignoring here.
164 		u32 legacySaved = 0;
165 		Do(p, legacySaved);
166 		Do(p, legacySaved);
167 		Do(p, legacySaved);
168 		Do(p, legacySaved);
169 		Do(p, legacySaved);
170 	}
171 
172 	NativeCallback nc;
173 };
174 
175 #if COMMON_LITTLE_ENDIAN
176 typedef WaitType WaitType_le;
177 #else
178 typedef swap_struct_t<WaitType, swap_32_t<WaitType> > WaitType_le;
179 #endif
180 
181 // Real PSP struct, don't change the fields.
182 struct SceKernelThreadRunStatus
183 {
184 	SceSize_le size;
185 	u32_le status;
186 	s32_le currentPriority;
187 	WaitType_le waitType;
188 	SceUID_le waitID;
189 	s32_le wakeupCount;
190 	SceKernelSysClock runForClocks;
191 	s32_le numInterruptPreempts;
192 	s32_le numThreadPreempts;
193 	s32_le numReleases;
194 };
195 
196 // Real PSP struct, don't change the fields.
197 struct NativeThread
198 {
199 	u32_le nativeSize;
200 	char name[KERNELOBJECT_MAX_NAME_LENGTH+1];
201 
202 	// Threading stuff
203 	u32_le attr;
204 	u32_le status;
205 	u32_le entrypoint;
206 	u32_le initialStack;
207 	u32_le stackSize;
208 	u32_le gpreg;
209 
210 	s32_le initialPriority;
211 	s32_le currentPriority;
212 	WaitType_le waitType;
213 	SceUID_le waitID;
214 	s32_le wakeupCount;
215 	s32_le exitStatus;
216 	SceKernelSysClock runForClocks;
217 	s32_le numInterruptPreempts;
218 	s32_le numThreadPreempts;
219 	s32_le numReleases;
220 };
221 
222 struct ThreadWaitInfo {
223 	u32 waitValue;
224 	u32 timeoutPtr;
225 };
226 
227 // Owns outstanding MIPS calls and provides a way to get them by ID.
228 class MipsCallManager {
229 public:
MipsCallManager()230 	MipsCallManager() : idGen_(0) {}
add(MipsCall * call)231 	u32 add(MipsCall *call) {
232 		u32 id = genId();
233 		calls_.insert(std::pair<int, MipsCall *>(id, call));
234 		return id;
235 	}
get(u32 id)236 	MipsCall *get(u32 id) {
237 		auto iter = calls_.find(id);
238 		if (iter == calls_.end())
239 			return NULL;
240 		return iter->second;
241 	}
pop(u32 id)242 	MipsCall *pop(u32 id) {
243 		MipsCall *temp = calls_[id];
244 		calls_.erase(id);
245 		return temp;
246 	}
clear()247 	void clear() {
248 		for (auto it = calls_.begin(), end = calls_.end(); it != end; ++it) {
249 			delete it->second;
250 		}
251 		calls_.clear();
252 		types_.clear();
253 		idGen_ = 0;
254 	}
255 
registerActionType(ActionCreator creator)256 	int registerActionType(ActionCreator creator) {
257 		types_.push_back(creator);
258 		return (int) types_.size() - 1;
259 	}
260 
restoreActionType(int actionType,ActionCreator creator)261 	void restoreActionType(int actionType, ActionCreator creator) {
262 		if (actionType >= (int) types_.size())
263 			types_.resize(actionType + 1, NULL);
264 		types_[actionType] = creator;
265 	}
266 
createActionByType(int actionType)267 	PSPAction *createActionByType(int actionType) {
268 		if (actionType < (int) types_.size() && types_[actionType] != NULL) {
269 			PSPAction *a = types_[actionType]();
270 			a->actionTypeID = actionType;
271 			return a;
272 		}
273 		return NULL;
274 	}
275 
DoState(PointerWrap & p)276 	void DoState(PointerWrap &p) {
277 		auto s = p.Section("MipsCallManager", 1);
278 		if (!s)
279 			return;
280 
281 		Do(p, calls_);
282 		Do(p, idGen_);
283 	}
284 
285 private:
genId()286 	u32 genId() { return ++idGen_; }
287 	std::map<u32, MipsCall *> calls_;
288 	std::vector<ActionCreator> types_;
289 	u32 idGen_;
290 };
291 
292 class ActionAfterMipsCall : public PSPAction
293 {
ActionAfterMipsCall()294 	ActionAfterMipsCall()
295 	{
296 		chainedAction = NULL;
297 	}
298 
299 public:
300 	void run(MipsCall &call) override;
301 
Create()302 	static PSPAction *Create() {
303 		return new ActionAfterMipsCall();
304 	}
305 
DoState(PointerWrap & p)306 	void DoState(PointerWrap &p) override
307 	{
308 		auto s = p.Section("ActionAfterMipsCall", 1);
309 		if (!s)
310 			return;
311 
312 		Do(p, threadID);
313 		Do(p, status);
314 		Do(p, waitType);
315 		Do(p, waitID);
316 		Do(p, waitInfo);
317 		Do(p, isProcessingCallbacks);
318 		Do(p, currentCallbackId);
319 
320 		int chainedActionType = 0;
321 		if (chainedAction != NULL)
322 			chainedActionType = chainedAction->actionTypeID;
323 		Do(p, chainedActionType);
324 
325 		if (chainedActionType != 0)
326 		{
327 			if (p.mode == p.MODE_READ)
328 				chainedAction = __KernelCreateAction(chainedActionType);
329 			chainedAction->DoState(p);
330 		}
331 	}
332 
333 	SceUID threadID;
334 
335 	// Saved thread state
336 	int status;
337 	WaitType waitType;
338 	int waitID;
339 	ThreadWaitInfo waitInfo;
340 	bool isProcessingCallbacks;
341 	SceUID currentCallbackId;
342 
343 	PSPAction *chainedAction;
344 };
345 
346 class ActionAfterCallback : public PSPAction
347 {
348 public:
ActionAfterCallback()349 	ActionAfterCallback() {}
350 	void run(MipsCall &call) override;
351 
Create()352 	static PSPAction *Create() {
353 		return new ActionAfterCallback;
354 	}
355 
setCallback(SceUID cbId_)356 	void setCallback(SceUID cbId_)
357 	{
358 		cbId = cbId_;
359 	}
360 
DoState(PointerWrap & p)361 	void DoState(PointerWrap &p) override
362 	{
363 		auto s = p.Section("ActionAfterCallback", 1);
364 		if (!s)
365 			return;
366 
367 		Do(p, cbId);
368 	}
369 
370 	SceUID cbId;
371 };
372 
373 class PSPThread : public KernelObject {
374 public:
PSPThread()375 	PSPThread() : debug(currentMIPS, context) {}
376 
GetName()377 	const char *GetName() override { return nt.name; }
GetTypeName()378 	const char *GetTypeName() override { return GetStaticTypeName(); }
GetStaticTypeName()379 	static const char *GetStaticTypeName() { return "Thread"; }
GetQuickInfo(char * ptr,int size)380 	void GetQuickInfo(char *ptr, int size) override {
381 		sprintf(ptr, "pc= %08x sp= %08x %s %s %s %s %s %s (wt=%i wid=%i wv= %08x )",
382 			context.pc, context.r[MIPS_REG_SP],
383 			(nt.status & THREADSTATUS_RUNNING) ? "RUN" : "",
384 			(nt.status & THREADSTATUS_READY) ? "READY" : "",
385 			(nt.status & THREADSTATUS_WAIT) ? "WAIT" : "",
386 			(nt.status & THREADSTATUS_SUSPEND) ? "SUSPEND" : "",
387 			(nt.status & THREADSTATUS_DORMANT) ? "DORMANT" : "",
388 			(nt.status & THREADSTATUS_DEAD) ? "DEAD" : "",
389 			(int)nt.waitType,
390 			nt.waitID,
391 			waitInfo.waitValue);
392 	}
393 
GetMissingErrorCode()394 	static u32 GetMissingErrorCode() { return SCE_KERNEL_ERROR_UNKNOWN_THID; }
GetStaticIDType()395 	static int GetStaticIDType() { return SCE_KERNEL_TMID_Thread; }
GetIDType() const396 	int GetIDType() const override { return SCE_KERNEL_TMID_Thread; }
397 
AllocateStack(u32 & stackSize)398 	bool AllocateStack(u32 &stackSize) {
399 		_assert_msg_(stackSize >= 0x200, "thread stack should be 256 bytes or larger");
400 
401 		FreeStack();
402 
403 		bool fromTop = (nt.attr & PSP_THREAD_ATTR_LOW_STACK) == 0;
404 		if (nt.attr & PSP_THREAD_ATTR_KERNEL)
405 		{
406 			// Allocate stacks for kernel threads (idle) in kernel RAM
407 			currentStack.start = kernelMemory.Alloc(stackSize, fromTop, (std::string("stack/") + nt.name).c_str());
408 		}
409 		else
410 		{
411 			currentStack.start = userMemory.Alloc(stackSize, fromTop, (std::string("stack/") + nt.name).c_str());
412 		}
413 		if (currentStack.start == (u32)-1)
414 		{
415 			currentStack.start = 0;
416 			nt.initialStack = 0;
417 			ERROR_LOG(SCEKERNEL, "Failed to allocate stack for thread");
418 			return false;
419 		}
420 
421 		nt.initialStack = currentStack.start;
422 		nt.stackSize = stackSize;
423 		return true;
424 	}
425 
FillStack()426 	bool FillStack() {
427 		// Fill the stack.
428 		if ((nt.attr & PSP_THREAD_ATTR_NO_FILLSTACK) == 0) {
429 			Memory::Memset(currentStack.start, 0xFF, nt.stackSize, "ThreadFillStack");
430 		}
431 		context.r[MIPS_REG_SP] = currentStack.start + nt.stackSize;
432 		currentStack.end = context.r[MIPS_REG_SP];
433 		// The k0 section is 256 bytes at the top of the stack.
434 		context.r[MIPS_REG_SP] -= 256;
435 		context.r[MIPS_REG_K0] = context.r[MIPS_REG_SP];
436 		u32 k0 = context.r[MIPS_REG_K0];
437 		Memory::Memset(k0, 0, 0x100, "ThreadK0");
438 		Memory::Write_U32(GetUID(),        k0 + 0xc0);
439 		Memory::Write_U32(nt.initialStack, k0 + 0xc8);
440 		Memory::Write_U32(0xffffffff,      k0 + 0xf8);
441 		Memory::Write_U32(0xffffffff,      k0 + 0xfc);
442 		// After k0 comes the arguments, which is done by sceKernelStartThread().
443 
444 		Memory::Write_U32(GetUID(), nt.initialStack);
445 		return true;
446 	}
447 
FreeStack()448 	void FreeStack() {
449 		if (currentStack.start != 0) {
450 			DEBUG_LOG(SCEKERNEL, "Freeing thread stack %s", nt.name);
451 
452 			if ((nt.attr & PSP_THREAD_ATTR_CLEAR_STACK) != 0 && nt.initialStack != 0) {
453 				Memory::Memset(nt.initialStack, 0, nt.stackSize, "ThreadFreeStack");
454 			}
455 
456 			if (nt.attr & PSP_THREAD_ATTR_KERNEL) {
457 				kernelMemory.Free(currentStack.start);
458 			} else {
459 				userMemory.Free(currentStack.start);
460 			}
461 			currentStack.start = 0;
462 		}
463 	}
464 
PushExtendedStack(u32 size)465 	bool PushExtendedStack(u32 size)
466 	{
467 		u32 stack = userMemory.Alloc(size, true, (std::string("extended/") + nt.name).c_str());
468 		if (stack == (u32)-1)
469 			return false;
470 
471 		pushedStacks.push_back(currentStack);
472 		currentStack.start = stack;
473 		currentStack.end = stack + size;
474 		nt.initialStack = currentStack.start;
475 		nt.stackSize = currentStack.end - currentStack.start;
476 
477 		// We still drop the threadID at the bottom and fill it, but there's no k0.
478 		Memory::Memset(currentStack.start, 0xFF, nt.stackSize, "ThreadExtendStack");
479 		Memory::Write_U32(GetUID(), nt.initialStack);
480 		return true;
481 	}
482 
PopExtendedStack()483 	bool PopExtendedStack()
484 	{
485 		if (pushedStacks.size() == 0)
486 			return false;
487 
488 		userMemory.Free(currentStack.start);
489 		currentStack = pushedStacks.back();
490 		pushedStacks.pop_back();
491 		nt.initialStack = currentStack.start;
492 		nt.stackSize = currentStack.end - currentStack.start;
493 		return true;
494 	}
495 
496 	// Can't use a destructor since savestates will call that too.
Cleanup()497 	void Cleanup()
498 	{
499 		// Callbacks are automatically deleted when their owning thread is deleted.
500 		for (auto it = callbacks.begin(), end = callbacks.end(); it != end; ++it)
501 			kernelObjects.Destroy<PSPCallback>(*it);
502 
503 		if (pushedStacks.size() != 0)
504 		{
505 			WARN_LOG_REPORT(SCEKERNEL, "Thread ended within an extended stack");
506 			for (size_t i = 0; i < pushedStacks.size(); ++i)
507 				userMemory.Free(pushedStacks[i].start);
508 		}
509 		FreeStack();
510 	}
511 
512 	void setReturnValue(u32 retval);
513 	void setReturnValue(u64 retval);
514 	void resumeFromWait();
515 	bool isWaitingFor(WaitType type, int id) const;
516 	int getWaitID(WaitType type) const;
517 	ThreadWaitInfo getWaitInfo() const;
518 
519 	// Utils
isRunning() const520 	inline bool isRunning() const { return (nt.status & THREADSTATUS_RUNNING) != 0; }
isStopped() const521 	inline bool isStopped() const { return (nt.status & THREADSTATUS_DORMANT) != 0; }
isReady() const522 	inline bool isReady() const { return (nt.status & THREADSTATUS_READY) != 0; }
isWaiting() const523 	inline bool isWaiting() const { return (nt.status & THREADSTATUS_WAIT) != 0; }
isSuspended() const524 	inline bool isSuspended() const { return (nt.status & THREADSTATUS_SUSPEND) != 0; }
525 
DoState(PointerWrap & p)526 	void DoState(PointerWrap &p) override
527 	{
528 		auto s = p.Section("Thread", 1, 5);
529 		if (!s)
530 			return;
531 
532 		Do(p, nt);
533 		Do(p, waitInfo);
534 		Do(p, moduleId);
535 		Do(p, isProcessingCallbacks);
536 		Do(p, currentMipscallId);
537 		Do(p, currentCallbackId);
538 
539 		// TODO: If we want to "version" a DoState method here, we can just use minVer = 0.
540 		Do(p, context);
541 
542 		if (s <= 3)
543 		{
544 			// We must have been loading an old state if we're here.
545 			// Reorder VFPU data to new order.
546 			float temp[128];
547 			memcpy(temp, context.v, 128 * sizeof(float));
548 			for (int i = 0; i < 128; i++) {
549 				context.v[voffset[i]] = temp[i];
550 			}
551 		}
552 
553 		if (s <= 2)
554 		{
555 			context.other[4] = context.other[5];
556 			context.other[3] = context.other[4];
557 		}
558 		if (s <= 4)
559 			std::swap(context.hi, context.lo);
560 
561 		Do(p, callbacks);
562 
563 		Do(p, pendingMipsCalls);
564 		Do(p, pushedStacks);
565 		Do(p, currentStack);
566 
567 		if (s >= 2)
568 		{
569 			Do(p, waitingThreads);
570 			Do(p, pausedWaits);
571 		}
572 	}
573 
574 	NativeThread nt{};
575 
576 	ThreadWaitInfo waitInfo{};
577 	SceUID moduleId = -1;
578 
579 	bool isProcessingCallbacks = false;
580 	u32 currentMipscallId = -1;
581 	SceUID currentCallbackId = -1;
582 
583 	PSPThreadContext context;
584 	KernelThreadDebugInterface debug;
585 
586 	std::vector<SceUID> callbacks;
587 
588 	std::list<u32> pendingMipsCalls;
589 
590 	struct StackInfo {
591 		u32 start;
592 		u32 end;
593 	};
594 	// This is a stack of... stacks, since sceKernelExtendThreadStack() can recurse.
595 	// These are stacks that aren't "active" right now, but will pop off once the func returns.
596 	std::vector<StackInfo> pushedStacks;
597 
598 	StackInfo currentStack{};
599 
600 	// For thread end.
601 	std::vector<SceUID> waitingThreads;
602 	// Key is the callback id it was for, or if no callback, the thread id.
603 	std::map<SceUID, u64> pausedWaits;
604 };
605 
606 struct WaitTypeFuncs
607 {
608 	WaitBeginCallbackFunc beginFunc;
609 	WaitEndCallbackFunc endFunc;
610 };
611 
612 bool __KernelExecuteMipsCallOnCurrentThread(u32 callId, bool reschedAfter);
613 
614 PSPThread *__KernelCreateThread(SceUID &id, SceUID moduleID, const char *name, u32 entryPoint, u32 priority, int stacksize, u32 attr);
615 void __KernelResetThread(PSPThread *t, int lowestPriority);
616 void __KernelCancelWakeup(SceUID threadID);
617 void __KernelCancelThreadEndTimeout(SceUID threadID);
618 bool __KernelCheckThreadCallbacks(PSPThread *thread, bool force);
619 
620 //////////////////////////////////////////////////////////////////////////
621 //STATE BEGIN
622 //////////////////////////////////////////////////////////////////////////
623 int g_inCbCount = 0;
624 // Normally, the same as currentThread.  In an interrupt, remembers the callback's thread id.
625 SceUID currentCallbackThreadID = 0;
626 int readyCallbacksCount = 0;
627 SceUID currentThread;
628 PSPThread *currentThreadPtr;
629 u32 idleThreadHackAddr;
630 u32 threadReturnHackAddr;
631 u32 hleReturnHackAddr;
632 u32 cbReturnHackAddr;
633 u32 intReturnHackAddr;
634 u32 extendReturnHackAddr;
635 u32 moduleReturnHackAddr;
636 std::vector<ThreadCallback> threadEndListeners;
637 
638 typedef std::vector<SceUID> ThreadEventHandlerList;
639 static std::map<SceUID, ThreadEventHandlerList> threadEventHandlers;
640 static std::vector<SceUID> pendingDeleteThreads;
641 
642 // Lists all thread ids that aren't deleted/etc.
643 std::vector<SceUID> threadqueue;
644 // Only for debugger, so not needed to read, just write.
645 std::mutex threadqueueLock;
646 
647 // Lists only ready thread ids.
648 ThreadQueueList threadReadyQueue;
649 
650 SceUID threadIdleID[2];
651 
652 int eventScheduledWakeup;
653 int eventThreadEndTimeout;
654 
655 bool dispatchEnabled = true;
656 
657 MipsCallManager mipsCalls;
658 int actionAfterCallback;
659 int actionAfterMipsCall;
660 
661 // When inside a callback, delays are "paused", and rechecked after the callback returns.
662 std::map<SceUID, u64> pausedDelays;
663 
664 // Doesn't need state saving.
665 WaitTypeFuncs waitTypeFuncs[NUM_WAITTYPES];
666 
667 // Doesn't really need state saving, just for logging purposes.
668 static u64 lastSwitchCycles = 0;
669 
670 //////////////////////////////////////////////////////////////////////////
671 //STATE END
672 //////////////////////////////////////////////////////////////////////////
673 
__KernelRegisterActionType(ActionCreator creator)674 int __KernelRegisterActionType(ActionCreator creator)
675 {
676 	return mipsCalls.registerActionType(creator);
677 }
678 
__KernelRestoreActionType(int actionType,ActionCreator creator)679 void __KernelRestoreActionType(int actionType, ActionCreator creator)
680 {
681 	_assert_(actionType >= 0);
682 	mipsCalls.restoreActionType(actionType, creator);
683 }
684 
__KernelCreateAction(int actionType)685 PSPAction *__KernelCreateAction(int actionType)
686 {
687 	return mipsCalls.createActionByType(actionType);
688 }
689 
DoState(PointerWrap & p)690 void MipsCall::DoState(PointerWrap &p)
691 {
692 	auto s = p.Section("MipsCall", 1);
693 	if (!s)
694 		return;
695 
696 	Do(p, entryPoint);
697 	Do(p, cbId);
698 	DoArray(p, args, ARRAY_SIZE(args));
699 	Do(p, numArgs);
700 	// No longer used.
701 	u32 legacySavedIdRegister = 0;
702 	Do(p, legacySavedIdRegister);
703 	u32 legacySavedRa = 0;
704 	Do(p, legacySavedRa);
705 	Do(p, savedPc);
706 	Do(p, savedV0);
707 	Do(p, savedV1);
708 	Do(p, tag);
709 	Do(p, savedId);
710 	Do(p, reschedAfter);
711 
712 	int actionTypeID = 0;
713 	if (doAfter != NULL)
714 		actionTypeID = doAfter->actionTypeID;
715 	Do(p, actionTypeID);
716 	if (actionTypeID != 0)
717 	{
718 		if (p.mode == p.MODE_READ)
719 			doAfter = __KernelCreateAction(actionTypeID);
720 		doAfter->DoState(p);
721 	}
722 }
723 
setReturnValue(u32 value)724 void MipsCall::setReturnValue(u32 value)
725 {
726 	savedV0 = value;
727 }
728 
setReturnValue(u64 value)729 void MipsCall::setReturnValue(u64 value)
730 {
731 	savedV0 = value & 0xFFFFFFFF;
732 	savedV1 = (value >> 32) & 0xFFFFFFFF;
733 }
734 
__GetCurrentThread()735 inline PSPThread *__GetCurrentThread() {
736 	return currentThreadPtr;
737 }
738 
__SetCurrentThread(PSPThread * thread,SceUID threadID,const char * name)739 inline void __SetCurrentThread(PSPThread *thread, SceUID threadID, const char *name) {
740 	currentThread = threadID;
741 	currentThreadPtr = thread;
742 	hleCurrentThreadName = name;
743 }
744 
__KernelCallbackReturnAddress()745 u32 __KernelCallbackReturnAddress() {
746 	return cbReturnHackAddr;
747 }
748 
__KernelInterruptReturnAddress()749 u32 __KernelInterruptReturnAddress() {
750 	return intReturnHackAddr;
751 }
752 
__KernelDelayBeginCallback(SceUID threadID,SceUID prevCallbackId)753 static void __KernelDelayBeginCallback(SceUID threadID, SceUID prevCallbackId) {
754 	u32 error;
755 	SceUID waitID = __KernelGetWaitID(threadID, WAITTYPE_DELAY, error);
756 	if (waitID == threadID) {
757 		// Most waits need to keep track of waiting threads, delays don't.  Use a fake list.
758 		std::vector<SceUID> dummy;
759 		HLEKernel::WaitBeginCallback(threadID, prevCallbackId, eventScheduledWakeup, dummy, pausedDelays, true);
760 		DEBUG_LOG(SCEKERNEL, "sceKernelDelayThreadCB: Suspending delay for callback");
761 	}
762 	else
763 		WARN_LOG_REPORT(SCEKERNEL, "sceKernelDelayThreadCB: beginning callback with bad wait?");
764 }
765 
__KernelDelayEndCallback(SceUID threadID,SceUID prevCallbackId)766 static void __KernelDelayEndCallback(SceUID threadID, SceUID prevCallbackId) {
767 	SceUID pauseKey = prevCallbackId == 0 ? threadID : prevCallbackId;
768 
769 	if (pausedDelays.find(pauseKey) == pausedDelays.end())
770 	{
771 		// This probably should not happen.
772 		WARN_LOG_REPORT(SCEKERNEL, "sceKernelDelayThreadCB: cannot find delay deadline");
773 		__KernelResumeThreadFromWait(threadID, 0);
774 		return;
775 	}
776 
777 	u64 delayDeadline = pausedDelays[pauseKey];
778 	pausedDelays.erase(pauseKey);
779 
780 	// TODO: Don't wake up if __KernelCurHasReadyCallbacks()?
781 
782 	s64 cyclesLeft = delayDeadline - CoreTiming::GetTicks();
783 	if (cyclesLeft < 0)
784 		__KernelResumeThreadFromWait(threadID, 0);
785 	else
786 	{
787 		CoreTiming::ScheduleEvent(cyclesLeft, eventScheduledWakeup, __KernelGetCurThread());
788 		DEBUG_LOG(SCEKERNEL, "sceKernelDelayThreadCB: Resuming delay after callback");
789 	}
790 }
791 
__KernelSleepBeginCallback(SceUID threadID,SceUID prevCallbackId)792 static void __KernelSleepBeginCallback(SceUID threadID, SceUID prevCallbackId) {
793 	DEBUG_LOG(SCEKERNEL, "sceKernelSleepThreadCB: Suspending sleep for callback");
794 }
795 
__KernelSleepEndCallback(SceUID threadID,SceUID prevCallbackId)796 static void __KernelSleepEndCallback(SceUID threadID, SceUID prevCallbackId) {
797 	u32 error;
798 	PSPThread *thread = kernelObjects.Get<PSPThread>(threadID, error);
799 	if (!thread) {
800 		// This probably should not happen.
801 		WARN_LOG_REPORT(SCEKERNEL, "sceKernelSleepThreadCB: thread deleted?");
802 		return;
803 	}
804 
805 	// TODO: Don't wake up if __KernelCurHasReadyCallbacks()?
806 
807 	if (thread->nt.wakeupCount > 0) {
808 		thread->nt.wakeupCount--;
809 		DEBUG_LOG(SCEKERNEL, "sceKernelSleepThreadCB: resume from callback, wakeupCount decremented to %i", thread->nt.wakeupCount);
810 		__KernelResumeThreadFromWait(threadID, 0);
811 	} else {
812 		DEBUG_LOG(SCEKERNEL, "sceKernelSleepThreadCB: Resuming sleep after callback");
813 	}
814 }
815 
__KernelThreadEndBeginCallback(SceUID threadID,SceUID prevCallbackId)816 static void __KernelThreadEndBeginCallback(SceUID threadID, SceUID prevCallbackId)
817 {
818 	auto result = HLEKernel::WaitBeginCallback<PSPThread, WAITTYPE_THREADEND, SceUID>(threadID, prevCallbackId, eventThreadEndTimeout);
819 	if (result == HLEKernel::WAIT_CB_SUCCESS)
820 		DEBUG_LOG(SCEKERNEL, "sceKernelWaitThreadEndCB: Suspending wait for callback");
821 	else if (result == HLEKernel::WAIT_CB_BAD_WAIT_DATA)
822 		ERROR_LOG_REPORT(SCEKERNEL, "sceKernelWaitThreadEndCB: wait not found to pause for callback");
823 	else
824 		WARN_LOG_REPORT(SCEKERNEL, "sceKernelWaitThreadEndCB: beginning callback with bad wait id?");
825 }
826 
__KernelCheckResumeThreadEnd(PSPThread * t,SceUID waitingThreadID,u32 & error,int result,bool & wokeThreads)827 static bool __KernelCheckResumeThreadEnd(PSPThread *t, SceUID waitingThreadID, u32 &error, int result, bool &wokeThreads) {
828 	if (!HLEKernel::VerifyWait(waitingThreadID, WAITTYPE_THREADEND, t->GetUID()))
829 		return true;
830 
831 	if (t->nt.status == THREADSTATUS_DORMANT) {
832 		u32 timeoutPtr = __KernelGetWaitTimeoutPtr(waitingThreadID, error);
833 		s64 cyclesLeft = CoreTiming::UnscheduleEvent(eventThreadEndTimeout, waitingThreadID);
834 		if (timeoutPtr != 0)
835 			Memory::Write_U32((u32) cyclesToUs(cyclesLeft), timeoutPtr);
836 		s32 exitStatus = t->nt.exitStatus;
837 		__KernelResumeThreadFromWait(waitingThreadID, exitStatus);
838 		return true;
839 	}
840 
841 	return false;
842 }
843 
__KernelThreadEndEndCallback(SceUID threadID,SceUID prevCallbackId)844 static void __KernelThreadEndEndCallback(SceUID threadID, SceUID prevCallbackId)
845 {
846 	auto result = HLEKernel::WaitEndCallback<PSPThread, WAITTYPE_THREADEND, SceUID>(threadID, prevCallbackId, eventThreadEndTimeout, __KernelCheckResumeThreadEnd);
847 	if (result == HLEKernel::WAIT_CB_RESUMED_WAIT)
848 		DEBUG_LOG(SCEKERNEL, "sceKernelWaitThreadEndCB: Resuming wait from callback");
849 }
850 
__KernelSetThreadRA(SceUID threadID,u32 nid)851 u32 __KernelSetThreadRA(SceUID threadID, u32 nid)
852 {
853 	u32 newRA;
854 	switch (nid)
855 	{
856 	case NID_MODULERETURN:
857 		newRA = moduleReturnHackAddr;
858 		break;
859 	default:
860 		ERROR_LOG_REPORT(SCEKERNEL, "__KernelSetThreadRA(): invalid RA address");
861 		return -1;
862 	}
863 
864 	if (threadID == currentThread)
865 		currentMIPS->r[MIPS_REG_RA] = newRA;
866 	else
867 	{
868 		u32 error;
869 		PSPThread *thread = kernelObjects.Get<PSPThread>(threadID, error);
870 		if (!thread)
871 			return error;
872 
873 		thread->context.r[MIPS_REG_RA] = newRA;
874 	}
875 
876 	return 0;
877 }
878 
879 void hleScheduledWakeup(u64 userdata, int cyclesLate);
880 void hleThreadEndTimeout(u64 userdata, int cyclesLate);
881 
__KernelWriteFakeSysCall(u32 nid,u32 * ptr,u32 & pos)882 static void __KernelWriteFakeSysCall(u32 nid, u32 *ptr, u32 &pos)
883 {
884 	*ptr = pos;
885 	pos += 8;
886 	WriteSyscall("FakeSysCalls", nid, *ptr);
887 	MIPSAnalyst::PrecompileFunction(*ptr, 8);
888 }
889 
HLEMipsCallReturnAddress()890 u32 HLEMipsCallReturnAddress() {
891 	if (hleReturnHackAddr == 0) {
892 		// From an old save state, likely... try to recover.
893 		u32 blockSize = 2 * sizeof(u32);
894 		u32 pos = kernelMemory.Alloc(blockSize, false, "hlerethack");
895 		__KernelWriteFakeSysCall(NID_HLECALLRETURN, &hleReturnHackAddr, pos);
896 	}
897 	return hleReturnHackAddr;
898 }
899 
__KernelThreadingInit()900 void __KernelThreadingInit()
901 {
902 	struct ThreadHack
903 	{
904 		u32 nid;
905 		u32 *addr;
906 	};
907 
908 	// Yeah, this is straight out of JPCSP, I should be ashamed.
909 	const static u32_le idleThreadCode[] = {
910 		MIPS_MAKE_LUI(MIPS_REG_RA, 0x0800),
911 		MIPS_MAKE_JR_RA(),
912 		MIPS_MAKE_SYSCALL("FakeSysCalls", "_sceKernelIdle"),
913 		MIPS_MAKE_BREAK(0),
914 	};
915 
916 	// If you add another func here, don't forget __KernelThreadingDoState() below.
917 	static ThreadHack threadHacks[] = {
918 		{NID_THREADRETURN, &threadReturnHackAddr},
919 		{NID_CALLBACKRETURN, &cbReturnHackAddr},
920 		{NID_INTERRUPTRETURN, &intReturnHackAddr},
921 		{NID_EXTENDRETURN, &extendReturnHackAddr},
922 		{NID_MODULERETURN, &moduleReturnHackAddr},
923 		{NID_HLECALLRETURN, &hleReturnHackAddr},
924 	};
925 	u32 blockSize = sizeof(idleThreadCode) + ARRAY_SIZE(threadHacks) * 2 * 4;  // The thread code above plus 8 bytes per "hack"
926 
927 	dispatchEnabled = true;
928 	memset(waitTypeFuncs, 0, sizeof(waitTypeFuncs));
929 
930 	__SetCurrentThread(NULL, 0, NULL);
931 	g_inCbCount = 0;
932 	currentCallbackThreadID = 0;
933 	readyCallbacksCount = 0;
934 	lastSwitchCycles = 0;
935 	idleThreadHackAddr = kernelMemory.Alloc(blockSize, false, "threadrethack");
936 
937 	Memory::Memcpy(idleThreadHackAddr, idleThreadCode, sizeof(idleThreadCode), "ThreadMIPS");
938 
939 	u32 pos = idleThreadHackAddr + sizeof(idleThreadCode);
940 	for (size_t i = 0; i < ARRAY_SIZE(threadHacks); ++i) {
941 		__KernelWriteFakeSysCall(threadHacks[i].nid, threadHacks[i].addr, pos);
942 	}
943 
944 	eventScheduledWakeup = CoreTiming::RegisterEvent("ScheduledWakeup", &hleScheduledWakeup);
945 	eventThreadEndTimeout = CoreTiming::RegisterEvent("ThreadEndTimeout", &hleThreadEndTimeout);
946 	actionAfterMipsCall = __KernelRegisterActionType(ActionAfterMipsCall::Create);
947 	actionAfterCallback = __KernelRegisterActionType(ActionAfterCallback::Create);
948 
949 	// Create the two idle threads, as well. With the absolute minimal possible priority.
950 	// 4096 stack size - don't know what the right value is. Hm, if callbacks are ever to run on these threads...
951 	__KernelResetThread(__KernelCreateThread(threadIdleID[0], 0, "idle0", idleThreadHackAddr, 0x7f, 4096, PSP_THREAD_ATTR_KERNEL), 0);
952 	__KernelResetThread(__KernelCreateThread(threadIdleID[1], 0, "idle1", idleThreadHackAddr, 0x7f, 4096, PSP_THREAD_ATTR_KERNEL), 0);
953 	// These idle threads are later started in LoadExec, which calls __KernelStartIdleThreads below.
954 
955 	__KernelListenThreadEnd(__KernelCancelWakeup);
956 	__KernelListenThreadEnd(__KernelCancelThreadEndTimeout);
957 
958 	__KernelRegisterWaitTypeFuncs(WAITTYPE_DELAY, __KernelDelayBeginCallback, __KernelDelayEndCallback);
959 	__KernelRegisterWaitTypeFuncs(WAITTYPE_SLEEP, __KernelSleepBeginCallback, __KernelSleepEndCallback);
960 	__KernelRegisterWaitTypeFuncs(WAITTYPE_THREADEND, __KernelThreadEndBeginCallback, __KernelThreadEndEndCallback);
961 }
962 
__KernelThreadingDoState(PointerWrap & p)963 void __KernelThreadingDoState(PointerWrap &p)
964 {
965 	auto s = p.Section("sceKernelThread", 1, 4);
966 	if (!s)
967 		return;
968 
969 	Do(p, g_inCbCount);
970 	Do(p, currentCallbackThreadID);
971 	Do(p, readyCallbacksCount);
972 	Do(p, idleThreadHackAddr);
973 	Do(p, threadReturnHackAddr);
974 	Do(p, cbReturnHackAddr);
975 	Do(p, intReturnHackAddr);
976 	Do(p, extendReturnHackAddr);
977 	Do(p, moduleReturnHackAddr);
978 
979 	if (s >= 4) {
980 		Do(p, hleReturnHackAddr);
981 	} else {
982 		hleReturnHackAddr = 0;
983 	}
984 
985 	Do(p, currentThread);
986 	SceUID dv = 0;
987 	Do(p, threadqueue, dv);
988 	DoArray(p, threadIdleID, ARRAY_SIZE(threadIdleID));
989 	Do(p, dispatchEnabled);
990 
991 	Do(p, threadReadyQueue);
992 
993 	Do(p, eventScheduledWakeup);
994 	CoreTiming::RestoreRegisterEvent(eventScheduledWakeup, "ScheduledWakeup", &hleScheduledWakeup);
995 	Do(p, eventThreadEndTimeout);
996 	CoreTiming::RestoreRegisterEvent(eventThreadEndTimeout, "ThreadEndTimeout", &hleThreadEndTimeout);
997 	Do(p, actionAfterMipsCall);
998 	__KernelRestoreActionType(actionAfterMipsCall, ActionAfterMipsCall::Create);
999 	Do(p, actionAfterCallback);
1000 	__KernelRestoreActionType(actionAfterCallback, ActionAfterCallback::Create);
1001 
1002 	Do(p, pausedDelays);
1003 
1004 	__SetCurrentThread(kernelObjects.GetFast<PSPThread>(currentThread), currentThread, __KernelGetThreadName(currentThread));
1005 	lastSwitchCycles = CoreTiming::GetTicks();
1006 
1007 	if (s >= 2)
1008 		Do(p, threadEventHandlers);
1009 	if (s >= 3)
1010 		Do(p, pendingDeleteThreads);
1011 }
1012 
__KernelThreadingDoStateLate(PointerWrap & p)1013 void __KernelThreadingDoStateLate(PointerWrap &p)
1014 {
1015 	// We do this late to give modules time to register actions.
1016 	mipsCalls.DoState(p);
1017 	p.DoMarker("sceKernelThread Late");
1018 }
1019 
__KernelThreadObject()1020 KernelObject *__KernelThreadObject()
1021 {
1022 	return new PSPThread;
1023 }
1024 
__KernelCallbackObject()1025 KernelObject *__KernelCallbackObject()
1026 {
1027 	return new PSPCallback;
1028 }
1029 
__KernelListenThreadEnd(ThreadCallback callback)1030 void __KernelListenThreadEnd(ThreadCallback callback)
1031 {
1032 	threadEndListeners.push_back(callback);
1033 }
1034 
__KernelFireThreadEnd(SceUID threadID)1035 static void __KernelFireThreadEnd(SceUID threadID)
1036 {
1037 	for (auto iter = threadEndListeners.begin(), end = threadEndListeners.end(); iter != end; ++iter)
1038 	{
1039 		ThreadCallback cb = *iter;
1040 		cb(threadID);
1041 	}
1042 }
1043 
1044 // TODO: Use __KernelChangeThreadState instead?  It has other affects...
__KernelChangeReadyState(PSPThread * thread,SceUID threadID,bool ready)1045 static void __KernelChangeReadyState(PSPThread *thread, SceUID threadID, bool ready) {
1046 	// Passing the id as a parameter is just an optimization, if it's wrong it will cause havoc.
1047 	_dbg_assert_msg_(thread->GetUID() == threadID, "Incorrect threadID");
1048 	int prio = thread->nt.currentPriority;
1049 
1050 	if (thread->isReady())
1051 	{
1052 		if (!ready)
1053 			threadReadyQueue.remove(prio, threadID);
1054 	}
1055 	else if (ready)
1056 	{
1057 		if (thread->isRunning())
1058 			threadReadyQueue.push_front(prio, threadID);
1059 		else
1060 			threadReadyQueue.push_back(prio, threadID);
1061 		thread->nt.status = THREADSTATUS_READY;
1062 	}
1063 }
1064 
__KernelChangeReadyState(SceUID threadID,bool ready)1065 static void __KernelChangeReadyState(SceUID threadID, bool ready)
1066 {
1067 	u32 error;
1068 	PSPThread *thread = kernelObjects.Get<PSPThread>(threadID, error);
1069 	if (thread)
1070 		__KernelChangeReadyState(thread, threadID, ready);
1071 	else
1072 		WARN_LOG(SCEKERNEL, "Trying to change the ready state of an unknown thread?");
1073 }
1074 
__KernelStartIdleThreads(SceUID moduleId)1075 void __KernelStartIdleThreads(SceUID moduleId)
1076 {
1077 	for (int i = 0; i < 2; i++)
1078 	{
1079 		u32 error;
1080 		PSPThread *t = kernelObjects.Get<PSPThread>(threadIdleID[i], error);
1081 		t->nt.gpreg = __KernelGetModuleGP(moduleId);
1082 		t->context.r[MIPS_REG_GP] = t->nt.gpreg;
1083 		//t->context.pc += 4;	// ADJUSTPC
1084 		threadReadyQueue.prepare(t->nt.currentPriority);
1085 		__KernelChangeReadyState(t, threadIdleID[i], true);
1086 	}
1087 }
1088 
__KernelSwitchOffThread(const char * reason)1089 bool __KernelSwitchOffThread(const char *reason)
1090 {
1091 	if (!reason)
1092 		reason = "switch off thread";
1093 
1094 	SceUID threadID = currentThread;
1095 
1096 	if (threadID != threadIdleID[0] && threadID != threadIdleID[1])
1097 	{
1098 		PSPThread *current = __GetCurrentThread();
1099 		if (current && current->isRunning())
1100 			__KernelChangeReadyState(current, threadID, true);
1101 
1102 		// Idle 0 chosen entirely arbitrarily.
1103 		PSPThread *t = kernelObjects.GetFast<PSPThread>(threadIdleID[0]);
1104 		if (t)
1105 		{
1106 			hleSkipDeadbeef();
1107 			__KernelSwitchContext(t, reason);
1108 			return true;
1109 		}
1110 		else
1111 			ERROR_LOG(SCEKERNEL, "Unable to switch to idle thread.");
1112 	}
1113 
1114 	return false;
1115 }
1116 
__KernelSwitchToThread(SceUID threadID,const char * reason)1117 bool __KernelSwitchToThread(SceUID threadID, const char *reason)
1118 {
1119 	if (!reason)
1120 		reason = "switch to thread";
1121 
1122 	if (currentThread != threadIdleID[0] && currentThread != threadIdleID[1])
1123 	{
1124 		ERROR_LOG_REPORT(SCEKERNEL, "__KernelSwitchToThread used when already on a thread.");
1125 		return false;
1126 	}
1127 
1128 	if (currentThread == threadID)
1129 		return false;
1130 
1131 	u32 error;
1132 	PSPThread *t = kernelObjects.Get<PSPThread>(threadID, error);
1133 	if (!t)
1134 	{
1135 		ERROR_LOG_REPORT(SCEKERNEL, "__KernelSwitchToThread: %x doesn't exist", threadID);
1136 		hleReSchedule("switch to deleted thread");
1137 	}
1138 	else if (t->isReady() || t->isRunning())
1139 	{
1140 		PSPThread *current = __GetCurrentThread();
1141 		if (current && current->isRunning())
1142 			__KernelChangeReadyState(current, currentThread, true);
1143 
1144 		if (!Memory::IsValidAddress(t->context.pc)) {
1145 			Core_ExecException(t->context.pc, currentMIPS->pc, ExecExceptionType::THREAD);
1146 		}
1147 
1148 		__KernelSwitchContext(t, reason);
1149 		return true;
1150 	}
1151 	else
1152 	{
1153 		hleReSchedule("switch to waiting thread");
1154 	}
1155 
1156 	return false;
1157 }
1158 
__KernelIdle()1159 void __KernelIdle()
1160 {
1161 	// Don't skip 0xDEADBEEF here, this is called directly bypassing CallSyscall().
1162 	// That means the hle flag would stick around until the next call.
1163 
1164 	CoreTiming::Idle();
1165 	// We Advance within __KernelReSchedule(), so anything that has now happened after idle
1166 	// will be triggered properly upon reschedule.
1167 	__KernelReSchedule("idle");
1168 }
1169 
__KernelThreadingShutdown()1170 void __KernelThreadingShutdown() {
1171 	std::lock_guard<std::mutex> guard(threadqueueLock);
1172 
1173 	kernelMemory.Free(threadReturnHackAddr);
1174 	threadqueue.clear();
1175 	threadReadyQueue.clear();
1176 	threadEndListeners.clear();
1177 	mipsCalls.clear();
1178 	threadReturnHackAddr = 0;
1179 	cbReturnHackAddr = 0;
1180 	hleReturnHackAddr = 0;
1181 	__SetCurrentThread(NULL, 0, NULL);
1182 	intReturnHackAddr = 0;
1183 	pausedDelays.clear();
1184 	threadEventHandlers.clear();
1185 	pendingDeleteThreads.clear();
1186 }
1187 
__KernelThreadingSummary()1188 std::string __KernelThreadingSummary() {
1189 	PSPThread *t = __GetCurrentThread();
1190 	return StringFromFormat("Cur thread: %s (attr %08x)", t ? t->GetName() : "(null)", t ? (u32)t->nt.attr : 0);
1191 }
1192 
__KernelGetThreadName(SceUID threadID)1193 const char *__KernelGetThreadName(SceUID threadID)
1194 {
1195 	u32 error;
1196 	PSPThread *t = kernelObjects.Get<PSPThread>(threadID, error);
1197 	if (t)
1198 		return t->nt.name;
1199 	return "ERROR";
1200 }
1201 
KernelIsThreadDormant(SceUID threadID)1202 bool KernelIsThreadDormant(SceUID threadID) {
1203 	u32 error;
1204 	PSPThread *t = kernelObjects.Get<PSPThread>(threadID, error);
1205 	if (t)
1206 		return (t->nt.status & (THREADSTATUS_DEAD | THREADSTATUS_DORMANT)) != 0;
1207 	return false;
1208 }
1209 
KernelIsThreadWaiting(SceUID threadID)1210 bool KernelIsThreadWaiting(SceUID threadID) {
1211 	u32 error;
1212 	PSPThread *t = kernelObjects.Get<PSPThread>(threadID, error);
1213 	if (t)
1214 		return (t->nt.status & (THREADSTATUS_WAITSUSPEND)) != 0;
1215 	return false;
1216 }
1217 
__KernelGetWaitValue(SceUID threadID,u32 & error)1218 u32 __KernelGetWaitValue(SceUID threadID, u32 &error) {
1219 	PSPThread *t = kernelObjects.Get<PSPThread>(threadID, error);
1220 	if (t) {
1221 		return t->getWaitInfo().waitValue;
1222 	} else {
1223 		ERROR_LOG(SCEKERNEL, "__KernelGetWaitValue ERROR: thread %i", threadID);
1224 		return 0;
1225 	}
1226 }
1227 
__KernelGetWaitTimeoutPtr(SceUID threadID,u32 & error)1228 u32 __KernelGetWaitTimeoutPtr(SceUID threadID, u32 &error) {
1229 	PSPThread *t = kernelObjects.Get<PSPThread>(threadID, error);
1230 	if (t) {
1231 		return t->getWaitInfo().timeoutPtr;
1232 	} else {
1233 		ERROR_LOG(SCEKERNEL, "__KernelGetWaitTimeoutPtr ERROR: thread %i", threadID);
1234 		return 0;
1235 	}
1236 }
1237 
__KernelGetWaitID(SceUID threadID,WaitType type,u32 & error)1238 SceUID __KernelGetWaitID(SceUID threadID, WaitType type, u32 &error) {
1239 	PSPThread *t = kernelObjects.Get<PSPThread>(threadID, error);
1240 	if (t) {
1241 		return t->getWaitID(type);
1242 	} else {
1243 		ERROR_LOG(SCEKERNEL, "__KernelGetWaitID ERROR: thread %i", threadID);
1244 		return -1;
1245 	}
1246 }
1247 
__KernelGetCurrentCallbackID(SceUID threadID,u32 & error)1248 SceUID __KernelGetCurrentCallbackID(SceUID threadID, u32 &error) {
1249 	PSPThread *t = kernelObjects.Get<PSPThread>(threadID, error);
1250 	if (t) {
1251 		return t->currentCallbackId;
1252 	} else {
1253 		ERROR_LOG(SCEKERNEL, "__KernelGetCurrentCallbackID ERROR: thread %i", threadID);
1254 		return 0;
1255 	}
1256 }
1257 
sceKernelReferThreadStatus(u32 threadID,u32 statusPtr)1258 u32 sceKernelReferThreadStatus(u32 threadID, u32 statusPtr)
1259 {
1260 	static const u32 THREADINFO_SIZE = 104;
1261 	static const u32 THREADINFO_SIZE_AFTER_260 = 108;
1262 
1263 	if (threadID == 0)
1264 		threadID = __KernelGetCurThread();
1265 
1266 	u32 error;
1267 	PSPThread *t = kernelObjects.Get<PSPThread>(threadID, error);
1268 	if (!t) {
1269 		hleEatCycles(700);
1270 		hleReSchedule("refer thread status");
1271 		return hleLogError(SCEKERNEL, error, "bad thread");
1272 	}
1273 
1274 	u32 wantedSize = Memory::Read_U32(statusPtr);
1275 
1276 	if (sceKernelGetCompiledSdkVersion() > 0x02060010) {
1277 		if (wantedSize > THREADINFO_SIZE_AFTER_260) {
1278 			hleEatCycles(1200);
1279 			hleReSchedule("refer thread status");
1280 			return hleLogError(SCEKERNEL, SCE_KERNEL_ERROR_ILLEGAL_SIZE, "bad size %d", wantedSize);
1281 		}
1282 
1283 		t->nt.nativeSize = THREADINFO_SIZE_AFTER_260;
1284 		if (wantedSize != 0)
1285 			Memory::Memcpy(statusPtr, &t->nt, std::min(wantedSize, (u32)sizeof(t->nt)), "ThreadStatus");
1286 		// TODO: What is this value?  Basic tests show 0...
1287 		if (wantedSize > sizeof(t->nt))
1288 			Memory::Memset(statusPtr + sizeof(t->nt), 0, wantedSize - sizeof(t->nt), "ThreadStatus");
1289 	} else {
1290 		t->nt.nativeSize = THREADINFO_SIZE;
1291 		u32 sz = std::min(THREADINFO_SIZE, wantedSize);
1292 		if (sz != 0)
1293 			Memory::Memcpy(statusPtr, &t->nt, sz, "ThreadStatus");
1294 	}
1295 
1296 	hleEatCycles(1400);
1297 	hleReSchedule("refer thread status");
1298 	return hleLogSuccessVerboseI(SCEKERNEL, 0);
1299 }
1300 
1301 // Thanks JPCSP
sceKernelReferThreadRunStatus(u32 threadID,u32 statusPtr)1302 u32 sceKernelReferThreadRunStatus(u32 threadID, u32 statusPtr)
1303 {
1304 	if (threadID == 0)
1305 		threadID = __KernelGetCurThread();
1306 
1307 	u32 error;
1308 	PSPThread *t = kernelObjects.Get<PSPThread>(threadID, error);
1309 	if (!t)
1310 	{
1311 		ERROR_LOG(SCEKERNEL,"sceKernelReferThreadRunStatus Error %08x", error);
1312 		return error;
1313 	}
1314 
1315 	DEBUG_LOG(SCEKERNEL,"sceKernelReferThreadRunStatus(%i, %08x)", threadID, statusPtr);
1316 	if (!Memory::IsValidAddress(statusPtr))
1317 		return -1;
1318 
1319 	auto runStatus = PSPPointer<SceKernelThreadRunStatus>::Create(statusPtr);
1320 
1321 	// TODO: Check size?
1322 	runStatus->size = sizeof(SceKernelThreadRunStatus);
1323 	runStatus->status = t->nt.status;
1324 	runStatus->currentPriority = t->nt.currentPriority;
1325 	runStatus->waitType = t->nt.waitType;
1326 	runStatus->waitID = t->nt.waitID;
1327 	runStatus->wakeupCount = t->nt.wakeupCount;
1328 	runStatus->runForClocks = t->nt.runForClocks;
1329 	runStatus->numInterruptPreempts = t->nt.numInterruptPreempts;
1330 	runStatus->numThreadPreempts = t->nt.numThreadPreempts;
1331 	runStatus->numReleases = t->nt.numReleases;
1332 
1333 	return 0;
1334 }
1335 
__KernelGetThreadExitStatus(SceUID threadID)1336 int __KernelGetThreadExitStatus(SceUID threadID) {
1337 	u32 error;
1338 	PSPThread *t = kernelObjects.Get<PSPThread>(threadID, error);
1339 	if (!t) {
1340 		return hleLogError(SCEKERNEL, error);
1341 	}
1342 
1343 	// __KernelResetThread and __KernelCreateThread set exitStatus in case it's DORMANT.
1344 	if (t->nt.status == THREADSTATUS_DORMANT) {
1345 		return hleLogSuccessI(SCEKERNEL, t->nt.exitStatus);
1346 	}
1347 	return hleLogVerbose(SCEKERNEL, SCE_KERNEL_ERROR_NOT_DORMANT, "not dormant");
1348 }
1349 
sceKernelGetThreadExitStatus(SceUID threadID)1350 int sceKernelGetThreadExitStatus(SceUID threadID) {
1351 	u32 status = __KernelGetThreadExitStatus(threadID);
1352 	// Seems this is called in a tight-ish loop, maybe awaiting an interrupt - issue #13698
1353 	hleEatCycles(330);
1354 	return status;
1355 }
1356 
sceKernelGetThreadmanIdType(u32 uid)1357 u32 sceKernelGetThreadmanIdType(u32 uid) {
1358 	int type;
1359 	if (kernelObjects.GetIDType(uid, &type)) {
1360 		if (type < 0x1000) {
1361 			DEBUG_LOG(SCEKERNEL, "%i=sceKernelGetThreadmanIdType(%i)", type, uid);
1362 			return type;
1363 		} else {
1364 			// This means a partition memory block or module, etc.
1365 			ERROR_LOG(SCEKERNEL, "sceKernelGetThreadmanIdType(%i): invalid object type %i", uid, type);
1366 			return SCE_KERNEL_ERROR_ILLEGAL_ARGUMENT;
1367 		}
1368 	} else {
1369 		ERROR_LOG(SCEKERNEL, "sceKernelGetThreadmanIdType(%i) - FAILED", uid);
1370 		return SCE_KERNEL_ERROR_ILLEGAL_ARGUMENT;
1371 	}
1372 }
1373 
__ThreadmanIdListIsSleeping(const PSPThread * t)1374 static bool __ThreadmanIdListIsSleeping(const PSPThread *t) {
1375 	return t->isWaitingFor(WAITTYPE_SLEEP, 0);
1376 }
1377 
__ThreadmanIdListIsDelayed(const PSPThread * t)1378 static bool __ThreadmanIdListIsDelayed(const PSPThread *t) {
1379 	return t->isWaitingFor(WAITTYPE_DELAY, t->GetUID());
1380 }
1381 
__ThreadmanIdListIsSuspended(const PSPThread * t)1382 static bool __ThreadmanIdListIsSuspended(const PSPThread *t) {
1383 	return t->isSuspended();
1384 }
1385 
__ThreadmanIdListIsDormant(const PSPThread * t)1386 static bool __ThreadmanIdListIsDormant(const PSPThread *t) {
1387 	return t->isStopped();
1388 }
1389 
sceKernelGetThreadmanIdList(u32 type,u32 readBufPtr,u32 readBufSize,u32 idCountPtr)1390 u32 sceKernelGetThreadmanIdList(u32 type, u32 readBufPtr, u32 readBufSize, u32 idCountPtr) {
1391 	if (readBufSize >= 0x8000000) {
1392 		// Not exact, it's probably if the sum ends up negative or something.
1393 		ERROR_LOG_REPORT(SCEKERNEL, "sceKernelGetThreadmanIdList(%i, %08x, %i, %08x): invalid size", type, readBufPtr, readBufSize, idCountPtr);
1394 		return SCE_KERNEL_ERROR_ILLEGAL_ADDR;
1395 	}
1396 	if (!Memory::IsValidAddress(readBufPtr) && readBufSize > 0) {
1397 		// Crashes on a PSP.
1398 		ERROR_LOG_REPORT(SCEKERNEL, "sceKernelGetThreadmanIdList(%i, %08x, %i, %08x): invalid pointer", type, readBufPtr, readBufSize, idCountPtr);
1399 		return SCE_KERNEL_ERROR_ILLEGAL_ARGUMENT;
1400 	}
1401 
1402 	u32 total = 0;
1403 	auto uids = PSPPointer<SceUID_le>::Create(readBufPtr);
1404 	u32 error;
1405 	if (type > 0 && type <= SCE_KERNEL_TMID_Tlspl) {
1406 		DEBUG_LOG(SCEKERNEL, "sceKernelGetThreadmanIdList(%i, %08x, %i, %08x)", type, readBufPtr, readBufSize, idCountPtr);
1407 		total = kernelObjects.ListIDType(type, uids, readBufSize);
1408 	} else if (type >= SCE_KERNEL_TMID_SleepThread && type <= SCE_KERNEL_TMID_DormantThread) {
1409 		bool (*checkFunc)(const PSPThread *t) = nullptr;
1410 		switch (type) {
1411 		case SCE_KERNEL_TMID_SleepThread:
1412 			checkFunc = &__ThreadmanIdListIsSleeping;
1413 			break;
1414 
1415 		case SCE_KERNEL_TMID_DelayThread:
1416 			checkFunc = &__ThreadmanIdListIsDelayed;
1417 			break;
1418 
1419 		case SCE_KERNEL_TMID_SuspendThread:
1420 			checkFunc = &__ThreadmanIdListIsSuspended;
1421 			break;
1422 
1423 		case SCE_KERNEL_TMID_DormantThread:
1424 			checkFunc = &__ThreadmanIdListIsDormant;
1425 			break;
1426 
1427 		default:
1428 			_dbg_assert_msg_(false, "Unexpected type %d", type);
1429 		}
1430 
1431 		for (size_t i = 0; i < threadqueue.size(); i++) {
1432 			const PSPThread *t = kernelObjects.Get<PSPThread>(threadqueue[i], error);
1433 			if (checkFunc(t)) {
1434 				if (total < readBufSize) {
1435 					*uids++ = threadqueue[i];
1436 				}
1437 				++total;
1438 			}
1439 		}
1440 	} else {
1441 		ERROR_LOG_REPORT(SCEKERNEL, "sceKernelGetThreadmanIdList(%i, %08x, %i, %08x): invalid type", type, readBufPtr, readBufSize, idCountPtr);
1442 		return SCE_KERNEL_ERROR_ILLEGAL_TYPE;
1443 	}
1444 
1445 	if (Memory::IsValidAddress(idCountPtr)) {
1446 		Memory::Write_U32(total, idCountPtr);
1447 	}
1448 	return total > readBufSize ? readBufSize : total;
1449 }
1450 
1451 // Saves the current CPU context
__KernelSaveContext(PSPThreadContext * ctx,bool vfpuEnabled)1452 void __KernelSaveContext(PSPThreadContext *ctx, bool vfpuEnabled) {
1453 	// r and f are immediately next to each other and must be.
1454 	memcpy((void *)ctx->r, (void *)currentMIPS->r, sizeof(ctx->r) + sizeof(ctx->f));
1455 
1456 	if (vfpuEnabled) {
1457 		memcpy(ctx->v, currentMIPS->v, sizeof(ctx->v));
1458 		memcpy(ctx->vfpuCtrl, currentMIPS->vfpuCtrl, sizeof(ctx->vfpuCtrl));
1459 	}
1460 
1461 	memcpy(ctx->other, currentMIPS->other, sizeof(ctx->other));
1462 }
1463 
1464 // Loads a CPU context
__KernelLoadContext(PSPThreadContext * ctx,bool vfpuEnabled)1465 void __KernelLoadContext(PSPThreadContext *ctx, bool vfpuEnabled) {
1466 	// r and f are immediately next to each other and must be.
1467 	memcpy((void *)currentMIPS->r, (void *)ctx->r, sizeof(ctx->r) + sizeof(ctx->f));
1468 
1469 	if (vfpuEnabled) {
1470 		memcpy(currentMIPS->v, ctx->v, sizeof(ctx->v));
1471 		memcpy(currentMIPS->vfpuCtrl, ctx->vfpuCtrl, sizeof(ctx->vfpuCtrl));
1472 	}
1473 
1474 	if (!Memory::IsValidAddress(ctx->pc)) {
1475 		Core_ExecException(ctx->pc, currentMIPS->pc, ExecExceptionType::THREAD);
1476 	}
1477 
1478 	memcpy(currentMIPS->other, ctx->other, sizeof(ctx->other));
1479 	if (MIPSComp::jit) {
1480 		// When thread switching, we must update the rounding mode if cached in the jit.
1481 		MIPSComp::jit->UpdateFCR31();
1482 	}
1483 
1484 	// Reset the llBit, the other thread may have touched memory.
1485 	currentMIPS->llBit = 0;
1486 }
1487 
__KernelResumeThreadFromWait(SceUID threadID,u32 retval)1488 u32 __KernelResumeThreadFromWait(SceUID threadID, u32 retval)
1489 {
1490 	u32 error;
1491 	PSPThread *t = kernelObjects.Get<PSPThread>(threadID, error);
1492 	if (t)
1493 	{
1494 		t->resumeFromWait();
1495 		t->setReturnValue(retval);
1496 		return 0;
1497 	}
1498 	else
1499 	{
1500 		ERROR_LOG(SCEKERNEL, "__KernelResumeThreadFromWait(%d): bad thread: %08x", threadID, error);
1501 		return error;
1502 	}
1503 }
1504 
__KernelResumeThreadFromWait(SceUID threadID,u64 retval)1505 u32 __KernelResumeThreadFromWait(SceUID threadID, u64 retval)
1506 {
1507 	u32 error;
1508 	PSPThread *t = kernelObjects.Get<PSPThread>(threadID, error);
1509 	if (t)
1510 	{
1511 		t->resumeFromWait();
1512 		t->setReturnValue(retval);
1513 		return 0;
1514 	}
1515 	else
1516 	{
1517 		ERROR_LOG(SCEKERNEL, "__KernelResumeThreadFromWait(%d): bad thread: %08x", threadID, error);
1518 		return error;
1519 	}
1520 }
1521 
1522 // makes the current thread wait for an event
__KernelWaitCurThread(WaitType type,SceUID waitID,u32 waitValue,u32 timeoutPtr,bool processCallbacks,const char * reason)1523 void __KernelWaitCurThread(WaitType type, SceUID waitID, u32 waitValue, u32 timeoutPtr, bool processCallbacks, const char *reason) {
1524 	if (!dispatchEnabled) {
1525 		WARN_LOG_REPORT(SCEKERNEL, "Ignoring wait, dispatching disabled... right thing to do?");
1526 		return;
1527 	}
1528 
1529 	PSPThread *thread = __GetCurrentThread();
1530 	_assert_(thread != nullptr);
1531 	if ((thread->nt.status & THREADSTATUS_WAIT) != 0)
1532 		WARN_LOG_REPORT(SCEKERNEL, "Waiting thread for %d that was already waiting for %d", type, thread->nt.waitType);
1533 	thread->nt.waitID = waitID;
1534 	thread->nt.waitType = type;
1535 	__KernelChangeThreadState(thread, ThreadStatus(THREADSTATUS_WAIT | (thread->nt.status & THREADSTATUS_SUSPEND)));
1536 	thread->nt.numReleases++;
1537 	thread->waitInfo.waitValue = waitValue;
1538 	thread->waitInfo.timeoutPtr = timeoutPtr;
1539 
1540 	if (!reason)
1541 		reason = "started wait";
1542 
1543 	hleReSchedule(processCallbacks, reason);
1544 }
1545 
__KernelWaitCallbacksCurThread(WaitType type,SceUID waitID,u32 waitValue,u32 timeoutPtr)1546 void __KernelWaitCallbacksCurThread(WaitType type, SceUID waitID, u32 waitValue, u32 timeoutPtr) {
1547 	if (!dispatchEnabled) {
1548 		WARN_LOG_REPORT(SCEKERNEL, "Ignoring wait, dispatching disabled... right thing to do?");
1549 		return;
1550 	}
1551 
1552 	PSPThread *thread = __GetCurrentThread();
1553 	if ((thread->nt.status & THREADSTATUS_WAIT) != 0)
1554 		WARN_LOG_REPORT(SCEKERNEL, "Waiting thread for %d that was already waiting for %d", type, thread->nt.waitType);
1555 	thread->nt.waitID = waitID;
1556 	thread->nt.waitType = type;
1557 	__KernelChangeThreadState(thread, ThreadStatus(THREADSTATUS_WAIT | (thread->nt.status & THREADSTATUS_SUSPEND)));
1558 	// TODO: Probably not...?
1559 	thread->nt.numReleases++;
1560 	thread->waitInfo.waitValue = waitValue;
1561 	thread->waitInfo.timeoutPtr = timeoutPtr;
1562 
1563 	__KernelForceCallbacks();
1564 }
1565 
hleScheduledWakeup(u64 userdata,int cyclesLate)1566 void hleScheduledWakeup(u64 userdata, int cyclesLate)
1567 {
1568 	SceUID threadID = (SceUID)userdata;
1569 	u32 error;
1570 	if (__KernelGetWaitID(threadID, WAITTYPE_DELAY, error) == threadID)
1571 	{
1572 		__KernelResumeThreadFromWait(threadID, 0);
1573 		__KernelReSchedule("thread delay finished");
1574 	}
1575 }
1576 
__KernelScheduleWakeup(SceUID threadID,s64 usFromNow)1577 void __KernelScheduleWakeup(SceUID threadID, s64 usFromNow)
1578 {
1579 	s64 cycles = usToCycles(usFromNow);
1580 	CoreTiming::ScheduleEvent(cycles, eventScheduledWakeup, threadID);
1581 }
1582 
__KernelCancelWakeup(SceUID threadID)1583 void __KernelCancelWakeup(SceUID threadID)
1584 {
1585 	CoreTiming::UnscheduleEvent(eventScheduledWakeup, threadID);
1586 }
1587 
hleThreadEndTimeout(u64 userdata,int cyclesLate)1588 void hleThreadEndTimeout(u64 userdata, int cyclesLate)
1589 {
1590 	SceUID threadID = (SceUID) userdata;
1591 	HLEKernel::WaitExecTimeout<PSPThread, WAITTYPE_THREADEND>(threadID);
1592 }
1593 
__KernelScheduleThreadEndTimeout(SceUID threadID,SceUID waitForID,s64 usFromNow)1594 static void __KernelScheduleThreadEndTimeout(SceUID threadID, SceUID waitForID, s64 usFromNow)
1595 {
1596 	s64 cycles = usToCycles(usFromNow);
1597 	CoreTiming::ScheduleEvent(cycles, eventThreadEndTimeout, threadID);
1598 }
1599 
__KernelCancelThreadEndTimeout(SceUID threadID)1600 void __KernelCancelThreadEndTimeout(SceUID threadID)
1601 {
1602 	CoreTiming::UnscheduleEvent(eventThreadEndTimeout, threadID);
1603 }
1604 
__KernelRemoveFromThreadQueue(SceUID threadID)1605 static void __KernelRemoveFromThreadQueue(SceUID threadID) {
1606 	std::lock_guard<std::mutex> guard(threadqueueLock);
1607 
1608 	int prio = __KernelGetThreadPrio(threadID);
1609 	if (prio != 0)
1610 		threadReadyQueue.remove(prio, threadID);
1611 
1612 	threadqueue.erase(std::remove(threadqueue.begin(), threadqueue.end(), threadID), threadqueue.end());
1613 }
1614 
__KernelStopThread(SceUID threadID,int exitStatus,const char * reason)1615 void __KernelStopThread(SceUID threadID, int exitStatus, const char *reason)
1616 {
1617 	u32 error;
1618 	PSPThread *t = kernelObjects.Get<PSPThread>(threadID, error);
1619 	if (t)
1620 	{
1621 		__KernelChangeReadyState(t, threadID, false);
1622 		t->nt.exitStatus = exitStatus;
1623 		t->nt.status = THREADSTATUS_DORMANT;
1624 		__KernelFireThreadEnd(threadID);
1625 		for (size_t i = 0; i < t->waitingThreads.size(); ++i)
1626 		{
1627 			const SceUID waitingThread = t->waitingThreads[i];
1628 			u32 timeoutPtr = __KernelGetWaitTimeoutPtr(waitingThread, error);
1629 			if (HLEKernel::VerifyWait(waitingThread, WAITTYPE_THREADEND, threadID))
1630 			{
1631 				s64 cyclesLeft = CoreTiming::UnscheduleEvent(eventThreadEndTimeout, waitingThread);
1632 				if (timeoutPtr != 0)
1633 					Memory::Write_U32((u32) cyclesToUs(cyclesLeft), timeoutPtr);
1634 
1635 				HLEKernel::ResumeFromWait(waitingThread, WAITTYPE_THREADEND, threadID, exitStatus);
1636 			}
1637 		}
1638 		t->waitingThreads.clear();
1639 
1640 		// Stopped threads are never waiting.
1641 		t->nt.waitType = WAITTYPE_NONE;
1642 		t->nt.waitID = 0;
1643 	} else {
1644 		ERROR_LOG_REPORT(SCEKERNEL, "__KernelStopThread: thread %d does not exist", threadID);
1645 	}
1646 }
1647 
__KernelDeleteThread(SceUID threadID,int exitStatus,const char * reason)1648 u32 __KernelDeleteThread(SceUID threadID, int exitStatus, const char *reason)
1649 {
1650 	__KernelStopThread(threadID, exitStatus, reason);
1651 	__KernelRemoveFromThreadQueue(threadID);
1652 
1653 	if (currentThread == threadID)
1654 		__SetCurrentThread(NULL, 0, NULL);
1655 	if (currentCallbackThreadID == threadID)
1656 	{
1657 		currentCallbackThreadID = 0;
1658 		g_inCbCount = 0;
1659 	}
1660 
1661 	u32 error;
1662 	PSPThread *t = kernelObjects.Get<PSPThread>(threadID, error);
1663 	if (t)
1664 	{
1665 		for (auto it = t->callbacks.begin(), end = t->callbacks.end(); it != end; ++it)
1666 		{
1667 			PSPCallback *callback = kernelObjects.Get<PSPCallback>(*it, error);
1668 			if (callback && callback->nc.notifyCount != 0)
1669 				readyCallbacksCount--;
1670 		}
1671 
1672 		t->Cleanup();
1673 
1674 		// Before triggering, set v0.  It'll be restored if one is called.
1675 		RETURN(error);
1676 		t->nt.status = THREADSTATUS_DEAD;
1677 
1678 		if (__KernelThreadTriggerEvent((t->nt.attr & PSP_THREAD_ATTR_KERNEL) != 0, threadID, THREADEVENT_DELETE)) {
1679 			// Don't delete it yet.  We'll delete later.
1680 			pendingDeleteThreads.push_back(threadID);
1681 			return 0;
1682 		} else {
1683 			return kernelObjects.Destroy<PSPThread>(threadID);
1684 		}
1685 	} else {
1686 		RETURN(error);
1687 		return error;
1688 	}
1689 }
1690 
__ReportThreadQueueEmpty()1691 static void __ReportThreadQueueEmpty() {
1692 	// We failed to find a thread to schedule.
1693 	// This means something horrible happened to the idle threads.
1694 	u32 error;
1695 	PSPThread *idleThread0 = kernelObjects.Get<PSPThread>(threadIdleID[0], error);
1696 	PSPThread *idleThread1 = kernelObjects.Get<PSPThread>(threadIdleID[1], error);
1697 
1698 	char idleDescription0[256];
1699 	int idleStatus0 = -1;
1700 	if (idleThread0) {
1701 		idleThread0->GetQuickInfo(idleDescription0, sizeof(idleDescription0));
1702 		idleStatus0 = idleThread0->nt.status;
1703 	} else {
1704 		sprintf(idleDescription0, "DELETED");
1705 	}
1706 
1707 	char idleDescription1[256];
1708 	int idleStatus1 = -1;
1709 	if (idleThread1) {
1710 		idleThread1->GetQuickInfo(idleDescription1, sizeof(idleDescription1));
1711 		idleStatus1 = idleThread1->nt.status;
1712 	} else {
1713 		sprintf(idleDescription1, "DELETED");
1714 	}
1715 
1716 	ERROR_LOG_REPORT_ONCE(threadqueueempty, SCEKERNEL, "Failed to reschedule: out of threads on queue (%d, %d)", idleStatus0, idleStatus1);
1717 	WARN_LOG(SCEKERNEL, "Failed to reschedule: idle0 -> %s", idleDescription0);
1718 	WARN_LOG(SCEKERNEL, "Failed to reschedule: idle1 -> %s", idleDescription1);
1719 }
1720 
1721 // Returns NULL if the current thread is fine.
__KernelNextThread()1722 static PSPThread *__KernelNextThread() {
1723 	SceUID bestThread;
1724 
1725 	// If the current thread is running, it's a valid candidate.
1726 	PSPThread *cur = __GetCurrentThread();
1727 	if (cur && cur->isRunning()) {
1728 		bestThread = threadReadyQueue.pop_first_better(cur->nt.currentPriority);
1729 		if (bestThread != 0)
1730 			__KernelChangeReadyState(cur, currentThread, true);
1731 	} else {
1732 		bestThread = threadReadyQueue.pop_first();
1733 
1734 		if (bestThread == 0) {
1735 			// Zoinks.  No thread?
1736 			__ReportThreadQueueEmpty();
1737 
1738 			// Let's try to get back on track, if possible.
1739 			bestThread = threadIdleID[1];
1740 		}
1741 	}
1742 
1743 	// Assume threadReadyQueue has not become corrupt.
1744 	if (bestThread != 0)
1745 		return kernelObjects.GetFast<PSPThread>(bestThread);
1746 	else
1747 		return 0;
1748 }
1749 
__KernelReSchedule(const char * reason)1750 void __KernelReSchedule(const char *reason)
1751 {
1752 	// First, let's check if there are any pending callbacks to trigger.
1753 	// TODO: Could probably take this out of __KernelReSchedule() which is a bit hot.
1754 	__KernelCheckCallbacks();
1755 
1756 	// Execute any pending events while we're doing scheduling.
1757 	CoreTiming::Advance();
1758 	if (__IsInInterrupt() || !__KernelIsDispatchEnabled()) {
1759 		// Threads don't get changed within interrupts or while dispatch is disabled.
1760 		reason = "In Interrupt Or Callback";
1761 		return;
1762 	}
1763 
1764 	PSPThread *nextThread = __KernelNextThread();
1765 	if (nextThread) {
1766 		__KernelSwitchContext(nextThread, reason);
1767 	}
1768 	// Otherwise, no need to switch.
1769 }
1770 
__KernelReSchedule(bool doCallbacks,const char * reason)1771 void __KernelReSchedule(bool doCallbacks, const char *reason)
1772 {
1773 	PSPThread *thread = __GetCurrentThread();
1774 	if (doCallbacks && thread != nullptr) {
1775 		thread->isProcessingCallbacks = doCallbacks;
1776 	}
1777 
1778 	// Note - this calls the function above, not this one. Overloading...
1779 	__KernelReSchedule(reason);
1780 	if (doCallbacks && thread != nullptr && thread->GetUID() == currentThread) {
1781 		if (thread->isRunning()) {
1782 			thread->isProcessingCallbacks = false;
1783 		}
1784 	}
1785 }
1786 
sceKernelCheckThreadStack()1787 int sceKernelCheckThreadStack()
1788 {
1789 	u32 error;
1790 	PSPThread *t = kernelObjects.Get<PSPThread>(__KernelGetCurThread(), error);
1791 	if (t) {
1792 		u32 diff = labs((long)((s64)currentMIPS->r[MIPS_REG_SP] - (s64)t->currentStack.start));
1793 		DEBUG_LOG(SCEKERNEL, "%i=sceKernelCheckThreadStack()", diff);
1794 		return diff;
1795 	} else {
1796 		ERROR_LOG_REPORT(SCEKERNEL, "sceKernelCheckThreadStack() - not on thread");
1797 		return -1;
1798 	}
1799 }
1800 
reset()1801 void PSPThreadContext::reset() {
1802 	for (int i = 0; i<32; i++) {
1803 		r[i] = 0xDEADBEEF;
1804 		fi[i] = 0x7f800001;
1805 	}
1806 	r[0] = 0;
1807 	for (int i = 0; i<128; i++) {
1808 		vi[i] = 0x7f800001;
1809 	}
1810 	for (int i = 0; i<15; i++) {
1811 		vfpuCtrl[i] = 0x00000000;
1812 	}
1813 	vfpuCtrl[VFPU_CTRL_SPREFIX] = 0xe4; // neutral
1814 	vfpuCtrl[VFPU_CTRL_TPREFIX] = 0xe4; // neutral
1815 	vfpuCtrl[VFPU_CTRL_DPREFIX] = 0x0;	// neutral
1816 	vfpuCtrl[VFPU_CTRL_CC] = 0x3f;
1817 	vfpuCtrl[VFPU_CTRL_INF4] = 0;
1818 	vfpuCtrl[VFPU_CTRL_REV] = 0x7772ceab;
1819 	vfpuCtrl[VFPU_CTRL_RCX0] = 0x3f800001;
1820 	vfpuCtrl[VFPU_CTRL_RCX1] = 0x3f800002;
1821 	vfpuCtrl[VFPU_CTRL_RCX2] = 0x3f800004;
1822 	vfpuCtrl[VFPU_CTRL_RCX3] = 0x3f800008;
1823 	vfpuCtrl[VFPU_CTRL_RCX4] = 0x3f800000;
1824 	vfpuCtrl[VFPU_CTRL_RCX5] = 0x3f800000;
1825 	vfpuCtrl[VFPU_CTRL_RCX6] = 0x3f800000;
1826 	vfpuCtrl[VFPU_CTRL_RCX7] = 0x3f800000;
1827 	fpcond = 0;
1828 	fcr31 = 0x00000e00;
1829 	hi = 0xDEADBEEF;
1830 	lo = 0xDEADBEEF;
1831 	// Just for a clean state.
1832 	other[5] = 0;
1833 }
1834 
__KernelResetThread(PSPThread * t,int lowestPriority)1835 void __KernelResetThread(PSPThread *t, int lowestPriority) {
1836 	t->context.reset();
1837 	t->context.pc = t->nt.entrypoint;
1838 
1839 	// If the thread would be better than lowestPriority, reset to its initial.  Yes, kinda odd...
1840 	if (t->nt.currentPriority < lowestPriority)
1841 		t->nt.currentPriority = t->nt.initialPriority;
1842 
1843 	t->nt.waitType = WAITTYPE_NONE;
1844 	t->nt.waitID = 0;
1845 	memset(&t->waitInfo, 0, sizeof(t->waitInfo));
1846 
1847 	t->nt.exitStatus = SCE_KERNEL_ERROR_NOT_DORMANT;
1848 	t->isProcessingCallbacks = false;
1849 	t->currentCallbackId = 0;
1850 	t->currentMipscallId = 0;
1851 	t->pendingMipsCalls.clear();
1852 
1853 	// This will be overwritten when starting the thread, but let's point it somewhere useful.
1854 	t->context.r[MIPS_REG_RA] = threadReturnHackAddr;
1855 	// TODO: Not sure if it's reset here, but this makes sense.
1856 	t->context.r[MIPS_REG_GP] = t->nt.gpreg;
1857 	t->FillStack();
1858 
1859 	if (!t->waitingThreads.empty())
1860 		ERROR_LOG_REPORT(SCEKERNEL, "Resetting thread with threads waiting on end?");
1861 }
1862 
__KernelCreateThread(SceUID & id,SceUID moduleId,const char * name,u32 entryPoint,u32 priority,int stacksize,u32 attr)1863 PSPThread *__KernelCreateThread(SceUID &id, SceUID moduleId, const char *name, u32 entryPoint, u32 priority, int stacksize, u32 attr) {
1864 	std::lock_guard<std::mutex> guard(threadqueueLock);
1865 
1866 	PSPThread *t = new PSPThread();
1867 	id = kernelObjects.Create(t);
1868 
1869 	threadqueue.push_back(id);
1870 	threadReadyQueue.prepare(priority);
1871 
1872 	memset(&t->nt, 0xCD, sizeof(t->nt));
1873 
1874 	t->nt.entrypoint = entryPoint;
1875 	t->nt.nativeSize = sizeof(t->nt);
1876 	t->nt.attr = attr;
1877 	// TODO: I have no idea what this value is but the PSP firmware seems to add it on create.
1878 	t->nt.attr |= 0xFF;
1879 	t->nt.initialPriority = t->nt.currentPriority = priority;
1880 	t->nt.stackSize = stacksize;
1881 	t->nt.status = THREADSTATUS_DORMANT;
1882 
1883 	t->nt.numInterruptPreempts = 0;
1884 	t->nt.numReleases = 0;
1885 	t->nt.numThreadPreempts = 0;
1886 	t->nt.runForClocks.lo = 0;
1887 	t->nt.runForClocks.hi = 0;
1888 	t->nt.wakeupCount = 0;
1889 	t->nt.initialStack = 0;
1890 	t->nt.waitID = 0;
1891 	t->nt.exitStatus = SCE_KERNEL_ERROR_DORMANT;
1892 	t->nt.waitType = WAITTYPE_NONE;
1893 
1894 	if (moduleId)
1895 		t->nt.gpreg = __KernelGetModuleGP(moduleId);
1896 	else
1897 		t->nt.gpreg = 0;  // sceKernelStartThread will take care of this.
1898 	t->moduleId = moduleId;
1899 
1900 	strncpy(t->nt.name, name, KERNELOBJECT_MAX_NAME_LENGTH);
1901 	t->nt.name[KERNELOBJECT_MAX_NAME_LENGTH] = '\0';
1902 
1903 	u32 stackSize = t->nt.stackSize;
1904 	t->AllocateStack(stackSize);  // can change the stacksize!
1905 	t->nt.stackSize = stackSize;
1906 	return t;
1907 }
1908 
__KernelSetupRootThread(SceUID moduleID,int args,const char * argp,int prio,int stacksize,int attr)1909 SceUID __KernelSetupRootThread(SceUID moduleID, int args, const char *argp, int prio, int stacksize, int attr)
1910 {
1911 	//grab mips regs
1912 	SceUID id;
1913 	PSPThread *thread = __KernelCreateThread(id, moduleID, "root", currentMIPS->pc, prio, stacksize, attr);
1914 	if (thread->currentStack.start == 0)
1915 		ERROR_LOG_REPORT(SCEKERNEL, "Unable to allocate stack for root thread.");
1916 	__KernelResetThread(thread, 0);
1917 
1918 	PSPThread *prevThread = __GetCurrentThread();
1919 	if (prevThread && prevThread->isRunning())
1920 		__KernelChangeReadyState(currentThread, true);
1921 	__SetCurrentThread(thread, id, "root");
1922 	thread->nt.status = THREADSTATUS_RUNNING; // do not schedule
1923 
1924 	strcpy(thread->nt.name, "root");
1925 
1926 	if (!Memory::IsValidAddress(thread->context.pc)) {
1927 		Core_ExecException(thread->context.pc, currentMIPS->pc, ExecExceptionType::THREAD);
1928 	}
1929 
1930 	__KernelLoadContext(&thread->context, (attr & PSP_THREAD_ATTR_VFPU) != 0);
1931 	currentMIPS->r[MIPS_REG_A0] = args;
1932 	currentMIPS->r[MIPS_REG_SP] -= (args + 0xf) & ~0xf;
1933 	u32 location = currentMIPS->r[MIPS_REG_SP];
1934 	currentMIPS->r[MIPS_REG_A1] = location;
1935 	if (argp)
1936 		Memory::Memcpy(location, argp, args, "ThreadParam");
1937 	// Let's assume same as starting a new thread, 64 bytes for safety/kernel.
1938 	currentMIPS->r[MIPS_REG_SP] -= 64;
1939 
1940 	return id;
1941 }
1942 
__KernelCreateThreadInternal(const char * threadName,SceUID moduleID,u32 entry,u32 prio,int stacksize,u32 attr)1943 SceUID __KernelCreateThreadInternal(const char *threadName, SceUID moduleID, u32 entry, u32 prio, int stacksize, u32 attr)
1944 {
1945 	SceUID id;
1946 	PSPThread *newThread = __KernelCreateThread(id, moduleID, threadName, entry, prio, stacksize, attr);
1947 	if (newThread->currentStack.start == 0)
1948 		return SCE_KERNEL_ERROR_NO_MEMORY;
1949 
1950 	return id;
1951 }
1952 
__KernelCreateThread(const char * threadName,SceUID moduleID,u32 entry,u32 prio,int stacksize,u32 attr,u32 optionAddr,bool allowKernel)1953 int __KernelCreateThread(const char *threadName, SceUID moduleID, u32 entry, u32 prio, int stacksize, u32 attr, u32 optionAddr, bool allowKernel) {
1954 	if (threadName == nullptr)
1955 		return hleReportError(SCEKERNEL, SCE_KERNEL_ERROR_ERROR, "NULL thread name");
1956 
1957 	if ((u32)stacksize < 0x200)
1958 		return hleReportWarning(SCEKERNEL, SCE_KERNEL_ERROR_ILLEGAL_STACK_SIZE, "bogus thread stack size %08x", stacksize);
1959 	if (prio < 0x08 || prio > 0x77) {
1960 		WARN_LOG_REPORT(SCEKERNEL, "sceKernelCreateThread(name=%s): bogus priority %08x", threadName, prio);
1961 		// TODO: Should return this error.
1962 		// return SCE_KERNEL_ERROR_ILLEGAL_PRIORITY;
1963 		prio = prio < 0x08 ? 0x08 : 0x77;
1964 	}
1965 	if (!Memory::IsValidAddress(entry)) {
1966 		// The PSP firmware seems to allow NULL...?
1967 		if (entry != 0)
1968 			return hleReportError(SCEKERNEL, SCE_KERNEL_ERROR_ILLEGAL_ADDR, "invalid thread entry %08x", entry);
1969 	}
1970 	if ((attr & ~PSP_THREAD_ATTR_USER_MASK) != 0 && !allowKernel)
1971 		return hleReportWarning(SCEKERNEL, SCE_KERNEL_ERROR_ILLEGAL_ATTR, "illegal thread attributes %08x", attr);
1972 
1973 	if ((attr & ~PSP_THREAD_ATTR_SUPPORTED) != 0)
1974 		WARN_LOG_REPORT(SCEKERNEL, "sceKernelCreateThread(name=%s): unsupported attributes %08x", threadName, attr);
1975 
1976 	// TODO: Not sure what these values are, but they are removed from the attr silently.
1977 	// Some are USB/VSH specific, probably removes when they are from the wrong module?
1978 	attr &= ~PSP_THREAD_ATTR_USER_ERASE;
1979 
1980 	if ((attr & PSP_THREAD_ATTR_KERNEL) == 0) {
1981 		if (allowKernel && (attr & PSP_THREAD_ATTR_USER) == 0) {
1982 			attr |= PSP_THREAD_ATTR_KERNEL;
1983 		} else {
1984 			attr |= PSP_THREAD_ATTR_USER;
1985 		}
1986 	}
1987 
1988 	SceUID id = __KernelCreateThreadInternal(threadName, moduleID, entry, prio, stacksize, attr);
1989 	if ((u32)id == SCE_KERNEL_ERROR_NO_MEMORY)
1990 		return hleReportError(SCEKERNEL, SCE_KERNEL_ERROR_NO_MEMORY, "out of memory, %08x stack requested", stacksize);
1991 
1992 	if (optionAddr != 0)
1993 		WARN_LOG_REPORT(SCEKERNEL, "sceKernelCreateThread(name=%s): unsupported options parameter %08x", threadName, optionAddr);
1994 
1995 	// Creating a thread resumes dispatch automatically.  Probably can't create without it.
1996 	dispatchEnabled = true;
1997 
1998 	hleEatCycles(32000);
1999 	// This won't schedule to the new thread, but it may to one woken from eating cycles.
2000 	// Technically, this should not eat all at once, and reschedule in the middle, but that's hard.
2001 	hleReSchedule("thread created");
2002 
2003 	// Before triggering, set v0, since we restore on return.
2004 	RETURN(id);
2005 	__KernelThreadTriggerEvent((attr & PSP_THREAD_ATTR_KERNEL) != 0, id, THREADEVENT_CREATE);
2006 	return hleLogSuccessInfoI(SCEKERNEL, id);
2007 }
2008 
sceKernelCreateThread(const char * threadName,u32 entry,u32 prio,int stacksize,u32 attr,u32 optionAddr)2009 int sceKernelCreateThread(const char *threadName, u32 entry, u32 prio, int stacksize, u32 attr, u32 optionAddr) {
2010 	PSPThread *cur = __GetCurrentThread();
2011 	SceUID module = __KernelGetCurThreadModuleId();
2012 	bool allowKernel = KernelModuleIsKernelMode(module) || hleIsKernelMode() || (cur ? (cur->nt.attr & PSP_THREAD_ATTR_KERNEL) != 0 : false);
2013 	return __KernelCreateThread(threadName, module, entry, prio, stacksize, attr, optionAddr, allowKernel);
2014 }
2015 
__KernelStartThread(SceUID threadToStartID,int argSize,u32 argBlockPtr,bool forceArgs)2016 int __KernelStartThread(SceUID threadToStartID, int argSize, u32 argBlockPtr, bool forceArgs) {
2017 	u32 error;
2018 	PSPThread *startThread = kernelObjects.Get<PSPThread>(threadToStartID, error);
2019 	if (startThread == 0)
2020 		return error;
2021 
2022 	PSPThread *cur = __GetCurrentThread();
2023 	__KernelResetThread(startThread, cur ? (s32)cur->nt.currentPriority : 0);
2024 
2025 	u32 &sp = startThread->context.r[MIPS_REG_SP];
2026 	// Force args means just use those as a0/a1 without any special treatment.
2027 	// This is a hack to avoid allocating memory for helper threads which take args.
2028 	if (forceArgs) {
2029 		startThread->context.r[MIPS_REG_A0] = argSize;
2030 		startThread->context.r[MIPS_REG_A1] = argBlockPtr;
2031 	} else if (argBlockPtr && argSize > 0) {
2032 		// Make room for the arguments, always 0x10 aligned.
2033 		sp -= (argSize + 0xf) & ~0xf;
2034 		startThread->context.r[MIPS_REG_A0] = argSize;
2035 		startThread->context.r[MIPS_REG_A1] = sp;
2036 
2037 		// Now copy argument to stack.
2038 		if (Memory::IsValidAddress(argBlockPtr)) {
2039 			Memory::Memcpy(sp, argBlockPtr, argSize, "ThreadStartArgs");
2040 		}
2041 	} else {
2042 		startThread->context.r[MIPS_REG_A0] = 0;
2043 		startThread->context.r[MIPS_REG_A1] = 0;
2044 	}
2045 
2046 	// On the PSP, there's an extra 64 bytes of stack eaten after the args.
2047 	// This could be stack overflow safety, or just stack eaten by the kernel entry func.
2048 	sp -= 64;
2049 
2050 	// At the bottom of those 64 bytes, the return syscall and ra is written.
2051 	// Test Drive Unlimited actually depends on it being in the correct place.
2052 	WriteSyscall("FakeSysCalls", NID_THREADRETURN, sp);
2053 	Memory::Write_U32(MIPS_MAKE_B(-1), sp + 8);
2054 	Memory::Write_U32(MIPS_MAKE_NOP(), sp + 12);
2055 
2056 	// Point ra at our return stub, and start fp off matching sp.
2057 	startThread->context.r[MIPS_REG_RA] = sp;
2058 	startThread->context.r[MIPS_REG_FP] = sp;
2059 
2060 	// Smaller is better for priority.  Only switch if the new thread is better.
2061 	if (cur && cur->nt.currentPriority > startThread->nt.currentPriority) {
2062 		if (!Memory::IsValidAddress(startThread->context.pc)) {
2063 			Core_ExecException(startThread->context.pc, currentMIPS->pc, ExecExceptionType::THREAD);
2064 		}
2065 		__KernelChangeReadyState(cur, currentThread, true);
2066 		if (__InterruptsEnabled())
2067 			hleReSchedule("thread started");
2068 	}
2069 
2070 	// Starting a thread automatically resumes the dispatch thread if the new thread has worse priority.
2071 	// Seems strange but also seems reproducible.
2072 	if (cur && cur->nt.currentPriority <= startThread->nt.currentPriority) {
2073 		dispatchEnabled = true;
2074 	}
2075 
2076 	__KernelChangeReadyState(startThread, threadToStartID, true);
2077 
2078 	// Need to write out v0 before triggering event.
2079 	// TODO: Technically the wrong place.  This should trigger when the thread actually starts (e.g. if suspended.)
2080 	RETURN(0);
2081 	__KernelThreadTriggerEvent((startThread->nt.attr & PSP_THREAD_ATTR_KERNEL) != 0, threadToStartID, THREADEVENT_START);
2082 	return 0;
2083 }
2084 
__KernelStartThreadValidate(SceUID threadToStartID,int argSize,u32 argBlockPtr,bool forceArgs)2085 int __KernelStartThreadValidate(SceUID threadToStartID, int argSize, u32 argBlockPtr, bool forceArgs) {
2086 	if (threadToStartID == 0)
2087 		return hleLogError(SCEKERNEL, SCE_KERNEL_ERROR_ILLEGAL_THID, "thread id is 0");
2088 	if (argSize < 0 || argBlockPtr & 0x80000000)
2089 		return hleReportError(SCEKERNEL, SCE_KERNEL_ERROR_ILLEGAL_ADDR, "bad thread argument pointer/length %08x / %08x", argSize, argBlockPtr);
2090 
2091 	u32 error = 0;
2092 	PSPThread *startThread = kernelObjects.Get<PSPThread>(threadToStartID, error);
2093 	if (startThread == 0)
2094 		return hleLogError(SCEKERNEL, error, "thread does not exist");
2095 
2096 	if (startThread->nt.status != THREADSTATUS_DORMANT)
2097 		return hleLogWarning(SCEKERNEL, SCE_KERNEL_ERROR_NOT_DORMANT, "thread already running");
2098 
2099 	hleEatCycles(3400);
2100 	return __KernelStartThread(threadToStartID, argSize, argBlockPtr, forceArgs);
2101 }
2102 
2103 // int sceKernelStartThread(SceUID threadToStartID, SceSize argSize, void *argBlock)
sceKernelStartThread(SceUID threadToStartID,int argSize,u32 argBlockPtr)2104 int sceKernelStartThread(SceUID threadToStartID, int argSize, u32 argBlockPtr) {
2105 	return hleLogSuccessInfoI(SCEKERNEL, __KernelStartThreadValidate(threadToStartID, argSize, argBlockPtr));
2106 }
2107 
sceKernelGetThreadStackFreeSize(SceUID threadID)2108 int sceKernelGetThreadStackFreeSize(SceUID threadID)
2109 {
2110 	DEBUG_LOG(SCEKERNEL, "sceKernelGetThreadStackFreeSize(%i)", threadID);
2111 
2112 	if (threadID == 0)
2113 		threadID = __KernelGetCurThread();
2114 
2115 	u32 error;
2116 	PSPThread *thread = kernelObjects.Get<PSPThread>(threadID, error);
2117 	if (thread == nullptr) {
2118 		ERROR_LOG(SCEKERNEL, "sceKernelGetThreadStackFreeSize: invalid thread id %i", threadID);
2119 		return error;
2120 	}
2121 
2122 	// Scan the stack for 0xFF, starting after 0x10 (the thread id is written there.)
2123 	// Obviously this doesn't work great if PSP_THREAD_ATTR_NO_FILLSTACK is used.
2124 	int sz = 0;
2125 	for (u32 offset = 0x10; offset < thread->nt.stackSize; ++offset)
2126 	{
2127 		if (Memory::Read_U8(thread->currentStack.start + offset) != 0xFF)
2128 			break;
2129 		sz++;
2130 	}
2131 
2132 	return sz & ~3;
2133 }
2134 
__KernelReturnFromThread()2135 void __KernelReturnFromThread()
2136 {
2137 	hleSkipDeadbeef();
2138 
2139 	int exitStatus = currentMIPS->r[MIPS_REG_V0];
2140 	PSPThread *thread = __GetCurrentThread();
2141 	_dbg_assert_msg_(thread != NULL, "Returned from a NULL thread.");
2142 
2143 	DEBUG_LOG(SCEKERNEL, "__KernelReturnFromThread: %d", exitStatus);
2144 	__KernelStopThread(currentThread, exitStatus, "thread returned");
2145 
2146 	hleReSchedule("thread returned");
2147 
2148 	// TODO: This should trigger ON the thread when it exits.
2149 	__KernelThreadTriggerEvent((thread->nt.attr & PSP_THREAD_ATTR_KERNEL) != 0, thread->GetUID(), THREADEVENT_EXIT);
2150 
2151 	// The stack will be deallocated when the thread is deleted.
2152 }
2153 
sceKernelExitThread(int exitStatus)2154 int sceKernelExitThread(int exitStatus) {
2155 	if (!__KernelIsDispatchEnabled() && sceKernelGetCompiledSdkVersion() >= 0x03080000)
2156 		return hleLogError(SCEKERNEL, SCE_KERNEL_ERROR_CAN_NOT_WAIT);
2157 	PSPThread *thread = __GetCurrentThread();
2158 	_dbg_assert_msg_(thread != NULL, "Exited from a NULL thread.");
2159 
2160 	INFO_LOG(SCEKERNEL, "sceKernelExitThread(%d)", exitStatus);
2161 	if (exitStatus < 0) {
2162 		exitStatus = SCE_KERNEL_ERROR_ILLEGAL_ARGUMENT;
2163 	}
2164 	__KernelStopThread(currentThread, exitStatus, "thread exited");
2165 
2166 	hleReSchedule("thread exited");
2167 
2168 	// TODO: This should trigger ON the thread when it exits.
2169 	__KernelThreadTriggerEvent((thread->nt.attr & PSP_THREAD_ATTR_KERNEL) != 0, thread->GetUID(), THREADEVENT_EXIT);
2170 
2171 	// The stack will be deallocated when the thread is deleted.
2172 	return 0;
2173 }
2174 
_sceKernelExitThread(int exitStatus)2175 void _sceKernelExitThread(int exitStatus) {
2176 	PSPThread *thread = __GetCurrentThread();
2177 	_dbg_assert_msg_(thread != NULL, "_Exited from a NULL thread.");
2178 
2179 	ERROR_LOG_REPORT(SCEKERNEL, "_sceKernelExitThread(%d): should not be called directly", exitStatus);
2180 	__KernelStopThread(currentThread, exitStatus, "thread _exited");
2181 
2182 	hleReSchedule("thread _exited");
2183 
2184 	// TODO: This should trigger ON the thread when it exits.
2185 	__KernelThreadTriggerEvent((thread->nt.attr & PSP_THREAD_ATTR_KERNEL) != 0, thread->GetUID(), THREADEVENT_EXIT);
2186 
2187 	// The stack will be deallocated when the thread is deleted.
2188 }
2189 
sceKernelExitDeleteThread(int exitStatus)2190 int sceKernelExitDeleteThread(int exitStatus) {
2191 	if (!__KernelIsDispatchEnabled() && sceKernelGetCompiledSdkVersion() >= 0x03080000)
2192 		return hleLogError(SCEKERNEL, SCE_KERNEL_ERROR_CAN_NOT_WAIT);
2193 	PSPThread *thread = __GetCurrentThread();
2194 	if (thread)
2195 	{
2196 		INFO_LOG(SCEKERNEL,"sceKernelExitDeleteThread(%d)", exitStatus);
2197 		uint32_t thread_attr = thread->nt.attr;
2198 		uint32_t uid = thread->GetUID();
2199 		__KernelDeleteThread(currentThread, exitStatus, "thread exited with delete");
2200 		// Temporary hack since we don't reschedule within callbacks.
2201 		g_inCbCount = 0;
2202 
2203 		hleReSchedule("thread exited with delete");
2204 
2205 		// TODO: This should trigger ON the thread when it exits.
2206 		__KernelThreadTriggerEvent((thread_attr & PSP_THREAD_ATTR_KERNEL) != 0, uid, THREADEVENT_EXIT);
2207 	}
2208 	else
2209 		ERROR_LOG_REPORT(SCEKERNEL, "sceKernelExitDeleteThread(%d) ERROR - could not find myself!", exitStatus);
2210 	return 0;
2211 }
2212 
sceKernelSuspendDispatchThread()2213 u32 sceKernelSuspendDispatchThread()
2214 {
2215 	if (!__InterruptsEnabled())
2216 	{
2217 		DEBUG_LOG(SCEKERNEL, "sceKernelSuspendDispatchThread(): interrupts disabled");
2218 		return SCE_KERNEL_ERROR_CPUDI;
2219 	}
2220 
2221 	u32 oldDispatchEnabled = dispatchEnabled;
2222 	dispatchEnabled = false;
2223 	DEBUG_LOG(SCEKERNEL, "%i=sceKernelSuspendDispatchThread()", oldDispatchEnabled);
2224 	hleEatCycles(940);
2225 	return oldDispatchEnabled;
2226 }
2227 
sceKernelResumeDispatchThread(u32 enabled)2228 u32 sceKernelResumeDispatchThread(u32 enabled)
2229 {
2230 	if (!__InterruptsEnabled())
2231 	{
2232 		DEBUG_LOG(SCEKERNEL, "sceKernelResumeDispatchThread(%i): interrupts disabled", enabled);
2233 		return SCE_KERNEL_ERROR_CPUDI;
2234 	}
2235 
2236 	u32 oldDispatchEnabled = dispatchEnabled;
2237 	dispatchEnabled = enabled != 0;
2238 	DEBUG_LOG(SCEKERNEL, "sceKernelResumeDispatchThread(%i) - from %i", enabled, oldDispatchEnabled);
2239 	hleReSchedule("dispatch resumed");
2240 	hleEatCycles(940);
2241 	return 0;
2242 }
2243 
__KernelIsDispatchEnabled()2244 bool __KernelIsDispatchEnabled()
2245 {
2246 	// Dispatch can never be enabled when interrupts are disabled.
2247 	return dispatchEnabled && __InterruptsEnabled();
2248 }
2249 
KernelRotateThreadReadyQueue(int priority)2250 int KernelRotateThreadReadyQueue(int priority) {
2251 	PSPThread *cur = __GetCurrentThread();
2252 
2253 	// 0 is special, it means "my current priority."
2254 	if (priority == 0)
2255 		priority = cur->nt.currentPriority;
2256 
2257 	if (priority <= 0x07 || priority > 0x77)
2258 		return SCE_KERNEL_ERROR_ILLEGAL_PRIORITY;
2259 
2260 	if (!threadReadyQueue.empty(priority)) {
2261 		// In other words, yield to everyone else.
2262 		if (cur->nt.currentPriority == priority) {
2263 			threadReadyQueue.push_back(priority, currentThread);
2264 			cur->nt.status = (cur->nt.status & ~THREADSTATUS_RUNNING) | THREADSTATUS_READY;
2265 		}
2266 		// Yield the next thread of this priority to all other threads of same priority.
2267 		else
2268 			threadReadyQueue.rotate(priority);
2269 	}
2270 
2271 	return 0;
2272 }
2273 
sceKernelRotateThreadReadyQueue(int priority)2274 int sceKernelRotateThreadReadyQueue(int priority) {
2275 	int result = KernelRotateThreadReadyQueue(priority);
2276 	if (result == 0) {
2277 		hleReSchedule("rotatethreadreadyqueue");
2278 		hleEatCycles(250);
2279 	}
2280 	return hleLogSuccessVerboseI(SCEKERNEL, result);
2281 }
2282 
sceKernelDeleteThread(int threadID)2283 int sceKernelDeleteThread(int threadID) {
2284 	if (threadID == 0 || threadID == currentThread) {
2285 		ERROR_LOG(SCEKERNEL, "sceKernelDeleteThread(%i): cannot delete current thread", threadID);
2286 		return SCE_KERNEL_ERROR_NOT_DORMANT;
2287 	}
2288 
2289 	u32 error;
2290 	PSPThread *t = kernelObjects.Get<PSPThread>(threadID, error);
2291 	if (t) {
2292 		if (!t->isStopped()) {
2293 			ERROR_LOG(SCEKERNEL, "sceKernelDeleteThread(%i): thread not dormant", threadID);
2294 			return SCE_KERNEL_ERROR_NOT_DORMANT;
2295 		}
2296 
2297 		DEBUG_LOG(SCEKERNEL, "sceKernelDeleteThread(%i)", threadID);
2298 		return __KernelDeleteThread(threadID, SCE_KERNEL_ERROR_THREAD_TERMINATED, "thread deleted");
2299 	} else {
2300 		ERROR_LOG(SCEKERNEL, "sceKernelDeleteThread(%i): thread doesn't exist", threadID);
2301 		return error;
2302 	}
2303 }
2304 
sceKernelTerminateDeleteThread(int threadID)2305 int sceKernelTerminateDeleteThread(int threadID)
2306 {
2307 	if (threadID == 0 || threadID == currentThread)
2308 	{
2309 		ERROR_LOG(SCEKERNEL, "sceKernelTerminateDeleteThread(%i): cannot terminate current thread", threadID);
2310 		return SCE_KERNEL_ERROR_ILLEGAL_THID;
2311 	}
2312 	if (!__KernelIsDispatchEnabled() && sceKernelGetCompiledSdkVersion() >= 0x03080000)
2313 		return hleLogError(SCEKERNEL, SCE_KERNEL_ERROR_CAN_NOT_WAIT);
2314 
2315 	u32 error;
2316 	PSPThread *t = kernelObjects.Get<PSPThread>(threadID, error);
2317 	if (t)
2318 	{
2319 		bool wasStopped = t->isStopped();
2320 		uint32_t attr = t->nt.attr;
2321 		uint32_t uid = t->GetUID();
2322 
2323 		INFO_LOG(SCEKERNEL, "sceKernelTerminateDeleteThread(%i)", threadID);
2324 		error = __KernelDeleteThread(threadID, SCE_KERNEL_ERROR_THREAD_TERMINATED, "thread terminated with delete");
2325 
2326 		if (!wasStopped) {
2327 			// Set v0 before calling the handler, or it'll get lost.
2328 			RETURN(error);
2329 			__KernelThreadTriggerEvent((attr & PSP_THREAD_ATTR_KERNEL) != 0, uid, THREADEVENT_EXIT);
2330 		}
2331 
2332 		return error;
2333 	}
2334 	else
2335 	{
2336 		ERROR_LOG(SCEKERNEL, "sceKernelTerminateDeleteThread(%i): thread doesn't exist", threadID);
2337 		return error;
2338 	}
2339 }
2340 
sceKernelTerminateThread(SceUID threadID)2341 int sceKernelTerminateThread(SceUID threadID) {
2342 	if (__IsInInterrupt() && sceKernelGetCompiledSdkVersion() >= 0x03080000) {
2343 		return hleLogError(SCEKERNEL, SCE_KERNEL_ERROR_ILLEGAL_CONTEXT, "in interrupt");
2344 	}
2345 	if (!__KernelIsDispatchEnabled() && sceKernelGetCompiledSdkVersion() >= 0x03080000)
2346 		return hleLogError(SCEKERNEL, SCE_KERNEL_ERROR_CAN_NOT_WAIT);
2347 	if (threadID == 0 || threadID == currentThread) {
2348 		return hleLogError(SCEKERNEL, SCE_KERNEL_ERROR_ILLEGAL_THID, "cannot terminate current thread");
2349 	}
2350 
2351 	u32 error;
2352 	PSPThread *t = kernelObjects.Get<PSPThread>(threadID, error);
2353 	if (t) {
2354 		if (t->isStopped()) {
2355 			return hleLogError(SCEKERNEL, SCE_KERNEL_ERROR_DORMANT, "already stopped");
2356 		}
2357 
2358 		// TODO: Should this reschedule?  Seems like not.
2359 		__KernelStopThread(threadID, SCE_KERNEL_ERROR_THREAD_TERMINATED, "thread terminated");
2360 
2361 		// On terminate, we reset the thread priority.  On exit, we don't always (see __KernelResetThread.)
2362 		t->nt.currentPriority = t->nt.initialPriority;
2363 
2364 		// Need to set v0 since it'll be restored.
2365 		RETURN(0);
2366 		__KernelThreadTriggerEvent((t->nt.attr & PSP_THREAD_ATTR_KERNEL) != 0, t->GetUID(), THREADEVENT_EXIT);
2367 
2368 		return hleLogSuccessInfoI(SCEKERNEL, 0);
2369 	} else {
2370 		return hleLogError(SCEKERNEL, error, "thread doesn't exist");
2371 	}
2372 }
2373 
__KernelGetCurThread()2374 SceUID __KernelGetCurThread()
2375 {
2376 	return currentThread;
2377 }
2378 
KernelCurThreadPriority()2379 int KernelCurThreadPriority() {
2380 	PSPThread *t = __GetCurrentThread();
2381 	if (t)
2382 		return t->nt.currentPriority;
2383 	return 0;
2384 }
2385 
__KernelGetCurThreadModuleId()2386 SceUID __KernelGetCurThreadModuleId() {
2387 	PSPThread *t = __GetCurrentThread();
2388 	if (t)
2389 		return t->moduleId;
2390 	return 0;
2391 }
2392 
__KernelGetCurThreadStack()2393 u32 __KernelGetCurThreadStack() {
2394 	PSPThread *t = __GetCurrentThread();
2395 	if (t)
2396 		return t->currentStack.end;
2397 	return 0;
2398 }
2399 
__KernelGetCurThreadStackStart()2400 u32 __KernelGetCurThreadStackStart() {
2401 	PSPThread *t = __GetCurrentThread();
2402 	if (t)
2403 		return t->currentStack.start;
2404 	return 0;
2405 }
2406 
sceKernelGetThreadId()2407 SceUID sceKernelGetThreadId()
2408 {
2409 	VERBOSE_LOG(SCEKERNEL, "%i = sceKernelGetThreadId()", currentThread);
2410 	hleEatCycles(180);
2411 	return currentThread;
2412 }
2413 
sceKernelGetThreadCurrentPriority()2414 int sceKernelGetThreadCurrentPriority() {
2415 	u32 retVal = __GetCurrentThread()->nt.currentPriority;
2416 	return hleLogSuccessI(SCEKERNEL, retVal);
2417 }
2418 
sceKernelChangeCurrentThreadAttr(u32 clearAttr,u32 setAttr)2419 int sceKernelChangeCurrentThreadAttr(u32 clearAttr, u32 setAttr) {
2420 	// Seems like this is the only allowed attribute?
2421 	if ((clearAttr & ~PSP_THREAD_ATTR_VFPU) != 0 || (setAttr & ~PSP_THREAD_ATTR_VFPU) != 0) {
2422 		return hleReportError(SCEKERNEL, SCE_KERNEL_ERROR_ILLEGAL_ATTR, "invalid attr");
2423 	}
2424 
2425 	PSPThread *t = __GetCurrentThread();
2426 	if (!t)
2427 		return hleReportError(SCEKERNEL, -1, "no current thread");
2428 
2429 	t->nt.attr = (t->nt.attr & ~clearAttr) | setAttr;
2430 	return hleLogSuccessI(SCEKERNEL, 0);
2431 }
2432 
2433 // Assumes validated parameters.
KernelChangeThreadPriority(SceUID threadID,int priority)2434 bool KernelChangeThreadPriority(SceUID threadID, int priority) {
2435 	u32 error;
2436 	PSPThread *thread = kernelObjects.Get<PSPThread>(threadID, error);
2437 	if (thread) {
2438 		int old = thread->nt.currentPriority;
2439 		threadReadyQueue.remove(old, threadID);
2440 
2441 		thread->nt.currentPriority = priority;
2442 		threadReadyQueue.prepare(thread->nt.currentPriority);
2443 		if (thread->isRunning()) {
2444 			thread->nt.status = (thread->nt.status & ~THREADSTATUS_RUNNING) | THREADSTATUS_READY;
2445 		}
2446 		if (thread->isReady()) {
2447 			threadReadyQueue.push_back(thread->nt.currentPriority, threadID);
2448 		}
2449 		return true;
2450 	} else {
2451 		return false;
2452 	}
2453 }
2454 
sceKernelChangeThreadPriority(SceUID threadID,int priority)2455 int sceKernelChangeThreadPriority(SceUID threadID, int priority) {
2456 	if (threadID == 0) {
2457 		threadID = __KernelGetCurThread();
2458 	}
2459 
2460 	// 0 means the current (running) thread's priority, not target's.
2461 	if (priority == 0) {
2462 		PSPThread *cur = __GetCurrentThread();
2463 		if (!cur) {
2464 			ERROR_LOG_REPORT(SCEKERNEL, "sceKernelChangeThreadPriority(%i, %i): no current thread?", threadID, priority);
2465 		} else {
2466 			priority = cur->nt.currentPriority;
2467 		}
2468 	}
2469 
2470 	u32 error;
2471 	PSPThread *thread = kernelObjects.Get<PSPThread>(threadID, error);
2472 	if (thread) {
2473 		if (thread->isStopped()) {
2474 			return hleLogError(SCEKERNEL, SCE_KERNEL_ERROR_DORMANT, "thread is dormant");
2475 		}
2476 
2477 		if (priority < 0x08 || priority > 0x77) {
2478 			return hleLogError(SCEKERNEL, SCE_KERNEL_ERROR_ILLEGAL_PRIORITY, "bogus priority");
2479 		}
2480 
2481 		KernelChangeThreadPriority(threadID, priority);
2482 
2483 		hleEatCycles(450);
2484 		hleReSchedule("change thread priority");
2485 
2486 		return hleLogSuccessI(SCEKERNEL, 0);
2487 	} else {
2488 		return hleLogError(SCEKERNEL, error, "thread not found");
2489 	}
2490 }
2491 
__KernelDelayThreadUs(u64 usec)2492 static s64 __KernelDelayThreadUs(u64 usec) {
2493 	if (usec < 200) {
2494 		return 210;
2495 	}
2496 
2497 	if (usec > 0x8000000000000000ULL) {
2498 		// Wrap around (behavior seen on firmware) and potentially wake up soon.
2499 		usec -= 0x8000000000000000ULL;
2500 	}
2501 	if (usec > 0x0010000000000000ULL) {
2502 		// This will probably overflow when we convert to cycles.
2503 		// Note: converting millenia to hundreds of years.  Should be safe, basically perma-delay.
2504 		usec >>= 12;
2505 	}
2506 
2507 	// It never wakes up right away.  It usually takes at least 15 extra us, but let's be nicer.
2508 	return usec + 10;
2509 }
2510 
sceKernelDelayThreadCB(u32 usec)2511 int sceKernelDelayThreadCB(u32 usec) {
2512 	hleEatCycles(2000);
2513 	// Note: Sometimes (0) won't delay, potentially based on how much the thread is doing.
2514 	// But a loop with just 0 often does delay, and games depend on this.  So we err on that side.
2515 	SceUID curThread = __KernelGetCurThread();
2516 	s64 delayUs = __KernelDelayThreadUs(usec);
2517 	__KernelScheduleWakeup(curThread, delayUs);
2518 	__KernelWaitCurThread(WAITTYPE_DELAY, curThread, 0, 0, true, "thread delayed");
2519 	return hleLogSuccessI(SCEKERNEL, 0, "delaying %lld usecs", delayUs);
2520 }
2521 
sceKernelDelayThread(u32 usec)2522 int sceKernelDelayThread(u32 usec) {
2523 	hleEatCycles(2000);
2524 	// Note: Sometimes (0) won't delay, potentially based on how much the thread is doing.
2525 	// But a loop with just 0 often does delay, and games depend on this.  So we err on that side.
2526 	SceUID curThread = __KernelGetCurThread();
2527 	s64 delayUs = __KernelDelayThreadUs(usec);
2528 	__KernelScheduleWakeup(curThread, delayUs);
2529 	__KernelWaitCurThread(WAITTYPE_DELAY, curThread, 0, 0, false, "thread delayed");
2530 	return hleLogSuccessI(SCEKERNEL, 0, "delaying %lld usecs", delayUs);
2531 }
2532 
sceKernelDelaySysClockThreadCB(u32 sysclockAddr)2533 int sceKernelDelaySysClockThreadCB(u32 sysclockAddr) {
2534 	auto sysclock = PSPPointer<SceKernelSysClock>::Create(sysclockAddr);
2535 	if (!sysclock.IsValid()) {
2536 		// Note: crashes on real firmware.
2537 		return hleLogError(SCEKERNEL, SCE_KERNEL_ERROR_ILLEGAL_ADDRESS, "bad pointer");
2538 	}
2539 
2540 	// This is just a u64 of usecs.  All bits are respected, but overflow can happen for very large values.
2541 	u64 usec = sysclock->lo | ((u64)sysclock->hi << 32);
2542 
2543 	SceUID curThread = __KernelGetCurThread();
2544 	s64 delayUs = __KernelDelayThreadUs(usec);
2545 	__KernelScheduleWakeup(curThread, delayUs);
2546 	__KernelWaitCurThread(WAITTYPE_DELAY, curThread, 0, 0, true, "thread delayed");
2547 	return hleLogSuccessI(SCEKERNEL, 0, "delaying %lld usecs", delayUs);
2548 }
2549 
sceKernelDelaySysClockThread(u32 sysclockAddr)2550 int sceKernelDelaySysClockThread(u32 sysclockAddr) {
2551 	auto sysclock = PSPPointer<SceKernelSysClock>::Create(sysclockAddr);
2552 	if (!sysclock.IsValid()) {
2553 		// Note: crashes on real firmware.
2554 		return hleLogError(SCEKERNEL, SCE_KERNEL_ERROR_ILLEGAL_ADDRESS, "bad pointer");
2555 	}
2556 
2557 	// This is just a u64 of usecs.  All bits are respected, but overflow can happen for very large values.
2558 	u64 usec = sysclock->lo | ((u64)sysclock->hi << 32);
2559 
2560 	SceUID curThread = __KernelGetCurThread();
2561 	s64 delayUs = __KernelDelayThreadUs(usec);
2562 	__KernelScheduleWakeup(curThread, delayUs);
2563 	__KernelWaitCurThread(WAITTYPE_DELAY, curThread, 0, 0, false, "thread delayed");
2564 	return hleLogSuccessI(SCEKERNEL, 0, "delaying %lld usecs", delayUs);
2565 }
2566 
__KernelGetThreadPrio(SceUID id)2567 u32 __KernelGetThreadPrio(SceUID id) {
2568 	u32 error;
2569 	PSPThread *thread = kernelObjects.Get<PSPThread>(id, error);
2570 	if (thread)
2571 		return thread->nt.currentPriority;
2572 	return 0;
2573 }
2574 
__KernelThreadSortPriority(SceUID thread1,SceUID thread2)2575 bool __KernelThreadSortPriority(SceUID thread1, SceUID thread2)
2576 {
2577 	return __KernelGetThreadPrio(thread1) < __KernelGetThreadPrio(thread2);
2578 }
2579 
2580 //////////////////////////////////////////////////////////////////////////
2581 // WAIT/SLEEP ETC
2582 //////////////////////////////////////////////////////////////////////////
sceKernelWakeupThread(SceUID uid)2583 int sceKernelWakeupThread(SceUID uid) {
2584 	if (uid == currentThread) {
2585 		return hleLogWarning(SCEKERNEL, SCE_KERNEL_ERROR_ILLEGAL_THID, "unable to wakeup current thread");
2586 	}
2587 
2588 	u32 error;
2589 	PSPThread *t = kernelObjects.Get<PSPThread>(uid, error);
2590 	if (t) {
2591 		if (!t->isWaitingFor(WAITTYPE_SLEEP, 0)) {
2592 			t->nt.wakeupCount++;
2593 			return hleLogSuccessI(SCEKERNEL, 0, "wakeupCount incremented to %i", t->nt.wakeupCount);
2594 		} else {
2595 			__KernelResumeThreadFromWait(uid, 0);
2596 			hleReSchedule("thread woken up");
2597 			return hleLogSuccessVerboseI(SCEKERNEL, 0, "woke thread at %i", t->nt.wakeupCount);
2598 		}
2599 	} else {
2600 		return hleLogError(SCEKERNEL, error, "bad thread id");
2601 	}
2602 }
2603 
sceKernelCancelWakeupThread(SceUID uid)2604 int sceKernelCancelWakeupThread(SceUID uid) {
2605 	if (uid == 0) {
2606 		uid = __KernelGetCurThread();
2607 	}
2608 
2609 	u32 error;
2610 	PSPThread *t = kernelObjects.Get<PSPThread>(uid, error);
2611 	if (t) {
2612 		int wCount = t->nt.wakeupCount;
2613 		t->nt.wakeupCount = 0;
2614 		return hleLogSuccessI(SCEKERNEL, wCount, "wakeupCount reset to 0");
2615 	} else {
2616 		return hleLogError(SCEKERNEL, error, "bad thread id");
2617 	}
2618 }
2619 
__KernelSleepThread(bool doCallbacks)2620 static int __KernelSleepThread(bool doCallbacks) {
2621 	PSPThread *thread = __GetCurrentThread();
2622 	if (!thread) {
2623 		ERROR_LOG_REPORT(SCEKERNEL, "sceKernelSleepThread*(): bad current thread");
2624 		return -1;
2625 	}
2626 
2627 	if (thread->nt.wakeupCount > 0) {
2628 		thread->nt.wakeupCount--;
2629 		return hleLogSuccessI(SCEKERNEL, 0, "wakeupCount decremented to %i", thread->nt.wakeupCount);
2630 	} else {
2631 		__KernelWaitCurThread(WAITTYPE_SLEEP, 0, 0, 0, doCallbacks, "thread slept");
2632 		return hleLogSuccessVerboseI(SCEKERNEL, 0, "sleeping");
2633 	}
2634 	return 0;
2635 }
2636 
sceKernelSleepThread()2637 int sceKernelSleepThread() {
2638 	return __KernelSleepThread(false);
2639 }
2640 
2641 //the homebrew PollCallbacks
sceKernelSleepThreadCB()2642 int sceKernelSleepThreadCB() {
2643 	return __KernelSleepThread(true);
2644 }
2645 
sceKernelWaitThreadEnd(SceUID threadID,u32 timeoutPtr)2646 int sceKernelWaitThreadEnd(SceUID threadID, u32 timeoutPtr)
2647 {
2648 	DEBUG_LOG(SCEKERNEL, "sceKernelWaitThreadEnd(%i, %08x)", threadID, timeoutPtr);
2649 	if (threadID == 0 || threadID == currentThread)
2650 		return SCE_KERNEL_ERROR_ILLEGAL_THID;
2651 
2652 	if (!__KernelIsDispatchEnabled())
2653 		return SCE_KERNEL_ERROR_CAN_NOT_WAIT;
2654 	if (__IsInInterrupt())
2655 		return SCE_KERNEL_ERROR_ILLEGAL_CONTEXT;
2656 
2657 	u32 error;
2658 	PSPThread *t = kernelObjects.Get<PSPThread>(threadID, error);
2659 	if (t)
2660 	{
2661 		if (t->nt.status != THREADSTATUS_DORMANT)
2662 		{
2663 			if (Memory::IsValidAddress(timeoutPtr))
2664 				__KernelScheduleThreadEndTimeout(currentThread, threadID, Memory::Read_U32(timeoutPtr));
2665 			if (std::find(t->waitingThreads.begin(), t->waitingThreads.end(), currentThread) == t->waitingThreads.end())
2666 				t->waitingThreads.push_back(currentThread);
2667 			__KernelWaitCurThread(WAITTYPE_THREADEND, threadID, 0, timeoutPtr, false, "thread wait end");
2668 		}
2669 
2670 		return t->nt.exitStatus;
2671 	}
2672 	else
2673 	{
2674 		ERROR_LOG(SCEKERNEL, "sceKernelWaitThreadEnd - bad thread %i", threadID);
2675 		return error;
2676 	}
2677 }
2678 
sceKernelWaitThreadEndCB(SceUID threadID,u32 timeoutPtr)2679 int sceKernelWaitThreadEndCB(SceUID threadID, u32 timeoutPtr)
2680 {
2681 	DEBUG_LOG(SCEKERNEL, "sceKernelWaitThreadEndCB(%i, 0x%X)", threadID, timeoutPtr);
2682 	if (threadID == 0 || threadID == currentThread)
2683 		return SCE_KERNEL_ERROR_ILLEGAL_THID;
2684 
2685 	if (!__KernelIsDispatchEnabled())
2686 		return SCE_KERNEL_ERROR_CAN_NOT_WAIT;
2687 	if (__IsInInterrupt())
2688 		return SCE_KERNEL_ERROR_ILLEGAL_CONTEXT;
2689 
2690 	u32 error;
2691 	PSPThread *t = kernelObjects.Get<PSPThread>(threadID, error);
2692 	if (t)
2693 	{
2694 		if (t->nt.status != THREADSTATUS_DORMANT)
2695 		{
2696 			if (Memory::IsValidAddress(timeoutPtr))
2697 				__KernelScheduleThreadEndTimeout(currentThread, threadID, Memory::Read_U32(timeoutPtr));
2698 			if (std::find(t->waitingThreads.begin(), t->waitingThreads.end(), currentThread) == t->waitingThreads.end())
2699 				t->waitingThreads.push_back(currentThread);
2700 			__KernelWaitCurThread(WAITTYPE_THREADEND, threadID, 0, timeoutPtr, true, "thread wait end");
2701 		}
2702 		else
2703 			hleCheckCurrentCallbacks();
2704 
2705 		return t->nt.exitStatus;
2706 	}
2707 	else
2708 	{
2709 		ERROR_LOG(SCEKERNEL, "sceKernelWaitThreadEndCB - bad thread %i", threadID);
2710 		return error;
2711 	}
2712 }
2713 
sceKernelReleaseWaitThread(SceUID threadID)2714 int sceKernelReleaseWaitThread(SceUID threadID)
2715 {
2716 	DEBUG_LOG(SCEKERNEL, "sceKernelReleaseWaitThread(%i)", threadID);
2717 	if (__KernelInCallback())
2718 		WARN_LOG_REPORT(SCEKERNEL, "UNTESTED sceKernelReleaseWaitThread() might not do the right thing in a callback");
2719 
2720 	if (threadID == 0 || threadID == currentThread)
2721 		return SCE_KERNEL_ERROR_ILLEGAL_THID;
2722 
2723 	u32 error;
2724 	PSPThread *t = kernelObjects.Get<PSPThread>(threadID, error);
2725 	if (t)
2726 	{
2727 		if (!t->isWaiting())
2728 			return SCE_KERNEL_ERROR_NOT_WAIT;
2729 		if (t->nt.waitType == WAITTYPE_HLEDELAY)
2730 		{
2731 			WARN_LOG_REPORT_ONCE(rwt_delay, SCEKERNEL, "sceKernelReleaseWaitThread(): Refusing to wake HLE-delayed thread, right thing to do?");
2732 			return SCE_KERNEL_ERROR_NOT_WAIT;
2733 		}
2734 		if (t->nt.waitType == WAITTYPE_MODULE)
2735 		{
2736 			WARN_LOG_REPORT_ONCE(rwt_sm, SCEKERNEL, "sceKernelReleaseWaitThread(): Refusing to wake start_module thread, right thing to do?");
2737 			return SCE_KERNEL_ERROR_NOT_WAIT;
2738 		}
2739 
2740 		__KernelResumeThreadFromWait(threadID, SCE_KERNEL_ERROR_RELEASE_WAIT);
2741 		hleReSchedule("thread released from wait");
2742 		return 0;
2743 	}
2744 	else
2745 	{
2746 		ERROR_LOG(SCEKERNEL, "sceKernelReleaseWaitThread - bad thread %i", threadID);
2747 		return error;
2748 	}
2749 }
2750 
sceKernelSuspendThread(SceUID threadID)2751 int sceKernelSuspendThread(SceUID threadID)
2752 {
2753 	// TODO: What about interrupts/callbacks?
2754 	if (threadID == 0 || threadID == currentThread)
2755 	{
2756 		ERROR_LOG(SCEKERNEL, "sceKernelSuspendThread(%d): cannot suspend current thread", threadID);
2757 		return SCE_KERNEL_ERROR_ILLEGAL_THID;
2758 	}
2759 
2760 	u32 error;
2761 	PSPThread *t = kernelObjects.Get<PSPThread>(threadID, error);
2762 	if (t)
2763 	{
2764 		if (t->isStopped())
2765 		{
2766 			ERROR_LOG(SCEKERNEL, "sceKernelSuspendThread(%d): thread not running", threadID);
2767 			return SCE_KERNEL_ERROR_DORMANT;
2768 		}
2769 		if (t->isSuspended())
2770 		{
2771 			ERROR_LOG(SCEKERNEL, "sceKernelSuspendThread(%d): thread already suspended", threadID);
2772 			return SCE_KERNEL_ERROR_SUSPEND;
2773 		}
2774 
2775 		DEBUG_LOG(SCEKERNEL, "sceKernelSuspendThread(%d)", threadID);
2776 		if (t->isReady())
2777 			__KernelChangeReadyState(t, threadID, false);
2778 		t->nt.status = (t->nt.status & ~THREADSTATUS_READY) | THREADSTATUS_SUSPEND;
2779 		return 0;
2780 	}
2781 	else
2782 	{
2783 		ERROR_LOG(SCEKERNEL, "sceKernelSuspendThread(%d): bad thread", threadID);
2784 		return error;
2785 	}
2786 }
2787 
sceKernelResumeThread(SceUID threadID)2788 int sceKernelResumeThread(SceUID threadID)
2789 {
2790 	// TODO: What about interrupts/callbacks?
2791 	if (threadID == 0 || threadID == currentThread)
2792 	{
2793 		ERROR_LOG(SCEKERNEL, "sceKernelResumeThread(%d): cannot suspend current thread", threadID);
2794 		return SCE_KERNEL_ERROR_ILLEGAL_THID;
2795 	}
2796 
2797 	u32 error;
2798 	PSPThread *t = kernelObjects.Get<PSPThread>(threadID, error);
2799 	if (t)
2800 	{
2801 		if (!t->isSuspended())
2802 		{
2803 			ERROR_LOG(SCEKERNEL, "sceKernelResumeThread(%d): thread not suspended", threadID);
2804 			return SCE_KERNEL_ERROR_NOT_SUSPEND;
2805 		}
2806 		DEBUG_LOG(SCEKERNEL, "sceKernelResumeThread(%d)", threadID);
2807 		t->nt.status &= ~THREADSTATUS_SUSPEND;
2808 
2809 		// If it was dormant, waiting, etc. before we don't flip its ready state.
2810 		if (t->nt.status == 0)
2811 			__KernelChangeReadyState(t, threadID, true);
2812 		hleReSchedule("resume thread from suspend");
2813 		return 0;
2814 	}
2815 	else
2816 	{
2817 		ERROR_LOG(SCEKERNEL, "sceKernelResumeThread(%d): bad thread", threadID);
2818 		return error;
2819 	}
2820 }
2821 
2822 
2823 
2824 //////////////////////////////////////////////////////////////////////////
2825 // CALLBACKS
2826 //////////////////////////////////////////////////////////////////////////
2827 
sceKernelCreateCallback(const char * name,u32 entrypoint,u32 signalArg)2828 SceUID sceKernelCreateCallback(const char *name, u32 entrypoint, u32 signalArg)
2829 {
2830 	if (!name)
2831 		return hleReportWarning(SCEKERNEL, SCE_KERNEL_ERROR_ERROR, "invalid name");
2832 	if (entrypoint & 0xF0000000)
2833 		return hleReportWarning(SCEKERNEL, SCE_KERNEL_ERROR_ILLEGAL_ADDR, "invalid func");
2834 
2835 	PSPCallback *cb = new PSPCallback();
2836 	SceUID id = kernelObjects.Create(cb);
2837 
2838 	strncpy(cb->nc.name, name, KERNELOBJECT_MAX_NAME_LENGTH);
2839 	cb->nc.name[KERNELOBJECT_MAX_NAME_LENGTH] = 0;
2840 	cb->nc.size = sizeof(NativeCallback);
2841 	cb->nc.entrypoint = entrypoint;
2842 	cb->nc.threadId = __KernelGetCurThread();
2843 	cb->nc.commonArgument = signalArg;
2844 	cb->nc.notifyCount = 0;
2845 	cb->nc.notifyArg = 0;
2846 
2847 	PSPThread *thread = __GetCurrentThread();
2848 	if (thread)
2849 		thread->callbacks.push_back(id);
2850 
2851 	return hleLogSuccessI(SCEKERNEL, id);
2852 }
2853 
sceKernelDeleteCallback(SceUID cbId)2854 int sceKernelDeleteCallback(SceUID cbId)
2855 {
2856 	u32 error;
2857 	PSPCallback *cb = kernelObjects.Get<PSPCallback>(cbId, error);
2858 	if (cb)
2859 	{
2860 		PSPThread *thread = kernelObjects.Get<PSPThread>(cb->nc.threadId, error);
2861 		if (thread)
2862 			thread->callbacks.erase(std::remove(thread->callbacks.begin(), thread->callbacks.end(), cbId), thread->callbacks.end());
2863 		if (cb->nc.notifyCount != 0)
2864 			readyCallbacksCount--;
2865 
2866 		return hleLogSuccessI(SCEKERNEL, kernelObjects.Destroy<PSPCallback>(cbId));
2867 	} else {
2868 		return hleLogError(SCEKERNEL, error, "bad cbId");
2869 	}
2870 }
2871 
2872 // Generally very rarely used, but Numblast uses it like candy.
sceKernelNotifyCallback(SceUID cbId,int notifyArg)2873 int sceKernelNotifyCallback(SceUID cbId, int notifyArg)
2874 {
2875 	u32 error;
2876 	PSPCallback *cb = kernelObjects.Get<PSPCallback>(cbId, error);
2877 	if (cb) {
2878 		__KernelNotifyCallback(cbId, notifyArg);
2879 		return hleLogSuccessI(SCEKERNEL, 0);
2880 	} else {
2881 		return hleLogError(SCEKERNEL, error, "bad cbId");
2882 	}
2883 }
2884 
sceKernelCancelCallback(SceUID cbId)2885 int sceKernelCancelCallback(SceUID cbId)
2886 {
2887 	u32 error;
2888 	PSPCallback *cb = kernelObjects.Get<PSPCallback>(cbId, error);
2889 	if (cb) {
2890 		// This just resets the notify count.
2891 		cb->nc.notifyArg = 0;
2892 		return hleLogSuccessI(SCEKERNEL, 0);
2893 	} else {
2894 		return hleLogError(SCEKERNEL, error, "bad cbId");
2895 	}
2896 }
2897 
sceKernelGetCallbackCount(SceUID cbId)2898 int sceKernelGetCallbackCount(SceUID cbId)
2899 {
2900 	u32 error;
2901 	PSPCallback *cb = kernelObjects.Get<PSPCallback>(cbId, error);
2902 	if (cb) {
2903 		return hleLogSuccessVerboseI(SCEKERNEL, cb->nc.notifyCount);
2904 	} else {
2905 		return hleLogError(SCEKERNEL, error, "bad cbId");
2906 	}
2907 }
2908 
sceKernelReferCallbackStatus(SceUID cbId,u32 statusAddr)2909 int sceKernelReferCallbackStatus(SceUID cbId, u32 statusAddr)
2910 {
2911 	u32 error;
2912 	PSPCallback *c = kernelObjects.Get<PSPCallback>(cbId, error);
2913 	if (c) {
2914 		if (Memory::IsValidAddress(statusAddr) && Memory::Read_U32(statusAddr) != 0) {
2915 			Memory::WriteStruct(statusAddr, &c->nc);
2916 			return hleLogSuccessI(SCEKERNEL, 0);
2917 		} else {
2918 			return hleLogDebug(SCEKERNEL, 0, "struct size was 0");
2919 		}
2920 	} else {
2921 		return hleLogError(SCEKERNEL, error, "bad cbId");
2922 	}
2923 }
2924 
sceKernelExtendThreadStack(u32 size,u32 entryAddr,u32 entryParameter)2925 u32 sceKernelExtendThreadStack(u32 size, u32 entryAddr, u32 entryParameter)
2926 {
2927 	if (size < 512)
2928 		return hleReportError(SCEKERNEL, SCE_KERNEL_ERROR_ILLEGAL_STACK_SIZE, "xxx", "stack size too small");
2929 
2930 	PSPThread *thread = __GetCurrentThread();
2931 	if (!thread)
2932 		return hleReportError(SCEKERNEL, -1, "xxx", "not on a thread?");
2933 
2934 	if (!thread->PushExtendedStack(size))
2935 		return hleReportError(SCEKERNEL, SCE_KERNEL_ERROR_NO_MEMORY, "xxx", "could not allocate new stack");
2936 
2937 	// The stack has been changed now, so it's do or die time.
2938 
2939 	// Push the old SP, RA, and PC onto the stack (so we can restore them later.)
2940 	Memory::Write_U32(currentMIPS->r[MIPS_REG_RA], thread->currentStack.end - 4);
2941 	Memory::Write_U32(currentMIPS->r[MIPS_REG_SP], thread->currentStack.end - 8);
2942 	Memory::Write_U32(currentMIPS->pc, thread->currentStack.end - 12);
2943 
2944 	if (!Memory::IsValidAddress(entryAddr)) {
2945 		Core_ExecException(entryAddr, currentMIPS->pc, ExecExceptionType::THREAD);
2946 	}
2947 
2948 	currentMIPS->pc = entryAddr;
2949 	currentMIPS->r[MIPS_REG_A0] = entryParameter;
2950 	currentMIPS->r[MIPS_REG_RA] = extendReturnHackAddr;
2951 	// Stack should stay aligned even though we saved only 3 regs.
2952 	currentMIPS->r[MIPS_REG_SP] = thread->currentStack.end - 0x10;
2953 
2954 	hleSkipDeadbeef();
2955 	return hleLogSuccessI(SCEKERNEL, 0);
2956 }
2957 
__KernelReturnFromExtendStack()2958 void __KernelReturnFromExtendStack()
2959 {
2960 	hleSkipDeadbeef();
2961 
2962 	PSPThread *thread = __GetCurrentThread();
2963 	if (!thread)
2964 	{
2965 		ERROR_LOG_REPORT(SCEKERNEL, "__KernelReturnFromExtendStack() - not on a thread?");
2966 		return;
2967 	}
2968 
2969 	// Grab the saved regs at the top of the stack.
2970 	u32 restoreRA = Memory::Read_U32(thread->currentStack.end - 4);
2971 	u32 restoreSP = Memory::Read_U32(thread->currentStack.end - 8);
2972 	u32 restorePC = Memory::Read_U32(thread->currentStack.end - 12);
2973 
2974 	if (!thread->PopExtendedStack())
2975 	{
2976 		ERROR_LOG_REPORT(SCEKERNEL, "__KernelReturnFromExtendStack() - no stack to restore?");
2977 		return;
2978 	}
2979 
2980 	if (!Memory::IsValidAddress(restorePC)) {
2981 		Core_ExecException(restorePC, currentMIPS->pc, ExecExceptionType::THREAD);
2982 	}
2983 
2984 	DEBUG_LOG(SCEKERNEL, "__KernelReturnFromExtendStack()");
2985 	currentMIPS->r[MIPS_REG_RA] = restoreRA;
2986 	currentMIPS->r[MIPS_REG_SP] = restoreSP;
2987 	currentMIPS->pc = restorePC;
2988 
2989 	// We retain whatever is in v0/v1, it gets passed on to the caller of sceKernelExtendThreadStack().
2990 }
2991 
run(MipsCall & call)2992 void ActionAfterMipsCall::run(MipsCall &call) {
2993 	u32 error;
2994 	PSPThread *thread = kernelObjects.Get<PSPThread>(threadID, error);
2995 	if (thread) {
2996 		// Resume waiting after a callback, but not from terminate/delete.
2997 		if ((thread->nt.status & (THREADSTATUS_DEAD | THREADSTATUS_DORMANT)) == 0) {
2998 			__KernelChangeReadyState(thread, threadID, (status & THREADSTATUS_READY) != 0);
2999 			thread->nt.status = status;
3000 		}
3001 		thread->nt.waitType = waitType;
3002 		thread->nt.waitID = waitID;
3003 		thread->waitInfo = waitInfo;
3004 		thread->isProcessingCallbacks = isProcessingCallbacks;
3005 		thread->currentCallbackId = currentCallbackId;
3006 	}
3007 
3008 	if (chainedAction) {
3009 		chainedAction->run(call);
3010 		delete chainedAction;
3011 	}
3012 }
3013 
setReturnValue(u32 retval)3014 void PSPThread::setReturnValue(u32 retval) {
3015 	if (GetUID() == currentThread) {
3016 		currentMIPS->r[MIPS_REG_V0] = retval;
3017 	} else {
3018 		context.r[MIPS_REG_V0] = retval;
3019 	}
3020 }
3021 
setReturnValue(u64 retval)3022 void PSPThread::setReturnValue(u64 retval) {
3023 	if (GetUID() == currentThread) {
3024 		currentMIPS->r[MIPS_REG_V0] = retval & 0xFFFFFFFF;
3025 		currentMIPS->r[MIPS_REG_V1] = (retval >> 32) & 0xFFFFFFFF;
3026 	} else {
3027 		context.r[MIPS_REG_V0] = retval & 0xFFFFFFFF;
3028 		context.r[MIPS_REG_V1] = (retval >> 32) & 0xFFFFFFFF;
3029 	}
3030 }
3031 
resumeFromWait()3032 void PSPThread::resumeFromWait() {
3033 	nt.status &= ~THREADSTATUS_WAIT;
3034 	if (!(nt.status & (THREADSTATUS_WAITSUSPEND | THREADSTATUS_DORMANT | THREADSTATUS_DEAD)))
3035 		__KernelChangeReadyState(this, GetUID(), true);
3036 
3037 	// Non-waiting threads do not process callbacks.
3038 	isProcessingCallbacks = false;
3039 }
3040 
isWaitingFor(WaitType type,int id) const3041 bool PSPThread::isWaitingFor(WaitType type, int id) const {
3042 	if (nt.status & THREADSTATUS_WAIT)
3043 		return nt.waitType == type && nt.waitID == id;
3044 	return false;
3045 }
3046 
getWaitID(WaitType type) const3047 int PSPThread::getWaitID(WaitType type) const {
3048 	if (nt.waitType == type)
3049 		return nt.waitID;
3050 	return 0;
3051 }
3052 
getWaitInfo() const3053 ThreadWaitInfo PSPThread::getWaitInfo() const {
3054 	return waitInfo;
3055 }
3056 
__KernelSwitchContext(PSPThread * target,const char * reason)3057 void __KernelSwitchContext(PSPThread *target, const char *reason) {
3058 	u32 oldPC = 0;
3059 	SceUID oldUID = 0;
3060 	const char *oldName = hleCurrentThreadName != NULL ? hleCurrentThreadName : "(none)";
3061 
3062 	PSPThread *cur = __GetCurrentThread();
3063 	if (cur)  // It might just have been deleted.
3064 	{
3065 		__KernelSaveContext(&cur->context, (cur->nt.attr & PSP_THREAD_ATTR_VFPU) != 0);
3066 		oldPC = currentMIPS->pc;
3067 		oldUID = cur->GetUID();
3068 
3069 		// Normally this is taken care of in __KernelNextThread().
3070 		if (cur->isRunning())
3071 			__KernelChangeReadyState(cur, oldUID, true);
3072 	}
3073 
3074 	if (target)
3075 	{
3076 		__SetCurrentThread(target, target->GetUID(), target->nt.name);
3077 		__KernelChangeReadyState(target, currentThread, false);
3078 		target->nt.status = (target->nt.status | THREADSTATUS_RUNNING) & ~THREADSTATUS_READY;
3079 
3080 		__KernelLoadContext(&target->context, (target->nt.attr & PSP_THREAD_ATTR_VFPU) != 0);
3081 	}
3082 	else
3083 		__SetCurrentThread(NULL, 0, NULL);
3084 
3085 	const bool fromIdle = oldUID == threadIdleID[0] || oldUID == threadIdleID[1];
3086 	const bool toIdle = currentThread == threadIdleID[0] || currentThread == threadIdleID[1];
3087 #if DEBUG_LEVEL <= MAX_LOGLEVEL || DEBUG_LOG == NOTICE_LOG
3088 	if (!(fromIdle && toIdle))
3089 	{
3090 		u64 nowCycles = CoreTiming::GetTicks();
3091 		s64 consumedCycles = nowCycles - lastSwitchCycles;
3092 		lastSwitchCycles = nowCycles;
3093 
3094 		DEBUG_LOG(SCEKERNEL, "Context switch: %s -> %s (%i->%i, pc: %08x->%08x, %s) +%lldus",
3095 			oldName, hleCurrentThreadName,
3096 			oldUID, currentThread,
3097 			oldPC, currentMIPS->pc,
3098 			reason,
3099 			cyclesToUs(consumedCycles));
3100 	}
3101 #endif
3102 
3103 	// Switching threads eats some cycles.  This is a low approximation.
3104 	if (fromIdle && toIdle) {
3105 		// Don't eat any cycles going between idle.
3106 	} else if (fromIdle || toIdle) {
3107 		currentMIPS->downcount -= 1200;
3108 	} else {
3109 		currentMIPS->downcount -= 2700;
3110 	}
3111 
3112 	if (target)
3113 	{
3114 		// No longer waiting.
3115 		target->nt.waitType = WAITTYPE_NONE;
3116 		target->nt.waitID = 0;
3117 
3118 		__KernelExecutePendingMipsCalls(target, true);
3119 	}
3120 }
3121 
__KernelChangeThreadState(PSPThread * thread,ThreadStatus newStatus)3122 void __KernelChangeThreadState(PSPThread *thread, ThreadStatus newStatus) {
3123 	if (!thread || thread->nt.status == newStatus)
3124 		return;
3125 
3126 	if (!dispatchEnabled && thread == __GetCurrentThread() && newStatus != THREADSTATUS_RUNNING) {
3127 		ERROR_LOG(SCEKERNEL, "Dispatching suspended, not changing thread state");
3128 		return;
3129 	}
3130 
3131 	// TODO: JPSCP has many conditions here, like removing wait timeout actions etc.
3132 	// if (thread->nt.status == THREADSTATUS_WAIT && newStatus != THREADSTATUS_WAITSUSPEND) {
3133 
3134 	__KernelChangeReadyState(thread, thread->GetUID(), (newStatus & THREADSTATUS_READY) != 0);
3135 	thread->nt.status = newStatus;
3136 
3137 	if (newStatus == THREADSTATUS_WAIT) {
3138 		if (thread->nt.waitType == WAITTYPE_NONE) {
3139 			ERROR_LOG(SCEKERNEL, "Waittype none not allowed here");
3140 		}
3141 
3142 		// Schedule deletion of stopped threads here.  if (thread->isStopped())
3143 	}
3144 }
3145 
3146 
3147 
__CanExecuteCallbackNow(PSPThread * thread)3148 static bool __CanExecuteCallbackNow(PSPThread *thread) {
3149 	return currentCallbackThreadID == 0 && g_inCbCount == 0;
3150 }
3151 
3152 // Takes ownership of afterAction.
__KernelCallAddress(PSPThread * thread,u32 entryPoint,PSPAction * afterAction,const u32 args[],int numargs,bool reschedAfter,SceUID cbId)3153 void __KernelCallAddress(PSPThread *thread, u32 entryPoint, PSPAction *afterAction, const u32 args[], int numargs, bool reschedAfter, SceUID cbId) {
3154 	if (!thread || thread->isStopped()) {
3155 		WARN_LOG_REPORT(SCEKERNEL, "Running mipscall on dormant thread");
3156 	}
3157 
3158 	_dbg_assert_msg_(numargs <= 6, "MipsCalls can only take 6 args.");
3159 
3160 	if (thread) {
3161 		ActionAfterMipsCall *after = (ActionAfterMipsCall *) __KernelCreateAction(actionAfterMipsCall);
3162 		after->chainedAction = afterAction;
3163 		after->threadID = thread->GetUID();
3164 		after->status = thread->nt.status;
3165 		after->waitType = (WaitType)(u32)thread->nt.waitType;
3166 		after->waitID = thread->nt.waitID;
3167 		after->waitInfo = thread->waitInfo;
3168 		after->isProcessingCallbacks = thread->isProcessingCallbacks;
3169 		after->currentCallbackId = thread->currentCallbackId;
3170 
3171 		afterAction = after;
3172 
3173 		if (thread->nt.waitType != WAITTYPE_NONE) {
3174 			// If it's a callback, tell the wait to stop.
3175 			if (cbId > 0) {
3176 				if (waitTypeFuncs[thread->nt.waitType].beginFunc != NULL) {
3177 					waitTypeFuncs[thread->nt.waitType].beginFunc(after->threadID, thread->currentCallbackId);
3178 				} else {
3179 					ERROR_LOG_REPORT(HLE, "Missing begin/restore funcs for wait type %d", thread->nt.waitType);
3180 				}
3181 			}
3182 
3183 			// Release thread from waiting
3184 			thread->nt.waitType = WAITTYPE_NONE;
3185 		}
3186 
3187 		__KernelChangeThreadState(thread, THREADSTATUS_READY);
3188 	}
3189 
3190 	MipsCall *call = new MipsCall();
3191 	call->entryPoint = entryPoint;
3192 	for (int i = 0; i < numargs; i++) {
3193 		call->args[i] = args[i];
3194 	}
3195 	call->numArgs = (int) numargs;
3196 	call->doAfter = afterAction;
3197 	call->tag = "callAddress";
3198 	call->cbId = cbId;
3199 
3200 	u32 callId = mipsCalls.add(call);
3201 
3202 	bool called = false;
3203 	if ((!thread || thread == __GetCurrentThread())) {
3204 		if (__CanExecuteCallbackNow(thread)) {
3205 			thread = __GetCurrentThread();
3206 			__KernelChangeThreadState(thread, THREADSTATUS_RUNNING);
3207 			called = __KernelExecuteMipsCallOnCurrentThread(callId, reschedAfter);
3208 		}
3209 	}
3210 
3211 	if (!called) {
3212 		if (thread) {
3213 			DEBUG_LOG(SCEKERNEL, "Making mipscall pending on thread");
3214 			thread->pendingMipsCalls.push_back(callId);
3215 		} else {
3216 			WARN_LOG(SCEKERNEL, "Ignoring mispcall on NULL/deleted thread");
3217 		}
3218 	}
3219 }
3220 
__KernelDirectMipsCall(u32 entryPoint,PSPAction * afterAction,u32 args[],int numargs,bool reschedAfter)3221 void __KernelDirectMipsCall(u32 entryPoint, PSPAction *afterAction, u32 args[], int numargs, bool reschedAfter) {
3222 	__KernelCallAddress(__GetCurrentThread(), entryPoint, afterAction, args, numargs, reschedAfter, 0);
3223 }
3224 
__KernelExecuteMipsCallOnCurrentThread(u32 callId,bool reschedAfter)3225 bool __KernelExecuteMipsCallOnCurrentThread(u32 callId, bool reschedAfter)
3226 {
3227 	hleSkipDeadbeef();
3228 
3229 	PSPThread *cur = __GetCurrentThread();
3230 	if (cur == nullptr) {
3231 		ERROR_LOG(SCEKERNEL, "__KernelExecuteMipsCallOnCurrentThread(): Bad current thread");
3232 		return false;
3233 	}
3234 
3235 	if (g_inCbCount > 0) {
3236 		WARN_LOG_REPORT(SCEKERNEL, "__KernelExecuteMipsCallOnCurrentThread(): Already in a callback!");
3237 	}
3238 	DEBUG_LOG(SCEKERNEL, "Executing mipscall %i", callId);
3239 	MipsCall *call = mipsCalls.get(callId);
3240 
3241 	// Grab some MIPS stack space.
3242 	u32 &sp = currentMIPS->r[MIPS_REG_SP];
3243 	if (!Memory::IsValidAddress(sp - 32 * 4)) {
3244 		ERROR_LOG_REPORT(SCEKERNEL, "__KernelExecuteMipsCallOnCurrentThread(): Not enough free stack");
3245 		return false;
3246 	}
3247 
3248 	// Let's just save regs generously.  Better to be safe.
3249 	sp -= 32 * 4;
3250 	for (int i = MIPS_REG_A0; i <= MIPS_REG_T7; ++i) {
3251 		Memory::Write_U32(currentMIPS->r[i], sp + i * 4);
3252 	}
3253 	Memory::Write_U32(currentMIPS->r[MIPS_REG_T8], sp + MIPS_REG_T8 * 4);
3254 	Memory::Write_U32(currentMIPS->r[MIPS_REG_T9], sp + MIPS_REG_T9 * 4);
3255 	Memory::Write_U32(currentMIPS->r[MIPS_REG_RA], sp + MIPS_REG_RA * 4);
3256 
3257 	// Save the few regs that need saving
3258 	call->savedPc = currentMIPS->pc;
3259 	call->savedV0 = currentMIPS->r[MIPS_REG_V0];
3260 	call->savedV1 = currentMIPS->r[MIPS_REG_V1];
3261 	call->savedId = cur->currentMipscallId;
3262 	call->reschedAfter = reschedAfter;
3263 
3264 	if (!Memory::IsValidAddress(call->entryPoint)) {
3265 		Core_ExecException(call->entryPoint, currentMIPS->pc, ExecExceptionType::THREAD);
3266 	}
3267 
3268 	// Set up the new state
3269 	currentMIPS->pc = call->entryPoint;
3270 	currentMIPS->r[MIPS_REG_RA] = __KernelCallbackReturnAddress();
3271 	cur->currentMipscallId = callId;
3272 	for (int i = 0; i < call->numArgs; i++) {
3273 		currentMIPS->r[MIPS_REG_A0 + i] = call->args[i];
3274 	}
3275 
3276 	if (call->cbId != 0)
3277 		g_inCbCount++;
3278 	currentCallbackThreadID = currentThread;
3279 
3280 	return true;
3281 }
3282 
__KernelReturnFromMipsCall()3283 void __KernelReturnFromMipsCall()
3284 {
3285 	hleSkipDeadbeef();
3286 
3287 	PSPThread *cur = __GetCurrentThread();
3288 	if (cur == NULL)
3289 	{
3290 		ERROR_LOG(SCEKERNEL, "__KernelReturnFromMipsCall(): Bad current thread");
3291 		return;
3292 	}
3293 
3294 	u32 callId = cur->currentMipscallId;
3295 	MipsCall *call = mipsCalls.pop(callId);
3296 
3297 	// Value returned by the callback function
3298 	u32 retVal = currentMIPS->r[MIPS_REG_V0];
3299 	DEBUG_LOG(SCEKERNEL, "__KernelReturnFromMipsCall(), returned %08x", retVal);
3300 
3301 	// TODO: Should also save/restore wait state here?
3302 	if (call->doAfter) {
3303 		call->doAfter->run(*call);
3304 		delete call->doAfter;
3305 		call->doAfter = nullptr;
3306 	}
3307 
3308 	u32 &sp = currentMIPS->r[MIPS_REG_SP];
3309 	for (int i = MIPS_REG_A0; i <= MIPS_REG_T7; ++i) {
3310 		currentMIPS->r[i] = Memory::Read_U32(sp + i * 4);
3311 	}
3312 	currentMIPS->r[MIPS_REG_T8] = Memory::Read_U32(sp + MIPS_REG_T8 * 4);
3313 	currentMIPS->r[MIPS_REG_T9] = Memory::Read_U32(sp + MIPS_REG_T9 * 4);
3314 	currentMIPS->r[MIPS_REG_RA] = Memory::Read_U32(sp + MIPS_REG_RA * 4);
3315 	sp += 32 * 4;
3316 
3317 	if (!Memory::IsValidAddress(call->savedPc)) {
3318 		Core_ExecException(call->savedPc, currentMIPS->pc, ExecExceptionType::THREAD);
3319 	}
3320 
3321 	currentMIPS->pc = call->savedPc;
3322 	// This is how we set the return value.
3323 	currentMIPS->r[MIPS_REG_V0] = call->savedV0;
3324 	currentMIPS->r[MIPS_REG_V1] = call->savedV1;
3325 	cur->currentMipscallId = call->savedId;
3326 
3327 	// If the thread called ExitDelete, we might've already decreased g_inCbCount.
3328 	if (call->cbId != 0 && g_inCbCount > 0) {
3329 		g_inCbCount--;
3330 	}
3331 	currentCallbackThreadID = 0;
3332 
3333 	if (cur->nt.waitType != WAITTYPE_NONE)
3334 	{
3335 		if (call->cbId > 0)
3336 		{
3337 			if (waitTypeFuncs[cur->nt.waitType].endFunc != NULL)
3338 				waitTypeFuncs[cur->nt.waitType].endFunc(cur->GetUID(), cur->currentCallbackId);
3339 			else
3340 				ERROR_LOG_REPORT(HLE, "Missing begin/restore funcs for wait type %d", cur->nt.waitType);
3341 		}
3342 	}
3343 
3344 	// yeah! back in the real world, let's keep going. Should we process more callbacks?
3345 	if (!__KernelExecutePendingMipsCalls(cur, call->reschedAfter)) {
3346 		// Sometimes, we want to stay on the thread.
3347 		int threadReady = cur->nt.status & (THREADSTATUS_READY | THREADSTATUS_RUNNING);
3348 		if (call->reschedAfter || threadReady == 0)
3349 			__KernelReSchedule("return from callback");
3350 
3351 		// Now seems like a good time to clear out any pending deletes.
3352 		for (SceUID delThread : pendingDeleteThreads) {
3353 			kernelObjects.Destroy<PSPThread>(delThread);
3354 		}
3355 		pendingDeleteThreads.clear();
3356 	}
3357 
3358 	delete call;
3359 }
3360 
3361 // First arg must be current thread, passed to avoid perf cost of a lookup.
__KernelExecutePendingMipsCalls(PSPThread * thread,bool reschedAfter)3362 bool __KernelExecutePendingMipsCalls(PSPThread *thread, bool reschedAfter) {
3363 	_dbg_assert_msg_(thread->GetUID() == __KernelGetCurThread(), "__KernelExecutePendingMipsCalls() should be called only with the current thread.");
3364 
3365 	if (thread->pendingMipsCalls.empty()) {
3366 		// Nothing to do
3367 		return false;
3368 	}
3369 
3370 	if (__CanExecuteCallbackNow(thread))
3371 	{
3372 		// Pop off the first pending mips call
3373 		u32 callId = thread->pendingMipsCalls.front();
3374 		thread->pendingMipsCalls.pop_front();
3375 		if (__KernelExecuteMipsCallOnCurrentThread(callId, reschedAfter)) {
3376 			return true;
3377 		}
3378 	}
3379 	return false;
3380 }
3381 
3382 // Executes the callback, when it next is context switched to.
__KernelRunCallbackOnThread(SceUID cbId,PSPThread * thread,bool reschedAfter)3383 static void __KernelRunCallbackOnThread(SceUID cbId, PSPThread *thread, bool reschedAfter) {
3384 	u32 error;
3385 	PSPCallback *cb = kernelObjects.Get<PSPCallback>(cbId, error);
3386 	if (!cb) {
3387 		ERROR_LOG(SCEKERNEL, "__KernelRunCallbackOnThread: Bad cbId %i", cbId);
3388 		return;
3389 	}
3390 
3391 	DEBUG_LOG(SCEKERNEL, "__KernelRunCallbackOnThread: Turning callback %i into pending mipscall", cbId);
3392 
3393 	// Alright, we're on the right thread
3394 	// Should save/restore wait state?
3395 
3396 	const u32 args[] = {(u32) cb->nc.notifyCount, (u32) cb->nc.notifyArg, cb->nc.commonArgument};
3397 
3398 	// Clear the notify count / arg
3399 	cb->nc.notifyCount = 0;
3400 	cb->nc.notifyArg = 0;
3401 
3402 	ActionAfterCallback *action = (ActionAfterCallback *) __KernelCreateAction(actionAfterCallback);
3403 	if (action != NULL)
3404 		action->setCallback(cbId);
3405 	else
3406 		ERROR_LOG(SCEKERNEL, "Something went wrong creating a restore action for a callback.");
3407 
3408 	__KernelCallAddress(thread, cb->nc.entrypoint, action, args, 3, reschedAfter, cbId);
3409 }
3410 
run(MipsCall & call)3411 void ActionAfterCallback::run(MipsCall &call) {
3412 	if (cbId != -1) {
3413 		u32 error;
3414 		PSPCallback *cb = kernelObjects.Get<PSPCallback>(cbId, error);
3415 		if (cb) {
3416 			PSPThread *t = kernelObjects.Get<PSPThread>(cb->nc.threadId, error);
3417 			if (t) {
3418 				// Check for other callbacks to run (including ones this callback scheduled.)
3419 				__KernelCheckThreadCallbacks(t, true);
3420 			}
3421 
3422 			DEBUG_LOG(SCEKERNEL, "Left callback %i - %s", cbId, cb->nc.name);
3423 			// Callbacks that don't return 0 are deleted. But should this be done here?
3424 			if (currentMIPS->r[MIPS_REG_V0] != 0) {
3425 				DEBUG_LOG(SCEKERNEL, "ActionAfterCallback::run(): Callback returned non-zero, gets deleted!");
3426 				kernelObjects.Destroy<PSPCallback>(cbId);
3427 			}
3428 		}
3429 	}
3430 }
3431 
__KernelCurHasReadyCallbacks()3432 bool __KernelCurHasReadyCallbacks() {
3433 	if (readyCallbacksCount == 0) {
3434 		return false;
3435 	}
3436 
3437 	PSPThread *thread = __GetCurrentThread();
3438 	u32 error;
3439 	for (auto it = thread->callbacks.begin(), end = thread->callbacks.end(); it != end; ++it) {
3440 		PSPCallback *callback = kernelObjects.Get<PSPCallback>(*it, error);
3441 		if (callback && callback->nc.notifyCount != 0) {
3442 			return true;
3443 		}
3444 	}
3445 	return false;
3446 }
3447 
3448 // Check callbacks on the current thread only.
3449 // Returns true if any callbacks were processed on the current thread.
__KernelCheckThreadCallbacks(PSPThread * thread,bool force)3450 bool __KernelCheckThreadCallbacks(PSPThread *thread, bool force) {
3451 	if (!thread || (!thread->isProcessingCallbacks && !force)) {
3452 		return false;
3453 	}
3454 
3455 	if (!thread->callbacks.empty()) {
3456 		u32 error;
3457 		for (auto it = thread->callbacks.begin(), end = thread->callbacks.end(); it != end; ++it) {
3458 			PSPCallback *callback = kernelObjects.Get<PSPCallback>(*it, error);
3459 			if (callback && callback->nc.notifyCount != 0) {
3460 				__KernelRunCallbackOnThread(callback->GetUID(), thread, !force);
3461 				readyCallbacksCount--;
3462 				return true;
3463 			}
3464 		}
3465 	}
3466 	return false;
3467 }
3468 
3469 // Checks for callbacks on all threads
__KernelCheckCallbacks()3470 bool __KernelCheckCallbacks() {
3471 	// Let's not check every thread all the time, callbacks are fairly uncommon.
3472 	if (readyCallbacksCount == 0) {
3473 		return false;
3474 	}
3475 	if (readyCallbacksCount < 0) {
3476 		ERROR_LOG_REPORT(SCEKERNEL, "readyCallbacksCount became negative: %i", readyCallbacksCount);
3477 	}
3478 	if (__IsInInterrupt() || !__KernelIsDispatchEnabled() || __KernelInCallback()) {
3479 		// TODO: Technically, other callbacks can run when a thread within a callback is waiting.
3480 		// However, callbacks that were pending before the current callback started won't be run.
3481 		// This is pretty uncommon, and not yet handled correctly.
3482 		return false;
3483 	}
3484 
3485 	bool processed = false;
3486 
3487 	u32 error;
3488 	for (auto iter = threadqueue.begin(); iter != threadqueue.end(); ++iter) {
3489 		PSPThread *thread = kernelObjects.Get<PSPThread>(*iter, error);
3490 		if (thread && __KernelCheckThreadCallbacks(thread, false)) {
3491 			processed = true;
3492 		}
3493 	}
3494 
3495 	if (processed) {
3496 		return __KernelExecutePendingMipsCalls(__GetCurrentThread(), true);
3497 	}
3498 	return false;
3499 }
3500 
__KernelForceCallbacks()3501 bool __KernelForceCallbacks()
3502 {
3503 	// Let's not check every thread all the time, callbacks are fairly uncommon.
3504 	if (readyCallbacksCount == 0) {
3505 		return false;
3506 	}
3507 	if (readyCallbacksCount < 0) {
3508 		ERROR_LOG_REPORT(SCEKERNEL, "readyCallbacksCount became negative: %i", readyCallbacksCount);
3509 	}
3510 
3511 	PSPThread *curThread = __GetCurrentThread();
3512 
3513 	bool callbacksProcessed = __KernelCheckThreadCallbacks(curThread, true);
3514 	if (callbacksProcessed)
3515 		__KernelExecutePendingMipsCalls(curThread, false);
3516 
3517 	return callbacksProcessed;
3518 }
3519 
3520 // Not wrapped because it has special return logic.
sceKernelCheckCallback()3521 void sceKernelCheckCallback()
3522 {
3523 	// Start with yes.
3524 	RETURN(1);
3525 
3526 	bool callbacksProcessed = __KernelForceCallbacks();
3527 
3528 	if (callbacksProcessed) {
3529 		DEBUG_LOG(SCEKERNEL, "sceKernelCheckCallback() - processed a callback.");
3530 		// The RETURN(1) above is still active here, unless __KernelForceCallbacks changed it.
3531 	} else {
3532 		RETURN(0);
3533 	}
3534 	hleEatCycles(230);
3535 }
3536 
__KernelInCallback()3537 bool __KernelInCallback()
3538 {
3539 	return (g_inCbCount != 0);
3540 }
3541 
__KernelNotifyCallback(SceUID cbId,int notifyArg)3542 void __KernelNotifyCallback(SceUID cbId, int notifyArg)
3543 {
3544 	u32 error;
3545 
3546 	PSPCallback *cb = kernelObjects.Get<PSPCallback>(cbId, error);
3547 	if (!cb) {
3548 		// Yeah, we're screwed, this shouldn't happen.
3549 		ERROR_LOG(SCEKERNEL, "__KernelNotifyCallback - invalid callback %08x", cbId);
3550 		return;
3551 	}
3552 	if (cb->nc.notifyCount == 0) {
3553 		readyCallbacksCount++;
3554 	}
3555 	cb->nc.notifyCount++;
3556 	cb->nc.notifyArg = notifyArg;
3557 }
3558 
__KernelRegisterWaitTypeFuncs(WaitType type,WaitBeginCallbackFunc beginFunc,WaitEndCallbackFunc endFunc)3559 void __KernelRegisterWaitTypeFuncs(WaitType type, WaitBeginCallbackFunc beginFunc, WaitEndCallbackFunc endFunc)
3560 {
3561 	waitTypeFuncs[type].beginFunc = beginFunc;
3562 	waitTypeFuncs[type].endFunc = endFunc;
3563 }
3564 
GetThreadsInfo()3565 std::vector<DebugThreadInfo> GetThreadsInfo() {
3566 	std::lock_guard<std::mutex> guard(threadqueueLock);
3567 	std::vector<DebugThreadInfo> threadList;
3568 
3569 	u32 error;
3570 	for (const auto uid : threadqueue) {
3571 		PSPThread *t = kernelObjects.Get<PSPThread>(uid, error);
3572 		if (!t)
3573 			continue;
3574 
3575 		DebugThreadInfo info;
3576 		info.id = uid;
3577 		strncpy(info.name,t->GetName(),KERNELOBJECT_MAX_NAME_LENGTH);
3578 		info.name[KERNELOBJECT_MAX_NAME_LENGTH] = 0;
3579 		info.status = t->nt.status;
3580 		info.entrypoint = t->nt.entrypoint;
3581 		info.initialStack = t->nt.initialStack;
3582 		info.stackSize = (u32)t->nt.stackSize;
3583 		info.priority = t->nt.currentPriority;
3584 		info.waitType = (WaitType)(u32)t->nt.waitType;
3585 		info.isCurrent = uid == currentThread;
3586 		if (info.isCurrent)
3587 			info.curPC = currentMIPS->pc;
3588 		else
3589 			info.curPC = t->context.pc;
3590 		threadList.push_back(info);
3591 	}
3592 
3593 	return threadList;
3594 }
3595 
KernelDebugThread(SceUID threadID)3596 DebugInterface *KernelDebugThread(SceUID threadID) {
3597 	if (threadID == currentThread) {
3598 		return currentDebugMIPS;
3599 	}
3600 
3601 	u32 error;
3602 	PSPThread *t = kernelObjects.Get<PSPThread>(threadID, error);
3603 	if (t) {
3604 		return &t->debug;
3605 	}
3606 
3607 	return nullptr;
3608 }
3609 
__KernelChangeThreadState(SceUID threadId,ThreadStatus newStatus)3610 void __KernelChangeThreadState(SceUID threadId, ThreadStatus newStatus) {
3611 	u32 error;
3612 	PSPThread *t = kernelObjects.Get<PSPThread>(threadId, error);
3613 	if (!t)
3614 		return;
3615 
3616 	__KernelChangeThreadState(t, newStatus);
3617 }
3618 
sceKernelRegisterExitCallback(SceUID cbId)3619 int sceKernelRegisterExitCallback(SceUID cbId)
3620 {
3621 	u32 error;
3622 	PSPCallback *cb = kernelObjects.Get<PSPCallback>(cbId, error);
3623 	if (!cb)
3624 	{
3625 		WARN_LOG(SCEKERNEL, "sceKernelRegisterExitCallback(%i): invalid callback id", cbId);
3626 		if (sceKernelGetCompiledSdkVersion() >= 0x3090500)
3627 			return SCE_KERNEL_ERROR_ILLEGAL_ARGUMENT;
3628 		return 0;
3629 	}
3630 
3631 	DEBUG_LOG(SCEKERNEL, "sceKernelRegisterExitCallback(%i)", cbId);
3632 	registeredExitCbId = cbId;
3633 	return 0;
3634 }
3635 
LoadExecForUser_362A956B()3636 int LoadExecForUser_362A956B()
3637 {
3638 	WARN_LOG_REPORT(SCEKERNEL, "LoadExecForUser_362A956B()");
3639 	u32 error;
3640 	PSPCallback *cb = kernelObjects.Get<PSPCallback>(registeredExitCbId, error);
3641 	if (!cb) {
3642 		WARN_LOG(SCEKERNEL, "LoadExecForUser_362A956B() : registeredExitCbId not found 0x%x", registeredExitCbId);
3643 		return SCE_KERNEL_ERROR_UNKNOWN_CBID;
3644 	}
3645 	int cbArg = cb->nc.commonArgument;
3646 	if (!Memory::IsValidAddress(cbArg)) {
3647 		WARN_LOG(SCEKERNEL, "LoadExecForUser_362A956B() : invalid address for cbArg (0x%08X)", cbArg);
3648 		return SCE_KERNEL_ERROR_ILLEGAL_ADDR;
3649 	}
3650 	u32 unknown1 = Memory::Read_U32(cbArg - 8);
3651 	if (unknown1 >= 4) {
3652 		WARN_LOG(SCEKERNEL, "LoadExecForUser_362A956B() : invalid value unknown1 (0x%08X)", unknown1);
3653 		return SCE_KERNEL_ERROR_ILLEGAL_ARGUMENT;
3654 	}
3655 	u32 parameterArea = Memory::Read_U32(cbArg - 4);
3656 	if (!Memory::IsValidAddress(parameterArea)) {
3657 		WARN_LOG(SCEKERNEL, "LoadExecForUser_362A956B() : invalid address for parameterArea on userMemory  (0x%08X)", parameterArea);
3658 		return SCE_KERNEL_ERROR_ILLEGAL_ADDR;
3659 	}
3660 
3661 	u32 size = Memory::Read_U32(parameterArea);
3662 	if (size < 12) {
3663 		WARN_LOG(SCEKERNEL, "LoadExecForUser_362A956B() : invalid parameterArea size %d", size);
3664 		return SCE_KERNEL_ERROR_ILLEGAL_SIZE;
3665 	}
3666 	Memory::Write_U32(0, parameterArea + 4);
3667 	Memory::Write_U32(-1, parameterArea + 8);
3668 	return 0;
3669 }
3670 
3671 static const SceUID SCE_TE_THREADID_ALL_USER = 0xFFFFFFF0;
3672 
3673 struct NativeThreadEventHandler {
3674 	u32 size;
3675 	char name[KERNELOBJECT_MAX_NAME_LENGTH + 1];
3676 	SceUID threadID;
3677 	u32 mask;
3678 	u32 handlerPtr;
3679 	u32 commonArg;
3680 };
3681 
3682 struct ThreadEventHandler : public KernelObject {
GetNameThreadEventHandler3683 	const char *GetName() override { return nteh.name; }
GetTypeNameThreadEventHandler3684 	const char *GetTypeName() override { return GetStaticTypeName(); }
GetStaticTypeNameThreadEventHandler3685 	static const char *GetStaticTypeName() { return "ThreadEventHandler"; }
GetMissingErrorCodeThreadEventHandler3686 	static u32 GetMissingErrorCode() { return SCE_KERNEL_ERROR_UNKNOWN_TEID; }
GetStaticIDTypeThreadEventHandler3687 	static int GetStaticIDType() { return SCE_KERNEL_TMID_ThreadEventHandler; }
GetIDTypeThreadEventHandler3688 	int GetIDType() const override { return SCE_KERNEL_TMID_ThreadEventHandler; }
3689 
DoStateThreadEventHandler3690 	void DoState(PointerWrap &p) override {
3691 		auto s = p.Section("ThreadEventHandler", 1);
3692 		if (!s)
3693 			return;
3694 
3695 		Do(p, nteh);
3696 	}
3697 
3698 	NativeThreadEventHandler nteh;
3699 };
3700 
__KernelThreadEventHandlerObject()3701 KernelObject *__KernelThreadEventHandlerObject() {
3702 	// Default object to load from state.
3703 	return new ThreadEventHandler;
3704 }
3705 
__KernelThreadTriggerEvent(const ThreadEventHandlerList & handlers,SceUID threadID,ThreadEventType type)3706 bool __KernelThreadTriggerEvent(const ThreadEventHandlerList &handlers, SceUID threadID, ThreadEventType type) {
3707 	PSPThread *thread = __GetCurrentThread();
3708 	if (!thread || thread->isStopped()) {
3709 		SceUID nextThreadID = threadReadyQueue.peek_first();
3710 		thread = kernelObjects.GetFast<PSPThread>(nextThreadID);
3711 	}
3712 
3713 	bool hadHandlers = false;
3714 	for (auto it = handlers.begin(), end = handlers.end(); it != end; ++it) {
3715 		u32 error;
3716 		const auto teh = kernelObjects.Get<ThreadEventHandler>(*it, error);
3717 		if (!teh || (teh->nteh.mask & type) == 0) {
3718 			continue;
3719 		}
3720 
3721 		const u32 args[] = {(u32)type, (u32)threadID, teh->nteh.commonArg};
3722 		__KernelCallAddress(thread, teh->nteh.handlerPtr, nullptr, args, ARRAY_SIZE(args), true, 0);
3723 		hadHandlers = true;
3724 	}
3725 
3726 	return hadHandlers;
3727 }
3728 
__KernelThreadTriggerEvent(bool isKernel,SceUID threadID,ThreadEventType type)3729 bool __KernelThreadTriggerEvent(bool isKernel, SceUID threadID, ThreadEventType type) {
3730 	bool hadExactHandlers = false;
3731 	auto exactHandlers = threadEventHandlers.find(threadID);
3732 	if (exactHandlers != threadEventHandlers.end()) {
3733 		hadExactHandlers = __KernelThreadTriggerEvent(exactHandlers->second, threadID, type);
3734 	}
3735 
3736 	bool hadKindHandlers = false;
3737 	if (isKernel) {
3738 		auto kernelHandlers = threadEventHandlers.find(SCE_TE_THREADID_ALL_USER);
3739 		if (kernelHandlers != threadEventHandlers.end()) {
3740 			hadKindHandlers = __KernelThreadTriggerEvent(kernelHandlers->second, threadID, type);
3741 		}
3742 	} else {
3743 		auto userHandlers = threadEventHandlers.find(SCE_TE_THREADID_ALL_USER);
3744 		if (userHandlers != threadEventHandlers.end()) {
3745 			hadKindHandlers = __KernelThreadTriggerEvent(userHandlers->second, threadID, type);
3746 		}
3747 	}
3748 
3749 	return hadKindHandlers || hadExactHandlers;
3750 }
3751 
sceKernelRegisterThreadEventHandler(const char * name,SceUID threadID,u32 mask,u32 handlerPtr,u32 commonArg)3752 SceUID sceKernelRegisterThreadEventHandler(const char *name, SceUID threadID, u32 mask, u32 handlerPtr, u32 commonArg) {
3753 	if (!name) {
3754 		return hleReportError(SCEKERNEL, SCE_KERNEL_ERROR_ERROR, "invalid name");
3755 	}
3756 	if (threadID == 0) {
3757 		// "atexit"?
3758 		if (mask != THREADEVENT_EXIT) {
3759 			return hleReportError(SCEKERNEL, SCE_KERNEL_ERROR_ILLEGAL_ATTR, "invalid thread id");
3760 		}
3761 	}
3762 	u32 error;
3763 	if (kernelObjects.Get<PSPThread>(threadID, error) == NULL && threadID != SCE_TE_THREADID_ALL_USER) {
3764 		return hleReportError(SCEKERNEL, error, "bad thread id");
3765 	}
3766 	if ((mask & ~THREADEVENT_SUPPORTED) != 0) {
3767 		return hleReportError(SCEKERNEL, SCE_KERNEL_ERROR_ILLEGAL_MASK, "invalid event mask");
3768 	}
3769 
3770 	auto teh = new ThreadEventHandler;
3771 	teh->nteh.size = sizeof(teh->nteh);
3772 	strncpy(teh->nteh.name, name, KERNELOBJECT_MAX_NAME_LENGTH);
3773 	teh->nteh.name[KERNELOBJECT_MAX_NAME_LENGTH] = '\0';
3774 	teh->nteh.threadID = threadID;
3775 	teh->nteh.mask = mask;
3776 	teh->nteh.handlerPtr = handlerPtr;
3777 	teh->nteh.commonArg = commonArg;
3778 
3779 	SceUID uid = kernelObjects.Create(teh);
3780 	threadEventHandlers[threadID].push_back(uid);
3781 
3782 	return hleLogSuccessI(SCEKERNEL, uid);
3783 }
3784 
sceKernelReleaseThreadEventHandler(SceUID uid)3785 int sceKernelReleaseThreadEventHandler(SceUID uid) {
3786 	u32 error;
3787 	auto teh = kernelObjects.Get<ThreadEventHandler>(uid, error);
3788 	if (!teh) {
3789 		return hleReportError(SCEKERNEL, error, "bad handler id");
3790 	}
3791 
3792 	auto &handlers = threadEventHandlers[teh->nteh.threadID];
3793 	handlers.erase(std::remove(handlers.begin(), handlers.end(), uid), handlers.end());
3794 	return hleLogSuccessI(SCEKERNEL, kernelObjects.Destroy<ThreadEventHandler>(uid));
3795 }
3796 
sceKernelReferThreadEventHandlerStatus(SceUID uid,u32 infoPtr)3797 int sceKernelReferThreadEventHandlerStatus(SceUID uid, u32 infoPtr) {
3798 	u32 error;
3799 	auto teh = kernelObjects.Get<ThreadEventHandler>(uid, error);
3800 	if (!teh) {
3801 		return hleReportError(SCEKERNEL, error, "bad handler id");
3802 	}
3803 
3804 	if (Memory::IsValidAddress(infoPtr) && Memory::Read_U32(infoPtr) != 0) {
3805 		Memory::WriteStruct(infoPtr, &teh->nteh);
3806 		return hleLogSuccessI(SCEKERNEL, 0);
3807 	} else {
3808 		return hleLogDebug(SCEKERNEL, 0, "struct size was 0");
3809 	}
3810 }
3811