1 // Copyright (c) 2012- PPSSPP Project.
2 
3 // This program is free software: you can redistribute it and/or modify
4 // it under the terms of the GNU General Public License as published by
5 // the Free Software Foundation, version 2.0 or later versions.
6 
7 // This program is distributed in the hope that it will be useful,
8 // but WITHOUT ANY WARRANTY; without even the implied warranty of
9 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
10 // GNU General Public License 2.0 for more details.
11 
12 // A copy of the GPL 2.0 should have been included with the program.
13 // If not, see http://www.gnu.org/licenses/
14 
15 // Official git repository and contact information can be found at
16 // https://github.com/hrydgard/ppsspp and http://www.ppsspp.org/.
17 
18 #include <algorithm>
19 #include <string>
20 #include <vector>
21 #include <map>
22 
23 #include "Common/Thread/ParallelLoop.h"
24 #include "Core/CoreTiming.h"
25 #include "Core/Debugger/MemBlockInfo.h"
26 #include "Core/HLE/HLE.h"
27 #include "Core/HLE/FunctionWrappers.h"
28 #include "Core/MIPS/MIPS.h"
29 #include "Core/MemMapHelpers.h"
30 #include "Core/Reporting.h"
31 #include "Core/System.h"
32 #include "Core/ThreadPools.h"
33 #include "Common/Serialize/Serializer.h"
34 #include "Common/Serialize/SerializeFuncs.h"
35 #include "Common/Serialize/SerializeMap.h"
36 
37 #include "Core/HLE/sceKernel.h"
38 #include "Core/HLE/sceKernelThread.h"
39 #include "Core/HLE/sceKernelInterrupt.h"
40 #include "Core/HLE/sceKernelMemory.h"
41 #include "Core/HLE/KernelWaitHelpers.h"
42 
43 const int TLSPL_NUM_INDEXES = 16;
44 
45 //////////////////////////////////////////////////////////////////////////
46 // STATE BEGIN
47 BlockAllocator userMemory(256);
48 BlockAllocator kernelMemory(256);
49 
50 static int vplWaitTimer = -1;
51 static int fplWaitTimer = -1;
52 static bool tlsplUsedIndexes[TLSPL_NUM_INDEXES];
53 
54 // Thread -> TLSPL uids for thread end.
55 typedef std::multimap<SceUID, SceUID> TlsplMap;
56 static TlsplMap tlsplThreadEndChecks;
57 // STATE END
58 //////////////////////////////////////////////////////////////////////////
59 
60 #define SCE_KERNEL_HASCOMPILEDSDKVERSION 0x1000
61 #define SCE_KERNEL_HASCOMPILERVERSION    0x2000
62 
63 int flags_ = 0;
64 int sdkVersion_;
65 int compilerVersion_;
66 
67 struct FplWaitingThread
68 {
69 	SceUID threadID;
70 	u32 addrPtr;
71 	u64 pausedTimeout;
72 
operator ==FplWaitingThread73 	bool operator ==(const SceUID &otherThreadID) const
74 	{
75 		return threadID == otherThreadID;
76 	}
77 };
78 
79 struct NativeFPL
80 {
81 	u32_le size;
82 	char name[KERNELOBJECT_MAX_NAME_LENGTH+1];
83 	u32_le attr;
84 
85 	s32_le blocksize;
86 	s32_le numBlocks;
87 	s32_le numFreeBlocks;
88 	s32_le numWaitThreads;
89 };
90 
91 //FPL - Fixed Length Dynamic Memory Pool - every item has the same length
92 struct FPL : public KernelObject
93 {
FPLFPL94 	FPL() : blocks(NULL), nextBlock(0) {}
~FPLFPL95 	~FPL() {
96 		if (blocks != NULL) {
97 			delete [] blocks;
98 		}
99 	}
GetNameFPL100 	const char *GetName() override { return nf.name; }
GetTypeNameFPL101 	const char *GetTypeName() override { return GetStaticTypeName(); }
GetStaticTypeNameFPL102 	static const char *GetStaticTypeName() { return "FPL"; }
GetMissingErrorCodeFPL103 	static u32 GetMissingErrorCode() { return SCE_KERNEL_ERROR_UNKNOWN_FPLID; }
GetStaticIDTypeFPL104 	static int GetStaticIDType() { return SCE_KERNEL_TMID_Fpl; }
GetIDTypeFPL105 	int GetIDType() const override { return SCE_KERNEL_TMID_Fpl; }
106 
findFreeBlockFPL107 	int findFreeBlock() {
108 		for (int i = 0; i < nf.numBlocks; i++) {
109 			int b = nextBlock++ % nf.numBlocks;
110 			if (!blocks[b]) {
111 				return b;
112 			}
113 		}
114 		return -1;
115 	}
116 
allocateBlockFPL117 	int allocateBlock() {
118 		int block = findFreeBlock();
119 		if (block >= 0)
120 			blocks[block] = true;
121 		return block;
122 	}
123 
freeBlockFPL124 	bool freeBlock(int b) {
125 		if (blocks[b]) {
126 			blocks[b] = false;
127 			return true;
128 		}
129 		return false;
130 	}
131 
DoStateFPL132 	void DoState(PointerWrap &p) override
133 	{
134 		auto s = p.Section("FPL", 1);
135 		if (!s)
136 			return;
137 
138 		Do(p, nf);
139 		if (p.mode == p.MODE_READ)
140 			blocks = new bool[nf.numBlocks];
141 		DoArray(p, blocks, nf.numBlocks);
142 		Do(p, address);
143 		Do(p, alignedSize);
144 		Do(p, nextBlock);
145 		FplWaitingThread dv = {0};
146 		Do(p, waitingThreads, dv);
147 		Do(p, pausedWaits);
148 	}
149 
150 	NativeFPL nf;
151 	bool *blocks;
152 	u32 address;
153 	int alignedSize;
154 	int nextBlock;
155 	std::vector<FplWaitingThread> waitingThreads;
156 	// Key is the callback id it was for, or if no callback, the thread id.
157 	std::map<SceUID, FplWaitingThread> pausedWaits;
158 };
159 
160 struct VplWaitingThread
161 {
162 	SceUID threadID;
163 	u32 addrPtr;
164 	u64 pausedTimeout;
165 
operator ==VplWaitingThread166 	bool operator ==(const SceUID &otherThreadID) const
167 	{
168 		return threadID == otherThreadID;
169 	}
170 };
171 
172 struct SceKernelVplInfo
173 {
174 	SceSize_le size;
175 	char name[KERNELOBJECT_MAX_NAME_LENGTH+1];
176 	SceUInt_le attr;
177 	s32_le poolSize;
178 	s32_le freeSize;
179 	s32_le numWaitThreads;
180 };
181 
182 struct SceKernelVplBlock
183 {
184 	PSPPointer<SceKernelVplBlock> next;
185 	// Includes this info (which is 1 block / 8 bytes.)
186 	u32_le sizeInBlocks;
187 };
188 
189 struct SceKernelVplHeader {
190 	u32_le startPtr_;
191 	// TODO: Why twice?  Is there a case it changes?
192 	u32_le startPtr2_;
193 	u32_le sentinel_;
194 	u32_le sizeMinus8_;
195 	u32_le allocatedInBlocks_;
196 	PSPPointer<SceKernelVplBlock> nextFreeBlock_;
197 	SceKernelVplBlock firstBlock_;
198 
InitSceKernelVplHeader199 	void Init(u32 ptr, u32 size) {
200 		startPtr_ = ptr;
201 		startPtr2_ = ptr;
202 		sentinel_ = ptr + 7;
203 		sizeMinus8_ = size - 8;
204 		allocatedInBlocks_ = 0;
205 		nextFreeBlock_ = FirstBlockPtr();
206 
207 		firstBlock_.next = LastBlockPtr();
208 		// Includes its own header, which is one block.
209 		firstBlock_.sizeInBlocks = (size - 0x28) / 8 + 1;
210 
211 		auto lastBlock = LastBlock();
212 		lastBlock->next = FirstBlockPtr();
213 		lastBlock->sizeInBlocks = 0;
214 	}
215 
AllocateSceKernelVplHeader216 	u32 Allocate(u32 size) {
217 		u32 allocBlocks = ((size + 7) / 8) + 1;
218 		auto prev = nextFreeBlock_;
219 		do {
220 			auto b = prev->next;
221 			if (b->sizeInBlocks > allocBlocks) {
222 				b = SplitBlock(b, allocBlocks);
223 			}
224 
225 			if (b->sizeInBlocks == allocBlocks) {
226 				UnlinkFreeBlock(b, prev);
227 				return b.ptr + 8;
228 			}
229 
230 			prev = b;
231 		} while (prev.IsValid() && prev != nextFreeBlock_);
232 
233 		return (u32)-1;
234 	}
235 
FreeSceKernelVplHeader236 	bool Free(u32 ptr) {
237 		auto b = PSPPointer<SceKernelVplBlock>::Create(ptr - 8);
238 		// Is it even in the right range?  Can't be the last block, which is always 0.
239 		if (!b.IsValid() || ptr < FirstBlockPtr() || ptr >= LastBlockPtr()) {
240 			return false;
241 		}
242 		// Great, let's check if it matches our magic.
243 		if (b->next.ptr != SentinelPtr() || b->sizeInBlocks > allocatedInBlocks_) {
244 			return false;
245 		}
246 
247 		auto prev = LastBlock();
248 		do {
249 			auto next = prev->next;
250 			// Already free.
251 			if (next == b) {
252 				return false;
253 			} else if (next > b) {
254 				LinkFreeBlock(b, prev, next);
255 				return true;
256 			}
257 
258 			prev = next;
259 		} while (prev.IsValid() && prev != LastBlock());
260 
261 		// TODO: Log?
262 		return false;
263 	}
264 
FreeSizeSceKernelVplHeader265 	u32 FreeSize() const {
266 		// Size less the header and number of allocated bytes.
267 		return sizeMinus8_ + 8 - 0x20 - allocatedInBlocks_ * 8;
268 	}
269 
LinkFreeBlockSceKernelVplHeader270 	bool LinkFreeBlock(PSPPointer<SceKernelVplBlock> b, PSPPointer<SceKernelVplBlock> prev, PSPPointer<SceKernelVplBlock> next) {
271 		allocatedInBlocks_ -= b->sizeInBlocks;
272 		nextFreeBlock_ = prev;
273 
274 		// Make sure we don't consider it free later by erasing the magic.
275 		b->next = next.ptr;
276 		const auto afterB = b + b->sizeInBlocks;
277 		if (afterB == next && next->sizeInBlocks != 0) {
278 			b = MergeBlocks(b, next);
279 		}
280 
281 		const auto afterPrev = prev + prev->sizeInBlocks;
282 		if (afterPrev == b) {
283 			b = MergeBlocks(prev, b);
284 		} else {
285 			prev->next = b.ptr;
286 		}
287 
288 		return true;
289 	}
290 
UnlinkFreeBlockSceKernelVplHeader291 	void UnlinkFreeBlock(PSPPointer<SceKernelVplBlock> b, PSPPointer<SceKernelVplBlock> prev) {
292 		allocatedInBlocks_ += b->sizeInBlocks;
293 		prev->next = b->next;
294 		nextFreeBlock_ = prev;
295 		b->next = SentinelPtr();
296 	}
297 
SplitBlockSceKernelVplHeader298 	PSPPointer<SceKernelVplBlock> SplitBlock(PSPPointer<SceKernelVplBlock> b, u32 allocBlocks) {
299 		u32 prev = b.ptr;
300 		b->sizeInBlocks -= allocBlocks;
301 
302 		b += b->sizeInBlocks;
303 		b->sizeInBlocks = allocBlocks;
304 		b->next = prev;
305 
306 		return b;
307 	}
308 
ValidateSceKernelVplHeader309 	inline void Validate() {
310 		auto lastBlock = LastBlock();
311 		_dbg_assert_msg_(nextFreeBlock_->next.ptr != SentinelPtr(), "Next free block should not be allocated.");
312 		_dbg_assert_msg_(nextFreeBlock_->next.ptr != sentinel_, "Next free block should not point to sentinel.");
313 		_dbg_assert_msg_(lastBlock->sizeInBlocks == 0, "Last block should have size of 0.");
314 		_dbg_assert_msg_(lastBlock->next.ptr != SentinelPtr(), "Last block should not be allocated.");
315 		_dbg_assert_msg_(lastBlock->next.ptr != sentinel_, "Last block should not point to sentinel.");
316 
317 		auto b = PSPPointer<SceKernelVplBlock>::Create(FirstBlockPtr());
318 		bool sawFirstFree = false;
319 		while (b.ptr < lastBlock.ptr) {
320 			bool isFree = b->next.ptr != SentinelPtr();
321 			if (isFree) {
322 				if (!sawFirstFree) {
323 					_dbg_assert_msg_(lastBlock->next.ptr == b.ptr, "Last block should point to first free block.");
324 					sawFirstFree = true;
325 				}
326 				_dbg_assert_msg_(b->next.ptr != SentinelPtr(), "Free blocks should only point to other free blocks.");
327 				_dbg_assert_msg_(b->next.ptr > b.ptr, "Free blocks should be in order.");
328 				_dbg_assert_msg_(b + b->sizeInBlocks < b->next || b->next.ptr == lastBlock.ptr, "Two free blocks should not be next to each other.");
329 			} else {
330 				_dbg_assert_msg_(b->next.ptr == SentinelPtr(), "Allocated blocks should point to the sentinel.");
331 			}
332 			_dbg_assert_msg_(b->sizeInBlocks != 0, "Only the last block should have a size of 0.");
333 			b += b->sizeInBlocks;
334 		}
335 		if (!sawFirstFree) {
336 			_dbg_assert_msg_(lastBlock->next.ptr == lastBlock.ptr, "Last block should point to itself when full.");
337 		}
338 		_dbg_assert_msg_(b.ptr == lastBlock.ptr, "Blocks should not extend outside vpl.");
339 	}
340 
ListBlocksSceKernelVplHeader341 	void ListBlocks() {
342 		auto b = PSPPointer<SceKernelVplBlock>::Create(FirstBlockPtr());
343 		auto lastBlock = LastBlock();
344 		while (b.ptr < lastBlock.ptr) {
345 			bool isFree = b->next.ptr != SentinelPtr();
346 			if (nextFreeBlock_ == b && isFree) {
347 				NOTICE_LOG(SCEKERNEL, "NEXT:  %x -> %x (size %x)", b.ptr - startPtr_, b->next.ptr - startPtr_, b->sizeInBlocks * 8);
348 			} else if (isFree) {
349 				NOTICE_LOG(SCEKERNEL, "FREE:  %x -> %x (size %x)", b.ptr - startPtr_, b->next.ptr - startPtr_, b->sizeInBlocks * 8);
350 			} else {
351 				NOTICE_LOG(SCEKERNEL, "BLOCK: %x (size %x)", b.ptr - startPtr_, b->sizeInBlocks * 8);
352 			}
353 			b += b->sizeInBlocks;
354 		}
355 		NOTICE_LOG(SCEKERNEL, "LAST:  %x -> %x (size %x)", lastBlock.ptr - startPtr_, lastBlock->next.ptr - startPtr_, lastBlock->sizeInBlocks * 8);
356 	}
357 
MergeBlocksSceKernelVplHeader358 	PSPPointer<SceKernelVplBlock> MergeBlocks(PSPPointer<SceKernelVplBlock> first, PSPPointer<SceKernelVplBlock> second) {
359 		first->sizeInBlocks += second->sizeInBlocks;
360 		first->next = second->next;
361 		return first;
362 	}
363 
FirstBlockPtrSceKernelVplHeader364 	u32 FirstBlockPtr() const {
365 		return startPtr_ + 0x18;
366 	}
367 
LastBlockPtrSceKernelVplHeader368 	u32 LastBlockPtr() const {
369 		return startPtr_ + sizeMinus8_;
370 	}
371 
LastBlockSceKernelVplHeader372 	PSPPointer<SceKernelVplBlock> LastBlock() {
373 		return PSPPointer<SceKernelVplBlock>::Create(LastBlockPtr());
374 	}
375 
SentinelPtrSceKernelVplHeader376 	u32 SentinelPtr() const {
377 		return startPtr_ + 8;
378 	}
379 };
380 
381 struct VPL : public KernelObject
382 {
GetNameVPL383 	const char *GetName() override { return nv.name; }
GetTypeNameVPL384 	const char *GetTypeName() override { return GetStaticTypeName(); }
GetStaticTypeNameVPL385 	static const char *GetStaticTypeName() { return "VPL"; }
GetMissingErrorCodeVPL386 	static u32 GetMissingErrorCode() { return SCE_KERNEL_ERROR_UNKNOWN_VPLID; }
GetStaticIDTypeVPL387 	static int GetStaticIDType() { return SCE_KERNEL_TMID_Vpl; }
GetIDTypeVPL388 	int GetIDType() const override { return SCE_KERNEL_TMID_Vpl; }
389 
VPLVPL390 	VPL() : alloc(8) {
391 		header = 0;
392 	}
393 
DoStateVPL394 	void DoState(PointerWrap &p) override {
395 		auto s = p.Section("VPL", 1, 2);
396 		if (!s) {
397 			return;
398 		}
399 
400 		Do(p, nv);
401 		Do(p, address);
402 		VplWaitingThread dv = {0};
403 		Do(p, waitingThreads, dv);
404 		alloc.DoState(p);
405 		Do(p, pausedWaits);
406 
407 		if (s >= 2) {
408 			Do(p, header);
409 		}
410 	}
411 
412 	SceKernelVplInfo nv;
413 	u32 address;
414 	std::vector<VplWaitingThread> waitingThreads;
415 	// Key is the callback id it was for, or if no callback, the thread id.
416 	std::map<SceUID, VplWaitingThread> pausedWaits;
417 	BlockAllocator alloc;
418 	PSPPointer<SceKernelVplHeader> header;
419 };
420 
421 void __KernelVplTimeout(u64 userdata, int cyclesLate);
422 void __KernelFplTimeout(u64 userdata, int cyclesLate);
423 void __KernelTlsplThreadEnd(SceUID threadID);
424 
425 void __KernelVplBeginCallback(SceUID threadID, SceUID prevCallbackId);
426 void __KernelVplEndCallback(SceUID threadID, SceUID prevCallbackId);
427 void __KernelFplBeginCallback(SceUID threadID, SceUID prevCallbackId);
428 void __KernelFplEndCallback(SceUID threadID, SceUID prevCallbackId);
429 
__KernelMemoryInit()430 void __KernelMemoryInit()
431 {
432 	MemBlockInfoInit();
433 	kernelMemory.Init(PSP_GetKernelMemoryBase(), PSP_GetKernelMemoryEnd() - PSP_GetKernelMemoryBase(), false);
434 	userMemory.Init(PSP_GetUserMemoryBase(), PSP_GetUserMemoryEnd() - PSP_GetUserMemoryBase(), false);
435 	ParallelMemset(&g_threadManager, Memory::GetPointer(PSP_GetKernelMemoryBase()), 0, PSP_GetUserMemoryEnd() - PSP_GetKernelMemoryBase());
436 	NotifyMemInfo(MemBlockFlags::WRITE, PSP_GetKernelMemoryBase(), PSP_GetUserMemoryEnd() - PSP_GetKernelMemoryBase(), "MemInit");
437 	INFO_LOG(SCEKERNEL, "Kernel and user memory pools initialized");
438 
439 	vplWaitTimer = CoreTiming::RegisterEvent("VplTimeout", __KernelVplTimeout);
440 	fplWaitTimer = CoreTiming::RegisterEvent("FplTimeout", __KernelFplTimeout);
441 
442 	flags_ = 0;
443 	sdkVersion_ = 0;
444 	compilerVersion_ = 0;
445 	memset(tlsplUsedIndexes, 0, sizeof(tlsplUsedIndexes));
446 
447 	__KernelListenThreadEnd(&__KernelTlsplThreadEnd);
448 
449 	__KernelRegisterWaitTypeFuncs(WAITTYPE_VPL, __KernelVplBeginCallback, __KernelVplEndCallback);
450 	__KernelRegisterWaitTypeFuncs(WAITTYPE_FPL, __KernelFplBeginCallback, __KernelFplEndCallback);
451 
452 	// The kernel statically allocates this memory, which has some code in it.
453 	// It appears this is used for some common funcs in Kernel_Library (memcpy, lwmutex, suspend intr, etc.)
454 	// Allocating this block is necessary to have the same memory semantics as real firmware.
455 	userMemory.AllocAt(PSP_GetUserMemoryBase(), 0x4000, "usersystemlib");
456 }
457 
__KernelMemoryDoState(PointerWrap & p)458 void __KernelMemoryDoState(PointerWrap &p)
459 {
460 	auto s = p.Section("sceKernelMemory", 1, 2);
461 	if (!s)
462 		return;
463 
464 	kernelMemory.DoState(p);
465 	userMemory.DoState(p);
466 
467 	Do(p, vplWaitTimer);
468 	CoreTiming::RestoreRegisterEvent(vplWaitTimer, "VplTimeout", __KernelVplTimeout);
469 	Do(p, fplWaitTimer);
470 	CoreTiming::RestoreRegisterEvent(fplWaitTimer, "FplTimeout", __KernelFplTimeout);
471 	Do(p, flags_);
472 	Do(p, sdkVersion_);
473 	Do(p, compilerVersion_);
474 	DoArray(p, tlsplUsedIndexes, ARRAY_SIZE(tlsplUsedIndexes));
475 	if (s >= 2) {
476 		Do(p, tlsplThreadEndChecks);
477 	}
478 
479 	MemBlockInfoDoState(p);
480 }
481 
__KernelMemoryShutdown()482 void __KernelMemoryShutdown()
483 {
484 #ifdef _DEBUG
485 	INFO_LOG(SCEKERNEL,"Shutting down user memory pool: ");
486 	userMemory.ListBlocks();
487 #endif
488 	userMemory.Shutdown();
489 #ifdef _DEBUG
490 	INFO_LOG(SCEKERNEL,"Shutting down \"kernel\" memory pool: ");
491 	kernelMemory.ListBlocks();
492 #endif
493 	kernelMemory.Shutdown();
494 	tlsplThreadEndChecks.clear();
495 	MemBlockInfoShutdown();
496 }
497 
498 enum SceKernelFplAttr
499 {
500 	PSP_FPL_ATTR_FIFO     = 0x0000,
501 	PSP_FPL_ATTR_PRIORITY = 0x0100,
502 	PSP_FPL_ATTR_HIGHMEM  = 0x4000,
503 	PSP_FPL_ATTR_KNOWN    = PSP_FPL_ATTR_FIFO | PSP_FPL_ATTR_PRIORITY | PSP_FPL_ATTR_HIGHMEM,
504 };
505 
__KernelUnlockFplForThread(FPL * fpl,FplWaitingThread & threadInfo,u32 & error,int result,bool & wokeThreads)506 static bool __KernelUnlockFplForThread(FPL *fpl, FplWaitingThread &threadInfo, u32 &error, int result, bool &wokeThreads)
507 {
508 	const SceUID threadID = threadInfo.threadID;
509 	if (!HLEKernel::VerifyWait(threadID, WAITTYPE_FPL, fpl->GetUID()))
510 		return true;
511 
512 	// If result is an error code, we're just letting it go.
513 	if (result == 0)
514 	{
515 		int blockNum = fpl->allocateBlock();
516 		if (blockNum >= 0)
517 		{
518 			u32 blockPtr = fpl->address + fpl->alignedSize * blockNum;
519 			Memory::Write_U32(blockPtr, threadInfo.addrPtr);
520 			NotifyMemInfo(MemBlockFlags::SUB_ALLOC, blockPtr, fpl->alignedSize, "FplAllocate");
521 		}
522 		else
523 			return false;
524 	}
525 
526 	u32 timeoutPtr = __KernelGetWaitTimeoutPtr(threadID, error);
527 	if (timeoutPtr != 0 && fplWaitTimer != -1)
528 	{
529 		// Remove any event for this thread.
530 		s64 cyclesLeft = CoreTiming::UnscheduleEvent(fplWaitTimer, threadID);
531 		Memory::Write_U32((u32) cyclesToUs(cyclesLeft), timeoutPtr);
532 	}
533 
534 	__KernelResumeThreadFromWait(threadID, result);
535 	wokeThreads = true;
536 	return true;
537 }
538 
__KernelFplBeginCallback(SceUID threadID,SceUID prevCallbackId)539 void __KernelFplBeginCallback(SceUID threadID, SceUID prevCallbackId)
540 {
541 	auto result = HLEKernel::WaitBeginCallback<FPL, WAITTYPE_FPL, FplWaitingThread>(threadID, prevCallbackId, fplWaitTimer);
542 	if (result == HLEKernel::WAIT_CB_SUCCESS)
543 		DEBUG_LOG(SCEKERNEL, "sceKernelAllocateFplCB: Suspending fpl wait for callback");
544 	else if (result == HLEKernel::WAIT_CB_BAD_WAIT_DATA)
545 		ERROR_LOG_REPORT(SCEKERNEL, "sceKernelAllocateFplCB: wait not found to pause for callback");
546 	else
547 		WARN_LOG_REPORT(SCEKERNEL, "sceKernelAllocateFplCB: beginning callback with bad wait id?");
548 }
549 
__KernelFplEndCallback(SceUID threadID,SceUID prevCallbackId)550 void __KernelFplEndCallback(SceUID threadID, SceUID prevCallbackId)
551 {
552 	auto result = HLEKernel::WaitEndCallback<FPL, WAITTYPE_FPL, FplWaitingThread>(threadID, prevCallbackId, fplWaitTimer, __KernelUnlockFplForThread);
553 	if (result == HLEKernel::WAIT_CB_RESUMED_WAIT)
554 		DEBUG_LOG(SCEKERNEL, "sceKernelReceiveMbxCB: Resuming mbx wait from callback");
555 }
556 
__FplThreadSortPriority(FplWaitingThread thread1,FplWaitingThread thread2)557 static bool __FplThreadSortPriority(FplWaitingThread thread1, FplWaitingThread thread2)
558 {
559 	return __KernelThreadSortPriority(thread1.threadID, thread2.threadID);
560 }
561 
__KernelClearFplThreads(FPL * fpl,int reason)562 static bool __KernelClearFplThreads(FPL *fpl, int reason)
563 {
564 	u32 error;
565 	bool wokeThreads = false;
566 	for (auto iter = fpl->waitingThreads.begin(), end = fpl->waitingThreads.end(); iter != end; ++iter)
567 		__KernelUnlockFplForThread(fpl, *iter, error, reason, wokeThreads);
568 	fpl->waitingThreads.clear();
569 
570 	return wokeThreads;
571 }
572 
__KernelSortFplThreads(FPL * fpl)573 static void __KernelSortFplThreads(FPL *fpl)
574 {
575 	// Remove any that are no longer waiting.
576 	SceUID uid = fpl->GetUID();
577 	HLEKernel::CleanupWaitingThreads(WAITTYPE_FPL, uid, fpl->waitingThreads);
578 
579 	if ((fpl->nf.attr & PSP_FPL_ATTR_PRIORITY) != 0)
580 		std::stable_sort(fpl->waitingThreads.begin(), fpl->waitingThreads.end(), __FplThreadSortPriority);
581 }
582 
sceKernelCreateFpl(const char * name,u32 mpid,u32 attr,u32 blockSize,u32 numBlocks,u32 optPtr)583 int sceKernelCreateFpl(const char *name, u32 mpid, u32 attr, u32 blockSize, u32 numBlocks, u32 optPtr)
584 {
585 	if (!name)
586 	{
587 		WARN_LOG_REPORT(SCEKERNEL, "%08x=sceKernelCreateFpl(): invalid name", SCE_KERNEL_ERROR_NO_MEMORY);
588 		return SCE_KERNEL_ERROR_NO_MEMORY;
589 	}
590 	if (mpid < 1 || mpid > 9 || mpid == 7)
591 	{
592 		WARN_LOG_REPORT(SCEKERNEL, "%08x=sceKernelCreateFpl(): invalid partition %d", SCE_KERNEL_ERROR_ILLEGAL_ARGUMENT, mpid);
593 		return SCE_KERNEL_ERROR_ILLEGAL_ARGUMENT;
594 	}
595 	// We only support user right now.
596 	if (mpid != 2 && mpid != 6)
597 	{
598 		WARN_LOG_REPORT(SCEKERNEL, "%08x=sceKernelCreateFpl(): invalid partition %d", SCE_KERNEL_ERROR_ILLEGAL_PERM, mpid);
599 		return SCE_KERNEL_ERROR_ILLEGAL_PERM;
600 	}
601 	if (((attr & ~PSP_FPL_ATTR_KNOWN) & ~0xFF) != 0)
602 	{
603 		WARN_LOG_REPORT(SCEKERNEL, "%08x=sceKernelCreateFpl(): invalid attr parameter: %08x", SCE_KERNEL_ERROR_ILLEGAL_ATTR, attr);
604 		return SCE_KERNEL_ERROR_ILLEGAL_ATTR;
605 	}
606 	// There's probably a simpler way to get this same basic formula...
607 	// This is based on results from a PSP.
608 	bool illegalMemSize = blockSize == 0 || numBlocks == 0;
609 	if (!illegalMemSize && (u64) blockSize > ((0x100000000ULL / (u64) numBlocks) - 4ULL))
610 		illegalMemSize = true;
611 	if (!illegalMemSize && (u64) numBlocks >= 0x100000000ULL / (((u64) blockSize + 3ULL) & ~3ULL))
612 		illegalMemSize = true;
613 	if (illegalMemSize)
614 	{
615 		WARN_LOG_REPORT(SCEKERNEL, "%08x=sceKernelCreateFpl(): invalid blockSize/count", SCE_KERNEL_ERROR_ILLEGAL_MEMSIZE);
616 		return SCE_KERNEL_ERROR_ILLEGAL_MEMSIZE;
617 	}
618 
619 	int alignment = 4;
620 	if (optPtr != 0)
621 	{
622 		u32 size = Memory::Read_U32(optPtr);
623 		if (size > 8)
624 			WARN_LOG_REPORT(SCEKERNEL, "sceKernelCreateFpl(): unsupported extra options, size = %d", size);
625 		if (size >= 4)
626 			alignment = Memory::Read_U32(optPtr + 4);
627 		// Must be a power of 2 to be valid.
628 		if ((alignment & (alignment - 1)) != 0)
629 		{
630 			WARN_LOG_REPORT(SCEKERNEL, "%08x=sceKernelCreateFpl(): invalid alignment %d", SCE_KERNEL_ERROR_ILLEGAL_ARGUMENT, alignment);
631 			return SCE_KERNEL_ERROR_ILLEGAL_ARGUMENT;
632 		}
633 	}
634 
635 	if (alignment < 4)
636 		alignment = 4;
637 
638 	int alignedSize = ((int)blockSize + alignment - 1) & ~(alignment - 1);
639 	u32 totalSize = alignedSize * numBlocks;
640 	bool atEnd = (attr & PSP_FPL_ATTR_HIGHMEM) != 0;
641 	u32 address = userMemory.Alloc(totalSize, atEnd, "FPL");
642 	if (address == (u32)-1)
643 	{
644 		DEBUG_LOG(SCEKERNEL, "sceKernelCreateFpl(\"%s\", partition=%i, attr=%08x, bsize=%i, nb=%i) FAILED - out of ram",
645 			name, mpid, attr, blockSize, numBlocks);
646 		return SCE_KERNEL_ERROR_NO_MEMORY;
647 	}
648 
649 	FPL *fpl = new FPL;
650 	SceUID id = kernelObjects.Create(fpl);
651 
652 	strncpy(fpl->nf.name, name, KERNELOBJECT_MAX_NAME_LENGTH);
653 	fpl->nf.name[KERNELOBJECT_MAX_NAME_LENGTH] = 0;
654 	fpl->nf.attr = attr;
655 	fpl->nf.size = sizeof(fpl->nf);
656 	fpl->nf.blocksize = blockSize;
657 	fpl->nf.numBlocks = numBlocks;
658 	fpl->nf.numFreeBlocks = numBlocks;
659 	fpl->nf.numWaitThreads = 0;
660 
661 	fpl->blocks = new bool[fpl->nf.numBlocks];
662 	memset(fpl->blocks, 0, fpl->nf.numBlocks * sizeof(bool));
663 	fpl->address = address;
664 	fpl->alignedSize = alignedSize;
665 
666 	DEBUG_LOG(SCEKERNEL, "%i=sceKernelCreateFpl(\"%s\", partition=%i, attr=%08x, bsize=%i, nb=%i)",
667 		id, name, mpid, attr, blockSize, numBlocks);
668 
669 	return id;
670 }
671 
sceKernelDeleteFpl(SceUID uid)672 int sceKernelDeleteFpl(SceUID uid)
673 {
674 	hleEatCycles(600);
675 	u32 error;
676 	FPL *fpl = kernelObjects.Get<FPL>(uid, error);
677 	if (fpl)
678 	{
679 		DEBUG_LOG(SCEKERNEL, "sceKernelDeleteFpl(%i)", uid);
680 
681 		bool wokeThreads = __KernelClearFplThreads(fpl, SCE_KERNEL_ERROR_WAIT_DELETE);
682 		if (wokeThreads)
683 			hleReSchedule("fpl deleted");
684 
685 		userMemory.Free(fpl->address);
686 		return kernelObjects.Destroy<FPL>(uid);
687 	}
688 	else
689 	{
690 		DEBUG_LOG(SCEKERNEL, "sceKernelDeleteFpl(%i): invalid fpl", uid);
691 		return error;
692 	}
693 }
694 
__KernelFplTimeout(u64 userdata,int cyclesLate)695 void __KernelFplTimeout(u64 userdata, int cyclesLate)
696 {
697 	SceUID threadID = (SceUID) userdata;
698 	HLEKernel::WaitExecTimeout<FPL, WAITTYPE_FPL>(threadID);
699 }
700 
__KernelSetFplTimeout(u32 timeoutPtr)701 static void __KernelSetFplTimeout(u32 timeoutPtr)
702 {
703 	if (timeoutPtr == 0 || fplWaitTimer == -1)
704 		return;
705 
706 	int micro = (int) Memory::Read_U32(timeoutPtr);
707 
708 	// TODO: test for fpls.
709 	// This happens to be how the hardware seems to time things.
710 	if (micro <= 5)
711 		micro = 20;
712 	// Yes, this 7 is reproducible.  6 is (a lot) longer than 7.
713 	else if (micro == 7)
714 		micro = 25;
715 	else if (micro <= 215)
716 		micro = 250;
717 
718 	CoreTiming::ScheduleEvent(usToCycles(micro), fplWaitTimer, __KernelGetCurThread());
719 }
720 
sceKernelAllocateFpl(SceUID uid,u32 blockPtrAddr,u32 timeoutPtr)721 int sceKernelAllocateFpl(SceUID uid, u32 blockPtrAddr, u32 timeoutPtr)
722 {
723 	u32 error;
724 	FPL *fpl = kernelObjects.Get<FPL>(uid, error);
725 	if (fpl)
726 	{
727 		DEBUG_LOG(SCEKERNEL, "sceKernelAllocateFpl(%i, %08x, %08x)", uid, blockPtrAddr, timeoutPtr);
728 
729 		int blockNum = fpl->allocateBlock();
730 		if (blockNum >= 0) {
731 			u32 blockPtr = fpl->address + fpl->alignedSize * blockNum;
732 			Memory::Write_U32(blockPtr, blockPtrAddr);
733 			NotifyMemInfo(MemBlockFlags::SUB_ALLOC, blockPtr, fpl->alignedSize, "FplAllocate");
734 		} else {
735 			SceUID threadID = __KernelGetCurThread();
736 			HLEKernel::RemoveWaitingThread(fpl->waitingThreads, threadID);
737 			FplWaitingThread waiting = {threadID, blockPtrAddr};
738 			fpl->waitingThreads.push_back(waiting);
739 
740 			__KernelSetFplTimeout(timeoutPtr);
741 			__KernelWaitCurThread(WAITTYPE_FPL, uid, 0, timeoutPtr, false, "fpl waited");
742 		}
743 
744 		return 0;
745 	}
746 	else
747 	{
748 		DEBUG_LOG(SCEKERNEL, "sceKernelAllocateFpl(%i, %08x, %08x): invalid fpl", uid, blockPtrAddr, timeoutPtr);
749 		return error;
750 	}
751 }
752 
sceKernelAllocateFplCB(SceUID uid,u32 blockPtrAddr,u32 timeoutPtr)753 int sceKernelAllocateFplCB(SceUID uid, u32 blockPtrAddr, u32 timeoutPtr)
754 {
755 	u32 error;
756 	FPL *fpl = kernelObjects.Get<FPL>(uid, error);
757 	if (fpl)
758 	{
759 		DEBUG_LOG(SCEKERNEL, "sceKernelAllocateFplCB(%i, %08x, %08x)", uid, blockPtrAddr, timeoutPtr);
760 
761 		int blockNum = fpl->allocateBlock();
762 		if (blockNum >= 0) {
763 			u32 blockPtr = fpl->address + fpl->alignedSize * blockNum;
764 			Memory::Write_U32(blockPtr, blockPtrAddr);
765 			NotifyMemInfo(MemBlockFlags::SUB_ALLOC, blockPtr, fpl->alignedSize, "FplAllocate");
766 		} else {
767 			SceUID threadID = __KernelGetCurThread();
768 			HLEKernel::RemoveWaitingThread(fpl->waitingThreads, threadID);
769 			FplWaitingThread waiting = {threadID, blockPtrAddr};
770 			fpl->waitingThreads.push_back(waiting);
771 
772 			__KernelSetFplTimeout(timeoutPtr);
773 			__KernelWaitCurThread(WAITTYPE_FPL, uid, 0, timeoutPtr, true, "fpl waited");
774 		}
775 
776 		return 0;
777 	}
778 	else
779 	{
780 		DEBUG_LOG(SCEKERNEL, "sceKernelAllocateFplCB(%i, %08x, %08x): invalid fpl", uid, blockPtrAddr, timeoutPtr);
781 		return error;
782 	}
783 }
784 
sceKernelTryAllocateFpl(SceUID uid,u32 blockPtrAddr)785 int sceKernelTryAllocateFpl(SceUID uid, u32 blockPtrAddr)
786 {
787 	u32 error;
788 	FPL *fpl = kernelObjects.Get<FPL>(uid, error);
789 	if (fpl)
790 	{
791 		DEBUG_LOG(SCEKERNEL, "sceKernelTryAllocateFpl(%i, %08x)", uid, blockPtrAddr);
792 
793 		int blockNum = fpl->allocateBlock();
794 		if (blockNum >= 0) {
795 			u32 blockPtr = fpl->address + fpl->alignedSize * blockNum;
796 			Memory::Write_U32(blockPtr, blockPtrAddr);
797 			NotifyMemInfo(MemBlockFlags::SUB_ALLOC, blockPtr, fpl->alignedSize, "FplAllocate");
798 			return 0;
799 		} else {
800 			return SCE_KERNEL_ERROR_NO_MEMORY;
801 		}
802 	}
803 	else
804 	{
805 		DEBUG_LOG(SCEKERNEL, "sceKernelTryAllocateFpl(%i, %08x): invalid fpl", uid, blockPtrAddr);
806 		return error;
807 	}
808 }
809 
sceKernelFreeFpl(SceUID uid,u32 blockPtr)810 int sceKernelFreeFpl(SceUID uid, u32 blockPtr)
811 {
812 	if (blockPtr > PSP_GetUserMemoryEnd()) {
813 		WARN_LOG(SCEKERNEL, "%08x=sceKernelFreeFpl(%i, %08x): invalid address", SCE_KERNEL_ERROR_ILLEGAL_ADDR, uid, blockPtr);
814 		return SCE_KERNEL_ERROR_ILLEGAL_ADDR;
815 	}
816 
817 	u32 error;
818 	FPL *fpl = kernelObjects.Get<FPL>(uid, error);
819 	if (fpl) {
820 		int blockNum = (blockPtr - fpl->address) / fpl->alignedSize;
821 		if (blockNum < 0 || blockNum >= fpl->nf.numBlocks) {
822 			DEBUG_LOG(SCEKERNEL, "sceKernelFreeFpl(%i, %08x): bad block ptr", uid, blockPtr);
823 			return SCE_KERNEL_ERROR_ILLEGAL_MEMBLOCK;
824 		} else {
825 			if (fpl->freeBlock(blockNum)) {
826 				u32 blockPtr = fpl->address + fpl->alignedSize * blockNum;
827 				NotifyMemInfo(MemBlockFlags::SUB_FREE, blockPtr, fpl->alignedSize, "FplFree");
828 
829 				DEBUG_LOG(SCEKERNEL, "sceKernelFreeFpl(%i, %08x)", uid, blockPtr);
830 				__KernelSortFplThreads(fpl);
831 
832 				bool wokeThreads = false;
833 retry:
834 				for (auto iter = fpl->waitingThreads.begin(), end = fpl->waitingThreads.end(); iter != end; ++iter)
835 				{
836 					if (__KernelUnlockFplForThread(fpl, *iter, error, 0, wokeThreads))
837 					{
838 						fpl->waitingThreads.erase(iter);
839 						goto retry;
840 					}
841 				}
842 
843 				if (wokeThreads)
844 					hleReSchedule("fpl freed");
845 				return 0;
846 			} else {
847 				DEBUG_LOG(SCEKERNEL, "sceKernelFreeFpl(%i, %08x): already free", uid, blockPtr);
848 				return SCE_KERNEL_ERROR_ILLEGAL_MEMBLOCK;
849 			}
850 		}
851 	}
852 	else
853 	{
854 		DEBUG_LOG(SCEKERNEL, "sceKernelFreeFpl(%i, %08x): invalid fpl", uid, blockPtr);
855 		return error;
856 	}
857 }
858 
sceKernelCancelFpl(SceUID uid,u32 numWaitThreadsPtr)859 int sceKernelCancelFpl(SceUID uid, u32 numWaitThreadsPtr)
860 {
861 	hleEatCycles(600);
862 
863 	u32 error;
864 	FPL *fpl = kernelObjects.Get<FPL>(uid, error);
865 	if (fpl)
866 	{
867 		DEBUG_LOG(SCEKERNEL, "sceKernelCancelFpl(%i, %08x)", uid, numWaitThreadsPtr);
868 		fpl->nf.numWaitThreads = (int) fpl->waitingThreads.size();
869 		if (Memory::IsValidAddress(numWaitThreadsPtr))
870 			Memory::Write_U32(fpl->nf.numWaitThreads, numWaitThreadsPtr);
871 
872 		bool wokeThreads = __KernelClearFplThreads(fpl, SCE_KERNEL_ERROR_WAIT_CANCEL);
873 		if (wokeThreads)
874 			hleReSchedule("fpl canceled");
875 		return 0;
876 	}
877 	else
878 	{
879 		DEBUG_LOG(SCEKERNEL, "sceKernelCancelFpl(%i, %08x): invalid fpl", uid, numWaitThreadsPtr);
880 		return error;
881 	}
882 }
883 
sceKernelReferFplStatus(SceUID uid,u32 statusPtr)884 int sceKernelReferFplStatus(SceUID uid, u32 statusPtr)
885 {
886 	u32 error;
887 	FPL *fpl = kernelObjects.Get<FPL>(uid, error);
888 	if (fpl)
889 	{
890 		DEBUG_LOG(SCEKERNEL, "sceKernelReferFplStatus(%i, %08x)", uid, statusPtr);
891 		// Refresh waiting threads and free block count.
892 		__KernelSortFplThreads(fpl);
893 		fpl->nf.numWaitThreads = (int) fpl->waitingThreads.size();
894 		fpl->nf.numFreeBlocks = 0;
895 		for (int i = 0; i < (int)fpl->nf.numBlocks; ++i)
896 		{
897 			if (!fpl->blocks[i])
898 				++fpl->nf.numFreeBlocks;
899 		}
900 		if (Memory::Read_U32(statusPtr) != 0)
901 			Memory::WriteStruct(statusPtr, &fpl->nf);
902 		return 0;
903 	}
904 	else
905 	{
906 		DEBUG_LOG(SCEKERNEL, "sceKernelReferFplStatus(%i, %08x): invalid fpl", uid, statusPtr);
907 		return error;
908 	}
909 }
910 
911 
912 
913 //////////////////////////////////////////////////////////////////////////
914 // ALLOCATIONS
915 //////////////////////////////////////////////////////////////////////////
916 //00:49:12 <TyRaNiD> ector, well the partitions are 1 = kernel, 2 = user, 3 = me, 4 = kernel mirror :)
917 
918 class PartitionMemoryBlock : public KernelObject
919 {
920 public:
GetName()921 	const char *GetName() override { return name; }
GetTypeName()922 	const char *GetTypeName() override { return GetStaticTypeName(); }
GetStaticTypeName()923 	static const char *GetStaticTypeName() { return "MemoryPart"; }
GetQuickInfo(char * ptr,int size)924 	void GetQuickInfo(char *ptr, int size) override
925 	{
926 		int sz = alloc->GetBlockSizeFromAddress(address);
927 		snprintf(ptr, size, "MemPart: %08x - %08x	size: %08x", address, address + sz, sz);
928 	}
GetMissingErrorCode()929 	static u32 GetMissingErrorCode() { return SCE_KERNEL_ERROR_UNKNOWN_UID; }
GetStaticIDType()930 	static int GetStaticIDType() { return PPSSPP_KERNEL_TMID_PMB; }
GetIDType() const931 	int GetIDType() const override { return PPSSPP_KERNEL_TMID_PMB; }
932 
PartitionMemoryBlock(BlockAllocator * _alloc,const char * _name,u32 size,MemblockType type,u32 alignment)933 	PartitionMemoryBlock(BlockAllocator *_alloc, const char *_name, u32 size, MemblockType type, u32 alignment)
934 	{
935 		alloc = _alloc;
936 		strncpy(name, _name, 32);
937 		name[31] = '\0';
938 
939 		// 0 is used for save states to wake up.
940 		if (size != 0)
941 		{
942 			if (type == PSP_SMEM_Addr)
943 			{
944 				alignment &= ~0xFF;
945 				address = alloc->AllocAt(alignment, size, name);
946 			}
947 			else if (type == PSP_SMEM_LowAligned || type == PSP_SMEM_HighAligned)
948 				address = alloc->AllocAligned(size, 0x100, alignment, type == PSP_SMEM_HighAligned, name);
949 			else
950 				address = alloc->Alloc(size, type == PSP_SMEM_High, name);
951 #ifdef _DEBUG
952 			alloc->ListBlocks();
953 #endif
954 		}
955 	}
~PartitionMemoryBlock()956 	~PartitionMemoryBlock()
957 	{
958 		if (address != (u32)-1)
959 			alloc->Free(address);
960 	}
IsValid()961 	bool IsValid() {return address != (u32)-1;}
962 	BlockAllocator *alloc;
963 
DoState(PointerWrap & p)964 	void DoState(PointerWrap &p) override
965 	{
966 		auto s = p.Section("PMB", 1);
967 		if (!s)
968 			return;
969 
970 		Do(p, address);
971 		DoArray(p, name, sizeof(name));
972 	}
973 
974 	u32 address;
975 	char name[32];
976 };
977 
978 
sceKernelMaxFreeMemSize()979 static u32 sceKernelMaxFreeMemSize()
980 {
981 	u32 retVal = userMemory.GetLargestFreeBlockSize();
982 	DEBUG_LOG(SCEKERNEL, "%08x (dec %i)=sceKernelMaxFreeMemSize()", retVal, retVal);
983 	return retVal;
984 }
985 
sceKernelTotalFreeMemSize()986 static u32 sceKernelTotalFreeMemSize()
987 {
988 	u32 retVal = userMemory.GetTotalFreeBytes();
989 	DEBUG_LOG(SCEKERNEL, "%08x (dec %i)=sceKernelTotalFreeMemSize()", retVal, retVal);
990 	return retVal;
991 }
992 
sceKernelAllocPartitionMemory(int partition,const char * name,int type,u32 size,u32 addr)993 int sceKernelAllocPartitionMemory(int partition, const char *name, int type, u32 size, u32 addr)
994 {
995 	if (name == NULL)
996 	{
997 		WARN_LOG_REPORT(SCEKERNEL, "%08x=sceKernelAllocPartitionMemory(): invalid name", SCE_KERNEL_ERROR_ERROR);
998 		return SCE_KERNEL_ERROR_ERROR;
999 	}
1000 	if (size == 0)
1001 	{
1002 		WARN_LOG_REPORT(SCEKERNEL, "%08x=sceKernelAllocPartitionMemory(): invalid size %x", SCE_KERNEL_ERROR_MEMBLOCK_ALLOC_FAILED, size);
1003 		return SCE_KERNEL_ERROR_MEMBLOCK_ALLOC_FAILED;
1004 	}
1005 	if (partition < 1 || partition > 9 || partition == 7)
1006 	{
1007 		WARN_LOG_REPORT(SCEKERNEL, "%08x=sceKernelAllocPartitionMemory(): invalid partition %x", SCE_KERNEL_ERROR_ILLEGAL_ARGUMENT, partition);
1008 		return SCE_KERNEL_ERROR_ILLEGAL_ARGUMENT;
1009 	}
1010 	// We only support user right now.
1011 	if (partition != 2 && partition != 5 && partition != 6)
1012 	{
1013 		WARN_LOG_REPORT(SCEKERNEL, "%08x=sceKernelAllocPartitionMemory(): invalid partition %x", SCE_KERNEL_ERROR_ILLEGAL_PARTITION, partition);
1014 		return SCE_KERNEL_ERROR_ILLEGAL_PARTITION;
1015 	}
1016 	if (type < PSP_SMEM_Low || type > PSP_SMEM_HighAligned)
1017 	{
1018 		WARN_LOG_REPORT(SCEKERNEL, "%08x=sceKernelAllocPartitionMemory(): invalid type %x", SCE_KERNEL_ERROR_ILLEGAL_MEMBLOCKTYPE, type);
1019 		return SCE_KERNEL_ERROR_ILLEGAL_MEMBLOCKTYPE;
1020 	}
1021 	// Alignment is only allowed for powers of 2.
1022 	if ((type == PSP_SMEM_LowAligned || type == PSP_SMEM_HighAligned) && ((addr & (addr - 1)) != 0 || addr == 0))
1023 	{
1024 		WARN_LOG_REPORT(SCEKERNEL, "%08x=sceKernelAllocPartitionMemory(): invalid alignment %x", SCE_KERNEL_ERROR_ILLEGAL_ALIGNMENT_SIZE, addr);
1025 		return SCE_KERNEL_ERROR_ILLEGAL_ALIGNMENT_SIZE;
1026 	}
1027 
1028 	PartitionMemoryBlock *block = new PartitionMemoryBlock(&userMemory, name, size, (MemblockType)type, addr);
1029 	if (!block->IsValid())
1030 	{
1031 		delete block;
1032 		ERROR_LOG(SCEKERNEL, "sceKernelAllocPartitionMemory(partition = %i, %s, type= %i, size= %i, addr= %08x): allocation failed", partition, name, type, size, addr);
1033 		return SCE_KERNEL_ERROR_MEMBLOCK_ALLOC_FAILED;
1034 	}
1035 	SceUID uid = kernelObjects.Create(block);
1036 
1037 	DEBUG_LOG(SCEKERNEL,"%i = sceKernelAllocPartitionMemory(partition = %i, %s, type= %i, size= %i, addr= %08x)",
1038 		uid, partition, name, type, size, addr);
1039 
1040 	return uid;
1041 }
1042 
sceKernelFreePartitionMemory(SceUID id)1043 int sceKernelFreePartitionMemory(SceUID id)
1044 {
1045 	DEBUG_LOG(SCEKERNEL,"sceKernelFreePartitionMemory(%d)",id);
1046 
1047 	return kernelObjects.Destroy<PartitionMemoryBlock>(id);
1048 }
1049 
sceKernelGetBlockHeadAddr(SceUID id)1050 u32 sceKernelGetBlockHeadAddr(SceUID id)
1051 {
1052 	u32 error;
1053 	PartitionMemoryBlock *block = kernelObjects.Get<PartitionMemoryBlock>(id, error);
1054 	if (block)
1055 	{
1056 		DEBUG_LOG(SCEKERNEL,"%08x = sceKernelGetBlockHeadAddr(%i)", block->address, id);
1057 		return block->address;
1058 	}
1059 	else
1060 	{
1061 		ERROR_LOG(SCEKERNEL,"sceKernelGetBlockHeadAddr failed(%i)", id);
1062 		return 0;
1063 	}
1064 }
1065 
1066 
sceKernelPrintf(const char * formatString)1067 static int sceKernelPrintf(const char *formatString)
1068 {
1069 	if (formatString == NULL)
1070 		return -1;
1071 
1072 	bool supported = true;
1073 	int param = 1;
1074 	char tempStr[24];
1075 	char tempFormat[24] = {'%'};
1076 	std::string result, format = formatString;
1077 
1078 	// Each printf is a separate line already in the log, so don't double space.
1079 	// This does mean we break up strings, unfortunately.
1080 	if (!format.empty() && format[format.size() - 1] == '\n')
1081 		format.resize(format.size() - 1);
1082 
1083 	for (size_t i = 0, n = format.size(); supported && i < n; )
1084 	{
1085 		size_t next = format.find('%', i);
1086 		if (next == format.npos)
1087 		{
1088 			result += format.substr(i);
1089 			break;
1090 		}
1091 		else if (next != i)
1092 			result += format.substr(i, next - i);
1093 
1094 		i = next + 1;
1095 		if (i >= n)
1096 		{
1097 			supported = false;
1098 			break;
1099 		}
1100 
1101 		const char *s;
1102 		switch (format[i])
1103 		{
1104 		case '%':
1105 			result += '%';
1106 			++i;
1107 			break;
1108 
1109 		case 's':
1110 			s = Memory::GetCharPointer(PARAM(param++));
1111 			result += s ? s : "(null)";
1112 			++i;
1113 			break;
1114 
1115 		case 'd':
1116 		case 'i':
1117 		case 'x':
1118 		case 'X':
1119 		case 'u':
1120 			tempFormat[1] = format[i];
1121 			tempFormat[2] = '\0';
1122 			snprintf(tempStr, sizeof(tempStr), tempFormat, PARAM(param++));
1123 			result += tempStr;
1124 			++i;
1125 			break;
1126 
1127 		case '0':
1128 			if (i + 3 > n || format[i + 1] != '8' || (format[i + 2] != 'x' && format[i + 2] != 'X'))
1129 				supported = false;
1130 			else
1131 			{
1132 				// These are the '0', '8', and 'x' or 'X' respectively.
1133 				tempFormat[1] = format[i];
1134 				tempFormat[2] = format[i + 1];
1135 				tempFormat[3] = format[i + 2];
1136 				tempFormat[4] = '\0';
1137 				snprintf(tempStr, sizeof(tempStr), tempFormat, PARAM(param++));
1138 				result += tempStr;
1139 				i += 3;
1140 			}
1141 			break;
1142 
1143 		case 'p':
1144 			snprintf(tempStr, sizeof(tempStr), "%08x", PARAM(param++));
1145 			result += tempStr;
1146 			++i;
1147 			break;
1148 
1149 		default:
1150 			supported = false;
1151 			break;
1152 		}
1153 
1154 		if (param > 6)
1155 			supported = false;
1156 	}
1157 
1158 	// Scrub for beeps and other suspicious control characters.
1159 	for (size_t i = 0; i < result.size(); i++) {
1160 		switch (result[i]) {
1161 		case 7:  // BEL
1162 		case 8:  // Backspace
1163 			result[i] = ' ';
1164 			break;
1165 		}
1166 	}
1167 
1168 	// Just in case there were embedded strings that had \n's.
1169 	if (!result.empty() && result[result.size() - 1] == '\n')
1170 		result.resize(result.size() - 1);
1171 
1172 	if (supported)
1173 		INFO_LOG(SCEKERNEL, "sceKernelPrintf: %s", result.c_str());
1174 	else
1175 		ERROR_LOG(SCEKERNEL, "UNIMPL sceKernelPrintf(%s, %08x, %08x, %08x)", format.c_str(), PARAM(1), PARAM(2), PARAM(3));
1176 	return 0;
1177 }
1178 
sceKernelSetCompiledSdkVersion(int sdkVersion)1179 static int sceKernelSetCompiledSdkVersion(int sdkVersion) {
1180 	int sdkMainVersion = sdkVersion & 0xFFFF0000;
1181 	bool validSDK = false;
1182 	switch (sdkMainVersion) {
1183 	case 0x01000000:
1184 	case 0x01050000:
1185 	case 0x02000000:
1186 	case 0x02050000:
1187 	case 0x02060000:
1188 	case 0x02070000:
1189 	case 0x02080000:
1190 	case 0x03000000:
1191 	case 0x03010000:
1192 	case 0x03030000:
1193 	case 0x03040000:
1194 	case 0x03050000:
1195 	case 0x03060000:
1196 		validSDK = true;
1197 		break;
1198 	default:
1199 		validSDK = false;
1200 		break;
1201 	}
1202 
1203 	if (!validSDK) {
1204 		WARN_LOG_REPORT(SCEKERNEL, "sceKernelSetCompiledSdkVersion unknown SDK: %x", sdkVersion);
1205 	}
1206 
1207 	DEBUG_LOG(SCEKERNEL, "sceKernelSetCompiledSdkVersion(%08x)", sdkVersion);
1208 	sdkVersion_ = sdkVersion;
1209 	flags_ |=  SCE_KERNEL_HASCOMPILEDSDKVERSION;
1210 	return 0;
1211 }
1212 
sceKernelSetCompiledSdkVersion370(int sdkVersion)1213 static int sceKernelSetCompiledSdkVersion370(int sdkVersion) {
1214 	int sdkMainVersion = sdkVersion & 0xFFFF0000;
1215 	if (sdkMainVersion != 0x03070000) {
1216 		WARN_LOG_REPORT(SCEKERNEL, "sceKernelSetCompiledSdkVersion370 unknown SDK: %x", sdkVersion);
1217 	}
1218 
1219 	DEBUG_LOG(SCEKERNEL, "sceKernelSetCompiledSdkVersion370(%08x)", sdkVersion);
1220 	sdkVersion_ = sdkVersion;
1221 	flags_ |=  SCE_KERNEL_HASCOMPILEDSDKVERSION;
1222 	return 0;
1223 }
1224 
sceKernelSetCompiledSdkVersion380_390(int sdkVersion)1225 static int sceKernelSetCompiledSdkVersion380_390(int sdkVersion) {
1226 	int sdkMainVersion = sdkVersion & 0xFFFF0000;
1227 	if (sdkMainVersion != 0x03080000 && sdkMainVersion != 0x03090000) {
1228 		WARN_LOG_REPORT(SCEKERNEL, "sceKernelSetCompiledSdkVersion380_390 unknown SDK: %x", sdkVersion);
1229 		sdkVersion_ = sdkVersion;
1230 		flags_ |=  SCE_KERNEL_HASCOMPILEDSDKVERSION;
1231 	}
1232 
1233 	DEBUG_LOG(SCEKERNEL, "sceKernelSetCompiledSdkVersion380_390(%08x)", sdkVersion);
1234 	sdkVersion_ = sdkVersion;
1235 	flags_ |=  SCE_KERNEL_HASCOMPILEDSDKVERSION;
1236 	return 0;
1237 }
1238 
sceKernelSetCompiledSdkVersion395(int sdkVersion)1239 static int sceKernelSetCompiledSdkVersion395(int sdkVersion) {
1240 	int sdkMainVersion = sdkVersion & 0xFFFFFF00;
1241 	if (sdkMainVersion != 0x04000000
1242 			&& sdkMainVersion != 0x04000100
1243 			&& sdkMainVersion != 0x04000500
1244 			&& sdkMainVersion != 0x03090500
1245 			&& sdkMainVersion != 0x03090600) {
1246 		WARN_LOG_REPORT(SCEKERNEL, "sceKernelSetCompiledSdkVersion395 unknown SDK: %x", sdkVersion);
1247 	}
1248 
1249 	DEBUG_LOG(SCEKERNEL, "sceKernelSetCompiledSdkVersion395(%08x)", sdkVersion);
1250 	sdkVersion_ = sdkVersion;
1251 	flags_ |=  SCE_KERNEL_HASCOMPILEDSDKVERSION;
1252 	return 0;
1253 }
1254 
sceKernelSetCompiledSdkVersion600_602(int sdkVersion)1255 static int sceKernelSetCompiledSdkVersion600_602(int sdkVersion) {
1256 	int sdkMainVersion = sdkVersion & 0xFFFF0000;
1257 	if (sdkMainVersion != 0x06010000
1258 			&& sdkMainVersion != 0x06000000
1259 			&& sdkMainVersion != 0x06020000) {
1260 		WARN_LOG_REPORT(SCEKERNEL, "sceKernelSetCompiledSdkVersion600_602 unknown SDK: %x", sdkVersion);
1261 	}
1262 
1263 	DEBUG_LOG(SCEKERNEL, "sceKernelSetCompiledSdkVersion600_602(%08x)", sdkVersion);
1264 	sdkVersion_ = sdkVersion;
1265 	flags_ |=  SCE_KERNEL_HASCOMPILEDSDKVERSION;
1266 	return 0;
1267 }
1268 
sceKernelSetCompiledSdkVersion500_505(int sdkVersion)1269 static int sceKernelSetCompiledSdkVersion500_505(int sdkVersion)
1270 {
1271 	int sdkMainVersion = sdkVersion & 0xFFFF0000;
1272 	if (sdkMainVersion != 0x05000000
1273 			&& sdkMainVersion != 0x05050000) {
1274 		WARN_LOG_REPORT(SCEKERNEL, "sceKernelSetCompiledSdkVersion500_505 unknown SDK: %x", sdkVersion);
1275 	}
1276 
1277 	DEBUG_LOG(SCEKERNEL, "sceKernelSetCompiledSdkVersion500_505(%08x)", sdkVersion);
1278 	sdkVersion_ = sdkVersion;
1279 	flags_ |=  SCE_KERNEL_HASCOMPILEDSDKVERSION;
1280 	return 0;
1281 }
1282 
sceKernelSetCompiledSdkVersion401_402(int sdkVersion)1283 static int sceKernelSetCompiledSdkVersion401_402(int sdkVersion) {
1284 	int sdkMainVersion = sdkVersion & 0xFFFF0000;
1285 	if (sdkMainVersion != 0x04010000
1286 			&& sdkMainVersion != 0x04020000) {
1287 		WARN_LOG_REPORT(SCEKERNEL, "sceKernelSetCompiledSdkVersion401_402 unknown SDK: %x", sdkVersion);
1288 	}
1289 
1290 	DEBUG_LOG(SCEKERNEL, "sceKernelSetCompiledSdkVersion401_402(%08x)", sdkVersion);
1291 	sdkVersion_ = sdkVersion;
1292 	flags_ |=  SCE_KERNEL_HASCOMPILEDSDKVERSION;
1293 	return 0;
1294 }
1295 
sceKernelSetCompiledSdkVersion507(int sdkVersion)1296 static int sceKernelSetCompiledSdkVersion507(int sdkVersion) {
1297 	int sdkMainVersion = sdkVersion & 0xFFFF0000;
1298 	if (sdkMainVersion != 0x05070000) {
1299 		WARN_LOG_REPORT(SCEKERNEL, "sceKernelSetCompiledSdkVersion507 unknown SDK: %x", sdkVersion);
1300 	}
1301 
1302 	DEBUG_LOG(SCEKERNEL, "sceKernelSetCompiledSdkVersion507(%08x)", sdkVersion);
1303 	sdkVersion_ = sdkVersion;
1304 	flags_ |=  SCE_KERNEL_HASCOMPILEDSDKVERSION;
1305 	return 0;
1306 }
1307 
sceKernelSetCompiledSdkVersion603_605(int sdkVersion)1308 static int sceKernelSetCompiledSdkVersion603_605(int sdkVersion) {
1309 	int sdkMainVersion = sdkVersion & 0xFFFF0000;
1310 	if (sdkMainVersion != 0x06040000
1311 			&& sdkMainVersion != 0x06030000
1312 			&& sdkMainVersion != 0x06050000) {
1313 		WARN_LOG_REPORT(SCEKERNEL, "sceKernelSetCompiledSdkVersion603_605 unknown SDK: %x", sdkVersion);
1314 	}
1315 
1316 	DEBUG_LOG(SCEKERNEL, "sceKernelSetCompiledSdkVersion603_605(%08x)", sdkVersion);
1317 	sdkVersion_ = sdkVersion;
1318 	flags_ |=  SCE_KERNEL_HASCOMPILEDSDKVERSION;
1319 	return 0;
1320 }
1321 
sceKernelSetCompiledSdkVersion606(int sdkVersion)1322 static int sceKernelSetCompiledSdkVersion606(int sdkVersion) {
1323 	int sdkMainVersion = sdkVersion & 0xFFFF0000;
1324 	if (sdkMainVersion != 0x06060000) {
1325 		ERROR_LOG_REPORT(SCEKERNEL, "sceKernelSetCompiledSdkVersion606 unknown SDK: %x (would crash)", sdkVersion);
1326 	}
1327 
1328 	DEBUG_LOG(SCEKERNEL, "sceKernelSetCompiledSdkVersion606(%08x)", sdkVersion);
1329 	sdkVersion_ = sdkVersion;
1330 	flags_ |=  SCE_KERNEL_HASCOMPILEDSDKVERSION;
1331 	return 0;
1332 }
1333 
sceKernelGetCompiledSdkVersion()1334 int sceKernelGetCompiledSdkVersion() {
1335 	if (!(flags_ & SCE_KERNEL_HASCOMPILEDSDKVERSION))
1336 		return 0;
1337 	return sdkVersion_;
1338 }
1339 
sceKernelSetCompilerVersion(int version)1340 static int sceKernelSetCompilerVersion(int version) {
1341 	DEBUG_LOG(SCEKERNEL, "sceKernelSetCompilerVersion(%08x)", version);
1342 	compilerVersion_ = version;
1343 	flags_ |= SCE_KERNEL_HASCOMPILERVERSION;
1344 	return 0;
1345 }
1346 
__KernelMemoryFPLObject()1347 KernelObject *__KernelMemoryFPLObject()
1348 {
1349 	return new FPL;
1350 }
1351 
__KernelMemoryVPLObject()1352 KernelObject *__KernelMemoryVPLObject()
1353 {
1354 	return new VPL;
1355 }
1356 
__KernelMemoryPMBObject()1357 KernelObject *__KernelMemoryPMBObject()
1358 {
1359 	// TODO: We could theoretically handle kernelMemory too, but we don't support that now anyway.
1360 	return new PartitionMemoryBlock(&userMemory, "", 0, PSP_SMEM_Low, 0);
1361 }
1362 
1363 // VPL = variable length memory pool
1364 
1365 enum SceKernelVplAttr
1366 {
1367 	PSP_VPL_ATTR_FIFO       = 0x0000,
1368 	PSP_VPL_ATTR_PRIORITY   = 0x0100,
1369 	PSP_VPL_ATTR_SMALLEST   = 0x0200,
1370 	PSP_VPL_ATTR_MASK_ORDER = 0x0300,
1371 
1372 	PSP_VPL_ATTR_HIGHMEM    = 0x4000,
1373 	PSP_VPL_ATTR_KNOWN      = PSP_VPL_ATTR_FIFO | PSP_VPL_ATTR_PRIORITY | PSP_VPL_ATTR_SMALLEST | PSP_VPL_ATTR_HIGHMEM,
1374 };
1375 
__KernelUnlockVplForThread(VPL * vpl,VplWaitingThread & threadInfo,u32 & error,int result,bool & wokeThreads)1376 static bool __KernelUnlockVplForThread(VPL *vpl, VplWaitingThread &threadInfo, u32 &error, int result, bool &wokeThreads) {
1377 	const SceUID threadID = threadInfo.threadID;
1378 	if (!HLEKernel::VerifyWait(threadID, WAITTYPE_VPL, vpl->GetUID())) {
1379 		return true;
1380 	}
1381 
1382 	// If result is an error code, we're just letting it go.
1383 	if (result == 0) {
1384 		int size = (int) __KernelGetWaitValue(threadID, error);
1385 
1386 		// An older savestate may have an invalid header, use the block allocator in that case.
1387 		u32 addr;
1388 		if (vpl->header.IsValid()) {
1389 			addr = vpl->header->Allocate(size);
1390 		} else {
1391 			// Padding (normally used to track the allocation.)
1392 			u32 allocSize = size + 8;
1393 			addr = vpl->alloc.Alloc(allocSize, true);
1394 		}
1395 		if (addr != (u32) -1) {
1396 			Memory::Write_U32(addr, threadInfo.addrPtr);
1397 		} else {
1398 			return false;
1399 		}
1400 	}
1401 
1402 	u32 timeoutPtr = __KernelGetWaitTimeoutPtr(threadID, error);
1403 	if (timeoutPtr != 0 && vplWaitTimer != -1) {
1404 		// Remove any event for this thread.
1405 		s64 cyclesLeft = CoreTiming::UnscheduleEvent(vplWaitTimer, threadID);
1406 		Memory::Write_U32((u32) cyclesToUs(cyclesLeft), timeoutPtr);
1407 	}
1408 
1409 	__KernelResumeThreadFromWait(threadID, result);
1410 	wokeThreads = true;
1411 	return true;
1412 }
1413 
__KernelVplBeginCallback(SceUID threadID,SceUID prevCallbackId)1414 void __KernelVplBeginCallback(SceUID threadID, SceUID prevCallbackId)
1415 {
1416 	auto result = HLEKernel::WaitBeginCallback<VPL, WAITTYPE_VPL, VplWaitingThread>(threadID, prevCallbackId, vplWaitTimer);
1417 	if (result == HLEKernel::WAIT_CB_SUCCESS)
1418 		DEBUG_LOG(SCEKERNEL, "sceKernelAllocateVplCB: Suspending vpl wait for callback");
1419 	else if (result == HLEKernel::WAIT_CB_BAD_WAIT_DATA)
1420 		ERROR_LOG_REPORT(SCEKERNEL, "sceKernelAllocateVplCB: wait not found to pause for callback");
1421 	else
1422 		WARN_LOG_REPORT(SCEKERNEL, "sceKernelAllocateVplCB: beginning callback with bad wait id?");
1423 }
1424 
__KernelVplEndCallback(SceUID threadID,SceUID prevCallbackId)1425 void __KernelVplEndCallback(SceUID threadID, SceUID prevCallbackId)
1426 {
1427 	auto result = HLEKernel::WaitEndCallback<VPL, WAITTYPE_VPL, VplWaitingThread>(threadID, prevCallbackId, vplWaitTimer, __KernelUnlockVplForThread);
1428 	if (result == HLEKernel::WAIT_CB_RESUMED_WAIT)
1429 		DEBUG_LOG(SCEKERNEL, "sceKernelReceiveMbxCB: Resuming mbx wait from callback");
1430 }
1431 
__VplThreadSortPriority(VplWaitingThread thread1,VplWaitingThread thread2)1432 static bool __VplThreadSortPriority(VplWaitingThread thread1, VplWaitingThread thread2)
1433 {
1434 	return __KernelThreadSortPriority(thread1.threadID, thread2.threadID);
1435 }
1436 
__KernelClearVplThreads(VPL * vpl,int reason)1437 static bool __KernelClearVplThreads(VPL *vpl, int reason)
1438 {
1439 	u32 error;
1440 	bool wokeThreads = false;
1441 	for (auto iter = vpl->waitingThreads.begin(), end = vpl->waitingThreads.end(); iter != end; ++iter)
1442 		__KernelUnlockVplForThread(vpl, *iter, error, reason, wokeThreads);
1443 	vpl->waitingThreads.clear();
1444 
1445 	return wokeThreads;
1446 }
1447 
__KernelSortVplThreads(VPL * vpl)1448 static void __KernelSortVplThreads(VPL *vpl)
1449 {
1450 	// Remove any that are no longer waiting.
1451 	SceUID uid = vpl->GetUID();
1452 	HLEKernel::CleanupWaitingThreads(WAITTYPE_VPL, uid, vpl->waitingThreads);
1453 
1454 	if ((vpl->nv.attr & PSP_VPL_ATTR_PRIORITY) != 0)
1455 		std::stable_sort(vpl->waitingThreads.begin(), vpl->waitingThreads.end(), __VplThreadSortPriority);
1456 }
1457 
sceKernelCreateVpl(const char * name,int partition,u32 attr,u32 vplSize,u32 optPtr)1458 SceUID sceKernelCreateVpl(const char *name, int partition, u32 attr, u32 vplSize, u32 optPtr)
1459 {
1460 	if (!name)
1461 	{
1462 		WARN_LOG_REPORT(SCEKERNEL, "%08x=sceKernelCreateVpl(): invalid name", SCE_KERNEL_ERROR_ERROR);
1463 		return SCE_KERNEL_ERROR_ERROR;
1464 	}
1465 	if (partition < 1 || partition > 9 || partition == 7)
1466 	{
1467 		WARN_LOG_REPORT(SCEKERNEL, "%08x=sceKernelCreateVpl(): invalid partition %d", SCE_KERNEL_ERROR_ILLEGAL_ARGUMENT, partition);
1468 		return SCE_KERNEL_ERROR_ILLEGAL_ARGUMENT;
1469 	}
1470 	// We only support user right now.
1471 	if (partition != 2 && partition != 6)
1472 	{
1473 		WARN_LOG_REPORT(SCEKERNEL, "%08x=sceKernelCreateVpl(): invalid partition %d", SCE_KERNEL_ERROR_ILLEGAL_PERM, partition);
1474 		return SCE_KERNEL_ERROR_ILLEGAL_PERM;
1475 	}
1476 	if (((attr & ~PSP_VPL_ATTR_KNOWN) & ~0xFF) != 0)
1477 	{
1478 		WARN_LOG_REPORT(SCEKERNEL, "%08x=sceKernelCreateVpl(): invalid attr parameter: %08x", SCE_KERNEL_ERROR_ILLEGAL_ATTR, attr);
1479 		return SCE_KERNEL_ERROR_ILLEGAL_ATTR;
1480 	}
1481 	if (vplSize == 0)
1482 	{
1483 		WARN_LOG_REPORT(SCEKERNEL, "%08x=sceKernelCreateVpl(): invalid size", SCE_KERNEL_ERROR_ILLEGAL_MEMSIZE);
1484 		return SCE_KERNEL_ERROR_ILLEGAL_MEMSIZE;
1485 	}
1486 	// Block Allocator seems to A-OK this, let's stop it here.
1487 	if (vplSize >= 0x80000000)
1488 	{
1489 		WARN_LOG_REPORT(SCEKERNEL, "%08x=sceKernelCreateVpl(): way too big size", SCE_KERNEL_ERROR_NO_MEMORY);
1490 		return SCE_KERNEL_ERROR_NO_MEMORY;
1491 	}
1492 
1493 	// Can't have that little space in a Vpl, sorry.
1494 	if (vplSize <= 0x30)
1495 		vplSize = 0x1000;
1496 	vplSize = (vplSize + 7) & ~7;
1497 
1498 	// We ignore the upalign to 256 and do it ourselves by 8.
1499 	u32 allocSize = vplSize;
1500 	u32 memBlockPtr = userMemory.Alloc(allocSize, (attr & PSP_VPL_ATTR_HIGHMEM) != 0, "VPL");
1501 	if (memBlockPtr == (u32)-1)
1502 	{
1503 		ERROR_LOG(SCEKERNEL, "sceKernelCreateVpl(): Failed to allocate %i bytes of pool data", vplSize);
1504 		return SCE_KERNEL_ERROR_NO_MEMORY;
1505 	}
1506 
1507 	VPL *vpl = new VPL;
1508 	SceUID id = kernelObjects.Create(vpl);
1509 
1510 	strncpy(vpl->nv.name, name, KERNELOBJECT_MAX_NAME_LENGTH);
1511 	vpl->nv.name[KERNELOBJECT_MAX_NAME_LENGTH] = 0;
1512 	vpl->nv.attr = attr;
1513 	vpl->nv.size = sizeof(vpl->nv);
1514 	vpl->nv.poolSize = vplSize - 0x20;
1515 	vpl->nv.numWaitThreads = 0;
1516 	vpl->nv.freeSize = vpl->nv.poolSize;
1517 
1518 	// A vpl normally has accounting stuff in the first 32 bytes.
1519 	vpl->address = memBlockPtr + 0x20;
1520 	vpl->alloc.Init(vpl->address, vpl->nv.poolSize, true);
1521 
1522 	vpl->header = PSPPointer<SceKernelVplHeader>::Create(memBlockPtr);
1523 	vpl->header->Init(memBlockPtr, vplSize);
1524 
1525 	DEBUG_LOG(SCEKERNEL, "%x=sceKernelCreateVpl(\"%s\", block=%i, attr=%i, size=%i)",
1526 		id, name, partition, vpl->nv.attr, vpl->nv.poolSize);
1527 
1528 	if (optPtr != 0)
1529 	{
1530 		u32 size = Memory::Read_U32(optPtr);
1531 		if (size > 4)
1532 			WARN_LOG_REPORT(SCEKERNEL, "sceKernelCreateVpl(): unsupported options parameter, size = %d", size);
1533 	}
1534 
1535 	return id;
1536 }
1537 
sceKernelDeleteVpl(SceUID uid)1538 int sceKernelDeleteVpl(SceUID uid)
1539 {
1540 	DEBUG_LOG(SCEKERNEL, "sceKernelDeleteVpl(%i)", uid);
1541 	u32 error;
1542 	VPL *vpl = kernelObjects.Get<VPL>(uid, error);
1543 	if (vpl)
1544 	{
1545 		bool wokeThreads = __KernelClearVplThreads(vpl, SCE_KERNEL_ERROR_WAIT_DELETE);
1546 		if (wokeThreads)
1547 			hleReSchedule("vpl deleted");
1548 
1549 		userMemory.Free(vpl->address);
1550 		kernelObjects.Destroy<VPL>(uid);
1551 		return 0;
1552 	}
1553 	else
1554 		return error;
1555 }
1556 
// Returns false for invalid parameters (e.g. don't check callbacks, etc.)
// Successful allocation is indicated by error == 0.
static bool __KernelAllocateVpl(SceUID uid, u32 size, u32 addrPtr, u32 &error, bool trying, const char *funcname) {
	VPL *vpl = kernelObjects.Get<VPL>(uid, error);
	if (vpl) {
		if (size == 0 || size > (u32) vpl->nv.poolSize) {
			WARN_LOG(SCEKERNEL, "%s(vpl=%i, size=%i, ptrout=%08x): invalid size", funcname, uid, size, addrPtr);
			error = SCE_KERNEL_ERROR_ILLEGAL_MEMSIZE;
			return false;
		}

		VERBOSE_LOG(SCEKERNEL, "%s(vpl=%i, size=%i, ptrout=%08x)", funcname, uid, size, addrPtr);

		// For some reason, try doesn't follow the same rules...
		if (!trying && (vpl->nv.attr & PSP_VPL_ATTR_MASK_ORDER) == PSP_VPL_ATTR_FIFO)
		{
			__KernelSortVplThreads(vpl);
			if (!vpl->waitingThreads.empty())
			{
				// Can't allocate, blocked by FIFO queue.
				error = SCE_KERNEL_ERROR_NO_MEMORY;
				return true;
			}
		}

		// Allocate using the header only for newer vpls (older come from savestates.)
		u32 addr;
		if (vpl->header.IsValid()) {
			addr = vpl->header->Allocate(size);
		} else {
			// Padding (normally used to track the allocation.)
			u32 allocSize = size + 8;
			addr = vpl->alloc.Alloc(allocSize, true, "VplAllocate");
		}
		if (addr != (u32) -1) {
			Memory::Write_U32(addr, addrPtr);
			error = 0;
		} else {
			error = SCE_KERNEL_ERROR_NO_MEMORY;
		}

		return true;
	}

	return false;
}

void __KernelVplTimeout(u64 userdata, int cyclesLate) {
	SceUID threadID = (SceUID) userdata;
	u32 error;
	SceUID uid = __KernelGetWaitID(threadID, WAITTYPE_VPL, error);

	HLEKernel::WaitExecTimeout<VPL, WAITTYPE_VPL>(threadID);

	// If in FIFO mode, that may have cleared another thread to wake up.
	VPL *vpl = kernelObjects.Get<VPL>(uid, error);
	if (vpl && (vpl->nv.attr & PSP_VPL_ATTR_MASK_ORDER) == PSP_VPL_ATTR_FIFO) {
		bool wokeThreads;
		std::vector<VplWaitingThread>::iterator iter = vpl->waitingThreads.begin();
		// Unlock every waiting thread until the first that must still wait.
		while (iter != vpl->waitingThreads.end() && __KernelUnlockVplForThread(vpl, *iter, error, 0, wokeThreads)) {
			vpl->waitingThreads.erase(iter);
			iter = vpl->waitingThreads.begin();
		}
	}
}

static void __KernelSetVplTimeout(u32 timeoutPtr)
{
	if (timeoutPtr == 0 || vplWaitTimer == -1)
		return;

	int micro = (int) Memory::Read_U32(timeoutPtr);

	// This happens to be how the hardware seems to time things.
	if (micro <= 5)
		micro = 20;
	// Yes, this 7 is reproducible.  6 is (a lot) longer than 7.
	else if (micro == 7)
		micro = 25;
	else if (micro <= 215)
		micro = 250;
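	// The waiting thread's ID is passed as the timer's userdata so __KernelVplTimeout() can resolve the right wait.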
	CoreTiming::ScheduleEvent(usToCycles(micro), vplWaitTimer, __KernelGetCurThread());
}

int sceKernelAllocateVpl(SceUID uid, u32 size, u32 addrPtr, u32 timeoutPtr)
{
	u32 error, ignore;
	if (__KernelAllocateVpl(uid, size, addrPtr, error, false, __FUNCTION__))
	{
		VPL *vpl = kernelObjects.Get<VPL>(uid, ignore);
		if (error == SCE_KERNEL_ERROR_NO_MEMORY)
		{
			if (timeoutPtr != 0 && Memory::Read_U32(timeoutPtr) == 0)
				return SCE_KERNEL_ERROR_WAIT_TIMEOUT;

			if (vpl)
			{
				SceUID threadID = __KernelGetCurThread();
				HLEKernel::RemoveWaitingThread(vpl->waitingThreads, threadID);
				VplWaitingThread waiting = {threadID, addrPtr};
				vpl->waitingThreads.push_back(waiting);
			}

			__KernelSetVplTimeout(timeoutPtr);
			__KernelWaitCurThread(WAITTYPE_VPL, uid, size, timeoutPtr, false, "vpl waited");
		}
		// If anyone else was waiting, the allocation causes a delay.
		else if (error == 0 && !vpl->waitingThreads.empty())
			return hleDelayResult(error, "vpl allocated", 50);
	}
	return error;
}

int sceKernelAllocateVplCB(SceUID uid, u32 size, u32 addrPtr, u32 timeoutPtr)
{
	u32 error, ignore;
	if (__KernelAllocateVpl(uid, size, addrPtr, error, false, __FUNCTION__))
	{
		hleCheckCurrentCallbacks();

		VPL *vpl = kernelObjects.Get<VPL>(uid, ignore);
		if (error == SCE_KERNEL_ERROR_NO_MEMORY)
		{
			if (timeoutPtr != 0 && Memory::Read_U32(timeoutPtr) == 0)
				return SCE_KERNEL_ERROR_WAIT_TIMEOUT;

			if (vpl)
			{
				SceUID threadID = __KernelGetCurThread();
				HLEKernel::RemoveWaitingThread(vpl->waitingThreads, threadID);
				VplWaitingThread waiting = {threadID, addrPtr};
				vpl->waitingThreads.push_back(waiting);
			}

			__KernelSetVplTimeout(timeoutPtr);
			__KernelWaitCurThread(WAITTYPE_VPL, uid, size, timeoutPtr, true, "vpl waited");
		}
		// If anyone else was waiting, the allocation causes a delay.
		else if (error == 0 && !vpl->waitingThreads.empty())
			return hleDelayResult(error, "vpl allocated", 50);
	}
	return error;
}

int sceKernelTryAllocateVpl(SceUID uid, u32 size, u32 addrPtr)
{
	u32 error;
	__KernelAllocateVpl(uid, size, addrPtr, error, true, __FUNCTION__);
	return error;
}

int sceKernelFreeVpl(SceUID uid, u32 addr) {
	if (addr && !Memory::IsValidAddress(addr)) {
		WARN_LOG(SCEKERNEL, "%08x=sceKernelFreeVpl(%i, %08x): Invalid address", SCE_KERNEL_ERROR_ILLEGAL_ADDR, uid, addr);
		return SCE_KERNEL_ERROR_ILLEGAL_ADDR;
	}

	VERBOSE_LOG(SCEKERNEL, "sceKernelFreeVpl(%i, %08x)", uid, addr);
	u32 error;
	VPL *vpl = kernelObjects.Get<VPL>(uid, error);
	if (vpl) {
		bool freed;
		// Free using the header for newer vpls (not old savestates.)
		if (vpl->header.IsValid()) {
			freed = vpl->header->Free(addr);
		} else {
			freed = vpl->alloc.FreeExact(addr);
		}

		if (freed) {
			__KernelSortVplThreads(vpl);

			bool wokeThreads = false;
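			// Erasing from waitingThreads invalidates the iterators, so after each wake we jump back and rescan from the start.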
retry:
			for (auto iter = vpl->waitingThreads.begin(), end = vpl->waitingThreads.end(); iter != end; ++iter) {
				if (__KernelUnlockVplForThread(vpl, *iter, error, 0, wokeThreads)) {
					vpl->waitingThreads.erase(iter);
					goto retry;
				}
				// In FIFO, we stop at the first one that can't wake.
				else if ((vpl->nv.attr & PSP_VPL_ATTR_MASK_ORDER) == PSP_VPL_ATTR_FIFO)
					break;
			}

			if (wokeThreads) {
				hleReSchedule("vpl freed");
			}

			return 0;
		} else {
			WARN_LOG(SCEKERNEL, "%08x=sceKernelFreeVpl(%i, %08x): Unable to free", SCE_KERNEL_ERROR_ILLEGAL_MEMBLOCK, uid, addr);
			return SCE_KERNEL_ERROR_ILLEGAL_MEMBLOCK;
		}
	} else {
		return error;
	}
}

int sceKernelCancelVpl(SceUID uid, u32 numWaitThreadsPtr)
{
	u32 error;
	VPL *vpl = kernelObjects.Get<VPL>(uid, error);
	if (vpl)
	{
		DEBUG_LOG(SCEKERNEL, "sceKernelCancelVpl(%i, %08x)", uid, numWaitThreadsPtr);
		vpl->nv.numWaitThreads = (int) vpl->waitingThreads.size();
		if (Memory::IsValidAddress(numWaitThreadsPtr))
			Memory::Write_U32(vpl->nv.numWaitThreads, numWaitThreadsPtr);

		bool wokeThreads = __KernelClearVplThreads(vpl, SCE_KERNEL_ERROR_WAIT_CANCEL);
		if (wokeThreads)
			hleReSchedule("vpl canceled");

		return 0;
	}
	else
	{
		DEBUG_LOG(SCEKERNEL, "sceKernelCancelVpl(%i, %08x): invalid vpl", uid, numWaitThreadsPtr);
		return error;
	}
}

int sceKernelReferVplStatus(SceUID uid, u32 infoPtr) {
	u32 error;
	VPL *vpl = kernelObjects.Get<VPL>(uid, error);
	if (vpl) {
		DEBUG_LOG(SCEKERNEL, "sceKernelReferVplStatus(%i, %08x)", uid, infoPtr);

		__KernelSortVplThreads(vpl);
		vpl->nv.numWaitThreads = (int) vpl->waitingThreads.size();
		if (vpl->header.IsValid()) {
			vpl->nv.freeSize = vpl->header->FreeSize();
		} else {
			vpl->nv.freeSize = vpl->alloc.GetTotalFreeBytes();
		}
		if (Memory::IsValidAddress(infoPtr) && Memory::Read_U32(infoPtr) != 0) {
			Memory::WriteStruct(infoPtr, &vpl->nv);
		}
		return 0;
	} else {
		return error;
	}
}

static u32 AllocMemoryBlock(const char *pname, u32 type, u32 size, u32 paramsAddr) {
	if (Memory::IsValidAddress(paramsAddr) && Memory::Read_U32(paramsAddr) != 4) {
		ERROR_LOG_REPORT(SCEKERNEL, "AllocMemoryBlock(%s): unsupported params size %d", pname, Memory::Read_U32(paramsAddr));
		return SCE_KERNEL_ERROR_ILLEGAL_ARGUMENT;
	}
	if (type != PSP_SMEM_High && type != PSP_SMEM_Low) {
		ERROR_LOG_REPORT(SCEKERNEL, "AllocMemoryBlock(%s): unsupported type %d", pname, type);
		return SCE_KERNEL_ERROR_ILLEGAL_MEMBLOCKTYPE;
	}
	if (size == 0) {
		WARN_LOG_REPORT(SCEKERNEL, "AllocMemoryBlock(%s): invalid size %x", pname, size);
		return SCE_KERNEL_ERROR_MEMBLOCK_ALLOC_FAILED;
	}
	if (pname == NULL) {
		ERROR_LOG_REPORT(SCEKERNEL, "AllocMemoryBlock(): NULL name");
		return SCE_KERNEL_ERROR_ERROR;
	}

	PartitionMemoryBlock *block = new PartitionMemoryBlock(&userMemory, pname, size, (MemblockType)type, 0);
	if (!block->IsValid())
	{
		delete block;
		ERROR_LOG(SCEKERNEL, "AllocMemoryBlock(%s, %i, %08x, %08x): allocation failed", pname, type, size, paramsAddr);
		return SCE_KERNEL_ERROR_MEMBLOCK_ALLOC_FAILED;
	}
	SceUID uid = kernelObjects.Create(block);

	INFO_LOG(SCEKERNEL,"%08x=AllocMemoryBlock(SysMemUserForUser_FE707FDF)(%s, %i, %08x, %08x)", uid, pname, type, size, paramsAddr);
	return uid;
}

static u32 FreeMemoryBlock(u32 uid) {
	INFO_LOG(SCEKERNEL, "FreeMemoryBlock(%08x)", uid);
	return kernelObjects.Destroy<PartitionMemoryBlock>(uid);
}

static u32 GetMemoryBlockPtr(u32 uid, u32 addr) {
	u32 error;
	PartitionMemoryBlock *block = kernelObjects.Get<PartitionMemoryBlock>(uid, error);
	if (block)
	{
		INFO_LOG(SCEKERNEL, "GetMemoryBlockPtr(%08x, %08x) = %08x", uid, addr, block->address);
		Memory::Write_U32(block->address, addr);
		return 0;
	}
	else
	{
		ERROR_LOG(SCEKERNEL, "GetMemoryBlockPtr(%08x, %08x) failed", uid, addr);
		return 0;
	}
}

static u32 SysMemUserForUser_D8DE5C1E() {
	// Called by Evangelion Jo; returning 0 here lets it get in-game.
	ERROR_LOG(SCEKERNEL,"UNIMPL SysMemUserForUser_D8DE5C1E()");
	return 0;
}

static u32 SysMemUserForUser_ACBD88CA() {
	ERROR_LOG_REPORT_ONCE(SysMemUserForUser_ACBD88CA, SCEKERNEL, "UNIMPL SysMemUserForUser_ACBD88CA()");
	return 0;
}

static u32 SysMemUserForUser_945E45DA() {
	// Called by Evangelion Jo, which expects 0 to be returned here.
	ERROR_LOG_REPORT_ONCE(SysMemUserForUser945E45DA, SCEKERNEL, "UNIMPL SysMemUserForUser_945E45DA()");
	return 0;
}

enum
{
	PSP_ERROR_UNKNOWN_TLSPL_ID = 0x800201D0,
	PSP_ERROR_TOO_MANY_TLSPL   = 0x800201D1,
	PSP_ERROR_TLSPL_IN_USE     = 0x800201D2,
};

enum
{
	// TODO: Complete untested guesses.
	PSP_TLSPL_ATTR_FIFO     = 0,
	PSP_TLSPL_ATTR_PRIORITY = 0x100,
	PSP_TLSPL_ATTR_HIGHMEM  = 0x4000,
	PSP_TLSPL_ATTR_KNOWN    = PSP_TLSPL_ATTR_HIGHMEM | PSP_TLSPL_ATTR_PRIORITY | PSP_TLSPL_ATTR_FIFO,
};

struct NativeTlspl
{
	SceSize_le size;
	char name[32];
	SceUInt_le attr;
	s32_le index;
	u32_le blockSize;
	u32_le totalBlocks;
	u32_le freeBlocks;
	u32_le numWaitThreads;
};

struct TLSPL : public KernelObject
{
	const char *GetName() override { return ntls.name; }
	const char *GetTypeName() override { return GetStaticTypeName(); }
	static const char *GetStaticTypeName() { return "TLS"; }
	static u32 GetMissingErrorCode() { return PSP_ERROR_UNKNOWN_TLSPL_ID; }
	static int GetStaticIDType() { return SCE_KERNEL_TMID_Tlspl; }
	int GetIDType() const override { return SCE_KERNEL_TMID_Tlspl; }

	TLSPL() : next(0) {}

	void DoState(PointerWrap &p) override
	{
		auto s = p.Section("TLS", 1, 2);
		if (!s)
			return;

		Do(p, ntls);
		Do(p, address);
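		// Savestate section version 2 added the alignment field; older states fall back to 4-byte alignment.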
		if (s >= 2)
			Do(p, alignment);
		else
			alignment = 4;
		Do(p, waitingThreads);
		Do(p, next);
		Do(p, usage);
	}

	NativeTlspl ntls;
	u32 address;
	u32 alignment;
	std::vector<SceUID> waitingThreads;
	int next;
	std::vector<SceUID> usage;
};

KernelObject *__KernelTlsplObject()
{
	return new TLSPL;
}

static void __KernelSortTlsplThreads(TLSPL *tls)
{
	// Remove any that are no longer waiting.
	SceUID uid = tls->GetUID();
	HLEKernel::CleanupWaitingThreads(WAITTYPE_TLSPL, uid, tls->waitingThreads);

	if ((tls->ntls.attr & PSP_FPL_ATTR_PRIORITY) != 0)
		std::stable_sort(tls->waitingThreads.begin(), tls->waitingThreads.end(), __KernelThreadSortPriority);
}

int __KernelFreeTls(TLSPL *tls, SceUID threadID)
{
	// Find the current thread's block.
	int freeBlock = -1;
	for (size_t i = 0; i < tls->ntls.totalBlocks; ++i)
	{
		if (tls->usage[i] == threadID)
		{
			freeBlock = (int) i;
			break;
		}
	}

	if (freeBlock != -1)
	{
		SceUID uid = tls->GetUID();

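		// The freed block's address is the pool base plus the block index times the aligned per-block stride.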
		u32 alignedSize = (tls->ntls.blockSize + tls->alignment - 1) & ~(tls->alignment - 1);
		u32 freedAddress = tls->address + freeBlock * alignedSize;
		NotifyMemInfo(MemBlockFlags::SUB_ALLOC, freedAddress, tls->ntls.blockSize, "TlsFree");

		// Whenever freeing a block, clear it (even if it's not going to wake anyone.)
		Memory::Memset(freedAddress, 0, tls->ntls.blockSize, "TlsFree");

		// First, let's remove the end check for the freeing thread.
		auto freeingLocked = tlsplThreadEndChecks.equal_range(threadID);
		for (TlsplMap::iterator iter = freeingLocked.first; iter != freeingLocked.second; ++iter)
		{
			if (iter->second == uid)
			{
				tlsplThreadEndChecks.erase(iter);
				break;
			}
		}

		__KernelSortTlsplThreads(tls);
		while (!tls->waitingThreads.empty())
		{
			SceUID waitingThreadID = tls->waitingThreads[0];
			tls->waitingThreads.erase(tls->waitingThreads.begin());

			// This thread must've been woken up.
			if (!HLEKernel::VerifyWait(waitingThreadID, WAITTYPE_TLSPL, uid))
				continue;

			// Otherwise, if there was a thread waiting, we were full, so this newly freed one is theirs.
			tls->usage[freeBlock] = waitingThreadID;
			__KernelResumeThreadFromWait(waitingThreadID, freedAddress);

			// Gotta watch the thread to quit as well, since they've allocated now.
			tlsplThreadEndChecks.insert(std::make_pair(waitingThreadID, uid));

			// No need to continue or free it, we're done.
			return 0;
		}

		// No one was waiting, so now we can really free it.
		tls->usage[freeBlock] = 0;
		++tls->ntls.freeBlocks;
		return 0;
	}
	// We say "okay" even though nothing was freed.
	else
		return 0;
}

void __KernelTlsplThreadEnd(SceUID threadID)
{
	u32 error;

	// It wasn't waiting, was it?
	SceUID waitingTlsID = __KernelGetWaitID(threadID, WAITTYPE_TLSPL, error);
	if (waitingTlsID)
	{
		TLSPL *tls = kernelObjects.Get<TLSPL>(waitingTlsID, error);
		if (tls)
			tls->waitingThreads.erase(std::remove(tls->waitingThreads.begin(), tls->waitingThreads.end(), threadID), tls->waitingThreads.end());
	}

	// Unlock all pools the thread had locked.
	auto locked = tlsplThreadEndChecks.equal_range(threadID);
	for (TlsplMap::iterator iter = locked.first; iter != locked.second; ++iter)
	{
		SceUID tlsID = iter->second;
		TLSPL *tls = kernelObjects.Get<TLSPL>(tlsID, error);

		if (tls)
		{
			__KernelFreeTls(tls, threadID);

			// Restart the loop, since freeing may have mutated the map.
			locked = tlsplThreadEndChecks.equal_range(threadID);
			iter = locked.first;
			if (locked.first == locked.second)
				break;
		}
	}
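	// Erase whatever remains for this thread (entries whose pool object could not be fetched were not removed above).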
	tlsplThreadEndChecks.erase(locked.first, locked.second);
}

SceUID sceKernelCreateTlspl(const char *name, u32 partition, u32 attr, u32 blockSize, u32 count, u32 optionsPtr)
{
	if (!name)
	{
		WARN_LOG_REPORT(SCEKERNEL, "%08x=sceKernelCreateTlspl(): invalid name", SCE_KERNEL_ERROR_NO_MEMORY);
		return SCE_KERNEL_ERROR_NO_MEMORY;
	}
	if ((attr & ~PSP_TLSPL_ATTR_KNOWN) >= 0x100)
	{
		WARN_LOG_REPORT(SCEKERNEL, "%08x=sceKernelCreateTlspl(): invalid attr parameter: %08x", SCE_KERNEL_ERROR_ILLEGAL_ATTR, attr);
		return SCE_KERNEL_ERROR_ILLEGAL_ATTR;
	}
	if (partition < 1 || partition > 9 || partition == 7)
	{
		WARN_LOG_REPORT(SCEKERNEL, "%08x=sceKernelCreateTlspl(): invalid partition %d", SCE_KERNEL_ERROR_ILLEGAL_ARGUMENT, partition);
		return SCE_KERNEL_ERROR_ILLEGAL_ARGUMENT;
	}
	// We only support user right now.
	if (partition != 2 && partition != 6)
	{
		WARN_LOG_REPORT(SCEKERNEL, "%08x=sceKernelCreateTlspl(): invalid partition %d", SCE_KERNEL_ERROR_ILLEGAL_PERM, partition);
		return SCE_KERNEL_ERROR_ILLEGAL_PERM;
	}

	// There's probably a simpler way to get this same basic formula...
	// This is based on results from a PSP.
	bool illegalMemSize = blockSize == 0 || count == 0;
	if (!illegalMemSize && (u64) blockSize > ((0x100000000ULL / (u64) count) - 4ULL))
		illegalMemSize = true;
	if (!illegalMemSize && (u64) count >= 0x100000000ULL / (((u64) blockSize + 3ULL) & ~3ULL))
		illegalMemSize = true;
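	// For example, blockSize = 0x10000 with count = 0x10000 would need 4GB and is rejected by the first check above.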
	if (illegalMemSize)
	{
		WARN_LOG_REPORT(SCEKERNEL, "%08x=sceKernelCreateTlspl(): invalid blockSize/count", SCE_KERNEL_ERROR_ILLEGAL_MEMSIZE);
		return SCE_KERNEL_ERROR_ILLEGAL_MEMSIZE;
	}

	int index = -1;
	for (int i = 0; i < TLSPL_NUM_INDEXES; ++i)
		if (tlsplUsedIndexes[i] == false)
		{
			index = i;
			break;
		}

	if (index == -1)
	{
		WARN_LOG_REPORT(SCEKERNEL, "%08x=sceKernelCreateTlspl(): ran out of indexes for TLS pools", PSP_ERROR_TOO_MANY_TLSPL);
		return PSP_ERROR_TOO_MANY_TLSPL;
	}

	// Unless otherwise specified, we align to 4 bytes (a mips word.)
	u32 alignment = 4;
	if (optionsPtr != 0)
	{
		u32 size = Memory::Read_U32(optionsPtr);
		if (size > 8)
			WARN_LOG_REPORT(SCEKERNEL, "sceKernelCreateTlspl(%s) unsupported options parameter, size = %d", name, size);
		if (size >= 8)
			alignment = Memory::Read_U32(optionsPtr + 4);

		// Note that 0 intentionally is allowed.
		if ((alignment & (alignment - 1)) != 0)
		{
			ERROR_LOG_REPORT(SCEKERNEL, "sceKernelCreateTlspl(%s): alignment is not a power of 2: %d", name, alignment);
			return SCE_KERNEL_ERROR_ILLEGAL_ARGUMENT;
		}
		// This goes for 0, 1, and 2.  Can't have less than 4 byte alignment.
		if (alignment < 4)
			alignment = 4;
	}

	// Upalign.  Strangely, the sceKernelReferTlsplStatus value is the original.
	u32 alignedSize = (blockSize + alignment - 1) & ~(alignment - 1);
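	// For example, blockSize 20 with 16-byte alignment gives a 32-byte stride here, while ntls.blockSize below keeps the original 20.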

	u32 totalSize = alignedSize * count;
	u32 blockPtr = userMemory.Alloc(totalSize, (attr & PSP_TLSPL_ATTR_HIGHMEM) != 0, name);
#ifdef _DEBUG
	userMemory.ListBlocks();
#endif

	if (blockPtr == (u32) -1)
	{
		ERROR_LOG(SCEKERNEL, "%08x=sceKernelCreateTlspl(%s, %d, %08x, %d, %d, %08x): failed to allocate memory", SCE_KERNEL_ERROR_NO_MEMORY, name, partition, attr, blockSize, count, optionsPtr);
		return SCE_KERNEL_ERROR_NO_MEMORY;
	}

	TLSPL *tls = new TLSPL();
	SceUID id = kernelObjects.Create(tls);

	tls->ntls.size = sizeof(tls->ntls);
	strncpy(tls->ntls.name, name, KERNELOBJECT_MAX_NAME_LENGTH);
	tls->ntls.name[KERNELOBJECT_MAX_NAME_LENGTH] = 0;
	tls->ntls.attr = attr;
	tls->ntls.index = index;
	tlsplUsedIndexes[index] = true;
	tls->ntls.blockSize = blockSize;
	tls->ntls.totalBlocks = count;
	tls->ntls.freeBlocks = count;
	tls->ntls.numWaitThreads = 0;
	tls->address = blockPtr;
	tls->alignment = alignment;
	tls->usage.resize(count, 0);

	WARN_LOG(SCEKERNEL, "%08x=sceKernelCreateTlspl(%s, %d, %08x, %d, %d, %08x)", id, name, partition, attr, blockSize, count, optionsPtr);

	return id;
}

int sceKernelDeleteTlspl(SceUID uid)
{
	u32 error;
	TLSPL *tls = kernelObjects.Get<TLSPL>(uid, error);
	if (tls)
	{
		bool inUse = false;
		for (SceUID threadID : tls->usage)
		{
			if (threadID != 0 && threadID != __KernelGetCurThread())
				inUse = true;
		}
		if (inUse)
		{
			error = PSP_ERROR_TLSPL_IN_USE;
			WARN_LOG(SCEKERNEL, "%08x=sceKernelDeleteTlspl(%08x): in use", error, uid);
			return error;
		}

		WARN_LOG(SCEKERNEL, "sceKernelDeleteTlspl(%08x)", uid);

		for (SceUID threadID : tls->waitingThreads)
			HLEKernel::ResumeFromWait(threadID, WAITTYPE_TLSPL, uid, 0);
		hleReSchedule("deleted tlspl");

		userMemory.Free(tls->address);
		tlsplUsedIndexes[tls->ntls.index] = false;
		kernelObjects.Destroy<TLSPL>(uid);
	}
	else
		ERROR_LOG(SCEKERNEL, "%08x=sceKernelDeleteTlspl(%08x): bad tlspl", error, uid);
	return error;
}

int sceKernelGetTlsAddr(SceUID uid)
{
	// TODO: Allocate downward if PSP_TLSPL_ATTR_HIGHMEM?
	DEBUG_LOG(SCEKERNEL, "sceKernelGetTlsAddr(%08x)", uid);

	if (!__KernelIsDispatchEnabled() || __IsInInterrupt())
		return 0;

	u32 error;
	TLSPL *tls = kernelObjects.Get<TLSPL>(uid, error);
	if (tls)
	{
		SceUID threadID = __KernelGetCurThread();
		int allocBlock = -1;
		bool needsClear = false;

		// If the thread already has one, return it.
		for (size_t i = 0; i < tls->ntls.totalBlocks && allocBlock == -1; ++i)
		{
			if (tls->usage[i] == threadID)
				allocBlock = (int) i;
		}

		if (allocBlock == -1)
		{
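			// Walk the pool using the persistent round-robin cursor (tls->next) so a just-freed block isn't handed right back out.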
			for (size_t i = 0; i < tls->ntls.totalBlocks && allocBlock == -1; ++i)
			{
				// The PSP doesn't give the same block out twice in a row, even if freed.
				if (tls->usage[tls->next] == 0)
					allocBlock = tls->next;
				tls->next = (tls->next + 1) % tls->ntls.totalBlocks;
			}

			if (allocBlock != -1)
			{
				tls->usage[allocBlock] = threadID;
				tlsplThreadEndChecks.insert(std::make_pair(threadID, uid));
				--tls->ntls.freeBlocks;
				needsClear = true;
			}
		}

		if (allocBlock == -1)
		{
			tls->waitingThreads.push_back(threadID);
			__KernelWaitCurThread(WAITTYPE_TLSPL, uid, 1, 0, false, "allocate tls");
			return 0;
		}

		u32 alignedSize = (tls->ntls.blockSize + tls->alignment - 1) & ~(tls->alignment - 1);
		u32 allocAddress = tls->address + allocBlock * alignedSize;
		NotifyMemInfo(MemBlockFlags::SUB_ALLOC, allocAddress, tls->ntls.blockSize, "TlsAddr");

		// We clear the blocks upon first allocation (and also when they are freed, both are necessary.)
		if (needsClear) {
			Memory::Memset(allocAddress, 0, tls->ntls.blockSize, "TlsAddr");
		}

		return allocAddress;
	}
	else
		return 0;
}

// Parameters are an educated guess.
int sceKernelFreeTlspl(SceUID uid)
{
	WARN_LOG(SCEKERNEL, "UNIMPL sceKernelFreeTlspl(%08x)", uid);
	u32 error;
	TLSPL *tls = kernelObjects.Get<TLSPL>(uid, error);
	if (tls)
	{
		SceUID threadID = __KernelGetCurThread();
		return __KernelFreeTls(tls, threadID);
	}
	else
		return error;
}

int sceKernelReferTlsplStatus(SceUID uid, u32 infoPtr)
{
	DEBUG_LOG(SCEKERNEL, "sceKernelReferTlsplStatus(%08x, %08x)", uid, infoPtr);
	u32 error;
	TLSPL *tls = kernelObjects.Get<TLSPL>(uid, error);
	if (tls)
	{
		// Update the waiting threads in case of deletions, etc.
		__KernelSortTlsplThreads(tls);
		tls->ntls.numWaitThreads = (int) tls->waitingThreads.size();

		if (Memory::Read_U32(infoPtr) != 0)
			Memory::WriteStruct(infoPtr, &tls->ntls);
		return 0;
	}
	else
		return error;
}

const HLEFunction SysMemUserForUser[] = {
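	// Each entry: NID, wrapper, name, return-type letter, and argument mask used for HLE logging ('i' = int, 'x' = hex, 's' = string, '?' = unknown).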
	{0XA291F107, &WrapU_V<sceKernelMaxFreeMemSize>,               "sceKernelMaxFreeMemSize",               'x', ""     },
	{0XF919F628, &WrapU_V<sceKernelTotalFreeMemSize>,             "sceKernelTotalFreeMemSize",             'x', ""     },
	{0X3FC9AE6A, &WrapU_V<sceKernelDevkitVersion>,                "sceKernelDevkitVersion",                'x', ""     },
	{0X237DBD4F, &WrapI_ICIUU<sceKernelAllocPartitionMemory>,     "sceKernelAllocPartitionMemory",         'i', "isixx"},
	{0XB6D61D02, &WrapI_I<sceKernelFreePartitionMemory>,          "sceKernelFreePartitionMemory",          'i', "i"    },
	{0X9D9A5BA1, &WrapU_I<sceKernelGetBlockHeadAddr>,             "sceKernelGetBlockHeadAddr",             'x', "i"    },
	{0X13A5ABEF, &WrapI_C<sceKernelPrintf>,                       "sceKernelPrintf",                       'i', "s"    },
	{0X7591C7DB, &WrapI_I<sceKernelSetCompiledSdkVersion>,        "sceKernelSetCompiledSdkVersion",        'i', "i"    },
	{0X342061E5, &WrapI_I<sceKernelSetCompiledSdkVersion370>,     "sceKernelSetCompiledSdkVersion370",     'i', "i"    },
	{0X315AD3A0, &WrapI_I<sceKernelSetCompiledSdkVersion380_390>, "sceKernelSetCompiledSdkVersion380_390", 'i', "i"    },
	{0XEBD5C3E6, &WrapI_I<sceKernelSetCompiledSdkVersion395>,     "sceKernelSetCompiledSdkVersion395",     'i', "i"    },
	{0X057E7380, &WrapI_I<sceKernelSetCompiledSdkVersion401_402>, "sceKernelSetCompiledSdkVersion401_402", 'i', "i"    },
	{0XF77D77CB, &WrapI_I<sceKernelSetCompilerVersion>,           "sceKernelSetCompilerVersion",           'i', "i"    },
	{0X91DE343C, &WrapI_I<sceKernelSetCompiledSdkVersion500_505>, "sceKernelSetCompiledSdkVersion500_505", 'i', "i"    },
	{0X7893F79A, &WrapI_I<sceKernelSetCompiledSdkVersion507>,     "sceKernelSetCompiledSdkVersion507",     'i', "i"    },
	{0X35669D4C, &WrapI_I<sceKernelSetCompiledSdkVersion600_602>, "sceKernelSetCompiledSdkVersion600_602", 'i', "i"    },  //??
	{0X1B4217BC, &WrapI_I<sceKernelSetCompiledSdkVersion603_605>, "sceKernelSetCompiledSdkVersion603_605", 'i', "i"    },
	{0X358CA1BB, &WrapI_I<sceKernelSetCompiledSdkVersion606>,     "sceKernelSetCompiledSdkVersion606",     'i', "i"    },
	{0XFC114573, &WrapI_V<sceKernelGetCompiledSdkVersion>,        "sceKernelGetCompiledSdkVersion",        'i', ""     },
	{0X2A3E5280, nullptr,                                         "sceKernelQueryMemoryInfo",              '?', ""     },
	{0XACBD88CA, &WrapU_V<SysMemUserForUser_ACBD88CA>,            "SysMemUserForUser_ACBD88CA",            'x', ""     },
	{0X945E45DA, &WrapU_V<SysMemUserForUser_945E45DA>,            "SysMemUserForUser_945E45DA",            'x', ""     },
	{0XA6848DF8, nullptr,                                         "sceKernelSetUsersystemLibWork",         '?', ""     },
	{0X6231A71D, nullptr,                                         "sceKernelSetPTRIG",                     '?', ""     },
	{0X39F49610, nullptr,                                         "sceKernelGetPTRIG",                     '?', ""     },
	// Obscure raw block API
	{0XDB83A952, &WrapU_UU<GetMemoryBlockPtr>,                    "SysMemUserForUser_DB83A952",            'x', "xx"   },  // GetMemoryBlockAddr
	{0X50F61D8A, &WrapU_U<FreeMemoryBlock>,                       "SysMemUserForUser_50F61D8A",            'x', "x"    },  // FreeMemoryBlock
	{0XFE707FDF, &WrapU_CUUU<AllocMemoryBlock>,                   "SysMemUserForUser_FE707FDF",            'x', "sxxx" },  // AllocMemoryBlock
	{0XD8DE5C1E, &WrapU_V<SysMemUserForUser_D8DE5C1E>,            "SysMemUserForUser_D8DE5C1E",            'x', ""     },
};

void Register_SysMemUserForUser() {
	RegisterModule("SysMemUserForUser", ARRAY_SIZE(SysMemUserForUser), SysMemUserForUser);
}