// Copyright (c) 2012- PPSSPP Project.

// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, version 2.0 or later versions.

// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License 2.0 for more details.

// A copy of the GPL 2.0 should have been included with the program.
// If not, see http://www.gnu.org/licenses/

// Official git repository and contact information can be found at
// https://github.com/hrydgard/ppsspp and http://www.ppsspp.org/.

#include "Common/Serialize/Serializer.h"
#include "Common/Serialize/SerializeFuncs.h"
#include "Common/Serialize/SerializeMap.h"
#include "Core/HLE/HLE.h"
#include "Core/MIPS/MIPS.h"
#include "Core/CoreTiming.h"
#include "Core/MemMapHelpers.h"
#include "Core/Reporting.h"

#include "Core/HLE/sceKernel.h"
#include "Core/HLE/sceKernelThread.h"
#include "Core/HLE/sceKernelEventFlag.h"
#include "Core/HLE/KernelWaitHelpers.h"

void __KernelEventFlagTimeout(u64 userdata, int cycleslate);

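// The event flag state as exposed to the game, e.g. through sceKernelReferEventFlagStatus().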
struct NativeEventFlag {
	u32_le size;
	char name[KERNELOBJECT_MAX_NAME_LENGTH + 1];
	u32_le attr;
	u32_le initPattern;
	u32_le currentPattern;
	s32_le numWaitThreads;
};

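// Bookkeeping for one thread that is (or was) blocked waiting on an event flag.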
struct EventFlagTh {
	SceUID threadID;
	u32 bits;
	u32 wait;
	u32 outAddr;
	u64 pausedTimeout;

	bool operator ==(const SceUID &otherThreadID) const {
		return threadID == otherThreadID;
	}
};

class EventFlag : public KernelObject {
public:
	const char *GetName() override { return nef.name; }
	const char *GetTypeName() override { return GetStaticTypeName(); }
	static const char *GetStaticTypeName() { return "EventFlag"; }
	void GetQuickInfo(char *ptr, int size) override {
		snprintf(ptr, size, "init=%08x cur=%08x numwait=%i",
			nef.initPattern,
			nef.currentPattern,
			nef.numWaitThreads);
	}

	static u32 GetMissingErrorCode() {
		return SCE_KERNEL_ERROR_UNKNOWN_EVFID;
	}
	static int GetStaticIDType() { return SCE_KERNEL_TMID_EventFlag; }
	int GetIDType() const override { return SCE_KERNEL_TMID_EventFlag; }

	void DoState(PointerWrap &p) override {
		auto s = p.Section("EventFlag", 1);
		if (!s)
			return;

		Do(p, nef);
		EventFlagTh eft = { 0 };
		Do(p, waitingThreads, eft);
		Do(p, pausedWaits);
	}

	NativeEventFlag nef;
	std::vector<EventFlagTh> waitingThreads;
	// Key is the callback id it was for, or if no callback, the thread id.
	std::map<SceUID, EventFlagTh> pausedWaits;
};


/** Event flag creation attributes */
enum PspEventFlagAttributes {
	/** Allow the event flag to be waited upon by multiple threads */
	PSP_EVENT_WAITMULTIPLE = 0x200
};

/** Event flag wait types */
enum PspEventFlagWaitTypes {
	/** Wait for all bits in the pattern to be set */
	PSP_EVENT_WAITAND = 0x00,
	/** Wait for one or more bits in the pattern to be set */
	PSP_EVENT_WAITOR = 0x01,
	/** Clear the entire pattern when it matches. */
	PSP_EVENT_WAITCLEARALL = 0x10,
	/** Clear the wait pattern when it matches */
	PSP_EVENT_WAITCLEAR = 0x20,

	PSP_EVENT_WAITKNOWN = PSP_EVENT_WAITCLEAR | PSP_EVENT_WAITCLEARALL | PSP_EVENT_WAITOR,
};

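// CoreTiming event slot used to wake threads whose event flag waits time out. -1 until registered.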
static int eventFlagWaitTimer = -1;

void __KernelEventFlagBeginCallback(SceUID threadID, SceUID prevCallbackId);
void __KernelEventFlagEndCallback(SceUID threadID, SceUID prevCallbackId);

void __KernelEventFlagInit() {
	eventFlagWaitTimer = CoreTiming::RegisterEvent("EventFlagTimeout", __KernelEventFlagTimeout);
	__KernelRegisterWaitTypeFuncs(WAITTYPE_EVENTFLAG, __KernelEventFlagBeginCallback, __KernelEventFlagEndCallback);
}

void __KernelEventFlagDoState(PointerWrap &p) {
	auto s = p.Section("sceKernelEventFlag", 1);
	if (!s)
		return;

	Do(p, eventFlagWaitTimer);
	CoreTiming::RestoreRegisterEvent(eventFlagWaitTimer, "EventFlagTimeout", __KernelEventFlagTimeout);
}

KernelObject *__KernelEventFlagObject() {
	// Default object to load from state.
	return new EventFlag;
}

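// Example: with currentPattern = 0x5 and bits = 0x3, PSP_EVENT_WAITOR matches (bit 0 overlaps),
// but the default AND mode does not (bit 1 of the requested pattern is not set).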
static bool __KernelCheckEventFlagMatches(u32 pattern, u32 bits, u8 wait) {
	// Is this in OR (any bit can match) or AND (all bits must match) mode?
	if (wait & PSP_EVENT_WAITOR) {
		return (bits & pattern) != 0;
	} else {
		return (bits & pattern) == bits;
	}
}

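// On a match, writes the pre-clear pattern to outAddr (if valid), applies the
// WAITCLEAR/WAITCLEARALL side effects to the flag, and returns true.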
static bool __KernelApplyEventFlagMatch(u32_le *pattern, u32 bits, u8 wait, u32 outAddr) {
	if (__KernelCheckEventFlagMatches(*pattern, bits, wait)) {
		if (Memory::IsValidAddress(outAddr))
			Memory::Write_U32(*pattern, outAddr);

		if (wait & PSP_EVENT_WAITCLEAR)
			*pattern &= ~bits;
		if (wait & PSP_EVENT_WAITCLEARALL)
			*pattern = 0;
		return true;
	}
	return false;
}

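// Tries to wake one waiting thread. Returns true when the entry no longer needs to stay
// in the wait list (either the thread was woken or it's no longer actually waiting here).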
static bool __KernelUnlockEventFlagForThread(EventFlag *e, EventFlagTh &th, u32 &error, int result, bool &wokeThreads) {
	if (!HLEKernel::VerifyWait(th.threadID, WAITTYPE_EVENTFLAG, e->GetUID()))
		return true;

	// If result is an error code, we're just letting it go.
	if (result == 0) {
		if (!__KernelApplyEventFlagMatch(&e->nef.currentPattern, th.bits, th.wait, th.outAddr))
			return false;
	} else {
		// Otherwise, we set the current result since we're bailing.
		if (Memory::IsValidAddress(th.outAddr))
			Memory::Write_U32(e->nef.currentPattern, th.outAddr);
	}

	u32 timeoutPtr = __KernelGetWaitTimeoutPtr(th.threadID, error);
	if (timeoutPtr != 0 && eventFlagWaitTimer != -1) {
		// Remove any event for this thread.
		s64 cyclesLeft = CoreTiming::UnscheduleEvent(eventFlagWaitTimer, th.threadID);
		Memory::Write_U32((u32) cyclesToUs(cyclesLeft), timeoutPtr);
	}

	__KernelResumeThreadFromWait(th.threadID, result);
	wokeThreads = true;
	return true;
}

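// Wakes every waiting thread with the given result code (used for cancel and delete).
// Returns true if any thread actually woke up.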
static bool __KernelClearEventFlagThreads(EventFlag *e, int reason) {
	u32 error;
	bool wokeThreads = false;
	std::vector<EventFlagTh>::iterator iter, end;
	for (iter = e->waitingThreads.begin(), end = e->waitingThreads.end(); iter != end; ++iter)
		__KernelUnlockEventFlagForThread(e, *iter, error, reason, wokeThreads);
	e->waitingThreads.clear();

	return wokeThreads;
}

void __KernelEventFlagBeginCallback(SceUID threadID, SceUID prevCallbackId) {
	auto result = HLEKernel::WaitBeginCallback<EventFlag, WAITTYPE_EVENTFLAG, EventFlagTh>(threadID, prevCallbackId, eventFlagWaitTimer);
	if (result == HLEKernel::WAIT_CB_SUCCESS)
		DEBUG_LOG(SCEKERNEL, "sceKernelWaitEventFlagCB: Suspending lock wait for callback");
	else if (result == HLEKernel::WAIT_CB_BAD_WAIT_DATA)
		ERROR_LOG_REPORT(SCEKERNEL, "sceKernelWaitEventFlagCB: wait not found to pause for callback");
	else
		WARN_LOG_REPORT(SCEKERNEL, "sceKernelWaitEventFlagCB: beginning callback with bad wait id?");
}

void __KernelEventFlagEndCallback(SceUID threadID, SceUID prevCallbackId) {
	auto result = HLEKernel::WaitEndCallback<EventFlag, WAITTYPE_EVENTFLAG, EventFlagTh>(threadID, prevCallbackId, eventFlagWaitTimer, __KernelUnlockEventFlagForThread);
	if (result == HLEKernel::WAIT_CB_RESUMED_WAIT)
		DEBUG_LOG(SCEKERNEL, "sceKernelWaitEventFlagCB: Resuming lock wait from callback");
}

//SceUID sceKernelCreateEventFlag(const char *name, int attr, int bits, SceKernelEventFlagOptParam *opt);
int sceKernelCreateEventFlag(const char *name, u32 flag_attr, u32 flag_initPattern, u32 optPtr) {
	if (!name) {
		return hleReportWarning(SCEKERNEL, SCE_KERNEL_ERROR_ERROR, "invalid name");
	}

	// These attributes aren't valid.
	if ((flag_attr & 0x100) != 0 || flag_attr >= 0x300) {
		return hleReportWarning(SCEKERNEL, SCE_KERNEL_ERROR_ILLEGAL_ATTR, "invalid attr parameter: %08x", flag_attr);
	}

	EventFlag *e = new EventFlag();
	SceUID id = kernelObjects.Create(e);

	e->nef.size = sizeof(NativeEventFlag);
	strncpy(e->nef.name, name, KERNELOBJECT_MAX_NAME_LENGTH);
	e->nef.name[KERNELOBJECT_MAX_NAME_LENGTH] = 0;
	e->nef.attr = flag_attr;
	e->nef.initPattern = flag_initPattern;
	e->nef.currentPattern = e->nef.initPattern;
	e->nef.numWaitThreads = 0;

	if (optPtr != 0) {
		u32 size = Memory::Read_U32(optPtr);
		if (size > 4)
			WARN_LOG_REPORT(SCEKERNEL, "sceKernelCreateEventFlag(%s) unsupported options parameter, size = %d", name, size);
	}
	if ((flag_attr & ~PSP_EVENT_WAITMULTIPLE) != 0)
		WARN_LOG_REPORT(SCEKERNEL, "sceKernelCreateEventFlag(%s) unsupported attr parameter: %08x", name, flag_attr);

	return hleLogSuccessI(SCEKERNEL, id);
}

u32 sceKernelCancelEventFlag(SceUID uid, u32 pattern, u32 numWaitThreadsPtr) {
	u32 error;
	EventFlag *e = kernelObjects.Get<EventFlag>(uid, error);
	if (e) {
		e->nef.numWaitThreads = (int) e->waitingThreads.size();
		if (Memory::IsValidAddress(numWaitThreadsPtr))
			Memory::Write_U32(e->nef.numWaitThreads, numWaitThreadsPtr);

		e->nef.currentPattern = pattern;

		if (__KernelClearEventFlagThreads(e, SCE_KERNEL_ERROR_WAIT_CANCEL))
			hleReSchedule("event flag canceled");

		return hleLogSuccessI(SCEKERNEL, 0);
	} else {
		return hleLogDebug(SCEKERNEL, error, "invalid event flag");
	}
}

u32 sceKernelClearEventFlag(SceUID id, u32 bits) {
	u32 error;
	EventFlag *e = kernelObjects.Get<EventFlag>(id, error);
	if (e) {
		e->nef.currentPattern &= bits;
		// Note that it's not possible for threads to get woken up by this action.
		hleEatCycles(430);
		return hleLogSuccessI(SCEKERNEL, 0);
	} else {
		return hleLogDebug(SCEKERNEL, error, "invalid event flag");
	}
}

u32 sceKernelDeleteEventFlag(SceUID uid) {
	u32 error;
	EventFlag *e = kernelObjects.Get<EventFlag>(uid, error);
	if (e) {
		bool wokeThreads = __KernelClearEventFlagThreads(e, SCE_KERNEL_ERROR_WAIT_DELETE);
		if (wokeThreads)
			hleReSchedule("event flag deleted");

		return hleLogSuccessI(SCEKERNEL, kernelObjects.Destroy<EventFlag>(uid));
	} else {
		return hleLogDebug(SCEKERNEL, error, "invalid event flag");
	}
}

u32 sceKernelSetEventFlag(SceUID id, u32 bitsToSet) {
	u32 error;
	EventFlag *e = kernelObjects.Get<EventFlag>(id, error);
	if (e) {
		bool wokeThreads = false;

		e->nef.currentPattern |= bitsToSet;

		for (size_t i = 0; i < e->waitingThreads.size(); ++i) {
			EventFlagTh *t = &e->waitingThreads[i];
			if (__KernelUnlockEventFlagForThread(e, *t, error, 0, wokeThreads)) {
				e->waitingThreads.erase(e->waitingThreads.begin() + i);
				// Try the one that used to be in this place next.
				--i;
			}
		}

		if (wokeThreads)
			hleReSchedule("event flag set");

		hleEatCycles(430);
		return hleLogSuccessI(SCEKERNEL, 0);
	} else {
		return hleLogDebug(SCEKERNEL, error, "invalid event flag");
	}
}

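// CoreTiming callback fired when a thread's event flag wait times out.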
void __KernelEventFlagTimeout(u64 userdata, int cycleslate) {
	SceUID threadID = (SceUID)userdata;

	// This still needs to set the result pointer from the wait.
	u32 error;
	SceUID flagID = __KernelGetWaitID(threadID, WAITTYPE_EVENTFLAG, error);
	u32 timeoutPtr = __KernelGetWaitTimeoutPtr(threadID, error);
	EventFlag *e = kernelObjects.Get<EventFlag>(flagID, error);
	if (e) {
		if (timeoutPtr != 0)
			Memory::Write_U32(0, timeoutPtr);

		for (size_t i = 0; i < e->waitingThreads.size(); i++) {
			EventFlagTh *t = &e->waitingThreads[i];
			if (t->threadID == threadID) {
				bool wokeThreads;

				// This thread isn't waiting anymore, but we'll remove it from waitingThreads later.
				// The reason is, if it times out, but what it was waiting on is DELETED prior to it
				// actually running, it will get a DELETE result instead of a TIMEOUT.
				// So, we need to remember it or we won't be able to mark it DELETE instead later.
				__KernelUnlockEventFlagForThread(e, *t, error, SCE_KERNEL_ERROR_WAIT_TIMEOUT, wokeThreads);
				break;
			}
		}
	}
}

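// Schedules the timeout event for the current thread, based on the microsecond value at timeoutPtr.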
static void __KernelSetEventFlagTimeout(EventFlag *e, u32 timeoutPtr) {
	if (timeoutPtr == 0 || eventFlagWaitTimer == -1)
		return;

	int micro = (int) Memory::Read_U32(timeoutPtr);

	// This seems like the actual timing of timeouts on hardware.
	if (micro <= 1)
		micro = 25;
	else if (micro <= 209)
		micro = 240;

	// This should call __KernelEventFlagTimeout() later, unless we cancel it.
	CoreTiming::ScheduleEvent(usToCycles(micro), eventFlagWaitTimer, __KernelGetCurThread());
}

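// Illustrative game-side usage (SDK-style signature with pointers, as opposed to this HLE
// wrapper's address parameters): wait until either of two bits is set, clearing them on wakeup:
//   u32 outBits;
//   sceKernelWaitEventFlag(evfId, 0x3, PSP_EVENT_WAITOR | PSP_EVENT_WAITCLEAR, &outBits, NULL);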
int sceKernelWaitEventFlag(SceUID id, u32 bits, u32 wait, u32 outBitsPtr, u32 timeoutPtr) {
	if ((wait & ~PSP_EVENT_WAITKNOWN) != 0) {
		return hleReportWarning(SCEKERNEL, SCE_KERNEL_ERROR_ILLEGAL_MODE, "invalid mode parameter: %08x", wait);
	}
	// Can't wait on 0, that's guaranteed to wait forever.
	if (bits == 0) {
		return hleLogDebug(SCEKERNEL, SCE_KERNEL_ERROR_EVF_ILPAT, "bad pattern");
	}

	if (!__KernelIsDispatchEnabled()) {
		return hleLogDebug(SCEKERNEL, SCE_KERNEL_ERROR_CAN_NOT_WAIT, "dispatch disabled");
	}

	u32 error;
	EventFlag *e = kernelObjects.Get<EventFlag>(id, error);
	if (e) {
		EventFlagTh th;
		if (!__KernelApplyEventFlagMatch(&e->nef.currentPattern, bits, wait, outBitsPtr)) {
			// If this thread was left in waitingThreads after a timeout, remove it.
			// Otherwise we might write the outBitsPtr in the wrong place.
			HLEKernel::RemoveWaitingThread(e->waitingThreads, __KernelGetCurThread());

			u32 timeout = 0xFFFFFFFF;
			if (Memory::IsValidAddress(timeoutPtr))
				timeout = Memory::Read_U32(timeoutPtr);

			// Do we allow more than one thread to wait?
			if (e->waitingThreads.size() > 0 && (e->nef.attr & PSP_EVENT_WAITMULTIPLE) == 0) {
				return hleLogDebug(SCEKERNEL, SCE_KERNEL_ERROR_EVF_MULTI);
			}

			(void)hleLogSuccessI(SCEKERNEL, 0, "waiting");

			// No match - must wait.
			th.threadID = __KernelGetCurThread();
			th.bits = bits;
			th.wait = wait;
			// If < 5ms, sometimes hardware doesn't write this, but it's unpredictable.
			th.outAddr = timeout == 0 ? 0 : outBitsPtr;
			e->waitingThreads.push_back(th);

			__KernelSetEventFlagTimeout(e, timeoutPtr);
			__KernelWaitCurThread(WAITTYPE_EVENTFLAG, id, 0, timeoutPtr, false, "event flag waited");
		} else {
			(void)hleLogSuccessI(SCEKERNEL, 0);
		}

		hleEatCycles(600);
		return 0;
	} else {
		return hleLogDebug(SCEKERNEL, error, "invalid event flag");
	}
}

int sceKernelWaitEventFlagCB(SceUID id, u32 bits, u32 wait, u32 outBitsPtr, u32 timeoutPtr) {
	if ((wait & ~PSP_EVENT_WAITKNOWN) != 0) {
		return hleReportWarning(SCEKERNEL, SCE_KERNEL_ERROR_ILLEGAL_MODE, "invalid mode parameter: %08x", wait);
	}
	// Can't wait on 0, that's guaranteed to wait forever.
	if (bits == 0) {
		return hleLogDebug(SCEKERNEL, SCE_KERNEL_ERROR_EVF_ILPAT, "bad pattern");
	}

	if (!__KernelIsDispatchEnabled()) {
		return hleLogDebug(SCEKERNEL, SCE_KERNEL_ERROR_CAN_NOT_WAIT, "dispatch disabled");
	}

	u32 error;
	EventFlag *e = kernelObjects.Get<EventFlag>(id, error);
	if (e) {
		EventFlagTh th;
		// We only check, not apply here.  This way the CLEAR/etc. options don't apply yet.
		// If we run callbacks, we will check again after the callbacks complete.
		bool doWait = !__KernelCheckEventFlagMatches(e->nef.currentPattern, bits, wait);
		bool doCallbackWait = false;
		if (__KernelCurHasReadyCallbacks()) {
			doWait = true;
			doCallbackWait = true;
		}

		if (doWait) {
			// If this thread was left in waitingThreads after a timeout, remove it.
			// Otherwise we might write the outBitsPtr in the wrong place.
			HLEKernel::RemoveWaitingThread(e->waitingThreads, __KernelGetCurThread());

			u32 timeout = 0xFFFFFFFF;
			if (Memory::IsValidAddress(timeoutPtr))
				timeout = Memory::Read_U32(timeoutPtr);

			// Do we allow more than one thread to wait?
			if (e->waitingThreads.size() > 0 && (e->nef.attr & PSP_EVENT_WAITMULTIPLE) == 0) {
				return hleLogDebug(SCEKERNEL, SCE_KERNEL_ERROR_EVF_MULTI);
			}

			(void)hleLogSuccessI(SCEKERNEL, 0, "waiting");

			// No match - must wait.
			th.threadID = __KernelGetCurThread();
			th.bits = bits;
			th.wait = wait;
			// If < 5ms, sometimes hardware doesn't write this, but it's unpredictable.
			th.outAddr = timeout == 0 ? 0 : outBitsPtr;
			e->waitingThreads.push_back(th);

			__KernelSetEventFlagTimeout(e, timeoutPtr);
			if (doCallbackWait)
				__KernelWaitCallbacksCurThread(WAITTYPE_EVENTFLAG, id, 0, timeoutPtr);
			else
				__KernelWaitCurThread(WAITTYPE_EVENTFLAG, id, 0, timeoutPtr, true, "event flag waited");
		} else {
			(void)hleLogSuccessI(SCEKERNEL, 0);
			__KernelApplyEventFlagMatch(&e->nef.currentPattern, bits, wait, outBitsPtr);
			hleCheckCurrentCallbacks();
		}

		return 0;
	} else {
		return hleLogDebug(SCEKERNEL, error, "invalid event flag");
	}
}

int sceKernelPollEventFlag(SceUID id, u32 bits, u32 wait, u32 outBitsPtr) {
	if ((wait & ~PSP_EVENT_WAITKNOWN) != 0) {
		return hleReportWarning(SCEKERNEL, SCE_KERNEL_ERROR_ILLEGAL_MODE, "invalid mode parameter: %08x", wait);
	}
	// Poll seems to also fail when CLEAR and CLEARALL are used together, but not wait.
	if ((wait & PSP_EVENT_WAITCLEAR) != 0 && (wait & PSP_EVENT_WAITCLEARALL) != 0) {
		return hleReportWarning(SCEKERNEL, SCE_KERNEL_ERROR_ILLEGAL_MODE, "invalid mode parameter: %08x", wait);
	}
	// Can't wait on 0, it never matches.
	if (bits == 0) {
		return hleLogDebug(SCEKERNEL, SCE_KERNEL_ERROR_EVF_ILPAT, "bad pattern");
	}

	u32 error;
	EventFlag *e = kernelObjects.Get<EventFlag>(id, error);
	if (e) {
		if (!__KernelApplyEventFlagMatch(&e->nef.currentPattern, bits, wait, outBitsPtr)) {
			if (Memory::IsValidAddress(outBitsPtr))
				Memory::Write_U32(e->nef.currentPattern, outBitsPtr);

			if (e->waitingThreads.size() > 0 && (e->nef.attr & PSP_EVENT_WAITMULTIPLE) == 0) {
				return hleLogDebug(SCEKERNEL, SCE_KERNEL_ERROR_EVF_MULTI);
			}

			// No match - return that, this is polling, not waiting.
			return hleLogDebug(SCEKERNEL, SCE_KERNEL_ERROR_EVF_COND);
		} else {
			return hleLogSuccessI(SCEKERNEL, 0);
		}
	} else {
		return hleLogDebug(SCEKERNEL, error, "invalid event flag");
	}
}

//int sceKernelReferEventFlagStatus(SceUID event, SceKernelEventFlagInfo *status);
u32 sceKernelReferEventFlagStatus(SceUID id, u32 statusPtr) {
	u32 error;
	EventFlag *e = kernelObjects.Get<EventFlag>(id, error);
	if (e) {
		if (!Memory::IsValidAddress(statusPtr))
			return hleLogWarning(SCEKERNEL, -1, "invalid ptr");

		HLEKernel::CleanupWaitingThreads(WAITTYPE_EVENTFLAG, id, e->waitingThreads);

		e->nef.numWaitThreads = (int) e->waitingThreads.size();
		if (Memory::Read_U32(statusPtr) != 0)
			Memory::WriteStruct(statusPtr, &e->nef);
		return hleLogSuccessI(SCEKERNEL, 0);
	} else {
		return hleLogDebug(SCEKERNEL, error, "invalid event flag");
	}
}