1 //====== Copyright Valve Corporation, All rights reserved. ====================
2 
3 #if defined( _MSC_VER ) && ( _MSC_VER <= 1800 )
4 	#pragma warning( disable: 4244 )
5 	// 1>C:\Program Files (x86)\Microsoft Visual Studio 12.0\VC\include\chrono(749): warning C4244: '=' : conversion from '__int64' to 'time_t', possible loss of data (steamnetworkingsockets_lowlevel.cpp)
6 #endif
7 
8 #ifdef __GNUC__
9 	// src/public/tier0/basetypes.h:104:30: error: assuming signed overflow does not occur when assuming that (X + c) < X is always false [-Werror=strict-overflow]
10 	// current steamrt:scout gcc "g++ (SteamRT 4.8.4-1ubuntu15~12.04+steamrt1.2+srt1) 4.8.4" requires this at the top due to optimizations
11 	#pragma GCC diagnostic ignored "-Wstrict-overflow"
12 #endif
13 
14 #include <thread>
15 #include <mutex>
16 #include <atomic>
17 
18 #ifdef POSIX
19 #include <pthread.h>
20 #include <sched.h>
21 #endif
22 
23 #include "steamnetworkingsockets_lowlevel.h"
24 #include "../steamnetworkingsockets_platform.h"
25 #include "../steamnetworkingsockets_internal.h"
26 #include "../steamnetworkingsockets_thinker.h"
27 #include "steamnetworkingsockets_connections.h"
28 #include <vstdlib/random.h>
29 #include <tier1/utlpriorityqueue.h>
30 #include <tier1/utllinkedlist.h>
31 #include "crypto.h"
32 
33 #include <tier0/memdbgoff.h>
34 
35 // Ugggggggggg MSVC VS2013 STL bug: try_lock_for doesn't actually respect the timeout, it always ends up using an infinite timeout.
36 // And even in 2015, the code is calling the timer to get current time, to convert a relative time to an absolute time, and then
37 // waiting until that absolute time, which then calls the timer again....and subtracts it back off....It's really bad. Just go
38 // directly to the underlying Win32 primitives.  Looks like the Visual Studio 2017 STL implementation is sane, though.
39 #if defined(_MSC_VER) && _MSC_VER < 1914
40 	// NOTE - we could implement our own mutex here.
41 	#error "std::recursive_timed_mutex doesn't work"
42 #endif
43 
44 #ifdef _XBOX_ONE
45 	#include <combaseapi.h>
46 #endif
47 
48 // Time low level send/recv calls and packet processing
49 //#define STEAMNETWORKINGSOCKETS_LOWLEVEL_TIME_SOCKET_CALLS
50 
51 #include <tier0/memdbgon.h>
52 
53 namespace SteamNetworkingSocketsLib {
54 
// Longest single wait, in milliseconds.  (Presumably bounds the service
// thread's poll waits — the uses are later in the file, not in this chunk.)
constexpr int k_msMaxPollWait = 1000;
// Cap on a timestamp delta, sized slightly larger than the max poll wait
// (1100 usec per msec allows ~10% slop over k_msMaxPollWait).
constexpr SteamNetworkingMicroseconds k_usecMaxTimestampDelta = k_msMaxPollWait * 1100;

// Forward declaration; defined later in this file
static void FlushSpew();

// Requested size, in bytes, of the OS-level buffers for our UDP sockets
int g_nSteamDatagramSocketBufferSize = 256*1024;

/// Global lock for all local data structures
static Lock<RecursiveTimedMutexImpl> s_mutexGlobalLock( "global", 0 );
64 
65 #if STEAMNETWORKINGSOCKETS_LOCK_DEBUG_LEVEL > 0
66 
67 // By default, complain if we hold the lock for more than this long
68 constexpr SteamNetworkingMicroseconds k_usecDefaultLongLockHeldWarningThreshold = 5*1000;
69 
70 // Debug the locks active on the cu
// Per-thread bookkeeping used to detect lock-order violations and
// long lock hold/wait times.
struct ThreadLockDebugInfo
{
	static constexpr int k_nMaxHeldLocks = 8;  // Max simultaneously-held locks tracked per thread
	static constexpr int k_nMaxTags = 32;      // Max distinct tags journaled per outer lock scope

	int m_nHeldLocks = 0; // Number of valid entries in m_arHeldLocks
	int m_nTags = 0;      // Number of valid entries in m_arTags

	SteamNetworkingMicroseconds m_usecLongLockWarningThreshold;   // Complain if the outermost lock is held longer than this
	SteamNetworkingMicroseconds m_usecIgnoreLongLockWaitTimeUntil; // Suppress long-wait warnings until this time
	SteamNetworkingMicroseconds m_usecOuterLockStartTime; // Time when we started waiting on outermost lock (if we don't have it yet), or when we acquired the lock (if we have it)

	// Locks currently held by this thread (mostly a stack, but out-of-order
	// release is allowed; see AboutToUnlock)
	const LockDebugInfo *m_arHeldLocks[ k_nMaxHeldLocks ];
	struct Tag_t
	{
		const char *m_pszTag; // Tag string.  Compared by pointer, so expected to be a literal
		int m_nCount;         // How many times this tag was added in the current scope
	};
	Tag_t m_arTags[ k_nMaxTags ];
};
91 
// Optional app hooks: invoked when the outermost lock is acquired (with the
// time spent waiting) and when it is released (with the time it was held).
static void (*s_fLockAcquiredCallback)( const char *tags, SteamNetworkingMicroseconds usecWaited );
static void (*s_fLockHeldCallback)( const char *tags, SteamNetworkingMicroseconds usecWaited );

// Complain if we wait longer than this to acquire the outermost lock
static SteamNetworkingMicroseconds s_usecLockWaitWarningThreshold = 2*1000;
95 
96 /// Get the per-thread debug info
/// Get the per-thread lock-debugging structure, creating it on first use.
/// The returned object lives for the remainder of the calling thread's life.
static ThreadLockDebugInfo &GetThreadDebugInfo()
{
	// Apple seems to hate thread_local.  Is there some sort of feature
	// define we can check here?  It's a shame because it's really very
	// efficient on MSVC, gcc, and clang on Windows and linux.
	//
	// Apple seems to support thread_local starting with Xcode 8.0
	#if defined(__APPLE__) && __clang_major__ < 8

		static pthread_key_t key;
		static pthread_once_t key_once = PTHREAD_ONCE_INIT;

		// One time init the TLS key
		pthread_once( &key_once,
			[](){ // Initialization code to run once
				pthread_key_create(
					&key,
					[](void *ptr) { free(ptr); } // Destructor function, run at thread exit
				);
			}
		);

		// Get the current thread's object, allocating it lazily.  Zero-fill
		// matches the field defaults of ThreadLockDebugInfo.
		void *result = pthread_getspecific(key);
		if ( unlikely( result == nullptr ) )
		{
			result = malloc( sizeof(ThreadLockDebugInfo) );
			memset( result, 0, sizeof(ThreadLockDebugInfo) );
			pthread_setspecific(key, result);
		}
		return *static_cast<ThreadLockDebugInfo *>( result );
	#else

		// Use thread_local
		thread_local ThreadLockDebugInfo tls_lockDebugInfo;
		return tls_lockDebugInfo;
	#endif
}
135 
136 /// If non-NULL, add a "tag" to the lock journal for the current thread.
137 /// This is useful so that if we hold a lock for a long time, we can get
138 /// an idea what sorts of operations were taking a long time.
AddThreadLockTag(const char * pszTag)139 static void AddThreadLockTag( const char *pszTag )
140 {
141 	if ( !pszTag )
142 		return;
143 
144 	ThreadLockDebugInfo &t = GetThreadDebugInfo();
145 	Assert( t.m_nHeldLocks > 0 ); // Can't add a tag unless we are locked!
146 
147 	for ( int i = 0 ; i < t.m_nTags ; ++i )
148 	{
149 		if ( t.m_arTags[i].m_pszTag == pszTag )
150 		{
151 			++t.m_arTags[i].m_nCount;
152 			return;
153 		}
154 	}
155 
156 	if ( t.m_nTags >= ThreadLockDebugInfo::k_nMaxTags )
157 		return;
158 
159 	t.m_arTags[ t.m_nTags ].m_pszTag = pszTag;
160 	t.m_arTags[ t.m_nTags ].m_nCount = 1;
161 	++t.m_nTags;
162 }
163 
/// Destructor.  A lock must not be destroyed while any thread holds it;
/// if the current thread does, assert and clean up the journal entry so
/// we don't leave a dangling pointer behind.
LockDebugInfo::~LockDebugInfo()
{
	// We should not be locked!  If we are, remove us
	ThreadLockDebugInfo &t = GetThreadDebugInfo();
	for ( int i = t.m_nHeldLocks-1 ; i >= 0 ; --i )
	{
		if ( t.m_arHeldLocks[i] == this )
		{
			AssertMsg( false, "Lock '%s' being destroyed while it is held!", m_pszName );
			AboutToUnlock(); // Removes the journal entry (and does the usual hold-time accounting)
		}
	}
}
177 
/// Bookkeeping just before the current thread attempts to take this lock.
/// bTry is true when the caller is only *attempting* the lock (and can
/// tolerate failure); a "try" cannot deadlock, so ordering checks relax.
void LockDebugInfo::AboutToLock( bool bTry )
{
	ThreadLockDebugInfo &t = GetThreadDebugInfo();

	// First lock held by this thread?
	if ( t.m_nHeldLocks == 0 )
	{
		// Remember when we started trying to lock
		t.m_usecOuterLockStartTime = SteamNetworkingSockets_GetLocalTimestamp();
	}
	else
	{

		// We already hold a lock.  Make sure it's legal for us to take another!

		// Global lock *must* always be the outermost lock.  (It is legal to take other locks in
		// between and then lock the global lock recursively.)
		const bool bHoldGlobalLock = t.m_arHeldLocks[ 0 ] == &s_mutexGlobalLock;
		AssertMsg(
			bHoldGlobalLock || this != &s_mutexGlobalLock,
			"Taking global lock while already holding lock '%s'", t.m_arHeldLocks[ 0 ]->m_pszName
		);

		// Check for taking locks in such a way that might lead to deadlocks.
		// If they are only "trying", then we do allow out of order behaviour.
		if ( !bTry )
		{
			const LockDebugInfo *pTopLock = t.m_arHeldLocks[ t.m_nHeldLocks-1 ];

			// Once we take a "short duration" lock, we must not
			// take any additional locks!  (Including a recursive lock.)
			AssertMsg( !( pTopLock->m_nFlags & LockDebugInfo::k_nFlag_ShortDuration ), "Taking lock '%s' while already holding lock '%s'", m_pszName, pTopLock->m_pszName );

			// If the global lock isn't held, then no more than one
			// object lock is allowed, since two different threads
			// might take them in different order.
			constexpr int k_nObjectFlags = LockDebugInfo::k_nFlag_Connection | LockDebugInfo::k_nFlag_PollGroup;
			if (
				( !bHoldGlobalLock && ( m_nFlags & k_nObjectFlags ) != 0 )
				//|| ( m_nFlags & k_nFlag_Table ) // We actually do this in one place when we know it's OK.  Not worth it right now to get this situation exempted from the checking.
			) {
				// We must not already hold any existing object locks (except perhaps this one)
				for ( int i = 0 ; i < t.m_nHeldLocks ; ++i )
				{
					const LockDebugInfo *pOtherLock = t.m_arHeldLocks[ i ];
					AssertMsg( pOtherLock == this || !( pOtherLock->m_nFlags & k_nObjectFlags ),
						"Taking lock '%s' and then '%s', while not holding the global lock", pOtherLock->m_pszName, m_pszName );
				}
			}
		}
	}
}
230 
/// Bookkeeping immediately after this lock has been acquired.  Pushes the
/// lock onto the thread's held-lock journal; on the outermost acquire,
/// measures how long we waited and fires warnings/callbacks as needed.
void LockDebugInfo::OnLocked( const char *pszTag )
{
	ThreadLockDebugInfo &t = GetThreadDebugInfo();

	Assert( t.m_nHeldLocks < ThreadLockDebugInfo::k_nMaxHeldLocks );
	t.m_arHeldLocks[ t.m_nHeldLocks++ ] = this;

	// Outermost lock for this thread?  Reset the per-scope state.
	if ( t.m_nHeldLocks == 1 )
	{
		SteamNetworkingMicroseconds usecNow = SteamNetworkingSockets_GetLocalTimestamp();
		SteamNetworkingMicroseconds usecTimeSpentWaitingOnLock = usecNow - t.m_usecOuterLockStartTime;
		t.m_usecLongLockWarningThreshold = k_usecDefaultLongLockHeldWarningThreshold;
		t.m_nTags = 0;

		if ( usecTimeSpentWaitingOnLock > s_usecLockWaitWarningThreshold && usecNow > t.m_usecIgnoreLongLockWaitTimeUntil )
		{
			if ( pszTag )
				SpewWarning( "Waited %.1fms for SteamNetworkingSockets lock [%s]", usecTimeSpentWaitingOnLock*1e-3, pszTag );
			else
				SpewWarning( "Waited %.1fms for SteamNetworkingSockets lock", usecTimeSpentWaitingOnLock*1e-3 );
			ETW_LongOp( "lock wait", usecTimeSpentWaitingOnLock, pszTag );
		}

		auto callback = s_fLockAcquiredCallback; // save to temp, to prevent very narrow race condition where variable is cleared after we null check it, and we call null
		if ( callback )
			callback( pszTag, usecTimeSpentWaitingOnLock );

		// From here on, m_usecOuterLockStartTime means "when we acquired the lock"
		t.m_usecOuterLockStartTime = usecNow;
	}

	AddThreadLockTag( pszTag );
}
263 
/// Bookkeeping just before this lock is released.  Removes the lock from
/// the thread's journal (out-of-order release is allowed), and on the final
/// release of the outermost lock, checks how long it was held and fires
/// warnings/the held callback as appropriate.
void LockDebugInfo::AboutToUnlock()
{
	char tags[ 256 ]; // Comma-separated tag summary; only built if it will be used

	SteamNetworkingMicroseconds usecElapsed = 0;
	SteamNetworkingMicroseconds usecElapsedTooLong = 0;
	auto lockHeldCallback = s_fLockHeldCallback; // save to temp to avoid racing with it being cleared

	ThreadLockDebugInfo &t = GetThreadDebugInfo();
	Assert( t.m_nHeldLocks > 0 );

	// Unlocking the last lock?
	if ( t.m_nHeldLocks == 1 )
	{

		// We're about to do the final release.  How long did we hold the lock?
		usecElapsed = SteamNetworkingSockets_GetLocalTimestamp() - t.m_usecOuterLockStartTime;

		// Too long?  We need to check the threshold here because the threshold could
		// change by another thread immediately after we release the lock.  Also, if
		// we're debugging, all bets are off.  They could have hit a breakpoint, and
		// we don't want to create a bunch of confusing spew with spurious asserts
		if ( usecElapsed >= t.m_usecLongLockWarningThreshold && !Plat_IsInDebugSession() )
		{
			usecElapsedTooLong = usecElapsed;
		}

		// Format the tag journal into tags[] only if somebody will read it
		if ( usecElapsedTooLong > 0 || lockHeldCallback )
		{
			char *p = tags;
			char *end = tags + sizeof(tags) - 1;
			// p+5 guard: need room for the ',' separator plus a few characters
			for ( int i = 0 ; i < t.m_nTags && p+5 < end ; ++i )
			{
				if ( p > tags )
					*(p++) = ',';

				const ThreadLockDebugInfo::Tag_t &tag = t.m_arTags[i];
				int taglen = std::min( int(end-p), (int)V_strlen( tag.m_pszTag ) );
				memcpy( p, tag.m_pszTag, taglen );
				p += taglen;

				// Append a repeat count like "(x3)" if the tag was added more than once
				if ( tag.m_nCount > 1 )
				{
					int l = end-p;
					if ( l <= 5 )
						break;
					p += V_snprintf( p, l, "(x%d)", tag.m_nCount );
				}
			}
			*p = '\0';
		}

		t.m_nTags = 0;
		t.m_usecOuterLockStartTime = 0; // Just for grins.
	}

	if ( usecElapsed > 0 && lockHeldCallback )
	{
		lockHeldCallback(tags, usecElapsed);
	}

	// Yelp if we held the lock for longer than the threshold.
	if ( usecElapsedTooLong != 0 )
	{
		SpewWarning(
			"SteamNetworkingSockets lock held for %.1fms.  (Performance warning.)  %s\n"
			"This is usually a symptom of a general performance problem such as thread starvation.",
			usecElapsedTooLong*1e-3, tags
		);
		ETW_LongOp( "lock held", usecElapsedTooLong, tags );
	}

	// NOTE: We are allowed to unlock out of order!  We specifically
	// do this with the table lock!
	for ( int i = t.m_nHeldLocks-1 ; i >= 0 ; --i )
	{
		if ( t.m_arHeldLocks[i] == this )
		{
			--t.m_nHeldLocks;
			if ( i < t.m_nHeldLocks ) // Don't do the memmove in the common case of stack pop
				memmove( &t.m_arHeldLocks[i], &t.m_arHeldLocks[i+1], (t.m_nHeldLocks-i) * sizeof(t.m_arHeldLocks[0]) );
			t.m_arHeldLocks[t.m_nHeldLocks] = nullptr; // Just for grins
			return;
		}
	}

	AssertMsg( false, "Unlocked a lock '%s' that wasn't held?", m_pszName );
}
352 
_AssertHeldByCurrentThread(const char * pszFile,int line,const char * pszTag) const353 void LockDebugInfo::_AssertHeldByCurrentThread( const char *pszFile, int line, const char *pszTag ) const
354 {
355 	ThreadLockDebugInfo &t = GetThreadDebugInfo();
356 	for ( int i = t.m_nHeldLocks-1 ; i >= 0 ; --i )
357 	{
358 		if ( t.m_arHeldLocks[i] == this )
359 		{
360 			AddThreadLockTag( pszTag );
361 			return;
362 		}
363 	}
364 
365 	AssertMsg( false, "%s(%d): Lock '%s' not held", pszFile, line, m_pszName );
366 }
367 
/// Raise (never lower) the current thread's long-lock-held warning
/// threshold for the current lock scope, and suppress lock *wait*
/// warnings for that same duration.  Caller must hold the global lock.
void SteamNetworkingGlobalLock::SetLongLockWarningThresholdMS( const char *pszTag, int msWarningThreshold )
{
	AssertHeldByCurrentThread( pszTag );
	SteamNetworkingMicroseconds usecWarningThreshold = SteamNetworkingMicroseconds{msWarningThreshold}*1000;
	ThreadLockDebugInfo &t = GetThreadDebugInfo();
	if ( t.m_usecLongLockWarningThreshold < usecWarningThreshold )
	{
		t.m_usecLongLockWarningThreshold = usecWarningThreshold;
		t.m_usecIgnoreLongLockWaitTimeUntil = SteamNetworkingSockets_GetLocalTimestamp() + t.m_usecLongLockWarningThreshold;
	}
}
379 
/// Assert that the global lock is held by the calling thread.
void SteamNetworkingGlobalLock::_AssertHeldByCurrentThread( const char *pszFile, int line )
{
	s_mutexGlobalLock._AssertHeldByCurrentThread( pszFile, line );
}
384 
/// Assert that the global lock is held by the calling thread, and also
/// journal a tag for hold-time diagnostics.
void SteamNetworkingGlobalLock::_AssertHeldByCurrentThread( const char *pszFile, int line, const char *pszTag )
{
	s_mutexGlobalLock._AssertHeldByCurrentThread( pszFile, line );
	AddThreadLockTag( pszTag );
}
390 
391 #endif // #if STEAMNETWORKINGSOCKETS_LOCK_DEBUG_LEVEL > 0
392 
/// Acquire the global lock, blocking until it is available.
void SteamNetworkingGlobalLock::Lock( const char *pszTag )
{
	s_mutexGlobalLock.lock( pszTag );
}
397 
/// Attempt to acquire the global lock, waiting at most msTimeout
/// milliseconds.  Returns true if the lock was acquired.
bool SteamNetworkingGlobalLock::TryLock( const char *pszTag, int msTimeout )
{
	return s_mutexGlobalLock.try_lock_for( msTimeout, pszTag );
}
402 
/// Release the global lock.
void SteamNetworkingGlobalLock::Unlock()
{
	s_mutexGlobalLock.unlock();
}
407 
SeedWeakRandomGenerator()408 static void SeedWeakRandomGenerator()
409 {
410 
411 	// Seed cheesy random number generator using true source of entropy
412 	int temp;
413 	CCrypto::GenerateRandomBlock( &temp, sizeof(temp) );
414 	WeakRandomSeed( temp );
415 }
416 
// Most recent timestamp value.  NOTE(review): presumably used by the
// timestamp code later in the file to keep time monotonic — the uses are
// not visible in this chunk; confirm there.
static std::atomic<long long> s_usecTimeLastReturned;

// Start with an offset so that a timestamp of zero is always pretty far in the past.
// But round it up to nice round number, so that looking at timestamps in the debugger
// is easy to read.
const long long k_nInitialTimestampMin = k_nMillion*24*3600*30;
const long long k_nInitialTimestamp = 3000000000000ll;
COMPILE_TIME_ASSERT( 2000000000000ll < k_nInitialTimestampMin );
COMPILE_TIME_ASSERT( k_nInitialTimestampMin < k_nInitialTimestamp );
static std::atomic<long long> s_usecTimeOffset( k_nInitialTimestamp );

// Reference count of systems that require the low-level services to be running
static std::atomic<int> s_nLowLevelSupportRefCount(0);

// When set, the application drives polling itself.  NOTE(review): inferred
// from the name; confirm at the uses later in the file.
static volatile bool s_bManualPollMode;
430 
431 /////////////////////////////////////////////////////////////////////////////
432 //
433 // Raw sockets
434 //
435 /////////////////////////////////////////////////////////////////////////////
436 
437 // We don't need recursion or timeout.  Just the fastest thing that works.
438 // Note that we could use a lock-free queue for this.  But I suspect that this
439 // won't get enough work or have enough contention for that to be an important
440 // optimization
// Protects s_vecRunWithLockQueue
static ShortDurationLock s_mutexRunWithLockQueue( "run_with_lock_queue" );

// Work items waiting for the global lock to become available
static std::vector< ISteamNetworkingSocketsRunWithLock * > s_vecRunWithLockQueue;

// Out-of-line destructor for the abstract work-item interface
ISteamNetworkingSocketsRunWithLock::~ISteamNetworkingSocketsRunWithLock() {}
445 
/// Run the item immediately if the global lock can be taken without
/// waiting; otherwise queue it to be run later by the service thread.
/// Returns true if the item ran (and destroyed itself), false if it
/// was queued.  Either way, the caller must not touch the item again.
bool ISteamNetworkingSocketsRunWithLock::RunOrQueue( const char *pszTag )
{
	// Check if lock is available immediately.  (Zero timeout = no waiting.)
	if ( !SteamNetworkingGlobalLock::TryLock( pszTag, 0 ) )
	{
		Queue( pszTag );
		return false;
	}

	// Service the queue so we always do items in order
	ServiceQueue();

	// Let derived class do work
	Run();

	// Go ahead and unlock now
	SteamNetworkingGlobalLock::Unlock();

	// Self destruct
	delete this;

	// We have run
	return true;
}
470 
/// Add this item to the deferred-work queue, to be executed later by
/// ServiceQueue.  The item is owned by the queue after this call.
void ISteamNetworkingSocketsRunWithLock::Queue( const char *pszTag )
{
	// Remember our tag, for accounting purposes
	m_pszTag = pszTag;

	// Put us into the queue
	s_mutexRunWithLockQueue.lock();
	s_vecRunWithLockQueue.push_back( this );
	s_mutexRunWithLockQueue.unlock();

	// NOTE: At this point we are subject to being run or deleted at any time!

	// Make sure service thread will wake up to do something with this
	WakeSteamDatagramThread();

}
487 
ServiceQueue()488 void ISteamNetworkingSocketsRunWithLock::ServiceQueue()
489 {
490 	// Quick check if we're empty, which will be common and can be done safely
491 	// even if we don't hold the lock and the vector is being modified.  It's
492 	// OK if we have an occasional false positive or negative here.  Work
493 	// put into this queue does not have any guarantees about when it will get done
494 	// beyond "run them in order they are queued" and "best effort".
495 	if ( s_vecRunWithLockQueue.empty() )
496 		return;
497 
498 	std::vector<ISteamNetworkingSocketsRunWithLock *> vecTempQueue;
499 
500 	// Quickly move the queue into the temp while holding the lock.
501 	// Once it is in our temp, we can release the lock.
502 	s_mutexRunWithLockQueue.lock();
503 	vecTempQueue = std::move( s_vecRunWithLockQueue );
504 	s_vecRunWithLockQueue.clear(); // Just in case assigning from std::move didn't clear it.
505 	s_mutexRunWithLockQueue.unlock();
506 
507 	// Run them
508 	for ( ISteamNetworkingSocketsRunWithLock *pItem: vecTempQueue )
509 	{
510 		// Make sure we hold the lock, and also set the tag for debugging purposes
511 		SteamNetworkingGlobalLock::AssertHeldByCurrentThread( pItem->m_pszTag );
512 
513 		// Do the work and nuke
514 		pItem->Run();
515 		delete pItem;
516 	}
517 }
518 
519 // Try to guess if the route the specified address is probably "local".
520 // This is difficult to do in general.  We want something that mostly works.
521 //
522 // False positives: VPNs and IPv6 addresses that appear to be nearby but are not.
523 // False negatives: We can't always tell if a route is local.
/// Heuristically decide whether the route to the given address is "local".
/// On Windows, asks the OS routing table (GetBestInterfaceEx/GetBestRoute2);
/// on other platforms only the reserved-address check is implemented.
bool IsRouteToAddressProbablyLocal( netadr_t addr )
{

	// Assume that if we are able to send to any "reserved" route, that it is local.
	// Note that this will be true for VPNs, too!
	if ( addr.IsReservedAdr() )
		return true;

	// But other cases might also be local routes.  E.g. two boxes with public IPs.
	// Convert to sockaddr struct so we can ask the operating system
	addr.SetPort(0);
	sockaddr_storage sockaddrDest;
	addr.ToSockadr( &sockaddrDest );

	#ifdef _WINDOWS

		//
		// These functions were added with Vista, so load dynamically
		// in case we are running on an older OS
		//

		typedef
		DWORD
		(WINAPI *FnGetBestInterfaceEx)(
			struct sockaddr *pDestAddr,
			PDWORD           pdwBestIfIndex
			);
		typedef
		NETIO_STATUS
		(NETIOAPI_API_*FnGetBestRoute2)(
			NET_LUID *InterfaceLuid,
			NET_IFINDEX InterfaceIndex,
			CONST SOCKADDR_INET *SourceAddress,
			CONST SOCKADDR_INET *DestinationAddress,
			ULONG AddressSortOptions,
			PMIB_IPFORWARD_ROW2 BestRoute,
			SOCKADDR_INET *BestSourceAddress
			);

		// Resolved once per process; the module handle is intentionally never freed
		static HMODULE hModule = LoadLibraryA( "Iphlpapi.dll" );
		static FnGetBestInterfaceEx pGetBestInterfaceEx = hModule ? (FnGetBestInterfaceEx)GetProcAddress( hModule, "GetBestInterfaceEx" ) : nullptr;
		static FnGetBestRoute2 pGetBestRoute2 = hModule ? (FnGetBestRoute2)GetProcAddress( hModule, "GetBestRoute2" ) : nullptr;;
		if ( !pGetBestInterfaceEx || !pGetBestRoute2 )
			return false;

		// Which interface would the OS use to reach this address?
		NET_IFINDEX dwBestIfIndex;
		DWORD r = (*pGetBestInterfaceEx)( (sockaddr *)&sockaddrDest, &dwBestIfIndex );
		if ( r != NO_ERROR )
		{
			AssertMsg2( false, "GetBestInterfaceEx failed with result %d for address '%s'", r, CUtlNetAdrRender( addr ).String() );
			return false;
		}

		// ...and what is the best route on that interface?
		MIB_IPFORWARD_ROW2 bestRoute;
		SOCKADDR_INET bestSourceAddress;
		r = (*pGetBestRoute2)(
			nullptr, // InterfaceLuid
			dwBestIfIndex, // InterfaceIndex
			nullptr, // SourceAddress
			(SOCKADDR_INET *)&sockaddrDest, // DestinationAddress
			0, // AddressSortOptions
			&bestRoute, // BestRoute
			&bestSourceAddress // BestSourceAddress
		);
		if ( r != NO_ERROR )
		{
			AssertMsg2( false, "GetBestRoute2 failed with result %d for address '%s'", r, CUtlNetAdrRender( addr ).String() );
			return false;
		}
		if ( bestRoute.Protocol == MIB_IPPROTO_LOCAL )
			return true;
		netadr_t nextHop;
		if ( !nextHop.SetFromSockadr( &bestRoute.NextHop ) )
		{
			AssertMsg( false, "GetBestRoute2 returned invalid next hop address" );
			return false;
		}

		// Ignore port for the comparisons below
		nextHop.SetPort( 0 );

		// https://docs.microsoft.com/en-us/windows/win32/api/netioapi/ns-netioapi-mib_ipforward_row2:
		//   For a remote route, the IP address of the next system or gateway en route.
		//   If the route is to a local loopback address or an IP address on the local
		//   link, the next hop is unspecified (all zeros). For a local loopback route,
		//   this member should be an IPv4 address of 0.0.0.0 for an IPv4 route entry
		//   or an IPv6 address of 0::0 for an IPv6 route entry.
		if ( !nextHop.HasIP() )
			return true;
		if ( nextHop == addr )
			return true;

		// If final destination is on the same IPv6/56 prefix, then assume
		// it's a local route.  This is an arbitrary prefix size to use,
		// but it's a compromise.  We think that /64 probably has too
		// many false negatives, but /48 has too many false positives.
		if ( addr.GetType() == k_EIPTypeV6 )
		{
			if ( nextHop.GetType() == k_EIPTypeV6 )
			{
				// Compare the first 7 bytes = 56 bits of prefix
				if ( memcmp( addr.GetIPV6Bytes(), nextHop.GetIPV6Bytes(), 7 ) == 0 )
					return true;
			}
			netadr_t netdrBestSource;
			if ( netdrBestSource.SetFromSockadr( &bestSourceAddress ) && netdrBestSource.GetType() == k_EIPTypeV6 )
			{
				if ( memcmp( addr.GetIPV6Bytes(), netdrBestSource.GetIPV6Bytes(), 7 ) == 0 )
					return true;
			}
		}

	#else
		// FIXME - Writeme
	#endif

	// Nope
	return false;
}
641 
642 /////////////////////////////////////////////////////////////////////////////
643 //
644 // Raw sockets
645 //
646 /////////////////////////////////////////////////////////////////////////////
647 
// Token buckets for the simulated ("fake") global rate limits on send and
// receive.  A bucket value of INT_MAX means limiting is disabled.
static double s_flFakeRateLimit_Send_tokens;
static double s_flFakeRateLimit_Recv_tokens;

// Last time the buckets above were credited with elapsed time
static SteamNetworkingMicroseconds s_usecFakeRateLimitBucketUpdateTime;
651 
InitFakeRateLimit()652 static void InitFakeRateLimit()
653 {
654 	s_usecFakeRateLimitBucketUpdateTime = SteamNetworkingSockets_GetLocalTimestamp();
655 	s_flFakeRateLimit_Send_tokens = (double)INT_MAX;
656 	s_flFakeRateLimit_Recv_tokens = (double)INT_MAX;
657 }
658 
UpdateFakeRateLimitTokenBuckets(SteamNetworkingMicroseconds usecNow)659 static void UpdateFakeRateLimitTokenBuckets( SteamNetworkingMicroseconds usecNow )
660 {
661 	float flElapsed = ( usecNow - s_usecFakeRateLimitBucketUpdateTime ) * 1e-6;
662 	s_usecFakeRateLimitBucketUpdateTime = usecNow;
663 
664 	if ( g_Config_FakeRateLimit_Send_Rate.Get() <= 0 )
665 	{
666 		s_flFakeRateLimit_Send_tokens = (double)INT_MAX;
667 	}
668 	else
669 	{
670 		s_flFakeRateLimit_Send_tokens += flElapsed*g_Config_FakeRateLimit_Send_Rate.Get();
671 		s_flFakeRateLimit_Send_tokens = std::min( s_flFakeRateLimit_Send_tokens, (double)g_Config_FakeRateLimit_Send_Burst.Get() );
672 	}
673 
674 	if ( g_Config_FakeRateLimit_Recv_Rate.Get() <= 0 )
675 	{
676 		s_flFakeRateLimit_Recv_tokens = (double)INT_MAX;
677 	}
678 	else
679 	{
680 		s_flFakeRateLimit_Recv_tokens += flElapsed*g_Config_FakeRateLimit_Recv_Rate.Get();
681 		s_flFakeRateLimit_Recv_tokens = std::min( s_flFakeRateLimit_Recv_tokens, (double)g_Config_FakeRateLimit_Recv_Burst.Get() );
682 	}
683 }
684 
// Trivial out-of-line definitions for the abstract interface's ctor/dtor
inline IRawUDPSocket::IRawUDPSocket() {}
inline IRawUDPSocket::~IRawUDPSocket() {}
687 
/// Concrete implementation of IRawUDPSocket on top of an OS socket handle.
class CRawUDPSocketImpl final : public IRawUDPSocket
{
public:
	STEAMNETWORKINGSOCKETS_DECLARE_CLASS_OPERATOR_NEW

	~CRawUDPSocketImpl()
	{
		closesocket( m_socket );
		#ifdef WIN32
			WSACloseEvent( m_event );
		#endif
	}

	/// Descriptor from the OS
	SOCKET m_socket;

	/// What address families are supported by this socket?
	int m_nAddressFamilies;

	/// Who to notify when we receive a packet on this socket.
	/// This is set to null when we are asked to close the socket.
	CRecvPacketCallback m_callback;

	/// An event that will be set when the socket has data that we can read.
	#ifdef WIN32
		WSAEVENT m_event = INVALID_HANDLE_VALUE;
	#endif

	// Implements IRawUDPSocket
	virtual bool BSendRawPacketGather( int nChunks, const iovec *pChunks, const netadr_t &adrTo ) const override;
	virtual void Close() override;

	/// Send a packet, for really realz right now.  (No checking for fake loss or lag.)
	inline bool BReallySendRawPacket( int nChunks, const iovec *pChunks, const netadr_t &adrTo ) const
	{
		Assert( m_socket != INVALID_SOCKET );

		// Add a tag.  If we end up holding the lock for a long time, this tag
		// will tell us how many packets were sent
		SteamNetworkingGlobalLock::AssertHeldByCurrentThread( "SendUDPacket" );

		// Convert address to BSD interface.  If the socket supports IPv6,
		// always use a sockaddr_in6; otherwise let the address determine
		// the sockaddr size.
		struct sockaddr_storage destAddress;
		socklen_t addrSize;
		if ( m_nAddressFamilies & k_nAddressFamily_IPv6 )
		{
			addrSize = sizeof(sockaddr_in6);
			adrTo.ToSockadrIPV6( &destAddress );
		}
		else
		{
			addrSize = (socklen_t)adrTo.ToSockadr( &destAddress );
		}

		// Emit an ETW event with the total payload size, if enabled
		#ifdef STEAMNETWORKINGSOCKETS_ENABLE_ETW
		{
			int cbTotal = 0;
			for ( int i = 0 ; i < nChunks ; ++i )
				cbTotal += (int)pChunks[i].iov_len;
			ETW_UDPSendPacket( adrTo, cbTotal );
		}
		#endif

		// Optional hex-dump style packet trace
		if ( g_Config_PacketTraceMaxBytes.Get() >= 0 )
		{
			TracePkt( true, adrTo, nChunks, pChunks );
		}

		#ifdef STEAMNETWORKINGSOCKETS_LOWLEVEL_TIME_SOCKET_CALLS
			SteamNetworkingMicroseconds usecSendStart = SteamNetworkingSockets_GetLocalTimestamp();
		#endif

		#ifdef WIN32
			// Confirm that iovec and WSABUF are indeed bitwise equivalent,
			// so the gather list can be passed straight through to WSASendTo
			COMPILE_TIME_ASSERT( sizeof( iovec ) == sizeof( WSABUF ) );
			COMPILE_TIME_ASSERT( offsetof( iovec, iov_len ) == offsetof( WSABUF, len ) );
			COMPILE_TIME_ASSERT( offsetof( iovec, iov_base ) == offsetof( WSABUF, buf ) );

			DWORD numberOfBytesSent;
			int r = WSASendTo(
				m_socket,
				(WSABUF *)pChunks,
				(DWORD)nChunks,
				&numberOfBytesSent,
				0, // flags
				(const sockaddr *)&destAddress,
				addrSize,
				nullptr, // lpOverlapped
				nullptr // lpCompletionRoutine
			);
			bool bResult = ( r == 0 );
		#else
			msghdr msg;
			msg.msg_name = (sockaddr *)&destAddress;
			msg.msg_namelen = addrSize;
			msg.msg_iov = const_cast<struct iovec *>( pChunks );
			msg.msg_iovlen = nChunks;
			msg.msg_control = nullptr;
			msg.msg_controllen = 0;
			msg.msg_flags = 0;

			int r = ::sendmsg( m_socket, &msg, 0 );
			bool bResult = ( r >= 0 ); // just check for -1 for error, since we don't want to take the time here to scan the iovec and sum up the expected total number of bytes sent
		#endif

		#ifdef STEAMNETWORKINGSOCKETS_LOWLEVEL_TIME_SOCKET_CALLS
			SteamNetworkingMicroseconds usecSendEnd = SteamNetworkingSockets_GetLocalTimestamp();
			if ( usecSendEnd > s_usecIgnoreLongLockWaitTimeUntil )
			{
				SteamNetworkingMicroseconds usecSendElapsed = usecSendEnd - usecSendStart;
				if ( usecSendElapsed > 1000 )
				{
					SpewWarning( "UDP send took %.1fms\n", usecSendElapsed*1e-3 );
					ETW_LongOp( "UDP send", usecSendElapsed );
				}
			}
		#endif

		return bResult;
	}

	/// Spew a one-line summary and a hex dump (limited to
	/// g_Config_PacketTraceMaxBytes bytes) of a sent or received packet.
	void TracePkt( bool bSend, const netadr_t &adrRemote, int nChunks, const iovec *pChunks ) const
	{
		int cbTotal = 0;
		for ( int i = 0 ; i < nChunks ; ++i )
			cbTotal += pChunks[i].iov_len;
		if ( bSend )
		{
			ReallySpewTypeFmt( k_ESteamNetworkingSocketsDebugOutputType_Msg, "[Trace Send] %s -> %s | %d bytes\n",
				SteamNetworkingIPAddrRender( m_boundAddr ).c_str(), CUtlNetAdrRender( adrRemote ).String(), cbTotal );
		}
		else
		{
			ReallySpewTypeFmt( k_ESteamNetworkingSocketsDebugOutputType_Msg, "[Trace Recv] %s <- %s | %d bytes\n",
				SteamNetworkingIPAddrRender( m_boundAddr ).c_str(), CUtlNetAdrRender( adrRemote ).String(), cbTotal );
		}
		int l = std::min( cbTotal, g_Config_PacketTraceMaxBytes.Get() );
		const uint8 *p = (const uint8 *)pChunks->iov_base;
		int cbChunkLeft = pChunks->iov_len;
		while ( l > 0 )
		{
			// How many bytes to print on this row?  (16 per row, max.)
			int row = std::min( 16, l );
			l -= row;

			char buf[256], *d = buf;
			do {

				// Check for end of this chunk; advance through the gather
				// list as needed.  (A chunk may be empty.)
				while ( cbChunkLeft == 0 )
				{
					++pChunks;
					p = (const uint8 *)pChunks->iov_base;
					cbChunkLeft = pChunks->iov_len;
				}

				// print the byte
				static const char hexdigit[] = "0123456789abcdef";
				*(d++) = ' ';
				*(d++) = hexdigit[ *p >> 4 ];
				*(d++) = hexdigit[ *p & 0xf ];

				// Advance to next byte
				++p;
				--cbChunkLeft;
			} while (--row > 0 );
			*d = '\0';

			// Emit the row
			ReallySpewTypeFmt( k_ESteamNetworkingSocketsDebugOutputType_Msg, "    %s\n", buf );
		}
	}

private:

	void InternalAddToCleanupQueue();
};
865 
/// Master list of all currently-open raw UDP sockets.
/// We don't expect to have enough sockets, and open and close them frequently
/// enough, such that an occasional linear search will kill us.
static CUtlVector<CRawUDPSocketImpl *> s_vecRawSockets;

/// List of raw sockets pending actual destruction.  Sockets are moved here
/// (and their callback cleared) when closed; the memory is freed later, once
/// it is safe — no thread can still be dispatching a callback on them.
static CUtlVector<CRawUDPSocketImpl *> s_vecRawSocketsPendingDeletion;
872 
873 /// Track packets that have fake lag applied and are pending to be sent/received
874 class CPacketLagger : private IThinker
875 {
876 public:
~CPacketLagger()877 	~CPacketLagger() { Clear(); }
878 
LagPacket(CRawUDPSocketImpl * pSock,const netadr_t & adr,int msDelay,int nChunks,const iovec * pChunks)879 	void LagPacket( CRawUDPSocketImpl *pSock, const netadr_t &adr, int msDelay, int nChunks, const iovec *pChunks )
880 	{
881 		SteamNetworkingGlobalLock::AssertHeldByCurrentThread( "LagPacket" );
882 
883 		int cbPkt = 0;
884 		for ( int i = 0 ; i < nChunks ; ++i )
885 			cbPkt += pChunks[i].iov_len;
886 		if ( cbPkt > k_cbSteamNetworkingSocketsMaxUDPMsgLen )
887 		{
888 			AssertMsg( false, "Tried to lag a packet that w as too big!" );
889 			return;
890 		}
891 
892 		// Make sure we never queue a packet that is queued for destruction!
893 		if ( pSock->m_socket == INVALID_SOCKET || !pSock->m_callback.m_fnCallback )
894 		{
895 			AssertMsg( false, "Tried to lag a packet on a socket that has already been closed and is pending destruction!" );
896 			return;
897 		}
898 
899 		if ( msDelay < 1 )
900 		{
901 			AssertMsg( false, "Packet lag time must be positive!" );
902 			msDelay = 1;
903 		}
904 
905 		// Limit to something sane
906 		msDelay = std::min( msDelay, 5000 );
907 		const SteamNetworkingMicroseconds usecTime = SteamNetworkingSockets_GetLocalTimestamp() + msDelay * 1000;
908 
909 		// Find the right place to insert the packet.  This is a dumb linear search, but in
910 		// the steady state where the delay is constant, this search loop won't actually iterate,
911 		// and we'll always be adding to the end of the queue
912 		LaggedPacket *pkt = nullptr;
913 		for ( CUtlLinkedList< LaggedPacket >::IndexType_t i = m_list.Tail(); i != m_list.InvalidIndex(); i = m_list.Previous( i ) )
914 		{
915 			if ( usecTime >= m_list[ i ].m_usecTime )
916 			{
917 				pkt = &m_list[ m_list.InsertAfter( i ) ];
918 				break;
919 			}
920 		}
921 		if ( pkt == nullptr )
922 		{
923 			pkt = &m_list[ m_list.AddToHead() ];
924 		}
925 
926 		pkt->m_pSockOwner = pSock;
927 		pkt->m_adrRemote = adr;
928 		pkt->m_usecTime = usecTime;
929 		pkt->m_cbPkt = cbPkt;
930 
931 		// Gather them into buffer
932 		char *d = pkt->m_pkt;
933 		for ( int i = 0 ; i < nChunks ; ++i )
934 		{
935 			int cbChunk = pChunks[i].iov_len;
936 			memcpy( d, pChunks[i].iov_base, cbChunk );
937 			d += cbChunk;
938 		}
939 
940 		Schedule();
941 	}
942 
943 	/// Periodic processing
Think(SteamNetworkingMicroseconds usecNow)944 	virtual void Think( SteamNetworkingMicroseconds usecNow ) OVERRIDE
945 	{
946 
947 		// Just always process packets in queue order.  This means there could be some
948 		// weird burst or jankiness if the delay time is changed, but that's OK.
949 		while ( !m_list.IsEmpty() )
950 		{
951 			LaggedPacket &pkt = m_list[ m_list.Head() ];
952 			if ( pkt.m_usecTime > usecNow )
953 				break;
954 
955 			// Make sure socket is still in good shape.
956 			CRawUDPSocketImpl *pSock = pkt.m_pSockOwner;
957 			if ( pSock )
958 			{
959 				if ( pSock->m_socket == INVALID_SOCKET || !pSock->m_callback.m_fnCallback )
960 				{
961 					AssertMsg( false, "Lagged packet remains in queue after socket destroyed or queued for destruction!" );
962 				}
963 				else
964 				{
965 					ProcessPacket( pkt );
966 				}
967 				m_list.RemoveFromHead();
968 			}
969 		}
970 
971 		Schedule();
972 	}
973 
974 	/// Nuke everything
Clear()975 	void Clear()
976 	{
977 		m_list.RemoveAll();
978 		IThinker::ClearNextThinkTime();
979 	}
980 
981 	/// Called when we're about to destroy a socket
AboutToDestroySocket(const CRawUDPSocketImpl * pSock)982 	void AboutToDestroySocket( const CRawUDPSocketImpl *pSock )
983 	{
984 		// Just do a dumb linear search.  This list should be empty in
985 		// production situations, and socket destruction is relatively rare,
986 		// so its not worth making this complicated.
987 		int idx = m_list.Head();
988 		while ( idx != m_list.InvalidIndex() )
989 		{
990 			int idxNext = m_list.Next( idx );
991 			if ( m_list[idx].m_pSockOwner == pSock )
992 				m_list[idx].m_pSockOwner = nullptr;
993 			idx = idxNext;
994 		}
995 	}
996 
997 protected:
998 
999 	/// Set the next think time as appropriate
Schedule()1000 	void Schedule()
1001 	{
1002 		if ( m_list.IsEmpty() )
1003 			ClearNextThinkTime();
1004 		else
1005 			SetNextThinkTime( m_list[ m_list.Head() ].m_usecTime );
1006 	}
1007 
1008 	struct LaggedPacket
1009 	{
1010 		CRawUDPSocketImpl *m_pSockOwner;
1011 		netadr_t m_adrRemote;
1012 		SteamNetworkingMicroseconds m_usecTime; /// Time when it should be sent or received
1013 		int m_cbPkt;
1014 		char m_pkt[ k_cbSteamNetworkingSocketsMaxUDPMsgLen ];
1015 	};
1016 	CUtlLinkedList<LaggedPacket> m_list;
1017 
1018 	/// Do whatever we're supposed to do with the next packet
1019 	virtual void ProcessPacket( const LaggedPacket &pkt ) = 0;
1020 };
1021 
1022 class CPacketLaggerSend final : public CPacketLagger
1023 {
1024 public:
ProcessPacket(const LaggedPacket & pkt)1025 	virtual void ProcessPacket( const LaggedPacket &pkt ) override
1026 	{
1027 		iovec temp;
1028 		temp.iov_len = pkt.m_cbPkt;
1029 		temp.iov_base = (void *)pkt.m_pkt;
1030 		pkt.m_pSockOwner->BReallySendRawPacket( 1, &temp, pkt.m_adrRemote );
1031 	}
1032 };
1033 
1034 class CPacketLaggerRecv final : public CPacketLagger
1035 {
1036 public:
ProcessPacket(const LaggedPacket & pkt)1037 	virtual void ProcessPacket( const LaggedPacket &pkt ) override
1038 	{
1039 		// Copy data out of queue into local variables, just in case a
1040 		// packet is queued while we're in this function.  We don't want
1041 		// our list to shift in memory, and the pointer we pass to the
1042 		// caller to dangle.
1043 		char temp[ k_cbSteamNetworkingSocketsMaxUDPMsgLen ];
1044 		memcpy( temp, pkt.m_pkt, pkt.m_cbPkt );
1045 		pkt.m_pSockOwner->m_callback( RecvPktInfo_t{ temp, pkt.m_cbPkt, pkt.m_adrRemote, pkt.m_pSockOwner } );
1046 	}
1047 };
1048 
// Singleton queues of artificially-delayed outbound and inbound packets
static CPacketLaggerSend s_packetLagQueueSend;
static CPacketLaggerRecv s_packetLagQueueRecv;

/// Object used to wake our background thread efficiently
#if defined( _WIN32 )
	// Event handle signaled by WakeSteamDatagramThread()
	static HANDLE s_hEventWakeThread = INVALID_HANDLE_VALUE;
#elif defined( NN_NINTENDO_SDK )
	static int s_hEventWakeThread = INVALID_SOCKET;
#else
	// Self-wake socket pair: the poll loop watches the read end, and a wake
	// request writes one byte to the write end.
	static SOCKET s_hSockWakeThreadRead = INVALID_SOCKET;
	static SOCKET s_hSockWakeThreadWrite = INVALID_SOCKET;
#endif

// Background service thread, if one is running (i.e. not in manual poll mode)
static std::thread *s_pThreadSteamDatagram = nullptr;
1063 
WakeSteamDatagramThread()1064 void WakeSteamDatagramThread()
1065 {
1066 	#if defined( _WIN32 )
1067 		if ( s_hEventWakeThread != INVALID_HANDLE_VALUE )
1068 			SetEvent( s_hEventWakeThread );
1069 	#elif defined( NN_NINTENDO_SDK )
1070 		// Sorry, but this code is covered under NDA with Nintendo, and
1071 		// we don't have permission to distribute it.
1072 	#else
1073 		if ( s_hSockWakeThreadWrite != INVALID_SOCKET )
1074 		{
1075 			char buf[1] = {0};
1076 			send( s_hSockWakeThreadWrite, buf, 1, 0 );
1077 		}
1078 	#endif
1079 }
1080 
// Send a packet (as a gather list of chunks), applying the configured fake
// network conditions: global send rate limit, random loss, lag, reordering,
// and duplication.  Returns true on "success", which includes all the cases
// where the packet was deliberately dropped or queued for later delivery;
// false only if an actual send was attempted and failed.  Caller must hold
// the global lock.
bool CRawUDPSocketImpl::BSendRawPacketGather( int nChunks, const iovec *pChunks, const netadr_t &adrTo ) const
{
	SteamNetworkingGlobalLock::AssertHeldByCurrentThread();

	// Silently ignore a request to send a packet anytime we're in the process of shutting down the system
	if ( s_nLowLevelSupportRefCount.load(std::memory_order_acquire) <= 0 )
		return true;

	// Check simulated global rate limit.  Make sure this is fast
	// when the limit is not in use
	if ( unlikely( g_Config_FakeRateLimit_Send_Rate.Get() > 0 ) )
	{

		// Check if bucket already has tokens in it, which
		// will be common.  If so, we can avoid reading the
		// timer
		if ( s_flFakeRateLimit_Send_tokens <= 0.0f )
		{

			// Update bucket with tokens
			UpdateFakeRateLimitTokenBuckets( SteamNetworkingSockets_GetLocalTimestamp() );

			// Still empty?  Drop the packet (pretend it was sent)
			if ( s_flFakeRateLimit_Send_tokens <= 0.0f )
				return true;
		}

		// Spend tokens.  Note the bucket may go negative; the deficit is paid
		// off before the next packet is allowed through.
		int cbTotal = 0;
		for ( int i = 0 ; i < nChunks ; ++i )
			cbTotal += (int)pChunks[i].iov_len;
		s_flFakeRateLimit_Send_tokens -= cbTotal;
	}

	// Fake loss?  Silently drop, reporting success.
	if ( RandomBoolWithOdds( g_Config_FakePacketLoss_Send.Get() ) )
		return true;

	// Fake lag?
	int32 nPacketFakeLagTotal = g_Config_FakePacketLag_Send.Get();

	// Check for simulating random packet reordering
	if ( RandomBoolWithOdds( g_Config_FakePacketReorder_Send.Get() ) )
	{
		nPacketFakeLagTotal += g_Config_FakePacketReorder_Time.Get();
	}

	// Check for simulating random packet duplication.  The duplicate is always
	// queued with at least 1ms of lag; the original proceeds normally below.
	if ( RandomBoolWithOdds( g_Config_FakePacketDup_Send.Get() ) )
	{
		int32 nDupLag = nPacketFakeLagTotal + WeakRandomInt( 0, g_Config_FakePacketDup_TimeMax.Get() );
		nDupLag = std::max( 1, nDupLag );
		s_packetLagQueueSend.LagPacket( const_cast<CRawUDPSocketImpl *>( this ), adrTo, nDupLag, nChunks, pChunks );
	}

	// Lag the original packet?
	if ( nPacketFakeLagTotal > 0 )
	{
		s_packetLagQueueSend.LagPacket( const_cast<CRawUDPSocketImpl *>( this ), adrTo, nPacketFakeLagTotal, nChunks, pChunks );
		return true;
	}

	// Now really send it
	return BReallySendRawPacket( nChunks, pChunks, adrTo );
}
1146 
// Move this socket from the active list to the pending-deletion list.
// After this call the socket is logically closed: the callback is cleared so
// no further receive callbacks will be dispatched, and actual destruction
// happens later, when no thread can still be using the socket.
// Caller must hold the global lock.
void CRawUDPSocketImpl::InternalAddToCleanupQueue()
{

	/// Clear the callback, to ensure that no further callbacks will be executed.
	/// This marks the socket as pending destruction.
	Assert( m_callback.m_fnCallback );
	m_callback.m_fnCallback = nullptr;
	Assert( m_socket != INVALID_SOCKET );

	// Transfer from the active list to the pending-deletion list.  The socket
	// must be in the former and must not already be in the latter.
	DbgVerify( s_vecRawSockets.FindAndFastRemove( this ) );
	DbgVerify( !s_vecRawSocketsPendingDeletion.FindAndFastRemove( this ) );
	s_vecRawSocketsPendingDeletion.AddToTail( this );

	// Clean up lagged packets, if any
	s_packetLagQueueSend.AboutToDestroySocket( this );
	s_packetLagQueueRecv.AboutToDestroySocket( this );
}
1164 
// Logically close the socket and arrange for it to actually be destroyed as
// soon as it is safe to do so.  Caller must hold the global lock.
void CRawUDPSocketImpl::Close()
{
	SteamNetworkingGlobalLock::AssertHeldByCurrentThread( "IRawUDPSocket::Close" );

	// Mark the callback as detached, and put us in the queue for cleanup when it's safe.
	InternalAddToCleanupQueue();

	// Make sure we don't delay doing this too long
	if ( s_bManualPollMode || ( s_pThreadSteamDatagram && s_pThreadSteamDatagram->get_id() != std::this_thread::get_id() ) )
	{
		// Another thread might be polling right now.  Wake it so it notices
		// the pending destruction promptly.
		WakeSteamDatagramThread();
	}
	else
	{
		// We are the service thread (or there is no service thread), so no
		// other thread can be polling — we can take care of it right now.
		ProcessPendingDestroyClosedRawUDPSockets();
	}
}
1184 
// Create a UDP socket for the address family given in the sockaddr, make it
// nonblocking, size its send/recv buffers, optionally configure IPv6
// dual-stack behavior, and bind it to the given address.
//
// sockaddr/len           - address (sockaddr_in or sockaddr_in6) to bind to
// errMsg                 - filled with a description on failure
// pnIPv6AddressFamilies  - if non-null, the socket is IPv6.  On input, the
//                          requested families (IPv6-only vs dual-stack); on
//                          output, what we actually got.
//
// Returns the bound socket, or INVALID_SOCKET on failure (errMsg set).
static SOCKET OpenUDPSocketBoundToSockAddr( const void *sockaddr, size_t len, SteamDatagramErrMsg &errMsg, int *pnIPv6AddressFamilies )
{
	unsigned int opt;

	// Only used to read the family field, which is at the same offset for v4/v6
	const sockaddr_in *inaddr = (const sockaddr_in *)sockaddr;

	// Select socket type.  For linux, use the "close on exec" flag, so that the
	// socket will not be inherited by any child process that we spawn.
	int sockType = SOCK_DGRAM;
	#ifdef LINUX
		sockType |= SOCK_CLOEXEC;
	#endif
	#if defined( NN_NINTENDO_SDK ) && !defined( _WIN32 )
		sockType |= SOCK_NONBLOCK;
	#endif

	// Try to create a UDP socket using the specified family
	SOCKET sock = socket( inaddr->sin_family, sockType, IPPROTO_UDP );
	if ( sock == INVALID_SOCKET )
	{
		V_sprintf_safe( errMsg, "socket() call failed.  Error code 0x%08x.", GetLastSocketError() );
		return INVALID_SOCKET;
	}

	// We always use nonblocking IO
	#if !defined( NN_NINTENDO_SDK ) || defined( _WIN32 )
		opt = 1;
		if ( ioctlsocket( sock, FIONBIO, (unsigned long*)&opt ) == -1 )
		{
			V_sprintf_safe( errMsg, "Failed to set socket nonblocking mode.  Error code 0x%08x.", GetLastSocketError() );
			closesocket( sock );
			return INVALID_SOCKET;
		}
	#endif

	// Set buffer sizes
	opt = g_nSteamDatagramSocketBufferSize;
	if ( setsockopt( sock, SOL_SOCKET, SO_SNDBUF, (char *)&opt, sizeof(opt) ) )
	{
		V_sprintf_safe( errMsg, "Failed to set socket send buffer size.  Error code 0x%08x.", GetLastSocketError() );
		closesocket( sock );
		return INVALID_SOCKET;
	}
	opt = g_nSteamDatagramSocketBufferSize;
	if ( setsockopt( sock, SOL_SOCKET, SO_RCVBUF, (char *)&opt, sizeof(opt) ) == -1 )
	{
		V_sprintf_safe( errMsg, "Failed to set socket recv buffer size.  Error code 0x%08x.", GetLastSocketError() );
		closesocket( sock );
		return INVALID_SOCKET;
	}

	// Handle IP v6 dual stack?
	if ( pnIPv6AddressFamilies )
	{

		// Enable dual stack?  (IPV6_V6ONLY=0 means the socket also accepts
		// IPv4 traffic via mapped addresses.)
		opt = ( *pnIPv6AddressFamilies == k_nAddressFamily_IPv6 ) ? 1 : 0;
		if ( setsockopt( sock, IPPROTO_IPV6, IPV6_V6ONLY, (char *)&opt, sizeof( opt ) ) != 0 )
		{
			if ( *pnIPv6AddressFamilies == k_nAddressFamily_IPv6 )
			{
				// Spew a warning, but continue
				SpewWarning( "Failed to set socket for IPv6 only (IPV6_V6ONLY=1).  Error code 0x%08X.  Continuing anyway.\n", GetLastSocketError() );
			}
			else
			{
				// Dual stack required, or only requested?
				if ( *pnIPv6AddressFamilies == k_nAddressFamily_DualStack )
				{
					V_sprintf_safe( errMsg, "Failed to set socket for dual stack (IPV6_V6ONLY=0).  Error code 0x%08X.", GetLastSocketError() );
					closesocket( sock );
					return INVALID_SOCKET;
				}

				// Let caller know we're IPv6 only, and spew about this.
				SpewWarning( "Failed to set socket for dual stack (IPV6_V6ONLY=0).  Error code 0x%08X.  Continuing using IPv6 only!\n", GetLastSocketError() );
				*pnIPv6AddressFamilies = k_nAddressFamily_IPv6;
			}
		}
		else
		{
			// Tell caller what they've got
			*pnIPv6AddressFamilies = opt ? k_nAddressFamily_IPv6 : k_nAddressFamily_DualStack;
		}
	}

	// Bind it to specific desired port and/or interfaces
	if ( bind( sock, (struct sockaddr *)sockaddr, (socklen_t)len ) == -1 )
	{
		V_sprintf_safe( errMsg, "Failed to bind socket.  Error code 0x%08X.", GetLastSocketError() );
		closesocket( sock );
		return INVALID_SOCKET;
	}

	// All good
	return sock;
}
1282 
// Open a raw UDP socket and wrap it in a CRawUDPSocketImpl bookkeeping object.
//
// Tries IPv6 (possibly dual-stack) first when allowed, falling back to IPv4
// if the request was k_nAddressFamily_Auto.  On success the socket is added
// to the master list and the service thread is woken so it starts polling it.
//
// callback          - invoked for each received packet
// errMsg            - filled with a description on failure
// pAddrLocal        - optional local address/port to bind (null = any)
// pnAddressFamilies - optional in/out address families request/result
//
// Returns the new socket object, or nullptr on failure (errMsg set).
// Caller must hold the global lock.
static CRawUDPSocketImpl *OpenRawUDPSocketInternal( CRecvPacketCallback callback, SteamDatagramErrMsg &errMsg, const SteamNetworkingIPAddr *pAddrLocal, int *pnAddressFamilies )
{
	// Creating a socket *should* be fast, but sometimes the OS might need to do some work.
	// We shouldn't do this too often, give it a little extra time.
	SteamNetworkingGlobalLock::SetLongLockWarningThresholdMS( "OpenRawUDPSocketInternal", 100 );

	// Make sure have been initialized
	if ( s_nLowLevelSupportRefCount.load(std::memory_order_acquire) <= 0 )
	{
		V_strcpy_safe( errMsg, "Internal order of operations bug.  Can't create socket, because low level systems not initialized" );
		AssertMsg( false, errMsg );
		return nullptr;
	}

	// Supply defaults
	int nAddressFamilies = pnAddressFamilies ? *pnAddressFamilies : k_nAddressFamily_Auto;
	SteamNetworkingIPAddr addrLocal;
	if ( pAddrLocal )
		addrLocal = *pAddrLocal;
	else
		addrLocal.Clear();

	// Check that the request makes sense
	if ( addrLocal.IsIPv4() )
	{
		// Only IPv4 family allowed, don't even try IPv6
		if ( nAddressFamilies == k_nAddressFamily_Auto )
		{
			nAddressFamilies = k_nAddressFamily_IPv4;
		}
		else if ( nAddressFamilies != k_nAddressFamily_IPv4 )
		{
			V_strcpy_safe( errMsg, "Invalid address family request when binding to IPv4 address" );
			return nullptr;
		}
	}
	else if ( addrLocal.IsIPv6AllZeros() )
	{
		// We can try IPv6 dual stack, and fallback to IPv4 if requested.
		// Just make sure they didn't request a totally bogus value
		if ( nAddressFamilies == 0 )
		{
			V_strcpy_safe( errMsg, "Invalid address families" );
			return nullptr;
		}
	}
	else
	{
		// Only IPv6 family allowed, cannot try IPv4
		if ( nAddressFamilies == k_nAddressFamily_Auto )
		{
			nAddressFamilies = k_nAddressFamily_IPv6;
		}
		else if ( nAddressFamilies != k_nAddressFamily_IPv6 )
		{
			V_strcpy_safe( errMsg, "Invalid address family request when binding to IPv6 address" );
			return nullptr;
		}
	}

	// Try IPv6?
	SOCKET sock = INVALID_SOCKET;
	if ( nAddressFamilies & k_nAddressFamily_IPv6 )
	{
		sockaddr_in6 address6;
		memset( &address6, 0, sizeof(address6) );
		address6.sin6_family = AF_INET6;
		memcpy( address6.sin6_addr.s6_addr, addrLocal.m_ipv6, 16 );
		address6.sin6_port = BigWord( addrLocal.m_port );

		// Try to get socket
		int nIPv6AddressFamilies = nAddressFamilies;
		sock = OpenUDPSocketBoundToSockAddr( &address6, sizeof(address6), errMsg, &nIPv6AddressFamilies );

		if ( sock == INVALID_SOCKET )
		{
			// Allowing fallback to IPv4?  Only if the caller asked for "Auto";
			// an explicit request fails hard here.
			if ( nAddressFamilies != k_nAddressFamily_Auto )
				return nullptr;

			// Continue below, we'll try IPv4
		}
		else
		{
			// Record what the IPv6 socket actually supports (IPv6-only vs dual stack)
			nAddressFamilies = nIPv6AddressFamilies;
		}
	}

	// Try IPv4?
	if ( sock == INVALID_SOCKET )
	{
		Assert( nAddressFamilies & k_nAddressFamily_IPv4 ); // Otherwise, we should have already failed above

		sockaddr_in address4;
		memset( &address4, 0, sizeof(address4) );
		address4.sin_family = AF_INET;
		address4.sin_addr.s_addr = BigDWord( addrLocal.GetIPv4() );
		address4.sin_port = BigWord( addrLocal.m_port );

		// Try to get socket
		sock = OpenUDPSocketBoundToSockAddr( &address4, sizeof(address4), errMsg, nullptr );

		// If we failed, well, we have no other options left to try.
		if ( sock == INVALID_SOCKET )
			return nullptr;

		// We re IPv4 only
		nAddressFamilies = k_nAddressFamily_IPv4;
	}

	// Read back address we actually bound to.  (E.g. if port 0 was requested,
	// this is how we learn the ephemeral port the OS picked.)
	sockaddr_storage addrBound;
	socklen_t cbAddress = sizeof(addrBound);
	if ( getsockname( sock, (struct sockaddr *)&addrBound, &cbAddress ) != 0 )
	{
		V_sprintf_safe( errMsg, "getsockname failed.  Error code 0x%08X.", GetLastSocketError() );
		closesocket( sock );
		return nullptr;
	}
	if ( addrBound.ss_family == AF_INET )
	{
		const sockaddr_in *boundaddr4 = (const sockaddr_in *)&addrBound;
		addrLocal.SetIPv4( BigDWord( boundaddr4->sin_addr.s_addr ), BigWord( boundaddr4->sin_port ) );
	}
	else if ( addrBound.ss_family == AF_INET6 )
	{
		const sockaddr_in6 *boundaddr6 = (const sockaddr_in6 *)&addrBound;
		addrLocal.SetIPv6( boundaddr6->sin6_addr.s6_addr, BigWord( boundaddr6->sin6_port ) );
	}
	else
	{
		Assert( false );
		V_sprintf_safe( errMsg, "getsockname returned address with unexpected family %d", addrBound.ss_family );
		closesocket( sock );
		return nullptr;
	}

	// Allocate a bookkeeping structure
	CRawUDPSocketImpl *pSock = new CRawUDPSocketImpl;
	pSock->m_socket = sock;
	pSock->m_boundAddr = addrLocal;
	pSock->m_callback = callback;
	pSock->m_nAddressFamilies = nAddressFamilies;

	// On windows, create an event used to poll efficiently
	#ifdef _WIN32
		pSock->m_event = WSACreateEvent();
		if ( WSAEventSelect( pSock->m_socket, pSock->m_event, FD_READ ) != 0 )
		{
			// NOTE(review): presumably ~CRawUDPSocketImpl closes m_socket and
			// m_event (destructor not visible here) — otherwise this path leaks.
			delete pSock;
			V_sprintf_safe( errMsg, "WSACreateEvent() or WSAEventSelect() failed.  Error code 0x%08X.", GetLastSocketError() );
			return nullptr;
		}
	#endif

	// Add to master list.  (Hopefully we usually won't have that many.)
	s_vecRawSockets.AddToTail( pSock );

	// Wake up background thread so we can start receiving packets on this socket immediately
	WakeSteamDatagramThread();

	// Give back info on address families
	if ( pnAddressFamilies )
		*pnAddressFamilies = nAddressFamilies;

	// Give them something they can use
	return pSock;
}
1451 
// Public entry point: thin wrapper over OpenRawUDPSocketInternal.
// Returns the new socket, or nullptr on failure (errMsg set).
IRawUDPSocket *OpenRawUDPSocket( CRecvPacketCallback callback, SteamDatagramErrMsg &errMsg, SteamNetworkingIPAddr *pAddrLocal, int *pnAddressFamilies )
{
	return OpenRawUDPSocketInternal( callback, errMsg, pAddrLocal, pnAddressFamilies );
}
1456 
// Debug check that the current thread holds the global lock, and holds it
// exactly once (no recursive acquisition, no other locks held).  Compiles to
// nothing unless lock debugging is enabled.
static inline void AssertGlobalLockHeldExactlyOnce()
{
	#if STEAMNETWORKINGSOCKETS_LOCK_DEBUG_LEVEL > 0
		ThreadLockDebugInfo &t = GetThreadDebugInfo();
		Assert( t.m_nHeldLocks == 1 && t.m_arHeldLocks[0] == &s_mutexGlobalLock );
	#endif
}
1464 
1465 /// Poll all of our sockets, and dispatch the packets received.
1466 /// This will return true if we own the lock, or false if we detected
1467 /// a shutdown request and bailed without re-squiring the lock.
PollRawUDPSockets(int nMaxTimeoutMS,bool bManualPoll)1468 static bool PollRawUDPSockets( int nMaxTimeoutMS, bool bManualPoll )
1469 {
1470 	// This should only ever be called from our one thread proc,
1471 	// and we assume that it will have locked the lock exactly once.
1472 	AssertGlobalLockHeldExactlyOnce();
1473 
1474 	const int nSocketsToPoll = s_vecRawSockets.Count();
1475 
1476 	#ifdef _WIN32
1477 		HANDLE *pEvents = (HANDLE*)alloca( sizeof(HANDLE) * (nSocketsToPoll+1) );
1478 		int nEvents = 0;
1479 	#else
1480 		pollfd *pPollFDs = (pollfd*)alloca( sizeof(pollfd) * (nSocketsToPoll+1) );
1481 		int nPollFDs = 0;
1482 	#endif
1483 
1484 	CRawUDPSocketImpl **pSocketsToPoll = (CRawUDPSocketImpl **)alloca( sizeof(CRawUDPSocketImpl *) * nSocketsToPoll );
1485 
1486 	for ( int i = 0 ; i < nSocketsToPoll ; ++i )
1487 	{
1488 		CRawUDPSocketImpl *pSock = s_vecRawSockets[ i ];
1489 
1490 		// Should be totally valid at this point
1491 		Assert( pSock->m_callback.m_fnCallback );
1492 		Assert( pSock->m_socket != INVALID_SOCKET );
1493 
1494 		pSocketsToPoll[ i ] = pSock;
1495 
1496 		#ifdef _WIN32
1497 			pEvents[ nEvents++ ] = pSock->m_event;
1498 		#else
1499 			pollfd *p = &pPollFDs[ nPollFDs++ ];
1500 			p->fd = pSock->m_socket;
1501 			p->events = POLLRDNORM;
1502 			p->revents = 0;
1503 		#endif
1504 	}
1505 
1506 	#if defined( _WIN32 )
1507 		Assert( s_hEventWakeThread != NULL && s_hEventWakeThread != INVALID_HANDLE_VALUE );
1508 		pEvents[ nEvents++ ] = s_hEventWakeThread;
1509 	#elif defined( NN_NINTENDO_SDK )
1510 		Assert( s_hEventWakeThread != INVALID_SOCKET );
1511 		pollfd *p = &pPollFDs[ nPollFDs++ ];
1512 		p->fd = s_hEventWakeThread;
1513 		p->events = POLLRDNORM;
1514 		p->revents = 0;
1515 	#else
1516 		Assert( s_hSockWakeThreadRead != INVALID_SOCKET );
1517 		pollfd *p = &pPollFDs[ nPollFDs++ ];
1518 		p->fd = s_hSockWakeThreadRead;
1519 		p->events = POLLRDNORM;
1520 		p->revents = 0;
1521 	#endif
1522 
1523 	// Release lock while we're asleep
1524 	SteamNetworkingGlobalLock::Unlock();
1525 
1526 	// Shutdown request?
1527 	if ( s_nLowLevelSupportRefCount.load(std::memory_order_acquire) <= 0 || s_bManualPollMode != bManualPoll )
1528 		return false; // ABORT THREAD
1529 
1530 	// Wait for data on one of the sockets, or for us to be asked to wake up
1531 	#if defined( WIN32 )
1532 		DWORD nWaitResult = WaitForMultipleObjects( nEvents, pEvents, FALSE, nMaxTimeoutMS );
1533 	#else
1534 		poll( pPollFDs, nPollFDs, nMaxTimeoutMS );
1535 	#endif
1536 
1537 	SteamNetworkingMicroseconds usecStartedLocking = SteamNetworkingSockets_GetLocalTimestamp();
1538 	UpdateFakeRateLimitTokenBuckets( usecStartedLocking );
1539 	for (;;)
1540 	{
1541 
1542 		// Shutdown request?  We've potentially been waiting a long time.
1543 		// Don't attempt to grab the lock again if we know we want to shutdown,
1544 		// that is just a waste of time.
1545 		if ( s_nLowLevelSupportRefCount.load(std::memory_order_acquire) <= 0 || s_bManualPollMode != bManualPoll )
1546 			return false;
1547 
1548 		// Try to acquire the lock.  But don't wait forever, in case the other thread has the lock
1549 		// and then makes a shutdown request while we're waiting on the lock here.
1550 		if ( SteamNetworkingGlobalLock::TryLock( "ServiceThread", 250 ) )
1551 			break;
1552 
1553 		// The only time this really should happen is a relatively rare race condition
1554 		// where the main thread is trying to shut us down.  (Or while debugging.)
1555 		// However, note that try_lock_for is permitted to "fail" spuriously, returning
1556 		// false even if no other thread holds the lock.  (For performance reasons.)
1557 		// So we check how long we have actually been waiting.
1558 		SteamNetworkingMicroseconds usecElapsed = SteamNetworkingSockets_GetLocalTimestamp() - usecStartedLocking;
1559 		AssertMsg1( usecElapsed < 50*1000 || s_nLowLevelSupportRefCount.load(std::memory_order_acquire) <= 0 || s_bManualPollMode != bManualPoll || Plat_IsInDebugSession(), "SDR service thread gave up on lock after waiting %dms.  This directly adds to delay of processing of network packets!", int( usecElapsed/1000 ) );
1560 	}
1561 
1562 	// If we have spewed, flush to disk
1563 	FlushSpew();
1564 
1565 	// Recv socket data from any sockets that might have data, and execute the callbacks.
1566 	char buf[ k_cbSteamNetworkingSocketsMaxUDPMsgLen + 1024 ];
1567 #ifdef _WIN32
1568 	// Note that we assume we aren't polling a ton of sockets here.  We do at least skip ahead
1569 	// to the first socket with data, based on the return value of WaitForMultipleObjects.  But
1570 	// then we will check all sockets later in the array.
1571 	//
1572 	// Note that if we get a wake request, the event has already been reset, because we used an auto-reset event
1573 	for ( int idx = nWaitResult - WAIT_OBJECT_0 ; (unsigned)idx < (unsigned)nSocketsToPoll ; ++idx )
1574 	{
1575 
1576 		CRawUDPSocketImpl *pSock = pSocketsToPoll[ idx ];
1577 
1578 		// Check if this socket has anything, and clear the event
1579 		WSANETWORKEVENTS wsaEvents;
1580 		if ( WSAEnumNetworkEvents( pSock->m_socket, pSock->m_event, &wsaEvents ) != 0 )
1581 		{
1582 			AssertMsg1( false, "WSAEnumNetworkEvents failed.  Error code %08x", WSAGetLastError() );
1583 			continue;
1584 		}
1585 		if ( !(wsaEvents.lNetworkEvents & FD_READ) )
1586 			continue;
1587 #else
1588 	for ( int idx = 0 ; idx < nPollFDs ; ++idx )
1589 	{
1590 		if ( !( pPollFDs[ idx ].revents & POLLRDNORM ) )
1591 			continue;
1592 		if ( idx >= nSocketsToPoll )
1593 		{
1594 			// It's a wake request.  Pull a single packet out of the queue.
1595 			// We want one wake request to always result in exactly one wake up.
1596 			// Wake request are relatively rare, and we don't want to skip any
1597 			// or combine them.  That would result in complicated race conditions
1598 			// where we stay asleep a lot longer than we should.
1599 			#ifdef NN_NINTENDO_SDK
1600 				// Sorry, but this code is covered under NDA with Nintendo, and
1601 				// we don't have permission to distribute it.
1602 			#else
1603 				Assert( pPollFDs[idx].fd == s_hSockWakeThreadRead );
1604 				::recv( s_hSockWakeThreadRead, buf, sizeof(buf), 0 );
1605 			#endif
1606 			continue;
1607 		}
1608 		CRawUDPSocketImpl *pSock = pSocketsToPoll[ idx ];
1609 #endif
1610 
1611 		// Drain the socket.  But if the callback gets cleared, that
1612 		// indicates that the socket is pending destruction and is
1613 		// logically closed to the calling code.
1614 		while ( pSock->m_callback.m_fnCallback )
1615 		{
1616 			if ( s_nLowLevelSupportRefCount.load(std::memory_order_acquire) <= 0 )
1617 				return true; // current thread owns the lock
1618 
1619 			#ifdef STEAMNETWORKINGSOCKETS_LOWLEVEL_TIME_SOCKET_CALLS
1620 				SteamNetworkingMicroseconds usecRecvFromStart = SteamNetworkingSockets_GetLocalTimestamp();
1621 			#endif
1622 
1623 			sockaddr_storage from;
1624 			socklen_t fromlen = sizeof(from);
1625 			int ret = ::recvfrom( pSock->m_socket, buf, sizeof( buf ), 0, (sockaddr *)&from, &fromlen );
1626 
1627 			#ifdef STEAMNETWORKINGSOCKETS_LOWLEVEL_TIME_SOCKET_CALLS
1628 				SteamNetworkingMicroseconds usecRecvFromEnd = SteamNetworkingSockets_GetLocalTimestamp(); // FIXME - If we add a timestamp to RecvPktInfo_t this will be free
1629 				if ( usecRecvFromEnd > s_usecIgnoreLongLockWaitTimeUntil )
1630 				{
1631 					SteamNetworkingMicroseconds usecRecvFromElapsed = usecRecvFromEnd - usecRecvFromStart;
1632 					if ( usecRecvFromElapsed > 1000 )
1633 					{
1634 						SpewWarning( "recvfrom took %.1fms\n", usecRecvFromElapsed*1e-3 );
1635 						ETW_LongOp( "UDP recvfrom", usecRecvFromElapsed );
1636 					}
1637 				}
1638 			#endif
1639 
1640 			// Negative value means nothing more to read.
1641 			//
1642 			// NOTE 1: We're not checking the cause of failure.  Usually it would be "EWOULDBLOCK",
1643 			// meaning no more data.  However if there was some socket error (i.e. somebody did something
1644 			// to reset the network stack, etc) we could make the code more robust by detecting this.
1645 			// It would require us plumbing through this failure somehow, and all we have here is a callback
1646 			// for processing packets.  Probably not worth the effort to handle this relatively common case.
1647 			// It will just appear to the app that the cord is cut on this socket.
1648 			//
1649 			// NOTE 2: 0 byte datagram is possible, and in this case recvfrom will return 0.
1650 			// (But all of our protocols enforce a minimum packet size, so if we get a zero byte packet,
1651 			// it's a bogus.  We could drop it here but let's send it through the normal mechanism to
1652 			// be handled/reported in the same way as any other bogus packet.)
1653 			if ( ret < 0 )
1654 				break;
1655 
1656 			// Add a tag.  If we end up holding the lock for a long time, this tag
1657 			// will tell us how many packets were processed
1658 			SteamNetworkingGlobalLock::AssertHeldByCurrentThread( "RecvUDPPacket" );
1659 
1660 			// Check simulated global rate limit.  Make sure this is fast
1661 			// when the limit is not in use
1662 			if ( unlikely( g_Config_FakeRateLimit_Recv_Rate.Get() > 0 ) )
1663 			{
1664 
1665 				// Check if bucket already has tokens in it, which
1666 				// will be common.  If so, we can avoid reading the
1667 				// timer
1668 				if ( s_flFakeRateLimit_Recv_tokens <= 0.0f )
1669 				{
1670 
1671 					// Update bucket with tokens
1672 					// FIXME - We could probably avoid reading the timer here
1673 					// If we read it in the outer loop.  Which...we probably should do
1674 					// and add to the context struct, since almost every packet callback
1675 					// currently does it.
1676 					UpdateFakeRateLimitTokenBuckets( SteamNetworkingSockets_GetLocalTimestamp() );
1677 
1678 					// Still empty?
1679 					if ( s_flFakeRateLimit_Recv_tokens <= 0.0f )
1680 						continue;
1681 				}
1682 
1683 				// Spend tokens
1684 				s_flFakeRateLimit_Recv_tokens -= ret;
1685 			}
1686 
1687 			// Check for simulating random packet loss
1688 			if ( RandomBoolWithOdds( g_Config_FakePacketLoss_Recv.Get() ) )
1689 				continue;
1690 
1691 			RecvPktInfo_t info;
1692 			info.m_adrFrom.SetFromSockadr( &from );
1693 
1694 			// If we're dual stack, convert mapped IPv4 back to ordinary IPv4
1695 			if ( pSock->m_nAddressFamilies == k_nAddressFamily_DualStack )
1696 				info.m_adrFrom.BConvertMappedToIPv4();
1697 
1698 			// Check for tracing
1699 			if ( g_Config_PacketTraceMaxBytes.Get() >= 0 )
1700 			{
1701 				iovec tmp;
1702 				tmp.iov_base = buf;
1703 				tmp.iov_len = ret;
1704 				pSock->TracePkt( false, info.m_adrFrom, 1, &tmp );
1705 			}
1706 
1707 			int32 nPacketFakeLagTotal = g_Config_FakePacketLag_Recv.Get();
1708 
1709 			// Check for simulating random packet reordering
1710 			if ( RandomBoolWithOdds( g_Config_FakePacketReorder_Recv.Get() ) )
1711 			{
1712 				nPacketFakeLagTotal += g_Config_FakePacketReorder_Time.Get();
1713 			}
1714 
1715 			// Check for simulating random packet duplication
1716 			if ( RandomBoolWithOdds( g_Config_FakePacketDup_Recv.Get() ) )
1717 			{
1718 				int32 nDupLag = nPacketFakeLagTotal + WeakRandomInt( 0, g_Config_FakePacketDup_TimeMax.Get() );
1719 				nDupLag = std::max( 1, nDupLag );
1720 				iovec temp;
1721 				temp.iov_len = ret;
1722 				temp.iov_base = buf;
1723 				s_packetLagQueueRecv.LagPacket( pSock, info.m_adrFrom, nDupLag, 1, &temp );
1724 			}
1725 
1726 			// Check for simulating lag
1727 			if ( nPacketFakeLagTotal > 0 )
1728 			{
1729 				iovec temp;
1730 				temp.iov_len = ret;
1731 				temp.iov_base = buf;
1732 				s_packetLagQueueRecv.LagPacket( pSock, info.m_adrFrom, nPacketFakeLagTotal, 1, &temp );
1733 			}
1734 			else
1735 			{
1736 				ETW_UDPRecvPacket( info.m_adrFrom, ret );
1737 
1738 				info.m_pPkt = buf;
1739 				info.m_cbPkt = ret;
1740 				info.m_pSock = pSock;
1741 				pSock->m_callback( info );
1742 			}
1743 
1744 			#ifdef STEAMNETWORKINGSOCKETS_LOWLEVEL_TIME_SOCKET_CALLS
1745 				SteamNetworkingMicroseconds usecProcessPacketEnd = SteamNetworkingSockets_GetLocalTimestamp();
1746 				if ( usecProcessPacketEnd > s_usecIgnoreLongLockWaitTimeUntil )
1747 				{
1748 					SteamNetworkingMicroseconds usecProcessPacketElapsed = usecProcessPacketEnd - usecRecvFromEnd;
1749 					if ( usecProcessPacketElapsed > 1000 )
1750 					{
1751 						SpewWarning( "process packet took %.1fms\n", usecProcessPacketElapsed*1e-3 );
1752 						ETW_LongOp( "process packet", usecProcessPacketElapsed );
1753 					}
1754 				}
1755 			#endif
1756 		}
1757 	}
1758 
1759 	// We retained the lock
1760 	return true;
1761 }
1762 
1763 void ProcessPendingDestroyClosedRawUDPSockets()
1764 {
1765 	SteamNetworkingGlobalLock::AssertHeldByCurrentThread();
1766 
1767 	for ( CRawUDPSocketImpl *pSock: s_vecRawSocketsPendingDeletion )
1768 	{
1769 		Assert( pSock->m_callback.m_fnCallback == nullptr );
1770 		delete pSock;
1771 	}
1772 
1773 	s_vecRawSocketsPendingDeletion.RemoveAll();
1774 }
1775 
// Run all work that must happen while we hold the global lock:
// queued lock-scoped tasks, deferred connection deletion, and deferred
// raw socket destruction.  Called from the poll loop and during final
// shutdown.  The order of these steps is deliberate.
static void ProcessDeferredOperations()
{
	SteamNetworkingGlobalLock::AssertHeldByCurrentThread();

	// Tasks that were queued to be run while we hold the lock
	ISteamNetworkingSocketsRunWithLock::ServiceQueue();

	// Process any connections queued for delete
	CSteamNetworkConnectionBase::ProcessDeletionList();

	// Close any sockets pending delete, if we discarded a server
	// We can close the sockets safely now, because we know we're
	// not polling on them and we know we hold the lock
	ProcessPendingDestroyClosedRawUDPSockets();
}
1791 
1792 /////////////////////////////////////////////////////////////////////////////
1793 //
1794 // Service thread
1795 //
1796 /////////////////////////////////////////////////////////////////////////////
1797 
1798 //
1799 // Polling function.
1800 // On entry: lock is held *exactly once*
1801 // Returns: true - we want to keep running, lock is held
1802 // Returns: false - stop request detected, lock no longer held
1803 //
// Run one iteration of the service poll: compute the longest time we can
// safely sleep (bounded by the next scheduled thinker), wait for socket
// activity, then run thinkers and deferred operations.
//
// msWait      - maximum milliseconds the caller is willing to sleep
// bManualPoll - true when driven by the app's manual poll; a mismatch
//               with the current global mode is treated as a stop request
//
// Lock protocol: on entry the global lock is held exactly once.  Returns
// true with the lock still held, or false after releasing it (or after
// PollRawUDPSockets declined to re-acquire it).
static bool SteamNetworkingSockets_InternalPoll( int msWait, bool bManualPoll )
{
	AssertGlobalLockHeldExactlyOnce();

	// Figure out how long to sleep
	SteamNetworkingMicroseconds usecNextWakeTime = IThinker::Thinker_GetNextScheduledThinkTime();
	if ( usecNextWakeTime < k_nThinkTime_Never )
	{

		// Calc wait time to wake up as late as possible,
		// rounded up to the nearest millisecond.
		SteamNetworkingMicroseconds usecNow = SteamNetworkingSockets_GetLocalTimestamp();
		int64 usecUntilNextThinkTime = usecNextWakeTime - usecNow;

		if ( usecNow >= usecNextWakeTime )
		{
			// Earliest thinker in the queue is ready to go now.
			// There is no point in going to sleep
			msWait = 0;
		}
		else
		{

			// Set wake time to wake up at the target time.  We assume the scheduler
			// only has 1ms precision, so we round to the nearest ms, so that we don't
			// always wake up exactly 1ms early, go to sleep and wait for 1ms.
			//
			// NOTE: On linux, we have a precise timer and we could probably do better
			// than this.  On windows, we could use an alertable timer, and presumably when
			// we set the we could use a high precision relative time, and Windows could do
			// smart stuff.
			int msTaskWait = ( usecUntilNextThinkTime + 500 ) / 1000;

			// We must wait at least 1 ms
			msTaskWait = std::max( 1, msTaskWait );

			// Limit to what the caller has requested
			msWait = std::min( msWait, msTaskWait );
		}
	}

	// Don't ever sleep for too long, just in case.  This timeout
	// is long enough so that if we have a bug where we really need to
	// be explicitly waking the thread for good perf, we will notice
	// the delay.  But not so long that a bug in some rare
	// shutdown race condition (or the like) will be catastrophic
	msWait = std::min( msWait, k_msMaxPollWait );

	// Poll sockets
	if ( !PollRawUDPSockets( msWait, bManualPoll ) )
	{
		// Shutdown request, and they did NOT re-acquire the lock
		return false;
	}

	AssertGlobalLockHeldExactlyOnce();

	// Shutdown request?  (Refcount dropped to zero, or the polling mode
	// changed out from under us.)
	if ( s_nLowLevelSupportRefCount.load(std::memory_order_acquire) <= 0 || s_bManualPollMode != bManualPoll )
	{
		SteamNetworkingGlobalLock::Unlock();
		return false; // Shutdown request, we have released the lock
	}

	// Check for periodic processing
	IThinker::Thinker_ProcessThinkers();

	// Check for various deferred operations
	ProcessDeferredOperations();
	return true;
}
1875 
// Entry point for the background service thread.  Raises its own priority
// (best effort), names itself for debuggers on Windows, then loops in
// SteamNetworkingSockets_InternalPoll until shutdown or manual poll mode
// is requested.  While awake, the thread always holds the global lock.
static void SteamNetworkingThreadProc()
{

	// This is an "interrupt" thread.  When an incoming packet raises the event,
	// we need to take priority above normal threads and wake up immediately
	// to process the packet.  We should be asleep most of the time waiting
	// for packets to arrive.
	#if defined(_WIN32)
		DbgVerify( SetThreadPriority( GetCurrentThread(), THREAD_PRIORITY_HIGHEST ) );
	#elif defined(POSIX)
		// This probably won't work on Linux, because you cannot raise thread priority
		// without being root.  But on some systems it works.  So we try, and if it
		// works, great.
		struct sched_param sched;
		int policy;
		pthread_t thread = pthread_self();
		if ( pthread_getschedparam(thread, &policy, &sched) == 0 )
		{
			// Make sure we're not already at max.  No matter what, we don't
			// want to lower our priority!  On linux, it appears that what happens
			// is that the current and max priority values here are 0.
			int max_priority = sched_get_priority_max(policy);
			//printf( "pthread_getschedparam worked, policy=%d, pri=%d, max_pri=%d\n", policy, sched.sched_priority, max_priority );
			if ( max_priority > sched.sched_priority )
			{

				// Determine new priority: at least one step up, aiming for
				// three quarters of the way to the max.
				int min_priority = sched_get_priority_min(policy);
				sched.sched_priority = std::max( sched.sched_priority+1, (min_priority + max_priority*3) / 4 );

				// Try to set it (ignore failure; this is best effort)
				pthread_setschedparam( thread, policy, &sched );
			}
		}
	#endif

	#if defined(_WIN32) && !defined(__GNUC__)

		#pragma warning( disable: 6132 ) // Possible infinite loop:  use of the constant EXCEPTION_CONTINUE_EXECUTION in the exception-filter expression of a try-except.  Execution restarts in the protected block.

		// Standard MSVC "SetThreadName" trick: raise a special SEH exception
		// (0x406D1388) that an attached debugger interprets as a thread name.
		typedef struct tagTHREADNAME_INFO
		{
			DWORD dwType;
			LPCSTR szName;
			DWORD dwThreadID;
			DWORD dwFlags;
		} THREADNAME_INFO;


		THREADNAME_INFO info;
		{
			info.dwType = 0x1000;
			info.szName = "SteamNetworking";
			info.dwThreadID = GetCurrentThreadId();
			info.dwFlags = 0;
		}
		__try
		{
			RaiseException( 0x406D1388, 0, sizeof(info)/sizeof(DWORD), (ULONG_PTR*)&info );
		}
		__except(EXCEPTION_CONTINUE_EXECUTION)
		{
		}

	#else
		// Help!  Really we should do this for all platforms.  Seems it's not
		// totally straightforward the correct way to do this on Linux.
	#endif

	// In the loop, we will always hold global lock while we're awake.
	// So go ahead and acquire it now.  But watch out for a race condition
	// where we want to shut down immediately after starting the thread
	do
	{
		if ( s_nLowLevelSupportRefCount.load(std::memory_order_acquire) <= 0 || s_bManualPollMode )
			return;
	} while ( !SteamNetworkingGlobalLock::TryLock( "ServiceThread", 10 ) );

	// Random number generator may be per thread!  Make sure and see it for
	// this thread, if so
	SeedWeakRandomGenerator();

	SpewVerbose( "Service thread running.\n" );

	// Keep looping until we're asked to terminate.  (InternalPoll returns
	// false with the lock already released on shutdown.)
	while ( SteamNetworkingSockets_InternalPoll( 5000, false ) )
	{
		// If they activate manual poll mode, then bail!
		if ( s_bManualPollMode )
		{
			SteamNetworkingGlobalLock::Unlock();
			break;
		}
	}

	SpewVerbose( "Service thread exiting.\n" );
}
1973 
// Stop and join the background service thread, then free the thread object.
// The caller must already have arranged for the thread's loop to exit
// (refcount at zero, or manual poll mode enabled) before calling this.
static void StopSteamDatagramThread()
{
	// They should have set some sort of flag that will cause the thread to stop
	Assert( s_nLowLevelSupportRefCount.load(std::memory_order_acquire) == 0 || s_bManualPollMode );

	// Send wake up signal
	WakeSteamDatagramThread();

	// Wait for thread to finish
	s_pThreadSteamDatagram->join();

	// Clean up
	delete s_pThreadSteamDatagram;
	s_pThreadSteamDatagram = nullptr;
}
1989 
1990 /////////////////////////////////////////////////////////////////////////////
1991 //
1992 // Bound sockets / socket sharing
1993 //
1994 /////////////////////////////////////////////////////////////////////////////
1995 
// A bound UDP socket dedicated to exactly one remote host.  Created by
// OpenUDPSocketBoundToHost / CreateBoundSocketPair; lifetime is managed
// through Close(), which self-deletes.
class CDedicatedBoundSocket : public IBoundUDPSocket
{
private:
	// Private destructor: callers must go through Close()
	inline virtual ~CDedicatedBoundSocket() {}
public:
	CDedicatedBoundSocket( IRawUDPSocket *pRawSock, const netadr_t &adr )
	: IBoundUDPSocket( pRawSock, adr ) {}

	// User's callback, invoked (via DedicatedBoundSocketCallback) only for
	// packets arriving from the bound remote host
	CRecvPacketCallback m_callback;

	virtual void Close() OVERRIDE
	{
		// Close the underlying raw socket, then destroy ourselves
		m_pRawSock->Close();
		m_pRawSock = nullptr;
		delete this;
	}
};
2013 
2014 static void DedicatedBoundSocketCallback( const RecvPktInfo_t &info, CDedicatedBoundSocket *pSock )
2015 {
2016 
2017 	// Make sure that it's from the guy we are supposed to be talking to.
2018 	if ( info.m_adrFrom != pSock->GetRemoteHostAddr() )
2019 	{
2020 		// Packets from random internet hosts happen all the time,
2021 		// especially on a LAN where all sorts of people have broadcast
2022 		// discovery protocols.  So this probably isn't a bug or a problem.
2023 		SpewVerbose( "Ignoring stray packet from %s received on port %d.  Should only be talking to %s on that port.\n",
2024 			CUtlNetAdrRender( info.m_adrFrom ).String(), pSock->GetRawSock()->m_boundAddr.m_port,
2025 			CUtlNetAdrRender( pSock->GetRemoteHostAddr() ).String() );
2026 		return;
2027 	}
2028 
2029 	// Now execute their callback.
2030 	// Passing the address in this context is sort of superfluous.
2031 	// Should we use a different signature here so that the user
2032 	// of our API doesn't write their own useless code to check
2033 	// the from address?
2034 	pSock->m_callback( info );
2035 }
2036 
2037 IBoundUDPSocket *OpenUDPSocketBoundToHost( const netadr_t &adrRemote, CRecvPacketCallback callback, SteamDatagramErrMsg &errMsg )
2038 {
2039 	SteamNetworkingGlobalLock::AssertHeldByCurrentThread();
2040 
2041 	// Select local address to use.
2042 	// Since we know the remote host, let's just always use a single-stack socket
2043 	// with the specified family
2044 	int nAddressFamilies = ( adrRemote.GetType() == k_EIPTypeV6 ) ? k_nAddressFamily_IPv6 : k_nAddressFamily_IPv4;
2045 
2046 	// Create a socket, bind it to the desired local address
2047 	CDedicatedBoundSocket *pTempContext = nullptr; // don't yet know the context
2048 	CRawUDPSocketImpl *pRawSock = OpenRawUDPSocketInternal( CRecvPacketCallback( DedicatedBoundSocketCallback, pTempContext ), errMsg, nullptr, &nAddressFamilies );
2049 	if ( !pRawSock )
2050 		return nullptr;
2051 
2052 	// Return wrapper interface that can only talk to this remote host
2053 	CDedicatedBoundSocket *pBoundSock = new CDedicatedBoundSocket( pRawSock, adrRemote );
2054 	pRawSock->m_callback.m_pContext = pBoundSock;
2055 	pBoundSock->m_callback = callback;
2056 
2057 	return pBoundSock;
2058 }
2059 
// Create two loopback-bound UDP sockets wired to talk only to each other.
// On success fills ppOutSockets[0] (callback1) and ppOutSockets[1]
// (callback2) and returns true; on failure returns false with errMsg set.
bool CreateBoundSocketPair( CRecvPacketCallback callback1, CRecvPacketCallback callback2, IBoundUDPSocket **ppOutSockets, SteamDatagramErrMsg &errMsg )
{
	SteamNetworkingGlobalLock::AssertHeldByCurrentThread();

	SteamNetworkingIPAddr localAddr;

	// Create two socket UDP sockets, bound to (IPv4) loopback IP, but allow OS to choose ephemeral port
	CRawUDPSocketImpl *pRawSock[2];
	uint32 nLocalIP = 0x7f000001; // 127.0.0.1
	CDedicatedBoundSocket *pTempContext = nullptr; // don't yet know the context
	localAddr.SetIPv4( nLocalIP, 0 );
	pRawSock[0] = OpenRawUDPSocketInternal( CRecvPacketCallback( DedicatedBoundSocketCallback, pTempContext ), errMsg, &localAddr, nullptr );
	if ( !pRawSock[0] )
		return false;
	localAddr.SetIPv4( nLocalIP, 0 );
	pRawSock[1] = OpenRawUDPSocketInternal( CRecvPacketCallback( DedicatedBoundSocketCallback, pTempContext ), errMsg, &localAddr, nullptr );
	if ( !pRawSock[1] )
	{
		// NOTE(review): other cleanup paths in this file release raw sockets
		// via Close() (deferred destruction) rather than plain delete; confirm
		// that deleting directly here cannot leave a stale pointer in any
		// global socket list.
		delete pRawSock[0];
		return false;
	}

	// Return wrapper interfaces that can only talk to each other.
	// Each wrapper's remote address is the *other* socket's bound port on loopback.
	for ( int i = 0 ; i < 2 ; ++i )
	{
		auto s = new CDedicatedBoundSocket( pRawSock[i], netadr_t( nLocalIP, pRawSock[1-i]->m_boundAddr.m_port ) );
		pRawSock[i]->m_callback.m_pContext = s;
		s->m_callback = (i == 0 ) ? callback1 : callback2;
		ppOutSockets[i] = s;
	}

	return true;
}
2093 
2094 CSharedSocket::CSharedSocket()
2095 {
2096 	m_pRawSock = nullptr;
2097 }
2098 
// Destructor tears down the raw socket and all remote-host wrappers.
CSharedSocket::~CSharedSocket()
{
	Kill();
}
2103 
2104 void CSharedSocket::CallbackRecvPacket( const RecvPktInfo_t &info, CSharedSocket *pSock )
2105 {
2106 	// Locate the client
2107 	int idx = pSock->m_mapRemoteHosts.Find( info.m_adrFrom );
2108 
2109 	// Select the callback to invoke, ether client-specific, or the default
2110 	const CRecvPacketCallback &callback = ( idx == pSock->m_mapRemoteHosts.InvalidIndex() ) ? pSock->m_callbackDefault : pSock->m_mapRemoteHosts[ idx ]->m_callback;
2111 
2112 	// Execute the callback
2113 	callback( info );
2114 }
2115 
2116 bool CSharedSocket::BInit( const SteamNetworkingIPAddr &localAddr, CRecvPacketCallback callbackDefault, SteamDatagramErrMsg &errMsg )
2117 {
2118 	SteamNetworkingGlobalLock::AssertHeldByCurrentThread();
2119 
2120 	Kill();
2121 
2122 	SteamNetworkingIPAddr bindAddr = localAddr;
2123 	m_pRawSock = OpenRawUDPSocket( CRecvPacketCallback( CallbackRecvPacket, this ), errMsg, &bindAddr, nullptr );
2124 	if ( m_pRawSock == nullptr )
2125 		return false;
2126 
2127 	m_callbackDefault = callbackDefault;
2128 	return true;
2129 }
2130 
// Release everything: disable the default callback, close the raw socket,
// and destroy all per-remote-host wrappers.  Safe to call more than once.
void CSharedSocket::Kill()
{
	SteamNetworkingGlobalLock::AssertHeldByCurrentThread();

	// Disable the default callback first
	m_callbackDefault.m_fnCallback = nullptr;
	if ( m_pRawSock )
	{
		m_pRawSock->Close();
		m_pRawSock = nullptr;
	}
	// Close every remote host wrapper.  (CloseRemoteHostByIndex removes the
	// entry from the map as we iterate.)
	FOR_EACH_HASHMAP( m_mapRemoteHosts, idx )
	{
		CloseRemoteHostByIndex( idx );
	}
}
2146 
// Destroy the remote-host wrapper at the given map index and remove the
// entry from the table.  idx must be a valid index.
void CSharedSocket::CloseRemoteHostByIndex( int idx )
{
	SteamNetworkingGlobalLock::AssertHeldByCurrentThread();

	delete m_mapRemoteHosts[ idx ];
	m_mapRemoteHosts[idx] = nullptr; // just for grins
	m_mapRemoteHosts.RemoveAt( idx );
}
2155 
2156 IBoundUDPSocket *CSharedSocket::AddRemoteHost( const netadr_t &adrRemote, CRecvPacketCallback callback )
2157 {
2158 	SteamNetworkingGlobalLock::AssertHeldByCurrentThread();
2159 
2160 	if ( m_mapRemoteHosts.HasElement( adrRemote ) )
2161 	{
2162 		AssertMsg1( false, "Already talking to %s on this shared socket, cannot add another remote host!", CUtlNetAdrRender( adrRemote ).String() );
2163 		return nullptr;
2164 	}
2165 	RemoteHost *pRemoteHost = new RemoteHost( m_pRawSock, adrRemote );
2166 	pRemoteHost->m_pOwner = this;
2167 	pRemoteHost->m_callback = callback;
2168 	m_mapRemoteHosts.Insert( adrRemote, pRemoteHost );
2169 
2170 	return pRemoteHost;
2171 }
2172 
2173 void CSharedSocket::RemoteHost::Close()
2174 {
2175 	SteamNetworkingGlobalLock::AssertHeldByCurrentThread();
2176 
2177 	int idx = m_pOwner->m_mapRemoteHosts.Find( m_adr );
2178 	if ( idx == m_pOwner->m_mapRemoteHosts.InvalidIndex() || m_pOwner->m_mapRemoteHosts[idx] != this )
2179 	{
2180 		AssertMsg( false, "CSharedSocket client table corruption!" );
2181 		delete this;
2182 	}
2183 	else
2184 	{
2185 		m_pOwner->CloseRemoteHostByIndex( idx );
2186 	}
2187 }
2188 
2189 /////////////////////////////////////////////////////////////////////////////
2190 //
2191 // Spew
2192 //
2193 /////////////////////////////////////////////////////////////////////////////
2194 
2195 SteamNetworkingMicroseconds g_usecLastRateLimitSpew;
2196 int g_nRateLimitSpewCount;
2197 ESteamNetworkingSocketsDebugOutputType g_eSystemSpewLevel = k_ESteamNetworkingSocketsDebugOutputType_None; // Option selected by the "system" (environment variable, etc)
2198 ESteamNetworkingSocketsDebugOutputType g_eAppSpewLevel = k_ESteamNetworkingSocketsDebugOutputType_Msg; // Option selected by app
2199 ESteamNetworkingSocketsDebugOutputType g_eDefaultGroupSpewLevel = k_ESteamNetworkingSocketsDebugOutputType_Msg; // Effective value
2200 FSteamNetworkingSocketsDebugOutput g_pfnDebugOutput = nullptr;
2201 void (*g_pfnPreFormatSpewHandler)( ESteamNetworkingSocketsDebugOutputType eType, bool bFmt, const char* pstrFile, int nLine, const char *pMsg, va_list ap ) = SteamNetworkingSockets_DefaultPreFormatDebugOutputHandler;
2202 static bool s_bSpewInitted = false;
2203 
2204 static FILE *g_pFileSystemSpew;
2205 static SteamNetworkingMicroseconds g_usecSystemLogFileOpened;
2206 static bool s_bNeedToFlushSystemSpew = false;;
2207 
2208 // FIXME - probably need our own leaf lock for the spew
2209 
// Initialize the spew system.  The first call reads the
// STEAMNETWORKINGSOCKETS_LOG_LEVEL / STEAMNETWORKINGSOCKETS_LOG_FILE
// environment variables to set the "system" spew level and open the log
// file.  Every call recomputes the effective default group level from the
// system and app levels.  Safe to call repeatedly.
static void InitSpew()
{

	// First time, check environment variables and set system spew level
	if ( !s_bSpewInitted )
	{
		s_bSpewInitted = true;

		const char *STEAMNETWORKINGSOCKETS_LOG_LEVEL = getenv( "STEAMNETWORKINGSOCKETS_LOG_LEVEL" );
		if ( !V_isempty( STEAMNETWORKINGSOCKETS_LOG_LEVEL ) )
		{
			// Map the numeric env value onto the debug output enum.
			// Unrecognized values leave the level unchanged (None).
			switch ( atoi( STEAMNETWORKINGSOCKETS_LOG_LEVEL ) )
			{
				case 0: g_eSystemSpewLevel = k_ESteamNetworkingSocketsDebugOutputType_None; break;
				case 1: g_eSystemSpewLevel = k_ESteamNetworkingSocketsDebugOutputType_Warning; break;
				case 2: g_eSystemSpewLevel = k_ESteamNetworkingSocketsDebugOutputType_Msg; break;
				case 3: g_eSystemSpewLevel = k_ESteamNetworkingSocketsDebugOutputType_Verbose; break;
				case 4: g_eSystemSpewLevel = k_ESteamNetworkingSocketsDebugOutputType_Debug; break;
				case 5: g_eSystemSpewLevel = k_ESteamNetworkingSocketsDebugOutputType_Everything; break;
			}

			if ( g_eSystemSpewLevel > k_ESteamNetworkingSocketsDebugOutputType_None )
			{

				// What log file to use?
				const char *pszLogFile = getenv( "STEAMNETWORKINGSOCKETS_LOG_FILE" );
				if ( !pszLogFile )
					pszLogFile = "steamnetworkingsockets.log" ;

				// Try to open file.  Use binary mode, since we want to make sure we control
				// when it is flushed to disk
				g_pFileSystemSpew = fopen( pszLogFile, "wb" );
				if ( g_pFileSystemSpew )
				{
					g_usecSystemLogFileOpened = SteamNetworkingSockets_GetLocalTimestamp();
					time_t now = time(nullptr);
					fprintf( g_pFileSystemSpew, "Log opened, time %lld %s", (long long)now, ctime( &now ) );

					// if they ask for verbose, turn on some other groups, by default
					if ( g_eSystemSpewLevel >= k_ESteamNetworkingSocketsDebugOutputType_Verbose )
					{
						g_ConfigDefault_LogLevel_P2PRendezvous.m_value.m_defaultValue = g_eSystemSpewLevel;
						g_ConfigDefault_LogLevel_P2PRendezvous.m_value.Set( g_eSystemSpewLevel );

						g_ConfigDefault_LogLevel_PacketGaps.m_value.m_defaultValue = g_eSystemSpewLevel-1;
						g_ConfigDefault_LogLevel_PacketGaps.m_value.Set( g_eSystemSpewLevel-1 );
					}
				}
				else
				{
					// Failed to open the log file; disable system spew entirely
					g_eSystemSpewLevel = k_ESteamNetworkingSocketsDebugOutputType_None;
				}
			}
		}
	}

	// Effective level is the more verbose of the system and app settings
	g_eDefaultGroupSpewLevel = std::max( g_eSystemSpewLevel, g_eAppSpewLevel );

}
2270 
2271 static void KillSpew()
2272 {
2273 	g_eDefaultGroupSpewLevel = g_eSystemSpewLevel = g_eAppSpewLevel = k_ESteamNetworkingSocketsDebugOutputType_None;
2274 	g_pfnDebugOutput = nullptr;
2275 	s_bSpewInitted = false;
2276 	s_bNeedToFlushSystemSpew = false;
2277 	if ( g_pFileSystemSpew )
2278 	{
2279 		fclose( g_pFileSystemSpew );
2280 		g_pFileSystemSpew = nullptr;
2281 	}
2282 }
2283 
2284 static void FlushSpew()
2285 {
2286 	if ( s_bNeedToFlushSystemSpew )
2287 	{
2288 		if ( g_pFileSystemSpew )
2289 			fflush( g_pFileSystemSpew );
2290 		s_bNeedToFlushSystemSpew = false;
2291 	}
2292 }
2293 
2294 
// Entry point used by the spew macros: forwards the printf-style format
// string and varargs to the currently installed pre-format handler
// (bFmt=true tells the handler that pMsg is a format string).
void ReallySpewTypeFmt( int eType, const char *pMsg, ... )
{
	va_list ap;
	va_start( ap, pMsg );
	(*g_pfnPreFormatSpewHandler)( ESteamNetworkingSocketsDebugOutputType(eType), true, nullptr, 0, pMsg, ap );
	va_end( ap );
}
2302 
// Add a reference to the low-level socket/threading layer, performing
// one-time initialization on the first reference: spew, crypto, ETW,
// OS socket stack (WSAStartup/timeBeginPeriod on Windows, COM on Xbox),
// fake rate-limit buckets, RNG seeding, and the wake object used to
// rouse the service thread.  Starts the service thread if needed.
// Must be called with the global lock held.  Returns false with errMsg
// filled in if OS-level initialization fails.
bool BSteamNetworkingSocketsLowLevelAddRef( SteamDatagramErrMsg &errMsg )
{
	SteamNetworkingGlobalLock::AssertHeldByCurrentThread();

	// Make sure and call time function at least once
	// just before we start up our thread, so we don't lurch
	// on our first reading after the thread is running and
	// take action to correct this.
	SteamNetworkingSockets_GetLocalTimestamp();

	// First time init?
	if ( s_nLowLevelSupportRefCount.load(std::memory_order_acquire) == 0 )
	{
		InitSpew();

		CCrypto::Init();

		// Initialize event tracing
		ETW_Init();

		// Give us a extra time here.  This is a one-time init function and the OS might
		// need to load up libraries and stuff.
		SteamNetworkingGlobalLock::SetLongLockWarningThresholdMS( "BSteamNetworkingSocketsLowLevelAddRef", 500 );

		// Initialize COM
		#ifdef _XBOX_ONE
		{
			HRESULT hr = ::CoInitializeEx( nullptr, COINIT_MULTITHREADED );
			if ( !SUCCEEDED( hr ) )
			{
				V_sprintf_safe( errMsg, "CoInitializeEx returned %x", hr );
				return false;
			}
		}
		#endif

		// Initialize sockets
		#ifdef _WIN32
		{
			#pragma comment( lib, "ws2_32.lib" )
			WSAData wsaData;
			if ( ::WSAStartup( MAKEWORD(2, 2), &wsaData ) != 0 )
			{
				#ifdef _XBOX_ONE
					::CoUninitialize();
				#endif
				V_strcpy_safe( errMsg, "WSAStartup failed" );
				return false;
			}

			// Ask the OS scheduler for 1ms timer precision, so our short
			// sleeps in the poll loop are honored
			#pragma comment( lib, "winmm.lib" )
			if ( ::timeBeginPeriod( 1 ) != 0 )
			{
				::WSACleanup();
				#ifdef _XBOX_ONE
					::CoUninitialize();
				#endif
				V_strcpy_safe( errMsg, "timeBeginPeriod failed" );
				return false;
			}
		}
		#endif

		// Initialize fake rate limit token buckets
		InitFakeRateLimit();

		// Make sure random number generator is seeded
		SeedWeakRandomGenerator();

		// Create thread communication object used to wake the background thread efficiently
		// in case a thinker priority changes or we want to shutdown
		#if defined( _WIN32 )
			Assert( s_hEventWakeThread == INVALID_HANDLE_VALUE );

			// Note: Using "automatic reset" style event.
			s_hEventWakeThread = CreateEvent( nullptr, false, false, nullptr );
			if ( s_hEventWakeThread == NULL || s_hEventWakeThread == INVALID_HANDLE_VALUE )
			{
				s_hEventWakeThread = INVALID_HANDLE_VALUE;
				V_sprintf_safe( errMsg, "CreateEvent() call failed.  Error code 0x%08x.", GetLastError() );
				return false;
			}
		#elif defined( NN_NINTENDO_SDK )
			// Sorry, but this code is covered under NDA with Nintendo, and
			// we don't have permission to distribute it.
		#else
			// POSIX: use a datagram socketpair; writing to one end wakes a
			// poll on the other
			Assert( s_hSockWakeThreadRead == INVALID_SOCKET );
			Assert( s_hSockWakeThreadWrite == INVALID_SOCKET );
			int sockType = SOCK_DGRAM;
			#ifdef LINUX
				sockType |= SOCK_CLOEXEC;
			#endif
			int sock[2];
			if ( socketpair( AF_LOCAL, sockType, 0, sock ) != 0 )
			{
				V_sprintf_safe( errMsg, "socketpair() call failed.  Error code 0x%08x.", GetLastSocketError() );
				return false;
			}

			s_hSockWakeThreadRead = sock[0];
			s_hSockWakeThreadWrite = sock[1];

			// Both ends nonblocking; failure here is survivable, so just assert
			unsigned int opt;
			opt = 1;
			if ( ioctlsocket( s_hSockWakeThreadRead, FIONBIO, (unsigned long*)&opt ) != 0 )
			{
				AssertMsg1( false, "Failed to set socket nonblocking mode.  Error code 0x%08x.", GetLastSocketError() );
			}
			opt = 1;
			if ( ioctlsocket( s_hSockWakeThreadWrite, FIONBIO, (unsigned long*)&opt ) != 0 )
			{
				AssertMsg1( false, "Failed to set socket nonblocking mode.  Error code 0x%08x.", GetLastSocketError() );
			}
		#endif

		SpewMsg( "Initialized low level socket/threading support.\n" );
	}

	//extern void KludgePrintPublicKey();
	//KludgePrintPublicKey();

	s_nLowLevelSupportRefCount.fetch_add(1, std::memory_order_acq_rel);

	// Make sure the thread is running, if it should be
	if ( !s_bManualPollMode && !s_pThreadSteamDatagram )
		s_pThreadSteamDatagram = new std::thread( SteamNetworkingThreadProc );

	// Install an atexit handler, so that if static destruction is triggered without
	// cleaning up the library properly, we won't crash.
	static bool s_bInstalledAtExitHandler = false;
	if ( !s_bInstalledAtExitHandler )
	{
		s_bInstalledAtExitHandler = true;
		atexit( []{
			SteamNetworkingGlobalLock scopeLock( "atexit" );

			// Static destruction is about to happen.  If we have a thread,
			// we need to nuke it
			KillSpew();
			while ( s_nLowLevelSupportRefCount.load(std::memory_order_acquire) > 0 )
				SteamNetworkingSocketsLowLevelDecRef();
		} );
	}

	return true;
}
2449 
// Release a reference to the low-level layer.  When the last reference is
// released, tears everything down in the reverse order of AddRef: stops
// the service thread, destroys the wake object, runs remaining deferred
// operations, clears the packet lag queues, and shuts down ETW / the OS
// socket stack / spew.  Must be called with the global lock held.
void SteamNetworkingSocketsLowLevelDecRef()
{
	SteamNetworkingGlobalLock::AssertHeldByCurrentThread();

	// Last user is now done?
	int nLastRefCount = s_nLowLevelSupportRefCount.fetch_sub(1, std::memory_order_acq_rel);
	Assert( nLastRefCount > 0 );
	if ( nLastRefCount > 1 )
		return;

	SpewMsg( "Shutting down low level socket/threading support.\n" );

	// Give us a extra time here.  This is a one-time shutdown function.
	// There is a potential race condition / deadlock with the service thread,
	// that might cause us to have to wait for it to timeout.  And the OS
	// might need to do stuff when we close a bunch of sockets (and WSACleanup)
	SteamNetworkingGlobalLock::SetLongLockWarningThresholdMS( "SteamNetworkingSocketsLowLevelDecRef", 500 );

	// All sockets should already be closed at this point; reclaim the
	// vector's memory if so, otherwise assert (the stale entries are left
	// in place rather than force-closed)
	if ( s_vecRawSockets.IsEmpty() )
	{
		s_vecRawSockets.Purge();
	}
	else
	{
		AssertMsg( false, "Trying to close low level socket support, but we still have sockets open!" );
	}

	// Stop the service thread, if we have one
	if ( s_pThreadSteamDatagram )
		StopSteamDatagramThread();

	// Destroy wake communication objects
	#if defined( _WIN32 )
		if ( s_hEventWakeThread != INVALID_HANDLE_VALUE )
		{
			CloseHandle( s_hEventWakeThread );
			s_hEventWakeThread = INVALID_HANDLE_VALUE;
		}
	#elif defined( NN_NINTENDO_SDK )
		// Sorry, but this code is covered under NDA with Nintendo, and
		// we don't have permission to distribute it.
	#else
		if ( s_hSockWakeThreadRead != INVALID_SOCKET )
		{
			closesocket( s_hSockWakeThreadRead );
			s_hSockWakeThreadRead = INVALID_SOCKET;
		}
		if ( s_hSockWakeThreadWrite != INVALID_SOCKET )
		{
			closesocket( s_hSockWakeThreadWrite );
			s_hSockWakeThreadWrite = INVALID_SOCKET;
		}
	#endif

	// Check for any leftover tasks that were queued to be run while we hold the lock
	ProcessDeferredOperations();

	Assert( s_vecRawSocketsPendingDeletion.IsEmpty() );
	s_vecRawSocketsPendingDeletion.Purge();

	// Nuke packet lagger queues and make sure we are not registered to think
	s_packetLagQueueRecv.Clear();
	s_packetLagQueueSend.Clear();

	// Shutdown event tracing
	ETW_Kill();

	// Nuke sockets and COM
	#ifdef _WIN32
		::timeEndPeriod( 1 );
		::WSACleanup();
	#endif
	#ifdef _XBOX_ONE
		::CoUninitialize();
	#endif

	KillSpew();
}
2528 
#ifdef DBGFLAG_VALIDATE
// Memory validation hook: walk the global raw socket list so the
// validator can account for its allocations.
void SteamNetworkingSocketsLowLevelValidate( CValidator &validator )
{
	ValidateRecursive( s_vecRawSockets );
}
#endif
2535 
2536 void SteamNetworkingSockets_SetDebugOutputFunction( ESteamNetworkingSocketsDebugOutputType eDetailLevel, FSteamNetworkingSocketsDebugOutput pfnFunc )
2537 {
2538 	if ( pfnFunc && eDetailLevel > k_ESteamNetworkingSocketsDebugOutputType_None )
2539 	{
2540 		SteamNetworkingSocketsLib::g_pfnDebugOutput = pfnFunc;
2541 		SteamNetworkingSocketsLib::g_eAppSpewLevel = ESteamNetworkingSocketsDebugOutputType( eDetailLevel );
2542 	}
2543 	else
2544 	{
2545 		SteamNetworkingSocketsLib::g_pfnDebugOutput = nullptr;
2546 		SteamNetworkingSocketsLib::g_eAppSpewLevel = k_ESteamNetworkingSocketsDebugOutputType_None;
2547 	}
2548 
2549 	SteamNetworkingSocketsLib::InitSpew();
2550 }
2551 
// Return the current local timestamp in microseconds ("SteamNetworkingMicroseconds"
// time = raw platform timer plus a global atomic offset).  Lock-free and callable
// from any thread.  While low-level support is active (refcount > 0), the offset
// is adjusted so that the value returned never jumps more than
// k_usecMaxTimestampDelta past the previously returned value — e.g. after
// sitting in a debugger.  When support is not active, big jumps are allowed.
SteamNetworkingMicroseconds SteamNetworkingSockets_GetLocalTimestamp()
{
	SteamNetworkingMicroseconds usecResult;
	long long usecLastReturned;
	for (;;)
	{
		// Fetch values into locals (probably registers)
		usecLastReturned = SteamNetworkingSocketsLib::s_usecTimeLastReturned;
		long long usecOffset = SteamNetworkingSocketsLib::s_usecTimeOffset;

		// Read raw timer
		uint64 usecRaw = Plat_USTime();

		// Add offset to get value in "SteamNetworkingMicroseconds" time
		usecResult = usecRaw + usecOffset;

		// How much raw timer time (presumed to be wall clock time) has elapsed since
		// we read the timer?
		SteamNetworkingMicroseconds usecElapsed = usecResult - usecLastReturned;
		Assert( usecElapsed >= 0 ); // Our raw timer function is not monotonic!  We assume this never happens!
		if ( usecElapsed <= k_usecMaxTimestampDelta )
		{
			// Should be the common case - only a relatively small amount of time has elapsed
			break;
		}
		if ( SteamNetworkingSocketsLib::s_nLowLevelSupportRefCount.load(std::memory_order_acquire) <= 0 )
		{
			// We don't have any expectation that we should be updating the timer frequently,
			// so a big jump in the value just means they aren't calling it very often
			break;
		}

		// NOTE: We should only rarely get here, and probably as a result of running under the debugger

		// Adjust offset so that delta between timestamps is limited
		long long usecNewOffset = usecOffset - ( usecElapsed - k_usecMaxTimestampDelta );
		usecResult = usecRaw + usecNewOffset;

		// Save the new offset.
		if ( SteamNetworkingSocketsLib::s_usecTimeOffset.compare_exchange_strong( usecOffset, usecNewOffset ) )
			break;

		// Race condition which should be extremely rare.  Some other thread changed the offset, in the time
		// between when we fetched it and now.  (So, a really small race window!)  Just start all over from
		// the beginning.
	}

	// Save the last value returned.  Unless another thread snuck in there while we were busy.
	// If so, that's OK.
	SteamNetworkingSocketsLib::s_usecTimeLastReturned.compare_exchange_strong( usecLastReturned, usecResult );

	return usecResult;
}
2605 
2606 } // namespace SteamNetworkingSocketsLib
2607 
2608 using namespace SteamNetworkingSocketsLib;
2609 
SteamNetworkingSockets_SetManualPollMode(bool bFlag)2610 STEAMNETWORKINGSOCKETS_INTERFACE void SteamNetworkingSockets_SetManualPollMode( bool bFlag )
2611 {
2612 	if ( s_bManualPollMode == bFlag )
2613 		return;
2614 	SteamNetworkingGlobalLock scopeLock( "SteamNetworkingSockets_SetManualPollMode" );
2615 	s_bManualPollMode = bFlag;
2616 
2617 	// Check for starting/stopping the thread
2618 	if ( s_pThreadSteamDatagram )
2619 	{
2620 		// Thread is active.  Should it be?
2621 		if ( s_nLowLevelSupportRefCount.load(std::memory_order_acquire) <= 0 || s_bManualPollMode )
2622 		{
2623 			SpewMsg( "Service thread is running, and manual poll mode actiavted.  Stopping service thread.\n" );
2624 			StopSteamDatagramThread();
2625 		}
2626 	}
2627 	else
2628 	{
2629 		if ( s_nLowLevelSupportRefCount.load(std::memory_order_acquire) > 0 && !s_bManualPollMode )
2630 		{
2631 			// Start up the thread
2632 			SpewMsg( "Service thread is not running, and manual poll mode was turned off, starting service thread.\n" );
2633 			s_pThreadSteamDatagram = new std::thread( SteamNetworkingThreadProc );
2634 		}
2635 	}
2636 }
2637 
// Manually pump the service loop.  Only legal in manual poll mode; in normal
// mode the background service thread does this work.  msMaxWaitTime is a
// millisecond budget shared between acquiring the global lock and the poll
// itself; if the lock cannot be acquired within the budget, returns having
// done nothing.
STEAMNETWORKINGSOCKETS_INTERFACE void SteamNetworkingSockets_Poll( int msMaxWaitTime )
{
	if ( !s_bManualPollMode )
	{
		AssertMsg( false, "Not in manual poll mode!" );
		return;
	}
	Assert( s_nLowLevelSupportRefCount.load(std::memory_order_acquire) > 0 );

	// Spend budget trying to acquire the global lock.
	// NOTE(review): this assumes the second TryLock argument is a timeout in
	// milliseconds, so each failed attempt burns ~1ms of the budget — confirm
	// against the TryLock declaration.
	while ( !SteamNetworkingGlobalLock::TryLock( "SteamNetworkingSockets_Poll", 1 ) )
	{
		if ( --msMaxWaitTime <= 0 )
			return;
	}

	// Do the servicing work with the remaining budget.  InternalPoll may
	// release the lock internally; it tells us whether we still hold it.
	bool bStillLocked = SteamNetworkingSockets_InternalPoll( msMaxWaitTime, true );
	if ( bStillLocked )
		SteamNetworkingGlobalLock::Unlock();
}
2657 
SteamNetworkingSockets_SetLockWaitWarningThreshold(SteamNetworkingMicroseconds usecTheshold)2658 STEAMNETWORKINGSOCKETS_INTERFACE void SteamNetworkingSockets_SetLockWaitWarningThreshold( SteamNetworkingMicroseconds usecTheshold )
2659 {
2660 	#if STEAMNETWORKINGSOCKETS_LOCK_DEBUG_LEVEL > 0
2661 		s_usecLockWaitWarningThreshold = usecTheshold;
2662 	#else
2663 		// Should we assert here?
2664 	#endif
2665 }
2666 
// Install a callback invoked when the global lock is acquired, receiving the
// lock tags and how long we waited for it.  Only functional when compiled
// with lock debugging (STEAMNETWORKINGSOCKETS_LOCK_DEBUG_LEVEL > 0);
// otherwise the call is silently ignored.
STEAMNETWORKINGSOCKETS_INTERFACE void SteamNetworkingSockets_SetLockAcquiredCallback( void (*callback)( const char *tags, SteamNetworkingMicroseconds usecWaited ) )
{
	#if STEAMNETWORKINGSOCKETS_LOCK_DEBUG_LEVEL > 0
		s_fLockAcquiredCallback = callback;
	#else
		// Should we assert here?
	#endif
}
2675 
// Install a callback invoked when the global lock has been held "too long",
// receiving the lock tags and the elapsed time.  Only functional when
// compiled with lock debugging (STEAMNETWORKINGSOCKETS_LOCK_DEBUG_LEVEL > 0);
// otherwise the call is silently ignored.
STEAMNETWORKINGSOCKETS_INTERFACE void SteamNetworkingSockets_SetLockHeldCallback( void (*callback)( const char *tags, SteamNetworkingMicroseconds usecWaited ) )
{
	#if STEAMNETWORKINGSOCKETS_LOCK_DEBUG_LEVEL > 0
		s_fLockHeldCallback = callback;
	#else
		// Should we assert here?
	#endif
}
2684 
SteamNetworkingSockets_SetPreFormatDebugOutputHandler(ESteamNetworkingSocketsDebugOutputType eDetailLevel,void (* pfn_Handler)(ESteamNetworkingSocketsDebugOutputType eType,bool bFmt,const char * pstrFile,int nLine,const char * pMsg,va_list ap))2685 STEAMNETWORKINGSOCKETS_INTERFACE void SteamNetworkingSockets_SetPreFormatDebugOutputHandler(
2686 	ESteamNetworkingSocketsDebugOutputType eDetailLevel,
2687 	void (*pfn_Handler)( ESteamNetworkingSocketsDebugOutputType eType, bool bFmt, const char* pstrFile, int nLine, const char *pMsg, va_list ap )
2688 )
2689 {
2690 	g_eDefaultGroupSpewLevel = eDetailLevel;
2691 	g_pfnPreFormatSpewHandler = pfn_Handler;
2692 }
2693 
// Default pre-format spew handler.  Formats the message into a local buffer
// (printf-style expansion when bFmt, plain copy otherwise), prefixing it with
// "file(line): " when a source file is supplied; strips trailing whitespace;
// writes a timestamped line to the system spew log file when one is open and
// the level qualifies; and finally forwards the finished string to the app's
// debug output callback, if any.
STEAMNETWORKINGSOCKETS_INTERFACE void SteamNetworkingSockets_DefaultPreFormatDebugOutputHandler( ESteamNetworkingSocketsDebugOutputType eType, bool bFmt, const char* pstrFile, int nLine, const char *pMsg, va_list ap )
{
	// Do the formatting
	char buf[ 2048 ];
	int szBuf = sizeof(buf);
	char *msgDest = buf;
	if ( pstrFile )
	{
		// Prepend "file(line): " and advance the destination past it
		int l = V_sprintf_safe( buf, "%s(%d): ", pstrFile, nLine );
		szBuf -= l;
		msgDest += l;
	}

	if ( bFmt )
		V_vsnprintf( msgDest, szBuf, pMsg, ap );
	else
		V_strncpy( msgDest, pMsg, szBuf );

	// Gah, some, but not all, of our code has newlines on the end
	V_StripTrailingWhitespaceASCII( buf );

	// Spew to log file?
	if ( eType <= g_eSystemSpewLevel && g_pFileSystemSpew )
	{

		// Write, timestamped relative to when the log file was opened (seconds)
		SteamNetworkingMicroseconds usecLogTime = SteamNetworkingSockets_GetLocalTimestamp() - g_usecSystemLogFileOpened;
		fprintf( g_pFileSystemSpew, "%8.3f %s\n", usecLogTime*1e-6, buf );

		// Queue to flush when we think we can afford to hit the disk synchronously
		s_bNeedToFlushSystemSpew = true;

		// Flush certain critical messages immediately
		if ( eType <= k_ESteamNetworkingSocketsDebugOutputType_Error )
			FlushSpew();
	}

	// Invoke callback
	FSteamNetworkingSocketsDebugOutput pfnDebugOutput = g_pfnDebugOutput;
	if ( pfnDebugOutput )
		pfnDebugOutput( eType, buf );
}
2736 
2737 
2738 /////////////////////////////////////////////////////////////////////////////
2739 //
2740 // memory override
2741 //
2742 /////////////////////////////////////////////////////////////////////////////
2743 
2744 #include <tier0/memdbgoff.h>
2745 
2746 #ifdef STEAMNETWORKINGSOCKETS_ENABLE_MEM_OVERRIDE
2747 
// Set the first time any allocation hook hands out memory; once that happens
// the allocator can no longer be safely replaced.
static bool s_bHasAllocatedMemory = false;

// Active allocation functions.  Default to the CRT; may be replaced (before
// any allocation) via SteamNetworkingSockets_SetCustomMemoryAllocator.
static void* (*s_pfn_malloc)( size_t s ) = malloc;
static void (*s_pfn_free)( void *p ) = free;
static void* (*s_pfn_realloc)( void *p, size_t s ) = realloc;
2753 
SteamNetworkingSockets_Malloc(size_t s)2754 void *SteamNetworkingSockets_Malloc( size_t s )
2755 {
2756 	s_bHasAllocatedMemory = true;
2757 	return (*s_pfn_malloc)( s );
2758 }
2759 
SteamNetworkingSockets_Realloc(void * p,size_t s)2760 void *SteamNetworkingSockets_Realloc( void *p, size_t s )
2761 {
2762 	s_bHasAllocatedMemory = true;
2763 	return (*s_pfn_realloc)( p, s );
2764 }
2765 
SteamNetworkingSockets_Free(void * p)2766 void SteamNetworkingSockets_Free( void *p )
2767 {
2768 	(*s_pfn_free)( p );
2769 }
2770 
SteamNetworkingSockets_SetCustomMemoryAllocator(void * (* pfn_malloc)(size_t s),void (* pfn_free)(void * p),void * (* pfn_realloc)(void * p,size_t s))2771 STEAMNETWORKINGSOCKETS_INTERFACE void SteamNetworkingSockets_SetCustomMemoryAllocator(
2772 	void* (*pfn_malloc)( size_t s ),
2773 	void (*pfn_free)( void *p ),
2774 	void* (*pfn_realloc)( void *p, size_t s )
2775 ) {
2776 	Assert( !s_bHasAllocatedMemory ); // Too late!
2777 
2778 	s_pfn_malloc = pfn_malloc;
2779 	s_pfn_free = pfn_free;
2780 	s_pfn_realloc = pfn_realloc;
2781 }
2782 #endif
2783