1 //====== Copyright Valve Corporation, All rights reserved. ====================
2 
3 #ifndef STEAMNETWORKINGSOCKETS_LOWLEVEL_H
4 #define STEAMNETWORKINGSOCKETS_LOWLEVEL_H
5 #pragma once
6 
#include <atomic>
#include <chrono>
#include <cstdint>
#include <functional>
#include <mutex>
#include <thread>

#include <steam/steamnetworkingtypes.h>
#include <tier1/netadr.h>
#include <tier1/utlhashmap.h>

#include "../steamnetworkingsockets_internal.h"
16 
17 // Comment this in to enable Windows event tracing
18 //#ifdef _WINDOWS
19 //	#define STEAMNETWORKINGSOCKETS_ENABLE_ETW
20 //#endif
21 
22 // Set STEAMNETWORKINGSOCKETS_LOCK_DEBUG_LEVEL.
23 // NOTE: Currently only 0 or 1 is allowed.  Later we might add more flexibility
24 #ifndef STEAMNETWORKINGSOCKETS_LOCK_DEBUG_LEVEL
25 	#ifdef DBGFLAG_ASSERT
26 		#define STEAMNETWORKINGSOCKETS_LOCK_DEBUG_LEVEL 1
27 	#else
28 		#define STEAMNETWORKINGSOCKETS_LOCK_DEBUG_LEVEL 0
29 	#endif
30 #endif
31 
32 struct iovec;
33 
34 namespace SteamNetworkingSocketsLib {
35 
36 class IRawUDPSocket;
37 
38 /////////////////////////////////////////////////////////////////////////////
39 //
40 // Low level sockets
41 //
42 /////////////////////////////////////////////////////////////////////////////
43 
/// Info about an incoming packet passed to the CRecvPacketCallback
struct RecvPktInfo_t
{
	const void *m_pPkt;     // Start of the packet payload
	int m_cbPkt;            // Size of the payload, in bytes
	netadr_t m_adrFrom;     // Address the packet was received from
	IRawUDPSocket *m_pSock; // Socket the packet arrived on
};
52 
/// Store the callback and its context together
class CRecvPacketCallback
{
public:
	/// Prototype of the callback
	typedef void (*FCallbackRecvPacket)( const RecvPktInfo_t &info, void *pContext );

	/// Default constructor sets stuff to null
	inline CRecvPacketCallback() : m_fnCallback( nullptr ), m_pContext( nullptr ) {}

	/// A template constructor so you can use type safe context and avoid messy casting.
	/// The context value is smuggled through the void* slot, so T must be exactly
	/// pointer-sized (enforced by the compile-time assert below).  The typed function
	/// pointer is reinterpret_cast to the generic prototype and is always invoked
	/// through that generic type.
	template< typename T >
	inline CRecvPacketCallback( void (*fnCallback)( const RecvPktInfo_t &info, T context ), T context )
	: m_fnCallback ( reinterpret_cast< FCallbackRecvPacket>( fnCallback ) )
	, m_pContext( reinterpret_cast< void * >( context ) )
	{
		COMPILE_TIME_ASSERT( sizeof(T) == sizeof(void*) );
	}

	FCallbackRecvPacket m_fnCallback; // Function to invoke, or nullptr for "no callback"
	void *m_pContext;                 // Opaque context handed back to the callback

	/// Shortcut notation to execute the callback.  Safe to invoke when no
	/// callback is set; it's a no-op in that case.
	inline void operator()( const RecvPktInfo_t &info ) const
	{
		if ( m_fnCallback )
			m_fnCallback( info, m_pContext );
	}
};
82 
/// Interface object for a low-level Berkeley socket.  We always use non-blocking, UDP sockets.
class IRawUDPSocket
{
public:
	/// A thin wrapper around ::sendto
	///
	/// Packets sent through this method are subject to fake loss (steamdatagram_fakepacketloss_send),
	/// lag (steamdatagram_fakepacketlag_send and steamdatagram_fakepacketreorder_send), and
	/// duplication (steamdatagram_fakepacketdup_send)
	inline bool BSendRawPacket( const void *pPkt, int cbPkt, const netadr_t &adrTo ) const
	{
		// Wrap the single buffer in a one-element iovec and use the gather path
		iovec temp;
		temp.iov_len = cbPkt;
		temp.iov_base = (void *)pPkt;
		return BSendRawPacketGather( 1, &temp, adrTo );
	}
	/// Overload that accepts a SteamNetworkingIPAddr; converts it to a netadr_t
	/// and forwards to the overload above.
	inline bool BSendRawPacket( const void *pPkt, int cbPkt, const SteamNetworkingIPAddr &adrTo ) const
	{
		netadr_t netadrTo;
		SteamNetworkingIPAddrToNetAdr( netadrTo, adrTo );
		return BSendRawPacket( pPkt, cbPkt, netadrTo );
	}

	/// Gather-based send.  Simulated lag, loss, etc are applied
	virtual bool BSendRawPacketGather( int nChunks, const iovec *pChunks, const netadr_t &adrTo ) const = 0;
	/// Gather-based send, SteamNetworkingIPAddr flavor.  Converts the address and
	/// forwards to the pure-virtual overload.
	inline bool BSendRawPacketGather( int nChunks, const iovec *pChunks, const SteamNetworkingIPAddr &adrTo ) const
	{
		netadr_t netadrTo;
		SteamNetworkingIPAddrToNetAdr( netadrTo, adrTo );
		return BSendRawPacketGather( nChunks, pChunks, netadrTo );
	}

	/// Logically close the socket.  This might not actually close the socket IMMEDIATELY,
	/// there may be a slight delay.  (On the order of a few milliseconds.)  But you will not
	/// get any further callbacks.
	virtual void Close() = 0;

	/// The local address we ended up binding to
	SteamNetworkingIPAddr m_boundAddr;

protected:
	// Lifetime is managed by the implementation; callers dispose via Close(),
	// never by deleting through this interface directly.
	IRawUDPSocket();
	virtual ~IRawUDPSocket();
};
127 
/// Address-family selectors for OpenRawUDPSocket.  The IPv4/IPv6 values are bit
/// flags and may be combined (k_nAddressFamily_DualStack is exactly that
/// combination); k_nAddressFamily_Auto is a sentinel, not a flag.
/// (constexpr rather than const so they are guaranteed usable in constant
/// expressions; the file already uses constexpr elsewhere.)
constexpr int k_nAddressFamily_Auto = -1; // Will try to use IPv6 dual stack if possible.  Falls back to IPv4 if necessary (and possible for your requested bind address)
constexpr int k_nAddressFamily_IPv4 = 1;
constexpr int k_nAddressFamily_IPv6 = 2;
constexpr int k_nAddressFamily_DualStack = k_nAddressFamily_IPv4|k_nAddressFamily_IPv6;
132 
/// Create a UDP socket, set all the socket options for non-blocking, etc, bind it to the desired interface and port, and
/// make sure we're setup to poll the socket efficiently and deliver packets received to the specified callback.
///
/// Local address is interpreted as follows:
/// - If a specific IPv6 or IPv4 address is present, we will try to bind to that interface,
///   and dual-stack will be disabled.
/// - If IPv4 0.0.0.0 is specified, only bind for IPv4
/// - If IPv6 ::0 is specified, consult pnAddressFamilies.
///
/// Address family is interpreted as follows:
/// - k_nAddressFamily_IPv4/k_nAddressFamily_IPv6: only bind for that protocol
/// - k_nAddressFamily_DualStack: Fail if we cannot get dual stack
/// - k_nAddressFamily_Auto (or null): Try dual stack if address is ::0 or null,
///   otherwise use single protocol.
///
/// Upon exit, the address and address families are modified to contain the actual bound
/// address (specifically, the port!) and available address families.
///
/// NOTE(review): presumably returns nullptr on failure with an explanation
/// written to errMsg -- confirm against the implementation.
extern IRawUDPSocket *OpenRawUDPSocket( CRecvPacketCallback callback, SteamDatagramErrMsg &errMsg, SteamNetworkingIPAddr *pAddrLocal, int *pnAddressFamilies );
151 
/// A single socket could, in theory, be used to communicate with every single remote host.
/// Or we may decide to open up one socket per remote host, to workaround weird firewall/NAT
/// bugs.  A IBoundUDPSocket abstracts this.  If you need to talk to a single remote host
/// over UDP, you can get one of these and not worry about whether you got your own socket
/// or are sharing a socket.  And you don't need to worry about polling the socket.  You'll
/// just get your callback when a packet is received.
class IBoundUDPSocket
{
public:

	/// Send a packet on this socket to the bound remote host
	inline bool BSendRawPacket( const void *pPkt, int cbPkt ) const
	{
		return m_pRawSock->BSendRawPacket( pPkt, cbPkt, m_adr );
	}

	/// Gather-based send to the bound remote host
	inline bool BSendRawPacketGather( int nChunks, const iovec *pChunks ) const
	{
		return m_pRawSock->BSendRawPacketGather( nChunks, pChunks, m_adr );
	}

	/// Close this socket and stop talking to the specified remote host
	virtual void Close() = 0;

	/// Who are we talking to?
	const netadr_t &GetRemoteHostAddr() const { return m_adr; }

	/// Access the underlying socket we are using (which might be shared)
	IRawUDPSocket *GetRawSock() const { return m_pRawSock; }

protected:
	// Construction and destruction are restricted to derived classes.  The
	// destructor is deliberately protected and non-virtual: callers dispose of
	// these objects through Close(), never with delete.
	inline IBoundUDPSocket( IRawUDPSocket *pRawSock, const netadr_t &adr ) : m_adr( adr ), m_pRawSock( pRawSock ) {}
	inline ~IBoundUDPSocket() {}

	/// Address of remote host
	netadr_t m_adr;

	/// The raw socket that is being shared
	IRawUDPSocket *m_pRawSock;
};
193 
/// Get a socket to talk to a single host.  The underlying socket won't be
/// shared with anybody else.
extern IBoundUDPSocket *OpenUDPSocketBoundToHost( const netadr_t &adrRemote, CRecvPacketCallback callback, SteamDatagramErrMsg &errMsg );

/// Create a pair of sockets that are bound to talk to each other.
/// NOTE(review): ppOutSockets presumably points at an array of two entries,
/// one per callback -- confirm against the implementation.
extern bool CreateBoundSocketPair( CRecvPacketCallback callback1, CRecvPacketCallback callback2, IBoundUDPSocket **ppOutSockets, SteamDatagramErrMsg &errMsg );
200 
/// Manage a single underlying socket that is used to talk to multiple remote hosts
class CSharedSocket
{
public:
	STEAMNETWORKINGSOCKETS_DECLARE_CLASS_OPERATOR_NEW
	CSharedSocket();
	~CSharedSocket();

	/// Allocate a raw socket and setup bookkeeping structures so we can add
	/// clients that will talk using it.
	bool BInit( const SteamNetworkingIPAddr &localAddr, CRecvPacketCallback callbackDefault, SteamDatagramErrMsg &errMsg );

	/// Close all sockets and clean up all resources
	void Kill();

	/// Add a client to talk to a given remote address.  Use IBoundUDPSocket::Close when you
	/// are done.
	IBoundUDPSocket *AddRemoteHost( const netadr_t &adrRemote, CRecvPacketCallback callback );

	/// Send a packet to a remote host.  It doesn't matter whether the remote host
	/// is already in the client table or not.
	/// NOTE: assumes BInit succeeded -- m_pRawSock is dereferenced unchecked here
	/// (unlike GetBoundAddr, which null-checks).
	bool BSendRawPacket( const void *pPkt, int cbPkt, const netadr_t &adrTo ) const
	{
		return m_pRawSock->BSendRawPacket( pPkt, cbPkt, adrTo );
	}

	/// Address we are bound to, or nullptr (with an assert) if not initialized.
	const SteamNetworkingIPAddr *GetBoundAddr() const
	{
		if ( !m_pRawSock )
		{
			Assert( false );
			return nullptr;
		}
		return &m_pRawSock->m_boundAddr;
	}

private:

	/// Call this if we get a packet from somebody we don't recognize
	CRecvPacketCallback m_callbackDefault;

	/// The raw socket that is being shared
	IRawUDPSocket *m_pRawSock;

	/// Per-remote-host record.  Destroyed only by the owning CSharedSocket
	/// (destructor is private; disposal goes through Close()).
	class RemoteHost : public IBoundUDPSocket
	{
	private:
		friend class CSharedSocket;
		inline virtual ~RemoteHost() {}
	public:
		STEAMNETWORKINGSOCKETS_DECLARE_CLASS_OPERATOR_NEW
		inline RemoteHost( IRawUDPSocket *pRawSock, const netadr_t &adr ) : IBoundUDPSocket( pRawSock, adr ) {}
		CRecvPacketCallback m_callback; // Invoked for packets from this host
		CSharedSocket *m_pOwner;        // Shared socket we belong to
		virtual void Close() OVERRIDE;
	};
	friend class RemoteHost;

	/// List of remote hosts we're talking to.  It's sort of silly to use a map,
	/// which duplicates the address in the key as well as a member of the
	/// RemoteHost.
	/// Perhaps a better approach would be to use an RBTree, but then we'd
	/// need to be able to search the tree given an address, and RBTree class
	/// doesn't have that interface yet.  Also, it's probably better to
	/// waste a tiny bit of space and put the keys close together in memory,
	/// anyway.
	CUtlHashMap<netadr_t, RemoteHost *, std::equal_to<netadr_t>, netadr_t::Hash > m_mapRemoteHosts;

	/// Tear down one entry of m_mapRemoteHosts by index.
	void CloseRemoteHostByIndex( int idx );

	/// Receive callback installed on the raw socket; dispatches to the matching
	/// RemoteHost's callback, or m_callbackDefault for unknown senders
	/// (presumably -- confirm against the implementation).
	static void CallbackRecvPacket( const RecvPktInfo_t &info, CSharedSocket *pSock );
};
273 
274 /////////////////////////////////////////////////////////////////////////////
275 //
276 // Misc low level service thread stuff
277 //
278 /////////////////////////////////////////////////////////////////////////////
279 
280 /// Called when we know it's safe to actually destroy sockets pending deletion.
281 /// This is when: 1.) We own the lock and 2.) we aren't polling in the service thread.
282 extern void ProcessPendingDestroyClosedRawUDPSockets();
283 
284 /// Last time that we spewed something that was subject to rate limit
285 extern SteamNetworkingMicroseconds g_usecLastRateLimitSpew;
286 extern int g_nRateLimitSpewCount;
287 
288 /// Check for rate limiting spew (e.g. when spew could be triggered by malicious sender.)
BRateLimitSpew(SteamNetworkingMicroseconds usecNow)289 inline bool BRateLimitSpew( SteamNetworkingMicroseconds usecNow )
290 {
291 	if ( g_nRateLimitSpewCount <= 0 )
292 	{
293 		if ( usecNow < g_usecLastRateLimitSpew + 300000 )
294 			return false;
295 		g_usecLastRateLimitSpew = usecNow;
296 		g_nRateLimitSpewCount = 3; // Allow a short burst, because sometimes we need messages from different levels on the call stack
297 	}
298 	--g_nRateLimitSpewCount;
299 	return true;
300 }
301 
// ---- Spew (debug output) plumbing --------------------------------------

/// Spew level applied to messages that don't belong to a more specific group.
extern ESteamNetworkingSocketsDebugOutputType g_eDefaultGroupSpewLevel;

/// Format and emit one spew message.  Level filtering happens in the macros
/// below, before this function is invoked.
extern void ReallySpewTypeFmt( int eType, PRINTF_FORMAT_STRING const char *pFmt, ... ) FMTFUNCTION( 2, 3 );

/// Hook that receives the message before formatting (format string + va_list).
extern void (*g_pfnPreFormatSpewHandler)( ESteamNetworkingSocketsDebugOutputType eType, bool bFmt, const char* pstrFile, int nLine, const char *pMsg, va_list ap );

/// Application-supplied debug output function.
extern FSteamNetworkingSocketsDebugOutput g_pfnDebugOutput;

// Spew macros that test the message type against an explicit group level.
// The comparison happens at the call site, so argument evaluation and
// formatting are skipped entirely when the message is filtered out.
#define SpewTypeGroup( eType, nGroup, ... ) ( ( (eType) <= (nGroup) ) ? ReallySpewTypeFmt( (eType), __VA_ARGS__ ) : (void)0 )
#define SpewMsgGroup( nGroup, ... ) SpewTypeGroup( k_ESteamNetworkingSocketsDebugOutputType_Msg, (nGroup), __VA_ARGS__ )
#define SpewVerboseGroup( nGroup, ... ) SpewTypeGroup( k_ESteamNetworkingSocketsDebugOutputType_Verbose, (nGroup), __VA_ARGS__ )
#define SpewDebugGroup( nGroup, ... ) SpewTypeGroup( k_ESteamNetworkingSocketsDebugOutputType_Debug, (nGroup), __VA_ARGS__ )
#define SpewImportantGroup( nGroup, ... ) SpewTypeGroup( k_ESteamNetworkingSocketsDebugOutputType_Important, (nGroup), __VA_ARGS__ )
#define SpewWarningGroup( nGroup, ... ) SpewTypeGroup( k_ESteamNetworkingSocketsDebugOutputType_Warning, (nGroup), __VA_ARGS__ )
#define SpewErrorGroup( nGroup, ... ) SpewTypeGroup( k_ESteamNetworkingSocketsDebugOutputType_Error, (nGroup), __VA_ARGS__ )
#define SpewBugGroup( nGroup, ... ) SpewTypeGroup( k_ESteamNetworkingSocketsDebugOutputType_Bug, (nGroup), __VA_ARGS__ )

// Convenience macros that use the default group's level.
#define SpewTypeDefaultGroup( eType, ... ) SpewTypeGroup( eType, g_eDefaultGroupSpewLevel, __VA_ARGS__ )
#define SpewMsg( ... ) SpewTypeDefaultGroup( k_ESteamNetworkingSocketsDebugOutputType_Msg, __VA_ARGS__ )
#define SpewVerbose( ... ) SpewTypeDefaultGroup( k_ESteamNetworkingSocketsDebugOutputType_Verbose, __VA_ARGS__ )
#define SpewDebug( ... ) SpewTypeDefaultGroup( k_ESteamNetworkingSocketsDebugOutputType_Debug, __VA_ARGS__ )
#define SpewImportant( ... ) SpewTypeDefaultGroup( k_ESteamNetworkingSocketsDebugOutputType_Important, __VA_ARGS__ )
#define SpewWarning( ... ) SpewTypeDefaultGroup( k_ESteamNetworkingSocketsDebugOutputType_Warning, __VA_ARGS__ )
#define SpewError( ... ) SpewTypeDefaultGroup( k_ESteamNetworkingSocketsDebugOutputType_Error, __VA_ARGS__ )
#define SpewBug( ... ) SpewTypeDefaultGroup( k_ESteamNetworkingSocketsDebugOutputType_Bug, __VA_ARGS__ )

// Rate-limited variants: in addition to the level test, consult BRateLimitSpew
// so a malicious sender can't flood the log.
#define SpewTypeDefaultGroupRateLimited( usecNow, eType, ... ) ( ( (eType) <= g_eDefaultGroupSpewLevel && BRateLimitSpew( usecNow ) ) ? ReallySpewTypeFmt( (eType), __VA_ARGS__ ) : (void)0 )
#define SpewWarningRateLimited( usecNow, ... ) SpewTypeDefaultGroupRateLimited( usecNow, k_ESteamNetworkingSocketsDebugOutputType_Warning, __VA_ARGS__ )

/// Make sure stuff is initialized.  (Reference counted: pair every successful
/// call with SteamNetworkingSocketsLowLevelDecRef.)
extern bool BSteamNetworkingSocketsLowLevelAddRef( SteamDatagramErrMsg &errMsg );

/// Nuke common stuff
extern void SteamNetworkingSocketsLowLevelDecRef();
333 
334 /////////////////////////////////////////////////////////////////////////////
335 //
336 // Locking
337 //
338 // Having fine-grained locks is utterly terrifying, frankly.  In order
339 // to make this work, while avoiding deadlocks, we protect *most* things
340 // with the global lock, and only certain frequently used API calls use more
341 // fine-grained locks.
342 //
343 // In general, the global lock will be held while the background is doing its work,
344 // and so we want to avoid API calls taking that lock when possible.  The most
345 // important API calls that are likely to conflict are:
346 //
347 // - sending messages on a connection
348 // - polling for incoming messages on a connection or poll group.
349 // - polling connection state
350 //
351 // These are the calls most likely to be called often, maybe from multiple threads at
352 // the same time.
353 //
354 // For less frequently-used API calls, we are less concerned about lock contention and
355 // prefer to keep the code simple until we have a proven example of bad performance.
356 //
357 // Here are the locks that are used:
358 //
359 // - Global lock.  You must hold this lock while:
360 //   - Changing any data not specifically carved out below.
361 //   - Creating or destroying objects
362 //   - Changing connection state
363 //   - Changing links between multiple objects.  (E.g. assigning connections to poll groups.)
364 //   - Acquiring more than one "object" lock below at the same time.
365 // - Per-connection locks.  You must hold this lock to modify any property of the connection.
366 // - Per-poll-group locks.  You must hold this lock to modify any property of the poll group.
367 // - g_tables_lock.  Protects the connection and poll group global handle lookup tables.
368 //   You must hold the lock any time you want to read or write the connection or poll group
369 //   tables.  This is a very special lock with custom handling.
370 // - Other miscellaneous "leaf" locks that are only held very briefly to protect specific
371 //   data structures, such as callback lists.  (ShortDurationLock's)
372 //
373 // The rules for acquiring locks are as follows:
374 // - You may not acquire the global lock while already holding any other lock.  The global lock
375 //   must always be acquired *first*.
376 // - You may not acquire another lock while you already hold a ShortDurationLock.  These locks
377 //   are intended for extremely simple use cases where the lock is expected to be held for a brief
378 //   period of time and contention is expected to be low.
// - You may not acquire more than one object lock (connection or poll group) unless already holding
380 //   the global lock.
381 // - The table lock must always be acquired before any object or poll group locks.  This is the flow
382 //   that happens for all API calls.  Also - note that API calls are special in that they release the
383 //   table lock out of order, while retaining the object lock.  (It is not a stack lock/unlock pattern.)
384 //   Object creation is special, and out-of-order locking is OK.  See the code for why.
385 //
386 // A sequence of lock acquisitions that violates the rules above *is* allowed, provided
387 // that the out-of-order acquisition is a "try" acquisition, tolerant of failing due to
388 // deadlock.
389 //
390 /////////////////////////////////////////////////////////////////////////////
391 
// Mutex implementation selection.  You can override these with more optimal
// platform-specific versions if you want.
using ShortDurationMutexImpl = std::mutex; // No recursion, no timeout, should only be held for a short time, so expect low contention.  Good candidate for spinlock.
using RecursiveMutexImpl = std::recursive_mutex; // Need to be able to lock recursively, but don't need to be able to wait with timeout.
using RecursiveTimedMutexImpl = std::recursive_timed_mutex; // Recursion, and need to be able to wait with timeout.  (Does this ability actually add any extra work on any OS we care about?)
397 
/// Debug record for a lock.  Carries the lock's name and classification flags;
/// when STEAMNETWORKINGSOCKETS_LOCK_DEBUG_LEVEL > 0 the bookkeeping hooks below
/// are real functions (implemented elsewhere), otherwise they compile to no-ops.
struct LockDebugInfo
{
	// Classification flags for the lock-order checking machinery.
	static constexpr int k_nFlag_ShortDuration = (1<<0);
	static constexpr int k_nFlag_Connection = (1<<1);
	static constexpr int k_nFlag_PollGroup = (1<<2);
	// NOTE(review): (1<<3) is skipped here -- presumably reserved or used
	// elsewhere; confirm before assigning it to a new lock class.
	static constexpr int k_nFlag_Table = (1<<4);

	const char *const m_pszName; // Human-readable lock name, for diagnostics
	const int m_nFlags;          // Combination of the k_nFlag_xxx bits above

	#if STEAMNETWORKINGSOCKETS_LOCK_DEBUG_LEVEL > 0
		void _AssertHeldByCurrentThread( const char *pszFile, int line, const char *pszTag = nullptr ) const;
	#else
		inline void _AssertHeldByCurrentThread( const char *pszFile, int line, const char *pszTag = nullptr ) const {}
	#endif

protected:
	LockDebugInfo( const char *pszName, int nFlags ) : m_pszName( pszName ), m_nFlags( nFlags ) {}

	// Hooks invoked by Lock<> around the underlying mutex operations.
	#if STEAMNETWORKINGSOCKETS_LOCK_DEBUG_LEVEL > 0
		void AboutToLock( bool bTry );
		void OnLocked( const char *pszTag );
		void AboutToUnlock();
		~LockDebugInfo();
	#else
		void AboutToLock( bool bTry ) {}
		void OnLocked( const char *pszTag ) {}
		void AboutToUnlock() {}
	#endif
};
429 
430 /// Wrapper for locks to make them somewhat debuggable.
431 template<typename TMutexImpl >
432 struct Lock : LockDebugInfo
433 {
LockLock434 	inline Lock( const char *pszName, int nFlags ) : LockDebugInfo( pszName, nFlags ) {}
435 	inline void lock( const char *pszTag = nullptr )
436 	{
437 		LockDebugInfo::AboutToLock( false );
438 		m_impl.lock();
439 		LockDebugInfo::OnLocked( pszTag );
440 	}
unlockLock441 	inline void unlock()
442 	{
443 		LockDebugInfo::AboutToUnlock();
444 		m_impl.unlock();
445 	}
446 	inline bool try_lock( const char *pszTag = nullptr ) {
447 		LockDebugInfo::AboutToLock( true );
448 		if ( !m_impl.try_lock() )
449 			return false;
450 		LockDebugInfo::OnLocked( pszTag );
451 		return true;
452 	}
453 	inline bool try_lock_for( int msTimeout, const char *pszTag = nullptr )
454 	{
455 		LockDebugInfo::AboutToLock( true );
456 		if ( !m_impl.try_lock_for( std::chrono::milliseconds( msTimeout ) ) )
457 			return false;
458 		LockDebugInfo::OnLocked( pszTag );
459 		return true;
460 	}
461 
462 private:
463 	TMutexImpl m_impl;
464 };
465 
/// Object that automatically unlocks a lock when it goes out of scope using RAII.
/// May also be constructed empty and attached to a lock later.
template<typename TLock>
struct ScopeLock
{
	/// Construct without holding anything.
	ScopeLock() : m_pHeldLock( nullptr ) {}

	/// Construct and immediately acquire the given lock.
	explicit ScopeLock( TLock &lock, const char *pszTag = nullptr ) : m_pHeldLock(&lock) { lock.lock( pszTag ); }

	/// Release the held lock, if any, on scope exit.
	~ScopeLock()
	{
		if ( m_pHeldLock )
			m_pHeldLock->unlock();
	}

	/// Are we currently holding a lock?
	bool IsLocked() const { return m_pHeldLock != nullptr; }

	/// Acquire the given lock.  We should not already be holding one; if we
	/// are, assert, release it, and take the new one anyway.
	void Lock( TLock &lock, const char *pszTag = nullptr )
	{
		if ( m_pHeldLock )
		{
			AssertMsg( false, "Scopelock already holding %s, while locking %s!  tag=%s",
				m_pHeldLock->m_pszName, lock.m_pszName, pszTag ? pszTag : "???" );
			m_pHeldLock->unlock();
		}
		// Record the lock before acquiring, matching the original ordering.
		m_pHeldLock = &lock;
		lock.lock( pszTag );
	}

	/// Try to acquire the given lock, waiting up to msTimeout milliseconds.
	/// Returns false (holding nothing) if it could not be obtained.
	bool TryLock( TLock &lock, int msTimeout, const char *pszTag )
	{
		if ( m_pHeldLock )
		{
			AssertMsg( false, "Scopelock already holding %s, while trylock %s!  tag=%s",
				m_pHeldLock->m_pszName, lock.m_pszName, pszTag ? pszTag : "???" );
			m_pHeldLock->unlock();
			m_pHeldLock = nullptr;
		}
		if ( !lock.try_lock_for( msTimeout, pszTag ) )
			return false;
		m_pHeldLock = &lock;
		return true;
	}

	/// Release the held lock now instead of waiting for scope exit.  No-op if
	/// nothing is held.
	void Unlock()
	{
		if ( !m_pHeldLock )
			return;
		m_pHeldLock->unlock();
		m_pHeldLock = nullptr;
	}

	// If we have a lock, forget about it
	void Abandon() { m_pHeldLock = nullptr; }
private:
	TLock *m_pHeldLock; // Lock we currently hold, or nullptr
};
506 
// A very simple lock to protect short accesses to a small set of data.
// Used when:
// - We hold the lock for a brief period.
// - We don't need to take any additional locks while already holding this one.
//   (Including this lock -- e.g. we don't need to lock recursively.)
struct ShortDurationLock : Lock<ShortDurationMutexImpl>
{
	ShortDurationLock( const char *pszName ) : Lock<ShortDurationMutexImpl>( pszName, k_nFlag_ShortDuration ) {}
};
using ShortDurationScopeLock = ScopeLock<ShortDurationLock>;

// Assertion helpers.  When lock debugging is enabled they forward the actual
// call site (__FILE__/__LINE__) to the checker; otherwise the location is
// elided.  The ",## __VA_ARGS__" is the GCC/MSVC extension that deletes the
// preceding comma when the optional tag argument is omitted.
#if STEAMNETWORKINGSOCKETS_LOCK_DEBUG_LEVEL > 0
	#define AssertHeldByCurrentThread( ... ) _AssertHeldByCurrentThread( __FILE__, __LINE__ ,## __VA_ARGS__ )
	#define AssertLocksHeldByCurrentThread( ... ) _AssertLocksHeldByCurrentThread( __FILE__, __LINE__,## __VA_ARGS__ )
#else
	#define AssertHeldByCurrentThread( ... ) _AssertHeldByCurrentThread( nullptr, 0,## __VA_ARGS__ )
	#define AssertLocksHeldByCurrentThread( ... ) _AssertLocksHeldByCurrentThread( nullptr, 0,## __VA_ARGS__ )
#endif
525 
/// Special utilities for acquiring the global lock
struct SteamNetworkingGlobalLock
{
	// RAII usage: construct to acquire, destruct to release.
	inline SteamNetworkingGlobalLock( const char *pszTag = nullptr ) { Lock( pszTag ); }
	inline ~SteamNetworkingGlobalLock() { Unlock(); }

	// Static interface for manual lock management.  The tag identifies the
	// caller in lock-debugging output.
	static void Lock( const char *pszTag );
	static bool TryLock( const char *pszTag, int msTimeout );
	static void Unlock();

	// Debug helpers; no-ops when lock debugging is compiled out.
	#if STEAMNETWORKINGSOCKETS_LOCK_DEBUG_LEVEL > 0
		static void _AssertHeldByCurrentThread( const char *pszFile, int line );
		static void _AssertHeldByCurrentThread( const char *pszFile, int line, const char *pszTag );
		static void SetLongLockWarningThresholdMS( const char *pszTag, int msWarningThreshold );
	#else
		static void _AssertHeldByCurrentThread( const char *pszFile, int line ) {}
		static void _AssertHeldByCurrentThread( const char *pszFile, int line, const char *pszTag ) {}
		inline static void SetLongLockWarningThresholdMS( const char *pszTag, int msWarningThreshold ) {}
	#endif
};
545 
// Memory validation hook, only present in validation builds.
#ifdef DBGFLAG_VALIDATE
extern void SteamNetworkingSocketsLowLevelValidate( CValidator &validator );
#endif

/// Wake up the service thread ASAP.  Intended to be called from other threads,
/// but is safe to call from the service thread as well.
extern void WakeSteamDatagramThread();
553 
/// Class used to take some action while we have the global thread locked,
/// perhaps later and in another thread if necessary.  Intended to be used
/// from callbacks and other contexts where we don't know what thread we are
/// in and cannot risk trying to wait on the lock, without risking creating
/// a deadlock.
///
/// Note: This code could have been a lot simpler with std::function, but
/// it was intentionally not used, to avoid adding that runtime dependency.
class ISteamNetworkingSocketsRunWithLock
{
public:
	virtual ~ISteamNetworkingSocketsRunWithLock();

	/// If we can run immediately, then do so, delete self, and return true.
	/// Otherwise, we are placed into a queue and false is returned.
	bool RunOrQueue( const char *pszTag );

	/// Don't check the global lock, just queue the item to be run.
	void Queue( const char *pszTag );

	/// Called from service thread while we hold the lock
	static void ServiceQueue();

	/// Tag supplied when the item was queued (for lock debugging / diagnostics).
	inline const char *Tag() const { return m_pszTag; }
private:
	const char *m_pszTag = nullptr;

protected:
	/// Override to perform the actual work.  Invoked while the global lock is held.
	virtual void Run() = 0;

	// Only derived classes can be constructed.
	inline ISteamNetworkingSocketsRunWithLock() {};
};
586 
/////////////////////////////////////////////////////////////////////////////
//
// Misc
//
/////////////////////////////////////////////////////////////////////////////

/// Fetch current time
extern SteamNetworkingMicroseconds SteamNetworkingSockets_GetLocalTimestamp();

/// Set debug output hook
extern void SteamNetworkingSockets_SetDebugOutputFunction( ESteamNetworkingSocketsDebugOutputType eDetailLevel, FSteamNetworkingSocketsDebugOutput pfnFunc );

/// Return true if it looks like the address is a local address.
/// (NOTE(review): "local" presumably means LAN/loopback rather than a routable
/// internet address -- confirm in the implementation.)
extern bool IsRouteToAddressProbablyLocal( netadr_t addr );
601 
602 #ifdef STEAMNETWORKINGSOCKETS_ENABLE_ETW
603 	extern void ETW_Init();
604 	extern void ETW_Kill();
605 	extern void ETW_LongOp( const char *opName, SteamNetworkingMicroseconds usec, const char *pszInfo = nullptr );
606 	extern void ETW_UDPSendPacket( const netadr_t &adrTo, int cbPkt );
607 	extern void ETW_UDPRecvPacket( const netadr_t &adrFrom, int cbPkt );
608 	extern void ETW_ICESendPacket( HSteamNetConnection hConn, int cbPkt );
609 	extern void ETW_ICERecvPacket( HSteamNetConnection hConn, int cbPkt );
610 	extern void ETW_ICEProcessPacket( HSteamNetConnection hConn, int cbPkt );
611 	extern void ETW_webrtc_setsockopt( int slevel, int sopt, int value );
612 	extern void ETW_webrtc_send( int length );
613 	extern void ETW_webrtc_sendto( void *addr, int length );
614 #else
ETW_Init()615 	inline void ETW_Init() {}
ETW_Kill()616 	inline void ETW_Kill() {}
617 	inline void ETW_LongOp( const char *opName, SteamNetworkingMicroseconds usec, const char *pszInfo = nullptr ) {}
ETW_UDPSendPacket(const netadr_t & adrTo,int cbPkt)618 	inline void ETW_UDPSendPacket( const netadr_t &adrTo, int cbPkt ) {}
ETW_UDPRecvPacket(const netadr_t & adrFrom,int cbPkt)619 	inline void ETW_UDPRecvPacket( const netadr_t &adrFrom, int cbPkt ) {}
ETW_ICESendPacket(HSteamNetConnection hConn,int cbPkt)620 	inline void ETW_ICESendPacket( HSteamNetConnection hConn, int cbPkt ) {}
ETW_ICERecvPacket(HSteamNetConnection hConn,int cbPkt)621 	inline void ETW_ICERecvPacket( HSteamNetConnection hConn, int cbPkt ) {}
ETW_ICEProcessPacket(HSteamNetConnection hConn,int cbPkt)622 	inline void ETW_ICEProcessPacket( HSteamNetConnection hConn, int cbPkt ) {}
623 #endif
624 
} // namespace SteamNetworkingSocketsLib

/// Default pre-format debug output handler.  It has the signature expected by
/// g_pfnPreFormatSpewHandler, so presumably it is the default value of that
/// hook and is exported so applications can restore it -- confirm against the
/// implementation.
STEAMNETWORKINGSOCKETS_INTERFACE void SteamNetworkingSockets_DefaultPreFormatDebugOutputHandler( ESteamNetworkingSocketsDebugOutputType eType, bool bFmt, const char* pstrFile, int nLine, const char *pMsg, va_list ap );

#endif // STEAMNETWORKINGSOCKETS_LOWLEVEL_H
630