//====== Copyright Valve Corporation, All rights reserved. ====================

#pragma once

#include "../steamnetworkingsockets_internal.h"
#include <vector>
#include <map>
#include <set>

struct P2PSessionState_t;

namespace SteamNetworkingSocketsLib {

struct LockDebugInfo;

// Acks may be delayed.  This controls the precision used on the wire to encode the delay time.
constexpr int k_nAckDelayPrecisionShift = 5;
constexpr SteamNetworkingMicroseconds k_usecAckDelayPrecision = (1 << k_nAckDelayPrecisionShift );
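
// Example (an illustrative sketch, not the actual wire-format code): how an
// ack delay measured in microseconds can be quantized to this precision for
// the wire and expanded again by the peer.  The helper names are hypothetical.
inline uint32 ExampleEncodeAckDelayOnWire( SteamNetworkingMicroseconds usecDelay )
{
	return uint32( usecDelay >> k_nAckDelayPrecisionShift ); // 1 wire unit = 32 usec
}
inline SteamNetworkingMicroseconds ExampleDecodeAckDelayFromWire( uint32 nWireDelay )
{
	return SteamNetworkingMicroseconds( nWireDelay ) << k_nAckDelayPrecisionShift;
}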

// When a receiver detects a dropped packet, wait a bit before NACKing it, to give it time
// to arrive out of order.  This is really important for many different types of connections
// that send on different channels, e.g. DSL, WiFi.
// Here we really could be smarter, by tracking how often dropped
// packets really do arrive out of order.  If the rate is low, then it's
// probably best to go ahead and send a NACK now, rather than waiting.
// But if dropped packets do often arrive out of order, then waiting
// to NACK will probably save some retransmits.  In fact, instead
// of learning the rate, we should probably try to learn the delay.
// E.g. a probability distribution P(t), which describes the odds
// that a dropped packet will have arrived at time t.  Then you
// adjust the NACK delay such that P(nack_delay) gives the best
// balance between false positive and false negative rates.
constexpr SteamNetworkingMicroseconds k_usecNackFlush = 3*1000;

// Max size of a message that we are willing to *receive*.
constexpr int k_cbMaxMessageSizeRecv = k_cbMaxSteamNetworkingSocketsMessageSizeSend*2;

// The max we will look ahead and allocate data, ahead of the reliable
// messages we have been able to decode.  We limit this to make sure that
// a malicious sender cannot exploit us.
constexpr int k_cbMaxBufferedReceiveReliableData = k_cbMaxMessageSizeRecv + 64*1024;
constexpr int k_nMaxReliableStreamGaps_Extend = 30; // Discard reliable data past the end of the stream, if it would cause us to get too many gaps
constexpr int k_nMaxReliableStreamGaps_Fragment = 20; // Discard reliable data that is filling in the middle of a hole, if it would cause the number of gaps to exceed this number
constexpr int k_nMaxPacketGaps = 62; // Don't bother tracking more than N gaps.  Instead, we will end up NACKing some packets that we actually did receive.  This should not break the protocol, but it protects us from a malicious sender

// Hang on to at most N unreliable segments.  When packets are dropping
// and unreliable messages being fragmented, we will accumulate old pieces
// of unreliable messages that we retain in hopes that we will get the
// missing piece and reassemble the whole message.  At a certain point we
// must give up and discard them.  We use a simple strategy of just limiting
// the max total number.  In reality large unreliable messages are just a very bad
// idea, since the odds of the message dropping increase exponentially with the
// number of packets.  With 20 packets, even 1% packet loss becomes ~18% message
// loss.  (Assuming naive fragmentation and reassembly and no forward
// error correction.)
constexpr int k_nMaxBufferedUnreliableSegments = 20;
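
// Worked example of the claim above (assuming independent packet loss and
// naive reassembly): a fragmented message survives only if ALL of its n
// packets arrive, so
//
//     P(message lost) = 1 - (1 - p)^n
//
// With p = 0.01 and n = 20:  1 - 0.99^20 ≈ 1 - 0.818 ≈ 18%.
// At p = 0.05, the same 20-packet message is already lost ~64% of the time.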

// If app tries to send a message larger than N bytes unreliably,
// complain about it, and automatically convert to reliable.
// About 15 segments.
constexpr int k_cbMaxUnreliableMsgSizeSend = 15*1100;

// Max possible size of an unreliable segment we could receive.
constexpr int k_cbMaxUnreliableSegmentSizeRecv = k_cbSteamNetworkingSocketsMaxPlaintextPayloadRecv;

// Largest possible total unreliable message we can receive, based on the constraints above
constexpr int k_cbMaxUnreliableMsgSizeRecv = k_nMaxBufferedUnreliableSegments*k_cbMaxUnreliableSegmentSizeRecv;
COMPILE_TIME_ASSERT( k_cbMaxUnreliableMsgSizeRecv > k_cbMaxUnreliableMsgSizeSend + 4096 ); // Postel's law; confirm how much slack we have here

class CSteamNetworkConnectionBase;
class CConnectionTransport;
struct SteamNetworkingMessageQueue;

/// Actual implementation of SteamNetworkingMessage_t, which is the API
/// visible type.  Has extra fields needed to put the message into intrusive
/// linked lists.
class CSteamNetworkingMessage : public SteamNetworkingMessage_t
{
public:
	STEAMNETWORKINGSOCKETS_DECLARE_CLASS_OPERATOR_NEW
	static CSteamNetworkingMessage *New( CSteamNetworkConnectionBase *pParent, uint32 cbSize, int64 nMsgNum, int nFlags, SteamNetworkingMicroseconds usecNow );
	static CSteamNetworkingMessage *New( uint32 cbSize );
	static void DefaultFreeData( SteamNetworkingMessage_t *pMsg );

	/// OK to delay sending this message until this time.  Set to zero to explicitly force
	/// Nagle timer to expire and send now (but this should behave the same as if the
	/// timer < usecNow).  If the timer is cleared, then all messages with lower message numbers
	/// are also cleared.
	// NOTE: Intentionally reusing the m_usecTimeReceived field, which is not used on outbound messages
	inline SteamNetworkingMicroseconds SNPSend_UsecNagle() const { return m_usecTimeReceived; }
	inline void SNPSend_SetUsecNagle( SteamNetworkingMicroseconds x ) { m_usecTimeReceived = x; }

	/// Offset in reliable stream of the header byte.  0 if we're not reliable.
	inline int64 SNPSend_ReliableStreamPos() const { return m_nConnUserData; }
	inline void SNPSend_SetReliableStreamPos( int64 x ) { m_nConnUserData = x; }
	inline int SNPSend_ReliableStreamSize() const
	{
		DbgAssert( m_nFlags & k_nSteamNetworkingSend_Reliable && m_nConnUserData > 0 && m_cbSNPSendReliableHeader > 0 && m_cbSize >= m_cbSNPSendReliableHeader );
		return m_cbSize;
	}

	inline bool SNPSend_IsReliable() const
	{
		if ( m_nFlags & k_nSteamNetworkingSend_Reliable )
		{
			DbgAssert( m_nConnUserData > 0 && m_cbSNPSendReliableHeader > 0 && m_cbSize >= m_cbSNPSendReliableHeader );
			return true;
		}
		DbgAssert( m_nConnUserData == 0 && m_cbSNPSendReliableHeader == 0 );
		return false;
	}

	// Reliable stream header
	int m_cbSNPSendReliableHeader;
	byte *SNPSend_ReliableHeader()
	{
		// !KLUDGE! Reuse the peer identity to hold the reliable header
		return (byte*)&m_identityPeer;
	}

	/// Remove it from queues
	void Unlink();

	struct Links
	{
		SteamNetworkingMessageQueue *m_pQueue;
		CSteamNetworkingMessage *m_pPrev;
		CSteamNetworkingMessage *m_pNext;

		inline void Clear() { m_pQueue = nullptr; m_pPrev = nullptr; m_pNext = nullptr; }
	};

	/// Intrusive links for the "primary" list we are in
	Links m_links;

	/// Intrusive links for any secondary list we may be in.  (Same listen socket or
	/// P2P channel, depending on message type)
	Links m_linksSecondaryQueue;

	void LinkBefore( CSteamNetworkingMessage *pSuccessor, Links CSteamNetworkingMessage::*pMbrLinks, SteamNetworkingMessageQueue *pQueue );
	void LinkToQueueTail( Links CSteamNetworkingMessage::*pMbrLinks, SteamNetworkingMessageQueue *pQueue );
	void UnlinkFromQueue( Links CSteamNetworkingMessage::*pMbrLinks );

private:
	// Use New and Release()!!
	inline CSteamNetworkingMessage() {}
	inline ~CSteamNetworkingMessage() {}
	static void ReleaseFunc( SteamNetworkingMessage_t *pIMsg );
};
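
// To illustrate the pointer-to-member Links pattern above: a minimal sketch of
// what LinkToQueueTail plausibly does (the real definition lives in the .cpp
// file; this is an assumption about its shape, not the actual implementation).
// The same body serves both the primary and secondary queues, selected by
// pMbrLinks:
//
//	void CSteamNetworkingMessage::LinkToQueueTail( Links CSteamNetworkingMessage::*pMbrLinks, SteamNetworkingMessageQueue *pQueue )
//	{
//		Links &links = this->*pMbrLinks;     // select m_links or m_linksSecondaryQueue
//		Assert( links.m_pQueue == nullptr ); // must not already be in a queue
//		links.m_pQueue = pQueue;
//		links.m_pPrev = pQueue->m_pLast;
//		links.m_pNext = nullptr;
//		if ( pQueue->m_pLast )
//			( pQueue->m_pLast->*pMbrLinks ).m_pNext = this;
//		else
//			pQueue->m_pFirst = this;
//		pQueue->m_pLast = this;
//	}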

/// A doubly-linked list of CSteamNetworkingMessage
struct SteamNetworkingMessageQueue
{
	CSteamNetworkingMessage *m_pFirst = nullptr;
	CSteamNetworkingMessage *m_pLast = nullptr;
	LockDebugInfo *m_pRequiredLock = nullptr; // Is there a lock that is required to be held while we access this queue?

	inline bool empty() const
	{
		if ( m_pFirst )
		{
			Assert( m_pLast );
			return false;
		}
		Assert( !m_pLast );
		return true;
	}

	/// Remove the first messages out of the queue (up to nMaxMessages).  Returns the number of messages returned.
	int RemoveMessages( SteamNetworkingMessage_t **ppOutMessages, int nMaxMessages );

	/// Delete all queued messages
	void PurgeMessages();

	/// Check the lock is held, if appropriate
	void AssertLockHeld() const;
};
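
// Typical polling pattern for the queue above (an illustrative assumption
// about usage, not code from the library; real call sites would also hold
// the lock named by m_pRequiredLock):
//
//	SteamNetworkingMessage_t *apMsgs[ 16 ];
//	int nMsgs = queue.RemoveMessages( apMsgs, 16 );
//	for ( int i = 0 ; i < nMsgs ; ++i )
//	{
//		// ... consume apMsgs[ i ] ...
//		apMsgs[ i ]->Release(); // returns it to the allocator via m_pfnRelease
//	}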

/// Maximum number of packets we will send in one Think() call.
const int k_nMaxPacketsPerThink = 16;

/// Max number of tokens we are allowed to store up in reserve, for a burst.
const float k_flSendRateBurstOverageAllowance = k_cbSteamNetworkingSocketsMaxEncryptedPayloadSend;

struct SNPRange_t
{
	/// Byte or sequence number range
	int64 m_nBegin;
	int64 m_nEnd; // STL-style.  It's one past the end

	inline int64 length() const
	{
		// In general, allow zero-length ranges, but not negative ones
		Assert( m_nEnd >= m_nBegin );
		return m_nEnd - m_nBegin;
	}

	/// Strict comparison function.  This is used in situations where
	/// ranges must not overlap, AND we also never search for
	/// a range that might overlap.
	struct NonOverlappingLess
	{
		inline bool operator()( const SNPRange_t &l, const SNPRange_t &r ) const
		{
			if ( l.m_nBegin < r.m_nBegin ) return true;
			AssertMsg( l.m_nBegin > r.m_nBegin || l.m_nEnd == r.m_nEnd, "Ranges should not overlap in this map!" );
			return false;
		}
	};
};
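
// Example (illustrative): because the ranges in these maps never overlap,
// comparing m_nBegin alone is a valid strict weak ordering, e.g.:
//
//	std_map<SNPRange_t,CSteamNetworkingMessage*,SNPRange_t::NonOverlappingLess> mapRanges;
//	mapRanges[ SNPRange_t{ 1, 100 } ] = pMsgA;   // stream bytes [1,100)
//	mapRanges[ SNPRange_t{ 100, 250 } ] = pMsgB; // touching is fine; overlapping would trip the AssertMsg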

/// A packet that has been sent but we don't yet know if it was received
/// or dropped.  These are kept in an ordered map keyed by packet number.
/// (Hence the packet number not being a member.)  When we receive an ACK,
/// we remove packets from this list.
struct SNPInFlightPacket_t
{
	//
	// FIXME - Could definitely pack this structure better.  And maybe
	//         worth it to optimize cache
	//

	/// Local timestamp when we sent it
	SteamNetworkingMicroseconds m_usecWhenSent;

	/// Did we get an ack block from peer that explicitly marked this
	/// packet as being skipped?  Note that we might subsequently get an
	/// ack for this same packet; that's OK!
	bool m_bNack;

	/// Transport used to send
	CConnectionTransport *m_pTransport;

	/// List of reliable segments.  Ignoring retransmission,
	/// there really is no reason why we would need to have
	/// more than 1 in a packet, even if there are multiple
	/// reliable messages.  If we need to retry, we might
	/// be fragmented.  But usually it will only be a few.
	vstd::small_vector<SNPRange_t,1> m_vecReliableSegments;
};

struct SSNPSendMessageList : public SteamNetworkingMessageQueue
{

	/// Unlink the message at the head, if any, and return it.
	/// Unlike STL pop_front, this will return nullptr if the
	/// list is empty
	CSteamNetworkingMessage *pop_front()
	{
		CSteamNetworkingMessage *pResult = m_pFirst;
		if ( pResult )
		{
			Assert( m_pLast );
			Assert( pResult->m_links.m_pQueue == this );
			Assert( pResult->m_links.m_pPrev == nullptr );
			m_pFirst = pResult->m_links.m_pNext;
			if ( m_pFirst )
			{
				Assert( m_pFirst->m_links.m_pPrev == pResult );
				Assert( m_pFirst->m_nMessageNumber > pResult->m_nMessageNumber );
				m_pFirst->m_links.m_pPrev = nullptr;
			}
			else
			{
				Assert( m_pLast == pResult );
				m_pLast = nullptr;
			}
			pResult->m_links.m_pQueue = nullptr;
			pResult->m_links.m_pNext = nullptr;
		}
		return pResult;
	}

	/// Optimized insertion when we know it goes at the end
	void push_back( CSteamNetworkingMessage *pMsg )
	{
		if ( m_pFirst == nullptr )
		{
			Assert( m_pLast == nullptr );
			m_pFirst = pMsg;
		}
		else
		{
			// Messages are always kept in message number order
			Assert( pMsg->m_nMessageNumber > m_pLast->m_nMessageNumber );
			Assert( m_pLast->m_links.m_pNext == nullptr );
			m_pLast->m_links.m_pNext = pMsg;
		}
		pMsg->m_links.m_pQueue = this;
		pMsg->m_links.m_pNext = nullptr;
		pMsg->m_links.m_pPrev = m_pLast;
		m_pLast = pMsg;
	}

};

/// Info used by a sender to estimate the available bandwidth
struct SSendRateData
{
	/// Current sending rate in bytes per second.  RFC 3448 4.2 states the default
	/// is one packet per second, but that is insane and we're not doing that.
	/// In most cases we will set a default based on initial ping, so this is
	/// only rarely used.
	int m_nCurrentSendRateEstimate = 64*1024;

	/// Actual send rate we are going to USE.  This depends on the send rate estimate
	/// and the current BBR state
	float m_flCurrentSendRateUsed = 64*1024;

	/// If >=0, then we can send a full packet right now.  We allow ourselves to "store up"
	/// about 1 packet worth of "reserve".  In other words, if we have not sent any packets
	/// for a while, basically we allow ourselves to send two packets in rapid succession,
	/// thus "bursting" over the limit by 1 packet.  That long term rate will be clamped by
	/// the send rate.
	///
	/// If <0, then we are currently "over" our rate limit and need to wait before we can
	/// send a packet.
	///
	/// Provision for accumulating "credits" and burst allowance, to account for lossy
	/// kernel scheduler, etc is mentioned in RFC 5348, section 4.6.
	float m_flTokenBucket = 0;

	/// Last time that we added tokens to m_flTokenBucket
	SteamNetworkingMicroseconds m_usecTokenBucketTime = 0;

	/// Calculate time until we could send our next packet, checking our token
	/// bucket and the current send rate
	SteamNetworkingMicroseconds CalcTimeUntilNextSend() const
	{
		// Do we have tokens to burn right now?
		if ( m_flTokenBucket >= 0.0f )
			return 0;

		// Otherwise, compute how long it takes to pay off the deficit at the
		// current send rate.  +1 so that when we are out of tokens we never
		// return 0, since zero means "ready right now".
		return SteamNetworkingMicroseconds( m_flTokenBucket * -1e6f / m_flCurrentSendRateUsed ) + 1;
	}
};
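
// A minimal sketch of the token-bucket update that pairs with
// CalcTimeUntilNextSend() above.  The real accounting lives in the SNP send
// path; the function and parameter names here are illustrative assumptions.
inline void ExampleUpdateTokenBucket( SSendRateData &rate, SteamNetworkingMicroseconds usecNow, int cbPacketJustSent )
{
	// Accrue tokens (bytes) for the elapsed time at the current send rate...
	float flElapsedSec = ( usecNow - rate.m_usecTokenBucketTime ) * 1e-6f;
	rate.m_flTokenBucket += flElapsedSec * rate.m_flCurrentSendRateUsed;
	rate.m_usecTokenBucketTime = usecNow;

	// ...but never bank more than the burst allowance
	if ( rate.m_flTokenBucket > k_flSendRateBurstOverageAllowance )
		rate.m_flTokenBucket = k_flSendRateBurstOverageAllowance;

	// Spend tokens for the packet we just put on the wire.  This may drive the
	// bucket negative, which is exactly what delays the next send.
	rate.m_flTokenBucket -= (float)cbPacketJustSent;
}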

struct SSNPSenderState
{
	SSNPSenderState();
	~SSNPSenderState() {
		Shutdown();
	}
	void Shutdown();

	/// Clear the Nagle timer on all pending messages
	void ClearNagleTimers()
	{
		CSteamNetworkingMessage *pMsg = m_messagesQueued.m_pLast;
		while ( pMsg && pMsg->SNPSend_UsecNagle() )
		{
			pMsg->SNPSend_SetUsecNagle( 0 );
			pMsg = pMsg->m_links.m_pPrev;
		}
	}

	// Current message number, we ++ when adding a message
	int64 m_nReliableStreamPos = 1;
	int64 m_nLastSentMsgNum = 0; // Will increment to 1 with first message
	int64 m_nLastSendMsgNumReliable = 0;

	/// List of messages that we have not yet finished putting on the wire the first time.
	/// The Nagle timer may be active on one or more, but if so, it is only on messages
	/// at the END of the list.  The first message may be partially sent.
	SSNPSendMessageList m_messagesQueued;

	/// How many bytes into the first message in the queue have we put on the wire?
	int m_cbCurrentSendMessageSent = 0;

	/// List of reliable messages that have been fully placed on the wire at least once,
	/// but we're hanging onto because of the potential need to retry.  (Note that if we get
	/// packet loss, it's possible that we hang onto a message even after it's been fully
	/// acked, because a prior message is still needed.  We always operate on this list
	/// like a queue, rather than seeking into the middle of the list and removing messages
	/// as soon as they are no longer needed.)
	SSNPSendMessageList m_unackedReliableMessages;

	// Buffered data counters.  See SteamNetworkingQuickConnectionStatus for more info
	int m_cbPendingUnreliable = 0;
	int m_cbPendingReliable = 0;
	int m_cbSentUnackedReliable = 0;
	inline int PendingBytesTotal() const { return m_cbPendingUnreliable + m_cbPendingReliable; }

	// Stats.  FIXME - move to LinkStatsEndToEnd and track rate counters
	int64 m_nMessagesSentReliable = 0;
	int64 m_nMessagesSentUnreliable = 0;

	/// List of packets that we have sent but don't know whether they were received or not.
	/// We keep a dummy sentinel at the head of the list, with a negative packet number.
	/// This vastly simplifies the processing.
	std_map<int64,SNPInFlightPacket_t> m_mapInFlightPacketsByPktNum;

	/// The next unacked packet that should be timed out and implicitly NACKed,
	/// if we don't receive an ACK in time.  Will be m_mapInFlightPacketsByPktNum.end()
	/// if we don't have any in flight packets that we are waiting on.
	std_map<int64,SNPInFlightPacket_t>::iterator m_itNextInFlightPacketToTimeout;

	/// Ordered list of reliable ranges that we have recently sent
	/// in a packet.  These should be non-overlapping, and furthermore
	/// should not overlap with any range in m_listReadyReliableRange
	///
	/// The "value" portion of the map is the message that has the first bit of
	/// reliable data we need for this message
	std_map<SNPRange_t,CSteamNetworkingMessage*,SNPRange_t::NonOverlappingLess> m_listInFlightReliableRange;

	/// Ordered list of ranges that have been put on the wire,
	/// but have been detected as dropped, and now need to be retried.
	std_map<SNPRange_t,CSteamNetworkingMessage*,SNPRange_t::NonOverlappingLess> m_listReadyRetryReliableRange;

	/// Oldest packet sequence number that we are still asking peer
	/// to send acks for.
	int64 m_nMinPktWaitingOnAck = 0;

	// Remove messages from m_unackedReliableMessages that have been fully acked.
	void RemoveAckedReliableMessageFromUnackedList();

	/// Check invariants in debug.
	#if STEAMNETWORKINGSOCKETS_SNP_PARANOIA == 0
		inline void DebugCheckInFlightPacketMap() const {}
	#else
		void DebugCheckInFlightPacketMap() const;
	#endif
	#if STEAMNETWORKINGSOCKETS_SNP_PARANOIA > 1
		inline void MaybeCheckInFlightPacketMap() const { DebugCheckInFlightPacketMap(); }
	#else
		inline void MaybeCheckInFlightPacketMap() const {}
	#endif
};
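
// Illustrative sketch of why the sentinel entry in m_mapInFlightPacketsByPktNum
// pays off (an assumption about the access pattern, not the actual code):
// lookups for "the in-flight packet at or before X" never need a begin() check.
//
//	auto it = sender.m_mapInFlightPacketsByPktNum.upper_bound( nPktNum );
//	--it; // Always safe: the sentinel's negative packet number sorts below any real packet
//	if ( it->first == nPktNum )
//	{
//		// Found the record: measure RTT from m_usecWhenSent, mark m_bNack, etc.
//	}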

struct SSNPRecvUnreliableSegmentKey
{
	int64 m_nMsgNum;
	int m_nOffset;

	inline bool operator<(const SSNPRecvUnreliableSegmentKey &x) const
	{
		if ( m_nMsgNum < x.m_nMsgNum ) return true;
		if ( m_nMsgNum > x.m_nMsgNum ) return false;
		return m_nOffset < x.m_nOffset;
	}
};

struct SSNPRecvUnreliableSegmentData
{
	int m_cbSegSize = -1;
	bool m_bLast = false;
	char m_buf[ k_cbMaxUnreliableSegmentSizeRecv ];
};

struct SSNPPacketGap
{
	int64 m_nEnd; // just after the last packet received
	SteamNetworkingMicroseconds m_usecWhenReceivedPktBefore; // So we can send RTT data in our acks
	SteamNetworkingMicroseconds m_usecWhenAckPrior; // We need to send an ack for everything with lower packet numbers than this gap by this time.  (Earlier is OK.)
	SteamNetworkingMicroseconds m_usecWhenOKToNack; // Don't give up on the gap being filled before this time
};

struct SSNPReceiverState
{
	SSNPReceiverState();
	~SSNPReceiverState() {
		Shutdown();
	}
	void Shutdown();

	/// Unreliable message segments that we have received.  When an unreliable message
	/// needs to be fragmented, we store the pieces here.  NOTE: it might be more efficient
	/// to use a simpler container, with worse O(), since this should ordinarily be
	/// a pretty small list.
	std_map<SSNPRecvUnreliableSegmentKey,SSNPRecvUnreliableSegmentData> m_mapUnreliableSegments;

	/// Stream position of the first byte in m_bufReliableData.  Remember that the first byte
	/// in the reliable stream is actually at position 1, not 0
	int64 m_nReliableStreamPos = 1;

	/// The highest message number we have seen so far.
	int64 m_nHighestSeenMsgNum = 0;

	/// The message number of the most recently received reliable message
	int64 m_nLastRecvReliableMsgNum = 0;

	/// Reliable data stream that we have received.  This might have gaps in it!
	std_vector<byte> m_bufReliableStream;

	/// Gaps in the reliable data.  These are created when we receive reliable data that
	/// is beyond what we expect next.  Since these must never overlap, we store them
	/// using begin as the key and end as the value.
	///
	/// !SPEED! We should probably use a small fixed-sized, sorted vector here,
	/// since in most cases the list will be small, and the cost of dynamic memory
	/// allocation will be way worse than O(n) insertion/removal.
	std_map<int64,int64> m_mapReliableStreamGaps;

	/// List of gaps in the packet sequence numbers we have received.
	/// Since these must never overlap, we store them using begin as the
	/// key and the end in the value.
	///
	/// The last item in the list is a sentinel with
	/// begin and end set to INT64_MAX, and m_usecWhenAckPrior is
	/// the time when we need to flush acks/nacks for all packets,
	/// including those received after the last gap (if any --
	/// INT64_MAX means nothing scheduled).  Remember, our wire
	/// protocol cannot report on packet N without also reporting
	/// on all packets numbered < N.
	///
	/// !SPEED! We should probably use a small fixed-sized, sorted vector here,
	/// since in most cases the list will be small, and the cost of dynamic memory
	/// allocation will be way worse than O(n) insertion/removal.
	std_map<int64,SSNPPacketGap> m_mapPacketGaps;

	/// Oldest packet sequence number we need to ack to our peer
	int64 m_nMinPktNumToSendAcks = 0;

	/// Packet number when we received the value of m_nMinPktNumToSendAcks
	int64 m_nPktNumUpdatedMinPktNumToSendAcks = 0;

	/// The next ack that needs to be sent.  The invariants
	/// for the times are:
	///
	/// * Blocks with lower packet numbers: m_usecWhenAckPrior = INT64_MAX
	/// * This block: m_usecWhenAckPrior < INT64_MAX, or we are the sentinel
	/// * Blocks with higher packet numbers (if we are not the sentinel): m_usecWhenAckPrior >= previous m_usecWhenAckPrior
	///
	/// We might send acks before they are due, rather than
	/// waiting until the last moment!  If we are going to
	/// send a packet at all, we usually try to send at least
	/// a few acks, and if there is room in the packet, as
	/// many as will fit.  The one exception is if
	/// sending an ack would imply a NACK that we don't want to
	/// send yet.  (Remember the restrictions on what we are able
	/// to communicate due to the tight RLE encoding of the wire
	/// format.)  These delays are usually very short lived, and
	/// only happen when there is packet loss, so they don't delay
	/// acks very much.  The whole purpose of this rather involved
	/// bookkeeping is to figure out which acks we *need* to send,
	/// and which acks we cannot send yet, so we can make optimal
	/// decisions.
	std_map<int64,SSNPPacketGap>::iterator m_itPendingAck;

	/// Iterator into m_mapPacketGaps.  If != the sentinel,
	/// we will avoid reporting on the dropped packets in this
	/// gap (and all higher numbered packets), because we are
	/// waiting in the hopes that they will arrive out of order.
	std_map<int64,SSNPPacketGap>::iterator m_itPendingNack;

	/// Queue a flush of ALL acks (and NACKs!) by the given time.
	/// If anything is scheduled to happen earlier, that schedule
	/// will still be honored.  We will ack up to that packet number,
	/// and then we may report higher numbered blocks, or we may
	/// stop and wait to report more acks until later.
	void QueueFlushAllAcks( SteamNetworkingMicroseconds usecWhen );

	/// Return the time when we need to flush out acks, or INT64_MAX
	/// if we don't have any acks pending right now.
	inline SteamNetworkingMicroseconds TimeWhenFlushAcks() const
	{
		// Paranoia
		if ( m_mapPacketGaps.empty() )
		{
			AssertMsg( false, "TimeWhenFlushAcks - we're shut down!" );
			return INT64_MAX;
		}
		return m_itPendingAck->second.m_usecWhenAckPrior;
	}

	/// Check invariants in debug.
	#if STEAMNETWORKINGSOCKETS_SNP_PARANOIA > 1
		void DebugCheckPackGapMap() const;
	#else
		inline void DebugCheckPackGapMap() const {}
	#endif

	// Stats.  FIXME - move to LinkStatsEndToEnd and track rate counters
	int64 m_nMessagesRecvReliable = 0;
	int64 m_nMessagesRecvUnreliable = 0;
};
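
// Illustrative example of the gap bookkeeping above: suppose we have received
// packets 1, 2, and 5.  Then m_mapPacketGaps holds { 3 -> gap ending at 5 }
// plus the INT64_MAX sentinel.  If packet 4 later arrives out of order, the
// gap shrinks to [3,4); if packet 3 also arrives, the gap is removed entirely
// and no NACK for it is ever put on the wire.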

} // SteamNetworkingSocketsLib