1 //====== Copyright Valve Corporation, All rights reserved. ====================
2 //
3 // Utils for calculating networking stats
4 //
5 //=============================================================================
6 
7 #ifndef STEAMNETWORKING_STATSUTILS_H
8 #define STEAMNETWORKING_STATSUTILS_H
9 #pragma once
10 
11 #include <tier0/basetypes.h>
12 #include <tier0/t0constants.h>
13 #include "percentile_generator.h"
14 #include "steamnetworking_stats.h"
15 #include "steamnetworkingsockets_internal.h"
16 #include "steamnetworkingsockets_thinker.h"
17 
18 //#include <google/protobuf/repeated_field.h> // FIXME - should only need this!
19 #include <tier0/memdbgoff.h>
20 #include <steamnetworkingsockets_messages.pb.h>
21 #include <tier0/memdbgon.h>
22 
23 class CMsgSteamDatagramConnectionQuality;
24 
25 // Internal stuff goes in a private namespace
26 namespace SteamNetworkingSocketsLib {
27 
28 /// Default interval for link stats rate measurement
29 const SteamNetworkingMicroseconds k_usecSteamDatagramLinkStatsDefaultInterval = 5 * k_nMillion;
30 
31 /// Default interval for speed stats rate measurement
32 const SteamNetworkingMicroseconds k_usecSteamDatagramSpeedStatsDefaultInterval = 1 * k_nMillion;
33 
34 /// We should send tracer ping requests in our packets on approximately
35 /// this interval.  (Tracer pings and their replies are relatively cheap.)
36 /// They serve both as latency measurements and as keepalives: if only one
37 /// side or the other is doing most of the talking, they make sure the other
38 /// side always does a minimum amount of acking.
39 const SteamNetworkingMicroseconds k_usecLinkStatsMinPingRequestInterval = 5 * k_nMillion;
40 const SteamNetworkingMicroseconds k_usecLinkStatsMaxPingRequestInterval = 7 * k_nMillion;
41 
42 /// Client should send instantaneous connection quality stats
43 /// at approximately this interval
44 const SteamNetworkingMicroseconds k_usecLinkStatsInstantaneousReportInterval = 20 * k_nMillion;
45 const SteamNetworkingMicroseconds k_usecLinkStatsInstantaneousReportMaxInterval = 30 * k_nMillion;
46 
47 /// Client will report lifetime connection stats at approximately this interval
48 const SteamNetworkingMicroseconds k_usecLinkStatsLifetimeReportInterval = 120 * k_nMillion;
49 const SteamNetworkingMicroseconds k_usecLinkStatsLifetimeReportMaxInterval = 140 * k_nMillion;
50 
51 /// If we are timing out, ping the peer on this interval
52 const SteamNetworkingMicroseconds k_usecAggressivePingInterval = 200*1000;
53 
54 /// If we haven't heard from the peer in a while, send a keepalive
55 const SteamNetworkingMicroseconds k_usecKeepAliveInterval = 10*k_nMillion;
56 
57 /// Track the rate that something is happening
58 struct Rate_t
59 {
60 	void Reset() { memset( this, 0, sizeof(*this) ); }
61 
62 	int64	m_nCurrentInterval;
63 	int64	m_nAccumulator; // does not include the current interval
64 	float	m_flRate;
65 
66 	int64 Total() const { return m_nAccumulator + m_nCurrentInterval; }
67 
68 	inline void Process( int64 nIncrement )
69 	{
70 		m_nCurrentInterval += nIncrement;
71 	}
72 
73 	inline void UpdateInterval( float flIntervalDuration )
74 	{
75 		m_flRate = float(m_nCurrentInterval) / flIntervalDuration;
76 		m_nAccumulator += m_nCurrentInterval;
77 		m_nCurrentInterval = 0;
78 	}
79 
80 	inline void operator+=( const Rate_t &x )
81 	{
82 		m_nCurrentInterval += x.m_nCurrentInterval;
83 		m_nAccumulator += x.m_nAccumulator;
84 		m_flRate += x.m_flRate;
85 	}
86 };
87 
88 /// Track flow rate (number and bytes)
89 struct PacketRate_t
90 {
91 	void Reset() { memset( this, 0, sizeof(*this) ); }
92 
93 	Rate_t m_packets;
94 	Rate_t m_bytes;
95 
96 	inline void ProcessPacket( int sz )
97 	{
98 		m_packets.Process( 1 );
99 		m_bytes.Process( sz );
100 	}
101 
102 	void UpdateInterval( float flIntervalDuration )
103 	{
104 		m_packets.UpdateInterval( flIntervalDuration );
105 		m_bytes.UpdateInterval( flIntervalDuration );
106 	}
107 
108 	inline void operator+=( const PacketRate_t &x )
109 	{
110 		m_packets += x.m_packets;
111 		m_bytes += x.m_bytes;
112 	}
113 };
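// Usage sketch (illustrative only; nothing in this header calls it this way): a caller
// feeds ProcessPacket() as packets flow, then rolls the interval over on a fixed cadence
// so that m_flRate holds the most recently completed per-second rate.
//
//     PacketRate_t rate;
//     rate.Reset();
//     rate.ProcessPacket( 1200 );         // a 1200-byte packet
//     rate.ProcessPacket( 800 );          // an 800-byte packet
//     rate.UpdateInterval( 5.0f );        // close out a 5-second interval
//     // rate.m_bytes.m_flRate   == 400.0f (bytes/sec)
//     // rate.m_packets.m_flRate == 0.4f   (packets/sec)
//     // rate.m_bytes.Total()    == 2000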
114 
115 /// Class used to track ping values
116 struct PingTracker
117 {
118 
119 	struct Ping
120 	{
121 		int m_nPingMS;
122 		SteamNetworkingMicroseconds m_usecTimeRecv;
123 	};
124 
125 	/// Recent ping measurements.  The most recent one is at entry 0.
126 	Ping m_arPing[ 3 ];
127 
128 	/// Number of valid entries in m_arPing.
129 	int m_nValidPings;
130 
131 	/// Time when the most recent ping was received
132 	SteamNetworkingMicroseconds TimeRecvMostRecentPing() const { return m_arPing[0].m_usecTimeRecv; }
133 
134 	/// Return the worst of the pings in the small sample of recent pings
135 	int WorstPingInRecentSample() const;
136 
137 	/// Estimate a conservative (i.e. err on the large side) timeout for the connection
138 	SteamNetworkingMicroseconds CalcConservativeTimeout() const
139 	{
140 		constexpr SteamNetworkingMicroseconds k_usecMaxTimeout = 1250000;
141 		if ( m_nSmoothedPing < 0 )
142 			return k_usecMaxTimeout;
143 		return std::min( SteamNetworkingMicroseconds{ WorstPingInRecentSample()*2000 + 250000 }, k_usecMaxTimeout );
144 	}
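	// Worked example (illustrative): if the worst recent ping sample is 80ms, the estimate
	// is 80*2000 + 250000 = 410000 usec (2x RTT plus 250ms of slop), which is below the
	// 1.25-second cap.  With no valid ping yet (m_nSmoothedPing < 0) we just return the cap.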
145 
146 	/// Smoothed ping value
147 	int m_nSmoothedPing;
148 
149 	/// Time when we last sent a message, for which we expect a reply (possibly delayed)
150 	/// that we could use to measure latency.  (Possibly because the reply contains
151 	/// a simple timestamp, or possibly because it will contain a sequence number, and
152 	/// we will be able to look up that sequence number and remember when we sent it.)
153 	SteamNetworkingMicroseconds m_usecTimeLastSentPingRequest;
154 protected:
155 	void Reset();
156 
157 	/// Called when we receive a ping measurement
158 	void ReceivedPing( int nPingMS, SteamNetworkingMicroseconds usecNow );
159 };
160 
161 /// Ping tracker that tracks detailed lifetime stats
162 struct PingTrackerDetailed : PingTracker
163 {
164 	void Reset()
165 	{
166 		PingTracker::Reset();
167 		m_sample.Clear();
168 		m_histogram.Reset();
169 	}
170 	void ReceivedPing( int nPingMS, SteamNetworkingMicroseconds usecNow )
171 	{
172 		PingTracker::ReceivedPing( nPingMS, usecNow );
173 		m_sample.AddSample( uint16( std::min( nPingMS, 0xffff ) ) );
174 		m_histogram.AddSample( nPingMS );
175 	}
176 
177 	/// Track sample of pings received so we can generate percentiles.
178 	/// Also tracks how many pings we have received total
179 	PercentileGenerator<uint16> m_sample;
180 
181 	/// Counts by bucket
182 	PingHistogram m_histogram;
183 
184 	/// Populate structure
185 	void GetLifetimeStats( SteamDatagramLinkLifetimeStats &s ) const
186 	{
187 		s.m_pingHistogram  = m_histogram;
188 
189 		s.m_nPingNtile5th  = m_sample.NumSamples() < 20 ? -1 : m_sample.GetPercentile( .05f );
190 		s.m_nPingNtile50th = m_sample.NumSamples() <  2 ? -1 : m_sample.GetPercentile( .50f );
191 		s.m_nPingNtile75th = m_sample.NumSamples() <  4 ? -1 : m_sample.GetPercentile( .75f );
192 		s.m_nPingNtile95th = m_sample.NumSamples() < 20 ? -1 : m_sample.GetPercentile( .95f );
193 		s.m_nPingNtile98th = m_sample.NumSamples() < 50 ? -1 : m_sample.GetPercentile( .98f );
194 	}
195 };
196 
197 /// Before switching to a different route, we need to make sure that we have a ping
198 /// sample in at least N recent time buckets.  (See PingTrackerForRouteSelection)
199 const int k_nRecentValidTimeBucketsToSwitchRoute = 15;
200 
201 /// Ping tracker that tracks samples over several intervals.  This is used
202 /// to make routing decisions in such a way to avoid route flapping when ping
203 /// times on different routes are fluctuating.
204 ///
205 /// This class also has the concept of a user override, which is used to fake
206 /// a particular ping time for debugging.
207 struct PingTrackerForRouteSelection : PingTracker
208 {
209 	COMPILE_TIME_ASSERT( k_nRecentValidTimeBucketsToSwitchRoute == 15 );
210 	static constexpr int k_nTimeBucketCount = 17;
211 	static constexpr SteamNetworkingMicroseconds k_usecTimeBucketWidth = k_nMillion; // Desired width of each time bucket
212 	static constexpr int k_nPingOverride_None = -2; // Ordinary operation.  (-1 is a legit ping time, which means "ping failed")
213 	static constexpr SteamNetworkingMicroseconds k_usecAntiFlapRouteCheckPingInterval = 200*1000;
214 
215 	struct TimeBucket
216 	{
217 		SteamNetworkingMicroseconds m_usecEnd; // End of this bucket.  The start of the bucket is m_usecEnd-k_usecTimeBucketWidth
218 		int m_nPingCount;
219 		int m_nMinPing; // INT_MAX if we have not received one
220 		int m_nMaxPing; // INT_MIN if we have not received one
221 	};
222 	TimeBucket m_arTimeBuckets[ k_nTimeBucketCount ];
223 	int m_idxCurrentBucket;
224 	int m_nTotalPingsReceived;
225 	int m_nPingOverride = k_nPingOverride_None;
226 
227 	void Reset()
228 	{
229 		PingTracker::Reset();
230 		m_nTotalPingsReceived = 0;
231 		m_idxCurrentBucket = 0;
232 		for ( TimeBucket &b: m_arTimeBuckets )
233 		{
234 			b.m_usecEnd = 0;
235 			b.m_nPingCount = 0;
236 			b.m_nMinPing = INT_MAX;
237 			b.m_nMaxPing = INT_MIN;
238 		}
239 	}
240 	void ReceivedPing( int nPingMS, SteamNetworkingMicroseconds usecNow )
241 	{
242 		// Ping time override in effect?
243 		if ( m_nPingOverride > k_nPingOverride_None )
244 		{
245 			if ( m_nPingOverride == -1 )
246 				return;
247 			nPingMS = m_nPingOverride;
248 		}
249 		PingTracker::ReceivedPing( nPingMS, usecNow );
250 		++m_nTotalPingsReceived;
251 
252 		SteamNetworkingMicroseconds usecCurrentBucketEnd = m_arTimeBuckets[ m_idxCurrentBucket ].m_usecEnd;
253 		if ( usecCurrentBucketEnd > usecNow )
254 		{
255 			TimeBucket &curBucket = m_arTimeBuckets[ m_idxCurrentBucket ];
256 			++curBucket.m_nPingCount;
257 			curBucket.m_nMinPing = std::min( curBucket.m_nMinPing, nPingMS );
258 			curBucket.m_nMaxPing = std::max( curBucket.m_nMaxPing, nPingMS );
259 		}
260 		else
261 		{
262 			++m_idxCurrentBucket;
263 			if ( m_idxCurrentBucket >= k_nTimeBucketCount )
264 				m_idxCurrentBucket = 0;
265 			TimeBucket &newBucket = m_arTimeBuckets[ m_idxCurrentBucket ];
266 
267 			// If we are less than halfway into the new window, then start it immediately after
268 			// the previous one.
269 			if ( usecCurrentBucketEnd + (k_usecTimeBucketWidth/2) >= usecNow )
270 			{
271 				newBucket.m_usecEnd = usecCurrentBucketEnd + k_usecTimeBucketWidth;
272 			}
273 			else
274 			{
275 				// It's been more than half a window.  Start this window at the current time.
276 				newBucket.m_usecEnd = usecNow + k_usecTimeBucketWidth;
277 			}
278 
279 			newBucket.m_nPingCount = 1;
280 			newBucket.m_nMinPing = nPingMS;
281 			newBucket.m_nMaxPing = nPingMS;
282 		}
283 	}
284 
285 	void SetPingOverride( int nPing )
286 	{
287 		m_nPingOverride = nPing;
288 		if ( m_nPingOverride <= k_nPingOverride_None )
289 			return;
290 		if ( m_nPingOverride < 0 )
291 		{
292 			m_nValidPings = 0;
293 			m_nSmoothedPing = -1;
294 			return;
295 		}
296 		m_nSmoothedPing = nPing;
297 		for ( int i = 0 ; i < m_nValidPings ; ++i )
298 			m_arPing[i].m_nPingMS = nPing;
299 		TimeBucket &curBucket = m_arTimeBuckets[ m_idxCurrentBucket ];
300 		curBucket.m_nMinPing = nPing;
301 		curBucket.m_nMaxPing = nPing;
302 	}
303 
304 	/// Time when we should send the next ping request used for route-selection anti-flap checks
305 	SteamNetworkingMicroseconds TimeToSendNextAntiFlapRouteCheckPingRequest() const
306 	{
307 		return std::min(
308 			m_arTimeBuckets[ m_idxCurrentBucket ].m_usecEnd, // time to start next bucket
309 			m_usecTimeLastSentPingRequest + k_usecAntiFlapRouteCheckPingInterval // and then send them at a given rate
310 		);
311 	}
312 
313 	// Get the min/max ping value among recent buckets.
314 	// Returns the number of valid buckets used to collect the data.
315 	int GetPingRangeFromRecentBuckets( int &nOutMin, int &nOutMax, SteamNetworkingMicroseconds usecNow ) const
316 	{
317 		int nMin = m_nSmoothedPing;
318 		int nMax = m_nSmoothedPing;
319 		int nBucketsValid = 0;
320 		if ( m_nSmoothedPing >= 0 )
321 		{
322 			SteamNetworkingMicroseconds usecRecentEndThreshold = usecNow - ( (k_nTimeBucketCount-1) * k_usecTimeBucketWidth );
323 			for ( const TimeBucket &bucket: m_arTimeBuckets )
324 			{
325 				if ( bucket.m_usecEnd >= usecRecentEndThreshold )
326 				{
327 					Assert( bucket.m_nPingCount > 0 );
328 					Assert( 0 <= bucket.m_nMinPing );
329 					Assert( bucket.m_nMinPing <= bucket.m_nMaxPing );
330 					++nBucketsValid;
331 					nMin = std::min( nMin, bucket.m_nMinPing );
332 					nMax = std::max( nMax, bucket.m_nMaxPing );
333 				}
334 			}
335 		}
336 		nOutMin = nMin;
337 		nOutMax = nMax;
338 		return nBucketsValid;
339 	}
340 };
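// Usage sketch (illustrative; the actual route-selection logic lives elsewhere in the
// library): a route is only considered a candidate for switching once we have ping samples
// in enough recent time buckets, and the min/max range tells us how noisy the measurement is.
//
//     int nMin, nMax;
//     int nBuckets = tracker.GetPingRangeFromRecentBuckets( nMin, nMax, usecNow );
//     bool bEnoughData = ( nBuckets >= k_nRecentValidTimeBucketsToSwitchRoute );
//     // Only compare this route against the current one when bEnoughData is true,
//     // e.g. by requiring the candidate's nMax to beat the current route's nMin.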
341 
342 /// Token bucket rate limiter
343 /// https://en.wikipedia.org/wiki/Token_bucket
344 struct TokenBucketRateLimiter
345 {
346 	TokenBucketRateLimiter() { Reset(); }
347 
348 	/// Mark the token bucket as full and reset internal timer
349 	void Reset() { m_usecLastTime = 0; m_flTokenDeficitFromFull = 0.0f; }
350 
351 	/// Attempt to spend a token.
352 	/// flMaxSteadyStateRate - the rate that tokens are added to the bucket, per second.
353 	///                        Over a long interval, tokens cannot be spent faster than this rate.  And if they are consumed
354 	///                        at this rate, there is no allowance for bursting higher.  Typically you'll set this to a bit
355 	///                        higher than the true steady-state rate, so that the bucket can fill back up to allow for
356 	///                        another burst.
357 	/// flMaxBurst - The max possible burst, in tokens.
358 	bool BCheck( SteamNetworkingMicroseconds usecNow, float flMaxSteadyStateRate, float flMaxBurst )
359 	{
360 		Assert( flMaxBurst >= 1.0f );
361 		Assert( flMaxSteadyStateRate > 0.0f );
362 
363 		// Calculate elapsed time (in seconds) and advance timestamp
364 		float flElapsed = ( usecNow - m_usecLastTime ) * 1e-6f;
365 		m_usecLastTime = usecNow;
366 
367 		// Add tokens to the bucket, but stop if it gets full
368 		m_flTokenDeficitFromFull = Max( m_flTokenDeficitFromFull - flElapsed*flMaxSteadyStateRate, 0.0f );
369 
370 		// Burst rate currently being exceeded?
371 		if ( m_flTokenDeficitFromFull + 1.0f > flMaxBurst )
372 			return false;
373 
374 		// We have a token.  Spend it
375 		m_flTokenDeficitFromFull += 1.0f;
376 		return true;
377 	}
378 
379 private:
380 
381 	/// Last time a token was spent
382 	SteamNetworkingMicroseconds m_usecLastTime;
383 
384 	/// The degree to which the bucket is not full.  E.g. 0 is "full" and any higher number means the bucket is that much less than full.
385 	/// Doing the accounting in this "inverted" way makes it easier to reset and adjust the limits dynamically.
386 	float m_flTokenDeficitFromFull;
387 };
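// Usage sketch (illustrative): gate some periodic work so that it averages at most
// 10 events/sec, but may burst up to 20 events after a quiet period.
//
//     TokenBucketRateLimiter limiter;
//     // usecNow is the caller's current SteamNetworkingMicroseconds timestamp
//     if ( limiter.BCheck( usecNow, 10.0f, 20.0f ) )
//         { /* allowed under the rate limit -- do the work */ }
//     else
//         { /* over budget -- skip or defer */ }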
388 
389 // Bitmask returned by GetStatsSendNeed
390 constexpr int k_nSendStats_Instantanous_Due = 1;
391 constexpr int k_nSendStats_Instantanous_Ready = 2;
392 constexpr int k_nSendStats_Lifetime_Due = 4;
393 constexpr int k_nSendStats_Lifetime_Ready = 8;
394 constexpr int k_nSendStats_Instantanous = k_nSendStats_Instantanous_Due|k_nSendStats_Instantanous_Ready;
395 constexpr int k_nSendStats_Lifetime = k_nSendStats_Lifetime_Due|k_nSendStats_Lifetime_Ready;
396 constexpr int k_nSendStats_Due = k_nSendStats_Instantanous_Due|k_nSendStats_Lifetime_Due;
397 constexpr int k_nSendStats_Ready = k_nSendStats_Instantanous_Ready|k_nSendStats_Lifetime_Ready;
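// Usage sketch (illustrative; the exact policy is up to the caller): the bitmask returned
// by GetStatsSendNeed() is typically tested against the combined masks, e.g.:
//
//     int nNeed = stats.GetStatsSendNeed( usecNow );
//     if ( nNeed & k_nSendStats_Due )
//         { /* stats are overdue -- find a way to send a message soon */ }
//     else if ( nNeed & k_nSendStats_Ready )
//         { /* stats could piggyback on a packet we are sending anyway */ }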
398 
399 /// Track quality stats based on flow of sequence numbers
400 struct SequencedPacketCounters
401 {
402 	int m_nRecv; // packets successfully received containing a sequence number
403 	int m_nDropped; // packets assumed to be dropped in the current interval
404 	int m_nOutOfOrder; // packets that arrived after a higher-numbered packet had already been received.  (Most recent interval.)
405 	int m_nLurch; // packets whose sequence number jumped far outside the expected range.  (Most recent interval.)
406 	int m_nDuplicate; // packets whose sequence number had already been received.  (Most recent interval.)
407 	int m_usecMaxJitter;
408 
409 	void Reset()
410 	{
411 		m_nRecv = 0;
412 		m_nDropped = 0;
413 		m_nOutOfOrder = 0;
414 		m_nLurch = 0;
415 		m_nDuplicate = 0;
416 		m_usecMaxJitter = -1;
417 	}
418 
419 	void Accumulate( const SequencedPacketCounters &x )
420 	{
421 		m_nRecv += x.m_nRecv;
422 		m_nDropped += x.m_nDropped;
423 		m_nOutOfOrder += x.m_nOutOfOrder;
424 		m_nLurch += x.m_nLurch;
425 		m_nDuplicate += x.m_nDuplicate;
426 		m_usecMaxJitter = std::max( m_usecMaxJitter, x.m_usecMaxJitter );
427 	}
428 
429 	inline int Weird() const { return m_nOutOfOrder + m_nLurch + m_nDuplicate; }
430 
431 	static inline float CalculateQuality( int nRecv, int nDropped, int nWeird )
432 	{
433 		Assert( nRecv >= nWeird );
434 		int nSent = nRecv + nDropped;
435 		if ( nSent <= 0 )
436 			return -1.0f;
437 		return (float)(nRecv - nWeird) / (float)nSent;
438 	}
439 
440 	inline float CalculateQuality() const
441 	{
442 		return CalculateQuality( m_nRecv, m_nDropped, Weird() );
443 	}
444 
445 	inline void OnRecv()
446 	{
447 		++m_nRecv;
448 	}
449 	inline void OnDropped( int nDropped )
450 	{
451 		m_nDropped += nDropped;
452 	}
453 	inline void OnDuplicate()
454 	{
455 		++m_nDuplicate;
456 	}
457 	inline void OnLurch()
458 	{
459 		++m_nLurch;
460 	}
461 	inline void OnOutOfOrder()
462 	{
463 		++m_nOutOfOrder;
464 
465 		// We previously marked this as dropped.  Undo that
466 		if ( m_nDropped > 0 ) // Might have marked it in the previous interval.  Our stats will be slightly off in this case.  Not worth it to try to get this exactly right.
467 			--m_nDropped;
468 	}
469 
470 };
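// Worked example (illustrative): over an interval in which 97 packets arrived cleanly,
// 1 arrived out of order, and 2 were dropped, nRecv = 98, nWeird = 1, nDropped = 2, and
// CalculateQuality() returns (98-1)/(98+2) = 97/100 = 0.97, i.e. 97% of the packets the
// peer sent arrived and arrived "normally".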
471 
472 
473 /// Base class used to handle link quality calculations.
474 ///
475 /// All extant instantiations will actually be LinkStatsTracker<T>, where T is the specific,
476 /// derived type.  There are several functions that, if we cared more about simplicity and less
477 /// about perf, would be defined as virtual functions here.  But many of these functions are tiny
478 /// and called in inner loops, and we want to ensure that the compiler is able to expand everything
479 /// inline and does not use virtual function dispatch.
480 ///
481 /// So, if a function needs to be "virtual", meaning it can be overridden by a derived class, then
482 /// we name it with "Internal" here, and place a small wrapper in LinkStatsTracker<T> that will call
483 /// the correct version.  We never call the "Internal" one directly, except when invoking the base
484 /// class.  In this way, we make sure that we always call the most derived class version.
485 ///
486 /// If a base class needs to *call* a virtual function, then we have a problem.  With a traditional
487 /// member function, the derived type is erased: inside the method, "this" has the type of the
488 /// class that declares the method, not the actual derived type.  To work around this, all methods that need to call virtual functions
489 /// are declared as static, accepting "this" (named "pThis") as a template argument, thus the type
490 /// is not erased.
491 ///
492 /// All of this is weird, no doubt, but it achieves the goal of ensuring that the compiler can inline
493 /// all of these small functions if appropriate, and no virtual function dispatch is used.
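///
/// A minimal sketch of the pattern (the names below are illustrative and not part of this header):
///
///     struct Base
///     {
///         template <typename T>
///         static void ThinkInternal( T *pThis, SteamNetworkingMicroseconds usecNow ) { pThis->DoStuff(); }
///     };
///     struct Derived : Base
///     {
///         void DoStuff();
///         void DoMoreStuff();
///         template <typename T>
///         static void ThinkInternal( T *pThis, SteamNetworkingMicroseconds usecNow )
///         {
///             Base::ThinkInternal( pThis, usecNow );   // explicitly invoke the "base" version
///             pThis->DoMoreStuff();                    // then add derived-class behavior
///         }
///     };
///     template <typename T>
///     struct Wrapper final : T
///     {
///         // Public entry point always reaches the most-derived ThinkInternal, fully inlined
///         void Think( SteamNetworkingMicroseconds usecNow ) { T::ThinkInternal( this, usecNow ); }
///     };
///     // Wrapper<Derived> w;  w.Think( usecNow );  // runs DoStuff() and DoMoreStuff(), no vtable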
494 struct LinkStatsTrackerBase
495 {
496 
497 
498 	/// What version is the peer running?  It's 0 if we don't know yet.
499 	uint32 m_nPeerProtocolVersion;
500 
501 	/// Ping
502 	PingTrackerDetailed m_ping;
503 
504 	//
505 	// Outgoing stats
506 	//
507 	int64 m_nNextSendSequenceNumber;
508 	PacketRate_t m_sent;
509 	SteamNetworkingMicroseconds m_usecTimeLastSentSeq;
510 
511 	/// Called when we sent a packet, with or without a sequence number
512 	inline void TrackSentPacket( int cbPktSize )
513 	{
514 		m_sent.ProcessPacket( cbPktSize );
515 	}
516 
517 	/// Consume the next sequence number, and record the time at which
518 	/// we sent a sequenced packet.  (Don't call this unless you are sending
519 	/// a sequenced packet.)
520 	inline uint16 ConsumeSendPacketNumberAndGetWireFmt( SteamNetworkingMicroseconds usecNow )
521 	{
522 		m_usecTimeLastSentSeq = usecNow;
523 		return uint16( m_nNextSendSequenceNumber++ );
524 	}
525 
526 	//
527 	// Incoming
528 	//
529 
530 	/// Highest (valid!) packet number we have ever processed
531 	int64 m_nMaxRecvPktNum;
532 
533 	/// Packet and data rate trackers for inbound flow
534 	PacketRate_t m_recv;
535 
536 	// Some additional debugging for sequence number accounting
537 	int64 m_nDebugLastInitMaxRecvPktNum;
538 	int64 m_nDebugPktsRecvInOrder;
539 	int64 m_arDebugHistoryRecvSeqNum[ 8 ];
540 
541 	/// Setup state to expect the next packet to be nPktNum+1,
542 	/// and discard all packets <= nPktNum
543 	void InitMaxRecvPktNum( int64 nPktNum );
544 	void ResetMaxRecvPktNumForIncomingWirePktNum( uint16 nPktNum )
545 	{
546 		InitMaxRecvPktNum( (int64)(uint16)( nPktNum - 1 ) );
547 	}
548 
549 	/// Bitmask of recently received packets, used to reject duplicate packets.
550 	/// (Important for preventing replay attacks.)
551 	///
552 	/// Let B be m_nMaxRecvPktNum & ~63.  (The largest multiple of 64
553 	/// that is <= m_nMaxRecvPktNum.)   Then m_recvPktNumberMask[1] bit n
554 	/// corresponds to B + n.  (Some of these bits may represent packet numbers
555 	/// higher than m_nMaxRecvPktNum.)  m_recvPktNumberMask[0] bit n
556 	/// corresponds to B - 64 + n.
557 	uint64 m_recvPktNumberMask[2];
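	// Worked example (illustrative): suppose m_nMaxRecvPktNum = 200, so B = 192.
	// A packet numbered 197 maps to m_recvPktNumberMask[1], bit 197-192 = 5.
	// A packet numbered 130 maps to m_recvPktNumberMask[0], bit 130-128 = 2.
	// Anything numbered below 128 (i.e. below B-64) is too old to be tracked and is rejected.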
558 
559 	/// Get string describing state of recent packets received.
560 	std::string RecvPktNumStateDebugString() const;
561 
562 	/// Packets that we receive that exceed the rate limit.
563 	/// (We might drop these, or we might just want to be interested in how often it happens.)
564 	PacketRate_t m_recvExceedRateLimit;
565 
566 	/// Time when we last received anything
567 	SteamNetworkingMicroseconds m_usecTimeLastRecv;
568 
569 	/// Time when we last received a sequenced packet
570 	SteamNetworkingMicroseconds m_usecTimeLastRecvSeq;
571 
572 	/// Called when we receive any packet, with or without a sequence number.
573 	/// Does not perform any rate limiting checks
574 	inline void TrackRecvPacket( int cbPktSize, SteamNetworkingMicroseconds usecNow )
575 	{
576 		m_recv.ProcessPacket( cbPktSize );
577 		m_usecTimeLastRecv = usecNow;
578 		m_usecInFlightReplyTimeout = 0;
579 		m_nReplyTimeoutsSinceLastRecv = 0;
580 		m_usecWhenTimeoutStarted = 0;
581 	}
582 
583 	//
584 	// Quality metrics stats
585 	//
586 
587 	// Track instantaneous rate of number of sequence number anomalies
588 	SequencedPacketCounters m_seqPktCounters;
589 
590 	// Instantaneous rates, calculated from most recent completed interval
591 	float m_flInPacketsDroppedPct;
592 	float m_flInPacketsWeirdSequencePct;
593 	int m_usecMaxJitterPreviousInterval;
594 
595 	// Lifetime counters.  The "accumulator" values do not include the current interval -- use the accessors to get those
596 	int64 m_nPktsRecvSequenced;
597 	int64 m_nPktsRecvDroppedAccumulator;
598 	int64 m_nPktsRecvOutOfOrderAccumulator;
599 	int64 m_nPktsRecvDuplicateAccumulator;
600 	int64 m_nPktsRecvLurchAccumulator;
601 	inline int64 PktsRecvDropped() const { return m_nPktsRecvDroppedAccumulator + m_seqPktCounters.m_nDropped; }
602 	inline int64 PktsRecvOutOfOrder() const { return m_nPktsRecvOutOfOrderAccumulator + m_seqPktCounters.m_nOutOfOrder; }
603 	inline int64 PktsRecvDuplicate() const { return m_nPktsRecvDuplicateAccumulator + m_seqPktCounters.m_nDuplicate; }
604 	inline int64 PktsRecvLurch() const { return m_nPktsRecvLurchAccumulator + m_seqPktCounters.m_nLurch; }
605 
606 	/// Lifetime quality statistics
607 	PercentileGenerator<uint8> m_qualitySample;
608 
609 	/// Histogram of quality intervals
610 	QualityHistogram m_qualityHistogram;
611 
612 	// Histogram of incoming latency variance
613 	JitterHistogram m_jitterHistogram;
614 
615 	//
616 	// Misc stats bookkeeping
617 	//
618 
619 	/// Check if it's been long enough since the last time we sent a ping,
620 	/// and we'd like to try to sneak one in if possible.
621 	///
622 	/// Note that in general, tracer pings are the only kind of pings that the relay
623 	/// ever sends.  It assumes that the endpoints will take care of any keepalives,
624 	/// etc that need to happen, and the relay can merely observe this process and take
625 	/// note of the outcome.
626 	///
627 	/// Returns:
628 	/// 0 - Not needed right now
629 	/// 1 - Opportunistic, but don't send by itself
630 	/// 2 - Yes, send one if possible
631 	inline int ReadyToSendTracerPing( SteamNetworkingMicroseconds usecNow ) const
632 	{
633 		if ( m_bPassive )
634 			return 0;
635 		SteamNetworkingMicroseconds usecTimeSince = usecNow - std::max( m_ping.m_usecTimeLastSentPingRequest, m_ping.TimeRecvMostRecentPing() );
636 		if ( usecTimeSince > k_usecLinkStatsMaxPingRequestInterval )
637 			return 2;
638 		if ( usecTimeSince > k_usecLinkStatsMinPingRequestInterval )
639 			return 1;
640 		return 0;
641 	}
642 
643 	/// Check if we appear to be timing out and need to send an "aggressive" ping, meaning send it right
644 	/// now, request that the reply not be delayed, and also request that the relay (if any) confirm its
645 	/// connectivity as well.
646 	inline bool BNeedToSendPingImmediate( SteamNetworkingMicroseconds usecNow ) const
647 	{
648 		return
649 			!m_bPassive
650 			&& m_nReplyTimeoutsSinceLastRecv > 0 // We're timing out
651 			&& m_usecLastSendPacketExpectingImmediateReply+k_usecAggressivePingInterval <= usecNow; // we haven't just recently sent an aggressive ping.
652 	}
653 
654 	/// Check if we should send a keepalive ping.  In this case we haven't heard from the peer in a while,
655 	/// but we don't have any reason to think there are any problems.
656 	inline bool BNeedToSendKeepalive( SteamNetworkingMicroseconds usecNow ) const
657 	{
658 		return
659 			!m_bPassive
660 			&& m_usecInFlightReplyTimeout == 0 // not already tracking some other message for which we expect a reply (and which would confirm that the connection is alive)
661 			&& m_usecTimeLastRecv + k_usecKeepAliveInterval <= usecNow; // haven't heard from the peer recently
662 	}
663 
664 	/// Fill out message with everything we'd like to send.  We don't assume that we will
665 	/// actually send it.  (We might be looking for a good opportunity, and the data we want
666 	/// to send doesn't fit.)
667 	void PopulateMessage( int nNeedFlags, CMsgSteamDatagramConnectionQuality &msg, SteamNetworkingMicroseconds usecNow );
668 	void PopulateLifetimeMessage( CMsgSteamDatagramLinkLifetimeStats &msg );
669 	/// Called when we send any message for which we expect some sort of reply.  (But maybe not an ack.)
670 	void TrackSentMessageExpectingReply( SteamNetworkingMicroseconds usecNow, bool bAllowDelayedReply );
671 
672 	/// Called when we receive stats from remote host
673 	void ProcessMessage( const CMsgSteamDatagramConnectionQuality &msg, SteamNetworkingMicroseconds usecNow );
674 
675 	/// Received from remote host
676 	SteamDatagramLinkInstantaneousStats m_latestRemote;
677 	SteamNetworkingMicroseconds m_usecTimeRecvLatestRemote;
678 	SteamDatagramLinkLifetimeStats m_lifetimeRemote;
679 	SteamNetworkingMicroseconds m_usecTimeRecvLifetimeRemote;
680 
681 	int64 m_pktNumInFlight;
682 	bool m_bInFlightInstantaneous;
683 	bool m_bInFlightLifetime;
684 
685 	/// Time when the current interval started
686 	SteamNetworkingMicroseconds m_usecIntervalStart;
687 
688 	//
689 	// Reply timeout
690 	//
691 
692 	/// If we have a message in flight for which we expect a reply (possibly delayed)
693 	/// and we haven't heard ANYTHING back, then this is the time when we should
694 	/// declare a timeout (and increment m_nReplyTimeoutsSinceLastRecv)
695 	SteamNetworkingMicroseconds m_usecInFlightReplyTimeout;
696 
697 	/// Time when we last sent some sort of packet for which we expect
698 	/// an immediate reply.  m_stats.m_ping and m_usecInFlightReplyTimeout both
699 	/// remember when we send requests that expect replies, but both include
700 	/// ones that we allow the reply to be delayed.  This timestamp only includes
701 	/// ones that we do not allow to be delayed.
702 	SteamNetworkingMicroseconds m_usecLastSendPacketExpectingImmediateReply;
703 
704 	/// Number of consecutive times a reply from this guy has timed out, since
705 	/// the last time we got valid communication from him.  This is reset basically
706 	/// any time we get a packet from the peer.
707 	int m_nReplyTimeoutsSinceLastRecv;
708 
709 	/// Time when the current timeout (if any) was first detected.  This is not
710 	/// the same thing as the time we last heard from them.  For a mostly idle
711 	/// connection, the keepalive interval is relatively sparse, and so we don't
712 	/// know if we didn't hear from them, was it because there was a problem,
713 	/// or just they had nothing to say.  This timestamp measures the time when
714 	/// we expected to hear something but didn't.
715 	SteamNetworkingMicroseconds m_usecWhenTimeoutStarted;
716 
717 	//
718 	// Populate public interface structure
719 	//
720 	void GetLinkStats( SteamDatagramLinkStats &s, SteamNetworkingMicroseconds usecNow ) const;
721 
722 	/// This is the only function we needed to make virtual.  To factor this one
723 	/// out is really awkward, and this isn't called very often anyway.
724 	virtual void GetLifetimeStats( SteamDatagramLinkLifetimeStats &s ) const;
725 
726 	inline void PeerAckedInstantaneous( SteamNetworkingMicroseconds usecNow )
727 	{
728 		m_usecPeerAckedInstaneous = usecNow;
729 		m_nPktsRecvSeqWhenPeerAckInstantaneous = m_nPktsRecvSequenced;
730 		m_nPktsSentWhenPeerAckInstantaneous = m_sent.m_packets.Total();
731 	}
732 	inline void PeerAckedLifetime( SteamNetworkingMicroseconds usecNow )
733 	{
734 		m_usecPeerAckedLifetime = usecNow;
735 		m_nPktsRecvSeqWhenPeerAckLifetime = m_nPktsRecvSequenced;
736 		m_nPktsSentWhenPeerAckLifetime = m_sent.m_packets.Total();
737 	}
738 
739 	void InFlightPktAck( SteamNetworkingMicroseconds usecNow )
740 	{
741 		if ( m_bInFlightInstantaneous )
742 			PeerAckedInstantaneous( usecNow );
743 		if ( m_bInFlightLifetime )
744 			PeerAckedLifetime( usecNow );
745 		m_pktNumInFlight = 0;
746 		m_bInFlightInstantaneous = m_bInFlightLifetime = false;
747 	}
748 
749 	void InFlightPktTimeout()
750 	{
751 		m_pktNumInFlight = 0;
752 		m_bInFlightInstantaneous = m_bInFlightLifetime = false;
753 	}
754 
755 	/// Get urgency level to send instantaneous/lifetime stats.
756 	int GetStatsSendNeed( SteamNetworkingMicroseconds usecNow );
757 
758 	/// Describe this stats tracker, for debugging, asserts, etc
759 	virtual std::string Describe() const = 0;
760 
761 protected:
762 	// Make sure it's used as abstract base.  Note that we require you to call Init()
763 	// with a timestamp value, so the constructor is empty by default.
764 	inline LinkStatsTrackerBase() {}
765 
766 	/// Initialize the stats tracking object
767 	void InitInternal( SteamNetworkingMicroseconds usecNow );
768 
769 	/// Check if it's time to update, and if so, do it.
770 	template <typename TLinkStatsTracker>
771 	inline static void ThinkInternal( TLinkStatsTracker *pThis, SteamNetworkingMicroseconds usecNow )
772 	{
773 		// Check for ending the current QoS interval
774 		if ( !pThis->m_bPassive && pThis->m_usecIntervalStart + k_usecSteamDatagramLinkStatsDefaultInterval < usecNow )
775 		{
776 			pThis->UpdateInterval( usecNow );
777 		}
778 
779 		// Check for reply timeout.
780 		if ( pThis->m_usecInFlightReplyTimeout > 0 && pThis->m_usecInFlightReplyTimeout < usecNow )
781 		{
782 			pThis->InFlightReplyTimeout( usecNow );
783 		}
784 	}
785 
786 	/// Called when m_usecInFlightReplyTimeout is reached.  We intentionally only allow
787 	/// one of this type of timeout to be in flight at a time, so that the max
788 	/// rate that we accumulate them is based on the ping time, instead of the packet
789 	/// rate.
790 	template <typename TLinkStatsTracker>
791 	inline static void InFlightReplyTimeoutInternal( TLinkStatsTracker *pThis, SteamNetworkingMicroseconds usecNow )
792 	{
793 		pThis->m_usecInFlightReplyTimeout = 0;
794 		if ( pThis->m_usecWhenTimeoutStarted == 0 )
795 		{
796 			Assert( pThis->m_nReplyTimeoutsSinceLastRecv == 0 );
797 			pThis->m_usecWhenTimeoutStarted = usecNow;
798 		}
799 		++pThis->m_nReplyTimeoutsSinceLastRecv;
800 	}
801 
802 	void GetInstantaneousStats( SteamDatagramLinkInstantaneousStats &s ) const;
803 
804 	/// Called after we send a packet for which we expect an ack.  Note that we must have consumed the outgoing sequence
805 	/// for that packet (using GetNextSendSequenceNumber), but must *NOT* have consumed any more!
806 	/// This call implies TrackSentPingRequest, since we will be able to match up the ack'd sequence
807 	/// number with the time sent to get a latency estimate.
808 	template <typename TLinkStatsTracker>
809 	inline static void TrackSentMessageExpectingSeqNumAckInternal( TLinkStatsTracker *pThis, SteamNetworkingMicroseconds usecNow, bool bAllowDelayedReply )
810 	{
811 		pThis->TrackSentPingRequest( usecNow, bAllowDelayedReply );
812 	}
813 
814 	/// Are we in "passive" state?  When we are "active", we expect that our peer is awake
815 	/// and will reply to our messages, and that we should be actively sending our peer
816 	/// connection quality statistics and keepalives.  When we are passive, we still measure
817 	/// statistics and can receive messages from the peer, and send acknowledgments as necessary,
818 	/// but we will not indicate that keepalives or stats need to be sent to the peer.
819 	bool m_bPassive;
820 
821 	/// Called to switch the passive state.  (Should only be called on an actual state change.)
822 	void SetPassiveInternal( bool bFlag, SteamNetworkingMicroseconds usecNow );
823 
824 	/// Check if we really need to flush out stats now.  Derived class should provide the reason strings.
825 	/// (See the code.)
826 	const char *InternalGetSendStatsReasonOrUpdateNextThinkTime( SteamNetworkingMicroseconds usecNow, const char *const arpszReasonStrings[4], SteamNetworkingMicroseconds &inOutNextThinkTime );
827 
828 	/// Called when we send a packet for which we expect a reply and
829 	/// for which we expect to get latency info.
830 	/// This implies TrackSentMessageExpectingReply.
831 	template <typename TLinkStatsTracker>
832 	inline static void TrackSentPingRequestInternal( TLinkStatsTracker *pThis, SteamNetworkingMicroseconds usecNow, bool bAllowDelayedReply )
833 	{
834 		pThis->TrackSentMessageExpectingReply( usecNow, bAllowDelayedReply );
835 		pThis->m_ping.m_usecTimeLastSentPingRequest = usecNow;
836 	}
837 
838 	/// Called when we receive a reply from which we are able to calculate latency information
839 	template <typename TLinkStatsTracker>
840 	inline static void ReceivedPingInternal( TLinkStatsTracker *pThis, int nPingMS, SteamNetworkingMicroseconds usecNow )
841 	{
842 		pThis->m_ping.ReceivedPing( nPingMS, usecNow );
843 	}
844 
845 	inline bool BInternalNeedToSendPingImmediate( SteamNetworkingMicroseconds usecNow, SteamNetworkingMicroseconds &inOutNextThinkTime )
846 	{
847 		if ( m_nReplyTimeoutsSinceLastRecv == 0 )
848 			return false;
849 		SteamNetworkingMicroseconds usecUrgentPing = m_usecLastSendPacketExpectingImmediateReply+k_usecAggressivePingInterval;
850 		if ( usecUrgentPing <= usecNow )
851 			return true;
852 		if ( usecUrgentPing < inOutNextThinkTime )
853 			inOutNextThinkTime = usecUrgentPing;
854 		return false;
855 	}
856 
857 	inline bool BInternalNeedToSendKeepAlive( SteamNetworkingMicroseconds usecNow, SteamNetworkingMicroseconds &inOutNextThinkTime )
858 	{
859 		if ( m_usecInFlightReplyTimeout == 0 )
860 		{
861 			SteamNetworkingMicroseconds usecKeepAlive = m_usecTimeLastRecv + k_usecKeepAliveInterval;
862 			if ( usecKeepAlive <= usecNow )
863 				return true;
864 			if ( usecKeepAlive < inOutNextThinkTime )
865 				inOutNextThinkTime = usecKeepAlive;
866 		}
867 		else
868 		{
869 			if ( m_usecInFlightReplyTimeout < inOutNextThinkTime )
870 				inOutNextThinkTime = m_usecInFlightReplyTimeout;
871 		}
872 		return false;
873 	}
874 
875 	// Hooks that derived classes may override when we process a packet
876 	// and it meets certain characteristics
877 	inline void InternalProcessSequencedPacket_Count()
878 	{
879 		m_seqPktCounters.OnRecv();
880 		++m_nPktsRecvSequenced;
881 	}
882 	void InternalProcessSequencedPacket_OutOfOrder( int64 nPktNum );
883 	inline void InternalProcessSequencedPacket_Duplicate()
884 	{
885 		m_seqPktCounters.OnDuplicate();
886 	}
887 	inline void InternalProcessSequencedPacket_Lurch()
888 	{
889 		m_seqPktCounters.OnLurch();
890 	}
891 	inline void InternalProcessSequencedPacket_Dropped( int nDropped )
892 	{
893 		m_seqPktCounters.OnDropped( nDropped );
894 	}
895 
896 private:
897 
898 	// Number of lifetime sequenced packets received, and overall packets sent,
899 	// the last time the peer acked stats
900 	int64 m_nPktsRecvSeqWhenPeerAckInstantaneous;
901 	int64 m_nPktsSentWhenPeerAckInstantaneous;
902 	int64 m_nPktsRecvSeqWhenPeerAckLifetime;
903 	int64 m_nPktsSentWhenPeerAckLifetime;
904 
905 	/// Local time when peer last acknowledged lifetime stats.
906 	SteamNetworkingMicroseconds m_usecPeerAckedLifetime;
907 
908 	/// Local time when peer last acknowledged instantaneous stats.
909 	SteamNetworkingMicroseconds m_usecPeerAckedInstaneous;
910 
911 	bool BCheckHaveDataToSendInstantaneous( SteamNetworkingMicroseconds usecNow );
912 	bool BCheckHaveDataToSendLifetime( SteamNetworkingMicroseconds usecNow );
913 
914 	/// Called to force interval to roll forward now
915 	void UpdateInterval( SteamNetworkingMicroseconds usecNow );
916 
917 	void StartNextInterval( SteamNetworkingMicroseconds usecNow );
918 };
919 
920 struct LinkStatsTrackerEndToEnd : public LinkStatsTrackerBase
921 {
922 
923 	// LinkStatsTrackerBase "overrides"
924 	virtual void GetLifetimeStats( SteamDatagramLinkLifetimeStats &s ) const OVERRIDE;
925 
926 	/// Calculate retry timeout the sender will use
927 	SteamNetworkingMicroseconds CalcSenderRetryTimeout() const
928 	{
929 		if ( m_ping.m_nSmoothedPing < 0 )
930 			return k_nMillion;
931 		// 3 x RTT + max ack delay, plus some slop.
932 		// The ack delay term matters when the receiver hangs on to the ack for
933 		// the max duration and our RTT is very low.
934 		return m_ping.m_nSmoothedPing*3000 + ( k_usecMaxDataAckDelay + 10000 );
935 	}
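	// Worked example (illustrative): with a smoothed ping of 50ms this is
	// 150000 usec + k_usecMaxDataAckDelay + 10000 usec of slop; with no ping
	// estimate yet we fall back to a full second.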
936 
937 	/// Time when the connection entered the connected state
938 	SteamNetworkingMicroseconds m_usecWhenStartedConnectedState;
939 
940 	/// Time when the connection ended
941 	SteamNetworkingMicroseconds m_usecWhenEndedConnectedState;
942 
943 	/// Time when the current speed stats interval started
944 	SteamNetworkingMicroseconds m_usecSpeedIntervalStart;
945 
946 	/// TX Speed, should match CMsgSteamDatagramLinkLifetimeStats
947 	int m_nTXSpeed;
948 	int m_nTXSpeedMax;
949 	PercentileGenerator<int> m_TXSpeedSample;
950 	int m_nTXSpeedHistogram16; // Speed in kb/s
951 	int m_nTXSpeedHistogram32;
952 	int m_nTXSpeedHistogram64;
953 	int m_nTXSpeedHistogram128;
954 	int m_nTXSpeedHistogram256;
955 	int m_nTXSpeedHistogram512;
956 	int m_nTXSpeedHistogram1024;
957 	int m_nTXSpeedHistogramMax;
958 
959 	/// RX Speed, should match CMsgSteamDatagramLinkLifetimeStats
960 	int m_nRXSpeed;
961 	int m_nRXSpeedMax;
962 	PercentileGenerator<int> m_RXSpeedSample;
963 	int m_nRXSpeedHistogram16; // Speed in kb/s
964 	int m_nRXSpeedHistogram32;
965 	int m_nRXSpeedHistogram64;
966 	int m_nRXSpeedHistogram128;
967 	int m_nRXSpeedHistogram256;
968 	int m_nRXSpeedHistogram512;
969 	int m_nRXSpeedHistogram1024;
970 	int m_nRXSpeedHistogramMax;
971 
972 	/// Called when we get a speed sample
973 	void UpdateSpeeds( int nTXSpeed, int nRXSpeed );
974 
975 	/// Do we need to send any stats?
976 	inline const char *GetSendReasonOrUpdateNextThinkTime( SteamNetworkingMicroseconds usecNow, EStatsReplyRequest &eReplyRequested, SteamNetworkingMicroseconds &inOutNextThinkTime )
977 	{
978 		if ( m_bPassive )
979 		{
980 			if ( m_usecInFlightReplyTimeout > 0 && m_usecInFlightReplyTimeout < inOutNextThinkTime )
981 				inOutNextThinkTime = m_usecInFlightReplyTimeout;
982 			eReplyRequested = k_EStatsReplyRequest_NothingToSend;
983 			return nullptr;
984 		}
985 
986 		// Urgent ping?
987 		if ( BInternalNeedToSendPingImmediate( usecNow, inOutNextThinkTime ) )
988 		{
989 			eReplyRequested = k_EStatsReplyRequest_Immediate;
990 			return "E2EUrgentPing";
991 		}
992 
993 		// Keepalive?
994 		if ( BInternalNeedToSendKeepAlive( usecNow, inOutNextThinkTime ) )
995 		{
996 			eReplyRequested = k_EStatsReplyRequest_DelayedOK;
997 			return "E2EKeepAlive";
998 		}
999 
1000 		// Connection stats?
1001 		static const char *arpszReasons[4] =
1002 		{
1003 			nullptr,
1004 			"E2EInstantaneousStats",
1005 			"E2ELifetimeStats",
1006 			"E2EAllStats"
1007 		};
1008 		const char *pszReason = LinkStatsTrackerBase::InternalGetSendStatsReasonOrUpdateNextThinkTime( usecNow, arpszReasons, inOutNextThinkTime );
1009 		if ( pszReason )
1010 		{
1011 			eReplyRequested = k_EStatsReplyRequest_DelayedOK;
1012 			return pszReason;
1013 		}
1014 
1015 		eReplyRequested = k_EStatsReplyRequest_NothingToSend;
1016 		return nullptr;
1017 	}
1018 
1019 	/// Describe this stats tracker, for debugging, asserts, etc
1020 	virtual std::string Describe() const override { return "EndToEnd"; }
1021 
1022 protected:
1023 	void InitInternal( SteamNetworkingMicroseconds usecNow );
1024 
1025 	template <typename TLinkStatsTracker>
1026 	inline static void ThinkInternal( TLinkStatsTracker *pThis, SteamNetworkingMicroseconds usecNow )
1027 	{
1028 		LinkStatsTrackerBase::ThinkInternal( pThis, usecNow );
1029 
1030 		if ( pThis->m_usecSpeedIntervalStart + k_usecSteamDatagramSpeedStatsDefaultInterval < usecNow )
1031 		{
1032 			pThis->UpdateSpeedInterval( usecNow );
1033 		}
1034 	}
1035 
1036 private:
1037 
1038 	void UpdateSpeedInterval( SteamNetworkingMicroseconds usecNow );
1039 	void StartNextSpeedInterval( SteamNetworkingMicroseconds usecNow );
1040 };
1041 
1042 /// The template used to stamp out the concrete ("most derived") link stats tracker types.
1043 /// See the comments on LinkStatsTrackerBase for why this wackiness.
1044 template <typename TLinkStatsTracker>
1045 struct LinkStatsTracker final : public TLinkStatsTracker
1046 {
1047 
1048 	// "Virtual functions" that we are "overriding" at compile time
1049 	// by the template argument
1050 	inline void Init( SteamNetworkingMicroseconds usecNow, bool bStartDisconnected = false )
1051 	{
1052 		TLinkStatsTracker::InitInternal( usecNow );
1053 		TLinkStatsTracker::SetPassiveInternal( bStartDisconnected, usecNow );
1054 	}
1055 	inline void Think( SteamNetworkingMicroseconds usecNow ) { TLinkStatsTracker::ThinkInternal( this, usecNow ); }
1056 	inline void SetPassive( bool bFlag, SteamNetworkingMicroseconds usecNow ) { if ( TLinkStatsTracker::m_bPassive != bFlag ) TLinkStatsTracker::SetPassiveInternal( bFlag, usecNow ); }
1057 	inline bool IsPassive() const { return TLinkStatsTracker::m_bPassive; }
1058 	inline void TrackSentMessageExpectingSeqNumAck( SteamNetworkingMicroseconds usecNow, bool bAllowDelayedReply ) { TLinkStatsTracker::TrackSentMessageExpectingSeqNumAckInternal( this, usecNow, bAllowDelayedReply ); }
1059 	inline void TrackSentPingRequest( SteamNetworkingMicroseconds usecNow, bool bAllowDelayedReply ) { TLinkStatsTracker::TrackSentPingRequestInternal( this, usecNow, bAllowDelayedReply ); }
1060 	inline void ReceivedPing( int nPingMS, SteamNetworkingMicroseconds usecNow ) { TLinkStatsTracker::ReceivedPingInternal( this, nPingMS, usecNow ); }
1061 	inline void InFlightReplyTimeout( SteamNetworkingMicroseconds usecNow ) { TLinkStatsTracker::InFlightReplyTimeoutInternal( this, usecNow ); }
1062 
1063 	/// Called after we actually send connection data.  Note that we must have consumed the outgoing sequence
1064 	/// for that packet (using GetNextSendSequenceNumber), but must *NOT* have consumed any more!
1065 	void TrackSentStats( const CMsgSteamDatagramConnectionQuality &msg, SteamNetworkingMicroseconds usecNow, bool bAllowDelayedReply )
1066 	{
1067 
1068 		// Check if we expect our peer to know how to acknowledge this
1069 		if ( !TLinkStatsTracker::m_bPassive )
1070 		{
1071 			TLinkStatsTracker::m_pktNumInFlight = TLinkStatsTracker::m_nNextSendSequenceNumber-1;
1072 			TLinkStatsTracker::m_bInFlightInstantaneous = msg.has_instantaneous();
1073 			TLinkStatsTracker::m_bInFlightLifetime = msg.has_lifetime();
1074 
1075 			// They should ack.  Make a note of the sequence number that we used,
1076 			// so that we can measure latency when they reply, setup timeout bookkeeping, etc
1077 			TrackSentMessageExpectingSeqNumAck( usecNow, bAllowDelayedReply );
1078 		}
1079 		else
1080 		{
1081 			// Peer can't ack.  Just mark them as acking immediately
1082 			Assert( TLinkStatsTracker::m_pktNumInFlight == 0 );
1083 			TLinkStatsTracker::m_pktNumInFlight = 0;
1084 			TLinkStatsTracker::m_bInFlightInstantaneous = false;
1085 			TLinkStatsTracker::m_bInFlightLifetime = false;
1086 			if ( msg.has_instantaneous() )
1087 				TLinkStatsTracker::PeerAckedInstantaneous( usecNow );
1088 			if ( msg.has_lifetime() )
1089 				TLinkStatsTracker::PeerAckedLifetime( usecNow );
1090 		}
1091 	}
1092 
1093 	inline bool RecvPackedAcks( const google::protobuf::RepeatedField<google::protobuf::uint32> &msgField, SteamNetworkingMicroseconds usecNow )
1094 	{
1095 		bool bResult = true;
1096 		for ( uint32 nPackedAck: msgField )
1097 		{
1098 			if ( !TLinkStatsTracker::RecvPackedAckInternal( this, nPackedAck, usecNow ) )
1099 				bResult = false;
1100 		}
1101 		return bResult;
1102 	}
1103 
1104 	// Shortcut when we know that we aren't going to send now, but we want to know when to wakeup and do so
1105 	inline SteamNetworkingMicroseconds GetNextThinkTime( SteamNetworkingMicroseconds usecNow )
1106 	{
1107 		SteamNetworkingMicroseconds usecNextThink = k_nThinkTime_Never;
1108 		EStatsReplyRequest eReplyRequested;
1109 		if ( TLinkStatsTracker::GetSendReasonOrUpdateNextThinkTime( usecNow, eReplyRequested, usecNextThink ) )
1110 			return k_nThinkTime_ASAP;
1111 		return usecNextThink;
1112 	}
1113 
1114 	/// Called when we receive a packet with a sequence number.
1115 	/// This expands the wire packet number to its full value,
1116 	/// and checks if it is a duplicate or out of range.
1117 	/// Stats are also updated
1118 	int64 ExpandWirePacketNumberAndCheck( uint16 nWireSeqNum )
1119 	{
1120 		int16 nGap = (int16)( nWireSeqNum - (uint16)TLinkStatsTracker::m_nMaxRecvPktNum );
1121 		int64 nPktNum = TLinkStatsTracker::m_nMaxRecvPktNum + nGap;
1122 
1123 		// We've received a packet with a sequence number.
1124 		// Update stats
1125 		constexpr int N = V_ARRAYSIZE(TLinkStatsTracker::m_arDebugHistoryRecvSeqNum);
1126 		COMPILE_TIME_ASSERT( ( N & (N-1) ) == 0 );
1127 		TLinkStatsTracker::m_arDebugHistoryRecvSeqNum[ TLinkStatsTracker::m_nPktsRecvSequenced & (N-1) ] = nPktNum;
1128 		TLinkStatsTracker::InternalProcessSequencedPacket_Count();
1129 
1130 		// Packet number is increasing?
1131 		// (Maybe by a lot -- we don't handle that here.)
1132 		if ( likely( nPktNum > TLinkStatsTracker::m_nMaxRecvPktNum ) )
1133 			return nPktNum;
1134 
1135 		// Which block of 64 packets is it in?
1136 		int64 B = TLinkStatsTracker::m_nMaxRecvPktNum & ~int64{63};
1137 		int64 idxRecvBitmask = ( ( nPktNum - B ) >> 6 ) + 1;
1138 		Assert( idxRecvBitmask < 2 );
1139 		if ( idxRecvBitmask < 0 )
1140 		{
1141 			// Too old (at least 64 packets old, maybe up to 128).
1142 			TLinkStatsTracker::InternalProcessSequencedPacket_Lurch(); // Should we track "very old" under a different stat than "lurch"?
1143 			return 0;
1144 		}
1145 		uint64 bit = uint64{1} << ( nPktNum & 63 );
1146 		if ( TLinkStatsTracker::m_recvPktNumberMask[ idxRecvBitmask ] & bit )
1147 		{
1148 			// Duplicate
1149 			TLinkStatsTracker::InternalProcessSequencedPacket_Duplicate();
1150 			return 0;
1151 		}
1152 
1153 		// We have an out of order packet.  We'll update that
1154 		// stat in TrackProcessSequencedPacket
1155 		Assert( nPktNum > 0 && nPktNum < TLinkStatsTracker::m_nMaxRecvPktNum );
1156 		return nPktNum;
1157 	}
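	// Worked example (illustrative): if m_nMaxRecvPktNum is 66000 (0x101D0), its low 16 bits
	// are 0x01D0.  An incoming wire value of 0x01D2 gives a gap of +2 and expands to 66002,
	// while a wire value of 0x01CE gives a gap of -2 and expands to 65998.  Because the gap is
	// computed as a signed 16-bit difference, the expansion tolerates wraparound as long as the
	// true packet number is within ~32K of the newest one we have seen.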
1158 
1159 	/// Same as ExpandWirePacketNumberAndCheck, but if this is the first sequenced
1160 	/// packet we have ever received, initialize the packet number
1161 	int64 ExpandWirePacketNumberAndCheckMaybeInitialize( uint16 nWireSeqNum )
1162 	{
1163 		if ( unlikely( TLinkStatsTracker::m_nMaxRecvPktNum == 0 ) )
1164 			TLinkStatsTracker::ResetMaxRecvPktNumForIncomingWirePktNum( nWireSeqNum );
1165 		return ExpandWirePacketNumberAndCheck( nWireSeqNum );
1166 	}
1167 
1168 	/// Called when we have processed a packet with a sequence number, to update estimated
1169 	/// number of dropped packets, etc.  This MUST only be called after we have
1170 	/// called ExpandWirePacketNumberAndCheck, to ensure that the packet number is not a
1171 	/// duplicate or out of range.
1172 	inline void TrackProcessSequencedPacket( int64 nPktNum, SteamNetworkingMicroseconds usecNow, int usecSenderTimeSincePrev )
1173 	{
1174 		Assert( nPktNum > 0 );
1175 
1176 		// Update bitfield of received packets
1177 		int64 B = TLinkStatsTracker::m_nMaxRecvPktNum & ~int64{63};
1178 		int64 idxRecvBitmask = ( ( nPktNum - B ) >> 6 ) + 1;
1179 		Assert( idxRecvBitmask >= 0 ); // We should have discarded very old packets already
1180 		if ( idxRecvBitmask >= 2 ) // Most common case is 0 or 1
1181 		{
1182 			if ( idxRecvBitmask == 2 )
1183 			{
1184 				// Crossed to the next 64-packet block.  Shift bitmasks forward by one.
1185 				TLinkStatsTracker::m_recvPktNumberMask[0] = TLinkStatsTracker::m_recvPktNumberMask[1];
1186 			}
1187 			else
1188 			{
1189 				// Large packet number jump, we skipped a whole block
1190 				TLinkStatsTracker::m_recvPktNumberMask[0] = 0;
1191 			}
1192 			TLinkStatsTracker::m_recvPktNumberMask[1] = 0;
1193 			idxRecvBitmask = 1;
1194 		}
1195 		uint64 bit = uint64{1} << ( nPktNum & 63 );
1196 		Assert( !( TLinkStatsTracker::m_recvPktNumberMask[ idxRecvBitmask ] & bit ) ); // Should not have already been marked!  We should have already discarded duplicates
1197 		TLinkStatsTracker::m_recvPktNumberMask[ idxRecvBitmask ] |= bit;
1198 
1199 		// Check for dropped packet.  Since we hope that by far the most common
1200 		// case will be packets delivered in order, we optimize this logic
1201 		// for that case.
1202 		int64 nGap = nPktNum - TLinkStatsTracker::m_nMaxRecvPktNum;
1203 		if ( likely( nGap == 1 ) )
1204 		{
1205 			++TLinkStatsTracker::m_nDebugPktsRecvInOrder;
1206 
1207 			// We've received two packets, in order.  Did the sender supply the time between packets on his side?
1208 			if ( usecSenderTimeSincePrev > 0 )
1209 			{
1210 				int usecJitter = ( usecNow - TLinkStatsTracker::m_usecTimeLastRecvSeq ) - usecSenderTimeSincePrev;
1211 				usecJitter = abs( usecJitter );
1212 				if ( usecJitter < k_usecTimeSinceLastPacketMaxReasonable )
1213 				{
1214 
1215 					// Update max jitter for current interval
1216 					TLinkStatsTracker::m_seqPktCounters.m_usecMaxJitter = std::max( TLinkStatsTracker::m_seqPktCounters.m_usecMaxJitter, usecJitter );
1217 					TLinkStatsTracker::m_jitterHistogram.AddSample( usecJitter );
1218 				}
1219 				else
1220 				{
1221 					// Something is really, really off.  Discard measurement
1222 				}
1223 			}
1224 
1225 		}
1226 		else if ( unlikely( nGap <= 0 ) )
1227 		{
1228 			// Packet number moving backward
1229 			// We should have already rejected duplicates
1230 			Assert( nGap != 0 );
1231 
1232 			// Packet number moving in reverse.
1233 			// It should be a *small* negative step, e.g. packets delivered out of order.
1234 			// If the packet is really old, we should have already discarded it earlier.
1235 			Assert( nGap >= -8 * (int64)sizeof(TLinkStatsTracker::m_recvPktNumberMask) );
1236 
1237 			// out of order
1238 			TLinkStatsTracker::InternalProcessSequencedPacket_OutOfOrder( nPktNum );
1239 			return;
1240 		}
1241 		else
1242 		{
1243 			// Packet number moving forward, i.e. a dropped packet
1244 			// Large gap?
1245 			if ( unlikely( nGap >= 100 ) )
1246 			{
1247 				// Very weird.
1248 				TLinkStatsTracker::InternalProcessSequencedPacket_Lurch();
1249 
1250 				// Reset the sequence number for packets going forward.
1251 				TLinkStatsTracker::InitMaxRecvPktNum( nPktNum );
1252 				return;
1253 			}
1254 
1255 			// Probably the most common case (after a perfect packet stream), we just dropped a packet or two
1256 			TLinkStatsTracker::InternalProcessSequencedPacket_Dropped( nGap-1 );
1257 		}
1258 
1259 		// Save highest known sequence number for next time.
1260 		TLinkStatsTracker::m_nMaxRecvPktNum = nPktNum;
1261 		TLinkStatsTracker::m_usecTimeLastRecvSeq = usecNow;
1262 	}
1263 };
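// Usage sketch (illustrative; the connection classes elsewhere in the library do the real
// wiring, including rate limiting and crypto checks).  cbPktSize, usecNow, and
// usecSenderTimeSincePrev are caller-supplied values here.
//
//     LinkStatsTracker<LinkStatsTrackerEndToEnd> stats;
//     stats.Init( usecNow );
//
//     // Sending a sequenced packet:
//     uint16 nWireSeqNum = stats.ConsumeSendPacketNumberAndGetWireFmt( usecNow );
//     stats.TrackSentPacket( cbPktSize );
//
//     // Receiving a packet that carries a sequence number:
//     stats.TrackRecvPacket( cbPktSize, usecNow );
//     int64 nPktNum = stats.ExpandWirePacketNumberAndCheckMaybeInitialize( nWireSeqNum );
//     if ( nPktNum > 0 )
//         stats.TrackProcessSequencedPacket( nPktNum, usecNow, usecSenderTimeSincePrev );
//
//     // Periodically:
//     stats.Think( usecNow );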
1264 
1265 
1266 //
1267 // Pack/unpack C struct <-> protobuf message
1268 //
1269 extern void LinkStatsInstantaneousStructToMsg( const SteamDatagramLinkInstantaneousStats &s, CMsgSteamDatagramLinkInstantaneousStats &msg );
1270 extern void LinkStatsInstantaneousMsgToStruct( const CMsgSteamDatagramLinkInstantaneousStats &msg, SteamDatagramLinkInstantaneousStats &s );
1271 extern void LinkStatsLifetimeStructToMsg( const SteamDatagramLinkLifetimeStats &s, CMsgSteamDatagramLinkLifetimeStats &msg );
1272 extern void LinkStatsLifetimeMsgToStruct( const CMsgSteamDatagramLinkLifetimeStats &msg, SteamDatagramLinkLifetimeStats &s );
1273 
1274 } // namespace SteamNetworkingSocketsLib
1275 
1276 #endif // STEAMNETWORKING_STATSUTILS_H
1277