/* GStreamer
 * Copyright (C) <2007> Wim Taymans <wim.taymans@gmail.com>
 * Copyright (C)  2015 Kurento (http://kurento.org/)
 *   @author: Miguel París <mparisdiaz@gmail.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Library General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Library General Public License for more details.
 *
 * You should have received a copy of the GNU Library General Public
 * License along with this library; if not, write to the
 * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
 * Boston, MA 02110-1301, USA.
 */

#include "rtpstats.h"

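/**
 * gst_rtp_packet_rate_ctx_reset:
 * @ctx: an #RTPPacketRateCtx
 * @clock_rate: the RTP clock rate of the stream, or <= 0 if not yet known
 *
 * Reset @ctx so that packet rate probing starts again from scratch with the
 * given @clock_rate.
 */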
void
gst_rtp_packet_rate_ctx_reset (RTPPacketRateCtx * ctx, gint32 clock_rate)
{
  ctx->clock_rate = clock_rate;
  ctx->probed = FALSE;
  ctx->avg_packet_rate = -1;
  ctx->last_ts = -1;
}

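/**
 * gst_rtp_packet_rate_ctx_update:
 * @ctx: an #RTPPacketRateCtx
 * @seqnum: the RTP sequence number of the new packet
 * @ts: the RTP timestamp of the new packet
 *
 * Update the running packet rate estimate with a newly received packet. The
 * RTP timestamp is extended to cope with wraparounds, and packets that go
 * backwards in sequence number or timestamp leave the estimate untouched.
 *
 * Returns: the current average packet rate, in packets per second.
 */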
guint32
gst_rtp_packet_rate_ctx_update (RTPPacketRateCtx * ctx, guint16 seqnum,
    guint32 ts)
{
  guint64 new_ts, diff_ts;
  gint diff_seqnum;
  gint32 new_packet_rate;

  if (ctx->clock_rate <= 0) {
    return ctx->avg_packet_rate;
  }

  new_ts = ctx->last_ts;
  gst_rtp_buffer_ext_timestamp (&new_ts, ts);

  if (!ctx->probed) {
    ctx->last_seqnum = seqnum;
    ctx->last_ts = new_ts;
    ctx->probed = TRUE;
    return ctx->avg_packet_rate;
  }

  diff_seqnum = gst_rtp_buffer_compare_seqnum (ctx->last_seqnum, seqnum);
  if (diff_seqnum <= 0 || new_ts <= ctx->last_ts) {
    return ctx->avg_packet_rate;
  }

  diff_ts = new_ts - ctx->last_ts;
  diff_ts = gst_util_uint64_scale_int (diff_ts, GST_SECOND, ctx->clock_rate);
  new_packet_rate = gst_util_uint64_scale (diff_seqnum, GST_SECOND, diff_ts);

  /* The goal is that higher packet rates "win":
   * if there is a sudden burst, the average goes up quickly,
   * but it only comes back down slowly.
   * This is useful for bursty traffic, where many packets arrive close
   * together and a larger reorder/dropout window should be allowed.
   * The new average is rounded up.
   */
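  /* For example, with an average of 50 packets/s, a burst measured at
   * 200 packets/s lifts the average to (50 + 200 + 1) / 2 = 125 in one
   * step, while a later measurement of 50 packets/s only lowers it to
   * (7 * 125 + 50 + 7) / 8 = 116. */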
  if (ctx->avg_packet_rate > new_packet_rate) {
    ctx->avg_packet_rate = (7 * ctx->avg_packet_rate + new_packet_rate + 7) / 8;
  } else {
    ctx->avg_packet_rate = (ctx->avg_packet_rate + new_packet_rate + 1) / 2;
  }

  ctx->last_seqnum = seqnum;
  ctx->last_ts = new_ts;

  return ctx->avg_packet_rate;
}

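/**
 * gst_rtp_packet_rate_ctx_get:
 * @ctx: an #RTPPacketRateCtx
 *
 * Get the current average packet rate without updating the estimate.
 *
 * Returns: the average packet rate, in packets per second.
 */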
guint32
gst_rtp_packet_rate_ctx_get (RTPPacketRateCtx * ctx)
{
  return ctx->avg_packet_rate;
}

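/**
 * gst_rtp_packet_rate_ctx_get_max_dropout:
 * @ctx: an #RTPPacketRateCtx
 * @time_ms: a time span in milliseconds
 *
 * Calculate the maximum number of packets expected within @time_ms at the
 * current average packet rate; for example, at 100 packets/s and a @time_ms
 * of 3000 this is 300 packets. If @time_ms is not positive or no packet has
 * been seen yet, RTP_DEF_DROPOUT is returned; the result is never smaller
 * than RTP_MIN_DROPOUT.
 *
 * Returns: the maximum dropout.
 */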
guint32
gst_rtp_packet_rate_ctx_get_max_dropout (RTPPacketRateCtx * ctx, gint32 time_ms)
{
  if (time_ms <= 0 || !ctx->probed) {
    return RTP_DEF_DROPOUT;
  }

  return MAX (RTP_MIN_DROPOUT, ctx->avg_packet_rate * time_ms / 1000);
}

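/**
 * gst_rtp_packet_rate_ctx_get_max_misorder:
 * @ctx: an #RTPPacketRateCtx
 * @time_ms: a time span in milliseconds
 *
 * Calculate the maximum number of packets expected within @time_ms at the
 * current average packet rate, used as the misorder threshold. If @time_ms
 * is not positive or no packet has been seen yet, RTP_DEF_MISORDER is
 * returned; the result is never smaller than RTP_MIN_MISORDER.
 *
 * Returns: the maximum misorder.
 */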
guint32
gst_rtp_packet_rate_ctx_get_max_misorder (RTPPacketRateCtx * ctx,
    gint32 time_ms)
{
  if (time_ms <= 0 || !ctx->probed) {
    return RTP_DEF_MISORDER;
  }

  return MAX (RTP_MIN_MISORDER, ctx->avg_packet_rate * time_ms / 1000);
}

/**
 * rtp_stats_init_defaults:
 * @stats: an #RTPSessionStats struct
 *
 * Initialize @stats with its default values.
 */
void
rtp_stats_init_defaults (RTPSessionStats * stats)
{
  rtp_stats_set_bandwidths (stats, -1, -1, -1, -1);
  stats->min_interval = RTP_STATS_MIN_INTERVAL;
  stats->bye_timeout = RTP_STATS_BYE_TIMEOUT;
  stats->nacks_dropped = 0;
  stats->nacks_sent = 0;
  stats->nacks_received = 0;
}

/**
 * rtp_stats_set_bandwidths:
 * @stats: an #RTPSessionStats struct
 * @rtp_bw: RTP bandwidth
 * @rtcp_bw: RTCP bandwidth
 * @rs: sender RTCP bandwidth
 * @rr: receiver RTCP bandwidth
 *
 * Configure the bandwidth parameters in the stats. When an input variable is
 * set to -1, it will be calculated from the other input variables and from the
 * defaults.
 */
void
rtp_stats_set_bandwidths (RTPSessionStats * stats, guint rtp_bw,
    gdouble rtcp_bw, guint rs, guint rr)
{
  GST_DEBUG ("recalc bandwidths: RTP %u, RTCP %f, RS %u, RR %u", rtp_bw,
      rtcp_bw, rs, rr);

  /* when given, the sender and receiver bandwidths add up to the total
   * RTCP bandwidth */
  if (rs != -1 && rr != -1)
    rtcp_bw = rs + rr;

  /* If rtcp_bw is between 0 and 1, it is a fraction of rtp_bw */
  if (rtcp_bw > 0.0 && rtcp_bw < 1.0) {
    if (rtp_bw > 0.0)
      rtcp_bw = rtp_bw * rtcp_bw;
    else
      rtcp_bw = -1.0;
  }

  /* RTCP is 5% of the RTP bandwidth */
  if (rtp_bw == -1 && rtcp_bw > 1.0)
    rtp_bw = rtcp_bw * 20;
  else if (rtp_bw != -1 && rtcp_bw < 0.0)
    rtcp_bw = rtp_bw / 20;
  else if (rtp_bw == -1 && rtcp_bw < 0.0) {
    /* nothing given, take defaults */
    rtp_bw = RTP_STATS_BANDWIDTH;
    rtcp_bw = rtp_bw * RTP_STATS_RTCP_FRACTION;
  }

  stats->bandwidth = rtp_bw;
  stats->rtcp_bandwidth = rtcp_bw;

  /* now figure out the fractions */
  if (rs == -1) {
    /* rs unknown */
    if (rr == -1) {
      /* both not given, use defaults */
      rs = stats->rtcp_bandwidth * RTP_STATS_SENDER_FRACTION;
      rr = stats->rtcp_bandwidth * RTP_STATS_RECEIVER_FRACTION;
    } else {
      /* rr known, calculate rs */
      if (stats->rtcp_bandwidth > rr)
        rs = stats->rtcp_bandwidth - rr;
      else
        rs = 0;
    }
  } else if (rr == -1) {
    /* rs known, calculate rr */
    if (stats->rtcp_bandwidth > rs)
      rr = stats->rtcp_bandwidth - rs;
    else
      rr = 0;
  }

  if (stats->rtcp_bandwidth > 0) {
    stats->sender_fraction = ((gdouble) rs) / ((gdouble) stats->rtcp_bandwidth);
    stats->receiver_fraction = 1.0 - stats->sender_fraction;
  } else {
    /* no RTCP bandwidth, set dummy values */
    stats->sender_fraction = 0.0;
    stats->receiver_fraction = 0.0;
  }
  GST_DEBUG ("bandwidths: RTP %u, RTCP %u, RS %f, RR %f", stats->bandwidth,
      stats->rtcp_bandwidth, stats->sender_fraction, stats->receiver_fraction);
}

/**
 * rtp_stats_calculate_rtcp_interval:
 * @stats: an #RTPSessionStats struct
 * @we_send: if we are a sender
 * @profile: RTP profile of this session
 * @ptp: if this session is a point-to-point session
 * @first: if this is the first time
 *
 * Calculate the RTCP interval. The result of this function is the amount of
 * time to wait (in nanoseconds) before sending a new RTCP message.
 *
 * Returns: the RTCP interval.
 */
GstClockTime
rtp_stats_calculate_rtcp_interval (RTPSessionStats * stats, gboolean we_send,
    GstRTPProfile profile, gboolean ptp, gboolean first)
{
  gdouble members, senders, n;
  gdouble avg_rtcp_size, rtcp_bw;
  gdouble interval;
  gdouble rtcp_min_time;

  if (profile == GST_RTP_PROFILE_AVPF || profile == GST_RTP_PROFILE_SAVPF) {
    /* RFC 4585 3.4d), 3.5.1 */

    if (first && !ptp)
      rtcp_min_time = 1.0;
    else
      rtcp_min_time = 0.0;
  } else {
    /* Very first call at application start-up uses half the min
     * delay for quicker notification while still allowing some time
     * before reporting for randomization and to learn about other
     * sources so the report interval will converge to the correct
     * interval more quickly.
     */
    rtcp_min_time = stats->min_interval;
    if (first)
      rtcp_min_time /= 2.0;
  }

  /* Dedicate a fraction of the RTCP bandwidth to senders unless
   * the number of senders is large enough that their share is
   * more than that fraction.
   */
  n = members = stats->active_sources;
  senders = (gdouble) stats->sender_sources;
  rtcp_bw = stats->rtcp_bandwidth;

  if (senders <= members * stats->sender_fraction) {
    if (we_send) {
      rtcp_bw *= stats->sender_fraction;
      n = senders;
    } else {
      rtcp_bw *= stats->receiver_fraction;
      n -= senders;
    }
  }

  /* no bandwidth for RTCP, return NONE to signal that we don't want to send
   * RTCP packets */
  if (rtcp_bw <= 0.0001)
    return GST_CLOCK_TIME_NONE;

  avg_rtcp_size = 8.0 * stats->avg_rtcp_packet_size;
  /*
   * The effective number of sites times the average packet size is
   * the total number of octets sent when each site sends a report.
   * Dividing this by the effective bandwidth gives the time
   * interval over which those packets must be sent in order to
   * meet the bandwidth target, with a minimum enforced.  In that
   * time interval we send one report so this time is also our
   * average time between reports.
   */
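  /* For example, with an average compound RTCP packet of 100 bytes
   * (800 bits), 10 reporting members and 2500 bps of RTCP bandwidth,
   * the interval works out to 800 * 10 / 2500 = 3.2 seconds. */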
  GST_DEBUG ("avg size %f, n %f, rtcp_bw %f", avg_rtcp_size, n, rtcp_bw);
  interval = avg_rtcp_size * n / rtcp_bw;
  if (interval < rtcp_min_time)
    interval = rtcp_min_time;

  return interval * GST_SECOND;
}

/**
 * rtp_stats_add_rtcp_jitter:
 * @stats: an #RTPSessionStats struct
 * @interval: an RTCP interval
 *
 * Apply a random jitter to the @interval. @interval is typically obtained with
 * rtp_stats_calculate_rtcp_interval().
 *
 * Returns: the new RTCP interval.
 */
GstClockTime
rtp_stats_add_rtcp_jitter (RTPSessionStats * stats, GstClockTime interval)
{
  gdouble temp;

  /* see RFC 3550 p 30
   * To compensate for "unconditional reconsideration" converging to a
   * value below the intended average.
   */
#define COMPENSATION  (2.71828 - 1.5)

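  /* Drawing the factor uniformly from [0.5, 1.5] and dividing by
   * (e - 3/2) shortens the interval to roughly 0.82 of its nominal value
   * on average, so reconsideration does not settle below the intended
   * RTCP bandwidth. */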
  temp = (interval * g_random_double_range (0.5, 1.5)) / COMPENSATION;

  return (GstClockTime) temp;
}


/**
 * rtp_stats_calculate_bye_interval:
 * @stats: an #RTPSessionStats struct
 *
 * Calculate the BYE interval. The result of this function is the amount of
 * time to wait (in nanoseconds) before sending a BYE message.
 *
 * Returns: the BYE interval.
 */
GstClockTime
rtp_stats_calculate_bye_interval (RTPSessionStats * stats)
{
  gdouble members;
  gdouble avg_rtcp_size, rtcp_bw;
  gdouble interval;
  gdouble rtcp_min_time;

  /* no interval when there are fewer than 50 members */
  if (stats->active_sources < 50)
    return 0;

  rtcp_min_time = (stats->min_interval) / 2.0;

  /* BYE reconsideration treats the members that want to send a BYE as
   * receivers, so only the receiver fraction of the RTCP bandwidth is
   * used here.
   */
  members = stats->bye_members;
  rtcp_bw = stats->rtcp_bandwidth * stats->receiver_fraction;

  /* no bandwidth for RTCP, return NONE to signal that we don't want to send
   * RTCP packets */
  if (rtcp_bw <= 0.0001)
    return GST_CLOCK_TIME_NONE;

  avg_rtcp_size = 8.0 * stats->avg_rtcp_packet_size;
  /*
   * The effective number of sites times the average packet size is
   * the total number of octets sent when each site sends a report.
   * Dividing this by the effective bandwidth gives the time
   * interval over which those packets must be sent in order to
   * meet the bandwidth target, with a minimum enforced.  In that
   * time interval we send one report so this time is also our
   * average time between reports.
   */
  interval = avg_rtcp_size * members / rtcp_bw;
  if (interval < rtcp_min_time)
    interval = rtcp_min_time;

  return interval * GST_SECOND;
}

/**
 * rtp_stats_get_packets_lost:
 * @stats: an #RTPSourceStats struct
 *
 * Calculate the total number of RTP packets lost since beginning of
 * reception. Packets that arrive late are not considered lost, and
 * duplicates are not taken into account. Hence, the loss may be negative
 * if there are duplicates.
 *
 * Returns: total RTP packets lost.
 */
gint64
rtp_stats_get_packets_lost (const RTPSourceStats * stats)
{
  gint64 lost;
  guint64 extended_max, expected;

  extended_max = stats->cycles + stats->max_seq;
  expected = extended_max - stats->base_seq + 1;
  lost = expected - stats->packets_received;

  return lost;
}

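/**
 * rtp_stats_set_min_interval:
 * @stats: an #RTPSessionStats struct
 * @min_interval: the new minimum RTCP interval in seconds
 *
 * Set the minimum RTCP interval used when calculating the report interval.
 */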
void
rtp_stats_set_min_interval (RTPSessionStats * stats, gdouble min_interval)
{
  stats->min_interval = min_interval;
}

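/* Compare two socket addresses, which are assumed to both be
 * GInetSocketAddress, and return TRUE when their IP address and port are
 * equal. */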
gboolean
__g_socket_address_equal (GSocketAddress * a, GSocketAddress * b)
{
  GInetSocketAddress *ia, *ib;
  GInetAddress *iaa, *iab;

  ia = G_INET_SOCKET_ADDRESS (a);
  ib = G_INET_SOCKET_ADDRESS (b);

  if (g_inet_socket_address_get_port (ia) !=
      g_inet_socket_address_get_port (ib))
    return FALSE;

  iaa = g_inet_socket_address_get_address (ia);
  iab = g_inet_socket_address_get_address (ib);

  return g_inet_address_equal (iaa, iab);
}

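/* Return a newly allocated "address:port" string for @addr, which is assumed
 * to be a GInetSocketAddress; free the result with g_free(). */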
gchar *
__g_socket_address_to_string (GSocketAddress * addr)
{
  GInetSocketAddress *ia;
  gchar *ret, *tmp;

  ia = G_INET_SOCKET_ADDRESS (addr);

  tmp = g_inet_address_to_string (g_inet_socket_address_get_address (ia));
  ret = g_strdup_printf ("%s:%u", tmp, g_inet_socket_address_get_port (ia));
  g_free (tmp);

  return ret;
}