/*
 * Copyright (c) 2019 Fastly, Janardhan Iyengar
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include <assert.h> /* assert() */
#include <string.h> /* memset() */
#include "quicly/cc.h"
#include "quicly.h"

/* TODO: Avoid increase if sender was application limited. */
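/* on_acked callback: while in slow start, cwnd grows by the number of newly acked bytes (roughly doubling per RTT);
 * in congestion avoidance it grows by one max-size UDP payload for every cwnd's worth of bytes acked. Acks for
 * packets inside the current recovery window do not grow the window. */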
static void reno_on_acked(quicly_cc_t *cc, const quicly_loss_t *loss, uint32_t bytes, uint64_t largest_acked, uint32_t inflight,
                          uint64_t next_pn, int64_t now, uint32_t max_udp_payload_size)
{
    assert(inflight >= bytes);
    /* Do not increase congestion window while in recovery. */
    if (largest_acked < cc->recovery_end)
        return;

    /* Slow start. */
    if (cc->cwnd < cc->ssthresh) {
        cc->cwnd += bytes;
        if (cc->cwnd_maximum < cc->cwnd)
            cc->cwnd_maximum = cc->cwnd;
        return;
    }
    /* Congestion avoidance. */
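    /* Newly acked bytes accumulate in `stash` across calls, so that acks smaller than one cwnd still add up to window growth. */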
    cc->state.reno.stash += bytes;
    if (cc->state.reno.stash < cc->cwnd)
        return;
    /* Increase congestion window by 1 MSS per congestion window acked. */
    uint32_t count = cc->state.reno.stash / cc->cwnd;
    cc->state.reno.stash -= count * cc->cwnd;
    cc->cwnd += count * max_udp_payload_size;
    if (cc->cwnd_maximum < cc->cwnd)
        cc->cwnd_maximum = cc->cwnd;
}

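/* on_lost callback: invoked when a packet is newly declared lost. Only the first loss of a recovery episode reduces
 * the window: cwnd is multiplied by QUICLY_RENO_BETA (clamped to the minimum window), ssthresh is set to the new
 * cwnd, and the recovery window is extended to the next packet number to be sent. */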
void quicly_cc_reno_on_lost(quicly_cc_t *cc, const quicly_loss_t *loss, uint32_t bytes, uint64_t lost_pn, uint64_t next_pn,
                            int64_t now, uint32_t max_udp_payload_size)
{
    /* Nothing to do if loss is in recovery window. */
    if (lost_pn < cc->recovery_end)
        return;
    cc->recovery_end = next_pn;

    ++cc->num_loss_episodes;
    if (cc->cwnd_exiting_slow_start == 0)
        cc->cwnd_exiting_slow_start = cc->cwnd;

    /* Reduce congestion window. */
    cc->cwnd *= QUICLY_RENO_BETA;
    if (cc->cwnd < QUICLY_MIN_CWND * max_udp_payload_size)
        cc->cwnd = QUICLY_MIN_CWND * max_udp_payload_size;
    cc->ssthresh = cc->cwnd;

    if (cc->cwnd_minimum > cc->cwnd)
        cc->cwnd_minimum = cc->cwnd;
}

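/* on_persistent_congestion callback: RFC 9002 (Section 7.6) has the sender collapse its congestion window to the
 * minimum when persistent congestion is declared; this implementation currently leaves the callback as a no-op. */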
void quicly_cc_reno_on_persistent_congestion(quicly_cc_t *cc, const quicly_loss_t *loss, int64_t now)
{
    /* TODO */
}

void quicly_cc_reno_on_sent(quicly_cc_t *cc, const quicly_loss_t *loss, uint32_t bytes, int64_t now)
{
    /* Unused */
}

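/* Resets the controller to its initial state: cwnd starts at the initial window, ssthresh is effectively unlimited
 * (so the connection starts in slow start), and cwnd_minimum starts at UINT32_MAX so it can track the smallest
 * window observed later on. */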
static void reno_reset(quicly_cc_t *cc, uint32_t initcwnd)
{
    memset(cc, 0, sizeof(quicly_cc_t));
    cc->type = &quicly_cc_type_reno;
    cc->cwnd = cc->cwnd_initial = cc->cwnd_maximum = initcwnd;
    cc->ssthresh = cc->cwnd_minimum = UINT32_MAX;
}

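/* on_switch callback: converts state left behind by another controller. Pico's byte stash maps directly onto Reno's;
 * Cubic state is reused as-is only while still in slow start, otherwise Reno is restarted from the initial window.
 * Returns 1 on success, 0 if the previous controller is not recognized. */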
static int reno_on_switch(quicly_cc_t *cc)
{
    if (cc->type == &quicly_cc_type_reno) {
        return 1; /* nothing to do */
    } else if (cc->type == &quicly_cc_type_pico) {
        cc->type = &quicly_cc_type_reno;
        cc->state.reno.stash = cc->state.pico.stash;
        return 1;
    } else if (cc->type == &quicly_cc_type_cubic) {
        /* When in slow start, state can be reused as-is; otherwise, restart. */
        if (cc->cwnd_exiting_slow_start == 0) {
            cc->type = &quicly_cc_type_reno;
        } else {
            reno_reset(cc, cc->cwnd_initial);
        }
        return 1;
    }

    return 0;
}

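/* init callback: resets the congestion controller to Reno's initial state using the initial window supplied by the caller. */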
static void reno_init(quicly_init_cc_t *self, quicly_cc_t *cc, uint32_t initcwnd, int64_t now)
{
    reno_reset(cc, initcwnd);
}

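/* The Reno callback table, the initializer object that sets a connection up to use it, and the NULL-terminated list
 * of all congestion controller types built into quicly. */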
quicly_cc_type_t quicly_cc_type_reno = {"reno",
                                        &quicly_cc_reno_init,
                                        reno_on_acked,
                                        quicly_cc_reno_on_lost,
                                        quicly_cc_reno_on_persistent_congestion,
                                        quicly_cc_reno_on_sent,
                                        reno_on_switch};
quicly_init_cc_t quicly_cc_reno_init = {reno_init};

quicly_cc_type_t *quicly_cc_all_types[] = {&quicly_cc_type_reno, &quicly_cc_type_cubic, &quicly_cc_type_pico, NULL};

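/* Computes the initial congestion window in bytes as max_packets * max_udp_payload_size, with the packet count
 * floored at QUICLY_MIN_CWND and the payload size capped at 1472 bytes (the largest UDP payload that fits in a
 * 1500-byte Ethernet MTU alongside IPv4 and UDP headers). For example, 10 packets with a 1472-byte payload cap
 * yields 10 * 1472 = 14,720 bytes. */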
uint32_t quicly_cc_calc_initial_cwnd(uint32_t max_packets, uint16_t max_udp_payload_size)
{
    static const uint32_t mtu_max = 1472;

    /* apply filters to the two arguments */
    if (max_packets < QUICLY_MIN_CWND)
        max_packets = QUICLY_MIN_CWND;
    if (max_udp_payload_size > mtu_max)
        max_udp_payload_size = mtu_max;

    return max_packets * max_udp_payload_size;
}