1 /* -*- Mode:C++; c-file-style:"gnu"; indent-tabs-mode:nil; -*- */
2 /*
3 * Copyright (c) 2015 Natale Patriciello <natale.patriciello@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation;
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 *
18 */
19 #include "tcp-congestion-ops.h"
20 #include "ns3/log.h"
21
22 namespace ns3 {
23
24 NS_LOG_COMPONENT_DEFINE ("TcpCongestionOps");
25
26 NS_OBJECT_ENSURE_REGISTERED (TcpCongestionOps);
27
// Register the abstract base type with the ns-3 TypeId system.
// No AddConstructor: this class is abstract and never instantiated directly.
TypeId
TcpCongestionOps::GetTypeId (void)
{
  static TypeId tid = TypeId ("ns3::TcpCongestionOps")
    .SetParent<Object> ()
    .SetGroupName ("Internet")
  ;
  return tid;
}
37
// Default constructor: no state of its own, just initializes the Object base.
TcpCongestionOps::TcpCongestionOps () : Object ()
{
}
41
// Copy constructor: used by Fork()/CopyObject in derived classes;
// delegates to the Object copy constructor since there is no local state.
TcpCongestionOps::TcpCongestionOps (const TcpCongestionOps &other) : Object (other)
{
}
45
// Virtual destructor (declared in the header); nothing to release here.
TcpCongestionOps::~TcpCongestionOps ()
{
}
49
// Default window-increase hook: intentionally a no-op. Concrete algorithms
// (e.g. TcpNewReno below) override this to grow cWnd on incoming ACKs.
void
TcpCongestionOps::IncreaseWindow (Ptr<TcpSocketState> tcb, uint32_t segmentsAcked)
{
  NS_LOG_FUNCTION (this << tcb << segmentsAcked);
}
55
// Default per-ACK notification hook: intentionally a no-op. Algorithms that
// track RTT samples (delay-based variants) override this.
void
TcpCongestionOps::PktsAcked (Ptr<TcpSocketState> tcb, uint32_t segmentsAcked,
                             const Time& rtt)
{
  NS_LOG_FUNCTION (this << tcb << segmentsAcked << rtt);
}
62
// Default hook invoked when the machine-wide congestion state changes
// (e.g. open -> recovery): intentionally a no-op in the base class.
void
TcpCongestionOps::CongestionStateSet (Ptr<TcpSocketState> tcb,
                                      const TcpSocketState::TcpCongState_t newState)
{
  NS_LOG_FUNCTION (this << tcb << newState);
}
69
// Default hook for discrete congestion-avoidance events: intentionally a
// no-op; override in algorithms that react to specific TcpCAEvent_t values.
void
TcpCongestionOps::CwndEvent (Ptr<TcpSocketState> tcb,
                             const TcpSocketState::TcpCAEvent_t event)
{
  NS_LOG_FUNCTION (this << tcb << event);
}
76
// By default an algorithm does not implement the full CongControl() path;
// derived classes that do (rate-sample based algorithms) return true.
bool
TcpCongestionOps::HasCongControl () const
{
  return false;
}
82
// Default rate-sample congestion-control hook: a no-op, consistent with
// HasCongControl() returning false above. Subclasses that do implement it
// override this and act on the connection/sample data.
void
TcpCongestionOps::CongControl (Ptr<TcpSocketState> tcb,
                               const TcpRateOps::TcpRateConnection &rc,
                               const TcpRateOps::TcpRateSample &rs)
{
  NS_LOG_FUNCTION (this << tcb);
  NS_UNUSED (rc);   // silence unused-parameter warnings in this no-op default
  NS_UNUSED (rs);
}
92
// NewReno (RFC 6582 / RFC 5681)
94
95 NS_OBJECT_ENSURE_REGISTERED (TcpNewReno);
96
// Register the concrete NewReno type; AddConstructor makes it creatable
// via the ns-3 object factory (e.g. from a TypeId attribute string).
TypeId
TcpNewReno::GetTypeId (void)
{
  static TypeId tid = TypeId ("ns3::TcpNewReno")
    .SetParent<TcpCongestionOps> ()
    .SetGroupName ("Internet")
    .AddConstructor<TcpNewReno> ()
  ;
  return tid;
}
107
// Default constructor: NewReno keeps no private state beyond the base class.
TcpNewReno::TcpNewReno (void) : TcpCongestionOps ()
{
  NS_LOG_FUNCTION (this);
}
112
// Copy constructor, used by Fork() via CopyObject to clone this algorithm
// for a new socket.
TcpNewReno::TcpNewReno (const TcpNewReno& sock)
  : TcpCongestionOps (sock)
{
  NS_LOG_FUNCTION (this);
}
118
// Destructor: nothing to release.
TcpNewReno::~TcpNewReno (void)
{
}
122
123 /**
124 * \brief Tcp NewReno slow start algorithm
125 *
126 * Defined in RFC 5681 as
127 *
128 * > During slow start, a TCP increments cwnd by at most SMSS bytes for
129 * > each ACK received that cumulatively acknowledges new data. Slow
130 * > start ends when cwnd exceeds ssthresh (or, optionally, when it
131 * > reaches it, as noted above) or when congestion is observed. While
132 * > traditionally TCP implementations have increased cwnd by precisely
133 * > SMSS bytes upon receipt of an ACK covering new data, we RECOMMEND
134 * > that TCP implementations increase cwnd, per:
135 * >
136 * > cwnd += min (N, SMSS) (2)
137 * >
138 * > where N is the number of previously unacknowledged bytes acknowledged
139 * > in the incoming ACK.
140 *
141 * The ns-3 implementation respect the RFC definition. Linux does something
142 * different:
143 * \verbatim
144 u32 tcp_slow_start(struct tcp_sock *tp, u32 acked)
145 {
146 u32 cwnd = tp->snd_cwnd + acked;
147
148 if (cwnd > tp->snd_ssthresh)
149 cwnd = tp->snd_ssthresh + 1;
150 acked -= cwnd - tp->snd_cwnd;
151 tp->snd_cwnd = min(cwnd, tp->snd_cwnd_clamp);
152
153 return acked;
154 }
155 \endverbatim
156 *
157 * As stated, we want to avoid the case when a cumulative ACK increases cWnd more
158 * than a segment size, but we keep count of how many segments we have ignored,
159 * and return them.
160 *
161 * \param tcb internal congestion state
162 * \param segmentsAcked count of segments acked
163 * \return the number of segments not considered for increasing the cWnd
164 */
165 uint32_t
SlowStart(Ptr<TcpSocketState> tcb,uint32_t segmentsAcked)166 TcpNewReno::SlowStart (Ptr<TcpSocketState> tcb, uint32_t segmentsAcked)
167 {
168 NS_LOG_FUNCTION (this << tcb << segmentsAcked);
169
170 if (segmentsAcked >= 1)
171 {
172 tcb->m_cWnd += tcb->m_segmentSize;
173 NS_LOG_INFO ("In SlowStart, updated to cwnd " << tcb->m_cWnd << " ssthresh " << tcb->m_ssThresh);
174 return segmentsAcked - 1;
175 }
176
177 return 0;
178 }
179
180 /**
181 * \brief NewReno congestion avoidance
182 *
183 * During congestion avoidance, cwnd is incremented by roughly 1 full-sized
184 * segment per round-trip time (RTT).
185 *
186 * \param tcb internal congestion state
187 * \param segmentsAcked count of segments acked
188 */
189 void
CongestionAvoidance(Ptr<TcpSocketState> tcb,uint32_t segmentsAcked)190 TcpNewReno::CongestionAvoidance (Ptr<TcpSocketState> tcb, uint32_t segmentsAcked)
191 {
192 NS_LOG_FUNCTION (this << tcb << segmentsAcked);
193
194 if (segmentsAcked > 0)
195 {
196 double adder = static_cast<double> (tcb->m_segmentSize * tcb->m_segmentSize) / tcb->m_cWnd.Get ();
197 adder = std::max (1.0, adder);
198 tcb->m_cWnd += static_cast<uint32_t> (adder);
199 NS_LOG_INFO ("In CongAvoid, updated to cwnd " << tcb->m_cWnd <<
200 " ssthresh " << tcb->m_ssThresh);
201 }
202 }
203
204 /**
205 * \brief Try to increase the cWnd following the NewReno specification
206 *
207 * \see SlowStart
208 * \see CongestionAvoidance
209 *
210 * \param tcb internal congestion state
211 * \param segmentsAcked count of segments acked
212 */
213 void
IncreaseWindow(Ptr<TcpSocketState> tcb,uint32_t segmentsAcked)214 TcpNewReno::IncreaseWindow (Ptr<TcpSocketState> tcb, uint32_t segmentsAcked)
215 {
216 NS_LOG_FUNCTION (this << tcb << segmentsAcked);
217
218 if (tcb->m_cWnd < tcb->m_ssThresh)
219 {
220 segmentsAcked = SlowStart (tcb, segmentsAcked);
221 }
222
223 if (tcb->m_cWnd >= tcb->m_ssThresh)
224 {
225 CongestionAvoidance (tcb, segmentsAcked);
226 }
227
228 /* At this point, we could have segmentsAcked != 0. This because RFC says
229 * that in slow start, we should increase cWnd by min (N, SMSS); if in
230 * slow start we receive a cumulative ACK, it counts only for 1 SMSS of
231 * increase, wasting the others.
232 *
233 * // Incorrect assert, I am sorry
234 * NS_ASSERT (segmentsAcked == 0);
235 */
236 }
237
// Human-readable algorithm name, used e.g. in logging and trace output.
std::string
TcpNewReno::GetName () const
{
  return "TcpNewReno";
}
243
244 uint32_t
GetSsThresh(Ptr<const TcpSocketState> state,uint32_t bytesInFlight)245 TcpNewReno::GetSsThresh (Ptr<const TcpSocketState> state,
246 uint32_t bytesInFlight)
247 {
248 NS_LOG_FUNCTION (this << state << bytesInFlight);
249
250 return std::max (2 * state->m_segmentSize, bytesInFlight / 2);
251 }
252
// Create a copy of this object (via the copy constructor through
// CopyObject), so each socket gets its own congestion-ops instance.
Ptr<TcpCongestionOps>
TcpNewReno::Fork ()
{
  return CopyObject<TcpNewReno> (this);
}
258
259 } // namespace ns3
260
261