// SPDX-License-Identifier: GPL-2.0-only
/*
 * TCP CUBIC: Binary Increase Congestion control for TCP v2.3
 * Home page:
 *      http://netsrv.csc.ncsu.edu/twiki/bin/view/Main/BIC
 * This is from the implementation of CUBIC TCP in
 * Sangtae Ha, Injong Rhee and Lisong Xu,
 *  "CUBIC: A New TCP-Friendly High-Speed TCP Variant"
 *  in ACM SIGOPS Operating System Review, July 2008.
 * Available from:
 *  http://netsrv.csc.ncsu.edu/export/cubic_a_new_tcp_2008.pdf
 *
 * CUBIC integrates a new slow start algorithm, called HyStart.
 * The details of HyStart are presented in
 *  Sangtae Ha and Injong Rhee,
 *  "Taming the Elephants: New TCP Slow Start", NCSU TechReport 2008.
 * Available from:
 *  http://netsrv.csc.ncsu.edu/export/hystart_techreport_2008.pdf
 *
 * All testing results are available from:
 * http://netsrv.csc.ncsu.edu/wiki/index.php/TCP_Testing
 *
 * Unless CUBIC is enabled and congestion window is large
 * this behaves the same as the original Reno.
 */

#include <linux/mm.h>
#include <linux/btf.h>
#include <linux/btf_ids.h>
#include <linux/module.h>
#include <linux/math64.h>
#include <net/tcp.h>

#define BICTCP_BETA_SCALE	1024	/* Scale factor beta calculation
					 * max_cwnd = snd_cwnd * beta
					 */
#define BICTCP_HZ		10	/* BIC HZ 2^10 = 1024 */

/* Two methods of hybrid slow start */
#define HYSTART_ACK_TRAIN	0x1
#define HYSTART_DELAY		0x2

/* Number of delay samples for detecting the increase of delay */
#define HYSTART_MIN_SAMPLES	8
#define HYSTART_DELAY_MIN	(4000U)		/* 4 ms */
#define HYSTART_DELAY_MAX	(16000U)	/* 16 ms */
#define HYSTART_DELAY_THRESH(x)	clamp(x, HYSTART_DELAY_MIN, HYSTART_DELAY_MAX)
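
/* Illustrative example (numbers not from the source): with a measured
 * delay_min of 100 ms = 100000 us, the HYSTART_DELAY check below uses
 *   HYSTART_DELAY_THRESH(100000 >> 3) = clamp(12500, 4000, 16000) = 12500 us,
 * so slow start would exit once a round's minimum RTT exceeds
 * delay_min + 12.5 ms; very small or very large RTTs are clamped to the
 * 4..16 ms window.
 */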

static int fast_convergence __read_mostly = 1;
static int beta __read_mostly = 717;	/* = 717/1024 (BICTCP_BETA_SCALE) */
static int initial_ssthresh __read_mostly;
static int bic_scale __read_mostly = 41;
static int tcp_friendliness __read_mostly = 1;

static int hystart __read_mostly = 1;
static int hystart_detect __read_mostly = HYSTART_ACK_TRAIN | HYSTART_DELAY;
static int hystart_low_window __read_mostly = 16;
static int hystart_ack_delta_us __read_mostly = 2000;

static u32 cube_rtt_scale __read_mostly;
static u32 beta_scale __read_mostly;
static u64 cube_factor __read_mostly;
/* Note parameters that are used for precomputing scale factors are read-only */
module_param(fast_convergence, int, 0644);
MODULE_PARM_DESC(fast_convergence, "turn on/off fast convergence");
module_param(beta, int, 0444);
MODULE_PARM_DESC(beta, "beta for multiplicative increase");
module_param(initial_ssthresh, int, 0644);
MODULE_PARM_DESC(initial_ssthresh, "initial value of slow start threshold");
module_param(bic_scale, int, 0444);
MODULE_PARM_DESC(bic_scale, "scale (scaled by 1024) value for bic function (bic_scale/1024)");
module_param(tcp_friendliness, int, 0644);
MODULE_PARM_DESC(tcp_friendliness, "turn on/off tcp friendliness");
module_param(hystart, int, 0644);
MODULE_PARM_DESC(hystart, "turn on/off hybrid slow start algorithm");
module_param(hystart_detect, int, 0644);
MODULE_PARM_DESC(hystart_detect, "hybrid slow start detection mechanisms"
		 " 1: packet-train 2: delay 3: both packet-train and delay");
module_param(hystart_low_window, int, 0644);
MODULE_PARM_DESC(hystart_low_window, "lower bound cwnd for hybrid slow start");
module_param(hystart_ack_delta_us, int, 0644);
MODULE_PARM_DESC(hystart_ack_delta_us, "spacing between ack's indicating train (usecs)");

/* BIC TCP Parameters */
struct bictcp {
	u32	cnt;		/* increase cwnd by 1 after ACKs */
	u32	last_max_cwnd;	/* last maximum snd_cwnd */
	u32	last_cwnd;	/* the last snd_cwnd */
	u32	last_time;	/* time when updated last_cwnd */
	u32	bic_origin_point;/* origin point of bic function */
	u32	bic_K;		/* time to origin point
				   from the beginning of the current epoch */
	u32	delay_min;	/* min delay (usec) */
	u32	epoch_start;	/* beginning of an epoch */
	u32	ack_cnt;	/* number of acks */
	u32	tcp_cwnd;	/* estimated tcp cwnd */
	u16	unused;
	u8	sample_cnt;	/* number of samples to decide curr_rtt */
	u8	found;		/* the exit point is found? */
	u32	round_start;	/* beginning of each round */
	u32	end_seq;	/* end_seq of the round */
	u32	last_ack;	/* last time when the ACK spacing is close */
	u32	curr_rtt;	/* the minimum rtt of current round */
};

static inline void bictcp_reset(struct bictcp *ca)
{
	memset(ca, 0, offsetof(struct bictcp, unused));
	ca->found = 0;
}

static inline u32 bictcp_clock_us(const struct sock *sk)
{
	return tcp_sk(sk)->tcp_mstamp;
}

static inline void bictcp_hystart_reset(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct bictcp *ca = inet_csk_ca(sk);

	ca->round_start = ca->last_ack = bictcp_clock_us(sk);
	ca->end_seq = tp->snd_nxt;
	ca->curr_rtt = ~0U;
	ca->sample_cnt = 0;
}

__bpf_kfunc static void cubictcp_init(struct sock *sk)
{
	struct bictcp *ca = inet_csk_ca(sk);

	bictcp_reset(ca);

	if (hystart)
		bictcp_hystart_reset(sk);

	if (!hystart && initial_ssthresh)
		tcp_sk(sk)->snd_ssthresh = initial_ssthresh;
}

__bpf_kfunc static void cubictcp_cwnd_event(struct sock *sk, enum tcp_ca_event event)
{
	if (event == CA_EVENT_TX_START) {
		struct bictcp *ca = inet_csk_ca(sk);
		u32 now = tcp_jiffies32;
		s32 delta;

		delta = now - tcp_sk(sk)->lsndtime;

		/* We were application limited (idle) for a while.
		 * Shift epoch_start to keep cwnd growth to cubic curve.
		 */
		if (ca->epoch_start && delta > 0) {
			ca->epoch_start += delta;
			if (after(ca->epoch_start, now))
				ca->epoch_start = now;
		}
		return;
	}
}
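
/* Illustrative example (not from the source): if a flow sits application
 * limited for ~500 ms (delta ~= HZ/2 jiffies), epoch_start above is shifted
 * forward by that amount, so cwnd growth resumes from the point already
 * reached on the cubic curve rather than crediting the idle time.
 */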

/* calculate the cubic root of x using a table lookup followed by one
 * Newton-Raphson iteration.
 * Avg err ~= 0.195%
 */
static u32 cubic_root(u64 a)
{
	u32 x, b, shift;
	/*
	 * cbrt(x) MSB values for x MSB values in [0..63].
	 * Precomputed then refined by hand - Willy Tarreau
	 *
	 * For x in [0..63],
	 *   v = cbrt(x << 18) - 1
	 *   cbrt(x) = (v[x] + 10) >> 6
	 */
	static const u8 v[] = {
		/* 0x00 */    0,   54,   54,   54,  118,  118,  118,  118,
		/* 0x08 */  123,  129,  134,  138,  143,  147,  151,  156,
		/* 0x10 */  157,  161,  164,  168,  170,  173,  176,  179,
		/* 0x18 */  181,  185,  187,  190,  192,  194,  197,  199,
		/* 0x20 */  200,  202,  204,  206,  209,  211,  213,  215,
		/* 0x28 */  217,  219,  221,  222,  224,  225,  227,  229,
		/* 0x30 */  231,  232,  234,  236,  237,  239,  240,  242,
		/* 0x38 */  244,  245,  246,  248,  250,  251,  252,  254,
	};

	b = fls64(a);
	if (b < 7) {
		/* a in [0..63] */
		return ((u32)v[(u32)a] + 35) >> 6;
	}

	b = ((b * 84) >> 8) - 1;
	shift = (a >> (b * 3));

	x = ((u32)(((u32)v[shift] + 10) << b)) >> 6;

	/*
	 * Newton-Raphson iteration
	 *                         2
	 * x    = ( 2 * x  +  a / x  ) / 3
	 *  k+1          k         k
	 */
	x = (2 * x + (u32)div64_u64(a, (u64)x * (u64)(x - 1)));
	x = ((x * 341) >> 10);
	return x;
}
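
/* Worked example (illustrative): cubic_root(1000).
 * fls64(1000) = 10, so b = ((10 * 84) >> 8) - 1 = 2 and shift = 1000 >> 6 = 15.
 * The table gives v[15] = 156, hence x = ((156 + 10) << 2) >> 6 = 10.
 * The Newton-Raphson step computes x = 2 * 10 + 1000 / (10 * 9) = 31,
 * then (31 * 341) >> 10 = 10, matching cbrt(1000) = 10 exactly.
 */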

/*
 * Compute congestion window to use.
 */
static inline void bictcp_update(struct bictcp *ca, u32 cwnd, u32 acked)
{
	u32 delta, bic_target, max_cnt;
	u64 offs, t;

	ca->ack_cnt += acked;	/* count the number of ACKed packets */

	if (ca->last_cwnd == cwnd &&
	    (s32)(tcp_jiffies32 - ca->last_time) <= HZ / 32)
		return;

	/* The CUBIC function can update ca->cnt at most once per jiffy.
	 * On all cwnd reduction events, ca->epoch_start is set to 0,
	 * which will force a recalculation of ca->cnt.
	 */
	if (ca->epoch_start && tcp_jiffies32 == ca->last_time)
		goto tcp_friendliness;

	ca->last_cwnd = cwnd;
	ca->last_time = tcp_jiffies32;

	if (ca->epoch_start == 0) {
		ca->epoch_start = tcp_jiffies32;	/* record beginning */
		ca->ack_cnt = acked;			/* start counting */
		ca->tcp_cwnd = cwnd;			/* sync with cubic */

		if (ca->last_max_cwnd <= cwnd) {
			ca->bic_K = 0;
			ca->bic_origin_point = cwnd;
		} else {
			/* Compute new K based on
			 * (wmax-cwnd) * (srtt>>3 / HZ) / c * 2^(3*bictcp_HZ)
			 */
			ca->bic_K = cubic_root(cube_factor
					       * (ca->last_max_cwnd - cwnd));
			ca->bic_origin_point = ca->last_max_cwnd;
		}
	}

	/* cubic function - calc */
	/* calculate c * time^3 / rtt,
	 * while considering overflow in calculation of time^3
	 * (so time^3 is done by using 64 bit)
	 * and without the support of division of 64bit numbers
	 * (so all divisions are done by using 32 bit)
	 * also NOTE the unit of those variables
	 *	  time = (t - K) / 2^bictcp_HZ
	 *	  c = bic_scale >> 10
	 *	  rtt = (srtt >> 3) / HZ
	 *
	 * !!! The following code does not have overflow problems,
	 * if the cwnd < 1 million packets !!!
	 */

	t = (s32)(tcp_jiffies32 - ca->epoch_start);
	t += usecs_to_jiffies(ca->delay_min);
	/* change the unit from HZ to bictcp_HZ */
	t <<= BICTCP_HZ;
	do_div(t, HZ);
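
	/* Worked example (illustrative, HZ = 1000): 50 jiffies into the epoch
	 * with delay_min = 20 ms (20 jiffies),
	 *   t = ((50 + 20) << 10) / 1000 = 71,
	 * i.e. ~0.07 s expressed in units of 2^-BICTCP_HZ = 1/1024 s.
	 */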

	if (t < ca->bic_K)		/* t - K */
		offs = ca->bic_K - t;
	else
		offs = t - ca->bic_K;

	/* c/rtt * (t-K)^3 */
	delta = (cube_rtt_scale * offs * offs * offs) >> (10+3*BICTCP_HZ);
	if (t < ca->bic_K)				/* below origin */
		bic_target = ca->bic_origin_point - delta;
	else						/* above origin */
		bic_target = ca->bic_origin_point + delta;

	/* cubic function - calc bictcp_cnt */
	if (bic_target > cwnd) {
		ca->cnt = cwnd / (bic_target - cwnd);
	} else {
		ca->cnt = 100 * cwnd;			/* very small increment */
	}

	/*
	 * The initial growth of cubic function may be too conservative
	 * when the available bandwidth is still unknown.
	 */
	if (ca->last_max_cwnd == 0 && ca->cnt > 20)
		ca->cnt = 20;	/* increase cwnd 5% per RTT */

tcp_friendliness:
	/* TCP Friendly */
	if (tcp_friendliness) {
		u32 scale = beta_scale;

		delta = (cwnd * scale) >> 3;
		while (ca->ack_cnt > delta) {		/* update tcp cwnd */
			ca->ack_cnt -= delta;
			ca->tcp_cwnd++;
		}

		if (ca->tcp_cwnd > cwnd) {	/* if bic is slower than tcp */
			delta = ca->tcp_cwnd - cwnd;
			max_cnt = cwnd / delta;
			if (ca->cnt > max_cnt)
				ca->cnt = max_cnt;
		}
	}

	/* The maximum rate of cwnd increase CUBIC allows is 1 packet per
	 * 2 packets ACKed, meaning cwnd grows at 1.5x per RTT.
	 */
	ca->cnt = max(ca->cnt, 2U);
}
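
/* How ca->cnt is consumed (illustrative summary): the tcp_cong_avoid_ai()
 * call in cubictcp_cong_avoid() adds one MSS to cwnd for every ca->cnt
 * ACKed packets.  cnt = cwnd / (bic_target - cwnd) therefore closes the
 * gap to bic_target in roughly one RTT, cnt = 100 * cwnd keeps growth
 * nearly flat at or above the origin, and the max(ca->cnt, 2U) clamp
 * caps growth at 1.5x per RTT.
 */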

__bpf_kfunc static void cubictcp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct bictcp *ca = inet_csk_ca(sk);

	if (!tcp_is_cwnd_limited(sk))
		return;

	if (tcp_in_slow_start(tp)) {
		acked = tcp_slow_start(tp, acked);
		if (!acked)
			return;
	}
	bictcp_update(ca, tcp_snd_cwnd(tp), acked);
	tcp_cong_avoid_ai(tp, ca->cnt, acked);
}

__bpf_kfunc static u32 cubictcp_recalc_ssthresh(struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	struct bictcp *ca = inet_csk_ca(sk);

	ca->epoch_start = 0;	/* end of epoch */

	/* Wmax and fast convergence */
	if (tcp_snd_cwnd(tp) < ca->last_max_cwnd && fast_convergence)
		ca->last_max_cwnd = (tcp_snd_cwnd(tp) * (BICTCP_BETA_SCALE + beta))
			/ (2 * BICTCP_BETA_SCALE);
	else
		ca->last_max_cwnd = tcp_snd_cwnd(tp);

	return max((tcp_snd_cwnd(tp) * beta) / BICTCP_BETA_SCALE, 2U);
}
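
/* Worked numbers (illustrative, default beta = 717): on a loss at
 * cwnd = 100, ssthresh becomes max(100 * 717 / 1024, 2) = 70.  If the flow
 * was still below its previous last_max_cwnd, fast convergence records
 * last_max_cwnd = 100 * (1024 + 717) / 2048 = 85 instead of 100, releasing
 * capacity to newer flows.
 */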

__bpf_kfunc static void cubictcp_state(struct sock *sk, u8 new_state)
{
	if (new_state == TCP_CA_Loss) {
		bictcp_reset(inet_csk_ca(sk));
		bictcp_hystart_reset(sk);
	}
}

/* Account for TSO/GRO delays.
 * Otherwise short RTT flows could get too small ssthresh, since during
 * slow start we begin with small TSO packets and ca->delay_min would
 * not account for long aggregation delay when TSO packets get bigger.
 * Ideally even with a very small RTT we would like to have at least one
 * TSO packet being sent and received by GRO, and another one in qdisc layer.
 * We apply another 100% factor because @rate is doubled at this point.
 * We cap the cushion to 1ms.
 */
static u32 hystart_ack_delay(const struct sock *sk)
{
	unsigned long rate;

	rate = READ_ONCE(sk->sk_pacing_rate);
	if (!rate)
		return 0;
	return min_t(u64, USEC_PER_MSEC,
		     div64_ul((u64)sk->sk_gso_max_size * 4 * USEC_PER_SEC, rate));
}
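
/* Illustrative numbers (not from the source): with sk_gso_max_size = 65536
 * and a pacing rate of 125 MB/s (~1 Gbit/s), the cushion would be
 * 65536 * 4 * USEC_PER_SEC / 125000000 ~= 2097 us, which the min_t()
 * above caps at USEC_PER_MSEC = 1000 us.
 */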

static void hystart_update(struct sock *sk, u32 delay)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct bictcp *ca = inet_csk_ca(sk);
	u32 threshold;

	if (after(tp->snd_una, ca->end_seq))
		bictcp_hystart_reset(sk);

	if (hystart_detect & HYSTART_ACK_TRAIN) {
		u32 now = bictcp_clock_us(sk);

		/* first detection parameter - ack-train detection */
		if ((s32)(now - ca->last_ack) <= hystart_ack_delta_us) {
			ca->last_ack = now;

			threshold = ca->delay_min + hystart_ack_delay(sk);

			/* Hystart ack train triggers if we get ack past
			 * ca->delay_min/2.
			 * Pacing might have delayed packets up to RTT/2
			 * during slow start.
			 */
			if (sk->sk_pacing_status == SK_PACING_NONE)
				threshold >>= 1;

			if ((s32)(now - ca->round_start) > threshold) {
				ca->found = 1;
				pr_debug("hystart_ack_train (%u > %u) delay_min %u (+ ack_delay %u) cwnd %u\n",
					 now - ca->round_start, threshold,
					 ca->delay_min, hystart_ack_delay(sk), tcp_snd_cwnd(tp));
				NET_INC_STATS(sock_net(sk),
					      LINUX_MIB_TCPHYSTARTTRAINDETECT);
				NET_ADD_STATS(sock_net(sk),
					      LINUX_MIB_TCPHYSTARTTRAINCWND,
					      tcp_snd_cwnd(tp));
				tp->snd_ssthresh = tcp_snd_cwnd(tp);
			}
		}
	}

	if (hystart_detect & HYSTART_DELAY) {
		/* track the minimum delay across at least HYSTART_MIN_SAMPLES packets */
		if (ca->curr_rtt > delay)
			ca->curr_rtt = delay;
		if (ca->sample_cnt < HYSTART_MIN_SAMPLES) {
			ca->sample_cnt++;
		} else {
			if (ca->curr_rtt > ca->delay_min +
			    HYSTART_DELAY_THRESH(ca->delay_min >> 3)) {
				ca->found = 1;
				NET_INC_STATS(sock_net(sk),
					      LINUX_MIB_TCPHYSTARTDELAYDETECT);
				NET_ADD_STATS(sock_net(sk),
					      LINUX_MIB_TCPHYSTARTDELAYCWND,
					      tcp_snd_cwnd(tp));
				tp->snd_ssthresh = tcp_snd_cwnd(tp);
			}
		}
	}
}
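
/* Worked threshold (illustrative): with delay_min = 10 ms = 10000 us, the
 * delay detector above fires once a round's minimum RTT exceeds
 * 10000 + HYSTART_DELAY_THRESH(10000 >> 3) = 10000 + 4000 = 14000 us,
 * since 10000 >> 3 = 1250 us is clamped up to HYSTART_DELAY_MIN.
 */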

__bpf_kfunc static void cubictcp_acked(struct sock *sk, const struct ack_sample *sample)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	struct bictcp *ca = inet_csk_ca(sk);
	u32 delay;

	/* Some calls are for duplicates without timestamps */
	if (sample->rtt_us < 0)
		return;

	/* Discard delay samples right after fast recovery */
	if (ca->epoch_start && (s32)(tcp_jiffies32 - ca->epoch_start) < HZ)
		return;

	delay = sample->rtt_us;
	if (delay == 0)
		delay = 1;

	/* first time call or link delay decreases */
	if (ca->delay_min == 0 || ca->delay_min > delay)
		ca->delay_min = delay;

	/* hystart triggers when cwnd is larger than some threshold */
	if (!ca->found && tcp_in_slow_start(tp) && hystart &&
	    tcp_snd_cwnd(tp) >= hystart_low_window)
		hystart_update(sk, delay);
}

static struct tcp_congestion_ops cubictcp __read_mostly = {
	.init		= cubictcp_init,
	.ssthresh	= cubictcp_recalc_ssthresh,
	.cong_avoid	= cubictcp_cong_avoid,
	.set_state	= cubictcp_state,
	.undo_cwnd	= tcp_reno_undo_cwnd,
	.cwnd_event	= cubictcp_cwnd_event,
	.pkts_acked	= cubictcp_acked,
	.owner		= THIS_MODULE,
	.name		= "cubic",
};

BTF_KFUNCS_START(tcp_cubic_check_kfunc_ids)
BTF_ID_FLAGS(func, cubictcp_init)
BTF_ID_FLAGS(func, cubictcp_recalc_ssthresh)
BTF_ID_FLAGS(func, cubictcp_cong_avoid)
BTF_ID_FLAGS(func, cubictcp_state)
BTF_ID_FLAGS(func, cubictcp_cwnd_event)
BTF_ID_FLAGS(func, cubictcp_acked)
BTF_KFUNCS_END(tcp_cubic_check_kfunc_ids)

static const struct btf_kfunc_id_set tcp_cubic_kfunc_set = {
	.owner = THIS_MODULE,
	.set   = &tcp_cubic_check_kfunc_ids,
};

static int __init cubictcp_register(void)
{
	int ret;

	BUILD_BUG_ON(sizeof(struct bictcp) > ICSK_CA_PRIV_SIZE);

	/* Precompute a bunch of the scaling factors that are used per-packet
	 * based on SRTT of 100ms
	 */

	beta_scale = 8*(BICTCP_BETA_SCALE+beta) / 3
		/ (BICTCP_BETA_SCALE - beta);

	cube_rtt_scale = (bic_scale * 10);	/* 1024*c/rtt */

	/* calculate the "K" for (wmax-cwnd) = c/rtt * K^3
	 * so K = cubic_root( (wmax-cwnd)*rtt/c )
	 * the unit of K is bictcp_HZ=2^10, not HZ
	 *
	 *  c = bic_scale >> 10
	 *  rtt = 100ms
	 *
	 * the following code has been designed and tested for
	 * cwnd < 1 million packets
	 * RTT < 100 seconds
	 * HZ < 100,000,000 (corresponding to a 10 ns jiffy)
	 */

	/* srtt / c * 2^(3*bictcp_HZ), with srtt = 100 ms and c = bic_scale/1024 */
	cube_factor = 1ull << (10+3*BICTCP_HZ); /* 2^40 */

	/* divide by bic_scale and by constant Srtt (100ms) */
	do_div(cube_factor, bic_scale * 10);
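
	/* Resulting values with the default parameters (illustrative):
	 * beta = 717 gives beta_scale = 8 * (1024 + 717) / 3 / (1024 - 717) = 15,
	 * and bic_scale = 41 gives cube_rtt_scale = 410 and
	 * cube_factor = 2^40 / 410 ~= 2.68e9.
	 */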

	ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, &tcp_cubic_kfunc_set);
	if (ret < 0)
		return ret;
	return tcp_register_congestion_control(&cubictcp);
}

static void __exit cubictcp_unregister(void)
{
	tcp_unregister_congestion_control(&cubictcp);
}

module_init(cubictcp_register);
module_exit(cubictcp_unregister);

MODULE_AUTHOR("Sangtae Ha, Stephen Hemminger");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("CUBIC TCP");
MODULE_VERSION("2.3");