/*
 * Copyright (c) 2008-2010 Lawrence Stewart <lstewart@freebsd.org>
 * Copyright (c) 2010 The FreeBSD Foundation
 * All rights reserved.
 * Copyright (c) 2017 by Delphix. All rights reserved.
 *
 * This software was developed by Lawrence Stewart while studying at the Centre
 * for Advanced Internet Architectures, Swinburne University of Technology, made
 * possible in part by a grant from the Cisco University Research Program Fund
 * at Community Foundation Silicon Valley.
 *
 * Portions of this software were developed at the Centre for Advanced
 * Internet Architectures, Swinburne University of Technology, Melbourne,
 * Australia by David Hayes under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * An implementation of the CUBIC congestion control algorithm for FreeBSD,
 * based on the Internet Draft "draft-rhee-tcpm-cubic-02" by Rhee, Xu and Ha.
 * Originally released as part of the NewTCP research project at Swinburne
 * University of Technology's Centre for Advanced Internet Architectures,
 * Melbourne, Australia, which was made possible in part by a grant from the
 * Cisco University Research Program Fund at Community Foundation Silicon
 * Valley. More details are available at:
 *   http://caia.swin.edu.au/urp/newtcp/
 */
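
/*
 * For reference, the window function this file implements (per the I-D) is,
 * in floating point terms:
 *
 *	W_cubic(t) = C * (t - K)^3 + W_max
 *
 * where W_max is the congestion window at the most recent congestion event,
 * C is a scaling constant, and K = cbrt(W_max * (1 - beta) / C) is the time
 * at which the function climbs back to W_max. The fixed point equivalents,
 * carrying CUBIC_SHIFT bits of precision, live in cubic_cwnd() and cubic_k()
 * in <inet/cc/cc_cubic.h>.
 */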

#include <sys/errno.h>
#include <sys/types.h>
#include <sys/kmem.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/modctl.h>

#include <inet/tcp_impl.h>
#include <inet/cc.h>
#include <inet/cc/cc_cubic.h>
#include <inet/cc/cc_module.h>

static struct modlmisc cc_cubic_modlmisc = {
	&mod_miscops,
	"Cubic Congestion Control"
};

static struct modlinkage cc_cubic_modlinkage = {
	MODREV_1,
	&cc_cubic_modlmisc,
	NULL
};
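
/*
 * This is a miscellaneous module: it exposes no dev/driver entry points,
 * and instead registers itself with the congestion control framework in
 * _init() below.
 */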

/*
 * CUBIC uses the NewReno implementation of after_idle and, during slow
 * start, NewReno's ack_received callback.
 */
static struct cc_algo *newreno_cc_algo;

static void	cubic_ack_received(struct cc_var *ccv, uint16_t type);
static void	cubic_cb_destroy(struct cc_var *ccv);
static int	cubic_cb_init(struct cc_var *ccv);
static void	cubic_cong_signal(struct cc_var *ccv, uint32_t type);
static void	cubic_conn_init(struct cc_var *ccv);
static void	cubic_post_recovery(struct cc_var *ccv);
static void	cubic_record_rtt(struct cc_var *ccv);
static void	cubic_ssthresh_update(struct cc_var *ccv);

struct cubic {
	/* Cubic K in fixed point form with CUBIC_SHIFT worth of precision. */
	int64_t		K;
	/* Sum of RTT samples across an epoch in ticks. */
	int64_t		sum_rtt_ticks;
	/* cwnd at the most recent congestion event. */
	uint32_t	max_cwnd;
	/* cwnd at the previous congestion event. */
	uint32_t	prev_max_cwnd;
	/* Number of congestion events. */
	uint32_t	num_cong_events;
	/* Minimum observed rtt in ticks. */
	int		min_rtt_ticks;
	/* Mean observed rtt between congestion epochs. */
	int		mean_rtt_ticks;
	/* ACKs since last congestion event. */
	int		epoch_ack_count;
	/* Time of last congestion event in ticks. */
	clock_t		t_last_cong;
};

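/*
 * Note that after_idle is not set in this initializer; it is filled in
 * from the NewReno algorithm in _init() below.
 */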
struct cc_algo cubic_cc_algo = {
	.name = "cubic",
	.ack_received = cubic_ack_received,
	.cb_destroy = cubic_cb_destroy,
	.cb_init = cubic_cb_init,
	.cong_signal = cubic_cong_signal,
	.conn_init = cubic_conn_init,
	.post_recovery = cubic_post_recovery,
};

int
_init(void)
{
	int err;

	if ((newreno_cc_algo = cc_load_algo("newreno")) == NULL)
		return (EINVAL);

	/*
	 * Inherit NewReno's after_idle handler before the algorithm is
	 * registered and becomes visible to connections.
	 */
	cubic_cc_algo.after_idle = newreno_cc_algo->after_idle;

	if ((err = cc_register_algo(&cubic_cc_algo)) == 0) {
		if ((err = mod_install(&cc_cubic_modlinkage)) != 0)
			(void) cc_deregister_algo(&cubic_cc_algo);
	}

	return (err);
}

int
_fini(void)
{
	/* XXX Not unloadable for now */
	return (EBUSY);
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&cc_cubic_modlinkage, modinfop));
}

static void
cubic_ack_received(struct cc_var *ccv, uint16_t type)
{
	struct cubic *cubic_data;
	uint32_t w_tf, w_cubic_next;
	int ticks_since_cong;

	cubic_data = ccv->cc_data;
	cubic_record_rtt(ccv);

	/*
	 * Grow the window only on a regular ACK, while outside of
	 * congestion/fast recovery and while cwnd limited, and then only if
	 * one of the following also holds: ABC is disabled, we are in slow
	 * start, or ABC is enabled and a cwnd's worth of bytes has been
	 * ACKed.
	 */
	if (type == CC_ACK && !IN_RECOVERY(ccv->flags) &&
	    (ccv->flags & CCF_CWND_LIMITED) && (!CC_ABC(ccv) ||
	    CCV(ccv, tcp_cwnd) <= CCV(ccv, tcp_cwnd_ssthresh) ||
	    (CC_ABC(ccv) && (ccv->flags & CCF_ABC_SENTAWND)))) {
		/* Use the logic in NewReno ack_received() for slow start. */
		if (CCV(ccv, tcp_cwnd) <= CCV(ccv, tcp_cwnd_ssthresh) ||
		    cubic_data->min_rtt_ticks == TCPTV_SRTTBASE) {
			newreno_cc_algo->ack_received(ccv, type);
		} else {
			ticks_since_cong = ddi_get_lbolt() -
			    cubic_data->t_last_cong;

			/*
			 * The mean RTT is used to best reflect the equations
			 * in the I-D. Using min_rtt in the tf_cwnd
			 * calculation causes w_tf to grow much faster than it
			 * should if the RTT is dominated by network buffering
			 * rather than propagation delay.
			 */
			w_tf = tf_cwnd(ticks_since_cong,
			    cubic_data->mean_rtt_ticks, cubic_data->max_cwnd,
			    CCV(ccv, tcp_mss));

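			/*
			 * For context: tf_cwnd() estimates the cwnd a
			 * NewReno-style AIMD flow would have reached by this
			 * point in the epoch, while cubic_cwnd() evaluates
			 * the cubic window function one mean RTT ahead of
			 * the current time.
			 */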
			w_cubic_next = cubic_cwnd(ticks_since_cong +
			    cubic_data->mean_rtt_ticks, cubic_data->max_cwnd,
			    CCV(ccv, tcp_mss), cubic_data->K);

			ccv->flags &= ~CCF_ABC_SENTAWND;

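			/*
			 * Pick whichever of the TCP-friendly and CUBIC
			 * windows allows the faster growth. If neither
			 * exceeds the current cwnd, we are on the plateau
			 * around max_cwnd and cwnd is simply held steady.
			 */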
			if (w_cubic_next < w_tf) {
				/*
				 * TCP-friendly region, follow tf
				 * cwnd growth.
				 */
				CCV(ccv, tcp_cwnd) = w_tf;
			} else if (CCV(ccv, tcp_cwnd) < w_cubic_next) {
				/*
				 * Concave or convex region, follow CUBIC
				 * cwnd growth.
				 *
				 * Without ABC, cwnd is grown towards
				 * w_cubic_next by a fraction of the gap per
				 * ACK, so that it converges on w_cubic_next
				 * after roughly a cwnd's worth of ACKs.
				 */
				if (CC_ABC(ccv)) {
					CCV(ccv, tcp_cwnd) = w_cubic_next;
				} else {
					CCV(ccv, tcp_cwnd) += ((w_cubic_next -
					    CCV(ccv, tcp_cwnd)) *
					    CCV(ccv, tcp_mss)) /
					    CCV(ccv, tcp_cwnd);
				}
			}

			/*
			 * If we're not in slow start and we're probing for a
			 * new cwnd limit at the start of a connection
			 * (happens when hostcache has a relevant entry),
			 * keep updating our current estimate of the
			 * max_cwnd.
			 */
			if (cubic_data->num_cong_events == 0 &&
			    cubic_data->max_cwnd < CCV(ccv, tcp_cwnd))
				cubic_data->max_cwnd = CCV(ccv, tcp_cwnd);
		}
	}
}

static void
cubic_cb_destroy(struct cc_var *ccv)
{
	if (ccv->cc_data != NULL)
		kmem_free(ccv->cc_data, sizeof (struct cubic));
}

static int
cubic_cb_init(struct cc_var *ccv)
{
	struct cubic *cubic_data;

	/*
	 * Zero the allocation so that counters such as num_cong_events,
	 * epoch_ack_count and K start out at zero; several paths rely on
	 * that before the first congestion event occurs.
	 */
	cubic_data = kmem_zalloc(sizeof (struct cubic), KM_NOSLEEP);

	if (cubic_data == NULL)
		return (ENOMEM);

	/* Init some key variables with sensible defaults. */
	cubic_data->t_last_cong = ddi_get_lbolt();
	cubic_data->min_rtt_ticks = TCPTV_SRTTBASE;
	cubic_data->mean_rtt_ticks = 1;

	ccv->cc_data = cubic_data;

	return (0);
}

/*
 * Perform any necessary tasks before we enter congestion recovery.
 */
static void
cubic_cong_signal(struct cc_var *ccv, uint32_t type)
{
	struct cubic *cubic_data;
	uint32_t cwin;
	uint32_t mss;

	cubic_data = ccv->cc_data;
	cwin = CCV(ccv, tcp_cwnd);
	mss = CCV(ccv, tcp_mss);

	switch (type) {
	case CC_NDUPACK:
		/*
		 * The fast retransmit threshold of duplicate ACKs was
		 * reached: record the congestion point and enter fast
		 * recovery, once per congestion event.
		 */
		if (!IN_FASTRECOVERY(ccv->flags)) {
			if (!IN_CONGRECOVERY(ccv->flags)) {
				cubic_ssthresh_update(ccv);
				cubic_data->num_cong_events++;
				cubic_data->prev_max_cwnd =
				    cubic_data->max_cwnd;
				cubic_data->max_cwnd = cwin;
				CCV(ccv, tcp_cwnd) =
				    CCV(ccv, tcp_cwnd_ssthresh);
			}
			ENTER_RECOVERY(ccv->flags);
		}
		break;

	case CC_ECN:
		/*
		 * ECN congestion experienced: respond as for loss, but
		 * only once per window of data.
		 */
		if (!IN_CONGRECOVERY(ccv->flags)) {
			cubic_ssthresh_update(ccv);
			cubic_data->num_cong_events++;
			cubic_data->prev_max_cwnd = cubic_data->max_cwnd;
			cubic_data->max_cwnd = cwin;
			cubic_data->t_last_cong = ddi_get_lbolt();
			CCV(ccv, tcp_cwnd) = CCV(ccv, tcp_cwnd_ssthresh);
			ENTER_CONGRECOVERY(ccv->flags);
		}
		break;

	case CC_RTO:
		/*
		 * Grab the current time and record it so we know when the
		 * most recent congestion event was. Only record it when the
		 * timeout has fired more than once, as there is a reasonable
		 * chance the first one is a false alarm and may not indicate
		 * congestion.
		 */
		if (CCV(ccv, tcp_timer_backoff) >= 2) {
			cubic_data->num_cong_events++;
			cubic_data->t_last_cong = ddi_get_lbolt();
			cubic_ssthresh_update(ccv);
			cubic_data->max_cwnd = cwin;
			CCV(ccv, tcp_cwnd) = mss;
		}
		break;
	}
}

static void
cubic_conn_init(struct cc_var *ccv)
{
	struct cubic *cubic_data;

	cubic_data = ccv->cc_data;

	/*
	 * Ensure we have a sane initial value for max_cwnd recorded.
	 * Without it, the cubic window calculations misbehave when the
	 * initial cwnd is seeded from a TCP hostcache entry.
	 */
	cubic_data->max_cwnd = CCV(ccv, tcp_cwnd);
}

/*
 * Perform any necessary tasks before we exit congestion recovery.
 */
static void
cubic_post_recovery(struct cc_var *ccv)
{
	struct cubic *cubic_data;

	cubic_data = ccv->cc_data;

	/*
	 * Fast convergence heuristic (per the I-D): if the window at this
	 * congestion event is smaller than at the previous one, the flow is
	 * likely ceding bandwidth to new flows, so shrink the recorded
	 * max_cwnd a little further to release that bandwidth faster.
	 */
	if (cubic_data->max_cwnd < cubic_data->prev_max_cwnd)
		cubic_data->max_cwnd = (cubic_data->max_cwnd * CUBIC_FC_FACTOR)
		    >> CUBIC_SHIFT;

	if (IN_FASTRECOVERY(ccv->flags)) {
		/* Update cwnd based on beta and adjusted max_cwnd. */
		CCV(ccv, tcp_cwnd) = max(1, ((CUBIC_BETA *
		    cubic_data->max_cwnd) >> CUBIC_SHIFT));
	}
	cubic_data->t_last_cong = ddi_get_lbolt();

	/*
	 * Calculate the average RTT between congestion epochs. The guard
	 * against sum_rtt_ticks being less than epoch_ack_count ensures the
	 * resulting mean is at least one tick.
	 */
	if (cubic_data->epoch_ack_count > 0 &&
	    cubic_data->sum_rtt_ticks >= cubic_data->epoch_ack_count) {
		cubic_data->mean_rtt_ticks = (int)(cubic_data->sum_rtt_ticks /
		    cubic_data->epoch_ack_count);
	}

	cubic_data->epoch_ack_count = 0;
	cubic_data->sum_rtt_ticks = 0;

	/* Recompute K for the new epoch, with max_cwnd in segments. */
	cubic_data->K = cubic_k(cubic_data->max_cwnd / CCV(ccv, tcp_mss));
}

/*
 * Record the min RTT and sum samples for the epoch average RTT calculation.
 */
static void
cubic_record_rtt(struct cc_var *ccv)
{
	struct cubic *cubic_data;
	int t_srtt_ticks;

	/* Ignore srtt until a min number of samples have been taken. */
	if (CCV(ccv, tcp_rtt_update) >= CUBIC_MIN_RTT_SAMPLES) {
		cubic_data = ccv->cc_data;
		/* tcp_rtt_sa is 8 * smoothed RTT in nanoseconds. */
		t_srtt_ticks = NSEC_TO_TICK(CCV(ccv, tcp_rtt_sa) >> 3);

		/*
		 * Record the current SRTT as our minrtt if it's the smallest
		 * we've seen or minrtt is currently equal to its initialized
		 * value.
		 *
		 * XXXLAS: Should there be some hysteresis for minrtt?
		 */
		if ((t_srtt_ticks < cubic_data->min_rtt_ticks ||
		    cubic_data->min_rtt_ticks == TCPTV_SRTTBASE)) {
			cubic_data->min_rtt_ticks = max(1, t_srtt_ticks);

			/*
			 * If the connection is within its first congestion
			 * epoch, ensure we prime mean_rtt_ticks with a
			 * reasonable value until the epoch average RTT is
			 * calculated in cubic_post_recovery().
			 */
			if (cubic_data->min_rtt_ticks >
			    cubic_data->mean_rtt_ticks)
				cubic_data->mean_rtt_ticks =
				    cubic_data->min_rtt_ticks;
		}

		/* Sum samples for epoch average RTT calculation. */
		cubic_data->sum_rtt_ticks += t_srtt_ticks;
		cubic_data->epoch_ack_count++;
	}
}

/*
 * Update the ssthresh in the event of congestion.
 */
static void
cubic_ssthresh_update(struct cc_var *ccv)
{
	struct cubic *cubic_data;

	cubic_data = ccv->cc_data;

	/*
	 * On the first congestion event, set ssthresh to cwnd * 0.5; on
	 * subsequent congestion events, set it to cwnd * beta, where
	 * CUBIC_BETA holds beta in fixed point with CUBIC_SHIFT bits of
	 * precision (the I-D uses a beta of 0.8).
	 */
	if (cubic_data->num_cong_events == 0)
		CCV(ccv, tcp_cwnd_ssthresh) = CCV(ccv, tcp_cwnd) >> 1;
	else
		CCV(ccv, tcp_cwnd_ssthresh) =
		    (CCV(ccv, tcp_cwnd) * CUBIC_BETA) >> CUBIC_SHIFT;
}