DLPX-25998 TCP congestion control is inadequate
Reviewed at: http://reviews.delphix.com/r/34808/
DLPX-43064 include high-resolution round-trip times in connstat (EP-652)
DLPX-42721 Create inline function for TCP RTO calculation
--- old/usr/src/uts/common/inet/tcp_impl.h
+++ new/usr/src/uts/common/inet/tcp_impl.h
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23 23 * Copyright 2017 Joyent, Inc.
24 24 * Copyright (c) 2013, OmniTI Computer Consulting, Inc. All rights reserved.
25 - * Copyright (c) 2013, 2014 by Delphix. All rights reserved.
25 + * Copyright (c) 2013, 2017 by Delphix. All rights reserved.
26 26 */
27 27
28 28 #ifndef _INET_TCP_IMPL_H
29 29 #define _INET_TCP_IMPL_H
30 30
31 31 /*
32 32 * TCP implementation private declarations. These interfaces are
33 33 * used to build the IP module and are not meant to be accessed
34 34 * by any modules except IP itself. They are undocumented and are
35 35 * subject to change without notice.
36 36 */
37 37
38 38 #ifdef __cplusplus
39 39 extern "C" {
40 40 #endif
41 41
42 42 #ifdef _KERNEL
43 43
44 44 #include <sys/cpuvar.h>
45 45 #include <sys/clock_impl.h> /* For LBOLT_FASTPATH{,64} */
46 46 #include <inet/optcom.h>
47 47 #include <inet/tcp.h>
48 48 #include <inet/tunables.h>
49 49
50 50 #define TCP_MOD_ID 5105
51 51
52 52 extern struct qinit tcp_sock_winit;
53 53 extern struct qinit tcp_winit;
54 54
55 55 extern sock_downcalls_t sock_tcp_downcalls;
56 56
57 57 /*
58 58 * Note that by default, the _snd_lowat_fraction tunable controls the value of
59 59 * the transmit low water mark. TCP_XMIT_LOWATER (and thus the _xmit_lowat
60 60 * property) is only used if the administrator has disabled _snd_lowat_fraction
61 61 * by setting it to 0.
62 62 */
63 63 #define TCP_XMIT_LOWATER 4096
64 64 #define TCP_XMIT_HIWATER 128000
65 65 #define TCP_RECV_LOWATER 2048
66 66 #define TCP_RECV_HIWATER 1048576
67 67
68 68 /*
69 69 * Bind hash list size and hash function. It has to be a power of 2 for
70 70 * hashing.
71 71 */
72 72 #define TCP_BIND_FANOUT_SIZE 1024
73 73 #define TCP_BIND_HASH(lport) (ntohs(lport) & (TCP_BIND_FANOUT_SIZE - 1))
74 74
75 75 /*
76 76 * This implementation follows the 4.3BSD interpretation of the urgent
77 77 * pointer and not RFC 1122. Switching to RFC 1122 behavior would cause
78 78 * incompatible changes in protocols like telnet and rlogin.
79 79 */
80 80 #define TCP_OLD_URP_INTERPRETATION 1
81 81
82 82 /* TCP option length */
83 83 #define TCPOPT_NOP_LEN 1
84 84 #define TCPOPT_MAXSEG_LEN 4
85 85 #define TCPOPT_WS_LEN 3
86 86 #define TCPOPT_REAL_WS_LEN (TCPOPT_WS_LEN+1)
87 87 #define TCPOPT_TSTAMP_LEN 10
88 88 #define TCPOPT_REAL_TS_LEN (TCPOPT_TSTAMP_LEN+2)
89 89 #define TCPOPT_SACK_OK_LEN 2
90 90 #define TCPOPT_REAL_SACK_OK_LEN (TCPOPT_SACK_OK_LEN+2)
91 91 #define TCPOPT_REAL_SACK_LEN 4
92 92 #define TCPOPT_MAX_SACK_LEN 36
93 93 #define TCPOPT_HEADER_LEN 2
94 94
95 95 /* Round up the value to the nearest mss. */
96 96 #define MSS_ROUNDUP(value, mss) ((((value) - 1) / (mss) + 1) * (mss))
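As a quick worked example of the rounding (editorial illustration, not part of the change under review): MSS_ROUNDUP(3000, 1460) evaluates to ((3000 - 1) / 1460 + 1) * 1460 = (2 + 1) * 1460 = 4380, the smallest multiple of the mss that is >= 3000, while a value that is already a multiple, such as 2920, maps to itself.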
97 97
98 98 /*
99 99 * Was this tcp created via socket() interface?
100 100 */
101 101 #define TCP_IS_SOCKET(tcp) ((tcp)->tcp_issocket)
102 102
103 103 /*
104 104 * Is this tcp not attached to any upper client?
105 105 */
106 106 #define TCP_IS_DETACHED(tcp) ((tcp)->tcp_detached)
107 107
108 108 /* TCP timers related data structures. Refer to tcp_timers.c. */
109 109 typedef struct tcp_timer_s {
110 110 conn_t *connp;
111 111 void (*tcpt_proc)(void *);
112 112 callout_id_t tcpt_tid;
113 113 } tcp_timer_t;
114 114
115 115 extern kmem_cache_t *tcp_timercache;
116 116
117 117 /*
118 118 * Macro for starting various timers. Retransmission timer has its own macro,
119 119 * TCP_TIMER_RESTART(). tim is in millisec.
120 120 */
121 121 #define TCP_TIMER(tcp, f, tim) \
122 122 tcp_timeout(tcp->tcp_connp, f, tim)
123 123 #define TCP_TIMER_CANCEL(tcp, id) \
124 124 tcp_timeout_cancel(tcp->tcp_connp, id)
125 125
126 126 /*
127 127 * To restart the TCP retransmission timer. intvl is in millisec.
128 128 */
129 129 #define TCP_TIMER_RESTART(tcp, intvl) { \
130 130 if ((tcp)->tcp_timer_tid != 0) \
131 131 (void) TCP_TIMER_CANCEL((tcp), (tcp)->tcp_timer_tid); \
132 132 (tcp)->tcp_timer_tid = TCP_TIMER((tcp), tcp_timer, (intvl)); \
133 133 }
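For instance, an illustrative restart of the retransmission timer with a 500 ms interval (the value is arbitrary; real call sites pass the connection's computed RTO):

	TCP_TIMER_RESTART(tcp, 500);	/* interval in milliseconds */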
134 134
135 135
136 136 /*
137 137 * Maximum TIME_WAIT timeout. It is defined here (instead of tcp_tunables.c)
138 138 * so that other parameters can be derived from it.
139 139 */
140 140 #define TCP_TIME_WAIT_MAX (10 * MINUTES)
141 141
142 142 /*
143 143 * TCP_TIME_WAIT_DELAY governs how often the time_wait_collector runs.
144 144 * Running it every 5 seconds seems to yield a reasonable balance between
145 145 * cleanup liveliness and system load.
146 146 */
147 147 #define TCP_TIME_WAIT_DELAY (5 * SECONDS)
148 148
149 149 #define TCP_TIME_WAIT_BUCKETS ((TCP_TIME_WAIT_MAX / TCP_TIME_WAIT_DELAY) + 1)
150 150
151 151 /*
152 152 * For scalability, we must not run a timer for every TCP connection
153 153 * in TIME_WAIT state. To see why, consider (for a TIME_WAIT interval of
154 154 * 1 minute):
155 155 * 10,000 connections/sec * 60 seconds/time wait = 600,000 active conn's
156 156 *
157 157 * Since TIME_WAIT expiration occurs on a per-squeue basis, handling
158 158 * connections from all netstacks on the system, a simple queue is inadequate
159 159 * for pending entries. This is because tcp_time_wait_interval may differ
160 160 * between connections, causing tail insertion to violate expiration order.
161 161 *
162 162 * Instead of performing expensive sorting or unnecessary list traversal to
163 163 * counteract interval variance between netstacks, a timing wheel structure is
164 164 * used. The duration covered by each bucket in the wheel is determined by the
165 165 * TCP_TIME_WAIT_DELAY (5 seconds). The number of buckets in the wheel is
166 166 * determined by dividing the maximum TIME_WAIT interval (10 minutes) by
167 167 * TCP_TIME_WAIT_DELAY, with one added bucket for rollover protection.
168 168 * (This yields 121 buckets with the current parameters.) When items are inserted
169 169 * into the set of buckets, they are indexed by using their expiration time
170 170 * divided by the bucket size, modulo the number of buckets. This means that
171 171 * when each bucket is processed, all items within should have expired within
172 172 * the last TCP_TIME_WAIT_DELAY interval.
173 173 *
174 174 * Since bucket timer schedules are rounded to the nearest TCP_TIME_WAIT_DELAY
175 175 * interval to ensure all connections in the pending bucket will be expired, a
176 176 * per-squeue offset is used when doing TIME_WAIT scheduling. This offset is
177 177 * between 0 and the TCP_TIME_WAIT_DELAY and is designed to avoid scheduling
178 178 * all of the tcp_time_wait_collector threads to run in lock-step. The offset
179 179 * is fixed while there are any connections present in the buckets.
180 180 *
181 181 * When a tcp_t enters TIME_WAIT state, a timer is started (timeout is
182 182 * tcps_time_wait_interval). When the tcp_t is detached (upper layer closes
183 183 * the end point), it is scheduled to be cleaned up by the squeue-driving
184 184 * tcp_time_wait_collector (also using tcps_time_wait_interval). This means
185 185 * that the TIME_WAIT state can be extended (up to doubled) if the tcp_t
186 186 * doesn't become detached for a long time.
187 187 *
188 188 * The list manipulations (including tcp_time_wait_next/prev)
189 189 * are protected by the tcp_time_wait_lock. The content of the
190 190 * detached TIME_WAIT connections is protected by the normal perimeters.
191 191 *
192 192 * These connection lists are per squeue and squeues are shared across the
193 193 * tcp_stack_t instances. Things in a tcp_time_wait_bucket remain associated
194 194 * with the tcp_stack_t and conn_netstack. Any tcp_t connections stored in the
195 195 * tcp_free_list are disassociated and have NULL tcp_tcps and conn_netstack
196 196 * pointers.
197 197 */
198 198 typedef struct tcp_squeue_priv_s {
199 199 kmutex_t tcp_time_wait_lock;
200 200 boolean_t tcp_time_wait_collector_active;
201 201 callout_id_t tcp_time_wait_tid;
202 202 uint64_t tcp_time_wait_cnt;
203 203 int64_t tcp_time_wait_schedule;
204 204 int64_t tcp_time_wait_offset;
205 205 tcp_t *tcp_time_wait_bucket[TCP_TIME_WAIT_BUCKETS];
206 206 tcp_t *tcp_free_list;
207 207 uint_t tcp_free_list_cnt;
208 208 } tcp_squeue_priv_t;
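To make the bucket indexing described above concrete, here is a minimal sketch of how an expiration time maps onto a wheel slot; the helper name is hypothetical and not part of this change, but it follows the divide-then-modulo scheme the comment describes:

	/*
	 * Illustrative only: entries whose expiration times fall within the
	 * same TCP_TIME_WAIT_DELAY window land in the same slot, so draining
	 * one slot expires connections that are at most one interval apart.
	 * The expiration time is assumed to be in the same time base used
	 * for scheduling.
	 */
	#define	TW_BUCKET_OF(expire_time) \
		(((expire_time) / TCP_TIME_WAIT_DELAY) % TCP_TIME_WAIT_BUCKETS)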
209 209
210 210 /*
211 211 * Parameters for TCP Initial Send Sequence number (ISS) generation. When
212 212 * tcp_strong_iss is set to 1, which is the default, the ISS is calculated
213 213 * by adding three components: a time component which grows by 1 every 4096
214 214 * nanoseconds (versus every 4 microseconds suggested by RFC 793, page 27);
215 215 * a per-connection component which grows by 125000 for every new connection;
216 216 * and an "extra" component that grows by a random amount centered
217 217 * approximately on 64000. This causes the ISS generator to cycle every
218 218 * 4.89 hours if no TCP connections are made, and faster if connections are
219 219 * made.
220 220 *
221 221 * When tcp_strong_iss is set to 0, ISS is calculated by adding two
222 222 * components: a time component which grows by 250000 every second; and
223 223 * a per-connection component which grows by 125000 for every new connection.
224 224 *
225 225 * A third method for generating the ISS, used when tcp_strong_iss is set to
226 226 * 2, is prescribed by Steve Bellovin. This involves adding time, the 125000 per
227 227 * connection, and a one-way hash (MD5) of the connection ID <sport, dport,
228 228 * src, dst>, a "truly" random (per RFC 1750) number, and a console-entered
229 229 * password.
230 230 */
231 231 #define ISS_INCR 250000
232 232 #define ISS_NSEC_SHT 12
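Relating these constants to the description above (an editorial note): the 4096 ns time granularity presumably reflects a right shift of a nanosecond clock by ISS_NSEC_SHT bits (2^12 = 4096), the 125000 per-connection step is ISS_INCR / 2, and the 250000-per-second time component used when tcp_strong_iss is 0 is ISS_INCR itself.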
233 233
234 234 /* Macros for timestamp comparisons */
235 235 #define TSTMP_GEQ(a, b) ((int32_t)((a)-(b)) >= 0)
236 236 #define TSTMP_LT(a, b) ((int32_t)((a)-(b)) < 0)
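As a worked example of why the casts make these comparisons wraparound-safe (editorial illustration): with a = 5 and b = 0xFFFFFFF0, i.e. b sampled just before the 32-bit counter wrapped, (a) - (b) is 21 modulo 2^32, so (int32_t)((a) - (b)) >= 0 and TSTMP_GEQ(a, b) correctly treats a as the more recent value even though a < b as an unsigned comparison.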
237 237
238 238 /*
239 239 * Initialize cwnd according to RFC 3390. def_max_init_cwnd is
240 240 * either tcp_slow_start_initial or tcp_slow_start_after_idle
241 241 * depending on the caller. If the upper layer has not used the
242 242 * TCP_INIT_CWND option to change the initial cwnd, tcp_init_cwnd
243 243 * should be 0 and we use the formula in RFC 3390 to set tcp_cwnd.
244 244 * If the upper layer has set tcp_init_cwnd, just use
245 245 * it to calculate the tcp_cwnd.
246 246 *
247 247 * "An Argument for Increasing TCP's Initial Congestion Window"
248 248 * ACM SIGCOMM Computer Communications Review, vol. 40 (2010), pp. 27-33
249 249 * -- Nandita Dukkipati, Tiziana Refice, Yuchung Cheng,
250 250 * Hsiao-keng Jerry Chu, Tom Herbert, Amit Agarwal,
251 251 * Arvind Jain, Natalia Sutin
252 252 *
253 253 * "Based on the results from our experiments, we believe the
254 254 * initial congestion window should be at least ten segments
255 255 * and the same be investigated for standardization by the IETF."
256 256 *
257 257 * As such, the def_max_init_cwnd argument with which this macro is
258 258 * invoked is either the tcps_slow_start_initial or
259 259 * tcps_slow_start_after_idle which both default to 0 and will respect
260 260 * RFC 3390 exactly. If the tunables are explicitly set by the operator,
261 261 * then the initial congestion window should be set as the operator
262 262 * demands, within reason. We shall arbitrarily define reason as a
263 263 * maximum of 16 (same as used by the TCP_INIT_CWND setsockopt).
264 264 */
265 265
266 266 /* Maximum TCP initial cwnd (start/restart). */
267 267 #define TCP_MAX_INIT_CWND 16
268 268
269 269 #define TCP_SET_INIT_CWND(tcp, mss, def_max_init_cwnd) \
270 270 { \
271 271 if ((tcp)->tcp_init_cwnd == 0) { \
272 272 if (def_max_init_cwnd == 0) { \
273 273 (tcp)->tcp_cwnd = MIN(4 * (mss), \
274 274 MAX(2 * (mss), 4380 / (mss) * (mss))); \
275 275 } else { \
276 276 (tcp)->tcp_cwnd = MIN(TCP_MAX_INIT_CWND * (mss),\
277 277 def_max_init_cwnd * (mss)); \
278 278 } \
279 279 } else { \
280 280 (tcp)->tcp_cwnd = (tcp)->tcp_init_cwnd * (mss); \
281 281 } \
282 282 tcp->tcp_cwnd_cnt = 0; \
283 283 }
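A worked example of the RFC 3390 branch (editorial illustration): with tcp_init_cwnd and def_max_init_cwnd both zero and an mss of 1460, tcp_cwnd becomes MIN(4 * 1460, MAX(2 * 1460, 4380 / 1460 * 1460)) = MIN(5840, MAX(2920, 4380)) = 4380, i.e. three segments; for an mss of 536 the same expression yields MIN(2144, MAX(1072, 4288)) = 2144, i.e. four segments.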
284 284
285 285 /*
286 286 * Set ECN capable transport (ECT) code point in IP header.
287 287 *
288 288 * Note that there are 2 ECT code points '01' and '10', which are called
289 289 * ECT(1) and ECT(0) respectively. Here we follow the original ECT code
290 290 * point ECT(0) for TCP as described in RFC 2481.
291 291 */
292 292 #define TCP_SET_ECT(tcp, iph) \
293 293 if ((tcp)->tcp_connp->conn_ipversion == IPV4_VERSION) { \
294 294 /* We need to clear the code point first. */ \
295 295 ((ipha_t *)(iph))->ipha_type_of_service &= 0xFC; \
296 296 ((ipha_t *)(iph))->ipha_type_of_service |= IPH_ECN_ECT0; \
297 297 } else { \
298 298 ((ip6_t *)(iph))->ip6_vcf &= htonl(0xFFCFFFFF); \
299 299 ((ip6_t *)(iph))->ip6_vcf |= htonl(IPH_ECN_ECT0 << 20); \
300 300 }
301 301
302 302 /*
303 - * Set tcp_rto with boundary checking.
304 - */
305 -#define TCP_SET_RTO(tcp, rto) \
306 - if ((rto) < (tcp)->tcp_rto_min) \
307 - (tcp)->tcp_rto = (tcp)->tcp_rto_min; \
308 - else if ((rto) > (tcp)->tcp_rto_max) \
309 - (tcp)->tcp_rto = (tcp)->tcp_rto_max; \
310 - else \
311 - (tcp)->tcp_rto = (rto);
312 -
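The boundary checking that the deleted macro performed presumably moves into tcp_calculate_rto(), whose declaration is added later in this diff (per DLPX-42721). A minimal sketch of equivalent clamping, assuming the same tcp_rto_min/tcp_rto_max bounds and a caller-supplied candidate RTO - not the actual implementation under review - would be:

	/*
	 * Sketch only: clamp a candidate RTO to the per-connection bounds,
	 * mirroring what the removed TCP_SET_RTO macro did.  The function
	 * name is hypothetical.
	 */
	static clock_t
	tcp_rto_clamp(const tcp_t *tcp, clock_t rto)
	{
		if (rto < tcp->tcp_rto_min)
			return (tcp->tcp_rto_min);
		if (rto > tcp->tcp_rto_max)
			return (tcp->tcp_rto_max);
		return (rto);
	}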
313 -/*
314 303 * TCP options struct returned from tcp_parse_options.
315 304 */
316 305 typedef struct tcp_opt_s {
317 306 uint32_t tcp_opt_mss;
318 307 uint32_t tcp_opt_wscale;
319 308 uint32_t tcp_opt_ts_val;
320 309 uint32_t tcp_opt_ts_ecr;
321 310 tcp_t *tcp;
322 311 } tcp_opt_t;
323 312
324 313 /*
325 314 * Flags returned from tcp_parse_options.
326 315 */
327 316 #define TCP_OPT_MSS_PRESENT 1
328 317 #define TCP_OPT_WSCALE_PRESENT 2
329 318 #define TCP_OPT_TSTAMP_PRESENT 4
330 319 #define TCP_OPT_SACK_OK_PRESENT 8
331 320 #define TCP_OPT_SACK_PRESENT 16
332 321
333 322 /*
334 323 * Write-side flow-control is implemented via the per-instance STREAMS
335 324 * write-side Q by explicitly setting QFULL to stop the flow of mblk_t(s)
336 325 * and clearing QFULL and calling qbackenable() to restart the flow based
337 326 * on the number of TCP unsent bytes (i.e. those not on the wire waiting
338 327 * for a remote ACK).
339 328 *
340 329 * This is different from a standard STREAMS kmod, where the framework
341 330 * would automatically flow-control the STREAMS Q based on the
342 331 * defined hiwat/lowat values as mblk_t's are enqueued/dequeued.
343 332 *
344 333 * As of FireEngine, TCP write-side flow-control needs to take into account
345 334 * both the unsent tcp_xmit list bytes and any squeue_t enqueued bytes
346 335 * (i.e. from tcp_wput() -> tcp_output()).
347 336 *
348 337 * This is accomplished by adding a new tcp_t field, tcp_squeue_bytes, to
349 338 * count the number of bytes enqueued by tcp_wput() and the number of bytes
350 339 * dequeued and processed by tcp_output().
351 340 *
352 341 * So, the total number of bytes unsent is (squeue_bytes + unsent) with all
353 342 * flow-control uses of unsent replaced with the macro TCP_UNSENT_BYTES.
354 343 */
355 344 extern void tcp_clrqfull(tcp_t *);
356 345 extern void tcp_setqfull(tcp_t *);
357 346
358 347 #define TCP_UNSENT_BYTES(tcp) \
359 348 ((tcp)->tcp_squeue_bytes + (tcp)->tcp_unsent)
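A hedged sketch of how a producer might consult the combined count (the helper name and the use of conn_sndbuf as the threshold are assumptions for illustration, not code from this change):

	/*
	 * Sketch only: assert or relieve write-side flow control based on the
	 * total unsent bytes (squeue-enqueued plus tcp_xmit list).
	 */
	static void
	tcp_check_flow_ctl(tcp_t *tcp)
	{
		if (TCP_UNSENT_BYTES(tcp) >= tcp->tcp_connp->conn_sndbuf)
			tcp_setqfull(tcp);
		else
			tcp_clrqfull(tcp);
	}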
360 349
361 350 /*
362 351 * Linked list struct to store listener connection limit configuration per
363 352 * IP stack. The list is stored at tcps_listener_conf in tcp_stack_t.
364 353 *
365 354 * tl_port: the listener port of this limit configuration
366 355 * tl_ratio: ensures that the memory consumed by all concurrent TCP
367 356 * connections created by a listener does not exceed 1/tl_ratio
368 357 * of the total system memory. Note that this is only an
369 358 * approximation.
370 359 * tl_link: linked list struct
371 360 */
372 361 typedef struct tcp_listener_s {
373 362 in_port_t tl_port;
374 363 uint32_t tl_ratio;
375 364 list_node_t tl_link;
376 365 } tcp_listener_t;
377 366
378 367 /*
379 368 * If there is a limit set on the number of connections allowed per
380 369 * listener, the following struct is used to store that counter. It keeps
381 370 * the number of TCP connections created by a listener. Note that this needs
382 371 * to be separated from the listener since the listener can go away before
383 372 * all the connections are gone.
384 373 *
385 374 * When the struct is allocated, tlc_cnt is set to 1. When a new connection
386 375 * is created by the listener, tlc_cnt is incremented by 1. When a connection
387 376 * created by the listener goes away, tlc_cnt is decremented by 1. When the
388 377 * listener itself goes away, tlc_cnt is decremented by one. The last
389 378 * connection (or the listener) which decrements tlc_cnt to zero frees the
390 379 * struct.
391 380 *
392 381 * tlc_max is the maximum number of concurrent TCP connections created from a
393 382 * listener. It is calculated when the tcp_listen_cnt_t is allocated.
394 383 *
395 384 * tlc_report_time stores the time when cmn_err() is called to report that the
396 385 * max has been exceeded. Report is done at most once every
397 386 * TCP_TLC_REPORT_INTERVAL minutes for a listener.
398 387 *
399 388 * tlc_drop stores the number of connection attempts dropped because the
400 389 * limit has been reached.
401 390 */
402 391 typedef struct tcp_listen_cnt_s {
403 392 uint32_t tlc_max;
404 393 uint32_t tlc_cnt;
405 394 int64_t tlc_report_time;
406 395 uint32_t tlc_drop;
407 396 } tcp_listen_cnt_t;
408 397
409 398 /*
410 399 * Track tcp_t entities bound to the same port/address tuple via SO_REUSEPORT.
411 400 * - tcprg_lock: Protects the other fields
412 401 * - tcprg_size: Allocated size (in entries) of tcprg_members array
413 402 * - tcprg_count: Count of occupied tcprg_members slots
414 403 * - tcprg_active: Count of members which still have SO_REUSEPORT set
415 404 * - tcprg_members: Connections associated with address/port group
416 405 */
417 406 typedef struct tcp_rg_s {
418 407 kmutex_t tcprg_lock;
419 408 unsigned int tcprg_size;
420 409 unsigned int tcprg_count;
421 410 unsigned int tcprg_active;
422 411 tcp_t **tcprg_members;
423 412 } tcp_rg_t;
424 413
425 414 #define TCP_TLC_REPORT_INTERVAL (30 * MINUTES)
426 415
427 416 #define TCP_DECR_LISTEN_CNT(tcp) \
428 417 { \
429 418 ASSERT((tcp)->tcp_listen_cnt->tlc_cnt > 0); \
430 419 if (atomic_dec_32_nv(&(tcp)->tcp_listen_cnt->tlc_cnt) == 0) \
431 420 kmem_free((tcp)->tcp_listen_cnt, sizeof (tcp_listen_cnt_t)); \
432 421 (tcp)->tcp_listen_cnt = NULL; \
433 422 }
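For symmetry with the decrement above, the accept path has to check tlc_max and bump tlc_cnt before creating a new connection; a rough sketch of that step (the helper name is hypothetical, and the real code may order the check and increment differently):

	/*
	 * Sketch only: account a new connection against its listener's limit,
	 * recording a drop when the limit has been reached.
	 */
	static boolean_t
	tcp_listen_cnt_bump(tcp_listen_cnt_t *tlc)
	{
		if (tlc->tlc_cnt >= tlc->tlc_max) {
			atomic_inc_32(&tlc->tlc_drop);
			return (B_FALSE);
		}
		atomic_inc_32(&tlc->tlc_cnt);
		return (B_TRUE);
	}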
434 423
435 424 /* Increment and decrement the number of connections in tcp_stack_t. */
436 425 #define TCPS_CONN_INC(tcps) \
437 426 atomic_inc_64( \
438 427 (uint64_t *)&(tcps)->tcps_sc[CPU->cpu_seqid]->tcp_sc_conn_cnt)
439 428
440 429 #define TCPS_CONN_DEC(tcps) \
441 430 atomic_dec_64( \
442 431 (uint64_t *)&(tcps)->tcps_sc[CPU->cpu_seqid]->tcp_sc_conn_cnt)
443 432
444 433 /*
445 434 * When the system is under memory pressure, the stack variable tcps_reclaim is
446 435 * set to true and we shorten the connection timeout abort interval to tcp_early_abort
447 436 * seconds. Defined in tcp.c.
448 437 */
449 438 extern uint32_t tcp_early_abort;
450 439
451 440 /*
452 441 * To reach an eager in Q0 that can be dropped due to an incoming
453 442 * new SYN request when Q0 is full, a new doubly linked list is
454 443 * introduced. This list allows selecting an eager from Q0 in O(1) time.
455 444 * This is needed to avoid spending too much time walking through the
456 445 * long list of eagers in Q0 when tcp_drop_q0() is called. Each member of
457 446 * this new list has to be a member of Q0.
458 447 * This list is headed by the listener's tcp_t. When the list is empty,
459 448 * both pointers - tcp_eager_next_drop_q0 and tcp_eager_prev_drop_q0 -
460 449 * of the listener's tcp_t point to the listener's tcp_t itself.
461 450 *
462 451 * Given an eager in Q0 and a listener, MAKE_DROPPABLE() puts the eager
463 452 * in the list. MAKE_UNDROPPABLE() takes the eager out of the list.
464 453 * These macros do not affect the eager's membership to Q0.
465 454 */
466 455 #define MAKE_DROPPABLE(listener, eager) \
467 456 if ((eager)->tcp_eager_next_drop_q0 == NULL) { \
468 457 (listener)->tcp_eager_next_drop_q0->tcp_eager_prev_drop_q0\
469 458 = (eager); \
470 459 (eager)->tcp_eager_prev_drop_q0 = (listener); \
471 460 (eager)->tcp_eager_next_drop_q0 = \
472 461 (listener)->tcp_eager_next_drop_q0; \
473 462 (listener)->tcp_eager_next_drop_q0 = (eager); \
474 463 }
475 464
476 465 #define MAKE_UNDROPPABLE(eager) \
477 466 if ((eager)->tcp_eager_next_drop_q0 != NULL) { \
478 467 (eager)->tcp_eager_next_drop_q0->tcp_eager_prev_drop_q0 \
479 468 = (eager)->tcp_eager_prev_drop_q0; \
480 469 (eager)->tcp_eager_prev_drop_q0->tcp_eager_next_drop_q0 \
481 470 = (eager)->tcp_eager_next_drop_q0; \
482 471 (eager)->tcp_eager_prev_drop_q0 = NULL; \
483 472 (eager)->tcp_eager_next_drop_q0 = NULL; \
484 473 }
485 474
486 475 /*
487 476 * The format argument to pass to tcp_display().
488 477 * DISP_PORT_ONLY means that the returned string has only port info.
489 478 * DISP_ADDR_AND_PORT means that the returned string also contains the
490 479 * remote and local IP address.
491 480 */
492 481 #define DISP_PORT_ONLY 1
493 482 #define DISP_ADDR_AND_PORT 2
494 483
495 484 #define IP_ADDR_CACHE_SIZE 2048
496 485 #define IP_ADDR_CACHE_HASH(faddr) \
497 486 (ntohl(faddr) & (IP_ADDR_CACHE_SIZE -1))
498 487
499 488 /*
500 489 * TCP reassembly macros. We hide starting and ending sequence numbers in
501 490 * b_next and b_prev of messages on the reassembly queue. The messages are
502 491 * chained using b_cont. These macros are used in tcp_reass() so we don't
503 492 * have to see the ugly casts and assignments.
504 493 */
505 494 #define TCP_REASS_SEQ(mp) ((uint32_t)(uintptr_t)((mp)->b_next))
506 495 #define TCP_REASS_SET_SEQ(mp, u) ((mp)->b_next = \
507 496 (mblk_t *)(uintptr_t)(u))
508 497 #define TCP_REASS_END(mp) ((uint32_t)(uintptr_t)((mp)->b_prev))
509 498 #define TCP_REASS_SET_END(mp, u) ((mp)->b_prev = \
510 499 (mblk_t *)(uintptr_t)(u))
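An illustrative use of these accessors when queuing a segment (the variable names are assumed; the real consumer is tcp_reass(), as noted above):

	/*
	 * Sketch only: stash the segment's starting and ending sequence
	 * numbers on the mblk before linking it into the reassembly queue
	 * (whether "end" is inclusive follows tcp_reass()'s convention).
	 */
	TCP_REASS_SET_SEQ(mp, seg_seq);
	TCP_REASS_SET_END(mp, seg_seq + seg_len);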
511 500
512 501 #define tcps_time_wait_interval tcps_propinfo_tbl[0].prop_cur_uval
513 502 #define tcps_conn_req_max_q tcps_propinfo_tbl[1].prop_cur_uval
514 503 #define tcps_conn_req_max_q0 tcps_propinfo_tbl[2].prop_cur_uval
515 504 #define tcps_conn_req_min tcps_propinfo_tbl[3].prop_cur_uval
516 505 #define tcps_conn_grace_period tcps_propinfo_tbl[4].prop_cur_uval
517 506 #define tcps_cwnd_max_ tcps_propinfo_tbl[5].prop_cur_uval
518 507 #define tcps_dbg tcps_propinfo_tbl[6].prop_cur_uval
519 508 #define tcps_smallest_nonpriv_port tcps_propinfo_tbl[7].prop_cur_uval
520 509 #define tcps_ip_abort_cinterval tcps_propinfo_tbl[8].prop_cur_uval
521 510 #define tcps_ip_abort_linterval tcps_propinfo_tbl[9].prop_cur_uval
522 511 #define tcps_ip_abort_interval tcps_propinfo_tbl[10].prop_cur_uval
523 512 #define tcps_ip_notify_cinterval tcps_propinfo_tbl[11].prop_cur_uval
524 513 #define tcps_ip_notify_interval tcps_propinfo_tbl[12].prop_cur_uval
525 514 #define tcps_ipv4_ttl tcps_propinfo_tbl[13].prop_cur_uval
526 515 #define tcps_keepalive_interval_high tcps_propinfo_tbl[14].prop_max_uval
527 516 #define tcps_keepalive_interval tcps_propinfo_tbl[14].prop_cur_uval
528 517 #define tcps_keepalive_interval_low tcps_propinfo_tbl[14].prop_min_uval
529 518 #define tcps_maxpsz_multiplier tcps_propinfo_tbl[15].prop_cur_uval
530 519 #define tcps_mss_def_ipv4 tcps_propinfo_tbl[16].prop_cur_uval
531 520 #define tcps_mss_max_ipv4 tcps_propinfo_tbl[17].prop_cur_uval
532 521 #define tcps_mss_min tcps_propinfo_tbl[18].prop_cur_uval
533 522 #define tcps_naglim_def tcps_propinfo_tbl[19].prop_cur_uval
534 523 #define tcps_rexmit_interval_initial_high \
535 524 tcps_propinfo_tbl[20].prop_max_uval
536 525 #define tcps_rexmit_interval_initial tcps_propinfo_tbl[20].prop_cur_uval
537 526 #define tcps_rexmit_interval_initial_low \
538 527 tcps_propinfo_tbl[20].prop_min_uval
539 528 #define tcps_rexmit_interval_max_high tcps_propinfo_tbl[21].prop_max_uval
540 529 #define tcps_rexmit_interval_max tcps_propinfo_tbl[21].prop_cur_uval
541 530 #define tcps_rexmit_interval_max_low tcps_propinfo_tbl[21].prop_min_uval
542 531 #define tcps_rexmit_interval_min_high tcps_propinfo_tbl[22].prop_max_uval
543 532 #define tcps_rexmit_interval_min tcps_propinfo_tbl[22].prop_cur_uval
544 533 #define tcps_rexmit_interval_min_low tcps_propinfo_tbl[22].prop_min_uval
545 534 #define tcps_deferred_ack_interval tcps_propinfo_tbl[23].prop_cur_uval
546 535 #define tcps_snd_lowat_fraction tcps_propinfo_tbl[24].prop_cur_uval
547 536 #define tcps_dupack_fast_retransmit tcps_propinfo_tbl[25].prop_cur_uval
548 537 #define tcps_ignore_path_mtu tcps_propinfo_tbl[26].prop_cur_bval
549 538 #define tcps_smallest_anon_port tcps_propinfo_tbl[27].prop_cur_uval
550 539 #define tcps_largest_anon_port tcps_propinfo_tbl[28].prop_cur_uval
551 540 #define tcps_xmit_hiwat tcps_propinfo_tbl[29].prop_cur_uval
552 541 #define tcps_xmit_lowat tcps_propinfo_tbl[30].prop_cur_uval
553 542 #define tcps_recv_hiwat tcps_propinfo_tbl[31].prop_cur_uval
554 543 #define tcps_recv_hiwat_minmss tcps_propinfo_tbl[32].prop_cur_uval
555 544 #define tcps_fin_wait_2_flush_interval_high \
556 545 tcps_propinfo_tbl[33].prop_max_uval
557 546 #define tcps_fin_wait_2_flush_interval tcps_propinfo_tbl[33].prop_cur_uval
558 547 #define tcps_fin_wait_2_flush_interval_low \
559 548 tcps_propinfo_tbl[33].prop_min_uval
560 549 #define tcps_max_buf tcps_propinfo_tbl[34].prop_cur_uval
561 550 #define tcps_strong_iss tcps_propinfo_tbl[35].prop_cur_uval
562 551 #define tcps_rtt_updates tcps_propinfo_tbl[36].prop_cur_uval
563 552 #define tcps_wscale_always tcps_propinfo_tbl[37].prop_cur_bval
564 553 #define tcps_tstamp_always tcps_propinfo_tbl[38].prop_cur_bval
565 554 #define tcps_tstamp_if_wscale tcps_propinfo_tbl[39].prop_cur_bval
566 555 #define tcps_rexmit_interval_extra tcps_propinfo_tbl[40].prop_cur_uval
567 556 #define tcps_deferred_acks_max tcps_propinfo_tbl[41].prop_cur_uval
568 557 #define tcps_slow_start_after_idle tcps_propinfo_tbl[42].prop_cur_uval
569 558 #define tcps_slow_start_initial tcps_propinfo_tbl[43].prop_cur_uval
570 559 #define tcps_sack_permitted tcps_propinfo_tbl[44].prop_cur_uval
571 560 #define tcps_ipv6_hoplimit tcps_propinfo_tbl[45].prop_cur_uval
572 561 #define tcps_mss_def_ipv6 tcps_propinfo_tbl[46].prop_cur_uval
573 562 #define tcps_mss_max_ipv6 tcps_propinfo_tbl[47].prop_cur_uval
574 563 #define tcps_rev_src_routes tcps_propinfo_tbl[48].prop_cur_bval
575 564 #define tcps_local_dack_interval tcps_propinfo_tbl[49].prop_cur_uval
576 565 #define tcps_local_dacks_max tcps_propinfo_tbl[50].prop_cur_uval
577 566 #define tcps_ecn_permitted tcps_propinfo_tbl[51].prop_cur_uval
578 567 #define tcps_rst_sent_rate_enabled tcps_propinfo_tbl[52].prop_cur_bval
579 568 #define tcps_rst_sent_rate tcps_propinfo_tbl[53].prop_cur_uval
580 569 #define tcps_push_timer_interval tcps_propinfo_tbl[54].prop_cur_uval
581 570 #define tcps_use_smss_as_mss_opt tcps_propinfo_tbl[55].prop_cur_bval
582 571 #define tcps_keepalive_abort_interval_high \
583 572 tcps_propinfo_tbl[56].prop_max_uval
584 573 #define tcps_keepalive_abort_interval \
585 574 tcps_propinfo_tbl[56].prop_cur_uval
586 575 #define tcps_keepalive_abort_interval_low \
587 576 tcps_propinfo_tbl[56].prop_min_uval
588 577 #define tcps_wroff_xtra tcps_propinfo_tbl[57].prop_cur_uval
589 578 #define tcps_dev_flow_ctl tcps_propinfo_tbl[58].prop_cur_bval
590 579 #define tcps_reass_timeout tcps_propinfo_tbl[59].prop_cur_uval
591 580 #define tcps_iss_incr tcps_propinfo_tbl[65].prop_cur_uval
581 +#define tcps_abc tcps_propinfo_tbl[67].prop_cur_bval
582 +#define tcps_abc_l_var tcps_propinfo_tbl[68].prop_cur_uval
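These two additions presumably expose RFC 3465 Appropriate Byte Counting for the congestion-control work named in DLPX-25998: a boolean to enable byte counting and a limit (in segments) on how much newly acked data may be credited to cwnd per ACK during slow start. A hedged sketch of that style of increase, not the code under review, with bytes_acked and mss treated as placeholders:

	/*
	 * Sketch only: RFC 3465 style slow-start growth.  Credit cwnd with
	 * the bytes newly acknowledged, capped at abc_l_var segments per ACK;
	 * real code would also respect the configured cwnd maximum.
	 */
	uint32_t incr = MIN(bytes_acked, tcps->tcps_abc_l_var * mss);
	tcp->tcp_cwnd += incr;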
592 583
593 584 extern struct qinit tcp_rinitv4, tcp_rinitv6;
594 585 extern boolean_t do_tcp_fusion;
595 586
596 587 /*
597 588 * Object to represent database of options to search passed to
598 589 * {sock,tpi}optcom_req() interface routine to take care of option
599 590 * management and associated methods.
600 591 */
601 592 extern optdb_obj_t tcp_opt_obj;
602 593 extern uint_t tcp_max_optsize;
603 594
604 595 extern int tcp_squeue_flag;
605 596
606 597 extern uint_t tcp_free_list_max_cnt;
607 598
608 599 /*
609 600 * Functions in tcp.c.
610 601 */
611 602 extern void tcp_acceptor_hash_insert(t_uscalar_t, tcp_t *);
612 603 extern tcp_t *tcp_acceptor_hash_lookup(t_uscalar_t, tcp_stack_t *);
613 604 extern void tcp_acceptor_hash_remove(tcp_t *);
614 605 extern mblk_t *tcp_ack_mp(tcp_t *);
615 606 extern int tcp_build_hdrs(tcp_t *);
607 +extern clock_t tcp_calculate_rto(tcp_t *, tcp_stack_t *);
616 608 extern void tcp_cleanup(tcp_t *);
617 609 extern int tcp_clean_death(tcp_t *, int);
618 610 extern void tcp_clean_death_wrapper(void *, mblk_t *, void *,
619 611 ip_recv_attr_t *);
620 612 extern void tcp_close_common(conn_t *, int);
621 613 extern void tcp_close_detached(tcp_t *);
622 614 extern void tcp_close_mpp(mblk_t **);
623 615 extern void tcp_closei_local(tcp_t *);
624 616 extern sock_lower_handle_t tcp_create(int, int, int, sock_downcalls_t **,
625 617 uint_t *, int *, int, cred_t *);
626 618 extern conn_t *tcp_create_common(cred_t *, boolean_t, boolean_t, int *);
627 619 extern void tcp_disconnect(tcp_t *, mblk_t *);
628 620 extern char *tcp_display(tcp_t *, char *, char);
629 621 extern int tcp_do_bind(conn_t *, struct sockaddr *, socklen_t, cred_t *,
630 622 boolean_t);
631 623 extern int tcp_do_connect(conn_t *, const struct sockaddr *, socklen_t,
632 624 cred_t *, pid_t);
633 625 extern int tcp_do_listen(conn_t *, struct sockaddr *, socklen_t, int,
634 626 cred_t *, boolean_t);
635 627 extern int tcp_do_unbind(conn_t *);
636 628 extern boolean_t tcp_eager_blowoff(tcp_t *, t_scalar_t);
637 629 extern void tcp_eager_cleanup(tcp_t *, boolean_t);
638 630 extern void tcp_eager_kill(void *, mblk_t *, void *, ip_recv_attr_t *);
639 631 extern void tcp_eager_unlink(tcp_t *);
640 632 extern void tcp_init_values(tcp_t *, tcp_t *);
641 633 extern void tcp_ipsec_cleanup(tcp_t *);
642 634 extern int tcp_maxpsz_set(tcp_t *, boolean_t);
643 635 extern void tcp_mss_set(tcp_t *, uint32_t);
644 636 extern void tcp_reinput(conn_t *, mblk_t *, ip_recv_attr_t *, ip_stack_t *);
645 637 extern void tcp_rsrv(queue_t *);
646 638 extern uint_t tcp_rwnd_reopen(tcp_t *);
647 639 extern int tcp_rwnd_set(tcp_t *, uint32_t);
648 640 extern int tcp_set_destination(tcp_t *);
649 641 extern void tcp_set_ws_value(tcp_t *);
650 642 extern void tcp_stop_lingering(tcp_t *);
651 643 extern boolean_t tcp_update_pmtu(tcp_t *, boolean_t);
652 644 extern mblk_t *tcp_zcopy_backoff(tcp_t *, mblk_t *, boolean_t);
653 645 extern boolean_t tcp_zcopy_check(tcp_t *);
654 646 extern void tcp_zcopy_notify(tcp_t *);
655 647 extern void tcp_get_proto_props(tcp_t *, struct sock_proto_props *);
656 648
657 649 /*
658 650 * Bind related functions in tcp_bind.c
659 651 */
660 652 extern int tcp_bind_check(conn_t *, struct sockaddr *, socklen_t,
661 653 cred_t *, boolean_t);
662 654 extern void tcp_bind_hash_insert(tf_t *, tcp_t *, int);
663 655 extern void tcp_bind_hash_remove(tcp_t *);
664 656 extern in_port_t tcp_bindi(tcp_t *, in_port_t, const in6_addr_t *,
665 657 int, boolean_t, boolean_t, boolean_t);
666 658 extern in_port_t tcp_update_next_port(in_port_t, const tcp_t *,
667 659 boolean_t);
668 660 extern tcp_rg_t *tcp_rg_init(tcp_t *);
669 661 extern boolean_t tcp_rg_remove(tcp_rg_t *, tcp_t *);
670 662 extern void tcp_rg_destroy(tcp_rg_t *);
671 663 extern void tcp_rg_setactive(tcp_rg_t *, boolean_t);
672 664
673 665 /*
674 666 * Fusion related functions in tcp_fusion.c.
675 667 */
676 668 extern void tcp_fuse(tcp_t *, uchar_t *, tcpha_t *);
677 669 extern void tcp_unfuse(tcp_t *);
678 670 extern boolean_t tcp_fuse_output(tcp_t *, mblk_t *, uint32_t);
679 671 extern void tcp_fuse_output_urg(tcp_t *, mblk_t *);
680 672 extern boolean_t tcp_fuse_rcv_drain(queue_t *, tcp_t *, mblk_t **);
681 673 extern size_t tcp_fuse_set_rcv_hiwat(tcp_t *, size_t);
682 674 extern int tcp_fuse_maxpsz(tcp_t *);
683 675 extern void tcp_fuse_backenable(tcp_t *);
684 676 extern void tcp_iss_key_init(uint8_t *, int, tcp_stack_t *);
685 677
686 678 /*
687 679 * Output related functions in tcp_output.c.
688 680 */
689 681 extern void tcp_close_output(void *, mblk_t *, void *, ip_recv_attr_t *);
690 682 extern void tcp_output(void *, mblk_t *, void *, ip_recv_attr_t *);
691 683 extern void tcp_output_urgent(void *, mblk_t *, void *, ip_recv_attr_t *);
692 684 extern void tcp_rexmit_after_error(tcp_t *);
693 685 extern void tcp_sack_rexmit(tcp_t *, uint_t *);
694 686 extern void tcp_send_data(tcp_t *, mblk_t *);
695 687 extern void tcp_send_synack(void *, mblk_t *, void *, ip_recv_attr_t *);
696 688 extern void tcp_shutdown_output(void *, mblk_t *, void *, ip_recv_attr_t *);
697 689 extern void tcp_ss_rexmit(tcp_t *);
698 690 extern void tcp_update_xmit_tail(tcp_t *, uint32_t);
699 691 extern void tcp_wput(queue_t *, mblk_t *);
700 692 extern void tcp_wput_data(tcp_t *, mblk_t *, boolean_t);
701 693 extern void tcp_wput_sock(queue_t *, mblk_t *);
702 694 extern void tcp_wput_fallback(queue_t *, mblk_t *);
703 695 extern void tcp_xmit_ctl(char *, tcp_t *, uint32_t, uint32_t, int);
704 696 extern void tcp_xmit_listeners_reset(mblk_t *, ip_recv_attr_t *,
705 697 ip_stack_t *i, conn_t *);
706 698 extern mblk_t *tcp_xmit_mp(tcp_t *, mblk_t *, int32_t, int32_t *,
707 699 mblk_t **, uint32_t, boolean_t, uint32_t *, boolean_t);
708 700
709 701 /*
710 702 * Input related functions in tcp_input.c.
711 703 */
704 +extern void cc_cong_signal(tcp_t *, uint32_t, uint32_t);
712 705 extern void tcp_icmp_input(void *, mblk_t *, void *, ip_recv_attr_t *);
713 706 extern void tcp_input_data(void *, mblk_t *, void *, ip_recv_attr_t *);
714 707 extern void tcp_input_listener_unbound(void *, mblk_t *, void *,
715 708 ip_recv_attr_t *);
716 709 extern boolean_t tcp_paws_check(tcp_t *, const tcp_opt_t *);
717 710 extern int tcp_parse_options(tcpha_t *, tcp_opt_t *);
718 711 extern uint_t tcp_rcv_drain(tcp_t *);
719 712 extern void tcp_rcv_enqueue(tcp_t *, mblk_t *, uint_t, cred_t *);
720 713 extern boolean_t tcp_verifyicmp(conn_t *, void *, icmph_t *, icmp6_t *,
721 714 ip_recv_attr_t *);
722 715
723 716 /*
724 717 * Kernel socket related functions in tcp_socket.c.
725 718 */
726 719 extern int tcp_fallback(sock_lower_handle_t, queue_t *, boolean_t,
727 720 so_proto_quiesced_cb_t, sock_quiesce_arg_t *);
728 721 extern boolean_t tcp_newconn_notify(tcp_t *, ip_recv_attr_t *);
729 722
730 723 /*
731 724 * Timer related functions in tcp_timers.c.
732 725 */
733 726 extern void tcp_ack_timer(void *);
734 727 extern void tcp_close_linger_timeout(void *);
735 728 extern void tcp_keepalive_timer(void *);
736 729 extern void tcp_push_timer(void *);
737 730 extern void tcp_reass_timer(void *);
738 731 extern mblk_t *tcp_timermp_alloc(int);
739 732 extern void tcp_timermp_free(tcp_t *);
740 733 extern timeout_id_t tcp_timeout(conn_t *, void (*)(void *), hrtime_t);
741 734 extern clock_t tcp_timeout_cancel(conn_t *, timeout_id_t);
742 735 extern void tcp_timer(void *arg);
743 736 extern void tcp_timers_stop(tcp_t *);
744 737
745 738 /*
746 739 * TCP TPI related functions in tcp_tpi.c.
747 740 */
748 741 extern void tcp_addr_req(tcp_t *, mblk_t *);
749 742 extern void tcp_capability_req(tcp_t *, mblk_t *);
750 743 extern boolean_t tcp_conn_con(tcp_t *, uchar_t *, mblk_t *,
751 744 mblk_t **, ip_recv_attr_t *);
752 745 extern void tcp_err_ack(tcp_t *, mblk_t *, int, int);
753 746 extern void tcp_err_ack_prim(tcp_t *, mblk_t *, int, int, int);
754 747 extern void tcp_info_req(tcp_t *, mblk_t *);
755 748 extern void tcp_send_conn_ind(void *, mblk_t *, void *);
756 749 extern void tcp_send_pending(void *, mblk_t *, void *, ip_recv_attr_t *);
757 750 extern void tcp_tpi_accept(queue_t *, mblk_t *);
758 751 extern void tcp_tpi_bind(tcp_t *, mblk_t *);
759 752 extern int tcp_tpi_close(queue_t *, int);
760 753 extern int tcp_tpi_close_accept(queue_t *);
761 754 extern void tcp_tpi_connect(tcp_t *, mblk_t *);
762 755 extern int tcp_tpi_opt_get(queue_t *, t_scalar_t, t_scalar_t, uchar_t *);
763 756 extern int tcp_tpi_opt_set(queue_t *, uint_t, int, int, uint_t, uchar_t *,
764 757 uint_t *, uchar_t *, void *, cred_t *);
765 758 extern void tcp_tpi_unbind(tcp_t *, mblk_t *);
766 759 extern void tcp_tli_accept(tcp_t *, mblk_t *);
767 760 extern void tcp_use_pure_tpi(tcp_t *);
768 761 extern void tcp_do_capability_ack(tcp_t *, struct T_capability_ack *,
769 762 t_uscalar_t);
770 763
771 764 /*
772 765 * TCP option processing related functions in tcp_opt_data.c
773 766 */
774 767 extern int tcp_opt_get(conn_t *, int, int, uchar_t *);
775 768 extern int tcp_opt_set(conn_t *, uint_t, int, int, uint_t, uchar_t *,
776 769 uint_t *, uchar_t *, void *, cred_t *);
777 770
778 771 /*
779 772 * TCP time wait processing related functions in tcp_time_wait.c.
780 773 */
781 774 extern void tcp_time_wait_append(tcp_t *);
782 775 extern void tcp_time_wait_collector(void *);
783 776 extern boolean_t tcp_time_wait_remove(tcp_t *, tcp_squeue_priv_t *);
784 777 extern void tcp_time_wait_processing(tcp_t *, mblk_t *, uint32_t,
785 778 uint32_t, int, tcpha_t *, ip_recv_attr_t *);
786 779
787 780 /*
788 781 * Misc functions in tcp_misc.c.
789 782 */
790 783 extern uint32_t tcp_find_listener_conf(tcp_stack_t *, in_port_t);
791 784 extern void tcp_ioctl_abort_conn(queue_t *, mblk_t *);
792 785 extern void tcp_listener_conf_cleanup(tcp_stack_t *);
793 786 extern void tcp_stack_cpu_add(tcp_stack_t *, processorid_t);
794 787
795 788 #endif /* _KERNEL */
796 789
797 790 #ifdef __cplusplus
798 791 }
799 792 #endif
800 793
801 794 #endif /* _INET_TCP_IMPL_H */