DLPX-25998 TCP congestion control is inadequate
Reviewed at: http://reviews.delphix.com/r/34808/
DLPX-45697 Adding Avg. RTT to connstat
DLPX-43064 include high-resolution round-trip times in connstat (EP-652)
DLPX-42721 Create inline function for TCP RTO calculation
DLPX-37540 TCP per-connection kernel statistics
DLPX-37544 connstat command to display per-connection TCP statistics
--- old/usr/src/uts/common/inet/tcp/tcp.c
+++ new/usr/src/uts/common/inet/tcp/tcp.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright (c) 1991, 2010, Oracle and/or its affiliates. All rights reserved.
24 24 * Copyright 2017 Joyent, Inc.
25 25 * Copyright (c) 2011 Nexenta Systems, Inc. All rights reserved.
26 - * Copyright (c) 2013,2014 by Delphix. All rights reserved.
26 + * Copyright (c) 2013, 2017 by Delphix. All rights reserved.
27 27 * Copyright 2014, OmniTI Computer Consulting, Inc. All rights reserved.
28 28 */
29 29 /* Copyright (c) 1990 Mentat Inc. */
30 30
31 31 #include <sys/types.h>
32 32 #include <sys/stream.h>
33 33 #include <sys/strsun.h>
34 34 #include <sys/strsubr.h>
35 35 #include <sys/stropts.h>
36 36 #include <sys/strlog.h>
37 37 #define _SUN_TPI_VERSION 2
38 38 #include <sys/tihdr.h>
39 39 #include <sys/timod.h>
40 40 #include <sys/ddi.h>
41 41 #include <sys/sunddi.h>
42 42 #include <sys/suntpi.h>
43 43 #include <sys/xti_inet.h>
44 44 #include <sys/cmn_err.h>
45 45 #include <sys/debug.h>
46 46 #include <sys/sdt.h>
47 47 #include <sys/vtrace.h>
48 48 #include <sys/kmem.h>
49 49 #include <sys/ethernet.h>
50 50 #include <sys/cpuvar.h>
51 51 #include <sys/dlpi.h>
52 52 #include <sys/pattr.h>
53 53 #include <sys/policy.h>
54 54 #include <sys/priv.h>
55 55 #include <sys/zone.h>
56 56 #include <sys/sunldi.h>
57 57
58 58 #include <sys/errno.h>
59 59 #include <sys/signal.h>
60 60 #include <sys/socket.h>
61 61 #include <sys/socketvar.h>
62 62 #include <sys/sockio.h>
63 63 #include <sys/isa_defs.h>
64 64 #include <sys/md5.h>
65 65 #include <sys/random.h>
66 66 #include <sys/uio.h>
67 67 #include <sys/systm.h>
68 68 #include <netinet/in.h>
69 69 #include <netinet/tcp.h>
70 70 #include <netinet/ip6.h>
71 71 #include <netinet/icmp6.h>
72 72 #include <net/if.h>
73 73 #include <net/route.h>
74 74 #include <inet/ipsec_impl.h>
75 75
76 76 #include <inet/common.h>
77 +#include <inet/cc.h>
77 78 #include <inet/ip.h>
78 79 #include <inet/ip_impl.h>
79 80 #include <inet/ip6.h>
80 81 #include <inet/ip_ndp.h>
81 82 #include <inet/proto_set.h>
82 83 #include <inet/mib2.h>
83 84 #include <inet/optcom.h>
84 85 #include <inet/snmpcom.h>
85 86 #include <inet/kstatcom.h>
86 87 #include <inet/tcp.h>
87 88 #include <inet/tcp_impl.h>
88 89 #include <inet/tcp_cluster.h>
89 90 #include <inet/udp_impl.h>
90 91 #include <net/pfkeyv2.h>
91 92 #include <inet/ipdrop.h>
92 93
93 94 #include <inet/ipclassifier.h>
94 95 #include <inet/ip_ire.h>
95 96 #include <inet/ip_ftable.h>
96 97 #include <inet/ip_if.h>
97 98 #include <inet/ipp_common.h>
98 99 #include <inet/ip_rts.h>
99 100 #include <inet/ip_netinfo.h>
100 101 #include <sys/squeue_impl.h>
101 102 #include <sys/squeue.h>
102 103 #include <sys/tsol/label.h>
103 104 #include <sys/tsol/tnet.h>
104 105 #include <rpc/pmap_prot.h>
105 106 #include <sys/callo.h>
106 107
107 108 /*
108 109 * TCP Notes: aka FireEngine Phase I (PSARC 2002/433)
109 110 *
110 111 * (Read the detailed design doc in PSARC case directory)
111 112 *
112 113 * The entire tcp state is contained in the tcp_t and conn_t structures,
113 114 * which are allocated in tandem using ipcl_conn_create() and passing
114 115 * IPCL_TCPCONN as a flag. We use 'conn_ref' and 'conn_lock' to protect
115 116 * the references on the tcp_t. The tcp_t structure is never compressed
116 117 * and packets always land on the correct TCP perimeter from the time
117 118 * the eager is created till the time the tcp_t dies (as such the old Mentat
118 119 * TCP global queue is not used for detached state and no IPSEC checking
119 120 * is required). The global queue is still allocated to send out resets
120 121 * for connections which have no listeners, and IP directly calls
121 122 * tcp_xmit_listeners_reset() which does any policy check.
122 123 *
123 124 * Protection and Synchronisation mechanism:
124 125 *
125 126 * The tcp data structure does not use any kind of lock for protecting
126 127 * its state but instead uses 'squeues' for mutual exclusion from various
127 128 * read and write side threads. To access a tcp member, the thread should
128 129 * always be behind squeue (via squeue_enter with flags as SQ_FILL, SQ_PROCESS,
129 130 * or SQ_NODRAIN). Since the squeues allow a direct function call, a caller
130 131 * can pass any tcp function having a prototype of edesc_t as an argument
131 132 * (different from traditional STREAMs model where packets come in only
132 133 * designated entry points). The list of functions that can be directly
133 134 * called via squeue are listed before the usual function prototype.
134 135 *
135 136 * Referencing:
136 137 *
137 138 * TCP is MT-Hot and we use a reference-based scheme to make sure that the
138 139 * tcp structure doesn't disappear when it's needed. When the application
139 140 * creates an outgoing connection or accepts an incoming connection, we
140 141 * start out with 2 references on 'conn_ref'. One for TCP and one for IP.
141 142 * The IP reference is just a symbolic reference since ip_tcpclose()
142 143 * looks at tcp structure after tcp_close_output() returns which could
143 144 * have dropped the last TCP reference. So as long as the connection is
144 145 * in attached state i.e. !TCP_IS_DETACHED, we have 2 references on the
145 146 * conn_t. The classifier puts its own reference when the connection is
146 147 * inserted in listen or connected hash. Anytime a thread needs to enter
147 148 * the tcp connection perimeter, it retrieves the conn/tcp from q->ptr
148 149 * on write side or by doing a classify on read side and then puts a
149 150 * reference on the conn before doing squeue_enter/tryenter/fill. For
150 151 * read side, the classifier itself puts the reference under fanout lock
151 152 * to make sure that tcp can't disappear before it gets processed. The
152 153 * squeue will drop this reference automatically so the called function
153 154 * doesn't have to do a DEC_REF.
154 155 *
155 156 * Opening a new connection:
156 157 *
157 158 * The outgoing connection open is pretty simple. tcp_open() does the
158 159 * work in creating the conn/tcp structure and initializing it. The
159 160 * squeue assignment is done based on the CPU the application
160 161 * is running on. So for outbound connections, processing is always done
161 162 * on application CPU which might be different from the incoming CPU
162 163 * being interrupted by the NIC. An optimal way would be to figure out
163 164 * the NIC <-> CPU binding at listen time, and assign the outgoing
164 165 * connection to the squeue attached to the CPU that will be interrupted
165 166 * for incoming packets (we know the NIC based on the bind IP address).
166 167 * This might seem like a problem if more data is going out but the
167 168 * fact is that in most cases the transmit is ACK-driven, where
168 169 * the outgoing data normally sits on TCP's xmit queue waiting to be
169 170 * transmitted.
170 171 *
171 172 * Accepting a connection:
172 173 *
173 174 * This is a more interesting case because of various races involved in
174 175 * establishing an eager in its own perimeter. Read the meta comment on
175 176 * top of tcp_input_listener(). But briefly, the squeue is picked by
176 177 * ip_fanout based on the ring or the sender (if loopback).
177 178 *
178 179 * Closing a connection:
179 180 *
180 181 * The close is fairly straightforward. tcp_close() calls tcp_close_output()
181 182 * via squeue to do the close and mark the tcp as detached if the connection
182 183 * was in state TCPS_ESTABLISHED or greater. In the latter case, TCP keeps its
183 184 * reference but tcp_close() always drops IP's reference. So if tcp was
184 185 * not killed, it is sitting in the time_wait list with 2 references - 1 for TCP
185 186 * and 1 because it is in the classifier's connected hash. This is the condition
186 187 * we use to determine that it's OK to clean up the tcp outside of squeue
187 188 * when time wait expires (check the ref under fanout and conn_lock and
188 189 * if it is 2, remove it from fanout hash and kill it).
189 190 *
190 191 * Although close just drops the necessary references and marks the
191 192 * tcp_detached state, tcp_close needs to know the tcp_detached has been
192 193 * set (under squeue) before letting the STREAM go away (because a
193 194 * inbound packet might attempt to go up the STREAM while the close
194 195 * has happened and tcp_detached is not set). So a special lock and
195 196 * flag is used along with a condition variable (tcp_closelock, tcp_closed,
196 197 * and tcp_closecv) to signal tcp_close that tcp_close_out() has marked
197 198 * tcp_detached.
198 199 *
199 200 * Special provisions and fast paths:
200 201 *
201 202 * We make special provisions for sockfs by marking tcp_issocket
202 203 * whenever we have only sockfs on top of TCP. This allows us to skip
203 204 * putting the tcp in the acceptor hash since a sockfs listener can never
204 205 * become an acceptor and also avoid allocating a tcp_t for the acceptor STREAM
205 206 * since the eager has already been allocated and the accept now happens
206 207 * on the acceptor STREAM. There is a big blob of comment on top of
207 208 * tcp_input_listener explaining the new accept. When socket is POP'd,
208 209 * sockfs sends us an ioctl to mark the fact and we go back to old
209 210 * behaviour. Once tcp_issocket is unset, it's never set for the
210 211 * life of that connection.
211 212 *
212 213 * IPsec notes :
213 214 *
214 215 * Since a packet is always executed on the correct TCP perimeter
215 216 * all IPsec processing is deferred to IP, including checking new
216 217 * connections and setting IPsec policies for new connections. The
217 218 * only exception is tcp_xmit_listeners_reset(), which is called
218 219 * directly from IP and needs to do a policy check to see if TH_RST
219 220 * can be sent out.
220 221 */
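Editor's note: the "Referencing" rules above are easiest to see as code. The sketch below is illustrative only and is not part of this diff; my_tcp_callback and my_enter_perimeter are hypothetical names, the callback follows the edesc_t prototype used by tcp_input_data() below, and the squeue tag is borrowed from the close path purely for illustration.

/*
 * A thread that wants to touch tcp state takes a conn_t reference and
 * then enters the connection's squeue; the squeue drops that reference
 * automatically after the callback returns, so the callback itself
 * never does a CONN_DEC_REF.
 */
static void
my_tcp_callback(void *arg, mblk_t *mp, void *arg2, ip_recv_attr_t *ira)
{
	conn_t	*connp = (conn_t *)arg;
	tcp_t	*tcp = connp->conn_tcp;

	/* Single-threaded behind the squeue here; no tcp lock is held. */
	freemsg(mp);
}

static void
my_enter_perimeter(conn_t *connp, mblk_t *mp)
{
	CONN_INC_REF(connp);	/* dropped by the squeue, not by us */
	SQUEUE_ENTER_ONE(connp->conn_sqp, mp, my_tcp_callback, connp,
	    NULL, tcp_squeue_flag, SQTAG_IP_TCP_CLOSE);
}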
221 222
222 223 /*
223 224 * Values for squeue switch:
224 225 * 1: SQ_NODRAIN
225 226 * 2: SQ_PROCESS
226 227 * 3: SQ_FILL
227 228 */
228 229 int tcp_squeue_wput = 2; /* /etc/system */
229 230 int tcp_squeue_flag;
230 231
231 232 /*
232 233 * To prevent a memory hog, limit the number of entries in tcp_free_list
233 234 * to 1% of available memory / number of cpus
234 235 */
235 236 uint_t tcp_free_list_max_cnt = 0;
236 237
237 238 #define TIDUSZ 4096 /* transport interface data unit size */
238 239
239 240 /*
240 241 * Size of acceptor hash list. It has to be a power of 2 for hashing.
241 242 */
242 243 #define TCP_ACCEPTOR_FANOUT_SIZE 512
243 244
244 245 #ifdef _ILP32
245 246 #define TCP_ACCEPTOR_HASH(accid) \
246 247 (((uint_t)(accid) >> 8) & (TCP_ACCEPTOR_FANOUT_SIZE - 1))
247 248 #else
248 249 #define TCP_ACCEPTOR_HASH(accid) \
249 250 ((uint_t)(accid) & (TCP_ACCEPTOR_FANOUT_SIZE - 1))
250 251 #endif /* _ILP32 */
251 252
252 253 /*
253 254 * Minimum number of connections which can be created per listener. Used
254 255 * when the listener connection count is in effect.
255 256 */
256 257 static uint32_t tcp_min_conn_listener = 2;
257 258
258 259 uint32_t tcp_early_abort = 30;
259 260
260 261 /* TCP Timer control structure */
261 262 typedef struct tcpt_s {
262 263 pfv_t tcpt_pfv; /* The routine we are to call */
263 264 tcp_t *tcpt_tcp; /* The parameter we are to pass in */
264 265 } tcpt_t;
265 266
266 267 /*
267 268 * Functions called directly via squeue having a prototype of edesc_t.
268 269 */
269 -void tcp_input_listener(void *arg, mblk_t *mp, void *arg2,
270 - ip_recv_attr_t *ira);
271 270 void tcp_input_data(void *arg, mblk_t *mp, void *arg2,
272 271 ip_recv_attr_t *ira);
273 272 static void tcp_linger_interrupted(void *arg, mblk_t *mp, void *arg2,
274 273 ip_recv_attr_t *dummy);
275 274
276 275
277 276 /* Prototype for TCP functions */
278 277 static void tcp_random_init(void);
279 278 int tcp_random(void);
280 279 static int tcp_connect_ipv4(tcp_t *tcp, ipaddr_t *dstaddrp,
281 280 in_port_t dstport, uint_t srcid);
282 281 static int tcp_connect_ipv6(tcp_t *tcp, in6_addr_t *dstaddrp,
283 282 in_port_t dstport, uint32_t flowinfo,
284 283 uint_t srcid, uint32_t scope_id);
285 284 static void tcp_iss_init(tcp_t *tcp);
286 285 static void tcp_reinit(tcp_t *tcp);
287 286 static void tcp_reinit_values(tcp_t *tcp);
288 287
289 288 static void tcp_wsrv(queue_t *q);
290 289 static void tcp_update_lso(tcp_t *tcp, ip_xmit_attr_t *ixa);
291 290 static void tcp_update_zcopy(tcp_t *tcp);
292 291 static void tcp_notify(void *, ip_xmit_attr_t *, ixa_notify_type_t,
293 292 ixa_notify_arg_t);
294 293 static void *tcp_stack_init(netstackid_t stackid, netstack_t *ns);
295 294 static void tcp_stack_fini(netstackid_t stackid, void *arg);
296 295
297 296 static int tcp_squeue_switch(int);
298 297
299 298 static int tcp_open(queue_t *, dev_t *, int, int, cred_t *, boolean_t);
300 299 static int tcp_openv4(queue_t *, dev_t *, int, int, cred_t *);
301 300 static int tcp_openv6(queue_t *, dev_t *, int, int, cred_t *);
302 301
303 302 static void tcp_squeue_add(squeue_t *);
304 303
305 304 struct module_info tcp_rinfo = {
306 305 TCP_MOD_ID, TCP_MOD_NAME, 0, INFPSZ, TCP_RECV_HIWATER, TCP_RECV_LOWATER
307 306 };
308 307
309 308 static struct module_info tcp_winfo = {
310 309 TCP_MOD_ID, TCP_MOD_NAME, 0, INFPSZ, 127, 16
311 310 };
312 311
313 312 /*
314 313 * Entry points for TCP as a device. This is the normal case, which supports
315 314 * the TCP functionality.
316 315 * We have separate open functions for the /dev/tcp and /dev/tcp6 devices.
317 316 */
318 317 struct qinit tcp_rinitv4 = {
319 318 NULL, (pfi_t)tcp_rsrv, tcp_openv4, tcp_tpi_close, NULL, &tcp_rinfo
320 319 };
321 320
322 321 struct qinit tcp_rinitv6 = {
323 322 NULL, (pfi_t)tcp_rsrv, tcp_openv6, tcp_tpi_close, NULL, &tcp_rinfo
324 323 };
325 324
326 325 struct qinit tcp_winit = {
327 326 (pfi_t)tcp_wput, (pfi_t)tcp_wsrv, NULL, NULL, NULL, &tcp_winfo
328 327 };
329 328
330 329 /* Initial entry point for TCP in socket mode. */
331 330 struct qinit tcp_sock_winit = {
332 331 (pfi_t)tcp_wput_sock, (pfi_t)tcp_wsrv, NULL, NULL, NULL, &tcp_winfo
333 332 };
334 333
335 334 /* TCP entry point during fallback */
336 335 struct qinit tcp_fallback_sock_winit = {
337 336 (pfi_t)tcp_wput_fallback, NULL, NULL, NULL, NULL, &tcp_winfo
338 337 };
339 338
340 339 /*
341 340 * Entry points for TCP as an acceptor STREAM opened by sockfs when doing
342 341 * an accept. Avoid allocating data structures since eager has already
343 342 * been created.
344 343 */
345 344 struct qinit tcp_acceptor_rinit = {
346 345 NULL, (pfi_t)tcp_rsrv, NULL, tcp_tpi_close_accept, NULL, &tcp_winfo
347 346 };
348 347
349 348 struct qinit tcp_acceptor_winit = {
350 349 (pfi_t)tcp_tpi_accept, NULL, NULL, NULL, NULL, &tcp_winfo
351 350 };
352 351
353 352 /* For AF_INET aka /dev/tcp */
354 353 struct streamtab tcpinfov4 = {
355 354 &tcp_rinitv4, &tcp_winit
356 355 };
357 356
358 357 /* For AF_INET6 aka /dev/tcp6 */
359 358 struct streamtab tcpinfov6 = {
360 359 &tcp_rinitv6, &tcp_winit
361 360 };
362 361
363 362 /*
364 363 * The following assumes TPI alignment requirements stay along 32-bit
365 364 * boundaries
366 365 */
367 366 #define ROUNDUP32(x) \
368 367 (((x) + (sizeof (int32_t) - 1)) & ~(sizeof (int32_t) - 1))
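Editor's worked example of the macro: with sizeof (int32_t) - 1 == 3, ROUNDUP32(5) expands to (5 + 3) & ~3 == 8, while ROUNDUP32(8) expands to (8 + 3) & ~3 == 8, so a length is padded up to the next 32-bit boundary only when it is not already aligned.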
369 368
370 369 /* Template for response to info request. */
371 370 struct T_info_ack tcp_g_t_info_ack = {
372 371 T_INFO_ACK, /* PRIM_type */
373 372 0, /* TSDU_size */
374 373 T_INFINITE, /* ETSDU_size */
375 374 T_INVALID, /* CDATA_size */
376 375 T_INVALID, /* DDATA_size */
377 376 sizeof (sin_t), /* ADDR_size */
378 377 0, /* OPT_size - not initialized here */
379 378 TIDUSZ, /* TIDU_size */
380 379 T_COTS_ORD, /* SERV_type */
381 380 TCPS_IDLE, /* CURRENT_state */
382 381 (XPG4_1|EXPINLINE) /* PROVIDER_flag */
383 382 };
384 383
385 384 struct T_info_ack tcp_g_t_info_ack_v6 = {
386 385 T_INFO_ACK, /* PRIM_type */
387 386 0, /* TSDU_size */
388 387 T_INFINITE, /* ETSDU_size */
389 388 T_INVALID, /* CDATA_size */
390 389 T_INVALID, /* DDATA_size */
391 390 sizeof (sin6_t), /* ADDR_size */
392 391 0, /* OPT_size - not initialized here */
393 392 TIDUSZ, /* TIDU_size */
394 393 T_COTS_ORD, /* SERV_type */
395 394 TCPS_IDLE, /* CURRENT_state */
396 395 (XPG4_1|EXPINLINE) /* PROVIDER_flag */
397 396 };
398 397
399 398 /*
400 399 * TCP tunables related declarations. Definitions are in tcp_tunables.c
401 400 */
402 401 extern mod_prop_info_t tcp_propinfo_tbl[];
403 402 extern int tcp_propinfo_count;
404 403
405 404 #define IS_VMLOANED_MBLK(mp) \
406 405 (((mp)->b_datap->db_struioflag & STRUIO_ZC) != 0)
407 406
408 407 uint32_t do_tcpzcopy = 1; /* 0: disable, 1: enable, 2: force */
409 408
410 409 /*
411 410 * Forces all connections to obey the value of the tcps_maxpsz_multiplier
412 411 * tunable settable via NDD. Otherwise, the per-connection behavior is
413 412 * determined dynamically during tcp_set_destination(), which is the default.
414 413 */
415 414 boolean_t tcp_static_maxpsz = B_FALSE;
416 415
417 416 /*
418 417 * If the receive buffer size is changed, this function is called to update
419 418 * the upper socket layer on the new delayed receive wake up threshold.
420 419 */
421 420 static void
422 421 tcp_set_recv_threshold(tcp_t *tcp, uint32_t new_rcvthresh)
423 422 {
424 423 uint32_t default_threshold = SOCKET_RECVHIWATER >> 3;
425 424
426 425 if (IPCL_IS_NONSTR(tcp->tcp_connp)) {
427 426 conn_t *connp = tcp->tcp_connp;
428 427 struct sock_proto_props sopp;
429 428
430 429 /*
431 430 * only increase rcvthresh up to default_threshold
432 431 */
433 432 if (new_rcvthresh > default_threshold)
434 433 new_rcvthresh = default_threshold;
435 434
436 435 sopp.sopp_flags = SOCKOPT_RCVTHRESH;
437 436 sopp.sopp_rcvthresh = new_rcvthresh;
438 437
439 438 (*connp->conn_upcalls->su_set_proto_props)
440 439 (connp->conn_upper_handle, &sopp);
441 440 }
442 441 }
443 442
444 443 /*
445 444 * Figure out the value of the window scale option. Note that the rwnd is
446 445 * ASSUMED to be rounded up to the nearest MSS before the calculation.
447 446 * We cannot find the scale value and then do a round up of tcp_rwnd
448 447 * because the scale value may not be correct after that.
449 448 *
450 449 * Set the compiler flag to make this function inline.
451 450 */
452 451 void
453 452 tcp_set_ws_value(tcp_t *tcp)
454 453 {
455 454 int i;
456 455 uint32_t rwnd = tcp->tcp_rwnd;
457 456
458 457 for (i = 0; rwnd > TCP_MAXWIN && i < TCP_MAX_WINSHIFT;
459 458 i++, rwnd >>= 1)
460 459 ;
461 460 tcp->tcp_rcv_ws = i;
462 461 }
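Editor's note: a standalone sketch of the arithmetic in the loop above, assuming the standard values TCP_MAXWIN = 65535 and TCP_MAX_WINSHIFT = 14; the function and values here are illustrative, not part of this diff.

#include <stdio.h>

/* Mirror of the tcp_set_ws_value() loop for a given receive window. */
static int
ws_for_rwnd(unsigned int rwnd)
{
	int i;

	for (i = 0; rwnd > 65535 && i < 14; i++, rwnd >>= 1)
		;
	return (i);
}

int
main(void)
{
	/* 1048576 >> 4 == 65536 is still too big; 1048576 >> 5 == 32768 fits. */
	printf("ws = %d\n", ws_for_rwnd(1048576));	/* prints ws = 5 */
	return (0);
}

That is, a 1 MB receive window (already rounded up to the MSS, as the comment requires) is advertised as 32768 with a shift count of 5.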
463 462
464 463 /*
465 464 * Remove cached/latched IPsec references.
466 465 */
467 466 void
468 467 tcp_ipsec_cleanup(tcp_t *tcp)
469 468 {
470 469 conn_t *connp = tcp->tcp_connp;
471 470
472 471 ASSERT(connp->conn_flags & IPCL_TCPCONN);
473 472
474 473 if (connp->conn_latch != NULL) {
475 474 IPLATCH_REFRELE(connp->conn_latch);
476 475 connp->conn_latch = NULL;
477 476 }
478 477 if (connp->conn_latch_in_policy != NULL) {
479 478 IPPOL_REFRELE(connp->conn_latch_in_policy);
480 479 connp->conn_latch_in_policy = NULL;
481 480 }
482 481 if (connp->conn_latch_in_action != NULL) {
483 482 IPACT_REFRELE(connp->conn_latch_in_action);
484 483 connp->conn_latch_in_action = NULL;
485 484 }
486 485 if (connp->conn_policy != NULL) {
487 486 IPPH_REFRELE(connp->conn_policy, connp->conn_netstack);
488 487 connp->conn_policy = NULL;
489 488 }
490 489 }
491 490
492 491 /*
493 492 * Cleanup before placing on the free list.
494 493 * Disassociate from the netstack/tcp_stack_t since the freelist
495 494 * is per squeue and not per netstack.
496 495 */
497 496 void
498 497 tcp_cleanup(tcp_t *tcp)
499 498 {
500 499 mblk_t *mp;
501 500 conn_t *connp = tcp->tcp_connp;
502 501 tcp_stack_t *tcps = tcp->tcp_tcps;
503 502 netstack_t *ns = tcps->tcps_netstack;
504 503 mblk_t *tcp_rsrv_mp;
505 504
506 505 tcp_bind_hash_remove(tcp);
507 506
508 507 /* Cleanup that which needs the netstack first */
509 508 tcp_ipsec_cleanup(tcp);
510 509 ixa_cleanup(connp->conn_ixa);
511 510
512 511 if (connp->conn_ht_iphc != NULL) {
513 512 kmem_free(connp->conn_ht_iphc, connp->conn_ht_iphc_allocated);
514 513 connp->conn_ht_iphc = NULL;
515 514 connp->conn_ht_iphc_allocated = 0;
516 515 connp->conn_ht_iphc_len = 0;
517 516 connp->conn_ht_ulp = NULL;
518 517 connp->conn_ht_ulp_len = 0;
519 518 tcp->tcp_ipha = NULL;
520 519 tcp->tcp_ip6h = NULL;
521 520 tcp->tcp_tcpha = NULL;
522 521 }
523 522
524 523 /* We clear any IP_OPTIONS and extension headers */
525 524 ip_pkt_free(&connp->conn_xmit_ipp);
526 525
527 526 tcp_free(tcp);
528 527
529 528 /*
530 529 * Since we will bzero the entire structure, we need to
531 530 * remove it and reinsert it in the global hash list. We
532 531 * know the walkers can't get to this conn because we
533 532 * had set the CONDEMNED flag earlier and checked the reference
534 533 * under conn_lock so a walker won't pick it, and when we
535 534 * do the ipcl_globalhash_remove() below, no walker
536 535 * can get to it.
537 536 */
538 537 ipcl_globalhash_remove(connp);
539 538
540 539 /* Save some state */
541 540 mp = tcp->tcp_timercache;
542 541
543 542 tcp_rsrv_mp = tcp->tcp_rsrv_mp;
544 543
545 544 if (connp->conn_cred != NULL) {
546 545 crfree(connp->conn_cred);
547 546 connp->conn_cred = NULL;
548 547 }
549 548 ipcl_conn_cleanup(connp);
550 549 connp->conn_flags = IPCL_TCPCONN;
551 550
552 551 /*
553 552 * Now it is safe to decrement the reference counts.
554 553 * This might be the last reference on the netstack
555 554 * in which case it will cause the freeing of the IP Instance.
556 555 */
557 556 connp->conn_netstack = NULL;
558 557 connp->conn_ixa->ixa_ipst = NULL;
559 558 netstack_rele(ns);
560 559 ASSERT(tcps != NULL);
561 560 tcp->tcp_tcps = NULL;
562 561
563 562 bzero(tcp, sizeof (tcp_t));
564 563
565 564 /* restore the state */
566 565 tcp->tcp_timercache = mp;
567 566
568 567 tcp->tcp_rsrv_mp = tcp_rsrv_mp;
569 568
570 569 tcp->tcp_connp = connp;
571 570
572 571 ASSERT(connp->conn_tcp == tcp);
573 572 ASSERT(connp->conn_flags & IPCL_TCPCONN);
574 573 connp->conn_state_flags = CONN_INCIPIENT;
575 574 ASSERT(connp->conn_proto == IPPROTO_TCP);
576 575 ASSERT(connp->conn_ref == 1);
577 576 }
578 577
578 +#pragma inline(tcp_calculate_rto)
579 +
579 580 /*
581 + * RTO = average estimate (sa / 8) + 4 * deviation estimate (sd)
582 + *
583 + * Add tcp_rexmit_interval_extra in case of an extreme environment where the
584 + * algorithm fails to work. The default value of tcp_rexmit_interval_extra
585 + * should be 0.
586 + *
587 + * As we use a finer grained clock than BSD and update RTO on every ACK, add
588 + * in another .25 of RTT to the deviation of RTO to accommodate burstiness of
589 + * 1/4 of window size.
590 + */
591 +clock_t
592 +tcp_calculate_rto(tcp_t *tcp, tcp_stack_t *tcps)
593 +{
594 + clock_t rto;
595 +
596 + rto = NSEC2MSEC((tcp->tcp_rtt_sa >> 3) + (tcp->tcp_rtt_sa >> 5) +
597 + tcp->tcp_rtt_sd) + tcps->tcps_rexmit_interval_extra +
598 + tcps->tcps_conn_grace_period;
599 +
600 + if (rto < tcp->tcp_rto_min)
601 + rto = tcp->tcp_rto_min;
602 + else if (rto > tcp->tcp_rto_max)
603 + rto = tcp->tcp_rto_max;
604 +
605 + return (rto);
606 +}
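Editor's note: a worked example of the formula, using hypothetical values. Per the comment above, tcp_rtt_sa carries eight times the smoothed RTT and tcp_rtt_sd four times the smoothed deviation, both in nanoseconds in this change (see the MSEC2NSEC conversions in tcp_set_destination() below).

#include <stdio.h>

#define	NS_PER_MS	1000000LL	/* divisor behind NSEC2MSEC */

int
main(void)
{
	/* Hypothetical connection: smoothed RTT 100 ms, deviation 10 ms. */
	long long sa = 8LL * 100 * NS_PER_MS;	/* 8 * srtt */
	long long sd = 4LL * 10 * NS_PER_MS;	/* 4 * deviation */

	/* (sa >> 3) = 100 ms, (sa >> 5) = 25 ms, sd = 40 ms */
	long long rto_ms = ((sa >> 3) + (sa >> 5) + sd) / NS_PER_MS;

	printf("rto = %lld ms\n", rto_ms);	/* prints rto = 165 ms */
	return (0);
}

The 165 ms figure is then increased by tcps_rexmit_interval_extra and tcps_conn_grace_period and clamped to the [tcp_rto_min, tcp_rto_max] range, exactly as tcp_calculate_rto() does above.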
607 +
608 +/*
580 609 * Adapt to the information, such as rtt and rtt_sd, provided from the
581 610 * DCE and IRE maintained by IP.
582 611 *
583 612 * Checks for multicast and broadcast destination address.
584 613 * Returns zero if ok; an errno on failure.
585 614 *
586 615 * Note that the MSS calculation here is based on the info given in
587 616 * the DCE and IRE. We do not do any calculation based on TCP options. They
588 617 * will be handled in tcp_input_data() when TCP knows which options to use.
589 618 *
590 619 * Note on how TCP gets its parameters for a connection.
591 620 *
592 621 * When a tcp_t structure is allocated, it gets all the default parameters.
593 622 * In tcp_set_destination(), it gets those metric parameters, like rtt, rtt_sd,
594 623 * spipe, rpipe, ... from the route metrics. Route metric overrides the
595 624 * default.
596 625 *
597 626 * An incoming SYN with a multicast or broadcast destination address is dropped
598 627 * in ip_fanout_v4/v6.
599 628 *
600 629 * An incoming SYN with a multicast or broadcast source address is always
601 630 * dropped in tcp_set_destination, since IPDF_ALLOW_MCBC is not set in
602 631 * conn_connect.
603 632 * The same logic in tcp_set_destination also serves to
604 633 * reject an attempt to connect to a broadcast or multicast (destination)
605 634 * address.
606 635 */
607 636 int
608 637 tcp_set_destination(tcp_t *tcp)
609 638 {
610 639 uint32_t mss_max;
611 640 uint32_t mss;
612 641 boolean_t tcp_detached = TCP_IS_DETACHED(tcp);
613 642 conn_t *connp = tcp->tcp_connp;
614 643 tcp_stack_t *tcps = tcp->tcp_tcps;
615 644 iulp_t uinfo;
616 645 int error;
617 646 uint32_t flags;
618 647
619 648 flags = IPDF_LSO | IPDF_ZCOPY;
620 649 /*
621 650 * Make sure we have a dce for the destination to avoid dce_ident
622 651 * contention for connected sockets.
623 652 */
624 653 flags |= IPDF_UNIQUE_DCE;
625 654
626 655 if (!tcps->tcps_ignore_path_mtu)
627 656 connp->conn_ixa->ixa_flags |= IXAF_PMTU_DISCOVERY;
628 657
629 658 /* Use conn_lock to satisfy ASSERT; tcp is already serialized */
630 659 mutex_enter(&connp->conn_lock);
631 660 error = conn_connect(connp, &uinfo, flags);
632 661 mutex_exit(&connp->conn_lock);
633 662 if (error != 0)
634 663 return (error);
635 664
636 665 error = tcp_build_hdrs(tcp);
637 666 if (error != 0)
638 667 return (error);
639 668
640 669 tcp->tcp_localnet = uinfo.iulp_localnet;
641 670
642 671 if (uinfo.iulp_rtt != 0) {
643 - clock_t rto;
644 -
645 - tcp->tcp_rtt_sa = uinfo.iulp_rtt;
646 - tcp->tcp_rtt_sd = uinfo.iulp_rtt_sd;
647 - rto = (tcp->tcp_rtt_sa >> 3) + tcp->tcp_rtt_sd +
648 - tcps->tcps_rexmit_interval_extra +
649 - (tcp->tcp_rtt_sa >> 5);
650 -
651 - TCP_SET_RTO(tcp, rto);
672 + tcp->tcp_rtt_sa = MSEC2NSEC(uinfo.iulp_rtt);
673 + tcp->tcp_rtt_sd = MSEC2NSEC(uinfo.iulp_rtt_sd);
674 + tcp->tcp_rto = tcp_calculate_rto(tcp, tcps);
652 675 }
653 676 if (uinfo.iulp_ssthresh != 0)
654 677 tcp->tcp_cwnd_ssthresh = uinfo.iulp_ssthresh;
655 678 else
656 679 tcp->tcp_cwnd_ssthresh = TCP_MAX_LARGEWIN;
657 680 if (uinfo.iulp_spipe > 0) {
658 681 connp->conn_sndbuf = MIN(uinfo.iulp_spipe,
659 682 tcps->tcps_max_buf);
660 683 if (tcps->tcps_snd_lowat_fraction != 0) {
661 684 connp->conn_sndlowat = connp->conn_sndbuf /
662 685 tcps->tcps_snd_lowat_fraction;
663 686 }
664 687 (void) tcp_maxpsz_set(tcp, B_TRUE);
665 688 }
666 689 /*
667 690 * Note that up till now, acceptor always inherits receive
668 691 * window from the listener. But if there is a metric
669 692 * associated with a host, we should use that instead of
670 693 * inheriting it from the listener. Thus we need to pass this
671 694 * info back to the caller.
672 695 */
673 696 if (uinfo.iulp_rpipe > 0) {
674 697 tcp->tcp_rwnd = MIN(uinfo.iulp_rpipe,
675 698 tcps->tcps_max_buf);
676 699 }
677 700
678 701 if (uinfo.iulp_rtomax > 0) {
679 702 tcp->tcp_second_timer_threshold =
680 703 uinfo.iulp_rtomax;
681 704 }
682 705
683 706 /*
684 707 * Use the metric option settings, iulp_tstamp_ok and
685 708 * iulp_wscale_ok, only for active open. What this means
686 709 * is that if the other side uses timestamp or window
687 710 * scale option, TCP will also use those options. That
688 711 * is for passive open. If the application sets a
689 712 * large window, window scale is enabled regardless of
690 713 * the value in iulp_wscale_ok. This is the behavior
691 714 * since 2.6. So we keep it.
692 715 * The only case left in passive open processing is the
693 716 * check for SACK.
694 717 * For ECN, it should probably be like SACK. But the
695 718 * current value is binary, so we treat it like the other
696 719 * cases. The metric only controls active open. For passive
697 720 * open, the ndd param, tcp_ecn_permitted, controls the
698 721 * behavior.
699 722 */
700 723 if (!tcp_detached) {
701 724 /*
702 725 * The if check means that the following can only
703 726 * be turned on by the metrics only IRE, but not off.
704 727 */
705 728 if (uinfo.iulp_tstamp_ok)
706 729 tcp->tcp_snd_ts_ok = B_TRUE;
707 730 if (uinfo.iulp_wscale_ok)
708 731 tcp->tcp_snd_ws_ok = B_TRUE;
709 732 if (uinfo.iulp_sack == 2)
710 733 tcp->tcp_snd_sack_ok = B_TRUE;
711 734 if (uinfo.iulp_ecn_ok)
712 735 tcp->tcp_ecn_ok = B_TRUE;
713 736 } else {
714 737 /*
715 738 * Passive open.
716 739 *
717 740 * As above, the if check means that SACK can only be
718 741 * turned on by the metric only IRE.
719 742 */
720 743 if (uinfo.iulp_sack > 0) {
721 744 tcp->tcp_snd_sack_ok = B_TRUE;
722 745 }
723 746 }
724 747
725 748 /*
726 749 * XXX Note that currently, iulp_mtu can be as small as 68
727 750 * because of PMTUd. So tcp_mss may go negative if the combined
728 751 * length of all those options exceeds 28 bytes. But because
729 752 * of the tcp_mss_min check below, we may not have a problem if
730 753 * tcp_mss_min is of a reasonable value. The default is 1 so
731 754 * the negative problem still exists. And the check defeats PMTUd.
732 755 * In fact, if PMTUd finds that the MSS should be smaller than
733 756 * tcp_mss_min, TCP should turn off PMTUd and use the tcp_mss_min
734 757 * value.
735 758 *
736 759 * We do not deal with that now. All those problems related to
737 760 * PMTUd will be fixed later.
738 761 */
739 762 ASSERT(uinfo.iulp_mtu != 0);
740 763 mss = tcp->tcp_initial_pmtu = uinfo.iulp_mtu;
741 764
742 765 /* Sanity check for MSS value. */
743 766 if (connp->conn_ipversion == IPV4_VERSION)
744 767 mss_max = tcps->tcps_mss_max_ipv4;
745 768 else
746 769 mss_max = tcps->tcps_mss_max_ipv6;
747 770
748 771 if (tcp->tcp_ipsec_overhead == 0)
749 772 tcp->tcp_ipsec_overhead = conn_ipsec_length(connp);
750 773
751 774 mss -= tcp->tcp_ipsec_overhead;
752 775
753 776 if (mss < tcps->tcps_mss_min)
754 777 mss = tcps->tcps_mss_min;
755 778 if (mss > mss_max)
756 779 mss = mss_max;
757 780
758 781 /* Note that this is the maximum MSS, excluding all options. */
759 782 tcp->tcp_mss = mss;
760 783
761 784 /*
762 785 * Update the tcp connection with LSO capability.
763 786 */
764 787 tcp_update_lso(tcp, connp->conn_ixa);
765 788
766 789 /*
767 790 * Initialize the ISS here now that we have the full connection ID.
768 791 * The RFC 1948 method of initial sequence number generation requires
769 792 * knowledge of the full connection ID before setting the ISS.
770 793 */
771 794 tcp_iss_init(tcp);
772 795
773 796 tcp->tcp_loopback = (uinfo.iulp_loopback | uinfo.iulp_local);
774 797
775 798 /*
776 799 * Make sure that conn is not marked incipient
777 800 * for incoming connections. A blind
778 801 * removal of the incipient flag is cheaper than
779 802 * a check and removal.
780 803 */
781 804 mutex_enter(&connp->conn_lock);
782 805 connp->conn_state_flags &= ~CONN_INCIPIENT;
783 806 mutex_exit(&connp->conn_lock);
784 807 return (0);
785 808 }
786 809
787 810 /*
788 811 * tcp_clean_death / tcp_close_detached must not be called more than once
789 812 * on a tcp. Thus every function that potentially calls tcp_clean_death
790 813 * must check for the tcp state before calling tcp_clean_death.
791 814 * Eg. tcp_input_data, tcp_eager_kill, tcp_clean_death_wrapper,
792 815 * tcp_timer_handler, all check for the tcp state.
793 816 */
794 817 /* ARGSUSED */
795 818 void
796 819 tcp_clean_death_wrapper(void *arg, mblk_t *mp, void *arg2,
797 820 ip_recv_attr_t *dummy)
798 821 {
799 822 tcp_t *tcp = ((conn_t *)arg)->conn_tcp;
800 823
801 824 freemsg(mp);
802 825 if (tcp->tcp_state > TCPS_BOUND)
803 826 (void) tcp_clean_death(((conn_t *)arg)->conn_tcp, ETIMEDOUT);
804 827 }
805 828
806 829 /*
807 830 * We are dying for some reason. Try to do it gracefully. (May be called
808 831 * as writer.)
809 832 *
810 833 * Return -1 if the structure was not cleaned up (if the cleanup had to be
811 834 * done by a service procedure).
812 835 * TBD - Should the return value distinguish between the tcp_t being
813 836 * freed and it being reinitialized?
814 837 */
815 838 int
816 839 tcp_clean_death(tcp_t *tcp, int err)
817 840 {
818 841 mblk_t *mp;
819 842 queue_t *q;
820 843 conn_t *connp = tcp->tcp_connp;
821 844 tcp_stack_t *tcps = tcp->tcp_tcps;
822 845
823 846 if (tcp->tcp_fused)
824 847 tcp_unfuse(tcp);
825 848
826 849 if (tcp->tcp_linger_tid != 0 &&
827 850 TCP_TIMER_CANCEL(tcp, tcp->tcp_linger_tid) >= 0) {
828 851 tcp_stop_lingering(tcp);
829 852 }
830 853
831 854 ASSERT(tcp != NULL);
832 855 ASSERT((connp->conn_family == AF_INET &&
833 856 connp->conn_ipversion == IPV4_VERSION) ||
834 857 (connp->conn_family == AF_INET6 &&
835 858 (connp->conn_ipversion == IPV4_VERSION ||
836 859 connp->conn_ipversion == IPV6_VERSION)));
837 860
838 861 if (TCP_IS_DETACHED(tcp)) {
839 862 if (tcp->tcp_hard_binding) {
840 863 /*
841 864 * It's an eager that we are dealing with. We close the
842 865 * eager but in case a conn_ind has already gone to the
843 866 * listener, let tcp_accept_finish() send a discon_ind
844 867 * to the listener and drop the last reference. If the
845 868 * listener doesn't even know about the eager i.e. the
846 869 * conn_ind hasn't gone up, blow away the eager and drop
847 870 * the last reference as well. If the conn_ind has gone
848 871 * up, state should be BOUND. tcp_accept_finish
849 872 * will figure out that the connection has received a
850 873 * RST and will send a DISCON_IND to the application.
851 874 */
852 875 tcp_closei_local(tcp);
853 876 if (!tcp->tcp_tconnind_started) {
854 877 CONN_DEC_REF(connp);
855 878 } else {
856 879 tcp->tcp_state = TCPS_BOUND;
857 880 DTRACE_TCP6(state__change, void, NULL,
858 881 ip_xmit_attr_t *, connp->conn_ixa,
859 882 void, NULL, tcp_t *, tcp, void, NULL,
860 883 int32_t, TCPS_CLOSED);
861 884 }
862 885 } else {
863 886 tcp_close_detached(tcp);
864 887 }
865 888 return (0);
866 889 }
867 890
868 891 TCP_STAT(tcps, tcp_clean_death_nondetached);
869 892
870 893 /*
871 894 * The connection is dead. Decrement listener connection counter if
872 895 * necessary.
873 896 */
874 897 if (tcp->tcp_listen_cnt != NULL)
875 898 TCP_DECR_LISTEN_CNT(tcp);
876 899
877 900 /*
878 901 * When a connection is moved to TIME_WAIT state, the connection
879 902 * counter is already decremented. So no need to decrement here
880 903 * again. See SET_TIME_WAIT() macro.
881 904 */
882 905 if (tcp->tcp_state >= TCPS_ESTABLISHED &&
883 906 tcp->tcp_state < TCPS_TIME_WAIT) {
884 907 TCPS_CONN_DEC(tcps);
885 908 }
886 909
887 910 q = connp->conn_rq;
888 911
889 912 /* Trash all inbound data */
890 913 if (!IPCL_IS_NONSTR(connp)) {
891 914 ASSERT(q != NULL);
892 915 flushq(q, FLUSHALL);
893 916 }
894 917
895 918 /*
896 919 * If we are at least partway open and there is an error
897 920 * (err == 0 implies no error),
898 921 * notify our client by a T_DISCON_IND.
899 922 */
900 923 if ((tcp->tcp_state >= TCPS_SYN_SENT) && err) {
901 924 if (tcp->tcp_state >= TCPS_ESTABLISHED &&
902 925 !TCP_IS_SOCKET(tcp)) {
903 926 /*
904 927 * Send M_FLUSH according to TPI. Because sockets will
905 928 * (and must) ignore FLUSHR we do that only for TPI
906 929 * endpoints and sockets in STREAMS mode.
907 930 */
908 931 (void) putnextctl1(q, M_FLUSH, FLUSHR);
909 932 }
910 933 if (connp->conn_debug) {
911 934 (void) strlog(TCP_MOD_ID, 0, 1, SL_TRACE|SL_ERROR,
912 935 "tcp_clean_death: discon err %d", err);
913 936 }
914 937 if (IPCL_IS_NONSTR(connp)) {
915 938 /* Direct socket, use upcall */
916 939 (*connp->conn_upcalls->su_disconnected)(
917 940 connp->conn_upper_handle, tcp->tcp_connid, err);
918 941 } else {
919 942 mp = mi_tpi_discon_ind(NULL, err, 0);
920 943 if (mp != NULL) {
921 944 putnext(q, mp);
922 945 } else {
923 946 if (connp->conn_debug) {
924 947 (void) strlog(TCP_MOD_ID, 0, 1,
925 948 SL_ERROR|SL_TRACE,
926 949 "tcp_clean_death, sending M_ERROR");
927 950 }
928 951 (void) putnextctl1(q, M_ERROR, EPROTO);
929 952 }
930 953 }
931 954 if (tcp->tcp_state <= TCPS_SYN_RCVD) {
932 955 /* SYN_SENT or SYN_RCVD */
933 956 TCPS_BUMP_MIB(tcps, tcpAttemptFails);
934 957 } else if (tcp->tcp_state <= TCPS_CLOSE_WAIT) {
935 958 /* ESTABLISHED or CLOSE_WAIT */
936 959 TCPS_BUMP_MIB(tcps, tcpEstabResets);
937 960 }
938 961 }
939 962
940 963 /*
941 964 * ESTABLISHED non-STREAMS eagers are not 'detached' because
942 965 * an upper handle is obtained when the SYN-ACK comes in. So it
943 966 * should receive the 'disconnected' upcall, but tcp_reinit should
944 967 * not be called since this is an eager.
945 968 */
946 969 if (tcp->tcp_listener != NULL && IPCL_IS_NONSTR(connp)) {
947 970 tcp_closei_local(tcp);
948 971 tcp->tcp_state = TCPS_BOUND;
949 972 DTRACE_TCP6(state__change, void, NULL, ip_xmit_attr_t *,
950 973 connp->conn_ixa, void, NULL, tcp_t *, tcp, void, NULL,
951 974 int32_t, TCPS_CLOSED);
952 975 return (0);
953 976 }
954 977
955 978 tcp_reinit(tcp);
956 979 if (IPCL_IS_NONSTR(connp))
957 980 (void) tcp_do_unbind(connp);
958 981
959 982 return (-1);
960 983 }
961 984
962 985 /*
963 986 * In case tcp is in the "lingering state" and waits for the SO_LINGER timeout
964 987 * to expire, stop the wait and finish the close.
965 988 */
966 989 void
967 990 tcp_stop_lingering(tcp_t *tcp)
968 991 {
969 992 clock_t delta = 0;
970 993 conn_t *connp = tcp->tcp_connp;
971 994
972 995 tcp->tcp_linger_tid = 0;
973 996 if (tcp->tcp_state > TCPS_LISTEN) {
974 997 tcp_acceptor_hash_remove(tcp);
975 998 mutex_enter(&tcp->tcp_non_sq_lock);
976 999 if (tcp->tcp_flow_stopped) {
977 1000 tcp_clrqfull(tcp);
978 1001 }
979 1002 mutex_exit(&tcp->tcp_non_sq_lock);
980 1003
981 1004 if (tcp->tcp_timer_tid != 0) {
982 1005 delta = TCP_TIMER_CANCEL(tcp, tcp->tcp_timer_tid);
983 1006 tcp->tcp_timer_tid = 0;
984 1007 }
985 1008 /*
986 1009 * Need to cancel those timers which will not be used when
987 1010 * TCP is detached. This has to be done before the conn_wq
988 1011 * is cleared.
989 1012 */
990 1013 tcp_timers_stop(tcp);
991 1014
992 1015 tcp->tcp_detached = B_TRUE;
993 1016 connp->conn_rq = NULL;
994 1017 connp->conn_wq = NULL;
995 1018
996 1019 if (tcp->tcp_state == TCPS_TIME_WAIT) {
997 1020 tcp_time_wait_append(tcp);
998 1021 TCP_DBGSTAT(tcp->tcp_tcps, tcp_detach_time_wait);
999 1022 goto finish;
1000 1023 }
1001 1024
1002 1025 /*
1003 1026 * If delta is zero the timer event wasn't executed and was
1004 1027 * successfully canceled. In this case we need to restart it
1005 1028 * with the minimal delta possible.
1006 1029 */
1007 1030 if (delta >= 0) {
1008 1031 tcp->tcp_timer_tid = TCP_TIMER(tcp, tcp_timer,
1009 1032 delta ? delta : 1);
1010 1033 }
1011 1034 } else {
1012 1035 tcp_closei_local(tcp);
1013 1036 CONN_DEC_REF(connp);
1014 1037 }
1015 1038 finish:
1016 1039 tcp->tcp_detached = B_TRUE;
1017 1040 connp->conn_rq = NULL;
1018 1041 connp->conn_wq = NULL;
1019 1042
1020 1043 /* Signal closing thread that it can complete close */
1021 1044 mutex_enter(&tcp->tcp_closelock);
1022 1045 tcp->tcp_closed = 1;
1023 1046 cv_signal(&tcp->tcp_closecv);
1024 1047 mutex_exit(&tcp->tcp_closelock);
1025 1048
1026 1049 /* If we have an upper handle (socket), release it */
1027 1050 if (IPCL_IS_NONSTR(connp)) {
1028 1051 ASSERT(connp->conn_upper_handle != NULL);
1029 1052 (*connp->conn_upcalls->su_closed)(connp->conn_upper_handle);
1030 1053 connp->conn_upper_handle = NULL;
1031 1054 connp->conn_upcalls = NULL;
1032 1055 }
1033 1056 }
1034 1057
1035 1058 void
1036 1059 tcp_close_common(conn_t *connp, int flags)
1037 1060 {
1038 1061 tcp_t *tcp = connp->conn_tcp;
1039 1062 mblk_t *mp = &tcp->tcp_closemp;
1040 1063 boolean_t conn_ioctl_cleanup_reqd = B_FALSE;
1041 1064 mblk_t *bp;
1042 1065
1043 1066 ASSERT(connp->conn_ref >= 2);
1044 1067
1045 1068 /*
1046 1069 * Mark the conn as closing. ipsq_pending_mp_add will not
1047 1070 * add any mp to the pending mp list, after this conn has
1048 1071 * started closing.
1049 1072 */
1050 1073 mutex_enter(&connp->conn_lock);
1051 1074 connp->conn_state_flags |= CONN_CLOSING;
1052 1075 if (connp->conn_oper_pending_ill != NULL)
1053 1076 conn_ioctl_cleanup_reqd = B_TRUE;
1054 1077 CONN_INC_REF_LOCKED(connp);
1055 1078 mutex_exit(&connp->conn_lock);
1056 1079 tcp->tcp_closeflags = (uint8_t)flags;
1057 1080 ASSERT(connp->conn_ref >= 3);
1058 1081
1059 1082 /*
1060 1083 * tcp_closemp_used is used below without any protection of a lock
1061 1084 * as we don't expect anyone else to use it concurrently at this
1062 1085 * point; otherwise it would be a major defect.
1063 1086 */
1064 1087
1065 1088 if (mp->b_prev == NULL)
1066 1089 tcp->tcp_closemp_used = B_TRUE;
1067 1090 else
1068 1091 cmn_err(CE_PANIC, "tcp_close: concurrent use of tcp_closemp: "
1069 1092 "connp %p tcp %p\n", (void *)connp, (void *)tcp);
1070 1093
1071 1094 TCP_DEBUG_GETPCSTACK(tcp->tcmp_stk, 15);
1072 1095
1073 1096 /*
1074 1097 * Cleanup any queued ioctls here. This must be done before the wq/rq
1075 1098 * are re-written by tcp_close_output().
1076 1099 */
1077 1100 if (conn_ioctl_cleanup_reqd)
1078 1101 conn_ioctl_cleanup(connp);
1079 1102
1080 1103 /*
1081 1104 * As CONN_CLOSING is set, no further ioctls should be passed down to
1082 1105 * IP for this conn (see the guards in tcp_ioctl, tcp_wput_ioctl and
1083 1106 * tcp_wput_iocdata). If the ioctl was queued on an ipsq,
1084 1107 * conn_ioctl_cleanup should have found it and removed it. If the ioctl
1085 1108 * was still in flight at the time, we wait for it here. See comments
1086 1109 * for CONN_INC_IOCTLREF in ip.h for details.
1087 1110 */
1088 1111 mutex_enter(&connp->conn_lock);
1089 1112 while (connp->conn_ioctlref > 0)
1090 1113 cv_wait(&connp->conn_cv, &connp->conn_lock);
1091 1114 ASSERT(connp->conn_ioctlref == 0);
1092 1115 ASSERT(connp->conn_oper_pending_ill == NULL);
1093 1116 mutex_exit(&connp->conn_lock);
1094 1117
1095 1118 SQUEUE_ENTER_ONE(connp->conn_sqp, mp, tcp_close_output, connp,
1096 1119 NULL, tcp_squeue_flag, SQTAG_IP_TCP_CLOSE);
1097 1120
1098 1121 /*
1099 1122 * For non-STREAMS sockets, the normal case is that the conn makes
1100 1123 * an upcall when it's finally closed, so there is no need to wait
1101 1124 * in the protocol. But in case of SO_LINGER the thread sleeps here
1102 1125 * so it can properly deal with the thread being interrupted.
1103 1126 */
1104 1127 if (IPCL_IS_NONSTR(connp) && connp->conn_linger == 0)
1105 1128 goto nowait;
1106 1129
1107 1130 mutex_enter(&tcp->tcp_closelock);
1108 1131 while (!tcp->tcp_closed) {
1109 1132 if (!cv_wait_sig(&tcp->tcp_closecv, &tcp->tcp_closelock)) {
1110 1133 /*
1111 1134 * The cv_wait_sig() was interrupted. We now do the
1112 1135 * following:
1113 1136 *
1114 1137 * 1) If the endpoint was lingering, we allow this
1115 1138 * to be interrupted by cancelling the linger timeout
1116 1139 * and closing normally.
1117 1140 *
1118 1141 * 2) Revert to calling cv_wait()
1119 1142 *
1120 1143 * We revert to using cv_wait() to avoid an
1121 1144 * infinite loop which can occur if the calling
1122 1145 * thread is higher priority than the squeue worker
1123 1146 * thread and is bound to the same cpu.
1124 1147 */
1125 1148 if (connp->conn_linger && connp->conn_lingertime > 0) {
1126 1149 mutex_exit(&tcp->tcp_closelock);
1127 1150 /* Entering squeue, bump ref count. */
1128 1151 CONN_INC_REF(connp);
1129 1152 bp = allocb_wait(0, BPRI_HI, STR_NOSIG, NULL);
1130 1153 SQUEUE_ENTER_ONE(connp->conn_sqp, bp,
1131 1154 tcp_linger_interrupted, connp, NULL,
1132 1155 tcp_squeue_flag, SQTAG_IP_TCP_CLOSE);
1133 1156 mutex_enter(&tcp->tcp_closelock);
1134 1157 }
1135 1158 break;
1136 1159 }
1137 1160 }
1138 1161 while (!tcp->tcp_closed)
1139 1162 cv_wait(&tcp->tcp_closecv, &tcp->tcp_closelock);
1140 1163 mutex_exit(&tcp->tcp_closelock);
1141 1164
1142 1165 /*
1143 1166 * In the case of listener streams that have eagers in the q or q0
1144 1167 * we wait for the eagers to drop their reference to us. conn_rq and
1145 1168 * conn_wq of the eagers point to our queues. By waiting for the
1146 1169 * refcnt to drop to 1, we are sure that the eagers have cleaned
1147 1170 * up their queue pointers and also dropped their references to us.
1148 1171 *
1149 1172 * For non-STREAMS sockets we do not have to wait here; the
1150 1173 * listener will instead make a su_closed upcall when the last
1151 1174 * reference is dropped.
1152 1175 */
1153 1176 if (tcp->tcp_wait_for_eagers && !IPCL_IS_NONSTR(connp)) {
1154 1177 mutex_enter(&connp->conn_lock);
1155 1178 while (connp->conn_ref != 1) {
1156 1179 cv_wait(&connp->conn_cv, &connp->conn_lock);
1157 1180 }
1158 1181 mutex_exit(&connp->conn_lock);
1159 1182 }
1160 1183
1161 1184 nowait:
1162 1185 connp->conn_cpid = NOPID;
1163 1186 }
1164 1187
1165 1188 /*
1166 1189 * Called by tcp_close() routine via squeue when lingering is
1167 1190 * interrupted by a signal.
1168 1191 */
1169 1192
1170 1193 /* ARGSUSED */
1171 1194 static void
1172 1195 tcp_linger_interrupted(void *arg, mblk_t *mp, void *arg2, ip_recv_attr_t *dummy)
1173 1196 {
1174 1197 conn_t *connp = (conn_t *)arg;
1175 1198 tcp_t *tcp = connp->conn_tcp;
1176 1199
1177 1200 freeb(mp);
1178 1201 if (tcp->tcp_linger_tid != 0 &&
1179 1202 TCP_TIMER_CANCEL(tcp, tcp->tcp_linger_tid) >= 0) {
1180 1203 tcp_stop_lingering(tcp);
1181 1204 tcp->tcp_client_errno = EINTR;
1182 1205 }
1183 1206 }
1184 1207
1185 1208 /*
1186 1209 * Clean up the b_next and b_prev fields of every mblk pointed at by *mpp.
1187 1210 * Some stream heads get upset if they see these later on as anything but NULL.
1188 1211 */
1189 1212 void
1190 1213 tcp_close_mpp(mblk_t **mpp)
1191 1214 {
1192 1215 mblk_t *mp;
1193 1216
1194 1217 if ((mp = *mpp) != NULL) {
1195 1218 do {
1196 1219 mp->b_next = NULL;
1197 1220 mp->b_prev = NULL;
1198 1221 } while ((mp = mp->b_cont) != NULL);
1199 1222
1200 1223 mp = *mpp;
1201 1224 *mpp = NULL;
1202 1225 freemsg(mp);
1203 1226 }
1204 1227 }
1205 1228
1206 1229 /* Do detached close. */
1207 1230 void
1208 1231 tcp_close_detached(tcp_t *tcp)
1209 1232 {
1210 1233 if (tcp->tcp_fused)
1211 1234 tcp_unfuse(tcp);
1212 1235
1213 1236 /*
1214 1237 * Clustering code serializes TCP disconnect callbacks and
1215 1238 * cluster tcp list walks by blocking a TCP disconnect callback
1216 1239 * if a cluster tcp list walk is in progress. This ensures
1217 1240 * accurate accounting of TCPs in the cluster code even though
1218 1241 * the TCP list walk itself is not atomic.
1219 1242 */
1220 1243 tcp_closei_local(tcp);
1221 1244 CONN_DEC_REF(tcp->tcp_connp);
1222 1245 }
1223 1246
1224 1247 /*
1225 1248 * The tcp_t is going away. Remove it from all lists and set it
1226 1249 * to TCPS_CLOSED. The freeing up of memory is deferred until
1227 1250 * tcp_inactive. This is needed since a thread in tcp_rput might have
1228 1251 * done a CONN_INC_REF on this structure before it was removed from the
1229 1252 * hashes.
1230 1253 */
1231 1254 void
1232 1255 tcp_closei_local(tcp_t *tcp)
1233 1256 {
1234 1257 conn_t *connp = tcp->tcp_connp;
1235 1258 tcp_stack_t *tcps = tcp->tcp_tcps;
1236 1259 int32_t oldstate;
1237 1260
1238 1261 if (!TCP_IS_SOCKET(tcp))
1239 1262 tcp_acceptor_hash_remove(tcp);
1240 1263
1241 - TCPS_UPDATE_MIB(tcps, tcpHCInSegs, tcp->tcp_ibsegs);
1242 - tcp->tcp_ibsegs = 0;
1243 - TCPS_UPDATE_MIB(tcps, tcpHCOutSegs, tcp->tcp_obsegs);
1244 - tcp->tcp_obsegs = 0;
1245 -
1246 1264 /*
1247 1265 * This can be called via tcp_time_wait_processing() if TCP gets a
1248 1266 * SYN with sequence number outside the TIME-WAIT connection's
1249 1267 * window. So we need to check for TIME-WAIT state here as the
1250 1268 * connection counter is already decremented. See SET_TIME_WAIT()
1251 1269 * macro
1252 1270 */
1253 1271 if (tcp->tcp_state >= TCPS_ESTABLISHED &&
1254 1272 tcp->tcp_state < TCPS_TIME_WAIT) {
1255 1273 TCPS_CONN_DEC(tcps);
1256 1274 }
1257 1275
1258 1276 /*
1259 1277 * If we are an eager connection hanging off a listener that
1260 1278 * hasn't formally accepted the connection yet, get off its
1261 1279 * list and blow off any data that we have accumulated.
1262 1280 */
1263 1281 if (tcp->tcp_listener != NULL) {
1264 1282 tcp_t *listener = tcp->tcp_listener;
1265 1283 mutex_enter(&listener->tcp_eager_lock);
1266 1284 /*
1267 1285 * tcp_tconnind_started == B_TRUE means that the
1268 1286 * conn_ind has already gone to the listener. At
1269 1287 * this point, the eager will be closed but we
1270 1288 * leave it in the listener's eager list so that
1271 1289 * if listener decides to close without doing
1272 1290 * accept, we can clean this up. In tcp_tli_accept
1273 1291 * we take care of the case of accept on closed
1274 1292 * eager.
1275 1293 */
1276 1294 if (!tcp->tcp_tconnind_started) {
1277 1295 tcp_eager_unlink(tcp);
1278 1296 mutex_exit(&listener->tcp_eager_lock);
1279 1297 /*
1280 1298 * We don't want to have any pointers to the
1281 1299 * listener queue, after we have released our
1282 1300 * reference on the listener
1283 1301 */
1284 1302 ASSERT(tcp->tcp_detached);
1285 1303 connp->conn_rq = NULL;
1286 1304 connp->conn_wq = NULL;
1287 1305 CONN_DEC_REF(listener->tcp_connp);
1288 1306 } else {
1289 1307 mutex_exit(&listener->tcp_eager_lock);
1290 1308 }
1291 1309 }
1292 1310
1293 1311 /* Stop all the timers */
1294 1312 tcp_timers_stop(tcp);
1295 1313
1296 1314 if (tcp->tcp_state == TCPS_LISTEN) {
1297 1315 if (tcp->tcp_ip_addr_cache) {
1298 1316 kmem_free((void *)tcp->tcp_ip_addr_cache,
1299 1317 IP_ADDR_CACHE_SIZE * sizeof (ipaddr_t));
1300 1318 tcp->tcp_ip_addr_cache = NULL;
1301 1319 }
1302 1320 }
1303 1321
1304 1322 /* Decrement listener connection counter if necessary. */
1305 1323 if (tcp->tcp_listen_cnt != NULL)
1306 1324 TCP_DECR_LISTEN_CNT(tcp);
1307 1325
1308 1326 mutex_enter(&tcp->tcp_non_sq_lock);
1309 1327 if (tcp->tcp_flow_stopped)
1310 1328 tcp_clrqfull(tcp);
1311 1329 mutex_exit(&tcp->tcp_non_sq_lock);
1312 1330
1313 1331 tcp_bind_hash_remove(tcp);
1314 1332 /*
1315 1333 * If the tcp_time_wait_collector (which runs outside the squeue)
1316 1334 * is trying to remove this tcp from the time wait list, we will
1317 1335 * block in tcp_time_wait_remove while trying to acquire the
1318 1336 * tcp_time_wait_lock. The logic in tcp_time_wait_collector also
1319 1337 * requires the ipcl_hash_remove to be ordered after the
1320 1338 * tcp_time_wait_remove for the refcnt checks to work correctly.
1321 1339 */
1322 1340 if (tcp->tcp_state == TCPS_TIME_WAIT)
1323 1341 (void) tcp_time_wait_remove(tcp, NULL);
1324 1342 CL_INET_DISCONNECT(connp);
1325 1343 ipcl_hash_remove(connp);
1326 1344 oldstate = tcp->tcp_state;
1327 1345 tcp->tcp_state = TCPS_CLOSED;
1328 1346 /* Need to probe before ixa_cleanup() is called */
1329 1347 DTRACE_TCP6(state__change, void, NULL, ip_xmit_attr_t *,
1330 1348 connp->conn_ixa, void, NULL, tcp_t *, tcp, void, NULL,
1331 1349 int32_t, oldstate);
1332 1350 ixa_cleanup(connp->conn_ixa);
1333 1351
1334 1352 /*
1335 1353 * Mark the conn as CONDEMNED
1336 1354 */
1337 1355 mutex_enter(&connp->conn_lock);
1338 1356 connp->conn_state_flags |= CONN_CONDEMNED;
1339 1357 mutex_exit(&connp->conn_lock);
1340 1358
1341 1359 ASSERT(tcp->tcp_time_wait_next == NULL);
1342 1360 ASSERT(tcp->tcp_time_wait_prev == NULL);
1343 1361 ASSERT(tcp->tcp_time_wait_expire == 0);
1344 1362
1345 1363 tcp_ipsec_cleanup(tcp);
1346 1364 }
1347 1365
1348 1366 /*
1349 1367 * tcp is dying (called from ipcl_conn_destroy and error cases).
1350 1368 * Free the tcp_t in either case.
1351 1369 */
1352 1370 void
1353 1371 tcp_free(tcp_t *tcp)
1354 1372 {
1355 1373 mblk_t *mp;
1356 1374 conn_t *connp = tcp->tcp_connp;
1357 1375
1358 1376 ASSERT(tcp != NULL);
1359 1377 ASSERT(tcp->tcp_ptpahn == NULL && tcp->tcp_acceptor_hash == NULL);
1360 1378
1361 1379 connp->conn_rq = NULL;
1362 1380 connp->conn_wq = NULL;
1363 1381
1364 1382 tcp_close_mpp(&tcp->tcp_xmit_head);
1365 1383 tcp_close_mpp(&tcp->tcp_reass_head);
1366 1384 if (tcp->tcp_rcv_list != NULL) {
1367 1385 /* Free b_next chain */
1368 1386 tcp_close_mpp(&tcp->tcp_rcv_list);
1369 1387 }
1370 1388 if ((mp = tcp->tcp_urp_mp) != NULL) {
1371 1389 freemsg(mp);
1372 1390 }
1373 1391 if ((mp = tcp->tcp_urp_mark_mp) != NULL) {
1374 1392 freemsg(mp);
1375 1393 }
1376 1394
1377 1395 if (tcp->tcp_fused_sigurg_mp != NULL) {
1378 1396 ASSERT(!IPCL_IS_NONSTR(tcp->tcp_connp));
1379 1397 freeb(tcp->tcp_fused_sigurg_mp);
1380 1398 tcp->tcp_fused_sigurg_mp = NULL;
1381 1399 }
1382 1400
1383 1401 if (tcp->tcp_ordrel_mp != NULL) {
1384 1402 ASSERT(!IPCL_IS_NONSTR(tcp->tcp_connp));
1385 1403 freeb(tcp->tcp_ordrel_mp);
1386 1404 tcp->tcp_ordrel_mp = NULL;
1387 1405 }
1388 1406
1389 1407 TCP_NOTSACK_REMOVE_ALL(tcp->tcp_notsack_list, tcp);
1390 1408 bzero(&tcp->tcp_sack_info, sizeof (tcp_sack_info_t));
1391 1409
1392 1410 if (tcp->tcp_hopopts != NULL) {
1393 1411 mi_free(tcp->tcp_hopopts);
1394 1412 tcp->tcp_hopopts = NULL;
1395 1413 tcp->tcp_hopoptslen = 0;
1396 1414 }
1397 1415 ASSERT(tcp->tcp_hopoptslen == 0);
1398 1416 if (tcp->tcp_dstopts != NULL) {
1399 1417 mi_free(tcp->tcp_dstopts);
1400 1418 tcp->tcp_dstopts = NULL;
1401 1419 tcp->tcp_dstoptslen = 0;
1402 1420 }
1403 1421 ASSERT(tcp->tcp_dstoptslen == 0);
1404 1422 if (tcp->tcp_rthdrdstopts != NULL) {
1405 1423 mi_free(tcp->tcp_rthdrdstopts);
1406 1424 tcp->tcp_rthdrdstopts = NULL;
1407 1425 tcp->tcp_rthdrdstoptslen = 0;
1408 1426 }
1409 1427 ASSERT(tcp->tcp_rthdrdstoptslen == 0);
1410 1428 if (tcp->tcp_rthdr != NULL) {
1411 1429 mi_free(tcp->tcp_rthdr);
1412 1430 tcp->tcp_rthdr = NULL;
1413 1431 tcp->tcp_rthdrlen = 0;
1414 1432 }
1415 1433 ASSERT(tcp->tcp_rthdrlen == 0);
1416 1434
1417 1435 /*
1418 1436 * The following is really blowing away a union.
1419 1437 * Since it happens to have exactly two members of identical size,
1420 1438 * the following code is enough.
1421 1439 */
1422 1440 tcp_close_mpp(&tcp->tcp_conn.tcp_eager_conn_ind);
1423 1441
1442 + /* Allow the CC algorithm to clean up after itself. */
1443 + if (tcp->tcp_cc_algo != NULL && tcp->tcp_cc_algo->cb_destroy != NULL)
1444 + tcp->tcp_cc_algo->cb_destroy(&tcp->tcp_ccv);
1445 +
1424 1446 /*
1425 1447 * Destroy any association with SO_REUSEPORT group.
1426 1448 */
1427 1449 if (tcp->tcp_rg_bind != NULL) {
1428 1450 /*
1429 1451 * This is only necessary for connections which enabled
1430 1452 * SO_REUSEPORT but were never bound. Such connections should
1431 1453 * be the one and only member of the tcp_rg_tp to which they
1432 1454 * have been associated.
1433 1455 */
1434 1456 VERIFY(tcp_rg_remove(tcp->tcp_rg_bind, tcp));
1435 1457 tcp_rg_destroy(tcp->tcp_rg_bind);
1436 1458 tcp->tcp_rg_bind = NULL;
1437 1459 }
1438 1460
1439 1461 /*
1440 1462 * If this is a non-STREAM socket still holding on to an upper
1441 1463 * handle, release it. As a result of fallback we might also see
1442 1464 * STREAMS based conns with upper handles, in which case there is
1443 1465 * nothing to do other than clearing the field.
1444 1466 */
1445 1467 if (connp->conn_upper_handle != NULL) {
1446 1468 if (IPCL_IS_NONSTR(connp)) {
1447 1469 (*connp->conn_upcalls->su_closed)(
1448 1470 connp->conn_upper_handle);
1449 1471 tcp->tcp_detached = B_TRUE;
1450 1472 }
1451 1473 connp->conn_upper_handle = NULL;
1452 1474 connp->conn_upcalls = NULL;
1453 1475 }
1454 1476 }
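Editor's note: the cb_destroy call added to tcp_free() above comes from the pluggable congestion-control framework this change introduces (see the new inet/cc.h include). The sketch below shows only the optional-hook pattern; the real struct lives in inet/cc.h, and every name here other than cb_destroy is hypothetical.

#include <stddef.h>

/* Illustrative stand-in for the algorithm callback table. */
struct cc_algo_sketch {
	const char	*name;
	/* Optional teardown hook; algorithms may leave this NULL. */
	void		(*cb_destroy)(void *ccv);
};

static void
conn_cc_teardown(struct cc_algo_sketch *algo, void *ccv)
{
	/*
	 * Hooks are optional, so callers must NULL-check before
	 * invoking them, exactly as tcp_free() does above.
	 */
	if (algo != NULL && algo->cb_destroy != NULL)
		algo->cb_destroy(ccv);
}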
1455 1477
1456 1478 /*
1457 1479 * tcp_get_conn/tcp_free_conn
1458 1480 *
1459 1481 * tcp_get_conn is used to get a clean tcp connection structure.
1460 1482 * It tries to reuse the connections put on the freelist by the
1461 1483 * time_wait_collector, failing which it goes to the kmem_cache. This
1462 1484 * way has two benefits compared to just allocating from and
1463 1485 * freeing to kmem_cache.
1464 1486 * 1) The time_wait_collector can free (which includes the cleanup)
1465 1487 * outside the squeue. So when the interrupt comes, we have a clean
1466 1488 * connection sitting in the freelist. Obviously, this buys us
1467 1489 * performance.
1468 1490 *
1469 1491 * 2) Defence against DOS attacks. Allocating a tcp/conn in tcp_input_listener
1470 1492 * has multiple disadvantages, such as tying up the squeue during alloc.
1471 1493 * But allocating the conn/tcp in IP land is also not the best since
1472 1494 * we can't check the 'q' and 'q0' which are protected by squeue and
1473 1495 * blindly allocate memory which might have to be freed here if we are
1474 1496 * not allowed to accept the connection. By using the freelist and
41 lines elided
1475 1497 * putting the conn/tcp back in freelist, we don't pay a penalty for
1476 1498 * allocating memory without checking 'q/q0' and freeing it if we can't
1477 1499 * accept the connection.
1478 1500 *
1479 1501 * Care should be taken to put the conn back in the same squeue's freelist
1480 1502 * from which it was allocated. Best results are obtained if conn is
1481 1503 * allocated from listener's squeue and freed to the same. Time wait
1482 1504 * collector will free up the freelist if the connection ends up sitting
1483 1505 * there for too long.
1484 1506 */
1485 -void *
1507 +conn_t *
1486 1508 tcp_get_conn(void *arg, tcp_stack_t *tcps)
1487 1509 {
1488 1510 tcp_t *tcp = NULL;
1489 1511 conn_t *connp = NULL;
1490 1512 squeue_t *sqp = (squeue_t *)arg;
1491 1513 tcp_squeue_priv_t *tcp_time_wait;
1492 1514 netstack_t *ns;
1493 1515 mblk_t *tcp_rsrv_mp = NULL;
1494 1516
1495 1517 tcp_time_wait =
1496 1518 *((tcp_squeue_priv_t **)squeue_getprivate(sqp, SQPRIVATE_TCP));
1497 1519
1498 1520 mutex_enter(&tcp_time_wait->tcp_time_wait_lock);
1499 1521 tcp = tcp_time_wait->tcp_free_list;
1500 1522 ASSERT((tcp != NULL) ^ (tcp_time_wait->tcp_free_list_cnt == 0));
1501 1523 if (tcp != NULL) {
1502 1524 tcp_time_wait->tcp_free_list = tcp->tcp_time_wait_next;
1503 1525 tcp_time_wait->tcp_free_list_cnt--;
1504 1526 mutex_exit(&tcp_time_wait->tcp_time_wait_lock);
1505 1527 tcp->tcp_time_wait_next = NULL;
1506 1528 connp = tcp->tcp_connp;
1507 1529 connp->conn_flags |= IPCL_REUSED;
1508 1530
1509 1531 ASSERT(tcp->tcp_tcps == NULL);
1510 1532 ASSERT(connp->conn_netstack == NULL);
1511 1533 ASSERT(tcp->tcp_rsrv_mp != NULL);
1512 1534 ns = tcps->tcps_netstack;
1513 1535 netstack_hold(ns);
18 lines elided
1514 1536 connp->conn_netstack = ns;
1515 1537 connp->conn_ixa->ixa_ipst = ns->netstack_ip;
1516 1538 tcp->tcp_tcps = tcps;
1517 1539 ipcl_globalhash_insert(connp);
1518 1540
1519 1541 connp->conn_ixa->ixa_notify_cookie = tcp;
1520 1542 ASSERT(connp->conn_ixa->ixa_notify == tcp_notify);
1521 1543 connp->conn_recv = tcp_input_data;
1522 1544 ASSERT(connp->conn_recvicmp == tcp_icmp_input);
1523 1545 ASSERT(connp->conn_verifyicmp == tcp_verifyicmp);
1524 - return ((void *)connp);
1546 + return (connp);
1525 1547 }
1526 1548 mutex_exit(&tcp_time_wait->tcp_time_wait_lock);
1527 1549 /*
1528 1550 * Pre-allocate the tcp_rsrv_mp. This mblk will not be freed until
1529 1551 * this conn_t/tcp_t is freed at ipcl_conn_destroy().
1530 1552 */
1531 1553 tcp_rsrv_mp = allocb(0, BPRI_HI);
1532 1554 if (tcp_rsrv_mp == NULL)
1533 1555 return (NULL);
1534 1556
1535 1557 if ((connp = ipcl_conn_create(IPCL_TCPCONN, KM_NOSLEEP,
1536 1558 tcps->tcps_netstack)) == NULL) {
1537 1559 freeb(tcp_rsrv_mp);
1538 1560 return (NULL);
1539 1561 }
1540 1562
1541 1563 tcp = connp->conn_tcp;
1542 1564 tcp->tcp_rsrv_mp = tcp_rsrv_mp;
1543 1565 mutex_init(&tcp->tcp_rsrv_mp_lock, NULL, MUTEX_DEFAULT, NULL);
1544 1566
1545 1567 tcp->tcp_tcps = tcps;
1546 1568
1547 1569 connp->conn_recv = tcp_input_data;
1548 1570 connp->conn_recvicmp = tcp_icmp_input;
14 lines elided
1549 1571 connp->conn_verifyicmp = tcp_verifyicmp;
1550 1572
1551 1573 /*
1552 1574 * Register tcp_notify to listen to capability changes detected by IP.
1553 1575 * This upcall is made in the context of the call to conn_ip_output
1554 1576 * thus it is inside the squeue.
1555 1577 */
1556 1578 connp->conn_ixa->ixa_notify = tcp_notify;
1557 1579 connp->conn_ixa->ixa_notify_cookie = tcp;
1558 1580
1559 - return ((void *)connp);
1581 + return (connp);
1560 1582 }
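
In effect, tcp_get_conn() implements a per-squeue object cache with a slow-path fallback. A minimal userland sketch of the same pattern, assuming ordinary pthreads and heap allocation (all names here are illustrative, not the kernel's):

    /*
     * Freelist-with-fallback, as in tcp_get_conn(): pop a pre-cleaned
     * object under the per-squeue lock; only allocate on a miss.
     */
    #include <stdlib.h>
    #include <pthread.h>

    typedef struct obj {
        struct obj *next;
    } obj_t;

    typedef struct cache {
        pthread_mutex_t lock;
        obj_t *free_list;           /* refilled off the hot path */
        size_t free_cnt;
    } cache_t;

    static obj_t *
    cache_get(cache_t *c, size_t objsize)
    {
        obj_t *o;

        pthread_mutex_lock(&c->lock);
        if ((o = c->free_list) != NULL) {       /* fast path: reuse */
            c->free_list = o->next;
            c->free_cnt--;
            pthread_mutex_unlock(&c->lock);
            o->next = NULL;
            return (o);
        }
        pthread_mutex_unlock(&c->lock);
        return (calloc(1, objsize));            /* slow path: allocate */
    }

    static void
    cache_put(cache_t *c, obj_t *o)
    {
        pthread_mutex_lock(&c->lock);
        o->next = c->free_list;     /* already cleaned, ready for reuse */
        c->free_list = o;
        c->free_cnt++;
        pthread_mutex_unlock(&c->lock);
    }

Keeping only the list pop inside the lock, and the allocation outside it, is what lets the time-wait collector do the expensive cleanup off the hot path, exactly as the block comment above describes.
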
1561 1583
1562 1584 /*
1563 1585 * Handle connect to IPv4 destinations, including connections for AF_INET6
1564 1586 * sockets connecting to IPv4-mapped IPv6 destinations.
1565 1587 * Returns zero if OK, a positive errno, or a negative TLI error.
1566 1588 */
1567 1589 static int
1568 1590 tcp_connect_ipv4(tcp_t *tcp, ipaddr_t *dstaddrp, in_port_t dstport,
1569 1591 uint_t srcid)
1570 1592 {
1571 1593 ipaddr_t dstaddr = *dstaddrp;
1572 1594 uint16_t lport;
1573 1595 conn_t *connp = tcp->tcp_connp;
1574 1596 tcp_stack_t *tcps = tcp->tcp_tcps;
1575 1597 int error;
1576 1598
1577 1599 ASSERT(connp->conn_ipversion == IPV4_VERSION);
1578 1600
1579 1601 /* Check for attempt to connect to INADDR_ANY */
1580 1602 if (dstaddr == INADDR_ANY) {
1581 1603 /*
1582 1604 * SunOS 4.x and 4.3 BSD allow an application
1583 1605 * to connect a TCP socket to INADDR_ANY.
1584 1606 * When they do this, the kernel picks the
1585 1607 * address of one interface and uses it
1586 1608 * instead. The kernel usually ends up
1587 1609 * picking the address of the loopback
1588 1610 * interface. This is an undocumented feature.
1589 1611 * However, we provide the same thing here
1590 1612 * in order to have source and binary
1591 1613 * compatibility with SunOS 4.x.
1592 1614 * Update the T_CONN_REQ (sin/sin6) since it is used to
1593 1615 * generate the T_CONN_CON.
1594 1616 */
1595 1617 dstaddr = htonl(INADDR_LOOPBACK);
1596 1618 *dstaddrp = dstaddr;
1597 1619 }
1598 1620
1599 1621 /* Handle __sin6_src_id if socket not bound to an IP address */
1600 1622 if (srcid != 0 && connp->conn_laddr_v4 == INADDR_ANY) {
1601 1623 if (!ip_srcid_find_id(srcid, &connp->conn_laddr_v6,
1602 1624 IPCL_ZONEID(connp), B_TRUE, tcps->tcps_netstack)) {
1603 1625 /* Mismatch - conn_laddr_v6 would be v6 address. */
1604 1626 return (EADDRNOTAVAIL);
1605 1627 }
1606 1628 connp->conn_saddr_v6 = connp->conn_laddr_v6;
1607 1629 }
1608 1630
1609 1631 IN6_IPADDR_TO_V4MAPPED(dstaddr, &connp->conn_faddr_v6);
1610 1632 connp->conn_fport = dstport;
1611 1633
1612 1634 /*
1613 1635 * At this point the remote destination address and remote port fields
1614 1636 * in the tcp-four-tuple have been filled in the tcp structure. Now we
1615 1637 * have to see which state tcp was in so we can take appropriate action.
1616 1638 */
1617 1639 if (tcp->tcp_state == TCPS_IDLE) {
1618 1640 /*
1619 1641 * We support a quick connect capability here, allowing
1620 1642 * clients to transition directly from IDLE to SYN_SENT.
1621 1643 * tcp_bindi will pick an unused port, insert the connection
1622 1644 * in the bind hash and transition to BOUND state.
1623 1645 */
1624 1646 lport = tcp_update_next_port(tcps->tcps_next_port_to_try,
1625 1647 tcp, B_TRUE);
1626 1648 lport = tcp_bindi(tcp, lport, &connp->conn_laddr_v6, 0, B_TRUE,
1627 1649 B_FALSE, B_FALSE);
1628 1650 if (lport == 0)
1629 1651 return (-TNOADDR);
1630 1652 }
1631 1653
1632 1654 /*
1633 1655 * Lookup the route to determine a source address and the uinfo.
1634 1656 * Setup TCP parameters based on the metrics/DCE.
1635 1657 */
1636 1658 error = tcp_set_destination(tcp);
1637 1659 if (error != 0)
1638 1660 return (error);
1639 1661
1640 1662 /*
1641 1663 * Don't let an endpoint connect to itself.
1642 1664 */
1643 1665 if (connp->conn_faddr_v4 == connp->conn_laddr_v4 &&
1644 1666 connp->conn_fport == connp->conn_lport)
1645 1667 return (-TBADADDR);
1646 1668
1647 1669 tcp->tcp_state = TCPS_SYN_SENT;
1648 1670
1649 1671 return (ipcl_conn_insert_v4(connp));
1650 1672 }
1651 1673
1652 1674 /*
1653 1675 * Handle connect to IPv6 destinations.
1654 1676 * Returns zero if OK, a positive errno, or a negative TLI error.
1655 1677 */
1656 1678 static int
1657 1679 tcp_connect_ipv6(tcp_t *tcp, in6_addr_t *dstaddrp, in_port_t dstport,
1658 1680 uint32_t flowinfo, uint_t srcid, uint32_t scope_id)
1659 1681 {
1660 1682 uint16_t lport;
1661 1683 conn_t *connp = tcp->tcp_connp;
1662 1684 tcp_stack_t *tcps = tcp->tcp_tcps;
1663 1685 int error;
1664 1686
1665 1687 ASSERT(connp->conn_family == AF_INET6);
1666 1688
1667 1689 /*
1668 1690 * If we're here, it means that the destination address is a native
1669 1691 * IPv6 address. Return an error if conn_ipversion is not IPv6. A
1670 1692 * reason why it might not be IPv6 is if the socket was bound to an
1671 1693 * IPv4-mapped IPv6 address.
1672 1694 */
1673 1695 if (connp->conn_ipversion != IPV6_VERSION)
1674 1696 return (-TBADADDR);
1675 1697
1676 1698 /*
1677 1699 * Interpret a zero destination to mean loopback.
1678 1700 * Update the T_CONN_REQ (sin/sin6) since it is used to
1679 1701 * generate the T_CONN_CON.
1680 1702 */
1681 1703 if (IN6_IS_ADDR_UNSPECIFIED(dstaddrp))
1682 1704 *dstaddrp = ipv6_loopback;
1683 1705
1684 1706 /* Handle __sin6_src_id if socket not bound to an IP address */
1685 1707 if (srcid != 0 && IN6_IS_ADDR_UNSPECIFIED(&connp->conn_laddr_v6)) {
1686 1708 if (!ip_srcid_find_id(srcid, &connp->conn_laddr_v6,
1687 1709 IPCL_ZONEID(connp), B_FALSE, tcps->tcps_netstack)) {
1688 1710 /* Mismatch - conn_laddr_v6 would be v4-mapped. */
1689 1711 return (EADDRNOTAVAIL);
1690 1712 }
1691 1713 connp->conn_saddr_v6 = connp->conn_laddr_v6;
1692 1714 }
1693 1715
1694 1716 /*
1695 1717 * Take care of the scope_id now.
1696 1718 */
1697 1719 if (scope_id != 0 && IN6_IS_ADDR_LINKSCOPE(dstaddrp)) {
1698 1720 connp->conn_ixa->ixa_flags |= IXAF_SCOPEID_SET;
1699 1721 connp->conn_ixa->ixa_scopeid = scope_id;
1700 1722 } else {
1701 1723 connp->conn_ixa->ixa_flags &= ~IXAF_SCOPEID_SET;
1702 1724 }
1703 1725
1704 1726 connp->conn_flowinfo = flowinfo;
1705 1727 connp->conn_faddr_v6 = *dstaddrp;
1706 1728 connp->conn_fport = dstport;
1707 1729
1708 1730 /*
1709 1731 * At this point the remote destination address and remote port fields
1710 1732 * in the tcp-four-tuple have been filled in the tcp structure. Now we
1711 1733 * have to see which state tcp was in so we can take appropriate action.
1712 1734 */
1713 1735 if (tcp->tcp_state == TCPS_IDLE) {
1714 1736 /*
1715 1737 * We support a quick connect capability here, allowing
1716 1738 * clients to transition directly from IDLE to SYN_SENT.
1717 1739 * tcp_bindi will pick an unused port, insert the connection
1718 1740 * in the bind hash and transition to BOUND state.
1719 1741 */
1720 1742 lport = tcp_update_next_port(tcps->tcps_next_port_to_try,
1721 1743 tcp, B_TRUE);
1722 1744 lport = tcp_bindi(tcp, lport, &connp->conn_laddr_v6, 0, B_TRUE,
1723 1745 B_FALSE, B_FALSE);
1724 1746 if (lport == 0)
1725 1747 return (-TNOADDR);
1726 1748 }
1727 1749
1728 1750 /*
1729 1751 * Lookup the route to determine a source address and the uinfo.
1730 1752 * Setup TCP parameters based on the metrics/DCE.
1731 1753 */
1732 1754 error = tcp_set_destination(tcp);
1733 1755 if (error != 0)
1734 1756 return (error);
1735 1757
1736 1758 /*
1737 1759 * Don't let an endpoint connect to itself.
1738 1760 */
1739 1761 if (IN6_ARE_ADDR_EQUAL(&connp->conn_faddr_v6, &connp->conn_laddr_v6) &&
1740 1762 connp->conn_fport == connp->conn_lport)
1741 1763 return (-TBADADDR);
1742 1764
1743 1765 tcp->tcp_state = TCPS_SYN_SENT;
1744 1766
1745 1767 return (ipcl_conn_insert_v6(connp));
1746 1768 }
1747 1769
1748 1770 /*
1749 1771 * Disconnect
1750 1772 * Note that unlike other functions this returns a positive tli error
1751 1773 * when it fails; it never returns an errno.
1752 1774 */
1753 1775 static int
1754 1776 tcp_disconnect_common(tcp_t *tcp, t_scalar_t seqnum)
1755 1777 {
1756 1778 conn_t *lconnp;
1757 1779 tcp_stack_t *tcps = tcp->tcp_tcps;
1758 1780 conn_t *connp = tcp->tcp_connp;
1759 1781
1760 1782 /*
1761 1783 * Right now, upper modules pass down a T_DISCON_REQ to TCP
1762 1784 * when the stream is in BOUND state. Do not send a reset,
1763 1785 * since the destination IP address is not valid, and it can
1764 1786 * be the initialized value of all zeros (broadcast address).
1765 1787 */
1766 1788 if (tcp->tcp_state <= TCPS_BOUND) {
1767 1789 if (connp->conn_debug) {
1768 1790 (void) strlog(TCP_MOD_ID, 0, 1, SL_ERROR|SL_TRACE,
1769 1791 "tcp_disconnect: bad state, %d", tcp->tcp_state);
1770 1792 }
1771 1793 return (TOUTSTATE);
1772 1794 } else if (tcp->tcp_state >= TCPS_ESTABLISHED) {
1773 1795 TCPS_CONN_DEC(tcps);
1774 1796 }
1775 1797
1776 1798 if (seqnum == -1 || tcp->tcp_conn_req_max == 0) {
1777 1799
1778 1800 /*
1779 1801 * According to TPI, for non-listeners, ignore seqnum
1780 1802 * and disconnect.
1781 1803 * The following interpretation of a -1 seqnum is historical
1782 1804 * and arguably implied by TPI (TPI only states that for
1783 1805 * T_CONN_IND, a valid seqnum should not be -1).
1784 1806 *
1785 1807 * -1 means disconnect everything
1786 1808 * regardless, even on a listener.
1787 1809 */
1788 1810
1789 1811 int old_state = tcp->tcp_state;
1790 1812 ip_stack_t *ipst = tcps->tcps_netstack->netstack_ip;
1791 1813
1792 1814 /*
1793 1815 * The connection can't be on the tcp_time_wait_head list
1794 1816 * since it is not detached.
1795 1817 */
1796 1818 ASSERT(tcp->tcp_time_wait_next == NULL);
1797 1819 ASSERT(tcp->tcp_time_wait_prev == NULL);
1798 1820 ASSERT(tcp->tcp_time_wait_expire == 0);
1799 1821 /*
1800 1822 * If it used to be a listener, check to make sure no one else
1801 1823 * has taken the port before switching back to LISTEN state.
1802 1824 */
1803 1825 if (connp->conn_ipversion == IPV4_VERSION) {
1804 1826 lconnp = ipcl_lookup_listener_v4(connp->conn_lport,
1805 1827 connp->conn_laddr_v4, IPCL_ZONEID(connp), ipst);
1806 1828 } else {
1807 1829 uint_t ifindex = 0;
1808 1830
1809 1831 if (connp->conn_ixa->ixa_flags & IXAF_SCOPEID_SET)
1810 1832 ifindex = connp->conn_ixa->ixa_scopeid;
1811 1833
1812 1834 /* Allow conn_bound_if listeners? */
1813 1835 lconnp = ipcl_lookup_listener_v6(connp->conn_lport,
1814 1836 &connp->conn_laddr_v6, ifindex, IPCL_ZONEID(connp),
1815 1837 ipst);
1816 1838 }
1817 1839 if (tcp->tcp_conn_req_max && lconnp == NULL) {
1818 1840 tcp->tcp_state = TCPS_LISTEN;
1819 1841 DTRACE_TCP6(state__change, void, NULL, ip_xmit_attr_t *,
1820 1842 connp->conn_ixa, void, NULL, tcp_t *, tcp, void,
1821 1843 NULL, int32_t, old_state);
1822 1844 } else if (old_state > TCPS_BOUND) {
1823 1845 tcp->tcp_conn_req_max = 0;
1824 1846 tcp->tcp_state = TCPS_BOUND;
1825 1847 DTRACE_TCP6(state__change, void, NULL, ip_xmit_attr_t *,
1826 1848 connp->conn_ixa, void, NULL, tcp_t *, tcp, void,
1827 1849 NULL, int32_t, old_state);
1828 1850
1829 1851 /*
1830 1852 * If this end point is not going to become a listener,
1831 1853 * decrement the listener connection count if
1832 1854 * necessary. Note that we do not do this if it is
1833 1855 * going to be a listener (the above if case) since
1834 1856 * then it may remove the counter struct.
1835 1857 */
1836 1858 if (tcp->tcp_listen_cnt != NULL)
1837 1859 TCP_DECR_LISTEN_CNT(tcp);
1838 1860 }
1839 1861 if (lconnp != NULL)
1840 1862 CONN_DEC_REF(lconnp);
1841 1863 switch (old_state) {
1842 1864 case TCPS_SYN_SENT:
1843 1865 case TCPS_SYN_RCVD:
1844 1866 TCPS_BUMP_MIB(tcps, tcpAttemptFails);
1845 1867 break;
1846 1868 case TCPS_ESTABLISHED:
1847 1869 case TCPS_CLOSE_WAIT:
1848 1870 TCPS_BUMP_MIB(tcps, tcpEstabResets);
1849 1871 break;
1850 1872 }
1851 1873
1852 1874 if (tcp->tcp_fused)
1853 1875 tcp_unfuse(tcp);
1854 1876
1855 1877 mutex_enter(&tcp->tcp_eager_lock);
1856 1878 if ((tcp->tcp_conn_req_cnt_q0 != 0) ||
1857 1879 (tcp->tcp_conn_req_cnt_q != 0)) {
1858 1880 tcp_eager_cleanup(tcp, 0);
1859 1881 }
1860 1882 mutex_exit(&tcp->tcp_eager_lock);
1861 1883
1862 1884 tcp_xmit_ctl("tcp_disconnect", tcp, tcp->tcp_snxt,
1863 1885 tcp->tcp_rnxt, TH_RST | TH_ACK);
1864 1886
1865 1887 tcp_reinit(tcp);
1866 1888
1867 1889 return (0);
1868 1890 } else if (!tcp_eager_blowoff(tcp, seqnum)) {
1869 1891 return (TBADSEQ);
1870 1892 }
1871 1893 return (0);
1872 1894 }
1873 1895
1874 1896 /*
1875 1897 * Our client hereby directs us to reject the connection request
1876 1898 * that tcp_input_listener() marked with 'seqnum'. Rejection consists
1877 1899 * of sending the appropriate RST, not an ICMP error.
1878 1900 */
1879 1901 void
1880 1902 tcp_disconnect(tcp_t *tcp, mblk_t *mp)
1881 1903 {
1882 1904 t_scalar_t seqnum;
1883 1905 int error;
1884 1906 conn_t *connp = tcp->tcp_connp;
1885 1907
1886 1908 ASSERT((uintptr_t)(mp->b_wptr - mp->b_rptr) <= (uintptr_t)INT_MAX);
1887 1909 if ((mp->b_wptr - mp->b_rptr) < sizeof (struct T_discon_req)) {
1888 1910 tcp_err_ack(tcp, mp, TPROTO, 0);
1889 1911 return;
1890 1912 }
1891 1913 seqnum = ((struct T_discon_req *)mp->b_rptr)->SEQ_number;
1892 1914 error = tcp_disconnect_common(tcp, seqnum);
1893 1915 if (error != 0)
1894 1916 tcp_err_ack(tcp, mp, error, 0);
1895 1917 else {
1896 1918 if (tcp->tcp_state >= TCPS_ESTABLISHED) {
1897 1919 /* Send M_FLUSH according to TPI */
1898 1920 (void) putnextctl1(connp->conn_rq, M_FLUSH, FLUSHRW);
1899 1921 }
1900 1922 mp = mi_tpi_ok_ack_alloc(mp);
1901 1923 if (mp != NULL)
1902 1924 putnext(connp->conn_rq, mp);
1903 1925 }
1904 1926 }
1905 1927
1906 1928 /*
1907 1929 * Handle reinitialization of a tcp structure.
1908 1930 * Maintain "binding state" resetting the state to BOUND, LISTEN, or IDLE.
1909 1931 */
1910 1932 static void
1911 1933 tcp_reinit(tcp_t *tcp)
1912 1934 {
1913 1935 mblk_t *mp;
1914 1936 tcp_stack_t *tcps = tcp->tcp_tcps;
1915 1937 conn_t *connp = tcp->tcp_connp;
1916 1938 int32_t oldstate;
1917 1939
1918 1940 /* tcp_reinit should never be called for detached tcp_t's */
349 lines elided
1919 1941 ASSERT(tcp->tcp_listener == NULL);
1920 1942 ASSERT((connp->conn_family == AF_INET &&
1921 1943 connp->conn_ipversion == IPV4_VERSION) ||
1922 1944 (connp->conn_family == AF_INET6 &&
1923 1945 (connp->conn_ipversion == IPV4_VERSION ||
1924 1946 connp->conn_ipversion == IPV6_VERSION)));
1925 1947
1926 1948 /* Cancel outstanding timers */
1927 1949 tcp_timers_stop(tcp);
1928 1950
1929 - /*
1930 - * Reset everything in the state vector, after updating global
1931 - * MIB data from instance counters.
1932 - */
1933 - TCPS_UPDATE_MIB(tcps, tcpHCInSegs, tcp->tcp_ibsegs);
1934 - tcp->tcp_ibsegs = 0;
1935 - TCPS_UPDATE_MIB(tcps, tcpHCOutSegs, tcp->tcp_obsegs);
1936 - tcp->tcp_obsegs = 0;
1937 -
1938 1951 tcp_close_mpp(&tcp->tcp_xmit_head);
1939 1952 if (tcp->tcp_snd_zcopy_aware)
1940 1953 tcp_zcopy_notify(tcp);
1941 1954 tcp->tcp_xmit_last = tcp->tcp_xmit_tail = NULL;
1942 1955 tcp->tcp_unsent = tcp->tcp_xmit_tail_unsent = 0;
1943 1956 mutex_enter(&tcp->tcp_non_sq_lock);
1944 1957 if (tcp->tcp_flow_stopped &&
1945 1958 TCP_UNSENT_BYTES(tcp) <= connp->conn_sndlowat) {
1946 1959 tcp_clrqfull(tcp);
1947 1960 }
1948 1961 mutex_exit(&tcp->tcp_non_sq_lock);
1949 1962 tcp_close_mpp(&tcp->tcp_reass_head);
1950 1963 tcp->tcp_reass_tail = NULL;
1951 1964 if (tcp->tcp_rcv_list != NULL) {
1952 1965 /* Free b_next chain */
1953 1966 tcp_close_mpp(&tcp->tcp_rcv_list);
1954 1967 tcp->tcp_rcv_last_head = NULL;
1955 1968 tcp->tcp_rcv_last_tail = NULL;
1956 1969 tcp->tcp_rcv_cnt = 0;
1957 1970 }
1958 1971 tcp->tcp_rcv_last_tail = NULL;
1959 1972
1960 1973 if ((mp = tcp->tcp_urp_mp) != NULL) {
1961 1974 freemsg(mp);
1962 1975 tcp->tcp_urp_mp = NULL;
1963 1976 }
1964 1977 if ((mp = tcp->tcp_urp_mark_mp) != NULL) {
1965 1978 freemsg(mp);
1966 1979 tcp->tcp_urp_mark_mp = NULL;
1967 1980 }
1968 1981 if (tcp->tcp_fused_sigurg_mp != NULL) {
1969 1982 ASSERT(!IPCL_IS_NONSTR(tcp->tcp_connp));
1970 1983 freeb(tcp->tcp_fused_sigurg_mp);
1971 1984 tcp->tcp_fused_sigurg_mp = NULL;
1972 1985 }
1973 1986 if (tcp->tcp_ordrel_mp != NULL) {
1974 1987 ASSERT(!IPCL_IS_NONSTR(tcp->tcp_connp));
1975 1988 freeb(tcp->tcp_ordrel_mp);
1976 1989 tcp->tcp_ordrel_mp = NULL;
1977 1990 }
1978 1991
1979 1992 /*
1980 1993 * The following is a union with two members of
1981 1994 * identical type and size, so the following cleanup
1982 1995 * is enough.
1983 1996 */
1984 1997 tcp_close_mpp(&tcp->tcp_conn.tcp_eager_conn_ind);
1985 1998
1986 1999 CL_INET_DISCONNECT(connp);
1987 2000
1988 2001 /*
1989 2002 * The connection can't be on the tcp_time_wait_head list
1990 2003 * since it is not detached.
1991 2004 */
1992 2005 ASSERT(tcp->tcp_time_wait_next == NULL);
1993 2006 ASSERT(tcp->tcp_time_wait_prev == NULL);
1994 2007 ASSERT(tcp->tcp_time_wait_expire == 0);
1995 2008
1996 2009 /*
1997 2010 * Reset/preserve other values
1998 2011 */
1999 2012 tcp_reinit_values(tcp);
2000 2013 ipcl_hash_remove(connp);
2001 2014 /* Note that ixa_cred gets cleared in ixa_cleanup */
2002 2015 ixa_cleanup(connp->conn_ixa);
2003 2016 tcp_ipsec_cleanup(tcp);
2004 2017
2005 2018 connp->conn_laddr_v6 = connp->conn_bound_addr_v6;
2006 2019 connp->conn_saddr_v6 = connp->conn_bound_addr_v6;
2007 2020 oldstate = tcp->tcp_state;
2008 2021
2009 2022 if (tcp->tcp_conn_req_max != 0) {
2010 2023 /*
2011 2024 * This is the case when a TLI program uses the same
2012 2025 * transport end point to accept a connection. This
2013 2026 * makes the TCP both a listener and acceptor. When
2014 2027 * this connection is closed, we need to set the state
2015 2028 * back to TCPS_LISTEN. Make sure that the eager list
2016 2029 * is reinitialized.
2017 2030 *
2018 2031 * Note that this stream is still bound to the four
2019 2032 * tuples of the previous connection in IP. If a new
2020 2033 * SYN with different foreign address comes in, IP will
2021 2034 * not find it and will send it to the global queue. In
2022 2035 * the global queue, TCP will do a tcp_lookup_listener()
2023 2036 * to find this stream. This works because this stream
2024 2037 * is only removed from connected hash.
2025 2038 *
2026 2039 */
2027 2040 tcp->tcp_state = TCPS_LISTEN;
2028 2041 tcp->tcp_eager_next_q0 = tcp->tcp_eager_prev_q0 = tcp;
2029 2042 tcp->tcp_eager_next_drop_q0 = tcp;
2030 2043 tcp->tcp_eager_prev_drop_q0 = tcp;
2031 2044 /*
2032 2045 * Initially set conn_recv to tcp_input_listener_unbound to try
2033 2046 * to pick a good squeue for the listener when the first SYN
2034 2047 * arrives. tcp_input_listener_unbound sets it to
2035 2048 * tcp_input_listener on that first SYN.
2036 2049 */
2037 2050 connp->conn_recv = tcp_input_listener_unbound;
2038 2051
2039 2052 connp->conn_proto = IPPROTO_TCP;
2040 2053 connp->conn_faddr_v6 = ipv6_all_zeros;
2041 2054 connp->conn_fport = 0;
2042 2055
2043 2056 (void) ipcl_bind_insert(connp);
2044 2057 } else {
2045 2058 tcp->tcp_state = TCPS_BOUND;
2046 2059 }
2047 2060
2048 2061 /*
2049 2062 * Initialize to default values
2050 2063 */
2051 2064 tcp_init_values(tcp, NULL);
2052 2065
2053 2066 DTRACE_TCP6(state__change, void, NULL, ip_xmit_attr_t *,
2054 2067 connp->conn_ixa, void, NULL, tcp_t *, tcp, void, NULL,
2055 2068 int32_t, oldstate);
2056 2069
2057 2070 ASSERT(tcp->tcp_ptpbhn != NULL);
2058 2071 tcp->tcp_rwnd = connp->conn_rcvbuf;
2059 2072 tcp->tcp_mss = connp->conn_ipversion != IPV4_VERSION ?
2060 2073 tcps->tcps_mss_def_ipv6 : tcps->tcps_mss_def_ipv4;
2061 2074 }
2062 2075
2063 2076 /*
2064 2077 * Force values to zero that need to be zero.
2065 2078 * Do not touch values associated with the BOUND or LISTEN state
2066 2079 * since the connection will end up in that state after the reinit.
2067 2080 * NOTE: tcp_reinit_values MUST have a line for each field in the tcp_t
2068 2081 * structure!
2069 2082 */
2070 2083 static void
2071 2084 tcp_reinit_values(tcp_t *tcp)
2072 2085 {
2073 2086 tcp_stack_t *tcps = tcp->tcp_tcps;
2074 2087 conn_t *connp = tcp->tcp_connp;
2075 2088
2076 2089 #ifndef lint
2077 2090 #define DONTCARE(x)
2078 2091 #define PRESERVE(x)
2079 2092 #else
2080 2093 #define DONTCARE(x) ((x) = (x))
2081 2094 #define PRESERVE(x) ((x) = (x))
2082 2095 #endif /* lint */
2083 2096
2084 2097 PRESERVE(tcp->tcp_bind_hash_port);
2085 2098 PRESERVE(tcp->tcp_bind_hash);
2086 2099 PRESERVE(tcp->tcp_ptpbhn);
2087 2100 PRESERVE(tcp->tcp_acceptor_hash);
2088 2101 PRESERVE(tcp->tcp_ptpahn);
2089 2102
2090 2103 /* Should be ASSERT NULL on these with new code! */
2091 2104 ASSERT(tcp->tcp_time_wait_next == NULL);
2092 2105 ASSERT(tcp->tcp_time_wait_prev == NULL);
2093 2106 ASSERT(tcp->tcp_time_wait_expire == 0);
2094 2107 PRESERVE(tcp->tcp_state);
2095 2108 PRESERVE(connp->conn_rq);
2096 2109 PRESERVE(connp->conn_wq);
2097 2110
2098 2111 ASSERT(tcp->tcp_xmit_head == NULL);
151 lines elided
2099 2112 ASSERT(tcp->tcp_xmit_last == NULL);
2100 2113 ASSERT(tcp->tcp_unsent == 0);
2101 2114 ASSERT(tcp->tcp_xmit_tail == NULL);
2102 2115 ASSERT(tcp->tcp_xmit_tail_unsent == 0);
2103 2116
2104 2117 tcp->tcp_snxt = 0; /* Displayed in mib */
2105 2118 tcp->tcp_suna = 0; /* Displayed in mib */
2106 2119 tcp->tcp_swnd = 0;
2107 2120 DONTCARE(tcp->tcp_cwnd); /* Init in tcp_process_options */
2108 2121
2109 - ASSERT(tcp->tcp_ibsegs == 0);
2110 - ASSERT(tcp->tcp_obsegs == 0);
2111 -
2112 2122 if (connp->conn_ht_iphc != NULL) {
2113 2123 kmem_free(connp->conn_ht_iphc, connp->conn_ht_iphc_allocated);
2114 2124 connp->conn_ht_iphc = NULL;
2115 2125 connp->conn_ht_iphc_allocated = 0;
2116 2126 connp->conn_ht_iphc_len = 0;
2117 2127 connp->conn_ht_ulp = NULL;
2118 2128 connp->conn_ht_ulp_len = 0;
2119 2129 tcp->tcp_ipha = NULL;
2120 2130 tcp->tcp_ip6h = NULL;
2121 2131 tcp->tcp_tcpha = NULL;
2122 2132 }
2123 2133
2124 2134 /* We clear any IP_OPTIONS and extension headers */
2125 2135 ip_pkt_free(&connp->conn_xmit_ipp);
2126 2136
2127 2137 DONTCARE(tcp->tcp_naglim); /* Init in tcp_init_values */
2128 2138 DONTCARE(tcp->tcp_ipha);
2129 2139 DONTCARE(tcp->tcp_ip6h);
2130 2140 DONTCARE(tcp->tcp_tcpha);
2131 2141 tcp->tcp_valid_bits = 0;
2132 2142
2133 2143 DONTCARE(tcp->tcp_timer_backoff); /* Init in tcp_init_values */
2134 2144 DONTCARE(tcp->tcp_last_recv_time); /* Init in tcp_init_values */
2135 2145 tcp->tcp_last_rcv_lbolt = 0;
2136 2146
2137 2147 tcp->tcp_init_cwnd = 0;
2138 2148
2139 2149 tcp->tcp_urp_last_valid = 0;
2140 2150 tcp->tcp_hard_binding = 0;
2141 2151
2142 2152 tcp->tcp_fin_acked = 0;
2143 2153 tcp->tcp_fin_rcvd = 0;
2144 2154 tcp->tcp_fin_sent = 0;
2145 2155 tcp->tcp_ordrel_done = 0;
2146 2156
2147 2157 tcp->tcp_detached = 0;
2148 2158
2149 2159 tcp->tcp_snd_ws_ok = B_FALSE;
2150 2160 tcp->tcp_snd_ts_ok = B_FALSE;
2151 2161 tcp->tcp_zero_win_probe = 0;
2152 2162
2153 2163 tcp->tcp_loopback = 0;
2154 2164 tcp->tcp_localnet = 0;
2155 2165 tcp->tcp_syn_defense = 0;
2156 2166 tcp->tcp_set_timer = 0;
2157 2167
2158 2168 tcp->tcp_active_open = 0;
2159 2169 tcp->tcp_rexmit = B_FALSE;
2160 2170 tcp->tcp_xmit_zc_clean = B_FALSE;
2161 2171
2162 2172 tcp->tcp_snd_sack_ok = B_FALSE;
2163 2173 tcp->tcp_hwcksum = B_FALSE;
2164 2174
2165 2175 DONTCARE(tcp->tcp_maxpsz_multiplier); /* Init in tcp_init_values */
2166 2176
2167 2177 tcp->tcp_conn_def_q0 = 0;
2168 2178 tcp->tcp_ip_forward_progress = B_FALSE;
2169 2179 tcp->tcp_ecn_ok = B_FALSE;
2170 2180
2171 2181 tcp->tcp_cwr = B_FALSE;
2172 2182 tcp->tcp_ecn_echo_on = B_FALSE;
2173 2183 tcp->tcp_is_wnd_shrnk = B_FALSE;
2174 2184
2175 2185 TCP_NOTSACK_REMOVE_ALL(tcp->tcp_notsack_list, tcp);
2176 2186 bzero(&tcp->tcp_sack_info, sizeof (tcp_sack_info_t));
2177 2187
2178 2188 tcp->tcp_rcv_ws = 0;
2179 2189 tcp->tcp_snd_ws = 0;
2180 2190 tcp->tcp_ts_recent = 0;
2181 2191 tcp->tcp_rnxt = 0; /* Displayed in mib */
2182 2192 DONTCARE(tcp->tcp_rwnd); /* Set in tcp_reinit() */
2183 2193 tcp->tcp_initial_pmtu = 0;
2184 2194
2185 2195 ASSERT(tcp->tcp_reass_head == NULL);
2186 2196 ASSERT(tcp->tcp_reass_tail == NULL);
2187 2197
2188 2198 tcp->tcp_cwnd_cnt = 0;
2189 2199
2190 2200 ASSERT(tcp->tcp_rcv_list == NULL);
2191 2201 ASSERT(tcp->tcp_rcv_last_head == NULL);
2192 2202 ASSERT(tcp->tcp_rcv_last_tail == NULL);
71 lines elided
2193 2203 ASSERT(tcp->tcp_rcv_cnt == 0);
2194 2204
2195 2205 DONTCARE(tcp->tcp_cwnd_ssthresh); /* Init in tcp_set_destination */
2196 2206 DONTCARE(tcp->tcp_cwnd_max); /* Init in tcp_init_values */
2197 2207 tcp->tcp_csuna = 0;
2198 2208
2199 2209 tcp->tcp_rto = 0; /* Displayed in MIB */
2200 2210 DONTCARE(tcp->tcp_rtt_sa); /* Init in tcp_init_values */
2201 2211 DONTCARE(tcp->tcp_rtt_sd); /* Init in tcp_init_values */
2202 2212 tcp->tcp_rtt_update = 0;
2213 + tcp->tcp_rtt_sum = 0;
2214 + tcp->tcp_rtt_cnt = 0;
2203 2215
2204 2216 DONTCARE(tcp->tcp_swl1); /* Init in case TCPS_LISTEN/TCPS_SYN_SENT */
2205 2217 DONTCARE(tcp->tcp_swl2); /* Init in case TCPS_LISTEN/TCPS_SYN_SENT */
2206 2218
2207 2219 tcp->tcp_rack = 0; /* Displayed in mib */
2208 2220 tcp->tcp_rack_cnt = 0;
2209 2221 tcp->tcp_rack_cur_max = 0;
2210 2222 tcp->tcp_rack_abs_max = 0;
2211 2223
2212 2224 tcp->tcp_max_swnd = 0;
2213 2225
2214 2226 ASSERT(tcp->tcp_listener == NULL);
2215 2227
2216 2228 DONTCARE(tcp->tcp_irs); /* tcp_valid_bits cleared */
2217 2229 DONTCARE(tcp->tcp_iss); /* tcp_valid_bits cleared */
2218 2230 DONTCARE(tcp->tcp_fss); /* tcp_valid_bits cleared */
2219 2231 DONTCARE(tcp->tcp_urg); /* tcp_valid_bits cleared */
2220 2232
2221 2233 ASSERT(tcp->tcp_conn_req_cnt_q == 0);
2222 2234 ASSERT(tcp->tcp_conn_req_cnt_q0 == 0);
2223 2235 PRESERVE(tcp->tcp_conn_req_max);
2224 2236 PRESERVE(tcp->tcp_conn_req_seqnum);
2225 2237
2226 2238 DONTCARE(tcp->tcp_first_timer_threshold); /* Init in tcp_init_values */
2227 2239 DONTCARE(tcp->tcp_second_timer_threshold); /* Init in tcp_init_values */
2228 2240 DONTCARE(tcp->tcp_first_ctimer_threshold); /* Init in tcp_init_values */
2229 2241 DONTCARE(tcp->tcp_second_ctimer_threshold); /* in tcp_init_values */
2230 2242
2231 2243 DONTCARE(tcp->tcp_urp_last); /* tcp_urp_last_valid is cleared */
2232 2244 ASSERT(tcp->tcp_urp_mp == NULL);
2233 2245 ASSERT(tcp->tcp_urp_mark_mp == NULL);
2234 2246 ASSERT(tcp->tcp_fused_sigurg_mp == NULL);
2235 2247
2236 2248 ASSERT(tcp->tcp_eager_next_q == NULL);
2237 2249 ASSERT(tcp->tcp_eager_last_q == NULL);
2238 2250 ASSERT((tcp->tcp_eager_next_q0 == NULL &&
2239 2251 tcp->tcp_eager_prev_q0 == NULL) ||
2240 2252 tcp->tcp_eager_next_q0 == tcp->tcp_eager_prev_q0);
2241 2253 ASSERT(tcp->tcp_conn.tcp_eager_conn_ind == NULL);
2242 2254
2243 2255 ASSERT((tcp->tcp_eager_next_drop_q0 == NULL &&
2244 2256 tcp->tcp_eager_prev_drop_q0 == NULL) ||
2245 2257 tcp->tcp_eager_next_drop_q0 == tcp->tcp_eager_prev_drop_q0);
2246 2258
2247 2259 DONTCARE(tcp->tcp_ka_rinterval); /* Init in tcp_init_values */
2248 2260 DONTCARE(tcp->tcp_ka_abort_thres); /* Init in tcp_init_values */
2249 2261 DONTCARE(tcp->tcp_ka_cnt); /* Init in tcp_init_values */
2250 2262
2251 2263 tcp->tcp_client_errno = 0;
2252 2264
2253 2265 DONTCARE(connp->conn_sum); /* Init in tcp_init_values */
2254 2266
2255 2267 connp->conn_faddr_v6 = ipv6_all_zeros; /* Displayed in MIB */
2256 2268
2257 2269 PRESERVE(connp->conn_bound_addr_v6);
2258 2270 tcp->tcp_last_sent_len = 0;
2259 2271 tcp->tcp_dupack_cnt = 0;
2260 2272
2261 2273 connp->conn_fport = 0; /* Displayed in MIB */
2262 2274 PRESERVE(connp->conn_lport);
2263 2275
2264 2276 PRESERVE(tcp->tcp_acceptor_lockp);
2265 2277
2266 2278 ASSERT(tcp->tcp_ordrel_mp == NULL);
2267 2279 PRESERVE(tcp->tcp_acceptor_id);
2268 2280 DONTCARE(tcp->tcp_ipsec_overhead);
2269 2281
2270 2282 PRESERVE(connp->conn_family);
2271 2283 /* Remove any remnants of mapped address binding */
2272 2284 if (connp->conn_family == AF_INET6) {
2273 2285 connp->conn_ipversion = IPV6_VERSION;
2274 2286 tcp->tcp_mss = tcps->tcps_mss_def_ipv6;
2275 2287 } else {
2276 2288 connp->conn_ipversion = IPV4_VERSION;
2277 2289 tcp->tcp_mss = tcps->tcps_mss_def_ipv4;
2278 2290 }
2279 2291
2280 2292 connp->conn_bound_if = 0;
2281 2293 connp->conn_recv_ancillary.crb_all = 0;
2282 2294 tcp->tcp_recvifindex = 0;
2283 2295 tcp->tcp_recvhops = 0;
2284 2296 tcp->tcp_closed = 0;
2285 2297 if (tcp->tcp_hopopts != NULL) {
2286 2298 mi_free(tcp->tcp_hopopts);
2287 2299 tcp->tcp_hopopts = NULL;
2288 2300 tcp->tcp_hopoptslen = 0;
2289 2301 }
2290 2302 ASSERT(tcp->tcp_hopoptslen == 0);
2291 2303 if (tcp->tcp_dstopts != NULL) {
2292 2304 mi_free(tcp->tcp_dstopts);
2293 2305 tcp->tcp_dstopts = NULL;
2294 2306 tcp->tcp_dstoptslen = 0;
2295 2307 }
2296 2308 ASSERT(tcp->tcp_dstoptslen == 0);
2297 2309 if (tcp->tcp_rthdrdstopts != NULL) {
2298 2310 mi_free(tcp->tcp_rthdrdstopts);
2299 2311 tcp->tcp_rthdrdstopts = NULL;
2300 2312 tcp->tcp_rthdrdstoptslen = 0;
2301 2313 }
2302 2314 ASSERT(tcp->tcp_rthdrdstoptslen == 0);
2303 2315 if (tcp->tcp_rthdr != NULL) {
2304 2316 mi_free(tcp->tcp_rthdr);
2305 2317 tcp->tcp_rthdr = NULL;
2306 2318 tcp->tcp_rthdrlen = 0;
2307 2319 }
2308 2320 ASSERT(tcp->tcp_rthdrlen == 0);
2309 2321
2310 2322 /* Reset fusion-related fields */
2311 2323 tcp->tcp_fused = B_FALSE;
2312 2324 tcp->tcp_unfusable = B_FALSE;
2313 2325 tcp->tcp_fused_sigurg = B_FALSE;
2314 2326 tcp->tcp_loopback_peer = NULL;
2315 2327
2316 2328 tcp->tcp_lso = B_FALSE;
2317 2329
2318 2330 tcp->tcp_in_ack_unsent = 0;
2319 2331 tcp->tcp_cork = B_FALSE;
2320 2332 tcp->tcp_tconnind_started = B_FALSE;
2321 2333
2322 2334 PRESERVE(tcp->tcp_squeue_bytes);
2323 2335
2324 2336 tcp->tcp_closemp_used = B_FALSE;
2325 2337
2326 2338 PRESERVE(tcp->tcp_rsrv_mp);
2327 2339 PRESERVE(tcp->tcp_rsrv_mp_lock);
115 lines elided
2328 2340
2329 2341 #ifdef DEBUG
2330 2342 DONTCARE(tcp->tcmp_stk[0]);
2331 2343 #endif
2332 2344
2333 2345 PRESERVE(tcp->tcp_connid);
2334 2346
2335 2347 ASSERT(tcp->tcp_listen_cnt == NULL);
2336 2348 ASSERT(tcp->tcp_reass_tid == 0);
2337 2349
2350 + /* Allow the CC algorithm to clean up after itself. */
2351 + if (tcp->tcp_cc_algo->cb_destroy != NULL)
2352 + tcp->tcp_cc_algo->cb_destroy(&tcp->tcp_ccv);
2353 + tcp->tcp_cc_algo = NULL;
2354 +
2338 2355 #undef DONTCARE
2339 2356 #undef PRESERVE
2340 2357 }
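
The cb_destroy hook invoked above, and the matching cb_init in tcp_init_values() below, come from the pluggable congestion-control framework brought in through <inet/cc.h>. A hedged sketch of the callback contract, modeled on the BSD-derived cc framework rather than the literal illumos structures (the field names are assumptions):

    /*
     * A CC algorithm exposes optional per-connection hooks; either may
     * be NULL, which is why every call site above tests them before use.
     * (Illustrative only; not the exact illumos definitions.)
     */
    #include <stddef.h>

    struct cc_var;                           /* per-connection CC state */

    struct cc_algo {
        const char *name;
        int (*cb_init)(struct cc_var *);     /* may allocate private state */
        void (*cb_destroy)(struct cc_var *); /* must free that state */
    };

    /* Attach, as tcp_init_values() does; a cb_init failure is fatal there. */
    static int
    cc_attach(const struct cc_algo *algo, struct cc_var *ccv)
    {
        if (algo->cb_init != NULL)
            return (algo->cb_init(ccv));
        return (0);
    }

    /* Detach, as tcp_cleanup() and tcp_reinit_values() do. */
    static void
    cc_detach(const struct cc_algo *algo, struct cc_var *ccv)
    {
        if (algo != NULL && algo->cb_destroy != NULL)
            algo->cb_destroy(ccv);
    }

Note the asymmetry the diff is careful about: tcp_reinit_values() NULLs tcp_cc_algo after the destroy, so a reused connection picks up an algorithm again (and re-runs cb_init) in tcp_init_values().
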
2341 2358
2342 2359 /*
2343 2360 * Initialize the various fields in tcp_t. If parent (the listener) is
2344 2361 * non-NULL, certain values will be inherited from it.
2345 2362 */
2346 2363 void
2347 2364 tcp_init_values(tcp_t *tcp, tcp_t *parent)
2348 2365 {
2349 2366 tcp_stack_t *tcps = tcp->tcp_tcps;
2350 2367 conn_t *connp = tcp->tcp_connp;
2351 - clock_t rto;
2352 2368
2353 2369 ASSERT((connp->conn_family == AF_INET &&
2354 2370 connp->conn_ipversion == IPV4_VERSION) ||
2355 2371 (connp->conn_family == AF_INET6 &&
2356 2372 (connp->conn_ipversion == IPV4_VERSION ||
2357 2373 connp->conn_ipversion == IPV6_VERSION)));
2358 2374
2375 + tcp->tcp_ccv.type = IPPROTO_TCP;
2376 + tcp->tcp_ccv.ccvc.tcp = tcp;
2377 +
2359 2378 if (parent == NULL) {
2379 + tcp->tcp_cc_algo = tcps->tcps_default_cc_algo;
2380 +
2360 2381 tcp->tcp_naglim = tcps->tcps_naglim_def;
2361 2382
2362 2383 tcp->tcp_rto_initial = tcps->tcps_rexmit_interval_initial;
2363 2384 tcp->tcp_rto_min = tcps->tcps_rexmit_interval_min;
2364 2385 tcp->tcp_rto_max = tcps->tcps_rexmit_interval_max;
2365 2386
2366 2387 tcp->tcp_first_ctimer_threshold =
2367 2388 tcps->tcps_ip_notify_cinterval;
2368 2389 tcp->tcp_second_ctimer_threshold =
2369 2390 tcps->tcps_ip_abort_cinterval;
2370 2391 tcp->tcp_first_timer_threshold = tcps->tcps_ip_notify_interval;
2371 2392 tcp->tcp_second_timer_threshold = tcps->tcps_ip_abort_interval;
2372 2393
2373 2394 tcp->tcp_fin_wait_2_flush_interval =
2374 2395 tcps->tcps_fin_wait_2_flush_interval;
2375 2396
2376 2397 tcp->tcp_ka_interval = tcps->tcps_keepalive_interval;
7 lines elided
2377 2398 tcp->tcp_ka_abort_thres = tcps->tcps_keepalive_abort_interval;
2378 2399 tcp->tcp_ka_cnt = 0;
2379 2400 tcp->tcp_ka_rinterval = 0;
2380 2401
2381 2402 /*
2382 2403 * Default value of tcp_init_cwnd is 0, so no need to set here
2383 2404 * if parent is NULL. But we need to inherit it from parent.
2384 2405 */
2385 2406 } else {
2386 2407 /* Inherit various TCP parameters from the parent. */
2408 + tcp->tcp_cc_algo = parent->tcp_cc_algo;
2409 +
2387 2410 tcp->tcp_naglim = parent->tcp_naglim;
2388 2411
2389 2412 tcp->tcp_rto_initial = parent->tcp_rto_initial;
2390 2413 tcp->tcp_rto_min = parent->tcp_rto_min;
2391 2414 tcp->tcp_rto_max = parent->tcp_rto_max;
2392 2415
2393 2416 tcp->tcp_first_ctimer_threshold =
2394 2417 parent->tcp_first_ctimer_threshold;
2395 2418 tcp->tcp_second_ctimer_threshold =
2396 2419 parent->tcp_second_ctimer_threshold;
2397 2420 tcp->tcp_first_timer_threshold =
2398 2421 parent->tcp_first_timer_threshold;
2399 2422 tcp->tcp_second_timer_threshold =
2400 2423 parent->tcp_second_timer_threshold;
2401 2424
2402 2425 tcp->tcp_fin_wait_2_flush_interval =
|
↓ open down ↓ |
6 lines elided |
↑ open up ↑ |
2403 2426 parent->tcp_fin_wait_2_flush_interval;
2404 2427
2405 2428 tcp->tcp_ka_interval = parent->tcp_ka_interval;
2406 2429 tcp->tcp_ka_abort_thres = parent->tcp_ka_abort_thres;
2407 2430 tcp->tcp_ka_cnt = parent->tcp_ka_cnt;
2408 2431 tcp->tcp_ka_rinterval = parent->tcp_ka_rinterval;
2409 2432
2410 2433 tcp->tcp_init_cwnd = parent->tcp_init_cwnd;
2411 2434 }
2412 2435
2436 + if (tcp->tcp_cc_algo->cb_init != NULL)
2437 + VERIFY(tcp->tcp_cc_algo->cb_init(&tcp->tcp_ccv) == 0);
2438 +
2413 2439 /*
2414 2440 * Initialize tcp_rtt_sa and tcp_rtt_sd so that the calculated RTO
2415 2441 * will be close to tcp_rexmit_interval_initial. By doing this, we
2416 2442 * allow the algorithm to adjust slowly to large fluctuations of RTT
2417 2443 * during the first few transmissions of a connection, as seen on slow
2418 2444 * links.
2419 2445 */
2420 - tcp->tcp_rtt_sa = tcp->tcp_rto_initial << 2;
2421 - tcp->tcp_rtt_sd = tcp->tcp_rto_initial >> 1;
2422 - rto = (tcp->tcp_rtt_sa >> 3) + tcp->tcp_rtt_sd +
2423 - tcps->tcps_rexmit_interval_extra + (tcp->tcp_rtt_sa >> 5) +
2424 - tcps->tcps_conn_grace_period;
2425 - TCP_SET_RTO(tcp, rto);
2446 + tcp->tcp_rtt_sa = MSEC2NSEC(tcp->tcp_rto_initial) << 2;
2447 + tcp->tcp_rtt_sd = MSEC2NSEC(tcp->tcp_rto_initial) >> 1;
2448 + tcp->tcp_rto = tcp_calculate_rto(tcp, tcps);
2426 2449
2427 2450 tcp->tcp_timer_backoff = 0;
2428 2451 tcp->tcp_ms_we_have_waited = 0;
2429 2452 tcp->tcp_last_recv_time = ddi_get_lbolt();
2430 2453 tcp->tcp_cwnd_max = tcps->tcps_cwnd_max_;
2431 2454 tcp->tcp_cwnd_ssthresh = TCP_MAX_LARGEWIN;
2432 2455
2433 2456 tcp->tcp_maxpsz_multiplier = tcps->tcps_maxpsz_multiplier;
2434 2457
2435 2458 /* NOTE: ISS is now set in tcp_set_destination(). */
2436 2459
2437 2460 /* Reset fusion-related fields */
2438 2461 tcp->tcp_fused = B_FALSE;
2439 2462 tcp->tcp_unfusable = B_FALSE;
2440 2463 tcp->tcp_fused_sigurg = B_FALSE;
2441 2464 tcp->tcp_loopback_peer = NULL;
2442 2465
2443 2466 /* We rebuild the header template on the next connect/conn_request */
2444 2467
2445 2468 connp->conn_mlp_type = mlptSingle;
2446 2469
2447 2470 /*
2448 2471 * Init the window scale to the max so tcp_rwnd_set() won't pare
2449 2472 * down tcp_rwnd. tcp_set_destination() will set the right value later.
2450 2473 */
2451 2474 tcp->tcp_rcv_ws = TCP_MAX_WINSHIFT;
2452 2475 tcp->tcp_rwnd = connp->conn_rcvbuf;
2453 2476
2454 2477 tcp->tcp_cork = B_FALSE;
2455 2478 /*
2456 2479 * Init the tcp_debug option if it wasn't already set. This value
2457 2480 * determines whether TCP calls strlog() to print out
2458 2481 * debug messages. Doing this initialization here means
2459 2482 * that this value is not inherited through
2460 2483 * tcp_reinit().
2461 2484 */
2462 2485 if (!connp->conn_debug)
2463 2486 connp->conn_debug = tcps->tcps_dbg;
2464 2487 }
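
The removed lines above open-coded the RTO formula; the patch replaces them with tcp_calculate_rto() (the inline function from DLPX-42721) and keeps tcp_rtt_sa/tcp_rtt_sd in nanoseconds (hence the MSEC2NSEC() conversions), matching the high-resolution RTT work in DLPX-43064. A hedged reconstruction of the computation, derived from the deleted code; the clamping mirrors what TCP_SET_RTO() presumably applied, and the real helper's unit handling may differ:

    /*
     * Van Jacobson's scaled fixed-point form, as in the removed code:
     * RTO = sa/8 + sd + sa/32 + fixed slack, where tcp_rtt_sa holds 8x
     * the smoothed RTT and tcp_rtt_sd a scaled mean deviation.  The
     * smoothed values are in nanoseconds; the RTO stays in milliseconds.
     */
    #include <stdint.h>

    #define NANOSEC         1000000000LL
    #define MILLISEC        1000LL
    #define NSEC2MSEC(n)    ((n) / (NANOSEC / MILLISEC))

    typedef struct {
        int64_t rtt_sa;     /* 8 * smoothed RTT, ns */
        int64_t rtt_sd;     /* scaled mean deviation, ns */
        uint32_t rto_min;   /* ms */
        uint32_t rto_max;   /* ms */
    } rto_state_t;

    static uint32_t
    calculate_rto(const rto_state_t *s, uint32_t extra_ms, uint32_t grace_ms)
    {
        int64_t rto = NSEC2MSEC((s->rtt_sa >> 3) + s->rtt_sd +
            (s->rtt_sa >> 5)) + extra_ms + grace_ms;

        if (rto < s->rto_min)
            return (s->rto_min);
        if (rto > s->rto_max)
            return (s->rto_max);
        return ((uint32_t)rto);
    }

With the initialization above (sa = 4 * rto_initial, sd = rto_initial / 2, both in nanoseconds), the first computed RTO works out to roughly 1.125 * tcp_rto_initial plus the fixed slack, which is what the block comment means by "close to tcp_rexmit_interval_initial".
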
2465 2488
2466 2489 /*
2467 2490 * Update the TCP connection according to change of PMTU.
2468 2491 *
2469 2492 * Path MTU might have changed by either increase or decrease, so we need to
2470 2493 * adjust the MSS based on the value of ixa_pmtu. No need to handle tiny
2471 2494 * or negative MSS, since tcp_mss_set() will do it.
2472 2495 *
2473 2496 * Returns B_TRUE when the connection PMTU changes, otherwise B_FALSE.
2474 2497 */
2475 2498 boolean_t
2476 2499 tcp_update_pmtu(tcp_t *tcp, boolean_t decrease_only)
2477 2500 {
2478 2501 uint32_t pmtu;
2479 2502 int32_t mss;
2480 2503 conn_t *connp = tcp->tcp_connp;
2481 2504 ip_xmit_attr_t *ixa = connp->conn_ixa;
2482 2505 iaflags_t ixaflags;
2483 2506
2484 2507 if (tcp->tcp_tcps->tcps_ignore_path_mtu)
2485 2508 return (B_FALSE);
2486 2509
2487 2510 if (tcp->tcp_state < TCPS_ESTABLISHED)
2488 2511 return (B_FALSE);
2489 2512
2490 2513 /*
2491 2514 * Always call ip_get_pmtu() to make sure that IP has updated
2492 2515 * ixa_flags properly.
2493 2516 */
2494 2517 pmtu = ip_get_pmtu(ixa);
2495 2518 ixaflags = ixa->ixa_flags;
2496 2519
2497 2520 /*
2498 2521 * Calculate the MSS by decreasing the PMTU by conn_ht_iphc_len and
2499 2522 * IPsec overhead if applied. Make sure to use the most recent
2500 2523 * IPsec information.
2501 2524 */
2502 2525 mss = pmtu - connp->conn_ht_iphc_len - conn_ipsec_length(connp);
2503 2526
2504 2527 /*
2505 2528 * Nothing to change, so just return.
2506 2529 */
2507 2530 if (mss == tcp->tcp_mss)
2508 2531 return (B_FALSE);
2509 2532
2510 2533 /*
2511 2534 * Currently, for ICMP errors, only PMTU decrease is handled.
2512 2535 */
2513 2536 if (mss > tcp->tcp_mss && decrease_only)
2514 2537 return (B_FALSE);
2515 2538
2516 2539 DTRACE_PROBE2(tcp_update_pmtu, int32_t, tcp->tcp_mss, uint32_t, mss);
2517 2540
2518 2541 /*
2519 2542 * Update ixa_fragsize and ixa_pmtu.
2520 2543 */
2521 2544 ixa->ixa_fragsize = ixa->ixa_pmtu = pmtu;
2522 2545
2523 2546 /*
2524 2547 * Adjust MSS and all relevant variables.
2525 2548 */
2526 2549 tcp_mss_set(tcp, mss);
2527 2550
2528 2551 /*
2529 2552 * If the PMTU is below the min size maintained by IP, then ip_get_pmtu
2530 2553 * has set IXAF_PMTU_TOO_SMALL and cleared IXAF_PMTU_IPV4_DF. Since TCP
2531 2554 * has a (potentially different) min size we do the same. Make sure to
2532 2555 * clear IXAF_DONTFRAG, which is used by IP to decide whether to
2533 2556 * fragment the packet.
2534 2557 *
2535 2558 * LSO over IPv6 cannot be fragmented, so we need to disable LSO
2536 2559 * when IPv6 fragmentation is needed.
2537 2560 */
2538 2561 if (mss < tcp->tcp_tcps->tcps_mss_min)
2539 2562 ixaflags |= IXAF_PMTU_TOO_SMALL;
2540 2563
2541 2564 if (ixaflags & IXAF_PMTU_TOO_SMALL)
2542 2565 ixaflags &= ~(IXAF_DONTFRAG | IXAF_PMTU_IPV4_DF);
2543 2566
2544 2567 if ((connp->conn_ipversion == IPV4_VERSION) &&
2545 2568 !(ixaflags & IXAF_PMTU_IPV4_DF)) {
2546 2569 tcp->tcp_ipha->ipha_fragment_offset_and_flags = 0;
2547 2570 }
2548 2571 ixa->ixa_flags = ixaflags;
2549 2572 return (B_TRUE);
2550 2573 }
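
A concrete example of the MSS arithmetic above, assuming a plain IPv4 connection with no IP options and no IPsec (so conn_ht_iphc_len is the 20-byte IP header plus the 20-byte TCP header): a PMTU of 1500 yields mss = 1500 - 40 - 0 = 1460, and an ICMP-reported PMTU of 1280 drops it to 1240. On the ICMP error path decrease_only is set, so the reverse move from 1240 back up to 1460 would be refused by the "mss > tcp_mss && decrease_only" check.
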
2551 2574
2552 2575 int
2553 2576 tcp_maxpsz_set(tcp_t *tcp, boolean_t set_maxblk)
2554 2577 {
2555 2578 conn_t *connp = tcp->tcp_connp;
2556 2579 queue_t *q = connp->conn_rq;
2557 2580 int32_t mss = tcp->tcp_mss;
2558 2581 int maxpsz;
2559 2582
2560 2583 if (TCP_IS_DETACHED(tcp))
2561 2584 return (mss);
2562 2585 if (tcp->tcp_fused) {
2563 2586 maxpsz = tcp_fuse_maxpsz(tcp);
2564 2587 mss = INFPSZ;
2565 2588 } else if (tcp->tcp_maxpsz_multiplier == 0) {
2566 2589 /*
2567 2590 * Set the sd_qn_maxpsz according to the socket send buffer
2568 2591 * size, and sd_maxblk to INFPSZ (-1). This will essentially
2569 2592 * instruct the stream head to copyin user data into contiguous
2570 2593 * kernel-allocated buffers without breaking it up into smaller
2571 2594 * chunks. We round up the buffer size to the nearest SMSS.
2572 2595 */
2573 2596 maxpsz = MSS_ROUNDUP(connp->conn_sndbuf, mss);
2574 2597 mss = INFPSZ;
2575 2598 } else {
2576 2599 /*
2577 2600 * Set sd_qn_maxpsz to approx half the (receiver's) buffer
2578 2601 * (and a multiple of the mss). This instructs the stream
2579 2602 * head to break down larger than SMSS writes into SMSS-
2580 2603 * size mblks, up to tcp_maxpsz_multiplier mblks at a time.
2581 2604 */
2582 2605 maxpsz = tcp->tcp_maxpsz_multiplier * mss;
2583 2606 if (maxpsz > connp->conn_sndbuf / 2) {
2584 2607 maxpsz = connp->conn_sndbuf / 2;
2585 2608 /* Round up to nearest mss */
2586 2609 maxpsz = MSS_ROUNDUP(maxpsz, mss);
2587 2610 }
2588 2611 }
2589 2612
2590 2613 (void) proto_set_maxpsz(q, connp, maxpsz);
2591 2614 if (!(IPCL_IS_NONSTR(connp)))
2592 2615 connp->conn_wq->q_maxpsz = maxpsz;
2593 2616 if (set_maxblk)
2594 2617 (void) proto_set_tx_maxblk(q, connp, mss);
2595 2618 return (mss);
2596 2619 }
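
To make the non-fused branch concrete (hypothetical numbers): with mss = 1460, tcp_maxpsz_multiplier = 10, and conn_sndbuf = 16384, the initial maxpsz of 14600 exceeds conn_sndbuf / 2 = 8192, so it is capped at 8192 and MSS_ROUNDUP() then raises it to 8760 (6 * 1460). The fused and zero-multiplier branches instead hand the stream head an INFPSZ sd_maxblk, so writes are not chopped into MSS-sized mblks at all.
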
2597 2620
2598 2621 /* For /dev/tcp aka AF_INET open */
2599 2622 static int
2600 2623 tcp_openv4(queue_t *q, dev_t *devp, int flag, int sflag, cred_t *credp)
2601 2624 {
2602 2625 return (tcp_open(q, devp, flag, sflag, credp, B_FALSE));
2603 2626 }
2604 2627
2605 2628 /* For /dev/tcp6 aka AF_INET6 open */
2606 2629 static int
2607 2630 tcp_openv6(queue_t *q, dev_t *devp, int flag, int sflag, cred_t *credp)
2608 2631 {
2609 2632 return (tcp_open(q, devp, flag, sflag, credp, B_TRUE));
2610 2633 }
2611 2634
2612 2635 conn_t *
2613 2636 tcp_create_common(cred_t *credp, boolean_t isv6, boolean_t issocket,
2614 2637 int *errorp)
2615 2638 {
2616 2639 tcp_t *tcp = NULL;
2617 2640 conn_t *connp;
2618 2641 zoneid_t zoneid;
2619 2642 tcp_stack_t *tcps;
2620 2643 squeue_t *sqp;
2621 2644
2622 2645 ASSERT(errorp != NULL);
2623 2646 /*
2624 2647 * Find the proper zoneid and netstack.
2625 2648 */
2626 2649 /*
2627 2650 * Special case for install: miniroot needs to be able to
2628 2651 * access files via NFS as though it were always in the
2629 2652 * global zone.
2630 2653 */
2631 2654 if (credp == kcred && nfs_global_client_only != 0) {
2632 2655 zoneid = GLOBAL_ZONEID;
2633 2656 tcps = netstack_find_by_stackid(GLOBAL_NETSTACKID)->
2634 2657 netstack_tcp;
2635 2658 ASSERT(tcps != NULL);
2636 2659 } else {
2637 2660 netstack_t *ns;
2638 2661 int err;
2639 2662
2640 2663 if ((err = secpolicy_basic_net_access(credp)) != 0) {
2641 2664 *errorp = err;
2642 2665 return (NULL);
2643 2666 }
2644 2667
2645 2668 ns = netstack_find_by_cred(credp);
2646 2669 ASSERT(ns != NULL);
2647 2670 tcps = ns->netstack_tcp;
2648 2671 ASSERT(tcps != NULL);
2649 2672
2650 2673 /*
2651 2674 * For exclusive stacks we set the zoneid to zero
216 lines elided
2652 2675 * to make TCP operate as if in the global zone.
2653 2676 */
2654 2677 if (tcps->tcps_netstack->netstack_stackid !=
2655 2678 GLOBAL_NETSTACKID)
2656 2679 zoneid = GLOBAL_ZONEID;
2657 2680 else
2658 2681 zoneid = crgetzoneid(credp);
2659 2682 }
2660 2683
2661 2684 sqp = IP_SQUEUE_GET((uint_t)gethrtime());
2662 - connp = (conn_t *)tcp_get_conn(sqp, tcps);
2685 + connp = tcp_get_conn(sqp, tcps);
2663 2686 /*
2664 2687 * Both tcp_get_conn and netstack_find_by_cred incremented refcnt,
2665 2688 * so we drop it by one.
2666 2689 */
2667 2690 netstack_rele(tcps->tcps_netstack);
2668 2691 if (connp == NULL) {
2669 2692 *errorp = ENOSR;
2670 2693 return (NULL);
2671 2694 }
2672 2695 ASSERT(connp->conn_ixa->ixa_protocol == connp->conn_proto);
2673 2696
2674 2697 connp->conn_sqp = sqp;
2675 2698 connp->conn_initial_sqp = connp->conn_sqp;
2676 2699 connp->conn_ixa->ixa_sqp = connp->conn_sqp;
2677 2700 tcp = connp->conn_tcp;
2678 2701
2679 2702 /*
2680 2703 * Besides asking IP to set the checksum for us, have conn_ip_output
2681 2704 * do the following checks when necessary:
2682 2705 *
2683 2706 * IXAF_VERIFY_SOURCE: drop packets when our outer source goes invalid
2684 2707 * IXAF_VERIFY_PMTU: verify PMTU changes
2685 2708 * IXAF_VERIFY_LSO: verify LSO capability changes
2686 2709 */
2687 2710 connp->conn_ixa->ixa_flags |= IXAF_SET_ULP_CKSUM | IXAF_VERIFY_SOURCE |
2688 2711 IXAF_VERIFY_PMTU | IXAF_VERIFY_LSO;
2689 2712
2690 2713 if (!tcps->tcps_dev_flow_ctl)
2691 2714 connp->conn_ixa->ixa_flags |= IXAF_NO_DEV_FLOW_CTL;
2692 2715
2693 2716 if (isv6) {
2694 2717 connp->conn_ixa->ixa_src_preferences = IPV6_PREFER_SRC_DEFAULT;
2695 2718 connp->conn_ipversion = IPV6_VERSION;
2696 2719 connp->conn_family = AF_INET6;
2697 2720 tcp->tcp_mss = tcps->tcps_mss_def_ipv6;
2698 2721 connp->conn_default_ttl = tcps->tcps_ipv6_hoplimit;
2699 2722 } else {
2700 2723 connp->conn_ipversion = IPV4_VERSION;
2701 2724 connp->conn_family = AF_INET;
2702 2725 tcp->tcp_mss = tcps->tcps_mss_def_ipv4;
2703 2726 connp->conn_default_ttl = tcps->tcps_ipv4_ttl;
2704 2727 }
2705 2728 connp->conn_xmit_ipp.ipp_unicast_hops = connp->conn_default_ttl;
2706 2729
2707 2730 crhold(credp);
2708 2731 connp->conn_cred = credp;
2709 2732 connp->conn_cpid = curproc->p_pid;
2710 2733 connp->conn_open_time = ddi_get_lbolt64();
2711 2734
2712 2735 /* Cache things in the ixa without any refhold */
2713 2736 ASSERT(!(connp->conn_ixa->ixa_free_flags & IXA_FREE_CRED));
2714 2737 connp->conn_ixa->ixa_cred = credp;
2715 2738 connp->conn_ixa->ixa_cpid = connp->conn_cpid;
2716 2739
2717 2740 connp->conn_zoneid = zoneid;
2718 2741 /* conn_allzones cannot be set this early, hence no IPCL_ZONEID */
2719 2742 connp->conn_ixa->ixa_zoneid = zoneid;
2720 2743 connp->conn_mlp_type = mlptSingle;
2721 2744 ASSERT(connp->conn_netstack == tcps->tcps_netstack);
2722 2745 ASSERT(tcp->tcp_tcps == tcps);
2723 2746
2724 2747 /*
2725 2748 * If the caller has the process-wide flag set, then default to MAC
2726 2749 * exempt mode. This allows read-down to unlabeled hosts.
2727 2750 */
2728 2751 if (getpflags(NET_MAC_AWARE, credp) != 0)
2729 2752 connp->conn_mac_mode = CONN_MAC_AWARE;
2730 2753
2731 2754 connp->conn_zone_is_global = (crgetzoneid(credp) == GLOBAL_ZONEID);
2732 2755
2733 2756 if (issocket) {
2734 2757 tcp->tcp_issocket = 1;
2735 2758 }
2736 2759
2737 2760 connp->conn_rcvbuf = tcps->tcps_recv_hiwat;
2738 2761 connp->conn_sndbuf = tcps->tcps_xmit_hiwat;
2739 2762 if (tcps->tcps_snd_lowat_fraction != 0) {
2740 2763 connp->conn_sndlowat = connp->conn_sndbuf /
2741 2764 tcps->tcps_snd_lowat_fraction;
2742 2765 } else {
2743 2766 connp->conn_sndlowat = tcps->tcps_xmit_lowat;
2744 2767 }
2745 2768 connp->conn_so_type = SOCK_STREAM;
2746 2769 connp->conn_wroff = connp->conn_ht_iphc_allocated +
2747 2770 tcps->tcps_wroff_xtra;
2748 2771
2749 2772 SOCK_CONNID_INIT(tcp->tcp_connid);
2750 2773 /* DTrace ignores this - it isn't a tcp:::state-change */
2751 2774 tcp->tcp_state = TCPS_IDLE;
2752 2775 tcp_init_values(tcp, NULL);
2753 2776 return (connp);
2754 2777 }
2755 2778
2756 2779 static int
2757 2780 tcp_open(queue_t *q, dev_t *devp, int flag, int sflag, cred_t *credp,
2758 2781 boolean_t isv6)
2759 2782 {
2760 2783 tcp_t *tcp = NULL;
2761 2784 conn_t *connp = NULL;
2762 2785 int err;
2763 2786 vmem_t *minor_arena = NULL;
2764 2787 dev_t conn_dev;
2765 2788 boolean_t issocket;
2766 2789
2767 2790 if (q->q_ptr != NULL)
2768 2791 return (0);
2769 2792
2770 2793 if (sflag == MODOPEN)
2771 2794 return (EINVAL);
2772 2795
2773 2796 if ((ip_minor_arena_la != NULL) && (flag & SO_SOCKSTR) &&
2774 2797 ((conn_dev = inet_minor_alloc(ip_minor_arena_la)) != 0)) {
2775 2798 minor_arena = ip_minor_arena_la;
2776 2799 } else {
2777 2800 /*
2778 2801 * Either minor numbers in the large arena were exhausted
2779 2802 * or a non-socket application is doing the open.
2780 2803 * Try to allocate from the small arena.
2781 2804 */
2782 2805 if ((conn_dev = inet_minor_alloc(ip_minor_arena_sa)) == 0) {
2783 2806 return (EBUSY);
2784 2807 }
2785 2808 minor_arena = ip_minor_arena_sa;
2786 2809 }
2787 2810
2788 2811 ASSERT(minor_arena != NULL);
2789 2812
2790 2813 *devp = makedevice(getmajor(*devp), (minor_t)conn_dev);
2791 2814
2792 2815 if (flag & SO_FALLBACK) {
2793 2816 /*
2794 2817 * A non-STREAMS socket needs a stream to fall back to.
2795 2818 */
2796 2819 RD(q)->q_ptr = (void *)conn_dev;
2797 2820 WR(q)->q_qinfo = &tcp_fallback_sock_winit;
2798 2821 WR(q)->q_ptr = (void *)minor_arena;
2799 2822 qprocson(q);
2800 2823 return (0);
2801 2824 } else if (flag & SO_ACCEPTOR) {
2802 2825 q->q_qinfo = &tcp_acceptor_rinit;
2803 2826 /*
2804 2827 * the conn_dev and minor_arena will be subsequently used by
2805 2828 * tcp_tli_accept() and tcp_tpi_close_accept() to figure out
2806 2829 * the minor device number for this connection from the q_ptr.
2807 2830 */
2808 2831 RD(q)->q_ptr = (void *)conn_dev;
2809 2832 WR(q)->q_qinfo = &tcp_acceptor_winit;
2810 2833 WR(q)->q_ptr = (void *)minor_arena;
2811 2834 qprocson(q);
2812 2835 return (0);
2813 2836 }
2814 2837
2815 2838 issocket = flag & SO_SOCKSTR;
2816 2839 connp = tcp_create_common(credp, isv6, issocket, &err);
2817 2840
2818 2841 if (connp == NULL) {
2819 2842 inet_minor_free(minor_arena, conn_dev);
2820 2843 q->q_ptr = WR(q)->q_ptr = NULL;
2821 2844 return (err);
2822 2845 }
2823 2846
2824 2847 connp->conn_rq = q;
2825 2848 connp->conn_wq = WR(q);
2826 2849 q->q_ptr = WR(q)->q_ptr = connp;
2827 2850
2828 2851 connp->conn_dev = conn_dev;
2829 2852 connp->conn_minor_arena = minor_arena;
2830 2853
2831 2854 ASSERT(q->q_qinfo == &tcp_rinitv4 || q->q_qinfo == &tcp_rinitv6);
2832 2855 ASSERT(WR(q)->q_qinfo == &tcp_winit);
2833 2856
2834 2857 tcp = connp->conn_tcp;
2835 2858
2836 2859 if (issocket) {
2837 2860 WR(q)->q_qinfo = &tcp_sock_winit;
2838 2861 } else {
2839 2862 #ifdef _ILP32
2840 2863 tcp->tcp_acceptor_id = (t_uscalar_t)RD(q);
2841 2864 #else
2842 2865 tcp->tcp_acceptor_id = conn_dev;
2843 2866 #endif /* _ILP32 */
2844 2867 tcp_acceptor_hash_insert(tcp->tcp_acceptor_id, tcp);
2845 2868 }
2846 2869
2847 2870 /*
2848 2871 * Put the ref for TCP. Ref for IP was already put
2849 2872 * by ipcl_conn_create. Also make the conn_t globally
2850 2873 * visible to walkers
2851 2874 */
2852 2875 mutex_enter(&connp->conn_lock);
2853 2876 CONN_INC_REF_LOCKED(connp);
2854 2877 ASSERT(connp->conn_ref == 2);
2855 2878 connp->conn_state_flags &= ~CONN_INCIPIENT;
2856 2879 mutex_exit(&connp->conn_lock);
2857 2880
2858 2881 qprocson(q);
2859 2882 return (0);
2860 2883 }
2861 2884
2862 2885 /*
2863 2886 * Build/update the tcp header template (in conn_ht_iphc) based on
2864 2887 * conn_xmit_ipp. The headers include ip6_t, any extension
2865 2888 * headers, and the maximum size tcp header (to avoid reallocation
2866 2889 * on the fly for additional tcp options).
2867 2890 *
2868 2891 * Assumes the caller has already set conn_{faddr,laddr,fport,lport,flowinfo}.
2869 2892 * Returns failure if memory cannot be allocated.
2870 2893 */
2871 2894 int
2872 2895 tcp_build_hdrs(tcp_t *tcp)
2873 2896 {
2874 2897 tcp_stack_t *tcps = tcp->tcp_tcps;
2875 2898 conn_t *connp = tcp->tcp_connp;
2876 2899 char buf[TCP_MAX_HDR_LENGTH];
2877 2900 uint_t buflen;
2878 2901 uint_t ulplen = TCP_MIN_HEADER_LENGTH;
2879 2902 uint_t extralen = TCP_MAX_TCP_OPTIONS_LENGTH;
2880 2903 tcpha_t *tcpha;
2881 2904 uint32_t cksum;
2882 2905 int error;
2883 2906
2884 2907 /*
2885 2908 * We might be called after the connection is set up, and we might
2886 2909 * have TS options already in the TCP header. Thus we save any
2887 2910 * existing tcp header.
2888 2911 */
2889 2912 buflen = connp->conn_ht_ulp_len;
2890 2913 if (buflen != 0) {
2891 2914 bcopy(connp->conn_ht_ulp, buf, buflen);
2892 2915 extralen -= buflen - ulplen;
2893 2916 ulplen = buflen;
2894 2917 }
2895 2918
2896 2919 /* Grab lock to satisfy ASSERT; TCP is serialized using squeue */
2897 2920 mutex_enter(&connp->conn_lock);
2898 2921 error = conn_build_hdr_template(connp, ulplen, extralen,
2899 2922 &connp->conn_laddr_v6, &connp->conn_faddr_v6, connp->conn_flowinfo);
2900 2923 mutex_exit(&connp->conn_lock);
2901 2924 if (error != 0)
2902 2925 return (error);
2903 2926
2904 2927 /*
2905 2928 * Any routing header/option has been massaged. The checksum difference
2906 2929 * is stored in conn_sum for later use.
2907 2930 */
2908 2931 tcpha = (tcpha_t *)connp->conn_ht_ulp;
2909 2932 tcp->tcp_tcpha = tcpha;
2910 2933
2911 2934 /* restore any old tcp header */
2912 2935 if (buflen != 0) {
2913 2936 bcopy(buf, connp->conn_ht_ulp, buflen);
2914 2937 } else {
2915 2938 tcpha->tha_sum = 0;
2916 2939 tcpha->tha_urp = 0;
2917 2940 tcpha->tha_ack = 0;
2918 2941 tcpha->tha_offset_and_reserved = (5 << 4);
2919 2942 tcpha->tha_lport = connp->conn_lport;
2920 2943 tcpha->tha_fport = connp->conn_fport;
2921 2944 }
2922 2945
2923 2946 /*
2924 2947 * IP wants our header length in the checksum field to
2925 2948 * allow it to perform a single pseudo-header+checksum
2926 2949 * calculation on behalf of TCP.
2927 2950 * Include the adjustment for a source route once IP_OPTIONS is set.
2928 2951 */
2929 2952 cksum = sizeof (tcpha_t) + connp->conn_sum;
2930 2953 cksum = (cksum >> 16) + (cksum & 0xFFFF);
2931 2954 ASSERT(cksum < 0x10000);
2932 2955 tcpha->tha_sum = htons(cksum);
2933 2956
2934 2957 if (connp->conn_ipversion == IPV4_VERSION)
2935 2958 tcp->tcp_ipha = (ipha_t *)connp->conn_ht_iphc;
2936 2959 else
2937 2960 tcp->tcp_ip6h = (ip6_t *)connp->conn_ht_iphc;
2938 2961
2939 2962 if (connp->conn_ht_iphc_allocated + tcps->tcps_wroff_xtra >
2940 2963 connp->conn_wroff) {
2941 2964 connp->conn_wroff = connp->conn_ht_iphc_allocated +
2942 2965 tcps->tcps_wroff_xtra;
2943 2966 (void) proto_set_tx_wroff(connp->conn_rq, connp,
2944 2967 connp->conn_wroff);
2945 2968 }
2946 2969 return (0);
2947 2970 }
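
As a small worked example of the checksum seeding above: with no source route (so conn_sum is presumably 0), the value stashed in tha_sum is just the folded sizeof (tcpha_t), i.e. htons(20). That seed is the TCP header length that IP folds into its single pseudo-header-plus-payload checksum pass, as the preceding comment describes.
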
2948 2971
2949 2972 /*
2950 2973 * tcp_rwnd_set() is called to adjust the receive window to a desired value.
2951 2974 * We do not allow the receive window to shrink. After setting rwnd,
2952 2975 * set the flow control hiwat of the stream.
2953 2976 *
2954 2977 * This function is called in 2 cases:
2955 2978 *
2956 2979 * 1) Before data transfer begins, in tcp_input_listener() for accepting a
2957 2980 * connection (passive open) and in tcp_input_data() for active connect.
2958 2981 * This is called after tcp_mss_set() when the desired MSS value is known.
2959 2982 * This makes sure that our window size is a multiple of the other side's
2960 2983 * MSS.
2961 2984 * 2) Handling SO_RCVBUF option.
2962 2985 *
2963 2986 * It is ASSUMED that the requested size is a multiple of the current MSS.
2964 2987 *
2965 2988 * XXX - Should allow a lower rwnd than tcp_recv_hiwat_minmss * mss if the
2966 2989 * user requests so.
2967 2990 */
2968 2991 int
2969 2992 tcp_rwnd_set(tcp_t *tcp, uint32_t rwnd)
2970 2993 {
2971 2994 uint32_t mss = tcp->tcp_mss;
2972 2995 uint32_t old_max_rwnd;
2973 2996 uint32_t max_transmittable_rwnd;
2974 2997 boolean_t tcp_detached = TCP_IS_DETACHED(tcp);
2975 2998 tcp_stack_t *tcps = tcp->tcp_tcps;
2976 2999 conn_t *connp = tcp->tcp_connp;
2977 3000
2978 3001 /*
2979 3002 * Insist on a receive window that is at least
2980 3003 * tcp_recv_hiwat_minmss * MSS (default 4 * MSS) to avoid
2981 3004 * funny TCP interactions between the Nagle algorithm, SWS
2982 3005 * avoidance, and delayed acknowledgement.
2983 3006 */
2984 3007 rwnd = MAX(rwnd, tcps->tcps_recv_hiwat_minmss * mss);
2985 3008
2986 3009 if (tcp->tcp_fused) {
2987 3010 size_t sth_hiwat;
2988 3011 tcp_t *peer_tcp = tcp->tcp_loopback_peer;
2989 3012
2990 3013 ASSERT(peer_tcp != NULL);
2991 3014 sth_hiwat = tcp_fuse_set_rcv_hiwat(tcp, rwnd);
2992 3015 if (!tcp_detached) {
2993 3016 (void) proto_set_rx_hiwat(connp->conn_rq, connp,
2994 3017 sth_hiwat);
2995 3018 tcp_set_recv_threshold(tcp, sth_hiwat >> 3);
2996 3019 }
2997 3020
2998 3021 /* Caller could have changed tcp_rwnd; update tha_win */
2999 3022 if (tcp->tcp_tcpha != NULL) {
3000 3023 tcp->tcp_tcpha->tha_win =
3001 3024 htons(tcp->tcp_rwnd >> tcp->tcp_rcv_ws);
3002 3025 }
3003 3026 if ((tcp->tcp_rcv_ws > 0) && rwnd > tcp->tcp_cwnd_max)
3004 3027 tcp->tcp_cwnd_max = rwnd;
3005 3028
3006 3029 /*
3007 3030 * In the fusion case, the maxpsz stream head value of
3008 3031 * our peer is set according to its send buffer size
3009 3032 * and our receive buffer size; since the latter may
3010 3033 * have changed we need to update the peer's maxpsz.
3011 3034 */
3012 3035 (void) tcp_maxpsz_set(peer_tcp, B_TRUE);
3013 3036 return (sth_hiwat);
3014 3037 }
3015 3038
3016 3039 if (tcp_detached)
3017 3040 old_max_rwnd = tcp->tcp_rwnd;
3018 3041 else
3019 3042 old_max_rwnd = connp->conn_rcvbuf;
3020 3043
3021 3044
3022 3045 /*
3023 3046 * If window size info has already been exchanged, TCP should not
3024 3047 * shrink the window. Shrinking the window is doable if done carefully.
3025 3048 * We may add that support later. But so far there is not a real
3026 3049 * need to do that.
3027 3050 */
3028 3051 if (rwnd < old_max_rwnd && tcp->tcp_state > TCPS_SYN_SENT) {
3029 3052 /* MSS may have changed, do a round up again. */
3030 3053 rwnd = MSS_ROUNDUP(old_max_rwnd, mss);
3031 3054 }
3032 3055
3033 3056 /*
3034 3057 * tcp_rcv_ws starts with TCP_MAX_WINSHIFT so the following check
3035 3058 * can be applied even before the window scale option is decided.
3036 3059 */
3037 3060 max_transmittable_rwnd = TCP_MAXWIN << tcp->tcp_rcv_ws;
3038 3061 if (rwnd > max_transmittable_rwnd) {
3039 3062 rwnd = max_transmittable_rwnd -
3040 3063 (max_transmittable_rwnd % mss);
3041 3064 if (rwnd < mss)
3042 3065 rwnd = max_transmittable_rwnd;
3043 3066 /*
3044 3067 * If we're over the limit we may have to back down tcp_rwnd.
3045 3068 * The increment below won't work for us. So we set all three
3046 3069 * here and the increment below will have no effect.
3047 3070 */
3048 3071 tcp->tcp_rwnd = old_max_rwnd = rwnd;
3049 3072 }
3050 3073 if (tcp->tcp_localnet) {
3051 3074 tcp->tcp_rack_abs_max =
3052 3075 MIN(tcps->tcps_local_dacks_max, rwnd / mss / 2);
3053 3076 } else {
3054 3077 /*
3055 3078 * For a remote host on a different subnet (through a router),
3056 3079 * we ack every other packet to conform to RFC 1122.
3057 3080 * tcp_deferred_acks_max defaults to 2.
3058 3081 */
3059 3082 tcp->tcp_rack_abs_max =
3060 3083 MIN(tcps->tcps_deferred_acks_max, rwnd / mss / 2);
3061 3084 }
3062 3085 if (tcp->tcp_rack_cur_max > tcp->tcp_rack_abs_max)
3063 3086 tcp->tcp_rack_cur_max = tcp->tcp_rack_abs_max;
3064 3087 else
3065 3088 tcp->tcp_rack_cur_max = 0;
3066 3089 /*
3067 3090 * Increment the current rwnd by the amount the maximum grew (we
3068 3091 * can not overwrite it since we might be in the middle of a
3069 3092 * connection.)
3070 3093 */
3071 3094 tcp->tcp_rwnd += rwnd - old_max_rwnd;
3072 3095 connp->conn_rcvbuf = rwnd;
3073 3096
3074 3097 /* Are we already connected? */
3075 3098 if (tcp->tcp_tcpha != NULL) {
3076 3099 tcp->tcp_tcpha->tha_win =
3077 3100 htons(tcp->tcp_rwnd >> tcp->tcp_rcv_ws);
3078 3101 }
3079 3102
3080 3103 if ((tcp->tcp_rcv_ws > 0) && rwnd > tcp->tcp_cwnd_max)
3081 3104 tcp->tcp_cwnd_max = rwnd;
3082 3105
3083 3106 if (tcp_detached)
3084 3107 return (rwnd);
3085 3108
3086 3109 tcp_set_recv_threshold(tcp, rwnd >> 3);
3087 3110
3088 3111 (void) proto_set_rx_hiwat(connp->conn_rq, connp, rwnd);
3089 3112 return (rwnd);
3090 3113 }
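[Editor's note] tcp_rwnd_set() relies on MSS_ROUNDUP() to keep the advertised window a whole number of segments. A standalone sketch of that rounding, assuming the conventional round-up-to-a-multiple definition (the real macro lives in the TCP headers, not in this file):

#include <stdio.h>

/* Assumed definition: round bytes up to the next multiple of mss. */
#define	MSS_ROUNDUP(bytes, mss)	((((bytes) - 1) / (mss) + 1) * (mss))

int
main(void)
{
	(void) printf("%u\n", MSS_ROUNDUP(65000u, 1460u));	/* 65700: 45 segments */
	(void) printf("%u\n", MSS_ROUNDUP(65700u, 1460u));	/* 65700: already aligned */
	return (0);
}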
3091 3114
3092 3115 int
3093 3116 tcp_do_unbind(conn_t *connp)
3094 3117 {
3095 3118 tcp_t *tcp = connp->conn_tcp;
3096 3119 int32_t oldstate;
3097 3120
3098 3121 switch (tcp->tcp_state) {
3099 3122 case TCPS_BOUND:
3100 3123 case TCPS_LISTEN:
3101 3124 break;
3102 3125 default:
3103 3126 return (-TOUTSTATE);
3104 3127 }
3105 3128
3106 3129 /*
3107 3130 * Need to clean up all the eagers since after the unbind, segments
3108 3131 * will no longer be delivered to this listener stream.
3109 3132 */
3110 3133 mutex_enter(&tcp->tcp_eager_lock);
3111 3134 if (tcp->tcp_conn_req_cnt_q0 != 0 || tcp->tcp_conn_req_cnt_q != 0) {
3112 3135 tcp_eager_cleanup(tcp, 0);
3113 3136 }
3114 3137 mutex_exit(&tcp->tcp_eager_lock);
3115 3138
3116 3139 /* Clean up the listener connection counter if necessary. */
3117 3140 if (tcp->tcp_listen_cnt != NULL)
3118 3141 TCP_DECR_LISTEN_CNT(tcp);
3119 3142 connp->conn_laddr_v6 = ipv6_all_zeros;
3120 3143 connp->conn_saddr_v6 = ipv6_all_zeros;
3121 3144 tcp_bind_hash_remove(tcp);
3122 3145 oldstate = tcp->tcp_state;
3123 3146 tcp->tcp_state = TCPS_IDLE;
3124 3147 DTRACE_TCP6(state__change, void, NULL, ip_xmit_attr_t *,
3125 3148 connp->conn_ixa, void, NULL, tcp_t *, tcp, void, NULL,
3126 3149 int32_t, oldstate);
3127 3150
3128 3151 ip_unbind(connp);
3129 3152 bzero(&connp->conn_ports, sizeof (connp->conn_ports));
3130 3153
3131 3154 return (0);
3132 3155 }
3133 3156
3134 3157 /*
3135 3158 * Collect protocol properties to send to the upper handle.
3136 3159 */
3137 3160 void
3138 3161 tcp_get_proto_props(tcp_t *tcp, struct sock_proto_props *sopp)
3139 3162 {
3140 3163 conn_t *connp = tcp->tcp_connp;
3141 3164
3142 3165 sopp->sopp_flags = SOCKOPT_RCVHIWAT | SOCKOPT_MAXBLK | SOCKOPT_WROFF;
3143 3166 sopp->sopp_maxblk = tcp_maxpsz_set(tcp, B_FALSE);
3144 3167
3145 3168 sopp->sopp_rxhiwat = tcp->tcp_fused ?
3146 3169 tcp_fuse_set_rcv_hiwat(tcp, connp->conn_rcvbuf) :
3147 3170 connp->conn_rcvbuf;
3148 3171 /*
3149 3172 * Determine what write offset value to use depending on SACK and
3150 3173 * whether the endpoint is fused or not.
3151 3174 */
3152 3175 if (tcp->tcp_fused) {
3153 3176 ASSERT(tcp->tcp_loopback);
3154 3177 ASSERT(tcp->tcp_loopback_peer != NULL);
3155 3178 /*
3156 3179 * For fused tcp loopback, set the stream head's write
3157 3180 * offset value to zero since we won't be needing any room
3158 3181 * for TCP/IP headers. This would also improve performance
3159 3182 * since it would reduce the amount of work done by kmem.
3160 3183 * Non-fused tcp loopback case is handled separately below.
3161 3184 */
3162 3185 sopp->sopp_wroff = 0;
3163 3186 /*
3164 3187 * Update the peer's transmit parameters according to
3165 3188 * our recently calculated high water mark value.
3166 3189 */
3167 3190 (void) tcp_maxpsz_set(tcp->tcp_loopback_peer, B_TRUE);
3168 3191 } else if (tcp->tcp_snd_sack_ok) {
3169 3192 sopp->sopp_wroff = connp->conn_ht_iphc_allocated +
3170 3193 (tcp->tcp_loopback ? 0 : tcp->tcp_tcps->tcps_wroff_xtra);
3171 3194 } else {
3172 3195 sopp->sopp_wroff = connp->conn_ht_iphc_len +
3173 3196 (tcp->tcp_loopback ? 0 : tcp->tcp_tcps->tcps_wroff_xtra);
3174 3197 }
3175 3198
3176 3199 if (tcp->tcp_loopback) {
3177 3200 sopp->sopp_flags |= SOCKOPT_LOOPBACK;
3178 3201 sopp->sopp_loopback = B_TRUE;
3179 3202 }
3180 3203 }
3181 3204
3182 3205 /*
3183 3206 * Check the usability of ZEROCOPY. In effect this just checks the capability flag set by IP.
3184 3207 */
3185 3208 boolean_t
3186 3209 tcp_zcopy_check(tcp_t *tcp)
3187 3210 {
3188 3211 conn_t *connp = tcp->tcp_connp;
3189 3212 ip_xmit_attr_t *ixa = connp->conn_ixa;
3190 3213 boolean_t zc_enabled = B_FALSE;
3191 3214 tcp_stack_t *tcps = tcp->tcp_tcps;
3192 3215
3193 3216 if (do_tcpzcopy == 2)
3194 3217 zc_enabled = B_TRUE;
3195 3218 else if ((do_tcpzcopy == 1) && (ixa->ixa_flags & IXAF_ZCOPY_CAPAB))
3196 3219 zc_enabled = B_TRUE;
3197 3220
3198 3221 tcp->tcp_snd_zcopy_on = zc_enabled;
3199 3222 if (!TCP_IS_DETACHED(tcp)) {
3200 3223 if (zc_enabled) {
3201 3224 ixa->ixa_flags |= IXAF_VERIFY_ZCOPY;
3202 3225 (void) proto_set_tx_copyopt(connp->conn_rq, connp,
3203 3226 ZCVMSAFE);
3204 3227 TCP_STAT(tcps, tcp_zcopy_on);
3205 3228 } else {
3206 3229 ixa->ixa_flags &= ~IXAF_VERIFY_ZCOPY;
3207 3230 (void) proto_set_tx_copyopt(connp->conn_rq, connp,
3208 3231 ZCVMUNSAFE);
3209 3232 TCP_STAT(tcps, tcp_zcopy_off);
3210 3233 }
3211 3234 }
3212 3235 return (zc_enabled);
3213 3236 }
3214 3237
3215 3238 /*
3216 3239 * Back off from a zero-copy message by copying data to a newly allocated
3217 3240 * message and freeing the original desballoca'ed segmapped message.
3218 3241 *
3219 3242 * This function is called by the following two callers:
3220 3243 * 1. tcp_timer: fix_xmitlist is set to B_TRUE, because it's safe to free
3221 3244 * the original desballoca'ed message and notify sockfs. This is in re-
3222 3245 * transmit state.
3223 3246 * 2. tcp_output: fix_xmitlist is set to B_FALSE. Flag STRUIO_ZCNOTIFY needs
3224 3247 * to be copied to the new message.
3225 3248 */
3226 3249 mblk_t *
3227 3250 tcp_zcopy_backoff(tcp_t *tcp, mblk_t *bp, boolean_t fix_xmitlist)
3228 3251 {
3229 3252 mblk_t *nbp;
3230 3253 mblk_t *head = NULL;
3231 3254 mblk_t *tail = NULL;
3232 3255 tcp_stack_t *tcps = tcp->tcp_tcps;
3233 3256
3234 3257 ASSERT(bp != NULL);
3235 3258 while (bp != NULL) {
3236 3259 if (IS_VMLOANED_MBLK(bp)) {
3237 3260 TCP_STAT(tcps, tcp_zcopy_backoff);
3238 3261 if ((nbp = copyb(bp)) == NULL) {
3239 3262 tcp->tcp_xmit_zc_clean = B_FALSE;
3240 3263 if (tail != NULL)
3241 3264 tail->b_cont = bp;
3242 3265 return ((head == NULL) ? bp : head);
3243 3266 }
3244 3267
3245 3268 if (bp->b_datap->db_struioflag & STRUIO_ZCNOTIFY) {
3246 3269 if (fix_xmitlist)
3247 3270 tcp_zcopy_notify(tcp);
3248 3271 else
3249 3272 nbp->b_datap->db_struioflag |=
3250 3273 STRUIO_ZCNOTIFY;
3251 3274 }
3252 3275 nbp->b_cont = bp->b_cont;
3253 3276
3254 3277 /*
3255 3278 * Copy saved information and adjust tcp_xmit_tail
3256 3279 * if needed.
3257 3280 */
3258 3281 if (fix_xmitlist) {
3259 3282 nbp->b_prev = bp->b_prev;
3260 3283 nbp->b_next = bp->b_next;
3261 3284
3262 3285 if (tcp->tcp_xmit_tail == bp)
3263 3286 tcp->tcp_xmit_tail = nbp;
3264 3287 }
3265 3288
3266 3289 /* Free the original message. */
3267 3290 bp->b_prev = NULL;
3268 3291 bp->b_next = NULL;
3269 3292 freeb(bp);
3270 3293
3271 3294 bp = nbp;
3272 3295 }
3273 3296
3274 3297 if (head == NULL) {
3275 3298 head = bp;
3276 3299 }
3277 3300 if (tail == NULL) {
3278 3301 tail = bp;
3279 3302 } else {
3280 3303 tail->b_cont = bp;
3281 3304 tail = bp;
3282 3305 }
3283 3306
3284 3307 /* Move forward. */
3285 3308 bp = bp->b_cont;
3286 3309 }
3287 3310
3288 3311 if (fix_xmitlist) {
3289 3312 tcp->tcp_xmit_last = tail;
3290 3313 tcp->tcp_xmit_zc_clean = B_TRUE;
3291 3314 }
3292 3315
3293 3316 return (head);
3294 3317 }
3295 3318
3296 3319 void
3297 3320 tcp_zcopy_notify(tcp_t *tcp)
3298 3321 {
3299 3322 struct stdata *stp;
3300 3323 conn_t *connp;
3301 3324
3302 3325 if (tcp->tcp_detached)
3303 3326 return;
3304 3327 connp = tcp->tcp_connp;
3305 3328 if (IPCL_IS_NONSTR(connp)) {
3306 3329 (*connp->conn_upcalls->su_zcopy_notify)
3307 3330 (connp->conn_upper_handle);
3308 3331 return;
3309 3332 }
3310 3333 stp = STREAM(connp->conn_rq);
3311 3334 mutex_enter(&stp->sd_lock);
3312 3335 stp->sd_flag |= STZCNOTIFY;
3313 3336 cv_broadcast(&stp->sd_zcopy_wait);
3314 3337 mutex_exit(&stp->sd_lock);
3315 3338 }
3316 3339
3317 3340 /*
3318 3341 * Update the TCP connection according to change of LSO capability.
3319 3342 */
3320 3343 static void
3321 3344 tcp_update_lso(tcp_t *tcp, ip_xmit_attr_t *ixa)
3322 3345 {
3323 3346 /*
3324 3347 * We check against IPv4 header length to preserve the old behavior
3325 3348 * of only enabling LSO when there are no IP options.
3326 3349 * But this restriction might not be necessary at all. Before removing
3327 3350 * it, we need to verify how LSO is handled in the source routing case,
3328 3351 * for which IP does a software checksum.
3329 3352 *
3330 3353 * For IPv6, whenever any extension header is needed, LSO is suppressed.
3331 3354 */
3332 3355 if (ixa->ixa_ip_hdr_length != ((ixa->ixa_flags & IXAF_IS_IPV4) ?
3333 3356 IP_SIMPLE_HDR_LENGTH : IPV6_HDR_LEN))
3334 3357 return;
3335 3358
3336 3359 /*
3337 3360 * Either the LSO capability newly became usable, or it has changed.
3338 3361 */
3339 3362 if (ixa->ixa_flags & IXAF_LSO_CAPAB) {
3340 3363 ill_lso_capab_t *lsoc = &ixa->ixa_lso_capab;
3341 3364
3342 3365 ASSERT(lsoc->ill_lso_max > 0);
3343 3366 tcp->tcp_lso_max = MIN(TCP_MAX_LSO_LENGTH, lsoc->ill_lso_max);
3344 3367
3345 3368 DTRACE_PROBE3(tcp_update_lso, boolean_t, tcp->tcp_lso,
3346 3369 boolean_t, B_TRUE, uint32_t, tcp->tcp_lso_max);
3347 3370
3348 3371 /*
3349 3372 * If LSO is to be enabled, notify the STREAM header with a larger
3350 3373 * data block.
3351 3374 */
3352 3375 if (!tcp->tcp_lso)
3353 3376 tcp->tcp_maxpsz_multiplier = 0;
3354 3377
3355 3378 tcp->tcp_lso = B_TRUE;
3356 3379 TCP_STAT(tcp->tcp_tcps, tcp_lso_enabled);
3357 3380 } else { /* LSO capability is not usable any more. */
3358 3381 DTRACE_PROBE3(tcp_update_lso, boolean_t, tcp->tcp_lso,
3359 3382 boolean_t, B_FALSE, uint32_t, tcp->tcp_lso_max);
3360 3383
3361 3384 /*
3362 3385 * If LSO is to be disabled, notify the STREAM header with a smaller
3363 3386 * data block, and restore fragsize to the PMTU.
3364 3387 */
3365 3388 if (tcp->tcp_lso) {
3366 3389 tcp->tcp_maxpsz_multiplier =
3367 3390 tcp->tcp_tcps->tcps_maxpsz_multiplier;
3368 3391 ixa->ixa_fragsize = ixa->ixa_pmtu;
3369 3392 tcp->tcp_lso = B_FALSE;
3370 3393 TCP_STAT(tcp->tcp_tcps, tcp_lso_disabled);
3371 3394 }
3372 3395 }
3373 3396
3374 3397 (void) tcp_maxpsz_set(tcp, B_TRUE);
3375 3398 }
3376 3399
3377 3400 /*
3378 3401 * Update the TCP connection according to change of ZEROCOPY capability.
3379 3402 */
3380 3403 static void
3381 3404 tcp_update_zcopy(tcp_t *tcp)
3382 3405 {
3383 3406 conn_t *connp = tcp->tcp_connp;
3384 3407 tcp_stack_t *tcps = tcp->tcp_tcps;
3385 3408
3386 3409 if (tcp->tcp_snd_zcopy_on) {
3387 3410 tcp->tcp_snd_zcopy_on = B_FALSE;
3388 3411 if (!TCP_IS_DETACHED(tcp)) {
3389 3412 (void) proto_set_tx_copyopt(connp->conn_rq, connp,
3390 3413 ZCVMUNSAFE);
3391 3414 TCP_STAT(tcps, tcp_zcopy_off);
3392 3415 }
3393 3416 } else {
3394 3417 tcp->tcp_snd_zcopy_on = B_TRUE;
3395 3418 if (!TCP_IS_DETACHED(tcp)) {
3396 3419 (void) proto_set_tx_copyopt(connp->conn_rq, connp,
3397 3420 ZCVMSAFE);
3398 3421 TCP_STAT(tcps, tcp_zcopy_on);
3399 3422 }
3400 3423 }
3401 3424 }
3402 3425
3403 3426 /*
3404 3427 * Notify function registered with ip_xmit_attr_t. It's called in the squeue
3405 3428 * so it's safe to update the TCP connection.
3406 3429 */
3407 3430 /* ARGSUSED1 */
3408 3431 static void
3409 3432 tcp_notify(void *arg, ip_xmit_attr_t *ixa, ixa_notify_type_t ntype,
3410 3433 ixa_notify_arg_t narg)
3411 3434 {
3412 3435 tcp_t *tcp = (tcp_t *)arg;
3413 3436 conn_t *connp = tcp->tcp_connp;
3414 3437
3415 3438 switch (ntype) {
3416 3439 case IXAN_LSO:
3417 3440 tcp_update_lso(tcp, connp->conn_ixa);
3418 3441 break;
3419 3442 case IXAN_PMTU:
3420 3443 (void) tcp_update_pmtu(tcp, B_FALSE);
3421 3444 break;
3422 3445 case IXAN_ZCOPY:
3423 3446 tcp_update_zcopy(tcp);
3424 3447 break;
3425 3448 default:
3426 3449 break;
3427 3450 }
3428 3451 }
3429 3452
3430 3453 /*
3431 3454 * The TCP write service routine should never be called...
3432 3455 */
3433 3456 /* ARGSUSED */
3434 3457 static void
3435 3458 tcp_wsrv(queue_t *q)
3436 3459 {
3437 3460 tcp_stack_t *tcps = Q_TO_TCP(q)->tcp_tcps;
3438 3461
3439 3462 TCP_STAT(tcps, tcp_wsrv_called);
3440 3463 }
3441 3464
3442 3465 /*
3443 3466 * Hash list lookup routine for tcp_t structures.
3444 3467 * Returns with a CONN_INC_REF tcp structure. Caller must do a CONN_DEC_REF.
3445 3468 */
3446 3469 tcp_t *
3447 3470 tcp_acceptor_hash_lookup(t_uscalar_t id, tcp_stack_t *tcps)
3448 3471 {
3449 3472 tf_t *tf;
3450 3473 tcp_t *tcp;
3451 3474
3452 3475 tf = &tcps->tcps_acceptor_fanout[TCP_ACCEPTOR_HASH(id)];
3453 3476 mutex_enter(&tf->tf_lock);
3454 3477 for (tcp = tf->tf_tcp; tcp != NULL;
3455 3478 tcp = tcp->tcp_acceptor_hash) {
3456 3479 if (tcp->tcp_acceptor_id == id) {
3457 3480 CONN_INC_REF(tcp->tcp_connp);
3458 3481 mutex_exit(&tf->tf_lock);
3459 3482 return (tcp);
3460 3483 }
3461 3484 }
3462 3485 mutex_exit(&tf->tf_lock);
3463 3486 return (NULL);
3464 3487 }
3465 3488
3466 3489 /*
3467 3490 * Hash list insertion routine for tcp_t structures.
3468 3491 */
3469 3492 void
3470 3493 tcp_acceptor_hash_insert(t_uscalar_t id, tcp_t *tcp)
3471 3494 {
3472 3495 tf_t *tf;
3473 3496 tcp_t **tcpp;
3474 3497 tcp_t *tcpnext;
3475 3498 tcp_stack_t *tcps = tcp->tcp_tcps;
3476 3499
3477 3500 tf = &tcps->tcps_acceptor_fanout[TCP_ACCEPTOR_HASH(id)];
3478 3501
3479 3502 if (tcp->tcp_ptpahn != NULL)
3480 3503 tcp_acceptor_hash_remove(tcp);
3481 3504 tcpp = &tf->tf_tcp;
3482 3505 mutex_enter(&tf->tf_lock);
3483 3506 tcpnext = tcpp[0];
3484 3507 if (tcpnext)
3485 3508 tcpnext->tcp_ptpahn = &tcp->tcp_acceptor_hash;
3486 3509 tcp->tcp_acceptor_hash = tcpnext;
3487 3510 tcp->tcp_ptpahn = tcpp;
3488 3511 tcpp[0] = tcp;
3489 3512 tcp->tcp_acceptor_lockp = &tf->tf_lock; /* For tcp_*_hash_remove */
3490 3513 mutex_exit(&tf->tf_lock);
3491 3514 }
3492 3515
3493 3516 /*
3494 3517 * Hash list removal routine for tcp_t structures.
3495 3518 */
3496 3519 void
3497 3520 tcp_acceptor_hash_remove(tcp_t *tcp)
3498 3521 {
3499 3522 tcp_t *tcpnext;
3500 3523 kmutex_t *lockp;
3501 3524
3502 3525 /*
3503 3526 * Extract the lock pointer in case there are concurrent
3504 3527 * hash_remove's for this instance.
3505 3528 */
3506 3529 lockp = tcp->tcp_acceptor_lockp;
3507 3530
3508 3531 if (tcp->tcp_ptpahn == NULL)
3509 3532 return;
3510 3533
3511 3534 ASSERT(lockp != NULL);
3512 3535 mutex_enter(lockp);
3513 3536 if (tcp->tcp_ptpahn) {
3514 3537 tcpnext = tcp->tcp_acceptor_hash;
3515 3538 if (tcpnext) {
3516 3539 tcpnext->tcp_ptpahn = tcp->tcp_ptpahn;
3517 3540 tcp->tcp_acceptor_hash = NULL;
3518 3541 }
3519 3542 *tcp->tcp_ptpahn = tcpnext;
3520 3543 tcp->tcp_ptpahn = NULL;
3521 3544 }
3522 3545 mutex_exit(lockp);
3523 3546 tcp->tcp_acceptor_lockp = NULL;
3524 3547 }
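[Editor's note] The tcp_ptpahn back-pointer used by the insert/remove routines above is the classic pointer-to-pointer idiom: each entry remembers the address of whatever pointer currently references it (the bucket head or the previous entry's hash link), so removal is O(1) with no bucket walk and no special case for the head. A generic sketch with hypothetical names:

#include <stddef.h>

typedef struct node {
	struct node	*next;
	struct node	**ptpn;	/* address of the pointer that points at us */
} node_t;

static void
node_insert(node_t **headp, node_t *n)
{
	if ((n->next = *headp) != NULL)
		n->next->ptpn = &n->next;
	n->ptpn = headp;
	*headp = n;
}

static void
node_remove(node_t *n)
{
	if (n->next != NULL)
		n->next->ptpn = n->ptpn;
	*n->ptpn = n->next;	/* works for head and interior entries alike */
	n->next = NULL;
	n->ptpn = NULL;
}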
3525 3548
3526 3549 /*
3527 3550 * Type three generator adapted from the random() function in 4.4 BSD:
3528 3551 */
3529 3552
3530 3553 /*
3531 3554 * Copyright (c) 1983, 1993
3532 3555 * The Regents of the University of California. All rights reserved.
3533 3556 *
3534 3557 * Redistribution and use in source and binary forms, with or without
3535 3558 * modification, are permitted provided that the following conditions
3536 3559 * are met:
3537 3560 * 1. Redistributions of source code must retain the above copyright
3538 3561 * notice, this list of conditions and the following disclaimer.
3539 3562 * 2. Redistributions in binary form must reproduce the above copyright
3540 3563 * notice, this list of conditions and the following disclaimer in the
3541 3564 * documentation and/or other materials provided with the distribution.
3542 3565 * 3. All advertising materials mentioning features or use of this software
3543 3566 * must display the following acknowledgement:
3544 3567 * This product includes software developed by the University of
3545 3568 * California, Berkeley and its contributors.
3546 3569 * 4. Neither the name of the University nor the names of its contributors
3547 3570 * may be used to endorse or promote products derived from this software
3548 3571 * without specific prior written permission.
3549 3572 *
3550 3573 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
3551 3574 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
3552 3575 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
3553 3576 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
3554 3577 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
3555 3578 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
3556 3579 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
3557 3580 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
3558 3581 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
3559 3582 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
3560 3583 * SUCH DAMAGE.
3561 3584 */
3562 3585
3563 3586 /* Type 3 -- x**31 + x**3 + 1 */
3564 3587 #define DEG_3 31
3565 3588 #define SEP_3 3
3566 3589
3567 3590
3568 3591 /* Protected by tcp_random_lock */
3569 3592 static int tcp_randtbl[DEG_3 + 1];
3570 3593
3571 3594 static int *tcp_random_fptr = &tcp_randtbl[SEP_3 + 1];
3572 3595 static int *tcp_random_rptr = &tcp_randtbl[1];
3573 3596
3574 3597 static int *tcp_random_state = &tcp_randtbl[1];
3575 3598 static int *tcp_random_end_ptr = &tcp_randtbl[DEG_3 + 1];
3576 3599
3577 3600 kmutex_t tcp_random_lock;
3578 3601
3579 3602 void
3580 3603 tcp_random_init(void)
3581 3604 {
3582 3605 int i;
3583 3606 hrtime_t hrt;
3584 3607 time_t wallclock;
3585 3608 uint64_t result;
3586 3609
3587 3610 /*
3588 3611 * Use high-res timer and current time for seed. Gethrtime() returns
3589 3612 * a longlong, which may contain resolution down to nanoseconds.
3590 3613 * The current time will either be a 32-bit or a 64-bit quantity.
3591 3614 * XOR the two together in a 64-bit result variable.
3592 3615 * Convert the result to a 32-bit value by multiplying the high-order
3593 3616 * 32-bits by the low-order 32-bits.
3594 3617 */
3595 3618
3596 3619 hrt = gethrtime();
3597 3620 (void) drv_getparm(TIME, &wallclock);
3598 3621 result = (uint64_t)wallclock ^ (uint64_t)hrt;
3599 3622 mutex_enter(&tcp_random_lock);
3600 3623 tcp_random_state[0] = ((result >> 32) & 0xffffffff) *
3601 3624 (result & 0xffffffff);
3602 3625
3603 3626 for (i = 1; i < DEG_3; i++)
3604 3627 tcp_random_state[i] = 1103515245 * tcp_random_state[i - 1]
3605 3628 + 12345;
3606 3629 tcp_random_fptr = &tcp_random_state[SEP_3];
3607 3630 tcp_random_rptr = &tcp_random_state[0];
3608 3631 mutex_exit(&tcp_random_lock);
3609 3632 for (i = 0; i < 10 * DEG_3; i++)
3610 3633 (void) tcp_random();
3611 3634 }
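[Editor's note] The seeding above XORs the wall-clock time into the high-resolution timer and then collapses the 64-bit result by multiplying its two halves. A compressed sketch of just that mixing step (the function name is the editor's, not from tcp.c):

#include <stdint.h>

static uint32_t
mix_seed(uint64_t hrt, uint64_t wallclock)
{
	uint64_t result = wallclock ^ hrt;

	/* collapse 64 bits to 32 by multiplying the high half by the low half */
	return ((uint32_t)(result >> 32) * (uint32_t)(result & 0xffffffff));
}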
3612 3635
3613 3636 /*
3614 3637 * tcp_random: Return a random number in the range [1, 128K].
3615 3638 * This range is selected to be approximately centered on TCP_ISS / 2,
3616 3639 * and easy to compute. We get this value by generating a 32-bit random
3617 3640 * number, selecting out the high-order 17 bits, and then adding one so
3618 3641 * that we never return zero.
3619 3642 */
3620 3643 int
3621 3644 tcp_random(void)
3622 3645 {
3623 3646 int i;
3624 3647
3625 3648 mutex_enter(&tcp_random_lock);
3626 3649 *tcp_random_fptr += *tcp_random_rptr;
3627 3650
3628 3651 /*
3629 3652 * The high-order bits are more random than the low-order bits,
3630 3653 * so we select out the high-order 17 bits and add one so that
3631 3654 * we never return zero.
3632 3655 */
3633 3656 i = ((*tcp_random_fptr >> 15) & 0x1ffff) + 1;
3634 3657 if (++tcp_random_fptr >= tcp_random_end_ptr) {
3635 3658 tcp_random_fptr = tcp_random_state;
3636 3659 ++tcp_random_rptr;
3637 3660 } else if (++tcp_random_rptr >= tcp_random_end_ptr)
3638 3661 tcp_random_rptr = tcp_random_state;
3639 3662
3640 3663 mutex_exit(&tcp_random_lock);
3641 3664 return (i);
3642 3665 }
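[Editor's note] The extraction step above pulls bits 15 through 31 of the running sum and adds one, so the result always falls in [1, 2^17]; the add is what guarantees a nonzero return. An isolated sketch of that step (extract_random() is the editor's name):

#include <assert.h>
#include <stdint.h>

static int
extract_random(uint32_t raw)
{
	int i = (int)((raw >> 15) & 0x1ffff) + 1;	/* high-order 17 bits, plus one */

	assert(i >= 1 && i <= 128 * 1024);		/* never zero */
	return (i);
}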
3643 3666
3644 3667 /*
3645 3668 * Split this function out so that if the secret changes, I'm okay.
3646 3669 *
3647 3670 * Initialize the tcp_iss_cookie and tcp_iss_key.
3648 3671 */
3649 3672
3650 3673 #define PASSWD_SIZE 16 /* MUST be multiple of 4 */
3651 3674
3652 3675 void
3653 3676 tcp_iss_key_init(uint8_t *phrase, int len, tcp_stack_t *tcps)
3654 3677 {
3655 3678 struct {
3656 3679 int32_t current_time;
3657 3680 uint32_t randnum;
3658 3681 uint16_t pad;
3659 3682 uint8_t ether[6];
3660 3683 uint8_t passwd[PASSWD_SIZE];
3661 3684 } tcp_iss_cookie;
3662 3685 time_t t;
3663 3686
3664 3687 /*
3665 3688 * Start with the current absolute time.
3666 3689 */
3667 3690 (void) drv_getparm(TIME, &t);
3668 3691 tcp_iss_cookie.current_time = t;
3669 3692
3670 3693 /*
3671 3694 * XXX - Need a more random number per RFC 1750, not this crap.
3672 3695 * OTOH, if what follows is pretty random, then I'm in better shape.
3673 3696 */
3674 3697 tcp_iss_cookie.randnum = (uint32_t)(gethrtime() + tcp_random());
3675 3698 tcp_iss_cookie.pad = 0x365c; /* Picked from HMAC pad values. */
3676 3699
3677 3700 /*
3678 3701 * The cpu_type_info is pretty non-random. Ugggh. It does serve
3679 3702 * as a good template.
3680 3703 */
3681 3704 bcopy(&cpu_list->cpu_type_info, &tcp_iss_cookie.passwd,
3682 3705 min(PASSWD_SIZE, sizeof (cpu_list->cpu_type_info)));
3683 3706
3684 3707 /*
3685 3708 * The pass-phrase. Normally this is supplied by user-called NDD.
3686 3709 */
3687 3710 bcopy(phrase, &tcp_iss_cookie.passwd, min(PASSWD_SIZE, len));
3688 3711
3689 3712 /*
3690 3713 * See 4010593 if this section becomes a problem again,
3691 3714 * but the local ethernet address is useful here.
3692 3715 */
3693 3716 (void) localetheraddr(NULL,
3694 3717 (struct ether_addr *)&tcp_iss_cookie.ether);
3695 3718
3696 3719 /*
3697 3720 * Hash 'em all together. The MD5Final is called per-connection.
3698 3721 */
3699 3722 mutex_enter(&tcps->tcps_iss_key_lock);
3700 3723 MD5Init(&tcps->tcps_iss_key);
3701 3724 MD5Update(&tcps->tcps_iss_key, (uchar_t *)&tcp_iss_cookie,
3702 3725 sizeof (tcp_iss_cookie));
3703 3726 mutex_exit(&tcps->tcps_iss_key_lock);
3704 3727 }
3705 3728
3706 3729 /*
3707 3730 * Called by IP when IP is loaded into the kernel
3708 3731 */
3709 3732 void
3710 3733 tcp_ddi_g_init(void)
3711 3734 {
3712 3735 tcp_timercache = kmem_cache_create("tcp_timercache",
3713 3736 sizeof (tcp_timer_t) + sizeof (mblk_t), 0,
3714 3737 NULL, NULL, NULL, NULL, NULL, 0);
3715 3738
3716 3739 tcp_notsack_blk_cache = kmem_cache_create("tcp_notsack_blk_cache",
3717 3740 sizeof (notsack_blk_t), 0, NULL, NULL, NULL, NULL, NULL, 0);
3718 3741
3719 3742 mutex_init(&tcp_random_lock, NULL, MUTEX_DEFAULT, NULL);
3720 3743
3721 3744 /* Initialize the random number generator */
3722 3745 tcp_random_init();
3723 3746
3724 3747 /* A single callback, independent of how many netstacks we have */
3725 3748 ip_squeue_init(tcp_squeue_add);
3726 3749
3727 3750 tcp_g_kstat = tcp_g_kstat_init(&tcp_g_statistics);
3728 3751
3729 3752 tcp_squeue_flag = tcp_squeue_switch(tcp_squeue_wput);
3730 3753
3731 3754 /*
3732 3755 * We want to be informed each time a stack is created or
3733 3756 * destroyed in the kernel, so we can maintain the
3734 3757 * set of tcp_stack_t's.
3735 3758 */
3736 3759 netstack_register(NS_TCP, tcp_stack_init, NULL, tcp_stack_fini);
3737 3760 }
3738 3761
3739 3762
3740 3763 #define INET_NAME "ip"
3741 3764
3742 3765 /*
3743 3766 * Initialize the TCP stack instance.
3744 3767 */
3745 3768 static void *
3746 3769 tcp_stack_init(netstackid_t stackid, netstack_t *ns)
3747 3770 {
3748 3771 tcp_stack_t *tcps;
3749 3772 int i;
3750 3773 major_t major;
3751 3774 size_t arrsz;
3752 3775
3753 3776 tcps = (tcp_stack_t *)kmem_zalloc(sizeof (*tcps), KM_SLEEP);
3754 3777 tcps->tcps_netstack = ns;
3755 3778
3756 3779 /* Initialize locks */
3757 3780 mutex_init(&tcps->tcps_iss_key_lock, NULL, MUTEX_DEFAULT, NULL);
3758 3781 mutex_init(&tcps->tcps_epriv_port_lock, NULL, MUTEX_DEFAULT, NULL);
3759 3782
3760 3783 tcps->tcps_g_num_epriv_ports = TCP_NUM_EPRIV_PORTS;
3761 3784 tcps->tcps_g_epriv_ports[0] = ULP_DEF_EPRIV_PORT1;
3762 3785 tcps->tcps_g_epriv_ports[1] = ULP_DEF_EPRIV_PORT2;
3763 3786 tcps->tcps_min_anonpriv_port = 512;
3764 3787
3765 3788 tcps->tcps_bind_fanout = kmem_zalloc(sizeof (tf_t) *
3766 3789 TCP_BIND_FANOUT_SIZE, KM_SLEEP);
3767 3790 tcps->tcps_acceptor_fanout = kmem_zalloc(sizeof (tf_t) *
3768 3791 TCP_ACCEPTOR_FANOUT_SIZE, KM_SLEEP);
3769 3792
3770 3793 for (i = 0; i < TCP_BIND_FANOUT_SIZE; i++) {
3771 3794 mutex_init(&tcps->tcps_bind_fanout[i].tf_lock, NULL,
3772 3795 MUTEX_DEFAULT, NULL);
3773 3796 }
3774 3797
3775 3798 for (i = 0; i < TCP_ACCEPTOR_FANOUT_SIZE; i++) {
3776 3799 mutex_init(&tcps->tcps_acceptor_fanout[i].tf_lock, NULL,
3777 3800 MUTEX_DEFAULT, NULL);
3778 3801 }
3779 3802
3780 3803 /* TCP's IPsec code calls the packet dropper. */
3781 3804 ip_drop_register(&tcps->tcps_dropper, "TCP IPsec policy enforcement");
3782 3805
3783 3806 arrsz = tcp_propinfo_count * sizeof (mod_prop_info_t);
3784 3807 tcps->tcps_propinfo_tbl = (mod_prop_info_t *)kmem_alloc(arrsz,
3785 3808 KM_SLEEP);
3786 3809 bcopy(tcp_propinfo_tbl, tcps->tcps_propinfo_tbl, arrsz);
3787 3810
3788 3811 /*
3789 3812 * Note: To really walk the device tree you need the devinfo
3790 3813 * pointer to your device which is only available after probe/attach.
3791 3814 * The following is safe only because it uses ddi_root_node()
3792 3815 */
3793 3816 tcp_max_optsize = optcom_max_optsize(tcp_opt_obj.odb_opt_des_arr,
3794 3817 tcp_opt_obj.odb_opt_arr_cnt);
3795 3818
3796 3819 /*
3797 3820 * Initialize RFC 1948 secret values. This will probably be reset once
3798 3821 * by the boot scripts.
3799 3822 *
3800 3823 * Use NULL name, as the name is caught by the new lockstats.
3801 3824 *
3802 3825 * Initialize with some random, non-guessable string, like the global
3803 3826 * T_INFO_ACK.
3804 3827 */
3805 3828
3806 3829 tcp_iss_key_init((uint8_t *)&tcp_g_t_info_ack,
3807 3830 sizeof (tcp_g_t_info_ack), tcps);
3808 3831
3809 3832 tcps->tcps_kstat = tcp_kstat2_init(stackid);
3810 3833 tcps->tcps_mibkp = tcp_kstat_init(stackid);
3811 3834
3812 3835 major = mod_name_to_major(INET_NAME);
3813 3836 VERIFY0(ldi_ident_from_major(major, &tcps->tcps_ldi_ident));
3814 3837 tcps->tcps_ixa_cleanup_mp = allocb_wait(0, BPRI_MED, STR_NOSIG, NULL);
3815 3838 ASSERT(tcps->tcps_ixa_cleanup_mp != NULL);
3816 3839 cv_init(&tcps->tcps_ixa_cleanup_ready_cv, NULL, CV_DEFAULT, NULL);
3817 3840 cv_init(&tcps->tcps_ixa_cleanup_done_cv, NULL, CV_DEFAULT, NULL);
3818 3841 mutex_init(&tcps->tcps_ixa_cleanup_lock, NULL, MUTEX_DEFAULT, NULL);
3819 3842
3820 3843 mutex_init(&tcps->tcps_reclaim_lock, NULL, MUTEX_DEFAULT, NULL);
3821 3844 tcps->tcps_reclaim = B_FALSE;
3822 3845 tcps->tcps_reclaim_tid = 0;
3823 3846 tcps->tcps_reclaim_period = tcps->tcps_rexmit_interval_max;
3824 3847
3825 3848 /*
3826 3849 * ncpus is the current number of CPUs, which can be bigger than
3827 3850 * boot_ncpus. But we don't want to use ncpus to allocate all the
3828 3851 * tcp_stats_cpu_t at system boot up time since it will be 1. While
3829 3852 * we handle adding CPU in tcp_cpu_update(), it will be slow if
3830 3853 * there are many CPUs as we will be adding them 1 by 1.
3831 3854 *
3832 3855 * Note that tcps_sc_cnt never decreases and the tcps_sc[x] pointers
3833 3856 * are not freed until the stack is going away. So there is no need
3834 3857 * to grab a lock to access the per CPU tcps_sc[x] pointer.
3835 3858 */
3836 3859 mutex_enter(&cpu_lock);
3837 3860 tcps->tcps_sc_cnt = MAX(ncpus, boot_ncpus);
3838 3861 mutex_exit(&cpu_lock);
3839 3862 tcps->tcps_sc = kmem_zalloc(max_ncpus * sizeof (tcp_stats_cpu_t *),
3840 3863 KM_SLEEP);
3841 3864 for (i = 0; i < tcps->tcps_sc_cnt; i++) {
3842 3865 tcps->tcps_sc[i] = kmem_zalloc(sizeof (tcp_stats_cpu_t),
3843 3866 KM_SLEEP);
3844 3867 }
3845 3868
3846 3869 mutex_init(&tcps->tcps_listener_conf_lock, NULL, MUTEX_DEFAULT, NULL);
3847 3870 list_create(&tcps->tcps_listener_conf, sizeof (tcp_listener_t),
3848 3871 offsetof(tcp_listener_t, tl_link));
3849 3872
3873 + tcps->tcps_default_cc_algo = cc_load_algo(CC_DEFAULT_ALGO_NAME);
3874 + ASSERT3P(tcps->tcps_default_cc_algo, !=, NULL);
3875 +
3850 3876 return (tcps);
3851 3877 }
3852 3878
3853 3879 /*
3854 3880 * Called when the IP module is about to be unloaded.
3855 3881 */
3856 3882 void
3857 3883 tcp_ddi_g_destroy(void)
3858 3884 {
3859 3885 tcp_g_kstat_fini(tcp_g_kstat);
3860 3886 tcp_g_kstat = NULL;
3861 3887 bzero(&tcp_g_statistics, sizeof (tcp_g_statistics));
3862 3888
3863 3889 mutex_destroy(&tcp_random_lock);
3864 3890
3865 3891 kmem_cache_destroy(tcp_timercache);
3866 3892 kmem_cache_destroy(tcp_notsack_blk_cache);
3867 3893
3868 3894 netstack_unregister(NS_TCP);
3869 3895 }
3870 3896
3871 3897 /*
3872 3898 * Free the TCP stack instance.
3873 3899 */
3874 3900 static void
3875 3901 tcp_stack_fini(netstackid_t stackid, void *arg)
3876 3902 {
3877 3903 tcp_stack_t *tcps = (tcp_stack_t *)arg;
3878 3904 int i;
3879 3905
3880 3906 freeb(tcps->tcps_ixa_cleanup_mp);
3881 3907 tcps->tcps_ixa_cleanup_mp = NULL;
3882 3908 cv_destroy(&tcps->tcps_ixa_cleanup_ready_cv);
3883 3909 cv_destroy(&tcps->tcps_ixa_cleanup_done_cv);
3884 3910 mutex_destroy(&tcps->tcps_ixa_cleanup_lock);
3885 3911
3886 3912 /*
3887 3913 * Setting tcps_reclaim to false tells tcp_reclaim_timer() not to restart
3888 3914 * the timer.
3889 3915 */
3890 3916 mutex_enter(&tcps->tcps_reclaim_lock);
3891 3917 tcps->tcps_reclaim = B_FALSE;
3892 3918 mutex_exit(&tcps->tcps_reclaim_lock);
3893 3919 if (tcps->tcps_reclaim_tid != 0)
3894 3920 (void) untimeout(tcps->tcps_reclaim_tid);
3895 3921 mutex_destroy(&tcps->tcps_reclaim_lock);
3896 3922
3897 3923 tcp_listener_conf_cleanup(tcps);
3898 3924
3899 3925 for (i = 0; i < tcps->tcps_sc_cnt; i++)
3900 3926 kmem_free(tcps->tcps_sc[i], sizeof (tcp_stats_cpu_t));
3901 3927 kmem_free(tcps->tcps_sc, max_ncpus * sizeof (tcp_stats_cpu_t *));
3902 3928
3903 3929 kmem_free(tcps->tcps_propinfo_tbl,
3904 3930 tcp_propinfo_count * sizeof (mod_prop_info_t));
3905 3931 tcps->tcps_propinfo_tbl = NULL;
3906 3932
3907 3933 for (i = 0; i < TCP_BIND_FANOUT_SIZE; i++) {
3908 3934 ASSERT(tcps->tcps_bind_fanout[i].tf_tcp == NULL);
3909 3935 mutex_destroy(&tcps->tcps_bind_fanout[i].tf_lock);
3910 3936 }
3911 3937
3912 3938 for (i = 0; i < TCP_ACCEPTOR_FANOUT_SIZE; i++) {
3913 3939 ASSERT(tcps->tcps_acceptor_fanout[i].tf_tcp == NULL);
3914 3940 mutex_destroy(&tcps->tcps_acceptor_fanout[i].tf_lock);
3915 3941 }
3916 3942
3917 3943 kmem_free(tcps->tcps_bind_fanout, sizeof (tf_t) * TCP_BIND_FANOUT_SIZE);
3918 3944 tcps->tcps_bind_fanout = NULL;
3919 3945
3920 3946 kmem_free(tcps->tcps_acceptor_fanout, sizeof (tf_t) *
3921 3947 TCP_ACCEPTOR_FANOUT_SIZE);
3922 3948 tcps->tcps_acceptor_fanout = NULL;
3923 3949
3924 3950 mutex_destroy(&tcps->tcps_iss_key_lock);
3925 3951 mutex_destroy(&tcps->tcps_epriv_port_lock);
3926 3952
3927 3953 ip_drop_unregister(&tcps->tcps_dropper);
3928 3954
3929 3955 tcp_kstat2_fini(stackid, tcps->tcps_kstat);
3930 3956 tcps->tcps_kstat = NULL;
3931 3957
3932 3958 tcp_kstat_fini(stackid, tcps->tcps_mibkp);
3933 3959 tcps->tcps_mibkp = NULL;
3934 3960
3935 3961 ldi_ident_release(tcps->tcps_ldi_ident);
3936 3962 kmem_free(tcps, sizeof (*tcps));
3937 3963 }
3938 3964
3939 3965 /*
3940 3966 * Generate ISS, taking into account that NDD changes may happen halfway through.
3941 3967 * (If the iss is not zero, set it.)
3942 3968 */
3943 3969
3944 3970 static void
3945 3971 tcp_iss_init(tcp_t *tcp)
3946 3972 {
3947 3973 MD5_CTX context;
3948 3974 struct { uint32_t ports; in6_addr_t src; in6_addr_t dst; } arg;
3949 3975 uint32_t answer[4];
3950 3976 tcp_stack_t *tcps = tcp->tcp_tcps;
3951 3977 conn_t *connp = tcp->tcp_connp;
3952 3978
3953 3979 tcps->tcps_iss_incr_extra += (tcps->tcps_iss_incr >> 1);
3954 3980 tcp->tcp_iss = tcps->tcps_iss_incr_extra;
3955 3981 switch (tcps->tcps_strong_iss) {
3956 3982 case 2:
3957 3983 mutex_enter(&tcps->tcps_iss_key_lock);
3958 3984 context = tcps->tcps_iss_key;
3959 3985 mutex_exit(&tcps->tcps_iss_key_lock);
3960 3986 arg.ports = connp->conn_ports;
3961 3987 arg.src = connp->conn_laddr_v6;
3962 3988 arg.dst = connp->conn_faddr_v6;
3963 3989 MD5Update(&context, (uchar_t *)&arg, sizeof (arg));
3964 3990 MD5Final((uchar_t *)answer, &context);
3965 3991 tcp->tcp_iss += answer[0] ^ answer[1] ^ answer[2] ^ answer[3];
3966 3992 /*
3967 3993 * Now that we've hashed into a unique per-connection sequence
3968 3994 * space, add a random increment per strong_iss == 1. So I
3969 3995 * guess we'll have to...
3970 3996 */
3971 3997 /* FALLTHRU */
3972 3998 case 1:
3973 3999 tcp->tcp_iss += (gethrtime() >> ISS_NSEC_SHT) + tcp_random();
3974 4000 break;
3975 4001 default:
3976 4002 tcp->tcp_iss += (uint32_t)gethrestime_sec() *
3977 4003 tcps->tcps_iss_incr;
3978 4004 break;
3979 4005 }
3980 4006 tcp->tcp_valid_bits = TCP_ISS_VALID;
3981 4007 tcp->tcp_fss = tcp->tcp_iss - 1;
3982 4008 tcp->tcp_suna = tcp->tcp_iss;
3983 4009 tcp->tcp_snxt = tcp->tcp_iss + 1;
3984 4010 tcp->tcp_rexmit_nxt = tcp->tcp_snxt;
3985 4011 tcp->tcp_csuna = tcp->tcp_snxt;
3986 4012 }
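[Editor's note] For tcps_strong_iss == 2 the code above implements the RFC 1948 scheme: ISS = M + F(laddr, lport, faddr, fport, secret), where M is a monotonically advancing component (tcps_iss_incr_extra plus the clock/random increment) and F is MD5 over the connection 4-tuple keyed by the per-stack secret from tcp_iss_key_init(). A compressed, runnable sketch under stated assumptions; toy_hash32() is a stand-in for the kernel's MD5Update/MD5Final sequence and is NOT MD5:

#include <stddef.h>
#include <stdint.h>

/* Hypothetical keyed digest standing in for the MD5 sequence (FNV-1a style). */
static uint32_t
toy_hash32(uint32_t secret, const void *buf, size_t len)
{
	const uint8_t *p = buf;
	uint32_t h = 2166136261u ^ secret;

	while (len-- > 0)
		h = (h ^ *p++) * 16777619u;
	return (h);
}

typedef struct conn_id {
	uint32_t	ports;		/* local and foreign ports, packed */
	uint8_t		laddr[16];	/* local address */
	uint8_t		faddr[16];	/* foreign address */
} conn_id_t;

static uint32_t
iss_rfc1948(uint32_t secret, const conn_id_t *id, uint32_t m)
{
	/* ISS = M + F(connection id, secret), per RFC 1948 */
	return (m + toy_hash32(secret, id, sizeof (*id)));
}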
3987 4013
3988 4014 /*
3989 4015 * tcp_{set,clr}qfull() functions are used to either set or clear QFULL
3990 4016 * on the specified backing STREAMS q. Note, the caller may make the
3991 4017 * decision to call based on the tcp_t.tcp_flow_stopped value which,
3992 4018 * when checked outside the q's lock, is only an advisory check ...
3993 4019 */
3994 4020 void
3995 4021 tcp_setqfull(tcp_t *tcp)
3996 4022 {
3997 4023 tcp_stack_t *tcps = tcp->tcp_tcps;
3998 4024 conn_t *connp = tcp->tcp_connp;
3999 4025
4000 4026 if (tcp->tcp_closed)
4001 4027 return;
4002 4028
4003 4029 conn_setqfull(connp, &tcp->tcp_flow_stopped);
4004 4030 if (tcp->tcp_flow_stopped)
4005 4031 TCP_STAT(tcps, tcp_flwctl_on);
4006 4032 }
4007 4033
4008 4034 void
4009 4035 tcp_clrqfull(tcp_t *tcp)
4010 4036 {
4011 4037 conn_t *connp = tcp->tcp_connp;
4012 4038
4013 4039 if (tcp->tcp_closed)
4014 4040 return;
4015 4041 conn_clrqfull(connp, &tcp->tcp_flow_stopped);
4016 4042 }
4017 4043
4018 4044 static int
4019 4045 tcp_squeue_switch(int val)
4020 4046 {
4021 4047 int rval = SQ_FILL;
4022 4048
4023 4049 switch (val) {
4024 4050 case 1:
4025 4051 rval = SQ_NODRAIN;
4026 4052 break;
4027 4053 case 2:
4028 4054 rval = SQ_PROCESS;
4029 4055 break;
4030 4056 default:
4031 4057 break;
4032 4058 }
4033 4059 return (rval);
4034 4060 }
4035 4061
4036 4062 /*
4037 4063 * This is called once for each squeue - globally for all stack
4038 4064 * instances.
4039 4065 */
4040 4066 static void
4041 4067 tcp_squeue_add(squeue_t *sqp)
4042 4068 {
4043 4069 tcp_squeue_priv_t *tcp_time_wait = kmem_zalloc(
4044 4070 sizeof (tcp_squeue_priv_t), KM_SLEEP);
4045 4071
4046 4072 *squeue_getprivate(sqp, SQPRIVATE_TCP) = (intptr_t)tcp_time_wait;
4047 4073 if (tcp_free_list_max_cnt == 0) {
4048 4074 int tcp_ncpus = ((boot_max_ncpus == -1) ?
4049 4075 max_ncpus : boot_max_ncpus);
4050 4076
4051 4077 /*
4052 4078 * Limit number of entries to 1% of available memory / tcp_ncpus
4053 4079 */
4054 4080 tcp_free_list_max_cnt = (freemem * PAGESIZE) /
4055 4081 (tcp_ncpus * sizeof (tcp_t) * 100);
4056 4082 }
4057 4083 tcp_time_wait->tcp_free_list_cnt = 0;
4058 4084 }
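[Editor's note] To make the 1% cap above concrete with illustrative numbers (the sizeof (tcp_t) value is an assumption): with 8 GB of free memory, 8 CPUs, and a tcp_t of roughly 2 KB, tcp_free_list_max_cnt = (8 GB) / (8 * 2 KB * 100) ≈ 5243 cached tcp_t structures per squeue free list.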
4059 4085 /*
4060 4086 * Return a unix error if the tli error is TSYSERR, otherwise return a negative
4061 4087 * tli error.
4062 4088 */
4063 4089 int
4064 4090 tcp_do_bind(conn_t *connp, struct sockaddr *sa, socklen_t len, cred_t *cr,
4065 4091 boolean_t bind_to_req_port_only)
4066 4092 {
4067 4093 int error;
4068 4094 tcp_t *tcp = connp->conn_tcp;
4069 4095
4070 4096 if (tcp->tcp_state >= TCPS_BOUND) {
4071 4097 if (connp->conn_debug) {
4072 4098 (void) strlog(TCP_MOD_ID, 0, 1, SL_ERROR|SL_TRACE,
4073 4099 "tcp_bind: bad state, %d", tcp->tcp_state);
4074 4100 }
4075 4101 return (-TOUTSTATE);
4076 4102 }
4077 4103
4078 4104 error = tcp_bind_check(connp, sa, len, cr, bind_to_req_port_only);
4079 4105 if (error != 0)
4080 4106 return (error);
4081 4107
4082 4108 ASSERT(tcp->tcp_state == TCPS_BOUND);
4083 4109 tcp->tcp_conn_req_max = 0;
4084 4110 return (0);
4085 4111 }
4086 4112
4087 4113 /*
4088 4114 * If the return value from this function is positive, it's a UNIX error.
4089 4115 * Otherwise, if it's negative, then the absolute value is a TLI error.
4090 4116 * the TPI routine tcp_tpi_connect() is a wrapper function for this.
4091 4117 */
4092 4118 int
4093 4119 tcp_do_connect(conn_t *connp, const struct sockaddr *sa, socklen_t len,
4094 4120 cred_t *cr, pid_t pid)
4095 4121 {
4096 4122 tcp_t *tcp = connp->conn_tcp;
4097 4123 sin_t *sin = (sin_t *)sa;
4098 4124 sin6_t *sin6 = (sin6_t *)sa;
4099 4125 ipaddr_t *dstaddrp;
4100 4126 in_port_t dstport;
4101 4127 uint_t srcid;
4102 4128 int error;
4103 4129 uint32_t mss;
4104 4130 mblk_t *syn_mp;
4105 4131 tcp_stack_t *tcps = tcp->tcp_tcps;
4106 4132 int32_t oldstate;
4107 4133 ip_xmit_attr_t *ixa = connp->conn_ixa;
4108 4134
4109 4135 oldstate = tcp->tcp_state;
4110 4136
4111 4137 switch (len) {
4112 4138 default:
4113 4139 /*
4114 4140 * Should never happen
4115 4141 */
4116 4142 return (EINVAL);
4117 4143
4118 4144 case sizeof (sin_t):
4119 4145 sin = (sin_t *)sa;
4120 4146 if (sin->sin_port == 0) {
4121 4147 return (-TBADADDR);
4122 4148 }
4123 4149 if (connp->conn_ipv6_v6only) {
4124 4150 return (EAFNOSUPPORT);
4125 4151 }
4126 4152 break;
4127 4153
4128 4154 case sizeof (sin6_t):
4129 4155 sin6 = (sin6_t *)sa;
4130 4156 if (sin6->sin6_port == 0) {
4131 4157 return (-TBADADDR);
4132 4158 }
4133 4159 break;
4134 4160 }
4135 4161 /*
4136 4162 * If we're connecting to an IPv4-mapped IPv6 address, we need to
4137 4163 * make sure that the conn_ipversion is IPV4_VERSION. We
4138 4164 * need to do this before we call tcp_bindi() so that the port lookup
4139 4165 * code will look for ports in the correct port space (IPv4 and
4140 4166 * IPv6 have separate port spaces).
4141 4167 */
4142 4168 if (connp->conn_family == AF_INET6 &&
4143 4169 connp->conn_ipversion == IPV6_VERSION &&
4144 4170 IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
4145 4171 if (connp->conn_ipv6_v6only)
4146 4172 return (EADDRNOTAVAIL);
4147 4173
4148 4174 connp->conn_ipversion = IPV4_VERSION;
4149 4175 }
4150 4176
4151 4177 switch (tcp->tcp_state) {
4152 4178 case TCPS_LISTEN:
4153 4179 /*
4154 4180 * Listening sockets are not allowed to issue connect().
4155 4181 */
4156 4182 if (IPCL_IS_NONSTR(connp))
4157 4183 return (EOPNOTSUPP);
4158 4184 /* FALLTHRU */
4159 4185 case TCPS_IDLE:
4160 4186 /*
4161 4187 * We support quick connect, refer to comments in
4162 4188 * tcp_connect_*()
4163 4189 */
4164 4190 /* FALLTHRU */
4165 4191 case TCPS_BOUND:
4166 4192 break;
4167 4193 default:
4168 4194 return (-TOUTSTATE);
4169 4195 }
4170 4196
4171 4197 /*
4172 4198 * We update our cred/cpid based on the caller of connect
4173 4199 */
4174 4200 if (connp->conn_cred != cr) {
4175 4201 crhold(cr);
4176 4202 crfree(connp->conn_cred);
4177 4203 connp->conn_cred = cr;
4178 4204 }
4179 4205 connp->conn_cpid = pid;
4180 4206
4181 4207 /* Cache things in the ixa without any refhold */
4182 4208 ASSERT(!(ixa->ixa_free_flags & IXA_FREE_CRED));
4183 4209 ixa->ixa_cred = cr;
4184 4210 ixa->ixa_cpid = pid;
4185 4211 if (is_system_labeled()) {
4186 4212 /* We need to restart with a label based on the cred */
4187 4213 ip_xmit_attr_restore_tsl(ixa, ixa->ixa_cred);
4188 4214 }
4189 4215
4190 4216 if (connp->conn_family == AF_INET6) {
4191 4217 if (!IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
4192 4218 error = tcp_connect_ipv6(tcp, &sin6->sin6_addr,
4193 4219 sin6->sin6_port, sin6->sin6_flowinfo,
4194 4220 sin6->__sin6_src_id, sin6->sin6_scope_id);
4195 4221 } else {
4196 4222 /*
4197 4223 * The destination address is an IPv4-mapped IPv6 address.
4198 4224 * The bound source address should be unspecified or an
4199 4225 * IPv4-mapped IPv6 address as well.
4200 4226 */
4201 4227 if (!IN6_IS_ADDR_UNSPECIFIED(
4202 4228 &connp->conn_bound_addr_v6) &&
4203 4229 !IN6_IS_ADDR_V4MAPPED(&connp->conn_bound_addr_v6)) {
4204 4230 return (EADDRNOTAVAIL);
4205 4231 }
4206 4232 dstaddrp = &V4_PART_OF_V6((sin6->sin6_addr));
4207 4233 dstport = sin6->sin6_port;
4208 4234 srcid = sin6->__sin6_src_id;
4209 4235 error = tcp_connect_ipv4(tcp, dstaddrp, dstport,
4210 4236 srcid);
4211 4237 }
4212 4238 } else {
4213 4239 dstaddrp = &sin->sin_addr.s_addr;
4214 4240 dstport = sin->sin_port;
4215 4241 srcid = 0;
4216 4242 error = tcp_connect_ipv4(tcp, dstaddrp, dstport, srcid);
4217 4243 }
4218 4244
4219 4245 if (error != 0)
4220 4246 goto connect_failed;
4221 4247
4222 4248 CL_INET_CONNECT(connp, B_TRUE, error);
4223 4249 if (error != 0)
4224 4250 goto connect_failed;
4225 4251
4226 4252 /* connect succeeded */
4227 4253 TCPS_BUMP_MIB(tcps, tcpActiveOpens);
4228 4254 tcp->tcp_active_open = 1;
4229 4255
4230 4256 /*
4231 4257 * tcp_set_destination() does not adjust for TCP/IP header length.
4232 4258 */
4233 4259 mss = tcp->tcp_mss - connp->conn_ht_iphc_len;
4234 4260
4235 4261 /*
4236 4262 * Just make sure our rwnd is at least recv_hiwat_minmss * MSS, and round up
4237 4263 * to the nearest MSS.
4238 4264 *
4239 4265 * We do the round up here because we need to get the interface MTU
4240 4266 * first before we can do the round up.
4241 4267 */
4242 4268 tcp->tcp_rwnd = connp->conn_rcvbuf;
4243 4269 tcp->tcp_rwnd = MAX(MSS_ROUNDUP(tcp->tcp_rwnd, mss),
4244 4270 tcps->tcps_recv_hiwat_minmss * mss);
4245 4271 connp->conn_rcvbuf = tcp->tcp_rwnd;
4246 4272 tcp_set_ws_value(tcp);
4247 4273 tcp->tcp_tcpha->tha_win = htons(tcp->tcp_rwnd >> tcp->tcp_rcv_ws);
4248 4274 if (tcp->tcp_rcv_ws > 0 || tcps->tcps_wscale_always)
4249 4275 tcp->tcp_snd_ws_ok = B_TRUE;
4250 4276
4251 4277 /*
4252 4278 * Set tcp_snd_ts_ok to true
4253 4279 * so that tcp_xmit_mp will
4254 4280 * include the timestamp
4255 4281 * option in the SYN segment.
4256 4282 */
4257 4283 if (tcps->tcps_tstamp_always ||
4258 4284 (tcp->tcp_rcv_ws && tcps->tcps_tstamp_if_wscale)) {
4259 4285 tcp->tcp_snd_ts_ok = B_TRUE;
4260 4286 }
4261 4287
4262 4288 /*
4263 4289 * Note that tcp_snd_sack_ok can be set in tcp_set_destination() if
4264 4290 * the SACK metric is set. So here we just check the per stack SACK
4265 4291 * permitted param.
4266 4292 */
4267 4293 if (tcps->tcps_sack_permitted == 2) {
4268 4294 ASSERT(tcp->tcp_num_sack_blk == 0);
4269 4295 ASSERT(tcp->tcp_notsack_list == NULL);
4270 4296 tcp->tcp_snd_sack_ok = B_TRUE;
4271 4297 }
4272 4298
4273 4299 /*
4274 4300 * Should we use ECN? Note that the current
4275 4301 * default value (SunOS 5.9) of tcp_ecn_permitted
4276 4302 * is 1. The reason for doing this is that there
4277 4303 * is equipment out there that will drop ECN-
4278 4304 * enabled IP packets. Setting it to 1 avoids
4279 4305 * compatibility problems.
4280 4306 */
4281 4307 if (tcps->tcps_ecn_permitted == 2)
4282 4308 tcp->tcp_ecn_ok = B_TRUE;
4283 4309
4284 4310 /* Trace change from BOUND -> SYN_SENT here */
4285 4311 DTRACE_TCP6(state__change, void, NULL, ip_xmit_attr_t *,
4286 4312 connp->conn_ixa, void, NULL, tcp_t *, tcp, void, NULL,
4287 4313 int32_t, TCPS_BOUND);
4288 4314
4289 4315 TCP_TIMER_RESTART(tcp, tcp->tcp_rto);
4290 4316 syn_mp = tcp_xmit_mp(tcp, NULL, 0, NULL, NULL,
4291 4317 tcp->tcp_iss, B_FALSE, NULL, B_FALSE);
4292 4318 if (syn_mp != NULL) {
4293 4319 /*
4294 4320 * We must bump the generation before sending the syn
4295 4321 * to ensure that we use the right generation in case
4296 4322 * this thread issues a "connected" up call.
4297 4323 */
4298 4324 SOCK_CONNID_BUMP(tcp->tcp_connid);
4299 4325 /*
4300 4326 * DTrace sending the first SYN as a
4301 4327 * tcp:::connect-request event.
4302 4328 */
4303 4329 DTRACE_TCP5(connect__request, mblk_t *, NULL,
4304 4330 ip_xmit_attr_t *, connp->conn_ixa,
4305 4331 void_ip_t *, syn_mp->b_rptr, tcp_t *, tcp,
4306 4332 tcph_t *,
4307 4333 &syn_mp->b_rptr[connp->conn_ixa->ixa_ip_hdr_length]);
4308 4334 tcp_send_data(tcp, syn_mp);
4309 4335 }
4310 4336
4311 4337 if (tcp->tcp_conn.tcp_opts_conn_req != NULL)
4312 4338 tcp_close_mpp(&tcp->tcp_conn.tcp_opts_conn_req);
4313 4339 return (0);
4314 4340
4315 4341 connect_failed:
4316 4342 connp->conn_faddr_v6 = ipv6_all_zeros;
4317 4343 connp->conn_fport = 0;
4318 4344 tcp->tcp_state = oldstate;
4319 4345 if (tcp->tcp_conn.tcp_opts_conn_req != NULL)
4320 4346 tcp_close_mpp(&tcp->tcp_conn.tcp_opts_conn_req);
4321 4347 return (error);
4322 4348 }
4323 4349
4324 4350 int
4325 4351 tcp_do_listen(conn_t *connp, struct sockaddr *sa, socklen_t len,
4326 4352 int backlog, cred_t *cr, boolean_t bind_to_req_port_only)
4327 4353 {
4328 4354 tcp_t *tcp = connp->conn_tcp;
4329 4355 int error = 0;
4330 4356 tcp_stack_t *tcps = tcp->tcp_tcps;
4331 4357 int32_t oldstate;
4332 4358
4333 4359 /* All Solaris components should pass a cred for this operation. */
4334 4360 ASSERT(cr != NULL);
4335 4361
4336 4362 if (tcp->tcp_state >= TCPS_BOUND) {
4337 4363 if ((tcp->tcp_state == TCPS_BOUND ||
4338 4364 tcp->tcp_state == TCPS_LISTEN) && backlog > 0) {
4339 4365 /*
4340 4366 * Handle listen() increasing backlog.
4341 4367 * This is more "liberal" than what the TPI spec
4342 4368 * requires but is needed to avoid a t_unbind
4343 4369 * when handling listen() since the port number
4344 4370 * might be "stolen" between the unbind and bind.
4345 4371 */
4346 4372 goto do_listen;
4347 4373 }
4348 4374 if (connp->conn_debug) {
4349 4375 (void) strlog(TCP_MOD_ID, 0, 1, SL_ERROR|SL_TRACE,
4350 4376 "tcp_listen: bad state, %d", tcp->tcp_state);
4351 4377 }
4352 4378 return (-TOUTSTATE);
4353 4379 } else {
4354 4380 if (sa == NULL) {
4355 4381 sin6_t addr;
4356 4382 sin_t *sin;
4357 4383 sin6_t *sin6;
4358 4384
4359 4385 ASSERT(IPCL_IS_NONSTR(connp));
4360 4386 /* Do an implicit bind: Request for a generic port. */
4361 4387 if (connp->conn_family == AF_INET) {
4362 4388 len = sizeof (sin_t);
4363 4389 sin = (sin_t *)&addr;
4364 4390 *sin = sin_null;
4365 4391 sin->sin_family = AF_INET;
4366 4392 } else {
4367 4393 ASSERT(connp->conn_family == AF_INET6);
4368 4394 len = sizeof (sin6_t);
4369 4395 sin6 = (sin6_t *)&addr;
4370 4396 *sin6 = sin6_null;
4371 4397 sin6->sin6_family = AF_INET6;
4372 4398 }
4373 4399 sa = (struct sockaddr *)&addr;
4374 4400 }
4375 4401
4376 4402 error = tcp_bind_check(connp, sa, len, cr,
4377 4403 bind_to_req_port_only);
4378 4404 if (error)
4379 4405 return (error);
4380 4406 /* Fall through and do the fanout insertion */
4381 4407 }
4382 4408
4383 4409 do_listen:
4384 4410 ASSERT(tcp->tcp_state == TCPS_BOUND || tcp->tcp_state == TCPS_LISTEN);
4385 4411 tcp->tcp_conn_req_max = backlog;
4386 4412 if (tcp->tcp_conn_req_max) {
4387 4413 if (tcp->tcp_conn_req_max < tcps->tcps_conn_req_min)
4388 4414 tcp->tcp_conn_req_max = tcps->tcps_conn_req_min;
4389 4415 if (tcp->tcp_conn_req_max > tcps->tcps_conn_req_max_q)
4390 4416 tcp->tcp_conn_req_max = tcps->tcps_conn_req_max_q;
4391 4417 /*
4392 4418 * If this is a listener, do not reset the eager list
4393 4419 * and other stuff. Note that we don't check if the
4394 4420 * existing eager list meets the new tcp_conn_req_max
4395 4421 * requirement.
4396 4422 */
4397 4423 if (tcp->tcp_state != TCPS_LISTEN) {
4398 4424 tcp->tcp_state = TCPS_LISTEN;
4399 4425 DTRACE_TCP6(state__change, void, NULL, ip_xmit_attr_t *,
4400 4426 connp->conn_ixa, void, NULL, tcp_t *, tcp,
4401 4427 void, NULL, int32_t, TCPS_BOUND);
4402 4428 /* Initialize the chain. Don't need the eager_lock */
4403 4429 tcp->tcp_eager_next_q0 = tcp->tcp_eager_prev_q0 = tcp;
4404 4430 tcp->tcp_eager_next_drop_q0 = tcp;
4405 4431 tcp->tcp_eager_prev_drop_q0 = tcp;
4406 4432 tcp->tcp_second_ctimer_threshold =
4407 4433 tcps->tcps_ip_abort_linterval;
4408 4434 }
4409 4435 }
4410 4436
4411 4437 /*
4412 4438 * We need to make sure that the conn_recv is set to a non-null
4413 4439 * value before we insert the conn into the classifier table.
4414 4440 * This is to avoid a race with an incoming packet which does an
4415 4441 * ipcl_classify().
4416 4442 * We initially set it to tcp_input_listener_unbound to try to
4417 4443 * pick a good squeue for the listener when the first SYN arrives.
4418 4444 * tcp_input_listener_unbound sets it to tcp_input_listener on that
4419 4445 * first SYN.
4420 4446 */
4421 4447 connp->conn_recv = tcp_input_listener_unbound;
4422 4448
4423 4449 /* Insert the listener in the classifier table */
4424 4450 error = ip_laddr_fanout_insert(connp);
4425 4451 if (error != 0) {
4426 4452 /* Undo the bind - release the port number */
4427 4453 oldstate = tcp->tcp_state;
4428 4454 tcp->tcp_state = TCPS_IDLE;
4429 4455 DTRACE_TCP6(state__change, void, NULL, ip_xmit_attr_t *,
4430 4456 connp->conn_ixa, void, NULL, tcp_t *, tcp, void, NULL,
4431 4457 int32_t, oldstate);
4432 4458 connp->conn_bound_addr_v6 = ipv6_all_zeros;
4433 4459
4434 4460 connp->conn_laddr_v6 = ipv6_all_zeros;
4435 4461 connp->conn_saddr_v6 = ipv6_all_zeros;
4436 4462 connp->conn_ports = 0;
4437 4463
4438 4464 if (connp->conn_anon_port) {
4439 4465 zone_t *zone;
4440 4466
4441 4467 zone = crgetzone(cr);
4442 4468 connp->conn_anon_port = B_FALSE;
4443 4469 (void) tsol_mlp_anon(zone, connp->conn_mlp_type,
4444 4470 connp->conn_proto, connp->conn_lport, B_FALSE);
4445 4471 }
4446 4472 connp->conn_mlp_type = mlptSingle;
4447 4473
4448 4474 tcp_bind_hash_remove(tcp);
4449 4475 return (error);
4450 4476 } else {
4451 4477 /*
4452 4478 * If there is a connection limit, allocate and initialize
4453 4479 * the counter struct. Note that since listen can be called
4454 4480 * multiple times, the struct may have been already allocated.
4455 4481 */
4456 4482 if (!list_is_empty(&tcps->tcps_listener_conf) &&
4457 4483 tcp->tcp_listen_cnt == NULL) {
4458 4484 tcp_listen_cnt_t *tlc;
4459 4485 uint32_t ratio;
4460 4486
4461 4487 ratio = tcp_find_listener_conf(tcps,
4462 4488 ntohs(connp->conn_lport));
4463 4489 if (ratio != 0) {
4464 4490 uint32_t mem_ratio, tot_buf;
4465 4491
4466 4492 tlc = kmem_alloc(sizeof (tcp_listen_cnt_t),
4467 4493 KM_SLEEP);
4468 4494 /*
4469 4495 * Calculate the connection limit based on
4470 4496 * the configured ratio and maxusers. Maxusers
4471 4497 * is calculated based on memory size,
4472 4498 * ~ 1 user per MB. Note that the conn_rcvbuf
4473 4499 * and conn_sndbuf may change after a
4474 4500 * connection is accepted. So what we have
4475 4501 * is only an approximation.
4476 4502 */
4477 4503 if ((tot_buf = connp->conn_rcvbuf +
4478 4504 connp->conn_sndbuf) < MB) {
4479 4505 mem_ratio = MB / tot_buf;
4480 4506 tlc->tlc_max = maxusers / ratio *
4481 4507 mem_ratio;
4482 4508 } else {
4483 4509 mem_ratio = tot_buf / MB;
4484 4510 tlc->tlc_max = maxusers / ratio /
4485 4511 mem_ratio;
4486 4512 }
4487 4513 /* At least we should allow two connections! */
4488 4514 if (tlc->tlc_max <= tcp_min_conn_listener)
4489 4515 tlc->tlc_max = tcp_min_conn_listener;
4490 4516 tlc->tlc_cnt = 1;
4491 4517 tlc->tlc_drop = 0;
4492 4518 tcp->tcp_listen_cnt = tlc;
4493 4519 }
4494 4520 }
4495 4521 }
4496 4522 return (error);
4497 4523 }
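[Editor's note] The connection-limit arithmetic in tcp_do_listen() scales maxusers by how heavy a connection's combined buffers are relative to 1 MB. A self-contained sketch with illustrative inputs; the floor of 2 mirrors the tcp_min_conn_listener intent and all numbers are assumptions:

#include <stdint.h>
#include <stdio.h>

#define	MB	(1024 * 1024)

static uint32_t
listener_limit(uint32_t maxusers, uint32_t ratio, uint32_t tot_buf)
{
	uint32_t max;

	if (tot_buf < MB)		/* light buffers: scale the limit up */
		max = maxusers / ratio * (MB / tot_buf);
	else				/* heavy buffers: scale the limit down */
		max = maxusers / ratio / (tot_buf / MB);
	return (max <= 2 ? 2 : max);	/* always allow at least two connections */
}

int
main(void)
{
	/* ~2 GB box (maxusers 2048), ratio 2, 64K + 64K buffers: prints 8192 */
	(void) printf("%u\n", listener_limit(2048, 2, 65536 + 65536));
	return (0);
}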