4596 Callers of ip_srcid_find_id() need to be more careful
--- old/usr/src/uts/common/inet/tcp/tcp.c
+++ new/usr/src/uts/common/inet/tcp/tcp.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright (c) 1991, 2010, Oracle and/or its affiliates. All rights reserved.
24 24 * Copyright (c) 2011, Joyent Inc. All rights reserved.
25 25 * Copyright (c) 2011 Nexenta Systems, Inc. All rights reserved.
26 26 * Copyright (c) 2013 by Delphix. All rights reserved.
27 + * Copyright 2014, OmniTI Computer Consulting, Inc. All rights reserved.
27 28 */
28 29 /* Copyright (c) 1990 Mentat Inc. */
29 30
30 31 #include <sys/types.h>
31 32 #include <sys/stream.h>
32 33 #include <sys/strsun.h>
33 34 #include <sys/strsubr.h>
34 35 #include <sys/stropts.h>
35 36 #include <sys/strlog.h>
36 37 #define _SUN_TPI_VERSION 2
37 38 #include <sys/tihdr.h>
38 39 #include <sys/timod.h>
39 40 #include <sys/ddi.h>
40 41 #include <sys/sunddi.h>
41 42 #include <sys/suntpi.h>
42 43 #include <sys/xti_inet.h>
43 44 #include <sys/cmn_err.h>
44 45 #include <sys/debug.h>
45 46 #include <sys/sdt.h>
46 47 #include <sys/vtrace.h>
47 48 #include <sys/kmem.h>
48 49 #include <sys/ethernet.h>
49 50 #include <sys/cpuvar.h>
50 51 #include <sys/dlpi.h>
51 52 #include <sys/pattr.h>
52 53 #include <sys/policy.h>
53 54 #include <sys/priv.h>
54 55 #include <sys/zone.h>
55 56 #include <sys/sunldi.h>
56 57
57 58 #include <sys/errno.h>
58 59 #include <sys/signal.h>
59 60 #include <sys/socket.h>
60 61 #include <sys/socketvar.h>
61 62 #include <sys/sockio.h>
62 63 #include <sys/isa_defs.h>
63 64 #include <sys/md5.h>
64 65 #include <sys/random.h>
65 66 #include <sys/uio.h>
66 67 #include <sys/systm.h>
67 68 #include <netinet/in.h>
68 69 #include <netinet/tcp.h>
69 70 #include <netinet/ip6.h>
70 71 #include <netinet/icmp6.h>
71 72 #include <net/if.h>
72 73 #include <net/route.h>
73 74 #include <inet/ipsec_impl.h>
74 75
75 76 #include <inet/common.h>
76 77 #include <inet/ip.h>
77 78 #include <inet/ip_impl.h>
78 79 #include <inet/ip6.h>
79 80 #include <inet/ip_ndp.h>
80 81 #include <inet/proto_set.h>
81 82 #include <inet/mib2.h>
82 83 #include <inet/optcom.h>
83 84 #include <inet/snmpcom.h>
84 85 #include <inet/kstatcom.h>
85 86 #include <inet/tcp.h>
86 87 #include <inet/tcp_impl.h>
87 88 #include <inet/tcp_cluster.h>
88 89 #include <inet/udp_impl.h>
89 90 #include <net/pfkeyv2.h>
90 91 #include <inet/ipdrop.h>
91 92
92 93 #include <inet/ipclassifier.h>
93 94 #include <inet/ip_ire.h>
94 95 #include <inet/ip_ftable.h>
95 96 #include <inet/ip_if.h>
96 97 #include <inet/ipp_common.h>
97 98 #include <inet/ip_rts.h>
98 99 #include <inet/ip_netinfo.h>
99 100 #include <sys/squeue_impl.h>
100 101 #include <sys/squeue.h>
101 102 #include <sys/tsol/label.h>
102 103 #include <sys/tsol/tnet.h>
103 104 #include <rpc/pmap_prot.h>
104 105 #include <sys/callo.h>
105 106
106 107 /*
107 108 * TCP Notes: aka FireEngine Phase I (PSARC 2002/433)
108 109 *
109 110 * (Read the detailed design doc in PSARC case directory)
110 111 *
111 112	 * The entire tcp state is contained in the tcp_t and conn_t structures,
112 113	 * which are allocated in tandem using ipcl_conn_create() and passing
113 114	 * IPCL_TCPCONN as a flag. We use 'conn_ref' and 'conn_lock' to protect
114 115	 * the references on the tcp_t. The tcp_t structure is never compressed
115 116	 * and packets always land on the correct TCP perimeter from the time
116 117	 * the eager is created until the tcp_t dies (as such the old Mentat
117 118	 * TCP global queue is not used for detached state and no IPSEC checking
118 119	 * is required). The global queue is still allocated to send out resets
119 120	 * for connections which have no listeners, and IP directly calls
120 121	 * tcp_xmit_listeners_reset(), which does any policy check.
121 122 *
122 123 * Protection and Synchronisation mechanism:
123 124 *
124 125 * The tcp data structure does not use any kind of lock for protecting
125 126 * its state but instead uses 'squeues' for mutual exclusion from various
126 127	 * read and write side threads. To access a tcp member, the thread should
127 128	 * always be behind the squeue (via squeue_enter with flags SQ_FILL,
128 129	 * SQ_PROCESS, or SQ_NODRAIN). Since the squeues allow a direct function
129 130	 * call, the caller can pass any tcp function with an edesc_t prototype
130 131	 * (different from the traditional STREAMS model, where packets come in
131 132	 * only through designated entry points). The functions that can be
132 133	 * called directly via squeue are listed before the usual prototypes.
133 134 *
134 135 * Referencing:
135 136 *
136 137	 * TCP is MT-Hot and we use a reference-based scheme to make sure that the
137 138	 * tcp structure doesn't disappear while it's needed. When the application
138 139 * creates an outgoing connection or accepts an incoming connection, we
139 140 * start out with 2 references on 'conn_ref'. One for TCP and one for IP.
140 141 * The IP reference is just a symbolic reference since ip_tcpclose()
141 142 * looks at tcp structure after tcp_close_output() returns which could
142 143 * have dropped the last TCP reference. So as long as the connection is
143 144 * in attached state i.e. !TCP_IS_DETACHED, we have 2 references on the
144 145 * conn_t. The classifier puts its own reference when the connection is
145 146 * inserted in listen or connected hash. Anytime a thread needs to enter
146 147 * the tcp connection perimeter, it retrieves the conn/tcp from q->ptr
147 148 * on write side or by doing a classify on read side and then puts a
148 149 * reference on the conn before doing squeue_enter/tryenter/fill. For
149 150 * read side, the classifier itself puts the reference under fanout lock
150 151 * to make sure that tcp can't disappear before it gets processed. The
151 152 * squeue will drop this reference automatically so the called function
152 153 * doesn't have to do a DEC_REF.
153 154 *
154 155 * Opening a new connection:
155 156 *
156 157 * The outgoing connection open is pretty simple. tcp_open() does the
157 158 * work in creating the conn/tcp structure and initializing it. The
158 159 * squeue assignment is done based on the CPU the application
159 160	 * is running on. So for outbound connections, processing is always done
160 161	 * on the application's CPU, which might be different from the CPU
161 162	 * being interrupted by the NIC for incoming packets. An optimal way
162 163	 * would be to figure out the NIC <-> CPU binding at listen time, and
163 164	 * assign the outgoing connection to the squeue attached to the CPU that
164 165	 * will be interrupted for incoming packets (we know the NIC based on the
165 166	 * bind IP address). This might seem like a problem if more data is going
166 167	 * out, but in most cases the transmit is ACK-driven, where
167 168	 * the outgoing data normally sits on TCP's xmit queue waiting to be
168 169	 * transmitted.
169 170 *
170 171 * Accepting a connection:
171 172 *
172 173	 * This is a more interesting case because of the various races involved
173 174	 * in establishing an eager in its own perimeter. Read the meta comment on
174 175 * top of tcp_input_listener(). But briefly, the squeue is picked by
175 176 * ip_fanout based on the ring or the sender (if loopback).
176 177 *
177 178 * Closing a connection:
178 179 *
179 180	 * The close is fairly straightforward. tcp_close() calls tcp_close_output()
180 181	 * via the squeue to do the close and mark the tcp as detached if the
181 182	 * connection was in state TCPS_ESTABLISHED or greater. In the latter case,
182 183	 * TCP keeps its reference but tcp_close() always drops IP's reference. So
183 184	 * if the tcp was not killed, it sits in the time_wait list with 2
184 185	 * references: 1 for TCP and 1 because it is in the classifier's connected
185 186	 * hash. This is the condition we use to determine that it's OK to clean
186 187	 * up the tcp outside of the squeue when time wait expires (check the ref
187 188	 * under fanout and conn_lock; if 2, remove from fanout hash and kill it).
188 189 *
189 190	 * Although close just drops the necessary references and marks the
190 191	 * tcp_detached state, tcp_close needs to know that tcp_detached has been
191 192	 * set (under the squeue) before letting the STREAM go away (because an
192 193	 * inbound packet might attempt to go up the STREAM while the close
193 194	 * has happened and tcp_detached is not set). So a special lock and
194 195 * flag is used along with a condition variable (tcp_closelock, tcp_closed,
195 196 * and tcp_closecv) to signal tcp_close that tcp_close_out() has marked
196 197 * tcp_detached.
197 198 *
198 199 * Special provisions and fast paths:
199 200 *
200 201	 * We make special provisions for sockfs by marking tcp_issocket
201 202	 * whenever we have only sockfs on top of TCP. This allows us to skip
202 203	 * putting the tcp in the acceptor hash, since a sockfs listener can never
203 204	 * become an acceptor, and also to avoid allocating a tcp_t for the
204 205	 * acceptor STREAM, since the eager has already been allocated and the
205 206	 * accept now happens on the acceptor STREAM. There is a big blob of
206 207	 * comment on top of tcp_input_listener explaining the new accept. When
207 208	 * the socket is POP'd, sockfs sends us an ioctl to mark the fact and we
208 209	 * go back to the old behaviour. Once tcp_issocket is unset, it's never
209 210	 * set again for the life of that connection.
210 211 *
211 212 * IPsec notes :
212 213 *
213 214	 * Since a packet is always processed on the correct TCP perimeter,
214 215	 * all IPsec processing is deferred to IP, including checking new
215 216	 * connections and setting IPsec policies for new connections. The
216 217	 * only exception is tcp_xmit_listeners_reset(), which is called
217 218	 * directly from IP and needs to do a policy check to see if a TH_RST
218 219	 * can be sent out.
219 220 */
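/*
 * A minimal sketch (not part of the diff) of the perimeter-entry pattern
 * described above, mirroring the CONN_INC_REF/SQUEUE_ENTER_ONE usage that
 * appears later in this file: the caller takes a reference on the conn_t
 * before handing the mblk and an edesc_t-style callback to the squeue,
 * and the squeue drops that reference once the callback has run, so the
 * callee never needs a CONN_DEC_REF of its own.
 */
	CONN_INC_REF(connp);		/* ref that the squeue will drop */
	SQUEUE_ENTER_ONE(connp->conn_sqp, mp, tcp_close_output, connp,
	    NULL, tcp_squeue_flag, SQTAG_IP_TCP_CLOSE);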
220 221
221 222 /*
222 223 * Values for squeue switch:
223 224 * 1: SQ_NODRAIN
224 225 * 2: SQ_PROCESS
225 226 * 3: SQ_FILL
226 227 */
227 228	 int tcp_squeue_wput = 2;	/* /etc/system */
228 229 int tcp_squeue_flag;
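/*
 * A minimal sketch, under the assumption that tcp_squeue_switch()
 * (declared further below) simply maps the /etc/system value onto the
 * corresponding squeue flag from the table above; the helper name here
 * is hypothetical:
 */
static int
tcp_squeue_switch_sketch(int val)
{
	switch (val) {
	case 1:
		return (SQ_NODRAIN);
	case 2:
		return (SQ_PROCESS);
	case 3:
	default:
		return (SQ_FILL);	/* default choice is an assumption */
	}
}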
229 230
230 231 /*
231 232	 * To avoid hogging memory, limit the number of entries in tcp_free_list
232 233	 * to 1% of available memory / number of CPUs.
233 234 */
234 235 uint_t tcp_free_list_max_cnt = 0;
235 236
236 237 #define TIDUSZ 4096 /* transport interface data unit size */
237 238
238 239 /*
239 240 * Size of acceptor hash list. It has to be a power of 2 for hashing.
240 241 */
241 242 #define TCP_ACCEPTOR_FANOUT_SIZE 512
242 243
243 244 #ifdef _ILP32
244 245 #define TCP_ACCEPTOR_HASH(accid) \
245 246 (((uint_t)(accid) >> 8) & (TCP_ACCEPTOR_FANOUT_SIZE - 1))
246 247 #else
247 248 #define TCP_ACCEPTOR_HASH(accid) \
248 249 ((uint_t)(accid) & (TCP_ACCEPTOR_FANOUT_SIZE - 1))
249 250 #endif /* _ILP32 */
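/*
 * For illustration: TCP_ACCEPTOR_FANOUT_SIZE of 512 gives a mask of
 * 0x1ff, so on a non-_ILP32 kernel an acceptor id of 0x1203 hashes to
 * bucket 0x1203 & 0x1ff == 0x3. On _ILP32 the id is shifted right by 8
 * first, presumably to discard low-order bits that carry little entropy
 * before masking.
 */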
250 251
251 252 /*
252 253 * Minimum number of connections which can be created per listener. Used
253 254 * when the listener connection count is in effect.
254 255 */
255 256 static uint32_t tcp_min_conn_listener = 2;
256 257
257 258 uint32_t tcp_early_abort = 30;
258 259
259 260 /* TCP Timer control structure */
260 261 typedef struct tcpt_s {
261 262 pfv_t tcpt_pfv; /* The routine we are to call */
262 263 tcp_t *tcpt_tcp; /* The parameter we are to pass in */
263 264 } tcpt_t;
264 265
265 266 /*
266 267 * Functions called directly via squeue having a prototype of edesc_t.
267 268 */
268 269 void tcp_input_listener(void *arg, mblk_t *mp, void *arg2,
269 270 ip_recv_attr_t *ira);
270 271 void tcp_input_data(void *arg, mblk_t *mp, void *arg2,
271 272 ip_recv_attr_t *ira);
272 273 static void tcp_linger_interrupted(void *arg, mblk_t *mp, void *arg2,
273 274 ip_recv_attr_t *dummy);
274 275
275 276
276 277 /* Prototype for TCP functions */
277 278 static void tcp_random_init(void);
278 279 int tcp_random(void);
279 280 static int tcp_connect_ipv4(tcp_t *tcp, ipaddr_t *dstaddrp,
280 281 in_port_t dstport, uint_t srcid);
281 282 static int tcp_connect_ipv6(tcp_t *tcp, in6_addr_t *dstaddrp,
282 283 in_port_t dstport, uint32_t flowinfo,
283 284 uint_t srcid, uint32_t scope_id);
284 285 static void tcp_iss_init(tcp_t *tcp);
285 286 static void tcp_reinit(tcp_t *tcp);
286 287 static void tcp_reinit_values(tcp_t *tcp);
287 288
288 289 static void tcp_wsrv(queue_t *q);
289 290 static void tcp_update_lso(tcp_t *tcp, ip_xmit_attr_t *ixa);
290 291 static void tcp_update_zcopy(tcp_t *tcp);
291 292 static void tcp_notify(void *, ip_xmit_attr_t *, ixa_notify_type_t,
292 293 ixa_notify_arg_t);
293 294 static void *tcp_stack_init(netstackid_t stackid, netstack_t *ns);
294 295 static void tcp_stack_fini(netstackid_t stackid, void *arg);
295 296
296 297 static int tcp_squeue_switch(int);
297 298
298 299 static int tcp_open(queue_t *, dev_t *, int, int, cred_t *, boolean_t);
299 300 static int tcp_openv4(queue_t *, dev_t *, int, int, cred_t *);
300 301 static int tcp_openv6(queue_t *, dev_t *, int, int, cred_t *);
301 302
302 303 static void tcp_squeue_add(squeue_t *);
303 304
304 305 struct module_info tcp_rinfo = {
305 306 TCP_MOD_ID, TCP_MOD_NAME, 0, INFPSZ, TCP_RECV_HIWATER, TCP_RECV_LOWATER
306 307 };
307 308
308 309 static struct module_info tcp_winfo = {
309 310 TCP_MOD_ID, TCP_MOD_NAME, 0, INFPSZ, 127, 16
310 311 };
311 312
312 313 /*
313 314 * Entry points for TCP as a device. The normal case which supports
314 315 * the TCP functionality.
315 316 * We have separate open functions for the /dev/tcp and /dev/tcp6 devices.
316 317 */
317 318 struct qinit tcp_rinitv4 = {
318 319 NULL, (pfi_t)tcp_rsrv, tcp_openv4, tcp_tpi_close, NULL, &tcp_rinfo
319 320 };
320 321
321 322 struct qinit tcp_rinitv6 = {
322 323 NULL, (pfi_t)tcp_rsrv, tcp_openv6, tcp_tpi_close, NULL, &tcp_rinfo
323 324 };
324 325
325 326 struct qinit tcp_winit = {
326 327 (pfi_t)tcp_wput, (pfi_t)tcp_wsrv, NULL, NULL, NULL, &tcp_winfo
327 328 };
328 329
329 330 /* Initial entry point for TCP in socket mode. */
330 331 struct qinit tcp_sock_winit = {
331 332 (pfi_t)tcp_wput_sock, (pfi_t)tcp_wsrv, NULL, NULL, NULL, &tcp_winfo
332 333 };
333 334
334 335 /* TCP entry point during fallback */
335 336 struct qinit tcp_fallback_sock_winit = {
336 337 (pfi_t)tcp_wput_fallback, NULL, NULL, NULL, NULL, &tcp_winfo
337 338 };
338 339
339 340 /*
340 341	 * Entry points for TCP as an acceptor STREAM opened by sockfs when doing
341 342	 * an accept. Avoid allocating data structures since the eager has already
342 343	 * been created.
343 344 */
344 345 struct qinit tcp_acceptor_rinit = {
345 346 NULL, (pfi_t)tcp_rsrv, NULL, tcp_tpi_close_accept, NULL, &tcp_winfo
346 347 };
347 348
348 349 struct qinit tcp_acceptor_winit = {
349 350 (pfi_t)tcp_tpi_accept, NULL, NULL, NULL, NULL, &tcp_winfo
350 351 };
351 352
352 353 /* For AF_INET aka /dev/tcp */
353 354 struct streamtab tcpinfov4 = {
354 355 &tcp_rinitv4, &tcp_winit
355 356 };
356 357
357 358 /* For AF_INET6 aka /dev/tcp6 */
358 359 struct streamtab tcpinfov6 = {
359 360 &tcp_rinitv6, &tcp_winit
360 361 };
361 362
362 363 /*
363 364	 * The following assumes TPI alignment requirements stay on 32-bit
364 365	 * boundaries.
365 366 */
366 367 #define ROUNDUP32(x) \
367 368 (((x) + (sizeof (int32_t) - 1)) & ~(sizeof (int32_t) - 1))
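/*
 * For example, with sizeof (int32_t) == 4 this is (x + 3) & ~3, so
 * ROUNDUP32(5) == 8 and ROUNDUP32(8) == 8.
 */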
368 369
369 370 /* Template for response to info request. */
370 371 struct T_info_ack tcp_g_t_info_ack = {
371 372 T_INFO_ACK, /* PRIM_type */
372 373 0, /* TSDU_size */
373 374 T_INFINITE, /* ETSDU_size */
374 375 T_INVALID, /* CDATA_size */
375 376 T_INVALID, /* DDATA_size */
376 377 sizeof (sin_t), /* ADDR_size */
377 378 0, /* OPT_size - not initialized here */
378 379 TIDUSZ, /* TIDU_size */
379 380 T_COTS_ORD, /* SERV_type */
380 381 TCPS_IDLE, /* CURRENT_state */
381 382 (XPG4_1|EXPINLINE) /* PROVIDER_flag */
382 383 };
383 384
384 385 struct T_info_ack tcp_g_t_info_ack_v6 = {
385 386 T_INFO_ACK, /* PRIM_type */
386 387 0, /* TSDU_size */
387 388 T_INFINITE, /* ETSDU_size */
388 389 T_INVALID, /* CDATA_size */
389 390 T_INVALID, /* DDATA_size */
390 391 sizeof (sin6_t), /* ADDR_size */
391 392 0, /* OPT_size - not initialized here */
392 393 TIDUSZ, /* TIDU_size */
393 394 T_COTS_ORD, /* SERV_type */
394 395 TCPS_IDLE, /* CURRENT_state */
395 396 (XPG4_1|EXPINLINE) /* PROVIDER_flag */
396 397 };
397 398
398 399 /*
399 400 * TCP tunables related declarations. Definitions are in tcp_tunables.c
400 401 */
401 402 extern mod_prop_info_t tcp_propinfo_tbl[];
402 403 extern int tcp_propinfo_count;
403 404
404 405 #define IS_VMLOANED_MBLK(mp) \
405 406 (((mp)->b_datap->db_struioflag & STRUIO_ZC) != 0)
406 407
407 408 uint32_t do_tcpzcopy = 1; /* 0: disable, 1: enable, 2: force */
408 409
409 410 /*
410 411 * Forces all connections to obey the value of the tcps_maxpsz_multiplier
411 412 * tunable settable via NDD. Otherwise, the per-connection behavior is
412 413 * determined dynamically during tcp_set_destination(), which is the default.
413 414 */
414 415 boolean_t tcp_static_maxpsz = B_FALSE;
415 416
416 417 /*
417 418 * If the receive buffer size is changed, this function is called to update
418 419 * the upper socket layer on the new delayed receive wake up threshold.
419 420 */
420 421 static void
421 422 tcp_set_recv_threshold(tcp_t *tcp, uint32_t new_rcvthresh)
422 423 {
423 424 uint32_t default_threshold = SOCKET_RECVHIWATER >> 3;
424 425
425 426 if (IPCL_IS_NONSTR(tcp->tcp_connp)) {
426 427 conn_t *connp = tcp->tcp_connp;
427 428 struct sock_proto_props sopp;
428 429
429 430 /*
430 431	 * Only increase rcvthresh up to default_threshold.
431 432 */
432 433 if (new_rcvthresh > default_threshold)
433 434 new_rcvthresh = default_threshold;
434 435
435 436 sopp.sopp_flags = SOCKOPT_RCVTHRESH;
436 437 sopp.sopp_rcvthresh = new_rcvthresh;
437 438
438 439 (*connp->conn_upcalls->su_set_proto_props)
439 440 (connp->conn_upper_handle, &sopp);
440 441 }
441 442 }
442 443
443 444 /*
444 445	 * Figure out the value of the window scale option. Note that the rwnd is
445 446 * ASSUMED to be rounded up to the nearest MSS before the calculation.
446 447 * We cannot find the scale value and then do a round up of tcp_rwnd
447 448 * because the scale value may not be correct after that.
448 449 *
449 450 * Set the compiler flag to make this function inline.
450 451 */
451 452 void
452 453 tcp_set_ws_value(tcp_t *tcp)
453 454 {
454 455 int i;
455 456 uint32_t rwnd = tcp->tcp_rwnd;
456 457
457 458 for (i = 0; rwnd > TCP_MAXWIN && i < TCP_MAX_WINSHIFT;
458 459 i++, rwnd >>= 1)
459 460 ;
460 461 tcp->tcp_rcv_ws = i;
461 462 }
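/*
 * Worked example of the loop above, assuming the usual TCP_MAXWIN of
 * 65535 and TCP_MAX_WINSHIFT of 14: a tcp_rwnd of 256K (262144) is
 * halved three times (262144 -> 131072 -> 65536 -> 32768) before it
 * fits under TCP_MAXWIN, so tcp_rcv_ws becomes 3 and the peer scales
 * our advertised window by 1 << 3.
 */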
462 463
463 464 /*
464 465 * Remove cached/latched IPsec references.
465 466 */
466 467 void
467 468 tcp_ipsec_cleanup(tcp_t *tcp)
468 469 {
469 470 conn_t *connp = tcp->tcp_connp;
470 471
471 472 ASSERT(connp->conn_flags & IPCL_TCPCONN);
472 473
473 474 if (connp->conn_latch != NULL) {
474 475 IPLATCH_REFRELE(connp->conn_latch);
475 476 connp->conn_latch = NULL;
476 477 }
477 478 if (connp->conn_latch_in_policy != NULL) {
478 479 IPPOL_REFRELE(connp->conn_latch_in_policy);
479 480 connp->conn_latch_in_policy = NULL;
480 481 }
481 482 if (connp->conn_latch_in_action != NULL) {
482 483 IPACT_REFRELE(connp->conn_latch_in_action);
483 484 connp->conn_latch_in_action = NULL;
484 485 }
485 486 if (connp->conn_policy != NULL) {
486 487 IPPH_REFRELE(connp->conn_policy, connp->conn_netstack);
487 488 connp->conn_policy = NULL;
488 489 }
489 490 }
490 491
491 492 /*
492 493	 * Cleanup before placing on the free list.
493 494 * Disassociate from the netstack/tcp_stack_t since the freelist
494 495 * is per squeue and not per netstack.
495 496 */
496 497 void
497 498 tcp_cleanup(tcp_t *tcp)
498 499 {
499 500 mblk_t *mp;
500 501 conn_t *connp = tcp->tcp_connp;
501 502 tcp_stack_t *tcps = tcp->tcp_tcps;
502 503 netstack_t *ns = tcps->tcps_netstack;
503 504 mblk_t *tcp_rsrv_mp;
504 505
505 506 tcp_bind_hash_remove(tcp);
506 507
507 508 /* Cleanup that which needs the netstack first */
508 509 tcp_ipsec_cleanup(tcp);
509 510 ixa_cleanup(connp->conn_ixa);
510 511
511 512 if (connp->conn_ht_iphc != NULL) {
512 513 kmem_free(connp->conn_ht_iphc, connp->conn_ht_iphc_allocated);
513 514 connp->conn_ht_iphc = NULL;
514 515 connp->conn_ht_iphc_allocated = 0;
515 516 connp->conn_ht_iphc_len = 0;
516 517 connp->conn_ht_ulp = NULL;
517 518 connp->conn_ht_ulp_len = 0;
518 519 tcp->tcp_ipha = NULL;
519 520 tcp->tcp_ip6h = NULL;
520 521 tcp->tcp_tcpha = NULL;
521 522 }
522 523
523 524 /* We clear any IP_OPTIONS and extension headers */
524 525 ip_pkt_free(&connp->conn_xmit_ipp);
525 526
526 527 tcp_free(tcp);
527 528
528 529 /*
529 530	 * Since we will bzero the entire structure, we need to
530 531	 * remove it and reinsert it in the global hash list. We
531 532	 * know the walkers can't get to this conn because we
532 533	 * set the CONDEMNED flag earlier and checked the reference
533 534	 * under conn_lock, so no walker will pick it up; by the
534 535	 * time we do the ipcl_globalhash_remove() below, no walker
535 536	 * can get to it.
536 537 */
537 538 ipcl_globalhash_remove(connp);
538 539
539 540 /* Save some state */
540 541 mp = tcp->tcp_timercache;
541 542
542 543 tcp_rsrv_mp = tcp->tcp_rsrv_mp;
543 544
544 545 if (connp->conn_cred != NULL) {
545 546 crfree(connp->conn_cred);
546 547 connp->conn_cred = NULL;
547 548 }
548 549 ipcl_conn_cleanup(connp);
549 550 connp->conn_flags = IPCL_TCPCONN;
550 551
551 552 /*
552 553 * Now it is safe to decrement the reference counts.
553 554 * This might be the last reference on the netstack
554 555 * in which case it will cause the freeing of the IP Instance.
555 556 */
556 557 connp->conn_netstack = NULL;
557 558 connp->conn_ixa->ixa_ipst = NULL;
558 559 netstack_rele(ns);
559 560 ASSERT(tcps != NULL);
560 561 tcp->tcp_tcps = NULL;
561 562
562 563 bzero(tcp, sizeof (tcp_t));
563 564
564 565 /* restore the state */
565 566 tcp->tcp_timercache = mp;
566 567
567 568 tcp->tcp_rsrv_mp = tcp_rsrv_mp;
568 569
569 570 tcp->tcp_connp = connp;
570 571
571 572 ASSERT(connp->conn_tcp == tcp);
572 573 ASSERT(connp->conn_flags & IPCL_TCPCONN);
573 574 connp->conn_state_flags = CONN_INCIPIENT;
574 575 ASSERT(connp->conn_proto == IPPROTO_TCP);
575 576 ASSERT(connp->conn_ref == 1);
576 577 }
577 578
578 579 /*
579 580 * Adapt to the information, such as rtt and rtt_sd, provided from the
580 581 * DCE and IRE maintained by IP.
581 582 *
582 583 * Checks for multicast and broadcast destination address.
583 584 * Returns zero if ok; an errno on failure.
584 585 *
585 586 * Note that the MSS calculation here is based on the info given in
586 587 * the DCE and IRE. We do not do any calculation based on TCP options. They
587 588 * will be handled in tcp_input_data() when TCP knows which options to use.
588 589 *
589 590 * Note on how TCP gets its parameters for a connection.
590 591 *
591 592 * When a tcp_t structure is allocated, it gets all the default parameters.
592 593 * In tcp_set_destination(), it gets those metric parameters, like rtt, rtt_sd,
593 594 * spipe, rpipe, ... from the route metrics. Route metric overrides the
594 595 * default.
595 596 *
596 597 * An incoming SYN with a multicast or broadcast destination address is dropped
597 598 * in ip_fanout_v4/v6.
598 599 *
599 600 * An incoming SYN with a multicast or broadcast source address is always
600 601 * dropped in tcp_set_destination, since IPDF_ALLOW_MCBC is not set in
601 602 * conn_connect.
602 603 * The same logic in tcp_set_destination also serves to
603 604 * reject an attempt to connect to a broadcast or multicast (destination)
604 605 * address.
605 606 */
606 607 int
607 608 tcp_set_destination(tcp_t *tcp)
608 609 {
609 610 uint32_t mss_max;
610 611 uint32_t mss;
611 612 boolean_t tcp_detached = TCP_IS_DETACHED(tcp);
612 613 conn_t *connp = tcp->tcp_connp;
613 614 tcp_stack_t *tcps = tcp->tcp_tcps;
614 615 iulp_t uinfo;
615 616 int error;
616 617 uint32_t flags;
617 618
618 619 flags = IPDF_LSO | IPDF_ZCOPY;
619 620 /*
620 621 * Make sure we have a dce for the destination to avoid dce_ident
621 622 * contention for connected sockets.
622 623 */
623 624 flags |= IPDF_UNIQUE_DCE;
624 625
625 626 if (!tcps->tcps_ignore_path_mtu)
626 627 connp->conn_ixa->ixa_flags |= IXAF_PMTU_DISCOVERY;
627 628
628 629	 /* Use conn_lock to satisfy ASSERT; tcp is already serialized */
629 630 mutex_enter(&connp->conn_lock);
630 631 error = conn_connect(connp, &uinfo, flags);
631 632 mutex_exit(&connp->conn_lock);
632 633 if (error != 0)
633 634 return (error);
634 635
635 636 error = tcp_build_hdrs(tcp);
636 637 if (error != 0)
637 638 return (error);
638 639
639 640 tcp->tcp_localnet = uinfo.iulp_localnet;
640 641
641 642 if (uinfo.iulp_rtt != 0) {
642 643 clock_t rto;
643 644
644 645 tcp->tcp_rtt_sa = uinfo.iulp_rtt;
645 646 tcp->tcp_rtt_sd = uinfo.iulp_rtt_sd;
646 647 rto = (tcp->tcp_rtt_sa >> 3) + tcp->tcp_rtt_sd +
647 648 tcps->tcps_rexmit_interval_extra +
648 649 (tcp->tcp_rtt_sa >> 5);
649 650
650 651 TCP_SET_RTO(tcp, rto);
651 652 }
652 653 if (uinfo.iulp_ssthresh != 0)
653 654 tcp->tcp_cwnd_ssthresh = uinfo.iulp_ssthresh;
654 655 else
655 656 tcp->tcp_cwnd_ssthresh = TCP_MAX_LARGEWIN;
656 657 if (uinfo.iulp_spipe > 0) {
657 658 connp->conn_sndbuf = MIN(uinfo.iulp_spipe,
658 659 tcps->tcps_max_buf);
659 660 if (tcps->tcps_snd_lowat_fraction != 0) {
660 661 connp->conn_sndlowat = connp->conn_sndbuf /
661 662 tcps->tcps_snd_lowat_fraction;
662 663 }
663 664 (void) tcp_maxpsz_set(tcp, B_TRUE);
664 665 }
665 666 /*
666 667	 * Note that up till now, the acceptor always inherits the receive
667 668	 * window from the listener. But if there is a metric
668 669	 * associated with a host, we should use that instead of
669 670	 * inheriting it from the listener. Thus we need to pass this
670 671	 * info back to the caller.
671 672 */
672 673 if (uinfo.iulp_rpipe > 0) {
673 674 tcp->tcp_rwnd = MIN(uinfo.iulp_rpipe,
674 675 tcps->tcps_max_buf);
675 676 }
676 677
677 678 if (uinfo.iulp_rtomax > 0) {
678 679 tcp->tcp_second_timer_threshold =
679 680 uinfo.iulp_rtomax;
680 681 }
681 682
682 683 /*
683 684 * Use the metric option settings, iulp_tstamp_ok and
684 685 * iulp_wscale_ok, only for active open. What this means
685 686 * is that if the other side uses timestamp or window
686 687 * scale option, TCP will also use those options. That
687 688 * is for passive open. If the application sets a
688 689 * large window, window scale is enabled regardless of
689 690 * the value in iulp_wscale_ok. This is the behavior
690 691 * since 2.6. So we keep it.
691 692 * The only case left in passive open processing is the
692 693 * check for SACK.
693 694 * For ECN, it should probably be like SACK. But the
694 695 * current value is binary, so we treat it like the other
695 696 * cases. The metric only controls active open.For passive
696 697 * open, the ndd param, tcp_ecn_permitted, controls the
697 698 * behavior.
698 699 */
699 700 if (!tcp_detached) {
700 701 /*
701 702 * The if check means that the following can only
702 703 * be turned on by the metrics only IRE, but not off.
703 704 */
704 705 if (uinfo.iulp_tstamp_ok)
705 706 tcp->tcp_snd_ts_ok = B_TRUE;
706 707 if (uinfo.iulp_wscale_ok)
707 708 tcp->tcp_snd_ws_ok = B_TRUE;
708 709 if (uinfo.iulp_sack == 2)
709 710 tcp->tcp_snd_sack_ok = B_TRUE;
710 711 if (uinfo.iulp_ecn_ok)
711 712 tcp->tcp_ecn_ok = B_TRUE;
712 713 } else {
713 714 /*
714 715 * Passive open.
715 716 *
716 717 * As above, the if check means that SACK can only be
717 718 * turned on by the metric only IRE.
718 719 */
719 720 if (uinfo.iulp_sack > 0) {
720 721 tcp->tcp_snd_sack_ok = B_TRUE;
721 722 }
722 723 }
723 724
724 725 /*
725 726 * XXX Note that currently, iulp_mtu can be as small as 68
726 727	 * because of PMTUd. So tcp_mss may go negative if the combined
727 728	 * length of all those options exceeds 28 bytes. But because
728 729	 * of the tcp_mss_min check below, we may not have a problem if
729 730	 * tcp_mss_min is of a reasonable value. The default is 1 so
730 731	 * the negative problem still exists. And the check defeats PMTUd.
731 732	 * In fact, if PMTUd finds that the MSS should be smaller than
732 733	 * tcp_mss_min, TCP should turn off PMTUd and use the tcp_mss_min
733 734 * value.
734 735 *
735 736 * We do not deal with that now. All those problems related to
736 737 * PMTUd will be fixed later.
737 738 */
738 739 ASSERT(uinfo.iulp_mtu != 0);
739 740 mss = tcp->tcp_initial_pmtu = uinfo.iulp_mtu;
740 741
741 742 /* Sanity check for MSS value. */
742 743 if (connp->conn_ipversion == IPV4_VERSION)
743 744 mss_max = tcps->tcps_mss_max_ipv4;
744 745 else
745 746 mss_max = tcps->tcps_mss_max_ipv6;
746 747
747 748 if (tcp->tcp_ipsec_overhead == 0)
748 749 tcp->tcp_ipsec_overhead = conn_ipsec_length(connp);
749 750
750 751 mss -= tcp->tcp_ipsec_overhead;
751 752
752 753 if (mss < tcps->tcps_mss_min)
753 754 mss = tcps->tcps_mss_min;
754 755 if (mss > mss_max)
755 756 mss = mss_max;
756 757
757 758 /* Note that this is the maximum MSS, excluding all options. */
758 759 tcp->tcp_mss = mss;
759 760
760 761 /*
761 762 * Update the tcp connection with LSO capability.
762 763 */
763 764 tcp_update_lso(tcp, connp->conn_ixa);
764 765
765 766 /*
766 767 * Initialize the ISS here now that we have the full connection ID.
767 768 * The RFC 1948 method of initial sequence number generation requires
768 769 * knowledge of the full connection ID before setting the ISS.
769 770 */
770 771 tcp_iss_init(tcp);
771 772
772 773 tcp->tcp_loopback = (uinfo.iulp_loopback | uinfo.iulp_local);
773 774
774 775 /*
775 776	 * Make sure that the conn is not marked incipient
776 777	 * for incoming connections. A blind
777 778	 * removal of the incipient flag is cheaper than
778 779	 * a check followed by removal.
779 780 */
780 781 mutex_enter(&connp->conn_lock);
781 782 connp->conn_state_flags &= ~CONN_INCIPIENT;
782 783 mutex_exit(&connp->conn_lock);
783 784 return (0);
784 785 }
785 786
786 787 /*
787 788 * tcp_clean_death / tcp_close_detached must not be called more than once
788 789 * on a tcp. Thus every function that potentially calls tcp_clean_death
789 790 * must check for the tcp state before calling tcp_clean_death.
790 791 * Eg. tcp_input_data, tcp_eager_kill, tcp_clean_death_wrapper,
791 792 * tcp_timer_handler, all check for the tcp state.
792 793 */
793 794 /* ARGSUSED */
794 795 void
795 796 tcp_clean_death_wrapper(void *arg, mblk_t *mp, void *arg2,
796 797 ip_recv_attr_t *dummy)
797 798 {
798 799 tcp_t *tcp = ((conn_t *)arg)->conn_tcp;
799 800
800 801 freemsg(mp);
801 802 if (tcp->tcp_state > TCPS_BOUND)
802 803 (void) tcp_clean_death(((conn_t *)arg)->conn_tcp, ETIMEDOUT);
803 804 }
804 805
805 806 /*
806 807 * We are dying for some reason. Try to do it gracefully. (May be called
807 808 * as writer.)
808 809 *
809 810 * Return -1 if the structure was not cleaned up (if the cleanup had to be
810 811 * done by a service procedure).
811 812 * TBD - Should the return value distinguish between the tcp_t being
812 813 * freed and it being reinitialized?
813 814 */
814 815 int
815 816 tcp_clean_death(tcp_t *tcp, int err)
816 817 {
817 818 mblk_t *mp;
818 819 queue_t *q;
819 820 conn_t *connp = tcp->tcp_connp;
820 821 tcp_stack_t *tcps = tcp->tcp_tcps;
821 822
822 823 if (tcp->tcp_fused)
823 824 tcp_unfuse(tcp);
824 825
825 826 if (tcp->tcp_linger_tid != 0 &&
826 827 TCP_TIMER_CANCEL(tcp, tcp->tcp_linger_tid) >= 0) {
827 828 tcp_stop_lingering(tcp);
828 829 }
829 830
830 831 ASSERT(tcp != NULL);
831 832 ASSERT((connp->conn_family == AF_INET &&
832 833 connp->conn_ipversion == IPV4_VERSION) ||
833 834 (connp->conn_family == AF_INET6 &&
834 835 (connp->conn_ipversion == IPV4_VERSION ||
835 836 connp->conn_ipversion == IPV6_VERSION)));
836 837
837 838 if (TCP_IS_DETACHED(tcp)) {
838 839 if (tcp->tcp_hard_binding) {
839 840 /*
840 841	 * It's an eager that we are dealing with. We close the
841 842	 * eager but in case a conn_ind has already gone to the
842 843	 * listener, let tcp_accept_finish() send a discon_ind
843 844	 * to the listener and drop the last reference. If the
844 845	 * listener doesn't even know about the eager, i.e. the
845 846 * conn_ind hasn't gone up, blow away the eager and drop
846 847 * the last reference as well. If the conn_ind has gone
847 848 * up, state should be BOUND. tcp_accept_finish
848 849 * will figure out that the connection has received a
849 850 * RST and will send a DISCON_IND to the application.
850 851 */
851 852 tcp_closei_local(tcp);
852 853 if (!tcp->tcp_tconnind_started) {
853 854 CONN_DEC_REF(connp);
854 855 } else {
855 856 tcp->tcp_state = TCPS_BOUND;
856 857 DTRACE_TCP6(state__change, void, NULL,
857 858 ip_xmit_attr_t *, connp->conn_ixa,
858 859 void, NULL, tcp_t *, tcp, void, NULL,
859 860 int32_t, TCPS_CLOSED);
860 861 }
861 862 } else {
862 863 tcp_close_detached(tcp);
863 864 }
864 865 return (0);
865 866 }
866 867
867 868 TCP_STAT(tcps, tcp_clean_death_nondetached);
868 869
869 870 /*
870 871 * The connection is dead. Decrement listener connection counter if
871 872 * necessary.
872 873 */
873 874 if (tcp->tcp_listen_cnt != NULL)
874 875 TCP_DECR_LISTEN_CNT(tcp);
875 876
876 877 /*
877 878 * When a connection is moved to TIME_WAIT state, the connection
878 879 * counter is already decremented. So no need to decrement here
879 880 * again. See SET_TIME_WAIT() macro.
880 881 */
881 882 if (tcp->tcp_state >= TCPS_ESTABLISHED &&
882 883 tcp->tcp_state < TCPS_TIME_WAIT) {
883 884 TCPS_CONN_DEC(tcps);
884 885 }
885 886
886 887 q = connp->conn_rq;
887 888
888 889 /* Trash all inbound data */
889 890 if (!IPCL_IS_NONSTR(connp)) {
890 891 ASSERT(q != NULL);
891 892 flushq(q, FLUSHALL);
892 893 }
893 894
894 895 /*
895 896	 * If we are at least partway open and there is an error
896 897	 * (err == 0 implies no error),
897 898	 * notify our client with a T_DISCON_IND.
898 899 */
899 900 if ((tcp->tcp_state >= TCPS_SYN_SENT) && err) {
900 901 if (tcp->tcp_state >= TCPS_ESTABLISHED &&
901 902 !TCP_IS_SOCKET(tcp)) {
902 903 /*
903 904 * Send M_FLUSH according to TPI. Because sockets will
904 905	 * (and must) ignore FLUSHR, we do that only for TPI
905 906 * endpoints and sockets in STREAMS mode.
906 907 */
907 908 (void) putnextctl1(q, M_FLUSH, FLUSHR);
908 909 }
909 910 if (connp->conn_debug) {
910 911 (void) strlog(TCP_MOD_ID, 0, 1, SL_TRACE|SL_ERROR,
911 912 "tcp_clean_death: discon err %d", err);
912 913 }
913 914 if (IPCL_IS_NONSTR(connp)) {
914 915 /* Direct socket, use upcall */
915 916 (*connp->conn_upcalls->su_disconnected)(
916 917 connp->conn_upper_handle, tcp->tcp_connid, err);
917 918 } else {
918 919 mp = mi_tpi_discon_ind(NULL, err, 0);
919 920 if (mp != NULL) {
920 921 putnext(q, mp);
921 922 } else {
922 923 if (connp->conn_debug) {
923 924 (void) strlog(TCP_MOD_ID, 0, 1,
924 925 SL_ERROR|SL_TRACE,
925 926 "tcp_clean_death, sending M_ERROR");
926 927 }
927 928 (void) putnextctl1(q, M_ERROR, EPROTO);
928 929 }
929 930 }
930 931 if (tcp->tcp_state <= TCPS_SYN_RCVD) {
931 932 /* SYN_SENT or SYN_RCVD */
932 933 TCPS_BUMP_MIB(tcps, tcpAttemptFails);
933 934 } else if (tcp->tcp_state <= TCPS_CLOSE_WAIT) {
934 935 /* ESTABLISHED or CLOSE_WAIT */
935 936 TCPS_BUMP_MIB(tcps, tcpEstabResets);
936 937 }
937 938 }
938 939
939 940 /*
940 941 * ESTABLISHED non-STREAMS eagers are not 'detached' because
941 942 * an upper handle is obtained when the SYN-ACK comes in. So it
942 943 * should receive the 'disconnected' upcall, but tcp_reinit should
943 944 * not be called since this is an eager.
944 945 */
945 946 if (tcp->tcp_listener != NULL && IPCL_IS_NONSTR(connp)) {
946 947 tcp_closei_local(tcp);
947 948 tcp->tcp_state = TCPS_BOUND;
948 949 DTRACE_TCP6(state__change, void, NULL, ip_xmit_attr_t *,
949 950 connp->conn_ixa, void, NULL, tcp_t *, tcp, void, NULL,
950 951 int32_t, TCPS_CLOSED);
951 952 return (0);
952 953 }
953 954
954 955 tcp_reinit(tcp);
955 956 if (IPCL_IS_NONSTR(connp))
956 957 (void) tcp_do_unbind(connp);
957 958
958 959 return (-1);
959 960 }
960 961
961 962 /*
962 963	 * If the tcp is in the "lingering state", waiting for the SO_LINGER
963 964	 * timeout to expire, stop the wait and finish the close.
964 965 */
965 966 void
966 967 tcp_stop_lingering(tcp_t *tcp)
967 968 {
968 969 clock_t delta = 0;
969 970 tcp_stack_t *tcps = tcp->tcp_tcps;
970 971 conn_t *connp = tcp->tcp_connp;
971 972
972 973 tcp->tcp_linger_tid = 0;
973 974 if (tcp->tcp_state > TCPS_LISTEN) {
974 975 tcp_acceptor_hash_remove(tcp);
975 976 mutex_enter(&tcp->tcp_non_sq_lock);
976 977 if (tcp->tcp_flow_stopped) {
977 978 tcp_clrqfull(tcp);
978 979 }
979 980 mutex_exit(&tcp->tcp_non_sq_lock);
980 981
981 982 if (tcp->tcp_timer_tid != 0) {
982 983 delta = TCP_TIMER_CANCEL(tcp, tcp->tcp_timer_tid);
983 984 tcp->tcp_timer_tid = 0;
984 985 }
985 986 /*
986 987 * Need to cancel those timers which will not be used when
987 988 * TCP is detached. This has to be done before the conn_wq
988 989 * is cleared.
989 990 */
990 991 tcp_timers_stop(tcp);
991 992
992 993 tcp->tcp_detached = B_TRUE;
993 994 connp->conn_rq = NULL;
994 995 connp->conn_wq = NULL;
995 996
996 997 if (tcp->tcp_state == TCPS_TIME_WAIT) {
997 998 tcp_time_wait_append(tcp);
998 999 TCP_DBGSTAT(tcps, tcp_detach_time_wait);
999 1000 goto finish;
1000 1001 }
1001 1002
1002 1003 /*
1003 1004 * If delta is zero the timer event wasn't executed and was
1004 1005 * successfully canceled. In this case we need to restart it
1005 1006 * with the minimal delta possible.
1006 1007 */
1007 1008 if (delta >= 0) {
1008 1009 tcp->tcp_timer_tid = TCP_TIMER(tcp, tcp_timer,
1009 1010 delta ? delta : 1);
1010 1011 }
1011 1012 } else {
1012 1013 tcp_closei_local(tcp);
1013 1014 CONN_DEC_REF(connp);
1014 1015 }
1015 1016 finish:
1016 1017 tcp->tcp_detached = B_TRUE;
1017 1018 connp->conn_rq = NULL;
1018 1019 connp->conn_wq = NULL;
1019 1020
1020 1021 /* Signal closing thread that it can complete close */
1021 1022 mutex_enter(&tcp->tcp_closelock);
1022 1023 tcp->tcp_closed = 1;
1023 1024 cv_signal(&tcp->tcp_closecv);
1024 1025 mutex_exit(&tcp->tcp_closelock);
1025 1026
1026 1027 /* If we have an upper handle (socket), release it */
1027 1028 if (IPCL_IS_NONSTR(connp)) {
1028 1029 ASSERT(connp->conn_upper_handle != NULL);
1029 1030 (*connp->conn_upcalls->su_closed)(connp->conn_upper_handle);
1030 1031 connp->conn_upper_handle = NULL;
1031 1032 connp->conn_upcalls = NULL;
1032 1033 }
1033 1034 }
1034 1035
1035 1036 void
1036 1037 tcp_close_common(conn_t *connp, int flags)
1037 1038 {
1038 1039 tcp_t *tcp = connp->conn_tcp;
1039 1040 mblk_t *mp = &tcp->tcp_closemp;
1040 1041 boolean_t conn_ioctl_cleanup_reqd = B_FALSE;
1041 1042 mblk_t *bp;
1042 1043
1043 1044 ASSERT(connp->conn_ref >= 2);
1044 1045
1045 1046 /*
1046 1047 * Mark the conn as closing. ipsq_pending_mp_add will not
1047 1048 * add any mp to the pending mp list, after this conn has
1048 1049 * started closing.
1049 1050 */
1050 1051 mutex_enter(&connp->conn_lock);
1051 1052 connp->conn_state_flags |= CONN_CLOSING;
1052 1053 if (connp->conn_oper_pending_ill != NULL)
1053 1054 conn_ioctl_cleanup_reqd = B_TRUE;
1054 1055 CONN_INC_REF_LOCKED(connp);
1055 1056 mutex_exit(&connp->conn_lock);
1056 1057 tcp->tcp_closeflags = (uint8_t)flags;
1057 1058 ASSERT(connp->conn_ref >= 3);
1058 1059
1059 1060 /*
1060 1061	 * tcp_closemp_used is used below without the protection of a lock,
1061 1062	 * as we don't expect anyone else to use it concurrently at this
1062 1063	 * point; otherwise it would be a major defect.
1063 1064 */
1064 1065
1065 1066 if (mp->b_prev == NULL)
1066 1067 tcp->tcp_closemp_used = B_TRUE;
1067 1068 else
1068 1069 cmn_err(CE_PANIC, "tcp_close: concurrent use of tcp_closemp: "
1069 1070 "connp %p tcp %p\n", (void *)connp, (void *)tcp);
1070 1071
1071 1072 TCP_DEBUG_GETPCSTACK(tcp->tcmp_stk, 15);
1072 1073
1073 1074 /*
1074 1075 * Cleanup any queued ioctls here. This must be done before the wq/rq
1075 1076 * are re-written by tcp_close_output().
1076 1077 */
1077 1078 if (conn_ioctl_cleanup_reqd)
1078 1079 conn_ioctl_cleanup(connp);
1079 1080
1080 1081 /*
1081 1082 * As CONN_CLOSING is set, no further ioctls should be passed down to
1082 1083 * IP for this conn (see the guards in tcp_ioctl, tcp_wput_ioctl and
1083 1084 * tcp_wput_iocdata). If the ioctl was queued on an ipsq,
1084 1085 * conn_ioctl_cleanup should have found it and removed it. If the ioctl
1085 1086 * was still in flight at the time, we wait for it here. See comments
1086 1087 * for CONN_INC_IOCTLREF in ip.h for details.
1087 1088 */
1088 1089 mutex_enter(&connp->conn_lock);
1089 1090 while (connp->conn_ioctlref > 0)
1090 1091 cv_wait(&connp->conn_cv, &connp->conn_lock);
1091 1092 ASSERT(connp->conn_ioctlref == 0);
1092 1093 ASSERT(connp->conn_oper_pending_ill == NULL);
1093 1094 mutex_exit(&connp->conn_lock);
1094 1095
1095 1096 SQUEUE_ENTER_ONE(connp->conn_sqp, mp, tcp_close_output, connp,
1096 1097 NULL, tcp_squeue_flag, SQTAG_IP_TCP_CLOSE);
1097 1098
1098 1099 /*
1099 1100 * For non-STREAMS sockets, the normal case is that the conn makes
1100 1101 * an upcall when it's finally closed, so there is no need to wait
1101 1102 * in the protocol. But in case of SO_LINGER the thread sleeps here
1102 1103 * so it can properly deal with the thread being interrupted.
1103 1104 */
1104 1105 if (IPCL_IS_NONSTR(connp) && connp->conn_linger == 0)
1105 1106 goto nowait;
1106 1107
1107 1108 mutex_enter(&tcp->tcp_closelock);
1108 1109 while (!tcp->tcp_closed) {
1109 1110 if (!cv_wait_sig(&tcp->tcp_closecv, &tcp->tcp_closelock)) {
1110 1111 /*
1111 1112 * The cv_wait_sig() was interrupted. We now do the
1112 1113 * following:
1113 1114 *
1114 1115 * 1) If the endpoint was lingering, we allow this
1115 1116 * to be interrupted by cancelling the linger timeout
1116 1117 * and closing normally.
1117 1118 *
1118 1119 * 2) Revert to calling cv_wait()
1119 1120 *
1120 1121 * We revert to using cv_wait() to avoid an
1121 1122 * infinite loop which can occur if the calling
1122 1123 * thread is higher priority than the squeue worker
1123 1124 * thread and is bound to the same cpu.
1124 1125 */
1125 1126 if (connp->conn_linger && connp->conn_lingertime > 0) {
1126 1127 mutex_exit(&tcp->tcp_closelock);
1127 1128 /* Entering squeue, bump ref count. */
1128 1129 CONN_INC_REF(connp);
1129 1130 bp = allocb_wait(0, BPRI_HI, STR_NOSIG, NULL);
1130 1131 SQUEUE_ENTER_ONE(connp->conn_sqp, bp,
1131 1132 tcp_linger_interrupted, connp, NULL,
1132 1133 tcp_squeue_flag, SQTAG_IP_TCP_CLOSE);
1133 1134 mutex_enter(&tcp->tcp_closelock);
1134 1135 }
1135 1136 break;
1136 1137 }
1137 1138 }
1138 1139 while (!tcp->tcp_closed)
1139 1140 cv_wait(&tcp->tcp_closecv, &tcp->tcp_closelock);
1140 1141 mutex_exit(&tcp->tcp_closelock);
1141 1142
1142 1143 /*
1143 1144 * In the case of listener streams that have eagers in the q or q0
1144 1145 * we wait for the eagers to drop their reference to us. conn_rq and
1145 1146 * conn_wq of the eagers point to our queues. By waiting for the
1146 1147 * refcnt to drop to 1, we are sure that the eagers have cleaned
1147 1148 * up their queue pointers and also dropped their references to us.
1148 1149 *
1149 1150 * For non-STREAMS sockets we do not have to wait here; the
1150 1151 * listener will instead make a su_closed upcall when the last
1151 1152 * reference is dropped.
1152 1153 */
1153 1154 if (tcp->tcp_wait_for_eagers && !IPCL_IS_NONSTR(connp)) {
1154 1155 mutex_enter(&connp->conn_lock);
1155 1156 while (connp->conn_ref != 1) {
1156 1157 cv_wait(&connp->conn_cv, &connp->conn_lock);
1157 1158 }
1158 1159 mutex_exit(&connp->conn_lock);
1159 1160 }
1160 1161
1161 1162 nowait:
1162 1163 connp->conn_cpid = NOPID;
1163 1164 }
1164 1165
1165 1166 /*
1166 1167 * Called by tcp_close() routine via squeue when lingering is
1167 1168 * interrupted by a signal.
1168 1169 */
1169 1170
1170 1171 /* ARGSUSED */
1171 1172 static void
1172 1173 tcp_linger_interrupted(void *arg, mblk_t *mp, void *arg2, ip_recv_attr_t *dummy)
1173 1174 {
1174 1175 conn_t *connp = (conn_t *)arg;
1175 1176 tcp_t *tcp = connp->conn_tcp;
1176 1177
1177 1178 freeb(mp);
1178 1179 if (tcp->tcp_linger_tid != 0 &&
1179 1180 TCP_TIMER_CANCEL(tcp, tcp->tcp_linger_tid) >= 0) {
1180 1181 tcp_stop_lingering(tcp);
1181 1182 tcp->tcp_client_errno = EINTR;
1182 1183 }
1183 1184 }
1184 1185
1185 1186 /*
1186 1187 * Clean up the b_next and b_prev fields of every mblk pointed at by *mpp.
1187 1188 * Some stream heads get upset if they see these later on as anything but NULL.
1188 1189 */
1189 1190 void
1190 1191 tcp_close_mpp(mblk_t **mpp)
1191 1192 {
1192 1193 mblk_t *mp;
1193 1194
1194 1195 if ((mp = *mpp) != NULL) {
1195 1196 do {
1196 1197 mp->b_next = NULL;
1197 1198 mp->b_prev = NULL;
1198 1199 } while ((mp = mp->b_cont) != NULL);
1199 1200
1200 1201 mp = *mpp;
1201 1202 *mpp = NULL;
1202 1203 freemsg(mp);
1203 1204 }
1204 1205 }
1205 1206
1206 1207 /* Do detached close. */
1207 1208 void
1208 1209 tcp_close_detached(tcp_t *tcp)
1209 1210 {
1210 1211 if (tcp->tcp_fused)
1211 1212 tcp_unfuse(tcp);
1212 1213
1213 1214 /*
1214 1215 * Clustering code serializes TCP disconnect callbacks and
1215 1216 * cluster tcp list walks by blocking a TCP disconnect callback
1216 1217 * if a cluster tcp list walk is in progress. This ensures
1217 1218 * accurate accounting of TCPs in the cluster code even though
1218 1219 * the TCP list walk itself is not atomic.
1219 1220 */
1220 1221 tcp_closei_local(tcp);
1221 1222 CONN_DEC_REF(tcp->tcp_connp);
1222 1223 }
1223 1224
1224 1225 /*
1225 1226 * The tcp_t is going away. Remove it from all lists and set it
1226 1227 * to TCPS_CLOSED. The freeing up of memory is deferred until
1227 1228 * tcp_inactive. This is needed since a thread in tcp_rput might have
1228 1229 * done a CONN_INC_REF on this structure before it was removed from the
1229 1230 * hashes.
1230 1231 */
1231 1232 void
1232 1233 tcp_closei_local(tcp_t *tcp)
1233 1234 {
1234 1235 conn_t *connp = tcp->tcp_connp;
1235 1236 tcp_stack_t *tcps = tcp->tcp_tcps;
1236 1237 int32_t oldstate;
1237 1238
1238 1239 if (!TCP_IS_SOCKET(tcp))
1239 1240 tcp_acceptor_hash_remove(tcp);
1240 1241
1241 1242 TCPS_UPDATE_MIB(tcps, tcpHCInSegs, tcp->tcp_ibsegs);
1242 1243 tcp->tcp_ibsegs = 0;
1243 1244 TCPS_UPDATE_MIB(tcps, tcpHCOutSegs, tcp->tcp_obsegs);
1244 1245 tcp->tcp_obsegs = 0;
1245 1246
1246 1247 /*
1247 1248 * This can be called via tcp_time_wait_processing() if TCP gets a
1248 1249 * SYN with sequence number outside the TIME-WAIT connection's
1249 1250	 * window. So we need to check for the TIME-WAIT state here, as the
1250 1251	 * connection counter is already decremented. See the SET_TIME_WAIT()
1251 1252	 * macro.
1252 1253 */
1253 1254 if (tcp->tcp_state >= TCPS_ESTABLISHED &&
1254 1255 tcp->tcp_state < TCPS_TIME_WAIT) {
1255 1256 TCPS_CONN_DEC(tcps);
1256 1257 }
1257 1258
1258 1259 /*
1259 1260 * If we are an eager connection hanging off a listener that
1260 1261	 * hasn't formally accepted the connection yet, get off its
1261 1262	 * list and blow off any data that we have accumulated.
1262 1263 */
1263 1264 if (tcp->tcp_listener != NULL) {
1264 1265 tcp_t *listener = tcp->tcp_listener;
1265 1266 mutex_enter(&listener->tcp_eager_lock);
1266 1267 /*
1267 1268	 * tcp_tconnind_started == B_TRUE means that the
1268 1269	 * conn_ind has already gone to the listener. At
1269 1270	 * this point, the eager will be closed but we
1270 1271	 * leave it in the listener's eager list so that
1271 1272	 * if the listener decides to close without doing
1272 1273	 * an accept, we can clean this up. In tcp_tli_accept
1273 1274	 * we take care of the case of an accept on a closed
1274 1275	 * eager.
1275 1276 */
1276 1277 if (!tcp->tcp_tconnind_started) {
1277 1278 tcp_eager_unlink(tcp);
1278 1279 mutex_exit(&listener->tcp_eager_lock);
1279 1280 /*
1280 1281 * We don't want to have any pointers to the
1281 1282 * listener queue, after we have released our
1282 1283 * reference on the listener
1283 1284 */
1284 1285 ASSERT(tcp->tcp_detached);
1285 1286 connp->conn_rq = NULL;
1286 1287 connp->conn_wq = NULL;
1287 1288 CONN_DEC_REF(listener->tcp_connp);
1288 1289 } else {
1289 1290 mutex_exit(&listener->tcp_eager_lock);
1290 1291 }
1291 1292 }
1292 1293
1293 1294 /* Stop all the timers */
1294 1295 tcp_timers_stop(tcp);
1295 1296
1296 1297 if (tcp->tcp_state == TCPS_LISTEN) {
1297 1298 if (tcp->tcp_ip_addr_cache) {
1298 1299 kmem_free((void *)tcp->tcp_ip_addr_cache,
1299 1300 IP_ADDR_CACHE_SIZE * sizeof (ipaddr_t));
1300 1301 tcp->tcp_ip_addr_cache = NULL;
1301 1302 }
1302 1303 }
1303 1304
1304 1305	 /* Decrement the listener connection counter if necessary. */
1305 1306 if (tcp->tcp_listen_cnt != NULL)
1306 1307 TCP_DECR_LISTEN_CNT(tcp);
1307 1308
1308 1309 mutex_enter(&tcp->tcp_non_sq_lock);
1309 1310 if (tcp->tcp_flow_stopped)
1310 1311 tcp_clrqfull(tcp);
1311 1312 mutex_exit(&tcp->tcp_non_sq_lock);
1312 1313
1313 1314 tcp_bind_hash_remove(tcp);
1314 1315 /*
1315 1316 * If the tcp_time_wait_collector (which runs outside the squeue)
1316 1317 * is trying to remove this tcp from the time wait list, we will
1317 1318 * block in tcp_time_wait_remove while trying to acquire the
1318 1319 * tcp_time_wait_lock. The logic in tcp_time_wait_collector also
1319 1320 * requires the ipcl_hash_remove to be ordered after the
1320 1321 * tcp_time_wait_remove for the refcnt checks to work correctly.
1321 1322 */
1322 1323 if (tcp->tcp_state == TCPS_TIME_WAIT)
1323 1324 (void) tcp_time_wait_remove(tcp, NULL);
1324 1325 CL_INET_DISCONNECT(connp);
1325 1326 ipcl_hash_remove(connp);
1326 1327 oldstate = tcp->tcp_state;
1327 1328 tcp->tcp_state = TCPS_CLOSED;
1328 1329 /* Need to probe before ixa_cleanup() is called */
1329 1330 DTRACE_TCP6(state__change, void, NULL, ip_xmit_attr_t *,
1330 1331 connp->conn_ixa, void, NULL, tcp_t *, tcp, void, NULL,
1331 1332 int32_t, oldstate);
1332 1333 ixa_cleanup(connp->conn_ixa);
1333 1334
1334 1335 /*
1335 1336 * Mark the conn as CONDEMNED
1336 1337 */
1337 1338 mutex_enter(&connp->conn_lock);
1338 1339 connp->conn_state_flags |= CONN_CONDEMNED;
1339 1340 mutex_exit(&connp->conn_lock);
1340 1341
1341 1342 ASSERT(tcp->tcp_time_wait_next == NULL);
1342 1343 ASSERT(tcp->tcp_time_wait_prev == NULL);
1343 1344 ASSERT(tcp->tcp_time_wait_expire == 0);
1344 1345
1345 1346 tcp_ipsec_cleanup(tcp);
1346 1347 }
1347 1348
1348 1349 /*
1349 1350 * tcp is dying (called from ipcl_conn_destroy and error cases).
1350 1351 * Free the tcp_t in either case.
1351 1352 */
1352 1353 void
1353 1354 tcp_free(tcp_t *tcp)
1354 1355 {
1355 1356 mblk_t *mp;
1356 1357 conn_t *connp = tcp->tcp_connp;
1357 1358
1358 1359 ASSERT(tcp != NULL);
1359 1360 ASSERT(tcp->tcp_ptpahn == NULL && tcp->tcp_acceptor_hash == NULL);
1360 1361
1361 1362 connp->conn_rq = NULL;
1362 1363 connp->conn_wq = NULL;
1363 1364
1364 1365 tcp_close_mpp(&tcp->tcp_xmit_head);
1365 1366 tcp_close_mpp(&tcp->tcp_reass_head);
1366 1367 if (tcp->tcp_rcv_list != NULL) {
1367 1368 /* Free b_next chain */
1368 1369 tcp_close_mpp(&tcp->tcp_rcv_list);
1369 1370 }
1370 1371 if ((mp = tcp->tcp_urp_mp) != NULL) {
1371 1372 freemsg(mp);
1372 1373 }
1373 1374 if ((mp = tcp->tcp_urp_mark_mp) != NULL) {
1374 1375 freemsg(mp);
1375 1376 }
1376 1377
1377 1378 if (tcp->tcp_fused_sigurg_mp != NULL) {
1378 1379 ASSERT(!IPCL_IS_NONSTR(tcp->tcp_connp));
1379 1380 freeb(tcp->tcp_fused_sigurg_mp);
1380 1381 tcp->tcp_fused_sigurg_mp = NULL;
1381 1382 }
1382 1383
1383 1384 if (tcp->tcp_ordrel_mp != NULL) {
1384 1385 ASSERT(!IPCL_IS_NONSTR(tcp->tcp_connp));
1385 1386 freeb(tcp->tcp_ordrel_mp);
1386 1387 tcp->tcp_ordrel_mp = NULL;
1387 1388 }
1388 1389
1389 1390 TCP_NOTSACK_REMOVE_ALL(tcp->tcp_notsack_list, tcp);
1390 1391 bzero(&tcp->tcp_sack_info, sizeof (tcp_sack_info_t));
1391 1392
1392 1393 if (tcp->tcp_hopopts != NULL) {
1393 1394 mi_free(tcp->tcp_hopopts);
1394 1395 tcp->tcp_hopopts = NULL;
1395 1396 tcp->tcp_hopoptslen = 0;
1396 1397 }
1397 1398 ASSERT(tcp->tcp_hopoptslen == 0);
1398 1399 if (tcp->tcp_dstopts != NULL) {
1399 1400 mi_free(tcp->tcp_dstopts);
1400 1401 tcp->tcp_dstopts = NULL;
1401 1402 tcp->tcp_dstoptslen = 0;
1402 1403 }
1403 1404 ASSERT(tcp->tcp_dstoptslen == 0);
1404 1405 if (tcp->tcp_rthdrdstopts != NULL) {
1405 1406 mi_free(tcp->tcp_rthdrdstopts);
1406 1407 tcp->tcp_rthdrdstopts = NULL;
1407 1408 tcp->tcp_rthdrdstoptslen = 0;
1408 1409 }
1409 1410 ASSERT(tcp->tcp_rthdrdstoptslen == 0);
1410 1411 if (tcp->tcp_rthdr != NULL) {
1411 1412 mi_free(tcp->tcp_rthdr);
1412 1413 tcp->tcp_rthdr = NULL;
1413 1414 tcp->tcp_rthdrlen = 0;
1414 1415 }
1415 1416 ASSERT(tcp->tcp_rthdrlen == 0);
1416 1417
1417 1418 /*
1418 1419	 * The following really blows away a union.
1419 1420	 * Since it happens to have exactly two members of identical size,
1420 1421	 * the following code is enough.
1421 1422 */
1422 1423 tcp_close_mpp(&tcp->tcp_conn.tcp_eager_conn_ind);
1423 1424
1424 1425 /*
1425 1426 * If this is a non-STREAM socket still holding on to an upper
1426 1427 * handle, release it. As a result of fallback we might also see
1427 1428 * STREAMS based conns with upper handles, in which case there is
1428 1429 * nothing to do other than clearing the field.
1429 1430 */
1430 1431 if (connp->conn_upper_handle != NULL) {
1431 1432 if (IPCL_IS_NONSTR(connp)) {
1432 1433 (*connp->conn_upcalls->su_closed)(
1433 1434 connp->conn_upper_handle);
1434 1435 tcp->tcp_detached = B_TRUE;
1435 1436 }
1436 1437 connp->conn_upper_handle = NULL;
1437 1438 connp->conn_upcalls = NULL;
1438 1439 }
1439 1440 }
1440 1441
1441 1442 /*
1442 1443 * tcp_get_conn/tcp_free_conn
1443 1444 *
1444 1445 * tcp_get_conn is used to get a clean tcp connection structure.
1445 1446 * It tries to reuse the connections put on the freelist by the
1446 1447 * time_wait_collector failing which it goes to kmem_cache. This
1447 1448 * way has two benefits compared to just allocating from and
1448 1449 * freeing to kmem_cache.
1449 1450 * 1) The time_wait_collector can free (which includes the cleanup)
1450 1451 * outside the squeue. So when the interrupt comes, we have a clean
1451 1452 * connection sitting in the freelist. Obviously, this buys us
1452 1453 * performance.
1453 1454 *
1454 1455	 * 2) Defence against DoS attacks. Allocating a tcp/conn in tcp_input_listener
1455 1456	 * has multiple disadvantages - tying up the squeue during the alloc.
1456 1457 * But allocating the conn/tcp in IP land is also not the best since
1457 1458 * we can't check the 'q' and 'q0' which are protected by squeue and
1458 1459 * blindly allocate memory which might have to be freed here if we are
1459 1460 * not allowed to accept the connection. By using the freelist and
1460 1461 * putting the conn/tcp back in freelist, we don't pay a penalty for
1461 1462 * allocating memory without checking 'q/q0' and freeing it if we can't
1462 1463 * accept the connection.
1463 1464 *
1464 1465 * Care should be taken to put the conn back in the same squeue's freelist
1465 1466	 * from which it was allocated. Best results are obtained if the conn is
1466 1467	 * allocated from the listener's squeue and freed to the same. The time
1467 1468	 * wait collector will free up the freelist if the connection ends up
1468 1469	 * sitting there for too long.
1469 1470 */
1470 1471 void *
1471 1472 tcp_get_conn(void *arg, tcp_stack_t *tcps)
1472 1473 {
1473 1474 tcp_t *tcp = NULL;
1474 1475 conn_t *connp = NULL;
1475 1476 squeue_t *sqp = (squeue_t *)arg;
1476 1477 tcp_squeue_priv_t *tcp_time_wait;
1477 1478 netstack_t *ns;
1478 1479 mblk_t *tcp_rsrv_mp = NULL;
1479 1480
1480 1481 tcp_time_wait =
1481 1482 *((tcp_squeue_priv_t **)squeue_getprivate(sqp, SQPRIVATE_TCP));
1482 1483
1483 1484 mutex_enter(&tcp_time_wait->tcp_time_wait_lock);
1484 1485 tcp = tcp_time_wait->tcp_free_list;
1485 1486 ASSERT((tcp != NULL) ^ (tcp_time_wait->tcp_free_list_cnt == 0));
1486 1487 if (tcp != NULL) {
1487 1488 tcp_time_wait->tcp_free_list = tcp->tcp_time_wait_next;
1488 1489 tcp_time_wait->tcp_free_list_cnt--;
1489 1490 mutex_exit(&tcp_time_wait->tcp_time_wait_lock);
1490 1491 tcp->tcp_time_wait_next = NULL;
1491 1492 connp = tcp->tcp_connp;
1492 1493 connp->conn_flags |= IPCL_REUSED;
1493 1494
1494 1495 ASSERT(tcp->tcp_tcps == NULL);
1495 1496 ASSERT(connp->conn_netstack == NULL);
1496 1497 ASSERT(tcp->tcp_rsrv_mp != NULL);
1497 1498 ns = tcps->tcps_netstack;
1498 1499 netstack_hold(ns);
1499 1500 connp->conn_netstack = ns;
1500 1501 connp->conn_ixa->ixa_ipst = ns->netstack_ip;
1501 1502 tcp->tcp_tcps = tcps;
1502 1503 ipcl_globalhash_insert(connp);
1503 1504
1504 1505 connp->conn_ixa->ixa_notify_cookie = tcp;
1505 1506 ASSERT(connp->conn_ixa->ixa_notify == tcp_notify);
1506 1507 connp->conn_recv = tcp_input_data;
1507 1508 ASSERT(connp->conn_recvicmp == tcp_icmp_input);
1508 1509 ASSERT(connp->conn_verifyicmp == tcp_verifyicmp);
1509 1510 return ((void *)connp);
1510 1511 }
1511 1512 mutex_exit(&tcp_time_wait->tcp_time_wait_lock);
1512 1513 /*
1513 1514 * Pre-allocate the tcp_rsrv_mp. This mblk will not be freed until
1514 1515 * this conn_t/tcp_t is freed at ipcl_conn_destroy().
1515 1516 */
1516 1517 tcp_rsrv_mp = allocb(0, BPRI_HI);
1517 1518 if (tcp_rsrv_mp == NULL)
1518 1519 return (NULL);
1519 1520
1520 1521 if ((connp = ipcl_conn_create(IPCL_TCPCONN, KM_NOSLEEP,
1521 1522 tcps->tcps_netstack)) == NULL) {
1522 1523 freeb(tcp_rsrv_mp);
1523 1524 return (NULL);
1524 1525 }
1525 1526
1526 1527 tcp = connp->conn_tcp;
1527 1528 tcp->tcp_rsrv_mp = tcp_rsrv_mp;
1528 1529 mutex_init(&tcp->tcp_rsrv_mp_lock, NULL, MUTEX_DEFAULT, NULL);
1529 1530
1530 1531 tcp->tcp_tcps = tcps;
1531 1532
1532 1533 connp->conn_recv = tcp_input_data;
1533 1534 connp->conn_recvicmp = tcp_icmp_input;
1534 1535 connp->conn_verifyicmp = tcp_verifyicmp;
1535 1536
1536 1537 /*
1537 1538 * Register tcp_notify to listen to capability changes detected by IP.
1538 1539 * This upcall is made in the context of the call to conn_ip_output
1539 1540 * thus it is inside the squeue.
1540 1541 */
1541 1542 connp->conn_ixa->ixa_notify = tcp_notify;
1542 1543 connp->conn_ixa->ixa_notify_cookie = tcp;
1543 1544
1544 1545 return ((void *)connp);
1545 1546 }
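The freelist scheme described in the block comment above reduces to a mutex-protected, singly linked, per-squeue stack of pre-cleaned objects. A minimal sketch of that pattern (generic names, not the kernel's own):

	typedef struct fobj {
		struct fobj	*fo_next;
	} fobj_t;

	/* Pop one pre-cleaned object; NULL means fall back to kmem_cache. */
	static fobj_t *
	freelist_get(kmutex_t *lock, fobj_t **headp, uint_t *cntp)
	{
		fobj_t	*fo;

		mutex_enter(lock);
		if ((fo = *headp) != NULL) {
			*headp = fo->fo_next;
			(*cntp)--;
		}
		mutex_exit(lock);
		return (fo);
	}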
1546 1547
1547 1548 /*
1548 1549 * Handle connect to IPv4 destinations, including connections for AF_INET6
1549 1550 * sockets connecting to IPv4 mapped IPv6 destinations.
1550 1551 * Returns zero if OK, a positive errno, or a negative TLI error.
1551 1552 */
1552 1553 static int
1553 1554 tcp_connect_ipv4(tcp_t *tcp, ipaddr_t *dstaddrp, in_port_t dstport,
1554 1555 uint_t srcid)
1555 1556 {
1556 1557 ipaddr_t dstaddr = *dstaddrp;
1557 1558 uint16_t lport;
1558 1559 conn_t *connp = tcp->tcp_connp;
1559 1560 tcp_stack_t *tcps = tcp->tcp_tcps;
1560 1561 int error;
1561 1562
1562 1563 ASSERT(connp->conn_ipversion == IPV4_VERSION);
1563 1564
1564 1565 /* Check for attempt to connect to INADDR_ANY */
1565 1566 if (dstaddr == INADDR_ANY) {
1566 1567 /*
1567 1568 * SunOS 4.x and 4.3 BSD allow an application
1568 1569 * to connect a TCP socket to INADDR_ANY.
1569 1570 * When they do this, the kernel picks the
1570 1571 * address of one interface and uses it
1571 1572 * instead. The kernel usually ends up
1572 1573 * picking the address of the loopback
1573 1574 * interface. This is an undocumented feature.
1574 1575 * However, we provide the same thing here
1575 1576 * in order to have source and binary
(1539 lines elided)
1576 1577 * compatibility with SunOS 4.x.
1577 1578 * Update the T_CONN_REQ (sin/sin6) since it is used to
1578 1579 * generate the T_CONN_CON.
1579 1580 */
1580 1581 dstaddr = htonl(INADDR_LOOPBACK);
1581 1582 *dstaddrp = dstaddr;
1582 1583 }
1583 1584
1584 1585 /* Handle __sin6_src_id if socket not bound to an IP address */
1585 1586 if (srcid != 0 && connp->conn_laddr_v4 == INADDR_ANY) {
1586 - ip_srcid_find_id(srcid, &connp->conn_laddr_v6,
1587 - IPCL_ZONEID(connp), tcps->tcps_netstack);
1587 + if (!ip_srcid_find_id(srcid, &connp->conn_laddr_v6,
1588 + IPCL_ZONEID(connp), B_TRUE, tcps->tcps_netstack)) {
1589  +			/* Mismatch - conn_laddr_v6 would be a v6 address. */
1590 + return (EADDRNOTAVAIL);
1591 + }
1588 1592 connp->conn_saddr_v6 = connp->conn_laddr_v6;
1589 1593 }
1590 1594
1591 1595 IN6_IPADDR_TO_V4MAPPED(dstaddr, &connp->conn_faddr_v6);
1592 1596 connp->conn_fport = dstport;
1593 1597
1594 1598 /*
1595 1599 * At this point the remote destination address and remote port fields
1596 1600 * in the tcp-four-tuple have been filled in the tcp structure. Now we
1597 1601 * have to see which state tcp was in so we can take appropriate action.
1598 1602 */
1599 1603 if (tcp->tcp_state == TCPS_IDLE) {
1600 1604 /*
1601 1605 * We support a quick connect capability here, allowing
1602 1606 		 * clients to transition directly from IDLE to SYN_SENT;
1603 1607 * tcp_bindi will pick an unused port, insert the connection
1604 1608 * in the bind hash and transition to BOUND state.
1605 1609 */
1606 1610 lport = tcp_update_next_port(tcps->tcps_next_port_to_try,
1607 1611 tcp, B_TRUE);
1608 1612 lport = tcp_bindi(tcp, lport, &connp->conn_laddr_v6, 0, B_TRUE,
1609 1613 B_FALSE, B_FALSE);
1610 1614 if (lport == 0)
1611 1615 return (-TNOADDR);
1612 1616 }
1613 1617
1614 1618 /*
1615 1619 * Lookup the route to determine a source address and the uinfo.
1616 1620 * Setup TCP parameters based on the metrics/DCE.
1617 1621 */
1618 1622 error = tcp_set_destination(tcp);
1619 1623 if (error != 0)
1620 1624 return (error);
1621 1625
1622 1626 /*
1623 1627 * Don't let an endpoint connect to itself.
1624 1628 */
1625 1629 if (connp->conn_faddr_v4 == connp->conn_laddr_v4 &&
1626 1630 connp->conn_fport == connp->conn_lport)
1627 1631 return (-TBADADDR);
1628 1632
1629 1633 tcp->tcp_state = TCPS_SYN_SENT;
1630 1634
1631 1635 return (ipcl_conn_insert_v4(connp));
1632 1636 }
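For reference, the caller contract this fix establishes: ip_srcid_find_id() now returns a boolean saying whether the source id resolved to an address of the family the caller can use (the added B_TRUE/B_FALSE argument appears to select the IPv4-mapped case), and callers must fail with EADDRNOTAVAIL rather than silently adopt an address of the wrong family. A hedged sketch of the pattern; laddr_unspecified and want_v4mapped are placeholders, not kernel identifiers:

	if (srcid != 0 && laddr_unspecified) {
		if (!ip_srcid_find_id(srcid, &connp->conn_laddr_v6,
		    IPCL_ZONEID(connp), want_v4mapped,
		    tcps->tcps_netstack)) {
			/* The id maps to an address of the wrong family. */
			return (EADDRNOTAVAIL);
		}
		connp->conn_saddr_v6 = connp->conn_laddr_v6;
	}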
1633 1637
1634 1638 /*
1635 1639 * Handle connect to IPv6 destinations.
1636 1640 * Returns zero if OK, a positive errno, or a negative TLI error.
1637 1641 */
1638 1642 static int
1639 1643 tcp_connect_ipv6(tcp_t *tcp, in6_addr_t *dstaddrp, in_port_t dstport,
1640 1644 uint32_t flowinfo, uint_t srcid, uint32_t scope_id)
1641 1645 {
1642 1646 uint16_t lport;
1643 1647 conn_t *connp = tcp->tcp_connp;
1644 1648 tcp_stack_t *tcps = tcp->tcp_tcps;
1645 1649 int error;
1646 1650
1647 1651 ASSERT(connp->conn_family == AF_INET6);
1648 1652
1649 1653 /*
1650 1654 * If we're here, it means that the destination address is a native
1651 1655 * IPv6 address. Return an error if conn_ipversion is not IPv6. A
1652 1656 * reason why it might not be IPv6 is if the socket was bound to an
1653 1657 * IPv4-mapped IPv6 address.
1654 1658 */
1655 1659 if (connp->conn_ipversion != IPV6_VERSION)
1656 1660 return (-TBADADDR);
1657 1661
(60 lines elided)
1658 1662 /*
1659 1663 * Interpret a zero destination to mean loopback.
1660 1664 * Update the T_CONN_REQ (sin/sin6) since it is used to
1661 1665 * generate the T_CONN_CON.
1662 1666 */
1663 1667 if (IN6_IS_ADDR_UNSPECIFIED(dstaddrp))
1664 1668 *dstaddrp = ipv6_loopback;
1665 1669
1666 1670 /* Handle __sin6_src_id if socket not bound to an IP address */
1667 1671 if (srcid != 0 && IN6_IS_ADDR_UNSPECIFIED(&connp->conn_laddr_v6)) {
1668 - ip_srcid_find_id(srcid, &connp->conn_laddr_v6,
1669 - IPCL_ZONEID(connp), tcps->tcps_netstack);
1672 + if (!ip_srcid_find_id(srcid, &connp->conn_laddr_v6,
1673 + IPCL_ZONEID(connp), B_FALSE, tcps->tcps_netstack)) {
1674 + /* Mismatch - conn_laddr_v6 would be v4-mapped. */
1675 + return (EADDRNOTAVAIL);
1676 + }
1670 1677 connp->conn_saddr_v6 = connp->conn_laddr_v6;
1671 1678 }
1672 1679
1673 1680 /*
1674 1681 * Take care of the scope_id now.
1675 1682 */
1676 1683 if (scope_id != 0 && IN6_IS_ADDR_LINKSCOPE(dstaddrp)) {
1677 1684 connp->conn_ixa->ixa_flags |= IXAF_SCOPEID_SET;
1678 1685 connp->conn_ixa->ixa_scopeid = scope_id;
1679 1686 } else {
1680 1687 connp->conn_ixa->ixa_flags &= ~IXAF_SCOPEID_SET;
1681 1688 }
1682 1689
1683 1690 connp->conn_flowinfo = flowinfo;
1684 1691 connp->conn_faddr_v6 = *dstaddrp;
1685 1692 connp->conn_fport = dstport;
1686 1693
1687 1694 /*
1688 1695 * At this point the remote destination address and remote port fields
1689 1696 * in the tcp-four-tuple have been filled in the tcp structure. Now we
1690 1697 * have to see which state tcp was in so we can take appropriate action.
1691 1698 */
1692 1699 if (tcp->tcp_state == TCPS_IDLE) {
1693 1700 /*
1694 1701 * We support a quick connect capability here, allowing
1695 1702 		 * clients to transition directly from IDLE to SYN_SENT;
1696 1703 * tcp_bindi will pick an unused port, insert the connection
1697 1704 * in the bind hash and transition to BOUND state.
1698 1705 */
1699 1706 lport = tcp_update_next_port(tcps->tcps_next_port_to_try,
1700 1707 tcp, B_TRUE);
1701 1708 lport = tcp_bindi(tcp, lport, &connp->conn_laddr_v6, 0, B_TRUE,
1702 1709 B_FALSE, B_FALSE);
1703 1710 if (lport == 0)
1704 1711 return (-TNOADDR);
1705 1712 }
1706 1713
1707 1714 /*
1708 1715 * Lookup the route to determine a source address and the uinfo.
1709 1716 * Setup TCP parameters based on the metrics/DCE.
1710 1717 */
1711 1718 error = tcp_set_destination(tcp);
1712 1719 if (error != 0)
1713 1720 return (error);
1714 1721
1715 1722 /*
1716 1723 * Don't let an endpoint connect to itself.
1717 1724 */
1718 1725 if (IN6_ARE_ADDR_EQUAL(&connp->conn_faddr_v6, &connp->conn_laddr_v6) &&
1719 1726 connp->conn_fport == connp->conn_lport)
1720 1727 return (-TBADADDR);
1721 1728
1722 1729 tcp->tcp_state = TCPS_SYN_SENT;
1723 1730
1724 1731 return (ipcl_conn_insert_v6(connp));
1725 1732 }
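The scope_id block in tcp_connect_ipv6() consumes what an application supplies in sin6_scope_id when connecting to a link-local destination. A user-level sketch with hypothetical values ("net0" is an assumed interface name):

	#include <string.h>
	#include <sys/socket.h>
	#include <netinet/in.h>
	#include <arpa/inet.h>
	#include <net/if.h>

	struct sockaddr_in6 sin6;

	(void) memset(&sin6, 0, sizeof (sin6));
	sin6.sin6_family = AF_INET6;
	sin6.sin6_port = htons(80);
	(void) inet_pton(AF_INET6, "fe80::1", &sin6.sin6_addr);
	/* Link-scope destination: name the outgoing interface. */
	sin6.sin6_scope_id = if_nametoindex("net0");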
1726 1733
1727 1734 /*
1728 1735 * Disconnect
1729 1736 * Note that unlike other functions this returns a positive tli error
1730 1737 * when it fails; it never returns an errno.
1731 1738 */
1732 1739 static int
1733 1740 tcp_disconnect_common(tcp_t *tcp, t_scalar_t seqnum)
1734 1741 {
1735 1742 conn_t *lconnp;
1736 1743 tcp_stack_t *tcps = tcp->tcp_tcps;
1737 1744 conn_t *connp = tcp->tcp_connp;
1738 1745
1739 1746 /*
1740 1747 * Right now, upper modules pass down a T_DISCON_REQ to TCP,
1741 1748 * when the stream is in BOUND state. Do not send a reset,
1742 1749 * since the destination IP address is not valid, and it can
1743 1750 * be the initialized value of all zeros (broadcast address).
1744 1751 */
1745 1752 if (tcp->tcp_state <= TCPS_BOUND) {
1746 1753 if (connp->conn_debug) {
1747 1754 (void) strlog(TCP_MOD_ID, 0, 1, SL_ERROR|SL_TRACE,
1748 1755 "tcp_disconnect: bad state, %d", tcp->tcp_state);
1749 1756 }
1750 1757 return (TOUTSTATE);
1751 1758 } else if (tcp->tcp_state >= TCPS_ESTABLISHED) {
1752 1759 TCPS_CONN_DEC(tcps);
1753 1760 }
1754 1761
1755 1762 if (seqnum == -1 || tcp->tcp_conn_req_max == 0) {
1756 1763
1757 1764 /*
1758 1765 * According to TPI, for non-listeners, ignore seqnum
1759 1766 * and disconnect.
1760 1767 		 * The following interpretation of a -1 seqnum is historical
1761 1768 		 * and arguably implied by TPI (TPI only states that for
1762 1769 		 * T_CONN_IND, a valid seqnum should not be -1).
1763 1770 *
1764 1771 * -1 means disconnect everything
1765 1772 * regardless even on a listener.
1766 1773 */
1767 1774
1768 1775 int old_state = tcp->tcp_state;
1769 1776 ip_stack_t *ipst = tcps->tcps_netstack->netstack_ip;
1770 1777
1771 1778 /*
1772 1779 * The connection can't be on the tcp_time_wait_head list
1773 1780 * since it is not detached.
1774 1781 */
1775 1782 ASSERT(tcp->tcp_time_wait_next == NULL);
1776 1783 ASSERT(tcp->tcp_time_wait_prev == NULL);
1777 1784 ASSERT(tcp->tcp_time_wait_expire == 0);
1778 1785 /*
1779 1786 * If it used to be a listener, check to make sure no one else
1780 1787 * has taken the port before switching back to LISTEN state.
1781 1788 */
1782 1789 if (connp->conn_ipversion == IPV4_VERSION) {
1783 1790 lconnp = ipcl_lookup_listener_v4(connp->conn_lport,
1784 1791 connp->conn_laddr_v4, IPCL_ZONEID(connp), ipst);
1785 1792 } else {
1786 1793 uint_t ifindex = 0;
1787 1794
1788 1795 if (connp->conn_ixa->ixa_flags & IXAF_SCOPEID_SET)
1789 1796 ifindex = connp->conn_ixa->ixa_scopeid;
1790 1797
1791 1798 /* Allow conn_bound_if listeners? */
1792 1799 lconnp = ipcl_lookup_listener_v6(connp->conn_lport,
1793 1800 &connp->conn_laddr_v6, ifindex, IPCL_ZONEID(connp),
1794 1801 ipst);
1795 1802 }
1796 1803 if (tcp->tcp_conn_req_max && lconnp == NULL) {
1797 1804 tcp->tcp_state = TCPS_LISTEN;
1798 1805 DTRACE_TCP6(state__change, void, NULL, ip_xmit_attr_t *,
1799 1806 connp->conn_ixa, void, NULL, tcp_t *, tcp, void,
1800 1807 NULL, int32_t, old_state);
1801 1808 } else if (old_state > TCPS_BOUND) {
1802 1809 tcp->tcp_conn_req_max = 0;
1803 1810 tcp->tcp_state = TCPS_BOUND;
1804 1811 DTRACE_TCP6(state__change, void, NULL, ip_xmit_attr_t *,
1805 1812 connp->conn_ixa, void, NULL, tcp_t *, tcp, void,
1806 1813 NULL, int32_t, old_state);
1807 1814
1808 1815 /*
1809 1816 * If this end point is not going to become a listener,
1810 1817 * decrement the listener connection count if
1811 1818 * necessary. Note that we do not do this if it is
1812 1819 			 * going to be a listener (the above if case) since
1813 1820 * then it may remove the counter struct.
1814 1821 */
1815 1822 if (tcp->tcp_listen_cnt != NULL)
1816 1823 TCP_DECR_LISTEN_CNT(tcp);
1817 1824 }
1818 1825 if (lconnp != NULL)
1819 1826 CONN_DEC_REF(lconnp);
1820 1827 switch (old_state) {
1821 1828 case TCPS_SYN_SENT:
1822 1829 case TCPS_SYN_RCVD:
1823 1830 TCPS_BUMP_MIB(tcps, tcpAttemptFails);
1824 1831 break;
1825 1832 case TCPS_ESTABLISHED:
1826 1833 case TCPS_CLOSE_WAIT:
1827 1834 TCPS_BUMP_MIB(tcps, tcpEstabResets);
1828 1835 break;
1829 1836 }
1830 1837
1831 1838 if (tcp->tcp_fused)
1832 1839 tcp_unfuse(tcp);
1833 1840
1834 1841 mutex_enter(&tcp->tcp_eager_lock);
1835 1842 if ((tcp->tcp_conn_req_cnt_q0 != 0) ||
1836 1843 (tcp->tcp_conn_req_cnt_q != 0)) {
1837 1844 tcp_eager_cleanup(tcp, 0);
1838 1845 }
1839 1846 mutex_exit(&tcp->tcp_eager_lock);
1840 1847
1841 1848 tcp_xmit_ctl("tcp_disconnect", tcp, tcp->tcp_snxt,
1842 1849 tcp->tcp_rnxt, TH_RST | TH_ACK);
1843 1850
1844 1851 tcp_reinit(tcp);
1845 1852
1846 1853 return (0);
1847 1854 } else if (!tcp_eager_blowoff(tcp, seqnum)) {
1848 1855 return (TBADSEQ);
1849 1856 }
1850 1857 return (0);
1851 1858 }
1852 1859
1853 1860 /*
1854 1861 * Our client hereby directs us to reject the connection request
1855 1862 * that tcp_input_listener() marked with 'seqnum'. Rejection consists
1856 1863 * of sending the appropriate RST, not an ICMP error.
1857 1864 */
1858 1865 void
1859 1866 tcp_disconnect(tcp_t *tcp, mblk_t *mp)
1860 1867 {
1861 1868 t_scalar_t seqnum;
1862 1869 int error;
1863 1870 conn_t *connp = tcp->tcp_connp;
1864 1871
1865 1872 ASSERT((uintptr_t)(mp->b_wptr - mp->b_rptr) <= (uintptr_t)INT_MAX);
1866 1873 if ((mp->b_wptr - mp->b_rptr) < sizeof (struct T_discon_req)) {
1867 1874 tcp_err_ack(tcp, mp, TPROTO, 0);
1868 1875 return;
1869 1876 }
1870 1877 seqnum = ((struct T_discon_req *)mp->b_rptr)->SEQ_number;
1871 1878 error = tcp_disconnect_common(tcp, seqnum);
1872 1879 if (error != 0)
1873 1880 tcp_err_ack(tcp, mp, error, 0);
1874 1881 else {
1875 1882 if (tcp->tcp_state >= TCPS_ESTABLISHED) {
1876 1883 /* Send M_FLUSH according to TPI */
1877 1884 (void) putnextctl1(connp->conn_rq, M_FLUSH, FLUSHRW);
1878 1885 }
1879 1886 mp = mi_tpi_ok_ack_alloc(mp);
1880 1887 if (mp != NULL)
1881 1888 putnext(connp->conn_rq, mp);
1882 1889 }
1883 1890 }
1884 1891
1885 1892 /*
1886 1893 * Handle reinitialization of a tcp structure.
1887 1894 * Maintain "binding state" resetting the state to BOUND, LISTEN, or IDLE.
1888 1895 */
1889 1896 static void
1890 1897 tcp_reinit(tcp_t *tcp)
1891 1898 {
1892 1899 mblk_t *mp;
1893 1900 tcp_stack_t *tcps = tcp->tcp_tcps;
1894 1901 conn_t *connp = tcp->tcp_connp;
1895 1902 int32_t oldstate;
1896 1903
1897 1904 /* tcp_reinit should never be called for detached tcp_t's */
1898 1905 ASSERT(tcp->tcp_listener == NULL);
1899 1906 ASSERT((connp->conn_family == AF_INET &&
1900 1907 connp->conn_ipversion == IPV4_VERSION) ||
1901 1908 (connp->conn_family == AF_INET6 &&
1902 1909 (connp->conn_ipversion == IPV4_VERSION ||
1903 1910 connp->conn_ipversion == IPV6_VERSION)));
1904 1911
1905 1912 /* Cancel outstanding timers */
1906 1913 tcp_timers_stop(tcp);
1907 1914
1908 1915 /*
1909 1916 * Reset everything in the state vector, after updating global
1910 1917 * MIB data from instance counters.
1911 1918 */
1912 1919 TCPS_UPDATE_MIB(tcps, tcpHCInSegs, tcp->tcp_ibsegs);
1913 1920 tcp->tcp_ibsegs = 0;
1914 1921 TCPS_UPDATE_MIB(tcps, tcpHCOutSegs, tcp->tcp_obsegs);
1915 1922 tcp->tcp_obsegs = 0;
1916 1923
1917 1924 tcp_close_mpp(&tcp->tcp_xmit_head);
1918 1925 if (tcp->tcp_snd_zcopy_aware)
1919 1926 tcp_zcopy_notify(tcp);
1920 1927 tcp->tcp_xmit_last = tcp->tcp_xmit_tail = NULL;
1921 1928 tcp->tcp_unsent = tcp->tcp_xmit_tail_unsent = 0;
1922 1929 mutex_enter(&tcp->tcp_non_sq_lock);
1923 1930 if (tcp->tcp_flow_stopped &&
1924 1931 TCP_UNSENT_BYTES(tcp) <= connp->conn_sndlowat) {
1925 1932 tcp_clrqfull(tcp);
1926 1933 }
1927 1934 mutex_exit(&tcp->tcp_non_sq_lock);
1928 1935 tcp_close_mpp(&tcp->tcp_reass_head);
1929 1936 tcp->tcp_reass_tail = NULL;
1930 1937 if (tcp->tcp_rcv_list != NULL) {
1931 1938 /* Free b_next chain */
1932 1939 tcp_close_mpp(&tcp->tcp_rcv_list);
1933 1940 tcp->tcp_rcv_last_head = NULL;
1934 1941 tcp->tcp_rcv_last_tail = NULL;
1935 1942 tcp->tcp_rcv_cnt = 0;
1936 1943 }
1937 1944 tcp->tcp_rcv_last_tail = NULL;
1938 1945
1939 1946 if ((mp = tcp->tcp_urp_mp) != NULL) {
1940 1947 freemsg(mp);
1941 1948 tcp->tcp_urp_mp = NULL;
1942 1949 }
1943 1950 if ((mp = tcp->tcp_urp_mark_mp) != NULL) {
1944 1951 freemsg(mp);
1945 1952 tcp->tcp_urp_mark_mp = NULL;
1946 1953 }
1947 1954 if (tcp->tcp_fused_sigurg_mp != NULL) {
1948 1955 ASSERT(!IPCL_IS_NONSTR(tcp->tcp_connp));
1949 1956 freeb(tcp->tcp_fused_sigurg_mp);
1950 1957 tcp->tcp_fused_sigurg_mp = NULL;
1951 1958 }
1952 1959 if (tcp->tcp_ordrel_mp != NULL) {
1953 1960 ASSERT(!IPCL_IS_NONSTR(tcp->tcp_connp));
1954 1961 freeb(tcp->tcp_ordrel_mp);
1955 1962 tcp->tcp_ordrel_mp = NULL;
1956 1963 }
1957 1964
1958 1965 /*
1959 1966 * Following is a union with two members which are
1960 1967 * identical types and size so the following cleanup
1961 1968 * is enough.
1962 1969 */
1963 1970 tcp_close_mpp(&tcp->tcp_conn.tcp_eager_conn_ind);
1964 1971
1965 1972 CL_INET_DISCONNECT(connp);
1966 1973
1967 1974 /*
1968 1975 * The connection can't be on the tcp_time_wait_head list
1969 1976 * since it is not detached.
1970 1977 */
1971 1978 ASSERT(tcp->tcp_time_wait_next == NULL);
1972 1979 ASSERT(tcp->tcp_time_wait_prev == NULL);
1973 1980 ASSERT(tcp->tcp_time_wait_expire == 0);
1974 1981
1975 1982 /*
1976 1983 * Reset/preserve other values
1977 1984 */
1978 1985 tcp_reinit_values(tcp);
1979 1986 ipcl_hash_remove(connp);
1980 1987 /* Note that ixa_cred gets cleared in ixa_cleanup */
1981 1988 ixa_cleanup(connp->conn_ixa);
1982 1989 tcp_ipsec_cleanup(tcp);
1983 1990
1984 1991 connp->conn_laddr_v6 = connp->conn_bound_addr_v6;
1985 1992 connp->conn_saddr_v6 = connp->conn_bound_addr_v6;
1986 1993 oldstate = tcp->tcp_state;
1987 1994
1988 1995 if (tcp->tcp_conn_req_max != 0) {
1989 1996 /*
1990 1997 * This is the case when a TLI program uses the same
1991 1998 * transport end point to accept a connection. This
1992 1999 * makes the TCP both a listener and acceptor. When
1993 2000 * this connection is closed, we need to set the state
1994 2001 * back to TCPS_LISTEN. Make sure that the eager list
1995 2002 * is reinitialized.
1996 2003 *
1997 2004 		 * Note that this stream is still bound to the four-
1998 2005 		 * tuple of the previous connection in IP. If a new
1999 2006 * SYN with different foreign address comes in, IP will
2000 2007 * not find it and will send it to the global queue. In
2001 2008 * the global queue, TCP will do a tcp_lookup_listener()
2002 2009 * to find this stream. This works because this stream
2003 2010 * is only removed from connected hash.
2004 2011 *
2005 2012 */
2006 2013 tcp->tcp_state = TCPS_LISTEN;
2007 2014 tcp->tcp_eager_next_q0 = tcp->tcp_eager_prev_q0 = tcp;
2008 2015 tcp->tcp_eager_next_drop_q0 = tcp;
2009 2016 tcp->tcp_eager_prev_drop_q0 = tcp;
2010 2017 /*
2011 2018 * Initially set conn_recv to tcp_input_listener_unbound to try
2012 2019 * to pick a good squeue for the listener when the first SYN
2013 2020 * arrives. tcp_input_listener_unbound sets it to
2014 2021 * tcp_input_listener on that first SYN.
2015 2022 */
2016 2023 connp->conn_recv = tcp_input_listener_unbound;
2017 2024
2018 2025 connp->conn_proto = IPPROTO_TCP;
2019 2026 connp->conn_faddr_v6 = ipv6_all_zeros;
2020 2027 connp->conn_fport = 0;
2021 2028
2022 2029 (void) ipcl_bind_insert(connp);
2023 2030 } else {
2024 2031 tcp->tcp_state = TCPS_BOUND;
2025 2032 }
2026 2033
2027 2034 /*
2028 2035 * Initialize to default values
2029 2036 */
2030 2037 tcp_init_values(tcp, NULL);
2031 2038
2032 2039 DTRACE_TCP6(state__change, void, NULL, ip_xmit_attr_t *,
2033 2040 connp->conn_ixa, void, NULL, tcp_t *, tcp, void, NULL,
2034 2041 int32_t, oldstate);
2035 2042
2036 2043 ASSERT(tcp->tcp_ptpbhn != NULL);
2037 2044 tcp->tcp_rwnd = connp->conn_rcvbuf;
2038 2045 tcp->tcp_mss = connp->conn_ipversion != IPV4_VERSION ?
2039 2046 tcps->tcps_mss_def_ipv6 : tcps->tcps_mss_def_ipv4;
2040 2047 }
2041 2048
2042 2049 /*
2043 2050  * Force values to zero that need to be zero.
2044 2051  * Do not touch values associated with the BOUND or LISTEN state
2045 2052 * since the connection will end up in that state after the reinit.
2046 2053 * NOTE: tcp_reinit_values MUST have a line for each field in the tcp_t
2047 2054 * structure!
2048 2055 */
2049 2056 static void
2050 2057 tcp_reinit_values(tcp)
2051 2058 tcp_t *tcp;
2052 2059 {
2053 2060 tcp_stack_t *tcps = tcp->tcp_tcps;
2054 2061 conn_t *connp = tcp->tcp_connp;
2055 2062
2056 2063 #ifndef lint
2057 2064 #define DONTCARE(x)
2058 2065 #define PRESERVE(x)
2059 2066 #else
2060 2067 #define DONTCARE(x) ((x) = (x))
2061 2068 #define PRESERVE(x) ((x) = (x))
2062 2069 #endif /* lint */
2063 2070
2064 2071 PRESERVE(tcp->tcp_bind_hash_port);
2065 2072 PRESERVE(tcp->tcp_bind_hash);
2066 2073 PRESERVE(tcp->tcp_ptpbhn);
2067 2074 PRESERVE(tcp->tcp_acceptor_hash);
2068 2075 PRESERVE(tcp->tcp_ptpahn);
2069 2076
2070 2077 /* Should be ASSERT NULL on these with new code! */
2071 2078 ASSERT(tcp->tcp_time_wait_next == NULL);
2072 2079 ASSERT(tcp->tcp_time_wait_prev == NULL);
2073 2080 ASSERT(tcp->tcp_time_wait_expire == 0);
2074 2081 PRESERVE(tcp->tcp_state);
2075 2082 PRESERVE(connp->conn_rq);
2076 2083 PRESERVE(connp->conn_wq);
2077 2084
2078 2085 ASSERT(tcp->tcp_xmit_head == NULL);
2079 2086 ASSERT(tcp->tcp_xmit_last == NULL);
2080 2087 ASSERT(tcp->tcp_unsent == 0);
2081 2088 ASSERT(tcp->tcp_xmit_tail == NULL);
2082 2089 ASSERT(tcp->tcp_xmit_tail_unsent == 0);
2083 2090
2084 2091 tcp->tcp_snxt = 0; /* Displayed in mib */
2085 2092 tcp->tcp_suna = 0; /* Displayed in mib */
2086 2093 tcp->tcp_swnd = 0;
2087 2094 DONTCARE(tcp->tcp_cwnd); /* Init in tcp_process_options */
2088 2095
2089 2096 ASSERT(tcp->tcp_ibsegs == 0);
2090 2097 ASSERT(tcp->tcp_obsegs == 0);
2091 2098
2092 2099 if (connp->conn_ht_iphc != NULL) {
2093 2100 kmem_free(connp->conn_ht_iphc, connp->conn_ht_iphc_allocated);
2094 2101 connp->conn_ht_iphc = NULL;
2095 2102 connp->conn_ht_iphc_allocated = 0;
2096 2103 connp->conn_ht_iphc_len = 0;
2097 2104 connp->conn_ht_ulp = NULL;
2098 2105 connp->conn_ht_ulp_len = 0;
2099 2106 tcp->tcp_ipha = NULL;
2100 2107 tcp->tcp_ip6h = NULL;
2101 2108 tcp->tcp_tcpha = NULL;
2102 2109 }
2103 2110
2104 2111 /* We clear any IP_OPTIONS and extension headers */
2105 2112 ip_pkt_free(&connp->conn_xmit_ipp);
2106 2113
2107 2114 DONTCARE(tcp->tcp_naglim); /* Init in tcp_init_values */
2108 2115 DONTCARE(tcp->tcp_ipha);
2109 2116 DONTCARE(tcp->tcp_ip6h);
2110 2117 DONTCARE(tcp->tcp_tcpha);
2111 2118 tcp->tcp_valid_bits = 0;
2112 2119
2113 2120 DONTCARE(tcp->tcp_timer_backoff); /* Init in tcp_init_values */
2114 2121 DONTCARE(tcp->tcp_last_recv_time); /* Init in tcp_init_values */
2115 2122 tcp->tcp_last_rcv_lbolt = 0;
2116 2123
2117 2124 tcp->tcp_init_cwnd = 0;
2118 2125
2119 2126 tcp->tcp_urp_last_valid = 0;
2120 2127 tcp->tcp_hard_binding = 0;
2121 2128
2122 2129 tcp->tcp_fin_acked = 0;
2123 2130 tcp->tcp_fin_rcvd = 0;
2124 2131 tcp->tcp_fin_sent = 0;
2125 2132 tcp->tcp_ordrel_done = 0;
2126 2133
2127 2134 tcp->tcp_detached = 0;
2128 2135
2129 2136 tcp->tcp_snd_ws_ok = B_FALSE;
2130 2137 tcp->tcp_snd_ts_ok = B_FALSE;
2131 2138 tcp->tcp_zero_win_probe = 0;
2132 2139
2133 2140 tcp->tcp_loopback = 0;
2134 2141 tcp->tcp_localnet = 0;
2135 2142 tcp->tcp_syn_defense = 0;
2136 2143 tcp->tcp_set_timer = 0;
2137 2144
2138 2145 tcp->tcp_active_open = 0;
2139 2146 tcp->tcp_rexmit = B_FALSE;
2140 2147 tcp->tcp_xmit_zc_clean = B_FALSE;
2141 2148
2142 2149 tcp->tcp_snd_sack_ok = B_FALSE;
2143 2150 tcp->tcp_hwcksum = B_FALSE;
2144 2151
2145 2152 DONTCARE(tcp->tcp_maxpsz_multiplier); /* Init in tcp_init_values */
2146 2153
2147 2154 tcp->tcp_conn_def_q0 = 0;
2148 2155 tcp->tcp_ip_forward_progress = B_FALSE;
2149 2156 tcp->tcp_ecn_ok = B_FALSE;
2150 2157
2151 2158 tcp->tcp_cwr = B_FALSE;
2152 2159 tcp->tcp_ecn_echo_on = B_FALSE;
2153 2160 tcp->tcp_is_wnd_shrnk = B_FALSE;
2154 2161
2155 2162 TCP_NOTSACK_REMOVE_ALL(tcp->tcp_notsack_list, tcp);
2156 2163 bzero(&tcp->tcp_sack_info, sizeof (tcp_sack_info_t));
2157 2164
2158 2165 tcp->tcp_rcv_ws = 0;
2159 2166 tcp->tcp_snd_ws = 0;
2160 2167 tcp->tcp_ts_recent = 0;
2161 2168 tcp->tcp_rnxt = 0; /* Displayed in mib */
2162 2169 DONTCARE(tcp->tcp_rwnd); /* Set in tcp_reinit() */
2163 2170 tcp->tcp_initial_pmtu = 0;
2164 2171
2165 2172 ASSERT(tcp->tcp_reass_head == NULL);
2166 2173 ASSERT(tcp->tcp_reass_tail == NULL);
2167 2174
2168 2175 tcp->tcp_cwnd_cnt = 0;
2169 2176
2170 2177 ASSERT(tcp->tcp_rcv_list == NULL);
2171 2178 ASSERT(tcp->tcp_rcv_last_head == NULL);
2172 2179 ASSERT(tcp->tcp_rcv_last_tail == NULL);
2173 2180 ASSERT(tcp->tcp_rcv_cnt == 0);
2174 2181
2175 2182 DONTCARE(tcp->tcp_cwnd_ssthresh); /* Init in tcp_set_destination */
2176 2183 DONTCARE(tcp->tcp_cwnd_max); /* Init in tcp_init_values */
2177 2184 tcp->tcp_csuna = 0;
2178 2185
2179 2186 tcp->tcp_rto = 0; /* Displayed in MIB */
2180 2187 DONTCARE(tcp->tcp_rtt_sa); /* Init in tcp_init_values */
2181 2188 DONTCARE(tcp->tcp_rtt_sd); /* Init in tcp_init_values */
2182 2189 tcp->tcp_rtt_update = 0;
2183 2190
2184 2191 DONTCARE(tcp->tcp_swl1); /* Init in case TCPS_LISTEN/TCPS_SYN_SENT */
2185 2192 DONTCARE(tcp->tcp_swl2); /* Init in case TCPS_LISTEN/TCPS_SYN_SENT */
2186 2193
2187 2194 tcp->tcp_rack = 0; /* Displayed in mib */
2188 2195 tcp->tcp_rack_cnt = 0;
2189 2196 tcp->tcp_rack_cur_max = 0;
2190 2197 tcp->tcp_rack_abs_max = 0;
2191 2198
2192 2199 tcp->tcp_max_swnd = 0;
2193 2200
2194 2201 ASSERT(tcp->tcp_listener == NULL);
2195 2202
2196 2203 DONTCARE(tcp->tcp_irs); /* tcp_valid_bits cleared */
2197 2204 DONTCARE(tcp->tcp_iss); /* tcp_valid_bits cleared */
2198 2205 DONTCARE(tcp->tcp_fss); /* tcp_valid_bits cleared */
2199 2206 DONTCARE(tcp->tcp_urg); /* tcp_valid_bits cleared */
2200 2207
2201 2208 ASSERT(tcp->tcp_conn_req_cnt_q == 0);
2202 2209 ASSERT(tcp->tcp_conn_req_cnt_q0 == 0);
2203 2210 PRESERVE(tcp->tcp_conn_req_max);
2204 2211 PRESERVE(tcp->tcp_conn_req_seqnum);
2205 2212
2206 2213 DONTCARE(tcp->tcp_first_timer_threshold); /* Init in tcp_init_values */
2207 2214 DONTCARE(tcp->tcp_second_timer_threshold); /* Init in tcp_init_values */
2208 2215 DONTCARE(tcp->tcp_first_ctimer_threshold); /* Init in tcp_init_values */
2209 2216 DONTCARE(tcp->tcp_second_ctimer_threshold); /* in tcp_init_values */
2210 2217
2211 2218 DONTCARE(tcp->tcp_urp_last); /* tcp_urp_last_valid is cleared */
2212 2219 ASSERT(tcp->tcp_urp_mp == NULL);
2213 2220 ASSERT(tcp->tcp_urp_mark_mp == NULL);
2214 2221 ASSERT(tcp->tcp_fused_sigurg_mp == NULL);
2215 2222
2216 2223 ASSERT(tcp->tcp_eager_next_q == NULL);
2217 2224 ASSERT(tcp->tcp_eager_last_q == NULL);
2218 2225 ASSERT((tcp->tcp_eager_next_q0 == NULL &&
2219 2226 tcp->tcp_eager_prev_q0 == NULL) ||
2220 2227 tcp->tcp_eager_next_q0 == tcp->tcp_eager_prev_q0);
2221 2228 ASSERT(tcp->tcp_conn.tcp_eager_conn_ind == NULL);
2222 2229
2223 2230 ASSERT((tcp->tcp_eager_next_drop_q0 == NULL &&
2224 2231 tcp->tcp_eager_prev_drop_q0 == NULL) ||
2225 2232 tcp->tcp_eager_next_drop_q0 == tcp->tcp_eager_prev_drop_q0);
2226 2233
2227 2234 DONTCARE(tcp->tcp_ka_rinterval); /* Init in tcp_init_values */
2228 2235 DONTCARE(tcp->tcp_ka_abort_thres); /* Init in tcp_init_values */
2229 2236 DONTCARE(tcp->tcp_ka_cnt); /* Init in tcp_init_values */
2230 2237
2231 2238 tcp->tcp_client_errno = 0;
2232 2239
2233 2240 DONTCARE(connp->conn_sum); /* Init in tcp_init_values */
2234 2241
2235 2242 connp->conn_faddr_v6 = ipv6_all_zeros; /* Displayed in MIB */
2236 2243
2237 2244 PRESERVE(connp->conn_bound_addr_v6);
2238 2245 tcp->tcp_last_sent_len = 0;
2239 2246 tcp->tcp_dupack_cnt = 0;
2240 2247
2241 2248 connp->conn_fport = 0; /* Displayed in MIB */
2242 2249 PRESERVE(connp->conn_lport);
2243 2250
2244 2251 PRESERVE(tcp->tcp_acceptor_lockp);
2245 2252
2246 2253 ASSERT(tcp->tcp_ordrel_mp == NULL);
2247 2254 PRESERVE(tcp->tcp_acceptor_id);
2248 2255 DONTCARE(tcp->tcp_ipsec_overhead);
2249 2256
2250 2257 PRESERVE(connp->conn_family);
2251 2258 /* Remove any remnants of mapped address binding */
2252 2259 if (connp->conn_family == AF_INET6) {
2253 2260 connp->conn_ipversion = IPV6_VERSION;
2254 2261 tcp->tcp_mss = tcps->tcps_mss_def_ipv6;
2255 2262 } else {
2256 2263 connp->conn_ipversion = IPV4_VERSION;
2257 2264 tcp->tcp_mss = tcps->tcps_mss_def_ipv4;
2258 2265 }
2259 2266
2260 2267 connp->conn_bound_if = 0;
2261 2268 connp->conn_recv_ancillary.crb_all = 0;
2262 2269 tcp->tcp_recvifindex = 0;
2263 2270 tcp->tcp_recvhops = 0;
2264 2271 tcp->tcp_closed = 0;
2265 2272 if (tcp->tcp_hopopts != NULL) {
2266 2273 mi_free(tcp->tcp_hopopts);
2267 2274 tcp->tcp_hopopts = NULL;
2268 2275 tcp->tcp_hopoptslen = 0;
2269 2276 }
2270 2277 ASSERT(tcp->tcp_hopoptslen == 0);
2271 2278 if (tcp->tcp_dstopts != NULL) {
2272 2279 mi_free(tcp->tcp_dstopts);
2273 2280 tcp->tcp_dstopts = NULL;
2274 2281 tcp->tcp_dstoptslen = 0;
2275 2282 }
2276 2283 ASSERT(tcp->tcp_dstoptslen == 0);
2277 2284 if (tcp->tcp_rthdrdstopts != NULL) {
2278 2285 mi_free(tcp->tcp_rthdrdstopts);
2279 2286 tcp->tcp_rthdrdstopts = NULL;
2280 2287 tcp->tcp_rthdrdstoptslen = 0;
2281 2288 }
2282 2289 ASSERT(tcp->tcp_rthdrdstoptslen == 0);
2283 2290 if (tcp->tcp_rthdr != NULL) {
2284 2291 mi_free(tcp->tcp_rthdr);
2285 2292 tcp->tcp_rthdr = NULL;
2286 2293 tcp->tcp_rthdrlen = 0;
2287 2294 }
2288 2295 ASSERT(tcp->tcp_rthdrlen == 0);
2289 2296
2290 2297 /* Reset fusion-related fields */
2291 2298 tcp->tcp_fused = B_FALSE;
2292 2299 tcp->tcp_unfusable = B_FALSE;
2293 2300 tcp->tcp_fused_sigurg = B_FALSE;
2294 2301 tcp->tcp_loopback_peer = NULL;
2295 2302
2296 2303 tcp->tcp_lso = B_FALSE;
2297 2304
2298 2305 tcp->tcp_in_ack_unsent = 0;
2299 2306 tcp->tcp_cork = B_FALSE;
2300 2307 tcp->tcp_tconnind_started = B_FALSE;
2301 2308
2302 2309 PRESERVE(tcp->tcp_squeue_bytes);
2303 2310
2304 2311 tcp->tcp_closemp_used = B_FALSE;
2305 2312
2306 2313 PRESERVE(tcp->tcp_rsrv_mp);
2307 2314 PRESERVE(tcp->tcp_rsrv_mp_lock);
2308 2315
2309 2316 #ifdef DEBUG
2310 2317 DONTCARE(tcp->tcmp_stk[0]);
2311 2318 #endif
2312 2319
2313 2320 PRESERVE(tcp->tcp_connid);
2314 2321
2315 2322 ASSERT(tcp->tcp_listen_cnt == NULL);
2316 2323 ASSERT(tcp->tcp_reass_tid == 0);
2317 2324
2318 2325 #undef DONTCARE
2319 2326 #undef PRESERVE
2320 2327 }
2321 2328
2322 2329 /*
2323 2330  * Initialize the various fields in tcp_t. If parent (the listener) is non-
2324 2331  * NULL, certain values will be inherited from it.
2325 2332 */
2326 2333 void
2327 2334 tcp_init_values(tcp_t *tcp, tcp_t *parent)
2328 2335 {
2329 2336 tcp_stack_t *tcps = tcp->tcp_tcps;
2330 2337 conn_t *connp = tcp->tcp_connp;
2331 2338 clock_t rto;
2332 2339
2333 2340 ASSERT((connp->conn_family == AF_INET &&
2334 2341 connp->conn_ipversion == IPV4_VERSION) ||
2335 2342 (connp->conn_family == AF_INET6 &&
2336 2343 (connp->conn_ipversion == IPV4_VERSION ||
2337 2344 connp->conn_ipversion == IPV6_VERSION)));
2338 2345
2339 2346 if (parent == NULL) {
2340 2347 tcp->tcp_naglim = tcps->tcps_naglim_def;
2341 2348
2342 2349 tcp->tcp_rto_initial = tcps->tcps_rexmit_interval_initial;
2343 2350 tcp->tcp_rto_min = tcps->tcps_rexmit_interval_min;
2344 2351 tcp->tcp_rto_max = tcps->tcps_rexmit_interval_max;
2345 2352
2346 2353 tcp->tcp_first_ctimer_threshold =
2347 2354 tcps->tcps_ip_notify_cinterval;
2348 2355 tcp->tcp_second_ctimer_threshold =
2349 2356 tcps->tcps_ip_abort_cinterval;
2350 2357 tcp->tcp_first_timer_threshold = tcps->tcps_ip_notify_interval;
2351 2358 tcp->tcp_second_timer_threshold = tcps->tcps_ip_abort_interval;
2352 2359
2353 2360 tcp->tcp_fin_wait_2_flush_interval =
2354 2361 tcps->tcps_fin_wait_2_flush_interval;
2355 2362
2356 2363 tcp->tcp_ka_interval = tcps->tcps_keepalive_interval;
2357 2364 tcp->tcp_ka_abort_thres = tcps->tcps_keepalive_abort_interval;
2358 2365 tcp->tcp_ka_cnt = 0;
2359 2366 tcp->tcp_ka_rinterval = 0;
2360 2367
2361 2368 /*
2362 2369 		 * The default value of tcp_init_cwnd is 0, so no need to set it
2363 2370 		 * here if parent is NULL; we only need to inherit it from a parent.
2364 2371 */
2365 2372 } else {
2366 2373 /* Inherit various TCP parameters from the parent. */
2367 2374 tcp->tcp_naglim = parent->tcp_naglim;
2368 2375
2369 2376 tcp->tcp_rto_initial = parent->tcp_rto_initial;
2370 2377 tcp->tcp_rto_min = parent->tcp_rto_min;
2371 2378 tcp->tcp_rto_max = parent->tcp_rto_max;
2372 2379
2373 2380 tcp->tcp_first_ctimer_threshold =
2374 2381 parent->tcp_first_ctimer_threshold;
2375 2382 tcp->tcp_second_ctimer_threshold =
2376 2383 parent->tcp_second_ctimer_threshold;
2377 2384 tcp->tcp_first_timer_threshold =
2378 2385 parent->tcp_first_timer_threshold;
2379 2386 tcp->tcp_second_timer_threshold =
2380 2387 parent->tcp_second_timer_threshold;
2381 2388
2382 2389 tcp->tcp_fin_wait_2_flush_interval =
2383 2390 parent->tcp_fin_wait_2_flush_interval;
2384 2391
2385 2392 tcp->tcp_ka_interval = parent->tcp_ka_interval;
2386 2393 tcp->tcp_ka_abort_thres = parent->tcp_ka_abort_thres;
2387 2394 tcp->tcp_ka_cnt = parent->tcp_ka_cnt;
2388 2395 tcp->tcp_ka_rinterval = parent->tcp_ka_rinterval;
2389 2396
2390 2397 tcp->tcp_init_cwnd = parent->tcp_init_cwnd;
2391 2398 }
2392 2399
2393 2400 /*
2394 2401 * Initialize tcp_rtt_sa and tcp_rtt_sd so that the calculated RTO
2395 2402 * will be close to tcp_rexmit_interval_initial. By doing this, we
2396 2403 * allow the algorithm to adjust slowly to large fluctuations of RTT
2397 2404 	 * during the first few transmissions of a connection, as seen on slow
2398 2405 * links.
2399 2406 */
2400 2407 tcp->tcp_rtt_sa = tcp->tcp_rto_initial << 2;
2401 2408 tcp->tcp_rtt_sd = tcp->tcp_rto_initial >> 1;
2402 2409 rto = (tcp->tcp_rtt_sa >> 3) + tcp->tcp_rtt_sd +
2403 2410 tcps->tcps_rexmit_interval_extra + (tcp->tcp_rtt_sa >> 5) +
2404 2411 tcps->tcps_conn_grace_period;
2405 2412 TCP_SET_RTO(tcp, rto);
2406 2413
2407 2414 tcp->tcp_timer_backoff = 0;
2408 2415 tcp->tcp_ms_we_have_waited = 0;
2409 2416 tcp->tcp_last_recv_time = ddi_get_lbolt();
2410 2417 tcp->tcp_cwnd_max = tcps->tcps_cwnd_max_;
2411 2418 tcp->tcp_cwnd_ssthresh = TCP_MAX_LARGEWIN;
2412 2419 tcp->tcp_snd_burst = TCP_CWND_INFINITE;
2413 2420
2414 2421 tcp->tcp_maxpsz_multiplier = tcps->tcps_maxpsz_multiplier;
2415 2422
2416 2423 /* NOTE: ISS is now set in tcp_set_destination(). */
2417 2424
2418 2425 /* Reset fusion-related fields */
2419 2426 tcp->tcp_fused = B_FALSE;
2420 2427 tcp->tcp_unfusable = B_FALSE;
2421 2428 tcp->tcp_fused_sigurg = B_FALSE;
2422 2429 tcp->tcp_loopback_peer = NULL;
2423 2430
2424 2431 /* We rebuild the header template on the next connect/conn_request */
2425 2432
2426 2433 connp->conn_mlp_type = mlptSingle;
2427 2434
2428 2435 /*
2429 2436 * Init the window scale to the max so tcp_rwnd_set() won't pare
2430 2437 * down tcp_rwnd. tcp_set_destination() will set the right value later.
2431 2438 */
2432 2439 tcp->tcp_rcv_ws = TCP_MAX_WINSHIFT;
2433 2440 tcp->tcp_rwnd = connp->conn_rcvbuf;
2434 2441
2435 2442 tcp->tcp_cork = B_FALSE;
2436 2443 /*
2437 2444 * Init the tcp_debug option if it wasn't already set. This value
2438 2445 	 * determines whether TCP calls strlog() to print out debug
2439 2446 	 * messages. Doing this initialization here means that this value
2440 2447 	 * is not inherited through
2441 2448 	 * tcp_reinit().
2442 2449 */
2443 2450 if (!connp->conn_debug)
2444 2451 connp->conn_debug = tcps->tcps_dbg;
2445 2452 }
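A worked check of the RTT seeding above (a sketch, not part of the source): substituting tcp_rtt_sa = 4 * RTO_i and tcp_rtt_sd = RTO_i / 2 into the rto expression gives

	rto = (4*RTO_i)/8 + RTO_i/2 + extra + (4*RTO_i)/32 + grace
	    = 1.125 * RTO_i + extra + grace

so the first computed RTO lands just above tcp_rto_initial, as the comment promises; a hypothetical 3000 ms initial interval with zero extras would yield 3375 ms.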
2446 2453
2447 2454 /*
2448 2455 * Update the TCP connection according to change of PMTU.
2449 2456 *
2450 2457  * The path MTU might have increased or decreased, so we need to adjust
2451 2458  * the MSS based on the value of ixa_pmtu. There is no need to handle
2452 2459  * a tiny or negative MSS, since tcp_mss_set() will do that.
2453 2460 */
2454 2461 void
2455 2462 tcp_update_pmtu(tcp_t *tcp, boolean_t decrease_only)
2456 2463 {
2457 2464 uint32_t pmtu;
2458 2465 int32_t mss;
2459 2466 conn_t *connp = tcp->tcp_connp;
2460 2467 ip_xmit_attr_t *ixa = connp->conn_ixa;
2461 2468 iaflags_t ixaflags;
2462 2469
2463 2470 if (tcp->tcp_tcps->tcps_ignore_path_mtu)
2464 2471 return;
2465 2472
2466 2473 if (tcp->tcp_state < TCPS_ESTABLISHED)
2467 2474 return;
2468 2475
2469 2476 /*
2470 2477 * Always call ip_get_pmtu() to make sure that IP has updated
2471 2478 * ixa_flags properly.
2472 2479 */
2473 2480 pmtu = ip_get_pmtu(ixa);
2474 2481 ixaflags = ixa->ixa_flags;
2475 2482
2476 2483 /*
2477 2484 * Calculate the MSS by decreasing the PMTU by conn_ht_iphc_len and
2478 2485 * IPsec overhead if applied. Make sure to use the most recent
2479 2486 * IPsec information.
2480 2487 */
2481 2488 mss = pmtu - connp->conn_ht_iphc_len - conn_ipsec_length(connp);
2482 2489
2483 2490 /*
2484 2491 * Nothing to change, so just return.
2485 2492 */
2486 2493 if (mss == tcp->tcp_mss)
2487 2494 return;
2488 2495
2489 2496 /*
2490 2497 * Currently, for ICMP errors, only PMTU decrease is handled.
2491 2498 */
2492 2499 if (mss > tcp->tcp_mss && decrease_only)
2493 2500 return;
2494 2501
2495 2502 DTRACE_PROBE2(tcp_update_pmtu, int32_t, tcp->tcp_mss, uint32_t, mss);
2496 2503
2497 2504 /*
2498 2505 * Update ixa_fragsize and ixa_pmtu.
2499 2506 */
2500 2507 ixa->ixa_fragsize = ixa->ixa_pmtu = pmtu;
2501 2508
2502 2509 /*
2503 2510 * Adjust MSS and all relevant variables.
2504 2511 */
2505 2512 tcp_mss_set(tcp, mss);
2506 2513
2507 2514 /*
2508 2515 * If the PMTU is below the min size maintained by IP, then ip_get_pmtu
2509 2516 * has set IXAF_PMTU_TOO_SMALL and cleared IXAF_PMTU_IPV4_DF. Since TCP
2510 2517 * has a (potentially different) min size we do the same. Make sure to
2511 2518 * clear IXAF_DONTFRAG, which is used by IP to decide whether to
2512 2519 * fragment the packet.
2513 2520 *
2514 2521 	 * LSO over IPv6 cannot be fragmented, so LSO must be disabled
2515 2522 	 * when IPv6 fragmentation is needed.
2516 2523 */
2517 2524 if (mss < tcp->tcp_tcps->tcps_mss_min)
2518 2525 ixaflags |= IXAF_PMTU_TOO_SMALL;
2519 2526
2520 2527 if (ixaflags & IXAF_PMTU_TOO_SMALL)
2521 2528 ixaflags &= ~(IXAF_DONTFRAG | IXAF_PMTU_IPV4_DF);
2522 2529
2523 2530 if ((connp->conn_ipversion == IPV4_VERSION) &&
2524 2531 !(ixaflags & IXAF_PMTU_IPV4_DF)) {
2525 2532 tcp->tcp_ipha->ipha_fragment_offset_and_flags = 0;
2526 2533 }
2527 2534 ixa->ixa_flags = ixaflags;
2528 2535 }
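To make the MSS arithmetic in tcp_update_pmtu() concrete (a sketch assuming a bare header template: no IP options, no TCP options, zero IPsec overhead):

	1500 (PMTU) - 20 (IPv4) - 20 (TCP) = 1460 (MSS)
	1500 (PMTU) - 40 (IPv6) - 20 (TCP) = 1440 (MSS)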
2529 2536
2530 2537 int
2531 2538 tcp_maxpsz_set(tcp_t *tcp, boolean_t set_maxblk)
2532 2539 {
2533 2540 conn_t *connp = tcp->tcp_connp;
2534 2541 queue_t *q = connp->conn_rq;
2535 2542 int32_t mss = tcp->tcp_mss;
2536 2543 int maxpsz;
2537 2544
2538 2545 if (TCP_IS_DETACHED(tcp))
2539 2546 return (mss);
2540 2547 if (tcp->tcp_fused) {
2541 2548 maxpsz = tcp_fuse_maxpsz(tcp);
2542 2549 mss = INFPSZ;
2543 2550 } else if (tcp->tcp_maxpsz_multiplier == 0) {
2544 2551 /*
2545 2552 * Set the sd_qn_maxpsz according to the socket send buffer
2546 2553 * size, and sd_maxblk to INFPSZ (-1). This will essentially
2547 2554 * instruct the stream head to copyin user data into contiguous
2548 2555 * kernel-allocated buffers without breaking it up into smaller
2549 2556 * chunks. We round up the buffer size to the nearest SMSS.
2550 2557 */
2551 2558 maxpsz = MSS_ROUNDUP(connp->conn_sndbuf, mss);
2552 2559 mss = INFPSZ;
2553 2560 } else {
2554 2561 /*
2555 2562 		 * Set sd_qn_maxpsz to approximately half the (receiver's) buffer
2556 2563 * (and a multiple of the mss). This instructs the stream
2557 2564 * head to break down larger than SMSS writes into SMSS-
2558 2565 * size mblks, up to tcp_maxpsz_multiplier mblks at a time.
2559 2566 */
2560 2567 maxpsz = tcp->tcp_maxpsz_multiplier * mss;
2561 2568 if (maxpsz > connp->conn_sndbuf / 2) {
2562 2569 maxpsz = connp->conn_sndbuf / 2;
2563 2570 /* Round up to nearest mss */
2564 2571 maxpsz = MSS_ROUNDUP(maxpsz, mss);
2565 2572 }
2566 2573 }
2567 2574
2568 2575 (void) proto_set_maxpsz(q, connp, maxpsz);
2569 2576 if (!(IPCL_IS_NONSTR(connp)))
2570 2577 connp->conn_wq->q_maxpsz = maxpsz;
2571 2578 if (set_maxblk)
2572 2579 (void) proto_set_tx_maxblk(q, connp, mss);
2573 2580 return (mss);
2574 2581 }
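A worked pass through the multiplier branch of tcp_maxpsz_set(), with hypothetical values mss = 1460, tcp_maxpsz_multiplier = 10, and conn_sndbuf = 16384:

	maxpsz = 10 * 1460 = 14600
	conn_sndbuf / 2 = 8192, which is smaller, so
	maxpsz = MSS_ROUNDUP(8192, 1460) = 6 * 1460 = 8760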
2575 2582
2576 2583 /* For /dev/tcp aka AF_INET open */
2577 2584 static int
2578 2585 tcp_openv4(queue_t *q, dev_t *devp, int flag, int sflag, cred_t *credp)
2579 2586 {
2580 2587 return (tcp_open(q, devp, flag, sflag, credp, B_FALSE));
2581 2588 }
2582 2589
2583 2590 /* For /dev/tcp6 aka AF_INET6 open */
2584 2591 static int
2585 2592 tcp_openv6(queue_t *q, dev_t *devp, int flag, int sflag, cred_t *credp)
2586 2593 {
2587 2594 return (tcp_open(q, devp, flag, sflag, credp, B_TRUE));
2588 2595 }
2589 2596
2590 2597 conn_t *
2591 2598 tcp_create_common(cred_t *credp, boolean_t isv6, boolean_t issocket,
2592 2599 int *errorp)
2593 2600 {
2594 2601 tcp_t *tcp = NULL;
2595 2602 conn_t *connp;
2596 2603 zoneid_t zoneid;
2597 2604 tcp_stack_t *tcps;
2598 2605 squeue_t *sqp;
2599 2606
2600 2607 ASSERT(errorp != NULL);
2601 2608 /*
2602 2609 * Find the proper zoneid and netstack.
2603 2610 */
2604 2611 /*
2605 2612 * Special case for install: miniroot needs to be able to
2606 2613 * access files via NFS as though it were always in the
2607 2614 * global zone.
2608 2615 */
2609 2616 if (credp == kcred && nfs_global_client_only != 0) {
2610 2617 zoneid = GLOBAL_ZONEID;
2611 2618 tcps = netstack_find_by_stackid(GLOBAL_NETSTACKID)->
2612 2619 netstack_tcp;
2613 2620 ASSERT(tcps != NULL);
2614 2621 } else {
2615 2622 netstack_t *ns;
2616 2623 int err;
2617 2624
2618 2625 if ((err = secpolicy_basic_net_access(credp)) != 0) {
2619 2626 *errorp = err;
2620 2627 return (NULL);
2621 2628 }
2622 2629
2623 2630 ns = netstack_find_by_cred(credp);
2624 2631 ASSERT(ns != NULL);
2625 2632 tcps = ns->netstack_tcp;
2626 2633 ASSERT(tcps != NULL);
2627 2634
2628 2635 /*
2629 2636 * For exclusive stacks we set the zoneid to zero
2630 2637 * to make TCP operate as if in the global zone.
2631 2638 */
2632 2639 if (tcps->tcps_netstack->netstack_stackid !=
2633 2640 GLOBAL_NETSTACKID)
2634 2641 zoneid = GLOBAL_ZONEID;
2635 2642 else
2636 2643 zoneid = crgetzoneid(credp);
2637 2644 }
2638 2645
2639 2646 sqp = IP_SQUEUE_GET((uint_t)gethrtime());
2640 2647 connp = (conn_t *)tcp_get_conn(sqp, tcps);
2641 2648 /*
2642 2649 * Both tcp_get_conn and netstack_find_by_cred incremented refcnt,
2643 2650 * so we drop it by one.
2644 2651 */
2645 2652 netstack_rele(tcps->tcps_netstack);
2646 2653 if (connp == NULL) {
2647 2654 *errorp = ENOSR;
2648 2655 return (NULL);
2649 2656 }
2650 2657 ASSERT(connp->conn_ixa->ixa_protocol == connp->conn_proto);
2651 2658
2652 2659 connp->conn_sqp = sqp;
2653 2660 connp->conn_initial_sqp = connp->conn_sqp;
2654 2661 connp->conn_ixa->ixa_sqp = connp->conn_sqp;
2655 2662 tcp = connp->conn_tcp;
2656 2663
2657 2664 /*
2658 2665 * Besides asking IP to set the checksum for us, have conn_ip_output
2659 2666 * to do the following checks when necessary:
2660 2667 *
2661 2668 * IXAF_VERIFY_SOURCE: drop packets when our outer source goes invalid
2662 2669 * IXAF_VERIFY_PMTU: verify PMTU changes
2663 2670 * IXAF_VERIFY_LSO: verify LSO capability changes
2664 2671 */
2665 2672 connp->conn_ixa->ixa_flags |= IXAF_SET_ULP_CKSUM | IXAF_VERIFY_SOURCE |
2666 2673 IXAF_VERIFY_PMTU | IXAF_VERIFY_LSO;
2667 2674
2668 2675 if (!tcps->tcps_dev_flow_ctl)
2669 2676 connp->conn_ixa->ixa_flags |= IXAF_NO_DEV_FLOW_CTL;
2670 2677
2671 2678 if (isv6) {
2672 2679 connp->conn_ixa->ixa_src_preferences = IPV6_PREFER_SRC_DEFAULT;
2673 2680 connp->conn_ipversion = IPV6_VERSION;
2674 2681 connp->conn_family = AF_INET6;
2675 2682 tcp->tcp_mss = tcps->tcps_mss_def_ipv6;
2676 2683 connp->conn_default_ttl = tcps->tcps_ipv6_hoplimit;
2677 2684 } else {
2678 2685 connp->conn_ipversion = IPV4_VERSION;
2679 2686 connp->conn_family = AF_INET;
2680 2687 tcp->tcp_mss = tcps->tcps_mss_def_ipv4;
2681 2688 connp->conn_default_ttl = tcps->tcps_ipv4_ttl;
2682 2689 }
2683 2690 connp->conn_xmit_ipp.ipp_unicast_hops = connp->conn_default_ttl;
2684 2691
2685 2692 crhold(credp);
2686 2693 connp->conn_cred = credp;
2687 2694 connp->conn_cpid = curproc->p_pid;
2688 2695 connp->conn_open_time = ddi_get_lbolt64();
2689 2696
2690 2697 /* Cache things in the ixa without any refhold */
2691 2698 ASSERT(!(connp->conn_ixa->ixa_free_flags & IXA_FREE_CRED));
2692 2699 connp->conn_ixa->ixa_cred = credp;
2693 2700 connp->conn_ixa->ixa_cpid = connp->conn_cpid;
2694 2701
2695 2702 connp->conn_zoneid = zoneid;
2696 2703 /* conn_allzones can not be set this early, hence no IPCL_ZONEID */
2697 2704 connp->conn_ixa->ixa_zoneid = zoneid;
2698 2705 connp->conn_mlp_type = mlptSingle;
2699 2706 ASSERT(connp->conn_netstack == tcps->tcps_netstack);
2700 2707 ASSERT(tcp->tcp_tcps == tcps);
2701 2708
2702 2709 /*
2703 2710 * If the caller has the process-wide flag set, then default to MAC
2704 2711 * exempt mode. This allows read-down to unlabeled hosts.
2705 2712 */
2706 2713 if (getpflags(NET_MAC_AWARE, credp) != 0)
2707 2714 connp->conn_mac_mode = CONN_MAC_AWARE;
2708 2715
2709 2716 connp->conn_zone_is_global = (crgetzoneid(credp) == GLOBAL_ZONEID);
2710 2717
2711 2718 if (issocket) {
2712 2719 tcp->tcp_issocket = 1;
2713 2720 }
2714 2721
2715 2722 connp->conn_rcvbuf = tcps->tcps_recv_hiwat;
2716 2723 connp->conn_sndbuf = tcps->tcps_xmit_hiwat;
2717 2724 if (tcps->tcps_snd_lowat_fraction != 0) {
2718 2725 connp->conn_sndlowat = connp->conn_sndbuf /
2719 2726 tcps->tcps_snd_lowat_fraction;
2720 2727 } else {
2721 2728 connp->conn_sndlowat = tcps->tcps_xmit_lowat;
2722 2729 }
2723 2730 connp->conn_so_type = SOCK_STREAM;
2724 2731 connp->conn_wroff = connp->conn_ht_iphc_allocated +
2725 2732 tcps->tcps_wroff_xtra;
2726 2733
2727 2734 SOCK_CONNID_INIT(tcp->tcp_connid);
2728 2735 /* DTrace ignores this - it isn't a tcp:::state-change */
2729 2736 tcp->tcp_state = TCPS_IDLE;
2730 2737 tcp_init_values(tcp, NULL);
2731 2738 return (connp);
2732 2739 }
2733 2740
2734 2741 static int
2735 2742 tcp_open(queue_t *q, dev_t *devp, int flag, int sflag, cred_t *credp,
2736 2743 boolean_t isv6)
2737 2744 {
2738 2745 tcp_t *tcp = NULL;
2739 2746 conn_t *connp = NULL;
2740 2747 int err;
2741 2748 vmem_t *minor_arena = NULL;
2742 2749 dev_t conn_dev;
2743 2750 boolean_t issocket;
2744 2751
2745 2752 if (q->q_ptr != NULL)
2746 2753 return (0);
2747 2754
2748 2755 if (sflag == MODOPEN)
2749 2756 return (EINVAL);
2750 2757
2751 2758 if ((ip_minor_arena_la != NULL) && (flag & SO_SOCKSTR) &&
2752 2759 ((conn_dev = inet_minor_alloc(ip_minor_arena_la)) != 0)) {
2753 2760 minor_arena = ip_minor_arena_la;
2754 2761 } else {
2755 2762 /*
2756 2763 * Either minor numbers in the large arena were exhausted
2757 2764 * or a non socket application is doing the open.
2758 2765 * Try to allocate from the small arena.
2759 2766 */
2760 2767 if ((conn_dev = inet_minor_alloc(ip_minor_arena_sa)) == 0) {
2761 2768 return (EBUSY);
2762 2769 }
2763 2770 minor_arena = ip_minor_arena_sa;
2764 2771 }
2765 2772
2766 2773 ASSERT(minor_arena != NULL);
2767 2774
2768 2775 *devp = makedevice(getmajor(*devp), (minor_t)conn_dev);
2769 2776
2770 2777 if (flag & SO_FALLBACK) {
2771 2778 /*
2772 2779 * Non streams socket needs a stream to fallback to
2773 2780 */
2774 2781 RD(q)->q_ptr = (void *)conn_dev;
2775 2782 WR(q)->q_qinfo = &tcp_fallback_sock_winit;
2776 2783 WR(q)->q_ptr = (void *)minor_arena;
2777 2784 qprocson(q);
2778 2785 return (0);
2779 2786 } else if (flag & SO_ACCEPTOR) {
2780 2787 q->q_qinfo = &tcp_acceptor_rinit;
2781 2788 /*
2782 2789 * the conn_dev and minor_arena will be subsequently used by
2783 2790 * tcp_tli_accept() and tcp_tpi_close_accept() to figure out
2784 2791 * the minor device number for this connection from the q_ptr.
2785 2792 */
2786 2793 RD(q)->q_ptr = (void *)conn_dev;
2787 2794 WR(q)->q_qinfo = &tcp_acceptor_winit;
2788 2795 WR(q)->q_ptr = (void *)minor_arena;
2789 2796 qprocson(q);
2790 2797 return (0);
2791 2798 }
2792 2799
2793 2800 issocket = flag & SO_SOCKSTR;
2794 2801 connp = tcp_create_common(credp, isv6, issocket, &err);
2795 2802
2796 2803 if (connp == NULL) {
2797 2804 inet_minor_free(minor_arena, conn_dev);
2798 2805 q->q_ptr = WR(q)->q_ptr = NULL;
2799 2806 return (err);
2800 2807 }
2801 2808
2802 2809 connp->conn_rq = q;
2803 2810 connp->conn_wq = WR(q);
2804 2811 q->q_ptr = WR(q)->q_ptr = connp;
2805 2812
2806 2813 connp->conn_dev = conn_dev;
2807 2814 connp->conn_minor_arena = minor_arena;
2808 2815
2809 2816 ASSERT(q->q_qinfo == &tcp_rinitv4 || q->q_qinfo == &tcp_rinitv6);
2810 2817 ASSERT(WR(q)->q_qinfo == &tcp_winit);
2811 2818
2812 2819 tcp = connp->conn_tcp;
2813 2820
2814 2821 if (issocket) {
2815 2822 WR(q)->q_qinfo = &tcp_sock_winit;
2816 2823 } else {
2817 2824 #ifdef _ILP32
2818 2825 tcp->tcp_acceptor_id = (t_uscalar_t)RD(q);
2819 2826 #else
2820 2827 tcp->tcp_acceptor_id = conn_dev;
2821 2828 #endif /* _ILP32 */
2822 2829 tcp_acceptor_hash_insert(tcp->tcp_acceptor_id, tcp);
2823 2830 }
2824 2831
2825 2832 /*
2826 2833 	 * Put the ref for TCP. The ref for IP was already put
2827 2834 	 * by ipcl_conn_create. Also make the conn_t globally
2828 2835 	 * visible to walkers.
2829 2836 */
2830 2837 mutex_enter(&connp->conn_lock);
2831 2838 CONN_INC_REF_LOCKED(connp);
2832 2839 ASSERT(connp->conn_ref == 2);
2833 2840 connp->conn_state_flags &= ~CONN_INCIPIENT;
2834 2841 mutex_exit(&connp->conn_lock);
2835 2842
2836 2843 qprocson(q);
2837 2844 return (0);
2838 2845 }
2839 2846
2840 2847 /*
2841 2848 * Build/update the tcp header template (in conn_ht_iphc) based on
2842 2849 * conn_xmit_ipp. The headers include ip6_t, any extension
2843 2850 * headers, and the maximum size tcp header (to avoid reallocation
2844 2851 * on the fly for additional tcp options).
2845 2852 *
2846 2853 * Assumes the caller has already set conn_{faddr,laddr,fport,lport,flowinfo}.
2847 2854 * Returns failure if can't allocate memory.
2848 2855 */
2849 2856 int
2850 2857 tcp_build_hdrs(tcp_t *tcp)
2851 2858 {
2852 2859 tcp_stack_t *tcps = tcp->tcp_tcps;
2853 2860 conn_t *connp = tcp->tcp_connp;
2854 2861 char buf[TCP_MAX_HDR_LENGTH];
2855 2862 uint_t buflen;
2856 2863 uint_t ulplen = TCP_MIN_HEADER_LENGTH;
2857 2864 uint_t extralen = TCP_MAX_TCP_OPTIONS_LENGTH;
2858 2865 tcpha_t *tcpha;
2859 2866 uint32_t cksum;
2860 2867 int error;
2861 2868
2862 2869 /*
2863 2870 * We might be called after the connection is set up, and we might
2864 2871 * have TS options already in the TCP header. Thus we save any
2865 2872 * existing tcp header.
2866 2873 */
2867 2874 buflen = connp->conn_ht_ulp_len;
2868 2875 if (buflen != 0) {
2869 2876 bcopy(connp->conn_ht_ulp, buf, buflen);
2870 2877 extralen -= buflen - ulplen;
2871 2878 ulplen = buflen;
2872 2879 }
2873 2880
2874 2881 /* Grab lock to satisfy ASSERT; TCP is serialized using squeue */
2875 2882 mutex_enter(&connp->conn_lock);
2876 2883 error = conn_build_hdr_template(connp, ulplen, extralen,
2877 2884 &connp->conn_laddr_v6, &connp->conn_faddr_v6, connp->conn_flowinfo);
2878 2885 mutex_exit(&connp->conn_lock);
2879 2886 if (error != 0)
2880 2887 return (error);
2881 2888
2882 2889 /*
2883 2890 * Any routing header/option has been massaged. The checksum difference
2884 2891 * is stored in conn_sum for later use.
2885 2892 */
2886 2893 tcpha = (tcpha_t *)connp->conn_ht_ulp;
2887 2894 tcp->tcp_tcpha = tcpha;
2888 2895
2889 2896 /* restore any old tcp header */
2890 2897 if (buflen != 0) {
2891 2898 bcopy(buf, connp->conn_ht_ulp, buflen);
2892 2899 } else {
2893 2900 tcpha->tha_sum = 0;
2894 2901 tcpha->tha_urp = 0;
2895 2902 tcpha->tha_ack = 0;
2896 2903 tcpha->tha_offset_and_reserved = (5 << 4);
2897 2904 tcpha->tha_lport = connp->conn_lport;
2898 2905 tcpha->tha_fport = connp->conn_fport;
2899 2906 }
2900 2907
2901 2908 /*
2902 2909 * IP wants our header length in the checksum field to
2903 2910 * allow it to perform a single pseudo-header+checksum
2904 2911 * calculation on behalf of TCP.
2905 2912 * Include the adjustment for a source route once IP_OPTIONS is set.
2906 2913 */
2907 2914 cksum = sizeof (tcpha_t) + connp->conn_sum;
2908 2915 cksum = (cksum >> 16) + (cksum & 0xFFFF);
2909 2916 ASSERT(cksum < 0x10000);
2910 2917 tcpha->tha_sum = htons(cksum);
2911 2918
2912 2919 if (connp->conn_ipversion == IPV4_VERSION)
2913 2920 tcp->tcp_ipha = (ipha_t *)connp->conn_ht_iphc;
2914 2921 else
2915 2922 tcp->tcp_ip6h = (ip6_t *)connp->conn_ht_iphc;
2916 2923
2917 2924 if (connp->conn_ht_iphc_allocated + tcps->tcps_wroff_xtra >
2918 2925 connp->conn_wroff) {
2919 2926 connp->conn_wroff = connp->conn_ht_iphc_allocated +
2920 2927 tcps->tcps_wroff_xtra;
2921 2928 (void) proto_set_tx_wroff(connp->conn_rq, connp,
2922 2929 connp->conn_wroff);
2923 2930 }
2924 2931 return (0);
2925 2932 }
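The fold near the end of tcp_build_hdrs() gets away with a single step because sizeof (tcpha_t) + conn_sum is small, as the ASSERT checks; a general-purpose fold must iterate until no carry remains. A generic sketch (not the kernel's helper):

	static uint16_t
	cksum_fold(uint32_t sum)
	{
		/* Add carries out of the low 16 bits back into the sum. */
		while ((sum >> 16) != 0)
			sum = (sum >> 16) + (sum & 0xFFFF);
		return ((uint16_t)sum);
	}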
2926 2933
2927 2934 /*
2928 2935 * tcp_rwnd_set() is called to adjust the receive window to a desired value.
2929 2936 * We do not allow the receive window to shrink. After setting rwnd,
2930 2937 * set the flow control hiwat of the stream.
2931 2938 *
2932 2939 * This function is called in 2 cases:
2933 2940 *
2934 2941 * 1) Before data transfer begins, in tcp_input_listener() for accepting a
2935 2942 * connection (passive open) and in tcp_input_data() for active connect.
2936 2943 * This is called after tcp_mss_set() when the desired MSS value is known.
2937 2944  * This makes sure that our window size is a multiple of the other side's
2938 2945 * MSS.
2939 2946 * 2) Handling SO_RCVBUF option.
2940 2947 *
2941 2948 * It is ASSUMED that the requested size is a multiple of the current MSS.
2942 2949 *
2943 2950 * XXX - Should allow a lower rwnd than tcp_recv_hiwat_minmss * mss if the
2944 2951 * user requests so.
2945 2952 */
2946 2953 int
2947 2954 tcp_rwnd_set(tcp_t *tcp, uint32_t rwnd)
2948 2955 {
2949 2956 uint32_t mss = tcp->tcp_mss;
2950 2957 uint32_t old_max_rwnd;
2951 2958 uint32_t max_transmittable_rwnd;
2952 2959 boolean_t tcp_detached = TCP_IS_DETACHED(tcp);
2953 2960 tcp_stack_t *tcps = tcp->tcp_tcps;
2954 2961 conn_t *connp = tcp->tcp_connp;
2955 2962
2956 2963 /*
2957 2964 * Insist on a receive window that is at least
2958 2965 * tcp_recv_hiwat_minmss * MSS (default 4 * MSS) to avoid
2959 2966 	 * funny TCP interactions of the Nagle algorithm, SWS avoidance,
2960 2967 * and delayed acknowledgement.
2961 2968 */
2962 2969 rwnd = MAX(rwnd, tcps->tcps_recv_hiwat_minmss * mss);
2963 2970
2964 2971 if (tcp->tcp_fused) {
2965 2972 size_t sth_hiwat;
2966 2973 tcp_t *peer_tcp = tcp->tcp_loopback_peer;
2967 2974
2968 2975 ASSERT(peer_tcp != NULL);
2969 2976 sth_hiwat = tcp_fuse_set_rcv_hiwat(tcp, rwnd);
2970 2977 if (!tcp_detached) {
2971 2978 (void) proto_set_rx_hiwat(connp->conn_rq, connp,
2972 2979 sth_hiwat);
2973 2980 tcp_set_recv_threshold(tcp, sth_hiwat >> 3);
2974 2981 }
2975 2982
2976 2983 /* Caller could have changed tcp_rwnd; update tha_win */
2977 2984 if (tcp->tcp_tcpha != NULL) {
2978 2985 tcp->tcp_tcpha->tha_win =
2979 2986 htons(tcp->tcp_rwnd >> tcp->tcp_rcv_ws);
2980 2987 }
2981 2988 if ((tcp->tcp_rcv_ws > 0) && rwnd > tcp->tcp_cwnd_max)
2982 2989 tcp->tcp_cwnd_max = rwnd;
2983 2990
2984 2991 /*
2985 2992 * In the fusion case, the maxpsz stream head value of
2986 2993 * our peer is set according to its send buffer size
2987 2994 * and our receive buffer size; since the latter may
2988 2995 * have changed we need to update the peer's maxpsz.
2989 2996 */
2990 2997 (void) tcp_maxpsz_set(peer_tcp, B_TRUE);
2991 2998 return (sth_hiwat);
2992 2999 }
2993 3000
2994 3001 if (tcp_detached)
2995 3002 old_max_rwnd = tcp->tcp_rwnd;
2996 3003 else
2997 3004 old_max_rwnd = connp->conn_rcvbuf;
2998 3005
2999 3006
3000 3007 /*
3001 3008 	 * If window size info has already been exchanged, TCP should not
3002 3009 	 * shrink the window. Shrinking the window is doable if done carefully.
3003 3010 	 * We may add that support later, but so far there has been no real
3004 3011 	 * need to do that.
3005 3012 */
3006 3013 if (rwnd < old_max_rwnd && tcp->tcp_state > TCPS_SYN_SENT) {
3007 3014 /* MSS may have changed, do a round up again. */
3008 3015 rwnd = MSS_ROUNDUP(old_max_rwnd, mss);
3009 3016 }
3010 3017
3011 3018 /*
3012 3019 * tcp_rcv_ws starts with TCP_MAX_WINSHIFT so the following check
3013 3020 * can be applied even before the window scale option is decided.
3014 3021 */
3015 3022 max_transmittable_rwnd = TCP_MAXWIN << tcp->tcp_rcv_ws;
3016 3023 if (rwnd > max_transmittable_rwnd) {
3017 3024 rwnd = max_transmittable_rwnd -
3018 3025 (max_transmittable_rwnd % mss);
3019 3026 if (rwnd < mss)
3020 3027 rwnd = max_transmittable_rwnd;
3021 3028 /*
3022 3029 * If we're over the limit we may have to back down tcp_rwnd.
3023 3030 * The increment below won't work for us. So we set all three
3024 3031 * here and the increment below will have no effect.
3025 3032 */
3026 3033 tcp->tcp_rwnd = old_max_rwnd = rwnd;
3027 3034 }
3028 3035 if (tcp->tcp_localnet) {
3029 3036 tcp->tcp_rack_abs_max =
3030 3037 MIN(tcps->tcps_local_dacks_max, rwnd / mss / 2);
3031 3038 } else {
3032 3039 /*
3033 3040 * For a remote host on a different subnet (through a router),
3034 3041 * we ack every other packet to be conforming to RFC1122.
3035 3042 * tcp_deferred_acks_max is default to 2.
3036 3043 */
3037 3044 tcp->tcp_rack_abs_max =
3038 3045 MIN(tcps->tcps_deferred_acks_max, rwnd / mss / 2);
3039 3046 }
3040 3047 if (tcp->tcp_rack_cur_max > tcp->tcp_rack_abs_max)
3041 3048 tcp->tcp_rack_cur_max = tcp->tcp_rack_abs_max;
3042 3049 else
3043 3050 tcp->tcp_rack_cur_max = 0;
3044 3051 /*
3045 3052 * Increment the current rwnd by the amount the maximum grew (we
3046 3053 * cannot overwrite it since we might be in the middle of a
3047 3054 * connection).
3048 3055 */
3049 3056 tcp->tcp_rwnd += rwnd - old_max_rwnd;
3050 3057 connp->conn_rcvbuf = rwnd;
3051 3058
3052 3059 /* Are we already connected? */
3053 3060 if (tcp->tcp_tcpha != NULL) {
3054 3061 tcp->tcp_tcpha->tha_win =
3055 3062 htons(tcp->tcp_rwnd >> tcp->tcp_rcv_ws);
3056 3063 }
3057 3064
3058 3065 if ((tcp->tcp_rcv_ws > 0) && rwnd > tcp->tcp_cwnd_max)
3059 3066 tcp->tcp_cwnd_max = rwnd;
3060 3067
3061 3068 if (tcp_detached)
3062 3069 return (rwnd);
3063 3070
3064 3071 tcp_set_recv_threshold(tcp, rwnd >> 3);
3065 3072
3066 3073 (void) proto_set_rx_hiwat(connp->conn_rq, connp, rwnd);
3067 3074 return (rwnd);
3068 3075 }
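
A minimal userland sketch of the clamping arithmetic above (illustrative
only; clamp_rwnd is an invented name, and TCP_MAXWIN is 65535, the largest
unscaled window TCP can advertise):

#include <stdio.h>
#include <stdint.h>

#define TCP_MAXWIN	65535U

/*
 * Clamp a requested rwnd the way tcp_rwnd_set() does: cap it at the
 * largest window the current shift can advertise, then round down to
 * a multiple of the MSS so we never advertise a partial segment.
 */
static uint32_t
clamp_rwnd(uint32_t rwnd, uint32_t mss, int rcv_ws)
{
	uint32_t max_transmittable = TCP_MAXWIN << rcv_ws;

	if (rwnd > max_transmittable) {
		rwnd = max_transmittable - (max_transmittable % mss);
		if (rwnd < mss)
			rwnd = max_transmittable;
	}
	return (rwnd);
}

int
main(void)
{
	/* 1 MiB request, mss 1460, ws 3: 65535 << 3 = 524280 -> 524140 */
	(void) printf("%u\n", clamp_rwnd(1U << 20, 1460, 3));
	return (0);
}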
3069 3076
3070 3077 int
3071 3078 tcp_do_unbind(conn_t *connp)
3072 3079 {
3073 3080 tcp_t *tcp = connp->conn_tcp;
3074 3081 int32_t oldstate;
3075 3082
3076 3083 switch (tcp->tcp_state) {
3077 3084 case TCPS_BOUND:
3078 3085 case TCPS_LISTEN:
3079 3086 break;
3080 3087 default:
3081 3088 return (-TOUTSTATE);
3082 3089 }
3083 3090
3084 3091 /*
3085 3092 * Need to clean up all the eagers since after the unbind, segments
3086 3093 * will no longer be delivered to this listener stream.
3087 3094 */
3088 3095 mutex_enter(&tcp->tcp_eager_lock);
3089 3096 if (tcp->tcp_conn_req_cnt_q0 != 0 || tcp->tcp_conn_req_cnt_q != 0) {
3090 3097 tcp_eager_cleanup(tcp, 0);
3091 3098 }
3092 3099 mutex_exit(&tcp->tcp_eager_lock);
3093 3100
3094 3101 /* Clean up the listener connection counter if necessary. */
3095 3102 if (tcp->tcp_listen_cnt != NULL)
3096 3103 TCP_DECR_LISTEN_CNT(tcp);
3097 3104 connp->conn_laddr_v6 = ipv6_all_zeros;
3098 3105 connp->conn_saddr_v6 = ipv6_all_zeros;
3099 3106 tcp_bind_hash_remove(tcp);
3100 3107 oldstate = tcp->tcp_state;
3101 3108 tcp->tcp_state = TCPS_IDLE;
3102 3109 DTRACE_TCP6(state__change, void, NULL, ip_xmit_attr_t *,
3103 3110 connp->conn_ixa, void, NULL, tcp_t *, tcp, void, NULL,
3104 3111 int32_t, oldstate);
3105 3112
3106 3113 ip_unbind(connp);
3107 3114 bzero(&connp->conn_ports, sizeof (connp->conn_ports));
3108 3115
3109 3116 return (0);
3110 3117 }
3111 3118
3112 3119 /*
3113 3120 * Collect protocol properties to send to the upper handle.
3114 3121 */
3115 3122 void
3116 3123 tcp_get_proto_props(tcp_t *tcp, struct sock_proto_props *sopp)
3117 3124 {
3118 3125 conn_t *connp = tcp->tcp_connp;
3119 3126
3120 3127 sopp->sopp_flags = SOCKOPT_RCVHIWAT | SOCKOPT_MAXBLK | SOCKOPT_WROFF;
3121 3128 sopp->sopp_maxblk = tcp_maxpsz_set(tcp, B_FALSE);
3122 3129
3123 3130 sopp->sopp_rxhiwat = tcp->tcp_fused ?
3124 3131 tcp_fuse_set_rcv_hiwat(tcp, connp->conn_rcvbuf) :
3125 3132 connp->conn_rcvbuf;
3126 3133 /*
3127 3134 * Determine what write offset value to use depending on SACK and
3128 3135 * whether the endpoint is fused or not.
3129 3136 */
3130 3137 if (tcp->tcp_fused) {
3131 3138 ASSERT(tcp->tcp_loopback);
3132 3139 ASSERT(tcp->tcp_loopback_peer != NULL);
3133 3140 /*
3134 3141 * For fused tcp loopback, set the stream head's write
3135 3142 * offset value to zero since we won't be needing any room
3136 3143 * for TCP/IP headers. This would also improve performance
3137 3144 * since it would reduce the amount of work done by kmem.
3138 3145 * Non-fused tcp loopback case is handled separately below.
3139 3146 */
3140 3147 sopp->sopp_wroff = 0;
3141 3148 /*
3142 3149 * Update the peer's transmit parameters according to
3143 3150 * our recently calculated high water mark value.
3144 3151 */
3145 3152 (void) tcp_maxpsz_set(tcp->tcp_loopback_peer, B_TRUE);
3146 3153 } else if (tcp->tcp_snd_sack_ok) {
3147 3154 sopp->sopp_wroff = connp->conn_ht_iphc_allocated +
3148 3155 (tcp->tcp_loopback ? 0 : tcp->tcp_tcps->tcps_wroff_xtra);
3149 3156 } else {
3150 3157 sopp->sopp_wroff = connp->conn_ht_iphc_len +
3151 3158 (tcp->tcp_loopback ? 0 : tcp->tcp_tcps->tcps_wroff_xtra);
3152 3159 }
3153 3160
3154 3161 if (tcp->tcp_loopback) {
3155 3162 sopp->sopp_flags |= SOCKOPT_LOOPBACK;
3156 3163 sopp->sopp_loopback = B_TRUE;
3157 3164 }
3158 3165 }
3159 3166
3160 3167 /*
3161 3168 * Check the usability of ZEROCOPY; in effect this checks the flag set by IP.
3162 3169 */
3163 3170 boolean_t
3164 3171 tcp_zcopy_check(tcp_t *tcp)
3165 3172 {
3166 3173 conn_t *connp = tcp->tcp_connp;
3167 3174 ip_xmit_attr_t *ixa = connp->conn_ixa;
3168 3175 boolean_t zc_enabled = B_FALSE;
3169 3176 tcp_stack_t *tcps = tcp->tcp_tcps;
3170 3177
3171 3178 if (do_tcpzcopy == 2)
3172 3179 zc_enabled = B_TRUE;
3173 3180 else if ((do_tcpzcopy == 1) && (ixa->ixa_flags & IXAF_ZCOPY_CAPAB))
3174 3181 zc_enabled = B_TRUE;
3175 3182
3176 3183 tcp->tcp_snd_zcopy_on = zc_enabled;
3177 3184 if (!TCP_IS_DETACHED(tcp)) {
3178 3185 if (zc_enabled) {
3179 3186 ixa->ixa_flags |= IXAF_VERIFY_ZCOPY;
3180 3187 (void) proto_set_tx_copyopt(connp->conn_rq, connp,
3181 3188 ZCVMSAFE);
3182 3189 TCP_STAT(tcps, tcp_zcopy_on);
3183 3190 } else {
3184 3191 ixa->ixa_flags &= ~IXAF_VERIFY_ZCOPY;
3185 3192 (void) proto_set_tx_copyopt(connp->conn_rq, connp,
3186 3193 ZCVMUNSAFE);
3187 3194 TCP_STAT(tcps, tcp_zcopy_off);
3188 3195 }
3189 3196 }
3190 3197 return (zc_enabled);
3191 3198 }
3192 3199
3193 3200 /*
3194 3201 * Back off from a zero-copy message by copying data to a newly
3195 3202 * allocated message and freeing the original desballoca'ed segmapped
3196 3203 * message. This function is called from the following two places:
3197 3204 *
3198 3205 * 1. tcp_timer: fix_xmitlist is set to B_TRUE, because it's safe to free
3199 3206 * the original desballoca'ed message and notify sockfs; this is the
3200 3207 * retransmit case.
3201 3208 * 2. tcp_output: fix_xmitlist is set to B_FALSE. The STRUIO_ZCNOTIFY flag
3202 3209 * needs to be copied to the new message.
3203 3210 */
3204 3211 mblk_t *
3205 3212 tcp_zcopy_backoff(tcp_t *tcp, mblk_t *bp, boolean_t fix_xmitlist)
3206 3213 {
3207 3214 mblk_t *nbp;
3208 3215 mblk_t *head = NULL;
3209 3216 mblk_t *tail = NULL;
3210 3217 tcp_stack_t *tcps = tcp->tcp_tcps;
3211 3218
3212 3219 ASSERT(bp != NULL);
3213 3220 while (bp != NULL) {
3214 3221 if (IS_VMLOANED_MBLK(bp)) {
3215 3222 TCP_STAT(tcps, tcp_zcopy_backoff);
3216 3223 if ((nbp = copyb(bp)) == NULL) {
3217 3224 tcp->tcp_xmit_zc_clean = B_FALSE;
3218 3225 if (tail != NULL)
3219 3226 tail->b_cont = bp;
3220 3227 return ((head == NULL) ? bp : head);
3221 3228 }
3222 3229
3223 3230 if (bp->b_datap->db_struioflag & STRUIO_ZCNOTIFY) {
3224 3231 if (fix_xmitlist)
3225 3232 tcp_zcopy_notify(tcp);
3226 3233 else
3227 3234 nbp->b_datap->db_struioflag |=
3228 3235 STRUIO_ZCNOTIFY;
3229 3236 }
3230 3237 nbp->b_cont = bp->b_cont;
3231 3238
3232 3239 /*
3233 3240 * Copy saved information and adjust tcp_xmit_tail
3234 3241 * if needed.
3235 3242 */
3236 3243 if (fix_xmitlist) {
3237 3244 nbp->b_prev = bp->b_prev;
3238 3245 nbp->b_next = bp->b_next;
3239 3246
3240 3247 if (tcp->tcp_xmit_tail == bp)
3241 3248 tcp->tcp_xmit_tail = nbp;
3242 3249 }
3243 3250
3244 3251 /* Free the original message. */
3245 3252 bp->b_prev = NULL;
3246 3253 bp->b_next = NULL;
3247 3254 freeb(bp);
3248 3255
3249 3256 bp = nbp;
3250 3257 }
3251 3258
3252 3259 if (head == NULL) {
3253 3260 head = bp;
3254 3261 }
3255 3262 if (tail == NULL) {
3256 3263 tail = bp;
3257 3264 } else {
3258 3265 tail->b_cont = bp;
3259 3266 tail = bp;
3260 3267 }
3261 3268
3262 3269 /* Move forward. */
3263 3270 bp = bp->b_cont;
3264 3271 }
3265 3272
3266 3273 if (fix_xmitlist) {
3267 3274 tcp->tcp_xmit_last = tail;
3268 3275 tcp->tcp_xmit_zc_clean = B_TRUE;
3269 3276 }
3270 3277
3271 3278 return (head);
3272 3279 }
3273 3280
3274 3281 void
3275 3282 tcp_zcopy_notify(tcp_t *tcp)
3276 3283 {
3277 3284 struct stdata *stp;
3278 3285 conn_t *connp;
3279 3286
3280 3287 if (tcp->tcp_detached)
3281 3288 return;
3282 3289 connp = tcp->tcp_connp;
3283 3290 if (IPCL_IS_NONSTR(connp)) {
3284 3291 (*connp->conn_upcalls->su_zcopy_notify)
3285 3292 (connp->conn_upper_handle);
3286 3293 return;
3287 3294 }
3288 3295 stp = STREAM(connp->conn_rq);
3289 3296 mutex_enter(&stp->sd_lock);
3290 3297 stp->sd_flag |= STZCNOTIFY;
3291 3298 cv_broadcast(&stp->sd_zcopy_wait);
3292 3299 mutex_exit(&stp->sd_lock);
3293 3300 }
3294 3301
3295 3302 /*
3296 3303 * Update the TCP connection according to change of LSO capability.
3297 3304 */
3298 3305 static void
3299 3306 tcp_update_lso(tcp_t *tcp, ip_xmit_attr_t *ixa)
3300 3307 {
3301 3308 /*
3302 3309 * We check against IPv4 header length to preserve the old behavior
3303 3310 * of only enabling LSO when there are no IP options.
3304 3311 * But this restriction might not be necessary at all. Before removing
3305 3312 * it, we need to verify how LSO is handled in the source routing case,
3306 3313 * where IP does a software checksum.
3307 3314 *
3308 3315 * For IPv6, LSO is suppressed whenever any extension header is needed.
3309 3316 */
3310 3317 if (ixa->ixa_ip_hdr_length != ((ixa->ixa_flags & IXAF_IS_IPV4) ?
3311 3318 IP_SIMPLE_HDR_LENGTH : IPV6_HDR_LEN))
3312 3319 return;
3313 3320
3314 3321 /*
3315 3322 * Either the LSO capability newly became usable, or it has changed.
3316 3323 */
3317 3324 if (ixa->ixa_flags & IXAF_LSO_CAPAB) {
3318 3325 ill_lso_capab_t *lsoc = &ixa->ixa_lso_capab;
3319 3326
3320 3327 ASSERT(lsoc->ill_lso_max > 0);
3321 3328 tcp->tcp_lso_max = MIN(TCP_MAX_LSO_LENGTH, lsoc->ill_lso_max);
3322 3329
3323 3330 DTRACE_PROBE3(tcp_update_lso, boolean_t, tcp->tcp_lso,
3324 3331 boolean_t, B_TRUE, uint32_t, tcp->tcp_lso_max);
3325 3332
3326 3333 /*
3327 3334 * If LSO is to be enabled, notify the stream head to use a
3328 3335 * larger data block size.
3329 3336 */
3330 3337 if (!tcp->tcp_lso)
3331 3338 tcp->tcp_maxpsz_multiplier = 0;
3332 3339
3333 3340 tcp->tcp_lso = B_TRUE;
3334 3341 TCP_STAT(tcp->tcp_tcps, tcp_lso_enabled);
3335 3342 } else { /* LSO capability is not usable any more. */
3336 3343 DTRACE_PROBE3(tcp_update_lso, boolean_t, tcp->tcp_lso,
3337 3344 boolean_t, B_FALSE, uint32_t, tcp->tcp_lso_max);
3338 3345
3339 3346 /*
3340 3347 * If LSO is to be disabled, notify the stream head to use a
3341 3348 * smaller data block size, and restore the fragsize to the PMTU.
3342 3349 */
3343 3350 if (tcp->tcp_lso) {
3344 3351 tcp->tcp_maxpsz_multiplier =
3345 3352 tcp->tcp_tcps->tcps_maxpsz_multiplier;
3346 3353 ixa->ixa_fragsize = ixa->ixa_pmtu;
3347 3354 tcp->tcp_lso = B_FALSE;
3348 3355 TCP_STAT(tcp->tcp_tcps, tcp_lso_disabled);
3349 3356 }
3350 3357 }
3351 3358
3352 3359 (void) tcp_maxpsz_set(tcp, B_TRUE);
3353 3360 }
3354 3361
3355 3362 /*
3356 3363 * Update the TCP connection according to change of ZEROCOPY capability.
3357 3364 */
3358 3365 static void
3359 3366 tcp_update_zcopy(tcp_t *tcp)
3360 3367 {
3361 3368 conn_t *connp = tcp->tcp_connp;
3362 3369 tcp_stack_t *tcps = tcp->tcp_tcps;
3363 3370
3364 3371 if (tcp->tcp_snd_zcopy_on) {
3365 3372 tcp->tcp_snd_zcopy_on = B_FALSE;
3366 3373 if (!TCP_IS_DETACHED(tcp)) {
3367 3374 (void) proto_set_tx_copyopt(connp->conn_rq, connp,
3368 3375 ZCVMUNSAFE);
3369 3376 TCP_STAT(tcps, tcp_zcopy_off);
3370 3377 }
3371 3378 } else {
3372 3379 tcp->tcp_snd_zcopy_on = B_TRUE;
3373 3380 if (!TCP_IS_DETACHED(tcp)) {
3374 3381 (void) proto_set_tx_copyopt(connp->conn_rq, connp,
3375 3382 ZCVMSAFE);
3376 3383 TCP_STAT(tcps, tcp_zcopy_on);
3377 3384 }
3378 3385 }
3379 3386 }
3380 3387
3381 3388 /*
3382 3389 * Notify function registered with ip_xmit_attr_t. It's called in the squeue
3383 3390 * so it's safe to update the TCP connection.
3384 3391 */
3385 3392 /* ARGSUSED1 */
3386 3393 static void
3387 3394 tcp_notify(void *arg, ip_xmit_attr_t *ixa, ixa_notify_type_t ntype,
3388 3395 ixa_notify_arg_t narg)
3389 3396 {
3390 3397 tcp_t *tcp = (tcp_t *)arg;
3391 3398 conn_t *connp = tcp->tcp_connp;
3392 3399
3393 3400 switch (ntype) {
3394 3401 case IXAN_LSO:
3395 3402 tcp_update_lso(tcp, connp->conn_ixa);
3396 3403 break;
3397 3404 case IXAN_PMTU:
3398 3405 tcp_update_pmtu(tcp, B_FALSE);
3399 3406 break;
3400 3407 case IXAN_ZCOPY:
3401 3408 tcp_update_zcopy(tcp);
3402 3409 break;
3403 3410 default:
3404 3411 break;
3405 3412 }
3406 3413 }
3407 3414
3408 3415 /*
3409 3416 * The TCP write service routine should never be called...
3410 3417 */
3411 3418 /* ARGSUSED */
3412 3419 static void
3413 3420 tcp_wsrv(queue_t *q)
3414 3421 {
3415 3422 tcp_stack_t *tcps = Q_TO_TCP(q)->tcp_tcps;
3416 3423
3417 3424 TCP_STAT(tcps, tcp_wsrv_called);
3418 3425 }
3419 3426
3420 3427 /*
3421 3428 * Hash list lookup routine for tcp_t structures.
3422 3429 * Returns with a CONN_INC_REF tcp structure. Caller must do a CONN_DEC_REF.
3423 3430 */
3424 3431 tcp_t *
3425 3432 tcp_acceptor_hash_lookup(t_uscalar_t id, tcp_stack_t *tcps)
3426 3433 {
3427 3434 tf_t *tf;
3428 3435 tcp_t *tcp;
3429 3436
3430 3437 tf = &tcps->tcps_acceptor_fanout[TCP_ACCEPTOR_HASH(id)];
3431 3438 mutex_enter(&tf->tf_lock);
3432 3439 for (tcp = tf->tf_tcp; tcp != NULL;
3433 3440 tcp = tcp->tcp_acceptor_hash) {
3434 3441 if (tcp->tcp_acceptor_id == id) {
3435 3442 CONN_INC_REF(tcp->tcp_connp);
3436 3443 mutex_exit(&tf->tf_lock);
3437 3444 return (tcp);
3438 3445 }
3439 3446 }
3440 3447 mutex_exit(&tf->tf_lock);
3441 3448 return (NULL);
3442 3449 }
3443 3450
3444 3451 /*
3445 3452 * Hash list insertion routine for tcp_t structures.
3446 3453 */
3447 3454 void
3448 3455 tcp_acceptor_hash_insert(t_uscalar_t id, tcp_t *tcp)
3449 3456 {
3450 3457 tf_t *tf;
3451 3458 tcp_t **tcpp;
3452 3459 tcp_t *tcpnext;
3453 3460 tcp_stack_t *tcps = tcp->tcp_tcps;
3454 3461
3455 3462 tf = &tcps->tcps_acceptor_fanout[TCP_ACCEPTOR_HASH(id)];
3456 3463
3457 3464 if (tcp->tcp_ptpahn != NULL)
3458 3465 tcp_acceptor_hash_remove(tcp);
3459 3466 tcpp = &tf->tf_tcp;
3460 3467 mutex_enter(&tf->tf_lock);
3461 3468 tcpnext = tcpp[0];
3462 3469 if (tcpnext)
3463 3470 tcpnext->tcp_ptpahn = &tcp->tcp_acceptor_hash;
3464 3471 tcp->tcp_acceptor_hash = tcpnext;
3465 3472 tcp->tcp_ptpahn = tcpp;
3466 3473 tcpp[0] = tcp;
3467 3474 tcp->tcp_acceptor_lockp = &tf->tf_lock; /* For tcp_*_hash_remove */
3468 3475 mutex_exit(&tf->tf_lock);
3469 3476 }
3470 3477
3471 3478 /*
3472 3479 * Hash list removal routine for tcp_t structures.
3473 3480 */
3474 3481 void
3475 3482 tcp_acceptor_hash_remove(tcp_t *tcp)
3476 3483 {
3477 3484 tcp_t *tcpnext;
3478 3485 kmutex_t *lockp;
3479 3486
3480 3487 /*
3481 3488 * Extract the lock pointer in case there are concurrent
3482 3489 * hash_remove's for this instance.
3483 3490 */
3484 3491 lockp = tcp->tcp_acceptor_lockp;
3485 3492
3486 3493 if (tcp->tcp_ptpahn == NULL)
3487 3494 return;
3488 3495
3489 3496 ASSERT(lockp != NULL);
3490 3497 mutex_enter(lockp);
3491 3498 if (tcp->tcp_ptpahn) {
3492 3499 tcpnext = tcp->tcp_acceptor_hash;
3493 3500 if (tcpnext) {
3494 3501 tcpnext->tcp_ptpahn = tcp->tcp_ptpahn;
3495 3502 tcp->tcp_acceptor_hash = NULL;
3496 3503 }
3497 3504 *tcp->tcp_ptpahn = tcpnext;
3498 3505 tcp->tcp_ptpahn = NULL;
3499 3506 }
3500 3507 mutex_exit(lockp);
3501 3508 tcp->tcp_acceptor_lockp = NULL;
3502 3509 }
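
The insert/remove pair above uses the classic pointer-to-pointer list
trick: each tcp_t records in tcp_ptpahn the address of whichever pointer
currently points at it, so removal is O(1) with no chain walk. A
self-contained sketch of the pattern (node_t and its fields are invented
names for illustration):

#include <stddef.h>

typedef struct node {
	struct node *next;	/* like tcp_acceptor_hash */
	struct node **ptpn;	/* like tcp_ptpahn: addr of our 'next' slot */
} node_t;

static void
insert_head(node_t **head, node_t *n)
{
	n->next = *head;
	if (n->next != NULL)
		n->next->ptpn = &n->next;
	n->ptpn = head;
	*head = n;
}

static void
remove_node(node_t *n)
{
	if (n->ptpn == NULL)
		return;			/* not on any list */
	if (n->next != NULL)
		n->next->ptpn = n->ptpn;
	*n->ptpn = n->next;
	n->ptpn = NULL;
	n->next = NULL;
}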
3503 3510
3504 3511 /*
3505 3512 * Type three generator adapted from the random() function in 4.4 BSD:
3506 3513 */
3507 3514
3508 3515 /*
3509 3516 * Copyright (c) 1983, 1993
3510 3517 * The Regents of the University of California. All rights reserved.
3511 3518 *
3512 3519 * Redistribution and use in source and binary forms, with or without
3513 3520 * modification, are permitted provided that the following conditions
3514 3521 * are met:
3515 3522 * 1. Redistributions of source code must retain the above copyright
3516 3523 * notice, this list of conditions and the following disclaimer.
3517 3524 * 2. Redistributions in binary form must reproduce the above copyright
3518 3525 * notice, this list of conditions and the following disclaimer in the
3519 3526 * documentation and/or other materials provided with the distribution.
3520 3527 * 3. All advertising materials mentioning features or use of this software
3521 3528 * must display the following acknowledgement:
3522 3529 * This product includes software developed by the University of
3523 3530 * California, Berkeley and its contributors.
3524 3531 * 4. Neither the name of the University nor the names of its contributors
3525 3532 * may be used to endorse or promote products derived from this software
3526 3533 * without specific prior written permission.
3527 3534 *
3528 3535 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
3529 3536 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
3530 3537 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
3531 3538 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
3532 3539 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
3533 3540 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
3534 3541 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
3535 3542 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
3536 3543 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
3537 3544 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
3538 3545 * SUCH DAMAGE.
3539 3546 */
3540 3547
3541 3548 /* Type 3 -- x**31 + x**3 + 1 */
3542 3549 #define DEG_3 31
3543 3550 #define SEP_3 3
3544 3551
3545 3552
3546 3553 /* Protected by tcp_random_lock */
3547 3554 static int tcp_randtbl[DEG_3 + 1];
3548 3555
3549 3556 static int *tcp_random_fptr = &tcp_randtbl[SEP_3 + 1];
3550 3557 static int *tcp_random_rptr = &tcp_randtbl[1];
3551 3558
3552 3559 static int *tcp_random_state = &tcp_randtbl[1];
3553 3560 static int *tcp_random_end_ptr = &tcp_randtbl[DEG_3 + 1];
3554 3561
3555 3562 kmutex_t tcp_random_lock;
3556 3563
3557 3564 void
3558 3565 tcp_random_init(void)
3559 3566 {
3560 3567 int i;
3561 3568 hrtime_t hrt;
3562 3569 time_t wallclock;
3563 3570 uint64_t result;
3564 3571
3565 3572 /*
3566 3573 * Use high-res timer and current time for seed. Gethrtime() returns
3567 3574 * a longlong, which may contain resolution down to nanoseconds.
3568 3575 * The current time will either be a 32-bit or a 64-bit quantity.
3569 3576 * XOR the two together in a 64-bit result variable.
3570 3577 * Convert the result to a 32-bit value by multiplying the high-order
3571 3578 * 32-bits by the low-order 32-bits.
3572 3579 */
3573 3580
3574 3581 hrt = gethrtime();
3575 3582 (void) drv_getparm(TIME, &wallclock);
3576 3583 result = (uint64_t)wallclock ^ (uint64_t)hrt;
3577 3584 mutex_enter(&tcp_random_lock);
3578 3585 tcp_random_state[0] = ((result >> 32) & 0xffffffff) *
3579 3586 (result & 0xffffffff);
3580 3587
3581 3588 for (i = 1; i < DEG_3; i++)
3582 3589 tcp_random_state[i] = 1103515245 * tcp_random_state[i - 1]
3583 3590 + 12345;
3584 3591 tcp_random_fptr = &tcp_random_state[SEP_3];
3585 3592 tcp_random_rptr = &tcp_random_state[0];
3586 3593 mutex_exit(&tcp_random_lock);
3587 3594 for (i = 0; i < 10 * DEG_3; i++)
3588 3595 (void) tcp_random();
3589 3596 }
3590 3597
3591 3598 /*
3592 3599 * tcp_random: Return a random number in the range [1, 128K].
3593 3600 * This range is selected to be approximately centered on TCP_ISS / 2,
3594 3601 * and easy to compute. We get this value by generating a 32-bit random
3595 3602 * number, selecting out the high-order 17 bits, and then adding one so
3596 3603 * that we never return zero.
3597 3604 */
3598 3605 int
3599 3606 tcp_random(void)
3600 3607 {
3601 3608 int i;
3602 3609
3603 3610 mutex_enter(&tcp_random_lock);
3604 3611 *tcp_random_fptr += *tcp_random_rptr;
3605 3612
3606 3613 /*
3607 3614 * The high-order bits are more random than the low-order bits,
3608 3615 * so we select out the high-order 17 bits and add one so that
3609 3616 * we never return zero.
3610 3617 */
3611 3618 i = ((*tcp_random_fptr >> 15) & 0x1ffff) + 1;
3612 3619 if (++tcp_random_fptr >= tcp_random_end_ptr) {
3613 3620 tcp_random_fptr = tcp_random_state;
3614 3621 ++tcp_random_rptr;
3615 3622 } else if (++tcp_random_rptr >= tcp_random_end_ptr)
3616 3623 tcp_random_rptr = tcp_random_state;
3617 3624
3618 3625 mutex_exit(&tcp_random_lock);
3619 3626 return (i);
3620 3627 }
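
A compact userland analogue of the generator above, using the same
x**31 + x**3 + 1 trinomial and the same high-17-bits-plus-one extraction
(a sketch only: single-threaded, with the state array assumed to be
seeded before first use):

#include <stdint.h>

#define DEG_3	31
#define SEP_3	3

static uint32_t rnd_state[DEG_3];	/* assumed seeded elsewhere */
static int fptr = SEP_3;
static int rptr = 0;

static int
toy_tcp_random(void)
{
	int i;

	rnd_state[fptr] += rnd_state[rptr];
	/* High-order bits are the most random; +1 so we never return 0. */
	i = ((rnd_state[fptr] >> 15) & 0x1ffff) + 1;
	if (++fptr >= DEG_3) {
		fptr = 0;
		++rptr;
	} else if (++rptr >= DEG_3) {
		rptr = 0;
	}
	return (i);
}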
3621 3628
3622 3629 /*
3623 3630 * Split this function out so that if the secret changes, I'm okay.
3624 3631 *
3625 3632 * Initialize the tcp_iss_cookie and tcp_iss_key.
3626 3633 */
3627 3634
3628 3635 #define PASSWD_SIZE 16 /* MUST be multiple of 4 */
3629 3636
3630 3637 void
3631 3638 tcp_iss_key_init(uint8_t *phrase, int len, tcp_stack_t *tcps)
3632 3639 {
3633 3640 struct {
3634 3641 int32_t current_time;
3635 3642 uint32_t randnum;
3636 3643 uint16_t pad;
3637 3644 uint8_t ether[6];
3638 3645 uint8_t passwd[PASSWD_SIZE];
3639 3646 } tcp_iss_cookie;
3640 3647 time_t t;
3641 3648
3642 3649 /*
3643 3650 * Start with the current absolute time.
3644 3651 */
3645 3652 (void) drv_getparm(TIME, &t);
3646 3653 tcp_iss_cookie.current_time = t;
3647 3654
3648 3655 /*
3649 3656 * XXX - Need a more random number per RFC 1750, not this crap.
3650 3657 * OTOH, if what follows is pretty random, then I'm in better shape.
3651 3658 */
3652 3659 tcp_iss_cookie.randnum = (uint32_t)(gethrtime() + tcp_random());
3653 3660 tcp_iss_cookie.pad = 0x365c; /* Picked from HMAC pad values. */
3654 3661
3655 3662 /*
3656 3663 * The cpu_type_info is pretty non-random. Ugggh. It does serve
3657 3664 * as a good template.
3658 3665 */
3659 3666 bcopy(&cpu_list->cpu_type_info, &tcp_iss_cookie.passwd,
3660 3667 min(PASSWD_SIZE, sizeof (cpu_list->cpu_type_info)));
3661 3668
3662 3669 /*
3663 3670 * The pass-phrase. Normally this is supplied by the user via NDD.
3664 3671 */
3665 3672 bcopy(phrase, &tcp_iss_cookie.passwd, min(PASSWD_SIZE, len));
3666 3673
3667 3674 /*
3668 3675 * See 4010593 if this section becomes a problem again,
3669 3676 * but the local ethernet address is useful here.
3670 3677 */
3671 3678 (void) localetheraddr(NULL,
3672 3679 (struct ether_addr *)&tcp_iss_cookie.ether);
3673 3680
3674 3681 /*
3675 3682 * Hash 'em all together. The MD5Final is called per-connection.
3676 3683 */
3677 3684 mutex_enter(&tcps->tcps_iss_key_lock);
3678 3685 MD5Init(&tcps->tcps_iss_key);
3679 3686 MD5Update(&tcps->tcps_iss_key, (uchar_t *)&tcp_iss_cookie,
3680 3687 sizeof (tcp_iss_cookie));
3681 3688 mutex_exit(&tcps->tcps_iss_key_lock);
3682 3689 }
3683 3690
3684 3691 /*
3685 3692 * Called by IP when IP is loaded into the kernel
3686 3693 */
3687 3694 void
3688 3695 tcp_ddi_g_init(void)
3689 3696 {
3690 3697 tcp_timercache = kmem_cache_create("tcp_timercache",
3691 3698 sizeof (tcp_timer_t) + sizeof (mblk_t), 0,
3692 3699 NULL, NULL, NULL, NULL, NULL, 0);
3693 3700
3694 3701 tcp_notsack_blk_cache = kmem_cache_create("tcp_notsack_blk_cache",
3695 3702 sizeof (notsack_blk_t), 0, NULL, NULL, NULL, NULL, NULL, 0);
3696 3703
3697 3704 mutex_init(&tcp_random_lock, NULL, MUTEX_DEFAULT, NULL);
3698 3705
3699 3706 /* Initialize the random number generator */
3700 3707 tcp_random_init();
3701 3708
3702 3709 /* A single callback, independent of how many netstacks we have */
3703 3710 ip_squeue_init(tcp_squeue_add);
3704 3711
3705 3712 tcp_g_kstat = tcp_g_kstat_init(&tcp_g_statistics);
3706 3713
3707 3714 tcp_squeue_flag = tcp_squeue_switch(tcp_squeue_wput);
3708 3715
3709 3716 /*
3710 3717 * We want to be informed each time a stack is created or
3711 3718 * destroyed in the kernel, so we can maintain the
3712 3719 * set of tcp_stack_t's.
3713 3720 */
3714 3721 netstack_register(NS_TCP, tcp_stack_init, NULL, tcp_stack_fini);
3715 3722 }
3716 3723
3717 3724
3718 3725 #define INET_NAME "ip"
3719 3726
3720 3727 /*
3721 3728 * Initialize the TCP stack instance.
3722 3729 */
3723 3730 static void *
3724 3731 tcp_stack_init(netstackid_t stackid, netstack_t *ns)
3725 3732 {
3726 3733 tcp_stack_t *tcps;
3727 3734 int i;
3728 3735 int error = 0;
3729 3736 major_t major;
3730 3737 size_t arrsz;
3731 3738
3732 3739 tcps = (tcp_stack_t *)kmem_zalloc(sizeof (*tcps), KM_SLEEP);
3733 3740 tcps->tcps_netstack = ns;
3734 3741
3735 3742 /* Initialize locks */
3736 3743 mutex_init(&tcps->tcps_iss_key_lock, NULL, MUTEX_DEFAULT, NULL);
3737 3744 mutex_init(&tcps->tcps_epriv_port_lock, NULL, MUTEX_DEFAULT, NULL);
3738 3745
3739 3746 tcps->tcps_g_num_epriv_ports = TCP_NUM_EPRIV_PORTS;
3740 3747 tcps->tcps_g_epriv_ports[0] = ULP_DEF_EPRIV_PORT1;
3741 3748 tcps->tcps_g_epriv_ports[1] = ULP_DEF_EPRIV_PORT2;
3742 3749 tcps->tcps_min_anonpriv_port = 512;
3743 3750
3744 3751 tcps->tcps_bind_fanout = kmem_zalloc(sizeof (tf_t) *
3745 3752 TCP_BIND_FANOUT_SIZE, KM_SLEEP);
3746 3753 tcps->tcps_acceptor_fanout = kmem_zalloc(sizeof (tf_t) *
3747 3754 TCP_ACCEPTOR_FANOUT_SIZE, KM_SLEEP);
3748 3755
3749 3756 for (i = 0; i < TCP_BIND_FANOUT_SIZE; i++) {
3750 3757 mutex_init(&tcps->tcps_bind_fanout[i].tf_lock, NULL,
3751 3758 MUTEX_DEFAULT, NULL);
3752 3759 }
3753 3760
3754 3761 for (i = 0; i < TCP_ACCEPTOR_FANOUT_SIZE; i++) {
3755 3762 mutex_init(&tcps->tcps_acceptor_fanout[i].tf_lock, NULL,
3756 3763 MUTEX_DEFAULT, NULL);
3757 3764 }
3758 3765
3759 3766 /* TCP's IPsec code calls the packet dropper. */
3760 3767 ip_drop_register(&tcps->tcps_dropper, "TCP IPsec policy enforcement");
3761 3768
3762 3769 arrsz = tcp_propinfo_count * sizeof (mod_prop_info_t);
3763 3770 tcps->tcps_propinfo_tbl = (mod_prop_info_t *)kmem_alloc(arrsz,
3764 3771 KM_SLEEP);
3765 3772 bcopy(tcp_propinfo_tbl, tcps->tcps_propinfo_tbl, arrsz);
3766 3773
3767 3774 /*
3768 3775 * Note: To really walk the device tree you need the devinfo
3769 3776 * pointer to your device which is only available after probe/attach.
3770 3777 * The following is safe only because it uses ddi_root_node()
3771 3778 */
3772 3779 tcp_max_optsize = optcom_max_optsize(tcp_opt_obj.odb_opt_des_arr,
3773 3780 tcp_opt_obj.odb_opt_arr_cnt);
3774 3781
3775 3782 /*
3776 3783 * Initialize RFC 1948 secret values. This will probably be reset once
3777 3784 * by the boot scripts.
3778 3785 *
3779 3786 * Use NULL name, as the name is caught by the new lockstats.
3780 3787 *
3781 3788 * Initialize with some random, non-guessable string, like the global
3782 3789 * T_INFO_ACK.
3783 3790 */
3784 3791
3785 3792 tcp_iss_key_init((uint8_t *)&tcp_g_t_info_ack,
3786 3793 sizeof (tcp_g_t_info_ack), tcps);
3787 3794
3788 3795 tcps->tcps_kstat = tcp_kstat2_init(stackid);
3789 3796 tcps->tcps_mibkp = tcp_kstat_init(stackid);
3790 3797
3791 3798 major = mod_name_to_major(INET_NAME);
3792 3799 error = ldi_ident_from_major(major, &tcps->tcps_ldi_ident);
3793 3800 ASSERT(error == 0);
3794 3801 tcps->tcps_ixa_cleanup_mp = allocb_wait(0, BPRI_MED, STR_NOSIG, NULL);
3795 3802 ASSERT(tcps->tcps_ixa_cleanup_mp != NULL);
3796 3803 cv_init(&tcps->tcps_ixa_cleanup_ready_cv, NULL, CV_DEFAULT, NULL);
3797 3804 cv_init(&tcps->tcps_ixa_cleanup_done_cv, NULL, CV_DEFAULT, NULL);
3798 3805 mutex_init(&tcps->tcps_ixa_cleanup_lock, NULL, MUTEX_DEFAULT, NULL);
3799 3806
3800 3807 mutex_init(&tcps->tcps_reclaim_lock, NULL, MUTEX_DEFAULT, NULL);
3801 3808 tcps->tcps_reclaim = B_FALSE;
3802 3809 tcps->tcps_reclaim_tid = 0;
3803 3810 tcps->tcps_reclaim_period = tcps->tcps_rexmit_interval_max;
3804 3811
3805 3812 /*
3806 3813 * ncpus is the current number of CPUs, which can be bigger than
3807 3814 * boot_ncpus. But we don't want to use ncpus to allocate all the
3808 3815 * tcp_stats_cpu_t at system boot time since it will be 1. And while
3809 3816 * tcp_cpu_update() handles CPUs being added, that path would be slow
3810 3817 * if there are many CPUs, as we would be adding them one by one.
3811 3818 *
3812 3819 * Note that tcps_sc_cnt never decreases and the tcps_sc[x] pointers
3813 3820 * are not freed until the stack is going away. So there is no need
3814 3821 * to grab a lock to access the per CPU tcps_sc[x] pointer.
3815 3822 */
3816 3823 mutex_enter(&cpu_lock);
3817 3824 tcps->tcps_sc_cnt = MAX(ncpus, boot_ncpus);
3818 3825 mutex_exit(&cpu_lock);
3819 3826 tcps->tcps_sc = kmem_zalloc(max_ncpus * sizeof (tcp_stats_cpu_t *),
3820 3827 KM_SLEEP);
3821 3828 for (i = 0; i < tcps->tcps_sc_cnt; i++) {
3822 3829 tcps->tcps_sc[i] = kmem_zalloc(sizeof (tcp_stats_cpu_t),
3823 3830 KM_SLEEP);
3824 3831 }
3825 3832
3826 3833 mutex_init(&tcps->tcps_listener_conf_lock, NULL, MUTEX_DEFAULT, NULL);
3827 3834 list_create(&tcps->tcps_listener_conf, sizeof (tcp_listener_t),
3828 3835 offsetof(tcp_listener_t, tl_link));
3829 3836
3830 3837 return (tcps);
3831 3838 }
3832 3839
3833 3840 /*
3834 3841 * Called when the IP module is about to be unloaded.
3835 3842 */
3836 3843 void
3837 3844 tcp_ddi_g_destroy(void)
3838 3845 {
3839 3846 tcp_g_kstat_fini(tcp_g_kstat);
3840 3847 tcp_g_kstat = NULL;
3841 3848 bzero(&tcp_g_statistics, sizeof (tcp_g_statistics));
3842 3849
3843 3850 mutex_destroy(&tcp_random_lock);
3844 3851
3845 3852 kmem_cache_destroy(tcp_timercache);
3846 3853 kmem_cache_destroy(tcp_notsack_blk_cache);
3847 3854
3848 3855 netstack_unregister(NS_TCP);
3849 3856 }
3850 3857
3851 3858 /*
3852 3859 * Free the TCP stack instance.
3853 3860 */
3854 3861 static void
3855 3862 tcp_stack_fini(netstackid_t stackid, void *arg)
3856 3863 {
3857 3864 tcp_stack_t *tcps = (tcp_stack_t *)arg;
3858 3865 int i;
3859 3866
3860 3867 freeb(tcps->tcps_ixa_cleanup_mp);
3861 3868 tcps->tcps_ixa_cleanup_mp = NULL;
3862 3869 cv_destroy(&tcps->tcps_ixa_cleanup_ready_cv);
3863 3870 cv_destroy(&tcps->tcps_ixa_cleanup_done_cv);
3864 3871 mutex_destroy(&tcps->tcps_ixa_cleanup_lock);
3865 3872
3866 3873 /*
3866 3873 * Setting tcps_reclaim to false tells tcp_reclaim_timer() not to restart
3868 3875 * the timer.
3869 3876 */
3870 3877 mutex_enter(&tcps->tcps_reclaim_lock);
3871 3878 tcps->tcps_reclaim = B_FALSE;
3872 3879 mutex_exit(&tcps->tcps_reclaim_lock);
3873 3880 if (tcps->tcps_reclaim_tid != 0)
3874 3881 (void) untimeout(tcps->tcps_reclaim_tid);
3875 3882 mutex_destroy(&tcps->tcps_reclaim_lock);
3876 3883
3877 3884 tcp_listener_conf_cleanup(tcps);
3878 3885
3879 3886 for (i = 0; i < tcps->tcps_sc_cnt; i++)
3880 3887 kmem_free(tcps->tcps_sc[i], sizeof (tcp_stats_cpu_t));
3881 3888 kmem_free(tcps->tcps_sc, max_ncpus * sizeof (tcp_stats_cpu_t *));
3882 3889
3883 3890 kmem_free(tcps->tcps_propinfo_tbl,
3884 3891 tcp_propinfo_count * sizeof (mod_prop_info_t));
3885 3892 tcps->tcps_propinfo_tbl = NULL;
3886 3893
3887 3894 for (i = 0; i < TCP_BIND_FANOUT_SIZE; i++) {
3888 3895 ASSERT(tcps->tcps_bind_fanout[i].tf_tcp == NULL);
3889 3896 mutex_destroy(&tcps->tcps_bind_fanout[i].tf_lock);
3890 3897 }
3891 3898
3892 3899 for (i = 0; i < TCP_ACCEPTOR_FANOUT_SIZE; i++) {
3893 3900 ASSERT(tcps->tcps_acceptor_fanout[i].tf_tcp == NULL);
3894 3901 mutex_destroy(&tcps->tcps_acceptor_fanout[i].tf_lock);
3895 3902 }
3896 3903
3897 3904 kmem_free(tcps->tcps_bind_fanout, sizeof (tf_t) * TCP_BIND_FANOUT_SIZE);
3898 3905 tcps->tcps_bind_fanout = NULL;
3899 3906
3900 3907 kmem_free(tcps->tcps_acceptor_fanout, sizeof (tf_t) *
3901 3908 TCP_ACCEPTOR_FANOUT_SIZE);
3902 3909 tcps->tcps_acceptor_fanout = NULL;
3903 3910
3904 3911 mutex_destroy(&tcps->tcps_iss_key_lock);
3905 3912 mutex_destroy(&tcps->tcps_epriv_port_lock);
3906 3913
3907 3914 ip_drop_unregister(&tcps->tcps_dropper);
3908 3915
3909 3916 tcp_kstat2_fini(stackid, tcps->tcps_kstat);
3910 3917 tcps->tcps_kstat = NULL;
3911 3918
3912 3919 tcp_kstat_fini(stackid, tcps->tcps_mibkp);
3913 3920 tcps->tcps_mibkp = NULL;
3914 3921
3915 3922 ldi_ident_release(tcps->tcps_ldi_ident);
3916 3923 kmem_free(tcps, sizeof (*tcps));
3917 3924 }
3918 3925
3919 3926 /*
3920 3927 * Generate the ISS, taking into account that NDD changes may happen halfway through.
3921 3928 * (If the iss is not zero, set it.)
3922 3929 */
3923 3930
3924 3931 static void
3925 3932 tcp_iss_init(tcp_t *tcp)
3926 3933 {
3927 3934 MD5_CTX context;
3928 3935 struct { uint32_t ports; in6_addr_t src; in6_addr_t dst; } arg;
3929 3936 uint32_t answer[4];
3930 3937 tcp_stack_t *tcps = tcp->tcp_tcps;
3931 3938 conn_t *connp = tcp->tcp_connp;
3932 3939
3933 3940 tcps->tcps_iss_incr_extra += (tcps->tcps_iss_incr >> 1);
3934 3941 tcp->tcp_iss = tcps->tcps_iss_incr_extra;
3935 3942 switch (tcps->tcps_strong_iss) {
3936 3943 case 2:
3937 3944 mutex_enter(&tcps->tcps_iss_key_lock);
3938 3945 context = tcps->tcps_iss_key;
3939 3946 mutex_exit(&tcps->tcps_iss_key_lock);
3940 3947 arg.ports = connp->conn_ports;
3941 3948 arg.src = connp->conn_laddr_v6;
3942 3949 arg.dst = connp->conn_faddr_v6;
3943 3950 MD5Update(&context, (uchar_t *)&arg, sizeof (arg));
3944 3951 MD5Final((uchar_t *)answer, &context);
3945 3952 tcp->tcp_iss += answer[0] ^ answer[1] ^ answer[2] ^ answer[3];
3946 3953 /*
3947 3954 * Now that we've hashed into a unique per-connection sequence
3948 3955 * space, add a random increment per strong_iss == 1. So I
3949 3956 * guess we'll have to...
3950 3957 */
3951 3958 /* FALLTHRU */
3952 3959 case 1:
3953 3960 tcp->tcp_iss += (gethrtime() >> ISS_NSEC_SHT) + tcp_random();
3954 3961 break;
3955 3962 default:
3956 3963 tcp->tcp_iss += (uint32_t)gethrestime_sec() *
3957 3964 tcps->tcps_iss_incr;
3958 3965 break;
3959 3966 }
3960 3967 tcp->tcp_valid_bits = TCP_ISS_VALID;
3961 3968 tcp->tcp_fss = tcp->tcp_iss - 1;
3962 3969 tcp->tcp_suna = tcp->tcp_iss;
3963 3970 tcp->tcp_snxt = tcp->tcp_iss + 1;
3964 3971 tcp->tcp_rexmit_nxt = tcp->tcp_snxt;
3965 3972 tcp->tcp_csuna = tcp->tcp_snxt;
3966 3973 }
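
The strong_iss == 2 branch above is essentially RFC 1948:
ISS = M(t) + F(secret, connection id), with MD5 as F and the four digest
words folded together with XOR. A hedged userland sketch, using OpenSSL's
MD5() purely as a stand-in for the kernel's own MD5 routines (buffer
sizes and the rfc1948_iss name are assumptions for illustration):

#include <stdint.h>
#include <string.h>
#include <openssl/md5.h>

static uint32_t
rfc1948_iss(uint32_t clock_m, const uint8_t secret[16],
    const uint8_t *conn_id, size_t idlen)
{
	uint8_t buf[64];
	uint8_t digest[MD5_DIGEST_LENGTH];
	uint32_t w[4];

	/* F(secret, local/remote addresses and ports); idlen <= 48 */
	memcpy(buf, secret, 16);
	memcpy(buf + 16, conn_id, idlen);
	(void) MD5(buf, 16 + idlen, digest);
	memcpy(w, digest, sizeof (w));

	/* Fold the digest as tcp_iss_init() does, then add the clock. */
	return (clock_m + (w[0] ^ w[1] ^ w[2] ^ w[3]));
}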
3967 3974
3968 3975 /*
3969 3976 * tcp_{set,clr}qfull() functions are used to either set or clear QFULL
3970 3977 * on the specified backing STREAMS q. Note that the caller may make the
3971 3978 * decision to call based on the tcp_t.tcp_flow_stopped value, which
3972 3979 * when checked outside the q's lock is only advisory ...
3973 3980 */
3974 3981 void
3975 3982 tcp_setqfull(tcp_t *tcp)
3976 3983 {
3977 3984 tcp_stack_t *tcps = tcp->tcp_tcps;
3978 3985 conn_t *connp = tcp->tcp_connp;
3979 3986
3980 3987 if (tcp->tcp_closed)
3981 3988 return;
3982 3989
3983 3990 conn_setqfull(connp, &tcp->tcp_flow_stopped);
3984 3991 if (tcp->tcp_flow_stopped)
3985 3992 TCP_STAT(tcps, tcp_flwctl_on);
3986 3993 }
3987 3994
3988 3995 void
3989 3996 tcp_clrqfull(tcp_t *tcp)
3990 3997 {
3991 3998 conn_t *connp = tcp->tcp_connp;
3992 3999
3993 4000 if (tcp->tcp_closed)
3994 4001 return;
3995 4002 conn_clrqfull(connp, &tcp->tcp_flow_stopped);
3996 4003 }
3997 4004
3998 4005 static int
3999 4006 tcp_squeue_switch(int val)
4000 4007 {
4001 4008 int rval = SQ_FILL;
4002 4009
4003 4010 switch (val) {
4004 4011 case 1:
4005 4012 rval = SQ_NODRAIN;
4006 4013 break;
4007 4014 case 2:
4008 4015 rval = SQ_PROCESS;
4009 4016 break;
4010 4017 default:
4011 4018 break;
4012 4019 }
4013 4020 return (rval);
4014 4021 }
4015 4022
4016 4023 /*
4017 4024 * This is called once for each squeue - globally for all stack
4018 4025 * instances.
4019 4026 */
4020 4027 static void
4021 4028 tcp_squeue_add(squeue_t *sqp)
4022 4029 {
4023 4030 tcp_squeue_priv_t *tcp_time_wait = kmem_zalloc(
4024 4031 sizeof (tcp_squeue_priv_t), KM_SLEEP);
4025 4032
4026 4033 *squeue_getprivate(sqp, SQPRIVATE_TCP) = (intptr_t)tcp_time_wait;
4027 4034 if (tcp_free_list_max_cnt == 0) {
4028 4035 int tcp_ncpus = ((boot_max_ncpus == -1) ?
4029 4036 max_ncpus : boot_max_ncpus);
4030 4037
4031 4038 /*
4032 4039 * Limit the number of entries to 1% of available memory / tcp_ncpus
4033 4040 */
4034 4041 tcp_free_list_max_cnt = (freemem * PAGESIZE) /
4035 4042 (tcp_ncpus * sizeof (tcp_t) * 100);
4036 4043 }
4037 4044 tcp_time_wait->tcp_free_list_cnt = 0;
4038 4045 }
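
A worked instance of the free-list sizing rule above, with every input
assumed for illustration: 8 GiB of free memory, 8 CPUs, and sizeof (tcp_t)
taken as roughly 2 KiB:

#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	uint64_t freebytes = 8ULL << 30;	/* freemem * PAGESIZE */
	uint64_t tcp_ncpus = 8;
	uint64_t tcp_t_size = 2048;		/* rough guess, not ABI */

	/* 1% of free memory, split across CPUs, in tcp_t-sized units */
	uint64_t max_cnt = freebytes / (tcp_ncpus * tcp_t_size * 100);
	(void) printf("%llu\n", (unsigned long long)max_cnt);	/* 5242 */
	return (0);
}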
4039 4046 /*
4040 4047 * Return a Unix error if the TLI error is TSYSERR; otherwise return a
4041 4048 * negative TLI error.
4042 4049 */
4043 4050 int
4044 4051 tcp_do_bind(conn_t *connp, struct sockaddr *sa, socklen_t len, cred_t *cr,
4045 4052 boolean_t bind_to_req_port_only)
4046 4053 {
4047 4054 int error;
4048 4055 tcp_t *tcp = connp->conn_tcp;
4049 4056
4050 4057 if (tcp->tcp_state >= TCPS_BOUND) {
4051 4058 if (connp->conn_debug) {
4052 4059 (void) strlog(TCP_MOD_ID, 0, 1, SL_ERROR|SL_TRACE,
4053 4060 "tcp_bind: bad state, %d", tcp->tcp_state);
4054 4061 }
4055 4062 return (-TOUTSTATE);
4056 4063 }
4057 4064
4058 4065 error = tcp_bind_check(connp, sa, len, cr, bind_to_req_port_only);
4059 4066 if (error != 0)
4060 4067 return (error);
4061 4068
4062 4069 ASSERT(tcp->tcp_state == TCPS_BOUND);
4063 4070 tcp->tcp_conn_req_max = 0;
4064 4071 return (0);
4065 4072 }
4066 4073
4067 4074 /*
4068 4075 * If the return value from this function is positive, it's a UNIX error.
4069 4076 * Otherwise, if it's negative, then the absolute value is a TLI error.
4070 4077 * The TPI routine tcp_tpi_connect() is a wrapper function for this.
4071 4078 */
4072 4079 int
4073 4080 tcp_do_connect(conn_t *connp, const struct sockaddr *sa, socklen_t len,
4074 4081 cred_t *cr, pid_t pid)
4075 4082 {
4076 4083 tcp_t *tcp = connp->conn_tcp;
4077 4084 sin_t *sin = (sin_t *)sa;
4078 4085 sin6_t *sin6 = (sin6_t *)sa;
4079 4086 ipaddr_t *dstaddrp;
4080 4087 in_port_t dstport;
4081 4088 uint_t srcid;
4082 4089 int error;
4083 4090 uint32_t mss;
4084 4091 mblk_t *syn_mp;
4085 4092 tcp_stack_t *tcps = tcp->tcp_tcps;
4086 4093 int32_t oldstate;
4087 4094 ip_xmit_attr_t *ixa = connp->conn_ixa;
4088 4095
4089 4096 oldstate = tcp->tcp_state;
4090 4097
4091 4098 switch (len) {
4092 4099 default:
4093 4100 /*
4094 4101 * Should never happen
4095 4102 */
4096 4103 return (EINVAL);
4097 4104
4098 4105 case sizeof (sin_t):
4099 4106 sin = (sin_t *)sa;
4100 4107 if (sin->sin_port == 0) {
4101 4108 return (-TBADADDR);
4102 4109 }
4103 4110 if (connp->conn_ipv6_v6only) {
4104 4111 return (EAFNOSUPPORT);
4105 4112 }
4106 4113 break;
4107 4114
4108 4115 case sizeof (sin6_t):
4109 4116 sin6 = (sin6_t *)sa;
4110 4117 if (sin6->sin6_port == 0) {
4111 4118 return (-TBADADDR);
4112 4119 }
4113 4120 break;
4114 4121 }
4115 4122 /*
4116 4123 * If we're connecting to an IPv4-mapped IPv6 address, we need to
4117 4124 * make sure that the conn_ipversion is IPV4_VERSION. We need
4118 4125 * to do this before we call tcp_bindi() so that the port lookup
4119 4126 * code will look for ports in the correct port space (IPv4 and
4120 4127 * IPv6 have separate port spaces).
4121 4128 */
4122 4129 if (connp->conn_family == AF_INET6 &&
4123 4130 connp->conn_ipversion == IPV6_VERSION &&
4124 4131 IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
4125 4132 if (connp->conn_ipv6_v6only)
4126 4133 return (EADDRNOTAVAIL);
4127 4134
4128 4135 connp->conn_ipversion = IPV4_VERSION;
4129 4136 }
4130 4137
4131 4138 switch (tcp->tcp_state) {
4132 4139 case TCPS_LISTEN:
4133 4140 /*
4134 4141 * Listening sockets are not allowed to issue connect().
4135 4142 */
4136 4143 if (IPCL_IS_NONSTR(connp))
4137 4144 return (EOPNOTSUPP);
4138 4145 /* FALLTHRU */
4139 4146 case TCPS_IDLE:
4140 4147 /*
4141 4148 * We support quick connect, refer to comments in
4142 4149 * tcp_connect_*()
4143 4150 */
4144 4151 /* FALLTHRU */
4145 4152 case TCPS_BOUND:
4146 4153 break;
4147 4154 default:
4148 4155 return (-TOUTSTATE);
4149 4156 }
4150 4157
4151 4158 /*
4152 4159 * We update our cred/cpid based on the caller of connect
4153 4160 */
4154 4161 if (connp->conn_cred != cr) {
4155 4162 crhold(cr);
4156 4163 crfree(connp->conn_cred);
4157 4164 connp->conn_cred = cr;
4158 4165 }
4159 4166 connp->conn_cpid = pid;
4160 4167
4161 4168 /* Cache things in the ixa without any refhold */
4162 4169 ASSERT(!(ixa->ixa_free_flags & IXA_FREE_CRED));
4163 4170 ixa->ixa_cred = cr;
4164 4171 ixa->ixa_cpid = pid;
4165 4172 if (is_system_labeled()) {
4166 4173 /* We need to restart with a label based on the cred */
4167 4174 ip_xmit_attr_restore_tsl(ixa, ixa->ixa_cred);
4168 4175 }
4169 4176
4170 4177 if (connp->conn_family == AF_INET6) {
4171 4178 if (!IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
4172 4179 error = tcp_connect_ipv6(tcp, &sin6->sin6_addr,
4173 4180 sin6->sin6_port, sin6->sin6_flowinfo,
4174 4181 sin6->__sin6_src_id, sin6->sin6_scope_id);
4175 4182 } else {
4176 4183 /*
4177 4184 * The destination address is an IPv4-mapped IPv6
4178 4185 * address. The bound source address should likewise
4179 4186 * be unspecified or an IPv4-mapped address.
4180 4187 */
4181 4188 if (!IN6_IS_ADDR_UNSPECIFIED(
4182 4189 &connp->conn_bound_addr_v6) &&
4183 4190 !IN6_IS_ADDR_V4MAPPED(&connp->conn_bound_addr_v6)) {
4184 4191 return (EADDRNOTAVAIL);
4185 4192 }
4186 4193 dstaddrp = &V4_PART_OF_V6((sin6->sin6_addr));
4187 4194 dstport = sin6->sin6_port;
4188 4195 srcid = sin6->__sin6_src_id;
4189 4196 error = tcp_connect_ipv4(tcp, dstaddrp, dstport,
4190 4197 srcid);
4191 4198 }
4192 4199 } else {
4193 4200 dstaddrp = &sin->sin_addr.s_addr;
4194 4201 dstport = sin->sin_port;
4195 4202 srcid = 0;
4196 4203 error = tcp_connect_ipv4(tcp, dstaddrp, dstport, srcid);
4197 4204 }
4198 4205
4199 4206 if (error != 0)
4200 4207 goto connect_failed;
4201 4208
4202 4209 CL_INET_CONNECT(connp, B_TRUE, error);
4203 4210 if (error != 0)
4204 4211 goto connect_failed;
4205 4212
4206 4213 /* connect succeeded */
4207 4214 TCPS_BUMP_MIB(tcps, tcpActiveOpens);
4208 4215 tcp->tcp_active_open = 1;
4209 4216
4210 4217 /*
4211 4218 * tcp_set_destination() does not adjust for TCP/IP header length.
4212 4219 */
4213 4220 mss = tcp->tcp_mss - connp->conn_ht_iphc_len;
4214 4221
4215 4222 /*
4216 4223 * Just make sure our rwnd is at least recv_hiwat_minmss * MSS
4217 4224 * large, and rounded up to the nearest multiple of the MSS.
4218 4225 *
4219 4226 * We do the round up here because we need to get the interface MTU
4220 4227 * first before we can do the round up.
4221 4228 */
4222 4229 tcp->tcp_rwnd = connp->conn_rcvbuf;
4223 4230 tcp->tcp_rwnd = MAX(MSS_ROUNDUP(tcp->tcp_rwnd, mss),
4224 4231 tcps->tcps_recv_hiwat_minmss * mss);
4225 4232 connp->conn_rcvbuf = tcp->tcp_rwnd;
4226 4233 tcp_set_ws_value(tcp);
4227 4234 tcp->tcp_tcpha->tha_win = htons(tcp->tcp_rwnd >> tcp->tcp_rcv_ws);
4228 4235 if (tcp->tcp_rcv_ws > 0 || tcps->tcps_wscale_always)
4229 4236 tcp->tcp_snd_ws_ok = B_TRUE;
4230 4237
4231 4238 /*
4232 4239 * Set tcp_snd_ts_ok to true
4233 4240 * so that tcp_xmit_mp will
4234 4241 * include the timestamp
4235 4242 * option in the SYN segment.
4236 4243 */
4237 4244 if (tcps->tcps_tstamp_always ||
4238 4245 (tcp->tcp_rcv_ws && tcps->tcps_tstamp_if_wscale)) {
4239 4246 tcp->tcp_snd_ts_ok = B_TRUE;
4240 4247 }
4241 4248
4242 4249 /*
4243 4250 * Note that tcp_snd_sack_ok can be set in tcp_set_destination() if
4244 4251 * the SACK metric is set. So here we just check the per stack SACK
4245 4252 * permitted param.
4246 4253 */
4247 4254 if (tcps->tcps_sack_permitted == 2) {
4248 4255 ASSERT(tcp->tcp_num_sack_blk == 0);
4249 4256 ASSERT(tcp->tcp_notsack_list == NULL);
4250 4257 tcp->tcp_snd_sack_ok = B_TRUE;
4251 4258 }
4252 4259
4253 4260 /*
4254 4261 * Should we use ECN? Note that the current
4255 4262 * default value (SunOS 5.9) of tcp_ecn_permitted
4256 4263 * is 1. The reason for this is that there is
4257 4264 * equipment out there that will drop ECN-enabled
4258 4265 * IP packets. Setting it to 1 avoids
4259 4266 * compatibility problems.
4260 4267 */
4261 4268 if (tcps->tcps_ecn_permitted == 2)
4262 4269 tcp->tcp_ecn_ok = B_TRUE;
4263 4270
4264 4271 /* Trace change from BOUND -> SYN_SENT here */
4265 4272 DTRACE_TCP6(state__change, void, NULL, ip_xmit_attr_t *,
4266 4273 connp->conn_ixa, void, NULL, tcp_t *, tcp, void, NULL,
4267 4274 int32_t, TCPS_BOUND);
4268 4275
4269 4276 TCP_TIMER_RESTART(tcp, tcp->tcp_rto);
4270 4277 syn_mp = tcp_xmit_mp(tcp, NULL, 0, NULL, NULL,
4271 4278 tcp->tcp_iss, B_FALSE, NULL, B_FALSE);
4272 4279 if (syn_mp != NULL) {
4273 4280 /*
4274 4281 * We must bump the generation before sending the syn
4275 4282 * to ensure that we use the right generation in case
4276 4283 * this thread issues a "connected" up call.
4277 4284 */
4278 4285 SOCK_CONNID_BUMP(tcp->tcp_connid);
4279 4286 /*
4280 4287 * DTrace sending the first SYN as a
4281 4288 * tcp:::connect-request event.
4282 4289 */
4283 4290 DTRACE_TCP5(connect__request, mblk_t *, NULL,
4284 4291 ip_xmit_attr_t *, connp->conn_ixa,
4285 4292 void_ip_t *, syn_mp->b_rptr, tcp_t *, tcp,
4286 4293 tcph_t *,
4287 4294 &syn_mp->b_rptr[connp->conn_ixa->ixa_ip_hdr_length]);
4288 4295 tcp_send_data(tcp, syn_mp);
4289 4296 }
4290 4297
4291 4298 if (tcp->tcp_conn.tcp_opts_conn_req != NULL)
4292 4299 tcp_close_mpp(&tcp->tcp_conn.tcp_opts_conn_req);
4293 4300 return (0);
4294 4301
4295 4302 connect_failed:
4296 4303 connp->conn_faddr_v6 = ipv6_all_zeros;
4297 4304 connp->conn_fport = 0;
4298 4305 tcp->tcp_state = oldstate;
4299 4306 if (tcp->tcp_conn.tcp_opts_conn_req != NULL)
4300 4307 tcp_close_mpp(&tcp->tcp_conn.tcp_opts_conn_req);
4301 4308 return (error);
4302 4309 }
4303 4310
4304 4311 int
4305 4312 tcp_do_listen(conn_t *connp, struct sockaddr *sa, socklen_t len,
4306 4313 int backlog, cred_t *cr, boolean_t bind_to_req_port_only)
4307 4314 {
4308 4315 tcp_t *tcp = connp->conn_tcp;
4309 4316 int error = 0;
4310 4317 tcp_stack_t *tcps = tcp->tcp_tcps;
4311 4318 int32_t oldstate;
4312 4319
4313 4320 /* All Solaris components should pass a cred for this operation. */
4314 4321 ASSERT(cr != NULL);
4315 4322
4316 4323 if (tcp->tcp_state >= TCPS_BOUND) {
4317 4324 if ((tcp->tcp_state == TCPS_BOUND ||
4318 4325 tcp->tcp_state == TCPS_LISTEN) && backlog > 0) {
4319 4326 /*
4320 4327 * Handle listen() increasing backlog.
4321 4328 * This is more "liberal" than what the TPI spec
4322 4329 * requires but is needed to avoid a t_unbind
4323 4330 * when handling listen() since the port number
4324 4331 * might be "stolen" between the unbind and bind.
4325 4332 */
4326 4333 goto do_listen;
4327 4334 }
4328 4335 if (connp->conn_debug) {
4329 4336 (void) strlog(TCP_MOD_ID, 0, 1, SL_ERROR|SL_TRACE,
4330 4337 "tcp_listen: bad state, %d", tcp->tcp_state);
4331 4338 }
4332 4339 return (-TOUTSTATE);
4333 4340 } else {
4334 4341 if (sa == NULL) {
4335 4342 sin6_t addr;
4336 4343 sin_t *sin;
4337 4344 sin6_t *sin6;
4338 4345
4339 4346 ASSERT(IPCL_IS_NONSTR(connp));
4340 4347 /* Do an implicit bind: Request for a generic port. */
4341 4348 if (connp->conn_family == AF_INET) {
4342 4349 len = sizeof (sin_t);
4343 4350 sin = (sin_t *)&addr;
4344 4351 *sin = sin_null;
4345 4352 sin->sin_family = AF_INET;
4346 4353 } else {
4347 4354 ASSERT(connp->conn_family == AF_INET6);
4348 4355 len = sizeof (sin6_t);
4349 4356 sin6 = (sin6_t *)&addr;
4350 4357 *sin6 = sin6_null;
4351 4358 sin6->sin6_family = AF_INET6;
4352 4359 }
4353 4360 sa = (struct sockaddr *)&addr;
4354 4361 }
4355 4362
4356 4363 error = tcp_bind_check(connp, sa, len, cr,
4357 4364 bind_to_req_port_only);
4358 4365 if (error)
4359 4366 return (error);
4360 4367 /* Fall through and do the fanout insertion */
4361 4368 }
4362 4369
4363 4370 do_listen:
4364 4371 ASSERT(tcp->tcp_state == TCPS_BOUND || tcp->tcp_state == TCPS_LISTEN);
4365 4372 tcp->tcp_conn_req_max = backlog;
4366 4373 if (tcp->tcp_conn_req_max) {
4367 4374 if (tcp->tcp_conn_req_max < tcps->tcps_conn_req_min)
4368 4375 tcp->tcp_conn_req_max = tcps->tcps_conn_req_min;
4369 4376 if (tcp->tcp_conn_req_max > tcps->tcps_conn_req_max_q)
4370 4377 tcp->tcp_conn_req_max = tcps->tcps_conn_req_max_q;
4371 4378 /*
4372 4379 * If this is a listener, do not reset the eager list
4373 4380 * and other state. Note that we don't check if the
4374 4381 * existing eager list meets the new tcp_conn_req_max
4375 4382 * requirement.
4376 4383 */
4377 4384 if (tcp->tcp_state != TCPS_LISTEN) {
4378 4385 tcp->tcp_state = TCPS_LISTEN;
4379 4386 DTRACE_TCP6(state__change, void, NULL, ip_xmit_attr_t *,
4380 4387 connp->conn_ixa, void, NULL, tcp_t *, tcp,
4381 4388 void, NULL, int32_t, TCPS_BOUND);
4382 4389 /* Initialize the chain. Don't need the eager_lock */
4383 4390 tcp->tcp_eager_next_q0 = tcp->tcp_eager_prev_q0 = tcp;
4384 4391 tcp->tcp_eager_next_drop_q0 = tcp;
4385 4392 tcp->tcp_eager_prev_drop_q0 = tcp;
4386 4393 tcp->tcp_second_ctimer_threshold =
4387 4394 tcps->tcps_ip_abort_linterval;
4388 4395 }
4389 4396 }
4390 4397
4391 4398 /*
4392 4399 * We need to make sure that the conn_recv is set to a non-null
4393 4400 * value before we insert the conn into the classifier table.
4394 4401 * This is to avoid a race with an incoming packet which does an
4395 4402 * ipcl_classify().
4396 4403 * We initially set it to tcp_input_listener_unbound to try to
4397 4404 * pick a good squeue for the listener when the first SYN arrives.
4398 4405 * tcp_input_listener_unbound sets it to tcp_input_listener on that
4399 4406 * first SYN.
4400 4407 */
4401 4408 connp->conn_recv = tcp_input_listener_unbound;
4402 4409
4403 4410 /* Insert the listener in the classifier table */
4404 4411 error = ip_laddr_fanout_insert(connp);
4405 4412 if (error != 0) {
4406 4413 /* Undo the bind - release the port number */
4407 4414 oldstate = tcp->tcp_state;
4408 4415 tcp->tcp_state = TCPS_IDLE;
4409 4416 DTRACE_TCP6(state__change, void, NULL, ip_xmit_attr_t *,
4410 4417 connp->conn_ixa, void, NULL, tcp_t *, tcp, void, NULL,
4411 4418 int32_t, oldstate);
4412 4419 connp->conn_bound_addr_v6 = ipv6_all_zeros;
4413 4420
4414 4421 connp->conn_laddr_v6 = ipv6_all_zeros;
4415 4422 connp->conn_saddr_v6 = ipv6_all_zeros;
4416 4423 connp->conn_ports = 0;
4417 4424
4418 4425 if (connp->conn_anon_port) {
4419 4426 zone_t *zone;
4420 4427
4421 4428 zone = crgetzone(cr);
4422 4429 connp->conn_anon_port = B_FALSE;
4423 4430 (void) tsol_mlp_anon(zone, connp->conn_mlp_type,
4424 4431 connp->conn_proto, connp->conn_lport, B_FALSE);
4425 4432 }
4426 4433 connp->conn_mlp_type = mlptSingle;
4427 4434
4428 4435 tcp_bind_hash_remove(tcp);
4429 4436 return (error);
4430 4437 } else {
4431 4438 /*
4432 4439 * If there is a connection limit, allocate and initialize
4433 4440 * the counter struct. Note that since listen can be called
4434 4441 * multiple times, the struct may already have been allocated.
4435 4442 */
4436 4443 if (!list_is_empty(&tcps->tcps_listener_conf) &&
4437 4444 tcp->tcp_listen_cnt == NULL) {
4438 4445 tcp_listen_cnt_t *tlc;
4439 4446 uint32_t ratio;
4440 4447
4441 4448 ratio = tcp_find_listener_conf(tcps,
4442 4449 ntohs(connp->conn_lport));
4443 4450 if (ratio != 0) {
4444 4451 uint32_t mem_ratio, tot_buf;
4445 4452
4446 4453 tlc = kmem_alloc(sizeof (tcp_listen_cnt_t),
4447 4454 KM_SLEEP);
4448 4455 /*
4449 4456 * Calculate the connection limit based on
4450 4457 * the configured ratio and maxusers. Maxusers
4451 4458 * is calculated based on memory size,
4452 4459 * ~ 1 user per MB. Note that the conn_rcvbuf
4453 4460 * and conn_sndbuf may change after a
4454 4461 * connection is accepted. So what we have
4455 4462 * is only an approximation.
4456 4463 */
4457 4464 if ((tot_buf = connp->conn_rcvbuf +
4458 4465 connp->conn_sndbuf) < MB) {
4459 4466 mem_ratio = MB / tot_buf;
4460 4467 tlc->tlc_max = maxusers / ratio *
4461 4468 mem_ratio;
4462 4469 } else {
4463 4470 mem_ratio = tot_buf / MB;
4464 4471 tlc->tlc_max = maxusers / ratio /
4465 4472 mem_ratio;
4466 4473 }
4467 4474 /* At least we should allow two connections! */
4468 4475 if (tlc->tlc_max <= tcp_min_conn_listener)
4469 4476 tlc->tlc_max = tcp_min_conn_listener;
4470 4477 tlc->tlc_cnt = 1;
4471 4478 tlc->tlc_drop = 0;
4472 4479 tcp->tcp_listen_cnt = tlc;
4473 4480 }
4474 4481 }
4475 4482 }
4476 4483 return (error);
4477 4484 }
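
The tlc_max computation above can be reproduced in isolation. A sketch
with assumed inputs (maxusers of 2048, a configured ratio of 2, and
128 KiB send and receive buffers), which lands on a limit of 4096
connections:

#include <stdio.h>
#include <stdint.h>

#define MB	(1024 * 1024)

static uint32_t
calc_tlc_max(uint32_t maxusers, uint32_t ratio, uint32_t rcvbuf,
    uint32_t sndbuf)
{
	uint32_t tot_buf = rcvbuf + sndbuf;
	uint32_t mem_ratio;

	if (tot_buf < MB) {
		mem_ratio = MB / tot_buf;
		return (maxusers / ratio * mem_ratio);
	}
	mem_ratio = tot_buf / MB;
	return (maxusers / ratio / mem_ratio);
}

int
main(void)
{
	/* 2048 / 2 * (1 MiB / 256 KiB) = 4096 */
	(void) printf("%u\n", calc_tlc_max(2048, 2, 128 * 1024, 128 * 1024));
	return (0);
}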
2798 lines elided