1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
24 * Copyright 2013 Nexenta Systems, Inc. All rights reserved.
25 */
26
27 #include <sys/types.h>
28 #include <sys/strlog.h>
29 #include <sys/policy.h>
30 #include <sys/strsun.h>
31 #include <sys/squeue_impl.h>
32 #include <sys/squeue.h>
33 #include <sys/vmsystm.h>
34
35 #include <inet/common.h>
36 #include <inet/ip.h>
37 #include <inet/tcp.h>
38 #include <inet/tcp_impl.h>
39
40 /* Control whether TCP can enter defensive mode when under memory pressure. */
41 static boolean_t tcp_do_reclaim = B_TRUE;
42
43 /*
44 * Routines related to the TCP_IOC_ABORT_CONN ioctl command.
45 *
46 * TCP_IOC_ABORT_CONN is a non-transparent ioctl command used for aborting
47 * TCP connections. To invoke this ioctl, a tcp_ioc_abort_conn_t structure
48 * (defined in tcp.h) needs to be filled in and passed into the kernel
49 * via an I_STR ioctl command (see streamio(7I)). The tcp_ioc_abort_conn_t
50 * structure contains the four-tuple of a TCP connection and a range of TCP
51 * states (specified by ac_start and ac_end). The use of wildcard addresses
52 * and ports is allowed. Connections with a matching four tuple and a state
53 * within the specified range will be aborted. The valid states for the
54 * ac_start and ac_end fields are in the range TCPS_SYN_SENT to TCPS_TIME_WAIT,
55 * inclusive.
56 *
57 * An application which has its connection aborted by this ioctl will receive
58 * an error that is dependent on the connection state at the time of the abort.
59 * If the connection state is < TCPS_TIME_WAIT, an application should behave as
60 * though a RST packet has been received. If the connection state is equal to
61 * TCPS_TIME_WAIT, the 2MSL timeout will immediately be canceled by the kernel
62 * and all resources associated with the connection will be freed.
63 */
64 static mblk_t *tcp_ioctl_abort_build_msg(tcp_ioc_abort_conn_t *, tcp_t *);
65 static void tcp_ioctl_abort_dump(tcp_ioc_abort_conn_t *);
66 static void tcp_ioctl_abort_handler(void *arg, mblk_t *mp, void *arg2,
67 ip_recv_attr_t *dummy);
68 static int tcp_ioctl_abort(tcp_ioc_abort_conn_t *, tcp_stack_t *tcps);
69 void tcp_ioctl_abort_conn(queue_t *, mblk_t *);
70 static int tcp_ioctl_abort_bucket(tcp_ioc_abort_conn_t *, int, int *,
71 boolean_t, tcp_stack_t *);
72
73 /*
74 * Macros used for accessing the different types of sockaddr
75 * structures inside a tcp_ioc_abort_conn_t.
76 */
/* View the ac_local/ac_remote sockaddr_storage fields as IPv4 sockaddrs. */
#define	TCP_AC_V4LADDR(acp) ((sin_t *)&(acp)->ac_local)
#define	TCP_AC_V4RADDR(acp) ((sin_t *)&(acp)->ac_remote)
/* IPv4 local/remote address and port (ports are in network byte order). */
#define	TCP_AC_V4LOCAL(acp) (TCP_AC_V4LADDR(acp)->sin_addr.s_addr)
#define	TCP_AC_V4REMOTE(acp) (TCP_AC_V4RADDR(acp)->sin_addr.s_addr)
#define	TCP_AC_V4LPORT(acp) (TCP_AC_V4LADDR(acp)->sin_port)
#define	TCP_AC_V4RPORT(acp) (TCP_AC_V4RADDR(acp)->sin_port)
/* View the ac_local/ac_remote sockaddr_storage fields as IPv6 sockaddrs. */
#define	TCP_AC_V6LADDR(acp) ((sin6_t *)&(acp)->ac_local)
#define	TCP_AC_V6RADDR(acp) ((sin6_t *)&(acp)->ac_remote)
/* IPv6 local/remote address and port (ports are in network byte order). */
#define	TCP_AC_V6LOCAL(acp) (TCP_AC_V6LADDR(acp)->sin6_addr)
#define	TCP_AC_V6REMOTE(acp) (TCP_AC_V6RADDR(acp)->sin6_addr)
#define	TCP_AC_V6LPORT(acp) (TCP_AC_V6LADDR(acp)->sin6_port)
#define	TCP_AC_V6RPORT(acp) (TCP_AC_V6RADDR(acp)->sin6_port)
89
90 /*
91 * Return the correct error code to mimic the behavior
92 * of a connection reset.
93 */
#define	TCP_AC_GET_ERRCODE(state, err) { \
	switch ((state)) { \
	case TCPS_SYN_SENT: \
	case TCPS_SYN_RCVD: \
		/* Handshake never completed: look like a refused connect. */ \
		(err) = ECONNREFUSED; \
		break; \
	case TCPS_ESTABLISHED: \
	case TCPS_FIN_WAIT_1: \
	case TCPS_FIN_WAIT_2: \
	case TCPS_CLOSE_WAIT: \
		/* Live connection torn down: look like an incoming RST. */ \
		(err) = ECONNRESET; \
		break; \
	case TCPS_CLOSING: \
	case TCPS_LAST_ACK: \
	case TCPS_TIME_WAIT: \
		/* Already closing: nothing to report to the application. */ \
		(err) = 0; \
		break; \
	default: \
		(err) = ENXIO; \
	} \
}
115
116 /*
117 * Check if a tcp structure matches the info in acp.
118 */
/*
 * Address/port/state match: a zero port or an unspecified (ANY) address in
 * acp acts as a wildcard; tcp_state must fall within [ac_start, ac_end].
 */
#define	TCP_AC_ADDR_MATCH(acp, connp, tcp) \
	(((acp)->ac_local.ss_family == AF_INET) ? \
	((TCP_AC_V4LOCAL((acp)) == INADDR_ANY || \
	TCP_AC_V4LOCAL((acp)) == (connp)->conn_laddr_v4) && \
	(TCP_AC_V4REMOTE((acp)) == INADDR_ANY || \
	TCP_AC_V4REMOTE((acp)) == (connp)->conn_faddr_v4) && \
	(TCP_AC_V4LPORT((acp)) == 0 || \
	TCP_AC_V4LPORT((acp)) == (connp)->conn_lport) && \
	(TCP_AC_V4RPORT((acp)) == 0 || \
	TCP_AC_V4RPORT((acp)) == (connp)->conn_fport) && \
	(acp)->ac_start <= (tcp)->tcp_state && \
	(acp)->ac_end >= (tcp)->tcp_state) : \
	((IN6_IS_ADDR_UNSPECIFIED(&TCP_AC_V6LOCAL((acp))) || \
	IN6_ARE_ADDR_EQUAL(&TCP_AC_V6LOCAL((acp)), \
	&(connp)->conn_laddr_v6)) && \
	(IN6_IS_ADDR_UNSPECIFIED(&TCP_AC_V6REMOTE((acp))) || \
	IN6_ARE_ADDR_EQUAL(&TCP_AC_V6REMOTE((acp)), \
	&(connp)->conn_faddr_v6)) && \
	(TCP_AC_V6LPORT((acp)) == 0 || \
	TCP_AC_V6LPORT((acp)) == (connp)->conn_lport) && \
	(TCP_AC_V6RPORT((acp)) == 0 || \
	TCP_AC_V6RPORT((acp)) == (connp)->conn_fport) && \
	(acp)->ac_start <= (tcp)->tcp_state && \
	(acp)->ac_end >= (tcp)->tcp_state))

/*
 * Full match: the connection's zone must be acceptable (ac_zoneid may be
 * ALL_ZONES), then the address/port/state match above must hold.
 */
#define	TCP_AC_MATCH(acp, connp, tcp) \
	(((acp)->ac_zoneid == ALL_ZONES || \
	(acp)->ac_zoneid == (connp)->conn_zoneid) ? \
	TCP_AC_ADDR_MATCH(acp, connp, tcp) : 0)
148
149 /*
150 * Build a message containing a tcp_ioc_abort_conn_t structure
151 * which is filled in with information from acp and tp.
152 */
153 static mblk_t *
154 tcp_ioctl_abort_build_msg(tcp_ioc_abort_conn_t *acp, tcp_t *tp)
155 {
156 mblk_t *mp;
157 tcp_ioc_abort_conn_t *tacp;
158
159 mp = allocb(sizeof (uint32_t) + sizeof (*acp), BPRI_LO);
160 if (mp == NULL)
161 return (NULL);
162
163 *((uint32_t *)mp->b_rptr) = TCP_IOC_ABORT_CONN;
164 tacp = (tcp_ioc_abort_conn_t *)((uchar_t *)mp->b_rptr +
165 sizeof (uint32_t));
166
167 tacp->ac_start = acp->ac_start;
168 tacp->ac_end = acp->ac_end;
169 tacp->ac_zoneid = acp->ac_zoneid;
170
171 if (acp->ac_local.ss_family == AF_INET) {
172 tacp->ac_local.ss_family = AF_INET;
173 tacp->ac_remote.ss_family = AF_INET;
174 TCP_AC_V4LOCAL(tacp) = tp->tcp_connp->conn_laddr_v4;
175 TCP_AC_V4REMOTE(tacp) = tp->tcp_connp->conn_faddr_v4;
176 TCP_AC_V4LPORT(tacp) = tp->tcp_connp->conn_lport;
177 TCP_AC_V4RPORT(tacp) = tp->tcp_connp->conn_fport;
178 } else {
179 tacp->ac_local.ss_family = AF_INET6;
180 tacp->ac_remote.ss_family = AF_INET6;
181 TCP_AC_V6LOCAL(tacp) = tp->tcp_connp->conn_laddr_v6;
182 TCP_AC_V6REMOTE(tacp) = tp->tcp_connp->conn_faddr_v6;
183 TCP_AC_V6LPORT(tacp) = tp->tcp_connp->conn_lport;
184 TCP_AC_V6RPORT(tacp) = tp->tcp_connp->conn_fport;
185 }
186 mp->b_wptr = (uchar_t *)mp->b_rptr + sizeof (uint32_t) + sizeof (*acp);
187 return (mp);
188 }
189
190 /*
191 * Print a tcp_ioc_abort_conn_t structure.
192 */
193 static void
194 tcp_ioctl_abort_dump(tcp_ioc_abort_conn_t *acp)
195 {
196 char lbuf[128];
197 char rbuf[128];
198 sa_family_t af;
199 in_port_t lport, rport;
200 ushort_t logflags;
201
202 af = acp->ac_local.ss_family;
203
204 if (af == AF_INET) {
205 (void) inet_ntop(af, (const void *)&TCP_AC_V4LOCAL(acp),
206 lbuf, 128);
207 (void) inet_ntop(af, (const void *)&TCP_AC_V4REMOTE(acp),
208 rbuf, 128);
209 lport = ntohs(TCP_AC_V4LPORT(acp));
210 rport = ntohs(TCP_AC_V4RPORT(acp));
211 } else {
212 (void) inet_ntop(af, (const void *)&TCP_AC_V6LOCAL(acp),
213 lbuf, 128);
214 (void) inet_ntop(af, (const void *)&TCP_AC_V6REMOTE(acp),
215 rbuf, 128);
216 lport = ntohs(TCP_AC_V6LPORT(acp));
217 rport = ntohs(TCP_AC_V6RPORT(acp));
218 }
219
220 logflags = SL_TRACE | SL_NOTE;
221 /*
222 * Don't print this message to the console if the operation was done
223 * to a non-global zone.
224 */
225 if (acp->ac_zoneid == GLOBAL_ZONEID || acp->ac_zoneid == ALL_ZONES)
226 logflags |= SL_CONSOLE;
227 (void) strlog(TCP_MOD_ID, 0, 1, logflags,
228 "TCP_IOC_ABORT_CONN: local = %s:%d, remote = %s:%d, "
229 "start = %d, end = %d\n", lbuf, lport, rbuf, rport,
230 acp->ac_start, acp->ac_end);
231 }
232
233 /*
234 * Called using SQ_FILL when a message built using
235 * tcp_ioctl_abort_build_msg is put into a queue.
236 * Note that when we get here there is no wildcard in acp any more.
237 */
238 /* ARGSUSED2 */
239 static void
240 tcp_ioctl_abort_handler(void *arg, mblk_t *mp, void *arg2,
241 ip_recv_attr_t *dummy)
242 {
243 conn_t *connp = (conn_t *)arg;
244 tcp_t *tcp = connp->conn_tcp;
245 tcp_ioc_abort_conn_t *acp;
246
247 /*
248 * Don't accept any input on a closed tcp as this TCP logically does
249 * not exist on the system. Don't proceed further with this TCP.
250 * For eg. this packet could trigger another close of this tcp
251 * which would be disastrous for tcp_refcnt. tcp_close_detached /
252 * tcp_clean_death / tcp_closei_local must be called at most once
253 * on a TCP.
254 */
255 if (tcp->tcp_state == TCPS_CLOSED ||
256 tcp->tcp_state == TCPS_BOUND) {
257 freemsg(mp);
258 return;
259 }
260
261 acp = (tcp_ioc_abort_conn_t *)(mp->b_rptr + sizeof (uint32_t));
262 if (tcp->tcp_state <= acp->ac_end) {
263 /*
264 * If we get here, we are already on the correct
265 * squeue. This ioctl follows the following path
266 * tcp_wput -> tcp_wput_ioctl -> tcp_ioctl_abort_conn
267 * ->tcp_ioctl_abort->squeue_enter (if on a
268 * different squeue)
269 */
270 int errcode;
271
272 TCP_AC_GET_ERRCODE(tcp->tcp_state, errcode);
273 (void) tcp_clean_death(tcp, errcode);
274 }
275 freemsg(mp);
276 }
277
278 /*
279 * Abort all matching connections on a hash chain.
280 */
281 static int
282 tcp_ioctl_abort_bucket(tcp_ioc_abort_conn_t *acp, int index, int *count,
283 boolean_t exact, tcp_stack_t *tcps)
284 {
285 int nmatch, err = 0;
286 tcp_t *tcp;
287 MBLKP mp, last, listhead = NULL;
288 conn_t *tconnp;
289 connf_t *connfp;
290 ip_stack_t *ipst = tcps->tcps_netstack->netstack_ip;
291
292 connfp = &ipst->ips_ipcl_conn_fanout[index];
293
294 startover:
295 nmatch = 0;
296
297 mutex_enter(&connfp->connf_lock);
298 for (tconnp = connfp->connf_head; tconnp != NULL;
299 tconnp = tconnp->conn_next) {
300 tcp = tconnp->conn_tcp;
301 /*
302 * We are missing a check on sin6_scope_id for linklocals here,
303 * but current usage is just for aborting based on zoneid
304 * for shared-IP zones.
305 */
306 if (TCP_AC_MATCH(acp, tconnp, tcp)) {
307 CONN_INC_REF(tconnp);
308 mp = tcp_ioctl_abort_build_msg(acp, tcp);
309 if (mp == NULL) {
310 err = ENOMEM;
311 CONN_DEC_REF(tconnp);
312 break;
313 }
314 mp->b_prev = (mblk_t *)tcp;
315
316 if (listhead == NULL) {
317 listhead = mp;
318 last = mp;
319 } else {
320 last->b_next = mp;
321 last = mp;
322 }
323 nmatch++;
324 if (exact)
325 break;
326 }
327
328 /* Avoid holding lock for too long. */
329 if (nmatch >= 500)
330 break;
331 }
332 mutex_exit(&connfp->connf_lock);
333
334 /* Pass mp into the correct tcp */
335 while ((mp = listhead) != NULL) {
336 listhead = listhead->b_next;
337 tcp = (tcp_t *)mp->b_prev;
338 mp->b_next = mp->b_prev = NULL;
339 SQUEUE_ENTER_ONE(tcp->tcp_connp->conn_sqp, mp,
340 tcp_ioctl_abort_handler, tcp->tcp_connp, NULL,
341 SQ_FILL, SQTAG_TCP_ABORT_BUCKET);
342 }
343
344 *count += nmatch;
345 if (nmatch >= 500 && err == 0)
346 goto startover;
347 return (err);
348 }
349
350 /*
351 * Abort all connections that matches the attributes specified in acp.
352 */
353 static int
354 tcp_ioctl_abort(tcp_ioc_abort_conn_t *acp, tcp_stack_t *tcps)
355 {
356 sa_family_t af;
357 uint32_t ports;
358 uint16_t *pports;
359 int err = 0, count = 0;
360 boolean_t exact = B_FALSE; /* set when there is no wildcard */
361 int index = -1;
362 ushort_t logflags;
363 ip_stack_t *ipst = tcps->tcps_netstack->netstack_ip;
364
365 af = acp->ac_local.ss_family;
366
367 if (af == AF_INET) {
368 if (TCP_AC_V4REMOTE(acp) != INADDR_ANY &&
369 TCP_AC_V4LPORT(acp) != 0 && TCP_AC_V4RPORT(acp) != 0) {
370 pports = (uint16_t *)&ports;
371 pports[1] = TCP_AC_V4LPORT(acp);
372 pports[0] = TCP_AC_V4RPORT(acp);
373 exact = (TCP_AC_V4LOCAL(acp) != INADDR_ANY);
374 }
375 } else {
376 if (!IN6_IS_ADDR_UNSPECIFIED(&TCP_AC_V6REMOTE(acp)) &&
377 TCP_AC_V6LPORT(acp) != 0 && TCP_AC_V6RPORT(acp) != 0) {
378 pports = (uint16_t *)&ports;
379 pports[1] = TCP_AC_V6LPORT(acp);
380 pports[0] = TCP_AC_V6RPORT(acp);
381 exact = !IN6_IS_ADDR_UNSPECIFIED(&TCP_AC_V6LOCAL(acp));
382 }
383 }
384
385 /*
386 * For cases where remote addr, local port, and remote port are non-
387 * wildcards, tcp_ioctl_abort_bucket will only be called once.
388 */
389 if (index != -1) {
390 err = tcp_ioctl_abort_bucket(acp, index,
391 &count, exact, tcps);
392 } else {
393 /*
394 * loop through all entries for wildcard case
395 */
396 for (index = 0;
397 index < ipst->ips_ipcl_conn_fanout_size;
398 index++) {
399 err = tcp_ioctl_abort_bucket(acp, index,
400 &count, exact, tcps);
401 if (err != 0)
402 break;
403 }
404 }
405
406 logflags = SL_TRACE | SL_NOTE;
407 /*
408 * Don't print this message to the console if the operation was done
409 * to a non-global zone.
410 */
411 if (acp->ac_zoneid == GLOBAL_ZONEID || acp->ac_zoneid == ALL_ZONES)
412 logflags |= SL_CONSOLE;
413 (void) strlog(TCP_MOD_ID, 0, 1, logflags, "TCP_IOC_ABORT_CONN: "
414 "aborted %d connection%c\n", count, ((count > 1) ? 's' : ' '));
415 if (err == 0 && count == 0)
416 err = ENOENT;
417 return (err);
418 }
419
420 /*
421 * Process the TCP_IOC_ABORT_CONN ioctl request.
422 */
423 void
424 tcp_ioctl_abort_conn(queue_t *q, mblk_t *mp)
425 {
426 int err;
427 IOCP iocp;
428 MBLKP mp1;
429 sa_family_t laf, raf;
430 tcp_ioc_abort_conn_t *acp;
431 zone_t *zptr;
432 conn_t *connp = Q_TO_CONN(q);
433 zoneid_t zoneid = connp->conn_zoneid;
434 tcp_t *tcp = connp->conn_tcp;
435 tcp_stack_t *tcps = tcp->tcp_tcps;
436
437 iocp = (IOCP)mp->b_rptr;
438
439 if ((mp1 = mp->b_cont) == NULL ||
440 iocp->ioc_count != sizeof (tcp_ioc_abort_conn_t)) {
441 err = EINVAL;
442 goto out;
443 }
444
445 /* check permissions */
446 if (secpolicy_ip_config(iocp->ioc_cr, B_FALSE) != 0) {
447 err = EPERM;
448 goto out;
449 }
450
451 if (mp1->b_cont != NULL) {
452 freemsg(mp1->b_cont);
453 mp1->b_cont = NULL;
454 }
455
456 acp = (tcp_ioc_abort_conn_t *)mp1->b_rptr;
457 laf = acp->ac_local.ss_family;
458 raf = acp->ac_remote.ss_family;
459
460 /* check that a zone with the supplied zoneid exists */
461 if (acp->ac_zoneid != GLOBAL_ZONEID && acp->ac_zoneid != ALL_ZONES) {
462 zptr = zone_find_by_id(zoneid);
463 if (zptr != NULL) {
464 zone_rele(zptr);
465 } else {
466 err = EINVAL;
467 goto out;
468 }
469 }
470
471 /*
472 * For exclusive stacks we set the zoneid to zero
473 * to make TCP operate as if in the global zone.
474 */
475 if (tcps->tcps_netstack->netstack_stackid != GLOBAL_NETSTACKID)
476 acp->ac_zoneid = GLOBAL_ZONEID;
477
478 if (acp->ac_start < TCPS_SYN_SENT || acp->ac_end > TCPS_TIME_WAIT ||
479 acp->ac_start > acp->ac_end || laf != raf ||
480 (laf != AF_INET && laf != AF_INET6)) {
481 err = EINVAL;
482 goto out;
483 }
484
485 tcp_ioctl_abort_dump(acp);
486 err = tcp_ioctl_abort(acp, tcps);
487
488 out:
489 if (mp1 != NULL) {
490 freemsg(mp1);
491 mp->b_cont = NULL;
492 }
493
494 if (err != 0)
495 miocnak(q, mp, 0, err);
496 else
497 miocack(q, mp, 0, 0);
498 }
499
500 /*
501 * Timeout function to reset the TCP stack variable tcps_reclaim to false.
502 */
503 void
504 tcp_reclaim_timer(void *arg)
505 {
506 tcp_stack_t *tcps = (tcp_stack_t *)arg;
507 int64_t tot_conn = 0;
508 int i;
509
510 for (i = 0; i < tcps->tcps_sc_cnt; i++)
511 tot_conn += tcps->tcps_sc[i]->tcp_sc_conn_cnt;
512
513 /*
514 * This happens only when a stack is going away. tcps_reclaim_tid
515 * should not be reset to 0 when returning in this case.
516 */
517 mutex_enter(&tcps->tcps_reclaim_lock);
518 if (!tcps->tcps_reclaim) {
519 mutex_exit(&tcps->tcps_reclaim_lock);
520 return;
521 }
522
523 if ((freemem >= lotsfree + needfree) || tot_conn < maxusers) {
524 tcps->tcps_reclaim = B_FALSE;
525 tcps->tcps_reclaim_tid = 0;
526 } else {
527 /* Stay in defensive mode and restart the timer */
528 tcps->tcps_reclaim_tid = timeout(tcp_reclaim_timer,
529 tcps, MSEC_TO_TICK(tcps->tcps_reclaim_period));
530 }
531 mutex_exit(&tcps->tcps_reclaim_lock);
532 }
533
534 /*
535 * Kmem reclaim call back function. When the system is under memory
536 * pressure, we set the TCP stack variable tcps_reclaim to true. This
537 * variable is reset to false after tcps_reclaim_period msecs. During this
538 * period, TCP will be more aggressive in aborting connections not making
539 * progress, meaning retransmitting for some time (tcp_early_abort seconds).
540 * TCP will also not accept new connection request for those listeners whose
541 * q or q0 is not empty.
542 */
543 /* ARGSUSED */
544 void
545 tcp_conn_reclaim(void *arg)
546 {
547 netstack_handle_t nh;
548 netstack_t *ns;
549 tcp_stack_t *tcps;
550
551 if (!tcp_do_reclaim)
552 return;
553
554 /*
555 * The reclaim function may be called even when the system is not
556 * really under memory pressure.
557 */
558 if (freemem >= lotsfree + needfree)
559 return;
560
561 netstack_next_init(&nh);
562 while ((ns = netstack_next(&nh)) != NULL) {
563 int i;
564 int64_t tot_conn = 0;
565
566 /*
567 * During boot time, the first netstack_t is created and
568 * initialized before TCP has registered with the netstack
569 * framework. If this reclaim function is called before TCP
570 * has finished its initialization, netstack_next() will
571 * return the first netstack_t (since its netstack_flags is
572 * not NSF_UNINIT). And its netstack_tcp will be NULL. We
573 * need to catch it.
574 *
575 * All subsequent netstack_t creation will not have this
576 * problem since the initialization is not finished until TCP
577 * has finished its own tcp_stack_t initialization. Hence
578 * netstack_next() will not return one with NULL netstack_tcp.
579 */
580 if ((tcps = ns->netstack_tcp) == NULL) {
581 netstack_rele(ns);
582 continue;
583 }
584
585 /*
586 * Even if the system is under memory pressure, the reason may
587 * not be because of TCP activity. Check the number of
588 * connections in each stack. If the number exceeds the
589 * threshold (maxusers), turn on defensive mode.
590 */
591 for (i = 0; i < tcps->tcps_sc_cnt; i++)
592 tot_conn += tcps->tcps_sc[i]->tcp_sc_conn_cnt;
593 if (tot_conn < maxusers) {
594 netstack_rele(ns);
595 continue;
596 }
597
598 mutex_enter(&tcps->tcps_reclaim_lock);
599 if (!tcps->tcps_reclaim) {
600 tcps->tcps_reclaim = B_TRUE;
601 tcps->tcps_reclaim_tid = timeout(tcp_reclaim_timer,
602 tcps, MSEC_TO_TICK(tcps->tcps_reclaim_period));
603 TCP_STAT(tcps, tcp_reclaim_cnt);
604 }
605 mutex_exit(&tcps->tcps_reclaim_lock);
606 netstack_rele(ns);
607 }
608 netstack_next_fini(&nh);
609 }
610
611 /*
612 * Given a tcp_stack_t and a port (in host byte order), find a listener
613 * configuration for that port and return the ratio.
614 */
615 uint32_t
616 tcp_find_listener_conf(tcp_stack_t *tcps, in_port_t port)
617 {
618 tcp_listener_t *tl;
619 uint32_t ratio = 0;
620
621 mutex_enter(&tcps->tcps_listener_conf_lock);
622 for (tl = list_head(&tcps->tcps_listener_conf); tl != NULL;
623 tl = list_next(&tcps->tcps_listener_conf, tl)) {
624 if (tl->tl_port == port) {
625 ratio = tl->tl_ratio;
626 break;
627 }
628 }
629 mutex_exit(&tcps->tcps_listener_conf_lock);
630 return (ratio);
631 }
632
633 /*
634 * To remove all listener limit configuration in a tcp_stack_t.
635 */
636 void
637 tcp_listener_conf_cleanup(tcp_stack_t *tcps)
638 {
639 tcp_listener_t *tl;
640
641 mutex_enter(&tcps->tcps_listener_conf_lock);
642 while ((tl = list_head(&tcps->tcps_listener_conf)) != NULL) {
643 list_remove(&tcps->tcps_listener_conf, tl);
644 kmem_free(tl, sizeof (tcp_listener_t));
645 }
646 mutex_destroy(&tcps->tcps_listener_conf_lock);
647 list_destroy(&tcps->tcps_listener_conf);
648 }
649
650 /*
651 * When a CPU is added, we need to allocate the per CPU stats struct.
652 */
653 void
654 tcp_stack_cpu_add(tcp_stack_t *tcps, processorid_t cpu_seqid)
655 {
656 int i;
657
658 if (cpu_seqid < tcps->tcps_sc_cnt)
659 return;
660 for (i = tcps->tcps_sc_cnt; i <= cpu_seqid; i++) {
661 ASSERT(tcps->tcps_sc[i] == NULL);
662 tcps->tcps_sc[i] = kmem_zalloc(sizeof (tcp_stats_cpu_t),
663 KM_SLEEP);
664 }
665 membar_producer();
666 tcps->tcps_sc_cnt = cpu_seqid + 1;
667 }
668
669 /*
670 * Diagnostic routine used to return a string associated with the tcp state.
671 * Note that if the caller does not supply a buffer, it will use an internal
672 * static string. This means that if multiple threads call this function at
673 * the same time, output can be corrupted... Note also that this function
674 * does not check the size of the supplied buffer. The caller has to make
675 * sure that it is big enough.
676 */
677 char *
678 tcp_display(tcp_t *tcp, char *sup_buf, char format)
679 {
680 char buf1[30];
681 static char priv_buf[INET6_ADDRSTRLEN * 2 + 80];
682 char *buf;
683 char *cp;
684 in6_addr_t local, remote;
685 char local_addrbuf[INET6_ADDRSTRLEN];
686 char remote_addrbuf[INET6_ADDRSTRLEN];
687 conn_t *connp;
688
689 if (sup_buf != NULL)
690 buf = sup_buf;
691 else
692 buf = priv_buf;
693
694 if (tcp == NULL)
695 return ("NULL_TCP");
696
697 connp = tcp->tcp_connp;
698 switch (tcp->tcp_state) {
699 case TCPS_CLOSED:
700 cp = "TCP_CLOSED";
701 break;
702 case TCPS_IDLE:
703 cp = "TCP_IDLE";
704 break;
705 case TCPS_BOUND:
706 cp = "TCP_BOUND";
707 break;
708 case TCPS_LISTEN:
709 cp = "TCP_LISTEN";
710 break;
711 case TCPS_SYN_SENT:
712 cp = "TCP_SYN_SENT";
713 break;
714 case TCPS_SYN_RCVD:
715 cp = "TCP_SYN_RCVD";
716 break;
717 case TCPS_ESTABLISHED:
718 cp = "TCP_ESTABLISHED";
719 break;
720 case TCPS_CLOSE_WAIT:
721 cp = "TCP_CLOSE_WAIT";
722 break;
723 case TCPS_FIN_WAIT_1:
724 cp = "TCP_FIN_WAIT_1";
725 break;
726 case TCPS_CLOSING:
727 cp = "TCP_CLOSING";
728 break;
729 case TCPS_LAST_ACK:
730 cp = "TCP_LAST_ACK";
731 break;
732 case TCPS_FIN_WAIT_2:
733 cp = "TCP_FIN_WAIT_2";
734 break;
735 case TCPS_TIME_WAIT:
736 cp = "TCP_TIME_WAIT";
737 break;
738 default:
739 (void) mi_sprintf(buf1, "TCPUnkState(%d)", tcp->tcp_state);
740 cp = buf1;
741 break;
742 }
743 switch (format) {
744 case DISP_ADDR_AND_PORT:
745 if (connp->conn_ipversion == IPV4_VERSION) {
746 /*
747 * Note that we use the remote address in the tcp_b
748 * structure. This means that it will print out
749 * the real destination address, not the next hop's
750 * address if source routing is used.
751 */
752 IN6_IPADDR_TO_V4MAPPED(connp->conn_laddr_v4, &local);
753 IN6_IPADDR_TO_V4MAPPED(connp->conn_faddr_v4, &remote);
754
755 } else {
756 local = connp->conn_laddr_v6;
757 remote = connp->conn_faddr_v6;
758 }
759 (void) inet_ntop(AF_INET6, &local, local_addrbuf,
760 sizeof (local_addrbuf));
761 (void) inet_ntop(AF_INET6, &remote, remote_addrbuf,
762 sizeof (remote_addrbuf));
763 (void) mi_sprintf(buf, "[%s.%u, %s.%u] %s",
764 local_addrbuf, ntohs(connp->conn_lport), remote_addrbuf,
765 ntohs(connp->conn_fport), cp);
766 break;
767 case DISP_PORT_ONLY:
768 default:
769 (void) mi_sprintf(buf, "[%u, %u] %s",
770 ntohs(connp->conn_lport), ntohs(connp->conn_fport), cp);
771 break;
772 }
773
774 return (buf);
775 }