/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
 */

/* This file contains all TCP output processing functions. */

#include <sys/types.h>
#include <sys/stream.h>
#include <sys/strsun.h>
#include <sys/strsubr.h>
#include <sys/stropts.h>
#include <sys/strlog.h>
#define _SUN_TPI_VERSION 2
#include <sys/tihdr.h>
#include <sys/suntpi.h>
#include <sys/xti_inet.h>
#include <sys/timod.h>
#include <sys/pattr.h>
#include <sys/squeue_impl.h>
#include <sys/squeue.h>
#include <sys/sockio.h>
#include <sys/tsol/tnet.h>

#include <inet/common.h>
#include <inet/ip.h>
#include <inet/tcp.h>
#include <inet/tcp_impl.h>
#include <inet/snmpcom.h>
#include <inet/proto_set.h>
#include <inet/ipsec_impl.h>
#include <inet/ip_ndp.h>

static mblk_t   *tcp_get_seg_mp(tcp_t *, uint32_t, int32_t *);
static void     tcp_wput_cmdblk(queue_t *, mblk_t *);
static void     tcp_wput_flush(tcp_t *, mblk_t *);
static void     tcp_wput_iocdata(tcp_t *tcp, mblk_t *mp);
static int      tcp_xmit_end(tcp_t *);
static int      tcp_send(tcp_t *, const int, const int, const int,
                    const int, int *, uint_t *, int *, mblk_t **, mblk_t *);
static void     tcp_xmit_early_reset(char *, mblk_t *, uint32_t, uint32_t,
                    int, ip_recv_attr_t *, ip_stack_t *, conn_t *);
static boolean_t        tcp_send_rst_chk(tcp_stack_t *);
static void     tcp_process_shrunk_swnd(tcp_t *, uint32_t);
static void     tcp_fill_header(tcp_t *, uchar_t *, clock_t, int);

/*
 * Functions called directly via squeue having a prototype of edesc_t.
 */
static void     tcp_wput_nondata(void *, mblk_t *, void *, ip_recv_attr_t *);
static void     tcp_wput_ioctl(void *, mblk_t *, void *, ip_recv_attr_t *);
static void     tcp_wput_proto(void *, mblk_t *, void *, ip_recv_attr_t *);

/*
 * This controls how tiny a write must be before we try to copy it
 * into the mblk on the tail of the transmit queue.  Not much
 * speedup is observed for values larger than sixteen.  Zero will
 * disable the optimisation.
 */
static int tcp_tx_pull_len = 16;

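/*
 * tcp_wput() is the write-side put procedure for TCP.  It classifies the
 * message by its db_type and either handles it inline or queues it on the
 * connection's squeue for serialized processing.
 */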
void
tcp_wput(queue_t *q, mblk_t *mp)
{
        conn_t  *connp = Q_TO_CONN(q);
        tcp_t   *tcp;
        void (*output_proc)();
        t_scalar_t type;
        uchar_t *rptr;
        struct iocblk   *iocp;
        size_t size;

        ASSERT(connp->conn_ref >= 2);

        switch (DB_TYPE(mp)) {
        case M_DATA:
                tcp = connp->conn_tcp;
                ASSERT(tcp != NULL);

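                /* Account for the payload bytes headed for the squeue. */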
                size = msgdsize(mp);

                mutex_enter(&tcp->tcp_non_sq_lock);
                tcp->tcp_squeue_bytes += size;
                if (TCP_UNSENT_BYTES(tcp) > connp->conn_sndbuf) {
                        tcp_setqfull(tcp);
                }
                mutex_exit(&tcp->tcp_non_sq_lock);

                CONN_INC_REF(connp);
                SQUEUE_ENTER_ONE(connp->conn_sqp, mp, tcp_output, connp,
                    NULL, tcp_squeue_flag, SQTAG_TCP_OUTPUT);
                return;

        case M_CMD:
                tcp_wput_cmdblk(q, mp);
                return;

        case M_PROTO:
        case M_PCPROTO:
                /*
                 * If it is an SNMP message, don't get behind the squeue.
                 */
                tcp = connp->conn_tcp;
                rptr = mp->b_rptr;
                if ((mp->b_wptr - rptr) >= sizeof (t_scalar_t)) {
                        type = ((union T_primitives *)rptr)->type;
                } else {
                        if (connp->conn_debug) {
                                (void) strlog(TCP_MOD_ID, 0, 1,
                                    SL_ERROR|SL_TRACE,
                                    "tcp_wput_proto, dropping one...");
                        }
                        freemsg(mp);
                        return;
                }
                if (type == T_SVR4_OPTMGMT_REQ) {
                        /*
                         * All Solaris components should pass a db_credp
                         * for this TPI message, hence we ASSERT.
                         * But in case there is some other M_PROTO that looks
                         * like a TPI message sent by some other kernel
                         * component, we check and return an error.
                         */
                        cred_t  *cr = msg_getcred(mp, NULL);

                        ASSERT(cr != NULL);
                        if (cr == NULL) {
                                tcp_err_ack(tcp, mp, TSYSERR, EINVAL);
                                return;
                        }
                        if (snmpcom_req(q, mp, tcp_snmp_set, ip_snmp_get,
                            cr)) {
                                /*
                                 * This was an SNMP request
                                 */
                                return;
                        } else {
                                output_proc = tcp_wput_proto;
                        }
                } else {
                        output_proc = tcp_wput_proto;
                }
                break;
        case M_IOCTL:
                /*
                 * Most ioctls can be processed right away without going via
                 * squeues, so process them right here. Those that do require
                 * the squeue (currently _SIOCSOCKFALLBACK) are processed by
                 * tcp_wput_ioctl().
                 */
                iocp = (struct iocblk *)mp->b_rptr;
                tcp = connp->conn_tcp;

                switch (iocp->ioc_cmd) {
                case TCP_IOC_ABORT_CONN:
                        tcp_ioctl_abort_conn(q, mp);
                        return;
                case TI_GETPEERNAME:
                case TI_GETMYNAME:
                        mi_copyin(q, mp, NULL,
                            SIZEOF_STRUCT(strbuf, iocp->ioc_flag));
                        return;

                default:
                        output_proc = tcp_wput_ioctl;
                        break;
                }
                break;
        default:
                output_proc = tcp_wput_nondata;
                break;
        }

        CONN_INC_REF(connp);
        SQUEUE_ENTER_ONE(connp->conn_sqp, mp, output_proc, connp,
            NULL, tcp_squeue_flag, SQTAG_TCP_WPUT_OTHER);
}

/*
 * The TCP normal data output path.
 * NOTE: the logic of the fast path is duplicated from this function.
 */
void
tcp_wput_data(tcp_t *tcp, mblk_t *mp, boolean_t urgent)
{
        int             len;
        mblk_t          *local_time;
        mblk_t          *mp1;
        uint32_t        snxt;
        int             tail_unsent;
        int             tcpstate;
        int             usable = 0;
        mblk_t          *xmit_tail;
        int32_t         mss;
        int32_t         num_sack_blk = 0;
        int32_t         total_hdr_len;
        int32_t         tcp_hdr_len;
        int             rc;
        tcp_stack_t     *tcps = tcp->tcp_tcps;
        conn_t          *connp = tcp->tcp_connp;
        clock_t         now = LBOLT_FASTPATH;

        tcpstate = tcp->tcp_state;
        if (mp == NULL) {
                /*
                 * tcp_wput_data() with NULL mp should only be called when
                 * there is unsent data.
                 */
                ASSERT(tcp->tcp_unsent > 0);
                /* Really tacky... but we need this for detached closes. */
                len = tcp->tcp_unsent;
                goto data_null;
        }

        ASSERT(mp->b_datap->db_type == M_DATA);
        /*
         * Don't allow data after T_ORDREL_REQ or T_DISCON_REQ,
         * or before a connection attempt has begun.
         */
        if (tcpstate < TCPS_SYN_SENT || tcpstate > TCPS_CLOSE_WAIT ||
            (tcp->tcp_valid_bits & TCP_FSS_VALID) != 0) {
                if ((tcp->tcp_valid_bits & TCP_FSS_VALID) != 0) {
#ifdef DEBUG
                        cmn_err(CE_WARN,
                            "tcp_wput_data: data after ordrel, %s",
                            tcp_display(tcp, NULL,
                            DISP_ADDR_AND_PORT));
#else
                        if (connp->conn_debug) {
                                (void) strlog(TCP_MOD_ID, 0, 1,
                                    SL_TRACE|SL_ERROR,
                                    "tcp_wput_data: data after ordrel, %s\n",
                                    tcp_display(tcp, NULL,
                                    DISP_ADDR_AND_PORT));
                        }
#endif /* DEBUG */
                }
                if (tcp->tcp_snd_zcopy_aware &&
                    (mp->b_datap->db_struioflag & STRUIO_ZCNOTIFY))
                        tcp_zcopy_notify(tcp);
                freemsg(mp);
                mutex_enter(&tcp->tcp_non_sq_lock);
                if (tcp->tcp_flow_stopped &&
                    TCP_UNSENT_BYTES(tcp) <= connp->conn_sndlowat) {
                        tcp_clrqfull(tcp);
                }
                mutex_exit(&tcp->tcp_non_sq_lock);
                return;
        }

        /* Strip empties */
        for (;;) {
                ASSERT((uintptr_t)(mp->b_wptr - mp->b_rptr) <=
                    (uintptr_t)INT_MAX);
                len = (int)(mp->b_wptr - mp->b_rptr);
                if (len > 0)
                        break;
                mp1 = mp;
                mp = mp->b_cont;
                freeb(mp1);
                if (mp == NULL) {
                        return;
                }
        }

        /* If we are the first on the list ... */
        if (tcp->tcp_xmit_head == NULL) {
                tcp->tcp_xmit_head = mp;
                tcp->tcp_xmit_tail = mp;
                tcp->tcp_xmit_tail_unsent = len;
        } else {
                /* If tiny tx and room in txq tail, pullup to save mblks. */
                struct datab *dp;

                mp1 = tcp->tcp_xmit_last;
                if (len < tcp_tx_pull_len &&
                    (dp = mp1->b_datap)->db_ref == 1 &&
                    dp->db_lim - mp1->b_wptr >= len) {
                        ASSERT(len > 0);
                        ASSERT(!mp1->b_cont);
                        if (len == 1) {
                                *mp1->b_wptr++ = *mp->b_rptr;
                        } else {
                                bcopy(mp->b_rptr, mp1->b_wptr, len);
                                mp1->b_wptr += len;
                        }
                        if (mp1 == tcp->tcp_xmit_tail)
                                tcp->tcp_xmit_tail_unsent += len;
                        mp1->b_cont = mp->b_cont;
                        if (tcp->tcp_snd_zcopy_aware &&
                            (mp->b_datap->db_struioflag & STRUIO_ZCNOTIFY))
                                mp1->b_datap->db_struioflag |= STRUIO_ZCNOTIFY;
                        freeb(mp);
                        mp = mp1;
                } else {
                        tcp->tcp_xmit_last->b_cont = mp;
                }
                len += tcp->tcp_unsent;
        }

        /* Tack on however many more positive length mblks we have */
        if ((mp1 = mp->b_cont) != NULL) {
                do {
                        int tlen;
                        ASSERT((uintptr_t)(mp1->b_wptr - mp1->b_rptr) <=
                            (uintptr_t)INT_MAX);
                        tlen = (int)(mp1->b_wptr - mp1->b_rptr);
                        if (tlen <= 0) {
                                mp->b_cont = mp1->b_cont;
                                freeb(mp1);
                        } else {
                                len += tlen;
                                mp = mp1;
                        }
                } while ((mp1 = mp->b_cont) != NULL);
        }
        tcp->tcp_xmit_last = mp;
        tcp->tcp_unsent = len;

        if (urgent)
                usable = 1;

data_null:
        snxt = tcp->tcp_snxt;
        xmit_tail = tcp->tcp_xmit_tail;
        tail_unsent = tcp->tcp_xmit_tail_unsent;

        /*
         * Note that tcp_mss has been adjusted to take into account the
         * timestamp option if applicable.  Because SACK options do not
         * appear in every TCP segment and are of variable length,
         * they cannot be included in tcp_mss.  Thus we need to calculate
         * the actual segment length when we need to send a segment which
         * includes SACK options.
         */
        if (tcp->tcp_snd_sack_ok && tcp->tcp_num_sack_blk > 0) {
                int32_t opt_len;

                num_sack_blk = MIN(tcp->tcp_max_sack_blk,
                    tcp->tcp_num_sack_blk);
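                /* Two NOPs pad the SACK option out to a 32-bit boundary. */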
                opt_len = num_sack_blk * sizeof (sack_blk_t) + TCPOPT_NOP_LEN *
                    2 + TCPOPT_HEADER_LEN;
                mss = tcp->tcp_mss - opt_len;
                total_hdr_len = connp->conn_ht_iphc_len + opt_len;
                tcp_hdr_len = connp->conn_ht_ulp_len + opt_len;
        } else {
                mss = tcp->tcp_mss;
                total_hdr_len = connp->conn_ht_iphc_len;
                tcp_hdr_len = connp->conn_ht_ulp_len;
        }

        if ((tcp->tcp_suna == snxt) && !tcp->tcp_localnet &&
            (TICK_TO_MSEC(now - tcp->tcp_last_recv_time) >= tcp->tcp_rto)) {
                TCP_SET_INIT_CWND(tcp, mss, tcps->tcps_slow_start_after_idle);
        }
        if (tcpstate == TCPS_SYN_RCVD) {
                /*
                 * The three-way connection establishment handshake is not
                 * complete yet. We want to queue the data for transmission
                 * after entering ESTABLISHED state (RFC793). A jump to
                 * the "done" label effectively leaves data on the queue.
                 */
                goto done;
        } else {
                int usable_r;

                /*
                 * In the special case when cwnd is zero, which can only
                 * happen if the connection is ECN capable, return now.
                 * New segments are sent using tcp_timer().  The timer
                 * is set in tcp_input_data().
                 */
                if (tcp->tcp_cwnd == 0) {
                        /*
                         * Note that tcp_cwnd is 0 before the 3-way handshake
                         * is finished.
                         */
                        ASSERT(tcp->tcp_ecn_ok ||
                            tcp->tcp_state < TCPS_ESTABLISHED);
                        return;
                }

                /* NOTE: trouble if xmitting while SYN not acked? */
                usable_r = snxt - tcp->tcp_suna;
                usable_r = tcp->tcp_swnd - usable_r;
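                /* usable_r is now the send window left after in-flight data. */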

                /*
                 * Check if the receiver has shrunk the window.  If
                 * tcp_wput_data() with NULL mp is called, tcp_fin_sent
                 * cannot be set as there is unsent data, so FIN cannot
                 * be sent out.  Otherwise, we need to take the FIN into
                 * account as it consumes an "invisible" sequence number.
                 */
                ASSERT(tcp->tcp_fin_sent == 0);
                if (usable_r < 0) {
                        /*
                         * The receiver has shrunk the window and we have sent
                         * -usable_r bytes of data beyond the window;
                         * re-adjust.
                         *
                         * If TCP window scaling is enabled, there can be
                         * a round-down error as the advertised receive window
                         * is actually right shifted n bits.  This means that
                         * the information in the lower n bits is wiped out.
                         * It will look like the window has shrunk.  Do a
                         * check here to see if the shrunk amount is actually
                         * within the error in the window calculation.  If it
                         * is, just return.  Note that this check is inside
                         * the shrunk window check.  This makes sure that even
                         * though tcp_process_shrunk_swnd() is not called,
                         * we will stop further processing.
                         */
                        if ((-usable_r >> tcp->tcp_snd_ws) > 0) {
                                tcp_process_shrunk_swnd(tcp, -usable_r);
                        }
                        return;
                }

                /* usable = MIN(swnd, cwnd) - unacked_bytes */
                if (tcp->tcp_swnd > tcp->tcp_cwnd)
                        usable_r -= tcp->tcp_swnd - tcp->tcp_cwnd;

                /* usable = MIN(usable, unsent) */
                if (usable_r > len)
                        usable_r = len;

                /* usable = MAX(usable, {1 for urgent, 0 for data}) */
                if (usable_r > 0) {
                        usable = usable_r;
                } else {
                        /* Bypass all other unnecessary processing. */
                        goto done;
                }
        }

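        /*
         * Record the send time; it is stashed in each transmitted mblk's
         * b_prev (cast through an mblk pointer) for later bookkeeping.
         */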
        local_time = (mblk_t *)now;

        /*
         * "Our" Nagle Algorithm.  This is not the same as in the old
         * BSD.  This is more in line with the true intent of Nagle.
         *
         * The conditions are:
         * 1. The amount of unsent data (or amount of data which can be
         *    sent, whichever is smaller) is less than Nagle limit.
         * 2. The last sent size is also less than Nagle limit.
         * 3. There is unack'ed data.
         * 4. Urgent pointer is not set.  Send urgent data ignoring the
         *    Nagle algorithm.  This reduces the probability that urgent
         *    bytes get "merged" together.
         * 5. The app has not closed the connection.  This eliminates the
         *    wait time of the receiving side waiting for the last piece of
         *    (small) data.
         *
         * If all are satisfied, exit without sending anything.  Note
         * that Nagle limit can be smaller than 1 MSS.  Nagle limit is
         * the smaller of 1 MSS and the global tcp_naglim_def (which
         * defaults to 4095).
         */
        if (usable < (int)tcp->tcp_naglim &&
            tcp->tcp_naglim > tcp->tcp_last_sent_len &&
            snxt != tcp->tcp_suna &&
            !(tcp->tcp_valid_bits & TCP_URG_VALID) &&
            !(tcp->tcp_valid_bits & TCP_FSS_VALID)) {
                goto done;
        }

        /*
         * If tcp_zero_win_probe is not set and the tcp->tcp_cork option
         * is set, then TCP must not send a partial segment (smaller than
         * MSS bytes).  We therefore round usable down to a whole number
         * of full-MSS segments and save the rest of the remaining data
         * for later.  When tcp_zero_win_probe is set, TCP needs to send
         * out something to probe the zero window.
         */
        if (tcp->tcp_cork && !tcp->tcp_zero_win_probe) {
                if (usable < mss)
                        goto done;
                usable = (usable / mss) * mss;
        }

        /* Update the latest receive window size in TCP header. */
        tcp->tcp_tcpha->tha_win = htons(tcp->tcp_rwnd >> tcp->tcp_rcv_ws);

        /* Send the packet. */
        rc = tcp_send(tcp, mss, total_hdr_len, tcp_hdr_len,
            num_sack_blk, &usable, &snxt, &tail_unsent, &xmit_tail,
            local_time);

        /* Pretend that all we were trying to send really got sent */
        if (rc < 0 && tail_unsent < 0) {
                do {
                        xmit_tail = xmit_tail->b_cont;
                        xmit_tail->b_prev = local_time;
                        ASSERT((uintptr_t)(xmit_tail->b_wptr -
                            xmit_tail->b_rptr) <= (uintptr_t)INT_MAX);
                        tail_unsent += (int)(xmit_tail->b_wptr -
                            xmit_tail->b_rptr);
                } while (tail_unsent < 0);
        }
done:;
        tcp->tcp_xmit_tail = xmit_tail;
        tcp->tcp_xmit_tail_unsent = tail_unsent;
        len = tcp->tcp_snxt - snxt;
        if (len) {
                /*
                 * If new data was sent, need to update the notsack
                 * list, which is, after all, data blocks that have
                 * not been sack'ed by the receiver.  New data is
                 * not sack'ed.
                 */
                if (tcp->tcp_snd_sack_ok && tcp->tcp_notsack_list != NULL) {
                        /* len is a negative value. */
                        tcp->tcp_pipe -= len;
                        tcp_notsack_update(&(tcp->tcp_notsack_list),
                            tcp->tcp_snxt, snxt,
                            &(tcp->tcp_num_notsack_blk),
                            &(tcp->tcp_cnt_notsack_list));
                }
                tcp->tcp_snxt = snxt + tcp->tcp_fin_sent;
                tcp->tcp_rack = tcp->tcp_rnxt;
                tcp->tcp_rack_cnt = 0;
                if ((snxt + len) == tcp->tcp_suna) {
                        TCP_TIMER_RESTART(tcp, tcp->tcp_rto);
                }
        } else if (snxt == tcp->tcp_suna && tcp->tcp_swnd == 0) {
                /*
                 * Didn't send anything. Make sure the timer is running
                 * so that we will probe a zero window.
                 */
                TCP_TIMER_RESTART(tcp, tcp->tcp_rto);
        }
        /* Note that len is the amount we just sent but with a negative sign */
        tcp->tcp_unsent += len;
        mutex_enter(&tcp->tcp_non_sq_lock);
        if (tcp->tcp_flow_stopped) {
                if (TCP_UNSENT_BYTES(tcp) <= connp->conn_sndlowat) {
                        tcp_clrqfull(tcp);
                }
        } else if (TCP_UNSENT_BYTES(tcp) >= connp->conn_sndbuf) {
                if (!(tcp->tcp_detached))
                        tcp_setqfull(tcp);
        }
        mutex_exit(&tcp->tcp_non_sq_lock);
}

/*
 * Initial STREAMS write side put() procedure for sockets. It tries to
 * handle the T_CAPABILITY_REQ which sockfs sends down while setting
 * up the socket without using the squeue. Non T_CAPABILITY_REQ messages
 * are handled by tcp_wput() as usual.
 *
 * All further messages will also be handled by tcp_wput() because we cannot
 * be sure that the above shortcut is safe later.
 */
void
tcp_wput_sock(queue_t *wq, mblk_t *mp)
{
        conn_t                  *connp = Q_TO_CONN(wq);
        tcp_t                   *tcp = connp->conn_tcp;
        struct T_capability_req *car = (struct T_capability_req *)mp->b_rptr;

        ASSERT(wq->q_qinfo == &tcp_sock_winit);
        wq->q_qinfo = &tcp_winit;
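        /* From here on, all messages take the normal tcp_wput() path. */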

        ASSERT(IPCL_IS_TCP(connp));
        ASSERT(TCP_IS_SOCKET(tcp));

        if (DB_TYPE(mp) == M_PCPROTO &&
            MBLKL(mp) == sizeof (struct T_capability_req) &&
            car->PRIM_type == T_CAPABILITY_REQ) {
                tcp_capability_req(tcp, mp);
                return;
        }

        tcp_wput(wq, mp);
}

/* ARGSUSED */
void
tcp_wput_fallback(queue_t *wq, mblk_t *mp)
{
#ifdef DEBUG
        cmn_err(CE_CONT, "tcp_wput_fallback: Message during fallback \n");
#endif
        freemsg(mp);
}

/*
 * Called by tcp_wput() to handle misc non M_DATA messages.
 */
/* ARGSUSED */
static void
tcp_wput_nondata(void *arg, mblk_t *mp, void *arg2, ip_recv_attr_t *dummy)
{
        conn_t  *connp = (conn_t *)arg;
        tcp_t   *tcp = connp->conn_tcp;

        ASSERT(DB_TYPE(mp) != M_IOCTL);
        /*
         * TCP is D_MP and qprocsoff() is done towards the end of tcp_close().
         * Once the close starts, streamhead and sockfs will not let any data
         * packets come down (close ensures that there are no threads using the
         * queue and no new threads will come down) but since qprocsoff()
         * hasn't happened yet, an M_FLUSH or some non data message might
         * get reflected back (in response to our own FLUSHRW) and get
         * processed after tcp_close() is done. The conn would still be valid
         * because a ref would have been added but we need to check the state
         * before actually processing the packet.
         */
        if (TCP_IS_DETACHED(tcp) || (tcp->tcp_state == TCPS_CLOSED)) {
                freemsg(mp);
                return;
        }

        switch (DB_TYPE(mp)) {
        case M_IOCDATA:
                tcp_wput_iocdata(tcp, mp);
                break;
        case M_FLUSH:
                tcp_wput_flush(tcp, mp);
                break;
        default:
                ip_wput_nondata(connp->conn_wq, mp);
                break;
        }
}

/* tcp_wput_flush is called by tcp_wput_nondata to handle M_FLUSH messages. */
static void
tcp_wput_flush(tcp_t *tcp, mblk_t *mp)
{
        uchar_t fval = *mp->b_rptr;
        mblk_t  *tail;
        conn_t  *connp = tcp->tcp_connp;
        queue_t *q = connp->conn_wq;

        /* TODO: How should flush interact with urgent data? */
        if ((fval & FLUSHW) && tcp->tcp_xmit_head != NULL &&
            !(tcp->tcp_valid_bits & TCP_URG_VALID)) {
                /*
                 * Flush only data that has not yet been put on the wire.  If
                 * we flush data that we have already transmitted, life, as we
                 * know it, may come to an end.
                 */
                tail = tcp->tcp_xmit_tail;
                tail->b_wptr -= tcp->tcp_xmit_tail_unsent;
                tcp->tcp_xmit_tail_unsent = 0;
                tcp->tcp_unsent = 0;
                if (tail->b_wptr != tail->b_rptr)
                        tail = tail->b_cont;
                if (tail) {
                        mblk_t **excess = &tcp->tcp_xmit_head;
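                        /*
                         * Walk forward to the first wholly-unsent mblk,
                         * updating xmit_tail/xmit_last along the way, then
                         * detach and free everything from there on.
                         */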
                        for (;;) {
                                mblk_t *mp1 = *excess;
                                if (mp1 == tail)
                                        break;
                                tcp->tcp_xmit_tail = mp1;
                                tcp->tcp_xmit_last = mp1;
                                excess = &mp1->b_cont;
                        }
                        *excess = NULL;
                        tcp_close_mpp(&tail);
                        if (tcp->tcp_snd_zcopy_aware)
                                tcp_zcopy_notify(tcp);
                }
                /*
                 * We have no unsent data, so the unsent count must be less
                 * than conn_sndlowat; re-enable flow.
                 */
                mutex_enter(&tcp->tcp_non_sq_lock);
                if (tcp->tcp_flow_stopped) {
                        tcp_clrqfull(tcp);
                }
                mutex_exit(&tcp->tcp_non_sq_lock);
        }
        /*
         * TODO: you can't just flush these, you have to increase rwnd for one
         * thing.  For another, how should urgent data interact?
         */
        if (fval & FLUSHR) {
                *mp->b_rptr = fval & ~FLUSHW;
                /* XXX */
                qreply(q, mp);
                return;
        }
        freemsg(mp);
}

/*
 * tcp_wput_iocdata is called by tcp_wput_nondata to handle all M_IOCDATA
 * messages.
 */
static void
tcp_wput_iocdata(tcp_t *tcp, mblk_t *mp)
{
        mblk_t          *mp1;
        struct iocblk   *iocp = (struct iocblk *)mp->b_rptr;
        STRUCT_HANDLE(strbuf, sb);
        uint_t          addrlen;
        conn_t          *connp = tcp->tcp_connp;
        queue_t         *q = connp->conn_wq;

        /* Make sure it is one of ours. */
        switch (iocp->ioc_cmd) {
        case TI_GETMYNAME:
        case TI_GETPEERNAME:
                break;
        default:
                /*
                 * If the conn is closing, then error the ioctl here. Otherwise
                 * use the CONN_IOCTLREF_* macros to hold off tcp_close until
                 * we're done here.
                 */
                mutex_enter(&connp->conn_lock);
                if (connp->conn_state_flags & CONN_CLOSING) {
                        mutex_exit(&connp->conn_lock);
                        iocp->ioc_error = EINVAL;
                        mp->b_datap->db_type = M_IOCNAK;
                        iocp->ioc_count = 0;
                        qreply(q, mp);
                        return;
                }

                CONN_INC_IOCTLREF_LOCKED(connp);
                ip_wput_nondata(q, mp);
                CONN_DEC_IOCTLREF(connp);
                return;
        }
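        /*
         * Drive the mi_copy state machine: copy the strbuf in first,
         * then copy the requested address back out.
         */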
        switch (mi_copy_state(q, mp, &mp1)) {
        case -1:
                return;
        case MI_COPY_CASE(MI_COPY_IN, 1):
                break;
        case MI_COPY_CASE(MI_COPY_OUT, 1):
                /* Copy out the strbuf. */
                mi_copyout(q, mp);
                return;
        case MI_COPY_CASE(MI_COPY_OUT, 2):
                /* All done. */
                mi_copy_done(q, mp, 0);
                return;
        default:
                mi_copy_done(q, mp, EPROTO);
                return;
        }
        /* Check alignment of the strbuf */
        if (!OK_32PTR(mp1->b_rptr)) {
                mi_copy_done(q, mp, EINVAL);
                return;
        }

        STRUCT_SET_HANDLE(sb, iocp->ioc_flag, (void *)mp1->b_rptr);

        if (connp->conn_family == AF_INET)
                addrlen = sizeof (sin_t);
        else
                addrlen = sizeof (sin6_t);

        if (STRUCT_FGET(sb, maxlen) < addrlen) {
                mi_copy_done(q, mp, EINVAL);
                return;
        }

        switch (iocp->ioc_cmd) {
        case TI_GETMYNAME:
                break;
        case TI_GETPEERNAME:
                if (tcp->tcp_state < TCPS_SYN_RCVD) {
                        mi_copy_done(q, mp, ENOTCONN);
                        return;
                }
                break;
        }
        mp1 = mi_copyout_alloc(q, mp, STRUCT_FGETP(sb, buf), addrlen, B_TRUE);
        if (!mp1)
                return;

        STRUCT_FSET(sb, len, addrlen);
        switch (((struct iocblk *)mp->b_rptr)->ioc_cmd) {
        case TI_GETMYNAME:
                (void) conn_getsockname(connp, (struct sockaddr *)mp1->b_wptr,
                    &addrlen);
                break;
        case TI_GETPEERNAME:
                (void) conn_getpeername(connp, (struct sockaddr *)mp1->b_wptr,
                    &addrlen);
                break;
        }
        mp1->b_wptr += addrlen;
        /* Copy out the address */
        mi_copyout(q, mp);
}

/*
 * tcp_wput_ioctl is called by tcp_wput_nondata() to handle all M_IOCTL
 * messages.
 */
/* ARGSUSED */
static void
tcp_wput_ioctl(void *arg, mblk_t *mp, void *arg2, ip_recv_attr_t *dummy)
{
        conn_t          *connp = (conn_t *)arg;
        tcp_t           *tcp = connp->conn_tcp;
        queue_t         *q = connp->conn_wq;
        struct iocblk   *iocp;

        ASSERT(DB_TYPE(mp) == M_IOCTL);
        /*
         * Try and ASSERT the minimum possible references on the
         * conn early enough. Since we are executing on write side,
         * the connection is obviously not detached and that means
         * there is a ref each for TCP and IP. Since we are behind
         * the squeue, the minimum references needed are 3. If the
         * conn is in classifier hash list, there should be an
         * extra ref for that (we check both the possibilities).
         */
        ASSERT((connp->conn_fanout != NULL && connp->conn_ref >= 4) ||
            (connp->conn_fanout == NULL && connp->conn_ref >= 3));

        iocp = (struct iocblk *)mp->b_rptr;
        switch (iocp->ioc_cmd) {
        case _SIOCSOCKFALLBACK:
                /*
                 * Either sockmod is about to be popped and the socket
                 * would now be treated as a plain stream, or a module
                 * is about to be pushed so we could no longer use read-
                 * side synchronous streams for fused loopback tcp.
                 * Drain any queued data and disable direct sockfs
                 * interface from now on.
                 */
                if (!tcp->tcp_issocket) {
                        DB_TYPE(mp) = M_IOCNAK;
                        iocp->ioc_error = EINVAL;
                } else {
                        tcp_use_pure_tpi(tcp);
                        DB_TYPE(mp) = M_IOCACK;
                        iocp->ioc_error = 0;
                }
                iocp->ioc_count = 0;
                iocp->ioc_rval = 0;
                qreply(q, mp);
                return;
        }

        /*
         * If the conn is closing, then error the ioctl here. Otherwise bump
         * the conn_ioctlref to hold off tcp_close until we're done here.
         */
        mutex_enter(&(connp)->conn_lock);
        if ((connp)->conn_state_flags & CONN_CLOSING) {
                mutex_exit(&(connp)->conn_lock);
                iocp->ioc_error = EINVAL;
                mp->b_datap->db_type = M_IOCNAK;
                iocp->ioc_count = 0;
                qreply(q, mp);
                return;
        }

        CONN_INC_IOCTLREF_LOCKED(connp);
        ip_wput_nondata(q, mp);
        CONN_DEC_IOCTLREF(connp);
}

/*
 * This routine is called by tcp_wput() to handle all TPI requests.
 */
/* ARGSUSED */
static void
tcp_wput_proto(void *arg, mblk_t *mp, void *arg2, ip_recv_attr_t *dummy)
{
        conn_t          *connp = (conn_t *)arg;
        tcp_t           *tcp = connp->conn_tcp;
        union T_primitives *tprim = (union T_primitives *)mp->b_rptr;
        uchar_t         *rptr;
        t_scalar_t      type;
        cred_t          *cr;

        /*
         * Try and ASSERT the minimum possible references on the
         * conn early enough. Since we are executing on write side,
         * the connection is obviously not detached and that means
         * there is a ref each for TCP and IP. Since we are behind
         * the squeue, the minimum references needed are 3. If the
         * conn is in classifier hash list, there should be an
         * extra ref for that (we check both the possibilities).
         */
        ASSERT((connp->conn_fanout != NULL && connp->conn_ref >= 4) ||
            (connp->conn_fanout == NULL && connp->conn_ref >= 3));

        rptr = mp->b_rptr;
        ASSERT((uintptr_t)(mp->b_wptr - rptr) <= (uintptr_t)INT_MAX);
        if ((mp->b_wptr - rptr) >= sizeof (t_scalar_t)) {
                type = ((union T_primitives *)rptr)->type;
                if (type == T_EXDATA_REQ) {
                        tcp_output_urgent(connp, mp, arg2, NULL);
                } else if (type != T_DATA_REQ) {
                        goto non_urgent_data;
                } else {
                        /* TODO: options, flags, ... from user */
                        /* Set length to zero for reclamation below */
                        tcp_wput_data(tcp, mp->b_cont, B_TRUE);
                        freeb(mp);
                }
                return;
        } else {
                if (connp->conn_debug) {
                        (void) strlog(TCP_MOD_ID, 0, 1, SL_ERROR|SL_TRACE,
                            "tcp_wput_proto, dropping one...");
                }
                freemsg(mp);
                return;
        }

non_urgent_data:

        switch ((int)tprim->type) {
        case O_T_BIND_REQ:      /* bind request */
        case T_BIND_REQ:        /* new semantics bind request */
                tcp_tpi_bind(tcp, mp);
                break;
        case T_UNBIND_REQ:      /* unbind request */
                tcp_tpi_unbind(tcp, mp);
                break;
        case O_T_CONN_RES:      /* old connection response XXX */
        case T_CONN_RES:        /* connection response */
                tcp_tli_accept(tcp, mp);
                break;
        case T_CONN_REQ:        /* connection request */
                tcp_tpi_connect(tcp, mp);
                break;
        case T_DISCON_REQ:      /* disconnect request */
                tcp_disconnect(tcp, mp);
                break;
        case T_CAPABILITY_REQ:
                tcp_capability_req(tcp, mp);    /* capability request */
                break;
        case T_INFO_REQ:        /* information request */
                tcp_info_req(tcp, mp);
                break;
        case T_SVR4_OPTMGMT_REQ:        /* manage options req */
        case T_OPTMGMT_REQ:
                /*
                 * Note:  no support for snmpcom_req() through new
                 * T_OPTMGMT_REQ. See comments in ip.c
                 */

                /*
                 * All Solaris components should pass a db_credp
                 * for this TPI message, hence we ASSERT.
                 * But in case there is some other M_PROTO that looks
                 * like a TPI message sent by some other kernel
                 * component, we check and return an error.
                 */
                cr = msg_getcred(mp, NULL);
                ASSERT(cr != NULL);
                if (cr == NULL) {
                        tcp_err_ack(tcp, mp, TSYSERR, EINVAL);
                        return;
                }
                /*
                 * If EINPROGRESS is returned, the request has been queued
                 * for subsequent processing by ip_restart_optmgmt(), which
                 * will do the CONN_DEC_REF().
                 */
                if ((int)tprim->type == T_SVR4_OPTMGMT_REQ) {
                        svr4_optcom_req(connp->conn_wq, mp, cr, &tcp_opt_obj);
                } else {
                        tpi_optcom_req(connp->conn_wq, mp, cr, &tcp_opt_obj);
                }
                break;

        case T_UNITDATA_REQ:    /* unitdata request */
                tcp_err_ack(tcp, mp, TNOTSUPPORT, 0);
                break;
        case T_ORDREL_REQ:      /* orderly release req */
                freemsg(mp);

                if (tcp->tcp_fused)
                        tcp_unfuse(tcp);

                if (tcp_xmit_end(tcp) != 0) {
                        /*
                         * We were crossing FINs and got a reset from
                         * the other side. Just ignore it.
                         */
                        if (connp->conn_debug) {
                                (void) strlog(TCP_MOD_ID, 0, 1,
                                    SL_ERROR|SL_TRACE,
                                    "tcp_wput_proto, T_ORDREL_REQ out of "
                                    "state %s",
                                    tcp_display(tcp, NULL,
                                    DISP_ADDR_AND_PORT));
                        }
                }
                break;
        case T_ADDR_REQ:
                tcp_addr_req(tcp, mp);
                break;
        default:
                if (connp->conn_debug) {
                        (void) strlog(TCP_MOD_ID, 0, 1, SL_ERROR|SL_TRACE,
                            "tcp_wput_proto, bogus TPI msg, type %d",
                            tprim->type);
                }
                /*
                 * We used to M_ERROR.  Sending TNOTSUPPORT gives the user
                 * a chance to recover.
                 */
                tcp_err_ack(tcp, mp, TNOTSUPPORT, 0);
                break;
        }
}

/*
 * Handle special out-of-band ioctl requests (see PSARC/2008/265).
 */
static void
tcp_wput_cmdblk(queue_t *q, mblk_t *mp)
{
        void    *data;
        mblk_t  *datamp = mp->b_cont;
        conn_t  *connp = Q_TO_CONN(q);
        tcp_t   *tcp = connp->conn_tcp;
        cmdblk_t *cmdp = (cmdblk_t *)mp->b_rptr;

        if (datamp == NULL || MBLKL(datamp) < cmdp->cb_len) {
                cmdp->cb_error = EPROTO;
                qreply(q, mp);
                return;
        }

        data = datamp->b_rptr;

        switch (cmdp->cb_cmd) {
        case TI_GETPEERNAME:
                if (tcp->tcp_state < TCPS_SYN_RCVD)
                        cmdp->cb_error = ENOTCONN;
                else
                        cmdp->cb_error = conn_getpeername(connp, data,
                            &cmdp->cb_len);
                break;
        case TI_GETMYNAME:
                cmdp->cb_error = conn_getsockname(connp, data, &cmdp->cb_len);
                break;
        default:
                cmdp->cb_error = EINVAL;
                break;
        }

        qreply(q, mp);
}

/*
 * The TCP fast path write put procedure.
 * NOTE: the logic of the fast path is duplicated from tcp_wput_data()
 */
/* ARGSUSED */
void
tcp_output(void *arg, mblk_t *mp, void *arg2, ip_recv_attr_t *dummy)
{
        int             len;
        int             hdrlen;
        int             plen;
        mblk_t          *mp1;
        uchar_t         *rptr;
        uint32_t        snxt;
        tcpha_t         *tcpha;
        struct datab    *db;
        uint32_t        suna;
        uint32_t        mss;
        ipaddr_t        *dst;
        ipaddr_t        *src;
        uint32_t        sum;
        int             usable;
        conn_t          *connp = (conn_t *)arg;
        tcp_t           *tcp = connp->conn_tcp;
        uint32_t        msize;
        tcp_stack_t     *tcps = tcp->tcp_tcps;
        ip_xmit_attr_t  *ixa;
        clock_t         now;

        /*
         * Try and ASSERT the minimum possible references on the
         * conn early enough. Since we are executing on write side,
         * the connection is obviously not detached and that means
         * there is a ref each for TCP and IP. Since we are behind
         * the squeue, the minimum references needed are 3. If the
         * conn is in classifier hash list, there should be an
         * extra ref for that (we check both the possibilities).
         */
        ASSERT((connp->conn_fanout != NULL && connp->conn_ref >= 4) ||
            (connp->conn_fanout == NULL && connp->conn_ref >= 3));

        ASSERT(DB_TYPE(mp) == M_DATA);
        msize = (mp->b_cont == NULL) ? MBLKL(mp) : msgdsize(mp);

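        /* Undo the tcp_squeue_bytes accounting done in tcp_wput(). */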
        mutex_enter(&tcp->tcp_non_sq_lock);
        tcp->tcp_squeue_bytes -= msize;
        mutex_exit(&tcp->tcp_non_sq_lock);

        /* Bypass tcp protocol for fused tcp loopback */
        if (tcp->tcp_fused && tcp_fuse_output(tcp, mp, msize))
                return;

        mss = tcp->tcp_mss;
        /*
         * If ZEROCOPY has been turned off, try not to send any zero-copy
         * message down. Do the backoff now.
         */
        if (tcp->tcp_snd_zcopy_aware && !tcp->tcp_snd_zcopy_on)
                mp = tcp_zcopy_backoff(tcp, mp, B_FALSE);

        ASSERT((uintptr_t)(mp->b_wptr - mp->b_rptr) <= (uintptr_t)INT_MAX);
        len = (int)(mp->b_wptr - mp->b_rptr);

        /*
         * Criteria for fast path:
         *
         *   1. no unsent data
         *   2. single mblk in request
         *   3. connection established
         *   4. data in mblk
         *   5. len <= mss
         *   6. no tcp_valid bits
         */
        if ((tcp->tcp_unsent != 0) ||
            (tcp->tcp_cork) ||
            (mp->b_cont != NULL) ||
            (tcp->tcp_state != TCPS_ESTABLISHED) ||
            (len == 0) ||
            (len > mss) ||
            (tcp->tcp_valid_bits != 0)) {
                tcp_wput_data(tcp, mp, B_FALSE);
                return;
        }

        ASSERT(tcp->tcp_xmit_tail_unsent == 0);
        ASSERT(tcp->tcp_fin_sent == 0);

        /* queue new packet onto retransmission queue */
        if (tcp->tcp_xmit_head == NULL) {
                tcp->tcp_xmit_head = mp;
        } else {
                tcp->tcp_xmit_last->b_cont = mp;
        }
        tcp->tcp_xmit_last = mp;
        tcp->tcp_xmit_tail = mp;

        /* find out how much we can send */
        /* BEGIN CSTYLED */
        /*
         *    un-acked     usable
         *  |--------------|-----------------|
         *  tcp_suna       tcp_snxt       tcp_suna+tcp_swnd
         */
        /* END CSTYLED */

        /* start sending from tcp_snxt */
        snxt = tcp->tcp_snxt;

        /*
         * Check to see if this connection has been idle for some
         * time and no ACK is expected.  If it is, we need to slow
         * start again to get back the connection's "self-clock" as
         * described in VJ's paper.
         *
         * Reinitialize tcp_cwnd after idle.
         */
        now = LBOLT_FASTPATH;
        if ((tcp->tcp_suna == snxt) && !tcp->tcp_localnet &&
            (TICK_TO_MSEC(now - tcp->tcp_last_recv_time) >= tcp->tcp_rto)) {
                TCP_SET_INIT_CWND(tcp, mss, tcps->tcps_slow_start_after_idle);
        }

        usable = tcp->tcp_swnd;              /* tcp window size */
        if (usable > tcp->tcp_cwnd)
                usable = tcp->tcp_cwnd;      /* congestion window smaller */
        usable -= snxt;         /* subtract stuff already sent */
        suna = tcp->tcp_suna;
        usable += suna;
        /* usable can be < 0 if the congestion window is smaller */
        if (len > usable) {
                /* Can't send complete M_DATA in one shot */
                goto slow;
        }

        mutex_enter(&tcp->tcp_non_sq_lock);
        if (tcp->tcp_flow_stopped &&
            TCP_UNSENT_BYTES(tcp) <= connp->conn_sndlowat) {
                tcp_clrqfull(tcp);
        }
        mutex_exit(&tcp->tcp_non_sq_lock);

        /*
         * Determine whether there is anything to send (Nagle).
         *
         *   1. len < tcp_mss (i.e. small)
         *   2. unacknowledged data present
         *   3. len < nagle limit
         *   4. last packet sent < nagle limit (previous packet sent)
         */
        if ((len < mss) && (snxt != suna) &&
            (len < (int)tcp->tcp_naglim) &&
            (tcp->tcp_last_sent_len < tcp->tcp_naglim)) {
                /*
                 * This was the first unsent packet and normally
                 * mss < xmit_hiwater so there is no need to worry
                 * about flow control. The next packet will go
                 * through the flow control check in tcp_wput_data().
                 */
                /* leftover work from above */
                tcp->tcp_unsent = len;
                tcp->tcp_xmit_tail_unsent = len;

                return;
        }

        /*
         * len <= tcp->tcp_mss && len == unsent so no sender silly window.  Can
         * send now.
         */

        if (snxt == suna) {
                TCP_TIMER_RESTART(tcp, tcp->tcp_rto);
        }

        /* we have always sent something */
        tcp->tcp_rack_cnt = 0;

        tcp->tcp_snxt = snxt + len;
        tcp->tcp_rack = tcp->tcp_rnxt;

        if ((mp1 = dupb(mp)) == NULL)
                goto no_memory;
        mp->b_prev = (mblk_t *)(uintptr_t)now;
        mp->b_next = (mblk_t *)(uintptr_t)snxt;

        /* adjust tcp header information */
        tcpha = tcp->tcp_tcpha;
        tcpha->tha_flags = (TH_ACK|TH_PUSH);

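        /*
         * conn_sum holds the partial pseudo-header checksum; add in the
         * TCP length and fold the carry into the low 16 bits.
         */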
        sum = len + connp->conn_ht_ulp_len + connp->conn_sum;
        sum = (sum >> 16) + (sum & 0xFFFF);
        tcpha->tha_sum = htons(sum);

        tcpha->tha_seq = htonl(snxt);

        TCPS_BUMP_MIB(tcps, tcpOutDataSegs);
        TCPS_UPDATE_MIB(tcps, tcpOutDataBytes, len);
        BUMP_LOCAL(tcp->tcp_obsegs);

        /* Update the latest receive window size in TCP header. */
        tcpha->tha_win = htons(tcp->tcp_rwnd >> tcp->tcp_rcv_ws);

        tcp->tcp_last_sent_len = (ushort_t)len;

        plen = len + connp->conn_ht_iphc_len;

        ixa = connp->conn_ixa;
        ixa->ixa_pktlen = plen;

        if (ixa->ixa_flags & IXAF_IS_IPV4) {
                tcp->tcp_ipha->ipha_length = htons(plen);
        } else {
                tcp->tcp_ip6h->ip6_plen = htons(plen - IPV6_HDR_LEN);
        }

        /* see if we need to allocate an mblk for the headers */
        hdrlen = connp->conn_ht_iphc_len;
        rptr = mp1->b_rptr - hdrlen;
        db = mp1->b_datap;
        if ((db->db_ref != 2) || rptr < db->db_base ||
            (!OK_32PTR(rptr))) {
                /* NOTE: we assume allocb returns an OK_32PTR */
                mp = allocb(hdrlen + tcps->tcps_wroff_xtra, BPRI_MED);
                if (!mp) {
                        freemsg(mp1);
                        goto no_memory;
                }
                mp->b_cont = mp1;
                mp1 = mp;
                /* Leave room for Link Level header */
                rptr = &mp1->b_rptr[tcps->tcps_wroff_xtra];
                mp1->b_wptr = &rptr[hdrlen];
        }
        mp1->b_rptr = rptr;

        /* Fill in the timestamp option. */
        if (tcp->tcp_snd_ts_ok) {
                uint32_t llbolt = (uint32_t)LBOLT_FASTPATH;

                U32_TO_BE32(llbolt,
                    (char *)tcpha + TCP_MIN_HEADER_LENGTH+4);
                U32_TO_BE32(tcp->tcp_ts_recent,
                    (char *)tcpha + TCP_MIN_HEADER_LENGTH+8);
        } else {
                ASSERT(connp->conn_ht_ulp_len == TCP_MIN_HEADER_LENGTH);
        }

        /* copy header into outgoing packet */
        dst = (ipaddr_t *)rptr;
        src = (ipaddr_t *)connp->conn_ht_iphc;
        dst[0] = src[0];
        dst[1] = src[1];
        dst[2] = src[2];
        dst[3] = src[3];
        dst[4] = src[4];
        dst[5] = src[5];
        dst[6] = src[6];
        dst[7] = src[7];
        dst[8] = src[8];
        dst[9] = src[9];
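        /* Copy any remaining header bytes beyond the first 40 (e.g. options). */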
1333         if (hdrlen -= 40) {
1334                 hdrlen >>= 2;
1335                 dst += 10;
1336                 src += 10;
1337                 do {
1338                         *dst++ = *src++;
1339                 } while (--hdrlen);
1340         }
1341 
1342         /*
1343          * Set the ECN info in the TCP header.  Note that this
1344          * is not the template header.
1345          */
1346         if (tcp->tcp_ecn_ok) {
1347                 TCP_SET_ECT(tcp, rptr);
1348 
1349                 tcpha = (tcpha_t *)(rptr + ixa->ixa_ip_hdr_length);
1350                 if (tcp->tcp_ecn_echo_on)
1351                         tcpha->tha_flags |= TH_ECE;
1352                 if (tcp->tcp_cwr && !tcp->tcp_ecn_cwr_sent) {
1353                         tcpha->tha_flags |= TH_CWR;
1354                         tcp->tcp_ecn_cwr_sent = B_TRUE;
1355                 }
1356         }
1357 
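             /*
              * If we have made forward progress since the last transmit, ask
              * IP to pass a reachability confirmation on to neighbor
              * discovery (IXAF_REACH_CONF).
              */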
1358         if (tcp->tcp_ip_forward_progress) {
1359                 tcp->tcp_ip_forward_progress = B_FALSE;
1360                 connp->conn_ixa->ixa_flags |= IXAF_REACH_CONF;
1361         } else {
1362                 connp->conn_ixa->ixa_flags &= ~IXAF_REACH_CONF;
1363         }
1364         tcp_send_data(tcp, mp1);
1365         return;
1366 
1367         /*
1368          * If we ran out of memory, we pretend to have sent the packet
1369          * and that it was lost on the wire.
1370          */
1371 no_memory:
1372         return;
1373 
1374 slow:
1375         /* leftover work from above */
1376         tcp->tcp_unsent = len;
1377         tcp->tcp_xmit_tail_unsent = len;
1378         tcp_wput_data(tcp, NULL, B_FALSE);
1379 }
1380 
1381 /* ARGSUSED2 */
1382 void
1383 tcp_output_urgent(void *arg, mblk_t *mp, void *arg2, ip_recv_attr_t *dummy)
1384 {
1385         int len;
1386         uint32_t msize;
1387         conn_t *connp = (conn_t *)arg;
1388         tcp_t *tcp = connp->conn_tcp;
1389 
1390         msize = msgdsize(mp);
1391 
1392         len = msize - 1;
1393         if (len < 0) {
1394                 freemsg(mp);
1395                 return;
1396         }
1397 
1398         /*
1399          * Try to force urgent data out on the wire. Even if we have unsent
1400          * data this will at least send the urgent flag.
1401          * XXX this does not handle the "more" flag correctly.
1402          */
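             /*
              * tcp_urg ends up as the sequence number of the last byte of
              * the urgent data: snxt plus everything unsent plus the new
              * payload, minus one.
              */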
1403         len += tcp->tcp_unsent;
1404         len += tcp->tcp_snxt;
1405         tcp->tcp_urg = len;
1406         tcp->tcp_valid_bits |= TCP_URG_VALID;
1407 
1408         /* Bypass tcp protocol for fused tcp loopback */
1409         if (tcp->tcp_fused && tcp_fuse_output(tcp, mp, msize))
1410                 return;
1411 
1412         /* Strip off the T_EXDATA_REQ if the data is from TPI */
1413         if (DB_TYPE(mp) != M_DATA) {
1414                 mblk_t *mp1 = mp;
1415                 ASSERT(!IPCL_IS_NONSTR(connp));
1416                 mp = mp->b_cont;
1417                 freeb(mp1);
1418         }
1419         tcp_wput_data(tcp, mp, B_TRUE);
1420 }
1421 
1422 /*
1423  * Called by the streams close routine, via squeues, when our client blows
1424  * off its descriptor.  We take this to mean: "close the stream state NOW,
1425  * close the tcp connection politely."  When SO_LINGER is set (with a
1426  * non-zero linger time and the socket is not nonblocking) this routine
1427  * sleeps until the FIN is acked.
1428  *
1429  * NOTE: tcp_close potentially returns an error when lingering.
1430  * However, the stream head currently does not pass these errors
1431  * to the application.  4.4BSD only returns EINTR and EWOULDBLOCK
1432  * errors to the application (from tsleep()) and not errors
1433  * like ECONNRESET caused by receiving a reset packet.
1434  */
1435 
1436 /* ARGSUSED */
1437 void
1438 tcp_close_output(void *arg, mblk_t *mp, void *arg2, ip_recv_attr_t *dummy)
1439 {
1440         char    *msg;
1441         conn_t  *connp = (conn_t *)arg;
1442         tcp_t   *tcp = connp->conn_tcp;
1443         clock_t delta = 0;
1444         tcp_stack_t     *tcps = tcp->tcp_tcps;
1445 
1446         /*
1447          * When a non-STREAMS socket is being closed, it does not always
1448          * stick around waiting for tcp_close_output to run and can therefore
1449          * have dropped a reference already. So adjust the asserts accordingly.
1450          */
1451         ASSERT((connp->conn_fanout != NULL &&
1452             connp->conn_ref >= (IPCL_IS_NONSTR(connp) ? 3 : 4)) ||
1453             (connp->conn_fanout == NULL &&
1454             connp->conn_ref >= (IPCL_IS_NONSTR(connp) ? 2 : 3)));
1455 
1456         mutex_enter(&tcp->tcp_eager_lock);
1457         if (tcp->tcp_conn_req_cnt_q0 != 0 || tcp->tcp_conn_req_cnt_q != 0) {
1458                 /*
1459                  * Cleanup for listener. For non-STREAM sockets sockfs will
1460                  * close all the eagers on 'q', so in that case only deal
1461                  * with 'q0'.
1462                  */
1463                 tcp_eager_cleanup(tcp, IPCL_IS_NONSTR(connp) ? 1 : 0);
1464                 tcp->tcp_wait_for_eagers = 1;
1465         }
1466         mutex_exit(&tcp->tcp_eager_lock);
1467 
1468         tcp->tcp_lso = B_FALSE;
1469 
1470         msg = NULL;
1471         switch (tcp->tcp_state) {
1472         case TCPS_CLOSED:
1473         case TCPS_IDLE:
1474                 break;
1475         case TCPS_BOUND:
1476                 if (tcp->tcp_listener != NULL) {
1477                         ASSERT(IPCL_IS_NONSTR(connp));
1478                         /*
1479                          * Unlink from the listener and drop the reference
1480                          * put on it by the eager. tcp_closei_local will not
1481                          * do it because tcp_tconnind_started is TRUE.
1482                          */
1483                         mutex_enter(&tcp->tcp_saved_listener->tcp_eager_lock);
1484                         tcp_eager_unlink(tcp);
1485                         mutex_exit(&tcp->tcp_saved_listener->tcp_eager_lock);
1486                         CONN_DEC_REF(tcp->tcp_saved_listener->tcp_connp);
1487                 }
1488                 break;
1489         case TCPS_LISTEN:
1490                 break;
1491         case TCPS_SYN_SENT:
1492                 msg = "tcp_close, during connect";
1493                 break;
1494         case TCPS_SYN_RCVD:
1495                 /*
1496                  * Close during the connect 3-way handshake;
1497                  * here there may or may not be pending data
1498                  * already on the queue.  Process almost the
1499                  * same as in the ESTABLISHED state.
1500                  */
1501                 /* FALLTHRU */
1502         default:
1503                 if (tcp->tcp_fused)
1504                         tcp_unfuse(tcp);
1505 
1506                 /*
1507                  * If SO_LINGER has set a zero linger time, abort the
1508                  * connection with a reset.
1509                  */
1510                 if (connp->conn_linger && connp->conn_lingertime == 0) {
1511                         msg = "tcp_close, zero lingertime";
1512                         break;
1513                 }
1514 
1515                 /*
1516                  * Abort connection if there is unread data queued.
1517                  */
1518                 if (tcp->tcp_rcv_list || tcp->tcp_reass_head) {
1519                         msg = "tcp_close, unread data";
1520                         break;
1521                 }
1522 
1523                 /*
1524                  * Abort connection if it is being closed without first
1525                  * being accepted. This can happen if a listening non-STREAM
1526                  * socket wants to get rid of the socket, for example, if the
1527                  * listener is closing.
1528                  */
1529                 if (tcp->tcp_listener != NULL) {
1530                         ASSERT(IPCL_IS_NONSTR(connp));
1531                         msg = "tcp_close, close before accept";
1532 
1533                         /*
1534                          * Unlink from the listener and drop the reference
1535                          * put on it by the eager. tcp_closei_local will not
1536                          * do it because tcp_tconnind_started is TRUE.
1537                          */
1538                         mutex_enter(&tcp->tcp_saved_listener->tcp_eager_lock);
1539                         tcp_eager_unlink(tcp);
1540                         mutex_exit(&tcp->tcp_saved_listener->tcp_eager_lock);
1541                         CONN_DEC_REF(tcp->tcp_saved_listener->tcp_connp);
1542                         break;
1543                 }
1544 
1545                 /*
1546                  * Transmit the FIN before detaching the tcp_t.
1547                  * After tcp_detach returns this queue/perimeter
1548                  * no longer owns the tcp_t thus others can modify it.
1549                  */
1550                 (void) tcp_xmit_end(tcp);
1551 
1552                 /*
1553                  * If lingering on close then wait until the fin is acked,
1554                  * the SO_LINGER time passes, or a reset is sent/received.
1555                  */
1556                 if (connp->conn_linger && connp->conn_lingertime > 0 &&
1557                     !(tcp->tcp_fin_acked) &&
1558                     tcp->tcp_state >= TCPS_ESTABLISHED) {
1559                         if (tcp->tcp_closeflags & (FNDELAY|FNONBLOCK)) {
1560                                 tcp->tcp_client_errno = EWOULDBLOCK;
1561                         } else if (tcp->tcp_client_errno == 0) {
1562 
1563                                 ASSERT(tcp->tcp_linger_tid == 0);
1564 
1565                                 /* conn_lingertime is in sec. */
1566                                 tcp->tcp_linger_tid = TCP_TIMER(tcp,
1567                                     tcp_close_linger_timeout,
1568                                     connp->conn_lingertime * MILLISEC);
1569 
1570                                 /* tcp_close_linger_timeout will finish close */
1571                                 if (tcp->tcp_linger_tid == 0)
1572                                         tcp->tcp_client_errno = ENOSR;
1573                                 else
1574                                         return;
1575                         }
1576 
1577                         /*
1578                          * Check if we need to detach or just close
1579                          * the instance.
1580                          */
1581                         if (tcp->tcp_state <= TCPS_LISTEN)
1582                                 break;
1583                 }
1584 
1585                 /*
1586                  * Make sure that no other thread will access the conn_rq of
1587                  * this instance (through lookups etc.) as conn_rq will go
1588                  * away shortly.
1589                  */
1590                 tcp_acceptor_hash_remove(tcp);
1591 
1592                 mutex_enter(&tcp->tcp_non_sq_lock);
1593                 if (tcp->tcp_flow_stopped) {
1594                         tcp_clrqfull(tcp);
1595                 }
1596                 mutex_exit(&tcp->tcp_non_sq_lock);
1597 
1598                 if (tcp->tcp_timer_tid != 0) {
1599                         delta = TCP_TIMER_CANCEL(tcp, tcp->tcp_timer_tid);
1600                         tcp->tcp_timer_tid = 0;
1601                 }
1602                 /*
1603                  * Need to cancel those timers which will not be used when
1604                  * TCP is detached.  This has to be done before the conn_wq
1605                  * is set to NULL.
1606                  */
1607                 tcp_timers_stop(tcp);
1608 
1609                 tcp->tcp_detached = B_TRUE;
1610                 if (tcp->tcp_state == TCPS_TIME_WAIT) {
1611                         tcp_time_wait_append(tcp);
1612                         TCP_DBGSTAT(tcps, tcp_detach_time_wait);
1613                         ASSERT(connp->conn_ref >=
1614                             (IPCL_IS_NONSTR(connp) ? 2 : 3));
1615                         goto finish;
1616                 }
1617 
1618                 /*
1619                  * If delta is zero the timer event wasn't executed and was
1620                  * successfully canceled. In this case we need to restart it
1621                  * with the minimal delta possible.
1622                  */
1623                 if (delta >= 0)
1624                         tcp->tcp_timer_tid = TCP_TIMER(tcp, tcp_timer,
1625                             delta ? delta : 1);
1626 
1627                 ASSERT(connp->conn_ref >= (IPCL_IS_NONSTR(connp) ? 2 : 3));
1628                 goto finish;
1629         }
1630 
1631         /* Detach did not complete. Still need to remove q from stream. */
1632         if (msg) {
1633                 if (tcp->tcp_state == TCPS_ESTABLISHED ||
1634                     tcp->tcp_state == TCPS_CLOSE_WAIT)
1635                         TCPS_BUMP_MIB(tcps, tcpEstabResets);
1636                 if (tcp->tcp_state == TCPS_SYN_SENT ||
1637                     tcp->tcp_state == TCPS_SYN_RCVD)
1638                         TCPS_BUMP_MIB(tcps, tcpAttemptFails);
1639                 tcp_xmit_ctl(msg, tcp, tcp->tcp_snxt, 0, TH_RST);
1640         }
1641 
1642         tcp_closei_local(tcp);
1643         CONN_DEC_REF(connp);
1644         ASSERT(connp->conn_ref >= (IPCL_IS_NONSTR(connp) ? 1 : 2));
1645 
1646 finish:
1647         /*
1648          * Don't change the queues in the case of a listener that has
1649          * eagers in its q or q0. It could surprise the eagers.
1650          * Instead wait for the eagers outside the squeue.
1651          *
1652          * For non-STREAMS sockets tcp_wait_for_eagers implies that
1653          * we should delay the su_closed upcall until all eagers have
1654          * dropped their references.
1655          */
1656         if (!tcp->tcp_wait_for_eagers) {
1657                 tcp->tcp_detached = B_TRUE;
1658                 connp->conn_rq = NULL;
1659                 connp->conn_wq = NULL;
1660 
1661                 /* non-STREAM socket, release the upper handle */
1662                 if (IPCL_IS_NONSTR(connp)) {
1663                         ASSERT(connp->conn_upper_handle != NULL);
1664                         (*connp->conn_upcalls->su_closed)
1665                             (connp->conn_upper_handle);
1666                         connp->conn_upper_handle = NULL;
1667                         connp->conn_upcalls = NULL;
1668                 }
1669         }
1670 
1671         /* Signal tcp_close() to finish closing. */
1672         mutex_enter(&tcp->tcp_closelock);
1673         tcp->tcp_closed = 1;
1674         cv_signal(&tcp->tcp_closecv);
1675         mutex_exit(&tcp->tcp_closelock);
1676 }
1677 
1678 /* ARGSUSED */
1679 void
1680 tcp_shutdown_output(void *arg, mblk_t *mp, void *arg2, ip_recv_attr_t *dummy)
1681 {
1682         conn_t  *connp = (conn_t *)arg;
1683         tcp_t   *tcp = connp->conn_tcp;
1684 
1685         freemsg(mp);
1686 
1687         if (tcp->tcp_fused)
1688                 tcp_unfuse(tcp);
1689 
1690         if (tcp_xmit_end(tcp) != 0) {
1691                 /*
1692                  * We were crossing FINs and got a reset from
1693                  * the other side. Just ignore it.
1694                  */
1695                 if (connp->conn_debug) {
1696                         (void) strlog(TCP_MOD_ID, 0, 1,
1697                             SL_ERROR|SL_TRACE,
1698                             "tcp_shutdown_output() out of state %s",
1699                             tcp_display(tcp, NULL, DISP_ADDR_AND_PORT));
1700                 }
1701         }
1702 }
1703 
1704 #pragma inline(tcp_send_data)
1705 
1706 void
1707 tcp_send_data(tcp_t *tcp, mblk_t *mp)
1708 {
1709         conn_t          *connp = tcp->tcp_connp;
1710 
1711         /*
1712          * Check here to avoid sending a zero-copy message down to IP after
1713          * the ZEROCOPY capability has been turned off.  We only need to deal
1714          * with the race condition between sockfs and that notification here.
1715          * Since we back off the tcp_xmit_head when turning zero-copy off and
1716          * handle new messages in tcp_output(), we simply drop the dup'ed
1717          * packet here and let tcp retransmit, if tcp_xmit_zc_clean is not
1718          * true.
1719          */
1720         if (tcp->tcp_snd_zcopy_aware && !tcp->tcp_snd_zcopy_on &&
1721             !tcp->tcp_xmit_zc_clean) {
1722                 ip_drop_output("TCP ZC was disabled but not clean", mp, NULL);
1723                 freemsg(mp);
1724                 return;
1725         }
1726 
1727         DTRACE_TCP5(send, mblk_t *, NULL, ip_xmit_attr_t *, connp->conn_ixa,
1728             __dtrace_tcp_void_ip_t *, mp->b_rptr, tcp_t *, tcp,
1729             __dtrace_tcp_tcph_t *,
1730             &mp->b_rptr[connp->conn_ixa->ixa_ip_hdr_length]);
1731 
1732         ASSERT(connp->conn_ixa->ixa_notify_cookie == connp->conn_tcp);
1733         (void) conn_ip_output(mp, connp->conn_ixa);
1734 }
1735 
1736 /* ARGSUSED2 */
1737 void
1738 tcp_send_synack(void *arg, mblk_t *mp, void *arg2, ip_recv_attr_t *dummy)
1739 {
1740         conn_t  *econnp = (conn_t *)arg;
1741         tcp_t   *tcp = econnp->conn_tcp;
1742         ip_xmit_attr_t *ixa = econnp->conn_ixa;
1743 
1744         /* Guard against a RST having blown it away while on the squeue */
1745         if (tcp->tcp_state == TCPS_CLOSED) {
1746                 freemsg(mp);
1747                 return;
1748         }
1749 
1750         /*
1751          * On the off chance that the eager received and responded to
1752          * some other packet while the SYN|ACK was queued, we recalculate
1753          * the ixa_pktlen. It would be better to fix the SYN/accept
1754          * multithreading scheme to avoid this complexity.
1755          */
1756         ixa->ixa_pktlen = msgdsize(mp);
1757         (void) conn_ip_output(mp, ixa);
1758 }
1759 
1760 /*
1761  * tcp_send() is called by tcp_wput_data() and returns one of the following:
1762  *
1763  * -1 = failed allocation.
1764  *  0 = success; the burst count was reached, or the usable send window is
1765  *      too small and we'd rather wait until later before sending again.
1766  */
1767 static int
1768 tcp_send(tcp_t *tcp, const int mss, const int total_hdr_len,
1769     const int tcp_hdr_len, const int num_sack_blk, int *usable,
1770     uint_t *snxt, int *tail_unsent, mblk_t **xmit_tail, mblk_t *local_time)
1771 {
1772         int             num_burst_seg = tcp->tcp_snd_burst;
1773         int             num_lso_seg = 1;
1774         uint_t          lso_usable;
1775         boolean_t       do_lso_send = B_FALSE;
1776         tcp_stack_t     *tcps = tcp->tcp_tcps;
1777         conn_t          *connp = tcp->tcp_connp;
1778         ip_xmit_attr_t  *ixa = connp->conn_ixa;
1779 
1780         /*
1781          * Check the LSO possibility.  The value of tcp->tcp_lso indicates
1782          * whether the underlying connection is LSO capable.  The for(){}
1783          * loop below checks whether there is enough available data to
1784          * initiate an LSO transmission.
1785          */
1786         if (tcp->tcp_lso && (tcp->tcp_valid_bits & ~TCP_FSS_VALID) == 0)
1787                 do_lso_send = B_TRUE;
1788 
1789         for (;;) {
1790                 struct datab    *db;
1791                 tcpha_t         *tcpha;
1792                 uint32_t        sum;
1793                 mblk_t          *mp, *mp1;
1794                 uchar_t         *rptr;
1795                 int             len;
1796 
1797                 /*
1798                  * Burst count reached, return successfully.
1799                  */
1800                 if (num_burst_seg == 0)
1801                         break;
1802 
1803                 /*
1804                  * Calculate the maximum payload length we can send at one
1805                  * time.
1806                  */
1807                 if (do_lso_send) {
1808                         /*
1809                          * Check whether we are able to do LSO for the
1810                          * currently available data.
1811                          */
1812                         if (num_burst_seg >= 2 && (*usable - 1) / mss >= 1) {
1813                                 lso_usable = MIN(tcp->tcp_lso_max, *usable);
1814                                 lso_usable = MIN(lso_usable,
1815                                     num_burst_seg * mss);
1816 
1817                                 num_lso_seg = lso_usable / mss;
1818                                 if (lso_usable % mss) {
1819                                         num_lso_seg++;
1820                                         tcp->tcp_last_sent_len = (ushort_t)
1821                                             (lso_usable % mss);
1822                                 } else {
1823                                         tcp->tcp_last_sent_len = (ushort_t)mss;
1824                                 }
1825                         } else {
1826                                 do_lso_send = B_FALSE;
1827                                 num_lso_seg = 1;
1828                                 lso_usable = mss;
1829                         }
1830                 }
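                     /*
                      * For example (hypothetical numbers): with an mss of
                      * 1460, *usable of 10000 and a large tcp_lso_max,
                      * lso_usable is 10000, num_lso_seg becomes 7 and
                      * tcp_last_sent_len is the 1240-byte remainder.
                      */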
1831 
1832                 ASSERT(num_lso_seg <= IP_MAXPACKET / mss + 1);
1833 #ifdef DEBUG
1834                 DTRACE_PROBE2(tcp_send_lso, int, num_lso_seg, boolean_t,
1835                     do_lso_send);
1836 #endif
1837                 /*
1838                  * Adjust num_burst_seg here.
1839                  */
1840                 num_burst_seg -= num_lso_seg;
1841 
1842                 len = mss;
1843                 if (len > *usable) {
1844                         ASSERT(do_lso_send == B_FALSE);
1845 
1846                         len = *usable;
1847                         if (len <= 0) {
1848                                 /* Terminate the loop */
1849                                 break;  /* success; too small */
1850                         }
1851                         /*
1852                          * Sender silly-window avoidance.
1853                          * Ignore this if we are going to send a
1854                          * zero window probe out.
1855                          *
1856                          * TODO: force data into microscopic window?
1857                          *      ==> (!pushed || (unsent > usable))
1858                          */
1859                         if (len < (tcp->tcp_max_swnd >> 1) &&
1860                             (tcp->tcp_unsent - (*snxt - tcp->tcp_snxt)) > len &&
1861                             !((tcp->tcp_valid_bits & TCP_URG_VALID) &&
1862                             len == 1) && (! tcp->tcp_zero_win_probe)) {
1863                                 /*
1864                                  * If the retransmit timer is not running
1865                                  * we start it so that we will retransmit
1866                                  * in the case when the receiver has
1867                                  * decremented the window.
1868                                  */
1869                                 if (*snxt == tcp->tcp_snxt &&
1870                                     *snxt == tcp->tcp_suna) {
1871                                         /*
1872                                          * We are not supposed to send
1873                                          * anything.  So let's wait a little
1874                                          * bit longer before breaking SWS
1875                                          * avoidance.
1876                                          *
1877                                          * What should the value be?
1878                                          * Suggestion: MAX(init rexmit time,
1879                                          * tcp->tcp_rto)
1880                                          */
1881                                         TCP_TIMER_RESTART(tcp, tcp->tcp_rto);
1882                                 }
1883                                 break;  /* success; too small */
1884                         }
1885                 }
1886 
1887                 tcpha = tcp->tcp_tcpha;
1888 
1889                 /*
1890                  * The reason to adjust len here is that we need to set flags
1891                  * and calculate checksum.
1892                  */
1893                 if (do_lso_send)
1894                         len = lso_usable;
1895 
1896                 *usable -= len; /* Approximate - can be adjusted later */
1897                 if (*usable > 0)
1898                         tcpha->tha_flags = TH_ACK;
1899                 else
1900                         tcpha->tha_flags = (TH_ACK | TH_PUSH);
1901 
1902                 /*
1903                  * Prime the pump for IP's checksumming on our behalf.
1904                  * Include the adjustment for a source route if any.
1905                  * In the LSO case the partial pseudo-header checksum must
1906                  * exclude the TCP length, so zero tha_sum before IP
1907                  * calculates it for partial checksum offload.
1908                  */
1909                 if (do_lso_send) {
1910                         sum = 0;
1911                 } else {
1912                         sum = len + tcp_hdr_len + connp->conn_sum;
1913                         sum = (sum >> 16) + (sum & 0xFFFF);
1914                 }
1915                 tcpha->tha_sum = htons(sum);
1916                 tcpha->tha_seq = htonl(*snxt);
1917 
1918                 /*
1919                  * Branch off to tcp_xmit_mp() if any of the VALID bits is
1920                  * set.  For the case when TCP_FSS_VALID is the only valid
1921                  * bit (normal active close), branch off only when we think
1922                  * that the FIN flag needs to be set.  Note for this case,
1923                  * that (snxt + len) may not reflect the actual seg_len,
1924                  * as len may be further reduced in tcp_xmit_mp().  If len
1925                  * gets modified, we will end up here again.
1926                  */
1927                 if (tcp->tcp_valid_bits != 0 &&
1928                     (tcp->tcp_valid_bits != TCP_FSS_VALID ||
1929                     ((*snxt + len) == tcp->tcp_fss))) {
1930                         uchar_t         *prev_rptr;
1931                         uint32_t        prev_snxt = tcp->tcp_snxt;
1932 
1933                         if (*tail_unsent == 0) {
1934                                 ASSERT((*xmit_tail)->b_cont != NULL);
1935                                 *xmit_tail = (*xmit_tail)->b_cont;
1936                                 prev_rptr = (*xmit_tail)->b_rptr;
1937                                 *tail_unsent = (int)((*xmit_tail)->b_wptr -
1938                                     (*xmit_tail)->b_rptr);
1939                         } else {
1940                                 prev_rptr = (*xmit_tail)->b_rptr;
1941                                 (*xmit_tail)->b_rptr = (*xmit_tail)->b_wptr -
1942                                     *tail_unsent;
1943                         }
1944                         mp = tcp_xmit_mp(tcp, *xmit_tail, len, NULL, NULL,
1945                             *snxt, B_FALSE, (uint32_t *)&len, B_FALSE);
1946                         /* Restore tcp_snxt so we get amount sent right. */
1947                         tcp->tcp_snxt = prev_snxt;
1948                         if (prev_rptr == (*xmit_tail)->b_rptr) {
1949                                 /*
1950                                  * If the previous timestamp is still in use,
1951                                  * don't stomp on it.
1952                                  */
1953                                 if ((*xmit_tail)->b_next == NULL) {
1954                                         (*xmit_tail)->b_prev = local_time;
1955                                         (*xmit_tail)->b_next =
1956                                             (mblk_t *)(uintptr_t)(*snxt);
1957                                 }
1958                         } else
1959                                 (*xmit_tail)->b_rptr = prev_rptr;
1960 
1961                         if (mp == NULL) {
1962                                 return (-1);
1963                         }
1964                         mp1 = mp->b_cont;
1965 
1966                         if (len <= mss) /* LSO is unusable (!do_lso_send) */
1967                                 tcp->tcp_last_sent_len = (ushort_t)len;
1968                         while (mp1->b_cont) {
1969                                 *xmit_tail = (*xmit_tail)->b_cont;
1970                                 (*xmit_tail)->b_prev = local_time;
1971                                 (*xmit_tail)->b_next =
1972                                     (mblk_t *)(uintptr_t)(*snxt);
1973                                 mp1 = mp1->b_cont;
1974                         }
1975                         *snxt += len;
1976                         *tail_unsent = (*xmit_tail)->b_wptr - mp1->b_wptr;
1977                         BUMP_LOCAL(tcp->tcp_obsegs);
1978                         TCPS_BUMP_MIB(tcps, tcpOutDataSegs);
1979                         TCPS_UPDATE_MIB(tcps, tcpOutDataBytes, len);
1980                         tcp_send_data(tcp, mp);
1981                         continue;
1982                 }
1983 
1984                 *snxt += len;   /* Adjust later if we don't send all of len */
1985                 TCPS_BUMP_MIB(tcps, tcpOutDataSegs);
1986                 TCPS_UPDATE_MIB(tcps, tcpOutDataBytes, len);
1987 
1988                 if (*tail_unsent) {
1989                         /* Are the bytes above us in flight? */
1990                         rptr = (*xmit_tail)->b_wptr - *tail_unsent;
1991                         if (rptr != (*xmit_tail)->b_rptr) {
1992                                 *tail_unsent -= len;
1993                                 if (len <= mss) /* LSO is unusable */
1994                                         tcp->tcp_last_sent_len = (ushort_t)len;
1995                                 len += total_hdr_len;
1996                                 ixa->ixa_pktlen = len;
1997 
1998                                 if (ixa->ixa_flags & IXAF_IS_IPV4) {
1999                                         tcp->tcp_ipha->ipha_length = htons(len);
2000                                 } else {
2001                                         tcp->tcp_ip6h->ip6_plen =
2002                                             htons(len - IPV6_HDR_LEN);
2003                                 }
2004 
2005                                 mp = dupb(*xmit_tail);
2006                                 if (mp == NULL) {
2007                                         return (-1);    /* out_of_mem */
2008                                 }
2009                                 mp->b_rptr = rptr;
2010                                 /*
2011                                  * If the old timestamp is no longer in use,
2012                                  * sample a new timestamp now.
2013                                  */
2014                                 if ((*xmit_tail)->b_next == NULL) {
2015                                         (*xmit_tail)->b_prev = local_time;
2016                                         (*xmit_tail)->b_next =
2017                                             (mblk_t *)(uintptr_t)(*snxt-len);
2018                                 }
2019                                 goto must_alloc;
2020                         }
2021                 } else {
2022                         *xmit_tail = (*xmit_tail)->b_cont;
2023                         ASSERT((uintptr_t)((*xmit_tail)->b_wptr -
2024                             (*xmit_tail)->b_rptr) <= (uintptr_t)INT_MAX);
2025                         *tail_unsent = (int)((*xmit_tail)->b_wptr -
2026                             (*xmit_tail)->b_rptr);
2027                 }
2028 
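                     /* Stash send time and starting sequence for rtt use later. */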
2029                 (*xmit_tail)->b_prev = local_time;
2030                 (*xmit_tail)->b_next = (mblk_t *)(uintptr_t)(*snxt - len);
2031 
2032                 *tail_unsent -= len;
2033                 if (len <= mss) /* LSO is unusable (!do_lso_send) */
2034                         tcp->tcp_last_sent_len = (ushort_t)len;
2035 
2036                 len += total_hdr_len;
2037                 ixa->ixa_pktlen = len;
2038 
2039                 if (ixa->ixa_flags & IXAF_IS_IPV4) {
2040                         tcp->tcp_ipha->ipha_length = htons(len);
2041                 } else {
2042                         tcp->tcp_ip6h->ip6_plen = htons(len - IPV6_HDR_LEN);
2043                 }
2044 
2045                 mp = dupb(*xmit_tail);
2046                 if (mp == NULL) {
2047                         return (-1);    /* out_of_mem */
2048                 }
2049 
2050                 len = total_hdr_len;
2051                 /*
2052                  * There are four reasons to allocate a new hdr mblk:
2053                  *  1) The bytes above us are in use by another packet
2054                  *  2) We don't have good alignment
2055                  *  3) The mblk is being shared
2056                  *  4) We don't have enough room for a header
2057                  */
2058                 rptr = mp->b_rptr - len;
2059                 if (!OK_32PTR(rptr) ||
2060                     ((db = mp->b_datap), db->db_ref != 2) ||
2061                     rptr < db->db_base) {
2062                         /* NOTE: we assume allocb returns an OK_32PTR */
2063 
2064                 must_alloc:;
2065                         mp1 = allocb(connp->conn_ht_iphc_allocated +
2066                             tcps->tcps_wroff_xtra, BPRI_MED);
2067                         if (mp1 == NULL) {
2068                                 freemsg(mp);
2069                                 return (-1);    /* out_of_mem */
2070                         }
2071                         mp1->b_cont = mp;
2072                         mp = mp1;
2073                         /* Leave room for Link Level header */
2074                         len = total_hdr_len;
2075                         rptr = &mp->b_rptr[tcps->tcps_wroff_xtra];
2076                         mp->b_wptr = &rptr[len];
2077                 }
2078 
2079                 /*
2080                  * Fill in the header using the template header, and add
2081                  * options such as time-stamp, ECN and/or SACK, as needed.
2082                  */
2083                 tcp_fill_header(tcp, rptr, (clock_t)local_time, num_sack_blk);
2084 
2085                 mp->b_rptr = rptr;
2086 
2087                 if (*tail_unsent) {
2088                         int spill = *tail_unsent;
2089 
2090                         mp1 = mp->b_cont;
2091                         if (mp1 == NULL)
2092                                 mp1 = mp;
2093 
2094                         /*
2095                          * If we're a little short, tack on more mblks until
2096                          * there is no more spillover.
2097                          */
2098                         while (spill < 0) {
2099                                 mblk_t *nmp;
2100                                 int nmpsz;
2101 
2102                                 nmp = (*xmit_tail)->b_cont;
2103                                 nmpsz = MBLKL(nmp);
2104 
2105                                 /*
2106                                  * Excess data in mblk; can we split it?
2107                                  * If LSO is enabled for the connection,
2108                                  * keep on splitting as this is a transient
2109                                  * send path.
2110                                  */
2111                                 if (!do_lso_send && (spill + nmpsz > 0)) {
2112                                         /*
2113                                          * Don't split if stream head was
2114                                          * told to break up larger writes
2115                                          * into smaller ones.
2116                                          */
2117                                         if (tcp->tcp_maxpsz_multiplier > 0)
2118                                                 break;
2119 
2120                                         /*
2121                                          * The next mblk is less than SMSS/2
2122                                          * rounded up to the nearest 64 bytes;
2123                                          * let it get sent as part of the
2124                                          * next segment.
2125                                          */
2126                                         if (tcp->tcp_localnet &&
2127                                             !tcp->tcp_cork &&
2128                                             (nmpsz < roundup((mss >> 1), 64)))
2129                                                 break;
2130                                 }
2131 
2132                                 *xmit_tail = nmp;
2133                                 ASSERT((uintptr_t)nmpsz <= (uintptr_t)INT_MAX);
2134                                 /* Stash for rtt use later */
2135                                 (*xmit_tail)->b_prev = local_time;
2136                                 (*xmit_tail)->b_next =
2137                                     (mblk_t *)(uintptr_t)(*snxt - len);
2138                                 mp1->b_cont = dupb(*xmit_tail);
2139                                 mp1 = mp1->b_cont;
2140 
2141                                 spill += nmpsz;
2142                                 if (mp1 == NULL) {
2143                                         *tail_unsent = spill;
2144                                         freemsg(mp);
2145                                         return (-1);    /* out_of_mem */
2146                                 }
2147                         }
2148 
2149                         /* Trim back any surplus on the last mblk */
2150                         if (spill >= 0) {
2151                                 mp1->b_wptr -= spill;
2152                                 *tail_unsent = spill;
2153                         } else {
2154                                 /*
2155                                  * We did not send everything we could in
2156                                  * order to remain within the b_cont limit.
2157                                  */
2158                                 *usable -= spill;
2159                                 *snxt += spill;
2160                                 tcp->tcp_last_sent_len += spill;
2161                                 TCPS_UPDATE_MIB(tcps, tcpOutDataBytes, spill);
2162                                 /*
2163                                  * Adjust the checksum
2164                                  */
2165                                 tcpha = (tcpha_t *)(rptr +
2166                                     ixa->ixa_ip_hdr_length);
2167                                 sum += spill;
2168                                 sum = (sum >> 16) + (sum & 0xFFFF);
2169                                 tcpha->tha_sum = htons(sum);
2170                                 if (connp->conn_ipversion == IPV4_VERSION) {
2171                                         sum = ntohs(
2172                                             ((ipha_t *)rptr)->ipha_length) +
2173                                             spill;
2174                                         ((ipha_t *)rptr)->ipha_length =
2175                                             htons(sum);
2176                                 } else {
2177                                         sum = ntohs(
2178                                             ((ip6_t *)rptr)->ip6_plen) +
2179                                             spill;
2180                                         ((ip6_t *)rptr)->ip6_plen =
2181                                             htons(sum);
2182                                 }
2183                                 ixa->ixa_pktlen += spill;
2184                                 *tail_unsent = 0;
2185                         }
2186                 }
2187                 if (tcp->tcp_ip_forward_progress) {
2188                         tcp->tcp_ip_forward_progress = B_FALSE;
2189                         ixa->ixa_flags |= IXAF_REACH_CONF;
2190                 } else {
2191                         ixa->ixa_flags &= ~IXAF_REACH_CONF;
2192                 }
2193 
2194                 if (do_lso_send) {
2195                         /* Append LSO information to the mp. */
2196                         lso_info_set(mp, mss, HW_LSO);
2197                         ixa->ixa_fragsize = IP_MAXPACKET;
2198                         ixa->ixa_extra_ident = num_lso_seg - 1;
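                             /*
                              * ixa_fragsize is raised so IP passes the large
                              * LSO packet through unfragmented, while
                              * ixa_extra_ident reserves IP ident values for
                              * the segments the NIC will generate from it.
                              */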
2199 
2200                         DTRACE_PROBE2(tcp_send_lso, int, num_lso_seg,
2201                             boolean_t, B_TRUE);
2202 
2203                         tcp_send_data(tcp, mp);
2204 
2205                         /*
2206                          * Restore values of ixa_fragsize and ixa_extra_ident.
2207                          */
2208                         ixa->ixa_fragsize = ixa->ixa_pmtu;
2209                         ixa->ixa_extra_ident = 0;
2210                         tcp->tcp_obsegs += num_lso_seg;
2211                         TCP_STAT(tcps, tcp_lso_times);
2212                         TCP_STAT_UPDATE(tcps, tcp_lso_pkt_out, num_lso_seg);
2213                 } else {
2214                         /*
2215                          * Make sure to clean up LSO information. Wherever a
2216                          * new mp uses the prepended header room after dupb(),
2217                          * lso_info_cleanup() should be called.
2218                          */
2219                         lso_info_cleanup(mp);
2220                         tcp_send_data(tcp, mp);
2221                         BUMP_LOCAL(tcp->tcp_obsegs);
2222                 }
2223         }
2224 
2225         return (0);
2226 }
2227 
2228 /*
2229  * Initiate the closedown sequence on an active connection.  (May be called
2230  * as writer.)  Returns zero on success, non-zero on error.
2231  */
2232 static int
2233 tcp_xmit_end(tcp_t *tcp)
2234 {
2235         mblk_t          *mp;
2236         tcp_stack_t     *tcps = tcp->tcp_tcps;
2237         iulp_t          uinfo;
2238         ip_stack_t      *ipst = tcps->tcps_netstack->netstack_ip;
2239         conn_t          *connp = tcp->tcp_connp;
2240 
2241         if (tcp->tcp_state < TCPS_SYN_RCVD ||
2242             tcp->tcp_state > TCPS_CLOSE_WAIT) {
2243                 /*
2244                  * Invalid state, only states TCPS_SYN_RCVD,
2245                  * TCPS_ESTABLISHED and TCPS_CLOSE_WAIT are valid
2246                  */
2247                 return (-1);
2248         }
2249 
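             /*
              * The FIN consumes the sequence number just past all queued
              * data; record it in tcp_fss and mark it valid so the FIN flag
              * is set when that sequence number is transmitted.
              */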
2250         tcp->tcp_fss = tcp->tcp_snxt + tcp->tcp_unsent;
2251         tcp->tcp_valid_bits |= TCP_FSS_VALID;
2252         /*
2253          * If there is nothing more unsent, send the FIN now.
2254          * Otherwise, it will go out with the last segment.
2255          */
2256         if (tcp->tcp_unsent == 0) {
2257                 mp = tcp_xmit_mp(tcp, NULL, 0, NULL, NULL,
2258                     tcp->tcp_fss, B_FALSE, NULL, B_FALSE);
2259 
2260                 if (mp) {
2261                         tcp_send_data(tcp, mp);
2262                 } else {
2263                         /*
2264                          * Couldn't allocate msg.  Pretend we got it out.
2265                          * Wait for rexmit timeout.
2266                          */
2267                         tcp->tcp_snxt = tcp->tcp_fss + 1;
2268                         TCP_TIMER_RESTART(tcp, tcp->tcp_rto);
2269                 }
2270 
2271                 /*
2272                  * If needed, update tcp_rexmit_snxt as tcp_snxt is
2273                  * changed.
2274                  */
2275                 if (tcp->tcp_rexmit && tcp->tcp_rexmit_nxt == tcp->tcp_fss) {
2276                         tcp->tcp_rexmit_nxt = tcp->tcp_snxt;
2277                 }
2278         } else {
2279                 /*
2280                  * If tcp->tcp_cork is set, then the data will not get sent,
2281                  * so we have to check that and unset it first.
2282                  */
2283                 if (tcp->tcp_cork)
2284                         tcp->tcp_cork = B_FALSE;
2285                 tcp_wput_data(tcp, NULL, B_FALSE);
2286         }
2287 
2288         /*
2289          * If TCP does not get enough samples of RTT or tcp_rtt_updates
2290          * is 0, don't update the cache.
2291          */
2292         if (tcps->tcps_rtt_updates == 0 ||
2293             tcp->tcp_rtt_update < tcps->tcps_rtt_updates)
2294                 return (0);
2295 
2296         /*
2297          * We do not have a good algorithm to update ssthresh at this time.
2298          * So don't do any update.
2299          */
2300         bzero(&uinfo, sizeof (uinfo));
2301         uinfo.iulp_rtt = tcp->tcp_rtt_sa;
2302         uinfo.iulp_rtt_sd = tcp->tcp_rtt_sd;
2303 
2304         /*
2305          * Note that uinfo is kept for conn_faddr in the DCE.  We could
2306          * update it even if the connection is source routed, but we don't.
2307          */
2308         if (connp->conn_ipversion == IPV4_VERSION) {
2309                 if (connp->conn_faddr_v4 != tcp->tcp_ipha->ipha_dst) {
2310                         return (0);
2311                 }
2312                 (void) dce_update_uinfo_v4(connp->conn_faddr_v4, &uinfo, ipst);
2313         } else {
2314                 uint_t ifindex;
2315 
2316                 if (!(IN6_ARE_ADDR_EQUAL(&connp->conn_faddr_v6,
2317                     &tcp->tcp_ip6h->ip6_dst))) {
2318                         return (0);
2319                 }
2320                 ifindex = 0;
2321                 if (IN6_IS_ADDR_LINKSCOPE(&connp->conn_faddr_v6)) {
2322                         ip_xmit_attr_t *ixa = connp->conn_ixa;
2323 
2324                         /*
2325                          * If we are going to create a DCE we'd better have
2326                          * an ifindex
2327                          */
2328                         if (ixa->ixa_nce != NULL) {
2329                                 ifindex = ixa->ixa_nce->nce_common->ncec_ill->
2330                                     ill_phyint->phyint_ifindex;
2331                         } else {
2332                                 return (0);
2333                         }
2334                 }
2335 
2336                 (void) dce_update_uinfo(&connp->conn_faddr_v6, ifindex, &uinfo,
2337                     ipst);
2338         }
2339         return (0);
2340 }
2341 
2342 /*
2343  * Send out a control packet on the tcp connection specified.  This routine
2344  * is typically called where we need a simple ACK or RST generated.
2345  */
2346 void
2347 tcp_xmit_ctl(char *str, tcp_t *tcp, uint32_t seq, uint32_t ack, int ctl)
2348 {
2349         uchar_t         *rptr;
2350         tcpha_t         *tcpha;
2351         ipha_t          *ipha = NULL;
2352         ip6_t           *ip6h = NULL;
2353         uint32_t        sum;
2354         int             total_hdr_len;
2355         int             ip_hdr_len;
2356         mblk_t          *mp;
2357         tcp_stack_t     *tcps = tcp->tcp_tcps;
2358         conn_t          *connp = tcp->tcp_connp;
2359         ip_xmit_attr_t  *ixa = connp->conn_ixa;
2360 
2361         /*
2362          * Save sum for use in source route later.
2363          */
2364         sum = connp->conn_ht_ulp_len + connp->conn_sum;
2365         total_hdr_len = connp->conn_ht_iphc_len;
2366         ip_hdr_len = ixa->ixa_ip_hdr_length;
2367 
2368         /* If a text string is passed in with the request, pass it to strlog. */
2369         if (str != NULL && connp->conn_debug) {
2370                 (void) strlog(TCP_MOD_ID, 0, 1, SL_TRACE,
2371                     "tcp_xmit_ctl: '%s', seq 0x%x, ack 0x%x, ctl 0x%x",
2372                     str, seq, ack, ctl);
2373         }
2374         mp = allocb(connp->conn_ht_iphc_allocated + tcps->tcps_wroff_xtra,
2375             BPRI_MED);
2376         if (mp == NULL) {
2377                 return;
2378         }
2379         rptr = &mp->b_rptr[tcps->tcps_wroff_xtra];
2380         mp->b_rptr = rptr;
2381         mp->b_wptr = &rptr[total_hdr_len];
2382         bcopy(connp->conn_ht_iphc, rptr, total_hdr_len);
2383 
2384         ixa->ixa_pktlen = total_hdr_len;
2385 
2386         if (ixa->ixa_flags & IXAF_IS_IPV4) {
2387                 ipha = (ipha_t *)rptr;
2388                 ipha->ipha_length = htons(total_hdr_len);
2389         } else {
2390                 ip6h = (ip6_t *)rptr;
2391                 ip6h->ip6_plen = htons(total_hdr_len - IPV6_HDR_LEN);
2392         }
2393         tcpha = (tcpha_t *)&rptr[ip_hdr_len];
2394         tcpha->tha_flags = (uint8_t)ctl;
2395         if (ctl & TH_RST) {
2396                 TCPS_BUMP_MIB(tcps, tcpOutRsts);
2397                 TCPS_BUMP_MIB(tcps, tcpOutControl);
2398                 /*
2399                  * Don't send TSopt w/ TH_RST packets per RFC 1323.
2400                  */
2401                 if (tcp->tcp_snd_ts_ok &&
2402                     tcp->tcp_state > TCPS_SYN_SENT) {
2403                         mp->b_wptr = &rptr[total_hdr_len - TCPOPT_REAL_TS_LEN];
2404                         *(mp->b_wptr) = TCPOPT_EOL;
2405 
2406                         ixa->ixa_pktlen = total_hdr_len - TCPOPT_REAL_TS_LEN;
2407 
2408                         if (connp->conn_ipversion == IPV4_VERSION) {
2409                                 ipha->ipha_length = htons(total_hdr_len -
2410                                     TCPOPT_REAL_TS_LEN);
2411                         } else {
2412                                 ip6h->ip6_plen = htons(total_hdr_len -
2413                                     IPV6_HDR_LEN - TCPOPT_REAL_TS_LEN);
2414                         }
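                             /*
                              * The timestamp option occupies twelve bytes,
                              * i.e. three 32-bit words: shrink the TCP data
                              * offset (kept in the high nibble) by 3 and drop
                              * the option length from the pseudo-header sum.
                              */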
2415                         tcpha->tha_offset_and_reserved -= (3 << 4);
2416                         sum -= TCPOPT_REAL_TS_LEN;
2417                 }
2418         }
2419         if (ctl & TH_ACK) {
2420                 if (tcp->tcp_snd_ts_ok) {
2421                         uint32_t llbolt = (uint32_t)LBOLT_FASTPATH;
2422 
2423                         U32_TO_BE32(llbolt,
2424                             (char *)tcpha + TCP_MIN_HEADER_LENGTH+4);
2425                         U32_TO_BE32(tcp->tcp_ts_recent,
2426                             (char *)tcpha + TCP_MIN_HEADER_LENGTH+8);
2427                 }
2428 
2429                 /* Update the latest receive window size in TCP header. */
2430                 tcpha->tha_win = htons(tcp->tcp_rwnd >> tcp->tcp_rcv_ws);
2431                 /* Track what we sent to the peer */
2432                 tcp->tcp_tcpha->tha_win = tcpha->tha_win;
2433                 tcp->tcp_rack = ack;
2434                 tcp->tcp_rack_cnt = 0;
2435                 TCPS_BUMP_MIB(tcps, tcpOutAck);
2436         }
2437         BUMP_LOCAL(tcp->tcp_obsegs);
2438         tcpha->tha_seq = htonl(seq);
2439         tcpha->tha_ack = htonl(ack);
2440         /*
2441          * Include the adjustment for a source route if any.
2442          */
2443         sum = (sum >> 16) + (sum & 0xFFFF);
2444         tcpha->tha_sum = htons(sum);
2445         tcp_send_data(tcp, mp);
2446 }
2447 
2448 /*
2449  * Generate a reset based on an inbound packet, connp is set by caller
2450  * when RST is in response to an unexpected inbound packet for which
2451  * there is active tcp state in the system.
2452  *
2453  * IPSEC NOTE: Try to send the reply with the same protection as it came
2454  * in with.  We have the ip_recv_attr_t, which is reversed to form the
2455  * ip_xmit_attr_t, so that the packet goes out at the same level of
2456  * protection as it came in with.
2457  */
2458 static void
2459 tcp_xmit_early_reset(char *str, mblk_t *mp, uint32_t seq, uint32_t ack, int ctl,
2460     ip_recv_attr_t *ira, ip_stack_t *ipst, conn_t *connp)
2461 {
2462         ipha_t          *ipha = NULL;
2463         ip6_t           *ip6h = NULL;
2464         ushort_t        len;
2465         tcpha_t         *tcpha;
2466         int             i;
2467         ipaddr_t        v4addr;
2468         in6_addr_t      v6addr;
2469         netstack_t      *ns = ipst->ips_netstack;
2470         tcp_stack_t     *tcps = ns->netstack_tcp;
2471         ip_xmit_attr_t  ixas, *ixa;
2472         uint_t          ip_hdr_len = ira->ira_ip_hdr_length;
2473         boolean_t       need_refrele = B_FALSE;         /* ixa_refrele(ixa) */
2474         ushort_t        port;
2475 
2476         if (!tcp_send_rst_chk(tcps)) {
2477                 TCP_STAT(tcps, tcp_rst_unsent);
2478                 freemsg(mp);
2479                 return;
2480         }
2481 
2482         /*
2483          * If connp != NULL we use conn_ixa to keep IP_NEXTHOP and other
2484          * options from the listener.  In that case the caller must ensure
2485          * that we are running on the listener's (connp's) squeue.
2486          *
2487          * We get a safe copy of conn_ixa so we don't need to restore anything
2488          * we or ip_output_simple might change in the ixa.
2489          */
2490         if (connp != NULL) {
2491                 ASSERT(connp->conn_on_sqp);
2492 
2493                 ixa = conn_get_ixa_exclusive(connp);
2494                 if (ixa == NULL) {
2495                         TCP_STAT(tcps, tcp_rst_unsent);
2496                         freemsg(mp);
2497                         return;
2498                 }
2499                 need_refrele = B_TRUE;
2500         } else {
2501                 bzero(&ixas, sizeof (ixas));
2502                 ixa = &ixas;
2503                 /*
2504                  * IXAF_VERIFY_SOURCE is overkill since we know the
2505                  * packet was for us.
2506                  */
2507                 ixa->ixa_flags |= IXAF_SET_ULP_CKSUM | IXAF_VERIFY_SOURCE;
2508                 ixa->ixa_protocol = IPPROTO_TCP;
2509                 ixa->ixa_zoneid = ira->ira_zoneid;
2510                 ixa->ixa_ifindex = 0;
2511                 ixa->ixa_ipst = ipst;
2512                 ixa->ixa_cred = kcred;
2513                 ixa->ixa_cpid = NOPID;
2514         }
2515 
2516         if (str && tcps->tcps_dbg) {
2517                 (void) strlog(TCP_MOD_ID, 0, 1, SL_TRACE,
2518                     "tcp_xmit_early_reset: '%s', seq 0x%x, ack 0x%x, "
2519                     "flags 0x%x",
2520                     str, seq, ack, ctl);
2521         }
2522         if (mp->b_datap->db_ref != 1) {
2523                 mblk_t *mp1 = copyb(mp);
2524                 freemsg(mp);
2525                 mp = mp1;
2526                 if (mp == NULL)
2527                         goto done;
2528         } else if (mp->b_cont) {
2529                 freemsg(mp->b_cont);
2530                 mp->b_cont = NULL;
2531                 DB_CKSUMFLAGS(mp) = 0;
2532         }
2533         /*
2534          * We skip reversing source route here.
2535          * (for now we replace all IP options with EOL)
2536          */
2537         if (IPH_HDR_VERSION(mp->b_rptr) == IPV4_VERSION) {
2538                 ipha = (ipha_t *)mp->b_rptr;
2539                 for (i = IP_SIMPLE_HDR_LENGTH; i < (int)ip_hdr_len; i++)
2540                         mp->b_rptr[i] = IPOPT_EOL;
2541                 /*
2542                  * Make sure that src address isn't flagrantly invalid.
2543                  * Not all broadcast address checking for the src address
2544                  * is possible, since we don't know the netmask of the src
2545                  * addr.  No check for destination address is done, since
2546                  * IP will not pass up a packet with a broadcast dest
2547                  * address to TCP.  Similar checks are done below for IPv6.
2548                  */
2549                 if (ipha->ipha_src == 0 || ipha->ipha_src == INADDR_BROADCAST ||
2550                     CLASSD(ipha->ipha_src)) {
2551                         BUMP_MIB(&ipst->ips_ip_mib, ipIfStatsInDiscards);
2552                         ip_drop_input("ipIfStatsInDiscards", mp, NULL);
2553                         freemsg(mp);
2554                         goto done;
2555                 }
2556         } else {
2557                 ip6h = (ip6_t *)mp->b_rptr;
2558 
2559                 if (IN6_IS_ADDR_UNSPECIFIED(&ip6h->ip6_src) ||
2560                     IN6_IS_ADDR_MULTICAST(&ip6h->ip6_src)) {
2561                         BUMP_MIB(&ipst->ips_ip6_mib, ipIfStatsInDiscards);
2562                         ip_drop_input("ipIfStatsInDiscards", mp, NULL);
2563                         freemsg(mp);
2564                         goto done;
2565                 }
2566 
2567                 /* Remove any extension headers assuming partial overlay */
2568                 if (ip_hdr_len > IPV6_HDR_LEN) {
2569                         uint8_t *to;
2570 
2571                         to = mp->b_rptr + ip_hdr_len - IPV6_HDR_LEN;
2572                         ovbcopy(ip6h, to, IPV6_HDR_LEN);
2573                         mp->b_rptr += ip_hdr_len - IPV6_HDR_LEN;
2574                         ip_hdr_len = IPV6_HDR_LEN;
2575                         ip6h = (ip6_t *)mp->b_rptr;
2576                         ip6h->ip6_nxt = IPPROTO_TCP;
2577                 }
2578         }
2579         tcpha = (tcpha_t *)&mp->b_rptr[ip_hdr_len];
2580         if (tcpha->tha_flags & TH_RST) {
2581                 freemsg(mp);
2582                 goto done;
2583         }
2584         tcpha->tha_offset_and_reserved = (5 << 4);
2585         len = ip_hdr_len + sizeof (tcpha_t);
2586         mp->b_wptr = &mp->b_rptr[len];
2587         if (IPH_HDR_VERSION(mp->b_rptr) == IPV4_VERSION) {
2588                 ipha->ipha_length = htons(len);
2589                 /* Swap addresses */
2590                 v4addr = ipha->ipha_src;
2591                 ipha->ipha_src = ipha->ipha_dst;
2592                 ipha->ipha_dst = v4addr;
2593                 ipha->ipha_ident = 0;
2594                 ipha->ipha_ttl = (uchar_t)tcps->tcps_ipv4_ttl;
2595                 ixa->ixa_flags |= IXAF_IS_IPV4;
2596                 ixa->ixa_ip_hdr_length = ip_hdr_len;
2597         } else {
2598                 ip6h->ip6_plen = htons(len - IPV6_HDR_LEN);
2599                 /* Swap addresses */
2600                 v6addr = ip6h->ip6_src;
2601                 ip6h->ip6_src = ip6h->ip6_dst;
2602                 ip6h->ip6_dst = v6addr;
2603                 ip6h->ip6_hops = (uchar_t)tcps->tcps_ipv6_hoplimit;
2604                 ixa->ixa_flags &= ~IXAF_IS_IPV4;
2605 
2606                 if (IN6_IS_ADDR_LINKSCOPE(&ip6h->ip6_dst)) {
2607                         ixa->ixa_flags |= IXAF_SCOPEID_SET;
2608                         ixa->ixa_scopeid = ira->ira_ruifindex;
2609                 }
2610                 ixa->ixa_ip_hdr_length = IPV6_HDR_LEN;
2611         }
2612         ixa->ixa_pktlen = len;
2613 
2614         /* Swap the ports */
2615         port = tcpha->tha_fport;
2616         tcpha->tha_fport = tcpha->tha_lport;
2617         tcpha->tha_lport = port;
2618 
2619         tcpha->tha_ack = htonl(ack);
2620         tcpha->tha_seq = htonl(seq);
2621         tcpha->tha_win = 0;
2622         tcpha->tha_sum = htons(sizeof (tcpha_t));
2623         tcpha->tha_flags = (uint8_t)ctl;
2624         if (ctl & TH_RST) {
2625                 if (ctl & TH_ACK) {
2626                         /*
2627                          * Probe connection rejection here.
2628                          * tcp_xmit_listeners_reset() drops non-SYN segments
2629                          * that do not specify TH_ACK in their flags without
2630                          * calling this function.  As a consequence, if this
2631                          * function is called with a TH_RST|TH_ACK ctl argument,
2632                          * it is being called in response to a SYN segment
2633                          * and thus the tcp:::accept-refused probe point
2634                          * is valid here.
2635                          */
2636                         DTRACE_TCP5(accept__refused, mblk_t *, NULL,
2637                             void, NULL, void_ip_t *, mp->b_rptr, tcp_t *, NULL,
2638                             tcph_t *, tcpha);
2639                 }
2640                 TCPS_BUMP_MIB(tcps, tcpOutRsts);
2641                 TCPS_BUMP_MIB(tcps, tcpOutControl);
2642         }
2643 
2644         /* Discard any old label */
2645         if (ixa->ixa_free_flags & IXA_FREE_TSL) {
2646                 ASSERT(ixa->ixa_tsl != NULL);
2647                 label_rele(ixa->ixa_tsl);
2648                 ixa->ixa_free_flags &= ~IXA_FREE_TSL;
2649         }
2650         ixa->ixa_tsl = ira->ira_tsl;      /* Behave as a multi-level responder */
2651 
2652         if (ira->ira_flags & IRAF_IPSEC_SECURE) {
2653                 /*
2654                  * Apply IPsec based on how IPsec was applied to
2655                  * the packet that caused the RST.
2656                  */
2657                 if (!ipsec_in_to_out(ira, ixa, mp, ipha, ip6h)) {
2658                         BUMP_MIB(&ipst->ips_ip_mib, ipIfStatsOutDiscards);
2659                         /* Note: mp already consumed and ip_drop_packet done */
2660                         goto done;
2661                 }
2662         } else {
2663                 /*
2664                  * This is in the clear.  The RST message we are building
2665                  * here should go out in the clear, independent of our policy.
2666                  */
2667                 ixa->ixa_flags |= IXAF_NO_IPSEC;
2668         }
2669 
2670         DTRACE_TCP5(send, mblk_t *, NULL, ip_xmit_attr_t *, ixa,
2671             __dtrace_tcp_void_ip_t *, mp->b_rptr, tcp_t *, NULL,
2672             __dtrace_tcp_tcph_t *, tcpha);
2673 
2674         /*
2675          * NOTE:  one might consider tracing a TCP packet here, but
2676          * this function has no active TCP state and no tcp structure
2677          * that has a trace buffer.  If we traced here, we would have
2678          * to keep a local trace buffer in tcp_record_trace().
2679          */
2680 
2681         (void) ip_output_simple(mp, ixa);
2682 done:
2683         ixa_cleanup(ixa);
2684         if (need_refrele) {
2685                 ASSERT(ixa != &ixas);
2686                 ixa_refrele(ixa);
2687         }
2688 }
2689 
2690 /*
2691  * Generate a "no listener here" RST in response to an "unknown" segment.
2692  * connp is set by the caller when the RST is in response to an unexpected
2693  * inbound packet for which there is active tcp state in the system.
2694  * Note that we are reusing the incoming mp to construct the outgoing RST.
2695  */
2696 void
2697 tcp_xmit_listeners_reset(mblk_t *mp, ip_recv_attr_t *ira, ip_stack_t *ipst,
2698     conn_t *connp)
2699 {
2700         uchar_t         *rptr;
2701         uint32_t        seg_len;
2702         tcpha_t         *tcpha;
2703         uint32_t        seg_seq;
2704         uint32_t        seg_ack;
2705         uint_t          flags;
2706         ipha_t          *ipha;
2707         ip6_t           *ip6h;
2708         boolean_t       policy_present;
2709         netstack_t      *ns = ipst->ips_netstack;
2710         tcp_stack_t     *tcps = ns->netstack_tcp;
2711         ipsec_stack_t   *ipss = tcps->tcps_netstack->netstack_ipsec;
2712         uint_t          ip_hdr_len = ira->ira_ip_hdr_length;
2713 
2714         TCP_STAT(tcps, tcp_no_listener);
2715 
2716         /*
2717          * DTrace this "unknown" segment as a tcp:::receive, as we did
2718          * just receive something that was TCP.
2719          */
2720         DTRACE_TCP5(receive, mblk_t *, NULL, ip_xmit_attr_t *, NULL,
2721             __dtrace_tcp_void_ip_t *, mp->b_rptr, tcp_t *, NULL,
2722             __dtrace_tcp_tcph_t *, &mp->b_rptr[ip_hdr_len]);
2723 
2724         if (IPH_HDR_VERSION(mp->b_rptr) == IPV4_VERSION) {
2725                 policy_present = ipss->ipsec_inbound_v4_policy_present;
2726                 ipha = (ipha_t *)mp->b_rptr;
2727                 ip6h = NULL;
2728         } else {
2729                 policy_present = ipss->ipsec_inbound_v6_policy_present;
2730                 ipha = NULL;
2731                 ip6h = (ip6_t *)mp->b_rptr;
2732         }
2733 
2734         if (policy_present) {
2735                 /*
2736                  * The conn_t parameter is NULL because we already know
2737                  * nobody's home.
2738                  */
2739                 mp = ipsec_check_global_policy(mp, (conn_t *)NULL, ipha, ip6h,
2740                     ira, ns);
2741                 if (mp == NULL)
2742                         return;
2743         }
2744         if (is_system_labeled() && !tsol_can_reply_error(mp, ira)) {
2745                 DTRACE_PROBE2(
2746                     tx__ip__log__error__nolistener__tcp,
2747                     char *, "Could not reply with RST to mp(1)",
2748                     mblk_t *, mp);
2749                 ip2dbg(("tcp_xmit_listeners_reset: not permitted to reply\n"));
2750                 freemsg(mp);
2751                 return;
2752         }
2753 
2754         rptr = mp->b_rptr;
2755 
2756         tcpha = (tcpha_t *)&rptr[ip_hdr_len];
2757         seg_seq = ntohl(tcpha->tha_seq);
2758         seg_ack = ntohl(tcpha->tha_ack);
2759         flags = tcpha->tha_flags;
2760 
2761         seg_len = msgdsize(mp) - (TCP_HDR_LENGTH(tcpha) + ip_hdr_len);
2762         if (flags & TH_RST) {
2763                 freemsg(mp);
2764         } else if (flags & TH_ACK) {
2765                 tcp_xmit_early_reset("no tcp, reset", mp, seg_ack, 0, TH_RST,
2766                     ira, ipst, connp);
2767         } else {
2768                 if (flags & TH_SYN) {
2769                         seg_len++;
2770                 } else {
2771                         /*
2772                          * Here we violate the RFC.  Note that a normal
2773                          * TCP will never send a segment without the ACK
2774                          * flag, except for a RST or SYN segment.  This
2775                          * segment is neither.  Just drop it on the
2776                          * floor.
2777                          */
2778                         freemsg(mp);
2779                         TCP_STAT(tcps, tcp_rst_unsent);
2780                         return;
2781                 }
2782 
2783                 tcp_xmit_early_reset("no tcp, reset/ack", mp, 0,
2784                     seg_seq + seg_len, TH_RST | TH_ACK, ira, ipst, connp);
2785         }
2786 }
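
/*
 * Illustrative sketch (not part of the original source): the RFC 793
 * reset-generation rules that tcp_xmit_listeners_reset() applies above,
 * reduced to a pure function.  example_rst_numbers() is a hypothetical
 * name, and the snippet is guarded by #if 0 so it is never compiled.
 */
#if 0
static boolean_t
example_rst_numbers(uint32_t seg_seq, uint32_t seg_ack, uint_t seg_flags,
    uint32_t seg_len, uint32_t *seq, uint32_t *ack, uint_t *flags)
{
        if (seg_flags & TH_RST)
                return (B_FALSE);       /* never reset a reset */
        if (seg_flags & TH_ACK) {
                /* Borrow the offending segment's ack as our seq. */
                *seq = seg_ack;
                *ack = 0;
                *flags = TH_RST;
        } else if (seg_flags & TH_SYN) {
                /* A SYN occupies one unit of sequence space. */
                *seq = 0;
                *ack = seg_seq + seg_len + 1;
                *flags = TH_RST | TH_ACK;
        } else {
                return (B_FALSE);       /* silently drop, as above */
        }
        return (B_TRUE);
}
#endif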
2787 
2788 /*
2789  * Helper function for tcp_xmit_mp() that handles setting the flags and
2790  * options for connection setup segments.
2791  */
2792 static void
2793 tcp_xmit_mp_aux_iss(tcp_t *tcp, conn_t *connp, tcpha_t *tcpha, mblk_t *mp,
2794     uint_t *flags)
2795 {
2796         uint32_t u1;
2797         uint8_t *wptr = mp->b_wptr;
2798         tcp_stack_t *tcps = tcp->tcp_tcps;
2799         boolean_t add_sack = B_FALSE;
2800 
2801         /*
2802          * If TCP_ISS_VALID and the seq number is tcp_iss,
2803          * TCP can only be in SYN-SENT, SYN-RCVD or
2804          * FIN-WAIT-1 state.  It can be FIN-WAIT-1 if
2805          * our SYN is not ack'ed but the app closes this
2806          * TCP connection.
2807          */
2808         ASSERT(tcp->tcp_state == TCPS_SYN_SENT ||
2809             tcp->tcp_state == TCPS_SYN_RCVD ||
2810             tcp->tcp_state == TCPS_FIN_WAIT_1);
2811 
2812         /*
2813          * Tack on the MSS option.  It is always needed
2814          * for both active and passive open.
2815          *
2816          * The MSS option value should be the interface MTU
2817          * minus the minimum TCP/IP header, per RFC 793, as
2818          * it means the maximum segment size TCP can receive.
2819          * But to get around some broken middle boxes/end
2820          * hosts out there, we allow the option value to be
2821          * the same as the MSS option value on the peer side.
2822          * In this way, the other side will not send
2823          * anything larger than they can receive.
2824          *
2825          * Note that for SYN_SENT state, the ndd param
2826          * tcp_use_smss_as_mss_opt has no effect as we
2827          * don't know the peer's MSS option value. So
2828          * the only case we need to take care of is in
2829          * SYN_RCVD state, which is done later.
2830          */
2831         wptr[0] = TCPOPT_MAXSEG;
2832         wptr[1] = TCPOPT_MAXSEG_LEN;
2833         wptr += 2;
2834         u1 = tcp->tcp_initial_pmtu - (connp->conn_ipversion == IPV4_VERSION ?
2835             IP_SIMPLE_HDR_LENGTH : IPV6_HDR_LEN) - TCP_MIN_HEADER_LENGTH;
2836         U16_TO_BE16(u1, wptr);
2837         wptr += 2;
2838 
2839         /* Update the offset to cover the additional word */
2840         tcpha->tha_offset_and_reserved += (1 << 4);
2841 
2842         switch (tcp->tcp_state) {
2843         case TCPS_SYN_SENT:
2844                 *flags = TH_SYN;
2845 
2846                 if (tcp->tcp_snd_sack_ok)
2847                         add_sack = B_TRUE;
2848 
2849                 if (tcp->tcp_snd_ts_ok) {
2850                         uint32_t llbolt = (uint32_t)LBOLT_FASTPATH;
2851 
2852                         if (add_sack) {
2853                                 wptr[0] = TCPOPT_SACK_PERMITTED;
2854                                 wptr[1] = TCPOPT_SACK_OK_LEN;
2855                                 add_sack = B_FALSE;
2856                         } else {
2857                                 wptr[0] = TCPOPT_NOP;
2858                                 wptr[1] = TCPOPT_NOP;
2859                         }
2860                         wptr[2] = TCPOPT_TSTAMP;
2861                         wptr[3] = TCPOPT_TSTAMP_LEN;
2862                         wptr += 4;
2863                         U32_TO_BE32(llbolt, wptr);
2864                         wptr += 4;
2865                         ASSERT(tcp->tcp_ts_recent == 0);
2866                         U32_TO_BE32(0L, wptr);
2867                         wptr += 4;
2868                         tcpha->tha_offset_and_reserved += (3 << 4);
2869                 }
2870 
2871                 /*
2872                  * Set up all the bits to tell other side
2873                  * we are ECN capable.
2874                  */
2875                 if (tcp->tcp_ecn_ok)
2876                         *flags |= (TH_ECE | TH_CWR);
2877 
2878                 break;
2879 
2880         case TCPS_SYN_RCVD:
2881                 *flags |= TH_SYN;
2882 
2883                 /*
2884                  * Reset the MSS option value to be SMSS.
2885                  * We should probably add back the bytes
2886                  * for timestamp option and IPsec.  We
2887                  * don't do that as this is a workaround
2888                  * for broken middle boxes/end hosts, it
2889                  * is better for us to be more cautious.
2890                  * They may not take these things into
2891                  * account in their SMSS calculation.  Thus
2892                  * the peer's calculated SMSS may be smaller
2893                  * than what it can be.  This should be OK.
2894                  */
2895                 if (tcps->tcps_use_smss_as_mss_opt) {
2896                         u1 = tcp->tcp_mss;
2897                         /*
2898                          * Note that wptr points just past the MSS
2899                          * option value.
2900                          */
2901                         U16_TO_BE16(u1, wptr - 2);
2902                 }
2903 
2904                 /*
2905                  * tcp_snd_ts_ok can only be set in TCPS_SYN_RCVD
2906                  * when the peer also uses timestamps option.  And
2907                  * the TCP header template must have already been
2908                  * updated to include the timestamps option.
2909                  */
2910                 if (tcp->tcp_snd_sack_ok) {
2911                         if (tcp->tcp_snd_ts_ok) {
2912                                 uint8_t *tmp_wptr;
2913 
2914                                 /*
2915                                  * Use the two NOPs in the header just
2916                                  * before the timestamps option.
2917                                  */
2918                                 tmp_wptr = (uint8_t *)tcpha +
2919                                     TCP_MIN_HEADER_LENGTH;
2920                                 ASSERT(tmp_wptr[0] == TCPOPT_NOP &&
2921                                     tmp_wptr[1] == TCPOPT_NOP);
2922                                 tmp_wptr[0] = TCPOPT_SACK_PERMITTED;
2923                                 tmp_wptr[1] = TCPOPT_SACK_OK_LEN;
2924                         } else {
2925                                 add_sack = B_TRUE;
2926                         }
2927                 }
2928
2930                 /*
2931                  * If the other side is ECN capable, reply
2932                  * that we are also ECN capable.
2933                  */
2934                 if (tcp->tcp_ecn_ok)
2935                         *flags |= TH_ECE;
2936                 break;
2937 
2938         default:
2939                 /*
2940                  * The above ASSERT() makes sure that this
2941                  * must be FIN-WAIT-1 state.  Our SYN has
2942                  * not been ack'ed so retransmit it.
2943                  */
2944                 *flags |= TH_SYN;
2945                 break;
2946         }
2947 
2948         if (add_sack) {
2949                 wptr[0] = TCPOPT_NOP;
2950                 wptr[1] = TCPOPT_NOP;
2951                 wptr[2] = TCPOPT_SACK_PERMITTED;
2952                 wptr[3] = TCPOPT_SACK_OK_LEN;
2953                 wptr += TCPOPT_REAL_SACK_OK_LEN;
2954                 tcpha->tha_offset_and_reserved += (1 << 4);
2955         }
2956 
2957         if (tcp->tcp_snd_ws_ok) {
2958                 wptr[0] =  TCPOPT_NOP;
2959                 wptr[1] =  TCPOPT_WSCALE;
2960                 wptr[2] =  TCPOPT_WS_LEN;
2961                 wptr[3] = (uchar_t)tcp->tcp_rcv_ws;
2962                 wptr += TCPOPT_REAL_WS_LEN;
2963                 tcpha->tha_offset_and_reserved += (1 << 4);
2964         }
2965 
2966         mp->b_wptr = wptr;
2967         u1 = (int)(mp->b_wptr - mp->b_rptr);
2968         /*
2969          * Get IP set to checksum on our behalf
2970          * Include the adjustment for a source route if any.
2971          */
2972         u1 += connp->conn_sum;
2973         u1 = (u1 >> 16) + (u1 & 0xFFFF);
2974         tcpha->tha_sum = htons(u1);
2975         TCPS_BUMP_MIB(tcps, tcpOutControl);
2976 }
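
/*
 * Illustrative sketch (not part of the original source): the option
 * length bookkeeping performed above for an active-open (SYN_SENT) SYN,
 * expressed in the 32-bit words reflected in tha_offset_and_reserved.
 * example_syn_hdr_words() is a hypothetical name; the snippet is
 * guarded by #if 0 so it is never compiled.
 */
#if 0
static uint_t
example_syn_hdr_words(boolean_t ts_ok, boolean_t sack_ok, boolean_t ws_ok)
{
        uint_t words = TCP_MIN_HEADER_LENGTH / 4;       /* 5 words */

        words += 1;             /* MSS: kind, len, 16-bit value */
        if (ts_ok)
                words += 3;     /* 2 lead-in bytes + 10-byte timestamps */
        if (sack_ok && !ts_ok)
                words += 1;     /* 2 NOPs + SACK permitted */
        /* With ts_ok, SACK permitted rides in the timestamp lead-in. */
        if (ws_ok)
                words += 1;     /* NOP + 3-byte window scale */
        return (words);
}
#endif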
2977 
2978 /*
2979  * Helper function for tcp_xmit_mp() that handles connection tear down
2980  * flag setting and state changes.
2981  */
2982 static void
2983 tcp_xmit_mp_aux_fss(tcp_t *tcp, ip_xmit_attr_t *ixa, uint_t *flags)
2984 {
2985         if (!tcp->tcp_fin_acked) {
2986                 *flags |= TH_FIN;
2987                 TCPS_BUMP_MIB(tcp->tcp_tcps, tcpOutControl);
2988         }
2989         if (!tcp->tcp_fin_sent) {
2990                 tcp->tcp_fin_sent = B_TRUE;
2991                 switch (tcp->tcp_state) {
2992                 case TCPS_SYN_RCVD:
2993                         tcp->tcp_state = TCPS_FIN_WAIT_1;
2994                         DTRACE_TCP6(state__change, void, NULL,
2995                             ip_xmit_attr_t *, ixa, void, NULL,
2996                             tcp_t *, tcp, void, NULL,
2997                             int32_t, TCPS_SYN_RCVD);
2998                         break;
2999                 case TCPS_ESTABLISHED:
3000                         tcp->tcp_state = TCPS_FIN_WAIT_1;
3001                         DTRACE_TCP6(state__change, void, NULL,
3002                             ip_xmit_attr_t *, ixa, void, NULL,
3003                             tcp_t *, tcp, void, NULL,
3004                             int32_t, TCPS_ESTABLISHED);
3005                         break;
3006                 case TCPS_CLOSE_WAIT:
3007                         tcp->tcp_state = TCPS_LAST_ACK;
3008                         DTRACE_TCP6(state__change, void, NULL,
3009                             ip_xmit_attr_t *, ixa, void, NULL,
3010                             tcp_t *, tcp, void, NULL,
3011                             int32_t, TCPS_CLOSE_WAIT);
3012                         break;
3013                 }
3014                 if (tcp->tcp_suna == tcp->tcp_snxt)
3015                         TCP_TIMER_RESTART(tcp, tcp->tcp_rto);
3016                 tcp->tcp_snxt = tcp->tcp_fss + 1;
3017         }
3018 }
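
/*
 * Illustrative sketch (not part of the original source): the state
 * transitions driven by sending our first FIN, as performed above.
 * example_state_after_fin() is a hypothetical name; the snippet is
 * guarded by #if 0 so it is never compiled.
 */
#if 0
static int
example_state_after_fin(int state)
{
        switch (state) {
        case TCPS_SYN_RCVD:     /* FALLTHRU */
        case TCPS_ESTABLISHED:
                return (TCPS_FIN_WAIT_1);       /* we close first */
        case TCPS_CLOSE_WAIT:
                return (TCPS_LAST_ACK);         /* peer closed first */
        default:
                return (state);                 /* no change */
        }
}
#endif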
3019 
3020 /*
3021  * tcp_xmit_mp is called to return a pointer to an mblk chain complete with
3022  * ip and tcp header ready to pass down to IP.  If the mp passed in is
3023  * non-NULL, then up to max_to_send bytes of data will be dup'ed off that
3024  * mblk.  (If sendall is not set, the dup'ing will stop at an mblk boundary;
3025  * otherwise it will dup partial mblks.)
3026  * Otherwise, an appropriate ACK packet will be generated.  This
3027  * routine is not usually called to send new data for the first time.  It
3028  * is mostly called out of the timer for retransmits, and to generate ACKs.
3029  *
3030  * If offset is not NULL, the returned mblk chain's first mblk's b_rptr will
3031  * be adjusted by *offset.  And after dupb(), the offset and the ending mblk
3032  * of the original mblk chain will be returned in *offset and *end_mp.
3033  */
3034 mblk_t *
3035 tcp_xmit_mp(tcp_t *tcp, mblk_t *mp, int32_t max_to_send, int32_t *offset,
3036     mblk_t **end_mp, uint32_t seq, boolean_t sendall, uint32_t *seg_len,
3037     boolean_t rexmit)
3038 {
3039         int     data_length;
3040         int32_t off = 0;
3041         uint_t  flags;
3042         mblk_t  *mp1;
3043         mblk_t  *mp2;
3044         uchar_t *rptr;
3045         tcpha_t *tcpha;
3046         int32_t num_sack_blk = 0;
3047         int32_t sack_opt_len = 0;
3048         tcp_stack_t     *tcps = tcp->tcp_tcps;
3049         conn_t          *connp = tcp->tcp_connp;
3050         ip_xmit_attr_t  *ixa = connp->conn_ixa;
3051 
3052         /* Allocate for our maximum TCP header + link-level */
3053         mp1 = allocb(connp->conn_ht_iphc_allocated + tcps->tcps_wroff_xtra,
3054             BPRI_MED);
3055         if (mp1 == NULL)
3056                 return (NULL);
3057         data_length = 0;
3058 
3059         /*
3060          * Note that tcp_mss has been adjusted to take into account the
3061          * timestamp option if applicable.  Because SACK options do not
3062          * appear in every TCP segment and they are of variable length,
3063          * they cannot be included in tcp_mss.  Thus we need to calculate
3064          * the actual segment length when we need to send a segment which
3065          * includes SACK options.
3066          */
3067         if (tcp->tcp_snd_sack_ok && tcp->tcp_num_sack_blk > 0) {
3068                 num_sack_blk = MIN(tcp->tcp_max_sack_blk,
3069                     tcp->tcp_num_sack_blk);
3070                 sack_opt_len = num_sack_blk * sizeof (sack_blk_t) +
3071                     TCPOPT_NOP_LEN * 2 + TCPOPT_HEADER_LEN;
3072                 if (max_to_send + sack_opt_len > tcp->tcp_mss)
3073                         max_to_send -= sack_opt_len;
3074         }
3075 
3076         if (offset != NULL) {
3077                 off = *offset;
3078                 /* We use offset as an indicator that end_mp is not NULL. */
3079                 *end_mp = NULL;
3080         }
3081         for (mp2 = mp1; mp && data_length != max_to_send; mp = mp->b_cont) {
3082                 /* This could be faster with cooperation from downstream */
3083                 if (mp2 != mp1 && !sendall &&
3084                     data_length + (int)(mp->b_wptr - mp->b_rptr) >
3085                     max_to_send)
3086                         /*
3087                          * Don't send the next mblk since the whole mblk
3088                          * does not fit.
3089                          */
3090                         break;
3091                 mp2->b_cont = dupb(mp);
3092                 mp2 = mp2->b_cont;
3093                 if (!mp2) {
3094                         freemsg(mp1);
3095                         return (NULL);
3096                 }
3097                 mp2->b_rptr += off;
3098                 ASSERT((uintptr_t)(mp2->b_wptr - mp2->b_rptr) <=
3099                     (uintptr_t)INT_MAX);
3100 
3101                 data_length += (int)(mp2->b_wptr - mp2->b_rptr);
3102                 if (data_length > max_to_send) {
3103                         mp2->b_wptr -= data_length - max_to_send;
3104                         data_length = max_to_send;
3105                         off = mp2->b_wptr - mp->b_rptr;
3106                         break;
3107                 } else {
3108                         off = 0;
3109                 }
3110         }
3111         if (offset != NULL) {
3112                 *offset = off;
3113                 *end_mp = mp;
3114         }
3115         if (seg_len != NULL) {
3116                 *seg_len = data_length;
3117         }
3118 
3119         /* Update the latest receive window size in TCP header. */
3120         tcp->tcp_tcpha->tha_win = htons(tcp->tcp_rwnd >> tcp->tcp_rcv_ws);
3121 
3122         rptr = mp1->b_rptr + tcps->tcps_wroff_xtra;
3123         mp1->b_rptr = rptr;
3124         mp1->b_wptr = rptr + connp->conn_ht_iphc_len + sack_opt_len;
3125         bcopy(connp->conn_ht_iphc, rptr, connp->conn_ht_iphc_len);
3126         tcpha = (tcpha_t *)&rptr[ixa->ixa_ip_hdr_length];
3127         tcpha->tha_seq = htonl(seq);
3128 
3129         /*
3130          * Using tcp_unsent to determine if the PUSH bit should be set assumes
3131          * that this function was called from tcp_wput_data.  Thus, when called
3132          * to retransmit data, the setting of the PUSH bit may appear somewhat
3133          * random in that it might get set when it should not.  This
3134          * should not pose any performance issues.
3135          */
3136         if (data_length != 0 && (tcp->tcp_unsent == 0 ||
3137             tcp->tcp_unsent == data_length)) {
3138                 flags = TH_ACK | TH_PUSH;
3139         } else {
3140                 flags = TH_ACK;
3141         }
3142 
3143         if (tcp->tcp_ecn_ok) {
3144                 if (tcp->tcp_ecn_echo_on)
3145                         flags |= TH_ECE;
3146 
3147                 /*
3148                  * Only set the ECT bit and ECN_CWR if a segment contains new
3149                  * data.  There is no TCP flow control for non-data segments,
3150                  * and only data segments are transmitted reliably.
3151                  */
3152                 if (data_length > 0 && !rexmit) {
3153                         TCP_SET_ECT(tcp, rptr);
3154                         if (tcp->tcp_cwr && !tcp->tcp_ecn_cwr_sent) {
3155                                 flags |= TH_CWR;
3156                                 tcp->tcp_ecn_cwr_sent = B_TRUE;
3157                         }
3158                 }
3159         }
3160 
3161         /* Check if any special processing needs to be done. */
3162         if (tcp->tcp_valid_bits) {
3163                 uint32_t u1;
3164 
3165                 /* We don't allow having SYN and FIN in the same segment... */
3166                 if ((tcp->tcp_valid_bits & TCP_ISS_VALID) &&
3167                     seq == tcp->tcp_iss) {
3168                         /* Need to do connection set up processing. */
3169                         tcp_xmit_mp_aux_iss(tcp, connp, tcpha, mp1, &flags);
3170                 } else if ((tcp->tcp_valid_bits & TCP_FSS_VALID) &&
3171                     (seq + data_length) == tcp->tcp_fss) {
3172                         /* Need to do connection tear down processing. */
3173                         tcp_xmit_mp_aux_fss(tcp, ixa, &flags);
3174                 }
3175 
3176                 /*
3177                  * Need to do urgent pointer processing.
3178                  *
3179                  * Note the trick here.  u1 is unsigned.  When tcp_urg
3180                  * is smaller than seq, u1 will become a huge value.
3181                  * So the comparison will fail.  Also note that tcp_urp
3182                  * should be positive, see RFC 793 page 17.
3183                  */
3184                 u1 = tcp->tcp_urg - seq + TCP_OLD_URP_INTERPRETATION;
3185                 if ((tcp->tcp_valid_bits & TCP_URG_VALID) && u1 != 0 &&
3186                     u1 < (uint32_t)(64 * 1024)) {
3187                         flags |= TH_URG;
3188                         TCPS_BUMP_MIB(tcps, tcpOutUrg);
3189                         tcpha->tha_urp = htons(u1);
3190                 }
3191         }
3192         tcpha->tha_flags = (uchar_t)flags;
3193         tcp->tcp_rack = tcp->tcp_rnxt;
3194         tcp->tcp_rack_cnt = 0;
3195 
3196         /* Fill in the current value of timestamps option. */
3197         if (tcp->tcp_snd_ts_ok) {
3198                 if (tcp->tcp_state != TCPS_SYN_SENT) {
3199                         uint32_t llbolt = (uint32_t)LBOLT_FASTPATH;
3200 
3201                         U32_TO_BE32(llbolt,
3202                             (char *)tcpha + TCP_MIN_HEADER_LENGTH+4);
3203                         U32_TO_BE32(tcp->tcp_ts_recent,
3204                             (char *)tcpha + TCP_MIN_HEADER_LENGTH+8);
3205                 }
3206         }
3207 
3208         /* Fill in the SACK blocks. */
3209         if (num_sack_blk > 0) {
3210                 uchar_t *wptr = (uchar_t *)tcpha + connp->conn_ht_ulp_len;
3211                 sack_blk_t *tmp;
3212                 int32_t i;
3213 
3214                 wptr[0] = TCPOPT_NOP;
3215                 wptr[1] = TCPOPT_NOP;
3216                 wptr[2] = TCPOPT_SACK;
3217                 wptr[3] = TCPOPT_HEADER_LEN + num_sack_blk *
3218                     sizeof (sack_blk_t);
3219                 wptr += TCPOPT_REAL_SACK_LEN;
3220 
3221                 tmp = tcp->tcp_sack_list;
3222                 for (i = 0; i < num_sack_blk; i++) {
3223                         U32_TO_BE32(tmp[i].begin, wptr);
3224                         wptr += sizeof (tcp_seq);
3225                         U32_TO_BE32(tmp[i].end, wptr);
3226                         wptr += sizeof (tcp_seq);
3227                 }
3228                 tcpha->tha_offset_and_reserved += ((num_sack_blk * 2 + 1) << 4);
3229         }
3230         ASSERT((uintptr_t)(mp1->b_wptr - rptr) <= (uintptr_t)INT_MAX);
3231         data_length += (int)(mp1->b_wptr - rptr);
3232 
3233         ixa->ixa_pktlen = data_length;
3234 
3235         if (ixa->ixa_flags & IXAF_IS_IPV4) {
3236                 ((ipha_t *)rptr)->ipha_length = htons(data_length);
3237         } else {
3238                 ip6_t *ip6 = (ip6_t *)rptr;
3239 
3240                 ip6->ip6_plen = htons(data_length - IPV6_HDR_LEN);
3241         }
3242 
3243         /*
3244          * Prime pump for IP
3245          * Include the adjustment for a source route if any.
3246          */
3247         data_length -= ixa->ixa_ip_hdr_length;
3248         data_length += connp->conn_sum;
3249         data_length = (data_length >> 16) + (data_length & 0xFFFF);
3250         tcpha->tha_sum = htons(data_length);
3251         if (tcp->tcp_ip_forward_progress) {
3252                 tcp->tcp_ip_forward_progress = B_FALSE;
3253                 connp->conn_ixa->ixa_flags |= IXAF_REACH_CONF;
3254         } else {
3255                 connp->conn_ixa->ixa_flags &= ~IXAF_REACH_CONF;
3256         }
3257         return (mp1);
3258 }
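
/*
 * Illustrative sketch (not part of the original source): the "prime
 * pump" above seeds tha_sum with the folded sum of the TCP length and
 * the precomputed pseudo-header sum (conn_sum); IP or the NIC later
 * adds in the rest of the header and the payload.  example_prime_cksum()
 * is a hypothetical name; the snippet is guarded by #if 0 so it is
 * never compiled.
 */
#if 0
static uint16_t
example_prime_cksum(uint32_t tcp_len, uint32_t pseudo_sum)
{
        uint32_t sum = tcp_len + pseudo_sum;

        sum = (sum >> 16) + (sum & 0xFFFF);     /* fold the carries once */
        return ((uint16_t)sum);
}
#endif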
3259 
3260 /*
3261  * If this routine returns B_TRUE, TCP can generate a RST in response
3262  * to a segment.  If it returns B_FALSE, TCP should not respond.
3263  */
3264 static boolean_t
3265 tcp_send_rst_chk(tcp_stack_t *tcps)
3266 {
3267         int64_t now;
3268 
3269         /*
3270          * TCP needs to protect itself from generating too many RSTs.
3271          * This can be a DoS attack by sending us random segments
3272          * soliciting RSTs.
3273          *
3274          * What we do here is to have a limit of tcp_rst_sent_rate RSTs
3275  * in each 1 second interval.  In this way, TCP still generates
3276          * RSTs in normal cases but when under attack, the impact is
3277          * limited.
3278          */
3279         if (tcps->tcps_rst_sent_rate_enabled != 0) {
3280                 now = ddi_get_lbolt64();
3281                 if (TICK_TO_MSEC(now - tcps->tcps_last_rst_intrvl) >
3282                     1*SECONDS) {
3283                         tcps->tcps_last_rst_intrvl = now;
3284                         tcps->tcps_rst_cnt = 1;
3285                 } else if (++tcps->tcps_rst_cnt > tcps->tcps_rst_sent_rate) {
3286                         return (B_FALSE);
3287                 }
3288         }
3289         return (B_TRUE);
3290 }
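
/*
 * Illustrative sketch (not part of the original source): the rate limit
 * above in miniature.  At most `rate` events are allowed per one-second
 * window; the counter resets whenever a new window starts.  The type
 * and function names are hypothetical, and the snippet is guarded by
 * #if 0 so it is never compiled.
 */
#if 0
struct example_rate {
        int64_t         window_start;   /* in milliseconds */
        uint32_t        count;
        uint32_t        rate;
};

static boolean_t
example_rate_allow(struct example_rate *r, int64_t now_ms)
{
        if (now_ms - r->window_start > 1000) {
                r->window_start = now_ms;       /* new one-second window */
                r->count = 1;
                return (B_TRUE);
        }
        return (++r->count <= r->rate);
}
#endif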
3291 
3292 /*
3293  * This function handles all retransmissions if SACK is enabled for this
3294  * connection.  First it calculates how many segments can be retransmitted
3295  * based on tcp_pipe.  Then it goes through the notsack list to find eligible
3296  * segments.  A segment is eligible if its sack_cnt is greater than or
3297  * equal to tcp_dupack_fast_retransmit.  After it has retransmitted
3298  * all eligible segments, it checks to see if TCP can send some new segments
3299  * (fast recovery).  If it can, set the appropriate flag for tcp_input_data().
3300  *
3301  * Parameters:
3302  *      tcp_t *tcp: the tcp structure of the connection.
3303  *      uint_t *flags: in return, appropriate value will be set for
3304  *      tcp_input_data().
3305  */
3306 void
3307 tcp_sack_rexmit(tcp_t *tcp, uint_t *flags)
3308 {
3309         notsack_blk_t   *notsack_blk;
3310         int32_t         usable_swnd;
3311         int32_t         mss;
3312         uint32_t        seg_len;
3313         mblk_t          *xmit_mp;
3314         tcp_stack_t     *tcps = tcp->tcp_tcps;
3315 
3316         ASSERT(tcp->tcp_notsack_list != NULL);
3317         ASSERT(tcp->tcp_rexmit == B_FALSE);
3318 
3319         /* Defensive coding in case there is a bug... */
3320         if (tcp->tcp_notsack_list == NULL) {
3321                 return;
3322         }
3323         notsack_blk = tcp->tcp_notsack_list;
3324         mss = tcp->tcp_mss;
3325 
3326         /*
3327          * Limit the amount of outstanding data in the network to
3328          * tcp_cwnd_ssthresh, which is half of the original congestion window.
3329          */
3330         usable_swnd = tcp->tcp_cwnd_ssthresh - tcp->tcp_pipe;
3331 
3332         /* At least retransmit 1 MSS of data. */
3333         if (usable_swnd <= 0) {
3334                 usable_swnd = mss;
3335         }
3336 
3337         /* Make sure no new RTT samples will be taken. */
3338         tcp->tcp_csuna = tcp->tcp_snxt;
3339 
3340         notsack_blk = tcp->tcp_notsack_list;
3341         while (usable_swnd > 0) {
3342                 mblk_t          *snxt_mp, *tmp_mp;
3343                 tcp_seq         begin = tcp->tcp_sack_snxt;
3344                 tcp_seq         end;
3345                 int32_t         off;
3346 
3347                 for (; notsack_blk != NULL; notsack_blk = notsack_blk->next) {
3348                         if (SEQ_GT(notsack_blk->end, begin) &&
3349                             (notsack_blk->sack_cnt >=
3350                             tcps->tcps_dupack_fast_retransmit)) {
3351                                 end = notsack_blk->end;
3352                                 if (SEQ_LT(begin, notsack_blk->begin)) {
3353                                         begin = notsack_blk->begin;
3354                                 }
3355                                 break;
3356                         }
3357                 }
3358                 /*
3359                  * All holes are filled.  Manipulate tcp_cwnd to send more
3360                  * if we can.  Note that after the SACK recovery, tcp_cwnd is
3361                  * set to tcp_cwnd_ssthresh.
3362                  */
3363                 if (notsack_blk == NULL) {
3364                         usable_swnd = tcp->tcp_cwnd_ssthresh - tcp->tcp_pipe;
3365                         if (usable_swnd <= 0 || tcp->tcp_unsent == 0) {
3366                                 tcp->tcp_cwnd = tcp->tcp_snxt - tcp->tcp_suna;
3367                                 ASSERT(tcp->tcp_cwnd > 0);
3368                                 return;
3369                         } else {
3370                                 usable_swnd = usable_swnd / mss;
3371                                 tcp->tcp_cwnd = tcp->tcp_snxt - tcp->tcp_suna +
3372                                     MAX(usable_swnd * mss, mss);
3373                                 *flags |= TH_XMIT_NEEDED;
3374                                 return;
3375                         }
3376                 }
3377 
3378                 /*
3379                  * Note that we may send more than usable_swnd allows here
3380                  * because of round off, but no more than 1 MSS of data.
3381                  */
3382                 seg_len = end - begin;
3383                 if (seg_len > mss)
3384                         seg_len = mss;
3385                 snxt_mp = tcp_get_seg_mp(tcp, begin, &off);
3386                 ASSERT(snxt_mp != NULL);
3387                 /* This should not happen.  Defensive coding again... */
3388                 if (snxt_mp == NULL) {
3389                         return;
3390                 }
3391 
3392                 xmit_mp = tcp_xmit_mp(tcp, snxt_mp, seg_len, &off,
3393                     &tmp_mp, begin, B_TRUE, &seg_len, B_TRUE);
3394                 if (xmit_mp == NULL)
3395                         return;
3396 
3397                 usable_swnd -= seg_len;
3398                 tcp->tcp_pipe += seg_len;
3399                 tcp->tcp_sack_snxt = begin + seg_len;
3400 
3401                 tcp_send_data(tcp, xmit_mp);
3402 
3403                 /*
3404                  * Update the send timestamp to avoid false retransmission.
3405                  */
3406                 snxt_mp->b_prev = (mblk_t *)ddi_get_lbolt();
3407 
3408                 TCPS_BUMP_MIB(tcps, tcpRetransSegs);
3409                 TCPS_UPDATE_MIB(tcps, tcpRetransBytes, seg_len);
3410                 TCPS_BUMP_MIB(tcps, tcpOutSackRetransSegs);
3411                 /*
3412                  * Update tcp_rexmit_max to extend this SACK recovery phase.
3413                  * This happens when new data sent during fast recovery is
3414                  * also lost.  If TCP retransmits that new data, it needs
3415                  * to extend the SACK recovery phase to avoid starting another
3416                  * fast retransmit/recovery unnecessarily.
3417                  */
3418                 if (SEQ_GT(tcp->tcp_sack_snxt, tcp->tcp_rexmit_max)) {
3419                         tcp->tcp_rexmit_max = tcp->tcp_sack_snxt;
3420                 }
3421         }
3422 }
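
/*
 * Illustrative sketch (not part of the original source): the hole
 * eligibility test used in the loop above.  A notsack block may be
 * retransmitted once it has been reported missing by at least
 * tcps_dupack_fast_retransmit duplicate ACKs and it still lies beyond
 * what SACK recovery has already resent.  The names are hypothetical
 * (the real code uses SEQ_GT() for wrap-safe comparison), and the
 * snippet is guarded by #if 0 so it is never compiled.
 */
#if 0
struct example_hole {
        uint32_t        begin;
        uint32_t        end;
        uint32_t        sack_cnt;
};

static boolean_t
example_hole_eligible(const struct example_hole *h, uint32_t sack_snxt,
    uint32_t dupack_thresh)
{
        return (h->end > sack_snxt && h->sack_cnt >= dupack_thresh);
}
#endif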
3423 
3424 /*
3425  * tcp_ss_rexmit() is called to do slow start retransmission after a timeout
3426  * or ICMP errors.
3427  *
3428  * To limit the number of duplicate segments, we limit the number of segments
3429  * sent at one time to tcp_snd_burst, the burst variable.
3430  */
3431 void
3432 tcp_ss_rexmit(tcp_t *tcp)
3433 {
3434         uint32_t        snxt;
3435         uint32_t        smax;
3436         int32_t         win;
3437         int32_t         mss;
3438         int32_t         off;
3439         int32_t         burst = tcp->tcp_snd_burst;
3440         mblk_t          *snxt_mp;
3441         tcp_stack_t     *tcps = tcp->tcp_tcps;
3442 
3443         /*
3444          * Note that tcp_rexmit can be set even though TCP has retransmitted
3445          * all unack'ed segments.
3446          */
3447         if (SEQ_LT(tcp->tcp_rexmit_nxt, tcp->tcp_rexmit_max)) {
3448                 smax = tcp->tcp_rexmit_max;
3449                 snxt = tcp->tcp_rexmit_nxt;
3450                 if (SEQ_LT(snxt, tcp->tcp_suna)) {
3451                         snxt = tcp->tcp_suna;
3452                 }
3453                 win = MIN(tcp->tcp_cwnd, tcp->tcp_swnd);
3454                 win -= snxt - tcp->tcp_suna;
3455                 mss = tcp->tcp_mss;
3456                 snxt_mp = tcp_get_seg_mp(tcp, snxt, &off);
3457 
3458                 while (SEQ_LT(snxt, smax) && (win > 0) &&
3459                     (burst > 0) && (snxt_mp != NULL)) {
3460                         mblk_t  *xmit_mp;
3461                         mblk_t  *old_snxt_mp = snxt_mp;
3462                         uint32_t cnt = mss;
3463 
3464                         if (win < cnt) {
3465                                 cnt = win;
3466                         }
3467                         if (SEQ_GT(snxt + cnt, smax)) {
3468                                 cnt = smax - snxt;
3469                         }
3470                         xmit_mp = tcp_xmit_mp(tcp, snxt_mp, cnt, &off,
3471                             &snxt_mp, snxt, B_TRUE, &cnt, B_TRUE);
3472                         if (xmit_mp == NULL)
3473                                 return;
3474 
3475                         tcp_send_data(tcp, xmit_mp);
3476 
3477                         snxt += cnt;
3478                         win -= cnt;
3479                         /*
3480                          * Update the send timestamp to avoid false
3481                          * retransmission.
3482                          */
3483                         old_snxt_mp->b_prev = (mblk_t *)ddi_get_lbolt();
3484                         TCPS_BUMP_MIB(tcps, tcpRetransSegs);
3485                         TCPS_UPDATE_MIB(tcps, tcpRetransBytes, cnt);
3486 
3487                         tcp->tcp_rexmit_nxt = snxt;
3488                         burst--;
3489                 }
3490                 /*
3491                  * If we have transmitted all we have at the time
3492                  * we started the retransmission, we can leave
3493                  * the rest of the job to tcp_wput_data().  But we
3494                  * need to check the send window first.  If the
3495                  * win is not 0, go on with tcp_wput_data().
3496                  */
3497                 if (SEQ_LT(snxt, smax) || win == 0) {
3498                         return;
3499                 }
3500         }
3501         /* Only call tcp_wput_data() if there is data to be sent. */
3502         if (tcp->tcp_unsent) {
3503                 tcp_wput_data(tcp, NULL, B_FALSE);
3504         }
3505 }
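
/*
 * Illustrative sketch (not part of the original source): the chunk
 * sizing inside the loop above.  Each retransmission sends at most one
 * MSS, never more than the usable window, and never past the
 * retransmission high-water mark.  example_rexmit_cnt() is a
 * hypothetical name (the real code uses SEQ_GT() for wrap-safe
 * comparison); the snippet is guarded by #if 0 so it is never compiled.
 */
#if 0
static uint32_t
example_rexmit_cnt(uint32_t mss, int32_t win, uint32_t snxt, uint32_t smax)
{
        uint32_t cnt = mss;

        if ((uint32_t)win < cnt)
                cnt = (uint32_t)win;    /* don't overrun the window */
        if (snxt + cnt > smax)
                cnt = smax - snxt;      /* don't pass the high-water mark */
        return (cnt);
}
#endif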
3506 
3507 /*
3508  * Do slow start retransmission after ICMP errors or PMTU changes.
3509  */
3510 void
3511 tcp_rexmit_after_error(tcp_t *tcp)
3512 {
3513         /*
3514          * If all sent data has been acknowledged or there is no data left
3515          * to send, just return.
3516          */
3517         if (!SEQ_LT(tcp->tcp_suna, tcp->tcp_snxt) ||
3518             (tcp->tcp_xmit_head == NULL))
3519                 return;
3520 
3521         if ((tcp->tcp_valid_bits & TCP_FSS_VALID) && (tcp->tcp_unsent == 0))
3522                 tcp->tcp_rexmit_max = tcp->tcp_fss;
3523         else
3524                 tcp->tcp_rexmit_max = tcp->tcp_snxt;
3525 
3526         tcp->tcp_rexmit_nxt = tcp->tcp_suna;
3527         tcp->tcp_rexmit = B_TRUE;
3528         tcp->tcp_dupack_cnt = 0;
3529         tcp->tcp_snd_burst = TCP_CWND_SS;
3530         tcp_ss_rexmit(tcp);
3531 }
3532 
3533 /*
3534  * tcp_get_seg_mp() is called to get the pointer to a segment in the
3535  * send queue which starts at the given sequence number. If the given
3536  * sequence number is equal to the last valid sequence number (tcp_snxt),
3537  * the returned mblk is the last valid mblk, and off is set to the length
3538  * of that mblk.
3541  *
3542  * Parameters:
3543  *      tcp_t *tcp: the tcp instance pointer.
3544  *      uint32_t seq: the starting seq. no of the requested segment.
3545  *      int32_t *off: after execution, *off will be the offset into
3546  *              the returned mblk at which the requested seq no. begins.
3547  *              It is the caller's responsibility to pass in a non-null off.
3548  *
3549  * Return:
3550  *      A mblk_t pointer pointing to the requested segment in send queue.
3551  */
3552 static mblk_t *
3553 tcp_get_seg_mp(tcp_t *tcp, uint32_t seq, int32_t *off)
3554 {
3555         int32_t cnt;
3556         mblk_t  *mp;
3557 
3558         /* Defensive coding.  Make sure we don't send incorrect data. */
3559         if (SEQ_LT(seq, tcp->tcp_suna) || SEQ_GT(seq, tcp->tcp_snxt))
3560                 return (NULL);
3561 
3562         cnt = seq - tcp->tcp_suna;
3563         mp = tcp->tcp_xmit_head;
3564         while (cnt > 0 && mp != NULL) {
3565                 cnt -= mp->b_wptr - mp->b_rptr;
3566                 if (cnt <= 0) {
3567                         cnt += mp->b_wptr - mp->b_rptr;
3568                         break;
3569                 }
3570                 mp = mp->b_cont;
3571         }
3572         ASSERT(mp != NULL);
3573         *off = cnt;
3574         return (mp);
3575 }
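
/*
 * Illustrative sketch (not part of the original source): the walk
 * performed by tcp_get_seg_mp() in isolation.  Given a byte offset into
 * a chain of buffers, find the buffer containing that offset and the
 * offset within it.  The simplified buffer type and names are
 * hypothetical; the snippet is guarded by #if 0 so it is never
 * compiled.
 */
#if 0
struct example_buf {
        struct example_buf      *next;
        unsigned char           *rptr;  /* valid data is [rptr, wptr) */
        unsigned char           *wptr;
};

static struct example_buf *
example_seek(struct example_buf *head, int32_t cnt, int32_t *off)
{
        struct example_buf *b = head;

        while (cnt > 0 && b != NULL) {
                cnt -= (int32_t)(b->wptr - b->rptr);
                if (cnt <= 0) {
                        /* Offset lands in this buffer; undo the subtract. */
                        cnt += (int32_t)(b->wptr - b->rptr);
                        break;
                }
                b = b->next;
        }
        *off = cnt;
        return (b);
}
#endif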
3576 
3577 /*
3578  * This routine adjusts the next-to-send sequence number variables, in the
3579  * case where the receiver has shrunk its window.
3580  */
3581 void
3582 tcp_update_xmit_tail(tcp_t *tcp, uint32_t snxt)
3583 {
3584         mblk_t *xmit_tail;
3585         int32_t offset;
3586 
3587         tcp->tcp_snxt = snxt;
3588 
3589         /* Get the mblk, and the offset in it, as per the shrunk window */
3590         xmit_tail = tcp_get_seg_mp(tcp, snxt, &offset);
3591         ASSERT(xmit_tail != NULL);
3592         tcp->tcp_xmit_tail = xmit_tail;
3593         tcp->tcp_xmit_tail_unsent = xmit_tail->b_wptr -
3594             xmit_tail->b_rptr - offset;
3595 }
3596 
3597 /*
3598  * This handles the case when the receiver has shrunk its window.  Per RFC
3599  * 1122, if the receiver shrinks the window, i.e. moves the right edge of the
3600  * window to the left, we should not send new data, but should retransmit
3601  * normally the old unacked data between suna and suna + swnd.  We might have
3602  * sent data that is now outside the new window; pretend that we didn't send it.
3603  */
3604 static void
3605 tcp_process_shrunk_swnd(tcp_t *tcp, uint32_t shrunk_count)
3606 {
3607         uint32_t        snxt = tcp->tcp_snxt;
3608 
3609         ASSERT(shrunk_count > 0);
3610 
3611         if (!tcp->tcp_is_wnd_shrnk) {
3612                 tcp->tcp_snxt_shrunk = snxt;
3613                 tcp->tcp_is_wnd_shrnk = B_TRUE;
3614         } else if (SEQ_GT(snxt, tcp->tcp_snxt_shrunk)) {
3615                 tcp->tcp_snxt_shrunk = snxt;
3616         }
3617 
3618         /* Pretend we didn't send the data outside the window */
3619         snxt -= shrunk_count;
3620 
3621         /* Reset all the values per the now shrunk window */
3622         tcp_update_xmit_tail(tcp, snxt);
3623         tcp->tcp_unsent += shrunk_count;
3624 
3625         /*
3626          * If the SACK option is set, delete the entire list of
3627          * notsack'ed blocks.
3628          */
3629         TCP_NOTSACK_REMOVE_ALL(tcp->tcp_notsack_list, tcp);
3630 
3631         if (tcp->tcp_suna == tcp->tcp_snxt && tcp->tcp_swnd == 0)
3632                 /*
3633                  * Make sure the timer is running so that we will probe a zero
3634                  * window.
3635                  */
3636                 TCP_TIMER_RESTART(tcp, tcp->tcp_rto);
3637 }
3638 
3639 /*
3640  * tcp_fill_header is called by tcp_send() to fill the outgoing TCP header
3641  * with the template header, as well as other options such as time-stamp,
3642  * ECN and/or SACK.
3643  */
3644 static void
3645 tcp_fill_header(tcp_t *tcp, uchar_t *rptr, clock_t now, int num_sack_blk)
3646 {
3647         tcpha_t *tcp_tmpl, *tcpha;
3648         uint32_t *dst, *src;
3649         int hdrlen;
3650         conn_t *connp = tcp->tcp_connp;
3651 
3652         ASSERT(OK_32PTR(rptr));
3653 
3654         /* Template header */
3655         tcp_tmpl = tcp->tcp_tcpha;
3656 
3657         /* Header of outgoing packet */
3658         tcpha = (tcpha_t *)(rptr + connp->conn_ixa->ixa_ip_hdr_length);
3659 
3660         /* dst and src are opaque 32-bit fields, used for copying */
3661         dst = (uint32_t *)rptr;
3662         src = (uint32_t *)connp->conn_ht_iphc;
3663         hdrlen = connp->conn_ht_iphc_len;
3664 
3665         /* Fill time-stamp option if needed */
3666         if (tcp->tcp_snd_ts_ok) {
3667                 U32_TO_BE32((uint32_t)now,
3668                     (char *)tcp_tmpl + TCP_MIN_HEADER_LENGTH + 4);
3669                 U32_TO_BE32(tcp->tcp_ts_recent,
3670                     (char *)tcp_tmpl + TCP_MIN_HEADER_LENGTH + 8);
3671         } else {
3672                 ASSERT(connp->conn_ht_ulp_len == TCP_MIN_HEADER_LENGTH);
3673         }
3674 
3675         /*
3676          * Copy the template header; is this really more efficient than
3677          * calling bcopy()?  For simple IPv4/TCP, it may be the case,
3678          * but perhaps not for other scenarios.
3679          */
3680         dst[0] = src[0];
3681         dst[1] = src[1];
3682         dst[2] = src[2];
3683         dst[3] = src[3];
3684         dst[4] = src[4];
3685         dst[5] = src[5];
3686         dst[6] = src[6];
3687         dst[7] = src[7];
3688         dst[8] = src[8];
3689         dst[9] = src[9];
3690         if (hdrlen -= 40) {
3691                 hdrlen >>= 2;
3692                 dst += 10;
3693                 src += 10;
3694                 do {
3695                         *dst++ = *src++;
3696                 } while (--hdrlen);
3697         }
3698 
3699         /*
3700          * Set the ECN info in the TCP header if it is not a zero
3701          * window probe.  Zero window probes are only sent in
3702          * tcp_wput_data() and tcp_timer().
3703          */
3704         if (tcp->tcp_ecn_ok && !tcp->tcp_zero_win_probe) {
3705                 TCP_SET_ECT(tcp, rptr);
3706 
3707                 if (tcp->tcp_ecn_echo_on)
3708                         tcpha->tha_flags |= TH_ECE;
3709                 if (tcp->tcp_cwr && !tcp->tcp_ecn_cwr_sent) {
3710                         tcpha->tha_flags |= TH_CWR;
3711                         tcp->tcp_ecn_cwr_sent = B_TRUE;
3712                 }
3713         }
3714 
3715         /* Fill in SACK options */
3716         if (num_sack_blk > 0) {
3717                 uchar_t *wptr = rptr + connp->conn_ht_iphc_len;
3718                 sack_blk_t *tmp;
3719                 int32_t i;
3720 
3721                 wptr[0] = TCPOPT_NOP;
3722                 wptr[1] = TCPOPT_NOP;
3723                 wptr[2] = TCPOPT_SACK;
3724                 wptr[3] = TCPOPT_HEADER_LEN + num_sack_blk *
3725                     sizeof (sack_blk_t);
3726                 wptr += TCPOPT_REAL_SACK_LEN;
3727 
3728                 tmp = tcp->tcp_sack_list;
3729                 for (i = 0; i < num_sack_blk; i++) {
3730                         U32_TO_BE32(tmp[i].begin, wptr);
3731                         wptr += sizeof (tcp_seq);
3732                         U32_TO_BE32(tmp[i].end, wptr);
3733                         wptr += sizeof (tcp_seq);
3734                 }
3735                 tcpha->tha_offset_and_reserved +=
3736                     ((num_sack_blk * 2 + 1) << 4);
3737         }
3738 }
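
/*
 * Illustrative sketch (not part of the original source): the template
 * copy above unrolls the common 40-byte (IPv4 + TCP, no options) case
 * into ten aligned 32-bit stores and falls back to a word loop for
 * longer headers.  An equivalent, simpler form is shown below;
 * example_copy_hdr() is a hypothetical name, and the snippet is guarded
 * by #if 0 so it is never compiled.
 */
#if 0
static void
example_copy_hdr(uint32_t *dst, const uint32_t *src, int hdrlen)
{
        int words = hdrlen >> 2;        /* hdrlen is 32-bit aligned */

        while (words-- > 0)
                *dst++ = *src++;
}
#endif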