3354 kernel crash in rpcsec_gss after using gsscred
Reviewed by: Toomas Soome <tsoome@me.com>
Reviewed by: Carlos Neira <cneirabustos@gmail.com>
Approved by: Robert Mustacchi <rm@joyent.com>
NEX-4123 xdrmblk_getpos() is unreliable
Reviewed by: Josef 'Jeff' Sipek <josef.sipek@nexenta.com>
Reviewed by: Yuri Pankov <yuri.pankov@nexenta.com>
re #13613 rb4516 Tunables needs volatile keyword
closes  #11843 rb3753 - NFSv3/UDP server sends packets with wrong Source IP in header (picked from ncp3-gate)
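
The #11843 change below gives each clone transport its own copy of the request's
destination address (the new ud_local field) instead of sharing one buffer on the
master transport, and the existing reply path then hands that address back to IP as
the packet's source via an IP_PKTINFO option on the T_unitdata_req.  For context,
here is a minimal userland sketch of the same IP_PKTINFO mechanism; it is
illustrative only, not part of the webrev, and the function reply_from() and its
parameters are invented for the example.

/*
 * Hypothetical userland illustration (not from this webrev): send a UDP
 * datagram to 'dst' while forcing its IPv4 source address to 'src' by
 * attaching IP_PKTINFO ancillary data, the same option the kernel reply
 * path places in the T_unitdata_req.  ipi_spec_dst carries the desired
 * source address; ipi_ifindex == 0 lets IP choose the interface.
 */
#define _GNU_SOURCE             /* may be needed on some systems for struct in_pktinfo */
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <netinet/in.h>
#include <string.h>

ssize_t
reply_from(int fd, const void *buf, size_t len,
    const struct sockaddr_in *dst, struct in_addr src)
{
        struct iovec iov = { .iov_base = (void *)buf, .iov_len = len };
        union {
                struct cmsghdr align;
                char space[CMSG_SPACE(sizeof (struct in_pktinfo))];
        } cbuf;
        struct msghdr msg;
        struct cmsghdr *cmsg;
        struct in_pktinfo *pkti;

        (void) memset(&cbuf, 0, sizeof (cbuf));
        (void) memset(&msg, 0, sizeof (msg));
        msg.msg_name = (void *)dst;
        msg.msg_namelen = sizeof (*dst);
        msg.msg_iov = &iov;
        msg.msg_iovlen = 1;
        msg.msg_control = cbuf.space;
        msg.msg_controllen = sizeof (cbuf.space);

        cmsg = CMSG_FIRSTHDR(&msg);
        cmsg->cmsg_level = IPPROTO_IP;
        cmsg->cmsg_type = IP_PKTINFO;
        cmsg->cmsg_len = CMSG_LEN(sizeof (struct in_pktinfo));
        pkti = (struct in_pktinfo *)CMSG_DATA(cmsg);
        pkti->ipi_spec_dst = src;       /* source address for this packet */
        pkti->ipi_ifindex = 0;          /* 0 == let IP pick the interface */

        return (sendmsg(fd, &msg, 0));
}
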
    
      
    
    
          --- old/usr/src/uts/common/rpc/svc_clts.c
          +++ new/usr/src/uts/common/rpc/svc_clts.c
   1    1  /*
   2    2   * CDDL HEADER START
   3    3   *
   4    4   * The contents of this file are subject to the terms of the
   5    5   * Common Development and Distribution License (the "License").
   6    6   * You may not use this file except in compliance with the License.
   7    7   *
   8    8   * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
   9    9   * or http://www.opensolaris.org/os/licensing.
  10   10   * See the License for the specific language governing permissions
  11   11   * and limitations under the License.
  12   12   *
  13   13   * When distributing Covered Code, include this CDDL HEADER in each
  
  14   14   * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
  15   15   * If applicable, add the following below this CDDL HEADER, with the
  16   16   * fields enclosed by brackets "[]" replaced with your own identifying
  17   17   * information: Portions Copyright [yyyy] [name of copyright owner]
  18   18   *
  19   19   * CDDL HEADER END
  20   20   */
  21   21  
  22   22  /*
  23   23   * Copyright 2015 Nexenta Systems, Inc.  All rights reserved.
  24      - *  Copyright (c) 1989, 2010, Oracle and/or its affiliates. All rights reserved.
       24 + * Copyright (c) 1989, 2010, Oracle and/or its affiliates. All rights reserved.
  25   25   * Copyright (c) 2012 by Delphix. All rights reserved.
       26 + * Copyright 2012 Marcel Telka <marcel@telka.sk>
       27 + * Copyright 2018 OmniOS Community Edition (OmniOSce) Association.
  26   28   */
  27   29  
  28   30  /*      Copyright (c) 1983, 1984, 1985, 1986, 1987, 1988, 1989 AT&T     */
  29      -/*        All Rights Reserved   */
       31 +/*      All Rights Reserved     */
  30   32  
  31   33  /*
  32   34   * Portions of this source code were derived from Berkeley 4.3 BSD
  33   35   * under license from the Regents of the University of California.
  34   36   */
  35   37  
  36   38  /*
  37   39   * svc_clts.c
  38   40   * Server side for RPC in the kernel.
  39   41   *
  40   42   */
  41   43  
  42   44  #include <sys/param.h>
  43   45  #include <sys/types.h>
  44   46  #include <sys/sysmacros.h>
  45   47  #include <sys/file.h>
  46   48  #include <sys/stream.h>
  47   49  #include <sys/strsun.h>
  48   50  #include <sys/strsubr.h>
  49   51  #include <sys/tihdr.h>
  50   52  #include <sys/tiuser.h>
  51   53  #include <sys/t_kuser.h>
  52   54  #include <sys/fcntl.h>
  53   55  #include <sys/errno.h>
  54   56  #include <sys/kmem.h>
  55   57  #include <sys/systm.h>
  56   58  #include <sys/cmn_err.h>
  57   59  #include <sys/kstat.h>
  58   60  #include <sys/vtrace.h>
  59   61  #include <sys/debug.h>
  60   62  
  61   63  #include <rpc/types.h>
  62   64  #include <rpc/xdr.h>
  63   65  #include <rpc/auth.h>
  64   66  #include <rpc/clnt.h>
  65   67  #include <rpc/rpc_msg.h>
  66   68  #include <rpc/svc.h>
  67   69  #include <inet/ip.h>
  68   70  
  69   71  /*
  70   72   * Routines exported through ops vector.
  71   73   */
  72   74  static bool_t           svc_clts_krecv(SVCXPRT *, mblk_t *, struct rpc_msg *);
  73   75  static bool_t           svc_clts_ksend(SVCXPRT *, struct rpc_msg *);
  74   76  static bool_t           svc_clts_kgetargs(SVCXPRT *, xdrproc_t, caddr_t);
  75   77  static bool_t           svc_clts_kfreeargs(SVCXPRT *, xdrproc_t, caddr_t);
  76   78  static void             svc_clts_kdestroy(SVCMASTERXPRT *);
  77   79  static int              svc_clts_kdup(struct svc_req *, caddr_t, int,
  78   80                                  struct dupreq **, bool_t *);
  79   81  static void             svc_clts_kdupdone(struct dupreq *, caddr_t,
  80   82                                  void (*)(), int, int);
  81   83  static int32_t          *svc_clts_kgetres(SVCXPRT *, int);
  82   84  static void             svc_clts_kclone_destroy(SVCXPRT *);
  83   85  static void             svc_clts_kfreeres(SVCXPRT *);
  84   86  static void             svc_clts_kstart(SVCMASTERXPRT *);
  85   87  static void             svc_clts_kclone_xprt(SVCXPRT *, SVCXPRT *);
  86   88  static void             svc_clts_ktattrs(SVCXPRT *, int, void **);
  87   89  
  88   90  /*
  89   91   * Server transport operations vector.
  90   92   */
  91   93  struct svc_ops svc_clts_op = {
  92   94          svc_clts_krecv,         /* Get requests */
  93   95          svc_clts_kgetargs,      /* Deserialize arguments */
  
  94   96          svc_clts_ksend,         /* Send reply */
  95   97          svc_clts_kfreeargs,     /* Free argument data space */
  96   98          svc_clts_kdestroy,      /* Destroy transport handle */
  97   99          svc_clts_kdup,          /* Check entry in dup req cache */
  98  100          svc_clts_kdupdone,      /* Mark entry in dup req cache as done */
  99  101          svc_clts_kgetres,       /* Get pointer to response buffer */
 100  102          svc_clts_kfreeres,      /* Destroy pre-serialized response header */
 101  103          svc_clts_kclone_destroy, /* Destroy a clone xprt */
 102  104          svc_clts_kstart,        /* Tell `ready-to-receive' to rpcmod */
 103  105          svc_clts_kclone_xprt,   /* transport specific clone xprt function */
 104      -        svc_clts_ktattrs        /* Transport specific attributes. */
      106 +        svc_clts_ktattrs,       /* Transport specific attributes */
      107 +        rpcmod_hold,            /* Increment transport reference count */
      108 +        rpcmod_release          /* Decrement transport reference count */
 105  109  };
 106  110  
 107  111  /*
 108  112   * Transport private data.
 109  113   * Kept in xprt->xp_p2buf.
 110  114   */
 111  115  struct udp_data {
 112  116          mblk_t  *ud_resp;                       /* buffer for response */
 113  117          mblk_t  *ud_inmp;                       /* mblk chain of request */
      118 +        sin6_t  ud_local;                       /* local address */
 114  119  };
 115  120  
 116  121  #define UD_MAXSIZE      8800
 117  122  #define UD_INITSIZE     2048
 118  123  
 119  124  /*
 120  125   * Connectionless server statistics
 121  126   */
 122  127  static const struct rpc_clts_server {
 123  128          kstat_named_t   rscalls;
 124  129          kstat_named_t   rsbadcalls;
 125  130          kstat_named_t   rsnullrecv;
 126  131          kstat_named_t   rsbadlen;
 127  132          kstat_named_t   rsxdrcall;
 128  133          kstat_named_t   rsdupchecks;
 129  134          kstat_named_t   rsdupreqs;
 130  135  } clts_rsstat_tmpl = {
 131  136          { "calls",      KSTAT_DATA_UINT64 },
 132  137          { "badcalls",   KSTAT_DATA_UINT64 },
 133  138          { "nullrecv",   KSTAT_DATA_UINT64 },
 134  139          { "badlen",     KSTAT_DATA_UINT64 },
 135  140          { "xdrcall",    KSTAT_DATA_UINT64 },
 136  141          { "dupchecks",  KSTAT_DATA_UINT64 },
 137  142          { "dupreqs",    KSTAT_DATA_UINT64 }
 138  143  };
 139  144  
 140  145  static uint_t clts_rsstat_ndata =
 141  146          sizeof (clts_rsstat_tmpl) / sizeof (kstat_named_t);
 142  147  
 143  148  #define CLONE2STATS(clone_xprt) \
 144  149          (struct rpc_clts_server *)(clone_xprt)->xp_master->xp_p2
 145  150  
 146  151  #define RSSTAT_INCR(stats, x)   \
 147  152          atomic_inc_64(&(stats)->x.value.ui64)
 148  153  
 149  154  /*
 150  155   * Create a transport record.
 151  156   * The transport record, output buffer, and private data structure
 152  157   * are allocated.  The output buffer is serialized into using xdrmem.
 153  158   * There is one transport record per user process which implements a
 154  159   * set of services.
 155  160   */
 156  161  /* ARGSUSED */
 157  162  int
 158  163  svc_clts_kcreate(file_t *fp, uint_t sendsz, struct T_info_ack *tinfo,
 159  164      SVCMASTERXPRT **nxprt)
 160  165  {
 161  166          SVCMASTERXPRT *xprt;
 162  167          struct rpcstat *rpcstat;
 163  168  
 164  169          if (nxprt == NULL)
 165  170                  return (EINVAL);
 166  171  
 167  172          rpcstat = zone_getspecific(rpcstat_zone_key, curproc->p_zone);
 168  173          ASSERT(rpcstat != NULL);
 169  174  
 170  175          xprt = kmem_zalloc(sizeof (*xprt), KM_SLEEP);
 171  176          xprt->xp_lcladdr.buf = kmem_zalloc(sizeof (sin6_t), KM_SLEEP);
 172  177          xprt->xp_p2 = (caddr_t)rpcstat->rpc_clts_server;
 173  178          xprt->xp_ops = &svc_clts_op;
 174  179          xprt->xp_msg_size = tinfo->TSDU_size;
 175  180  
 176  181          xprt->xp_rtaddr.buf = NULL;
 177  182          xprt->xp_rtaddr.maxlen = tinfo->ADDR_size;
 178  183          xprt->xp_rtaddr.len = 0;
 179  184  
 180  185          *nxprt = xprt;
 181  186  
 182  187          return (0);
 183  188  }
 184  189  
 185  190  /*
 186  191   * Destroy a transport record.
 187  192   * Frees the space allocated for a transport record.
 188  193   */
 189  194  static void
 190  195  svc_clts_kdestroy(SVCMASTERXPRT *xprt)
 191  196  {
 192  197          if (xprt->xp_netid)
 193  198                  kmem_free(xprt->xp_netid, strlen(xprt->xp_netid) + 1);
 194  199          if (xprt->xp_addrmask.maxlen)
 195  200                  kmem_free(xprt->xp_addrmask.buf, xprt->xp_addrmask.maxlen);
 196  201  
 197  202          mutex_destroy(&xprt->xp_req_lock);
 198  203          mutex_destroy(&xprt->xp_thread_lock);
 199  204  
 200  205          kmem_free(xprt->xp_lcladdr.buf, sizeof (sin6_t));
 201  206          kmem_free(xprt, sizeof (SVCMASTERXPRT));
 202  207  }
 203  208  
 204  209  /*
 205  210   * Transport-type specific part of svc_xprt_cleanup().
 206  211   * Frees the message buffer space allocated for a clone of a transport record
 207  212   */
 208  213  static void
 209  214  svc_clts_kclone_destroy(SVCXPRT *clone_xprt)
 210  215  {
 211  216          /* LINTED pointer alignment */
 212  217          struct udp_data *ud = (struct udp_data *)clone_xprt->xp_p2buf;
 213  218  
 214  219          if (ud->ud_resp) {
 215  220                  /*
 216  221                   * There should not be any left over results buffer.
 217  222                   */
 218  223                  ASSERT(ud->ud_resp->b_cont == NULL);
 219  224  
 220  225                  /*
 221  226                   * Free the T_UNITDATA_{REQ/IND} that svc_clts_krecv
 222  227                   * saved.
 223  228                   */
 224  229                  freeb(ud->ud_resp);
 225  230          }
 226  231          if (ud->ud_inmp)
 227  232                  freemsg(ud->ud_inmp);
 228  233  }
 229  234  
 230  235  /*
 231  236   * svc_tli_kcreate() calls this function at the end to tell
 232  237   * rpcmod that the transport is ready to receive requests.
 233  238   */
 234  239  /* ARGSUSED */
 235  240  static void
 236  241  svc_clts_kstart(SVCMASTERXPRT *xprt)
 237  242  {
 238  243  }
 239  244  
 240  245  static void
 241  246  svc_clts_kclone_xprt(SVCXPRT *src_xprt, SVCXPRT *dst_xprt)
 242  247  {
 243  248          struct udp_data *ud_src =
 244  249              (struct udp_data *)src_xprt->xp_p2buf;
 245  250          struct udp_data *ud_dst =
 246  251              (struct udp_data *)dst_xprt->xp_p2buf;
 247  252  
 248  253          if (ud_src->ud_resp)
 249  254                  ud_dst->ud_resp = dupb(ud_src->ud_resp);
 250  255  
 251  256  }
 252  257  
 253  258  static void
 254  259  svc_clts_ktattrs(SVCXPRT *clone_xprt, int attrflag, void **tattr)
 255  260  {
 256  261          *tattr = NULL;
 257  262  
 258  263          switch (attrflag) {
 259  264          case SVC_TATTR_ADDRMASK:
 260  265                  *tattr = (void *)&clone_xprt->xp_master->xp_addrmask;
 261  266          }
 262  267  }
 263  268  
 264  269  /*
 265  270   * Receive rpc requests.
 266  271   * Pulls a request in off the socket, checks if the packet is intact,
 267  272   * and deserializes the call packet.
 268  273   */
 269  274  static bool_t
 270  275  svc_clts_krecv(SVCXPRT *clone_xprt, mblk_t *mp, struct rpc_msg *msg)
 271  276  {
 272  277          /* LINTED pointer alignment */
 273  278          struct udp_data *ud = (struct udp_data *)clone_xprt->xp_p2buf;
 274  279          XDR *xdrs = &clone_xprt->xp_xdrin;
 275  280          struct rpc_clts_server *stats = CLONE2STATS(clone_xprt);
 276  281          union T_primitives *pptr;
 277  282          int hdrsz;
 278  283          cred_t *cr;
 279  284  
 280  285          TRACE_0(TR_FAC_KRPC, TR_SVC_CLTS_KRECV_START,
 281  286              "svc_clts_krecv_start:");
 282  287  
 283  288          RSSTAT_INCR(stats, rscalls);
 284  289  
 285  290          /*
 286  291           * The incoming request should start with an M_PROTO message.
 287  292           */
 288  293          if (mp->b_datap->db_type != M_PROTO) {
 289  294                  goto bad;
 290  295          }
 291  296  
 292  297          /*
  293  298           * The incoming request should be a T_UNITDATA_IND.  There
 294  299           * might be other messages coming up the stream, but we can
 295  300           * ignore them.
 296  301           */
 297  302          pptr = (union T_primitives *)mp->b_rptr;
 298  303          if (pptr->type != T_UNITDATA_IND) {
 299  304                  goto bad;
 300  305          }
 301  306          /*
 302  307           * Do some checking to make sure that the header at least looks okay.
 303  308           */
 304  309          hdrsz = (int)(mp->b_wptr - mp->b_rptr);
 305  310          if (hdrsz < TUNITDATAINDSZ ||
 306  311              hdrsz < (pptr->unitdata_ind.OPT_offset +
 307  312              pptr->unitdata_ind.OPT_length) ||
 308  313              hdrsz < (pptr->unitdata_ind.SRC_offset +
 309  314              pptr->unitdata_ind.SRC_length)) {
 310  315                  goto bad;
 311  316          }
 312  317  
 313  318          /*
 314  319           * Make sure that the transport provided a usable address.
 315  320           */
 316  321          if (pptr->unitdata_ind.SRC_length <= 0) {
  
 317  322                  goto bad;
 318  323          }
 319  324          /*
 320  325           * Point the remote transport address in the service_transport
 321  326           * handle at the address in the request.
 322  327           */
 323  328          clone_xprt->xp_rtaddr.buf = (char *)mp->b_rptr +
 324  329              pptr->unitdata_ind.SRC_offset;
 325  330          clone_xprt->xp_rtaddr.len = pptr->unitdata_ind.SRC_length;
 326  331  
      332 +        clone_xprt->xp_lcladdr.buf = (char *)&ud->ud_local;
      333 +
 327  334          /*
  328  335           * Copy the local transport address into the service_transport
  329  336           * handle. We will have only
 330  337           * the local IP address in options.
 331  338           */
 332  339          ((sin_t *)(clone_xprt->xp_lcladdr.buf))->sin_family = AF_UNSPEC;
 333  340          if (pptr->unitdata_ind.OPT_length && pptr->unitdata_ind.OPT_offset) {
 334  341                  char *dstopt = (char *)mp->b_rptr +
 335  342                      pptr->unitdata_ind.OPT_offset;
 336  343                  struct T_opthdr *toh = (struct T_opthdr *)dstopt;
 337  344  
 338  345                  if (toh->level == IPPROTO_IPV6 && toh->status == 0 &&
 339  346                      toh->name == IPV6_PKTINFO) {
 340  347                          struct in6_pktinfo *pkti;
 341  348  
 342  349                          dstopt += sizeof (struct T_opthdr);
 343  350                          pkti = (struct in6_pktinfo *)dstopt;
 344  351                          ((sin6_t *)(clone_xprt->xp_lcladdr.buf))->sin6_addr
 345  352                              = pkti->ipi6_addr;
 346  353                          ((sin6_t *)(clone_xprt->xp_lcladdr.buf))->sin6_family
 347  354                              = AF_INET6;
 348  355                  } else if (toh->level == IPPROTO_IP && toh->status == 0 &&
 349  356                      toh->name == IP_RECVDSTADDR) {
 350  357                          dstopt += sizeof (struct T_opthdr);
 351  358                          ((sin_t *)(clone_xprt->xp_lcladdr.buf))->sin_addr
 352  359                              = *(struct in_addr *)dstopt;
 353  360                          ((sin_t *)(clone_xprt->xp_lcladdr.buf))->sin_family
 354  361                              = AF_INET;
 355  362                  }
 356  363          }
 357  364  
 358  365          /*
  359  366           * Save the first mblk which contains the T_unitdata_ind in
 360  367           * ud_resp.  It will be used to generate the T_unitdata_req
 361  368           * during the reply.
 362  369           * We reuse any options in the T_unitdata_ind for the T_unitdata_req
 363  370           * since we must pass any SCM_UCRED across in order for TX to
 364  371           * work. We also make sure any cred_t is carried across.
 365  372           */
 366  373          if (ud->ud_resp) {
 367  374                  if (ud->ud_resp->b_cont != NULL) {
 368  375                          cmn_err(CE_WARN, "svc_clts_krecv: ud_resp %p, "
 369  376                              "b_cont %p", (void *)ud->ud_resp,
 370  377                              (void *)ud->ud_resp->b_cont);
 371  378                  }
 372  379                  freeb(ud->ud_resp);
 373  380          }
 374  381          /* Move any cred_t to the first mblk in the message */
 375  382          cr = msg_getcred(mp, NULL);
 376  383          if (cr != NULL)
 377  384                  mblk_setcred(mp, cr, NOPID);
 378  385  
 379  386          ud->ud_resp = mp;
 380  387          mp = mp->b_cont;
 381  388          ud->ud_resp->b_cont = NULL;
 382  389  
 383  390          xdrmblk_init(xdrs, mp, XDR_DECODE, 0);
 384  391  
 385  392          TRACE_0(TR_FAC_KRPC, TR_XDR_CALLMSG_START,
 386  393              "xdr_callmsg_start:");
 387  394          if (! xdr_callmsg(xdrs, msg)) {
 388  395                  XDR_DESTROY(xdrs);
 389  396                  TRACE_1(TR_FAC_KRPC, TR_XDR_CALLMSG_END,
 390  397                      "xdr_callmsg_end:(%S)", "bad");
 391  398                  RSSTAT_INCR(stats, rsxdrcall);
 392  399                  goto bad;
 393  400          }
 394  401          TRACE_1(TR_FAC_KRPC, TR_XDR_CALLMSG_END,
 395  402              "xdr_callmsg_end:(%S)", "good");
 396  403  
 397  404          clone_xprt->xp_xid = msg->rm_xid;
 398  405          ud->ud_inmp = mp;
 399  406  
 400  407          TRACE_1(TR_FAC_KRPC, TR_SVC_CLTS_KRECV_END,
 401  408              "svc_clts_krecv_end:(%S)", "good");
 402  409          return (TRUE);
 403  410  
 404  411  bad:
 405  412          freemsg(mp);
 406  413          if (ud->ud_resp) {
 407  414                  /*
 408  415                   * There should not be any left over results buffer.
 409  416                   */
 410  417                  ASSERT(ud->ud_resp->b_cont == NULL);
 411  418                  freeb(ud->ud_resp);
 412  419                  ud->ud_resp = NULL;
 413  420          }
 414  421  
 415  422          RSSTAT_INCR(stats, rsbadcalls);
 416  423          TRACE_1(TR_FAC_KRPC, TR_SVC_CLTS_KRECV_END,
 417  424              "svc_clts_krecv_end:(%S)", "bad");
 418  425          return (FALSE);
 419  426  }
 420  427  
 421  428  /*
 422  429   * Send rpc reply.
 423  430   * Serialize the reply packet into the output buffer then
 424  431   * call t_ksndudata to send it.
 425  432   */
 426  433  static bool_t
 427  434  svc_clts_ksend(SVCXPRT *clone_xprt, struct rpc_msg *msg)
 428  435  {
 429  436          /* LINTED pointer alignment */
 430  437          struct udp_data *ud = (struct udp_data *)clone_xprt->xp_p2buf;
 431  438          XDR *xdrs = &clone_xprt->xp_xdrout;
 432  439          int stat = FALSE;
 433  440          mblk_t *mp;
 434  441          int msgsz;
 435  442          struct T_unitdata_req *udreq;
 436  443          xdrproc_t xdr_results;
 437  444          caddr_t xdr_location;
 438  445          bool_t has_args;
 439  446  
 440  447          TRACE_0(TR_FAC_KRPC, TR_SVC_CLTS_KSEND_START,
 441  448              "svc_clts_ksend_start:");
 442  449  
 443  450          ASSERT(ud->ud_resp != NULL);
 444  451  
 445  452          /*
 446  453           * If there is a result procedure specified in the reply message,
 447  454           * it will be processed in the xdr_replymsg and SVCAUTH_WRAP.
 448  455           * We need to make sure it won't be processed twice, so we null
 449  456           * it for xdr_replymsg here.
 450  457           */
 451  458          has_args = FALSE;
 452  459          if (msg->rm_reply.rp_stat == MSG_ACCEPTED &&
 453  460              msg->rm_reply.rp_acpt.ar_stat == SUCCESS) {
 454  461                  if ((xdr_results = msg->acpted_rply.ar_results.proc) != NULL) {
 455  462                          has_args = TRUE;
 456  463                          xdr_location = msg->acpted_rply.ar_results.where;
 457  464                          msg->acpted_rply.ar_results.proc = xdr_void;
 458  465                          msg->acpted_rply.ar_results.where = NULL;
 459  466                  }
 460  467          }
 461  468  
 462  469          if (ud->ud_resp->b_cont == NULL) {
 463  470                  /*
 464  471                   * Allocate an initial mblk for the response data.
 465  472                   */
 466  473                  while ((mp = allocb(UD_INITSIZE, BPRI_LO)) == NULL) {
 467  474                          if (strwaitbuf(UD_INITSIZE, BPRI_LO)) {
 468  475                                  TRACE_1(TR_FAC_KRPC, TR_SVC_CLTS_KSEND_END,
 469  476                                      "svc_clts_ksend_end:(%S)", "strwaitbuf");
 470  477                                  return (FALSE);
 471  478                          }
 472  479                  }
 473  480  
 474  481                  /*
 475  482                   * Initialize the XDR encode stream.  Additional mblks
 476  483                   * will be allocated if necessary.  They will be UD_MAXSIZE
 477  484                   * sized.
 478  485                   */
 479  486                  xdrmblk_init(xdrs, mp, XDR_ENCODE, UD_MAXSIZE);
 480  487  
 481  488                  /*
 482  489                   * Leave some space for protocol headers.
 483  490                   */
 484  491                  (void) XDR_SETPOS(xdrs, 512);
 485  492                  mp->b_rptr += 512;
 486  493  
 487  494                  msg->rm_xid = clone_xprt->xp_xid;
 488  495  
 489  496                  ud->ud_resp->b_cont = mp;
 490  497  
 491  498                  TRACE_0(TR_FAC_KRPC, TR_XDR_REPLYMSG_START,
 492  499                      "xdr_replymsg_start:");
 493  500                  if (!(xdr_replymsg(xdrs, msg) &&
 494  501                      (!has_args || SVCAUTH_WRAP(&clone_xprt->xp_auth, xdrs,
 495  502                      xdr_results, xdr_location)))) {
 496  503                          XDR_DESTROY(xdrs);
 497  504                          TRACE_1(TR_FAC_KRPC, TR_XDR_REPLYMSG_END,
 498  505                              "xdr_replymsg_end:(%S)", "bad");
 499  506                          RPCLOG0(1, "xdr_replymsg/SVCAUTH_WRAP failed\n");
 500  507                          goto out;
 501  508                  }
 502  509                  TRACE_1(TR_FAC_KRPC, TR_XDR_REPLYMSG_END,
 503  510                      "xdr_replymsg_end:(%S)", "good");
 504  511  
 505  512          } else if (!(xdr_replymsg_body(xdrs, msg) &&
 506  513              (!has_args || SVCAUTH_WRAP(&clone_xprt->xp_auth, xdrs,
 507  514              xdr_results, xdr_location)))) {
 508  515                  XDR_DESTROY(xdrs);
 509  516                  RPCLOG0(1, "xdr_replymsg_body/SVCAUTH_WRAP failed\n");
 510  517                  goto out;
 511  518          }
 512  519  
 513  520          XDR_DESTROY(xdrs);
 514  521  
 515  522          msgsz = (int)xmsgsize(ud->ud_resp->b_cont);
 516  523  
 517  524          if (msgsz <= 0 || (clone_xprt->xp_msg_size != -1 &&
 518  525              msgsz > clone_xprt->xp_msg_size)) {
 519  526  #ifdef  DEBUG
 520  527                  cmn_err(CE_NOTE,
 521  528  "KRPC: server response message of %d bytes; transport limits are [0, %d]",
 522  529                      msgsz, clone_xprt->xp_msg_size);
 523  530  #endif
 524  531                  goto out;
 525  532          }
 526  533  
 527  534          /*
 528  535           * Construct the T_unitdata_req.  We take advantage of the fact that
 529  536           * T_unitdata_ind looks just like T_unitdata_req, except for the
 530  537           * primitive type.  Reusing it means we preserve the SCM_UCRED, and
 531  538           * we must preserve it for TX to work.
 532  539           *
 533  540           * This has the side effect that we can also pass certain receive-side
 534  541           * options like IPV6_PKTINFO back down the send side.  This implies
 535  542           * that we can not ASSERT on a non-NULL db_credp when we have send-side
 536  543           * options in UDP.
 537  544           */
 538  545          ASSERT(MBLKL(ud->ud_resp) >= TUNITDATAREQSZ);
 539  546          udreq = (struct T_unitdata_req *)ud->ud_resp->b_rptr;
 540  547          ASSERT(udreq->PRIM_type == T_UNITDATA_IND);
 541  548          udreq->PRIM_type = T_UNITDATA_REQ;
 542  549  
 543  550          /*
 544  551           * If the local IPv4 transport address is known use it as a source
 545  552           * address for the outgoing UDP packet.
 546  553           */
 547  554          if (((sin_t *)(clone_xprt->xp_lcladdr.buf))->sin_family == AF_INET) {
 548  555                  struct T_opthdr *opthdr;
 549  556                  in_pktinfo_t *pktinfo;
 550  557                  size_t size;
 551  558  
 552  559                  if (udreq->DEST_length == 0)
 553  560                          udreq->OPT_offset = _TPI_ALIGN_TOPT(TUNITDATAREQSZ);
 554  561                  else
 555  562                          udreq->OPT_offset = _TPI_ALIGN_TOPT(udreq->DEST_offset +
 556  563                              udreq->DEST_length);
 557  564  
 558  565                  udreq->OPT_length = sizeof (struct T_opthdr) +
 559  566                      sizeof (in_pktinfo_t);
 560  567  
 561  568                  size = udreq->OPT_length + udreq->OPT_offset;
 562  569  
 563  570                  /* make sure we have enough space for the option data */
 564  571                  mp = reallocb(ud->ud_resp, size, 1);
 565  572                  if (mp == NULL)
 566  573                          goto out;
 567  574                  ud->ud_resp = mp;
 568  575                  udreq = (struct T_unitdata_req *)mp->b_rptr;
 569  576  
 570  577                  /* set desired option header */
 571  578                  opthdr = (struct T_opthdr *)(mp->b_rptr + udreq->OPT_offset);
 572  579                  opthdr->len = udreq->OPT_length;
 573  580                  opthdr->level = IPPROTO_IP;
 574  581                  opthdr->name = IP_PKTINFO;
 575  582  
 576  583                  /*
 577  584                   * 1. set source IP of outbound packet
 578  585                   * 2. value '0' for index means IP layer uses this as source
 579  586                   *    address
 580  587                   */
 581  588                  pktinfo = (in_pktinfo_t *)(opthdr + 1);
 582  589                  (void) memset(pktinfo, 0, sizeof (in_pktinfo_t));
 583  590                  pktinfo->ipi_spec_dst.s_addr =
 584  591                      ((sin_t *)(clone_xprt->xp_lcladdr.buf))->sin_addr.s_addr;
 585  592                  pktinfo->ipi_ifindex = 0;
 586  593  
 587  594                  /* adjust the end of active data */
 588  595                  mp->b_wptr = mp->b_rptr + size;
 589  596          }
 590  597  
 591  598          put(clone_xprt->xp_wq, ud->ud_resp);
 592  599          stat = TRUE;
 593  600          ud->ud_resp = NULL;
 594  601  
 595  602  out:
 596  603          if (stat == FALSE) {
 597  604                  freemsg(ud->ud_resp);
 598  605                  ud->ud_resp = NULL;
 599  606          }
 600  607  
 601  608          /*
 602  609           * This is completely disgusting.  If public is set it is
 603  610           * a pointer to a structure whose first field is the address
 604  611           * of the function to free that structure and any related
 605  612           * stuff.  (see rrokfree in nfs_xdr.c).
 606  613           */
 607  614          if (xdrs->x_public) {
 608  615                  /* LINTED pointer alignment */
 609  616                  (**((int (**)())xdrs->x_public))(xdrs->x_public);
 610  617          }
 611  618  
 612  619          TRACE_1(TR_FAC_KRPC, TR_SVC_CLTS_KSEND_END,
 613  620              "svc_clts_ksend_end:(%S)", "done");
 614  621          return (stat);
 615  622  }
 616  623  
 617  624  /*
 618  625   * Deserialize arguments.
 619  626   */
 620  627  static bool_t
 621  628  svc_clts_kgetargs(SVCXPRT *clone_xprt, xdrproc_t xdr_args,
 622  629      caddr_t args_ptr)
 623  630  {
 624  631  
 625  632          /* LINTED pointer alignment */
 626  633          return (SVCAUTH_UNWRAP(&clone_xprt->xp_auth, &clone_xprt->xp_xdrin,
 627  634              xdr_args, args_ptr));
 628  635  
 629  636  }
 630  637  
 631  638  static bool_t
 632  639  svc_clts_kfreeargs(SVCXPRT *clone_xprt, xdrproc_t xdr_args,
 633  640      caddr_t args_ptr)
 634  641  {
 635  642          /* LINTED pointer alignment */
 636  643          struct udp_data *ud = (struct udp_data *)clone_xprt->xp_p2buf;
 637  644          XDR *xdrs = &clone_xprt->xp_xdrin;
 638  645          bool_t retval;
 639  646  
 640  647          if (args_ptr) {
 641  648                  xdrs->x_op = XDR_FREE;
 642  649                  retval = (*xdr_args)(xdrs, args_ptr);
 643  650          } else
 644  651                  retval = TRUE;
 645  652  
 646  653          XDR_DESTROY(xdrs);
 647  654  
 648  655          if (ud->ud_inmp) {
 649  656                  freemsg(ud->ud_inmp);
 650  657                  ud->ud_inmp = NULL;
 651  658          }
 652  659  
 653  660          return (retval);
 654  661  }
 655  662  
 656  663  static int32_t *
 657  664  svc_clts_kgetres(SVCXPRT *clone_xprt, int size)
 658  665  {
 659  666          /* LINTED pointer alignment */
 660  667          struct udp_data *ud = (struct udp_data *)clone_xprt->xp_p2buf;
 661  668          XDR *xdrs = &clone_xprt->xp_xdrout;
 662  669          mblk_t *mp;
 663  670          int32_t *buf;
 664  671          struct rpc_msg rply;
 665  672  
 666  673          /*
 667  674           * Allocate an initial mblk for the response data.
 668  675           */
 669  676          while ((mp = allocb(UD_INITSIZE, BPRI_LO)) == NULL) {
 670  677                  if (strwaitbuf(UD_INITSIZE, BPRI_LO)) {
 671  678                          return (NULL);
 672  679                  }
 673  680          }
 674  681  
 675  682          mp->b_cont = NULL;
 676  683  
 677  684          /*
 678  685           * Initialize the XDR encode stream.  Additional mblks
 679  686           * will be allocated if necessary.  They will be UD_MAXSIZE
 680  687           * sized.
 681  688           */
 682  689          xdrmblk_init(xdrs, mp, XDR_ENCODE, UD_MAXSIZE);
 683  690  
 684  691          /*
 685  692           * Leave some space for protocol headers.
 686  693           */
 687  694          (void) XDR_SETPOS(xdrs, 512);
 688  695          mp->b_rptr += 512;
 689  696  
 690  697          /*
 691  698           * Assume a successful RPC since most of them are.
 692  699           */
 693  700          rply.rm_xid = clone_xprt->xp_xid;
 694  701          rply.rm_direction = REPLY;
 695  702          rply.rm_reply.rp_stat = MSG_ACCEPTED;
 696  703          rply.acpted_rply.ar_verf = clone_xprt->xp_verf;
 697  704          rply.acpted_rply.ar_stat = SUCCESS;
 698  705  
 699  706          if (!xdr_replymsg_hdr(xdrs, &rply)) {
 700  707                  XDR_DESTROY(xdrs);
 701  708                  freeb(mp);
 702  709                  return (NULL);
 703  710          }
 704  711  
 705  712          buf = XDR_INLINE(xdrs, size);
 706  713  
 707  714          if (buf == NULL) {
 708  715                  XDR_DESTROY(xdrs);
 709  716                  freeb(mp);
 710  717          } else {
 711  718                  ud->ud_resp->b_cont = mp;
 712  719          }
 713  720  
 714  721          return (buf);
 715  722  }
 716  723  
 717  724  static void
 718  725  svc_clts_kfreeres(SVCXPRT *clone_xprt)
 719  726  {
 720  727          /* LINTED pointer alignment */
 721  728          struct udp_data *ud = (struct udp_data *)clone_xprt->xp_p2buf;
 722  729  
 723  730          if (ud->ud_resp == NULL || ud->ud_resp->b_cont == NULL)
 724  731                  return;
 725  732  
 726  733          XDR_DESTROY(&clone_xprt->xp_xdrout);
 727  734  
 728  735          /*
 729  736           * SVC_FREERES() is called whenever the server decides not to
  730  737           * send a normal reply. Thus, we expect only one mblk to be allocated,
 731  738           * because we have not attempted any XDR encoding.
 732  739           * If we do any XDR encoding and we get an error, then SVC_REPLY()
 733  740           * will freemsg(ud->ud_resp);
 734  741           */
 735  742          ASSERT(ud->ud_resp->b_cont->b_cont == NULL);
 736  743          freeb(ud->ud_resp->b_cont);
 737  744          ud->ud_resp->b_cont = NULL;
 738  745  }
 739  746  
 740  747  /*
  741  748   * the dup caching routines below provide a cache of non-failure
 742  749   * transaction id's.  rpc service routines can use this to detect
 743  750   * retransmissions and re-send a non-failure response.
 744  751   */
 745  752  
 746  753  /*
 747  754   * MAXDUPREQS is the number of cached items.  It should be adjusted
 748  755   * to the service load so that there is likely to be a response entry
 749  756   * when the first retransmission comes in.
 750  757   */
 751  758  #define MAXDUPREQS      8192
 752  759  
 753  760  /*
  
  754  761   * This should be appropriately scaled to MAXDUPREQS.  To keep collisions
  755  762   * to a minimum it is suggested to set this to a prime.
 756  763   */
 757  764  #define DRHASHSZ        2053
 758  765  
 759  766  #define XIDHASH(xid)    ((xid) % DRHASHSZ)
 760  767  #define DRHASH(dr)      XIDHASH((dr)->dr_xid)
 761  768  #define REQTOXID(req)   ((req)->rq_xprt->xp_xid)
 762  769  
 763  770  static int      ndupreqs = 0;
 764      -int     maxdupreqs = MAXDUPREQS;
      771 +volatile int    maxdupreqs = MAXDUPREQS;
 765  772  static kmutex_t dupreq_lock;
 766  773  static struct dupreq *drhashtbl[DRHASHSZ];
 767  774  static int      drhashstat[DRHASHSZ];
 768  775  
 769  776  static void unhash(struct dupreq *);
 770  777  
 771  778  /*
 772  779   * drmru points to the head of a circular linked list in lru order.
 773  780   * drmru->dr_next == drlru
 774  781   */
 775  782  struct dupreq *drmru;
 776  783  
 777  784  /*
 778  785   * PSARC 2003/523 Contract Private Interface
 779  786   * svc_clts_kdup
 780  787   * Changes must be reviewed by Solaris File Sharing
 781  788   * Changes must be communicated to contract-2003-523@sun.com
 782  789   *
 783  790   * svc_clts_kdup searches the request cache and returns 0 if the
 784  791   * request is not found in the cache.  If it is found, then it
 785  792   * returns the state of the request (in progress or done) and
 786  793   * the status or attributes that were part of the original reply.
 787  794   *
 788  795   * If DUP_DONE (there is a duplicate) svc_clts_kdup copies over the
 789  796   * value of the response. In that case, also return in *dupcachedp
 790  797   * whether the response free routine is cached in the dupreq - in which case
 791  798   * the caller should not be freeing it, because it will be done later
 792  799   * in the svc_clts_kdup code when the dupreq is reused.
 793  800   */
 794  801  static int
 795  802  svc_clts_kdup(struct svc_req *req, caddr_t res, int size, struct dupreq **drpp,
 796  803      bool_t *dupcachedp)
 797  804  {
 798  805          struct rpc_clts_server *stats = CLONE2STATS(req->rq_xprt);
 799  806          struct dupreq *dr;
 800  807          uint32_t xid;
 801  808          uint32_t drhash;
 802  809          int status;
 803  810  
 804  811          xid = REQTOXID(req);
 805  812          mutex_enter(&dupreq_lock);
 806  813          RSSTAT_INCR(stats, rsdupchecks);
 807  814          /*
 808  815           * Check to see whether an entry already exists in the cache.
 809  816           */
 810  817          dr = drhashtbl[XIDHASH(xid)];
 811  818          while (dr != NULL) {
 812  819                  if (dr->dr_xid == xid &&
 813  820                      dr->dr_proc == req->rq_proc &&
 814  821                      dr->dr_prog == req->rq_prog &&
 815  822                      dr->dr_vers == req->rq_vers &&
 816  823                      dr->dr_addr.len == req->rq_xprt->xp_rtaddr.len &&
 817  824                      bcmp(dr->dr_addr.buf, req->rq_xprt->xp_rtaddr.buf,
 818  825                      dr->dr_addr.len) == 0) {
 819  826                          status = dr->dr_status;
 820  827                          if (status == DUP_DONE) {
 821  828                                  bcopy(dr->dr_resp.buf, res, size);
 822  829                                  if (dupcachedp != NULL)
 823  830                                          *dupcachedp = (dr->dr_resfree != NULL);
 824  831                          } else {
 825  832                                  dr->dr_status = DUP_INPROGRESS;
 826  833                                  *drpp = dr;
 827  834                          }
 828  835                          RSSTAT_INCR(stats, rsdupreqs);
 829  836                          mutex_exit(&dupreq_lock);
 830  837                          return (status);
 831  838                  }
 832  839                  dr = dr->dr_chain;
 833  840          }
 834  841  
 835  842          /*
 836  843           * There wasn't an entry, either allocate a new one or recycle
 837  844           * an old one.
 838  845           */
 839  846          if (ndupreqs < maxdupreqs) {
 840  847                  dr = kmem_alloc(sizeof (*dr), KM_NOSLEEP);
 841  848                  if (dr == NULL) {
 842  849                          mutex_exit(&dupreq_lock);
 843  850                          return (DUP_ERROR);
 844  851                  }
 845  852                  dr->dr_resp.buf = NULL;
 846  853                  dr->dr_resp.maxlen = 0;
 847  854                  dr->dr_addr.buf = NULL;
 848  855                  dr->dr_addr.maxlen = 0;
 849  856                  if (drmru) {
 850  857                          dr->dr_next = drmru->dr_next;
 851  858                          drmru->dr_next = dr;
 852  859                  } else {
 853  860                          dr->dr_next = dr;
 854  861                  }
 855  862                  ndupreqs++;
 856  863          } else {
 857  864                  dr = drmru->dr_next;
 858  865                  while (dr->dr_status == DUP_INPROGRESS) {
 859  866                          dr = dr->dr_next;
 860  867                          if (dr == drmru->dr_next) {
 861  868                                  cmn_err(CE_WARN, "svc_clts_kdup no slots free");
 862  869                                  mutex_exit(&dupreq_lock);
 863  870                                  return (DUP_ERROR);
 864  871                          }
 865  872                  }
 866  873                  unhash(dr);
 867  874                  if (dr->dr_resfree) {
 868  875                          (*dr->dr_resfree)(dr->dr_resp.buf);
 869  876                  }
 870  877          }
 871  878          dr->dr_resfree = NULL;
 872  879          drmru = dr;
 873  880  
 874  881          dr->dr_xid = REQTOXID(req);
 875  882          dr->dr_prog = req->rq_prog;
 876  883          dr->dr_vers = req->rq_vers;
 877  884          dr->dr_proc = req->rq_proc;
 878  885          if (dr->dr_addr.maxlen < req->rq_xprt->xp_rtaddr.len) {
 879  886                  if (dr->dr_addr.buf != NULL)
 880  887                          kmem_free(dr->dr_addr.buf, dr->dr_addr.maxlen);
 881  888                  dr->dr_addr.maxlen = req->rq_xprt->xp_rtaddr.len;
 882  889                  dr->dr_addr.buf = kmem_alloc(dr->dr_addr.maxlen,
 883  890                      KM_NOSLEEP);
 884  891                  if (dr->dr_addr.buf == NULL) {
 885  892                          dr->dr_addr.maxlen = 0;
 886  893                          dr->dr_status = DUP_DROP;
 887  894                          mutex_exit(&dupreq_lock);
 888  895                          return (DUP_ERROR);
 889  896                  }
 890  897          }
 891  898          dr->dr_addr.len = req->rq_xprt->xp_rtaddr.len;
 892  899          bcopy(req->rq_xprt->xp_rtaddr.buf, dr->dr_addr.buf, dr->dr_addr.len);
 893  900          if (dr->dr_resp.maxlen < size) {
 894  901                  if (dr->dr_resp.buf != NULL)
 895  902                          kmem_free(dr->dr_resp.buf, dr->dr_resp.maxlen);
 896  903                  dr->dr_resp.maxlen = (unsigned int)size;
 897  904                  dr->dr_resp.buf = kmem_alloc(size, KM_NOSLEEP);
 898  905                  if (dr->dr_resp.buf == NULL) {
 899  906                          dr->dr_resp.maxlen = 0;
 900  907                          dr->dr_status = DUP_DROP;
 901  908                          mutex_exit(&dupreq_lock);
 902  909                          return (DUP_ERROR);
 903  910                  }
 904  911          }
 905  912          dr->dr_status = DUP_INPROGRESS;
 906  913  
 907  914          drhash = (uint32_t)DRHASH(dr);
 908  915          dr->dr_chain = drhashtbl[drhash];
 909  916          drhashtbl[drhash] = dr;
 910  917          drhashstat[drhash]++;
 911  918          mutex_exit(&dupreq_lock);
 912  919          *drpp = dr;
 913  920          return (DUP_NEW);
 914  921  }
 915  922  
 916  923  /*
 917  924   * PSARC 2003/523 Contract Private Interface
 918  925   * svc_clts_kdupdone
 919  926   * Changes must be reviewed by Solaris File Sharing
 920  927   * Changes must be communicated to contract-2003-523@sun.com
 921  928   *
 922  929   * svc_clts_kdupdone marks the request done (DUP_DONE or DUP_DROP)
 923  930   * and stores the response.
 924  931   */
 925  932  static void
 926  933  svc_clts_kdupdone(struct dupreq *dr, caddr_t res, void (*dis_resfree)(),
 927  934      int size, int status)
 928  935  {
 929  936  
 930  937          ASSERT(dr->dr_resfree == NULL);
 931  938          if (status == DUP_DONE) {
 932  939                  bcopy(res, dr->dr_resp.buf, size);
 933  940                  dr->dr_resfree = dis_resfree;
 934  941          }
 935  942          dr->dr_status = status;
 936  943  }
 937  944  
 938  945  /*
 939  946   * This routine expects that the mutex, dupreq_lock, is already held.
 940  947   */
 941  948  static void
 942  949  unhash(struct dupreq *dr)
 943  950  {
 944  951          struct dupreq *drt;
 945  952          struct dupreq *drtprev = NULL;
 946  953          uint32_t drhash;
 947  954  
 948  955          ASSERT(MUTEX_HELD(&dupreq_lock));
 949  956  
 950  957          drhash = (uint32_t)DRHASH(dr);
 951  958          drt = drhashtbl[drhash];
 952  959          while (drt != NULL) {
 953  960                  if (drt == dr) {
 954  961                          drhashstat[drhash]--;
 955  962                          if (drtprev == NULL) {
 956  963                                  drhashtbl[drhash] = drt->dr_chain;
 957  964                          } else {
 958  965                                  drtprev->dr_chain = drt->dr_chain;
 959  966                          }
 960  967                          return;
 961  968                  }
 962  969                  drtprev = drt;
 963  970                  drt = drt->dr_chain;
 964  971          }
 965  972  }
 966  973  
 967  974  void
 968  975  svc_clts_stats_init(zoneid_t zoneid, struct rpc_clts_server **statsp)
 969  976  {
 970  977          kstat_t *ksp;
 971  978          kstat_named_t *knp;
 972  979  
 973  980          knp = rpcstat_zone_init_common(zoneid, "unix", "rpc_clts_server",
 974  981              (const kstat_named_t *)&clts_rsstat_tmpl,
 975  982              sizeof (clts_rsstat_tmpl));
 976  983          /*
 977  984           * Backwards compatibility for old kstat clients
 978  985           */
 979  986          ksp = kstat_create_zone("unix", 0, "rpc_server", "rpc",
 980  987              KSTAT_TYPE_NAMED, clts_rsstat_ndata,
 981  988              KSTAT_FLAG_VIRTUAL | KSTAT_FLAG_WRITABLE, zoneid);
 982  989          if (ksp) {
 983  990                  ksp->ks_data = knp;
 984  991                  kstat_install(ksp);
 985  992          }
 986  993          *statsp = (struct rpc_clts_server *)knp;
 987  994  }
 988  995  
 989  996  void
 990  997  svc_clts_stats_fini(zoneid_t zoneid, struct rpc_clts_server **statsp)
 991  998  {
 992  999          rpcstat_zone_fini_common(zoneid, "unix", "rpc_clts_server");
 993 1000          kstat_delete_byname_zone("unix", 0, "rpc_server", zoneid);
 994 1001          kmem_free(*statsp, sizeof (clts_rsstat_tmpl));
 995 1002  }
 996 1003  
 997 1004  void
 998 1005  svc_clts_init()
 999 1006  {
1000 1007          /*
1001 1008           * Check to make sure that the clts private data will fit into
1002 1009           * the stack buffer allocated by svc_run.  The compiler should
1003 1010           * remove this check, but it's a safety net if the udp_data
1004 1011           * structure ever changes.
1005 1012           */
1006 1013          /*CONSTANTCONDITION*/
1007 1014          ASSERT(sizeof (struct udp_data) <= SVC_P2LEN);
1008 1015  
1009 1016          mutex_init(&dupreq_lock, NULL, MUTEX_DEFAULT, NULL);
1010 1017  }
  