1 /*
   2  * CDDL HEADER START
   3  *
   4  * The contents of this file are subject to the terms of the
   5  * Common Development and Distribution License (the "License").
   6  * You may not use this file except in compliance with the License.
   7  *
   8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
   9  * or http://www.opensolaris.org/os/licensing.
  10  * See the License for the specific language governing permissions
  11  * and limitations under the License.
  12  *
  13  * When distributing Covered Code, include this CDDL HEADER in each
  14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
  15  * If applicable, add the following below this CDDL HEADER, with the
  16  * fields enclosed by brackets "[]" replaced with your own identifying
  17  * information: Portions Copyright [yyyy] [name of copyright owner]
  18  *
  19  * CDDL HEADER END
  20  */
  21 
  22 /*
  23  * Copyright 2016 Nexenta Systems, Inc.  All rights reserved.
  24  * Copyright (c) 2016 by Delphix. All rights reserved.
  25  * Copyright 2019 Joyent, Inc.
  26  */
  27 
  28 /*
  29  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
  30  * Use is subject to license terms.
  31  */
  32 
  33 /*
  34  * Copyright (c) 1983, 1984, 1985, 1986, 1987, 1988, 1989 AT&T
  35  *              All Rights Reserved
  36  */
  37 
  38 /*
  39  * Portions of this source code were derived from Berkeley 4.3 BSD
  40  * under license from the Regents of the University of California.
  41  */
  42 
  43 
  44 /*
  45  * Implements a kernel based, client side RPC over Connection Oriented
  46  * Transports (COTS).
  47  */
  48 
  49 /*
  50  * Much of this file has been re-written to let NFS work better over slow
  51  * transports. A description follows.
  52  *
  53  * One of the annoying things about kRPC/COTS is that it will temporarily
  54  * create more than one connection between a client and server. This
  55  * happens because when a connection is made, the end-points entry in the
  56  * linked list of connections (headed by cm_hd), is removed so that other
  57  * threads don't mess with it. Went ahead and bit the bullet by keeping
  58  * the endpoint on the connection list and introducing state bits,
  59  * condition variables etc. to the connection entry data structure (struct
  60  * cm_xprt).
  61  *
 * Here is a summary of the changes to struct cm_xprt:
  63  *
  64  *      x_ctime is the timestamp of when the endpoint was last
  65  *      connected or disconnected. If an end-point is ever disconnected
  66  *      or re-connected, then any outstanding RPC request is presumed
  67  *      lost, telling clnt_cots_kcallit that it needs to re-send the
  68  *      request, not just wait for the original request's reply to
  69  *      arrive.
  70  *
  71  *      x_thread flag which tells us if a thread is doing a connection attempt.
  72  *
  73  *      x_waitdis flag which tells us we are waiting a disconnect ACK.
  74  *
  75  *      x_needdis flag which tells us we need to send a T_DISCONN_REQ
  76  *      to kill the connection.
  77  *
  78  *      x_needrel flag which tells us we need to send a T_ORDREL_REQ to
  79  *      gracefully close the connection.
  80  *
 *      #defined bitmasks for all the b_* bits so that more
 *      efficient (and at times less clumsy) masks can be used to
 *      manipulate state in cases where multiple bits have to be
 *      set/cleared/checked in the same critical section.
  85  *
 *      x_conn_cv and x_dis_cv are new condition variables to let
 *      threads know when the connection attempt is done, and to let
 *      the connecting thread know when the disconnect handshake is
 *      done.
  90  *
  91  * Added the CONN_HOLD() macro so that all reference holds have the same
  92  * look and feel.
  93  *
  94  * In the private (cku_private) portion of the client handle,
  95  *
 *      cku_flags replaces cku_sent, a boolean. cku_flags keeps
 *      track of whether a request has been sent, and whether the
 *      client handle's call record is on the dispatch list (so that
 *      the reply can be matched by XID to the right client handle).
 100  *      The idea of CKU_ONQUEUE is that we can exit clnt_cots_kcallit()
 101  *      and still have the response find the right client handle so
 102  *      that the retry of CLNT_CALL() gets the result. Testing, found
 103  *      situations where if the timeout was increased, performance
 104  *      degraded. This was due to us hitting a window where the thread
 105  *      was back in rfscall() (probably printing server not responding)
 106  *      while the response came back but no place to put it.
 107  *
 *      cku_ctime is just a cache of x_ctime. If they match,
 *      clnt_cots_kcallit() won't send a retry (unless the maximum
 *      receive count limit has been reached). If they don't match, then
 *      we assume the request has been lost, and a retry of the request
 *      is needed.
 113  *
 114  *      cku_recv_attempts counts the number of receive count attempts
 115  *      after one try is sent on the wire.
 116  *
 117  * Added the clnt_delay() routine so that interruptible and
 118  * noninterruptible delays are possible.
 119  *
 120  * CLNT_MIN_TIMEOUT has been bumped to 10 seconds from 3. This is used to
 121  * control how long the client delays before returned after getting
 122  * ECONNREFUSED. At 3 seconds, 8 client threads per mount really does bash
 123  * a server that may be booting and not yet started nfsd.
 124  *
 125  * CLNT_MAXRECV_WITHOUT_RETRY is a new macro (value of 3) (with a tunable)
 126  * Why don't we just wait forever (receive an infinite # of times)?
 127  * Because the server may have rebooted. More insidious is that some
 128  * servers (ours) will drop NFS/TCP requests in some cases. This is bad,
 129  * but it is a reality.
 130  *
 131  * The case of a server doing orderly release really messes up the
 132  * client's recovery, especially if the server's TCP implementation is
 * buggy.  It was found that the kRPC/COTS client was breaking some
 134  * TPI rules, such as not waiting for the acknowledgement of a
 135  * T_DISCON_REQ (hence the added case statements T_ERROR_ACK, T_OK_ACK and
 136  * T_DISCON_REQ in clnt_dispatch_notifyall()).
 137  *
 * One of the things that we've seen is that a kRPC TCP endpoint goes into
 * TIMEWAIT and thus a reconnect takes a long time to satisfy because
 * the TIMEWAIT state takes a while to finish.  If a server sends a
 141  * T_ORDREL_IND, there is little point in an RPC client doing a
 142  * T_ORDREL_REQ, because the RPC request isn't going to make it (the
 143  * server is saying that it won't accept any more data). So kRPC was
 144  * changed to send a T_DISCON_REQ when we get a T_ORDREL_IND. So now the
 145  * connection skips the TIMEWAIT state and goes straight to a bound state
 146  * that kRPC can quickly switch to connected.
 147  *
 148  * Code that issues TPI request must use waitforack() to wait for the
 149  * corresponding ack (assuming there is one) in any future modifications.
 150  * This works around problems that may be introduced by breaking TPI rules
 151  * (by submitting new calls before earlier requests have been acked) in the
 152  * case of a signal or other early return.  waitforack() depends on
 153  * clnt_dispatch_notifyconn() to issue the wakeup when the ack
 154  * arrives, so adding new TPI calls may require corresponding changes
 155  * to clnt_dispatch_notifyconn(). Presently, the timeout period is based on
 156  * CLNT_MIN_TIMEOUT which is 10 seconds. If you modify this value, be sure
 157  * not to set it too low or TPI ACKS will be lost.
 158  */
 159 
 160 #include <sys/param.h>
 161 #include <sys/types.h>
 162 #include <sys/user.h>
 163 #include <sys/systm.h>
 164 #include <sys/sysmacros.h>
 165 #include <sys/proc.h>
 166 #include <sys/socket.h>
 167 #include <sys/file.h>
 168 #include <sys/stream.h>
 169 #include <sys/strsubr.h>
 170 #include <sys/stropts.h>
 171 #include <sys/strsun.h>
 172 #include <sys/timod.h>
 173 #include <sys/tiuser.h>
 174 #include <sys/tihdr.h>
 175 #include <sys/t_kuser.h>
 176 #include <sys/fcntl.h>
 177 #include <sys/errno.h>
 178 #include <sys/kmem.h>
 179 #include <sys/debug.h>
 180 #include <sys/systm.h>
 181 #include <sys/kstat.h>
 182 #include <sys/t_lock.h>
 183 #include <sys/ddi.h>
 184 #include <sys/cmn_err.h>
 185 #include <sys/time.h>
 186 #include <sys/isa_defs.h>
 187 #include <sys/callb.h>
 188 #include <sys/sunddi.h>
 189 #include <sys/atomic.h>
 190 #include <sys/sdt.h>
 191 
 192 #include <netinet/in.h>
 193 #include <netinet/tcp.h>
 194 
 195 #include <rpc/types.h>
 196 #include <rpc/xdr.h>
 197 #include <rpc/auth.h>
 198 #include <rpc/clnt.h>
 199 #include <rpc/rpc_msg.h>
 200 
#define COTS_DEFAULT_ALLOCSIZE  2048    /* default output mblk length, */
                                        /* see cku_outbuflen */

#define WIRE_HDR_SIZE   20      /* serialized call header, sans proc number */
#define MSG_OFFSET      128     /* offset of call into the mblk */

/* IPv6 address pretty-printer; declared here, defined elsewhere. */
const char *kinet_ntop6(uchar_t *, char *, size_t);

/*
 * The client operations implemented in this file; they are installed
 * in the tcp_ops vector below.
 */
static int      clnt_cots_ksettimers(CLIENT *, struct rpc_timers *,
    struct rpc_timers *, int, void(*)(int, int, caddr_t), caddr_t, uint32_t);
static enum clnt_stat   clnt_cots_kcallit(CLIENT *, rpcproc_t, xdrproc_t,
    caddr_t, xdrproc_t, caddr_t, struct timeval);
static void     clnt_cots_kabort(CLIENT *);
static void     clnt_cots_kerror(CLIENT *, struct rpc_err *);
static bool_t   clnt_cots_kfreeres(CLIENT *, xdrproc_t, caddr_t);
static void     clnt_cots_kdestroy(CLIENT *);
static bool_t   clnt_cots_kcontrol(CLIENT *, int, char *);
 217 
 218 
/*
 * List of transports managed by the connection manager.  Entries are
 * linked through x_next; the list head is cm_hd (protected by
 * connmgr_lock, both defined below).
 */
struct cm_xprt {
        TIUSER          *x_tiptr;       /* transport handle */
        queue_t         *x_wq;          /* send queue */
        clock_t         x_time;         /* last time we handed this xprt out */
        clock_t         x_ctime;        /* time we went to CONNECTED */
        int             x_tidu_size;    /* TIDU size of this transport */
        /*
         * Connection state.  The bitfields and the X_* masks below
         * overlay the same word (via the bit/word union) so that state
         * can be manipulated either one bit at a time or several bits
         * at once.  The two #ifdef arms declare the same bits in
         * opposite order so that the X_* mask values line up with the
         * bitfields regardless of the machine's bitfield allocation
         * direction.
         */
        union {
            struct {
                unsigned int
#ifdef  _BIT_FIELDS_HTOL
                b_closing:      1,      /* we've sent a ord rel on this conn */
                b_dead:         1,      /* transport is closed or disconn */
                b_doomed:       1,      /* too many conns, let this go idle */
                b_connected:    1,      /* this connection is connected */

                b_ordrel:       1,      /* do an orderly release? */
                b_thread:       1,      /* thread doing connect */
                b_waitdis:      1,      /* waiting for disconnect ACK */
                b_needdis:      1,      /* need T_DISCON_REQ */

                b_needrel:      1,      /* need T_ORDREL_REQ */
                b_early_disc:   1,      /* got a T_ORDREL_IND or T_DISCON_IND */
                                        /* disconnect during connect */

                b_pad:          22;

#endif

#ifdef  _BIT_FIELDS_LTOH
                b_pad:          22,

                b_early_disc:   1,      /* got a T_ORDREL_IND or T_DISCON_IND */
                                        /* disconnect during connect */
                b_needrel:      1,      /* need T_ORDREL_REQ */

                b_needdis:      1,      /* need T_DISCON_REQ */
                b_waitdis:      1,      /* waiting for disconnect ACK */
                b_thread:       1,      /* thread doing connect */
                b_ordrel:       1,      /* do an orderly release? */

                b_connected:    1,      /* this connection is connected */
                b_doomed:       1,      /* too many conns, let this go idle */
                b_dead:         1,      /* transport is closed or disconn */
                b_closing:      1;      /* we've sent a ord rel on this conn */
#endif
            } bit;
            unsigned int word;          /* all the state bits at once */

/* Single-bit accessors for the state above. */
#define x_closing       x_state.bit.b_closing
#define x_dead          x_state.bit.b_dead
#define x_doomed        x_state.bit.b_doomed
#define x_connected     x_state.bit.b_connected

#define x_ordrel        x_state.bit.b_ordrel
#define x_thread        x_state.bit.b_thread
#define x_waitdis       x_state.bit.b_waitdis
#define x_needdis       x_state.bit.b_needdis

#define x_needrel       x_state.bit.b_needrel
#define x_early_disc    x_state.bit.b_early_disc

/* Whole-word accessor, for use with the X_* masks. */
#define x_state_flags   x_state.word

#define X_CLOSING       0x80000000
#define X_DEAD          0x40000000
#define X_DOOMED        0x20000000
#define X_CONNECTED     0x10000000

#define X_ORDREL        0x08000000
#define X_THREAD        0x04000000
#define X_WAITDIS       0x02000000
#define X_NEEDDIS       0x01000000

#define X_NEEDREL       0x00800000
#define X_EARLYDISC     0x00400000

/* States in which a connection must not be handed out. */
#define X_BADSTATES     (X_CLOSING | X_DEAD | X_DOOMED)

        }               x_state;
        int             x_ref;          /* number of users of this xprt */
        int             x_family;       /* address family of transport */
        dev_t           x_rdev;         /* device number of transport */
        struct cm_xprt  *x_next;        /* next entry on cm_hd list */

        struct netbuf   x_server;       /* destination address */
        struct netbuf   x_src;          /* src address (for retries) */
        kmutex_t        x_lock;         /* lock on this entry */
        kcondvar_t      x_cv;           /* to signal when can be closed */
        kcondvar_t      x_conn_cv;      /* to signal when connection attempt */
                                        /* is complete */
        kstat_t         *x_ksp;         /* per-connection kstat, if any */

        kcondvar_t      x_dis_cv;       /* to signal when disconnect attempt */
                                        /* is complete */
        zoneid_t        x_zoneid;       /* zone this xprt belongs to */
};
 315 
/*
 * Named-kstat layout for per-connection statistics.  Field order must
 * match the positional initializers in cm_kstat_template below.
 */
typedef struct cm_kstat_xprt {
        kstat_named_t   x_wq;           /* "write_queue" */
        kstat_named_t   x_server;       /* "server" (string) */
        kstat_named_t   x_family;       /* "addr_family" */
        kstat_named_t   x_rdev;         /* "device" */
        kstat_named_t   x_time;         /* "time_stamp" */
        kstat_named_t   x_state;        /* "status" */
        kstat_named_t   x_ref;          /* "ref_count" */
        kstat_named_t   x_port;         /* "port" */
} cm_kstat_xprt_t;
 326 
/*
 * Template used to initialize each connection's kstat data; the entries
 * are positional and must stay in sync with cm_kstat_xprt_t above.
 */
static cm_kstat_xprt_t cm_kstat_template = {
        { "write_queue", KSTAT_DATA_UINT32 },
        { "server",     KSTAT_DATA_STRING },
        { "addr_family", KSTAT_DATA_UINT32 },
        { "device",     KSTAT_DATA_UINT32 },
        { "time_stamp", KSTAT_DATA_UINT32 },
        { "status",     KSTAT_DATA_UINT32 },
        { "ref_count",  KSTAT_DATA_INT32 },
        { "port",       KSTAT_DATA_UINT32 },
};
 337 
 338 /*
 339  * The inverse of this is connmgr_release().
 340  */
 341 #define CONN_HOLD(Cm_entry)     {\
 342         mutex_enter(&(Cm_entry)->x_lock);        \
 343         (Cm_entry)->x_ref++; \
 344         mutex_exit(&(Cm_entry)->x_lock); \
 345 }
 346 
 347 
/*
 * Private data per rpc handle.  This structure is allocated by
 * clnt_cots_kcreate, and freed by clnt_cots_kdestroy.  It is reached
 * from the public CLIENT handle via the htop()/ptoh() macros below.
 */
typedef struct cku_private_s {
        CLIENT                  cku_client;     /* client handle */
        calllist_t              cku_call;       /* for dispatching calls */
        struct rpc_err          cku_err;        /* error status */

        struct netbuf           cku_srcaddr;    /* source address for retries */
        int                     cku_addrfmly;  /* for binding port */
        struct netbuf           cku_addr;       /* remote address */
        dev_t                   cku_device;     /* device to use */
        uint_t                  cku_flags;      /* CKU_* bits below */
#define CKU_ONQUEUE             0x1     /* call record is on dispatch list */
#define CKU_SENT                0x2     /* request has been sent */

        bool_t                  cku_progress;   /* for CLSET_PROGRESS */
        uint32_t                cku_xid;        /* current XID */
        clock_t                 cku_ctime;      /* time stamp of when */
                                                /* connection was created */
        uint_t                  cku_recv_attempts;
                                                /* receives since last send */
        XDR                     cku_outxdr;     /* xdr routine for output */
        XDR                     cku_inxdr;      /* xdr routine for input */
        char                    cku_rpchdr[WIRE_HDR_SIZE + 4];
                                                /* pre-serialized rpc header */

        uint_t                  cku_outbuflen;  /* default output mblk length */
        struct cred             *cku_cred;      /* credentials */
        bool_t                  cku_nodelayonerr;
                                                /* for CLSET_NODELAYONERR */
        int                     cku_useresvport; /* Use reserved port */
        struct rpc_cots_client  *cku_stats;     /* stats for zone */
} cku_private_t;
 382 
/*
 * Forward declarations for the connection manager internals and the
 * dispatch/delay helpers defined later in this file.
 */
static struct cm_xprt *connmgr_wrapconnect(struct cm_xprt *,
        const struct timeval *, struct netbuf *, int, struct netbuf *,
        struct rpc_err *, bool_t, bool_t, cred_t *);

static bool_t   connmgr_connect(struct cm_xprt *, queue_t *, struct netbuf *,
                                int, calllist_t *, int *, bool_t reconnect,
                                const struct timeval *, bool_t, cred_t *);

static void     *connmgr_opt_getoff(mblk_t *mp, t_uscalar_t offset,
                                t_uscalar_t length, uint_t align_size);
static bool_t   connmgr_setbufsz(calllist_t *e, queue_t *wq, cred_t *cr);
static bool_t   connmgr_getopt_int(queue_t *wq, int level, int name, int *val,
                                calllist_t *e, cred_t *cr);
static bool_t   connmgr_setopt_int(queue_t *wq, int level, int name, int val,
                                calllist_t *e, cred_t *cr);
static bool_t   connmgr_setopt(queue_t *, int, int, calllist_t *, cred_t *cr);
static void     connmgr_sndrel(struct cm_xprt *);
static void     connmgr_snddis(struct cm_xprt *);
static void     connmgr_close(struct cm_xprt *);
static void     connmgr_release(struct cm_xprt *);
static struct cm_xprt *connmgr_wrapget(struct netbuf *, const struct timeval *,
        cku_private_t *);

static struct cm_xprt *connmgr_get(struct netbuf *, const struct timeval *,
        struct netbuf *, int, struct netbuf *, struct rpc_err *, dev_t,
        bool_t, int, cred_t *);

static void connmgr_cancelconn(struct cm_xprt *);
static enum clnt_stat connmgr_cwait(struct cm_xprt *, const struct timeval *,
        bool_t);
static void connmgr_dis_and_wait(struct cm_xprt *);

static int      clnt_dispatch_send(queue_t *, mblk_t *, calllist_t *, uint_t,
                                        uint_t);

static int clnt_delay(clock_t, bool_t);

static int waitforack(calllist_t *, t_scalar_t, const struct timeval *, bool_t);
 421 
/*
 * Operations vector for TCP/IP based RPC.  The initializers are
 * positional, so the order below must match struct clnt_ops.
 */
static struct clnt_ops tcp_ops = {
        clnt_cots_kcallit,      /* do rpc call */
        clnt_cots_kabort,       /* abort call */
        clnt_cots_kerror,       /* return error status */
        clnt_cots_kfreeres,     /* free results */
        clnt_cots_kdestroy,     /* destroy rpc handle */
        clnt_cots_kcontrol,     /* the ioctl() of rpc */
        clnt_cots_ksettimers,   /* set retry timers */
};
 434 
static int rpc_kstat_instance = 0;  /* keeps the current instance */
                                /* number for the next kstat_create */

static struct cm_xprt *cm_hd = NULL;    /* head of the cm_xprt list */
static kmutex_t connmgr_lock;   /* for connection mngr's list of transports */

extern kmutex_t clnt_max_msg_lock;      /* protects *clnt_max_msg_sizep */

static calllist_t *clnt_pending = NULL;
extern kmutex_t clnt_pending_lock;

/* Number of buckets in the call dispatch hash table below. */
static int clnt_cots_hash_size = DEFAULT_HASH_SIZE;

static call_table_t *cots_call_ht;      /* XID dispatch table, see header */
 449 
/*
 * Template for the per-zone COTS client statistics (see cku_stats).
 * Field order must match the positional initializers below.
 */
static const struct rpc_cots_client {
        kstat_named_t   rccalls;        /* "calls" */
        kstat_named_t   rcbadcalls;     /* "badcalls" */
        kstat_named_t   rcbadxids;      /* "badxids" */
        kstat_named_t   rctimeouts;     /* "timeouts" */
        kstat_named_t   rcnewcreds;     /* "newcreds" */
        kstat_named_t   rcbadverfs;     /* "badverfs" */
        kstat_named_t   rctimers;       /* "timers" */
        kstat_named_t   rccantconn;     /* "cantconn" */
        kstat_named_t   rcnomem;        /* "nomem" */
        kstat_named_t   rcintrs;        /* "interrupts" */
} cots_rcstat_tmpl = {
        { "calls",      KSTAT_DATA_UINT64 },
        { "badcalls",   KSTAT_DATA_UINT64 },
        { "badxids",    KSTAT_DATA_UINT64 },
        { "timeouts",   KSTAT_DATA_UINT64 },
        { "newcreds",   KSTAT_DATA_UINT64 },
        { "badverfs",   KSTAT_DATA_UINT64 },
        { "timers",     KSTAT_DATA_UINT64 },
        { "cantconn",   KSTAT_DATA_UINT64 },
        { "nomem",      KSTAT_DATA_UINT64 },
        { "interrupts", KSTAT_DATA_UINT64 }
};
 473 
/* Atomically bump one of the rpc_cots_client counters. */
#define COTSRCSTAT_INCR(p, x)   \
        atomic_inc_64(&(p)->x.value.ui64)

#define CLNT_MAX_CONNS  1       /* concurrent connections between clnt/srvr */
int clnt_max_conns = CLNT_MAX_CONNS;    /* tunable */

#define CLNT_MIN_TIMEOUT        10      /* seconds to wait after we get a */
                                        /* connection reset */
#define CLNT_MIN_CONNTIMEOUT    5       /* seconds to wait for a connection */


/* Tunable copies of the two timeouts above. */
int clnt_cots_min_tout = CLNT_MIN_TIMEOUT;
int clnt_cots_min_conntout = CLNT_MIN_CONNTIMEOUT;

/*
 * Limit the number of times we will attempt to receive a reply without
 * re-sending a request.
 */
#define CLNT_MAXRECV_WITHOUT_RETRY      3
uint_t clnt_cots_maxrecv        = CLNT_MAXRECV_WITHOUT_RETRY;

uint_t *clnt_max_msg_sizep;     /* rpcmod's message-size sanity check */
void (*clnt_stop_idle)(queue_t *wq);

/* Convert between the public CLIENT handle and the private data. */
#define ptoh(p)         (&((p)->cku_client))
#define htop(h)         ((cku_private_t *)((h)->cl_private))
 500 
/*
 * Times to retry
 */
#define REFRESHES       2       /* authentication refreshes */

/*
 * The following is used to determine the global default behavior for
 * COTS when binding to a local port.
 *
 * If the value is set to 1 the default will be to select a reserved
 * (aka privileged) port, if the value is zero the default will be to
 * use non-reserved ports.  Users of kRPC may override this by using
 * CLNT_CONTROL() and CLSET_BINDRESVPORT.
 */
int clnt_cots_do_bindresvport = 1;

/* Zone key for per-zone COTS state; registered elsewhere in this file. */
static zone_key_t zone_cots_key;

/*
 * Defaults TCP send and receive buffer size for RPC connections.
 * These values can be tuned by /etc/system.
 */
int rpc_send_bufsz = 1024*1024;
int rpc_recv_bufsz = 1024*1024;
/*
 * To use system-wide default for TCP send and receive buffer size,
 * use /etc/system to set rpc_default_tcp_bufsz to 1:
 *
 * set rpcmod:rpc_default_tcp_bufsz=1
 */
int rpc_default_tcp_bufsz = 0;
 532 
 533 /*
 534  * We need to do this after all kernel threads in the zone have exited.
 535  */
 536 /* ARGSUSED */
 537 static void
 538 clnt_zone_destroy(zoneid_t zoneid, void *unused)
 539 {
 540         struct cm_xprt **cmp;
 541         struct cm_xprt *cm_entry;
 542         struct cm_xprt *freelist = NULL;
 543 
 544         mutex_enter(&connmgr_lock);
 545         cmp = &cm_hd;
 546         while ((cm_entry = *cmp) != NULL) {
 547                 if (cm_entry->x_zoneid == zoneid) {
 548                         *cmp = cm_entry->x_next;
 549                         cm_entry->x_next = freelist;
 550                         freelist = cm_entry;
 551                 } else {
 552                         cmp = &cm_entry->x_next;
 553                 }
 554         }
 555         mutex_exit(&connmgr_lock);
 556         while ((cm_entry = freelist) != NULL) {
 557                 freelist = cm_entry->x_next;
 558                 connmgr_close(cm_entry);
 559         }
 560 }
 561 
 562 int
 563 clnt_cots_kcreate(dev_t dev, struct netbuf *addr, int family, rpcprog_t prog,
 564     rpcvers_t vers, uint_t max_msgsize, cred_t *cred, CLIENT **ncl)
 565 {
 566         CLIENT *h;
 567         cku_private_t *p;
 568         struct rpc_msg call_msg;
 569         struct rpcstat *rpcstat;
 570 
 571         RPCLOG(8, "clnt_cots_kcreate: prog %u\n", prog);
 572 
 573         rpcstat = zone_getspecific(rpcstat_zone_key, rpc_zone());
 574         ASSERT(rpcstat != NULL);
 575 
 576         /* Allocate and intialize the client handle. */
 577         p = kmem_zalloc(sizeof (*p), KM_SLEEP);
 578 
 579         h = ptoh(p);
 580 
 581         h->cl_private = (caddr_t)p;
 582         h->cl_auth = authkern_create();
 583         h->cl_ops = &tcp_ops;
 584 
 585         cv_init(&p->cku_call.call_cv, NULL, CV_DEFAULT, NULL);
 586         mutex_init(&p->cku_call.call_lock, NULL, MUTEX_DEFAULT, NULL);
 587 
 588         /*
 589          * If the current sanity check size in rpcmod is smaller
 590          * than the size needed, then increase the sanity check.
 591          */
 592         if (max_msgsize != 0 && clnt_max_msg_sizep != NULL &&
 593             max_msgsize > *clnt_max_msg_sizep) {
 594                 mutex_enter(&clnt_max_msg_lock);
 595                 if (max_msgsize > *clnt_max_msg_sizep)
 596                         *clnt_max_msg_sizep = max_msgsize;
 597                 mutex_exit(&clnt_max_msg_lock);
 598         }
 599 
 600         p->cku_outbuflen = COTS_DEFAULT_ALLOCSIZE;
 601 
 602         /* Preserialize the call message header */
 603 
 604         call_msg.rm_xid = 0;
 605         call_msg.rm_direction = CALL;
 606         call_msg.rm_call.cb_rpcvers = RPC_MSG_VERSION;
 607         call_msg.rm_call.cb_prog = prog;
 608         call_msg.rm_call.cb_vers = vers;
 609 
 610         xdrmem_create(&p->cku_outxdr, p->cku_rpchdr, WIRE_HDR_SIZE, XDR_ENCODE);
 611 
 612         if (!xdr_callhdr(&p->cku_outxdr, &call_msg)) {
 613                 XDR_DESTROY(&p->cku_outxdr);
 614                 RPCLOG0(1, "clnt_cots_kcreate - Fatal header serialization "
 615                     "error\n");
 616                 auth_destroy(h->cl_auth);
 617                 kmem_free(p, sizeof (cku_private_t));
 618                 RPCLOG0(1, "clnt_cots_kcreate: create failed error EINVAL\n");
 619                 return (EINVAL);                /* XXX */
 620         }
 621         XDR_DESTROY(&p->cku_outxdr);
 622 
 623         /*
 624          * The zalloc initialized the fields below.
 625          * p->cku_xid = 0;
 626          * p->cku_flags = 0;
 627          * p->cku_srcaddr.len = 0;
 628          * p->cku_srcaddr.maxlen = 0;
 629          */
 630 
 631         p->cku_cred = cred;
 632         p->cku_device = dev;
 633         p->cku_addrfmly = family;
 634         p->cku_addr.buf = kmem_zalloc(addr->maxlen, KM_SLEEP);
 635         p->cku_addr.maxlen = addr->maxlen;
 636         p->cku_addr.len = addr->len;
 637         bcopy(addr->buf, p->cku_addr.buf, addr->len);
 638         p->cku_stats = rpcstat->rpc_cots_client;
 639         p->cku_useresvport = -1; /* value is has not been set */
 640 
 641         *ncl = h;
 642         return (0);
 643 }
 644 
/*
 * Abort an in-progress call: intentionally a no-op for the COTS client.
 */
/*ARGSUSED*/
static void
clnt_cots_kabort(CLIENT *h)
{
}
 650 
 651 /*
 652  * Return error info on this handle.
 653  */
 654 static void
 655 clnt_cots_kerror(CLIENT *h, struct rpc_err *err)
 656 {
 657         /* LINTED pointer alignment */
 658         cku_private_t *p = htop(h);
 659 
 660         *err = p->cku_err;
 661 }
 662 
 663 /*ARGSUSED*/
 664 static bool_t
 665 clnt_cots_kfreeres(CLIENT *h, xdrproc_t xdr_res, caddr_t res_ptr)
 666 {
 667         xdr_free(xdr_res, res_ptr);
 668 
 669         return (TRUE);
 670 }
 671 
 672 static bool_t
 673 clnt_cots_kcontrol(CLIENT *h, int cmd, char *arg)
 674 {
 675         cku_private_t *p = htop(h);
 676 
 677         switch (cmd) {
 678         case CLSET_PROGRESS:
 679                 p->cku_progress = TRUE;
 680                 return (TRUE);
 681 
 682         case CLSET_XID:
 683                 if (arg == NULL)
 684                         return (FALSE);
 685 
 686                 p->cku_xid = *((uint32_t *)arg);
 687                 return (TRUE);
 688 
 689         case CLGET_XID:
 690                 if (arg == NULL)
 691                         return (FALSE);
 692 
 693                 *((uint32_t *)arg) = p->cku_xid;
 694                 return (TRUE);
 695 
 696         case CLSET_NODELAYONERR:
 697                 if (arg == NULL)
 698                         return (FALSE);
 699 
 700                 if (*((bool_t *)arg) == TRUE) {
 701                         p->cku_nodelayonerr = TRUE;
 702                         return (TRUE);
 703                 }
 704                 if (*((bool_t *)arg) == FALSE) {
 705                         p->cku_nodelayonerr = FALSE;
 706                         return (TRUE);
 707                 }
 708                 return (FALSE);
 709 
 710         case CLGET_NODELAYONERR:
 711                 if (arg == NULL)
 712                         return (FALSE);
 713 
 714                 *((bool_t *)arg) = p->cku_nodelayonerr;
 715                 return (TRUE);
 716 
 717         case CLSET_BINDRESVPORT:
 718                 if (arg == NULL)
 719                         return (FALSE);
 720 
 721                 if (*(int *)arg != 1 && *(int *)arg != 0)
 722                         return (FALSE);
 723 
 724                 p->cku_useresvport = *(int *)arg;
 725 
 726                 return (TRUE);
 727 
 728         case CLGET_BINDRESVPORT:
 729                 if (arg == NULL)
 730                         return (FALSE);
 731 
 732                 *(int *)arg = p->cku_useresvport;
 733 
 734                 return (TRUE);
 735 
 736         default:
 737                 return (FALSE);
 738         }
 739 }
 740 
 741 /*
 742  * Destroy rpc handle.  Frees the space used for output buffer,
 743  * private data, and handle structure.
 744  */
 745 static void
 746 clnt_cots_kdestroy(CLIENT *h)
 747 {
 748         /* LINTED pointer alignment */
 749         cku_private_t *p = htop(h);
 750         calllist_t *call = &p->cku_call;
 751 
 752         RPCLOG(8, "clnt_cots_kdestroy h: %p\n", (void *)h);
 753         RPCLOG(8, "clnt_cots_kdestroy h: xid=0x%x\n", p->cku_xid);
 754 
 755         if (p->cku_flags & CKU_ONQUEUE) {
 756                 RPCLOG(64, "clnt_cots_kdestroy h: removing call for xid 0x%x "
 757                     "from dispatch list\n", p->cku_xid);
 758                 call_table_remove(call);
 759         }
 760 
 761         if (call->call_reply)
 762                 freemsg(call->call_reply);
 763         cv_destroy(&call->call_cv);
 764         mutex_destroy(&call->call_lock);
 765 
 766         kmem_free(p->cku_srcaddr.buf, p->cku_srcaddr.maxlen);
 767         kmem_free(p->cku_addr.buf, p->cku_addr.maxlen);
 768         kmem_free(p, sizeof (*p));
 769 }
 770 
/* NOTE(review): counter's users are later in this file, outside this chunk. */
static int clnt_cots_pulls;
#define RM_HDR_SIZE     4       /* record mark header size */
 773 
 774 /*
 775  * Call remote procedure.
 776  */
 777 static enum clnt_stat
 778 clnt_cots_kcallit(CLIENT *h, rpcproc_t procnum, xdrproc_t xdr_args,
 779     caddr_t argsp, xdrproc_t xdr_results, caddr_t resultsp, struct timeval wait)
 780 {
 781         /* LINTED pointer alignment */
 782         cku_private_t *p = htop(h);
 783         calllist_t *call = &p->cku_call;
 784         XDR *xdrs;
 785         struct rpc_msg reply_msg;
 786         mblk_t *mp;
 787 #ifdef  RPCDEBUG
 788         clock_t time_sent;
 789 #endif
 790         struct netbuf *retryaddr;
 791         struct cm_xprt *cm_entry = NULL;
 792         queue_t *wq;
 793         int len, waitsecs, max_waitsecs;
 794         int mpsize;
 795         int refreshes = REFRESHES;
 796         int interrupted;
 797         int tidu_size;
 798         enum clnt_stat status;
 799         struct timeval cwait;
 800         bool_t delay_first = FALSE;
 801         clock_t ticks, now;
 802 
 803         RPCLOG(2, "clnt_cots_kcallit, procnum %u\n", procnum);
 804         COTSRCSTAT_INCR(p->cku_stats, rccalls);
 805 
 806         RPCLOG(2, "clnt_cots_kcallit: wait.tv_sec: %ld\n", wait.tv_sec);
 807         RPCLOG(2, "clnt_cots_kcallit: wait.tv_usec: %ld\n", wait.tv_usec);
 808         /*
 809          * Bug ID 1240234:
 810          * Look out for zero length timeouts. We don't want to
 811          * wait zero seconds for a connection to be established.
 812          */
 813         if (wait.tv_sec < clnt_cots_min_conntout) {
 814                 cwait.tv_sec = clnt_cots_min_conntout;
 815                 cwait.tv_usec = 0;
 816                 RPCLOG(8, "clnt_cots_kcallit: wait.tv_sec (%ld) too low,",
 817                     wait.tv_sec);
 818                 RPCLOG(8, " setting to: %d\n", clnt_cots_min_conntout);
 819         } else {
 820                 cwait = wait;
 821         }
 822 
 823 call_again:
 824         if (cm_entry) {
 825                 connmgr_release(cm_entry);
 826                 cm_entry = NULL;
 827         }
 828 
 829         mp = NULL;
 830 
 831         /*
 832          * If the call is not a retry, allocate a new xid and cache it
 833          * for future retries.
 834          * Bug ID 1246045:
 835          * Treat call as a retry for purposes of binding the source
 836          * port only if we actually attempted to send anything on
 837          * the previous call.
 838          */
 839         if (p->cku_xid == 0) {
 840                 p->cku_xid = alloc_xid();
 841                 call->call_zoneid = rpc_zoneid();
 842 
 843                 /*
 844                  * We need to ASSERT here that our xid != 0 because this
 845                  * determines whether or not our call record gets placed on
 846                  * the hash table or the linked list.  By design, we mandate
 847                  * that RPC calls over cots must have xid's != 0, so we can
 848                  * ensure proper management of the hash table.
 849                  */
 850                 ASSERT(p->cku_xid != 0);
 851 
 852                 retryaddr = NULL;
 853                 p->cku_flags &= ~CKU_SENT;
 854 
 855                 if (p->cku_flags & CKU_ONQUEUE) {
 856                         RPCLOG(8, "clnt_cots_kcallit: new call, dequeuing old"
 857                             " one (%p)\n", (void *)call);
 858                         call_table_remove(call);
 859                         p->cku_flags &= ~CKU_ONQUEUE;
 860                         RPCLOG(64, "clnt_cots_kcallit: removing call from "
 861                             "dispatch list because xid was zero (now 0x%x)\n",
 862                             p->cku_xid);
 863                 }
 864 
 865                 if (call->call_reply != NULL) {
 866                         freemsg(call->call_reply);
 867                         call->call_reply = NULL;
 868                 }
 869         } else if (p->cku_srcaddr.buf == NULL || p->cku_srcaddr.len == 0) {
 870                 retryaddr = NULL;
 871 
 872         } else if (p->cku_flags & CKU_SENT) {
 873                 retryaddr = &p->cku_srcaddr;
 874 
 875         } else {
 876                 /*
 877                  * Bug ID 1246045: Nothing was sent, so set retryaddr to
 878                  * NULL and let connmgr_get() bind to any source port it
 879                  * can get.
 880                  */
 881                 retryaddr = NULL;
 882         }
 883 
 884         RPCLOG(64, "clnt_cots_kcallit: xid = 0x%x", p->cku_xid);
 885         RPCLOG(64, " flags = 0x%x\n", p->cku_flags);
 886 
 887         p->cku_err.re_status = RPC_TIMEDOUT;
 888         p->cku_err.re_errno = p->cku_err.re_terrno = 0;
 889 
 890         cm_entry = connmgr_wrapget(retryaddr, &cwait, p);
 891 
 892         if (cm_entry == NULL) {
 893                 RPCLOG(1, "clnt_cots_kcallit: can't connect status %s\n",
 894                     clnt_sperrno(p->cku_err.re_status));
 895 
 896                 /*
 897                  * The reasons why we fail to create a connection are
 898                  * varied. In most cases we don't want the caller to
 899                  * immediately retry. This could have one or more
 900                  * bad effects. This includes flooding the net with
 901                  * connect requests to ports with no listener; a hard
 902                  * kernel loop due to all the "reserved" TCP ports being
 903                  * in use.
 904                  */
 905                 delay_first = TRUE;
 906 
 907                 /*
		 * Even if we end up returning EINTR, we still count
		 * a "can't connect", because the connection manager
 910                  * might have been committed to waiting for or timing out on
 911                  * a connection.
 912                  */
 913                 COTSRCSTAT_INCR(p->cku_stats, rccantconn);
 914                 switch (p->cku_err.re_status) {
 915                 case RPC_INTR:
 916                         p->cku_err.re_errno = EINTR;
 917 
 918                         /*
 919                          * No need to delay because a UNIX signal(2)
 920                          * interrupted us. The caller likely won't
 921                          * retry the CLNT_CALL() and even if it does,
 922                          * we assume the caller knows what it is doing.
 923                          */
 924                         delay_first = FALSE;
 925                         break;
 926 
 927                 case RPC_TIMEDOUT:
 928                         p->cku_err.re_errno = ETIMEDOUT;
 929 
 930                         /*
 931                          * No need to delay because timed out already
 932                          * on the connection request and assume that the
 933                          * transport time out is longer than our minimum
 934                          * timeout, or least not too much smaller.
 935                          */
 936                         delay_first = FALSE;
 937                         break;
 938 
 939                 case RPC_SYSTEMERROR:
 940                 case RPC_TLIERROR:
 941                         /*
 942                          * We want to delay here because a transient
 943                          * system error has a better chance of going away
 944                          * if we delay a bit. If it's not transient, then
 945                          * we don't want end up in a hard kernel loop
 946                          * due to retries.
 947                          */
 948                         ASSERT(p->cku_err.re_errno != 0);
 949                         break;
 950 
 951 
 952                 case RPC_CANTCONNECT:
 953                         /*
 954                          * RPC_CANTCONNECT is set on T_ERROR_ACK which
 955                          * implies some error down in the TCP layer or
 956                          * below. If cku_nodelayonerror is set then we
 957                          * assume the caller knows not to try too hard.
 958                          */
 959                         RPCLOG0(8, "clnt_cots_kcallit: connection failed,");
 960                         RPCLOG0(8, " re_status=RPC_CANTCONNECT,");
 961                         RPCLOG(8, " re_errno=%d,", p->cku_err.re_errno);
 962                         RPCLOG(8, " cku_nodelayonerr=%d", p->cku_nodelayonerr);
 963                         if (p->cku_nodelayonerr == TRUE)
 964                                 delay_first = FALSE;
 965 
 966                         p->cku_err.re_errno = EIO;
 967 
 968                         break;
 969 
 970                 case RPC_XPRTFAILED:
 971                         /*
 972                          * We want to delay here because we likely
 973                          * got a refused connection.
 974                          */
 975                         if (p->cku_err.re_errno == 0)
 976                                 p->cku_err.re_errno = EIO;
 977 
 978                         RPCLOG(1, "clnt_cots_kcallit: transport failed: %d\n",
 979                             p->cku_err.re_errno);
 980 
 981                         break;
 982 
 983                 default:
 984                         /*
 985                          * We delay here because it is better to err
 986                          * on the side of caution. If we got here then
 987                          * status could have been RPC_SUCCESS, but we
 988                          * know that we did not get a connection, so
 989                          * force the rpc status to RPC_CANTCONNECT.
 990                          */
 991                         p->cku_err.re_status = RPC_CANTCONNECT;
 992                         p->cku_err.re_errno = EIO;
 993                         break;
 994                 }
 995                 if (delay_first == TRUE)
 996                         ticks = clnt_cots_min_tout * drv_usectohz(1000000);
 997                 goto cots_done;
 998         }
 999 
1000         /*
1001          * If we've never sent any request on this connection (send count
1002          * is zero, or the connection has been reset), cache the
1003          * the connection's create time and send a request (possibly a retry)
1004          */
1005         if ((p->cku_flags & CKU_SENT) == 0 ||
1006             p->cku_ctime != cm_entry->x_ctime) {
1007                 p->cku_ctime = cm_entry->x_ctime;
1008 
1009         } else if ((p->cku_flags & CKU_SENT) && (p->cku_flags & CKU_ONQUEUE) &&
1010             (call->call_reply != NULL ||
1011             p->cku_recv_attempts < clnt_cots_maxrecv)) {
1012 
1013                 /*
1014                  * If we've sent a request and our call is on the dispatch
1015                  * queue and we haven't made too many receive attempts, then
1016                  * don't re-send, just receive.
1017                  */
1018                 p->cku_recv_attempts++;
1019                 goto read_again;
1020         }
1021 
1022         /*
1023          * Now we create the RPC request in a STREAMS message.  We have to do
1024          * this after the call to connmgr_get so that we have the correct
1025          * TIDU size for the transport.
1026          */
1027         tidu_size = cm_entry->x_tidu_size;
1028         len = MSG_OFFSET + MAX(tidu_size, RM_HDR_SIZE + WIRE_HDR_SIZE);
1029 
1030         while ((mp = allocb(len, BPRI_MED)) == NULL) {
1031                 if (strwaitbuf(len, BPRI_MED)) {
1032                         p->cku_err.re_status = RPC_SYSTEMERROR;
1033                         p->cku_err.re_errno = ENOSR;
1034                         COTSRCSTAT_INCR(p->cku_stats, rcnomem);
1035                         goto cots_done;
1036                 }
1037         }
1038         xdrs = &p->cku_outxdr;
1039         xdrmblk_init(xdrs, mp, XDR_ENCODE, tidu_size);
1040         mpsize = MBLKSIZE(mp);
1041         ASSERT(mpsize >= len);
1042         ASSERT(mp->b_rptr == mp->b_datap->db_base);
1043 
1044         /*
1045          * If the size of mblk is not appreciably larger than what we
1046          * asked, then resize the mblk to exactly len bytes. The reason for
1047          * this: suppose len is 1600 bytes, the tidu is 1460 bytes
1048          * (from TCP over ethernet), and the arguments to the RPC require
1049          * 2800 bytes. Ideally we want the protocol to render two
1050          * ~1400 byte segments over the wire. However if allocb() gives us a 2k
1051          * mblk, and we allocate a second mblk for the remainder, the protocol
1052          * module may generate 3 segments over the wire:
1053          * 1460 bytes for the first, 448 (2048 - 1600) for the second, and
1054          * 892 for the third. If we "waste" 448 bytes in the first mblk,
1055          * the XDR encoding will generate two ~1400 byte mblks, and the
1056          * protocol module is more likely to produce properly sized segments.
1057          */
1058         if ((mpsize >> 1) <= len)
1059                 mp->b_rptr += (mpsize - len);
1060 
1061         /*
1062          * Adjust b_rptr to reserve space for the non-data protocol headers
1063          * any downstream modules might like to add, and for the
1064          * record marking header.
1065          */
1066         mp->b_rptr += (MSG_OFFSET + RM_HDR_SIZE);
1067 
1068         if (h->cl_auth->ah_cred.oa_flavor != RPCSEC_GSS) {
1069                 /* Copy in the preserialized RPC header information. */
1070                 bcopy(p->cku_rpchdr, mp->b_rptr, WIRE_HDR_SIZE);
1071 
1072                 /* Use XDR_SETPOS() to set the b_wptr to past the RPC header. */
1073                 XDR_SETPOS(xdrs, (uint_t)(mp->b_rptr - mp->b_datap->db_base +
1074                     WIRE_HDR_SIZE));
1075 
1076                 ASSERT((mp->b_wptr - mp->b_rptr) == WIRE_HDR_SIZE);
1077 
1078                 /* Serialize the procedure number and the arguments. */
1079                 if ((!XDR_PUTINT32(xdrs, (int32_t *)&procnum)) ||
1080                     (!AUTH_MARSHALL(h->cl_auth, xdrs, p->cku_cred)) ||
1081                     (!(*xdr_args)(xdrs, argsp))) {
1082                         XDR_DESTROY(xdrs);
1083                         p->cku_err.re_status = RPC_CANTENCODEARGS;
1084                         p->cku_err.re_errno = EIO;
1085                         goto cots_done;
1086                 }
1087 
1088                 (*(uint32_t *)(mp->b_rptr)) = p->cku_xid;
1089         } else {
1090                 uint32_t *uproc = (uint32_t *)&p->cku_rpchdr[WIRE_HDR_SIZE];
1091                 IXDR_PUT_U_INT32(uproc, procnum);
1092 
1093                 (*(uint32_t *)(&p->cku_rpchdr[0])) = p->cku_xid;
1094 
1095                 /* Use XDR_SETPOS() to set the b_wptr. */
1096                 XDR_SETPOS(xdrs, (uint_t)(mp->b_rptr - mp->b_datap->db_base));
1097 
1098                 /* Serialize the procedure number and the arguments. */
1099                 if (!AUTH_WRAP(h->cl_auth, p->cku_rpchdr, WIRE_HDR_SIZE+4,
1100                     xdrs, xdr_args, argsp)) {
1101                         XDR_DESTROY(xdrs);
1102                         p->cku_err.re_status = RPC_CANTENCODEARGS;
1103                         p->cku_err.re_errno = EIO;
1104                         goto cots_done;
1105                 }
1106         }
1107 
1108         XDR_DESTROY(xdrs);
1109 
1110         RPCLOG(2, "clnt_cots_kcallit: connected, sending call, tidu_size %d\n",
1111             tidu_size);
1112 
1113         wq = cm_entry->x_wq;
1114         waitsecs = 0;
1115 
1116 dispatch_again:
1117         status = clnt_dispatch_send(wq, mp, call, p->cku_xid,
1118             (p->cku_flags & CKU_ONQUEUE));
1119 
1120         if ((status == RPC_CANTSEND) && (call->call_reason == ENOBUFS)) {
1121                 /*
1122                  * QFULL condition, allow some time for queue to drain
1123                  * and try again. Give up after waiting for all timeout
1124                  * specified for the call, or zone is going away.
1125                  */
1126                 max_waitsecs = wait.tv_sec ? wait.tv_sec : clnt_cots_min_tout;
1127                 if ((waitsecs++ < max_waitsecs) &&
1128                     !(zone_status_get(curproc->p_zone) >=
1129                     ZONE_IS_SHUTTING_DOWN)) {
1130 
1131                         /* wait 1 sec for queue to drain */
1132                         if (clnt_delay(drv_usectohz(1000000),
1133                             h->cl_nosignal) == EINTR) {
1134                                 p->cku_err.re_errno = EINTR;
1135                                 p->cku_err.re_status = RPC_INTR;
1136 
1137                                 goto cots_done;
1138                         }
1139 
1140                         /* and try again */
1141                         goto dispatch_again;
1142                 }
1143                 p->cku_err.re_status = status;
1144                 p->cku_err.re_errno = call->call_reason;
1145                 DTRACE_PROBE(krpc__e__clntcots__kcallit__cantsend);
1146 
1147                 goto cots_done;
1148         }
1149 
1150         if (waitsecs) {
1151                 /* adjust timeout to account for time wait to send */
1152                 wait.tv_sec -= waitsecs;
1153                 if (wait.tv_sec < 0) {
1154                         /* pick up reply on next retry */
1155                         wait.tv_sec = 0;
1156                 }
1157                 DTRACE_PROBE2(clnt_cots__sendwait, CLIENT *, h,
1158                     int, waitsecs);
1159         }
1160 
1161         RPCLOG(64, "clnt_cots_kcallit: sent call for xid 0x%x\n",
1162             (uint_t)p->cku_xid);
1163         p->cku_flags = (CKU_ONQUEUE|CKU_SENT);
1164         p->cku_recv_attempts = 1;
1165 
1166 #ifdef  RPCDEBUG
1167         time_sent = ddi_get_lbolt();
1168 #endif
1169 
1170         /*
1171          * Wait for a reply or a timeout.  If there is no error or timeout,
1172          * (both indicated by call_status), call->call_reply will contain
1173          * the RPC reply message.
1174          */
1175 read_again:
1176         mutex_enter(&call->call_lock);
1177         interrupted = 0;
1178         if (call->call_status == RPC_TIMEDOUT) {
1179                 /*
1180                  * Indicate that the lwp is not to be stopped while waiting
1181                  * for this network traffic.  This is to avoid deadlock while
1182                  * debugging a process via /proc and also to avoid recursive
1183                  * mutex_enter()s due to NFS page faults while stopping
1184                  * (NFS holds locks when it calls here).
1185                  */
1186                 clock_t cv_wait_ret;
1187                 clock_t timout;
1188                 clock_t oldlbolt;
1189 
1190                 klwp_t *lwp = ttolwp(curthread);
1191 
1192                 if (lwp != NULL)
1193                         lwp->lwp_nostop++;
1194 
1195                 oldlbolt = ddi_get_lbolt();
1196                 timout = wait.tv_sec * drv_usectohz(1000000) +
1197                     drv_usectohz(wait.tv_usec) + oldlbolt;
1198                 /*
		 * Iterate until the call_status is changed to something
		 * other than RPC_TIMEDOUT, or until cv_timedwait_sig() returns
		 * something <= 0.  The latter means that we timed
1202                  * out.
1203                  */
1204                 if (h->cl_nosignal)
1205                         while ((cv_wait_ret = cv_timedwait(&call->call_cv,
1206                             &call->call_lock, timout)) > 0 &&
1207                             call->call_status == RPC_TIMEDOUT)
1208                                 ;
1209                 else
1210                         while ((cv_wait_ret = cv_timedwait_sig(
1211                             &call->call_cv,
1212                             &call->call_lock, timout)) > 0 &&
1213                             call->call_status == RPC_TIMEDOUT)
1214                                 ;
1215 
1216                 switch (cv_wait_ret) {
1217                 case 0:
1218                         /*
1219                          * If we got out of the above loop with
1220                          * cv_timedwait_sig() returning 0, then we were
1221                          * interrupted regardless what call_status is.
1222                          */
1223                         interrupted = 1;
1224                         break;
1225                 case -1:
1226                         /* cv_timedwait_sig() timed out */
1227                         break;
1228                 default:
1229 
1230                         /*
1231                          * We were cv_signaled(). If we didn't
1232                          * get a successful call_status and returned
1233                          * before time expired, delay up to clnt_cots_min_tout
1234                          * seconds so that the caller doesn't immediately
1235                          * try to call us again and thus force the
1236                          * same condition that got us here (such
1237                          * as a RPC_XPRTFAILED due to the server not
			 * listening on the end-point).
1239                          */
1240                         if (call->call_status != RPC_SUCCESS) {
1241                                 clock_t curlbolt;
1242                                 clock_t diff;
1243 
1244                                 curlbolt = ddi_get_lbolt();
1245                                 ticks = clnt_cots_min_tout *
1246                                     drv_usectohz(1000000);
1247                                 diff = curlbolt - oldlbolt;
1248                                 if (diff < ticks) {
1249                                         delay_first = TRUE;
1250                                         if (diff > 0)
1251                                                 ticks -= diff;
1252                                 }
1253                         }
1254                         break;
1255                 }
1256 
1257                 if (lwp != NULL)
1258                         lwp->lwp_nostop--;
1259         }
1260         /*
1261          * Get the reply message, if any.  This will be freed at the end
1262          * whether or not an error occurred.
1263          */
1264         mp = call->call_reply;
1265         call->call_reply = NULL;
1266 
1267         /*
1268          * call_err is the error info when the call is on dispatch queue.
1269          * cku_err is the error info returned to the caller.
1270          * Sync cku_err with call_err for local message processing.
1271          */
1272 
1273         status = call->call_status;
1274         p->cku_err = call->call_err;
1275         mutex_exit(&call->call_lock);
1276 
1277         if (status != RPC_SUCCESS) {
1278                 switch (status) {
1279                 case RPC_TIMEDOUT:
1280                         now = ddi_get_lbolt();
1281                         if (interrupted) {
1282                                 COTSRCSTAT_INCR(p->cku_stats, rcintrs);
1283                                 p->cku_err.re_status = RPC_INTR;
1284                                 p->cku_err.re_errno = EINTR;
1285                                 RPCLOG(1, "clnt_cots_kcallit: xid 0x%x",
1286                                     p->cku_xid);
1287                                 RPCLOG(1, "signal interrupted at %ld", now);
1288                                 RPCLOG(1, ", was sent at %ld\n", time_sent);
1289                         } else {
1290                                 COTSRCSTAT_INCR(p->cku_stats, rctimeouts);
1291                                 p->cku_err.re_errno = ETIMEDOUT;
1292                                 RPCLOG(1, "clnt_cots_kcallit: timed out at %ld",
1293                                     now);
1294                                 RPCLOG(1, ", was sent at %ld\n", time_sent);
1295                         }
1296                         break;
1297 
1298                 case RPC_XPRTFAILED:
1299                         if (p->cku_err.re_errno == 0)
1300                                 p->cku_err.re_errno = EIO;
1301 
1302                         RPCLOG(1, "clnt_cots_kcallit: transport failed: %d\n",
1303                             p->cku_err.re_errno);
1304                         break;
1305 
1306                 case RPC_SYSTEMERROR:
1307                         ASSERT(p->cku_err.re_errno);
1308                         RPCLOG(1, "clnt_cots_kcallit: system error: %d\n",
1309                             p->cku_err.re_errno);
1310                         break;
1311 
1312                 default:
1313                         p->cku_err.re_status = RPC_SYSTEMERROR;
1314                         p->cku_err.re_errno = EIO;
1315                         RPCLOG(1, "clnt_cots_kcallit: error: %s\n",
1316                             clnt_sperrno(status));
1317                         break;
1318                 }
1319                 if (p->cku_err.re_status != RPC_TIMEDOUT) {
1320 
1321                         if (p->cku_flags & CKU_ONQUEUE) {
1322                                 call_table_remove(call);
1323                                 p->cku_flags &= ~CKU_ONQUEUE;
1324                         }
1325 
1326                         RPCLOG(64, "clnt_cots_kcallit: non TIMEOUT so xid 0x%x "
1327                             "taken off dispatch list\n", p->cku_xid);
1328                         if (call->call_reply) {
1329                                 freemsg(call->call_reply);
1330                                 call->call_reply = NULL;
1331                         }
1332                 } else if (wait.tv_sec != 0) {
1333                         /*
1334                          * We've sent the request over TCP and so we have
1335                          * every reason to believe it will get
1336                          * delivered. In which case returning a timeout is not
1337                          * appropriate.
1338                          */
1339                         if (p->cku_progress == TRUE &&
1340                             p->cku_recv_attempts < clnt_cots_maxrecv) {
1341                                 p->cku_err.re_status = RPC_INPROGRESS;
1342                         }
1343                 }
1344                 goto cots_done;
1345         }
1346 
1347         xdrs = &p->cku_inxdr;
1348         xdrmblk_init(xdrs, mp, XDR_DECODE, 0);
1349 
1350         reply_msg.rm_direction = REPLY;
1351         reply_msg.rm_reply.rp_stat = MSG_ACCEPTED;
1352         reply_msg.acpted_rply.ar_stat = SUCCESS;
1353 
1354         reply_msg.acpted_rply.ar_verf = _null_auth;
1355         /*
1356          *  xdr_results will be done in AUTH_UNWRAP.
1357          */
1358         reply_msg.acpted_rply.ar_results.where = NULL;
1359         reply_msg.acpted_rply.ar_results.proc = xdr_void;
1360 
1361         if (xdr_replymsg(xdrs, &reply_msg)) {
1362                 enum clnt_stat re_status;
1363 
1364                 _seterr_reply(&reply_msg, &p->cku_err);
1365 
1366                 re_status = p->cku_err.re_status;
1367                 if (re_status == RPC_SUCCESS) {
1368                         /*
1369                          * Reply is good, check auth.
1370                          */
1371                         if (!AUTH_VALIDATE(h->cl_auth,
1372                             &reply_msg.acpted_rply.ar_verf)) {
1373                                 COTSRCSTAT_INCR(p->cku_stats, rcbadverfs);
1374                                 RPCLOG0(1, "clnt_cots_kcallit: validation "
1375                                     "failure\n");
1376                                 freemsg(mp);
1377                                 (void) xdr_rpc_free_verifier(xdrs, &reply_msg);
1378                                 XDR_DESTROY(xdrs);
1379                                 mutex_enter(&call->call_lock);
1380                                 if (call->call_reply == NULL)
1381                                         call->call_status = RPC_TIMEDOUT;
1382                                 mutex_exit(&call->call_lock);
1383                                 goto read_again;
1384                         } else if (!AUTH_UNWRAP(h->cl_auth, xdrs,
1385                             xdr_results, resultsp)) {
1386                                 RPCLOG0(1, "clnt_cots_kcallit: validation "
1387                                     "failure (unwrap)\n");
1388                                 p->cku_err.re_status = RPC_CANTDECODERES;
1389                                 p->cku_err.re_errno = EIO;
1390                         }
1391                 } else {
1392                         /* set errno in case we can't recover */
1393                         if (re_status != RPC_VERSMISMATCH &&
1394                             re_status != RPC_AUTHERROR &&
1395                             re_status != RPC_PROGVERSMISMATCH)
1396                                 p->cku_err.re_errno = EIO;
1397 
1398                         if (re_status == RPC_AUTHERROR) {
1399                                 /*
1400                                  * Maybe our credential need to be refreshed
1401                                  */
1402                                 if (cm_entry) {
1403                                         /*
1404                                          * There is the potential that the
1405                                          * cm_entry has/will be marked dead,
1406                                          * so drop the connection altogether,
1407                                          * force REFRESH to establish new
1408                                          * connection.
1409                                          */
1410                                         connmgr_cancelconn(cm_entry);
1411                                         cm_entry = NULL;
1412                                 }
1413 
1414                                 (void) xdr_rpc_free_verifier(xdrs,
1415                                     &reply_msg);
1416                                 XDR_DESTROY(xdrs);
1417 
1418                                 if (p->cku_flags & CKU_ONQUEUE) {
1419                                         call_table_remove(call);
1420                                         p->cku_flags &= ~CKU_ONQUEUE;
1421                                 }
1422                                 RPCLOG(64,
1423                                     "clnt_cots_kcallit: AUTH_ERROR, xid"
1424                                     " 0x%x removed off dispatch list\n",
1425                                     p->cku_xid);
1426                                 if (call->call_reply) {
1427                                         freemsg(call->call_reply);
1428                                         call->call_reply = NULL;
1429                                 }
1430 
1431                                 if ((refreshes > 0) &&
1432                                     AUTH_REFRESH(h->cl_auth, &reply_msg,
1433                                     p->cku_cred)) {
1434                                         refreshes--;
1435                                         freemsg(mp);
1436                                         mp = NULL;
1437 
1438                                         COTSRCSTAT_INCR(p->cku_stats,
1439                                             rcbadcalls);
1440                                         COTSRCSTAT_INCR(p->cku_stats,
1441                                             rcnewcreds);
1442                                         goto call_again;
1443                                 }
1444 
1445                                 /*
1446                                  * We have used the client handle to
1447                                  * do an AUTH_REFRESH and the RPC status may
1448                                  * be set to RPC_SUCCESS; Let's make sure to
1449                                  * set it to RPC_AUTHERROR.
1450                                  */
1451                                 p->cku_err.re_status = RPC_AUTHERROR;
1452 
1453                                 /*
1454                                  * Map recoverable and unrecoverable
1455                                  * authentication errors to appropriate errno
1456                                  */
1457                                 switch (p->cku_err.re_why) {
1458                                 case AUTH_TOOWEAK:
1459                                         /*
1460                                          * This could be a failure where the
1461                                          * server requires use of a reserved
1462                                          * port,  check and optionally set the
1463                                          * client handle useresvport trying
1464                                          * one more time. Next go round we
1465                                          * fall out with the tooweak error.
1466                                          */
1467                                         if (p->cku_useresvport != 1) {
1468                                                 p->cku_useresvport = 1;
1469                                                 p->cku_xid = 0;
1470                                                 freemsg(mp);
1471                                                 mp = NULL;
1472                                                 goto call_again;
1473                                         }
1474                                         /* FALLTHRU */
1475                                 case AUTH_BADCRED:
1476                                 case AUTH_BADVERF:
1477                                 case AUTH_INVALIDRESP:
1478                                 case AUTH_FAILED:
1479                                 case RPCSEC_GSS_NOCRED:
1480                                 case RPCSEC_GSS_FAILED:
1481                                                 p->cku_err.re_errno = EACCES;
1482                                                 break;
1483                                 case AUTH_REJECTEDCRED:
1484                                 case AUTH_REJECTEDVERF:
1485                                 default:        p->cku_err.re_errno = EIO;
1486                                                 break;
1487                                 }
1488                                 RPCLOG(1, "clnt_cots_kcallit : authentication"
1489                                     " failed with RPC_AUTHERROR of type %d\n",
1490                                     (int)p->cku_err.re_why);
1491                                 goto cots_done;
1492                         }
1493                 }
1494         } else {
1495                 /* reply didn't decode properly. */
1496                 p->cku_err.re_status = RPC_CANTDECODERES;
1497                 p->cku_err.re_errno = EIO;
1498                 RPCLOG0(1, "clnt_cots_kcallit: decode failure\n");
1499         }
1500 
1501         (void) xdr_rpc_free_verifier(xdrs, &reply_msg);
1502         XDR_DESTROY(xdrs);
1503 
1504         if (p->cku_flags & CKU_ONQUEUE) {
1505                 call_table_remove(call);
1506                 p->cku_flags &= ~CKU_ONQUEUE;
1507         }
1508 
1509         RPCLOG(64, "clnt_cots_kcallit: xid 0x%x taken off dispatch list",
1510             p->cku_xid);
1511         RPCLOG(64, " status is %s\n", clnt_sperrno(p->cku_err.re_status));
1512 cots_done:
1513         if (cm_entry)
1514                 connmgr_release(cm_entry);
1515 
1516         if (mp != NULL)
1517                 freemsg(mp);
1518         if ((p->cku_flags & CKU_ONQUEUE) == 0 && call->call_reply) {
1519                 freemsg(call->call_reply);
1520                 call->call_reply = NULL;
1521         }
1522         if (p->cku_err.re_status != RPC_SUCCESS) {
1523                 RPCLOG0(1, "clnt_cots_kcallit: tail-end failure\n");
1524                 COTSRCSTAT_INCR(p->cku_stats, rcbadcalls);
1525         }
1526 
1527         /*
1528          * No point in delaying if the zone is going away.
1529          */
1530         if (delay_first == TRUE &&
1531             !(zone_status_get(curproc->p_zone) >= ZONE_IS_SHUTTING_DOWN)) {
1532                 if (clnt_delay(ticks, h->cl_nosignal) == EINTR) {
1533                         p->cku_err.re_errno = EINTR;
1534                         p->cku_err.re_status = RPC_INTR;
1535                 }
1536         }
1537         return (p->cku_err.re_status);
1538 }
1539 
1540 /*
1541  * Kinit routine for cots.  This sets up the correct operations in
1542  * the client handle, as the handle may have previously been a clts
1543  * handle, and clears the xid field so there is no way a new call
1544  * could be mistaken for a retry.  It also sets in the handle the
1545  * information that is passed at create/kinit time but needed at
1546  * call time, as cots creates the transport at call time - device,
1547  * address of the server, protocol family.
1548  */
void
clnt_cots_kinit(CLIENT *h, dev_t dev, int family, struct netbuf *addr,
    int max_msgsize, cred_t *cred)
{
	/* LINTED pointer alignment */
	cku_private_t *p = htop(h);
	calllist_t *call = &p->cku_call;

	/* Install the COTS ops vector; the handle may have been a clts one. */
	h->cl_ops = &tcp_ops;
	/*
	 * Discard stale per-call state left over from a previous use of
	 * this handle: take the call off the dispatch list if it is still
	 * queued, and free any reply mblk that was never consumed.
	 */
	if (p->cku_flags & CKU_ONQUEUE) {
		call_table_remove(call);
		p->cku_flags &= ~CKU_ONQUEUE;
		RPCLOG(64, "clnt_cots_kinit: removing call for xid 0x%x from"
		    " dispatch list\n", p->cku_xid);
	}

	if (call->call_reply != NULL) {
		freemsg(call->call_reply);
		call->call_reply = NULL;
	}

	call->call_bucket = NULL;
	call->call_hash = 0;

	/*
	 * We don't clear cku_flags here, because clnt_cots_kcallit()
	 * takes care of handling the cku_flags reset.
	 */
	/*
	 * Clear the xid so a new call cannot be mistaken for a retry,
	 * and cache the call-time parameters (device, address family,
	 * credential) needed when the transport is created at call time.
	 */
	p->cku_xid = 0;
	p->cku_device = dev;
	p->cku_addrfmly = family;
	p->cku_cred = cred;

	/*
	 * (Re)allocate the cached server address if the current buffer is
	 * too small for the new address; the new buffer is sized to the
	 * caller's maxlen (>= len), and is zeroed.
	 */
	if (p->cku_addr.maxlen < addr->len) {
		if (p->cku_addr.maxlen != 0 && p->cku_addr.buf != NULL)
			kmem_free(p->cku_addr.buf, p->cku_addr.maxlen);
		p->cku_addr.buf = kmem_zalloc(addr->maxlen, KM_SLEEP);
		p->cku_addr.maxlen = addr->maxlen;
	}

	p->cku_addr.len = addr->len;
	bcopy(addr->buf, p->cku_addr.buf, addr->len);

	/*
	 * If the current sanity check size in rpcmod is smaller
	 * than the size needed, then increase the sanity check.
	 * The size is re-checked under clnt_max_msg_lock to avoid
	 * losing a concurrent larger update.
	 */
	if (max_msgsize != 0 && clnt_max_msg_sizep != NULL &&
	    max_msgsize > *clnt_max_msg_sizep) {
		mutex_enter(&clnt_max_msg_lock);
		if (max_msgsize > *clnt_max_msg_sizep)
			*clnt_max_msg_sizep = max_msgsize;
		mutex_exit(&clnt_max_msg_lock);
	}
}
1604 
1605 /*
1606  * ksettimers is a no-op for cots, with the exception of setting the xid.
1607  */
1608 /* ARGSUSED */
1609 static int
1610 clnt_cots_ksettimers(CLIENT *h, struct rpc_timers *t, struct rpc_timers *all,
1611     int minimum, void (*feedback)(int, int, caddr_t), caddr_t arg, uint32_t xid)
1612 {
1613         /* LINTED pointer alignment */
1614         cku_private_t *p = htop(h);
1615 
1616         if (xid)
1617                 p->cku_xid = xid;
1618         COTSRCSTAT_INCR(p->cku_stats, rctimers);
1619         return (0);
1620 }
1621 
1622 extern void rpc_poptimod(struct vnode *);
1623 extern int kstr_push(struct vnode *, char *);
1624 
1625 int
1626 conn_kstat_update(kstat_t *ksp, int rw)
1627 {
1628         struct cm_xprt *cm_entry;
1629         struct cm_kstat_xprt *cm_ksp_data;
1630         uchar_t *b;
1631         char *fbuf;
1632 
1633         if (rw == KSTAT_WRITE)
1634                 return (EACCES);
1635         if (ksp == NULL || ksp->ks_private == NULL)
1636                 return (EIO);
1637         cm_entry  = (struct cm_xprt *)ksp->ks_private;
1638         cm_ksp_data = (struct cm_kstat_xprt *)ksp->ks_data;
1639 
1640         cm_ksp_data->x_wq.value.ui32 = (uint32_t)(uintptr_t)cm_entry->x_wq;
1641         cm_ksp_data->x_family.value.ui32 = cm_entry->x_family;
1642         cm_ksp_data->x_rdev.value.ui32 = (uint32_t)cm_entry->x_rdev;
1643         cm_ksp_data->x_time.value.ui32 = cm_entry->x_time;
1644         cm_ksp_data->x_ref.value.ui32 = cm_entry->x_ref;
1645         cm_ksp_data->x_state.value.ui32 = cm_entry->x_state_flags;
1646 
1647         if (cm_entry->x_server.buf) {
1648                 fbuf = cm_ksp_data->x_server.value.str.addr.ptr;
1649                 if (cm_entry->x_family == AF_INET &&
1650                     cm_entry->x_server.len ==
1651                     sizeof (struct sockaddr_in)) {
1652                         struct sockaddr_in  *sa;
1653                         sa = (struct sockaddr_in *)
1654                                 cm_entry->x_server.buf;
1655                         b = (uchar_t *)&sa->sin_addr;
1656                         (void) sprintf(fbuf,
1657                             "%d.%d.%d.%d", b[0] & 0xFF, b[1] & 0xFF,
1658                             b[2] & 0xFF, b[3] & 0xFF);
1659                         cm_ksp_data->x_port.value.ui32 = ntohs(sa->sin_port);
1660                 } else if (cm_entry->x_family == AF_INET6 &&
1661                                 cm_entry->x_server.len >=
1662                                 sizeof (struct sockaddr_in6)) {
1663                         /* extract server IP address & port */
1664                         struct sockaddr_in6 *sin6;
1665                         sin6 = (struct sockaddr_in6 *)cm_entry->x_server.buf;
1666                         (void) kinet_ntop6((uchar_t *)&sin6->sin6_addr, fbuf,
1667                                 INET6_ADDRSTRLEN);
1668                         cm_ksp_data->x_port.value.ui32 = ntohs(sin6->sin6_port);
1669                 } else {
1670                         struct sockaddr_in  *sa;
1671 
1672                         sa = (struct sockaddr_in *)cm_entry->x_server.buf;
1673                         b = (uchar_t *)&sa->sin_addr;
1674                         (void) sprintf(fbuf,
1675                             "%d.%d.%d.%d", b[0] & 0xFF, b[1] & 0xFF,
1676                             b[2] & 0xFF, b[3] & 0xFF);
1677                 }
1678                 KSTAT_NAMED_STR_BUFLEN(&cm_ksp_data->x_server) =
1679                     strlen(fbuf) + 1;
1680         }
1681 
1682         return (0);
1683 }
1684 
1685 
1686 /*
1687  * We want a version of delay which is interruptible by a UNIX signal
1688  * Return EINTR if an interrupt occured.
1689  */
1690 static int
1691 clnt_delay(clock_t ticks, bool_t nosignal)
1692 {
1693         if (nosignal == TRUE) {
1694                 delay(ticks);
1695                 return (0);
1696         }
1697         return (delay_sig(ticks));
1698 }
1699 
1700 /*
1701  * Wait for a connection until a timeout, or until we are
1702  * signalled that there has been a connection state change.
1703  */
static enum clnt_stat
connmgr_cwait(struct cm_xprt *cm_entry, const struct timeval *waitp,
    bool_t nosignal)
{
	bool_t interrupted;
	clock_t timout, cv_stat;
	enum clnt_stat clstat;
	unsigned int old_state;

	/* Caller holds connmgr_lock; the cv waits drop and reacquire it. */
	ASSERT(MUTEX_HELD(&connmgr_lock));
	/*
	 * We wait for the transport connection to be made, or an
	 * indication that it could not be made.
	 */
	clstat = RPC_TIMEDOUT;
	interrupted = FALSE;

	old_state = cm_entry->x_state_flags;
	/*
	 * Now loop until cv_timedwait{_sig} returns because of
	 * a signal(0) or timeout(-1) or cv_signal(>0). But it may be
	 * cv_signalled for various other reasons too. So loop
	 * until there is a state change on the connection.
	 */

	/* Convert the caller's relative timeout to an absolute lbolt time. */
	timout = waitp->tv_sec * drv_usectohz(1000000) +
	    drv_usectohz(waitp->tv_usec) + ddi_get_lbolt();

	if (nosignal) {
		/* Not interruptible: only timeout or a state change ends it. */
		while ((cv_stat = cv_timedwait(&cm_entry->x_conn_cv,
		    &connmgr_lock, timout)) > 0 &&
		    cm_entry->x_state_flags == old_state)
			;
	} else {
		while ((cv_stat = cv_timedwait_sig(&cm_entry->x_conn_cv,
		    &connmgr_lock, timout)) > 0 &&
		    cm_entry->x_state_flags == old_state)
			;

		if (cv_stat == 0) /* got intr signal? */
			interrupted = TRUE;
	}

	/*
	 * Success only if the connection came up cleanly: connected and
	 * none of the bad-state flags set.
	 */
	if ((cm_entry->x_state_flags & (X_BADSTATES|X_CONNECTED)) ==
	    X_CONNECTED) {
		clstat = RPC_SUCCESS;
	} else {
		if (interrupted == TRUE)
			clstat = RPC_INTR;
		RPCLOG(1, "connmgr_cwait: can't connect, error: %s\n",
		    clnt_sperrno(clstat));
	}

	return (clstat);
}
1759 
1760 /*
1761  * Primary interface for how RPC grabs a connection.
1762  */
1763 static struct cm_xprt *
1764 connmgr_wrapget(
1765         struct netbuf *retryaddr,
1766         const struct timeval *waitp,
1767         cku_private_t *p)
1768 {
1769         struct cm_xprt *cm_entry;
1770 
1771         cm_entry = connmgr_get(retryaddr, waitp, &p->cku_addr, p->cku_addrfmly,
1772             &p->cku_srcaddr, &p->cku_err, p->cku_device,
1773             p->cku_client.cl_nosignal, p->cku_useresvport, p->cku_cred);
1774 
1775         if (cm_entry == NULL) {
1776                 /*
1777                  * Re-map the call status to RPC_INTR if the err code is
1778                  * EINTR. This can happen if calls status is RPC_TLIERROR.
1779                  * However, don't re-map if signalling has been turned off.
1780                  * XXX Really need to create a separate thread whenever
1781                  * there isn't an existing connection.
1782                  */
1783                 if (p->cku_err.re_errno == EINTR) {
1784                         if (p->cku_client.cl_nosignal == TRUE)
1785                                 p->cku_err.re_errno = EIO;
1786                         else
1787                                 p->cku_err.re_status = RPC_INTR;
1788                 }
1789         }
1790 
1791         return (cm_entry);
1792 }
1793 
1794 /*
1795  * Obtains a transport to the server specified in addr.  If a suitable transport
1796  * does not already exist in the list of cached transports, a new connection
1797  * is created, connected, and added to the list. The connection is for sending
1798  * only - the reply message may come back on another transport connection.
1799  *
1800  * To implement round-robin load balancing with multiple client connections,
1801  * the last entry on the list is always selected. Once the entry is selected
1802  * it's re-inserted to the head of the list.
1803  */
1804 static struct cm_xprt *
1805 connmgr_get(
1806         struct netbuf   *retryaddr,
1807         const struct timeval    *waitp, /* changed to a ptr to converse stack */
1808         struct netbuf   *destaddr,
1809         int             addrfmly,
1810         struct netbuf   *srcaddr,
1811         struct rpc_err  *rpcerr,
1812         dev_t           device,
1813         bool_t          nosignal,
1814         int             useresvport,
1815         cred_t          *cr)
1816 {
1817         struct cm_xprt *cm_entry;
1818         struct cm_xprt *lru_entry;
1819         struct cm_xprt **cmp, **prev;
1820         queue_t *wq;
1821         TIUSER *tiptr;
1822         int i;
1823         int retval;
1824         int tidu_size;
1825         bool_t  connected;
1826         zoneid_t zoneid = rpc_zoneid();
1827 
1828         /*
1829          * If the call is not a retry, look for a transport entry that
1830          * goes to the server of interest.
1831          */
1832         mutex_enter(&connmgr_lock);
1833 
1834         if (retryaddr == NULL) {
1835 use_new_conn:
1836                 i = 0;
1837                 cm_entry = lru_entry = NULL;
1838 
1839                 prev = cmp = &cm_hd;
1840                 while ((cm_entry = *cmp) != NULL) {
1841                         ASSERT(cm_entry != cm_entry->x_next);
1842                         /*
1843                          * Garbage collect conections that are marked
1844                          * for needs disconnect.
1845                          */
1846                         if (cm_entry->x_needdis) {
1847                                 CONN_HOLD(cm_entry);
1848                                 connmgr_dis_and_wait(cm_entry);
1849                                 connmgr_release(cm_entry);
1850                                 /*
1851                                  * connmgr_lock could have been
1852                                  * dropped for the disconnect
1853                                  * processing so start over.
1854                                  */
1855                                 goto use_new_conn;
1856                         }
1857 
1858                         /*
1859                          * Garbage collect the dead connections that have
1860                          * no threads working on them.
1861                          */
1862                         if ((cm_entry->x_state_flags & (X_DEAD|X_THREAD)) ==
1863                             X_DEAD) {
1864                                 mutex_enter(&cm_entry->x_lock);
1865                                 if (cm_entry->x_ref != 0) {
1866                                         /*
1867                                          * Currently in use.
1868                                          * Cleanup later.
1869                                          */
1870                                         cmp = &cm_entry->x_next;
1871                                         mutex_exit(&cm_entry->x_lock);
1872                                         continue;
1873                                 }
1874                                 mutex_exit(&cm_entry->x_lock);
1875                                 *cmp = cm_entry->x_next;
1876                                 mutex_exit(&connmgr_lock);
1877                                 connmgr_close(cm_entry);
1878                                 mutex_enter(&connmgr_lock);
1879                                 goto use_new_conn;
1880                         }
1881 
1882 
1883                         if ((cm_entry->x_state_flags & X_BADSTATES) == 0 &&
1884                             cm_entry->x_zoneid == zoneid &&
1885                             cm_entry->x_rdev == device &&
1886                             destaddr->len == cm_entry->x_server.len &&
1887                             bcmp(destaddr->buf, cm_entry->x_server.buf,
1888                             destaddr->len) == 0) {
1889                                 /*
1890                                  * If the matching entry isn't connected,
1891                                  * attempt to reconnect it.
1892                                  */
1893                                 if (cm_entry->x_connected == FALSE) {
1894                                         /*
1895                                          * We don't go through trying
1896                                          * to find the least recently
1897                                          * used connected because
1898                                          * connmgr_reconnect() briefly
1899                                          * dropped the connmgr_lock,
1900                                          * allowing a window for our
1901                                          * accounting to be messed up.
1902                                          * In any case, a re-connected
1903                                          * connection is as good as
1904                                          * a LRU connection.
1905                                          */
1906                                         return (connmgr_wrapconnect(cm_entry,
1907                                             waitp, destaddr, addrfmly, srcaddr,
1908                                             rpcerr, TRUE, nosignal, cr));
1909                                 }
1910                                 i++;
1911 
1912                                 /* keep track of the last entry */
1913                                 lru_entry = cm_entry;
1914                                 prev = cmp;
1915                         }
1916                         cmp = &cm_entry->x_next;
1917                 }
1918 
1919                 if (i > clnt_max_conns) {
1920                         RPCLOG(8, "connmgr_get: too many conns, dooming entry"
1921                             " %p\n", (void *)lru_entry->x_tiptr);
1922                         lru_entry->x_doomed = TRUE;
1923                         goto use_new_conn;
1924                 }
1925 
1926                 /*
1927                  * If we are at the maximum number of connections to
1928                  * the server, hand back the least recently used one.
1929                  */
1930                 if (i == clnt_max_conns) {
1931                         /*
1932                          * Copy into the handle the source address of
1933                          * the connection, which we will use in case of
1934                          * a later retry.
1935                          */
1936                         if (srcaddr->len != lru_entry->x_src.len) {
1937                                 if (srcaddr->len > 0)
1938                                         kmem_free(srcaddr->buf,
1939                                             srcaddr->maxlen);
1940                                 ASSERT(lru_entry->x_src.len != 0);
1941                                 srcaddr->buf = kmem_alloc(
1942                                     lru_entry->x_src.len, KM_SLEEP);
1943                                 srcaddr->maxlen = srcaddr->len =
1944                                     lru_entry->x_src.len;
1945                         }
1946                         bcopy(lru_entry->x_src.buf, srcaddr->buf, srcaddr->len);
1947                         RPCLOG(2, "connmgr_get: call going out on %p\n",
1948                             (void *)lru_entry);
1949                         lru_entry->x_time = ddi_get_lbolt();
1950                         CONN_HOLD(lru_entry);
1951 
1952                         if ((i > 1) && (prev != &cm_hd)) {
1953                                 /*
1954                                  * remove and re-insert entry at head of list.
1955                                  */
1956                                 *prev = lru_entry->x_next;
1957                                 lru_entry->x_next = cm_hd;
1958                                 cm_hd = lru_entry;
1959                         }
1960 
1961                         mutex_exit(&connmgr_lock);
1962                         return (lru_entry);
1963                 }
1964 
1965         } else {
1966                 /*
1967                  * This is the retry case (retryaddr != NULL).  Retries must
1968                  * be sent on the same source port as the original call.
1969                  */
1970 
1971                 /*
1972                  * Walk the list looking for a connection with a source address
1973                  * that matches the retry address.
1974                  */
1975 start_retry_loop:
1976                 cmp = &cm_hd;
1977                 while ((cm_entry = *cmp) != NULL) {
1978                         ASSERT(cm_entry != cm_entry->x_next);
1979 
1980                         /*
1981                          * determine if this connection matches the passed
1982                          * in retry address.  If it does not match, advance
1983                          * to the next element on the list.
1984                          */
1985                         if (zoneid != cm_entry->x_zoneid ||
1986                             device != cm_entry->x_rdev ||
1987                             retryaddr->len != cm_entry->x_src.len ||
1988                             bcmp(retryaddr->buf, cm_entry->x_src.buf,
1989                             retryaddr->len) != 0) {
1990                                 cmp = &cm_entry->x_next;
1991                                 continue;
1992                         }
1993                         /*
1994                          * Garbage collect conections that are marked
1995                          * for needs disconnect.
1996                          */
1997                         if (cm_entry->x_needdis) {
1998                                 CONN_HOLD(cm_entry);
1999                                 connmgr_dis_and_wait(cm_entry);
2000                                 connmgr_release(cm_entry);
2001                                 /*
2002                                  * connmgr_lock could have been
2003                                  * dropped for the disconnect
2004                                  * processing so start over.
2005                                  */
2006                                 goto start_retry_loop;
2007                         }
2008                         /*
2009                          * Garbage collect the dead connections that have
2010                          * no threads working on them.
2011                          */
2012                         if ((cm_entry->x_state_flags & (X_DEAD|X_THREAD)) ==
2013                             X_DEAD) {
2014                                 mutex_enter(&cm_entry->x_lock);
2015                                 if (cm_entry->x_ref != 0) {
2016                                         /*
2017                                          * Currently in use.
2018                                          * Cleanup later.
2019                                          */
2020                                         cmp = &cm_entry->x_next;
2021                                         mutex_exit(&cm_entry->x_lock);
2022                                         continue;
2023                                 }
2024                                 mutex_exit(&cm_entry->x_lock);
2025                                 *cmp = cm_entry->x_next;
2026                                 mutex_exit(&connmgr_lock);
2027                                 connmgr_close(cm_entry);
2028                                 mutex_enter(&connmgr_lock);
2029                                 goto start_retry_loop;
2030                         }
2031 
2032                         /*
2033                          * Sanity check: if the connection with our source
2034                          * port is going to some other server, something went
2035                          * wrong, as we never delete connections (i.e. release
2036                          * ports) unless they have been idle.  In this case,
2037                          * it is probably better to send the call out using
2038                          * a new source address than to fail it altogether,
2039                          * since that port may never be released.
2040                          */
2041                         if (destaddr->len != cm_entry->x_server.len ||
2042                             bcmp(destaddr->buf, cm_entry->x_server.buf,
2043                             destaddr->len) != 0) {
2044                                 RPCLOG(1, "connmgr_get: tiptr %p"
2045                                     " is going to a different server"
2046                                     " with the port that belongs"
2047                                     " to us!\n", (void *)cm_entry->x_tiptr);
2048                                 retryaddr = NULL;
2049                                 goto use_new_conn;
2050                         }
2051 
2052                         /*
2053                          * If the connection of interest is not connected and we
2054                          * can't reconnect it, then the server is probably
2055                          * still down.  Return NULL to the caller and let it
2056                          * retry later if it wants to.  We have a delay so the
2057                          * machine doesn't go into a tight retry loop.  If the
2058                          * entry was already connected, or the reconnected was
2059                          * successful, return this entry.
2060                          */
2061                         if (cm_entry->x_connected == FALSE) {
2062                                 return (connmgr_wrapconnect(cm_entry,
2063                                     waitp, destaddr, addrfmly, NULL,
2064                                     rpcerr, TRUE, nosignal, cr));
2065                         } else {
2066                                 CONN_HOLD(cm_entry);
2067 
2068                                 cm_entry->x_time = ddi_get_lbolt();
2069                                 mutex_exit(&connmgr_lock);
2070                                 RPCLOG(2, "connmgr_get: found old "
2071                                     "transport %p for retry\n",
2072                                     (void *)cm_entry);
2073                                 return (cm_entry);
2074                         }
2075                 }
2076 
2077                 /*
2078                  * We cannot find an entry in the list for this retry.
2079                  * Either the entry has been removed temporarily to be
2080                  * reconnected by another thread, or the original call
2081                  * got a port but never got connected,
2082                  * and hence the transport never got put in the
2083                  * list.  Fall through to the "create new connection" code -
2084                  * the former case will fail there trying to rebind the port,
2085                  * and the later case (and any other pathological cases) will
2086                  * rebind and reconnect and not hang the client machine.
2087                  */
2088                 RPCLOG0(8, "connmgr_get: no entry in list for retry\n");
2089         }
2090         /*
2091          * Set up a transport entry in the connection manager's list.
2092          */
2093         cm_entry = (struct cm_xprt *)
2094             kmem_zalloc(sizeof (struct cm_xprt), KM_SLEEP);
2095 
2096         cm_entry->x_server.buf = kmem_alloc(destaddr->len, KM_SLEEP);
2097         bcopy(destaddr->buf, cm_entry->x_server.buf, destaddr->len);
2098         cm_entry->x_server.len = cm_entry->x_server.maxlen = destaddr->len;
2099 
2100         cm_entry->x_state_flags = X_THREAD;
2101         cm_entry->x_ref = 1;
2102         cm_entry->x_family = addrfmly;
2103         cm_entry->x_rdev = device;
2104         cm_entry->x_zoneid = zoneid;
2105         mutex_init(&cm_entry->x_lock, NULL, MUTEX_DEFAULT, NULL);
2106         cv_init(&cm_entry->x_cv, NULL, CV_DEFAULT, NULL);
2107         cv_init(&cm_entry->x_conn_cv, NULL, CV_DEFAULT, NULL);
2108         cv_init(&cm_entry->x_dis_cv, NULL, CV_DEFAULT, NULL);
2109 
2110         /*
2111          * Note that we add this partially initialized entry to the
2112          * connection list. This is so that we don't have connections to
2113          * the same server.
2114          *
2115          * Note that x_src is not initialized at this point. This is because
2116          * retryaddr might be NULL in which case x_src is whatever
2117          * t_kbind/bindresvport gives us. If another thread wants a
2118          * connection to the same server, seemingly we have an issue, but we
2119          * don't. If the other thread comes in with retryaddr == NULL, then it
2120          * will never look at x_src, and it will end up waiting in
2121          * connmgr_cwait() for the first thread to finish the connection
2122          * attempt. If the other thread comes in with retryaddr != NULL, then
2123          * that means there was a request sent on a connection, in which case
	 * the connection should already exist. Thus the first thread
	 * never gets here ... it finds the connection to its server in the
2126          * connection list.
2127          *
2128          * But even if theory is wrong, in the retryaddr != NULL case, the 2nd
2129          * thread will skip us because x_src.len == 0.
2130          */
2131         cm_entry->x_next = cm_hd;
2132         cm_hd = cm_entry;
2133         mutex_exit(&connmgr_lock);
2134 
2135         /*
2136          * Either we didn't find an entry to the server of interest, or we
2137          * don't have the maximum number of connections to that server -
2138          * create a new connection.
2139          */
2140         RPCLOG0(8, "connmgr_get: creating new connection\n");
2141         rpcerr->re_status = RPC_TLIERROR;
2142 
2143         i = t_kopen(NULL, device, FREAD|FWRITE|FNDELAY, &tiptr, zone_kcred());
2144         if (i) {
2145                 RPCLOG(1, "connmgr_get: can't open cots device, error %d\n", i);
2146                 rpcerr->re_errno = i;
2147                 connmgr_cancelconn(cm_entry);
2148                 return (NULL);
2149         }
2150         rpc_poptimod(tiptr->fp->f_vnode);
2151 
2152         if (i = strioctl(tiptr->fp->f_vnode, I_PUSH, (intptr_t)"rpcmod", 0,
2153             K_TO_K, kcred, &retval)) {
2154                 RPCLOG(1, "connmgr_get: can't push cots module, %d\n", i);
2155                 (void) t_kclose(tiptr, 1);
2156                 rpcerr->re_errno = i;
2157                 connmgr_cancelconn(cm_entry);
2158                 return (NULL);
2159         }
2160 
2161         if (i = strioctl(tiptr->fp->f_vnode, RPC_CLIENT, 0, 0, K_TO_K,
2162             kcred, &retval)) {
2163                 RPCLOG(1, "connmgr_get: can't set client status with cots "
2164                     "module, %d\n", i);
2165                 (void) t_kclose(tiptr, 1);
2166                 rpcerr->re_errno = i;
2167                 connmgr_cancelconn(cm_entry);
2168                 return (NULL);
2169         }
2170 
2171         mutex_enter(&connmgr_lock);
2172 
2173         wq = tiptr->fp->f_vnode->v_stream->sd_wrq->q_next;
2174         cm_entry->x_wq = wq;
2175 
2176         mutex_exit(&connmgr_lock);
2177 
2178         if (i = strioctl(tiptr->fp->f_vnode, I_PUSH, (intptr_t)"timod", 0,
2179             K_TO_K, kcred, &retval)) {
2180                 RPCLOG(1, "connmgr_get: can't push timod, %d\n", i);
2181                 (void) t_kclose(tiptr, 1);
2182                 rpcerr->re_errno = i;
2183                 connmgr_cancelconn(cm_entry);
2184                 return (NULL);
2185         }
2186 
2187         /*
2188          * If the caller has not specified reserved port usage then
2189          * take the system default.
2190          */
2191         if (useresvport == -1)
2192                 useresvport = clnt_cots_do_bindresvport;
2193 
2194         if ((useresvport || retryaddr != NULL) &&
2195             (addrfmly == AF_INET || addrfmly == AF_INET6)) {
2196                 bool_t alloc_src = FALSE;
2197 
2198                 if (srcaddr->len != destaddr->len) {
2199                         kmem_free(srcaddr->buf, srcaddr->maxlen);
2200                         srcaddr->buf = kmem_zalloc(destaddr->len, KM_SLEEP);
2201                         srcaddr->maxlen = destaddr->len;
2202                         srcaddr->len = destaddr->len;
2203                         alloc_src = TRUE;
2204                 }
2205 
2206                 if ((i = bindresvport(tiptr, retryaddr, srcaddr, TRUE)) != 0) {
2207                         (void) t_kclose(tiptr, 1);
2208                         RPCLOG(1, "connmgr_get: couldn't bind, retryaddr: "
2209                             "%p\n", (void *)retryaddr);
2210 
2211                         /*
2212                          * 1225408: If we allocated a source address, then it
2213                          * is either garbage or all zeroes. In that case
2214                          * we need to clear srcaddr.
2215                          */
2216                         if (alloc_src == TRUE) {
2217                                 kmem_free(srcaddr->buf, srcaddr->maxlen);
2218                                 srcaddr->maxlen = srcaddr->len = 0;
2219                                 srcaddr->buf = NULL;
2220                         }
2221                         rpcerr->re_errno = i;
2222                         connmgr_cancelconn(cm_entry);
2223                         return (NULL);
2224                 }
2225         } else {
2226                 if ((i = t_kbind(tiptr, NULL, NULL)) != 0) {
2227                         RPCLOG(1, "clnt_cots_kcreate: t_kbind: %d\n", i);
2228                         (void) t_kclose(tiptr, 1);
2229                         rpcerr->re_errno = i;
2230                         connmgr_cancelconn(cm_entry);
2231                         return (NULL);
2232                 }
2233         }
2234 
2235         {
2236                 /*
2237                  * Keep the kernel stack lean. Don't move this call
2238                  * declaration to the top of this function because a
2239                  * call is declared in connmgr_wrapconnect()
2240                  */
2241                 calllist_t call;
2242 
2243                 bzero(&call, sizeof (call));
2244                 cv_init(&call.call_cv, NULL, CV_DEFAULT, NULL);
2245 
2246                 /*
		 * This is a bound end-point so don't close its stream.
2248                  */
2249                 connected = connmgr_connect(cm_entry, wq, destaddr, addrfmly,
2250                     &call, &tidu_size, FALSE, waitp, nosignal, cr);
2251                 *rpcerr = call.call_err;
2252                 cv_destroy(&call.call_cv);
2253 
2254         }
2255 
2256         mutex_enter(&connmgr_lock);
2257 
2258         /*
2259          * Set up a transport entry in the connection manager's list.
2260          */
2261         if (srcaddr->len > 0) {
2262                 cm_entry->x_src.buf = kmem_alloc(srcaddr->len, KM_SLEEP);
2263                 bcopy(srcaddr->buf, cm_entry->x_src.buf, srcaddr->len);
2264                 cm_entry->x_src.len = cm_entry->x_src.maxlen = srcaddr->len;
2265         } /* Else kmem_zalloc() of cm_entry already sets its x_src to NULL. */
2266 
2267         cm_entry->x_tiptr = tiptr;
2268         cm_entry->x_time = ddi_get_lbolt();
2269 
2270         if (tiptr->tp_info.servtype == T_COTS_ORD)
2271                 cm_entry->x_ordrel = TRUE;
2272         else
2273                 cm_entry->x_ordrel = FALSE;
2274 
2275         cm_entry->x_tidu_size = tidu_size;
2276 
2277         if (cm_entry->x_early_disc) {
2278                 /*
2279                  * We need to check if a disconnect request has come
2280                  * while we are connected, if so, then we need to
2281                  * set rpcerr->re_status appropriately before returning
2282                  * NULL to caller.
2283                  */
2284                 if (rpcerr->re_status == RPC_SUCCESS)
2285                         rpcerr->re_status = RPC_XPRTFAILED;
2286                 cm_entry->x_connected = FALSE;
2287         } else
2288                 cm_entry->x_connected = connected;
2289 
2290         /*
2291          * There could be a discrepancy here such that
2292          * x_early_disc is TRUE yet connected is TRUE as well
2293          * and the connection is actually connected. In that case
2294          * lets be conservative and declare the connection as not
2295          * connected.
2296          */
2297         cm_entry->x_early_disc = FALSE;
2298         cm_entry->x_needdis = (cm_entry->x_connected == FALSE);
2299         cm_entry->x_ctime = ddi_get_lbolt();
2300 
2301         /*
2302          * Notify any threads waiting that the connection attempt is done.
2303          */
2304         cm_entry->x_thread = FALSE;
2305         cv_broadcast(&cm_entry->x_conn_cv);
2306 
2307         if (cm_entry->x_connected == FALSE) {
2308                 mutex_exit(&connmgr_lock);
2309                 connmgr_release(cm_entry);
2310                 return (NULL);
2311         }
2312 
2313         mutex_exit(&connmgr_lock);
2314 
2315         return (cm_entry);
2316 }
2317 
2318 /*
 * Keep the cm_xprt entry on the connection list when making a connection. This
2320  * is to prevent multiple connections to a slow server from appearing.
2321  * We use the bit field x_thread to tell if a thread is doing a connection
2322  * which keeps other interested threads from messing with connection.
2323  * Those other threads just wait if x_thread is set.
2324  *
2325  * If x_thread is not set, then we do the actual work of connecting via
2326  * connmgr_connect().
2327  *
2328  * mutex convention: called with connmgr_lock held, returns with it released.
2329  */
static struct cm_xprt *
connmgr_wrapconnect(
	struct cm_xprt	*cm_entry,	/* entry already on the connection list */
	const struct timeval	*waitp,	/* how long to wait for the connect */
	struct netbuf	*destaddr,	/* server's transport address */
	int		addrfmly,	/* address family (e.g. AF_INET[6]) */
	struct netbuf	*srcaddr,	/* out: source address, for later retry */
	struct rpc_err	*rpcerr,	/* out: status of the connect attempt */
	bool_t		reconnect,	/* re-establishing a prior connection? */
	bool_t		nosignal,	/* ignore signals while waiting? */
	cred_t		*cr)		/* credential for the connect */
{
	ASSERT(MUTEX_HELD(&connmgr_lock));
	/*
	 * Hold this entry as we are about to drop connmgr_lock.
	 */
	CONN_HOLD(cm_entry);

	/*
	 * If there is a thread already making a connection for us, then
	 * wait for it to complete the connection.
	 */
	if (cm_entry->x_thread == TRUE) {
		rpcerr->re_status = connmgr_cwait(cm_entry, waitp, nosignal);

		if (rpcerr->re_status != RPC_SUCCESS) {
			/* Wait failed (interrupt/timeout); give up our hold. */
			mutex_exit(&connmgr_lock);
			connmgr_release(cm_entry);
			return (NULL);
		}
	} else {
		bool_t connected;
		calllist_t call;

		/* We are now the connecting thread; others wait on us. */
		cm_entry->x_thread = TRUE;

		while (cm_entry->x_needrel == TRUE) {
			cm_entry->x_needrel = FALSE;

			/*
			 * Send the pending orderly release.  The
			 * mutex_enter() below implies connmgr_sndrel()
			 * returns with connmgr_lock dropped.
			 */
			connmgr_sndrel(cm_entry);
			delay(drv_usectohz(1000000));

			mutex_enter(&connmgr_lock);
		}

		/*
		 * If we need to send a T_DISCON_REQ, send one.
		 */
		connmgr_dis_and_wait(cm_entry);

		mutex_exit(&connmgr_lock);

		/*
		 * The calllist is stack-allocated here (not at function
		 * scope) to keep the kernel stack lean on the wait path.
		 */
		bzero(&call, sizeof (call));
		cv_init(&call.call_cv, NULL, CV_DEFAULT, NULL);

		connected = connmgr_connect(cm_entry, cm_entry->x_wq,
		    destaddr, addrfmly, &call, &cm_entry->x_tidu_size,
		    reconnect, waitp, nosignal, cr);

		*rpcerr = call.call_err;
		cv_destroy(&call.call_cv);

		mutex_enter(&connmgr_lock);


		if (cm_entry->x_early_disc) {
			/*
			 * We need to check if a disconnect request has come
			 * while we are connected, if so, then we need to
			 * set rpcerr->re_status appropriately before returning
			 * NULL to caller.
			 */
			if (rpcerr->re_status == RPC_SUCCESS)
				rpcerr->re_status = RPC_XPRTFAILED;
			cm_entry->x_connected = FALSE;
		} else
			cm_entry->x_connected = connected;

		/*
		 * There could be a discrepancy here such that
		 * x_early_disc is TRUE yet connected is TRUE as well
		 * and the connection is actually connected. In that case
		 * lets be conservative and declare the connection as not
		 * connected.
		 */

		cm_entry->x_early_disc = FALSE;
		cm_entry->x_needdis = (cm_entry->x_connected == FALSE);


		/*
		 * connmgr_connect() may have given up before the connection
		 * actually timed out. So ensure that before the next
		 * connection attempt we do a disconnect.
		 */
		cm_entry->x_ctime = ddi_get_lbolt();
		cm_entry->x_thread = FALSE;

		/* Wake any thread blocked in connmgr_cwait() on this entry. */
		cv_broadcast(&cm_entry->x_conn_cv);

		if (cm_entry->x_connected == FALSE) {
			mutex_exit(&connmgr_lock);
			connmgr_release(cm_entry);
			return (NULL);
		}
	}

	if (srcaddr != NULL) {
		/*
		 * Copy into the handle the
		 * source address of the
		 * connection, which we will use
		 * in case of a later retry.
		 */
		if (srcaddr->len != cm_entry->x_src.len) {
			/* Resize the caller's buffer to fit x_src exactly. */
			if (srcaddr->maxlen > 0)
				kmem_free(srcaddr->buf, srcaddr->maxlen);
			ASSERT(cm_entry->x_src.len != 0);
			srcaddr->buf = kmem_alloc(cm_entry->x_src.len,
			    KM_SLEEP);
			srcaddr->maxlen = srcaddr->len = cm_entry->x_src.len;
		}
		bcopy(cm_entry->x_src.buf, srcaddr->buf, srcaddr->len);
	}
	/* Record last-use time for idle-connection housekeeping. */
	cm_entry->x_time = ddi_get_lbolt();
	mutex_exit(&connmgr_lock);
	return (cm_entry);
}
2458 
2459 /*
2460  * If we need to send a T_DISCON_REQ, send one.
2461  */
2462 static void
2463 connmgr_dis_and_wait(struct cm_xprt *cm_entry)
2464 {
2465         ASSERT(MUTEX_HELD(&connmgr_lock));
2466         for (;;) {
2467                 while (cm_entry->x_needdis == TRUE) {
2468                         RPCLOG(8, "connmgr_dis_and_wait: need "
2469                             "T_DISCON_REQ for connection 0x%p\n",
2470                             (void *)cm_entry);
2471                         cm_entry->x_needdis = FALSE;
2472                         cm_entry->x_waitdis = TRUE;
2473 
2474                         connmgr_snddis(cm_entry);
2475 
2476                         mutex_enter(&connmgr_lock);
2477                 }
2478 
2479                 if (cm_entry->x_waitdis == TRUE) {
2480                         clock_t timout;
2481 
2482                         RPCLOG(8, "connmgr_dis_and_wait waiting for "
2483                             "T_DISCON_REQ's ACK for connection %p\n",
2484                             (void *)cm_entry);
2485 
2486                         timout = clnt_cots_min_conntout * drv_usectohz(1000000);
2487 
2488                         /*
2489                          * The TPI spec says that the T_DISCON_REQ
2490                          * will get acknowledged, but in practice
2491                          * the ACK may never get sent. So don't
2492                          * block forever.
2493                          */
2494                         (void) cv_reltimedwait(&cm_entry->x_dis_cv,
2495                             &connmgr_lock, timout, TR_CLOCK_TICK);
2496                 }
2497                 /*
2498                  * If we got the ACK, break. If we didn't,
2499                  * then send another T_DISCON_REQ.
2500                  */
2501                 if (cm_entry->x_waitdis == FALSE) {
2502                         break;
2503                 } else {
2504                         RPCLOG(8, "connmgr_dis_and_wait: did"
2505                             "not get T_DISCON_REQ's ACK for "
2506                             "connection  %p\n", (void *)cm_entry);
2507                         cm_entry->x_needdis = TRUE;
2508                 }
2509         }
2510 }
2511 
2512 static void
2513 connmgr_cancelconn(struct cm_xprt *cm_entry)
2514 {
2515         /*
2516          * Mark the connection table entry as dead; the next thread that
2517          * goes through connmgr_release() will notice this and deal with it.
2518          */
2519         mutex_enter(&connmgr_lock);
2520         cm_entry->x_dead = TRUE;
2521 
2522         /*
2523          * Notify any threads waiting for the connection that it isn't
2524          * going to happen.
2525          */
2526         cm_entry->x_thread = FALSE;
2527         cv_broadcast(&cm_entry->x_conn_cv);
2528         mutex_exit(&connmgr_lock);
2529 
2530         connmgr_release(cm_entry);
2531 }
2532 
/*
 * Final teardown of a connection entry: wait for its reference count to
 * drain, close the transport endpoint, delete the per-connection kstat,
 * and free every allocation the entry owns (including the entry itself).
 */
static void
connmgr_close(struct cm_xprt *cm_entry)
{
	mutex_enter(&cm_entry->x_lock);
	while (cm_entry->x_ref != 0) {
		/*
		 * Must be a noninterruptible wait.
		 */
		cv_wait(&cm_entry->x_cv, &cm_entry->x_lock);
	}

	/* Close the TLI endpoint, if one was ever opened for this entry. */
	if (cm_entry->x_tiptr != NULL)
		(void) t_kclose(cm_entry->x_tiptr, 1);

	mutex_exit(&cm_entry->x_lock);
	if (cm_entry->x_ksp != NULL) {
		/*
		 * Detach the kstat from this entry under connmgr_lock
		 * (the kstat's ks_lock) so the update function cannot
		 * observe a stale ks_private.
		 */
		mutex_enter(&connmgr_lock);
		cm_entry->x_ksp->ks_private = NULL;
		mutex_exit(&connmgr_lock);

		/*
		 * Must free the buffer we allocated for the
		 * server address in the update function
		 */
		if (((struct cm_kstat_xprt *)(cm_entry->x_ksp->ks_data))->
		    x_server.value.str.addr.ptr != NULL)
			kmem_free(((struct cm_kstat_xprt *)(cm_entry->x_ksp->
			    ks_data))->x_server.value.str.addr.ptr,
			    INET6_ADDRSTRLEN);
		kmem_free(cm_entry->x_ksp->ks_data,
		    cm_entry->x_ksp->ks_data_size);
		kstat_delete(cm_entry->x_ksp);
	}

	/* All waiters are gone; destroy the synchronization objects. */
	mutex_destroy(&cm_entry->x_lock);
	cv_destroy(&cm_entry->x_cv);
	cv_destroy(&cm_entry->x_conn_cv);
	cv_destroy(&cm_entry->x_dis_cv);

	/* Release the copies of the server and source addresses. */
	if (cm_entry->x_server.buf != NULL)
		kmem_free(cm_entry->x_server.buf, cm_entry->x_server.maxlen);
	if (cm_entry->x_src.buf != NULL)
		kmem_free(cm_entry->x_src.buf, cm_entry->x_src.maxlen);
	kmem_free(cm_entry, sizeof (struct cm_xprt));
}
2578 
2579 /*
2580  * Called by KRPC after sending the call message to release the connection
2581  * it was using.
2582  */
2583 static void
2584 connmgr_release(struct cm_xprt *cm_entry)
2585 {
2586         mutex_enter(&cm_entry->x_lock);
2587         cm_entry->x_ref--;
2588         if (cm_entry->x_ref == 0)
2589                 cv_signal(&cm_entry->x_cv);
2590         mutex_exit(&cm_entry->x_lock);
2591 }
2592 
2593 /*
2594  * Set TCP receive and xmit buffer size for RPC connections.
2595  */
static bool_t
connmgr_setbufsz(calllist_t *e, queue_t *wq, cred_t *cr)
{
	int ok = FALSE;
	int val;

	/*
	 * Administrator opted for the system default sizes via
	 * /etc/system -- leave the socket options untouched.
	 */
	if (rpc_default_tcp_bufsz)
		return (FALSE);

	/*
	 * Only set new buffer size if it's larger than the system
	 * default buffer size. If smaller buffer size is needed
	 * then use /etc/system to set rpc_default_tcp_bufsz to 1.
	 *
	 * NOTE(review): SO_RCVBUF is compared/sized against
	 * rpc_send_bufsz and SO_SNDBUF against rpc_recv_bufsz below.
	 * The pairing looks swapped -- confirm the intended mapping
	 * of these tunables before changing it.
	 */
	ok = connmgr_getopt_int(wq, SOL_SOCKET, SO_RCVBUF, &val, e, cr);
	if ((ok == TRUE) && (val < rpc_send_bufsz)) {
		ok = connmgr_setopt_int(wq, SOL_SOCKET, SO_RCVBUF,
		    rpc_send_bufsz, e, cr);
		/* Probe records success/failure; failure is non-fatal. */
		DTRACE_PROBE2(krpc__i__connmgr_rcvbufsz,
		    int, ok, calllist_t *, e);
	}

	ok = connmgr_getopt_int(wq, SOL_SOCKET, SO_SNDBUF, &val, e, cr);
	if ((ok == TRUE) && (val < rpc_recv_bufsz)) {
		ok = connmgr_setopt_int(wq, SOL_SOCKET, SO_SNDBUF,
		    rpc_recv_bufsz, e, cr);
		DTRACE_PROBE2(krpc__i__connmgr_sndbufsz,
		    int, ok, calllist_t *, e);
	}
	/*
	 * TRUE means "buffer sizing was attempted", even if an
	 * individual get/set above failed.
	 */
	return (TRUE);
}
2627 
2628 /*
2629  * Given an open stream, connect to the remote.  Returns true if connected,
2630  * false otherwise.
2631  */
static bool_t
connmgr_connect(
	struct cm_xprt		*cm_entry,	/* connection entry being connected */
	queue_t			*wq,		/* write queue of the transport stream */
	struct netbuf		*addr,		/* server's transport address */
	int			addrfmly,	/* address family (e.g. AF_INET[6]) */
	calllist_t		*e,		/* out: status/reason of the attempt */
	int			*tidu_ptr,	/* out: negotiated TIDU size */
	bool_t			reconnect,	/* re-establishing a prior connection? */
	const struct timeval	*waitp,		/* how long to wait for the connect */
	bool_t			nosignal,	/* ignore signals while waiting? */
	cred_t			*cr)		/* credential for the connect */
{
	mblk_t *mp;
	struct T_conn_req *tcr;
	struct T_info_ack *tinfo;
	int interrupted, error;
	int tidu_size, kstat_instance;

	/* if it's a reconnect, flush any lingering data messages */
	if (reconnect)
		(void) putctl1(wq, M_FLUSH, FLUSHRW);

	/*
	 * Note: if the receiver uses SCM_UCRED/getpeerucred the pid will
	 * appear as -1.
	 */
	mp = allocb_cred(sizeof (*tcr) + addr->len, cr, NOPID);
	if (mp == NULL) {
		/*
		 * This is unfortunate, but we need to look up the stats for
		 * this zone to increment the "memory allocation failed"
		 * counter.  curproc->p_zone is safe since we're initiating a
		 * connection and not in some strange streams context.
		 */
		struct rpcstat *rpcstat;

		rpcstat = zone_getspecific(rpcstat_zone_key, rpc_zone());
		ASSERT(rpcstat != NULL);

		RPCLOG0(1, "connmgr_connect: cannot alloc mp for "
		    "sending conn request\n");
		COTSRCSTAT_INCR(rpcstat->rpc_cots_client, rcnomem);
		e->call_status = RPC_SYSTEMERROR;
		e->call_reason = ENOSR;
		return (FALSE);
	}

	/* Set TCP buffer size for RPC connections if needed */
	if (addrfmly == AF_INET || addrfmly == AF_INET6)
		(void) connmgr_setbufsz(e, wq, cr);

	/*
	 * Build the TPI T_CONN_REQ: fixed header followed immediately
	 * by the destination address bytes.
	 */
	mp->b_datap->db_type = M_PROTO;
	tcr = (struct T_conn_req *)mp->b_rptr;
	bzero(tcr, sizeof (*tcr));
	tcr->PRIM_type = T_CONN_REQ;
	tcr->DEST_length = addr->len;
	tcr->DEST_offset = sizeof (struct T_conn_req);
	mp->b_wptr = mp->b_rptr + sizeof (*tcr);

	bcopy(addr->buf, mp->b_wptr, tcr->DEST_length);
	mp->b_wptr += tcr->DEST_length;

	RPCLOG(8, "connmgr_connect: sending conn request on queue "
	    "%p", (void *)wq);
	RPCLOG(8, " call %p\n", (void *)wq);
	/*
	 * We use the entry in the handle that is normally used for
	 * waiting for RPC replies to wait for the connection accept.
	 */
	if (clnt_dispatch_send(wq, mp, e, 0, 0) != RPC_SUCCESS) {
		DTRACE_PROBE(krpc__e__connmgr__connect__cantsend);
		freemsg(mp);
		return (FALSE);
	}

	mutex_enter(&clnt_pending_lock);

	/*
	 * We wait for the transport connection to be made, or an
	 * indication that it could not be made.
	 */
	interrupted = 0;

	/*
	 * waitforack should have been called with T_OK_ACK, but the
	 * present implementation needs to be passed T_INFO_ACK to
	 * work correctly.
	 */
	error = waitforack(e, T_INFO_ACK, waitp, nosignal);
	if (error == EINTR)
		interrupted = 1;
	if (zone_status_get(curproc->p_zone) >= ZONE_IS_EMPTY) {
		/*
		 * No time to lose; we essentially have been signaled to
		 * quit.
		 */
		interrupted = 1;
	}
#ifdef RPCDEBUG
	if (error == ETIME)
		RPCLOG0(8, "connmgr_connect: giving up "
		    "on connection attempt; "
		    "clnt_dispatch notifyconn "
		    "diagnostic 'no one waiting for "
		    "connection' should not be "
		    "unexpected\n");
#endif
	/* Unlink 'e' from the clnt_pending doubly-linked list. */
	if (e->call_prev)
		e->call_prev->call_next = e->call_next;
	else
		clnt_pending = e->call_next;
	if (e->call_next)
		e->call_next->call_prev = e->call_prev;
	mutex_exit(&clnt_pending_lock);

	if (e->call_status != RPC_SUCCESS || error != 0) {
		/* Map the wait outcome onto an RPC status for the caller. */
		if (interrupted)
			e->call_status = RPC_INTR;
		else if (error == ETIME)
			e->call_status = RPC_TIMEDOUT;
		else if (error == EPROTO) {
			e->call_status = RPC_SYSTEMERROR;
			e->call_reason = EPROTO;
		}

		RPCLOG(8, "connmgr_connect: can't connect, status: "
		    "%s\n", clnt_sperrno(e->call_status));

		/* A reply may have arrived anyway; don't leak it. */
		if (e->call_reply) {
			freemsg(e->call_reply);
			e->call_reply = NULL;
		}

		return (FALSE);
	}
	/*
	 * The result of the "connection accept" is a T_info_ack
	 * in the call_reply field.
	 */
	ASSERT(e->call_reply != NULL);
	mp = e->call_reply;
	e->call_reply = NULL;
	tinfo = (struct T_info_ack *)mp->b_rptr;

	/*
	 * Round the advertised TIDU size down to an XDR-unit multiple
	 * and clamp nonsensical or oversized values to the default.
	 */
	tidu_size = tinfo->TIDU_size;
	tidu_size -= (tidu_size % BYTES_PER_XDR_UNIT);
	if (tidu_size > COTS_DEFAULT_ALLOCSIZE || (tidu_size <= 0))
		tidu_size = COTS_DEFAULT_ALLOCSIZE;
	*tidu_ptr = tidu_size;

	freemsg(mp);

	/*
	 * Set up the pertinent options.  NODELAY is so the transport doesn't
	 * buffer up RPC messages on either end.  This may not be valid for
	 * all transports.  Failure to set this option is not cause to
	 * bail out so we return success anyway.  Note that lack of NODELAY
	 * or some other way to flush the message on both ends will cause
	 * lots of retries and terrible performance.
	 */
	if (addrfmly == AF_INET || addrfmly == AF_INET6) {
		(void) connmgr_setopt(wq, IPPROTO_TCP, TCP_NODELAY, e, cr);
		if (e->call_status == RPC_XPRTFAILED)
			return (FALSE);
	}

	/*
	 * Since we have a connection, we now need to figure out if
	 * we need to create a kstat. If x_ksp is not NULL then we
	 * are reusing a connection and so we do not need to create
	 * another kstat -- lets just return.
	 */
	if (cm_entry->x_ksp != NULL)
		return (TRUE);

	/*
	 * We need to increment rpc_kstat_instance atomically to prevent
	 * two kstats being created with the same instance.
	 */
	kstat_instance = atomic_inc_32_nv((uint32_t *)&rpc_kstat_instance);

	/* kstat creation failure is non-fatal: the connection still works. */
	if ((cm_entry->x_ksp = kstat_create_zone("unix", kstat_instance,
	    "rpc_cots_connections", "rpc", KSTAT_TYPE_NAMED,
	    (uint_t)(sizeof (cm_kstat_xprt_t) / sizeof (kstat_named_t)),
	    KSTAT_FLAG_VIRTUAL, cm_entry->x_zoneid)) == NULL) {
		return (TRUE);
	}

	/*
	 * Virtual kstat: we supply the data buffer ourselves, including
	 * room for a printable server address filled in by the update
	 * function (freed later in connmgr_close()).
	 */
	cm_entry->x_ksp->ks_lock = &connmgr_lock;
	cm_entry->x_ksp->ks_private = cm_entry;
	cm_entry->x_ksp->ks_data_size = ((INET6_ADDRSTRLEN * sizeof (char))
	    + sizeof (cm_kstat_template));
	cm_entry->x_ksp->ks_data = kmem_alloc(cm_entry->x_ksp->ks_data_size,
	    KM_SLEEP);
	bcopy(&cm_kstat_template, cm_entry->x_ksp->ks_data,
	    cm_entry->x_ksp->ks_data_size);
	((struct cm_kstat_xprt *)(cm_entry->x_ksp->ks_data))->
	    x_server.value.str.addr.ptr =
	    kmem_alloc(INET6_ADDRSTRLEN, KM_SLEEP);

	cm_entry->x_ksp->ks_update = conn_kstat_update;
	kstat_install(cm_entry->x_ksp);
	return (TRUE);
}
2837 
2838 /*
2839  * Verify that the specified offset falls within the mblk and
2840  * that the resulting pointer is aligned.
2841  * Returns NULL if not.
2842  *
2843  * code from fs/sockfs/socksubr.c
2844  */
2845 static void *
2846 connmgr_opt_getoff(mblk_t *mp, t_uscalar_t offset,
2847     t_uscalar_t length, uint_t align_size)
2848 {
2849         uintptr_t ptr1, ptr2;
2850 
2851         ASSERT(mp && mp->b_wptr >= mp->b_rptr);
2852         ptr1 = (uintptr_t)mp->b_rptr + offset;
2853         ptr2 = (uintptr_t)ptr1 + length;
2854         if (ptr1 < (uintptr_t)mp->b_rptr || ptr2 > (uintptr_t)mp->b_wptr) {
2855                 return (NULL);
2856         }
2857         if ((ptr1 & (align_size - 1)) != 0) {
2858                 return (NULL);
2859         }
2860         return ((void *)ptr1);
2861 }
2862 
/*
 * Read back the current value of an int-sized transport option.
 *
 * Builds a T_SVR4_OPTMGMT_REQ (MGMT_flags = T_CURRENT) carrying a
 * single opthdr for (level, name), sends it down wq, and waits up to
 * clnt_cots_min_conntout seconds for the T_OPTMGMT_ACK.  On success
 * the option's value is stored in *val and TRUE is returned; FALSE is
 * returned on allocation, send, wait, or reply-validation failure.
 */
static bool_t
connmgr_getopt_int(queue_t *wq, int level, int name, int *val,
    calllist_t *e, cred_t *cr)
{
	mblk_t *mp;
	struct opthdr *opt, *opt_res;
	struct T_optmgmt_req *tor;
	struct T_optmgmt_ack *opt_ack;
	struct timeval waitp;
	int error;

	/* Request = T_optmgmt_req header + one opthdr + int payload. */
	mp = allocb_cred(sizeof (struct T_optmgmt_req) +
	    sizeof (struct opthdr) + sizeof (int), cr, NOPID);
	if (mp == NULL)
		return (FALSE);

	mp->b_datap->db_type = M_PROTO;
	tor = (struct T_optmgmt_req *)(mp->b_rptr);
	tor->PRIM_type = T_SVR4_OPTMGMT_REQ;
	tor->MGMT_flags = T_CURRENT;
	tor->OPT_length = sizeof (struct opthdr) + sizeof (int);
	tor->OPT_offset = sizeof (struct T_optmgmt_req);

	opt = (struct opthdr *)(mp->b_rptr + sizeof (struct T_optmgmt_req));
	opt->level = level;
	opt->name = name;
	opt->len = sizeof (int);
	mp->b_wptr += sizeof (struct T_optmgmt_req) + sizeof (struct opthdr) +
	    sizeof (int);

	/*
	 * We will use this connection regardless
	 * of whether or not the option is readable.
	 */
	if (clnt_dispatch_send(wq, mp, e, 0, 0) != RPC_SUCCESS) {
		DTRACE_PROBE(krpc__e__connmgr__getopt__cantsend);
		freemsg(mp);
		return (FALSE);
	}

	mutex_enter(&clnt_pending_lock);

	waitp.tv_sec = clnt_cots_min_conntout;
	waitp.tv_usec = 0;
	error = waitforack(e, T_OPTMGMT_ACK, &waitp, 1);

	/*
	 * clnt_dispatch_send() linked e onto the clnt_pending list
	 * (xid == 0); unlink it now that the wait is over.
	 */
	if (e->call_prev)
		e->call_prev->call_next = e->call_next;
	else
		clnt_pending = e->call_next;
	if (e->call_next)
		e->call_next->call_prev = e->call_prev;
	mutex_exit(&clnt_pending_lock);

	/* get reply message */
	mp = e->call_reply;
	e->call_reply = NULL;

	if ((!mp) || (e->call_status != RPC_SUCCESS) || (error != 0)) {

		DTRACE_PROBE4(krpc__e__connmgr_getopt, int, name,
		    int, e->call_status, int, error, mblk_t *, mp);

		if (mp)
			freemsg(mp);
		return (FALSE);
	}

	/*
	 * Verify the ack's claimed option data actually lies inside
	 * the mblk and is aligned before dereferencing it.
	 */
	opt_ack = (struct T_optmgmt_ack *)mp->b_rptr;
	opt_res = (struct opthdr *)connmgr_opt_getoff(mp, opt_ack->OPT_offset,
	    opt_ack->OPT_length, __TPI_ALIGN_SIZE);

	if (!opt_res) {
		DTRACE_PROBE4(krpc__e__connmgr_optres, mblk_t *, mp, int, name,
		    int, opt_ack->OPT_offset, int, opt_ack->OPT_length);
		freemsg(mp);
		return (FALSE);
	}
	/* The option value immediately follows its opthdr. */
	*val = *(int *)&opt_res[1];

	DTRACE_PROBE2(connmgr_getopt__ok, int, name, int, *val);

	freemsg(mp);
	return (TRUE);
}
2948 
2949 /*
2950  * Called by connmgr_connect to set an option on the new stream.
2951  */
2952 static bool_t
2953 connmgr_setopt_int(queue_t *wq, int level, int name, int val,
2954     calllist_t *e, cred_t *cr)
2955 {
2956         mblk_t *mp;
2957         struct opthdr *opt;
2958         struct T_optmgmt_req *tor;
2959         struct timeval waitp;
2960         int error;
2961 
2962         mp = allocb_cred(sizeof (struct T_optmgmt_req) +
2963             sizeof (struct opthdr) + sizeof (int), cr, NOPID);
2964         if (mp == NULL) {
2965                 RPCLOG0(1, "connmgr_setopt: cannot alloc mp for option "
2966                     "request\n");
2967                 return (FALSE);
2968         }
2969 
2970         mp->b_datap->db_type = M_PROTO;
2971         tor = (struct T_optmgmt_req *)(mp->b_rptr);
2972         tor->PRIM_type = T_SVR4_OPTMGMT_REQ;
2973         tor->MGMT_flags = T_NEGOTIATE;
2974         tor->OPT_length = sizeof (struct opthdr) + sizeof (int);
2975         tor->OPT_offset = sizeof (struct T_optmgmt_req);
2976 
2977         opt = (struct opthdr *)(mp->b_rptr + sizeof (struct T_optmgmt_req));
2978         opt->level = level;
2979         opt->name = name;
2980         opt->len = sizeof (int);
2981         *(int *)((char *)opt + sizeof (*opt)) = val;
2982         mp->b_wptr += sizeof (struct T_optmgmt_req) + sizeof (struct opthdr) +
2983             sizeof (int);
2984 
2985         /*
2986          * We will use this connection regardless
2987          * of whether or not the option is settable.
2988          */
2989         if (clnt_dispatch_send(wq, mp, e, 0, 0) != RPC_SUCCESS) {
2990                 DTRACE_PROBE(krpc__e__connmgr__setopt__cantsend);
2991                 freemsg(mp);
2992                 return (FALSE);
2993         }
2994 
2995         mutex_enter(&clnt_pending_lock);
2996 
2997         waitp.tv_sec = clnt_cots_min_conntout;
2998         waitp.tv_usec = 0;
2999         error = waitforack(e, T_OPTMGMT_ACK, &waitp, 1);
3000 
3001         if (e->call_prev)
3002                 e->call_prev->call_next = e->call_next;
3003         else
3004                 clnt_pending = e->call_next;
3005         if (e->call_next)
3006                 e->call_next->call_prev = e->call_prev;
3007         mutex_exit(&clnt_pending_lock);
3008 
3009         if (e->call_reply != NULL) {
3010                 freemsg(e->call_reply);
3011                 e->call_reply = NULL;
3012         }
3013 
3014         if (e->call_status != RPC_SUCCESS || error != 0) {
3015                 RPCLOG(1, "connmgr_setopt: can't set option: %d\n", name);
3016                 return (FALSE);
3017         }
3018         RPCLOG(8, "connmgr_setopt: successfully set option: %d\n", name);
3019         return (TRUE);
3020 }
3021 
3022 static bool_t
3023 connmgr_setopt(queue_t *wq, int level, int name, calllist_t *e, cred_t *cr)
3024 {
3025         return (connmgr_setopt_int(wq, level, name, 1, e, cr));
3026 }
3027 
#ifdef	DEBUG

/*
 * This is a knob to let us force code coverage in allocation failure
 * case.  Setting connmgr_failsnd to N (e.g. via mdb) makes the next
 * N CONN_SND_ALLOC() calls return NULL; each failure decrements the
 * counter, so allocation resumes once it reaches zero.
 */
static int	connmgr_failsnd;
#define	CONN_SND_ALLOC(Size, Pri)	\
	((connmgr_failsnd-- > 0) ? NULL : allocb(Size, Pri))

#else

/* Non-DEBUG kernels allocate unconditionally. */
#define	CONN_SND_ALLOC(Size, Pri)	allocb(Size, Pri)

#endif
3043 
3044 /*
3045  * Sends an orderly release on the specified queue.
3046  * Entered with connmgr_lock. Exited without connmgr_lock
3047  */
3048 static void
3049 connmgr_sndrel(struct cm_xprt *cm_entry)
3050 {
3051         struct T_ordrel_req *torr;
3052         mblk_t *mp;
3053         queue_t *q = cm_entry->x_wq;
3054         ASSERT(MUTEX_HELD(&connmgr_lock));
3055         mp = CONN_SND_ALLOC(sizeof (struct T_ordrel_req), BPRI_LO);
3056         if (mp == NULL) {
3057                 cm_entry->x_needrel = TRUE;
3058                 mutex_exit(&connmgr_lock);
3059                 RPCLOG(1, "connmgr_sndrel: cannot alloc mp for sending ordrel "
3060                     "to queue %p\n", (void *)q);
3061                 return;
3062         }
3063         mutex_exit(&connmgr_lock);
3064 
3065         mp->b_datap->db_type = M_PROTO;
3066         torr = (struct T_ordrel_req *)(mp->b_rptr);
3067         torr->PRIM_type = T_ORDREL_REQ;
3068         mp->b_wptr = mp->b_rptr + sizeof (struct T_ordrel_req);
3069 
3070         RPCLOG(8, "connmgr_sndrel: sending ordrel to queue %p\n", (void *)q);
3071         put(q, mp);
3072 }
3073 
3074 /*
3075  * Sends an disconnect on the specified queue.
3076  * Entered with connmgr_lock. Exited without connmgr_lock
3077  */
3078 static void
3079 connmgr_snddis(struct cm_xprt *cm_entry)
3080 {
3081         struct T_discon_req *tdis;
3082         mblk_t *mp;
3083         queue_t *q = cm_entry->x_wq;
3084 
3085         ASSERT(MUTEX_HELD(&connmgr_lock));
3086         mp = CONN_SND_ALLOC(sizeof (*tdis), BPRI_LO);
3087         if (mp == NULL) {
3088                 cm_entry->x_needdis = TRUE;
3089                 mutex_exit(&connmgr_lock);
3090                 RPCLOG(1, "connmgr_snddis: cannot alloc mp for sending discon "
3091                     "to queue %p\n", (void *)q);
3092                 return;
3093         }
3094         mutex_exit(&connmgr_lock);
3095 
3096         mp->b_datap->db_type = M_PROTO;
3097         tdis = (struct T_discon_req *)mp->b_rptr;
3098         tdis->PRIM_type = T_DISCON_REQ;
3099         mp->b_wptr = mp->b_rptr + sizeof (*tdis);
3100 
3101         RPCLOG(8, "connmgr_snddis: sending discon to queue %p\n", (void *)q);
3102         put(q, mp);
3103 }
3104 
3105 /*
3106  * Sets up the entry for receiving replies, and calls rpcmod's write put proc
3107  * (through put) to send the call.
3108  */
3109 static int
3110 clnt_dispatch_send(queue_t *q, mblk_t *mp, calllist_t *e, uint_t xid,
3111     uint_t queue_flag)
3112 {
3113         ASSERT(e != NULL);
3114 
3115         e->call_status = RPC_TIMEDOUT;       /* optimistic, eh? */
3116         e->call_reason = 0;
3117         e->call_wq = q;
3118         e->call_xid = xid;
3119         e->call_notified = FALSE;
3120 
3121         if (!canput(q)) {
3122                 e->call_status = RPC_CANTSEND;
3123                 e->call_reason = ENOBUFS;
3124                 return (RPC_CANTSEND);
3125         }
3126 
3127         /*
3128          * If queue_flag is set then the calllist_t is already on the hash
3129          * queue.  In this case just send the message and return.
3130          */
3131         if (queue_flag) {
3132                 put(q, mp);
3133                 return (RPC_SUCCESS);
3134 
3135         }
3136 
3137         /*
3138          * Set up calls for RPC requests (with XID != 0) on the hash
3139          * queue for fast lookups and place other calls (i.e.
3140          * connection management) on the linked list.
3141          */
3142         if (xid != 0) {
3143                 RPCLOG(64, "clnt_dispatch_send: putting xid 0x%x on "
3144                     "dispatch list\n", xid);
3145                 e->call_hash = call_hash(xid, clnt_cots_hash_size);
3146                 e->call_bucket = &cots_call_ht[e->call_hash];
3147                 call_table_enter(e);
3148         } else {
3149                 mutex_enter(&clnt_pending_lock);
3150                 if (clnt_pending)
3151                         clnt_pending->call_prev = e;
3152                 e->call_next = clnt_pending;
3153                 e->call_prev = NULL;
3154                 clnt_pending = e;
3155                 mutex_exit(&clnt_pending_lock);
3156         }
3157 
3158         put(q, mp);
3159         return (RPC_SUCCESS);
3160 }
3161 
3162 /*
3163  * Called by rpcmod to notify a client with a clnt_pending call that its reply
3164  * has arrived.  If we can't find a client waiting for this reply, we log
3165  * the error and return.
3166  */
3167 bool_t
3168 clnt_dispatch_notify(mblk_t *mp, zoneid_t zoneid)
3169 {
3170         calllist_t *e = NULL;
3171         call_table_t *chtp;
3172         uint32_t xid;
3173         uint_t hash;
3174 
3175         if ((IS_P2ALIGNED(mp->b_rptr, sizeof (uint32_t))) &&
3176             (mp->b_wptr - mp->b_rptr) >= sizeof (xid))
3177                 xid = *((uint32_t *)mp->b_rptr);
3178         else {
3179                 int i = 0;
3180                 unsigned char *p = (unsigned char *)&xid;
3181                 unsigned char *rptr;
3182                 mblk_t *tmp = mp;
3183 
3184                 /*
3185                  * Copy the xid, byte-by-byte into xid.
3186                  */
3187                 while (tmp) {
3188                         rptr = tmp->b_rptr;
3189                         while (rptr < tmp->b_wptr) {
3190                                 *p++ = *rptr++;
3191                                 if (++i >= sizeof (xid))
3192                                         goto done_xid_copy;
3193                         }
3194                         tmp = tmp->b_cont;
3195                 }
3196 
3197                 /*
3198                  * If we got here, we ran out of mblk space before the
3199                  * xid could be copied.
3200                  */
3201                 ASSERT(tmp == NULL && i < sizeof (xid));
3202 
3203                 RPCLOG0(1,
3204                     "clnt_dispatch_notify: message less than size of xid\n");
3205                 return (FALSE);
3206 
3207         }
3208 done_xid_copy:
3209 
3210         hash = call_hash(xid, clnt_cots_hash_size);
3211         chtp = &cots_call_ht[hash];
3212         /* call_table_find returns with the hash bucket locked */
3213         call_table_find(chtp, xid, e);
3214 
3215         if (e != NULL) {
3216                 /*
3217                  * Found thread waiting for this reply
3218                  */
3219                 mutex_enter(&e->call_lock);
3220 
3221                 /*
3222                  * verify that the reply is coming in on
3223                  * the same zone that it was sent from.
3224                  */
3225                 if (e->call_zoneid != zoneid) {
3226                         mutex_exit(&e->call_lock);
3227                         mutex_exit(&chtp->ct_lock);
3228                         RPCLOG0(1, "clnt_dispatch_notify: incorrect zoneid\n");
3229                         return (FALSE);
3230                 }
3231 
3232                 if (e->call_reply)
3233                         /*
3234                          * This can happen under the following scenario:
3235                          * clnt_cots_kcallit() times out on the response,
3236                          * rfscall() repeats the CLNT_CALL() with
3237                          * the same xid, clnt_cots_kcallit() sends the retry,
3238                          * thereby putting the clnt handle on the pending list,
3239                          * the first response arrives, signalling the thread
3240                          * in clnt_cots_kcallit(). Before that thread is
3241                          * dispatched, the second response arrives as well,
3242                          * and clnt_dispatch_notify still finds the handle on
3243                          * the pending list, with call_reply set. So free the
3244                          * old reply now.
3245                          *
3246                          * It is also possible for a response intended for
3247                          * an RPC call with a different xid to reside here.
3248                          * This can happen if the thread that owned this
3249                          * client handle prior to the current owner bailed
3250                          * out and left its call record on the dispatch
3251                          * queue.  A window exists where the response can
3252                          * arrive before the current owner dispatches its
3253                          * RPC call.
3254                          *
3255                          * In any case, this is the very last point where we
3256                          * can safely check the call_reply field before
3257                          * placing the new response there.
3258                          */
3259                         freemsg(e->call_reply);
3260                 e->call_reply = mp;
3261                 e->call_status = RPC_SUCCESS;
3262                 e->call_notified = TRUE;
3263                 cv_signal(&e->call_cv);
3264                 mutex_exit(&e->call_lock);
3265                 mutex_exit(&chtp->ct_lock);
3266                 return (TRUE);
3267         } else {
3268                 zone_t *zone;
3269                 struct rpcstat *rpcstat;
3270 
3271                 mutex_exit(&chtp->ct_lock);
3272                 RPCLOG(65, "clnt_dispatch_notify: no caller for reply 0x%x\n",
3273                     xid);
3274                 /*
3275                  * This is unfortunate, but we need to lookup the zone so we
3276                  * can increment its "rcbadxids" counter.
3277                  */
3278                 zone = zone_find_by_id(zoneid);
3279                 if (zone == NULL) {
3280                         /*
3281                          * The zone went away...
3282                          */
3283                         return (FALSE);
3284                 }
3285                 rpcstat = zone_getspecific(rpcstat_zone_key, zone);
3286                 if (zone_status_get(zone) >= ZONE_IS_SHUTTING_DOWN) {
3287                         /*
3288                          * Not interested
3289                          */
3290                         zone_rele(zone);
3291                         return (FALSE);
3292                 }
3293                 COTSRCSTAT_INCR(rpcstat->rpc_cots_client, rcbadxids);
3294                 zone_rele(zone);
3295         }
3296         return (FALSE);
3297 }
3298 
3299 /*
3300  * Called by rpcmod when a non-data indication arrives.  The ones in which we
3301  * are interested are connection indications and options acks.  We dispatch
3302  * based on the queue the indication came in on.  If we are not interested in
3303  * what came in, we return false to rpcmod, who will then pass it upstream.
3304  */
3305 bool_t
3306 clnt_dispatch_notifyconn(queue_t *q, mblk_t *mp)
3307 {
3308         calllist_t *e;
3309         int type;
3310 
3311         ASSERT((q->q_flag & QREADR) == 0);
3312 
3313         type = ((union T_primitives *)mp->b_rptr)->type;
3314         RPCLOG(8, "clnt_dispatch_notifyconn: prim type: [%s]\n",
3315             rpc_tpiprim2name(type));
3316         mutex_enter(&clnt_pending_lock);
3317         for (e = clnt_pending; /* NO CONDITION */; e = e->call_next) {
3318                 if (e == NULL) {
3319                         mutex_exit(&clnt_pending_lock);
3320                         RPCLOG(1, "clnt_dispatch_notifyconn: no one waiting "
3321                             "for connection on queue 0x%p\n", (void *)q);
3322                         return (FALSE);
3323                 }
3324                 if (e->call_wq == q)
3325                         break;
3326         }
3327 
3328         switch (type) {
3329         case T_CONN_CON:
3330                 /*
3331                  * The transport is now connected, send a T_INFO_REQ to get
3332                  * the tidu size.
3333                  */
3334                 mutex_exit(&clnt_pending_lock);
3335                 ASSERT(mp->b_datap->db_lim - mp->b_datap->db_base >=
3336                     sizeof (struct T_info_req));
3337                 mp->b_rptr = mp->b_datap->db_base;
3338                 ((union T_primitives *)mp->b_rptr)->type = T_INFO_REQ;
3339                 mp->b_wptr = mp->b_rptr + sizeof (struct T_info_req);
3340                 mp->b_datap->db_type = M_PCPROTO;
3341                 put(q, mp);
3342                 return (TRUE);
3343         case T_INFO_ACK:
3344         case T_OPTMGMT_ACK:
3345                 e->call_status = RPC_SUCCESS;
3346                 e->call_reply = mp;
3347                 e->call_notified = TRUE;
3348                 cv_signal(&e->call_cv);
3349                 break;
3350         case T_ERROR_ACK:
3351                 e->call_status = RPC_CANTCONNECT;
3352                 e->call_reply = mp;
3353                 e->call_notified = TRUE;
3354                 cv_signal(&e->call_cv);
3355                 break;
3356         case T_OK_ACK:
3357                 /*
3358                  * Great, but we are really waiting for a T_CONN_CON
3359                  */
3360                 freemsg(mp);
3361                 break;
3362         default:
3363                 mutex_exit(&clnt_pending_lock);
3364                 RPCLOG(1, "clnt_dispatch_notifyconn: bad type %d\n", type);
3365                 return (FALSE);
3366         }
3367 
3368         mutex_exit(&clnt_pending_lock);
3369         return (TRUE);
3370 }
3371 
3372 /*
3373  * Called by rpcmod when the transport is (or should be) going away.  Informs
3374  * all callers waiting for replies and marks the entry in the connection
3375  * manager's list as unconnected, and either closing (close handshake in
3376  * progress) or dead.
3377  */
3378 void
3379 clnt_dispatch_notifyall(queue_t *q, int32_t msg_type, int32_t reason)
3380 {
3381         calllist_t *e;
3382         call_table_t *ctp;
3383         struct cm_xprt *cm_entry;
3384         int have_connmgr_lock;
3385         int i;
3386 
3387         ASSERT((q->q_flag & QREADR) == 0);
3388 
3389         RPCLOG(1, "clnt_dispatch_notifyall on queue %p", (void *)q);
3390         RPCLOG(1, " received a notifcation prim type [%s]",
3391             rpc_tpiprim2name(msg_type));
3392         RPCLOG(1, " and reason %d\n", reason);
3393 
3394         /*
3395          * Find the transport entry in the connection manager's list, close
3396          * the transport and delete the entry.  In the case where rpcmod's
3397          * idle timer goes off, it sends us a T_ORDREL_REQ, indicating we
3398          * should gracefully close the connection.
3399          */
3400         have_connmgr_lock = 1;
3401         mutex_enter(&connmgr_lock);
3402         for (cm_entry = cm_hd; cm_entry; cm_entry = cm_entry->x_next) {
3403                 ASSERT(cm_entry != cm_entry->x_next);
3404                 if (cm_entry->x_wq == q) {
3405                         ASSERT(MUTEX_HELD(&connmgr_lock));
3406                         ASSERT(have_connmgr_lock == 1);
3407                         switch (msg_type) {
3408                         case T_ORDREL_REQ:
3409 
3410                                 if (cm_entry->x_dead) {
3411                                         RPCLOG(1, "idle timeout on dead "
3412                                             "connection: %p\n",
3413                                             (void *)cm_entry);
3414                                         if (clnt_stop_idle != NULL)
3415                                                 (*clnt_stop_idle)(q);
3416                                         break;
3417                                 }
3418 
3419                                 /*
3420                                  * Only mark the connection as dead if it is
3421                                  * connected and idle.
3422                                  * An unconnected connection has probably
3423                                  * gone idle because the server is down,
3424                                  * and when it comes back up there will be
3425                                  * retries that need to use that connection.
3426                                  */
3427                                 if (cm_entry->x_connected ||
3428                                     cm_entry->x_doomed) {
3429                                         if (cm_entry->x_ordrel) {
3430                                                 if (cm_entry->x_closing ==
3431                                                     TRUE) {
3432                                                         /*
3433                                                          * The connection is
3434                                                          * obviously wedged due
3435                                                          * to a bug or problem
3436                                                          * with the transport.
3437                                                          * Mark it as dead.
3438                                                          * Otherwise we can
3439                                                          * leak connections.
3440                                                          */
3441                                                         cm_entry->x_dead = TRUE;
3442                                                         mutex_exit(
3443                                                             &connmgr_lock);
3444                                                         have_connmgr_lock = 0;
3445                                                         if (clnt_stop_idle !=
3446                                                             NULL)
3447                                                         (*clnt_stop_idle)(q);
3448                                                         break;
3449                                                 }
3450                                                 cm_entry->x_closing = TRUE;
3451                                                 connmgr_sndrel(cm_entry);
3452                                                 have_connmgr_lock = 0;
3453                                         } else {
3454                                                 cm_entry->x_dead = TRUE;
3455                                                 mutex_exit(&connmgr_lock);
3456                                                 have_connmgr_lock = 0;
3457                                                 if (clnt_stop_idle != NULL)
3458                                                         (*clnt_stop_idle)(q);
3459                                         }
3460                                 } else {
3461                                         /*
3462                                          * We don't mark the connection
3463                                          * as dead, but we turn off the
3464                                          * idle timer.
3465                                          */
3466                                         mutex_exit(&connmgr_lock);
3467                                         have_connmgr_lock = 0;
3468                                         if (clnt_stop_idle != NULL)
3469                                                 (*clnt_stop_idle)(q);
3470                                         RPCLOG(1, "clnt_dispatch_notifyall:"
3471                                             " ignoring timeout from rpcmod"
3472                                             " (q %p) because we are not "
3473                                             " connected\n", (void *)q);
3474                                 }
3475                                 break;
3476                         case T_ORDREL_IND:
3477                                 /*
3478                                  * If this entry is marked closing, then we are
3479                                  * completing a close handshake, and the
3480                                  * connection is dead.  Otherwise, the server is
3481                                  * trying to close. Since the server will not
3482                                  * be sending any more RPC replies, we abort
3483                                  * the connection, including flushing
3484                                  * any RPC requests that are in-transit.
3485                                  * In either case, mark the entry as dead so
3486                                  * that it can be closed by the connection
3487                                  * manager's garbage collector.
3488                                  */
3489                                 cm_entry->x_dead = TRUE;
3490                                 if (cm_entry->x_closing) {
3491                                         mutex_exit(&connmgr_lock);
3492                                         have_connmgr_lock = 0;
3493                                         if (clnt_stop_idle != NULL)
3494                                                 (*clnt_stop_idle)(q);
3495                                 } else {
3496                                         /*
3497                                          * if we're getting a disconnect
3498                                          * before we've finished our
3499                                          * connect attempt, mark it for
3500                                          * later processing
3501                                          */
3502                                         if (cm_entry->x_thread)
3503                                                 cm_entry->x_early_disc = TRUE;
3504                                         else
3505                                                 cm_entry->x_connected = FALSE;
3506                                         cm_entry->x_waitdis = TRUE;
3507                                         connmgr_snddis(cm_entry);
3508                                         have_connmgr_lock = 0;
3509                                 }
3510                                 break;
3511 
3512                         case T_ERROR_ACK:
3513                         case T_OK_ACK:
3514                                 cm_entry->x_waitdis = FALSE;
3515                                 cv_signal(&cm_entry->x_dis_cv);
3516                                 mutex_exit(&connmgr_lock);
3517                                 return;
3518 
3519                         case T_DISCON_REQ:
3520                                 if (cm_entry->x_thread)
3521                                         cm_entry->x_early_disc = TRUE;
3522                                 else
3523                                         cm_entry->x_connected = FALSE;
3524                                 cm_entry->x_waitdis = TRUE;
3525 
3526                                 connmgr_snddis(cm_entry);
3527                                 have_connmgr_lock = 0;
3528                                 break;
3529 
3530                         case T_DISCON_IND:
3531                         default:
3532                                 /*
3533                                  * if we're getting a disconnect before
3534                                  * we've finished our connect attempt,
3535                                  * mark it for later processing
3536                                  */
3537                                 if (cm_entry->x_closing) {
3538                                         cm_entry->x_dead = TRUE;
3539                                         mutex_exit(&connmgr_lock);
3540                                         have_connmgr_lock = 0;
3541                                         if (clnt_stop_idle != NULL)
3542                                                 (*clnt_stop_idle)(q);
3543                                 } else {
3544                                         if (cm_entry->x_thread) {
3545                                                 cm_entry->x_early_disc = TRUE;
3546                                         } else {
3547                                                 cm_entry->x_dead = TRUE;
3548                                                 cm_entry->x_connected = FALSE;
3549                                         }
3550                                 }
3551                                 break;
3552                         }
3553                         break;
3554                 }
3555         }
3556 
3557         if (have_connmgr_lock)
3558                 mutex_exit(&connmgr_lock);
3559 
3560         if (msg_type == T_ERROR_ACK || msg_type == T_OK_ACK) {
3561                 RPCLOG(1, "clnt_dispatch_notifyall: (wq %p) could not find "
3562                     "connmgr entry for discon ack\n", (void *)q);
3563                 return;
3564         }
3565 
3566         /*
3567          * Then kick all the clnt_pending calls out of their wait.  There
3568          * should be no clnt_pending calls in the case of rpcmod's idle
3569          * timer firing.
3570          */
3571         for (i = 0; i < clnt_cots_hash_size; i++) {
3572                 ctp = &cots_call_ht[i];
3573                 mutex_enter(&ctp->ct_lock);
3574                 for (e = ctp->ct_call_next;
3575                     e != (calllist_t *)ctp;
3576                     e = e->call_next) {
3577                         if (e->call_wq == q && e->call_notified == FALSE) {
3578                                 RPCLOG(1,
3579                                     "clnt_dispatch_notifyall for queue %p ",
3580                                     (void *)q);
3581                                 RPCLOG(1, "aborting clnt_pending call %p\n",
3582                                     (void *)e);
3583 
3584                                 if (msg_type == T_DISCON_IND)
3585                                         e->call_reason = reason;
3586                                 e->call_notified = TRUE;
3587                                 e->call_status = RPC_XPRTFAILED;
3588                                 cv_signal(&e->call_cv);
3589                         }
3590                 }
3591                 mutex_exit(&ctp->ct_lock);
3592         }
3593 
3594         mutex_enter(&clnt_pending_lock);
3595         for (e = clnt_pending; e; e = e->call_next) {
3596                 /*
3597                  * Only signal those RPC handles that haven't been
3598                  * signalled yet. Otherwise we can get a bogus call_reason.
3599                  * This can happen if thread A is making a call over a
3600                  * connection. If the server is killed, it will cause
3601                  * reset, and reason will default to EIO as a result of
3602                  * a T_ORDREL_IND. Thread B then attempts to recreate
3603                  * the connection but gets a T_DISCON_IND. If we set the
3604                  * call_reason code for all threads, then if thread A
3605                  * hasn't been dispatched yet, it will get the wrong
3606                  * reason. The bogus call_reason can make it harder to
3607                  * discriminate between calls that fail because the
3608                  * connection attempt failed versus those where the call
3609                  * may have been executed on the server.
3610                  */
3611                 if (e->call_wq == q && e->call_notified == FALSE) {
3612                         RPCLOG(1, "clnt_dispatch_notifyall for queue %p ",
3613                             (void *)q);
3614                         RPCLOG(1, " aborting clnt_pending call %p\n",
3615                             (void *)e);
3616 
3617                         if (msg_type == T_DISCON_IND)
3618                                 e->call_reason = reason;
3619                         e->call_notified = TRUE;
3620                         /*
3621                          * Let the caller timeout, else it will retry
3622                          * immediately.
3623                          */
3624                         e->call_status = RPC_XPRTFAILED;
3625 
3626                         /*
3627                          * We used to just signal those threads
3628                          * waiting for a connection, (call_xid = 0).
3629                          * That meant that threads waiting for a response
3630                          * waited till their timeout expired. This
3631                          * could be a long time if they've specified a
3632                          * maximum timeout. (2^31 - 1). So we
3633                          * Signal all threads now.
3634                          */
3635                         cv_signal(&e->call_cv);
3636                 }
3637         }
3638         mutex_exit(&clnt_pending_lock);
3639 }
3640 
3641 
3642 /*ARGSUSED*/
3643 /*
3644  * after resuming a system that's been suspended for longer than the
3645  * NFS server's idle timeout (svc_idle_timeout for Solaris 2), rfscall()
3646  * generates "NFS server X not responding" and "NFS server X ok" messages;
3647  * here we reset inet connections to cause a re-connect and avoid those
3648  * NFS messages.  see 4045054
3649  */
3650 boolean_t
3651 connmgr_cpr_reset(void *arg, int code)
3652 {
3653         struct cm_xprt *cxp;
3654 
3655         if (code == CB_CODE_CPR_CHKPT)
3656                 return (B_TRUE);
3657 
3658         if (mutex_tryenter(&connmgr_lock) == 0)
3659                 return (B_FALSE);
3660         for (cxp = cm_hd; cxp; cxp = cxp->x_next) {
3661                 if ((cxp->x_family == AF_INET || cxp->x_family == AF_INET6) &&
3662                     cxp->x_connected == TRUE) {
3663                         if (cxp->x_thread)
3664                                 cxp->x_early_disc = TRUE;
3665                         else
3666                                 cxp->x_connected = FALSE;
3667                         cxp->x_needdis = TRUE;
3668                 }
3669         }
3670         mutex_exit(&connmgr_lock);
3671         return (B_TRUE);
3672 }
3673 
3674 void
3675 clnt_cots_stats_init(zoneid_t zoneid, struct rpc_cots_client **statsp)
3676 {
3677 
3678         *statsp = (struct rpc_cots_client *)rpcstat_zone_init_common(zoneid,
3679             "unix", "rpc_cots_client", (const kstat_named_t *)&cots_rcstat_tmpl,
3680             sizeof (cots_rcstat_tmpl));
3681 }
3682 
/*
 * Tear down the per-zone "rpc_cots_client" kstat and free the counter
 * block that clnt_cots_stats_init() handed out for this zone.
 */
void
clnt_cots_stats_fini(zoneid_t zoneid, struct rpc_cots_client **statsp)
{
        /* Remove the kstat first so nothing references the counters ... */
        rpcstat_zone_fini_common(zoneid, "unix", "rpc_cots_client");
        /* ... then release the storage they lived in. */
        kmem_free(*statsp, sizeof (cots_rcstat_tmpl));
}
3689 
3690 void
3691 clnt_cots_init(void)
3692 {
3693         mutex_init(&connmgr_lock, NULL, MUTEX_DEFAULT, NULL);
3694         mutex_init(&clnt_pending_lock, NULL, MUTEX_DEFAULT, NULL);
3695 
3696         if (clnt_cots_hash_size < DEFAULT_MIN_HASH_SIZE)
3697                 clnt_cots_hash_size = DEFAULT_MIN_HASH_SIZE;
3698 
3699         cots_call_ht = call_table_init(clnt_cots_hash_size);
3700         zone_key_create(&zone_cots_key, NULL, NULL, clnt_zone_destroy);
3701 }
3702 
/*
 * Teardown counterpart of clnt_cots_init(): delete the zone key so
 * clnt_zone_destroy() is no longer invoked for departing zones.
 */
void
clnt_cots_fini(void)
{
        (void) zone_key_delete(zone_cots_key);
}
3708 
3709 /*
3710  * Wait for TPI ack, returns success only if expected ack is received
3711  * within timeout period.
3712  */
3713 
3714 static int
3715 waitforack(calllist_t *e, t_scalar_t ack_prim, const struct timeval *waitp,
3716     bool_t nosignal)
3717 {
3718         union T_primitives *tpr;
3719         clock_t timout;
3720         int cv_stat = 1;
3721 
3722         ASSERT(MUTEX_HELD(&clnt_pending_lock));
3723         while (e->call_reply == NULL) {
3724                 if (waitp != NULL) {
3725                         timout = waitp->tv_sec * drv_usectohz(MICROSEC) +
3726                             drv_usectohz(waitp->tv_usec);
3727                         if (nosignal)
3728                                 cv_stat = cv_reltimedwait(&e->call_cv,
3729                                     &clnt_pending_lock, timout, TR_CLOCK_TICK);
3730                         else
3731                                 cv_stat = cv_reltimedwait_sig(&e->call_cv,
3732                                     &clnt_pending_lock, timout, TR_CLOCK_TICK);
3733                 } else {
3734                         if (nosignal)
3735                                 cv_wait(&e->call_cv, &clnt_pending_lock);
3736                         else
3737                                 cv_stat = cv_wait_sig(&e->call_cv,
3738                                     &clnt_pending_lock);
3739                 }
3740                 if (cv_stat == -1)
3741                         return (ETIME);
3742                 if (cv_stat == 0)
3743                         return (EINTR);
3744                 /*
3745                  * if we received an error from the server and we know a reply
3746                  * is not going to be sent, do not wait for the full timeout,
3747                  * return now.
3748                  */
3749                 if (e->call_status == RPC_XPRTFAILED)
3750                         return (e->call_reason);
3751         }
3752         tpr = (union T_primitives *)e->call_reply->b_rptr;
3753         if (tpr->type == ack_prim)
3754                 return (0); /* Success */
3755 
3756         if (tpr->type == T_ERROR_ACK) {
3757                 if (tpr->error_ack.TLI_error == TSYSERR)
3758                         return (tpr->error_ack.UNIX_error);
3759                 else
3760                         return (t_tlitosyserr(tpr->error_ack.TLI_error));
3761         }
3762 
3763         return (EPROTO); /* unknown or unexpected primitive */
3764 }