NEX-19225 SMB client 2.1 hits redzone panic
Reviewed by: Gordon Ross <gordon.ross@nexenta.com>
Reviewed by: Joyce McIntosh <joyce.mcintosh@nexenta.com>
NEX-14666 Need to provide SMB 2.1 Client
NEX-17187 panic in smbfs_acl_store
NEX-17231 smbfs create xattr files finds wrong file
NEX-17224 smbfs lookup EINVAL should be ENOENT
NEX-17260 SMB1 client fails to list directory after NEX-14666
Reviewed by: Evan Layton <evan.layton@nexenta.com>
Reviewed by: Matt Barden <matt.barden@nexenta.com>
Reviewed by: Rick McNeal <rick.mcneal@nexenta.com>
Reviewed by: Saso Kiselkov <saso.kiselkov@nexenta.com>
Reviewed by: Joyce McIntosh <joyce.mcintosh@nexenta.com>
and: (cleanup)
NEX-16824 SMB client connection setup rework
NEX-17232 SMB client reconnect failures
Reviewed by: Evan Layton <evan.layton@nexenta.com>
Reviewed by: Matt Barden <matt.barden@nexenta.com>
and: (improve debug)
NEX-16818 Add fksmbcl development tool
NEX-17264 SMB client test tp_smbutil_013 fails after NEX-14666
Reviewed by: Evan Layton <evan.layton@nexenta.com>
Reviewed by: Matt Barden <matt.barden@nexenta.com>
and: (fix ref leaks)
NEX-16805 Add smbutil discon command
Reviewed by: Matt Barden <matt.barden@nexenta.com>
Reviewed by: Evan Layton <evan.layton@nexenta.com>
SUP-548 Panic from NULL pointer dereference in smb_iod_disconnect

          --- old/usr/src/uts/common/fs/smbclnt/netsmb/smb_iod.c
          +++ new/usr/src/uts/common/fs/smbclnt/netsmb/smb_iod.c
[ 27 lines elided ]
  28   28   * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  29   29   * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  30   30   * SUCH DAMAGE.
  31   31   *
  32   32   * $Id: smb_iod.c,v 1.32 2005/02/12 00:17:09 lindak Exp $
  33   33   */
  34   34  
  35   35  /*
  36   36   * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
  37   37   * Use is subject to license terms.
       38 + *
       39 + * Portions Copyright (C) 2001 - 2013 Apple Inc. All rights reserved.
       40 + * Copyright 2018 Nexenta Systems, Inc.  All rights reserved.
  38   41   */
  39   42  
  40   43  #ifdef DEBUG
  41   44  /* See sys/queue.h */
  42   45  #define QUEUEDEBUG 1
  43   46  #endif
  44   47  
  45   48  #include <sys/param.h>
  46   49  #include <sys/systm.h>
  47   50  #include <sys/atomic.h>
[ 12 lines elided ]
  60   63  #include <sys/time.h>
  61   64  #include <sys/class.h>
  62   65  #include <sys/disp.h>
  63   66  #include <sys/cmn_err.h>
  64   67  #include <sys/zone.h>
  65   68  #include <sys/sdt.h>
  66   69  
  67   70  #include <netsmb/smb_osdep.h>
  68   71  
  69   72  #include <netsmb/smb.h>
       73 +#include <netsmb/smb2.h>
  70   74  #include <netsmb/smb_conn.h>
  71   75  #include <netsmb/smb_rq.h>
       76 +#include <netsmb/smb2_rq.h>
  72   77  #include <netsmb/smb_subr.h>
  73   78  #include <netsmb/smb_tran.h>
  74   79  #include <netsmb/smb_trantcp.h>
  75   80  
  76      -int smb_iod_send_echo(smb_vc_t *);
       81 +/*
       82 + * SMB messages are up to 64K.  Let's leave room for two.
       83 + * If we negotiate up to SMB2, increase these. XXX todo
       84 + */
       85 +static int smb_tcpsndbuf = 0x20000;
       86 +static int smb_tcprcvbuf = 0x20000;
       87 +static int smb_connect_timeout = 10; /* seconds */
  77   88  
       89 +static int smb1_iod_process(smb_vc_t *, mblk_t *);
       90 +static int smb2_iod_process(smb_vc_t *, mblk_t *);
       91 +static int smb_iod_send_echo(smb_vc_t *, cred_t *cr);
       92 +static int smb_iod_logoff(struct smb_vc *vcp, cred_t *cr);
       93 +
  78   94  /*
  79   95   * This is set/cleared when smbfs loads/unloads
  80   96   * No locks should be necessary, because smbfs
  81   97   * can't unload until all the mounts are gone.
  82   98   */
  83   99  static smb_fscb_t *fscb;
  84  100  void
  85  101  smb_fscb_set(smb_fscb_t *cb)
  86  102  {
  87  103          fscb = cb;
  88  104  }
  89  105  
  90  106  static void
  91  107  smb_iod_share_disconnected(smb_share_t *ssp)
  92  108  {
  93  109  
  94  110          smb_share_invalidate(ssp);
  95  111  
  96      -        /* smbfs_dead() */
      112 +        /*
      113 +         * This is the only fscb hook smbfs currently uses.
      114 +         * Replaces smbfs_dead() from Darwin.
      115 +         */
  97  116          if (fscb && fscb->fscb_disconn) {
  98  117                  fscb->fscb_disconn(ssp);
  99  118          }
 100  119  }
 101  120  
 102  121  /*
 103  122   * State changes are important and infrequent.
 104  123   * Make them easily observable via dtrace.
 105  124   */
 106  125  void
[ 28 lines elided ]
 135  154          SMBRQ_UNLOCK(rqp);
 136  155  }
 137  156  
 138  157  static void
 139  158  smb_iod_invrq(struct smb_vc *vcp)
 140  159  {
 141  160          struct smb_rq *rqp;
 142  161  
 143  162          /*
 144  163           * Invalidate all outstanding requests for this connection
      164 +         * Also wakeup iod_muxwant waiters.
 145  165           */
 146  166          rw_enter(&vcp->iod_rqlock, RW_READER);
 147  167          TAILQ_FOREACH(rqp, &vcp->iod_rqlist, sr_link) {
 148  168                  smb_iod_rqprocessed(rqp, ENOTCONN, SMBR_RESTART);
 149  169          }
 150  170          rw_exit(&vcp->iod_rqlock);
      171 +        cv_broadcast(&vcp->iod_muxwait);
 151  172  }
 152  173  
 153  174  /*
 154      - * Called by smb_vc_rele, smb_vc_kill, and by the driver
 155      - * close entry point if the IOD closes its dev handle.
      175 + * Called by smb_vc_rele/smb_vc_kill on last ref, and by
      176 + * the driver close function if the IOD closes its minor.
      177 + * In those cases, the caller should be the IOD thread.
 156  178   *
 157      - * Forcibly kill the connection and IOD.
      179 + * Forcibly kill the connection.
 158  180   */
 159  181  void
 160  182  smb_iod_disconnect(struct smb_vc *vcp)
 161  183  {
 162  184  
 163  185          /*
 164  186           * Inform everyone of the state change.
 165  187           */
 166  188          SMB_VC_LOCK(vcp);
 167  189          if (vcp->vc_state != SMBIOD_ST_DEAD) {
 168  190                  smb_iod_newstate(vcp, SMBIOD_ST_DEAD);
 169  191                  cv_broadcast(&vcp->vc_statechg);
 170  192          }
 171  193          SMB_VC_UNLOCK(vcp);
 172  194  
 173      -        /*
 174      -         * Let's be safe here and avoid doing any
 175      -         * call across the network while trying to
 176      -         * shut things down.  If we just disconnect,
 177      -         * the server will take care of the logoff.
 178      -         */
 179  195          SMB_TRAN_DISCONNECT(vcp);
 180      -
 181      -        /*
 182      -         * If we have an IOD, it should immediately notice
 183      -         * that its connection has closed.  But in case
 184      -         * it doesn't, let's also send it a signal.
 185      -         */
 186      -        SMB_VC_LOCK(vcp);
 187      -        if (vcp->iod_thr != NULL &&
 188      -            vcp->iod_thr != curthread) {
 189      -                tsignal(vcp->iod_thr, SIGKILL);
 190      -        }
 191      -        SMB_VC_UNLOCK(vcp);
 192  196  }
 193  197  
 194  198  /*
 195  199   * Send one request.
 196  200   *
      201 + * SMB1 only
      202 + *
 197  203   * Called by _addrq (for internal requests)
 198  204   * and _sendall (via _addrq, _multirq, _waitrq)
      205 + * Errors are reported via the smb_rq, using:
      206 + *   smb_iod_rqprocessed(rqp, ...)
 199  207   */
 200      -static int
 201      -smb_iod_sendrq(struct smb_rq *rqp)
      208 +static void
      209 +smb1_iod_sendrq(struct smb_rq *rqp)
 202  210  {
 203  211          struct smb_vc *vcp = rqp->sr_vc;
 204  212          mblk_t *m;
 205  213          int error;
 206  214  
 207  215          ASSERT(vcp);
 208      -        ASSERT(SEMA_HELD(&vcp->vc_sendlock));
 209      -        ASSERT(RW_READ_HELD(&vcp->iod_rqlock));
      216 +        ASSERT(RW_WRITE_HELD(&vcp->iod_rqlock));
      217 +        ASSERT((vcp->vc_flags & SMBV_SMB2) == 0);
 210  218  
 211  219          /*
 212      -         * Note: Anything special for SMBR_INTERNAL here?
      220 +         * Internal requests are allowed in any state;
      221 +         * otherwise should be active.
 213  222           */
 214      -        if (vcp->vc_state != SMBIOD_ST_VCACTIVE) {
      223 +        if ((rqp->sr_flags & SMBR_INTERNAL) == 0 &&
      224 +            vcp->vc_state != SMBIOD_ST_VCACTIVE) {
 215  225                  SMBIODEBUG("bad vc_state=%d\n", vcp->vc_state);
 216      -                return (ENOTCONN);
      226 +                smb_iod_rqprocessed(rqp, ENOTCONN, SMBR_RESTART);
      227 +                return;
 217  228          }
 218  229  
      230 +        /*
      231 +         * Overwrite the SMB header with the assigned MID and
      232 +         * (if we're signing) sign it.
      233 +         */
      234 +        smb_rq_fillhdr(rqp);
      235 +        if (rqp->sr_rqflags2 & SMB_FLAGS2_SECURITY_SIGNATURE) {
      236 +                smb_rq_sign(rqp);
      237 +        }
 219  238  
 220  239          /*
 221      -         * On the first send, set the MID and (maybe)
 222      -         * the signing sequence numbers.  The increments
 223      -         * here are serialized by vc_sendlock
      240 +         * The transport send consumes the message and we'd
      241 +         * prefer to keep a copy, so dupmsg() before sending.
 224  242           */
 225      -        if (rqp->sr_sendcnt == 0) {
      243 +        m = dupmsg(rqp->sr_rq.mb_top);
      244 +        if (m == NULL) {
      245 +                error = ENOBUFS;
      246 +                goto fatal;
      247 +        }
 226  248  
 227      -                rqp->sr_mid = vcp->vc_next_mid++;
      249 +#ifdef DTRACE_PROBE2
      250 +        DTRACE_PROBE2(iod_sendrq,
      251 +            (smb_rq_t *), rqp, (mblk_t *), m);
      252 +#endif
 228  253  
 229      -                if (rqp->sr_rqflags2 & SMB_FLAGS2_SECURITY_SIGNATURE) {
 230      -                        /*
 231      -                         * We're signing requests and verifying
 232      -                         * signatures on responses.  Set the
 233      -                         * sequence numbers of the request and
 234      -                         * response here, used in smb_rq_verify.
 235      -                         */
 236      -                        rqp->sr_seqno = vcp->vc_next_seq++;
 237      -                        rqp->sr_rseqno = vcp->vc_next_seq++;
 238      -                }
      254 +        error = SMB_TRAN_SEND(vcp, m);
      255 +        m = 0; /* consumed by SEND */
 239  256  
 240      -                /* Fill in UID, TID, MID, etc. */
 241      -                smb_rq_fillhdr(rqp);
 242      -
 243      -                /*
 244      -                 * Sign the message now that we're finally done
 245      -                 * filling in the SMB header fields, etc.
 246      -                 */
 247      -                if (rqp->sr_rqflags2 & SMB_FLAGS2_SECURITY_SIGNATURE) {
 248      -                        smb_rq_sign(rqp);
 249      -                }
      257 +        rqp->sr_lerror = error;
      258 +        if (error == 0) {
      259 +                SMBRQ_LOCK(rqp);
      260 +                rqp->sr_flags |= SMBR_SENT;
      261 +                rqp->sr_state = SMBRQ_SENT;
      262 +                SMBRQ_UNLOCK(rqp);
      263 +                return;
 250  264          }
 251      -        if (rqp->sr_sendcnt++ >= 60/SMBSBTIMO) { /* one minute */
 252      -                smb_iod_rqprocessed(rqp, rqp->sr_lerror, SMBR_RESTART);
      265 +        /*
      266 +         * Transport send returned an error.
      267 +         * Was it a fatal one?
      268 +         */
      269 +        if (SMB_TRAN_FATAL(vcp, error)) {
 253  270                  /*
 254      -                 * If all attempts to send a request failed, then
 255      -                 * something is seriously hosed.
      271 +                 * No further attempts should be made
 256  272                   */
 257      -                return (ENOTCONN);
      273 +        fatal:
      274 +                SMBSDEBUG("TRAN_SEND returned fatal error %d\n", error);
      275 +                smb_iod_rqprocessed(rqp, error, SMBR_RESTART);
      276 +                return;
 258  277          }
      278 +}
 259  279  
      280 +/*
      281 + * Send one request.
      282 + *
      283 + * SMB2 only
      284 + *
      285 + * Called by _addrq (for internal requests)
      286 + * and _sendall (via _addrq, _multirq, _waitrq)
      287 + * Errors are reported via the smb_rq, using:
      288 + *   smb_iod_rqprocessed(rqp, ...)
      289 + */
      290 +static void
      291 +smb2_iod_sendrq(struct smb_rq *rqp)
      292 +{
      293 +        struct smb_rq *c_rqp;   /* compound */
      294 +        struct smb_vc *vcp = rqp->sr_vc;
      295 +        mblk_t *top_m;
      296 +        mblk_t *cur_m;
      297 +        int error;
      298 +
      299 +        ASSERT(vcp);
      300 +        ASSERT(RW_WRITE_HELD(&vcp->iod_rqlock));
      301 +        ASSERT((vcp->vc_flags & SMBV_SMB2) != 0);
      302 +
 260  303          /*
 261      -         * Replaced m_copym() with Solaris copymsg() which does the same
 262      -         * work when we want to do a M_COPYALL.
 263      -         * m = m_copym(rqp->sr_rq.mb_top, 0, M_COPYALL, 0);
      304 +         * Internal requests are allowed in any state;
      305 +         * otherwise should be active.
 264  306           */
 265      -        m = copymsg(rqp->sr_rq.mb_top);
      307 +        if ((rqp->sr_flags & SMBR_INTERNAL) == 0 &&
      308 +            vcp->vc_state != SMBIOD_ST_VCACTIVE) {
      309 +                SMBIODEBUG("bad vc_state=%d\n", vcp->vc_state);
      310 +                smb_iod_rqprocessed(rqp, ENOTCONN, SMBR_RESTART);
      311 +                return;
      312 +        }
 266  313  
 267      -#ifdef DTRACE_PROBE
 268      -        DTRACE_PROBE2(smb_iod_sendrq,
 269      -            (smb_rq_t *), rqp, (mblk_t *), m);
 270      -#else
 271      -        SMBIODEBUG("M:%04x, P:%04x, U:%04x, T:%04x\n", rqp->sr_mid, 0, 0, 0);
 272      -#endif
 273      -        m_dumpm(m);
      314 +        /*
      315 +         * Overwrite the SMB header with the assigned MID and
      316 +         * (if we're signing) sign it.  If there are compounded
      317 +         * requests after the top one, do those too.
      318 +         */
      319 +        smb2_rq_fillhdr(rqp);
      320 +        if (rqp->sr2_rqflags & SMB2_FLAGS_SIGNED) {
      321 +                smb2_rq_sign(rqp);
      322 +        }
      323 +        c_rqp = rqp->sr2_compound_next;
      324 +        while (c_rqp != NULL) {
      325 +                smb2_rq_fillhdr(c_rqp);
      326 +                if (c_rqp->sr2_rqflags & SMB2_FLAGS_SIGNED) {
      327 +                        smb2_rq_sign(c_rqp);
      328 +                }
      329 +                c_rqp = c_rqp->sr2_compound_next;
      330 +        }
 274  331  
 275      -        if (m != NULL) {
 276      -                error = SMB_TRAN_SEND(vcp, m);
 277      -                m = 0; /* consumed by SEND */
 278      -        } else
      332 +        /*
      333 +         * The transport send consumes the message and we'd
      334 +         * prefer to keep a copy, so dupmsg() before sending.
      335 +         * We also need this to build the compound message
      336 +         * that we'll actually send.  The message offset at
      337 +         * the start of each compounded message should be
      338 +         * eight-byte aligned.  The caller preparing the
      339 +         * compounded request has to take care of that
      340 +         * before we get here and sign messages etc.
      341 +         */
      342 +        top_m = dupmsg(rqp->sr_rq.mb_top);
      343 +        if (top_m == NULL) {
 279  344                  error = ENOBUFS;
      345 +                goto fatal;
      346 +        }
      347 +        c_rqp = rqp->sr2_compound_next;
      348 +        while (c_rqp != NULL) {
      349 +                size_t len = msgdsize(top_m);
      350 +                ASSERT((len & 7) == 0);
      351 +                cur_m = dupmsg(c_rqp->sr_rq.mb_top);
      352 +                if (cur_m == NULL) {
      353 +                        freemsg(top_m);
      354 +                        error = ENOBUFS;
      355 +                        goto fatal;
      356 +                }
       357 +                linkb(top_m, cur_m);
           +                c_rqp = c_rqp->sr2_compound_next;
       358 +        }
 280  359  
      360 +        DTRACE_PROBE2(iod_sendrq,
      361 +            (smb_rq_t *), rqp, (mblk_t *), top_m);
      362 +
      363 +        error = SMB_TRAN_SEND(vcp, top_m);
      364 +        top_m = 0; /* consumed by SEND */
      365 +
 281  366          rqp->sr_lerror = error;
 282  367          if (error == 0) {
 283  368                  SMBRQ_LOCK(rqp);
 284  369                  rqp->sr_flags |= SMBR_SENT;
 285  370                  rqp->sr_state = SMBRQ_SENT;
 286      -                if (rqp->sr_flags & SMBR_SENDWAIT)
 287      -                        cv_broadcast(&rqp->sr_cond);
 288  371                  SMBRQ_UNLOCK(rqp);
 289      -                return (0);
      372 +                return;
 290  373          }
 291  374          /*
 292      -         * Check for fatal errors
      375 +         * Transport send returned an error.
      376 +         * Was it a fatal one?
 293  377           */
 294  378          if (SMB_TRAN_FATAL(vcp, error)) {
 295  379                  /*
 296  380                   * No further attempts should be made
 297  381                   */
      382 +        fatal:
 298  383                  SMBSDEBUG("TRAN_SEND returned fatal error %d\n", error);
 299      -                return (ENOTCONN);
      384 +                smb_iod_rqprocessed(rqp, error, SMBR_RESTART);
      385 +                return;
 300  386          }
 301      -        if (error)
 302      -                SMBSDEBUG("TRAN_SEND returned non-fatal error %d\n", error);
 303      -
 304      -#ifdef APPLE
 305      -        /* If proc waiting on rqp was signaled... */
 306      -        if (smb_rq_intr(rqp))
 307      -                smb_iod_rqprocessed(rqp, EINTR, 0);
 308      -#endif
 309      -
 310      -        return (0);
 311  387  }
 312  388  
      389 +/*
      390 + * Receive one NetBIOS (or NBT over TCP) message.  If none have arrived,
      391 + * wait up to SMB_NBTIMO (15 sec.) for one to arrive, and then if still
      392 + * none have arrived, return ETIME.
      393 + */
 313  394  static int
 314      -smb_iod_recv1(struct smb_vc *vcp, mblk_t **mpp)
      395 +smb_iod_recvmsg(struct smb_vc *vcp, mblk_t **mpp)
 315  396  {
 316  397          mblk_t *m;
 317      -        uchar_t *hp;
 318  398          int error;
 319  399  
 320  400  top:
 321  401          m = NULL;
 322  402          error = SMB_TRAN_RECV(vcp, &m);
 323  403          if (error == EAGAIN)
 324  404                  goto top;
 325  405          if (error)
 326  406                  return (error);
 327      -        ASSERT(m);
      407 +        ASSERT(m != NULL);
 328  408  
 329      -        m = m_pullup(m, SMB_HDRLEN);
      409 +        m = m_pullup(m, 4);
 330  410          if (m == NULL) {
 331  411                  return (ENOSR);
 332  412          }
 333  413  
 334      -        /*
 335      -         * Check the SMB header
 336      -         */
 337      -        hp = mtod(m, uchar_t *);
 338      -        if (bcmp(hp, SMB_SIGNATURE, SMB_SIGLEN) != 0) {
 339      -                m_freem(m);
 340      -                return (EPROTO);
 341      -        }
 342      -
 343  414          *mpp = m;
 344  415          return (0);
 345  416  }
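
The m_pullup(m, 4) above only guarantees that the 4-byte protocol signature is contiguous; the per-dialect parsers below (smb1_iod_process, smb2_iod_process) pull up their full headers and dispatch on that signature. A minimal sketch of the signature values involved, using the standard first bytes from MS-CIFS/MS-SMB2 (these correspond to the SMB_HDR_V1/V2/V3E macros used later in this file; the names and helper below are illustrative, not the driver's definitions):

    #include <stdint.h>

    /* Standard protocol signature first bytes; real defines live in the netsmb headers. */
    #define SMB_SIG_V1   0xFF   /* 0xFF 'S' 'M' 'B' - SMB1 */
    #define SMB_SIG_V2   0xFE   /* 0xFE 'S' 'M' 'B' - SMB2/3 */
    #define SMB_SIG_V3E  0xFD   /* 0xFD 'S' 'M' 'B' - SMB3 transform (encrypted) */

    /* Illustrative only: classify a received message by its first four bytes. */
    static int
    smb_sig_dialect(const uint8_t sig[4])
    {
            if (sig[1] != 'S' || sig[2] != 'M' || sig[3] != 'B')
                    return (-1);            /* not an SMB message */
            switch (sig[0]) {
            case SMB_SIG_V1:
                    return (1);
            case SMB_SIG_V2:
                    return (2);
            case SMB_SIG_V3E:
                    return (3);             /* decrypt before parsing */
            default:
                    return (-1);
            }
    }
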
 346  417  
 347  418  /*
      419 + * How long should we keep around an unused VC (connection)?
      420 + * There's usually a good chance connections will be reused,
      421 + * so the default is to keep such connections for 5 min.
      422 + */
      423 +#ifdef  DEBUG
      424 +int smb_iod_idle_keep_time = 60;        /* seconds */
      425 +#else
      426 +int smb_iod_idle_keep_time = 300;       /* seconds */
      427 +#endif
      428 +
      429 +/*
 348  430   * Process incoming packets
 349  431   *
 350      - * This is the "reader" loop, run by the IOD thread
 351      - * while in state SMBIOD_ST_VCACTIVE.  The loop now
 352      - * simply blocks in the socket recv until either a
 353      - * message arrives, or a disconnect.
      432 + * This is the "reader" loop, run by the IOD thread.  Normally we're in
      433 + * state SMBIOD_ST_VCACTIVE here, but during reconnect we're called in
       434 + * other states with poll==TRUE.
 354  435   *
 355      - * Any non-zero error means the IOD should terminate.
      436 + * A non-zero error return here causes the IOD work loop to terminate.
 356  437   */
 357  438  int
 358      -smb_iod_recvall(struct smb_vc *vcp)
      439 +smb_iod_recvall(struct smb_vc *vcp, boolean_t poll)
 359  440  {
 360      -        struct smb_rq *rqp;
 361  441          mblk_t *m;
 362      -        uchar_t *hp;
 363      -        ushort_t mid;
 364  442          int error = 0;
 365      -        int etime_count = 0; /* for "server not responding", etc. */
      443 +        int etime_idle = 0;     /* How many 15 sec. "ticks" idle. */
      444 +        int etime_count = 0;    /* ... and when we have requests. */
 366  445  
 367  446          for (;;) {
 368  447                  /*
 369  448                   * Check whether someone "killed" this VC,
 370  449                   * or is asking the IOD to terminate.
 371  450                   */
 372      -
 373      -                if (vcp->vc_state != SMBIOD_ST_VCACTIVE) {
 374      -                        SMBIODEBUG("bad vc_state=%d\n", vcp->vc_state);
 375      -                        error = 0;
 376      -                        break;
 377      -                }
 378      -
 379  451                  if (vcp->iod_flags & SMBIOD_SHUTDOWN) {
 380  452                          SMBIODEBUG("SHUTDOWN set\n");
 381  453                          /* This IOD thread will terminate. */
 382  454                          SMB_VC_LOCK(vcp);
 383  455                          smb_iod_newstate(vcp, SMBIOD_ST_DEAD);
 384  456                          cv_broadcast(&vcp->vc_statechg);
 385  457                          SMB_VC_UNLOCK(vcp);
 386  458                          error = EINTR;
 387  459                          break;
 388  460                  }
 389  461  
 390  462                  m = NULL;
 391      -                error = smb_iod_recv1(vcp, &m);
      463 +                error = smb_iod_recvmsg(vcp, &m);
 392  464  
      465 +                /*
      466 +                 * Internal requests (reconnecting) call this in a loop
      467 +                 * (with poll==TRUE) until the request completes.
      468 +                 */
      469 +                if (error == ETIME && poll)
      470 +                        break;
      471 +
 393  472                  if (error == ETIME &&
 394  473                      vcp->iod_rqlist.tqh_first != NULL) {
      474 +
 395  475                          /*
 396      -                         * Nothing received for 15 seconds and
 397      -                         * we have requests in the queue.
      476 +                         * Nothing received and requests waiting.
      477 +                         * Increment etime_count.  If we were idle,
      478 +                         * skip the 1st tick, because we started
      479 +                         * waiting before there were any requests.
 398  480                           */
 399      -                        etime_count++;
      481 +                        if (etime_idle != 0) {
      482 +                                etime_idle = 0;
      483 +                        } else if (etime_count < INT16_MAX) {
      484 +                                etime_count++;
      485 +                        }
 400  486  
 401  487                          /*
 402      -                         * Once, at 15 sec. notify callbacks
 403      -                         * and print the warning message.
      488 +                         * ETIME and requests in the queue.
      489 +                         * The first time (at 15 sec.)
      490 +                         * Log an error (just once).
 404  491                           */
 405      -                        if (etime_count == 1) {
 406      -                                /* Was: smb_iod_notify_down(vcp); */
 407      -                                if (fscb && fscb->fscb_down)
 408      -                                        smb_vc_walkshares(vcp,
 409      -                                            fscb->fscb_down);
      492 +                        if (etime_count > 0 &&
      493 +                            vcp->iod_noresp == B_FALSE) {
      494 +                                vcp->iod_noresp = B_TRUE;
 410  495                                  zprintf(vcp->vc_zoneid,
 411  496                                      "SMB server %s not responding\n",
 412  497                                      vcp->vc_srvname);
 413  498                          }
 414      -
 415  499                          /*
 416      -                         * At 30 sec. try sending an echo, and then
 417      -                         * once a minute thereafter.
      500 +                         * At 30 sec. try sending an echo, which
      501 +                         * should cause some response.
 418  502                           */
 419      -                        if ((etime_count & 3) == 2) {
 420      -                                (void) smb_iod_send_echo(vcp);
      503 +                        if (etime_count == 2) {
      504 +                                SMBIODEBUG("send echo\n");
      505 +                                (void) smb_iod_send_echo(vcp, CRED());
 421  506                          }
 422      -
      507 +                        /*
      508 +                         * At 45 sec. give up on the connection
      509 +                         * and try to reconnect.
      510 +                         */
      511 +                        if (etime_count == 3) {
      512 +                                SMB_VC_LOCK(vcp);
      513 +                                smb_iod_newstate(vcp, SMBIOD_ST_RECONNECT);
      514 +                                SMB_VC_UNLOCK(vcp);
      515 +                                SMB_TRAN_DISCONNECT(vcp);
      516 +                                break;
      517 +                        }
 423  518                          continue;
 424      -                } /* ETIME && requests in queue */
      519 +                } /* ETIME and requests in the queue */
 425  520  
 426  521                  if (error == ETIME) {
 427  522                          /*
 428      -                         * If the IOD thread holds the last reference
 429      -                         * to this VC, let the IOD thread terminate.
      523 +                         * Nothing received and no active requests.
      524 +                         *
      525 +                         * If we've received nothing from the server for
      526 +                         * smb_iod_idle_keep_time seconds, and the IOD
      527 +                         * thread holds the last reference to this VC,
      528 +                         * move to state IDLE and drop the TCP session.
      529 +                         * The IDLE handler will destroy the VC unless
      530 +                         * vc_state goes to RECONNECT before then.
 430  531                           */
 431      -                        if (vcp->vc_co.co_usecount > 1)
      532 +                        etime_count = 0;
      533 +                        if (etime_idle < INT16_MAX)
      534 +                                etime_idle++;
      535 +                        if ((etime_idle * SMB_NBTIMO) <
      536 +                            smb_iod_idle_keep_time)
 432  537                                  continue;
 433  538                          SMB_VC_LOCK(vcp);
 434  539                          if (vcp->vc_co.co_usecount == 1) {
 435      -                                smb_iod_newstate(vcp, SMBIOD_ST_DEAD);
      540 +                                smb_iod_newstate(vcp, SMBIOD_ST_IDLE);
 436  541                                  SMB_VC_UNLOCK(vcp);
      542 +                                SMBIODEBUG("logoff & disconnect\n");
      543 +                                (void) smb_iod_logoff(vcp, CRED());
      544 +                                SMB_TRAN_DISCONNECT(vcp);
 437  545                                  error = 0;
 438  546                                  break;
 439  547                          }
 440  548                          SMB_VC_UNLOCK(vcp);
 441  549                          continue;
 442  550                  } /* error == ETIME */
 443  551  
 444  552                  if (error) {
 445  553                          /*
 446      -                         * The recv. above returned some error
 447      -                         * we can't continue from i.e. ENOTCONN.
 448      -                         * It's dangerous to continue here.
 449      -                         * (possible infinite loop!)
 450      -                         *
 451      -                         * If we have requests enqueued, next
 452      -                         * state is reconnecting, else idle.
      554 +                         * The recv above returned an error indicating
      555 +                         * that our TCP session is no longer usable.
      556 +                         * Disconnect the session and get ready to
      557 +                         * reconnect.  If we have pending requests,
      558 +                         * move to state reconnect immediately;
      559 +                         * otherwise move to state IDLE until a
      560 +                         * request is issued on this VC.
 453  561                           */
 454      -                        int state;
 455  562                          SMB_VC_LOCK(vcp);
 456      -                        state = (vcp->iod_rqlist.tqh_first != NULL) ?
 457      -                            SMBIOD_ST_RECONNECT : SMBIOD_ST_IDLE;
 458      -                        smb_iod_newstate(vcp, state);
      563 +                        if (vcp->iod_rqlist.tqh_first != NULL)
      564 +                                smb_iod_newstate(vcp, SMBIOD_ST_RECONNECT);
      565 +                        else
      566 +                                smb_iod_newstate(vcp, SMBIOD_ST_IDLE);
 459  567                          cv_broadcast(&vcp->vc_statechg);
 460  568                          SMB_VC_UNLOCK(vcp);
 461      -                        error = 0;
      569 +                        SMB_TRAN_DISCONNECT(vcp);
 462  570                          break;
 463  571                  }
 464  572  
 465  573                  /*
 466  574                   * Received something.  Yea!
 467  575                   */
 468      -                if (etime_count) {
 469      -                        etime_count = 0;
      576 +                etime_count = 0;
      577 +                etime_idle = 0;
 470  578  
      579 +                /*
      580 +                 * If we just completed a reconnect after logging
      581 +                 * "SMB server %s not responding" then log OK now.
      582 +                 */
      583 +                if (vcp->iod_noresp) {
      584 +                        vcp->iod_noresp = B_FALSE;
 471  585                          zprintf(vcp->vc_zoneid, "SMB server %s OK\n",
 472  586                              vcp->vc_srvname);
      587 +                }
 473  588  
 474      -                        /* Was: smb_iod_notify_up(vcp); */
 475      -                        if (fscb && fscb->fscb_up)
 476      -                                smb_vc_walkshares(vcp, fscb->fscb_up);
      589 +                if ((vcp->vc_flags & SMBV_SMB2) != 0) {
      590 +                        error = smb2_iod_process(vcp, m);
      591 +                } else {
      592 +                        error = smb1_iod_process(vcp, m);
 477  593                  }
 478  594  
 479  595                  /*
 480      -                 * Have an SMB packet.  The SMB header was
 481      -                 * checked in smb_iod_recv1().
 482      -                 * Find the request...
      596 +                 * Reconnect calls this in a loop with poll=TRUE
      597 +                 * We've received a response, so break now.
 483  598                   */
 484      -                hp = mtod(m, uchar_t *);
 485      -                /*LINTED*/
 486      -                mid = letohs(SMB_HDRMID(hp));
 487      -                SMBIODEBUG("mid %04x\n", (uint_t)mid);
      599 +                if (poll) {
      600 +                        error = 0;
      601 +                        break;
      602 +                }
      603 +        }
 488  604  
 489      -                rw_enter(&vcp->iod_rqlock, RW_READER);
 490      -                TAILQ_FOREACH(rqp, &vcp->iod_rqlist, sr_link) {
      605 +        return (error);
      606 +}
 491  607  
 492      -                        if (rqp->sr_mid != mid)
 493      -                                continue;
      608 +/*
      609 + * Have what should be an SMB1 reply.  Check and parse the header,
      610 + * then use the message ID to find the request this belongs to and
      611 + * post it on that request.
      612 + *
      613 + * Returns an error if the reader should give up.
      614 + * To be safe, error if we read garbage.
      615 + */
      616 +static int
      617 +smb1_iod_process(smb_vc_t *vcp, mblk_t *m)
      618 +{
      619 +        struct mdchain md;
      620 +        struct smb_rq *rqp;
      621 +        uint8_t cmd, sig[4];
      622 +        uint16_t mid;
      623 +        int err, skip;
 494  624  
 495      -                        DTRACE_PROBE2(smb_iod_recvrq,
 496      -                            (smb_rq_t *), rqp, (mblk_t *), m);
 497      -                        m_dumpm(m);
      625 +        m = m_pullup(m, SMB_HDRLEN);
      626 +        if (m == NULL)
      627 +                return (ENOMEM);
 498  628  
 499      -                        SMBRQ_LOCK(rqp);
 500      -                        if (rqp->sr_rp.md_top == NULL) {
 501      -                                md_initm(&rqp->sr_rp, m);
      629 +        /*
      630 +         * Note: Intentionally do NOT md_done(&md)
      631 +         * because that would free the message and
      632 +         * we just want to peek here.
      633 +         */
      634 +        md_initm(&md, m);
      635 +
      636 +        /*
      637 +         * Check the SMB header version and get the MID.
      638 +         *
      639 +         * The header version should be SMB1 except when we're
      640 +         * doing SMB1-to-SMB2 negotiation, in which case we may
      641 +         * see an SMB2 header with message ID=0 (only allowed in
       642 +         * vc_state == SMBIOD_ST_CONNECTED -- negotiating).
      643 +         */
      644 +        err = md_get_mem(&md, sig, 4, MB_MSYSTEM);
      645 +        if (err)
      646 +                return (err);
      647 +        if (sig[1] != 'S' || sig[2] != 'M' || sig[3] != 'B') {
      648 +                goto bad_hdr;
      649 +        }
      650 +        switch (sig[0]) {
      651 +        case SMB_HDR_V1:        /* SMB1 */
      652 +                md_get_uint8(&md, &cmd);
      653 +                /* Skip to and get the MID. At offset 5 now. */
      654 +                skip = SMB_HDR_OFF_MID - 5;
      655 +                md_get_mem(&md, NULL, skip, MB_MSYSTEM);
      656 +                err = md_get_uint16le(&md, &mid);
      657 +                if (err)
      658 +                        return (err);
      659 +                break;
      660 +        case SMB_HDR_V2:        /* SMB2+ */
      661 +                if (vcp->vc_state == SMBIOD_ST_CONNECTED) {
      662 +                        /*
      663 +                         * No need to look, can only be
      664 +                         * MID=0, cmd=negotiate
      665 +                         */
      666 +                        cmd = SMB_COM_NEGOTIATE;
      667 +                        mid = 0;
      668 +                        break;
      669 +                }
      670 +                /* FALLTHROUGH */
      671 +        bad_hdr:
      672 +        default:
      673 +                SMBIODEBUG("Bad SMB hdr\n");
      674 +                m_freem(m);
      675 +                return (EPROTO);
      676 +        }
      677 +
      678 +        /*
       679 +         * Find the request and post the reply
      680 +         */
      681 +        rw_enter(&vcp->iod_rqlock, RW_READER);
      682 +        TAILQ_FOREACH(rqp, &vcp->iod_rqlist, sr_link) {
      683 +
      684 +                if (rqp->sr_mid != mid)
      685 +                        continue;
      686 +
      687 +                DTRACE_PROBE2(iod_post_reply,
      688 +                    (smb_rq_t *), rqp, (mblk_t *), m);
      689 +                m_dumpm(m);
      690 +
      691 +                SMBRQ_LOCK(rqp);
      692 +                if (rqp->sr_rp.md_top == NULL) {
      693 +                        md_initm(&rqp->sr_rp, m);
      694 +                } else {
      695 +                        if (rqp->sr_flags & SMBR_MULTIPACKET) {
      696 +                                md_append_record(&rqp->sr_rp, m);
 502  697                          } else {
 503      -                                if (rqp->sr_flags & SMBR_MULTIPACKET) {
 504      -                                        md_append_record(&rqp->sr_rp, m);
 505      -                                } else {
 506      -                                        SMBRQ_UNLOCK(rqp);
 507      -                                        SMBSDEBUG("duplicate response %d "
 508      -                                            "(ignored)\n", mid);
 509      -                                        break;
 510      -                                }
      698 +                                SMBRQ_UNLOCK(rqp);
      699 +                                rqp = NULL;
      700 +                                break;
 511  701                          }
 512      -                        smb_iod_rqprocessed_LH(rqp, 0, 0);
 513      -                        SMBRQ_UNLOCK(rqp);
 514      -                        break;
 515  702                  }
      703 +                smb_iod_rqprocessed_LH(rqp, 0, 0);
      704 +                SMBRQ_UNLOCK(rqp);
      705 +                break;
      706 +        }
      707 +        rw_exit(&vcp->iod_rqlock);
 516  708  
 517      -                if (rqp == NULL) {
 518      -                        int cmd = SMB_HDRCMD(hp);
      709 +        if (rqp == NULL) {
      710 +                if (cmd != SMB_COM_ECHO) {
      711 +                        SMBSDEBUG("drop resp: MID 0x%04x\n", (uint_t)mid);
      712 +                }
      713 +                m_freem(m);
      714 +                /*
      715 +                 * Keep going.  It's possible this reply came
      716 +                 * after the request timed out and went away.
      717 +                 */
      718 +        }
      719 +        return (0);
      720 +}
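
For reference, the skip arithmetic above (skip = SMB_HDR_OFF_MID - 5) follows from the fixed 32-byte SMB1 header: by the time the MID is needed, the parser has already consumed the 4-byte signature and the 1-byte command, and the MID sits at offset 30, so 25 bytes are skipped. A sketch of the layout per MS-CIFS, with illustrative names (the real offsets, including SMB_HDR_OFF_MID, are assumed to come from the netsmb headers):

    /* SMB1 (CIFS) header layout, 32 bytes total (SMB_HDRLEN). */
    #define SMB1_OFF_SIGNATURE   0    /* 0xFF 'S' 'M' 'B'    (4 bytes) */
    #define SMB1_OFF_COMMAND     4    /* command             (1 byte)  */
    #define SMB1_OFF_STATUS      5    /* NT status           (4 bytes) */
    #define SMB1_OFF_FLAGS       9    /* flags               (1 byte)  */
    #define SMB1_OFF_FLAGS2     10    /* flags2              (2 bytes) */
    #define SMB1_OFF_PIDHIGH    12    /* PID high            (2 bytes) */
    #define SMB1_OFF_SECSIG     14    /* security signature  (8 bytes) */
    #define SMB1_OFF_RESERVED   22    /* reserved            (2 bytes) */
    #define SMB1_OFF_TID        24    /* tree ID             (2 bytes) */
    #define SMB1_OFF_PIDLOW     26    /* PID low             (2 bytes) */
    #define SMB1_OFF_UID        28    /* user ID             (2 bytes) */
    #define SMB1_OFF_MID        30    /* message ID          (2 bytes) */
    #define SMB1_HDRLEN         32
    /* After sig (4) + cmd (1) are read, SMB1_OFF_MID - 5 = 25 bytes are skipped. */
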
 519  721  
 520      -                        if (cmd != SMB_COM_ECHO)
 521      -                                SMBSDEBUG("drop resp: mid %d, cmd %d\n",
 522      -                                    (uint_t)mid, cmd);
 523      -/*                      smb_printrqlist(vcp); */
      722 +/*
      723 + * Have what should be an SMB2 reply.  Check and parse the header,
      724 + * then use the message ID to find the request this belongs to and
      725 + * post it on that request.
      726 + *
      727 + * We also want to apply any credit grant in this reply now,
      728 + * rather than waiting for the owner to wake up.
      729 + */
      730 +static int
      731 +smb2_iod_process(smb_vc_t *vcp, mblk_t *m)
      732 +{
      733 +        struct mdchain md;
      734 +        struct smb_rq *rqp;
      735 +        uint8_t sig[4];
      736 +        mblk_t *next_m = NULL;
      737 +        uint64_t message_id, async_id;
      738 +        uint32_t flags, next_cmd_off, status;
      739 +        uint16_t command, credits_granted;
      740 +        int err;
      741 +
      742 +top:
      743 +        m = m_pullup(m, SMB2_HDRLEN);
      744 +        if (m == NULL)
      745 +                return (ENOMEM);
      746 +
      747 +        /*
      748 +         * Note: Intentionally do NOT md_done(&md)
      749 +         * because that would free the message and
      750 +         * we just want to peek here.
      751 +         */
      752 +        md_initm(&md, m);
      753 +
      754 +        /*
      755 +         * Check the SMB header.  Must be SMB2
      756 +         * (and later, could be SMB3 encrypted)
      757 +         */
      758 +        err = md_get_mem(&md, sig, 4, MB_MSYSTEM);
      759 +        if (err)
      760 +                return (err);
      761 +        if (sig[1] != 'S' || sig[2] != 'M' || sig[3] != 'B') {
      762 +                goto bad_hdr;
      763 +        }
      764 +        switch (sig[0]) {
      765 +        case SMB_HDR_V2:
      766 +                break;
      767 +        case SMB_HDR_V3E:
      768 +                /*
      769 +                 * Todo: If encryption enabled, decrypt the message
      770 +                 * and restart processing on the cleartext.
      771 +                 */
      772 +                /* FALLTHROUGH */
      773 +        bad_hdr:
      774 +        default:
      775 +                SMBIODEBUG("Bad SMB2 hdr\n");
      776 +                m_freem(m);
      777 +                return (EPROTO);
      778 +        }
      779 +
      780 +        /*
      781 +         * Parse the rest of the SMB2 header,
      782 +         * skipping what we don't need.
      783 +         */
      784 +        md_get_uint32le(&md, NULL);     /* length, credit_charge */
      785 +        md_get_uint32le(&md, &status);
      786 +        md_get_uint16le(&md, &command);
      787 +        md_get_uint16le(&md, &credits_granted);
      788 +        md_get_uint32le(&md, &flags);
      789 +        md_get_uint32le(&md, &next_cmd_off);
      790 +        md_get_uint64le(&md, &message_id);
      791 +        if (flags & SMB2_FLAGS_ASYNC_COMMAND) {
      792 +                md_get_uint64le(&md, &async_id);
      793 +        } else {
      794 +                /* PID, TID (not needed) */
      795 +                async_id = 0;
      796 +        }
      797 +
      798 +        /*
      799 +         * If this is a compound reply, split it.
      800 +         * Next must be 8-byte aligned.
      801 +         */
      802 +        if (next_cmd_off != 0) {
      803 +                if ((next_cmd_off & 7) != 0)
      804 +                        SMBIODEBUG("Misaligned next cmd\n");
      805 +                else
      806 +                        next_m = m_split(m, next_cmd_off, 1);
      807 +        }
      808 +
      809 +        /*
      810 +         * Apply the credit grant
      811 +         */
      812 +        rw_enter(&vcp->iod_rqlock, RW_WRITER);
      813 +        vcp->vc2_limit_message_id += credits_granted;
      814 +
      815 +        /*
       816 +         * Find the request and post the reply
      817 +         */
      818 +        rw_downgrade(&vcp->iod_rqlock);
      819 +        TAILQ_FOREACH(rqp, &vcp->iod_rqlist, sr_link) {
      820 +
      821 +                if (rqp->sr2_messageid != message_id)
      822 +                        continue;
      823 +
      824 +                DTRACE_PROBE2(iod_post_reply,
      825 +                    (smb_rq_t *), rqp, (mblk_t *), m);
      826 +                m_dumpm(m);
      827 +
      828 +                /*
      829 +                 * If this is an interim response, just save the
       830 +                 * async ID but don't wake up the request.
      831 +                 * Don't need SMBRQ_LOCK for this.
      832 +                 */
      833 +                if (status == NT_STATUS_PENDING && async_id != 0) {
      834 +                        rqp->sr2_rspasyncid = async_id;
 524  835                          m_freem(m);
      836 +                        break;
 525  837                  }
 526      -                rw_exit(&vcp->iod_rqlock);
 527  838  
      839 +                SMBRQ_LOCK(rqp);
      840 +                if (rqp->sr_rp.md_top == NULL) {
      841 +                        md_initm(&rqp->sr_rp, m);
      842 +                } else {
      843 +                        SMBRQ_UNLOCK(rqp);
      844 +                        rqp = NULL;
      845 +                        break;
      846 +                }
      847 +                smb_iod_rqprocessed_LH(rqp, 0, 0);
      848 +                SMBRQ_UNLOCK(rqp);
      849 +                break;
 528  850          }
      851 +        rw_exit(&vcp->iod_rqlock);
 529  852  
 530      -        return (error);
      853 +        if (rqp == NULL) {
      854 +                if (command != SMB2_ECHO) {
      855 +                        SMBSDEBUG("drop resp: MID %lld\n",
      856 +                            (long long)message_id);
      857 +                }
      858 +                m_freem(m);
      859 +                /*
      860 +                 * Keep going.  It's possible this reply came
      861 +                 * after the request timed out and went away.
      862 +                 */
      863 +        }
      864 +
      865 +        /*
      866 +         * If we split a compound reply, continue with the
      867 +         * next part of the compound.
      868 +         */
      869 +        if (next_m != NULL) {
      870 +                m = next_m;
      871 +                goto top;
      872 +        }
      873 +
      874 +        return (0);
 531  875  }
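
The md_get_* sequence above walks the fixed 64-byte SMB2 header in wire order. A sketch of that layout per MS-SMB2 (the struct and its field names are illustrative; the code above parses the fields directly from the mdchain rather than mapping a structure):

    #include <stdint.h>

    /* SMB2 header, 64 bytes, little-endian on the wire (per MS-SMB2). */
    #pragma pack(1)
    struct smb2_hdr_sketch {
            uint8_t   protocol_id[4];   /* 0xFE 'S' 'M' 'B' */
            uint16_t  structure_size;   /* always 64 */
            uint16_t  credit_charge;    /* read together with structure_size above */
            uint32_t  status;           /* NT status; NT_STATUS_PENDING marks interim replies */
            uint16_t  command;
            uint16_t  credit_grant;     /* credits granted; applied to vc2_limit_message_id */
            uint32_t  flags;            /* e.g. SMB2_FLAGS_ASYNC_COMMAND */
            uint32_t  next_command;     /* offset of next compounded reply, 8-byte aligned */
            uint64_t  message_id;
            union {
                    struct {
                            uint32_t reserved;  /* process ID, reserved */
                            uint32_t tree_id;
                    } sync;
                    uint64_t async_id;  /* present when ASYNC_COMMAND is set */
            } u;
            uint64_t  session_id;
            uint8_t   signature[16];
    };
    #pragma pack()
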
 532  876  
 533  877  /*
 534  878   * The IOD receiver thread has requests pending and
 535  879   * has not received anything in a while.  Try to
 536  880   * send an SMB echo request.  It's tricky to do a
 537  881   * send from the IOD thread because we can't block.
 538  882   *
 539  883   * Using tmo=SMBNOREPLYWAIT in the request
 540  884   * so smb_rq_reply will skip smb_iod_waitrq.
 541  885   * The smb_smb_echo call uses SMBR_INTERNAL
 542  886   * to avoid calling smb_iod_sendall().
 543  887   */
 544      -int
 545      -smb_iod_send_echo(smb_vc_t *vcp)
      888 +static int
      889 +smb_iod_send_echo(smb_vc_t *vcp, cred_t *cr)
 546  890  {
 547  891          smb_cred_t scred;
 548      -        int err;
      892 +        int err, tmo = SMBNOREPLYWAIT;
 549  893  
 550      -        smb_credinit(&scred, NULL);
 551      -        err = smb_smb_echo(vcp, &scred, SMBNOREPLYWAIT);
      894 +        ASSERT(vcp->iod_thr == curthread);
      895 +
      896 +        smb_credinit(&scred, cr);
      897 +        if ((vcp->vc_flags & SMBV_SMB2) != 0) {
      898 +                err = smb2_smb_echo(vcp, &scred, tmo);
      899 +        } else {
      900 +                err = smb_smb_echo(vcp, &scred, tmo);
      901 +        }
 552  902          smb_credrele(&scred);
 553  903          return (err);
 554  904  }
 555  905  
 556  906  /*
 557      - * The IOD thread is now just a "reader",
 558      - * so no more smb_iod_request().  Yea!
      907 + * Helper for smb1_iod_addrq, smb2_iod_addrq
      908 + * Returns zero if interrupted, else 1.
 559  909   */
      910 +static int
      911 +smb_iod_muxwait(smb_vc_t *vcp, boolean_t sig_ok)
      912 +{
      913 +        int rc;
 560  914  
      915 +        SMB_VC_LOCK(vcp);
      916 +        vcp->iod_muxwant++;
      917 +        if (sig_ok) {
      918 +                rc = cv_wait_sig(&vcp->iod_muxwait, &vcp->vc_lock);
      919 +        } else {
      920 +                cv_wait(&vcp->iod_muxwait, &vcp->vc_lock);
      921 +                rc = 1;
      922 +        }
      923 +        vcp->iod_muxwant--;
      924 +        SMB_VC_UNLOCK(vcp);
      925 +
      926 +        return (rc);
      927 +}
      928 +
 561  929  /*
 562      - * Place request in the queue, and send it now if possible.
      930 + * Place request in the queue, and send it.
 563  931   * Called with no locks held.
      932 + *
      933 + * Called for SMB1 only
      934 + *
      935 + * The logic for how we limit active requests differs between
       936 + * SMB1 and SMB2.  With SMB1 it's a simple counter, iod_muxcnt.
 564  937   */
 565  938  int
 566      -smb_iod_addrq(struct smb_rq *rqp)
      939 +smb1_iod_addrq(struct smb_rq *rqp)
 567  940  {
 568  941          struct smb_vc *vcp = rqp->sr_vc;
 569      -        int error, save_newrq;
      942 +        uint16_t need;
      943 +        boolean_t sig_ok =
      944 +            (rqp->sr_flags & SMBR_NOINTR_SEND) == 0;
 570  945  
 571  946          ASSERT(rqp->sr_cred);
      947 +        ASSERT((vcp->vc_flags & SMBV_SMB2) == 0);
 572  948  
      949 +        rqp->sr_owner = curthread;
      950 +
      951 +        rw_enter(&vcp->iod_rqlock, RW_WRITER);
      952 +
      953 +recheck:
 573  954          /*
 574      -         * State should be correct after the check in
 575      -         * smb_rq_enqueue(), but we dropped locks...
      955 +         * Internal requests can be added in any state,
      956 +         * but normal requests only in state active.
 576  957           */
 577      -        if (vcp->vc_state != SMBIOD_ST_VCACTIVE) {
      958 +        if ((rqp->sr_flags & SMBR_INTERNAL) == 0 &&
      959 +            vcp->vc_state != SMBIOD_ST_VCACTIVE) {
 578  960                  SMBIODEBUG("bad vc_state=%d\n", vcp->vc_state);
      961 +                rw_exit(&vcp->iod_rqlock);
 579  962                  return (ENOTCONN);
 580  963          }
 581  964  
 582  965          /*
 583      -         * Requests from the IOD itself are marked _INTERNAL,
 584      -         * and get some special treatment to avoid blocking
 585      -         * the reader thread (so we don't deadlock).
 586      -         * The request is not yet on the queue, so we can
 587      -         * modify it's state here without locks.
 588      -         * Only thing using this now is ECHO.
      966 +         * If we're at the limit of active requests, block until
      967 +         * enough requests complete so we can make ours active.
      968 +         * Wakeup in smb_iod_removerq().
      969 +         *
      970 +         * Normal callers leave one slot free, so internal
      971 +         * callers can have the last slot if needed.
 589  972           */
 590      -        rqp->sr_owner = curthread;
 591      -        if (rqp->sr_owner == vcp->iod_thr) {
 592      -                rqp->sr_flags |= SMBR_INTERNAL;
 593      -
 594      -                /*
 595      -                 * This is a request from the IOD thread.
 596      -                 * Always send directly from this thread.
 597      -                 * Note lock order: iod_rqlist, vc_sendlock
 598      -                 */
      973 +        need = 1;
      974 +        if ((rqp->sr_flags & SMBR_INTERNAL) == 0)
      975 +                need++;
      976 +        if ((vcp->iod_muxcnt + need) > vcp->vc_maxmux) {
      977 +                rw_exit(&vcp->iod_rqlock);
      978 +                if (rqp->sr_flags & SMBR_INTERNAL)
      979 +                        return (EBUSY);
      980 +                if (smb_iod_muxwait(vcp, sig_ok) == 0)
      981 +                        return (EINTR);
 599  982                  rw_enter(&vcp->iod_rqlock, RW_WRITER);
 600      -                TAILQ_INSERT_HEAD(&vcp->iod_rqlist, rqp, sr_link);
 601      -                rw_downgrade(&vcp->iod_rqlock);
      983 +                goto recheck;
      984 +        }
 602  985  
 603      -                /*
 604      -                 * Note: iod_sendrq expects vc_sendlock,
 605      -                 * so take that here, but carefully:
 606      -                 * Never block the IOD thread here.
 607      -                 */
 608      -                if (sema_tryp(&vcp->vc_sendlock) == 0) {
 609      -                        SMBIODEBUG("sendlock busy\n");
 610      -                        error = EAGAIN;
 611      -                } else {
 612      -                        /* Have vc_sendlock */
 613      -                        error = smb_iod_sendrq(rqp);
 614      -                        sema_v(&vcp->vc_sendlock);
 615      -                }
      986 +        /*
      987 +         * Add this request to the active list and send it.
      988 +         * For SMB2 we may have a sequence of compounded
      989 +         * requests, in which case we must add them all.
      990 +         * They're sent as a compound in smb2_iod_sendrq.
      991 +         */
      992 +        rqp->sr_mid = vcp->vc_next_mid++;
      993 +        /* If signing, set the signing sequence numbers. */
      994 +        if (vcp->vc_mackey != NULL && (rqp->sr_rqflags2 &
      995 +            SMB_FLAGS2_SECURITY_SIGNATURE) != 0) {
      996 +                rqp->sr_seqno = vcp->vc_next_seq++;
      997 +                rqp->sr_rseqno = vcp->vc_next_seq++;
      998 +        }
      999 +        vcp->iod_muxcnt++;
     1000 +        TAILQ_INSERT_TAIL(&vcp->iod_rqlist, rqp, sr_link);
     1001 +        smb1_iod_sendrq(rqp);
 616 1002  
 617      -                rw_exit(&vcp->iod_rqlock);
     1003 +        rw_exit(&vcp->iod_rqlock);
     1004 +        return (0);
     1005 +}
 618 1006  
 619      -                /*
 620      -                 * In the non-error case, _removerq
 621      -                 * is done by either smb_rq_reply
 622      -                 * or smb_iod_waitrq.
 623      -                 */
 624      -                if (error)
 625      -                        smb_iod_removerq(rqp);
     1007 +/*
     1008 + * Place request in the queue, and send it.
     1009 + * Called with no locks held.
     1010 + *
     1011 + * Called for SMB2 only.
     1012 + *
     1013 + * With SMB2 we have a range of valid message IDs, and we may
     1014 + * only send requests when we can assign a message ID within
     1015 + * the valid range.  We may need to wait here for some active
     1016 + * request to finish (and update vc2_limit_message_id) before
     1017 + * we can get message IDs for our new request(s).  Another
     1018 + * difference is that the request sequence we're waiting to
     1019 + * add here may require multipe message IDs, either due to
      1020 + * add here may require multiple message IDs, due to
      1021 + * either compounding or multi-credit requests.  Therefore
      1022 + * we need to wait for the availability of however many
     1023 + */
     1024 +int
     1025 +smb2_iod_addrq(struct smb_rq *rqp)
     1026 +{
     1027 +        struct smb_vc *vcp = rqp->sr_vc;
     1028 +        struct smb_rq *c_rqp;   /* compound req */
     1029 +        uint16_t charge;
     1030 +        boolean_t sig_ok =
     1031 +            (rqp->sr_flags & SMBR_NOINTR_SEND) == 0;
 626 1032  
 627      -                return (error);
     1033 +        ASSERT(rqp->sr_cred != NULL);
     1034 +        ASSERT((vcp->vc_flags & SMBV_SMB2) != 0);
     1035 +
     1036 +        /*
     1037 +         * Figure out the credit charges
     1038 +         * No multi-credit messages yet.
     1039 +         */
     1040 +        rqp->sr2_totalcreditcharge = rqp->sr2_creditcharge;
     1041 +        c_rqp = rqp->sr2_compound_next;
     1042 +        while (c_rqp != NULL) {
     1043 +                rqp->sr2_totalcreditcharge += c_rqp->sr2_creditcharge;
     1044 +                c_rqp = c_rqp->sr2_compound_next;
 628 1045          }
 629 1046  
     1047 +        /*
      1048 +         * Internal requests must not be compounded
     1049 +         * and should use exactly one credit.
     1050 +         */
     1051 +        if (rqp->sr_flags & SMBR_INTERNAL) {
     1052 +                if (rqp->sr2_compound_next != NULL) {
     1053 +                        ASSERT(0);
     1054 +                        return (EINVAL);
     1055 +                }
     1056 +        }
     1057 +
     1058 +        rqp->sr_owner = curthread;
     1059 +
 630 1060          rw_enter(&vcp->iod_rqlock, RW_WRITER);
 631 1061  
 632      -        TAILQ_INSERT_TAIL(&vcp->iod_rqlist, rqp, sr_link);
 633      -        /* iod_rqlock/WRITER protects iod_newrq */
 634      -        save_newrq = vcp->iod_newrq;
 635      -        vcp->iod_newrq++;
     1062 +recheck:
     1063 +        /*
     1064 +         * Internal requests can be added in any state,
     1065 +         * but normal requests only in state active.
     1066 +         */
     1067 +        if ((rqp->sr_flags & SMBR_INTERNAL) == 0 &&
     1068 +            vcp->vc_state != SMBIOD_ST_VCACTIVE) {
     1069 +                SMBIODEBUG("bad vc_state=%d\n", vcp->vc_state);
     1070 +                rw_exit(&vcp->iod_rqlock);
     1071 +                return (ENOTCONN);
     1072 +        }
 636 1073  
 637      -        rw_exit(&vcp->iod_rqlock);
     1074 +        /*
     1075 +         * If we're at the limit of active requests, block until
     1076 +         * enough requests complete so we can make ours active.
     1077 +         * Wakeup in smb_iod_removerq().
     1078 +         *
     1079 +         * Normal callers leave one slot free, so internal
     1080 +         * callers can have the last slot if needed.
     1081 +         */
     1082 +        charge = rqp->sr2_totalcreditcharge;
     1083 +        if ((rqp->sr_flags & SMBR_INTERNAL) == 0)
     1084 +                charge++;
     1085 +        if ((vcp->vc2_next_message_id + charge) >
     1086 +            vcp->vc2_limit_message_id) {
     1087 +                rw_exit(&vcp->iod_rqlock);
     1088 +                if (rqp->sr_flags & SMBR_INTERNAL)
     1089 +                        return (EBUSY);
     1090 +                if (smb_iod_muxwait(vcp, sig_ok) == 0)
     1091 +                        return (EINTR);
     1092 +                rw_enter(&vcp->iod_rqlock, RW_WRITER);
     1093 +                goto recheck;
     1094 +        }
 638 1095  
 639 1096          /*
 640      -         * Now send any requests that need to be sent,
 641      -         * including the one we just put on the list.
 642      -         * Only the thread that found iod_newrq==0
 643      -         * needs to run the send loop.
     1097 +         * Add this request to the active list and send it.
     1098 +         * For SMB2 we may have a sequence of compounded
     1099 +         * requests, in which case we must add them all.
     1100 +         * They're sent as a compound in smb2_iod_sendrq.
 644 1101           */
 645      -        if (save_newrq == 0)
 646      -                smb_iod_sendall(vcp);
 647 1102  
     1103 +        rqp->sr2_messageid = vcp->vc2_next_message_id;
     1104 +        vcp->vc2_next_message_id += rqp->sr2_creditcharge;
     1105 +        TAILQ_INSERT_TAIL(&vcp->iod_rqlist, rqp, sr_link);
     1106 +
     1107 +        c_rqp = rqp->sr2_compound_next;
     1108 +        while (c_rqp != NULL) {
     1109 +                c_rqp->sr2_messageid = vcp->vc2_next_message_id;
     1110 +                vcp->vc2_next_message_id += c_rqp->sr2_creditcharge;
     1111 +                TAILQ_INSERT_TAIL(&vcp->iod_rqlist, c_rqp, sr_link);
     1112 +                c_rqp = c_rqp->sr2_compound_next;
     1113 +        }
     1114 +        smb2_iod_sendrq(rqp);
     1115 +
     1116 +        rw_exit(&vcp->iod_rqlock);
 648 1117          return (0);
 649 1118  }
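
For illustration, the message-ID window test in smb2_iod_addrq (next ID plus the total credit charge, plus one reserved slot for non-internal callers, compared against the server-granted limit) can be reduced to a small stand-alone predicate. This is only a sketch; the model fields are hypothetical stand-ins for vc2_next_message_id and vc2_limit_message_id:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

struct model_vc2 {
	uint64_t next_mid;	/* like vc2_next_message_id */
	uint64_t limit_mid;	/* like vc2_limit_message_id */
};

/*
 * True when a request sequence needing 'charge' message IDs may be
 * made active now.  Normal callers ask for one extra ID so the last
 * slot stays available for internal (e.g. echo) requests.
 */
static bool
can_go_active(const struct model_vc2 *vc, uint16_t charge, bool internal)
{
	uint64_t need = charge + (internal ? 0 : 1);

	return ((vc->next_mid + need) <= vc->limit_mid);
}

int
main(void)
{
	struct model_vc2 vc = { .next_mid = 10, .limit_mid = 11 };

	assert(can_go_active(&vc, 1, true));	/* internal: 10 + 1 <= 11 */
	assert(!can_go_active(&vc, 1, false));	/* normal: 10 + 2 > 11 */
	assert(!can_go_active(&vc, 3, true));	/* compound: 10 + 3 > 11 */
	return (0);
}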
 650 1119  
 651 1120  /*
 652 1121   * Mark an SMBR_MULTIPACKET request as
 653 1122   * needing another send.  Similar to the
 654      - * "normal" part of smb_iod_addrq.
     1123 + * "normal" part of smb1_iod_addrq.
     1124 + * Only used by SMB1
 655 1125   */
 656 1126  int
 657      -smb_iod_multirq(struct smb_rq *rqp)
     1127 +smb1_iod_multirq(struct smb_rq *rqp)
 658 1128  {
 659 1129          struct smb_vc *vcp = rqp->sr_vc;
 660      -        int save_newrq;
 661 1130  
 662 1131          ASSERT(rqp->sr_flags & SMBR_MULTIPACKET);
 663 1132  
     1133 +        if (vcp->vc_flags & SMBV_SMB2) {
     1134 +                ASSERT("!SMB2?");
     1135 +                return (EINVAL);
     1136 +        }
     1137 +
 664 1138          if (rqp->sr_flags & SMBR_INTERNAL)
 665 1139                  return (EINVAL);
 666 1140  
 667 1141          if (vcp->vc_state != SMBIOD_ST_VCACTIVE) {
 668 1142                  SMBIODEBUG("bad vc_state=%d\n", vcp->vc_state);
 669 1143                  return (ENOTCONN);
 670 1144          }
 671 1145  
 672 1146          rw_enter(&vcp->iod_rqlock, RW_WRITER);
 673 1147  
 674 1148          /* Already on iod_rqlist, just reset state. */
 675 1149          rqp->sr_state = SMBRQ_NOTSENT;
     1150 +        smb1_iod_sendrq(rqp);
 676 1151  
 677      -        /* iod_rqlock/WRITER protects iod_newrq */
 678      -        save_newrq = vcp->iod_newrq;
 679      -        vcp->iod_newrq++;
 680      -
 681 1152          rw_exit(&vcp->iod_rqlock);
 682 1153  
 683      -        /*
 684      -         * Now send any requests that need to be sent,
 685      -         * including the one we just marked NOTSENT.
 686      -         * Only the thread that found iod_newrq==0
 687      -         * needs to run the send loop.
 688      -         */
 689      -        if (save_newrq == 0)
 690      -                smb_iod_sendall(vcp);
 691      -
 692 1154          return (0);
 693 1155  }
 694 1156  
 695      -
     1157 +/*
     1158 + * Remove a request from the active list, and
     1159 + * wake up requests waiting to go active.
     1160 + *
     1161 + * Shared by SMB1 + SMB2
     1162 + *
     1163 + * The logic for how we limit active requests differs between
      1164 + * SMB1 and SMB2.  With SMB1 it's a simple counter, iod_muxcnt.
     1165 + * With SMB2 we have a range of valid message IDs, and when we
     1166 + * retire the oldest request we need to keep track of what is
     1167 + * now the oldest message ID.  In both cases, after we take a
     1168 + * request out of the list here, we should be able to wake up
     1169 + * a request waiting to get in the active list.
     1170 + */
 696 1171  void
 697 1172  smb_iod_removerq(struct smb_rq *rqp)
 698 1173  {
     1174 +        struct smb_rq *rqp2;
 699 1175          struct smb_vc *vcp = rqp->sr_vc;
     1176 +        boolean_t was_head = B_FALSE;
 700 1177  
 701 1178          rw_enter(&vcp->iod_rqlock, RW_WRITER);
     1179 +
 702 1180  #ifdef QUEUEDEBUG
 703 1181          /*
 704 1182           * Make sure we have not already removed it.
 705 1183           * See sys/queue.h QUEUEDEBUG_TAILQ_POSTREMOVE
 706 1184           * XXX: Don't like the constant 1 here...
 707 1185           */
 708 1186          ASSERT(rqp->sr_link.tqe_next != (void *)1L);
 709 1187  #endif
     1188 +
     1189 +        if (TAILQ_FIRST(&vcp->iod_rqlist) == rqp)
     1190 +                was_head = B_TRUE;
 710 1191          TAILQ_REMOVE(&vcp->iod_rqlist, rqp, sr_link);
     1192 +        if (vcp->vc_flags & SMBV_SMB2) {
     1193 +                rqp2 = TAILQ_FIRST(&vcp->iod_rqlist);
     1194 +                if (was_head && rqp2 != NULL) {
     1195 +                        /* Do we still need this? */
     1196 +                        vcp->vc2_oldest_message_id =
     1197 +                            rqp2->sr2_messageid;
     1198 +                }
     1199 +        } else {
     1200 +                ASSERT(vcp->iod_muxcnt > 0);
     1201 +                vcp->iod_muxcnt--;
     1202 +        }
     1203 +
 711 1204          rw_exit(&vcp->iod_rqlock);
     1205 +
     1206 +        /*
     1207 +         * If there are requests waiting for "mux" slots,
     1208 +         * wake one.
     1209 +         */
     1210 +        SMB_VC_LOCK(vcp);
     1211 +        if (vcp->iod_muxwant != 0)
     1212 +                cv_signal(&vcp->iod_muxwait);
     1213 +        SMB_VC_UNLOCK(vcp);
 712 1214  }
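
The cv_signal at the end of smb_iod_removerq pairs with the wait done on the add side (smb_iod_muxwait, called from smb2_iod_addrq above). Below is a user-space pthread analogue of that slot wait/wake pattern, as a sketch only; the model_* names are hypothetical and the kernel code uses its own cv/mutex primitives:

#include <pthread.h>

struct model_mux {
	pthread_mutex_t	lock;
	pthread_cond_t	muxwait;	/* like vcp->iod_muxwait */
	int		muxwant;	/* like vcp->iod_muxwant */
	int		active;		/* active requests */
	int		limit;		/* allowed active requests */
};

/* Waiter side: block until a slot frees up (cf. smb_iod_muxwait). */
static void
model_muxwait(struct model_mux *m)
{
	pthread_mutex_lock(&m->lock);
	m->muxwant++;
	while (m->active >= m->limit)
		pthread_cond_wait(&m->muxwait, &m->lock);
	m->muxwant--;
	m->active++;
	pthread_mutex_unlock(&m->lock);
}

/* Waker side: retire a request, wake one waiter (cf. smb_iod_removerq). */
static void
model_removerq(struct model_mux *m)
{
	pthread_mutex_lock(&m->lock);
	m->active--;
	if (m->muxwant != 0)
		pthread_cond_signal(&m->muxwait);
	pthread_mutex_unlock(&m->lock);
}

int
main(void)
{
	struct model_mux m = {
		PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 0, 1, 2
	};

	model_removerq(&m);	/* active 1 -> 0; no waiters to wake */
	model_muxwait(&m);	/* does not block: 0 < 2 */
	return (0);
}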
 713 1215  
 714      -
 715      -
 716 1216  /*
 717 1217   * Wait for a request to complete.
 718      - *
 719      - * For normal requests, we need to deal with
 720      - * ioc_muxcnt dropping below vc_maxmux by
 721      - * making arrangements to send more...
 722 1218   */
 723 1219  int
 724 1220  smb_iod_waitrq(struct smb_rq *rqp)
 725 1221  {
 726 1222          struct smb_vc *vcp = rqp->sr_vc;
 727 1223          clock_t tr, tmo1, tmo2;
 728      -        int error, rc;
     1224 +        int error;
 729 1225  
 730 1226          if (rqp->sr_flags & SMBR_INTERNAL) {
 731      -                ASSERT((rqp->sr_flags & SMBR_MULTIPACKET) == 0);
 732      -                smb_iod_removerq(rqp);
 733      -                return (EAGAIN);
     1227 +                /* XXX - Do we ever take this path now? */
     1228 +                return (smb_iod_waitrq_int(rqp));
 734 1229          }
 735 1230  
 736 1231          /*
 737 1232           * Make sure this is NOT the IOD thread,
 738 1233           * or the wait below will stop the reader.
 739 1234           */
 740 1235          ASSERT(curthread != vcp->iod_thr);
 741 1236  
 742 1237          SMBRQ_LOCK(rqp);
 743 1238  
 744 1239          /*
 745      -         * First, wait for the request to be sent.  Normally the send
 746      -         * has already happened by the time we get here.  However, if
 747      -         * we have more than maxmux entries in the request list, our
 748      -         * request may not be sent until other requests complete.
 749      -         * The wait in this case is due to local I/O demands, so
 750      -         * we don't want the server response timeout to apply.
 751      -         *
 752      -         * If a request is allowed to interrupt this wait, then the
 753      -         * request is cancelled and never sent OTW.  Some kinds of
 754      -         * requests should never be cancelled (i.e. close) and those
 755      -         * are marked SMBR_NOINTR_SEND so they either go eventually,
 756      -         * or a connection close will terminate them with ENOTCONN.
 757      -         */
 758      -        while (rqp->sr_state == SMBRQ_NOTSENT) {
 759      -                rqp->sr_flags |= SMBR_SENDWAIT;
 760      -                if (rqp->sr_flags & SMBR_NOINTR_SEND) {
 761      -                        cv_wait(&rqp->sr_cond, &rqp->sr_lock);
 762      -                        rc = 1;
 763      -                } else
 764      -                        rc = cv_wait_sig(&rqp->sr_cond, &rqp->sr_lock);
 765      -                rqp->sr_flags &= ~SMBR_SENDWAIT;
 766      -                if (rc == 0) {
 767      -                        SMBIODEBUG("EINTR in sendwait, rqp=%p\n", rqp);
 768      -                        error = EINTR;
 769      -                        goto out;
 770      -                }
 771      -        }
 772      -
 773      -        /*
 774 1240           * The request has been sent.  Now wait for the response,
 775 1241           * with the timeout specified for this request.
 776 1242           * Compute all the deadlines now, so we effectively
 777 1243           * start the timer(s) after the request is sent.
 778 1244           */
 779 1245          if (smb_timo_notice && (smb_timo_notice < rqp->sr_timo))
 780 1246                  tmo1 = SEC_TO_TICK(smb_timo_notice);
 781 1247          else
 782 1248                  tmo1 = 0;
 783 1249          tmo2 = ddi_get_lbolt() + SEC_TO_TICK(rqp->sr_timo);
↓ open down ↓ 12 lines elided ↑ open up ↑
 796 1262                          tr = cv_reltimedwait(&rqp->sr_cond,
 797 1263                              &rqp->sr_lock, tmo1, TR_CLOCK_TICK);
 798 1264                  else
 799 1265                          tr = cv_reltimedwait_sig(&rqp->sr_cond,
 800 1266                              &rqp->sr_lock, tmo1, TR_CLOCK_TICK);
 801 1267                  if (tr == 0) {
 802 1268                          error = EINTR;
 803 1269                          goto out;
 804 1270                  }
 805 1271                  if (tr < 0) {
 806      -#ifdef DTRACE_PROBE
 807 1272                          DTRACE_PROBE1(smb_iod_waitrq1,
 808 1273                              (smb_rq_t *), rqp);
 809      -#endif
 810      -#ifdef NOT_YET
 811      -                        /* Want this to go ONLY to the user. */
 812      -                        uprintf("SMB server %s has not responded"
 813      -                            " to request %d after %d seconds..."
 814      -                            " (still waiting).\n", vcp->vc_srvname,
 815      -                            rqp->sr_mid, smb_timo_notice);
 816      -#endif
 817 1274                  }
 818 1275          }
 819 1276  
 820 1277          /*
 821 1278           * Keep waiting until tmo2 is expired.
 822 1279           */
 823 1280          while (rqp->sr_rpgen == rqp->sr_rplast) {
 824 1281                  if (rqp->sr_flags & SMBR_NOINTR_RECV)
 825 1282                          tr = cv_timedwait(&rqp->sr_cond,
 826 1283                              &rqp->sr_lock, tmo2);
 827 1284                  else
 828 1285                          tr = cv_timedwait_sig(&rqp->sr_cond,
 829 1286                              &rqp->sr_lock, tmo2);
 830 1287                  if (tr == 0) {
 831 1288                          error = EINTR;
 832 1289                          goto out;
 833 1290                  }
 834 1291                  if (tr < 0) {
 835      -#ifdef DTRACE_PROBE
 836 1292                          DTRACE_PROBE1(smb_iod_waitrq2,
 837 1293                              (smb_rq_t *), rqp);
 838      -#endif
 839      -#ifdef NOT_YET
 840      -                        /* Want this to go ONLY to the user. */
 841      -                        uprintf("SMB server %s has not responded"
 842      -                            " to request %d after %d seconds..."
 843      -                            " (giving up).\n", vcp->vc_srvname,
 844      -                            rqp->sr_mid, rqp->sr_timo);
 845      -#endif
 846 1294                          error = ETIME;
 847 1295                          goto out;
 848 1296                  }
 849 1297                  /* got wakeup */
 850 1298          }
 851 1299          error = rqp->sr_lerror;
 852 1300          rqp->sr_rplast++;
 853 1301  
 854 1302  out:
 855 1303          SMBRQ_UNLOCK(rqp);
 856 1304  
 857 1305          /*
 858 1306           * MULTIPACKET request must stay in the list.
 859 1307           * They may need additional responses.
 860 1308           */
 861 1309          if ((rqp->sr_flags & SMBR_MULTIPACKET) == 0)
 862 1310                  smb_iod_removerq(rqp);
 863 1311  
 864      -        /*
 865      -         * Some request has been completed.
 866      -         * If we reached the mux limit,
 867      -         * re-run the send loop...
 868      -         */
 869      -        if (vcp->iod_muxfull)
 870      -                smb_iod_sendall(vcp);
     1312 +        return (error);
     1313 +}
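
smb_iod_waitrq waits in two stages: a short "notice" timeout (smb_timo_notice) that only fires a DTrace probe, followed by the request's hard timeout (sr_timo) that fails the wait with ETIME. A stand-alone sketch of that two-stage arithmetic, using made-up helper names and a simulated reply time rather than real condvar waits:

#include <stdio.h>

#define	ETIME_MODEL	62	/* stand-in for the kernel's ETIME */

/*
 * Simulate a request whose reply arrives 'reply_after' seconds after
 * it was sent.  Both deadlines are computed up front, as above.
 */
static int
model_waitrq(int notice_sec, int timo_sec, int reply_after)
{
	/* Stage 1: short wait so a slow server can be noted. */
	if (notice_sec != 0 && notice_sec < timo_sec &&
	    reply_after > notice_sec)
		printf("still waiting after %d sec\n", notice_sec);

	/* Stage 2: keep waiting until the hard deadline. */
	if (reply_after > timo_sec)
		return (ETIME_MODEL);

	return (0);
}

int
main(void)
{
	printf("%d\n", model_waitrq(8, 30, 5));		/* 0: quick reply */
	printf("%d\n", model_waitrq(8, 30, 20));	/* 0: notice, then reply */
	printf("%d\n", model_waitrq(8, 30, 45));	/* 62: hard timeout */
	return (0);
}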
 871 1314  
     1315 +/*
     1316 + * Internal variant of smb_iod_waitrq(), for use in
     1317 + * requests run by the IOD (reader) thread itself.
     1318 + * Block only long enough to receive one reply.
     1319 + */
     1320 +int
     1321 +smb_iod_waitrq_int(struct smb_rq *rqp)
     1322 +{
     1323 +        struct smb_vc *vcp = rqp->sr_vc;
     1324 +        int timeleft = rqp->sr_timo;
     1325 +        int error;
     1326 +
     1327 +        ASSERT((rqp->sr_flags & SMBR_MULTIPACKET) == 0);
     1328 +again:
     1329 +        error = smb_iod_recvall(vcp, B_TRUE);
     1330 +        if (error == ETIME) {
     1331 +                /* We waited SMB_NBTIMO sec. */
     1332 +                timeleft -= SMB_NBTIMO;
     1333 +                if (timeleft > 0)
     1334 +                        goto again;
     1335 +        }
     1336 +
     1337 +        smb_iod_removerq(rqp);
     1338 +        if (rqp->sr_state != SMBRQ_NOTIFIED)
     1339 +                error = ETIME;
     1340 +
 872 1341          return (error);
 873 1342  }
 874 1343  
 875 1344  /*
 876 1345   * Shutdown all outstanding I/O requests on the specified share with
 877 1346   * ENXIO; used when unmounting a share.  (There shouldn't be any for a
 878 1347   * non-forced unmount; if this is a forced unmount, we have to shutdown
 879 1348   * the requests as part of the unmount process.)
 880 1349   */
 881 1350  void
↓ open down ↓ 8 lines elided ↑ open up ↑
 890 1359           */
 891 1360          rw_enter(&vcp->iod_rqlock, RW_READER);
 892 1361          TAILQ_FOREACH(rqp, &vcp->iod_rqlist, sr_link) {
 893 1362                  if (rqp->sr_state != SMBRQ_NOTIFIED && rqp->sr_share == ssp)
 894 1363                          smb_iod_rqprocessed(rqp, EIO, 0);
 895 1364          }
 896 1365          rw_exit(&vcp->iod_rqlock);
 897 1366  }
 898 1367  
 899 1368  /*
 900      - * Send all requests that need sending.
 901      - * Called from _addrq, _multirq, _waitrq
      1369 + * Ioctl functions called by the user-level I/O Daemon (IOD)
     1370 + * to bring up and service a connection to some SMB server.
 902 1371   */
 903      -void
 904      -smb_iod_sendall(smb_vc_t *vcp)
     1372 +
     1373 +/*
     1374 + * Handle ioctl SMBIOC_IOD_CONNECT
     1375 + */
     1376 +int
     1377 +nsmb_iod_connect(struct smb_vc *vcp, cred_t *cr)
 905 1378  {
 906      -        struct smb_rq *rqp;
 907      -        int error, muxcnt;
     1379 +        int err, val;
 908 1380  
     1381 +        ASSERT(vcp->iod_thr == curthread);
     1382 +
     1383 +        if (vcp->vc_state != SMBIOD_ST_RECONNECT) {
     1384 +                cmn_err(CE_NOTE, "iod_connect: bad state %d", vcp->vc_state);
     1385 +                return (EINVAL);
     1386 +        }
     1387 +
 909 1388          /*
 910      -         * Clear "newrq" to make sure threads adding
 911      -         * new requests will run this function again.
     1389 +         * Putting a TLI endpoint back in the right state for a new
     1390 +         * connection is a bit tricky.  In theory, this could be:
     1391 +         *      SMB_TRAN_DISCONNECT(vcp);
     1392 +         *      SMB_TRAN_UNBIND(vcp);
     1393 +         * but that method often results in TOUTSTATE errors.
     1394 +         * It's easier to just close it and open a new endpoint.
 912 1395           */
 913      -        rw_enter(&vcp->iod_rqlock, RW_WRITER);
 914      -        vcp->iod_newrq = 0;
     1396 +        SMB_VC_LOCK(vcp);
     1397 +        if (vcp->vc_tdata)
     1398 +                SMB_TRAN_DONE(vcp);
     1399 +        err = SMB_TRAN_CREATE(vcp, cr);
     1400 +        SMB_VC_UNLOCK(vcp);
     1401 +        if (err != 0)
     1402 +                return (err);
 915 1403  
 916 1404          /*
 917      -         * We only read iod_rqlist, so downgrade rwlock.
 918      -         * This allows the IOD to handle responses while
 919      -         * some requesting thread may be blocked in send.
     1405 +         * Set various options on this endpoint.
     1406 +         * Keep going in spite of errors.
 920 1407           */
 921      -        rw_downgrade(&vcp->iod_rqlock);
     1408 +        val = smb_tcpsndbuf;
     1409 +        err = SMB_TRAN_SETPARAM(vcp, SMBTP_SNDBUF, &val);
     1410 +        if (err != 0) {
     1411 +                cmn_err(CE_NOTE, "iod_connect: setopt SNDBUF, err=%d", err);
     1412 +        }
     1413 +        val = smb_tcprcvbuf;
     1414 +        err = SMB_TRAN_SETPARAM(vcp, SMBTP_RCVBUF, &val);
     1415 +        if (err != 0) {
     1416 +                cmn_err(CE_NOTE, "iod_connect: setopt RCVBUF, err=%d", err);
     1417 +        }
     1418 +        val = 1;
     1419 +        err = SMB_TRAN_SETPARAM(vcp, SMBTP_KEEPALIVE, &val);
     1420 +        if (err != 0) {
     1421 +                cmn_err(CE_NOTE, "iod_connect: setopt KEEPALIVE, err=%d", err);
     1422 +        }
     1423 +        val = 1;
     1424 +        err = SMB_TRAN_SETPARAM(vcp, SMBTP_TCP_NODELAY, &val);
     1425 +        if (err != 0) {
     1426 +                cmn_err(CE_NOTE, "iod_connect: setopt TCP_NODELAY, err=%d", err);
     1427 +        }
     1428 +        val = smb_connect_timeout * 1000;
     1429 +        err = SMB_TRAN_SETPARAM(vcp, SMBTP_TCP_CON_TMO, &val);
     1430 +        if (err != 0) {
     1431 +                cmn_err(CE_NOTE, "iod_connect: setopt TCP con tmo, err=%d", err);
     1432 +        }
 922 1433  
 923 1434          /*
 924      -         * Serialize to prevent multiple senders.
 925      -         * Note lock order: iod_rqlock, vc_sendlock
     1435 +         * Bind and connect
 926 1436           */
 927      -        sema_p(&vcp->vc_sendlock);
     1437 +        err = SMB_TRAN_BIND(vcp, NULL);
     1438 +        if (err != 0) {
     1439 +                cmn_err(CE_NOTE, "iod_connect: t_kbind: err=%d", err);
     1440 +                /* Continue on and try connect. */
     1441 +        }
     1442 +        err = SMB_TRAN_CONNECT(vcp, &vcp->vc_srvaddr.sa);
     1443 +        /*
     1444 +         * No cmn_err here, as connect failures are normal, i.e.
     1445 +         * when a server has multiple addresses and only some are
     1446 +         * routed for us. (libsmbfs tries them all)
     1447 +         */
     1448 +        if (err == 0) {
     1449 +                SMB_VC_LOCK(vcp);
     1450 +                smb_iod_newstate(vcp, SMBIOD_ST_CONNECTED);
     1451 +                SMB_VC_UNLOCK(vcp);
     1452 +        } /* else stay in state reconnect */
 928 1453  
     1454 +        return (err);
     1455 +}
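
nsmb_iod_connect sets its transport parameters through SMB_TRAN_SETPARAM on the kernel TLI endpoint. For readers more familiar with BSD sockets, roughly the same intent expressed in user space looks like the sketch below; the buffer sizes mirror the smb_tcpsndbuf/smb_tcprcvbuf tunables used above, and note there is no portable setsockopt for a connect timeout (that is usually done with a non-blocking connect plus poll):

#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <unistd.h>
#include <stdio.h>

int
main(void)
{
	int fd, on = 1;
	int sndbuf = 0x20000, rcvbuf = 0x20000;	/* cf. smb_tcpsndbuf/rcvbuf */

	if ((fd = socket(AF_INET, SOCK_STREAM, 0)) == -1) {
		perror("socket");
		return (1);
	}

	/* Keep going in spite of errors, as the kernel code does. */
	if (setsockopt(fd, SOL_SOCKET, SO_SNDBUF, &sndbuf, sizeof (sndbuf)))
		perror("SO_SNDBUF");
	if (setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &rcvbuf, sizeof (rcvbuf)))
		perror("SO_RCVBUF");
	if (setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, &on, sizeof (on)))
		perror("SO_KEEPALIVE");
	if (setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, &on, sizeof (on)))
		perror("TCP_NODELAY");

	/* bind/connect to the server address would follow here */
	(void) close(fd);
	return (0);
}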
     1456 +
     1457 +/*
     1458 + * Handle ioctl SMBIOC_IOD_NEGOTIATE
     1459 + * Do the whole SMB1/SMB2 negotiate
     1460 + *
     1461 + * This is where we send our first request to the server.
     1462 + * If this is the first time we're talking to this server,
     1463 + * (meaning not a reconnect) then we don't know whether
     1464 + * the server supports SMB2, so we need to use the weird
     1465 + * SMB1-to-SMB2 negotiation. That's where we send an SMB1
     1466 + * negotiate including dialect "SMB 2.???" and if the
     1467 + * server supports SMB2 we get an SMB2 reply -- Yes, an
     1468 + * SMB2 reply to an SMB1 request.  A strange protocol...
     1469 + *
     1470 + * If on the other hand we already know the server supports
     1471 + * SMB2 (because this is a reconnect) or if the client side
     1472 + * has disabled SMB1 entirely, we'll skip the SMB1 part.
     1473 + */
     1474 +int
     1475 +nsmb_iod_negotiate(struct smb_vc *vcp, cred_t *cr)
     1476 +{
     1477 +        struct smb_sopt *sv = &vcp->vc_sopt;
     1478 +        smb_cred_t scred;
     1479 +        int err = 0;
     1480 +
     1481 +        ASSERT(vcp->iod_thr == curthread);
     1482 +
     1483 +        smb_credinit(&scred, cr);
     1484 +
     1485 +        if (vcp->vc_state != SMBIOD_ST_CONNECTED) {
     1486 +                cmn_err(CE_NOTE, "iod_negotiate: bad state %d", vcp->vc_state);
     1487 +                err = EINVAL;
     1488 +                goto out;
     1489 +        }
     1490 +
     1491 +        if (vcp->vc_maxver == 0 || vcp->vc_minver > vcp->vc_maxver) {
     1492 +                err = EINVAL;
     1493 +                goto out;
     1494 +        }
     1495 +
 929 1496          /*
 930      -         * Walk the list of requests and send when possible.
 931      -         * We avoid having more than vc_maxmux requests
 932      -         * outstanding to the server by traversing only
 933      -         * vc_maxmux entries into this list.  Simple!
     1497 +         * (Re)init negotiated values
 934 1498           */
 935      -        ASSERT(vcp->vc_maxmux > 0);
 936      -        error = muxcnt = 0;
 937      -        TAILQ_FOREACH(rqp, &vcp->iod_rqlist, sr_link) {
     1499 +        bzero(sv, sizeof (*sv));
     1500 +        vcp->vc2_next_message_id = 0;
     1501 +        vcp->vc2_limit_message_id = 1;
     1502 +        vcp->vc2_session_id = 0;
     1503 +        vcp->vc_next_seq = 0;
 938 1504  
 939      -                if (vcp->vc_state != SMBIOD_ST_VCACTIVE) {
 940      -                        error = ENOTCONN; /* stop everything! */
 941      -                        break;
 942      -                }
     1505 +        /*
     1506 +         * If this was reconnect, get rid of the old MAC key
     1507 +         * and session key.
     1508 +         */
     1509 +        SMB_VC_LOCK(vcp);
     1510 +        if (vcp->vc_mackey != NULL) {
     1511 +                kmem_free(vcp->vc_mackey, vcp->vc_mackeylen);
     1512 +                vcp->vc_mackey = NULL;
     1513 +                vcp->vc_mackeylen = 0;
     1514 +        }
     1515 +        if (vcp->vc_ssnkey != NULL) {
     1516 +                kmem_free(vcp->vc_ssnkey, vcp->vc_ssnkeylen);
     1517 +                vcp->vc_ssnkey = NULL;
     1518 +                vcp->vc_ssnkeylen = 0;
     1519 +        }
     1520 +        SMB_VC_UNLOCK(vcp);
 943 1521  
 944      -                if (rqp->sr_state == SMBRQ_NOTSENT) {
 945      -                        error = smb_iod_sendrq(rqp);
 946      -                        if (error)
 947      -                                break;
     1522 +        /*
      1523 +         * If this is not an SMB2 reconnect (SMBV_SMB2 not set),
      1524 +         * and if SMB1 is enabled, do SMB1 negotiate.  Then
     1525 +         * if either SMB1-to-SMB2 negotiate tells us we should
     1526 +         * switch to SMB2, or the local configuration has
     1527 +         * disabled SMB1, set the SMBV_SMB2 flag.
     1528 +         *
     1529 +         * Note that vc_maxver is handled in smb_smb_negotiate
     1530 +         * so we never get sv_proto == SMB_DIALECT_SMB2_FF when
     1531 +         * the local configuration disables SMB2, and therefore
     1532 +         * we won't set the SMBV_SMB2 flag.
     1533 +         */
     1534 +        if ((vcp->vc_flags & SMBV_SMB2) == 0) {
     1535 +                if (vcp->vc_minver < SMB2_DIALECT_BASE) {
     1536 +                        /*
     1537 +                         * SMB1 is enabled
     1538 +                         */
     1539 +                        err = smb_smb_negotiate(vcp, &scred);
     1540 +                        if (err != 0)
     1541 +                                goto out;
 948 1542                  }
 949      -
 950      -                if (++muxcnt == vcp->vc_maxmux) {
 951      -                        SMBIODEBUG("muxcnt == vc_maxmux\n");
 952      -                        break;
     1543 +                /*
     1544 +                 * If SMB1-to-SMB2 negotiate told us we should
     1545 +                 * switch to SMB2, or if the local configuration
     1546 +                 * disables SMB1, set the SMB2 flag.
     1547 +                 */
     1548 +                if (sv->sv_proto == SMB_DIALECT_SMB2_FF ||
     1549 +                    vcp->vc_minver >= SMB2_DIALECT_BASE) {
     1550 +                        /*
     1551 +                         * Switch this VC to SMB2.
     1552 +                         */
     1553 +                        SMB_VC_LOCK(vcp);
     1554 +                        vcp->vc_flags |= SMBV_SMB2;
     1555 +                        SMB_VC_UNLOCK(vcp);
 953 1556                  }
     1557 +        }
 954 1558  
     1559 +        /*
     1560 +         * If this is an SMB2 reconnect (SMBV_SMB2 was set before this
     1561 +         * function was called), or SMB1-to-SMB2 negotiate indicated
     1562 +         * we should switch to SMB2, or we have SMB1 disabled (both
     1563 +         * cases set SMBV_SMB2 above), then do SMB2 negotiate.
     1564 +         */
     1565 +        if ((vcp->vc_flags & SMBV_SMB2) != 0) {
     1566 +                err = smb2_smb_negotiate(vcp, &scred);
 955 1567          }
 956 1568  
     1569 +out:
     1570 +        if (err == 0) {
     1571 +                SMB_VC_LOCK(vcp);
     1572 +                smb_iod_newstate(vcp, SMBIOD_ST_NEGOTIATED);
     1573 +                SMB_VC_UNLOCK(vcp);
     1574 +        }
 957 1575          /*
 958      -         * If we have vc_maxmux requests outstanding,
 959      -         * arrange for _waitrq to call _sendall as
 960      -         * requests are completed.
     1576 +         * (else) leave state as it was.
     1577 +         * User-level will either close this handle (if connecting
     1578 +         * for the first time) or call rcfail and then try again.
 961 1579           */
 962      -        vcp->iod_muxfull =
 963      -            (muxcnt < vcp->vc_maxmux) ? 0 : 1;
 964 1580  
 965      -        sema_v(&vcp->vc_sendlock);
 966      -        rw_exit(&vcp->iod_rqlock);
     1581 +        smb_credrele(&scred);
     1582 +
     1583 +        return (err);
 967 1584  }
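
The negotiate policy described above (do the SMB1 or SMB1-to-SMB2 negotiate only when SMB1 is still allowed and this is not already an SMB2 session, then switch to SMB2 when the server answers the "SMB 2.???" wildcard with SMB_DIALECT_SMB2_FF or when SMB1 is disabled locally) can be sketched as a pure decision function. This is an illustrative model only; SMB2_BASE and the other names are stand-ins, and in the real code the "server said SMB2" signal comes out of the SMB1 negotiate itself:

#include <stdbool.h>
#include <stdio.h>

#define	SMB2_BASE	0x0200	/* stand-in for SMB2_DIALECT_BASE; illustrative */

struct negotiate_plan {
	bool do_smb1;	/* send the SMB1 (or SMB1-to-SMB2) negotiate */
	bool do_smb2;	/* send the SMB2 negotiate */
};

static struct negotiate_plan
plan_negotiate(bool already_smb2, int minver, bool server_said_smb2ff)
{
	struct negotiate_plan p = { false, false };

	if (!already_smb2 && minver < SMB2_BASE) {
		/* SMB1 allowed: start with the SMB1 negotiate. */
		p.do_smb1 = true;
	}
	/*
	 * Switch to SMB2 if the server asked for it, if SMB1 is
	 * disabled locally, or if this is an SMB2 reconnect.
	 */
	if (already_smb2 || server_said_smb2ff || minver >= SMB2_BASE)
		p.do_smb2 = true;

	return (p);
}

int
main(void)
{
	struct negotiate_plan p;

	p = plan_negotiate(false, 0x0001, true);   /* first contact, SMB2 server */
	printf("smb1=%d smb2=%d\n", p.do_smb1, p.do_smb2);	/* 1 1 */

	p = plan_negotiate(true, 0x0001, false);   /* SMB2 reconnect */
	printf("smb1=%d smb2=%d\n", p.do_smb1, p.do_smb2);	/* 0 1 */

	p = plan_negotiate(false, SMB2_BASE, false); /* SMB1 disabled locally */
	printf("smb1=%d smb2=%d\n", p.do_smb1, p.do_smb2);	/* 0 1 */
	return (0);
}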
 968 1585  
     1586 +/*
     1587 + * Handle ioctl SMBIOC_IOD_SSNSETUP
     1588 + * Do either SMB1 or SMB2 session setup (one call/reply)
     1589 + */
 969 1590  int
 970      -smb_iod_vc_work(struct smb_vc *vcp, cred_t *cr)
     1591 +nsmb_iod_ssnsetup(struct smb_vc *vcp, cred_t *cr)
 971 1592  {
 972      -        struct file *fp = NULL;
     1593 +        smb_cred_t scred;
     1594 +        int err;
     1595 +
     1596 +        ASSERT(vcp->iod_thr == curthread);
     1597 +
     1598 +        switch (vcp->vc_state) {
     1599 +        case SMBIOD_ST_NEGOTIATED:
     1600 +        case SMBIOD_ST_AUTHCONT:
     1601 +                break;
     1602 +        default:
     1603 +                return (EINVAL);
     1604 +        }
     1605 +
     1606 +        smb_credinit(&scred, cr);
     1607 +        if (vcp->vc_flags & SMBV_SMB2)
     1608 +                err = smb2_smb_ssnsetup(vcp, &scred);
     1609 +        else
     1610 +                err = smb_smb_ssnsetup(vcp, &scred);
     1611 +        smb_credrele(&scred);
     1612 +
     1613 +        SMB_VC_LOCK(vcp);
     1614 +        switch (err) {
     1615 +        case 0:
     1616 +                smb_iod_newstate(vcp, SMBIOD_ST_AUTHOK);
     1617 +                break;
     1618 +        case EINPROGRESS:       /* MORE_PROCESSING_REQUIRED */
     1619 +                smb_iod_newstate(vcp, SMBIOD_ST_AUTHCONT);
     1620 +                break;
     1621 +        default:
     1622 +                smb_iod_newstate(vcp, SMBIOD_ST_AUTHFAIL);
     1623 +                break;
     1624 +        }
     1625 +        SMB_VC_UNLOCK(vcp);
     1626 +
     1627 +        return (err);
     1628 +}
     1629 +
     1630 +static int
     1631 +smb_iod_logoff(struct smb_vc *vcp, cred_t *cr)
     1632 +{
     1633 +        smb_cred_t scred;
     1634 +        int err;
     1635 +
     1636 +        ASSERT(vcp->iod_thr == curthread);
     1637 +
     1638 +        smb_credinit(&scred, cr);
     1639 +        if (vcp->vc_flags & SMBV_SMB2)
     1640 +                err = smb2_smb_logoff(vcp, &scred);
     1641 +        else
     1642 +                err = smb_smb_logoff(vcp, &scred);
     1643 +        smb_credrele(&scred);
     1644 +
     1645 +        return (err);
     1646 +}
     1647 +
     1648 +/*
     1649 + * Handle ioctl SMBIOC_IOD_WORK
     1650 + *
     1651 + * The smbiod agent calls this after authentication to become
     1652 + * the reader for this session, so long as that's possible.
     1653 + * This should only return non-zero if we want that agent to
     1654 + * give up on this VC permanently.
     1655 + */
     1656 +/* ARGSUSED */
     1657 +int
     1658 +smb_iod_vc_work(struct smb_vc *vcp, int flags, cred_t *cr)
     1659 +{
     1660 +        smbioc_ssn_work_t *wk = &vcp->vc_work;
 973 1661          int err = 0;
 974 1662  
 975 1663          /*
 976 1664           * This is called by the one-and-only
 977 1665           * IOD thread for this VC.
 978 1666           */
 979 1667          ASSERT(vcp->iod_thr == curthread);
 980 1668  
 981 1669          /*
 982      -         * Get the network transport file pointer,
 983      -         * and "loan" it to our transport module.
     1670 +         * Should be in state...
 984 1671           */
 985      -        if ((fp = getf(vcp->vc_tran_fd)) == NULL) {
 986      -                err = EBADF;
 987      -                goto out;
     1672 +        if (vcp->vc_state != SMBIOD_ST_AUTHOK) {
     1673 +                cmn_err(CE_NOTE, "iod_vc_work: bad state %d", vcp->vc_state);
     1674 +                return (EINVAL);
 988 1675          }
 989      -        if ((err = SMB_TRAN_LOAN_FP(vcp, fp, cr)) != 0)
 990      -                goto out;
 991 1676  
 992 1677          /*
 993      -         * In case of reconnect, tell any enqueued requests
 994      -         * then can GO!
     1678 +         * Update the session key and initialize SMB signing.
     1679 +         *
     1680 +         * This implementation does not use multiple SMB sessions per
     1681 +         * TCP connection (where only the first session key is used)
     1682 +         * so we always have a new session key here.  Sanity check the
     1683 +         * length from user space.  Normally 16 or 32.
 995 1684           */
     1685 +        if (wk->wk_u_ssnkey_len > 1024) {
     1686 +                cmn_err(CE_NOTE, "iod_vc_work: ssn key too long");
     1687 +                return (EINVAL);
     1688 +        }
     1689 +
     1690 +        ASSERT(vcp->vc_ssnkey == NULL);
 996 1691          SMB_VC_LOCK(vcp);
     1692 +        if (wk->wk_u_ssnkey_len != 0 &&
     1693 +            wk->wk_u_ssnkey_buf.lp_ptr != NULL) {
     1694 +                vcp->vc_ssnkeylen = wk->wk_u_ssnkey_len;
     1695 +                vcp->vc_ssnkey = kmem_alloc(vcp->vc_ssnkeylen, KM_SLEEP);
     1696 +                if (ddi_copyin(wk->wk_u_ssnkey_buf.lp_ptr,
     1697 +                    vcp->vc_ssnkey, vcp->vc_ssnkeylen, flags) != 0) {
     1698 +                        err = EFAULT;
     1699 +                }
     1700 +        }
     1701 +        SMB_VC_UNLOCK(vcp);
     1702 +        if (err)
     1703 +                return (err);
     1704 +
     1705 +        /*
     1706 +         * If we have a session key, derive the MAC key for SMB signing.
     1707 +         * If this was a NULL session, we might have no session key.
     1708 +         */
     1709 +        ASSERT(vcp->vc_mackey == NULL);
     1710 +        if (vcp->vc_ssnkey != NULL) {
     1711 +                if (vcp->vc_flags & SMBV_SMB2)
     1712 +                        err = smb2_sign_init(vcp);
     1713 +                else
     1714 +                        err = smb_sign_init(vcp);
     1715 +                if (err != 0)
     1716 +                        return (err);
     1717 +        }
     1718 +
     1719 +        /*
     1720 +         * Tell any enqueued requests they can start.
     1721 +         */
     1722 +        SMB_VC_LOCK(vcp);
 997 1723          vcp->vc_genid++;        /* possibly new connection */
 998 1724          smb_iod_newstate(vcp, SMBIOD_ST_VCACTIVE);
 999 1725          cv_broadcast(&vcp->vc_statechg);
1000 1726          SMB_VC_UNLOCK(vcp);
1001 1727  
1002 1728          /*
1003 1729           * The above cv_broadcast should be sufficient to
1004 1730           * get requests going again.
1005 1731           *
1006 1732           * If we have a callback function, run it.
1007 1733           * Was: smb_iod_notify_connected()
1008 1734           */
1009 1735          if (fscb && fscb->fscb_connect)
1010 1736                  smb_vc_walkshares(vcp, fscb->fscb_connect);
1011 1737  
1012 1738          /*
1013      -         * Run the "reader" loop.
     1739 +         * Run the "reader" loop.  An error return here is normal
     1740 +         * (i.e. when we need to reconnect) so ignore errors.
     1741 +         * Note: This call updates the vc_state.
1014 1742           */
1015      -        err = smb_iod_recvall(vcp);
     1743 +        (void) smb_iod_recvall(vcp, B_FALSE);
1016 1744  
1017 1745          /*
1018 1746           * The reader loop returned, so we must have a
1019 1747           * new state.  (disconnected or reconnecting)
1020 1748           *
1021 1749           * Notify shares of the disconnect.
1022 1750           * Was: smb_iod_notify_disconnect()
1023 1751           */
1024 1752          smb_vc_walkshares(vcp, smb_iod_share_disconnected);
1025 1753  
1026 1754          /*
1027 1755           * The reader loop function returns only when
1028 1756           * there's been an error on the connection, or
1029 1757           * this VC has no more references.  It also
1030 1758           * updates the state before it returns.
1031 1759           *
1032 1760           * Tell any requests to give up or restart.
1033 1761           */
1034 1762          smb_iod_invrq(vcp);
1035 1763  
1036      -out:
1037      -        /* Recall the file descriptor loan. */
1038      -        (void) SMB_TRAN_LOAN_FP(vcp, NULL, cr);
1039      -        if (fp != NULL) {
1040      -                releasef(vcp->vc_tran_fd);
1041      -        }
1042      -
1043 1764          return (err);
1044 1765  }
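
Taken together, the ioctl handlers above define the life cycle that the user-level smbiod drives: connect, negotiate, session setup, then "work" (become the reader) until the transport drops, then idle until a reconnect is requested, with rcfail reporting a failed attempt. The outline below is a sketch of that calling order only; the device open, the SMBIOC_* argument structures, and the user-level authentication steps around SSNSETUP are omitted, and the header path is an assumption:

#include <sys/ioctl.h>
#include <fcntl.h>
#include <unistd.h>
#include <netsmb/smb_dev.h>	/* assumed location of the SMBIOC_IOD_* codes */

static void
model_iod_session(int fd)
{
	/* Bring the session up: TCP connect, negotiate, authenticate. */
	if (ioctl(fd, SMBIOC_IOD_CONNECT, 0) < 0 ||
	    ioctl(fd, SMBIOC_IOD_NEGOTIATE, 0) < 0 ||
	    ioctl(fd, SMBIOC_IOD_SSNSETUP, 0) < 0) {
		/* Error out waiting requests; pause before any retry. */
		(void) ioctl(fd, SMBIOC_IOD_RCFAIL, 0);
		return;
	}

	/* Become the reader for this VC until the connection drops. */
	(void) ioctl(fd, SMBIOC_IOD_WORK, 0);

	/* Wait for a reconnect request (or the last VC reference). */
	(void) ioctl(fd, SMBIOC_IOD_IDLE, 0);

	/* A real IOD would loop back to SMBIOC_IOD_CONNECT from here. */
}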
1045 1766  
1046 1767  /*
1047      - * Wait around for someone to ask to use this VC.
1048      - * If the VC has only the IOD reference, then
1049      - * wait only a minute or so, then drop it.
     1768 + * Handle ioctl SMBIOC_IOD_IDLE
     1769 + *
     1770 + * Wait around for someone to ask to use this VC again after the
     1771 + * TCP session has closed.  When one of the connected trees adds a
     1772 + * request, smb_iod_reconnect will set vc_state to RECONNECT and
     1773 + * wake this cv_wait.  When a VC ref. goes away in smb_vc_rele,
     1774 + * that also signals this wait so we can re-check whether we
     1775 + * now hold the last ref. on this VC (and can destroy it).
1050 1776   */
1051 1777  int
1052 1778  smb_iod_vc_idle(struct smb_vc *vcp)
1053 1779  {
1054      -        clock_t tr, delta = SEC_TO_TICK(15);
1055 1780          int err = 0;
     1781 +        boolean_t destroy = B_FALSE;
1056 1782  
1057 1783          /*
1058 1784           * This is called by the one-and-only
1059 1785           * IOD thread for this VC.
1060 1786           */
1061 1787          ASSERT(vcp->iod_thr == curthread);
1062 1788  
     1789 +        /*
     1790 +         * Should be in state...
     1791 +         */
     1792 +        if (vcp->vc_state != SMBIOD_ST_IDLE &&
     1793 +            vcp->vc_state != SMBIOD_ST_RECONNECT) {
     1794 +                cmn_err(CE_NOTE, "iod_vc_idle: bad state %d", vcp->vc_state);
     1795 +                return (EINVAL);
     1796 +        }
     1797 +
1063 1798          SMB_VC_LOCK(vcp);
1064      -        while (vcp->vc_state == SMBIOD_ST_IDLE) {
1065      -                tr = cv_reltimedwait_sig(&vcp->iod_idle, &vcp->vc_lock,
1066      -                    delta, TR_CLOCK_TICK);
1067      -                if (tr == 0) {
     1799 +
     1800 +        while (vcp->vc_state == SMBIOD_ST_IDLE &&
     1801 +            vcp->vc_co.co_usecount > 1) {
     1802 +                if (cv_wait_sig(&vcp->iod_idle, &vcp->vc_lock) == 0) {
1068 1803                          err = EINTR;
1069 1804                          break;
1070 1805                  }
1071      -                if (tr < 0) {
1072      -                        /* timeout */
1073      -                        if (vcp->vc_co.co_usecount == 1) {
1074      -                                /* Let this IOD terminate. */
1075      -                                smb_iod_newstate(vcp, SMBIOD_ST_DEAD);
1076      -                                /* nobody to cv_broadcast */
1077      -                                break;
1078      -                        }
1079      -                }
1080 1806          }
     1807 +        if (vcp->vc_state == SMBIOD_ST_IDLE &&
     1808 +            vcp->vc_co.co_usecount == 1) {
     1809 +                /*
     1810 +                 * We were woken because we now have the last ref.
     1811 +                 * Arrange for this VC to be destroyed now.
     1812 +                 * Set the "GONE" flag while holding the lock,
     1813 +                 * to prevent a race with new references.
     1814 +                 * The destroy happens after unlock.
     1815 +                 */
     1816 +                vcp->vc_flags |= SMBV_GONE;
     1817 +                destroy = B_TRUE;
     1818 +        }
     1819 +
1081 1820          SMB_VC_UNLOCK(vcp);
1082 1821  
     1822 +        if (destroy) {
     1823 +                /* This sets vc_state = DEAD */
     1824 +                smb_iod_disconnect(vcp);
     1825 +        }
     1826 +
1083 1827          return (err);
1084 1828  }
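
The idle wait above combines two conditions: stay asleep while the VC is idle and someone else still holds a reference, and tear the VC down only when the IOD holds the last reference. A user-space pthread model of just that decision, as a sketch with hypothetical names:

#include <pthread.h>
#include <stdbool.h>

enum model_state { ST_IDLE, ST_RECONNECT, ST_DEAD };

struct idle_vc {
	pthread_mutex_t	lock;
	pthread_cond_t	idle_cv;	/* like vcp->iod_idle */
	enum model_state state;
	int		refs;		/* like vc_co.co_usecount */
	bool		gone;		/* like SMBV_GONE */
};

/* Returns true when the caller should tear the VC down. */
static bool
model_vc_idle(struct idle_vc *vc)
{
	bool destroy = false;

	pthread_mutex_lock(&vc->lock);
	while (vc->state == ST_IDLE && vc->refs > 1)
		pthread_cond_wait(&vc->idle_cv, &vc->lock);
	if (vc->state == ST_IDLE && vc->refs == 1) {
		vc->gone = true;	/* block new references */
		destroy = true;
	}
	pthread_mutex_unlock(&vc->lock);
	return (destroy);
}

int
main(void)
{
	struct idle_vc vc = {
		PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER,
		ST_IDLE, 1, false
	};

	return (model_vc_idle(&vc) ? 0 : 1);	/* last ref: destroy */
}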
1085 1829  
1086 1830  /*
     1831 + * Handle ioctl SMBIOC_IOD_RCFAIL
     1832 + *
1087 1833   * After a failed reconnect attempt, smbiod will
1088 1834   * call this to make current requests error out.
1089 1835   */
1090 1836  int
1091 1837  smb_iod_vc_rcfail(struct smb_vc *vcp)
1092 1838  {
1093 1839          clock_t tr;
1094 1840          int err = 0;
1095 1841  
1096 1842          /*
1097 1843           * This is called by the one-and-only
1098 1844           * IOD thread for this VC.
1099 1845           */
1100 1846          ASSERT(vcp->iod_thr == curthread);
1101      -
1102      -        if (vcp->vc_state != SMBIOD_ST_RECONNECT)
1103      -                return (EINVAL);
1104      -
1105 1847          SMB_VC_LOCK(vcp);
1106 1848  
1107 1849          smb_iod_newstate(vcp, SMBIOD_ST_RCFAILED);
1108 1850          cv_broadcast(&vcp->vc_statechg);
1109 1851  
1110 1852          /*
1111 1853           * Short wait here for two reasons:
1112 1854           * (1) Give requests a chance to error out.
1113 1855           * (2) Prevent immediate retry.
1114 1856           */
1115 1857          tr = cv_reltimedwait_sig(&vcp->iod_idle, &vcp->vc_lock,
1116 1858              SEC_TO_TICK(5), TR_CLOCK_TICK);
1117 1859          if (tr == 0)
1118 1860                  err = EINTR;
1119 1861  
1120      -        smb_iod_newstate(vcp, SMBIOD_ST_IDLE);
1121      -        cv_broadcast(&vcp->vc_statechg);
     1862 +        /*
     1863 +         * Normally we'll switch to state IDLE here.  However,
     1864 +         * if something called smb_iod_reconnect() while we were
      1865 +         * waiting above, we'll be in state reconnect already.
      1866 +         * In that case, keep state RECONNECT, so we essentially
      1867 +         * skip the transition through state IDLE that would normally
     1868 +         * happen next.
     1869 +         */
     1870 +        if (vcp->vc_state != SMBIOD_ST_RECONNECT) {
     1871 +                smb_iod_newstate(vcp, SMBIOD_ST_IDLE);
     1872 +                cv_broadcast(&vcp->vc_statechg);
     1873 +        }
1122 1874  
1123 1875          SMB_VC_UNLOCK(vcp);
1124 1876  
1125 1877          return (err);
1126 1878  }
1127 1879  
1128 1880  /*
1129 1881   * Ask the IOD to reconnect (if not already underway)
1130 1882   * then wait for the reconnect to finish.
1131 1883   */
1132 1884  int
1133 1885  smb_iod_reconnect(struct smb_vc *vcp)
1134 1886  {
1135 1887          int err = 0, rv;
1136 1888  
1137 1889          SMB_VC_LOCK(vcp);
1138 1890  again:
1139 1891          switch (vcp->vc_state) {
1140 1892  
1141 1893          case SMBIOD_ST_IDLE:
     1894 +                /* Tell the IOD thread it's no longer IDLE. */
1142 1895                  smb_iod_newstate(vcp, SMBIOD_ST_RECONNECT);
1143 1896                  cv_signal(&vcp->iod_idle);
1144 1897                  /* FALLTHROUGH */
1145 1898  
1146 1899          case SMBIOD_ST_RECONNECT:
     1900 +        case SMBIOD_ST_CONNECTED:
     1901 +        case SMBIOD_ST_NEGOTIATED:
     1902 +        case SMBIOD_ST_AUTHCONT:
     1903 +        case SMBIOD_ST_AUTHOK:
     1904 +                /* Wait for the VC state to become ACTIVE. */
1147 1905                  rv = cv_wait_sig(&vcp->vc_statechg, &vcp->vc_lock);
1148 1906                  if (rv == 0) {
1149 1907                          err = EINTR;
1150 1908                          break;
1151 1909                  }
1152 1910                  goto again;
1153 1911  
1154 1912          case SMBIOD_ST_VCACTIVE:
1155 1913                  err = 0; /* success! */
1156 1914                  break;
1157 1915  
     1916 +        case SMBIOD_ST_AUTHFAIL:
1158 1917          case SMBIOD_ST_RCFAILED:
1159 1918          case SMBIOD_ST_DEAD:
1160 1919          default:
1161 1920                  err = ENOTCONN;
1162 1921                  break;
1163 1922          }
1164 1923  
1165 1924          SMB_VC_UNLOCK(vcp);
1166 1925          return (err);
1167 1926  }
    