NEX-14666 Need to provide SMB 2.1 Client
NEX-17187 panic in smbfs_acl_store
NEX-17231 smbfs create xattr files finds wrong file
NEX-17224 smbfs lookup EINVAL should be ENOENT
NEX-17260 SMB1 client fails to list directory after NEX-14666
Reviewed by: Evan Layton <evan.layton@nexenta.com>
Reviewed by: Matt Barden <matt.barden@nexenta.com>
Reviewed by: Rick McNeal <rick.mcneal@nexenta.com>
Reviewed by: Saso Kiselkov <saso.kiselkov@nexenta.com>
Reviewed by: Joyce McIntosh <joyce.mcintosh@nexenta.com>
and: (cleanup)
NEX-16824 SMB client connection setup rework
NEX-17232 SMB client reconnect failures
Reviewed by: Evan Layton <evan.layton@nexenta.com>
Reviewed by: Matt Barden <matt.barden@nexenta.com>
and: (improve debug)


  17  *    may be used to endorse or promote products derived from this software
  18  *    without specific prior written permission.
  19  *
  20  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
  21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  23  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
  24  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  25  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
  26  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  27  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  29  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  30  * SUCH DAMAGE.
  31  *
  32  * $Id: smb_rq.c,v 1.29 2005/02/11 01:44:17 lindak Exp $
  33  */
  34 
  35 /*
  36  * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.


  37  */
  38 
  39 #include <sys/param.h>
  40 #include <sys/systm.h>
  41 #include <sys/time.h>
  42 #include <sys/kmem.h>
  43 #include <sys/proc.h>
  44 #include <sys/lock.h>
  45 #include <sys/socket.h>
  46 #include <sys/mount.h>
  47 #include <sys/sunddi.h>
  48 #include <sys/cmn_err.h>
  49 #include <sys/sdt.h>
  50 
  51 #include <netsmb/smb_osdep.h>
  52 
  53 #include <netsmb/smb.h>

  54 #include <netsmb/smb_conn.h>
  55 #include <netsmb/smb_subr.h>
  56 #include <netsmb/smb_tran.h>
  57 #include <netsmb/smb_rq.h>

  58 
  59 /*
  60  * How long to wait before restarting a request (after reconnect)
  61  */
  62 #define SMB_RCNDELAY            2       /* seconds */
  63 
  64 /*
  65  * leave this zero - we can't second-guess server side effects of
  66  * duplicate ops, this isn't nfs!
  67  */
  68 #define SMBMAXRESTARTS          0
  69 
  70 
  71 static int  smb_rq_reply(struct smb_rq *rqp);

  72 static int  smb_rq_enqueue(struct smb_rq *rqp);
  73 static int  smb_rq_getenv(struct smb_connobj *layer,
  74                 struct smb_vc **vcpp, struct smb_share **sspp);
  75 static int  smb_rq_new(struct smb_rq *rqp, uchar_t cmd);
  76 static int  smb_t2_reply(struct smb_t2rq *t2p);
  77 static int  smb_nt_reply(struct smb_ntrq *ntp);
  78 
  79 
  80 /*
  81  * Done with a request object.  Free its contents.
  82  * If it was allocated (SMBR_ALLOCED) free it too.
  83  * Some of these are stack locals, not allocated.
  84  *
  85  * No locks here - this is the last ref.
  86  */
  87 void
  88 smb_rq_done(struct smb_rq *rqp)
  89 {
  90 
  91         /*
  92          * No smb_vc_rele() here - see smb_rq_init()
  93          */
  94         mb_done(&rqp->sr_rq);
  95         md_done(&rqp->sr_rp);
  96         mutex_destroy(&rqp->sr_lock);
  97         cv_destroy(&rqp->sr_cond);
  98         if (rqp->sr_flags & SMBR_ALLOCED)
  99                 kmem_free(rqp, sizeof (*rqp));
 100 }
 101 
 102 int
 103 smb_rq_alloc(struct smb_connobj *layer, uchar_t cmd, struct smb_cred *scred,
 104         struct smb_rq **rqpp)
 105 {
 106         struct smb_rq *rqp;
 107         int error;
 108 

 109         rqp = (struct smb_rq *)kmem_alloc(sizeof (struct smb_rq), KM_SLEEP);
 110         if (rqp == NULL)
 111                 return (ENOMEM);
 112         error = smb_rq_init(rqp, layer, cmd, scred);
 113         if (error) {
 114                 smb_rq_done(rqp);
 115                 return (error);
 116         }
 117         rqp->sr_flags |= SMBR_ALLOCED;
 118         *rqpp = rqp;
 119         return (0);
 120 }
 121 
 122 int
 123 smb_rq_init(struct smb_rq *rqp, struct smb_connobj *co, uchar_t cmd,
 124         struct smb_cred *scred)
 125 {
 126         int error;
 127 
 128         bzero(rqp, sizeof (*rqp));


 130         cv_init(&rqp->sr_cond, NULL, CV_DEFAULT, NULL);
 131 
 132         error = smb_rq_getenv(co, &rqp->sr_vc, &rqp->sr_share);
 133         if (error)
 134                 return (error);
 135 
 136         /*
 137          * We copied a VC pointer (vcp) into rqp->sr_vc,
 138          * but we do NOT do a smb_vc_hold here.  Instead,
 139          * the caller is responsible for the hold on the
 140          * share or the VC as needed.  For smbfs callers,
 141          * the hold is on the share, via the smbfs mount.
 142          * For nsmb ioctl callers, the hold is done when
 143          * the driver handle gets VC or share references.
 144          * This design avoids frequent hold/rele activity
 145          * when creating and completing requests.
 146          */
 147 
 148         rqp->sr_rexmit = SMBMAXRESTARTS;
 149         rqp->sr_cred = scred;        /* Note: ref hold done by caller. */
 150         rqp->sr_pid = (uint16_t)ddi_get_pid();
 151         error = smb_rq_new(rqp, cmd);
 152 
 153         return (error);
 154 }
 155 
 156 static int
 157 smb_rq_new(struct smb_rq *rqp, uchar_t cmd)
 158 {
 159         struct mbchain *mbp = &rqp->sr_rq;
 160         struct smb_vc *vcp = rqp->sr_vc;
 161         int error;
 162 
 163         ASSERT(rqp != NULL);
 164 
 165         rqp->sr_sendcnt = 0;
 166         rqp->sr_cmd = cmd;
 167 
 168         mb_done(mbp);
 169         md_done(&rqp->sr_rp);
 170         error = mb_init(mbp);
 171         if (error)
 172                 return (error);
 173 

 174         /*
 175          * Is this the right place to save the flags?
  176          */
 177         rqp->sr_rqflags  = vcp->vc_hflags;
 178         rqp->sr_rqflags2 = vcp->vc_hflags2;
 179 
 180         /*
 181          * The SMB header is filled in later by
 182          * smb_rq_fillhdr (see below)
 183          * Just reserve space here.
 184          */
 185         mb_put_mem(mbp, NULL, SMB_HDRLEN, MB_MZERO);

 186 
 187         return (0);
 188 }
 189 
 190 /*
  191  * Given a request with its body already composed,
  192  * rewind to the start and fill in the SMB header.
  193  * This is called after the request is enqueued,
  194  * so we have the final MID, seq num., etc.
 195  */
 196 void
 197 smb_rq_fillhdr(struct smb_rq *rqp)
 198 {
 199         struct mbchain mbtmp, *mbp = &mbtmp;
 200         mblk_t *m;
 201 
 202         /*
 203          * Fill in the SMB header using a dup of the first mblk,
 204          * which points at the same data but has its own wptr,
 205          * so we can rewind without trashing the message.
 206          */
 207         m = dupb(rqp->sr_rq.mb_top);
 208         m->b_wptr = m->b_rptr;    /* rewind */
 209         mb_initm(mbp, m);
 210 
 211         mb_put_mem(mbp, SMB_SIGNATURE, 4, MB_MSYSTEM);
 212         mb_put_uint8(mbp, rqp->sr_cmd);
 213         mb_put_uint32le(mbp, 0);        /* status */
 214         mb_put_uint8(mbp, rqp->sr_rqflags);
 215         mb_put_uint16le(mbp, rqp->sr_rqflags2);
 216         mb_put_uint16le(mbp, 0);        /* pid-high */
 217         mb_put_mem(mbp, NULL, 8, MB_MZERO);     /* MAC sig. (later) */
 218         mb_put_uint16le(mbp, 0);        /* reserved */
 219         mb_put_uint16le(mbp, rqp->sr_rqtid);
 220         mb_put_uint16le(mbp, rqp->sr_pid);
 221         mb_put_uint16le(mbp, rqp->sr_rquid);
 222         mb_put_uint16le(mbp, rqp->sr_mid);
 223 
 224         /* This will free the mblk from dupb. */
 225         mb_done(mbp);
 226 }
 227 
 228 int
 229 smb_rq_simple(struct smb_rq *rqp)
 230 {
 231         return (smb_rq_simple_timed(rqp, smb_timo_default));
 232 }
 233 
 234 /*
 235  * Simple request-reply exchange
 236  */
 237 int
 238 smb_rq_simple_timed(struct smb_rq *rqp, int timeout)
 239 {
 240         int error = EINVAL;


 264                         (void) cv_reltimedwait(&rqp->sr_cond, &(rqp)->sr_lock,
 265                             SEC_TO_TICK(SMB_RCNDELAY), TR_CLOCK_TICK);
 266 
 267                 } else {
 268                         delay(SEC_TO_TICK(SMB_RCNDELAY));
 269                 }
 270                 SMBRQ_UNLOCK(rqp);
 271                 rqp->sr_rexmit--;
 272         }
 273         return (error);
 274 }
 275 
 276 
 277 static int
 278 smb_rq_enqueue(struct smb_rq *rqp)
 279 {
 280         struct smb_vc *vcp = rqp->sr_vc;
 281         struct smb_share *ssp = rqp->sr_share;
 282         int error = 0;
 283 


 284         /*
 285          * Normal requests may initiate a reconnect,
 286          * and/or wait for state changes to finish.
 287          * Some requests set the NORECONNECT flag
 288          * to avoid all that (i.e. tree discon)
 289          */
 290         if (rqp->sr_flags & SMBR_NORECONNECT) {
 291                 if (vcp->vc_state != SMBIOD_ST_VCACTIVE) {
 292                         SMBSDEBUG("bad vc_state=%d\n", vcp->vc_state);
 293                         return (ENOTCONN);
 294                 }
 295                 if (ssp != NULL &&
 296                     ((ssp->ss_flags & SMBS_CONNECTED) == 0))
 297                         return (ENOTCONN);
 298                 goto ok_out;
 299         }
 300 
 301         /*
 302          * If we're not connected, initiate a reconnect
 303          * and/or wait for an existing one to finish.


 308                         return (error);
 309         }
 310 
 311         /*
 312          * If this request has a "share" object
 313          * that needs a tree connect, do it now.
 314          */
 315         if (ssp != NULL && (ssp->ss_flags & SMBS_CONNECTED) == 0) {
 316                 error = smb_share_tcon(ssp, rqp->sr_cred);
 317                 if (error)
 318                         return (error);
 319         }
 320 
 321         /*
 322          * We now know what UID + TID to use.
 323          * Store them in the request.
 324          */
 325 ok_out:
 326         rqp->sr_rquid = vcp->vc_smbuid;
 327         rqp->sr_rqtid = ssp ? ssp->ss_tid : SMB_TID_UNKNOWN;
 328         error = smb_iod_addrq(rqp);
 329 
 330         return (error);
 331 }
 332 
  333 /*
 334  * Mark location of the word count, which is filled in later by
 335  * smb_rw_wend().  Also initialize the counter that it uses
 336  * to figure out what value to fill in.
 337  *
 338  * Note that the word count happens to be 8-bit.
 339  */
 340 void
 341 smb_rq_wstart(struct smb_rq *rqp)
 342 {
 343         rqp->sr_wcount = mb_reserve(&rqp->sr_rq, sizeof (uint8_t));
 344         rqp->sr_rq.mb_count = 0;
 345 }
 346 
 347 void
 348 smb_rq_wend(struct smb_rq *rqp)
 349 {
 350         uint_t wcnt;
 351 
 352         if (rqp->sr_wcount == NULL) {
 353                 SMBSDEBUG("no wcount\n");


 381 {
 382         uint_t bcnt;
 383 
 384         if (rqp->sr_bcount == NULL) {
 385                 SMBSDEBUG("no bcount\n");
 386                 return;
 387         }
 388         bcnt = rqp->sr_rq.mb_count;
 389         if (bcnt > 0xffff)
 390                 SMBSDEBUG("byte count too large (%d)\n", bcnt);
 391         /*
 392          * Fill in the byte count (16-bits)
 393          * The pointer is char * type due to
 394          * typical off-by-one alignment.
 395          */
 396         rqp->sr_bcount[0] = bcnt & 0xFF;
 397         rqp->sr_bcount[1] = (bcnt >> 8);
 398 }
 399 
 400 int
 401 smb_rq_intr(struct smb_rq *rqp)
 402 {
 403         if (rqp->sr_flags & SMBR_INTR)
 404                 return (EINTR);
 405 
 406         return (0);
 407 }
 408 
 409 static int
 410 smb_rq_getenv(struct smb_connobj *co,
 411         struct smb_vc **vcpp, struct smb_share **sspp)
 412 {
 413         struct smb_vc *vcp = NULL;
 414         struct smb_share *ssp = NULL;
 415         int error = EINVAL;
 416 
 417         if (co->co_flags & SMBO_GONE) {
 418                 SMBSDEBUG("zombie CO\n");
 419                 error = EINVAL;
 420                 goto out;
 421         }
 422 
 423         switch (co->co_level) {
 424         case SMBL_SHARE:
 425                 ssp = CPTOSS(co);
 426                 if ((co->co_flags & SMBO_GONE) ||
 427                     co->co_parent == NULL) {
 428                         SMBSDEBUG("zombie share %s\n", ssp->ss_name);
 429                         break;


 440                 }
 441                 error = 0;
 442                 break;
 443 
 444         default:
 445                 SMBSDEBUG("invalid level %d passed\n", co->co_level);
 446         }
 447 
 448 out:
 449         if (!error) {
 450                 if (vcpp)
 451                         *vcpp = vcp;
 452                 if (sspp)
 453                         *sspp = ssp;
 454         }
 455 
 456         return (error);
 457 }
 458 
 459 /*
 460  * Wait for reply on the request
 461  */
 462 static int
 463 smb_rq_reply(struct smb_rq *rqp)
 464 {
 465         struct mdchain *mdp = &rqp->sr_rp;
 466         u_int8_t tb;
 467         int error, rperror = 0;
 468 
 469         if (rqp->sr_timo == SMBNOREPLYWAIT) {
 470                 smb_iod_removerq(rqp);
 471                 return (0);
 472         }
 473 
 474         error = smb_iod_waitrq(rqp);
 475         if (error)
 476                 return (error);
 477 
 478         /*
 479          * If the request was signed, validate the
 480          * signature on the response.
 481          */
 482         if (rqp->sr_rqflags2 & SMB_FLAGS2_SECURITY_SIGNATURE) {
 483                 error = smb_rq_verify(rqp);
 484                 if (error)
 485                         return (error);
 486         }
 487 
 488         /*
 489          * Parse the SMB header
 490          */
 491         error = md_get_uint32le(mdp, NULL);
 492         if (error)
 493                 return (error);
 494         error = md_get_uint8(mdp, &tb);
 495         error = md_get_uint32le(mdp, &rqp->sr_error);
 496         error = md_get_uint8(mdp, &rqp->sr_rpflags);
 497         error = md_get_uint16le(mdp, &rqp->sr_rpflags2);
  498         if (rqp->sr_rpflags2 & SMB_FLAGS2_ERR_STATUS) {
 499                 /*
 500                  * Do a special check for STATUS_BUFFER_OVERFLOW;
 501                  * it's not an error.
 502                  */
 503                 if (rqp->sr_error == NT_STATUS_BUFFER_OVERFLOW) {
 504                         /*
 505                          * Don't report it as an error to our caller;
 506                          * they can look at rqp->sr_error if they
 507                          * need to know whether we got a
 508                          * STATUS_BUFFER_OVERFLOW.
 509                          * XXX - should we do that for all errors
 510                          * where (error & 0xC0000000) is 0x80000000,
 511                          * i.e. all warnings?
 512                          */
 513                         rperror = 0;
 514                 } else
 515                         rperror = smb_maperr32(rqp->sr_error);
 516         } else {
 517                 rqp->sr_errclass = rqp->sr_error & 0xff;
 518                 rqp->sr_serror = rqp->sr_error >> 16;
 519                 rperror = smb_maperror(rqp->sr_errclass, rqp->sr_serror);
 520         }
 521         if (rperror == EMOREDATA) {
 522                 rperror = E2BIG;
 523                 rqp->sr_flags |= SMBR_MOREDATA;
  524         } else
  525                 rqp->sr_flags &= ~SMBR_MOREDATA;
 526 
 527         error = md_get_uint32le(mdp, NULL);
 528         error = md_get_uint32le(mdp, NULL);
 529         error = md_get_uint32le(mdp, NULL);
 530 
 531         error = md_get_uint16le(mdp, &rqp->sr_rptid);
 532         error = md_get_uint16le(mdp, &rqp->sr_rppid);
  533         error = md_get_uint16le(mdp, &rqp->sr_rpuid);
 534         error = md_get_uint16le(mdp, &rqp->sr_rpmid);
 535 
 536         return ((error) ? error : rperror);
 537 }
 538 
 539 
 540 #define ALIGN4(a)       (((a) + 3) & ~3)
 541 
 542 /*
 543  * TRANS2 request implementation
 544  * TRANS implementation is in the "t2" routines
 545  * NT_TRANSACTION implementation is the separate "nt" stuff
 546  */
 547 int
 548 smb_t2_alloc(struct smb_connobj *layer, ushort_t setup, struct smb_cred *scred,
 549         struct smb_t2rq **t2pp)
 550 {
 551         struct smb_t2rq *t2p;
 552         int error;
 553 
 554         t2p = (struct smb_t2rq *)kmem_alloc(sizeof (*t2p), KM_SLEEP);
 555         if (t2p == NULL)
 556                 return (ENOMEM);


1117                 smb_rq_wend(rqp);
1118                 smb_rq_bstart(rqp);
1119                 mb_put_uint8(mbp, 0);   /* name */
1120                 len = mb_fixhdr(mbp);
1121                 if (txpcount) {
1122                         mb_put_mem(mbp, NULL, ALIGN4(len) - len, MB_MZERO);
1123                         error = md_get_mbuf(&mbparam, txpcount, &m);
1124                         if (error)
1125                                 goto bad;
1126                         mb_put_mbuf(mbp, m);
1127                 }
1128                 len = mb_fixhdr(mbp);
1129                 if (txdcount) {
1130                         mb_put_mem(mbp, NULL, ALIGN4(len) - len, MB_MZERO);
1131                         error = md_get_mbuf(&mbdata, txdcount, &m);
1132                         if (error)
1133                                 goto bad;
1134                         mb_put_mbuf(mbp, m);
1135                 }
1136                 smb_rq_bend(rqp);
1137                 error = smb_iod_multirq(rqp);
1138                 if (error)
1139                         goto bad;
1140         }       /* while left params or data */
1141         error = smb_t2_reply(t2p);
1142         if (error && !(t2p->t2_flags & SMBT2_MOREDATA))
1143                 goto bad;
1144         mdp = &t2p->t2_rdata;
1145         if (mdp->md_top) {
1146                 md_initm(mdp, mdp->md_top);
1147         }
1148         mdp = &t2p->t2_rparam;
1149         if (mdp->md_top) {
1150                 md_initm(mdp, mdp->md_top);
1151         }
1152 bad:
1153         smb_iod_removerq(rqp);
1154 freerq:
1155         if (error && !(t2p->t2_flags & SMBT2_MOREDATA)) {
1156                 if (rqp->sr_flags & SMBR_RESTART)
1157                         t2p->t2_flags |= SMBT2_RESTART;


1328                 leftdcount -= txdcount;
1329                 smb_rq_wend(rqp);
1330                 smb_rq_bstart(rqp);
1331                 len = mb_fixhdr(mbp);
1332                 if (txpcount) {
1333                         mb_put_mem(mbp, NULL, ALIGN4(len) - len, MB_MZERO);
1334                         error = md_get_mbuf(&mbparam, txpcount, &m);
1335                         if (error)
1336                                 goto bad;
1337                         mb_put_mbuf(mbp, m);
1338                 }
1339                 len = mb_fixhdr(mbp);
1340                 if (txdcount) {
1341                         mb_put_mem(mbp, NULL, ALIGN4(len) - len, MB_MZERO);
1342                         error = md_get_mbuf(&mbdata, txdcount, &m);
1343                         if (error)
1344                                 goto bad;
1345                         mb_put_mbuf(mbp, m);
1346                 }
1347                 smb_rq_bend(rqp);
1348                 error = smb_iod_multirq(rqp);
1349                 if (error)
1350                         goto bad;
1351         }       /* while left params or data */
1352         error = smb_nt_reply(ntp);
1353         if (error && !(ntp->nt_flags & SMBT2_MOREDATA))
1354                 goto bad;
1355         mdp = &ntp->nt_rdata;
1356         if (mdp->md_top) {
1357                 md_initm(mdp, mdp->md_top);
1358         }
1359         mdp = &ntp->nt_rparam;
1360         if (mdp->md_top) {
1361                 md_initm(mdp, mdp->md_top);
1362         }
1363 bad:
1364         smb_iod_removerq(rqp);
1365 freerq:
1366         if (error && !(ntp->nt_flags & SMBT2_MOREDATA)) {
1367                 if (rqp->sr_flags & SMBR_RESTART)
1368                         ntp->nt_flags |= SMBT2_RESTART;


1420                 ntp->nt_flags &= ~SMBT2_RESTART;
1421                 error = smb_nt_request_int(ntp);
1422                 if (!error)
1423                         break;
1424                 if ((ntp->nt_flags & (SMBT2_RESTART | SMBT2_NORESTART)) !=
1425                     SMBT2_RESTART)
1426                         break;
1427                 if (++i > SMBMAXRESTARTS)
1428                         break;
1429                 mutex_enter(&(ntp)->nt_lock);
1430                 if (ntp->nt_share) {
1431                         (void) cv_reltimedwait(&ntp->nt_cond, &(ntp)->nt_lock,
1432                             SEC_TO_TICK(SMB_RCNDELAY), TR_CLOCK_TICK);
1433 
1434                 } else {
1435                         delay(SEC_TO_TICK(SMB_RCNDELAY));
1436                 }
1437                 mutex_exit(&(ntp)->nt_lock);
1438         }
 1439         return (error);
1440 }


  17  *    may be used to endorse or promote products derived from this software
  18  *    without specific prior written permission.
  19  *
  20  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
  21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  23  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
  24  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  25  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
  26  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  27  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  29  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  30  * SUCH DAMAGE.
  31  *
  32  * $Id: smb_rq.c,v 1.29 2005/02/11 01:44:17 lindak Exp $
  33  */
  34 
  35 /*
  36  * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
  37  * Portions Copyright (C) 2001 - 2013 Apple Inc. All rights reserved.
  38  * Copyright 2018 Nexenta Systems, Inc.  All rights reserved.
  39  */
  40 
  41 #include <sys/param.h>
  42 #include <sys/systm.h>
  43 #include <sys/time.h>
  44 #include <sys/kmem.h>
  45 #include <sys/proc.h>
  46 #include <sys/lock.h>
  47 #include <sys/socket.h>
  48 #include <sys/mount.h>
  49 #include <sys/sunddi.h>
  50 #include <sys/cmn_err.h>
  51 #include <sys/sdt.h>
  52 
  53 #include <netsmb/smb_osdep.h>
  54 
  55 #include <netsmb/smb.h>
  56 #include <netsmb/smb2.h>
  57 #include <netsmb/smb_conn.h>
  58 #include <netsmb/smb_subr.h>
  59 #include <netsmb/smb_tran.h>
  60 #include <netsmb/smb_rq.h>
  61 #include <netsmb/smb2_rq.h>
  62 
  63 /*
  64  * How long to wait before restarting a request (after reconnect)
  65  */
  66 #define SMB_RCNDELAY            2       /* seconds */
  67 
  68 /*
  69  * leave this zero - we can't second-guess server side effects of
  70  * duplicate ops, this isn't nfs!
  71  */
  72 #define SMBMAXRESTARTS          0
  73 
  74 
  75 static int  smb_rq_reply(struct smb_rq *rqp);
  76 static int  smb_rq_parsehdr(struct smb_rq *rqp);
  77 static int  smb_rq_enqueue(struct smb_rq *rqp);


  78 static int  smb_rq_new(struct smb_rq *rqp, uchar_t cmd);
  79 static int  smb_t2_reply(struct smb_t2rq *t2p);
  80 static int  smb_nt_reply(struct smb_ntrq *ntp);
  81 
  82 
  83 /*
  84  * Done with a request object.  Free its contents.
  85  * If it was allocated (SMBR_ALLOCED) free it too.
  86  * Some of these are stack locals, not allocated.
  87  *
  88  * No locks here - this is the last ref.
  89  */
  90 void
  91 smb_rq_done(struct smb_rq *rqp)
  92 {
  93 
  94         /*
  95          * No smb_vc_rele() here - see smb_rq_init()
  96          */
  97         mb_done(&rqp->sr_rq);
  98         md_done(&rqp->sr_rp);
  99         mutex_destroy(&rqp->sr_lock);
 100         cv_destroy(&rqp->sr_cond);
 101         if (rqp->sr_flags & SMBR_ALLOCED)
 102                 kmem_free(rqp, sizeof (*rqp));
 103 }
 104 
 105 int
 106 smb_rq_alloc(struct smb_connobj *layer, uchar_t cmd, struct smb_cred *scred,
 107         struct smb_rq **rqpp)
 108 {
 109         struct smb_rq *rqp;
 110         int error;
 111 
 112         // XXX kmem cache?
 113         rqp = (struct smb_rq *)kmem_alloc(sizeof (struct smb_rq), KM_SLEEP);
 114         if (rqp == NULL)
 115                 return (ENOMEM);
 116         error = smb_rq_init(rqp, layer, cmd, scred);
 117         if (error) {
 118                 smb_rq_done(rqp);
 119                 return (error);
 120         }
 121         rqp->sr_flags |= SMBR_ALLOCED;
 122         *rqpp = rqp;
 123         return (0);
 124 }
 125 
 126 int
 127 smb_rq_init(struct smb_rq *rqp, struct smb_connobj *co, uchar_t cmd,
 128         struct smb_cred *scred)
 129 {
 130         int error;
 131 
 132         bzero(rqp, sizeof (*rqp));


 134         cv_init(&rqp->sr_cond, NULL, CV_DEFAULT, NULL);
 135 
 136         error = smb_rq_getenv(co, &rqp->sr_vc, &rqp->sr_share);
 137         if (error)
 138                 return (error);
 139 
 140         /*
 141          * We copied a VC pointer (vcp) into rqp->sr_vc,
 142          * but we do NOT do a smb_vc_hold here.  Instead,
 143          * the caller is responsible for the hold on the
 144          * share or the VC as needed.  For smbfs callers,
 145          * the hold is on the share, via the smbfs mount.
 146          * For nsmb ioctl callers, the hold is done when
 147          * the driver handle gets VC or share references.
 148          * This design avoids frequent hold/rele activity
 149          * when creating and completing requests.
 150          */
 151 
 152         rqp->sr_rexmit = SMBMAXRESTARTS;
 153         rqp->sr_cred = scred;        /* Note: ref hold done by caller. */

 154         error = smb_rq_new(rqp, cmd);
 155 
 156         return (error);
 157 }
 158 
 159 static int
 160 smb_rq_new(struct smb_rq *rqp, uchar_t cmd)
 161 {
 162         struct mbchain *mbp = &rqp->sr_rq;
 163         struct smb_vc *vcp = rqp->sr_vc;
 164         int error;
 165 
 166         ASSERT(rqp != NULL);
 167 
 168         rqp->sr_sendcnt = 0;

 169 
 170         mb_done(mbp);
 171         md_done(&rqp->sr_rp);
 172         error = mb_init(mbp);
 173         if (error)
 174                 return (error);
 175 
 176         if (vcp->vc_flags & SMBV_SMB2) {
 177                 /*
 178                  * SMB2 request initialization
 179                  */
 180                 rqp->sr2_command = cmd;
 181                 rqp->sr2_creditcharge = 1;
 182                 rqp->sr2_creditsrequested = 1;
 183                 rqp->sr_pid = 0xFEFF;        /* Made up, just like Windows */
 184                 rqp->sr2_rqflags = 0;
 185                 if ((vcp->vc_flags & SMBV_SIGNING) != 0 &&
 186                     vcp->vc_mackey != NULL) {
 187                         rqp->sr2_rqflags |= SMB2_FLAGS_SIGNED;
 188                 }
 189 
 190                 /*
 191                  * The SMB2 header is filled in later by
 192                  * smb2_rq_fillhdr (see smb2_rq.c)
 193                  * Just reserve space here.
 194                  */
 195                 mb_put_mem(mbp, NULL, SMB2_HDRLEN, MB_MZERO);
 196         } else {
 197                 /*
 198                  * SMB1 request initialization
 199                  */
 200                 rqp->sr_cmd = cmd;
 201                 rqp->sr_pid = (uint32_t)ddi_get_pid();
 202                 rqp->sr_rqflags  = vcp->vc_hflags;
 203                 rqp->sr_rqflags2 = vcp->vc_hflags2;
 204 
 205                 /*
 206                  * The SMB header is filled in later by
 207                  * smb_rq_fillhdr (see below)
 208                  * Just reserve space here.
 209                  */
 210                 mb_put_mem(mbp, NULL, SMB_HDRLEN, MB_MZERO);
 211         }
 212 
 213         return (0);
 214 }
 215 
 216 /*
  217  * Given a request with its body already composed,
  218  * rewind to the start and fill in the SMB header.
  219  * This is called when the request is enqueued,
  220  * so we have the final MID, seq num., etc.
 221  */
 222 void
 223 smb_rq_fillhdr(struct smb_rq *rqp)
 224 {
 225         struct mbchain mbtmp, *mbp = &mbtmp;
 226         mblk_t *m;
 227 
 228         /*
 229          * Fill in the SMB header using a dup of the first mblk,
 230          * which points at the same data but has its own wptr,
 231          * so we can rewind without trashing the message.
 232          */
 233         m = dupb(rqp->sr_rq.mb_top);
 234         m->b_wptr = m->b_rptr;    /* rewind */
 235         mb_initm(mbp, m);
 236 
 237         mb_put_mem(mbp, SMB_SIGNATURE, 4, MB_MSYSTEM);
 238         mb_put_uint8(mbp, rqp->sr_cmd);
 239         mb_put_uint32le(mbp, 0);        /* status */
 240         mb_put_uint8(mbp, rqp->sr_rqflags);
 241         mb_put_uint16le(mbp, rqp->sr_rqflags2);
 242         mb_put_uint16le(mbp, 0);        /* pid-high */
 243         mb_put_mem(mbp, NULL, 8, MB_MZERO);     /* MAC sig. (later) */
 244         mb_put_uint16le(mbp, 0);        /* reserved */
 245         mb_put_uint16le(mbp, rqp->sr_rqtid);
 246         mb_put_uint16le(mbp, (uint16_t)rqp->sr_pid);
 247         mb_put_uint16le(mbp, rqp->sr_rquid);
 248         mb_put_uint16le(mbp, rqp->sr_mid);
 249 
 250         /* This will free the mblk from dupb. */
 251         mb_done(mbp);
 252 }
 253 
 254 int
 255 smb_rq_simple(struct smb_rq *rqp)
 256 {
 257         return (smb_rq_simple_timed(rqp, smb_timo_default));
 258 }
 259 
 260 /*
 261  * Simple request-reply exchange
 262  */
 263 int
 264 smb_rq_simple_timed(struct smb_rq *rqp, int timeout)
 265 {
 266         int error = EINVAL;


 290                         (void) cv_reltimedwait(&rqp->sr_cond, &(rqp)->sr_lock,
 291                             SEC_TO_TICK(SMB_RCNDELAY), TR_CLOCK_TICK);
 292 
 293                 } else {
 294                         delay(SEC_TO_TICK(SMB_RCNDELAY));
 295                 }
 296                 SMBRQ_UNLOCK(rqp);
 297                 rqp->sr_rexmit--;
 298         }
 299         return (error);
 300 }
 301 
 302 
 303 static int
 304 smb_rq_enqueue(struct smb_rq *rqp)
 305 {
 306         struct smb_vc *vcp = rqp->sr_vc;
 307         struct smb_share *ssp = rqp->sr_share;
 308         int error = 0;
 309 
 310         ASSERT((vcp->vc_flags & SMBV_SMB2) == 0);
 311 
 312         /*
 313          * Normal requests may initiate a reconnect,
 314          * and/or wait for state changes to finish.
 315          * Some requests set the NORECONNECT flag
 316          * to avoid all that (i.e. tree discon)
 317          */
 318         if (rqp->sr_flags & SMBR_NORECONNECT) {
 319                 if (vcp->vc_state != SMBIOD_ST_VCACTIVE) {
 320                         SMBSDEBUG("bad vc_state=%d\n", vcp->vc_state);
 321                         return (ENOTCONN);
 322                 }
 323                 if (ssp != NULL &&
 324                     ((ssp->ss_flags & SMBS_CONNECTED) == 0))
 325                         return (ENOTCONN);
 326                 goto ok_out;
 327         }
 328 
 329         /*
 330          * If we're not connected, initiate a reconnect
 331          * and/or wait for an existing one to finish.


 336                         return (error);
 337         }
 338 
 339         /*
 340          * If this request has a "share" object
 341          * that needs a tree connect, do it now.
 342          */
 343         if (ssp != NULL && (ssp->ss_flags & SMBS_CONNECTED) == 0) {
 344                 error = smb_share_tcon(ssp, rqp->sr_cred);
 345                 if (error)
 346                         return (error);
 347         }
 348 
 349         /*
 350          * We now know what UID + TID to use.
 351          * Store them in the request.
 352          */
 353 ok_out:
 354         rqp->sr_rquid = vcp->vc_smbuid;
 355         rqp->sr_rqtid = ssp ? ssp->ss_tid : SMB_TID_UNKNOWN;
 356         error = smb1_iod_addrq(rqp);
 357 
 358         return (error);
 359 }
 360 
 361 /*
 362  * Used by the IOD thread during connection setup,
 363  * and for smb_echo after network timeouts.  Note that
 364  * unlike smb_rq_simple, callers must check sr_error.
 365  */
 366 int
 367 smb_rq_internal(struct smb_rq *rqp, int timeout)
 368 {
 369         struct smb_vc *vcp = rqp->sr_vc;
 370         int error;
 371 
 372         ASSERT((vcp->vc_flags & SMBV_SMB2) == 0);
 373 
 374         rqp->sr_flags &= ~SMBR_RESTART;
 375         rqp->sr_timo = timeout;      /* in seconds */
 376         rqp->sr_state = SMBRQ_NOTSENT;
 377 
 378         /*
 379          * In-line smb_rq_enqueue(rqp) here, as we don't want it
 380          * trying to reconnect etc. for an internal request.
 381          */
 382         rqp->sr_rquid = vcp->vc_smbuid;
 383         rqp->sr_rqtid = SMB_TID_UNKNOWN;
 384         rqp->sr_flags |= SMBR_INTERNAL;
 385         error = smb1_iod_addrq(rqp);
 386         if (error != 0)
 387                 return (error);
 388 
 389         /*
 390          * In-line a variant of smb_rq_reply(rqp) here as we may
 391          * need to do custom parsing for SMB1-to-SMB2 negotiate.
 392          */
 393         if (rqp->sr_timo == SMBNOREPLYWAIT) {
 394                 smb_iod_removerq(rqp);
 395                 return (0);
 396         }
 397 
 398         error = smb_iod_waitrq_int(rqp);
 399         if (error)
 400                 return (error);
 401 
 402         /*
 403          * If the request was signed, validate the
 404          * signature on the response.
 405          */
 406         if (rqp->sr_rqflags2 & SMB_FLAGS2_SECURITY_SIGNATURE) {
 407                 error = smb_rq_verify(rqp);
 408                 if (error)
 409                         return (error);
 410         }
 411 
 412         /*
 413          * Parse the SMB header.
 414          */
 415         error = smb_rq_parsehdr(rqp);
 416 
 417         /*
 418          * Skip the error translation smb_rq_reply does.
 419          * Callers of this expect "raw" NT status.
 420          */
 421 
 422         return (error);
 423 }
 424 
 425 /*
 426  * Mark location of the word count, which is filled in later by
  427  * smb_rq_wend().  Also initialize the counter that it uses
 428  * to figure out what value to fill in.
 429  *
 430  * Note that the word count happens to be 8-bit.
 431  */
 432 void
 433 smb_rq_wstart(struct smb_rq *rqp)
 434 {
 435         rqp->sr_wcount = mb_reserve(&rqp->sr_rq, sizeof (uint8_t));
 436         rqp->sr_rq.mb_count = 0;
 437 }
 438 
 439 void
 440 smb_rq_wend(struct smb_rq *rqp)
 441 {
 442         uint_t wcnt;
 443 
 444         if (rqp->sr_wcount == NULL) {
 445                 SMBSDEBUG("no wcount\n");


 473 {
 474         uint_t bcnt;
 475 
 476         if (rqp->sr_bcount == NULL) {
 477                 SMBSDEBUG("no bcount\n");
 478                 return;
 479         }
 480         bcnt = rqp->sr_rq.mb_count;
 481         if (bcnt > 0xffff)
 482                 SMBSDEBUG("byte count too large (%d)\n", bcnt);
 483         /*
 484          * Fill in the byte count (16-bits)
 485          * The pointer is char * type due to
 486          * typical off-by-one alignment.
 487          */
 488         rqp->sr_bcount[0] = bcnt & 0xFF;
 489         rqp->sr_bcount[1] = (bcnt >> 8);
 490 }
 491 
  492 int
 493 smb_rq_getenv(struct smb_connobj *co,
 494         struct smb_vc **vcpp, struct smb_share **sspp)
 495 {
 496         struct smb_vc *vcp = NULL;
 497         struct smb_share *ssp = NULL;
 498         int error = EINVAL;
 499 
 500         if (co->co_flags & SMBO_GONE) {
 501                 SMBSDEBUG("zombie CO\n");
 502                 error = EINVAL;
 503                 goto out;
 504         }
 505 
 506         switch (co->co_level) {
 507         case SMBL_SHARE:
 508                 ssp = CPTOSS(co);
 509                 if ((co->co_flags & SMBO_GONE) ||
 510                     co->co_parent == NULL) {
 511                         SMBSDEBUG("zombie share %s\n", ssp->ss_name);
 512                         break;


 523                 }
 524                 error = 0;
 525                 break;
 526 
 527         default:
 528                 SMBSDEBUG("invalid level %d passed\n", co->co_level);
 529         }
 530 
 531 out:
 532         if (!error) {
 533                 if (vcpp)
 534                         *vcpp = vcp;
 535                 if (sspp)
 536                         *sspp = ssp;
 537         }
 538 
 539         return (error);
 540 }
 541 
 542 /*
 543  * Wait for a reply to this request, then parse it.
 544  */
 545 static int
 546 smb_rq_reply(struct smb_rq *rqp)
 547 {
 548         int error;


 549 
 550         if (rqp->sr_timo == SMBNOREPLYWAIT) {
 551                 smb_iod_removerq(rqp);
 552                 return (0);
 553         }
 554 
 555         error = smb_iod_waitrq(rqp);
 556         if (error)
 557                 return (error);
 558 
 559         /*
 560          * If the request was signed, validate the
 561          * signature on the response.
 562          */
 563         if (rqp->sr_rqflags2 & SMB_FLAGS2_SECURITY_SIGNATURE) {
 564                 error = smb_rq_verify(rqp);
 565                 if (error)
 566                         return (error);
 567         }
 568 
 569         /*
 570          * Parse the SMB header
 571          */
 572         error = smb_rq_parsehdr(rqp);
 573         if (error != 0)
 574                 return (error);
 575 
 576         if (rqp->sr_error != 0) {


 577                 if (rqp->sr_rpflags2 & SMB_FLAGS2_ERR_STATUS) {
 578                         error = smb_maperr32(rqp->sr_error);
 579                 } else {
 580                         uint8_t errClass = rqp->sr_error & 0xff;
 581                         uint16_t errCode = rqp->sr_error >> 16;
 582                         /* Convert to NT status */
 583                         rqp->sr_error = smb_doserr2status(errClass, errCode);
 584                         error = smb_maperror(errClass, errCode);
 585                 }
 586         }
 587 
 588         if (error != 0) {
 589                 /*
 590                  * Do a special check for STATUS_BUFFER_OVERFLOW;
 591                  * it's not an error.
 592                  */
 593                 if (rqp->sr_error == NT_STATUS_BUFFER_OVERFLOW) {
 594                         /*
 595                          * Don't report it as an error to our caller;
 596                          * they can look at rqp->sr_error if they
 597                          * need to know whether we got a
  598                          * STATUS_BUFFER_OVERFLOW.
  599                          */
 600                         rqp->sr_flags |= SMBR_MOREDATA;
 601                         error = 0;
 602                 }
 603         } else {
 604                 rqp->sr_flags &= ~SMBR_MOREDATA;
 605         }
 606 
 607         return (error);
 608 }

 609 
 610 /*
 611  * Parse the SMB header
 612  */
 613 static int
 614 smb_rq_parsehdr(struct smb_rq *rqp)
 615 {
 616         struct mdchain mdp_save;
 617         struct mdchain *mdp = &rqp->sr_rp;
 618         u_int8_t tb, sig[4];
 619         int error;
 620 
 621         /*
 622          * Parse the signature.  The reader already checked that
 623          * the signature is valid.  Here we just have to check
 624          * for SMB1-to-SMB2 negotiate.  Caller handles an EPROTO
 625          * as a signal that we got an SMB2 reply.  If we return
 626          * EPROTO, rewind the mdchain back where it was.
 627          */
 628         mdp_save = *mdp;
 629         error = md_get_mem(mdp, sig, 4, MB_MSYSTEM);
 630         if (error)
 631                 return (error);
 632         if (sig[0] != SMB_HDR_V1) {
 633                 if (rqp->sr_cmd == SMB_COM_NEGOTIATE) {
 634                         *mdp = mdp_save;
 635                         return (EPROTO);
 636                 }
 637                 return (EBADRPC);
 638         }
 639 
 640         /* Check cmd */
 641         error = md_get_uint8(mdp, &tb);
 642         if (tb != rqp->sr_cmd)
 643                 return (EBADRPC);
 644 
 645         md_get_uint32le(mdp, &rqp->sr_error);
 646         md_get_uint8(mdp, &rqp->sr_rpflags);
 647         md_get_uint16le(mdp, &rqp->sr_rpflags2);
 648 
 649         /* Skip: pid-high(2), MAC sig(8), reserved(2) */
 650         md_get_mem(mdp, NULL, 12, MB_MSYSTEM);
 651 
 652         md_get_uint16le(mdp, &rqp->sr_rptid);
 653         md_get_uint16le(mdp, &rqp->sr_rppid);
 654         md_get_uint16le(mdp, &rqp->sr_rpuid);
 655         error = md_get_uint16le(mdp, &rqp->sr_rpmid);
 656 
 657         return (error);
 658 }
 659 
 660 
 661 #define ALIGN4(a)       (((a) + 3) & ~3)
 662 
 663 /*
 664  * TRANS2 request implementation
 665  * TRANS implementation is in the "t2" routines
 666  * NT_TRANSACTION implementation is the separate "nt" stuff
 667  */
 668 int
 669 smb_t2_alloc(struct smb_connobj *layer, ushort_t setup, struct smb_cred *scred,
 670         struct smb_t2rq **t2pp)
 671 {
 672         struct smb_t2rq *t2p;
 673         int error;
 674 
 675         t2p = (struct smb_t2rq *)kmem_alloc(sizeof (*t2p), KM_SLEEP);
 676         if (t2p == NULL)
 677                 return (ENOMEM);


1238                 smb_rq_wend(rqp);
1239                 smb_rq_bstart(rqp);
1240                 mb_put_uint8(mbp, 0);   /* name */
1241                 len = mb_fixhdr(mbp);
1242                 if (txpcount) {
1243                         mb_put_mem(mbp, NULL, ALIGN4(len) - len, MB_MZERO);
1244                         error = md_get_mbuf(&mbparam, txpcount, &m);
1245                         if (error)
1246                                 goto bad;
1247                         mb_put_mbuf(mbp, m);
1248                 }
1249                 len = mb_fixhdr(mbp);
1250                 if (txdcount) {
1251                         mb_put_mem(mbp, NULL, ALIGN4(len) - len, MB_MZERO);
1252                         error = md_get_mbuf(&mbdata, txdcount, &m);
1253                         if (error)
1254                                 goto bad;
1255                         mb_put_mbuf(mbp, m);
1256                 }
1257                 smb_rq_bend(rqp);
1258                 error = smb1_iod_multirq(rqp);
1259                 if (error)
1260                         goto bad;
1261         }       /* while left params or data */
1262         error = smb_t2_reply(t2p);
1263         if (error && !(t2p->t2_flags & SMBT2_MOREDATA))
1264                 goto bad;
1265         mdp = &t2p->t2_rdata;
1266         if (mdp->md_top) {
1267                 md_initm(mdp, mdp->md_top);
1268         }
1269         mdp = &t2p->t2_rparam;
1270         if (mdp->md_top) {
1271                 md_initm(mdp, mdp->md_top);
1272         }
1273 bad:
1274         smb_iod_removerq(rqp);
1275 freerq:
1276         if (error && !(t2p->t2_flags & SMBT2_MOREDATA)) {
1277                 if (rqp->sr_flags & SMBR_RESTART)
1278                         t2p->t2_flags |= SMBT2_RESTART;


1449                 leftdcount -= txdcount;
1450                 smb_rq_wend(rqp);
1451                 smb_rq_bstart(rqp);
1452                 len = mb_fixhdr(mbp);
1453                 if (txpcount) {
1454                         mb_put_mem(mbp, NULL, ALIGN4(len) - len, MB_MZERO);
1455                         error = md_get_mbuf(&mbparam, txpcount, &m);
1456                         if (error)
1457                                 goto bad;
1458                         mb_put_mbuf(mbp, m);
1459                 }
1460                 len = mb_fixhdr(mbp);
1461                 if (txdcount) {
1462                         mb_put_mem(mbp, NULL, ALIGN4(len) - len, MB_MZERO);
1463                         error = md_get_mbuf(&mbdata, txdcount, &m);
1464                         if (error)
1465                                 goto bad;
1466                         mb_put_mbuf(mbp, m);
1467                 }
1468                 smb_rq_bend(rqp);
1469                 error = smb1_iod_multirq(rqp);
1470                 if (error)
1471                         goto bad;
1472         }       /* while left params or data */
1473         error = smb_nt_reply(ntp);
1474         if (error && !(ntp->nt_flags & SMBT2_MOREDATA))
1475                 goto bad;
1476         mdp = &ntp->nt_rdata;
1477         if (mdp->md_top) {
1478                 md_initm(mdp, mdp->md_top);
1479         }
1480         mdp = &ntp->nt_rparam;
1481         if (mdp->md_top) {
1482                 md_initm(mdp, mdp->md_top);
1483         }
1484 bad:
1485         smb_iod_removerq(rqp);
1486 freerq:
1487         if (error && !(ntp->nt_flags & SMBT2_MOREDATA)) {
1488                 if (rqp->sr_flags & SMBR_RESTART)
1489                         ntp->nt_flags |= SMBT2_RESTART;


1541                 ntp->nt_flags &= ~SMBT2_RESTART;
1542                 error = smb_nt_request_int(ntp);
1543                 if (!error)
1544                         break;
1545                 if ((ntp->nt_flags & (SMBT2_RESTART | SMBT2_NORESTART)) !=
1546                     SMBT2_RESTART)
1547                         break;
1548                 if (++i > SMBMAXRESTARTS)
1549                         break;
1550                 mutex_enter(&(ntp)->nt_lock);
1551                 if (ntp->nt_share) {
1552                         (void) cv_reltimedwait(&ntp->nt_cond, &(ntp)->nt_lock,
1553                             SEC_TO_TICK(SMB_RCNDELAY), TR_CLOCK_TICK);
1554 
1555                 } else {
1556                         delay(SEC_TO_TICK(SMB_RCNDELAY));
1557                 }
1558                 mutex_exit(&(ntp)->nt_lock);
1559         }
1560         return (error);
1561 }
1562 
1563 /*
1564  * Run an SMB transact named pipe.
1565  * Note: send_mb is consumed.
1566  */
1567 int
1568 smb_t2_xnp(struct smb_share *ssp, uint16_t fid,
1569     struct mbchain *send_mb, struct mdchain *recv_md,
1570     uint32_t *data_out_sz, /* max / returned */
1571     uint32_t *more, struct smb_cred *scrp)
1572 {
1573         struct smb_t2rq *t2p = NULL;
1574         mblk_t *m;
1575         uint16_t setup[2];
1576         int err;
1577 
1578         setup[0] = TRANS_TRANSACT_NAMED_PIPE;
1579         setup[1] = fid;
1580 
1581         t2p = kmem_alloc(sizeof (*t2p), KM_SLEEP);
1582         err = smb_t2_init(t2p, SSTOCP(ssp), setup, 2, scrp);
1583         if (err) {
1584                 *data_out_sz = 0;
1585                 goto out;
1586         }
1587 
1588         t2p->t2_setupcount = 2;
1589         t2p->t2_setupdata  = setup;
1590 
1591         t2p->t_name = "\\PIPE\\";
1592         t2p->t_name_len = 6;
1593 
1594         t2p->t2_maxscount = 0;
1595         t2p->t2_maxpcount = 0;
1596         t2p->t2_maxdcount = (uint16_t)*data_out_sz;
1597 
1598         /* Transmit parameters (none) */
1599 
1600         /*
1601          * Transmit data
1602          *
1603          * Copy the mb, and clear the source so we
1604          * don't end up with a double free.
1605          */
1606         t2p->t2_tdata = *send_mb;
1607         bzero(send_mb, sizeof (*send_mb));
1608 
1609         /*
1610          * Run the request
1611          */
1612         err = smb_t2_request(t2p);
1613 
1614         /* No returned parameters. */
1615 
1616         if (err == 0 && (m = t2p->t2_rdata.md_top) != NULL) {
1617                 /*
1618                  * Received data
1619                  *
1620                  * Copy the mdchain, and clear the source so we
1621                  * don't end up with a double free.
1622                  */
1623                 *data_out_sz = msgdsize(m);
1624                 md_initm(recv_md, m);
1625                 t2p->t2_rdata.md_top = NULL;
1626         } else {
1627                 *data_out_sz = 0;
1628         }
1629 
1630         if (t2p->t2_sr_error == NT_STATUS_BUFFER_OVERFLOW)
1631                 *more = 1;
1632 
1633 out:
1634         if (t2p != NULL) {
1635                 /* Note: t2p->t_name no longer allocated */
1636                 smb_t2_done(t2p);
1637                 kmem_free(t2p, sizeof (*t2p));
1638         }
1639 
1640         return (err);
1641 }