1 /*
2 * Copyright (c) 2000-2001, Boris Popov
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 * must display the following acknowledgement:
15 * This product includes software developed by Boris Popov.
16 * 4. Neither the name of the author nor the names of any co-contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 * SUCH DAMAGE.
31 *
32 * $Id: smb_rq.c,v 1.29 2005/02/11 01:44:17 lindak Exp $
33 */
34
35 /*
36 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
37 * Portions Copyright (C) 2001 - 2013 Apple Inc. All rights reserved.
38 * Copyright 2018 Nexenta Systems, Inc. All rights reserved.
39 */
40
41 #include <sys/param.h>
42 #include <sys/systm.h>
43 #include <sys/time.h>
44 #include <sys/kmem.h>
45 #include <sys/proc.h>
46 #include <sys/lock.h>
47 #include <sys/socket.h>
48 #include <sys/mount.h>
49 #include <sys/sunddi.h>
50 #include <sys/cmn_err.h>
51 #include <sys/sdt.h>
52
53 #include <netsmb/smb_osdep.h>
54
55 #include <netsmb/smb.h>
56 #include <netsmb/smb2.h>
57 #include <netsmb/smb_conn.h>
58 #include <netsmb/smb_subr.h>
59 #include <netsmb/smb_tran.h>
60 #include <netsmb/smb_rq.h>
61 #include <netsmb/smb2_rq.h>
62
63 /*
64 * How long to wait before restarting a request (after reconnect)
65 */
66 #define SMB_RCNDELAY 2 /* seconds */
67
68 /*
69 * leave this zero - we can't ssecond guess server side effects of
70 * duplicate ops, this isn't nfs!
71 */
72 #define SMBMAXRESTARTS 0
73
74
75 static int smb_rq_reply(struct smb_rq *rqp);
76 static int smb_rq_parsehdr(struct smb_rq *rqp);
77 static int smb_rq_enqueue(struct smb_rq *rqp);
78 static int smb_rq_new(struct smb_rq *rqp, uchar_t cmd);
79 static int smb_t2_reply(struct smb_t2rq *t2p);
80 static int smb_nt_reply(struct smb_ntrq *ntp);
81
82
83 /*
84 * Done with a request object. Free its contents.
85 * If it was allocated (SMBR_ALLOCED) free it too.
86 * Some of these are stack locals, not allocated.
87 *
88 * No locks here - this is the last ref.
89 */
90 void
91 smb_rq_done(struct smb_rq *rqp)
92 {
93
94 /*
95 * No smb_vc_rele() here - see smb_rq_init()
96 */
97 mb_done(&rqp->sr_rq);
98 md_done(&rqp->sr_rp);
99 mutex_destroy(&rqp->sr_lock);
100 cv_destroy(&rqp->sr_cond);
101 if (rqp->sr_flags & SMBR_ALLOCED)
102 kmem_free(rqp, sizeof (*rqp));
103 }
104
105 int
106 smb_rq_alloc(struct smb_connobj *layer, uchar_t cmd, struct smb_cred *scred,
107 struct smb_rq **rqpp)
108 {
109 struct smb_rq *rqp;
110 int error;
111
112 // XXX kmem cache?
113 rqp = (struct smb_rq *)kmem_alloc(sizeof (struct smb_rq), KM_SLEEP);
114 if (rqp == NULL)
115 return (ENOMEM);
116 error = smb_rq_init(rqp, layer, cmd, scred);
117 if (error) {
118 smb_rq_done(rqp);
119 return (error);
120 }
121 rqp->sr_flags |= SMBR_ALLOCED;
122 *rqpp = rqp;
123 return (0);
124 }
125
/*
 * Initialize a request object (possibly a stack local).
 * Looks up the VC and share from the passed connection
 * object, then sets up the request for command "cmd".
 *
 * Returns 0 or an errno from smb_rq_getenv()/smb_rq_new().
 * Note: on failure the mutex/cv are already initialized;
 * callers clean up via smb_rq_done().
 */
int
smb_rq_init(struct smb_rq *rqp, struct smb_connobj *co, uchar_t cmd,
    struct smb_cred *scred)
{
	int error;

	bzero(rqp, sizeof (*rqp));
	mutex_init(&rqp->sr_lock, NULL, MUTEX_DRIVER, NULL);
	cv_init(&rqp->sr_cond, NULL, CV_DEFAULT, NULL);

	/* Resolve the VC and (optional) share from the conn object. */
	error = smb_rq_getenv(co, &rqp->sr_vc, &rqp->sr_share);
	if (error)
		return (error);

	/*
	 * We copied a VC pointer (vcp) into rqp->sr_vc,
	 * but we do NOT do a smb_vc_hold here.  Instead,
	 * the caller is responsible for the hold on the
	 * share or the VC as needed.  For smbfs callers,
	 * the hold is on the share, via the smbfs mount.
	 * For nsmb ioctl callers, the hold is done when
	 * the driver handle gets VC or share references.
	 * This design avoids frequent hold/rele activity
	 * when creating and completing requests.
	 */

	rqp->sr_rexmit = SMBMAXRESTARTS;
	rqp->sr_cred = scred;	/* Note: ref hold done by caller. */
	error = smb_rq_new(rqp, cmd);

	return (error);
}
158
159 static int
160 smb_rq_new(struct smb_rq *rqp, uchar_t cmd)
161 {
162 struct mbchain *mbp = &rqp->sr_rq;
163 struct smb_vc *vcp = rqp->sr_vc;
164 int error;
165
166 ASSERT(rqp != NULL);
167
168 rqp->sr_sendcnt = 0;
169
170 mb_done(mbp);
171 md_done(&rqp->sr_rp);
172 error = mb_init(mbp);
173 if (error)
174 return (error);
175
176 if (vcp->vc_flags & SMBV_SMB2) {
177 /*
178 * SMB2 request initialization
179 */
180 rqp->sr2_command = cmd;
181 rqp->sr2_creditcharge = 1;
182 rqp->sr2_creditsrequested = 1;
183 rqp->sr_pid = 0xFEFF; /* Made up, just like Windows */
184 rqp->sr2_rqflags = 0;
185 if ((vcp->vc_flags & SMBV_SIGNING) != 0 &&
186 vcp->vc_mackey != NULL) {
187 rqp->sr2_rqflags |= SMB2_FLAGS_SIGNED;
188 }
189
190 /*
191 * The SMB2 header is filled in later by
192 * smb2_rq_fillhdr (see smb2_rq.c)
193 * Just reserve space here.
194 */
195 mb_put_mem(mbp, NULL, SMB2_HDRLEN, MB_MZERO);
196 } else {
197 /*
198 * SMB1 request initialization
199 */
200 rqp->sr_cmd = cmd;
201 rqp->sr_pid = (uint32_t)ddi_get_pid();
202 rqp->sr_rqflags = vcp->vc_hflags;
203 rqp->sr_rqflags2 = vcp->vc_hflags2;
204
205 /*
206 * The SMB header is filled in later by
207 * smb_rq_fillhdr (see below)
208 * Just reserve space here.
209 */
210 mb_put_mem(mbp, NULL, SMB_HDRLEN, MB_MZERO);
211 }
212
213 return (0);
214 }
215
216 /*
217 * Given a request with it's body already composed,
218 * rewind to the start and fill in the SMB header.
219 * This is called when the request is enqueued,
220 * so we have the final MID, seq num. etc.
221 */
void
smb_rq_fillhdr(struct smb_rq *rqp)
{
	struct mbchain mbtmp, *mbp = &mbtmp;
	mblk_t *m;

	/*
	 * Fill in the SMB header using a dup of the first mblk,
	 * which points at the same data but has its own wptr,
	 * so we can rewind without trashing the message.
	 *
	 * NOTE(review): dupb() can return NULL on allocation
	 * failure, which would be dereferenced just below —
	 * confirm callers guarantee this cannot happen here.
	 */
	m = dupb(rqp->sr_rq.mb_top);
	m->b_wptr = m->b_rptr;	/* rewind */
	mb_initm(mbp, m);

	/* SMB1 header layout: 4-byte signature, then fixed fields. */
	mb_put_mem(mbp, SMB_SIGNATURE, 4, MB_MSYSTEM);
	mb_put_uint8(mbp, rqp->sr_cmd);
	mb_put_uint32le(mbp, 0);	/* status */
	mb_put_uint8(mbp, rqp->sr_rqflags);
	mb_put_uint16le(mbp, rqp->sr_rqflags2);
	mb_put_uint16le(mbp, 0);	/* pid-high */
	mb_put_mem(mbp, NULL, 8, MB_MZERO);	/* MAC sig. (later) */
	mb_put_uint16le(mbp, 0);	/* reserved */
	mb_put_uint16le(mbp, rqp->sr_rqtid);
	mb_put_uint16le(mbp, (uint16_t)rqp->sr_pid);
	mb_put_uint16le(mbp, rqp->sr_rquid);
	mb_put_uint16le(mbp, rqp->sr_mid);

	/* This will free the mblk from dupb. */
	mb_done(mbp);
}
253
/*
 * Simple request-reply exchange using the default timeout.
 * Convenience wrapper for smb_rq_simple_timed().
 */
int
smb_rq_simple(struct smb_rq *rqp)
{
	return (smb_rq_simple_timed(rqp, smb_timo_default));
}
259
260 /*
261 * Simple request-reply exchange
262 */
263 int
264 smb_rq_simple_timed(struct smb_rq *rqp, int timeout)
265 {
266 int error = EINVAL;
267
268 for (; ; ) {
269 /*
270 * Don't send any new requests if force unmount is underway.
271 * This check was moved into smb_rq_enqueue.
272 */
273 rqp->sr_flags &= ~SMBR_RESTART;
274 rqp->sr_timo = timeout; /* in seconds */
275 rqp->sr_state = SMBRQ_NOTSENT;
276 error = smb_rq_enqueue(rqp);
277 if (error) {
278 break;
279 }
280 error = smb_rq_reply(rqp);
281 if (!error)
282 break;
283 if ((rqp->sr_flags & (SMBR_RESTART | SMBR_NORESTART)) !=
284 SMBR_RESTART)
285 break;
286 if (rqp->sr_rexmit <= 0)
287 break;
288 SMBRQ_LOCK(rqp);
289 if (rqp->sr_share) {
290 (void) cv_reltimedwait(&rqp->sr_cond, &(rqp)->sr_lock,
291 SEC_TO_TICK(SMB_RCNDELAY), TR_CLOCK_TICK);
292
293 } else {
294 delay(SEC_TO_TICK(SMB_RCNDELAY));
295 }
296 SMBRQ_UNLOCK(rqp);
297 rqp->sr_rexmit--;
298 }
299 return (error);
300 }
301
302
/*
 * Hand the request to the IOD for transmission, first making
 * sure the VC is active and the share (if any) is connected,
 * possibly triggering reconnect / tree-connect.  SMB1 only.
 *
 * Returns 0 or ENOTCONN (or an errno from reconnect/tcon/addrq).
 */
static int
smb_rq_enqueue(struct smb_rq *rqp)
{
	struct smb_vc *vcp = rqp->sr_vc;
	struct smb_share *ssp = rqp->sr_share;
	int error = 0;

	ASSERT((vcp->vc_flags & SMBV_SMB2) == 0);

	/*
	 * Normal requests may initiate a reconnect,
	 * and/or wait for state changes to finish.
	 * Some requests set the NORECONNECT flag
	 * to avoid all that (i.e. tree discon)
	 */
	if (rqp->sr_flags & SMBR_NORECONNECT) {
		if (vcp->vc_state != SMBIOD_ST_VCACTIVE) {
			SMBSDEBUG("bad vc_state=%d\n", vcp->vc_state);
			return (ENOTCONN);
		}
		if (ssp != NULL &&
		    ((ssp->ss_flags & SMBS_CONNECTED) == 0))
			return (ENOTCONN);
		goto ok_out;
	}

	/*
	 * If we're not connected, initiate a reconnect
	 * and/or wait for an existing one to finish.
	 */
	if (vcp->vc_state != SMBIOD_ST_VCACTIVE) {
		error = smb_iod_reconnect(vcp);
		if (error != 0)
			return (error);
	}

	/*
	 * If this request has a "share" object
	 * that needs a tree connect, do it now.
	 */
	if (ssp != NULL && (ssp->ss_flags & SMBS_CONNECTED) == 0) {
		error = smb_share_tcon(ssp, rqp->sr_cred);
		if (error)
			return (error);
	}

	/*
	 * We now know what UID + TID to use.
	 * Store them in the request.
	 */
ok_out:
	rqp->sr_rquid = vcp->vc_smbuid;
	rqp->sr_rqtid = ssp ? ssp->ss_tid : SMB_TID_UNKNOWN;
	error = smb1_iod_addrq(rqp);

	return (error);
}
360
361 /*
362 * Used by the IOD thread during connection setup,
363 * and for smb_echo after network timeouts. Note that
364 * unlike smb_rq_simple, callers must check sr_error.
365 */
366 int
367 smb_rq_internal(struct smb_rq *rqp, int timeout)
368 {
369 struct smb_vc *vcp = rqp->sr_vc;
370 int error;
371
372 ASSERT((vcp->vc_flags & SMBV_SMB2) == 0);
373
374 rqp->sr_flags &= ~SMBR_RESTART;
375 rqp->sr_timo = timeout; /* in seconds */
376 rqp->sr_state = SMBRQ_NOTSENT;
377
378 /*
379 * In-line smb_rq_enqueue(rqp) here, as we don't want it
380 * trying to reconnect etc. for an internal request.
381 */
382 rqp->sr_rquid = vcp->vc_smbuid;
383 rqp->sr_rqtid = SMB_TID_UNKNOWN;
384 rqp->sr_flags |= SMBR_INTERNAL;
385 error = smb1_iod_addrq(rqp);
386 if (error != 0)
387 return (error);
388
389 /*
390 * In-line a variant of smb_rq_reply(rqp) here as we may
391 * need to do custom parsing for SMB1-to-SMB2 negotiate.
392 */
393 if (rqp->sr_timo == SMBNOREPLYWAIT) {
394 smb_iod_removerq(rqp);
395 return (0);
396 }
397
398 error = smb_iod_waitrq_int(rqp);
399 if (error)
400 return (error);
401
402 /*
403 * If the request was signed, validate the
404 * signature on the response.
405 */
406 if (rqp->sr_rqflags2 & SMB_FLAGS2_SECURITY_SIGNATURE) {
407 error = smb_rq_verify(rqp);
408 if (error)
409 return (error);
410 }
411
412 /*
413 * Parse the SMB header.
414 */
415 error = smb_rq_parsehdr(rqp);
416
417 /*
418 * Skip the error translation smb_rq_reply does.
419 * Callers of this expect "raw" NT status.
420 */
421
422 return (error);
423 }
424
425 /*
426 * Mark location of the word count, which is filled in later by
427 * smb_rw_wend(). Also initialize the counter that it uses
428 * to figure out what value to fill in.
429 *
430 * Note that the word count happens to be 8-bit.
431 */
432 void
433 smb_rq_wstart(struct smb_rq *rqp)
434 {
435 rqp->sr_wcount = mb_reserve(&rqp->sr_rq, sizeof (uint8_t));
436 rqp->sr_rq.mb_count = 0;
437 }
438
439 void
440 smb_rq_wend(struct smb_rq *rqp)
441 {
442 uint_t wcnt;
443
444 if (rqp->sr_wcount == NULL) {
445 SMBSDEBUG("no wcount\n");
446 return;
447 }
448 wcnt = rqp->sr_rq.mb_count;
449 if (wcnt > 0x1ff)
450 SMBSDEBUG("word count too large (%d)\n", wcnt);
451 if (wcnt & 1)
452 SMBSDEBUG("odd word count\n");
453 /* Fill in the word count (8-bits) */
454 *rqp->sr_wcount = (wcnt >> 1);
455 }
456
457 /*
458 * Mark location of the byte count, which is filled in later by
459 * smb_rw_bend(). Also initialize the counter that it uses
460 * to figure out what value to fill in.
461 *
462 * Note that the byte count happens to be 16-bit.
463 */
464 void
465 smb_rq_bstart(struct smb_rq *rqp)
466 {
467 rqp->sr_bcount = mb_reserve(&rqp->sr_rq, sizeof (uint16_t));
468 rqp->sr_rq.mb_count = 0;
469 }
470
471 void
472 smb_rq_bend(struct smb_rq *rqp)
473 {
474 uint_t bcnt;
475
476 if (rqp->sr_bcount == NULL) {
477 SMBSDEBUG("no bcount\n");
478 return;
479 }
480 bcnt = rqp->sr_rq.mb_count;
481 if (bcnt > 0xffff)
482 SMBSDEBUG("byte count too large (%d)\n", bcnt);
483 /*
484 * Fill in the byte count (16-bits)
485 * The pointer is char * type due to
486 * typical off-by-one alignment.
487 */
488 rqp->sr_bcount[0] = bcnt & 0xFF;
489 rqp->sr_bcount[1] = (bcnt >> 8);
490 }
491
/*
 * Given a connection object (share or VC level), return the
 * VC and (when present) share pointers.  Either out-pointer
 * may be NULL if the caller doesn't need that object.
 *
 * Returns 0 on success, EINVAL for a "gone" (zombie) or
 * invalid-level object.  No holds are taken here; see the
 * hold discussion in smb_rq_init().
 */
int
smb_rq_getenv(struct smb_connobj *co,
    struct smb_vc **vcpp, struct smb_share **sspp)
{
	struct smb_vc *vcp = NULL;
	struct smb_share *ssp = NULL;
	int error = EINVAL;

	if (co->co_flags & SMBO_GONE) {
		SMBSDEBUG("zombie CO\n");
		error = EINVAL;
		goto out;
	}

	switch (co->co_level) {
	case SMBL_SHARE:
		ssp = CPTOSS(co);
		if ((co->co_flags & SMBO_GONE) ||
		    co->co_parent == NULL) {
			SMBSDEBUG("zombie share %s\n", ssp->ss_name);
			break;
		}
		/* instead of recursion... */
		co = co->co_parent;
		/* FALLTHROUGH */
	case SMBL_VC:
		vcp = CPTOVC(co);
		if ((co->co_flags & SMBO_GONE) ||
		    co->co_parent == NULL) {
			SMBSDEBUG("zombie VC %s\n", vcp->vc_srvname);
			break;
		}
		/* Both objects (if share given) are alive. */
		error = 0;
		break;

	default:
		SMBSDEBUG("invalid level %d passed\n", co->co_level);
	}

out:
	/* Only fill in the out-pointers on success. */
	if (!error) {
		if (vcpp)
			*vcpp = vcp;
		if (sspp)
			*sspp = ssp;
	}

	return (error);
}
541
542 /*
543 * Wait for a reply to this request, then parse it.
544 */
545 static int
546 smb_rq_reply(struct smb_rq *rqp)
547 {
548 int error;
549
550 if (rqp->sr_timo == SMBNOREPLYWAIT) {
551 smb_iod_removerq(rqp);
552 return (0);
553 }
554
555 error = smb_iod_waitrq(rqp);
556 if (error)
557 return (error);
558
559 /*
560 * If the request was signed, validate the
561 * signature on the response.
562 */
563 if (rqp->sr_rqflags2 & SMB_FLAGS2_SECURITY_SIGNATURE) {
564 error = smb_rq_verify(rqp);
565 if (error)
566 return (error);
567 }
568
569 /*
570 * Parse the SMB header
571 */
572 error = smb_rq_parsehdr(rqp);
573 if (error != 0)
574 return (error);
575
576 if (rqp->sr_error != 0) {
577 if (rqp->sr_rpflags2 & SMB_FLAGS2_ERR_STATUS) {
578 error = smb_maperr32(rqp->sr_error);
579 } else {
580 uint8_t errClass = rqp->sr_error & 0xff;
581 uint16_t errCode = rqp->sr_error >> 16;
582 /* Convert to NT status */
583 rqp->sr_error = smb_doserr2status(errClass, errCode);
584 error = smb_maperror(errClass, errCode);
585 }
586 }
587
588 if (error != 0) {
589 /*
590 * Do a special check for STATUS_BUFFER_OVERFLOW;
591 * it's not an error.
592 */
593 if (rqp->sr_error == NT_STATUS_BUFFER_OVERFLOW) {
594 /*
595 * Don't report it as an error to our caller;
596 * they can look at rqp->sr_error if they
597 * need to know whether we got a
598 * STATUS_BUFFER_OVERFLOW.
599 */
600 rqp->sr_flags |= SMBR_MOREDATA;
601 error = 0;
602 }
603 } else {
604 rqp->sr_flags &= ~SMBR_MOREDATA;
605 }
606
607 return (error);
608 }
609
610 /*
611 * Parse the SMB header
612 */
613 static int
614 smb_rq_parsehdr(struct smb_rq *rqp)
615 {
616 struct mdchain mdp_save;
617 struct mdchain *mdp = &rqp->sr_rp;
618 u_int8_t tb, sig[4];
619 int error;
620
621 /*
622 * Parse the signature. The reader already checked that
623 * the signature is valid. Here we just have to check
624 * for SMB1-to-SMB2 negotiate. Caller handles an EPROTO
625 * as a signal that we got an SMB2 reply. If we return
626 * EPROTO, rewind the mdchain back where it was.
627 */
628 mdp_save = *mdp;
629 error = md_get_mem(mdp, sig, 4, MB_MSYSTEM);
630 if (error)
631 return (error);
632 if (sig[0] != SMB_HDR_V1) {
633 if (rqp->sr_cmd == SMB_COM_NEGOTIATE) {
634 *mdp = mdp_save;
635 return (EPROTO);
636 }
637 return (EBADRPC);
638 }
639
640 /* Check cmd */
641 error = md_get_uint8(mdp, &tb);
642 if (tb != rqp->sr_cmd)
643 return (EBADRPC);
644
645 md_get_uint32le(mdp, &rqp->sr_error);
646 md_get_uint8(mdp, &rqp->sr_rpflags);
647 md_get_uint16le(mdp, &rqp->sr_rpflags2);
648
649 /* Skip: pid-high(2), MAC sig(8), reserved(2) */
650 md_get_mem(mdp, NULL, 12, MB_MSYSTEM);
651
652 md_get_uint16le(mdp, &rqp->sr_rptid);
653 md_get_uint16le(mdp, &rqp->sr_rppid);
654 md_get_uint16le(mdp, &rqp->sr_rpuid);
655 error = md_get_uint16le(mdp, &rqp->sr_rpmid);
656
657 return (error);
658 }
659
660
661 #define ALIGN4(a) (((a) + 3) & ~3)
662
663 /*
664 * TRANS2 request implementation
665 * TRANS implementation is in the "t2" routines
666 * NT_TRANSACTION implementation is the separate "nt" stuff
667 */
/*
 * Allocate and initialize a TRANS2 request object.
 * On success, *t2pp must be released with smb_t2_done().
 *
 * Note: SMBT2_ALLOCED is set *before* the error check on
 * purpose — smb_t2_init() bzero's the object, and the flag
 * must be set so the error-path smb_t2_done() frees t2p.
 */
int
smb_t2_alloc(struct smb_connobj *layer, ushort_t setup, struct smb_cred *scred,
    struct smb_t2rq **t2pp)
{
	struct smb_t2rq *t2p;
	int error;

	t2p = (struct smb_t2rq *)kmem_alloc(sizeof (*t2p), KM_SLEEP);
	if (t2p == NULL)
		return (ENOMEM);
	error = smb_t2_init(t2p, layer, &setup, 1, scred);
	t2p->t2_flags |= SMBT2_ALLOCED;
	if (error) {
		smb_t2_done(t2p);
		return (error);
	}
	*t2pp = t2p;
	return (0);
}
687
/*
 * Allocate and initialize an NT_TRANSACT request object.
 * On success, *ntpp must be released with smb_nt_done().
 *
 * Note: nt_lock/nt_cond are initialized here (not in
 * smb_nt_init), and SMBT2_ALLOCED is set before the error
 * check so the error-path smb_nt_done() can safely destroy
 * the locks and free ntp.
 */
int
smb_nt_alloc(struct smb_connobj *layer, ushort_t fn, struct smb_cred *scred,
    struct smb_ntrq **ntpp)
{
	struct smb_ntrq *ntp;
	int error;

	ntp = (struct smb_ntrq *)kmem_alloc(sizeof (*ntp), KM_SLEEP);
	if (ntp == NULL)
		return (ENOMEM);
	error = smb_nt_init(ntp, layer, fn, scred);
	mutex_init(&ntp->nt_lock, NULL, MUTEX_DRIVER, NULL);
	cv_init(&ntp->nt_cond, NULL, CV_DEFAULT, NULL);
	ntp->nt_flags |= SMBT2_ALLOCED;
	if (error) {
		smb_nt_done(ntp);
		return (error);
	}
	*ntpp = ntp;
	return (0);
}
709
/*
 * Initialize a TRANS2 request (possibly a stack local):
 * zero it, set up its lock/cv, copy in the setup words,
 * and resolve the VC from the source connection object.
 *
 * Returns 0 or an errno from smb_rq_getenv().  On failure
 * the lock/cv are already initialized; callers clean up
 * via smb_t2_done().
 */
int
smb_t2_init(struct smb_t2rq *t2p, struct smb_connobj *source, ushort_t *setup,
    int setupcnt, struct smb_cred *scred)
{
	int i;
	int error;

	bzero(t2p, sizeof (*t2p));
	mutex_init(&t2p->t2_lock, NULL, MUTEX_DRIVER, NULL);
	cv_init(&t2p->t2_cond, NULL, CV_DEFAULT, NULL);

	t2p->t2_source = source;
	t2p->t2_setupcount = (u_int16_t)setupcnt;
	t2p->t2_setupdata = t2p->t2_setup;
	/* Copy the caller's setup words into the request. */
	for (i = 0; i < setupcnt; i++)
		t2p->t2_setup[i] = setup[i];
	t2p->t2_fid = 0xffff;	/* "no fid" sentinel */
	t2p->t2_cred = scred;
	t2p->t2_share = (source->co_level == SMBL_SHARE ?
	    CPTOSS(source) : NULL); /* for smb up/down */
	error = smb_rq_getenv(source, &t2p->t2_vc, NULL);
	if (error)
		return (error);
	return (0);
}
735
/*
 * Initialize an NT_TRANSACT request: zero it, record the
 * function code and credentials, and resolve the VC from
 * the source connection object.
 *
 * NOTE(review): unlike smb_t2_init, nt_lock/nt_cond are NOT
 * initialized here — smb_nt_alloc does that.  Confirm there
 * are no stack-local users calling this directly before
 * smb_nt_done (which destroys those locks).
 */
int
smb_nt_init(struct smb_ntrq *ntp, struct smb_connobj *source, ushort_t fn,
    struct smb_cred *scred)
{
	int error;

	bzero(ntp, sizeof (*ntp));
	ntp->nt_source = source;
	ntp->nt_function = fn;
	ntp->nt_cred = scred;
	ntp->nt_share = (source->co_level == SMBL_SHARE ?
	    CPTOSS(source) : NULL); /* for smb up/down */
	error = smb_rq_getenv(source, &ntp->nt_vc, NULL);
	if (error)
		return (error);
	return (0);
}
753
754 void
755 smb_t2_done(struct smb_t2rq *t2p)
756 {
757 mb_done(&t2p->t2_tparam);
758 mb_done(&t2p->t2_tdata);
759 md_done(&t2p->t2_rparam);
760 md_done(&t2p->t2_rdata);
761 mutex_destroy(&t2p->t2_lock);
762 cv_destroy(&t2p->t2_cond);
763 if (t2p->t2_flags & SMBT2_ALLOCED)
764 kmem_free(t2p, sizeof (*t2p));
765 }
766
767 void
768 smb_nt_done(struct smb_ntrq *ntp)
769 {
770 mb_done(&ntp->nt_tsetup);
771 mb_done(&ntp->nt_tparam);
772 mb_done(&ntp->nt_tdata);
773 md_done(&ntp->nt_rparam);
774 md_done(&ntp->nt_rdata);
775 cv_destroy(&ntp->nt_cond);
776 mutex_destroy(&ntp->nt_lock);
777 if (ntp->nt_flags & SMBT2_ALLOCED)
778 kmem_free(ntp, sizeof (*ntp));
779 }
780
781 /*
782 * Extract data [offset,count] from mtop and add to mdp.
783 */
784 static int
785 smb_t2_placedata(mblk_t *mtop, u_int16_t offset, u_int16_t count,
786 struct mdchain *mdp)
787 {
788 mblk_t *n;
789
790 n = m_copym(mtop, offset, count, M_WAITOK);
791 if (n == NULL)
792 return (EBADRPC);
793
794 if (mdp->md_top == NULL) {
795 md_initm(mdp, n);
796 } else
797 m_cat(mdp->md_top, n);
798
799 return (0);
800 }
801
/*
 * Collect and reassemble the (possibly multi-part) reply to a
 * TRANS/TRANS2 request.  Parameter and data fragments are copied
 * into t2_rparam / t2_rdata until the announced totals arrive.
 * Out-of-order fragments are not supported (EINVAL).
 * Mirrors smb_nt_reply below, but with 16-bit counts/offsets.
 */
static int
smb_t2_reply(struct smb_t2rq *t2p)
{
	struct mdchain *mdp;
	struct smb_rq *rqp = t2p->t2_rq;
	int error, error2, totpgot, totdgot;
	u_int16_t totpcount, totdcount, pcount, poff, doff, pdisp, ddisp;
	u_int16_t tmp, bc, dcount;
	u_int8_t wc;

	t2p->t2_flags &= ~SMBT2_MOREDATA;

	error = smb_rq_reply(rqp);
	if (rqp->sr_flags & SMBR_MOREDATA)
		t2p->t2_flags |= SMBT2_MOREDATA;
	/* Propagate the reply's error info to the t2 object. */
	t2p->t2_sr_errclass = rqp->sr_errclass;
	t2p->t2_sr_serror = rqp->sr_serror;
	t2p->t2_sr_error = rqp->sr_error;
	t2p->t2_sr_rpflags2 = rqp->sr_rpflags2;
	if (error && !(rqp->sr_flags & SMBR_MOREDATA))
		return (error);
	/*
	 * Now we have to get all subsequent responses, if any.
	 * The CIFS specification says that they can be misordered,
	 * which is weird.
	 * TODO: timo
	 */
	totpgot = totdgot = 0;
	/* Totals start "unknown"; clamped down by each response. */
	totpcount = totdcount = 0xffff;
	mdp = &rqp->sr_rp;
	for (;;) {
		DTRACE_PROBE2(smb_trans_reply,
		    (smb_rq_t *), rqp, (mblk_t *), mdp->md_top);
		m_dumpm(mdp->md_top);

		if ((error2 = md_get_uint8(mdp, &wc)) != 0)
			break;
		/* TRANS/TRANS2 replies have at least 10 words. */
		if (wc < 10) {
			error2 = ENOENT;
			break;
		}
		if ((error2 = md_get_uint16le(mdp, &tmp)) != 0)
			break;
		if (totpcount > tmp)
			totpcount = tmp;
		if ((error2 = md_get_uint16le(mdp, &tmp)) != 0)
			break;
		if (totdcount > tmp)
			totdcount = tmp;
		if ((error2 = md_get_uint16le(mdp, &tmp)) != 0 || /* reserved */
		    (error2 = md_get_uint16le(mdp, &pcount)) != 0 ||
		    (error2 = md_get_uint16le(mdp, &poff)) != 0 ||
		    (error2 = md_get_uint16le(mdp, &pdisp)) != 0)
			break;
		/* Only in-order parameter fragments are handled. */
		if (pcount != 0 && pdisp != totpgot) {
			SMBSDEBUG("Can't handle misordered parameters %d:%d\n",
			    pdisp, totpgot);
			error2 = EINVAL;
			break;
		}
		if ((error2 = md_get_uint16le(mdp, &dcount)) != 0 ||
		    (error2 = md_get_uint16le(mdp, &doff)) != 0 ||
		    (error2 = md_get_uint16le(mdp, &ddisp)) != 0)
			break;
		/* Only in-order data fragments are handled. */
		if (dcount != 0 && ddisp != totdgot) {
			SMBSDEBUG("Can't handle misordered data: dcount %d\n",
			    dcount);
			error2 = EINVAL;
			break;
		}

		/* XXX: Skip setup words? We don't save them? */
		md_get_uint8(mdp, &wc);  /* SetupCount */
		md_get_uint8(mdp, NULL); /* Reserved2 */
		tmp = wc;
		while (tmp--)
			md_get_uint16le(mdp, NULL);

		if ((error2 = md_get_uint16le(mdp, &bc)) != 0)
			break;

		/*
		 * There are pad bytes here, and the poff value
		 * indicates where the next data are found.
		 * No need to guess at the padding size.
		 */
		if (pcount) {
			error2 = smb_t2_placedata(mdp->md_top, poff,
			    pcount, &t2p->t2_rparam);
			if (error2)
				break;
		}
		totpgot += pcount;

		if (dcount) {
			error2 = smb_t2_placedata(mdp->md_top, doff,
			    dcount, &t2p->t2_rdata);
			if (error2)
				break;
		}
		totdgot += dcount;

		/* Got everything the server announced? */
		if (totpgot >= totpcount && totdgot >= totdcount) {
			error2 = 0;
			t2p->t2_flags |= SMBT2_ALLRECV;
			break;
		}
		/*
		 * We're done with this reply, look for the next one.
		 */
		SMBRQ_LOCK(rqp);
		md_next_record(&rqp->sr_rp);
		SMBRQ_UNLOCK(rqp);
		error2 = smb_rq_reply(rqp);
		if (rqp->sr_flags & SMBR_MOREDATA)
			t2p->t2_flags |= SMBT2_MOREDATA;
		if (!error2)
			continue;
		/* Capture error info from the failed follow-up reply. */
		t2p->t2_sr_errclass = rqp->sr_errclass;
		t2p->t2_sr_serror = rqp->sr_serror;
		t2p->t2_sr_error = rqp->sr_error;
		t2p->t2_sr_rpflags2 = rqp->sr_rpflags2;
		error = error2;
		if (!(rqp->sr_flags & SMBR_MOREDATA))
			break;
	}
	return (error ? error : error2);
}
930
/*
 * Collect and reassemble the (possibly multi-part) reply to an
 * NT_TRANSACT request.  Same structure as smb_t2_reply above,
 * but with 32-bit counts/offsets and the NT reply header layout
 * (3 reserved bytes, word count >= 18, no Reserved2 byte).
 */
static int
smb_nt_reply(struct smb_ntrq *ntp)
{
	struct mdchain *mdp;
	struct smb_rq *rqp = ntp->nt_rq;
	int error, error2;
	u_int32_t totpcount, totdcount, pcount, poff, doff, pdisp, ddisp;
	u_int32_t tmp, dcount, totpgot, totdgot;
	u_int16_t bc;
	u_int8_t wc;

	ntp->nt_flags &= ~SMBT2_MOREDATA;

	error = smb_rq_reply(rqp);
	if (rqp->sr_flags & SMBR_MOREDATA)
		ntp->nt_flags |= SMBT2_MOREDATA;
	/* Propagate the reply's error info to the nt object. */
	ntp->nt_sr_error = rqp->sr_error;
	ntp->nt_sr_rpflags2 = rqp->sr_rpflags2;
	if (error && !(rqp->sr_flags & SMBR_MOREDATA))
		return (error);
	/*
	 * Now we have to get all subsequent responses. The CIFS specification
	 * says that they can be misordered which is weird.
	 * TODO: timo
	 */
	totpgot = totdgot = 0;
	/* Totals start "unknown"; clamped down by each response. */
	totpcount = totdcount = 0xffffffff;
	mdp = &rqp->sr_rp;
	for (;;) {
		DTRACE_PROBE2(smb_trans_reply,
		    (smb_rq_t *), rqp, (mblk_t *), mdp->md_top);
		m_dumpm(mdp->md_top);

		if ((error2 = md_get_uint8(mdp, &wc)) != 0)
			break;
		/* NT_TRANSACT replies have at least 18 words. */
		if (wc < 18) {
			error2 = ENOENT;
			break;
		}
		md_get_mem(mdp, NULL, 3, MB_MSYSTEM); /* reserved */
		if ((error2 = md_get_uint32le(mdp, &tmp)) != 0)
			break;
		if (totpcount > tmp)
			totpcount = tmp;
		if ((error2 = md_get_uint32le(mdp, &tmp)) != 0)
			break;
		if (totdcount > tmp)
			totdcount = tmp;
		if ((error2 = md_get_uint32le(mdp, &pcount)) != 0 ||
		    (error2 = md_get_uint32le(mdp, &poff)) != 0 ||
		    (error2 = md_get_uint32le(mdp, &pdisp)) != 0)
			break;
		/* Only in-order parameter fragments are handled. */
		if (pcount != 0 && pdisp != totpgot) {
			SMBSDEBUG("Can't handle misordered parameters %d:%d\n",
			    pdisp, totpgot);
			error2 = EINVAL;
			break;
		}
		if ((error2 = md_get_uint32le(mdp, &dcount)) != 0 ||
		    (error2 = md_get_uint32le(mdp, &doff)) != 0 ||
		    (error2 = md_get_uint32le(mdp, &ddisp)) != 0)
			break;
		/* Only in-order data fragments are handled. */
		if (dcount != 0 && ddisp != totdgot) {
			SMBSDEBUG("Can't handle misordered data: dcount %d\n",
			    dcount);
			error2 = EINVAL;
			break;
		}

		/* XXX: Skip setup words? We don't save them? */
		md_get_uint8(mdp, &wc);  /* SetupCount */
		tmp = wc;
		while (tmp--)
			md_get_uint16le(mdp, NULL);

		if ((error2 = md_get_uint16le(mdp, &bc)) != 0)
			break;

		/*
		 * There are pad bytes here, and the poff value
		 * indicates where the next data are found.
		 * No need to guess at the padding size.
		 */
		if (pcount) {
			error2 = smb_t2_placedata(mdp->md_top, poff, pcount,
			    &ntp->nt_rparam);
			if (error2)
				break;
		}
		totpgot += pcount;

		if (dcount) {
			error2 = smb_t2_placedata(mdp->md_top, doff, dcount,
			    &ntp->nt_rdata);
			if (error2)
				break;
		}
		totdgot += dcount;

		/* Got everything the server announced? */
		if (totpgot >= totpcount && totdgot >= totdcount) {
			error2 = 0;
			ntp->nt_flags |= SMBT2_ALLRECV;
			break;
		}
		/*
		 * We're done with this reply, look for the next one.
		 */
		SMBRQ_LOCK(rqp);
		md_next_record(&rqp->sr_rp);
		SMBRQ_UNLOCK(rqp);
		error2 = smb_rq_reply(rqp);
		if (rqp->sr_flags & SMBR_MOREDATA)
			ntp->nt_flags |= SMBT2_MOREDATA;
		if (!error2)
			continue;
		/* Capture error info from the failed follow-up reply. */
		ntp->nt_sr_error = rqp->sr_error;
		ntp->nt_sr_rpflags2 = rqp->sr_rpflags2;
		error = error2;
		if (!(rqp->sr_flags & SMBR_MOREDATA))
			break;
	}
	return (error ? error : error2);
}
1054
1055 /*
1056 * Perform a full round of TRANS2 request
1057 */
1058 static int
1059 smb_t2_request_int(struct smb_t2rq *t2p)
1060 {
1061 struct smb_vc *vcp = t2p->t2_vc;
1062 struct smb_cred *scred = t2p->t2_cred;
1063 struct mbchain *mbp;
1064 struct mdchain *mdp, mbparam, mbdata;
1065 mblk_t *m;
1066 struct smb_rq *rqp;
1067 int totpcount, leftpcount, totdcount, leftdcount, len, txmax, i;
1068 int error, doff, poff, txdcount, txpcount, nmlen, nmsize;
1069
1070 m = t2p->t2_tparam.mb_top;
1071 if (m) {
1072 md_initm(&mbparam, m); /* do not free it! */
1073 totpcount = m_fixhdr(m);
1074 if (totpcount > 0xffff) /* maxvalue for ushort_t */
1075 return (EINVAL);
1076 } else
1077 totpcount = 0;
1078 m = t2p->t2_tdata.mb_top;
1079 if (m) {
1080 md_initm(&mbdata, m); /* do not free it! */
1081 totdcount = m_fixhdr(m);
1082 if (totdcount > 0xffff)
1083 return (EINVAL);
1084 } else
1085 totdcount = 0;
1086 leftdcount = totdcount;
1087 leftpcount = totpcount;
1088 txmax = vcp->vc_txmax;
1089 error = smb_rq_alloc(t2p->t2_source, t2p->t_name ?
1090 SMB_COM_TRANSACTION : SMB_COM_TRANSACTION2, scred, &rqp);
1091 if (error)
1092 return (error);
1093 rqp->sr_timo = smb_timo_default;
1094 rqp->sr_flags |= SMBR_MULTIPACKET;
1095 t2p->t2_rq = rqp;
1096 mbp = &rqp->sr_rq;
1097 smb_rq_wstart(rqp);
1098 mb_put_uint16le(mbp, totpcount);
1099 mb_put_uint16le(mbp, totdcount);
1100 mb_put_uint16le(mbp, t2p->t2_maxpcount);
1101 mb_put_uint16le(mbp, t2p->t2_maxdcount);
1102 mb_put_uint8(mbp, t2p->t2_maxscount);
1103 mb_put_uint8(mbp, 0); /* reserved */
1104 mb_put_uint16le(mbp, 0); /* flags */
1105 mb_put_uint32le(mbp, 0); /* Timeout */
1106 mb_put_uint16le(mbp, 0); /* reserved 2 */
1107 len = mb_fixhdr(mbp);
1108
1109 /*
1110 * Now we know the size of the trans overhead stuff:
1111 * ALIGN4(len + 5 * 2 + setupcount * 2 + 2 + nmsize),
1112 * where nmsize is the OTW size of the name, including
1113 * the unicode null terminator and any alignment.
1114 * Use this to decide which parts (and how much)
1115 * can go into this request: params, data
1116 */
1117 nmlen = t2p->t_name ? t2p->t_name_len : 0;
1118 nmsize = nmlen + 1; /* null term. */
1119 if (SMB_UNICODE_STRINGS(vcp)) {
1120 nmsize *= 2;
1121 /* we know put_dmem will need to align */
1122 nmsize += 1;
1123 }
1124 len = ALIGN4(len + 5 * 2 + t2p->t2_setupcount * 2 + 2 + nmsize);
1125 if (len + leftpcount > txmax) {
1126 txpcount = min(leftpcount, txmax - len);
1127 poff = len;
1128 txdcount = 0;
1129 doff = 0;
1130 } else {
1131 txpcount = leftpcount;
1132 poff = txpcount ? len : 0;
1133 /*
1134 * Other client traffic seems to "ALIGN2" here. The extra
1135 * 2 byte pad we use has no observed downside and may be
1136 * required for some old servers(?)
1137 */
1138 len = ALIGN4(len + txpcount);
1139 txdcount = min(leftdcount, txmax - len);
1140 doff = txdcount ? len : 0;
1141 }
1142 leftpcount -= txpcount;
1143 leftdcount -= txdcount;
1144 mb_put_uint16le(mbp, txpcount);
1145 mb_put_uint16le(mbp, poff);
1146 mb_put_uint16le(mbp, txdcount);
1147 mb_put_uint16le(mbp, doff);
1148 mb_put_uint8(mbp, t2p->t2_setupcount);
1149 mb_put_uint8(mbp, 0);
1150 for (i = 0; i < t2p->t2_setupcount; i++) {
1151 mb_put_uint16le(mbp, t2p->t2_setupdata[i]);
1152 }
1153 smb_rq_wend(rqp);
1154 smb_rq_bstart(rqp);
1155 if (t2p->t_name) {
1156 /* Put the string and terminating null. */
1157 error = smb_put_dmem(mbp, vcp, t2p->t_name, nmlen + 1,
1158 SMB_CS_NONE, NULL);
1159 } else {
1160 /* nmsize accounts for padding, char size. */
1161 error = mb_put_mem(mbp, NULL, nmsize, MB_MZERO);
1162 }
1163 if (error)
1164 goto freerq;
1165 len = mb_fixhdr(mbp);
1166 if (txpcount) {
1167 mb_put_mem(mbp, NULL, ALIGN4(len) - len, MB_MZERO);
1168 error = md_get_mbuf(&mbparam, txpcount, &m);
1169 SMBSDEBUG("%d:%d:%d\n", error, txpcount, txmax);
1170 if (error)
1171 goto freerq;
1172 mb_put_mbuf(mbp, m);
1173 }
1174 len = mb_fixhdr(mbp);
1175 if (txdcount) {
1176 mb_put_mem(mbp, NULL, ALIGN4(len) - len, MB_MZERO);
1177 error = md_get_mbuf(&mbdata, txdcount, &m);
1178 if (error)
1179 goto freerq;
1180 mb_put_mbuf(mbp, m);
1181 }
1182 smb_rq_bend(rqp); /* incredible, but thats it... */
1183 error = smb_rq_enqueue(rqp);
1184 if (error)
1185 goto freerq;
1186 if (leftpcount || leftdcount) {
1187 error = smb_rq_reply(rqp);
1188 if (error)
1189 goto bad;
1190 /*
1191 * this is an interim response, ignore it.
1192 */
1193 SMBRQ_LOCK(rqp);
1194 md_next_record(&rqp->sr_rp);
1195 SMBRQ_UNLOCK(rqp);
1196 }
1197 while (leftpcount || leftdcount) {
1198 error = smb_rq_new(rqp, t2p->t_name ?
1199 SMB_COM_TRANSACTION_SECONDARY :
1200 SMB_COM_TRANSACTION2_SECONDARY);
1201 if (error)
1202 goto bad;
1203 mbp = &rqp->sr_rq;
1204 smb_rq_wstart(rqp);
1205 mb_put_uint16le(mbp, totpcount);
1206 mb_put_uint16le(mbp, totdcount);
1207 len = mb_fixhdr(mbp);
1208 /*
1209 * now we have known packet size as
1210 * ALIGN4(len + 7 * 2 + 2) for T2 request, and -2 for T one,
1211 * and need to decide which parts should go into request
1212 */
1213 len = ALIGN4(len + 6 * 2 + 2);
1214 if (t2p->t_name == NULL)
1215 len += 2;
1216 if (len + leftpcount > txmax) {
1217 txpcount = min(leftpcount, txmax - len);
1218 poff = len;
1219 txdcount = 0;
1220 doff = 0;
1221 } else {
1222 txpcount = leftpcount;
1223 poff = txpcount ? len : 0;
1224 len = ALIGN4(len + txpcount);
1225 txdcount = min(leftdcount, txmax - len);
1226 doff = txdcount ? len : 0;
1227 }
1228 mb_put_uint16le(mbp, txpcount);
1229 mb_put_uint16le(mbp, poff);
1230 mb_put_uint16le(mbp, totpcount - leftpcount);
1231 mb_put_uint16le(mbp, txdcount);
1232 mb_put_uint16le(mbp, doff);
1233 mb_put_uint16le(mbp, totdcount - leftdcount);
1234 leftpcount -= txpcount;
1235 leftdcount -= txdcount;
1236 if (t2p->t_name == NULL)
1237 mb_put_uint16le(mbp, t2p->t2_fid);
1238 smb_rq_wend(rqp);
1239 smb_rq_bstart(rqp);
1240 mb_put_uint8(mbp, 0); /* name */
1241 len = mb_fixhdr(mbp);
1242 if (txpcount) {
1243 mb_put_mem(mbp, NULL, ALIGN4(len) - len, MB_MZERO);
1244 error = md_get_mbuf(&mbparam, txpcount, &m);
1245 if (error)
1246 goto bad;
1247 mb_put_mbuf(mbp, m);
1248 }
1249 len = mb_fixhdr(mbp);
1250 if (txdcount) {
1251 mb_put_mem(mbp, NULL, ALIGN4(len) - len, MB_MZERO);
1252 error = md_get_mbuf(&mbdata, txdcount, &m);
1253 if (error)
1254 goto bad;
1255 mb_put_mbuf(mbp, m);
1256 }
1257 smb_rq_bend(rqp);
1258 error = smb1_iod_multirq(rqp);
1259 if (error)
1260 goto bad;
1261 } /* while left params or data */
1262 error = smb_t2_reply(t2p);
1263 if (error && !(t2p->t2_flags & SMBT2_MOREDATA))
1264 goto bad;
1265 mdp = &t2p->t2_rdata;
1266 if (mdp->md_top) {
1267 md_initm(mdp, mdp->md_top);
1268 }
1269 mdp = &t2p->t2_rparam;
1270 if (mdp->md_top) {
1271 md_initm(mdp, mdp->md_top);
1272 }
1273 bad:
1274 smb_iod_removerq(rqp);
1275 freerq:
1276 if (error && !(t2p->t2_flags & SMBT2_MOREDATA)) {
1277 if (rqp->sr_flags & SMBR_RESTART)
1278 t2p->t2_flags |= SMBT2_RESTART;
1279 md_done(&t2p->t2_rparam);
1280 md_done(&t2p->t2_rdata);
1281 }
1282 smb_rq_done(rqp);
1283 return (error);
1284 }
1285
1286
/*
 * Perform a full round of NT_TRANSACTION request
 *
 * Marshals the setup, parameter and data mbuf chains held in *ntp into
 * an SMB_COM_NT_TRANSACT request.  Whatever parameter/data bytes do not
 * fit in the first packet (limited by vc_txmax) are sent in follow-up
 * SMB_COM_NT_TRANSACT_SECONDARY requests, after consuming the server's
 * interim response.  Finally collects the reply via smb_nt_reply() and
 * rewinds the received param/data chains for the caller to parse.
 *
 * Returns 0 or an errno.  On error (unless SMBT2_MOREDATA is set) the
 * received chains are freed and SMBT2_RESTART may be set in nt_flags
 * to ask the caller to retry.
 */
static int
smb_nt_request_int(struct smb_ntrq *ntp)
{
	struct smb_vc *vcp = ntp->nt_vc;
	struct smb_cred *scred = ntp->nt_cred;
	struct mbchain *mbp;
	struct mdchain *mdp, mbsetup, mbparam, mbdata;
	mblk_t *m;
	struct smb_rq *rqp;
	int totpcount, leftpcount, totdcount, leftdcount, len, txmax;
	int error, doff, poff, txdcount, txpcount;
	int totscount;

	/*
	 * Wrap the caller's setup/param/data chains in local mdchains so
	 * they can be consumed piecewise across multiple packets, and
	 * compute the total length of each.  The chains still belong to
	 * the caller (freed via smb_nt_done), hence "do not free".
	 */
	m = ntp->nt_tsetup.mb_top;
	if (m) {
		md_initm(&mbsetup, m);	/* do not free it! */
		totscount = m_fixhdr(m);
		/* Setup count is a byte on the wire, counted in words. */
		if (totscount > 2 * 0xff)
			return (EINVAL);
	} else
		totscount = 0;
	m = ntp->nt_tparam.mb_top;
	if (m) {
		md_initm(&mbparam, m);	/* do not free it! */
		totpcount = m_fixhdr(m);
		/* Wire field is 32-bit; keep it non-negative as an int. */
		if (totpcount > 0x7fffffff)
			return (EINVAL);
	} else
		totpcount = 0;
	m = ntp->nt_tdata.mb_top;
	if (m) {
		md_initm(&mbdata, m);	/* do not free it! */
		totdcount = m_fixhdr(m);
		if (totdcount > 0x7fffffff)
			return (EINVAL);
	} else
		totdcount = 0;
	leftdcount = totdcount;
	leftpcount = totpcount;
	txmax = vcp->vc_txmax;
	error = smb_rq_alloc(ntp->nt_source, SMB_COM_NT_TRANSACT, scred, &rqp);
	if (error)
		return (error);
	rqp->sr_timo = smb_timo_default;
	rqp->sr_flags |= SMBR_MULTIPACKET;
	ntp->nt_rq = rqp;
	mbp = &rqp->sr_rq;
	/* Build the fixed (word) portion of the primary request. */
	smb_rq_wstart(rqp);
	mb_put_uint8(mbp, ntp->nt_maxscount);
	mb_put_uint16le(mbp, 0);	/* reserved (flags?) */
	mb_put_uint32le(mbp, totpcount);
	mb_put_uint32le(mbp, totdcount);
	mb_put_uint32le(mbp, ntp->nt_maxpcount);
	mb_put_uint32le(mbp, ntp->nt_maxdcount);
	len = mb_fixhdr(mbp);
	/*
	 * now we have known packet size as
	 * ALIGN4(len + 4 * 4 + 1 + 2 + ((totscount+1)&~1) + 2),
	 * (remaining count/offset words, setup count byte, function word,
	 * word-padded setup bytes, byte count) and need to decide which
	 * parts should go into the first request
	 */
	len = ALIGN4(len + 4 * 4 + 1 + 2 + ((totscount+1)&~1) + 2);
	if (len + leftpcount > txmax) {
		/* Not all params fit: send what fits, data waits. */
		txpcount = min(leftpcount, txmax - len);
		poff = len;
		txdcount = 0;
		doff = 0;
	} else {
		/* All params fit; fill remaining space with data. */
		txpcount = leftpcount;
		poff = txpcount ? len : 0;
		len = ALIGN4(len + txpcount);
		txdcount = min(leftdcount, txmax - len);
		doff = txdcount ? len : 0;
	}
	leftpcount -= txpcount;
	leftdcount -= txdcount;
	mb_put_uint32le(mbp, txpcount);
	mb_put_uint32le(mbp, poff);
	mb_put_uint32le(mbp, txdcount);
	mb_put_uint32le(mbp, doff);
	mb_put_uint8(mbp, (totscount+1)/2);	/* setup count, in words */
	mb_put_uint16le(mbp, ntp->nt_function);
	if (totscount) {
		error = md_get_mbuf(&mbsetup, totscount, &m);
		SMBSDEBUG("%d:%d:%d\n", error, totscount, txmax);
		if (error)
			goto freerq;
		mb_put_mbuf(mbp, m);
		if (totscount & 1)
			mb_put_uint8(mbp, 0);	/* setup is in words */
	}
	smb_rq_wend(rqp);
	smb_rq_bstart(rqp);
	/* Pad to 4-byte alignment, then append this packet's params. */
	len = mb_fixhdr(mbp);
	if (txpcount) {
		mb_put_mem(mbp, NULL, ALIGN4(len) - len, MB_MZERO);
		error = md_get_mbuf(&mbparam, txpcount, &m);
		SMBSDEBUG("%d:%d:%d\n", error, txpcount, txmax);
		if (error)
			goto freerq;
		mb_put_mbuf(mbp, m);
	}
	/* Likewise for this packet's data bytes. */
	len = mb_fixhdr(mbp);
	if (txdcount) {
		mb_put_mem(mbp, NULL, ALIGN4(len) - len, MB_MZERO);
		error = md_get_mbuf(&mbdata, txdcount, &m);
		if (error)
			goto freerq;
		mb_put_mbuf(mbp, m);
	}
	smb_rq_bend(rqp);	/* incredible, but that's it... */
	error = smb_rq_enqueue(rqp);
	if (error)
		goto freerq;
	if (leftpcount || leftdcount) {
		error = smb_rq_reply(rqp);
		if (error)
			goto bad;
		/*
		 * this is an interim response, ignore it.
		 */
		SMBRQ_LOCK(rqp);
		md_next_record(&rqp->sr_rp);
		SMBRQ_UNLOCK(rqp);
	}
	/* Send secondary requests until all params and data are out. */
	while (leftpcount || leftdcount) {
		error = smb_rq_new(rqp, SMB_COM_NT_TRANSACT_SECONDARY);
		if (error)
			goto bad;
		mbp = &rqp->sr_rq;
		smb_rq_wstart(rqp);
		mb_put_mem(mbp, NULL, 3, MB_MZERO);	/* 3 reserved bytes */
		mb_put_uint32le(mbp, totpcount);
		mb_put_uint32le(mbp, totdcount);
		len = mb_fixhdr(mbp);
		/*
		 * now we have known packet size as
		 * ALIGN4(len + 6 * 4 + 2)
		 * (six count/offset/displacement words plus byte count)
		 * and need to decide which parts should go into request
		 */
		len = ALIGN4(len + 6 * 4 + 2);
		if (len + leftpcount > txmax) {
			txpcount = min(leftpcount, txmax - len);
			poff = len;
			txdcount = 0;
			doff = 0;
		} else {
			txpcount = leftpcount;
			poff = txpcount ? len : 0;
			len = ALIGN4(len + txpcount);
			txdcount = min(leftdcount, txmax - len);
			doff = txdcount ? len : 0;
		}
		mb_put_uint32le(mbp, txpcount);
		mb_put_uint32le(mbp, poff);
		/* Displacement: how much has already been sent. */
		mb_put_uint32le(mbp, totpcount - leftpcount);
		mb_put_uint32le(mbp, txdcount);
		mb_put_uint32le(mbp, doff);
		mb_put_uint32le(mbp, totdcount - leftdcount);
		leftpcount -= txpcount;
		leftdcount -= txdcount;
		smb_rq_wend(rqp);
		smb_rq_bstart(rqp);
		len = mb_fixhdr(mbp);
		if (txpcount) {
			mb_put_mem(mbp, NULL, ALIGN4(len) - len, MB_MZERO);
			error = md_get_mbuf(&mbparam, txpcount, &m);
			if (error)
				goto bad;
			mb_put_mbuf(mbp, m);
		}
		len = mb_fixhdr(mbp);
		if (txdcount) {
			mb_put_mem(mbp, NULL, ALIGN4(len) - len, MB_MZERO);
			error = md_get_mbuf(&mbdata, txdcount, &m);
			if (error)
				goto bad;
			mb_put_mbuf(mbp, m);
		}
		smb_rq_bend(rqp);
		error = smb1_iod_multirq(rqp);
		if (error)
			goto bad;
	}	/* while left params or data */
	error = smb_nt_reply(ntp);
	if (error && !(ntp->nt_flags & SMBT2_MOREDATA))
		goto bad;
	/* Rewind received data/param chains so the caller parses from 0. */
	mdp = &ntp->nt_rdata;
	if (mdp->md_top) {
		md_initm(mdp, mdp->md_top);
	}
	mdp = &ntp->nt_rparam;
	if (mdp->md_top) {
		md_initm(mdp, mdp->md_top);
	}
bad:
	smb_iod_removerq(rqp);
freerq:
	if (error && !(ntp->nt_flags & SMBT2_MOREDATA)) {
		/* Propagate a restart hint so smb_nt_request() can retry. */
		if (rqp->sr_flags & SMBR_RESTART)
			ntp->nt_flags |= SMBT2_RESTART;
		md_done(&ntp->nt_rparam);
		md_done(&ntp->nt_rdata);
	}
	smb_rq_done(rqp);
	return (error);
}
1496
1497 int
1498 smb_t2_request(struct smb_t2rq *t2p)
1499 {
1500 int error = EINVAL, i;
1501
1502 for (i = 0; ; ) {
1503 /*
1504 * Don't send any new requests if force unmount is underway.
1505 * This check was moved into smb_rq_enqueue, called by
1506 * smb_t2_request_int()
1507 */
1508 t2p->t2_flags &= ~SMBT2_RESTART;
1509 error = smb_t2_request_int(t2p);
1510 if (!error)
1511 break;
1512 if ((t2p->t2_flags & (SMBT2_RESTART | SMBT2_NORESTART)) !=
1513 SMBT2_RESTART)
1514 break;
1515 if (++i > SMBMAXRESTARTS)
1516 break;
1517 mutex_enter(&(t2p)->t2_lock);
1518 if (t2p->t2_share) {
1519 (void) cv_reltimedwait(&t2p->t2_cond, &(t2p)->t2_lock,
1520 SEC_TO_TICK(SMB_RCNDELAY), TR_CLOCK_TICK);
1521 } else {
1522 delay(SEC_TO_TICK(SMB_RCNDELAY));
1523 }
1524 mutex_exit(&(t2p)->t2_lock);
1525 }
1526 return (error);
1527 }
1528
1529
1530 int
1531 smb_nt_request(struct smb_ntrq *ntp)
1532 {
1533 int error = EINVAL, i;
1534
1535 for (i = 0; ; ) {
1536 /*
1537 * Don't send any new requests if force unmount is underway.
1538 * This check was moved into smb_rq_enqueue, called by
1539 * smb_nt_request_int()
1540 */
1541 ntp->nt_flags &= ~SMBT2_RESTART;
1542 error = smb_nt_request_int(ntp);
1543 if (!error)
1544 break;
1545 if ((ntp->nt_flags & (SMBT2_RESTART | SMBT2_NORESTART)) !=
1546 SMBT2_RESTART)
1547 break;
1548 if (++i > SMBMAXRESTARTS)
1549 break;
1550 mutex_enter(&(ntp)->nt_lock);
1551 if (ntp->nt_share) {
1552 (void) cv_reltimedwait(&ntp->nt_cond, &(ntp)->nt_lock,
1553 SEC_TO_TICK(SMB_RCNDELAY), TR_CLOCK_TICK);
1554
1555 } else {
1556 delay(SEC_TO_TICK(SMB_RCNDELAY));
1557 }
1558 mutex_exit(&(ntp)->nt_lock);
1559 }
1560 return (error);
1561 }
1562
1563 /*
1564 * Run an SMB transact named pipe.
1565 * Note: send_mb is consumed.
1566 */
1567 int
1568 smb_t2_xnp(struct smb_share *ssp, uint16_t fid,
1569 struct mbchain *send_mb, struct mdchain *recv_md,
1570 uint32_t *data_out_sz, /* max / returned */
1571 uint32_t *more, struct smb_cred *scrp)
1572 {
1573 struct smb_t2rq *t2p = NULL;
1574 mblk_t *m;
1575 uint16_t setup[2];
1576 int err;
1577
1578 setup[0] = TRANS_TRANSACT_NAMED_PIPE;
1579 setup[1] = fid;
1580
1581 t2p = kmem_alloc(sizeof (*t2p), KM_SLEEP);
1582 err = smb_t2_init(t2p, SSTOCP(ssp), setup, 2, scrp);
1583 if (err) {
1584 *data_out_sz = 0;
1585 goto out;
1586 }
1587
1588 t2p->t2_setupcount = 2;
1589 t2p->t2_setupdata = setup;
1590
1591 t2p->t_name = "\\PIPE\\";
1592 t2p->t_name_len = 6;
1593
1594 t2p->t2_maxscount = 0;
1595 t2p->t2_maxpcount = 0;
1596 t2p->t2_maxdcount = (uint16_t)*data_out_sz;
1597
1598 /* Transmit parameters (none) */
1599
1600 /*
1601 * Transmit data
1602 *
1603 * Copy the mb, and clear the source so we
1604 * don't end up with a double free.
1605 */
1606 t2p->t2_tdata = *send_mb;
1607 bzero(send_mb, sizeof (*send_mb));
1608
1609 /*
1610 * Run the request
1611 */
1612 err = smb_t2_request(t2p);
1613
1614 /* No returned parameters. */
1615
1616 if (err == 0 && (m = t2p->t2_rdata.md_top) != NULL) {
1617 /*
1618 * Received data
1619 *
1620 * Copy the mdchain, and clear the source so we
1621 * don't end up with a double free.
1622 */
1623 *data_out_sz = msgdsize(m);
1624 md_initm(recv_md, m);
1625 t2p->t2_rdata.md_top = NULL;
1626 } else {
1627 *data_out_sz = 0;
1628 }
1629
1630 if (t2p->t2_sr_error == NT_STATUS_BUFFER_OVERFLOW)
1631 *more = 1;
1632
1633 out:
1634 if (t2p != NULL) {
1635 /* Note: t2p->t_name no longer allocated */
1636 smb_t2_done(t2p);
1637 kmem_free(t2p, sizeof (*t2p));
1638 }
1639
1640 return (err);
1641 }