/*
 * Copyright (c) 2000-2001 Boris Popov
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *    This product includes software developed by Boris Popov.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $Id: smb_iod.c,v 1.32 2005/02/12 00:17:09 lindak Exp $
 */

/*
 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 *
 * Portions Copyright (C) 2001 - 2013 Apple Inc. All rights reserved.
 * Copyright 2018 Nexenta Systems, Inc. All rights reserved.
 */

#ifdef DEBUG
/* See sys/queue.h */
#define	QUEUEDEBUG 1
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/atomic.h>
#include <sys/proc.h>
#include <sys/thread.h>
#include <sys/file.h>
#include <sys/kmem.h>
#include <sys/unistd.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/types.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/stream.h>
#include <sys/strsun.h>
#include <sys/time.h>
#include <sys/class.h>
#include <sys/disp.h>
#include <sys/cmn_err.h>
#include <sys/zone.h>
#include <sys/sdt.h>

#include <netsmb/smb_osdep.h>

#include <netsmb/smb.h>
#include <netsmb/smb2.h>
#include <netsmb/smb_conn.h>
#include <netsmb/smb_rq.h>
#include <netsmb/smb2_rq.h>
#include <netsmb/smb_subr.h>
#include <netsmb/smb_tran.h>
#include <netsmb/smb_trantcp.h>

/*
 * SMB messages are up to 64K. Let's leave room for two.
 * If we negotiate up to SMB2, increase these. XXX todo
 */
static int smb_tcpsndbuf = 0x20000;
static int smb_tcprcvbuf = 0x20000;
static int smb_connect_timeout = 10; /* seconds */
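
/*
 * These are "tunables" only in the loose illumos sense: assuming this
 * module loads as "nsmb" and the symbols remain in its symbol table,
 * a value could be set at boot via /etc/system, for example:
 *
 *	set nsmb:smb_connect_timeout = 30
 *
 * or patched in a live kernel with mdb -kw.
 */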

static int smb1_iod_process(smb_vc_t *, mblk_t *);
static int smb2_iod_process(smb_vc_t *, mblk_t *);
static int smb_iod_send_echo(smb_vc_t *, cred_t *cr);
static int smb_iod_logoff(struct smb_vc *vcp, cred_t *cr);

/*
 * This is set/cleared when smbfs loads/unloads
 * No locks should be necessary, because smbfs
 * can't unload until all the mounts are gone.
 */
static smb_fscb_t *fscb;
void
smb_fscb_set(smb_fscb_t *cb)
{
	fscb = cb;
}
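
/*
 * For illustration only: a sketch (with hypothetical callback names)
 * of how smbfs might register its callbacks at module load time and
 * clear them again before unloading.  The real smb_fscb_t layout is
 * in the netsmb headers.
 *
 *	static smb_fscb_t smbfs_fscb = {
 *		.fscb_disconn	= smbfs_dead,
 *		.fscb_connect	= smbfs_mount_ok,
 *	};
 *	smb_fscb_set(&smbfs_fscb);	(on load)
 *	smb_fscb_set(NULL);		(on unload)
 */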

static void
smb_iod_share_disconnected(smb_share_t *ssp)
{

	smb_share_invalidate(ssp);

	/*
	 * This is the only fscb hook smbfs currently uses.
	 * Replaces smbfs_dead() from Darwin.
	 */
	if (fscb && fscb->fscb_disconn) {
		fscb->fscb_disconn(ssp);
	}
}

/*
 * State changes are important and infrequent.
 * Make them easily observable via dtrace.
 */
void
smb_iod_newstate(struct smb_vc *vcp, int state)
{
	vcp->vc_state = state;
}
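
/*
 * For example (sketch only): since smb_iod_newstate() is a real
 * function, its calls can usually be watched with the fbt provider:
 *
 *	dtrace -n 'fbt::smb_iod_newstate:entry
 *	    { printf("vcp=%p state=%d", (void *)arg0, (int)arg1); }'
 */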

/* Lock Held version of the next function. */
static inline void
smb_iod_rqprocessed_LH(
	struct smb_rq *rqp,
	int error,
	int flags)
{
	rqp->sr_flags |= flags;
	rqp->sr_lerror = error;
	rqp->sr_rpgen++;
	rqp->sr_state = SMBRQ_NOTIFIED;
	cv_broadcast(&rqp->sr_cond);
}

static void
smb_iod_rqprocessed(
	struct smb_rq *rqp,
	int error,
	int flags)
{

	SMBRQ_LOCK(rqp);
	smb_iod_rqprocessed_LH(rqp, error, flags);
	SMBRQ_UNLOCK(rqp);
}

static void
smb_iod_invrq(struct smb_vc *vcp)
{
	struct smb_rq *rqp;

	/*
	 * Invalidate all outstanding requests for this connection
	 * Also wakeup iod_muxwant waiters.
	 */
	rw_enter(&vcp->iod_rqlock, RW_READER);
	TAILQ_FOREACH(rqp, &vcp->iod_rqlist, sr_link) {
		smb_iod_rqprocessed(rqp, ENOTCONN, SMBR_RESTART);
	}
	rw_exit(&vcp->iod_rqlock);
	cv_broadcast(&vcp->iod_muxwait);
}

/*
 * Called by smb_vc_rele/smb_vc_kill on last ref, and by
 * the driver close function if the IOD closes its minor.
 * In those cases, the caller should be the IOD thread.
 *
 * Forcibly kill the connection.
 */
void
smb_iod_disconnect(struct smb_vc *vcp)
{

	/*
	 * Inform everyone of the state change.
	 */
	SMB_VC_LOCK(vcp);
	if (vcp->vc_state != SMBIOD_ST_DEAD) {
		smb_iod_newstate(vcp, SMBIOD_ST_DEAD);
		cv_broadcast(&vcp->vc_statechg);
	}
	SMB_VC_UNLOCK(vcp);

	SMB_TRAN_DISCONNECT(vcp);
}

/*
 * Send one request.
 *
 * SMB1 only
 *
 * Called by _addrq (for internal requests)
 * and _sendall (via _addrq, _multirq, _waitrq)
 * Errors are reported via the smb_rq, using:
 *	smb_iod_rqprocessed(rqp, ...)
 */
static void
smb1_iod_sendrq(struct smb_rq *rqp)
{
	struct smb_vc *vcp = rqp->sr_vc;
	mblk_t *m;
	int error;

	ASSERT(vcp);
	ASSERT(RW_WRITE_HELD(&vcp->iod_rqlock));
	ASSERT((vcp->vc_flags & SMBV_SMB2) == 0);

	/*
	 * Internal requests are allowed in any state;
	 * otherwise should be active.
	 */
	if ((rqp->sr_flags & SMBR_INTERNAL) == 0 &&
	    vcp->vc_state != SMBIOD_ST_VCACTIVE) {
		SMBIODEBUG("bad vc_state=%d\n", vcp->vc_state);
		smb_iod_rqprocessed(rqp, ENOTCONN, SMBR_RESTART);
		return;
	}

	/*
	 * Overwrite the SMB header with the assigned MID and
	 * (if we're signing) sign it.
	 */
	smb_rq_fillhdr(rqp);
	if (rqp->sr_rqflags2 & SMB_FLAGS2_SECURITY_SIGNATURE) {
		smb_rq_sign(rqp);
	}

	/*
	 * The transport send consumes the message and we'd
	 * prefer to keep a copy, so dupmsg() before sending.
	 */
	m = dupmsg(rqp->sr_rq.mb_top);
	if (m == NULL) {
		error = ENOBUFS;
		goto fatal;
	}

#ifdef DTRACE_PROBE2
	DTRACE_PROBE2(iod_sendrq,
	    (smb_rq_t *), rqp, (mblk_t *), m);
#endif

	error = SMB_TRAN_SEND(vcp, m);
	m = 0; /* consumed by SEND */

	rqp->sr_lerror = error;
	if (error == 0) {
		SMBRQ_LOCK(rqp);
		rqp->sr_flags |= SMBR_SENT;
		rqp->sr_state = SMBRQ_SENT;
		SMBRQ_UNLOCK(rqp);
		return;
	}
	/*
	 * Transport send returned an error.
	 * Was it a fatal one?
	 */
	if (SMB_TRAN_FATAL(vcp, error)) {
		/*
		 * No further attempts should be made
		 */
	fatal:
		SMBSDEBUG("TRAN_SEND returned fatal error %d\n", error);
		smb_iod_rqprocessed(rqp, error, SMBR_RESTART);
		return;
	}
}

/*
 * Send one request.
 *
 * SMB2 only
 *
 * Called by _addrq (for internal requests)
 * and _sendall (via _addrq, _multirq, _waitrq)
 * Errors are reported via the smb_rq, using:
 *	smb_iod_rqprocessed(rqp, ...)
 */
static void
smb2_iod_sendrq(struct smb_rq *rqp)
{
	struct smb_rq *c_rqp;	/* compound */
	struct smb_vc *vcp = rqp->sr_vc;
	mblk_t *top_m;
	mblk_t *cur_m;
	int error;

	ASSERT(vcp);
	ASSERT(RW_WRITE_HELD(&vcp->iod_rqlock));
	ASSERT((vcp->vc_flags & SMBV_SMB2) != 0);

	/*
	 * Internal requests are allowed in any state;
	 * otherwise should be active.
	 */
	if ((rqp->sr_flags & SMBR_INTERNAL) == 0 &&
	    vcp->vc_state != SMBIOD_ST_VCACTIVE) {
		SMBIODEBUG("bad vc_state=%d\n", vcp->vc_state);
		smb_iod_rqprocessed(rqp, ENOTCONN, SMBR_RESTART);
		return;
	}

	/*
	 * Overwrite the SMB header with the assigned MID and
	 * (if we're signing) sign it. If there are compounded
	 * requests after the top one, do those too.
	 */
	smb2_rq_fillhdr(rqp);
	if (rqp->sr2_rqflags & SMB2_FLAGS_SIGNED) {
		smb2_rq_sign(rqp);
	}
	c_rqp = rqp->sr2_compound_next;
	while (c_rqp != NULL) {
		smb2_rq_fillhdr(c_rqp);
		if (c_rqp->sr2_rqflags & SMB2_FLAGS_SIGNED) {
			smb2_rq_sign(c_rqp);
		}
		c_rqp = c_rqp->sr2_compound_next;
	}

	/*
	 * The transport send consumes the message and we'd
	 * prefer to keep a copy, so dupmsg() before sending.
	 * We also need this to build the compound message
	 * that we'll actually send. The message offset at
	 * the start of each compounded message should be
	 * eight-byte aligned. The caller preparing the
	 * compounded request has to take care of that
	 * before we get here and sign messages etc.
	 */
	top_m = dupmsg(rqp->sr_rq.mb_top);
	if (top_m == NULL) {
		error = ENOBUFS;
		goto fatal;
	}
	c_rqp = rqp->sr2_compound_next;
	while (c_rqp != NULL) {
		size_t len = msgdsize(top_m);
		ASSERT((len & 7) == 0);
		cur_m = dupmsg(c_rqp->sr_rq.mb_top);
		if (cur_m == NULL) {
			freemsg(top_m);
			error = ENOBUFS;
			goto fatal;
		}
		linkb(top_m, cur_m);
		c_rqp = c_rqp->sr2_compound_next;
	}

	DTRACE_PROBE2(iod_sendrq,
	    (smb_rq_t *), rqp, (mblk_t *), top_m);

	error = SMB_TRAN_SEND(vcp, top_m);
	top_m = 0; /* consumed by SEND */

	rqp->sr_lerror = error;
	if (error == 0) {
		SMBRQ_LOCK(rqp);
		rqp->sr_flags |= SMBR_SENT;
		rqp->sr_state = SMBRQ_SENT;
		SMBRQ_UNLOCK(rqp);
		return;
	}
	/*
	 * Transport send returned an error.
	 * Was it a fatal one?
	 */
	if (SMB_TRAN_FATAL(vcp, error)) {
		/*
		 * No further attempts should be made
		 */
	fatal:
		SMBSDEBUG("TRAN_SEND returned fatal error %d\n", error);
		smb_iod_rqprocessed(rqp, error, SMBR_RESTART);
		return;
	}
}

/*
 * Receive one NetBIOS (or NBT over TCP) message. If none have arrived,
 * wait up to SMB_NBTIMO (15 sec.) for one to arrive, and then if still
 * none have arrived, return ETIME.
 */
static int
smb_iod_recvmsg(struct smb_vc *vcp, mblk_t **mpp)
{
	mblk_t *m;
	int error;

top:
	m = NULL;
	error = SMB_TRAN_RECV(vcp, &m);
	if (error == EAGAIN)
		goto top;
	if (error)
		return (error);
	ASSERT(m != NULL);
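	/* Make sure the first 4 bytes (the SMB signature) are contiguous. */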
	m = m_pullup(m, 4);
	if (m == NULL) {
		return (ENOSR);
	}

	*mpp = m;
	return (0);
}

/*
 * How long should we keep around an unused VC (connection)?
 * There's usually a good chance connections will be reused,
 * so the default is to keep such connections for 5 min.
 */
#ifdef DEBUG
int smb_iod_idle_keep_time = 60;	/* seconds */
#else
int smb_iod_idle_keep_time = 300;	/* seconds */
#endif

/*
 * Process incoming packets
 *
 * This is the "reader" loop, run by the IOD thread. Normally we're in
 * state SMBIOD_ST_VCACTIVE here, but during reconnect we're called in
 * other states with poll==TRUE.
 *
 * A non-zero error return here causes the IOD work loop to terminate.
 */
int
smb_iod_recvall(struct smb_vc *vcp, boolean_t poll)
{
	mblk_t *m;
	int error = 0;
	int etime_idle = 0;	/* How many 15 sec. "ticks" idle. */
	int etime_count = 0;	/* ... and when we have requests. */

	for (;;) {
		/*
		 * Check whether someone "killed" this VC,
		 * or is asking the IOD to terminate.
		 */
		if (vcp->iod_flags & SMBIOD_SHUTDOWN) {
			SMBIODEBUG("SHUTDOWN set\n");
			/* This IOD thread will terminate. */
			SMB_VC_LOCK(vcp);
			smb_iod_newstate(vcp, SMBIOD_ST_DEAD);
			cv_broadcast(&vcp->vc_statechg);
			SMB_VC_UNLOCK(vcp);
			error = EINTR;
			break;
		}

		m = NULL;
		error = smb_iod_recvmsg(vcp, &m);

		/*
		 * Internal requests (reconnecting) call this in a loop
		 * (with poll==TRUE) until the request completes.
		 */
		if (error == ETIME && poll)
			break;

		if (error == ETIME &&
		    vcp->iod_rqlist.tqh_first != NULL) {

			/*
			 * Nothing received and requests waiting.
			 * Increment etime_count. If we were idle,
			 * skip the 1st tick, because we started
			 * waiting before there were any requests.
			 */
			if (etime_idle != 0) {
				etime_idle = 0;
			} else if (etime_count < INT16_MAX) {
				etime_count++;
			}

			/*
			 * ETIME and requests in the queue.
			 * The first time (at 15 sec.)
			 * Log an error (just once).
			 */
			if (etime_count > 0 &&
			    vcp->iod_noresp == B_FALSE) {
				vcp->iod_noresp = B_TRUE;
				zprintf(vcp->vc_zoneid,
				    "SMB server %s not responding\n",
				    vcp->vc_srvname);
			}
			/*
			 * At 30 sec. try sending an echo, which
			 * should cause some response.
			 */
			if (etime_count == 2) {
				SMBIODEBUG("send echo\n");
				(void) smb_iod_send_echo(vcp, CRED());
			}
			/*
			 * At 45 sec. give up on the connection
			 * and try to reconnect.
			 */
			if (etime_count == 3) {
				SMB_VC_LOCK(vcp);
				smb_iod_newstate(vcp, SMBIOD_ST_RECONNECT);
				SMB_VC_UNLOCK(vcp);
				SMB_TRAN_DISCONNECT(vcp);
				break;
			}
			continue;
		} /* ETIME and requests in the queue */

		if (error == ETIME) {
			/*
			 * Nothing received and no active requests.
			 *
			 * If we've received nothing from the server for
			 * smb_iod_idle_keep_time seconds, and the IOD
			 * thread holds the last reference to this VC,
			 * move to state IDLE and drop the TCP session.
			 * The IDLE handler will destroy the VC unless
			 * vc_state goes to RECONNECT before then.
			 */
			etime_count = 0;
			if (etime_idle < INT16_MAX)
				etime_idle++;
			if ((etime_idle * SMB_NBTIMO) <
			    smb_iod_idle_keep_time)
				continue;
			SMB_VC_LOCK(vcp);
			if (vcp->vc_co.co_usecount == 1) {
				smb_iod_newstate(vcp, SMBIOD_ST_IDLE);
				SMB_VC_UNLOCK(vcp);
				SMBIODEBUG("logoff & disconnect\n");
				(void) smb_iod_logoff(vcp, CRED());
				SMB_TRAN_DISCONNECT(vcp);
				error = 0;
				break;
			}
			SMB_VC_UNLOCK(vcp);
			continue;
		} /* error == ETIME */

		if (error) {
			/*
			 * The recv above returned an error indicating
			 * that our TCP session is no longer usable.
			 * Disconnect the session and get ready to
			 * reconnect. If we have pending requests,
			 * move to state reconnect immediately;
			 * otherwise move to state IDLE until a
			 * request is issued on this VC.
			 */
			SMB_VC_LOCK(vcp);
			if (vcp->iod_rqlist.tqh_first != NULL)
				smb_iod_newstate(vcp, SMBIOD_ST_RECONNECT);
			else
				smb_iod_newstate(vcp, SMBIOD_ST_IDLE);
			cv_broadcast(&vcp->vc_statechg);
			SMB_VC_UNLOCK(vcp);
			SMB_TRAN_DISCONNECT(vcp);
			break;
		}

		/*
		 * Received something. Yea!
		 */
		etime_count = 0;
		etime_idle = 0;

		/*
		 * If we just completed a reconnect after logging
		 * "SMB server %s not responding" then log OK now.
		 */
		if (vcp->iod_noresp) {
			vcp->iod_noresp = B_FALSE;
			zprintf(vcp->vc_zoneid, "SMB server %s OK\n",
			    vcp->vc_srvname);
		}

		if ((vcp->vc_flags & SMBV_SMB2) != 0) {
			error = smb2_iod_process(vcp, m);
		} else {
			error = smb1_iod_process(vcp, m);
		}

		/*
		 * Reconnect calls this in a loop with poll=TRUE
		 * We've received a response, so break now.
		 */
		if (poll) {
			error = 0;
			break;
		}
	}

	return (error);
}

/*
 * Have what should be an SMB1 reply. Check and parse the header,
 * then use the message ID to find the request this belongs to and
 * post it on that request.
 *
 * Returns an error if the reader should give up.
 * To be safe, error if we read garbage.
 */
static int
smb1_iod_process(smb_vc_t *vcp, mblk_t *m)
{
	struct mdchain md;
	struct smb_rq *rqp;
	uint8_t cmd, sig[4];
	uint16_t mid;
	int err, skip;

	m = m_pullup(m, SMB_HDRLEN);
	if (m == NULL)
		return (ENOMEM);

	/*
	 * Note: Intentionally do NOT md_done(&md)
	 * because that would free the message and
	 * we just want to peek here.
	 */
	md_initm(&md, m);

	/*
	 * Check the SMB header version and get the MID.
	 *
	 * The header version should be SMB1 except when we're
	 * doing SMB1-to-SMB2 negotiation, in which case we may
	 * see an SMB2 header with message ID=0 (only allowed in
	 * vc_state == SMBIOD_ST_CONNECTED -- negotiating).
	 */
	err = md_get_mem(&md, sig, 4, MB_MSYSTEM);
	if (err)
		return (err);
	if (sig[1] != 'S' || sig[2] != 'M' || sig[3] != 'B') {
		goto bad_hdr;
	}
	switch (sig[0]) {
	case SMB_HDR_V1:	/* SMB1 */
		md_get_uint8(&md, &cmd);
		/* Skip to and get the MID. At offset 5 now. */
		skip = SMB_HDR_OFF_MID - 5;
		md_get_mem(&md, NULL, skip, MB_MSYSTEM);
		err = md_get_uint16le(&md, &mid);
		if (err)
			return (err);
		break;
	case SMB_HDR_V2:	/* SMB2+ */
		if (vcp->vc_state == SMBIOD_ST_CONNECTED) {
			/*
			 * No need to look, can only be
			 * MID=0, cmd=negotiate
			 */
			cmd = SMB_COM_NEGOTIATE;
			mid = 0;
			break;
		}
		/* FALLTHROUGH */
	bad_hdr:
	default:
		SMBIODEBUG("Bad SMB hdr\n");
		m_freem(m);
		return (EPROTO);
	}

	/*
	 * Find the request and post the reply
	 */
	rw_enter(&vcp->iod_rqlock, RW_READER);
	TAILQ_FOREACH(rqp, &vcp->iod_rqlist, sr_link) {

		if (rqp->sr_mid != mid)
			continue;

		DTRACE_PROBE2(iod_post_reply,
		    (smb_rq_t *), rqp, (mblk_t *), m);
		m_dumpm(m);

		SMBRQ_LOCK(rqp);
		if (rqp->sr_rp.md_top == NULL) {
			md_initm(&rqp->sr_rp, m);
		} else {
			if (rqp->sr_flags & SMBR_MULTIPACKET) {
				md_append_record(&rqp->sr_rp, m);
			} else {
				SMBRQ_UNLOCK(rqp);
				rqp = NULL;
				break;
			}
		}
		smb_iod_rqprocessed_LH(rqp, 0, 0);
		SMBRQ_UNLOCK(rqp);
		break;
	}
	rw_exit(&vcp->iod_rqlock);

	if (rqp == NULL) {
		if (cmd != SMB_COM_ECHO) {
			SMBSDEBUG("drop resp: MID 0x%04x\n", (uint_t)mid);
		}
		m_freem(m);
		/*
		 * Keep going. It's possible this reply came
		 * after the request timed out and went away.
		 */
	}
	return (0);
}

/*
 * Have what should be an SMB2 reply. Check and parse the header,
 * then use the message ID to find the request this belongs to and
 * post it on that request.
 *
 * We also want to apply any credit grant in this reply now,
 * rather than waiting for the owner to wake up.
 */
static int
smb2_iod_process(smb_vc_t *vcp, mblk_t *m)
{
	struct mdchain md;
	struct smb_rq *rqp;
	uint8_t sig[4];
	mblk_t *next_m = NULL;
	uint64_t message_id, async_id;
	uint32_t flags, next_cmd_off, status;
	uint16_t command, credits_granted;
	int err;

top:
	m = m_pullup(m, SMB2_HDRLEN);
	if (m == NULL)
		return (ENOMEM);

	/*
	 * Note: Intentionally do NOT md_done(&md)
	 * because that would free the message and
	 * we just want to peek here.
	 */
	md_initm(&md, m);

	/*
	 * Check the SMB header. Must be SMB2
	 * (and later, could be SMB3 encrypted)
	 */
	err = md_get_mem(&md, sig, 4, MB_MSYSTEM);
	if (err)
		return (err);
	if (sig[1] != 'S' || sig[2] != 'M' || sig[3] != 'B') {
		goto bad_hdr;
	}
	switch (sig[0]) {
	case SMB_HDR_V2:
		break;
	case SMB_HDR_V3E:
		/*
		 * Todo: If encryption enabled, decrypt the message
		 * and restart processing on the cleartext.
		 */
		/* FALLTHROUGH */
	bad_hdr:
	default:
		SMBIODEBUG("Bad SMB2 hdr\n");
		m_freem(m);
		return (EPROTO);
	}

	/*
	 * Parse the rest of the SMB2 header,
	 * skipping what we don't need.
	 */
	md_get_uint32le(&md, NULL);	/* length, credit_charge */
	md_get_uint32le(&md, &status);
	md_get_uint16le(&md, &command);
	md_get_uint16le(&md, &credits_granted);
	md_get_uint32le(&md, &flags);
	md_get_uint32le(&md, &next_cmd_off);
	md_get_uint64le(&md, &message_id);
	if (flags & SMB2_FLAGS_ASYNC_COMMAND) {
		md_get_uint64le(&md, &async_id);
	} else {
		/* PID, TID (not needed) */
		async_id = 0;
	}

	/*
	 * If this is a compound reply, split it.
	 * Next must be 8-byte aligned.
	 */
	if (next_cmd_off != 0) {
		if ((next_cmd_off & 7) != 0)
			SMBIODEBUG("Misaligned next cmd\n");
		else
			next_m = m_split(m, next_cmd_off, 1);
	}
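
	/*
	 * Layout sketch (illustrative only) of a two-part compound
	 * reply arriving in a single message:
	 *
	 *	[SMB2 hdr A | body A][SMB2 hdr B | body B]
	 *	 NextCommand = offset  NextCommand = 0
	 *
	 * m_split() above leaves part A in "m"; part B waits in
	 * "next_m" and is handled on the next pass through "top:".
	 */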

	/*
	 * Apply the credit grant
	 */
	rw_enter(&vcp->iod_rqlock, RW_WRITER);
	vcp->vc2_limit_message_id += credits_granted;

	/*
	 * Find the request and post the reply
	 */
	rw_downgrade(&vcp->iod_rqlock);
	TAILQ_FOREACH(rqp, &vcp->iod_rqlist, sr_link) {

		if (rqp->sr2_messageid != message_id)
			continue;

		DTRACE_PROBE2(iod_post_reply,
		    (smb_rq_t *), rqp, (mblk_t *), m);
		m_dumpm(m);

		/*
		 * If this is an interim response, just save the
		 * async ID but don't wake up the request.
		 * Don't need SMBRQ_LOCK for this.
		 */
		if (status == NT_STATUS_PENDING && async_id != 0) {
			rqp->sr2_rspasyncid = async_id;
			m_freem(m);
			break;
		}

		SMBRQ_LOCK(rqp);
		if (rqp->sr_rp.md_top == NULL) {
			md_initm(&rqp->sr_rp, m);
		} else {
			SMBRQ_UNLOCK(rqp);
			rqp = NULL;
			break;
		}
		smb_iod_rqprocessed_LH(rqp, 0, 0);
		SMBRQ_UNLOCK(rqp);
		break;
	}
	rw_exit(&vcp->iod_rqlock);

	if (rqp == NULL) {
		if (command != SMB2_ECHO) {
			SMBSDEBUG("drop resp: MID %lld\n",
			    (long long)message_id);
		}
		m_freem(m);
		/*
		 * Keep going. It's possible this reply came
		 * after the request timed out and went away.
		 */
	}

	/*
	 * If we split a compound reply, continue with the
	 * next part of the compound.
	 */
	if (next_m != NULL) {
		m = next_m;
		goto top;
	}

	return (0);
}

/*
 * The IOD receiver thread has requests pending and
 * has not received anything in a while. Try to
 * send an SMB echo request. It's tricky to do a
 * send from the IOD thread because we can't block.
 *
 * Using tmo=SMBNOREPLYWAIT in the request
 * so smb_rq_reply will skip smb_iod_waitrq.
 * The smb_smb_echo call uses SMBR_INTERNAL
 * to avoid calling smb_iod_sendall().
 */
static int
smb_iod_send_echo(smb_vc_t *vcp, cred_t *cr)
{
	smb_cred_t scred;
	int err, tmo = SMBNOREPLYWAIT;

	ASSERT(vcp->iod_thr == curthread);

	smb_credinit(&scred, cr);
	if ((vcp->vc_flags & SMBV_SMB2) != 0) {
		err = smb2_smb_echo(vcp, &scred, tmo);
	} else {
		err = smb_smb_echo(vcp, &scred, tmo);
	}
	smb_credrele(&scred);
	return (err);
}

/*
 * Helper for smb1_iod_addrq, smb2_iod_addrq
 * Returns zero if interrupted, else 1.
 */
static int
smb_iod_muxwait(smb_vc_t *vcp, boolean_t sig_ok)
{
	int rc;

	SMB_VC_LOCK(vcp);
	vcp->iod_muxwant++;
	if (sig_ok) {
		rc = cv_wait_sig(&vcp->iod_muxwait, &vcp->vc_lock);
	} else {
		cv_wait(&vcp->iod_muxwait, &vcp->vc_lock);
		rc = 1;
	}
	vcp->iod_muxwant--;
	SMB_VC_UNLOCK(vcp);

	return (rc);
}

/*
 * Place request in the queue, and send it.
 * Called with no locks held.
 *
 * Called for SMB1 only
 *
 * The logic for how we limit active requests differs between
 * SMB1 and SMB2. With SMB1 it's a simple counter iod_muxcnt.
 */
int
smb1_iod_addrq(struct smb_rq *rqp)
{
	struct smb_vc *vcp = rqp->sr_vc;
	uint16_t need;
	boolean_t sig_ok =
	    (rqp->sr_flags & SMBR_NOINTR_SEND) == 0;

	ASSERT(rqp->sr_cred);
	ASSERT((vcp->vc_flags & SMBV_SMB2) == 0);

	rqp->sr_owner = curthread;

	rw_enter(&vcp->iod_rqlock, RW_WRITER);

recheck:
	/*
	 * Internal requests can be added in any state,
	 * but normal requests only in state active.
	 */
	if ((rqp->sr_flags & SMBR_INTERNAL) == 0 &&
	    vcp->vc_state != SMBIOD_ST_VCACTIVE) {
		SMBIODEBUG("bad vc_state=%d\n", vcp->vc_state);
		rw_exit(&vcp->iod_rqlock);
		return (ENOTCONN);
	}

	/*
	 * If we're at the limit of active requests, block until
	 * enough requests complete so we can make ours active.
	 * Wakeup in smb_iod_removerq().
	 *
	 * Normal callers leave one slot free, so internal
	 * callers can have the last slot if needed.
	 */
	need = 1;
	if ((rqp->sr_flags & SMBR_INTERNAL) == 0)
		need++;
	if ((vcp->iod_muxcnt + need) > vcp->vc_maxmux) {
		rw_exit(&vcp->iod_rqlock);
		if (rqp->sr_flags & SMBR_INTERNAL)
			return (EBUSY);
		if (smb_iod_muxwait(vcp, sig_ok) == 0)
			return (EINTR);
		rw_enter(&vcp->iod_rqlock, RW_WRITER);
		goto recheck;
	}

	/*
	 * Add this request to the active list and send it.
	 * For SMB2 we may have a sequence of compounded
	 * requests, in which case we must add them all.
	 * They're sent as a compound in smb2_iod_sendrq.
	 */
	rqp->sr_mid = vcp->vc_next_mid++;
	/* If signing, set the signing sequence numbers. */
	if (vcp->vc_mackey != NULL && (rqp->sr_rqflags2 &
	    SMB_FLAGS2_SECURITY_SIGNATURE) != 0) {
		rqp->sr_seqno = vcp->vc_next_seq++;
		rqp->sr_rseqno = vcp->vc_next_seq++;
	}
	vcp->iod_muxcnt++;
	TAILQ_INSERT_TAIL(&vcp->iod_rqlist, rqp, sr_link);
	smb1_iod_sendrq(rqp);

	rw_exit(&vcp->iod_rqlock);
	return (0);
}

/*
 * Place request in the queue, and send it.
 * Called with no locks held.
 *
 * Called for SMB2 only.
 *
 * With SMB2 we have a range of valid message IDs, and we may
 * only send requests when we can assign a message ID within
 * the valid range. We may need to wait here for some active
 * request to finish (and update vc2_limit_message_id) before
 * we can get message IDs for our new request(s). Another
 * difference is that the request sequence we're waiting to
 * add here may require multiple message IDs, due to either
 * compounding or multi-credit requests. Therefore we need
 * to wait for the availability of however many message IDs
 * are required by our request sequence. (See the worked
 * example below, after the limit check.)
 */
int
smb2_iod_addrq(struct smb_rq *rqp)
{
	struct smb_vc *vcp = rqp->sr_vc;
	struct smb_rq *c_rqp;	/* compound req */
	uint16_t charge;
	boolean_t sig_ok =
	    (rqp->sr_flags & SMBR_NOINTR_SEND) == 0;

	ASSERT(rqp->sr_cred != NULL);
	ASSERT((vcp->vc_flags & SMBV_SMB2) != 0);

	/*
	 * Figure out the credit charges
	 * No multi-credit messages yet.
	 */
	rqp->sr2_totalcreditcharge = rqp->sr2_creditcharge;
	c_rqp = rqp->sr2_compound_next;
	while (c_rqp != NULL) {
		rqp->sr2_totalcreditcharge += c_rqp->sr2_creditcharge;
		c_rqp = c_rqp->sr2_compound_next;
	}

	/*
	 * Internal request must not be compounded
	 * and should use exactly one credit.
	 */
	if (rqp->sr_flags & SMBR_INTERNAL) {
		if (rqp->sr2_compound_next != NULL) {
			ASSERT(0);
			return (EINVAL);
		}
	}

	rqp->sr_owner = curthread;

	rw_enter(&vcp->iod_rqlock, RW_WRITER);

recheck:
	/*
	 * Internal requests can be added in any state,
	 * but normal requests only in state active.
	 */
	if ((rqp->sr_flags & SMBR_INTERNAL) == 0 &&
	    vcp->vc_state != SMBIOD_ST_VCACTIVE) {
		SMBIODEBUG("bad vc_state=%d\n", vcp->vc_state);
		rw_exit(&vcp->iod_rqlock);
		return (ENOTCONN);
	}

	/*
	 * If we're at the limit of active requests, block until
	 * enough requests complete so we can make ours active.
	 * Wakeup in smb_iod_removerq().
	 *
	 * Normal callers leave one slot free, so internal
	 * callers can have the last slot if needed.
	 */
	charge = rqp->sr2_totalcreditcharge;
	if ((rqp->sr_flags & SMBR_INTERNAL) == 0)
		charge++;
	if ((vcp->vc2_next_message_id + charge) >
	    vcp->vc2_limit_message_id) {
		rw_exit(&vcp->iod_rqlock);
		if (rqp->sr_flags & SMBR_INTERNAL)
			return (EBUSY);
		if (smb_iod_muxwait(vcp, sig_ok) == 0)
			return (EINTR);
		rw_enter(&vcp->iod_rqlock, RW_WRITER);
		goto recheck;
	}
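
	/*
	 * Worked example (numbers illustrative): with
	 * vc2_next_message_id == 11 and vc2_limit_message_id == 12,
	 * a normal single-credit request needs charge + 1 == 2 IDs,
	 * so 11 + 2 > 12 and it blocks above, while an internal
	 * request (charge == 1) still fits, taking the last slot.
	 */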

	/*
	 * Add this request to the active list and send it.
	 * For SMB2 we may have a sequence of compounded
	 * requests, in which case we must add them all.
	 * They're sent as a compound in smb2_iod_sendrq.
	 */

	rqp->sr2_messageid = vcp->vc2_next_message_id;
	vcp->vc2_next_message_id += rqp->sr2_creditcharge;
	TAILQ_INSERT_TAIL(&vcp->iod_rqlist, rqp, sr_link);

	c_rqp = rqp->sr2_compound_next;
	while (c_rqp != NULL) {
		c_rqp->sr2_messageid = vcp->vc2_next_message_id;
		vcp->vc2_next_message_id += c_rqp->sr2_creditcharge;
		TAILQ_INSERT_TAIL(&vcp->iod_rqlist, c_rqp, sr_link);
		c_rqp = c_rqp->sr2_compound_next;
	}
	smb2_iod_sendrq(rqp);

	rw_exit(&vcp->iod_rqlock);
	return (0);
}

/*
 * Mark an SMBR_MULTIPACKET request as
 * needing another send. Similar to the
 * "normal" part of smb1_iod_addrq.
 * Only used by SMB1
 */
int
smb1_iod_multirq(struct smb_rq *rqp)
{
	struct smb_vc *vcp = rqp->sr_vc;

	ASSERT(rqp->sr_flags & SMBR_MULTIPACKET);

	if (vcp->vc_flags & SMBV_SMB2) {
		ASSERT("!SMB2?");
		return (EINVAL);
	}

	if (rqp->sr_flags & SMBR_INTERNAL)
		return (EINVAL);

	if (vcp->vc_state != SMBIOD_ST_VCACTIVE) {
		SMBIODEBUG("bad vc_state=%d\n", vcp->vc_state);
		return (ENOTCONN);
	}

	rw_enter(&vcp->iod_rqlock, RW_WRITER);

	/* Already on iod_rqlist, just reset state. */
	rqp->sr_state = SMBRQ_NOTSENT;
	smb1_iod_sendrq(rqp);

	rw_exit(&vcp->iod_rqlock);

	return (0);
}

/*
 * Remove a request from the active list, and
 * wake up requests waiting to go active.
 *
 * Shared by SMB1 + SMB2
 *
 * The logic for how we limit active requests differs between
 * SMB1 and SMB2. With SMB1 it's a simple counter iod_muxcnt.
 * With SMB2 we have a range of valid message IDs, and when we
 * retire the oldest request we need to keep track of what is
 * now the oldest message ID. In both cases, after we take a
 * request out of the list here, we should be able to wake up
 * a request waiting to get in the active list.
 */
void
smb_iod_removerq(struct smb_rq *rqp)
{
	struct smb_rq *rqp2;
	struct smb_vc *vcp = rqp->sr_vc;
	boolean_t was_head = B_FALSE;

	rw_enter(&vcp->iod_rqlock, RW_WRITER);

#ifdef QUEUEDEBUG
	/*
	 * Make sure we have not already removed it.
	 * See sys/queue.h QUEUEDEBUG_TAILQ_POSTREMOVE
	 * XXX: Don't like the constant 1 here...
	 */
	ASSERT(rqp->sr_link.tqe_next != (void *)1L);
#endif

	if (TAILQ_FIRST(&vcp->iod_rqlist) == rqp)
		was_head = B_TRUE;
	TAILQ_REMOVE(&vcp->iod_rqlist, rqp, sr_link);
	if (vcp->vc_flags & SMBV_SMB2) {
		rqp2 = TAILQ_FIRST(&vcp->iod_rqlist);
		if (was_head && rqp2 != NULL) {
			/* Do we still need this? */
			vcp->vc2_oldest_message_id =
			    rqp2->sr2_messageid;
		}
	} else {
		ASSERT(vcp->iod_muxcnt > 0);
		vcp->iod_muxcnt--;
	}

	rw_exit(&vcp->iod_rqlock);

	/*
	 * If there are requests waiting for "mux" slots,
	 * wake one.
	 */
	SMB_VC_LOCK(vcp);
	if (vcp->iod_muxwant != 0)
		cv_signal(&vcp->iod_muxwait);
	SMB_VC_UNLOCK(vcp);
}

/*
 * Wait for a request to complete.
 */
int
smb_iod_waitrq(struct smb_rq *rqp)
{
	struct smb_vc *vcp = rqp->sr_vc;
	clock_t tr, tmo1, tmo2;
	int error;

	if (rqp->sr_flags & SMBR_INTERNAL) {
		/* XXX - Do we ever take this path now? */
		return (smb_iod_waitrq_int(rqp));
	}

	/*
	 * Make sure this is NOT the IOD thread,
	 * or the wait below will stop the reader.
	 */
	ASSERT(curthread != vcp->iod_thr);

	SMBRQ_LOCK(rqp);

	/*
	 * The request has been sent. Now wait for the response,
	 * with the timeout specified for this request.
	 * Compute all the deadlines now, so we effectively
	 * start the timer(s) after the request is sent.
	 */
	if (smb_timo_notice && (smb_timo_notice < rqp->sr_timo))
		tmo1 = SEC_TO_TICK(smb_timo_notice);
	else
		tmo1 = 0;
	tmo2 = ddi_get_lbolt() + SEC_TO_TICK(rqp->sr_timo);
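
	/*
	 * Example (illustrative values): with rqp->sr_timo == 30 and
	 * smb_timo_notice == 15, tmo1 is a relative 15 sec. wait and
	 * tmo2 an absolute deadline 30 sec. from now, so the "slow
	 * server" dtrace probe fires once around 15 sec. and the
	 * wait below gives up with ETIME at 30 sec.
	 */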

	/*
	 * As above, we don't want to allow interrupt for some
	 * requests like open, because we could miss a successful
	 * response and therefore "leak" a FID. Such requests
	 * are marked SMBR_NOINTR_RECV to prevent that.
	 *
	 * If "slow server" warnings are enabled, wait first
	 * for the "notice" timeout, and warn if expired.
	 */
	if (tmo1 && rqp->sr_rpgen == rqp->sr_rplast) {
		if (rqp->sr_flags & SMBR_NOINTR_RECV)
			tr = cv_reltimedwait(&rqp->sr_cond,
			    &rqp->sr_lock, tmo1, TR_CLOCK_TICK);
		else
			tr = cv_reltimedwait_sig(&rqp->sr_cond,
			    &rqp->sr_lock, tmo1, TR_CLOCK_TICK);
		if (tr == 0) {
			error = EINTR;
			goto out;
		}
		if (tr < 0) {
			DTRACE_PROBE1(smb_iod_waitrq1,
			    (smb_rq_t *), rqp);
		}
	}

	/*
	 * Keep waiting until tmo2 is expired.
	 */
	while (rqp->sr_rpgen == rqp->sr_rplast) {
		if (rqp->sr_flags & SMBR_NOINTR_RECV)
			tr = cv_timedwait(&rqp->sr_cond,
			    &rqp->sr_lock, tmo2);
		else
			tr = cv_timedwait_sig(&rqp->sr_cond,
			    &rqp->sr_lock, tmo2);
		if (tr == 0) {
			error = EINTR;
			goto out;
		}
		if (tr < 0) {
			DTRACE_PROBE1(smb_iod_waitrq2,
			    (smb_rq_t *), rqp);
			error = ETIME;
			goto out;
		}
		/* got wakeup */
	}
	error = rqp->sr_lerror;
	rqp->sr_rplast++;

out:
	SMBRQ_UNLOCK(rqp);

	/*
	 * MULTIPACKET request must stay in the list.
	 * They may need additional responses.
	 */
	if ((rqp->sr_flags & SMBR_MULTIPACKET) == 0)
		smb_iod_removerq(rqp);

	return (error);
}

/*
 * Internal variant of smb_iod_waitrq(), for use in
 * requests run by the IOD (reader) thread itself.
 * Block only long enough to receive one reply.
 */
int
smb_iod_waitrq_int(struct smb_rq *rqp)
{
	struct smb_vc *vcp = rqp->sr_vc;
	int timeleft = rqp->sr_timo;
	int error;

	ASSERT((rqp->sr_flags & SMBR_MULTIPACKET) == 0);
again:
	error = smb_iod_recvall(vcp, B_TRUE);
	if (error == ETIME) {
		/* We waited SMB_NBTIMO sec. */
		timeleft -= SMB_NBTIMO;
		if (timeleft > 0)
			goto again;
	}

	smb_iod_removerq(rqp);
	if (rqp->sr_state != SMBRQ_NOTIFIED)
		error = ETIME;

	return (error);
}

/*
 * Shutdown all outstanding I/O requests on the specified share with
 * ENXIO; used when unmounting a share. (There shouldn't be any for a
 * non-forced unmount; if this is a forced unmount, we have to shutdown
 * the requests as part of the unmount process.)
 */
void
smb_iod_shutdown_share(struct smb_share *ssp)
{
	struct smb_vc *vcp = SSTOVC(ssp);
	struct smb_rq *rqp;

	/*
	 * Loop through the list of requests and shutdown the ones
	 * that are for the specified share.
	 */
	rw_enter(&vcp->iod_rqlock, RW_READER);
	TAILQ_FOREACH(rqp, &vcp->iod_rqlist, sr_link) {
		if (rqp->sr_state != SMBRQ_NOTIFIED && rqp->sr_share == ssp)
			smb_iod_rqprocessed(rqp, EIO, 0);
	}
	rw_exit(&vcp->iod_rqlock);
}

/*
 * Ioctl functions called by the user-level I/O Daemon (IOD)
 * to bring up and service a connection to some SMB server.
 */
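
/*
 * For orientation, a sketch of the usual ioctl sequence issued by the
 * user-level smbiod (the authoritative logic lives in user space):
 *
 *	ioctl(fd, SMBIOC_IOD_CONNECT, ...);	TCP connect
 *	ioctl(fd, SMBIOC_IOD_NEGOTIATE, ...);	SMB1/SMB2 negotiate
 *	ioctl(fd, SMBIOC_IOD_SSNSETUP, ...);	repeat while EINPROGRESS
 *	ioctl(fd, SMBIOC_IOD_WORK, ...);	blocks, runs reader loop
 *	ioctl(fd, SMBIOC_IOD_IDLE, ...);	wait: reconnect or destroy
 *	ioctl(fd, SMBIOC_IOD_RCFAIL, ...);	after a failed reconnect
 *
 * which walks vc_state roughly: RECONNECT -> CONNECTED -> NEGOTIATED
 * -> (AUTHCONT ->) AUTHOK -> VCACTIVE -> (IDLE | RECONNECT | DEAD).
 */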

/*
 * Handle ioctl SMBIOC_IOD_CONNECT
 */
int
nsmb_iod_connect(struct smb_vc *vcp, cred_t *cr)
{
	int err, val;

	ASSERT(vcp->iod_thr == curthread);

	if (vcp->vc_state != SMBIOD_ST_RECONNECT) {
		cmn_err(CE_NOTE, "iod_connect: bad state %d", vcp->vc_state);
		return (EINVAL);
	}

	/*
	 * Putting a TLI endpoint back in the right state for a new
	 * connection is a bit tricky. In theory, this could be:
	 *	SMB_TRAN_DISCONNECT(vcp);
	 *	SMB_TRAN_UNBIND(vcp);
	 * but that method often results in TOUTSTATE errors.
	 * It's easier to just close it and open a new endpoint.
	 */
	SMB_VC_LOCK(vcp);
	if (vcp->vc_tdata)
		SMB_TRAN_DONE(vcp);
	err = SMB_TRAN_CREATE(vcp, cr);
	SMB_VC_UNLOCK(vcp);
	if (err != 0)
		return (err);

	/*
	 * Set various options on this endpoint.
	 * Keep going in spite of errors.
	 */
	val = smb_tcpsndbuf;
	err = SMB_TRAN_SETPARAM(vcp, SMBTP_SNDBUF, &val);
	if (err != 0) {
		cmn_err(CE_NOTE, "iod_connect: setopt SNDBUF, err=%d", err);
	}
	val = smb_tcprcvbuf;
	err = SMB_TRAN_SETPARAM(vcp, SMBTP_RCVBUF, &val);
	if (err != 0) {
		cmn_err(CE_NOTE, "iod_connect: setopt RCVBUF, err=%d", err);
	}
	val = 1;
	err = SMB_TRAN_SETPARAM(vcp, SMBTP_KEEPALIVE, &val);
	if (err != 0) {
		cmn_err(CE_NOTE, "iod_connect: setopt KEEPALIVE, err=%d", err);
	}
	val = 1;
	err = SMB_TRAN_SETPARAM(vcp, SMBTP_TCP_NODELAY, &val);
	if (err != 0) {
		cmn_err(CE_NOTE, "iod_connect: setopt TCP_NODELAY, err=%d",
		    err);
	}
	val = smb_connect_timeout * 1000;
	err = SMB_TRAN_SETPARAM(vcp, SMBTP_TCP_CON_TMO, &val);
	if (err != 0) {
		cmn_err(CE_NOTE, "iod_connect: setopt TCP con tmo, err=%d",
		    err);
	}

	/*
	 * Bind and connect
	 */
	err = SMB_TRAN_BIND(vcp, NULL);
	if (err != 0) {
		cmn_err(CE_NOTE, "iod_connect: t_kbind: err=%d", err);
		/* Continue on and try connect. */
	}
	err = SMB_TRAN_CONNECT(vcp, &vcp->vc_srvaddr.sa);
	/*
	 * No cmn_err here, as connect failures are normal, i.e.
	 * when a server has multiple addresses and only some are
	 * routed for us. (libsmbfs tries them all)
	 */
	if (err == 0) {
		SMB_VC_LOCK(vcp);
		smb_iod_newstate(vcp, SMBIOD_ST_CONNECTED);
		SMB_VC_UNLOCK(vcp);
	} /* else stay in state reconnect */

	return (err);
}

/*
 * Handle ioctl SMBIOC_IOD_NEGOTIATE
 * Do the whole SMB1/SMB2 negotiate
 *
 * This is where we send our first request to the server.
 * If this is the first time we're talking to this server,
 * (meaning not a reconnect) then we don't know whether
 * the server supports SMB2, so we need to use the weird
 * SMB1-to-SMB2 negotiation. That's where we send an SMB1
 * negotiate including dialect "SMB 2.???" and if the
 * server supports SMB2 we get an SMB2 reply -- Yes, an
 * SMB2 reply to an SMB1 request. A strange protocol...
 *
 * If on the other hand we already know the server supports
 * SMB2 (because this is a reconnect) or if the client side
 * has disabled SMB1 entirely, we'll skip the SMB1 part.
 */
int
nsmb_iod_negotiate(struct smb_vc *vcp, cred_t *cr)
{
	struct smb_sopt *sv = &vcp->vc_sopt;
	smb_cred_t scred;
	int err = 0;

	ASSERT(vcp->iod_thr == curthread);

	smb_credinit(&scred, cr);

	if (vcp->vc_state != SMBIOD_ST_CONNECTED) {
		cmn_err(CE_NOTE, "iod_negotiate: bad state %d", vcp->vc_state);
		err = EINVAL;
		goto out;
	}

	if (vcp->vc_maxver == 0 || vcp->vc_minver > vcp->vc_maxver) {
		err = EINVAL;
		goto out;
	}

	/*
	 * (Re)init negotiated values
	 */
	bzero(sv, sizeof (*sv));
	vcp->vc2_next_message_id = 0;
	vcp->vc2_limit_message_id = 1;
	vcp->vc2_session_id = 0;
	vcp->vc_next_seq = 0;

	/*
	 * If this was reconnect, get rid of the old MAC key
	 * and session key.
	 */
	SMB_VC_LOCK(vcp);
	if (vcp->vc_mackey != NULL) {
		kmem_free(vcp->vc_mackey, vcp->vc_mackeylen);
		vcp->vc_mackey = NULL;
		vcp->vc_mackeylen = 0;
	}
	if (vcp->vc_ssnkey != NULL) {
		kmem_free(vcp->vc_ssnkey, vcp->vc_ssnkeylen);
		vcp->vc_ssnkey = NULL;
		vcp->vc_ssnkeylen = 0;
	}
	SMB_VC_UNLOCK(vcp);

	/*
	 * If this is not an SMB2 reconnect (SMBV_SMB2 not set),
	 * and if SMB1 is enabled, do SMB1 negotiate. Then
	 * if either SMB1-to-SMB2 negotiate tells us we should
	 * switch to SMB2, or the local configuration has
	 * disabled SMB1, set the SMBV_SMB2 flag.
	 *
	 * Note that vc_maxver is handled in smb_smb_negotiate
	 * so we never get sv_proto == SMB_DIALECT_SMB2_FF when
	 * the local configuration disables SMB2, and therefore
	 * we won't set the SMBV_SMB2 flag.
	 */
	if ((vcp->vc_flags & SMBV_SMB2) == 0) {
		if (vcp->vc_minver < SMB2_DIALECT_BASE) {
			/*
			 * SMB1 is enabled
			 */
			err = smb_smb_negotiate(vcp, &scred);
			if (err != 0)
				goto out;
		}
		/*
		 * If SMB1-to-SMB2 negotiate told us we should
		 * switch to SMB2, or if the local configuration
		 * disables SMB1, set the SMB2 flag.
		 */
		if (sv->sv_proto == SMB_DIALECT_SMB2_FF ||
		    vcp->vc_minver >= SMB2_DIALECT_BASE) {
			/*
			 * Switch this VC to SMB2.
			 */
			SMB_VC_LOCK(vcp);
			vcp->vc_flags |= SMBV_SMB2;
			SMB_VC_UNLOCK(vcp);
		}
	}

	/*
	 * If this is an SMB2 reconnect (SMBV_SMB2 was set before this
	 * function was called), or SMB1-to-SMB2 negotiate indicated
	 * we should switch to SMB2, or we have SMB1 disabled (both
	 * cases set SMBV_SMB2 above), then do SMB2 negotiate.
	 */
	if ((vcp->vc_flags & SMBV_SMB2) != 0) {
		err = smb2_smb_negotiate(vcp, &scred);
	}

out:
	if (err == 0) {
		SMB_VC_LOCK(vcp);
		smb_iod_newstate(vcp, SMBIOD_ST_NEGOTIATED);
		SMB_VC_UNLOCK(vcp);
	}
	/*
	 * (else) leave state as it was.
	 * User-level will either close this handle (if connecting
	 * for the first time) or call rcfail and then try again.
	 */

	smb_credrele(&scred);

	return (err);
}

/*
 * Handle ioctl SMBIOC_IOD_SSNSETUP
 * Do either SMB1 or SMB2 session setup (one call/reply)
 */
int
nsmb_iod_ssnsetup(struct smb_vc *vcp, cred_t *cr)
{
	smb_cred_t scred;
	int err;

	ASSERT(vcp->iod_thr == curthread);

	switch (vcp->vc_state) {
	case SMBIOD_ST_NEGOTIATED:
	case SMBIOD_ST_AUTHCONT:
		break;
	default:
		return (EINVAL);
	}

	smb_credinit(&scred, cr);
	if (vcp->vc_flags & SMBV_SMB2)
		err = smb2_smb_ssnsetup(vcp, &scred);
	else
		err = smb_smb_ssnsetup(vcp, &scred);
	smb_credrele(&scred);

	SMB_VC_LOCK(vcp);
	switch (err) {
	case 0:
		smb_iod_newstate(vcp, SMBIOD_ST_AUTHOK);
		break;
	case EINPROGRESS: /* MORE_PROCESSING_REQUIRED */
		smb_iod_newstate(vcp, SMBIOD_ST_AUTHCONT);
		break;
	default:
		smb_iod_newstate(vcp, SMBIOD_ST_AUTHFAIL);
		break;
	}
	SMB_VC_UNLOCK(vcp);

	return (err);
}

static int
smb_iod_logoff(struct smb_vc *vcp, cred_t *cr)
{
	smb_cred_t scred;
	int err;

	ASSERT(vcp->iod_thr == curthread);

	smb_credinit(&scred, cr);
	if (vcp->vc_flags & SMBV_SMB2)
		err = smb2_smb_logoff(vcp, &scred);
	else
		err = smb_smb_logoff(vcp, &scred);
	smb_credrele(&scred);

	return (err);
}

/*
 * Handle ioctl SMBIOC_IOD_WORK
 *
 * The smbiod agent calls this after authentication to become
 * the reader for this session, so long as that's possible.
 * This should only return non-zero if we want that agent to
 * give up on this VC permanently.
 */
/* ARGSUSED */
int
smb_iod_vc_work(struct smb_vc *vcp, int flags, cred_t *cr)
{
	smbioc_ssn_work_t *wk = &vcp->vc_work;
	int err = 0;

	/*
	 * This is called by the one-and-only
	 * IOD thread for this VC.
	 */
	ASSERT(vcp->iod_thr == curthread);

	/*
	 * Should be in state...
	 */
	if (vcp->vc_state != SMBIOD_ST_AUTHOK) {
		cmn_err(CE_NOTE, "iod_vc_work: bad state %d", vcp->vc_state);
		return (EINVAL);
	}

	/*
	 * Update the session key and initialize SMB signing.
	 *
	 * This implementation does not use multiple SMB sessions per
	 * TCP connection (where only the first session key is used)
	 * so we always have a new session key here. Sanity check the
	 * length from user space. Normally 16 or 32.
	 */
	if (wk->wk_u_ssnkey_len > 1024) {
		cmn_err(CE_NOTE, "iod_vc_work: ssn key too long");
		return (EINVAL);
	}

	ASSERT(vcp->vc_ssnkey == NULL);
	SMB_VC_LOCK(vcp);
	if (wk->wk_u_ssnkey_len != 0 &&
	    wk->wk_u_ssnkey_buf.lp_ptr != NULL) {
		vcp->vc_ssnkeylen = wk->wk_u_ssnkey_len;
		vcp->vc_ssnkey = kmem_alloc(vcp->vc_ssnkeylen, KM_SLEEP);
		if (ddi_copyin(wk->wk_u_ssnkey_buf.lp_ptr,
		    vcp->vc_ssnkey, vcp->vc_ssnkeylen, flags) != 0) {
			err = EFAULT;
		}
	}
	SMB_VC_UNLOCK(vcp);
	if (err)
		return (err);

	/*
	 * If we have a session key, derive the MAC key for SMB signing.
	 * If this was a NULL session, we might have no session key.
	 */
	ASSERT(vcp->vc_mackey == NULL);
	if (vcp->vc_ssnkey != NULL) {
		if (vcp->vc_flags & SMBV_SMB2)
			err = smb2_sign_init(vcp);
		else
			err = smb_sign_init(vcp);
		if (err != 0)
			return (err);
	}

	/*
	 * Tell any enqueued requests they can start.
	 */
	SMB_VC_LOCK(vcp);
	vcp->vc_genid++;	/* possibly new connection */
	smb_iod_newstate(vcp, SMBIOD_ST_VCACTIVE);
	cv_broadcast(&vcp->vc_statechg);
	SMB_VC_UNLOCK(vcp);

	/*
	 * The above cv_broadcast should be sufficient to
	 * get requests going again.
	 *
	 * If we have a callback function, run it.
	 * Was: smb_iod_notify_connected()
	 */
	if (fscb && fscb->fscb_connect)
		smb_vc_walkshares(vcp, fscb->fscb_connect);

	/*
	 * Run the "reader" loop. An error return here is normal
	 * (i.e. when we need to reconnect) so ignore errors.
	 * Note: This call updates the vc_state.
	 */
	(void) smb_iod_recvall(vcp, B_FALSE);

	/*
	 * The reader loop returned, so we must have a
	 * new state. (disconnected or reconnecting)
	 *
	 * Notify shares of the disconnect.
	 * Was: smb_iod_notify_disconnect()
	 */
	smb_vc_walkshares(vcp, smb_iod_share_disconnected);

	/*
	 * The reader loop function returns only when
	 * there's been an error on the connection, or
	 * this VC has no more references. It also
	 * updates the state before it returns.
	 *
	 * Tell any requests to give up or restart.
	 */
	smb_iod_invrq(vcp);

	return (err);
}

/*
 * Handle ioctl SMBIOC_IOD_IDLE
 *
 * Wait around for someone to ask to use this VC again after the
 * TCP session has closed. When one of the connected trees adds a
 * request, smb_iod_reconnect will set vc_state to RECONNECT and
 * wake this cv_wait. When a VC ref. goes away in smb_vc_rele,
 * that also signals this wait so we can re-check whether we
 * now hold the last ref. on this VC (and can destroy it).
 */
int
smb_iod_vc_idle(struct smb_vc *vcp)
{
	int err = 0;
	boolean_t destroy = B_FALSE;

	/*
	 * This is called by the one-and-only
	 * IOD thread for this VC.
	 */
	ASSERT(vcp->iod_thr == curthread);

	/*
	 * Should be in state...
	 */
	if (vcp->vc_state != SMBIOD_ST_IDLE &&
	    vcp->vc_state != SMBIOD_ST_RECONNECT) {
		cmn_err(CE_NOTE, "iod_vc_idle: bad state %d", vcp->vc_state);
		return (EINVAL);
	}

	SMB_VC_LOCK(vcp);

	while (vcp->vc_state == SMBIOD_ST_IDLE &&
	    vcp->vc_co.co_usecount > 1) {
		if (cv_wait_sig(&vcp->iod_idle, &vcp->vc_lock) == 0) {
			err = EINTR;
			break;
		}
	}
	if (vcp->vc_state == SMBIOD_ST_IDLE &&
	    vcp->vc_co.co_usecount == 1) {
		/*
		 * We were woken because we now have the last ref.
		 * Arrange for this VC to be destroyed now.
		 * Set the "GONE" flag while holding the lock,
		 * to prevent a race with new references.
		 * The destroy happens after unlock.
		 */
		vcp->vc_flags |= SMBV_GONE;
		destroy = B_TRUE;
	}

	SMB_VC_UNLOCK(vcp);

	if (destroy) {
		/* This sets vc_state = DEAD */
		smb_iod_disconnect(vcp);
	}

	return (err);
}

/*
 * Handle ioctl SMBIOC_IOD_RCFAIL
 *
 * After a failed reconnect attempt, smbiod will
 * call this to make current requests error out.
 */
int
smb_iod_vc_rcfail(struct smb_vc *vcp)
{
	clock_t tr;
	int err = 0;

	/*
	 * This is called by the one-and-only
	 * IOD thread for this VC.
	 */
	ASSERT(vcp->iod_thr == curthread);
	SMB_VC_LOCK(vcp);

	smb_iod_newstate(vcp, SMBIOD_ST_RCFAILED);
	cv_broadcast(&vcp->vc_statechg);

	/*
	 * Short wait here for two reasons:
	 * (1) Give requests a chance to error out.
	 * (2) Prevent immediate retry.
	 */
	tr = cv_reltimedwait_sig(&vcp->iod_idle, &vcp->vc_lock,
	    SEC_TO_TICK(5), TR_CLOCK_TICK);
	if (tr == 0)
		err = EINTR;

	/*
	 * Normally we'll switch to state IDLE here. However,
	 * if something called smb_iod_reconnect() while we were
	 * waiting above, we'll be in state RECONNECT already.
	 * In that case, keep state RECONNECT, so we essentially
	 * skip the transition through state IDLE that would
	 * normally happen next.
	 */
	if (vcp->vc_state != SMBIOD_ST_RECONNECT) {
		smb_iod_newstate(vcp, SMBIOD_ST_IDLE);
		cv_broadcast(&vcp->vc_statechg);
	}

	SMB_VC_UNLOCK(vcp);

	return (err);
}

/*
 * Ask the IOD to reconnect (if not already underway)
 * then wait for the reconnect to finish.
 */
int
smb_iod_reconnect(struct smb_vc *vcp)
{
	int err = 0, rv;

	SMB_VC_LOCK(vcp);
again:
	switch (vcp->vc_state) {

	case SMBIOD_ST_IDLE:
		/* Tell the IOD thread it's no longer IDLE. */
		smb_iod_newstate(vcp, SMBIOD_ST_RECONNECT);
		cv_signal(&vcp->iod_idle);
		/* FALLTHROUGH */

	case SMBIOD_ST_RECONNECT:
	case SMBIOD_ST_CONNECTED:
	case SMBIOD_ST_NEGOTIATED:
	case SMBIOD_ST_AUTHCONT:
	case SMBIOD_ST_AUTHOK:
		/* Wait for the VC state to become ACTIVE. */
		rv = cv_wait_sig(&vcp->vc_statechg, &vcp->vc_lock);
		if (rv == 0) {
			err = EINTR;
			break;
		}
		goto again;

	case SMBIOD_ST_VCACTIVE:
		err = 0;	/* success! */
		break;

	case SMBIOD_ST_AUTHFAIL:
	case SMBIOD_ST_RCFAILED:
	case SMBIOD_ST_DEAD:
	default:
		err = ENOTCONN;
		break;
	}

	SMB_VC_UNLOCK(vcp);
	return (err);
}