18 * without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 * SUCH DAMAGE.
31 *
32 * $Id: smb_iod.c,v 1.32 2005/02/12 00:17:09 lindak Exp $
33 */
34
35 /*
36 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
37 * Use is subject to license terms.
38 */
39
40 #ifdef DEBUG
41 /* See sys/queue.h */
42 #define QUEUEDEBUG 1
43 #endif
44
45 #include <sys/param.h>
46 #include <sys/systm.h>
47 #include <sys/atomic.h>
48 #include <sys/proc.h>
49 #include <sys/thread.h>
50 #include <sys/file.h>
51 #include <sys/kmem.h>
52 #include <sys/unistd.h>
53 #include <sys/mount.h>
54 #include <sys/vnode.h>
55 #include <sys/types.h>
56 #include <sys/ddi.h>
57 #include <sys/sunddi.h>
58 #include <sys/stream.h>
59 #include <sys/strsun.h>
60 #include <sys/time.h>
61 #include <sys/class.h>
62 #include <sys/disp.h>
63 #include <sys/cmn_err.h>
64 #include <sys/zone.h>
65 #include <sys/sdt.h>
66
67 #include <netsmb/smb_osdep.h>
68
69 #include <netsmb/smb.h>
70 #include <netsmb/smb_conn.h>
71 #include <netsmb/smb_rq.h>
72 #include <netsmb/smb_subr.h>
73 #include <netsmb/smb_tran.h>
74 #include <netsmb/smb_trantcp.h>
75
76 int smb_iod_send_echo(smb_vc_t *);
77
78 /*
79 * This is set/cleared when smbfs loads/unloads.
80 * No locks should be necessary, because smbfs
81 * can't unload until all the mounts are gone.
82 */
83 static smb_fscb_t *fscb;
84 void
85 smb_fscb_set(smb_fscb_t *cb)
86 {
87 fscb = cb;
88 }
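/*
 * Illustrative sketch (not part of the original file): smbfs is
 * expected to register its callback vector at module load and
 * clear it at unload, roughly like this.  The smbfs_* handler
 * names are hypothetical; the fscb_* fields are the ones this
 * file actually calls (fscb_disconn, fscb_down, fscb_up,
 * fscb_connect).
 *
 *	static smb_fscb_t smbfs_fscb;
 *	smbfs_fscb.fscb_disconn = smbfs_dead;
 *	smbfs_fscb.fscb_down = smbfs_down;
 *	smbfs_fscb.fscb_up = smbfs_up;
 *	smb_fscb_set(&smbfs_fscb);	(at _init)
 *	...
 *	smb_fscb_set(NULL);		(at _fini)
 */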
89
90 static void
91 smb_iod_share_disconnected(smb_share_t *ssp)
92 {
93
94 smb_share_invalidate(ssp);
95
96 /* smbfs_dead() */
97 if (fscb && fscb->fscb_disconn) {
98 fscb->fscb_disconn(ssp);
99 }
100 }
101
102 /*
103 * State changes are important and infrequent.
104 * Make them easily observable via dtrace.
105 */
106 void
107 smb_iod_newstate(struct smb_vc *vcp, int state)
108 {
109 vcp->vc_state = state;
110 }
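/*
 * Example (sketch): because every state change funnels through
 * this one function, transitions can be watched from user space
 * with an fbt probe, e.g.:
 *
 *	dtrace -n 'fbt::smb_iod_newstate:entry { trace(arg1); }'
 */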
111
112 /* Lock Held version of the next function. */
113 static inline void
114 smb_iod_rqprocessed_LH(
115 struct smb_rq *rqp,
116 int error,
117 int flags)
118 {
119 rqp->sr_flags |= flags;
120 rqp->sr_lerror = error;
121 rqp->sr_rpgen++;
122 rqp->sr_state = SMBRQ_NOTIFIED;
123 cv_broadcast(&rqp->sr_cond);
124 }
125
126 static void
127 smb_iod_rqprocessed(
128 struct smb_rq *rqp,
129 int error,
130 int flags)
131 {
132
133 SMBRQ_LOCK(rqp);
134 smb_iod_rqprocessed_LH(rqp, error, flags);
135 SMBRQ_UNLOCK(rqp);
136 }
137
138 static void
139 smb_iod_invrq(struct smb_vc *vcp)
140 {
141 struct smb_rq *rqp;
142
143 /*
144 * Invalidate all outstanding requests for this connection
145 */
146 rw_enter(&vcp->iod_rqlock, RW_READER);
147 TAILQ_FOREACH(rqp, &vcp->iod_rqlist, sr_link) {
148 smb_iod_rqprocessed(rqp, ENOTCONN, SMBR_RESTART);
149 }
150 rw_exit(&vcp->iod_rqlock);
151 }
152
153 /*
154 * Called by smb_vc_rele, smb_vc_kill, and by the driver
155 * close entry point if the IOD closes its dev handle.
156 *
157 * Forcibly kill the connection and IOD.
158 */
159 void
160 smb_iod_disconnect(struct smb_vc *vcp)
161 {
162
163 /*
164 * Inform everyone of the state change.
165 */
166 SMB_VC_LOCK(vcp);
167 if (vcp->vc_state != SMBIOD_ST_DEAD) {
168 smb_iod_newstate(vcp, SMBIOD_ST_DEAD);
169 cv_broadcast(&vcp->vc_statechg);
170 }
171 SMB_VC_UNLOCK(vcp);
172
173 /*
174 * Let's be safe here and avoid doing any
175 * call across the network while trying to
176 * shut things down. If we just disconnect,
177 * the server will take care of the logoff.
178 */
179 SMB_TRAN_DISCONNECT(vcp);
180
181 /*
182 * If we have an IOD, it should immediately notice
183 * that its connection has closed. But in case
184 * it doesn't, let's also send it a signal.
185 */
186 SMB_VC_LOCK(vcp);
187 if (vcp->iod_thr != NULL &&
188 vcp->iod_thr != curthread) {
189 tsignal(vcp->iod_thr, SIGKILL);
190 }
191 SMB_VC_UNLOCK(vcp);
192 }
193
194 /*
195 * Send one request.
196 *
197 * Called by _addrq (for internal requests)
198 * and _sendall (via _addrq, _multirq, _waitrq)
199 */
200 static int
201 smb_iod_sendrq(struct smb_rq *rqp)
202 {
203 struct smb_vc *vcp = rqp->sr_vc;
204 mblk_t *m;
205 int error;
206
207 ASSERT(vcp);
208 ASSERT(SEMA_HELD(&vcp->vc_sendlock));
209 ASSERT(RW_READ_HELD(&vcp->iod_rqlock));
210
211 /*
212 * Note: Anything special for SMBR_INTERNAL here?
213 */
214 if (vcp->vc_state != SMBIOD_ST_VCACTIVE) {
215 SMBIODEBUG("bad vc_state=%d\n", vcp->vc_state);
216 return (ENOTCONN);
217 }
218
219
220 /*
221 * On the first send, set the MID and (maybe)
222 * the signing sequence numbers. The increments
223 * here are serialized by vc_sendlock
224 */
225 if (rqp->sr_sendcnt == 0) {
226
227 rqp->sr_mid = vcp->vc_next_mid++;
228
229 if (rqp->sr_rqflags2 & SMB_FLAGS2_SECURITY_SIGNATURE) {
230 /*
231 * We're signing requests and verifying
232 * signatures on responses. Set the
233 * sequence numbers of the request and
234 * response here, used in smb_rq_verify.
235 */
236 rqp->sr_seqno = vcp->vc_next_seq++;
237 rqp->sr_rseqno = vcp->vc_next_seq++;
238 }
239
240 /* Fill in UID, TID, MID, etc. */
241 smb_rq_fillhdr(rqp);
242
243 /*
244 * Sign the message now that we're finally done
245 * filling in the SMB header fields, etc.
246 */
247 if (rqp->sr_rqflags2 & SMB_FLAGS2_SECURITY_SIGNATURE) {
248 smb_rq_sign(rqp);
249 }
250 }
251 if (rqp->sr_sendcnt++ >= 60/SMBSBTIMO) { /* one minute */
252 smb_iod_rqprocessed(rqp, rqp->sr_lerror, SMBR_RESTART);
253 /*
254 * If all attempts to send a request failed, then
255 * something is seriously hosed.
256 */
257 return (ENOTCONN);
258 }
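/*
 * Arithmetic note: assuming SMBSBTIMO is 5 seconds (its value
 * in the BSD-derived code this comes from), the test above
 * allows 60/5 == 12 send attempts before giving up, i.e. about
 * one minute, as the comment says.
 */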
259
260 /*
261 * Replaced m_copym() with Solaris copymsg() which does the same
262 * work when we want to do a M_COPYALL.
263 * m = m_copym(rqp->sr_rq.mb_top, 0, M_COPYALL, 0);
264 */
265 m = copymsg(rqp->sr_rq.mb_top);
266
267 #ifdef DTRACE_PROBE
268 DTRACE_PROBE2(smb_iod_sendrq,
269 (smb_rq_t *), rqp, (mblk_t *), m);
270 #else
271 SMBIODEBUG("M:%04x, P:%04x, U:%04x, T:%04x\n", rqp->sr_mid, 0, 0, 0);
272 #endif
273 m_dumpm(m);
274
275 if (m != NULL) {
276 error = SMB_TRAN_SEND(vcp, m);
277 m = 0; /* consumed by SEND */
278 } else
279 error = ENOBUFS;
280
281 rqp->sr_lerror = error;
282 if (error == 0) {
283 SMBRQ_LOCK(rqp);
284 rqp->sr_flags |= SMBR_SENT;
285 rqp->sr_state = SMBRQ_SENT;
286 if (rqp->sr_flags & SMBR_SENDWAIT)
287 cv_broadcast(&rqp->sr_cond);
288 SMBRQ_UNLOCK(rqp);
289 return (0);
290 }
291 /*
292 * Check for fatal errors
293 */
294 if (SMB_TRAN_FATAL(vcp, error)) {
295 /*
296 * No further attempts should be made
297 */
298 SMBSDEBUG("TRAN_SEND returned fatal error %d\n", error);
299 return (ENOTCONN);
300 }
301 if (error)
302 SMBSDEBUG("TRAN_SEND returned non-fatal error %d\n", error);
303
304 #ifdef APPLE
305 /* If proc waiting on rqp was signaled... */
306 if (smb_rq_intr(rqp))
307 smb_iod_rqprocessed(rqp, EINTR, 0);
308 #endif
309
310 return (0);
311 }
312
313 static int
314 smb_iod_recv1(struct smb_vc *vcp, mblk_t **mpp)
315 {
316 mblk_t *m;
317 uchar_t *hp;
318 int error;
319
320 top:
321 m = NULL;
322 error = SMB_TRAN_RECV(vcp, &m);
323 if (error == EAGAIN)
324 goto top;
325 if (error)
326 return (error);
327 ASSERT(m);
328
329 m = m_pullup(m, SMB_HDRLEN);
330 if (m == NULL) {
331 return (ENOSR);
332 }
333
334 /*
335 * Check the SMB header
336 */
337 hp = mtod(m, uchar_t *);
338 if (bcmp(hp, SMB_SIGNATURE, SMB_SIGLEN) != 0) {
339 m_freem(m);
340 return (EPROTO);
341 }
342
343 *mpp = m;
344 return (0);
345 }
346
347 /*
348 * Process incoming packets
349 *
350 * This is the "reader" loop, run by the IOD thread
351 * while in state SMBIOD_ST_VCACTIVE. The loop now
352 * simply blocks in the socket recv until either a
353 * message arrives, or a disconnect.
354 *
355 * Any non-zero error means the IOD should terminate.
356 */
357 int
358 smb_iod_recvall(struct smb_vc *vcp)
359 {
360 struct smb_rq *rqp;
361 mblk_t *m;
362 uchar_t *hp;
363 ushort_t mid;
364 int error = 0;
365 int etime_count = 0; /* for "server not responding", etc. */
366
367 for (;;) {
368 /*
369 * Check whether someone "killed" this VC,
370 * or is asking the IOD to terminate.
371 */
372
373 if (vcp->vc_state != SMBIOD_ST_VCACTIVE) {
374 SMBIODEBUG("bad vc_state=%d\n", vcp->vc_state);
375 error = 0;
376 break;
377 }
378
379 if (vcp->iod_flags & SMBIOD_SHUTDOWN) {
380 SMBIODEBUG("SHUTDOWN set\n");
381 /* This IOD thread will terminate. */
382 SMB_VC_LOCK(vcp);
383 smb_iod_newstate(vcp, SMBIOD_ST_DEAD);
384 cv_broadcast(&vcp->vc_statechg);
385 SMB_VC_UNLOCK(vcp);
386 error = EINTR;
387 break;
388 }
389
390 m = NULL;
391 error = smb_iod_recv1(vcp, &m);
392
393 if (error == ETIME &&
394 vcp->iod_rqlist.tqh_first != NULL) {
395 /*
396 * Nothing received for 15 seconds and
397 * we have requests in the queue.
398 */
399 etime_count++;
400
401 /*
402 * Once, at 15 sec. notify callbacks
403 * and print the warning message.
404 */
405 if (etime_count == 1) {
406 /* Was: smb_iod_notify_down(vcp); */
407 if (fscb && fscb->fscb_down)
408 smb_vc_walkshares(vcp,
409 fscb->fscb_down);
410 zprintf(vcp->vc_zoneid,
411 "SMB server %s not responding\n",
412 vcp->vc_srvname);
413 }
414
415 /*
416 * At 30 sec. try sending an echo, and then
417 * once a minute thereafter.
418 */
419 if ((etime_count & 3) == 2) {
420 (void) smb_iod_send_echo(vcp);
421 }
422
423 continue;
424 } /* ETIME && requests in queue */
425
426 if (error == ETIME) {
427 /*
428 * If the IOD thread holds the last reference
429 * to this VC, let the IOD thread terminate.
430 */
431 if (vcp->vc_co.co_usecount > 1)
432 continue;
433 SMB_VC_LOCK(vcp);
434 if (vcp->vc_co.co_usecount == 1) {
435 smb_iod_newstate(vcp, SMBIOD_ST_DEAD);
436 SMB_VC_UNLOCK(vcp);
437 error = 0;
438 break;
439 }
440 SMB_VC_UNLOCK(vcp);
441 continue;
442 } /* error == ETIME */
443
444 if (error) {
445 /*
446 * The recv above returned an error we
447 * can't continue from (e.g. ENOTCONN).
448 * It's dangerous to continue here.
449 * (possible infinite loop!)
450 *
451 * If we have requests enqueued, next
452 * state is reconnecting, else idle.
453 */
454 int state;
455 SMB_VC_LOCK(vcp);
456 state = (vcp->iod_rqlist.tqh_first != NULL) ?
457 SMBIOD_ST_RECONNECT : SMBIOD_ST_IDLE;
458 smb_iod_newstate(vcp, state);
459 cv_broadcast(&vcp->vc_statechg);
460 SMB_VC_UNLOCK(vcp);
461 error = 0;
462 break;
463 }
464
465 /*
466 * Received something. Yea!
467 */
468 if (etime_count) {
469 etime_count = 0;
470
471 zprintf(vcp->vc_zoneid, "SMB server %s OK\n",
472 vcp->vc_srvname);
473
474 /* Was: smb_iod_notify_up(vcp); */
475 if (fscb && fscb->fscb_up)
476 smb_vc_walkshares(vcp, fscb->fscb_up);
477 }
478
479 /*
480 * Have an SMB packet. The SMB header was
481 * checked in smb_iod_recv1().
482 * Find the request...
483 */
484 hp = mtod(m, uchar_t *);
485 /*LINTED*/
486 mid = letohs(SMB_HDRMID(hp));
487 SMBIODEBUG("mid %04x\n", (uint_t)mid);
488
489 rw_enter(&vcp->iod_rqlock, RW_READER);
490 TAILQ_FOREACH(rqp, &vcp->iod_rqlist, sr_link) {
491
492 if (rqp->sr_mid != mid)
493 continue;
494
495 DTRACE_PROBE2(smb_iod_recvrq,
496 (smb_rq_t *), rqp, (mblk_t *), m);
497 m_dumpm(m);
498
499 SMBRQ_LOCK(rqp);
500 if (rqp->sr_rp.md_top == NULL) {
501 md_initm(&rqp->sr_rp, m);
502 } else {
503 if (rqp->sr_flags & SMBR_MULTIPACKET) {
504 md_append_record(&rqp->sr_rp, m);
505 } else {
506 SMBRQ_UNLOCK(rqp);
507 SMBSDEBUG("duplicate response %d "
508 "(ignored)\n", mid);
509 break;
510 }
511 }
512 smb_iod_rqprocessed_LH(rqp, 0, 0);
513 SMBRQ_UNLOCK(rqp);
514 break;
515 }
516
517 if (rqp == NULL) {
518 int cmd = SMB_HDRCMD(hp);
519
520 if (cmd != SMB_COM_ECHO)
521 SMBSDEBUG("drop resp: mid %d, cmd %d\n",
522 (uint_t)mid, cmd);
523 /* smb_printrqlist(vcp); */
524 m_freem(m);
525 }
526 rw_exit(&vcp->iod_rqlock);
527
528 }
529
530 return (error);
531 }
532
533 /*
534 * The IOD receiver thread has requests pending and
535 * has not received anything in a while. Try to
536 * send an SMB echo request. It's tricky to do a
537 * send from the IOD thread because we can't block.
538 *
539 * Using tmo=SMBNOREPLYWAIT in the request
540 * so smb_rq_reply will skip smb_iod_waitrq.
541 * The smb_smb_echo call uses SMBR_INTERNAL
542 * to avoid calling smb_iod_sendall().
543 */
544 int
545 smb_iod_send_echo(smb_vc_t *vcp)
546 {
547 smb_cred_t scred;
548 int err;
549
550 smb_credinit(&scred, NULL);
551 err = smb_smb_echo(vcp, &scred, SMBNOREPLYWAIT);
552 smb_credrele(&scred);
553 return (err);
554 }
555
556 /*
557 * The IOD thread is now just a "reader",
558 * so no more smb_iod_request(). Yea!
559 */
560
561 /*
562 * Place request in the queue, and send it now if possible.
563 * Called with no locks held.
564 */
565 int
566 smb_iod_addrq(struct smb_rq *rqp)
567 {
568 struct smb_vc *vcp = rqp->sr_vc;
569 int error, save_newrq;
570
571 ASSERT(rqp->sr_cred);
572
573 /*
574 * State should be correct after the check in
575 * smb_rq_enqueue(), but we dropped locks...
576 */
577 if (vcp->vc_state != SMBIOD_ST_VCACTIVE) {
578 SMBIODEBUG("bad vc_state=%d\n", vcp->vc_state);
579 return (ENOTCONN);
580 }
581
582 /*
583 * Requests from the IOD itself are marked _INTERNAL,
584 * and get some special treatment to avoid blocking
585 * the reader thread (so we don't deadlock).
586 * The request is not yet on the queue, so we can
587 * modify its state here without locks.
588 * Only thing using this now is ECHO.
589 */
590 rqp->sr_owner = curthread;
591 if (rqp->sr_owner == vcp->iod_thr) {
592 rqp->sr_flags |= SMBR_INTERNAL;
593
594 /*
595 * This is a request from the IOD thread.
596 * Always send directly from this thread.
597 * Note lock order: iod_rqlist, vc_sendlock
598 */
599 rw_enter(&vcp->iod_rqlock, RW_WRITER);
600 TAILQ_INSERT_HEAD(&vcp->iod_rqlist, rqp, sr_link);
601 rw_downgrade(&vcp->iod_rqlock);
602
603 /*
604 * Note: iod_sendrq expects vc_sendlock,
605 * so take that here, but carefully:
606 * Never block the IOD thread here.
607 */
608 if (sema_tryp(&vcp->vc_sendlock) == 0) {
609 SMBIODEBUG("sendlock busy\n");
610 error = EAGAIN;
611 } else {
612 /* Have vc_sendlock */
613 error = smb_iod_sendrq(rqp);
614 sema_v(&vcp->vc_sendlock);
615 }
616
617 rw_exit(&vcp->iod_rqlock);
618
619 /*
620 * In the non-error case, _removerq
621 * is done by either smb_rq_reply
622 * or smb_iod_waitrq.
623 */
624 if (error)
625 smb_iod_removerq(rqp);
626
627 return (error);
628 }
629
630 rw_enter(&vcp->iod_rqlock, RW_WRITER);
631
632 TAILQ_INSERT_TAIL(&vcp->iod_rqlist, rqp, sr_link);
633 /* iod_rqlock/WRITER protects iod_newrq */
634 save_newrq = vcp->iod_newrq;
635 vcp->iod_newrq++;
636
637 rw_exit(&vcp->iod_rqlock);
638
639 /*
640 * Now send any requests that need to be sent,
641 * including the one we just put on the list.
642 * Only the thread that found iod_newrq==0
643 * needs to run the send loop.
644 */
645 if (save_newrq == 0)
646 smb_iod_sendall(vcp);
647
648 return (0);
649 }
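/*
 * For reference, a typical (non-internal) request life cycle,
 * sketched from the callers seen in this file:
 *
 *	smb_rq_enqueue()	state checks, then...
 *	smb_iod_addrq()		queue it, maybe send now
 *	smb_iod_waitrq()	wait for the response
 *	  (the IOD thread posts it via smb_iod_rqprocessed)
 *	smb_iod_removerq()	called from waitrq/smb_rq_reply
 */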
650
651 /*
652 * Mark an SMBR_MULTIPACKET request as
653 * needing another send. Similar to the
654 * "normal" part of smb_iod_addrq.
655 */
656 int
657 smb_iod_multirq(struct smb_rq *rqp)
658 {
659 struct smb_vc *vcp = rqp->sr_vc;
660 int save_newrq;
661
662 ASSERT(rqp->sr_flags & SMBR_MULTIPACKET);
663
664 if (rqp->sr_flags & SMBR_INTERNAL)
665 return (EINVAL);
666
667 if (vcp->vc_state != SMBIOD_ST_VCACTIVE) {
668 SMBIODEBUG("bad vc_state=%d\n", vcp->vc_state);
669 return (ENOTCONN);
670 }
671
672 rw_enter(&vcp->iod_rqlock, RW_WRITER);
673
674 /* Already on iod_rqlist, just reset state. */
675 rqp->sr_state = SMBRQ_NOTSENT;
676
677 /* iod_rqlock/WRITER protects iod_newrq */
678 save_newrq = vcp->iod_newrq;
679 vcp->iod_newrq++;
680
681 rw_exit(&vcp->iod_rqlock);
682
683 /*
684 * Now send any requests that need to be sent,
685 * including the one we just marked NOTSENT.
686 * Only the thread that found iod_newrq==0
687 * needs to run the send loop.
688 */
689 if (save_newrq == 0)
690 smb_iod_sendall(vcp);
691
692 return (0);
693 }
694
695
696 void
697 smb_iod_removerq(struct smb_rq *rqp)
698 {
699 struct smb_vc *vcp = rqp->sr_vc;
700
701 rw_enter(&vcp->iod_rqlock, RW_WRITER);
702 #ifdef QUEUEDEBUG
703 /*
704 * Make sure we have not already removed it.
705 * See sys/queue.h QUEUEDEBUG_TAILQ_POSTREMOVE
706 * XXX: Don't like the constant 1 here...
707 */
708 ASSERT(rqp->sr_link.tqe_next != (void *)1L);
709 #endif
710 TAILQ_REMOVE(&vcp->iod_rqlist, rqp, sr_link);
711 rw_exit(&vcp->iod_rqlock);
712 }
713
714
715
716 /*
717 * Wait for a request to complete.
718 *
719 * For normal requests, we need to deal with
720 * the mux count dropping below vc_maxmux by
721 * making arrangements to send more...
722 */
723 int
724 smb_iod_waitrq(struct smb_rq *rqp)
725 {
726 struct smb_vc *vcp = rqp->sr_vc;
727 clock_t tr, tmo1, tmo2;
728 int error, rc;
729
730 if (rqp->sr_flags & SMBR_INTERNAL) {
731 ASSERT((rqp->sr_flags & SMBR_MULTIPACKET) == 0);
732 smb_iod_removerq(rqp);
733 return (EAGAIN);
734 }
735
736 /*
737 * Make sure this is NOT the IOD thread,
738 * or the wait below will stop the reader.
739 */
740 ASSERT(curthread != vcp->iod_thr);
741
742 SMBRQ_LOCK(rqp);
743
744 /*
745 * First, wait for the request to be sent. Normally the send
746 * has already happened by the time we get here. However, if
747 * we have more than maxmux entries in the request list, our
748 * request may not be sent until other requests complete.
749 * The wait in this case is due to local I/O demands, so
750 * we don't want the server response timeout to apply.
751 *
752 * If a request is allowed to interrupt this wait, then the
753 * request is cancelled and never sent OTW. Some kinds of
754 * requests should never be cancelled (i.e. close) and those
755 * are marked SMBR_NOINTR_SEND so they either go eventually,
756 * or a connection close will terminate them with ENOTCONN.
757 */
758 while (rqp->sr_state == SMBRQ_NOTSENT) {
759 rqp->sr_flags |= SMBR_SENDWAIT;
760 if (rqp->sr_flags & SMBR_NOINTR_SEND) {
761 cv_wait(&rqp->sr_cond, &rqp->sr_lock);
762 rc = 1;
763 } else
764 rc = cv_wait_sig(&rqp->sr_cond, &rqp->sr_lock);
765 rqp->sr_flags &= ~SMBR_SENDWAIT;
766 if (rc == 0) {
767 SMBIODEBUG("EINTR in sendwait, rqp=%p\n", rqp);
768 error = EINTR;
769 goto out;
770 }
771 }
772
773 /*
774 * The request has been sent. Now wait for the response,
775 * with the timeout specified for this request.
776 * Compute all the deadlines now, so we effectively
777 * start the timer(s) after the request is sent.
778 */
779 if (smb_timo_notice && (smb_timo_notice < rqp->sr_timo))
780 tmo1 = SEC_TO_TICK(smb_timo_notice);
781 else
782 tmo1 = 0;
783 tmo2 = ddi_get_lbolt() + SEC_TO_TICK(rqp->sr_timo);
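/*
 * Note the asymmetry: tmo1 is a relative tick count (used with
 * cv_reltimedwait below) while tmo2 is an absolute lbolt
 * deadline (used with cv_timedwait).  For example, assuming the
 * defaults smb_timo_notice == 15 and sr_timo == 30 seconds, we
 * warn once 15 sec. after the send and give up (ETIME) at 30.
 */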
784
785 /*
786 * As above, we don't want to allow interrupt for some
787 * requests like open, because we could miss a successful
788 * response and therefore "leak" a FID. Such requests
789 * are marked SMBR_NOINTR_RECV to prevent that.
790 *
791 * If "slow server" warnings are enabled, wait first
792 * for the "notice" timeout, and warn if expired.
793 */
794 if (tmo1 && rqp->sr_rpgen == rqp->sr_rplast) {
795 if (rqp->sr_flags & SMBR_NOINTR_RECV)
796 tr = cv_reltimedwait(&rqp->sr_cond,
797 &rqp->sr_lock, tmo1, TR_CLOCK_TICK);
798 else
799 tr = cv_reltimedwait_sig(&rqp->sr_cond,
800 &rqp->sr_lock, tmo1, TR_CLOCK_TICK);
801 if (tr == 0) {
802 error = EINTR;
803 goto out;
804 }
805 if (tr < 0) {
806 #ifdef DTRACE_PROBE
807 DTRACE_PROBE1(smb_iod_waitrq1,
808 (smb_rq_t *), rqp);
809 #endif
810 #ifdef NOT_YET
811 /* Want this to go ONLY to the user. */
812 uprintf("SMB server %s has not responded"
813 " to request %d after %d seconds..."
814 " (still waiting).\n", vcp->vc_srvname,
815 rqp->sr_mid, smb_timo_notice);
816 #endif
817 }
818 }
819
820 /*
821 * Keep waiting until tmo2 is expired.
822 */
823 while (rqp->sr_rpgen == rqp->sr_rplast) {
824 if (rqp->sr_flags & SMBR_NOINTR_RECV)
825 tr = cv_timedwait(&rqp->sr_cond,
826 &rqp->sr_lock, tmo2);
827 else
828 tr = cv_timedwait_sig(&rqp->sr_cond,
829 &rqp->sr_lock, tmo2);
830 if (tr == 0) {
831 error = EINTR;
832 goto out;
833 }
834 if (tr < 0) {
835 #ifdef DTRACE_PROBE
836 DTRACE_PROBE1(smb_iod_waitrq2,
837 (smb_rq_t *), rqp);
838 #endif
839 #ifdef NOT_YET
840 /* Want this to go ONLY to the user. */
841 uprintf("SMB server %s has not responded"
842 " to request %d after %d seconds..."
843 " (giving up).\n", vcp->vc_srvname,
844 rqp->sr_mid, rqp->sr_timo);
845 #endif
846 error = ETIME;
847 goto out;
848 }
849 /* got wakeup */
850 }
851 error = rqp->sr_lerror;
852 rqp->sr_rplast++;
853
854 out:
855 SMBRQ_UNLOCK(rqp);
856
857 /*
858 * MULTIPACKET request must stay in the list.
859 * They may need additional responses.
860 */
861 if ((rqp->sr_flags & SMBR_MULTIPACKET) == 0)
862 smb_iod_removerq(rqp);
863
864 /*
865 * Some request has been completed.
866 * If we reached the mux limit,
867 * re-run the send loop...
868 */
869 if (vcp->iod_muxfull)
870 smb_iod_sendall(vcp);
871
872 return (error);
873 }
874
875 /*
876 * Shutdown all outstanding I/O requests on the specified share with
877 * EIO; used when unmounting a share. (There shouldn't be any for a
878 * non-forced unmount; if this is a forced unmount, we have to shutdown
879 * the requests as part of the unmount process.)
880 */
881 void
882 smb_iod_shutdown_share(struct smb_share *ssp)
883 {
884 struct smb_vc *vcp = SSTOVC(ssp);
885 struct smb_rq *rqp;
886
887 /*
888 * Loop through the list of requests and shutdown the ones
889 * that are for the specified share.
890 */
891 rw_enter(&vcp->iod_rqlock, RW_READER);
892 TAILQ_FOREACH(rqp, &vcp->iod_rqlist, sr_link) {
893 if (rqp->sr_state != SMBRQ_NOTIFIED && rqp->sr_share == ssp)
894 smb_iod_rqprocessed(rqp, EIO, 0);
895 }
896 rw_exit(&vcp->iod_rqlock);
897 }
898
899 /*
900 * Send all requests that need sending.
901 * Called from _addrq, _multirq, _waitrq
902 */
903 void
904 smb_iod_sendall(smb_vc_t *vcp)
905 {
906 struct smb_rq *rqp;
907 int error, muxcnt;
908
909 /*
910 * Clear "newrq" to make sure threads adding
911 * new requests will run this function again.
912 */
913 rw_enter(&vcp->iod_rqlock, RW_WRITER);
914 vcp->iod_newrq = 0;
915
916 /*
917 * We only read iod_rqlist, so downgrade rwlock.
918 * This allows the IOD to handle responses while
919 * some requesting thread may be blocked in send.
920 */
921 rw_downgrade(&vcp->iod_rqlock);
922
923 /*
924 * Serialize to prevent multiple senders.
925 * Note lock order: iod_rqlock, vc_sendlock
926 */
927 sema_p(&vcp->vc_sendlock);
928
929 /*
930 * Walk the list of requests and send when possible.
931 * We avoid having more than vc_maxmux requests
932 * outstanding to the server by traversing only
933 * vc_maxmux entries into this list. Simple!
934 */
935 ASSERT(vcp->vc_maxmux > 0);
936 error = muxcnt = 0;
937 TAILQ_FOREACH(rqp, &vcp->iod_rqlist, sr_link) {
938
939 if (vcp->vc_state != SMBIOD_ST_VCACTIVE) {
940 error = ENOTCONN; /* stop everything! */
941 break;
942 }
943
944 if (rqp->sr_state == SMBRQ_NOTSENT) {
945 error = smb_iod_sendrq(rqp);
946 if (error)
947 break;
948 }
949
950 if (++muxcnt == vcp->vc_maxmux) {
951 SMBIODEBUG("muxcnt == vc_maxmux\n");
952 break;
953 }
954
955 }
956
957 /*
958 * If we have vc_maxmux requests outstanding,
959 * arrange for _waitrq to call _sendall as
960 * requests are completed.
961 */
962 vcp->iod_muxfull =
963 (muxcnt < vcp->vc_maxmux) ? 0 : 1;
964
965 sema_v(&vcp->vc_sendlock);
966 rw_exit(&vcp->iod_rqlock);
967 }
968
969 int
970 smb_iod_vc_work(struct smb_vc *vcp, cred_t *cr)
971 {
972 struct file *fp = NULL;
973 int err = 0;
974
975 /*
976 * This is called by the one-and-only
977 * IOD thread for this VC.
978 */
979 ASSERT(vcp->iod_thr == curthread);
980
981 /*
982 * Get the network transport file pointer,
983 * and "loan" it to our transport module.
984 */
985 if ((fp = getf(vcp->vc_tran_fd)) == NULL) {
986 err = EBADF;
987 goto out;
988 }
989 if ((err = SMB_TRAN_LOAN_FP(vcp, fp, cr)) != 0)
990 goto out;
991
992 /*
993 * In case of reconnect, tell any enqueued requests
994 * they can GO!
995 */
996 SMB_VC_LOCK(vcp);
997 vcp->vc_genid++; /* possibly new connection */
998 smb_iod_newstate(vcp, SMBIOD_ST_VCACTIVE);
999 cv_broadcast(&vcp->vc_statechg);
1000 SMB_VC_UNLOCK(vcp);
1001
1002 /*
1003 * The above cv_broadcast should be sufficient to
1004 * get requests going again.
1005 *
1006 * If we have a callback function, run it.
1007 * Was: smb_iod_notify_connected()
1008 */
1009 if (fscb && fscb->fscb_connect)
1010 smb_vc_walkshares(vcp, fscb->fscb_connect);
1011
1012 /*
1013 * Run the "reader" loop.
1014 */
1015 err = smb_iod_recvall(vcp);
1016
1017 /*
1018 * The reader loop returned, so we must have a
1019 * new state. (disconnected or reconnecting)
1020 *
1021 * Notify shares of the disconnect.
1022 * Was: smb_iod_notify_disconnect()
1023 */
1024 smb_vc_walkshares(vcp, smb_iod_share_disconnected);
1025
1026 /*
1027 * The reader loop function returns only when
1028 * there's been an error on the connection, or
1029 * this VC has no more references. It also
1030 * updates the state before it returns.
1031 *
1032 * Tell any requests to give up or restart.
1033 */
1034 smb_iod_invrq(vcp);
1035
1036 out:
1037 /* Recall the file descriptor loan. */
1038 (void) SMB_TRAN_LOAN_FP(vcp, NULL, cr);
1039 if (fp != NULL) {
1040 releasef(vcp->vc_tran_fd);
1041 }
1042
1043 return (err);
1044 }
1045
1046 /*
1047 * Wait around for someone to ask to use this VC.
1048 * If the VC has only the IOD reference, then
1049 * wait only a minute or so, then drop it.
1050 */
1051 int
1052 smb_iod_vc_idle(struct smb_vc *vcp)
1053 {
1054 clock_t tr, delta = SEC_TO_TICK(15);
1055 int err = 0;
1056
1057 /*
1058 * This is called by the one-and-only
1059 * IOD thread for this VC.
1060 */
1061 ASSERT(vcp->iod_thr == curthread);
1062
1063 SMB_VC_LOCK(vcp);
1064 while (vcp->vc_state == SMBIOD_ST_IDLE) {
1065 tr = cv_reltimedwait_sig(&vcp->iod_idle, &vcp->vc_lock,
1066 delta, TR_CLOCK_TICK);
1067 if (tr == 0) {
1068 err = EINTR;
1069 break;
1070 }
1071 if (tr < 0) {
1072 /* timeout */
1073 if (vcp->vc_co.co_usecount == 1) {
1074 /* Let this IOD terminate. */
1075 smb_iod_newstate(vcp, SMBIOD_ST_DEAD);
1076 /* nobody to cv_broadcast */
1077 break;
1078 }
1079 }
1080 }
1081 SMB_VC_UNLOCK(vcp);
1082
1083 return (err);
1084 }
1085
1086 /*
1087 * After a failed reconnect attempt, smbiod will
1088 * call this to make current requests error out.
1089 */
1090 int
1091 smb_iod_vc_rcfail(struct smb_vc *vcp)
1092 {
1093 clock_t tr;
1094 int err = 0;
1095
1096 /*
1097 * This is called by the one-and-only
1098 * IOD thread for this VC.
1099 */
1100 ASSERT(vcp->iod_thr == curthread);
1101
1102 if (vcp->vc_state != SMBIOD_ST_RECONNECT)
1103 return (EINVAL);
1104
1105 SMB_VC_LOCK(vcp);
1106
1107 smb_iod_newstate(vcp, SMBIOD_ST_RCFAILED);
1108 cv_broadcast(&vcp->vc_statechg);
1109
1110 /*
1111 * Short wait here for two reasons:
1112 * (1) Give requests a chance to error out.
1113 * (2) Prevent immediate retry.
1114 */
1115 tr = cv_reltimedwait_sig(&vcp->iod_idle, &vcp->vc_lock,
1116 SEC_TO_TICK(5), TR_CLOCK_TICK);
1117 if (tr == 0)
1118 err = EINTR;
1119
1120 smb_iod_newstate(vcp, SMBIOD_ST_IDLE);
1121 cv_broadcast(&vcp->vc_statechg);
1122
1123 SMB_VC_UNLOCK(vcp);
1124
1125 return (err);
1126 }
1127
1128 /*
1129 * Ask the IOD to reconnect (if not already underway)
1130 * then wait for the reconnect to finish.
1131 */
1132 int
1133 smb_iod_reconnect(struct smb_vc *vcp)
1134 {
1135 int err = 0, rv;
1136
1137 SMB_VC_LOCK(vcp);
1138 again:
1139 switch (vcp->vc_state) {
1140
1141 case SMBIOD_ST_IDLE:
1142 smb_iod_newstate(vcp, SMBIOD_ST_RECONNECT);
1143 cv_signal(&vcp->iod_idle);
1144 /* FALLTHROUGH */
1145
1146 case SMBIOD_ST_RECONNECT:
1147 rv = cv_wait_sig(&vcp->vc_statechg, &vcp->vc_lock);
1148 if (rv == 0) {
1149 err = EINTR;
1150 break;
1151 }
1152 goto again;
1153
1154 case SMBIOD_ST_VCACTIVE:
1155 err = 0; /* success! */
1156 break;
1157
1158 case SMBIOD_ST_RCFAILED:
1159 case SMBIOD_ST_DEAD:
1160 default:
1161 err = ENOTCONN;
1162 break;
1163 }
1164
1165 SMB_VC_UNLOCK(vcp);
1166 return (err);
1167 }
18 * without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 * SUCH DAMAGE.
31 *
32 * $Id: smb_iod.c,v 1.32 2005/02/12 00:17:09 lindak Exp $
33 */
34
35 /*
36 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
37 * Use is subject to license terms.
38 *
39 * Portions Copyright (C) 2001 - 2013 Apple Inc. All rights reserved.
40 * Copyright 2018 Nexenta Systems, Inc. All rights reserved.
41 */
42
43 #ifdef DEBUG
44 /* See sys/queue.h */
45 #define QUEUEDEBUG 1
46 #endif
47
48 #include <sys/param.h>
49 #include <sys/systm.h>
50 #include <sys/atomic.h>
51 #include <sys/proc.h>
52 #include <sys/thread.h>
53 #include <sys/file.h>
54 #include <sys/kmem.h>
55 #include <sys/unistd.h>
56 #include <sys/mount.h>
57 #include <sys/vnode.h>
58 #include <sys/types.h>
59 #include <sys/ddi.h>
60 #include <sys/sunddi.h>
61 #include <sys/stream.h>
62 #include <sys/strsun.h>
63 #include <sys/time.h>
64 #include <sys/class.h>
65 #include <sys/disp.h>
66 #include <sys/cmn_err.h>
67 #include <sys/zone.h>
68 #include <sys/sdt.h>
69
70 #include <netsmb/smb_osdep.h>
71
72 #include <netsmb/smb.h>
73 #include <netsmb/smb2.h>
74 #include <netsmb/smb_conn.h>
75 #include <netsmb/smb_rq.h>
76 #include <netsmb/smb2_rq.h>
77 #include <netsmb/smb_subr.h>
78 #include <netsmb/smb_tran.h>
79 #include <netsmb/smb_trantcp.h>
80
81 /*
82 * SMB messages are up to 64K. Let's leave room for two.
83 * If we negotiate up to SMB2, increase these. XXX todo
84 */
85 static int smb_tcpsndbuf = 0x20000;
86 static int smb_tcprcvbuf = 0x20000;
87 static int smb_connect_timeout = 10; /* seconds */
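/*
 * These are plain global tunables.  A sketch of how one might
 * override a default (assuming the usual module name "nsmb")
 * is an /etc/system line such as:
 *
 *	set nsmb:smb_tcpsndbuf = 0x40000
 */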
88
89 static int smb1_iod_process(smb_vc_t *, mblk_t *);
90 static int smb2_iod_process(smb_vc_t *, mblk_t *);
91 static int smb_iod_send_echo(smb_vc_t *, cred_t *cr);
92 static int smb_iod_logoff(struct smb_vc *vcp, cred_t *cr);
93
94 /*
95 * This is set/cleared when smbfs loads/unloads
96 * No locks should be necessary, because smbfs
97 * can't unload until all the mounts are gone.
98 */
99 static smb_fscb_t *fscb;
100 void
101 smb_fscb_set(smb_fscb_t *cb)
102 {
103 fscb = cb;
104 }
105
106 static void
107 smb_iod_share_disconnected(smb_share_t *ssp)
108 {
109
110 smb_share_invalidate(ssp);
111
112 /*
113 * This is the only fscb hook smbfs currently uses.
114 * Replaces smbfs_dead() from Darwin.
115 */
116 if (fscb && fscb->fscb_disconn) {
117 fscb->fscb_disconn(ssp);
118 }
119 }
120
121 /*
122 * State changes are important and infrequent.
123 * Make them easily observable via dtrace.
124 */
125 void
126 smb_iod_newstate(struct smb_vc *vcp, int state)
127 {
128 vcp->vc_state = state;
129 }
130
131 /* Lock Held version of the next function. */
132 static inline void
133 smb_iod_rqprocessed_LH(
134 struct smb_rq *rqp,
135 int error,
136 int flags)
137 {
138 rqp->sr_flags |= flags;
139 rqp->sr_lerror = error;
140 rqp->sr_rpgen++;
141 rqp->sr_state = SMBRQ_NOTIFIED;
142 cv_broadcast(&rqp->sr_cond);
143 }
144
145 static void
146 smb_iod_rqprocessed(
147 struct smb_rq *rqp,
148 int error,
149 int flags)
150 {
151
152 SMBRQ_LOCK(rqp);
153 smb_iod_rqprocessed_LH(rqp, error, flags);
154 SMBRQ_UNLOCK(rqp);
155 }
156
157 static void
158 smb_iod_invrq(struct smb_vc *vcp)
159 {
160 struct smb_rq *rqp;
161
162 /*
163 * Invalidate all outstanding requests for this connection
164 * Also wake up iod_muxwait waiters.
165 */
166 rw_enter(&vcp->iod_rqlock, RW_READER);
167 TAILQ_FOREACH(rqp, &vcp->iod_rqlist, sr_link) {
168 smb_iod_rqprocessed(rqp, ENOTCONN, SMBR_RESTART);
169 }
170 rw_exit(&vcp->iod_rqlock);
171 cv_broadcast(&vcp->iod_muxwait);
172 }
173
174 /*
175 * Called by smb_vc_rele/smb_vc_kill on last ref, and by
176 * the driver close function if the IOD closes its minor.
177 * In those cases, the caller should be the IOD thread.
178 *
179 * Forcibly kill the connection.
180 */
181 void
182 smb_iod_disconnect(struct smb_vc *vcp)
183 {
184
185 /*
186 * Inform everyone of the state change.
187 */
188 SMB_VC_LOCK(vcp);
189 if (vcp->vc_state != SMBIOD_ST_DEAD) {
190 smb_iod_newstate(vcp, SMBIOD_ST_DEAD);
191 cv_broadcast(&vcp->vc_statechg);
192 }
193 SMB_VC_UNLOCK(vcp);
194
195 SMB_TRAN_DISCONNECT(vcp);
196 }
197
198 /*
199 * Send one request.
200 *
201 * SMB1 only
202 *
203 * Called by _addrq (for internal requests)
204 * and _sendall (via _addrq, _multirq, _waitrq)
205 * Errors are reported via the smb_rq, using:
206 * smb_iod_rqprocessed(rqp, ...)
207 */
208 static void
209 smb1_iod_sendrq(struct smb_rq *rqp)
210 {
211 struct smb_vc *vcp = rqp->sr_vc;
212 mblk_t *m;
213 int error;
214
215 ASSERT(vcp);
216 ASSERT(RW_WRITE_HELD(&vcp->iod_rqlock));
217 ASSERT((vcp->vc_flags & SMBV_SMB2) == 0);
218
219 /*
220 * Internal requests are allowed in any state;
221 * otherwise should be active.
222 */
223 if ((rqp->sr_flags & SMBR_INTERNAL) == 0 &&
224 vcp->vc_state != SMBIOD_ST_VCACTIVE) {
225 SMBIODEBUG("bad vc_state=%d\n", vcp->vc_state);
226 smb_iod_rqprocessed(rqp, ENOTCONN, SMBR_RESTART);
227 return;
228 }
229
230 /*
231 * Overwrite the SMB header with the assigned MID and
232 * (if we're signing) sign it.
233 */
234 smb_rq_fillhdr(rqp);
235 if (rqp->sr_rqflags2 & SMB_FLAGS2_SECURITY_SIGNATURE) {
236 smb_rq_sign(rqp);
237 }
238
239 /*
240 * The transport send consumes the message and we'd
241 * prefer to keep a copy, so dupmsg() before sending.
242 */
243 m = dupmsg(rqp->sr_rq.mb_top);
244 if (m == NULL) {
245 error = ENOBUFS;
246 goto fatal;
247 }
248
249 #ifdef DTRACE_PROBE2
250 DTRACE_PROBE2(iod_sendrq,
251 (smb_rq_t *), rqp, (mblk_t *), m);
252 #endif
253
254 error = SMB_TRAN_SEND(vcp, m);
255 m = 0; /* consumed by SEND */
256
257 rqp->sr_lerror = error;
258 if (error == 0) {
259 SMBRQ_LOCK(rqp);
260 rqp->sr_flags |= SMBR_SENT;
261 rqp->sr_state = SMBRQ_SENT;
262 SMBRQ_UNLOCK(rqp);
263 return;
264 }
265 /*
266 * Transport send returned an error.
267 * Was it a fatal one?
268 */
269 if (SMB_TRAN_FATAL(vcp, error)) {
270 /*
271 * No further attempts should be made
272 */
273 fatal:
274 SMBSDEBUG("TRAN_SEND returned fatal error %d\n", error);
275 smb_iod_rqprocessed(rqp, error, SMBR_RESTART);
276 return;
277 }
278 }
279
280 /*
281 * Send one request.
282 *
283 * SMB2 only
284 *
285 * Called by _addrq (for internal requests)
286 * and _sendall (via _addrq, _multirq, _waitrq)
287 * Errors are reported via the smb_rq, using:
288 * smb_iod_rqprocessed(rqp, ...)
289 */
290 static void
291 smb2_iod_sendrq(struct smb_rq *rqp)
292 {
293 struct smb_rq *c_rqp; /* compound */
294 struct smb_vc *vcp = rqp->sr_vc;
295 mblk_t *top_m;
296 mblk_t *cur_m;
297 int error;
298
299 ASSERT(vcp);
300 ASSERT(RW_WRITE_HELD(&vcp->iod_rqlock));
301 ASSERT((vcp->vc_flags & SMBV_SMB2) != 0);
302
303 /*
304 * Internal requests are allowed in any state;
305 * otherwise should be active.
306 */
307 if ((rqp->sr_flags & SMBR_INTERNAL) == 0 &&
308 vcp->vc_state != SMBIOD_ST_VCACTIVE) {
309 SMBIODEBUG("bad vc_state=%d\n", vcp->vc_state);
310 smb_iod_rqprocessed(rqp, ENOTCONN, SMBR_RESTART);
311 return;
312 }
313
314 /*
315 * Overwrite the SMB header with the assigned MID and
316 * (if we're signing) sign it. If there are compounded
317 * requests after the top one, do those too.
318 */
319 smb2_rq_fillhdr(rqp);
320 if (rqp->sr2_rqflags & SMB2_FLAGS_SIGNED) {
321 smb2_rq_sign(rqp);
322 }
323 c_rqp = rqp->sr2_compound_next;
324 while (c_rqp != NULL) {
325 smb2_rq_fillhdr(c_rqp);
326 if (c_rqp->sr2_rqflags & SMB2_FLAGS_SIGNED) {
327 smb2_rq_sign(c_rqp);
328 }
329 c_rqp = c_rqp->sr2_compound_next;
330 }
331
332 /*
333 * The transport send consumes the message and we'd
334 * prefer to keep a copy, so dupmsg() before sending.
335 * We also need this to build the compound message
336 * that we'll actually send. The message offset at
337 * the start of each compounded message should be
338 * eight-byte aligned. The caller preparing the
339 * compounded request has to take care of that
340 * before we get here and sign messages etc.
341 */
342 top_m = dupmsg(rqp->sr_rq.mb_top);
343 if (top_m == NULL) {
344 error = ENOBUFS;
345 goto fatal;
346 }
347 c_rqp = rqp->sr2_compound_next;
348 while (c_rqp != NULL) {
349 size_t len = msgdsize(top_m);
350 ASSERT((len & 7) == 0);
351 cur_m = dupmsg(c_rqp->sr_rq.mb_top);
352 if (cur_m == NULL) {
353 freemsg(top_m);
354 error = ENOBUFS;
355 goto fatal;
356 }
357 linkb(top_m, cur_m);
358 }
359
360 DTRACE_PROBE2(iod_sendrq,
361 (smb_rq_t *), rqp, (mblk_t *), top_m);
362
363 error = SMB_TRAN_SEND(vcp, top_m);
364 top_m = 0; /* consumed by SEND */
365
366 rqp->sr_lerror = error;
367 if (error == 0) {
368 SMBRQ_LOCK(rqp);
369 rqp->sr_flags |= SMBR_SENT;
370 rqp->sr_state = SMBRQ_SENT;
371 SMBRQ_UNLOCK(rqp);
372 return;
373 }
374 /*
375 * Transport send returned an error.
376 * Was it a fatal one?
377 */
378 if (SMB_TRAN_FATAL(vcp, error)) {
379 /*
380 * No further attempts should be made
381 */
382 fatal:
383 SMBSDEBUG("TRAN_SEND returned fatal error %d\n", error);
384 smb_iod_rqprocessed(rqp, error, SMBR_RESTART);
385 return;
386 }
387 }
388
389 /*
390 * Receive one NetBIOS (or NBT over TCP) message. If none have arrived,
391 * wait up to SMB_NBTIMO (15 sec.) for one to arrive, and then if still
392 * none have arrived, return ETIME.
393 */
394 static int
395 smb_iod_recvmsg(struct smb_vc *vcp, mblk_t **mpp)
396 {
397 mblk_t *m;
398 int error;
399
400 top:
401 m = NULL;
402 error = SMB_TRAN_RECV(vcp, &m);
403 if (error == EAGAIN)
404 goto top;
405 if (error)
406 return (error);
407 ASSERT(m != NULL);
408
409 m = m_pullup(m, 4);
410 if (m == NULL) {
411 return (ENOSR);
412 }
413
414 *mpp = m;
415 return (0);
416 }
417
418 /*
419 * How long should we keep around an unused VC (connection)?
420 * There's usually a good chance connections will be reused,
421 * so the default is to keep such connections for 5 min.
422 */
423 #ifdef DEBUG
424 int smb_iod_idle_keep_time = 60; /* seconds */
425 #else
426 int smb_iod_idle_keep_time = 300; /* seconds */
427 #endif
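/*
 * Example (sketch): the keep time can also be changed on a live
 * system with mdb, e.g. to ten minutes:
 *
 *	echo 'smb_iod_idle_keep_time/W 0t600' | mdb -kw
 */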
428
429 /*
430 * Process incoming packets
431 *
432 * This is the "reader" loop, run by the IOD thread. Normally we're in
433 * state SMBIOD_ST_VCACTIVE here, but during reconnect we're called in
434 * other states with poll==TRUE.
435 *
436 * A non-zero error return here causes the IOD work loop to terminate.
437 */
438 int
439 smb_iod_recvall(struct smb_vc *vcp, boolean_t poll)
440 {
441 mblk_t *m;
442 int error = 0;
443 int etime_idle = 0; /* How many 15 sec. "ticks" idle. */
444 int etime_count = 0; /* ... and when we have requests. */
445
446 for (;;) {
447 /*
448 * Check whether someone "killed" this VC,
449 * or is asking the IOD to terminate.
450 */
451 if (vcp->iod_flags & SMBIOD_SHUTDOWN) {
452 SMBIODEBUG("SHUTDOWN set\n");
453 /* This IOD thread will terminate. */
454 SMB_VC_LOCK(vcp);
455 smb_iod_newstate(vcp, SMBIOD_ST_DEAD);
456 cv_broadcast(&vcp->vc_statechg);
457 SMB_VC_UNLOCK(vcp);
458 error = EINTR;
459 break;
460 }
461
462 m = NULL;
463 error = smb_iod_recvmsg(vcp, &m);
464
465 /*
466 * Internal requests (reconnecting) call this in a loop
467 * (with poll==TRUE) until the request completes.
468 */
469 if (error == ETIME && poll)
470 break;
471
472 if (error == ETIME &&
473 vcp->iod_rqlist.tqh_first != NULL) {
474
475 /*
476 * Nothing received and requests waiting.
477 * Increment etime_count. If we were idle,
478 * skip the 1st tick, because we started
479 * waiting before there were any requests.
480 */
481 if (etime_idle != 0) {
482 etime_idle = 0;
483 } else if (etime_count < INT16_MAX) {
484 etime_count++;
485 }
486
487 /*
488 * ETIME and requests in the queue.
489 * The first time this happens (at 15 sec.),
490 * log an error (just once).
491 */
492 if (etime_count > 0 &&
493 vcp->iod_noresp == B_FALSE) {
494 vcp->iod_noresp = B_TRUE;
495 zprintf(vcp->vc_zoneid,
496 "SMB server %s not responding\n",
497 vcp->vc_srvname);
498 }
499 /*
500 * At 30 sec. try sending an echo, which
501 * should cause some response.
502 */
503 if (etime_count == 2) {
504 SMBIODEBUG("send echo\n");
505 (void) smb_iod_send_echo(vcp, CRED());
506 }
507 /*
508 * At 45 sec. give up on the connection
509 * and try to reconnect.
510 */
511 if (etime_count == 3) {
512 SMB_VC_LOCK(vcp);
513 smb_iod_newstate(vcp, SMBIOD_ST_RECONNECT);
514 SMB_VC_UNLOCK(vcp);
515 SMB_TRAN_DISCONNECT(vcp);
516 break;
517 }
518 continue;
519 } /* ETIME and requests in the queue */
520
521 if (error == ETIME) {
522 /*
523 * Nothing received and no active requests.
524 *
525 * If we've received nothing from the server for
526 * smb_iod_idle_keep_time seconds, and the IOD
527 * thread holds the last reference to this VC,
528 * move to state IDLE and drop the TCP session.
529 * The IDLE handler will destroy the VC unless
530 * vc_state goes to RECONNECT before then.
531 */
532 etime_count = 0;
533 if (etime_idle < INT16_MAX)
534 etime_idle++;
535 if ((etime_idle * SMB_NBTIMO) <
536 smb_iod_idle_keep_time)
537 continue;
538 SMB_VC_LOCK(vcp);
539 if (vcp->vc_co.co_usecount == 1) {
540 smb_iod_newstate(vcp, SMBIOD_ST_IDLE);
541 SMB_VC_UNLOCK(vcp);
542 SMBIODEBUG("logoff & disconnect\n");
543 (void) smb_iod_logoff(vcp, CRED());
544 SMB_TRAN_DISCONNECT(vcp);
545 error = 0;
546 break;
547 }
548 SMB_VC_UNLOCK(vcp);
549 continue;
550 } /* error == ETIME */
551
552 if (error) {
553 /*
554 * The recv above returned an error indicating
555 * that our TCP session is no longer usable.
556 * Disconnect the session and get ready to
557 * reconnect. If we have pending requests,
558 * move to state reconnect immediately;
559 * otherwise move to state IDLE until a
560 * request is issued on this VC.
561 */
562 SMB_VC_LOCK(vcp);
563 if (vcp->iod_rqlist.tqh_first != NULL)
564 smb_iod_newstate(vcp, SMBIOD_ST_RECONNECT);
565 else
566 smb_iod_newstate(vcp, SMBIOD_ST_IDLE);
567 cv_broadcast(&vcp->vc_statechg);
568 SMB_VC_UNLOCK(vcp);
569 SMB_TRAN_DISCONNECT(vcp);
570 break;
571 }
572
573 /*
574 * Received something. Yea!
575 */
576 etime_count = 0;
577 etime_idle = 0;
578
579 /*
580 * If we just completed a reconnect after logging
581 * "SMB server %s not responding" then log OK now.
582 */
583 if (vcp->iod_noresp) {
584 vcp->iod_noresp = B_FALSE;
585 zprintf(vcp->vc_zoneid, "SMB server %s OK\n",
586 vcp->vc_srvname);
587 }
588
589 if ((vcp->vc_flags & SMBV_SMB2) != 0) {
590 error = smb2_iod_process(vcp, m);
591 } else {
592 error = smb1_iod_process(vcp, m);
593 }
594
595 /*
596 * Reconnect calls this in a loop with poll==TRUE.
597 * We've received a response, so break now.
598 */
599 if (poll) {
600 error = 0;
601 break;
602 }
603 }
604
605 return (error);
606 }
607
608 /*
609 * Have what should be an SMB1 reply. Check and parse the header,
610 * then use the message ID to find the request this belongs to and
611 * post it on that request.
612 *
613 * Returns an error if the reader should give up.
614 * To be safe, error if we read garbage.
615 */
616 static int
617 smb1_iod_process(smb_vc_t *vcp, mblk_t *m)
618 {
619 struct mdchain md;
620 struct smb_rq *rqp;
621 uint8_t cmd, sig[4];
622 uint16_t mid;
623 int err, skip;
624
625 m = m_pullup(m, SMB_HDRLEN);
626 if (m == NULL)
627 return (ENOMEM);
628
629 /*
630 * Note: Intentionally do NOT md_done(&md)
631 * because that would free the message and
632 * we just want to peek here.
633 */
634 md_initm(&md, m);
635
636 /*
637 * Check the SMB header version and get the MID.
638 *
639 * The header version should be SMB1 except when we're
640 * doing SMB1-to-SMB2 negotiation, in which case we may
641 * see an SMB2 header with message ID=0 (only allowed in
642 * vc_state == SMBIOD_ST_CONNECTED -- negotiating).
643 */
644 err = md_get_mem(&md, sig, 4, MB_MSYSTEM);
645 if (err)
646 return (err);
647 if (sig[1] != 'S' || sig[2] != 'M' || sig[3] != 'B') {
648 goto bad_hdr;
649 }
650 switch (sig[0]) {
651 case SMB_HDR_V1: /* SMB1 */
652 md_get_uint8(&md, &cmd);
653 /* Skip to and get the MID. At offset 5 now. */
654 skip = SMB_HDR_OFF_MID - 5;
655 md_get_mem(&md, NULL, skip, MB_MSYSTEM);
656 err = md_get_uint16le(&md, &mid);
657 if (err)
658 return (err);
659 break;
660 case SMB_HDR_V2: /* SMB2+ */
661 if (vcp->vc_state == SMBIOD_ST_CONNECTED) {
662 /*
663 * No need to look, can only be
664 * MID=0, cmd=negotiate
665 */
666 cmd = SMB_COM_NEGOTIATE;
667 mid = 0;
668 break;
669 }
670 /* FALLTHROUGH */
671 bad_hdr:
672 default:
673 SMBIODEBUG("Bad SMB hdr\n");
674 m_freem(m);
675 return (EPROTO);
676 }
677
678 /*
679 * Find the request and post the reply
680 */
681 rw_enter(&vcp->iod_rqlock, RW_READER);
682 TAILQ_FOREACH(rqp, &vcp->iod_rqlist, sr_link) {
683
684 if (rqp->sr_mid != mid)
685 continue;
686
687 DTRACE_PROBE2(iod_post_reply,
688 (smb_rq_t *), rqp, (mblk_t *), m);
689 m_dumpm(m);
690
691 SMBRQ_LOCK(rqp);
692 if (rqp->sr_rp.md_top == NULL) {
693 md_initm(&rqp->sr_rp, m);
694 } else {
695 if (rqp->sr_flags & SMBR_MULTIPACKET) {
696 md_append_record(&rqp->sr_rp, m);
697 } else {
698 SMBRQ_UNLOCK(rqp);
699 rqp = NULL;
700 break;
701 }
702 }
703 smb_iod_rqprocessed_LH(rqp, 0, 0);
704 SMBRQ_UNLOCK(rqp);
705 break;
706 }
707 rw_exit(&vcp->iod_rqlock);
708
709 if (rqp == NULL) {
710 if (cmd != SMB_COM_ECHO) {
711 SMBSDEBUG("drop resp: MID 0x%04x\n", (uint_t)mid);
712 }
713 m_freem(m);
714 /*
715 * Keep going. It's possible this reply came
716 * after the request timed out and went away.
717 */
718 }
719 return (0);
720 }
721
722 /*
723 * Have what should be an SMB2 reply. Check and parse the header,
724 * then use the message ID to find the request this belongs to and
725 * post it on that request.
726 *
727 * We also want to apply any credit grant in this reply now,
728 * rather than waiting for the owner to wake up.
729 */
730 static int
731 smb2_iod_process(smb_vc_t *vcp, mblk_t *m)
732 {
733 struct mdchain md;
734 struct smb_rq *rqp;
735 uint8_t sig[4];
736 mblk_t *next_m = NULL;
737 uint64_t message_id, async_id;
738 uint32_t flags, next_cmd_off, status;
739 uint16_t command, credits_granted;
740 int err;
741
742 top:
743 m = m_pullup(m, SMB2_HDRLEN);
744 if (m == NULL)
745 return (ENOMEM);
746
747 /*
748 * Note: Intentionally do NOT md_done(&md)
749 * because that would free the message and
750 * we just want to peek here.
751 */
752 md_initm(&md, m);
753
754 /*
755 * Check the SMB header. Must be SMB2
756 * (and later, could be SMB3 encrypted)
757 */
758 err = md_get_mem(&md, sig, 4, MB_MSYSTEM);
759 if (err)
760 return (err);
761 if (sig[1] != 'S' || sig[2] != 'M' || sig[3] != 'B') {
762 goto bad_hdr;
763 }
764 switch (sig[0]) {
765 case SMB_HDR_V2:
766 break;
767 case SMB_HDR_V3E:
768 /*
769 * Todo: If encryption enabled, decrypt the message
770 * and restart processing on the cleartext.
771 */
772 /* FALLTHROUGH */
773 bad_hdr:
774 default:
775 SMBIODEBUG("Bad SMB2 hdr\n");
776 m_freem(m);
777 return (EPROTO);
778 }
779
780 /*
781 * Parse the rest of the SMB2 header,
782 * skipping what we don't need.
783 */
784 md_get_uint32le(&md, NULL); /* length, credit_charge */
785 md_get_uint32le(&md, &status);
786 md_get_uint16le(&md, &command);
787 md_get_uint16le(&md, &credits_granted);
788 md_get_uint32le(&md, &flags);
789 md_get_uint32le(&md, &next_cmd_off);
790 md_get_uint64le(&md, &message_id);
791 if (flags & SMB2_FLAGS_ASYNC_COMMAND) {
792 md_get_uint64le(&md, &async_id);
793 } else {
794 /* PID, TID (not needed) */
795 async_id = 0;
796 }
797
798 /*
799 * If this is a compound reply, split it.
800 * Next must be 8-byte aligned.
801 */
802 if (next_cmd_off != 0) {
803 if ((next_cmd_off & 7) != 0)
804 SMBIODEBUG("Misaligned next cmd\n");
805 else
806 next_m = m_split(m, next_cmd_off, 1);
807 }
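/*
 * For reference, the compound reply being split here is laid
 * out as below, with each part 8-byte aligned (the check
 * above):
 *
 *	| SMB2 hdr | body | pad | SMB2 hdr | body | ...
 *	|<---- next_cmd_off ---->|
 *
 * m_split() leaves the first part in m and the remainder in
 * next_m, which is handled by looping back to "top".
 */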
808
809 /*
810 * Apply the credit grant
811 */
812 rw_enter(&vcp->iod_rqlock, RW_WRITER);
813 vcp->vc2_limit_message_id += credits_granted;
814
815 /*
816 * Find the request and post the reply
817 */
818 rw_downgrade(&vcp->iod_rqlock);
819 TAILQ_FOREACH(rqp, &vcp->iod_rqlist, sr_link) {
820
821 if (rqp->sr2_messageid != message_id)
822 continue;
823
824 DTRACE_PROBE2(iod_post_reply,
825 (smb_rq_t *), rqp, (mblk_t *), m);
826 m_dumpm(m);
827
828 /*
829 * If this is an interim response, just save the
830 * async ID but don't wake up the request.
831 * Don't need SMBRQ_LOCK for this.
832 */
833 if (status == NT_STATUS_PENDING && async_id != 0) {
834 rqp->sr2_rspasyncid = async_id;
835 m_freem(m);
836 break;
837 }
838
839 SMBRQ_LOCK(rqp);
840 if (rqp->sr_rp.md_top == NULL) {
841 md_initm(&rqp->sr_rp, m);
842 } else {
843 SMBRQ_UNLOCK(rqp);
844 rqp = NULL;
845 break;
846 }
847 smb_iod_rqprocessed_LH(rqp, 0, 0);
848 SMBRQ_UNLOCK(rqp);
849 break;
850 }
851 rw_exit(&vcp->iod_rqlock);
852
853 if (rqp == NULL) {
854 if (command != SMB2_ECHO) {
855 SMBSDEBUG("drop resp: MID %lld\n",
856 (long long)message_id);
857 }
858 m_freem(m);
859 /*
860 * Keep going. It's possible this reply came
861 * after the request timed out and went away.
862 */
863 }
864
865 /*
866 * If we split a compound reply, continue with the
867 * next part of the compound.
868 */
869 if (next_m != NULL) {
870 m = next_m;
871 goto top;
872 }
873
874 return (0);
875 }
876
877 /*
878 * The IOD receiver thread has requests pending and
879 * has not received anything in a while. Try to
880 * send an SMB echo request. It's tricky to do a
881 * send from the IOD thread because we can't block.
882 *
883 * Using tmo=SMBNOREPLYWAIT in the request
884 * so smb_rq_reply will skip smb_iod_waitrq.
885 * The smb_smb_echo call uses SMBR_INTERNAL
886 * to avoid calling smb_iod_sendall().
887 */
888 static int
889 smb_iod_send_echo(smb_vc_t *vcp, cred_t *cr)
890 {
891 smb_cred_t scred;
892 int err, tmo = SMBNOREPLYWAIT;
893
894 ASSERT(vcp->iod_thr == curthread);
895
896 smb_credinit(&scred, cr);
897 if ((vcp->vc_flags & SMBV_SMB2) != 0) {
898 err = smb2_smb_echo(vcp, &scred, tmo);
899 } else {
900 err = smb_smb_echo(vcp, &scred, tmo);
901 }
902 smb_credrele(&scred);
903 return (err);
904 }
905
906 /*
907 * Helper for smb1_iod_addrq, smb2_iod_addrq
908 * Returns zero if interrupted, else 1.
909 */
910 static int
911 smb_iod_muxwait(smb_vc_t *vcp, boolean_t sig_ok)
912 {
913 int rc;
914
915 SMB_VC_LOCK(vcp);
916 vcp->iod_muxwant++;
917 if (sig_ok) {
918 rc = cv_wait_sig(&vcp->iod_muxwait, &vcp->vc_lock);
919 } else {
920 cv_wait(&vcp->iod_muxwait, &vcp->vc_lock);
921 rc = 1;
922 }
923 vcp->iod_muxwant--;
924 SMB_VC_UNLOCK(vcp);
925
926 return (rc);
927 }
928
929 /*
930 * Place request in the queue, and send it.
931 * Called with no locks held.
932 *
933 * Called for SMB1 only
934 *
935 * The logic for how we limit active requests differs between
936 * SMB1 and SMB2. With SMB1 it's a simple counter, iod_muxcnt.
937 */
938 int
939 smb1_iod_addrq(struct smb_rq *rqp)
940 {
941 struct smb_vc *vcp = rqp->sr_vc;
942 uint16_t need;
943 boolean_t sig_ok =
944 (rqp->sr_flags & SMBR_NOINTR_SEND) == 0;
945
946 ASSERT(rqp->sr_cred);
947 ASSERT((vcp->vc_flags & SMBV_SMB2) == 0);
948
949 rqp->sr_owner = curthread;
950
951 rw_enter(&vcp->iod_rqlock, RW_WRITER);
952
953 recheck:
954 /*
955 * Internal requests can be added in any state,
956 * but normal requests only in state active.
957 */
958 if ((rqp->sr_flags & SMBR_INTERNAL) == 0 &&
959 vcp->vc_state != SMBIOD_ST_VCACTIVE) {
960 SMBIODEBUG("bad vc_state=%d\n", vcp->vc_state);
961 rw_exit(&vcp->iod_rqlock);
962 return (ENOTCONN);
963 }
964
965 /*
966 * If we're at the limit of active requests, block until
967 * enough requests complete so we can make ours active.
968 * Wakeup in smb_iod_removerq().
969 *
970 * Normal callers leave one slot free, so internal
971 * callers can have the last slot if needed.
972 */
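/*
 * Worked example (vc_maxmux value illustrative): with
 * vc_maxmux == 50, a normal request computes need == 2 and so
 * blocks once 49 requests are active, while an internal
 * request (e.g. an echo from the IOD) computes need == 1 and
 * may still take the 50th slot, keeping the VC serviceable.
 */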
973 need = 1;
974 if ((rqp->sr_flags & SMBR_INTERNAL) == 0)
975 need++;
976 if ((vcp->iod_muxcnt + need) > vcp->vc_maxmux) {
977 rw_exit(&vcp->iod_rqlock);
978 if (rqp->sr_flags & SMBR_INTERNAL)
979 return (EBUSY);
980 if (smb_iod_muxwait(vcp, sig_ok) == 0)
981 return (EINTR);
982 rw_enter(&vcp->iod_rqlock, RW_WRITER);
983 goto recheck;
984 }
985
986 /*
987 * Add this request to the active list and send it.
988 * (Compound request sequences exist only in SMB2;
989 * see smb2_iod_addrq, where all parts are added and
990 * then sent together by smb2_iod_sendrq.)
991 */
992 rqp->sr_mid = vcp->vc_next_mid++;
993 /* If signing, set the signing sequence numbers. */
994 if (vcp->vc_mackey != NULL && (rqp->sr_rqflags2 &
995 SMB_FLAGS2_SECURITY_SIGNATURE) != 0) {
996 rqp->sr_seqno = vcp->vc_next_seq++;
997 rqp->sr_rseqno = vcp->vc_next_seq++;
998 }
999 vcp->iod_muxcnt++;
1000 TAILQ_INSERT_TAIL(&vcp->iod_rqlist, rqp, sr_link);
1001 smb1_iod_sendrq(rqp);
1002
1003 rw_exit(&vcp->iod_rqlock);
1004 return (0);
1005 }
1006
1007 /*
1008 * Place request in the queue, and send it.
1009 * Called with no locks held.
1010 *
1011 * Called for SMB2 only.
1012 *
1013 * With SMB2 we have a range of valid message IDs, and we may
1014 * only send requests when we can assign a message ID within
1015 * the valid range. We may need to wait here for some active
1016 * request to finish (and update vc2_limit_message_id) before
1017 * we can get message IDs for our new request(s). Another
1018 * difference is that the request sequence we're waiting to
1019 * add here may require multiple message IDs, due
1020 * either to compounding or to multi-credit requests.
1021 * Therefore we need to wait for however many
1022 * message IDs are required by our request sequence.
1023 */
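/*
 * Worked example (numbers illustrative): if vc2_next_message_id
 * is 100 and vc2_limit_message_id is 110, a normal request
 * sequence with a total credit charge of 3 takes message IDs
 * 100-102 (the charge is padded by one as a reserve for
 * internal requests, so it needs 100 + 4 <= 110).  Once too few
 * IDs remain, callers block in smb_iod_muxwait, rechecking
 * after each completed request as credit grants advance the
 * limit (see smb2_iod_process).
 */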
1024 int
1025 smb2_iod_addrq(struct smb_rq *rqp)
1026 {
1027 struct smb_vc *vcp = rqp->sr_vc;
1028 struct smb_rq *c_rqp; /* compound req */
1029 uint16_t charge;
1030 boolean_t sig_ok =
1031 (rqp->sr_flags & SMBR_NOINTR_SEND) == 0;
1032
1033 ASSERT(rqp->sr_cred != NULL);
1034 ASSERT((vcp->vc_flags & SMBV_SMB2) != 0);
1035
1036 /*
1037 * Figure out the credit charges
1038 * No multi-credit messages yet.
1039 */
1040 rqp->sr2_totalcreditcharge = rqp->sr2_creditcharge;
1041 c_rqp = rqp->sr2_compound_next;
1042 while (c_rqp != NULL) {
1043 rqp->sr2_totalcreditcharge += c_rqp->sr2_creditcharge;
1044 c_rqp = c_rqp->sr2_compound_next;
1045 }
1046
1047 /*
1048 * Internal requests must not be compounded
1049 * and should use exactly one credit.
1050 */
1051 if (rqp->sr_flags & SMBR_INTERNAL) {
1052 if (rqp->sr2_compound_next != NULL) {
1053 ASSERT(0);
1054 return (EINVAL);
1055 }
1056 }
1057
1058 rqp->sr_owner = curthread;
1059
1060 rw_enter(&vcp->iod_rqlock, RW_WRITER);
1061
1062 recheck:
1063 /*
1064 * Internal requests can be added in any state,
1065 * but normal requests only in state active.
1066 */
1067 if ((rqp->sr_flags & SMBR_INTERNAL) == 0 &&
1068 vcp->vc_state != SMBIOD_ST_VCACTIVE) {
1069 SMBIODEBUG("bad vc_state=%d\n", vcp->vc_state);
1070 rw_exit(&vcp->iod_rqlock);
1071 return (ENOTCONN);
1072 }
1073
1074 /*
1075 * If we're at the limit of active requests, block until
1076 * enough requests complete so we can make ours active.
1077 * Wakeup in smb_iod_removerq().
1078 *
1079 * Normal callers leave one slot free, so internal
1080 * callers can have the last slot if needed.
1081 */
1082 charge = rqp->sr2_totalcreditcharge;
1083 if ((rqp->sr_flags & SMBR_INTERNAL) == 0)
1084 charge++;
1085 if ((vcp->vc2_next_message_id + charge) >
1086 vcp->vc2_limit_message_id) {
1087 rw_exit(&vcp->iod_rqlock);
1088 if (rqp->sr_flags & SMBR_INTERNAL)
1089 return (EBUSY);
1090 if (smb_iod_muxwait(vcp, sig_ok) == 0)
1091 return (EINTR);
1092 rw_enter(&vcp->iod_rqlock, RW_WRITER);
1093 goto recheck;
1094 }
1095
1096 /*
1097 * Add this request to the active list and send it.
1098 * For SMB2 we may have a sequence of compounded
1099 * requests, in which case we must add them all.
1100 * They're sent as a compound in smb2_iod_sendrq.
1101 */
1102
1103 rqp->sr2_messageid = vcp->vc2_next_message_id;
1104 vcp->vc2_next_message_id += rqp->sr2_creditcharge;
1105 TAILQ_INSERT_TAIL(&vcp->iod_rqlist, rqp, sr_link);
1106
1107 c_rqp = rqp->sr2_compound_next;
1108 while (c_rqp != NULL) {
1109 c_rqp->sr2_messageid = vcp->vc2_next_message_id;
1110 vcp->vc2_next_message_id += c_rqp->sr2_creditcharge;
1111 TAILQ_INSERT_TAIL(&vcp->iod_rqlist, c_rqp, sr_link);
1112 c_rqp = c_rqp->sr2_compound_next;
1113 }
1114 smb2_iod_sendrq(rqp);
1115
1116 rw_exit(&vcp->iod_rqlock);
1117 return (0);
1118 }
1119
1120 /*
1121 * Mark an SMBR_MULTIPACKET request as
1122 * needing another send. Similar to the
1123 * "normal" part of smb1_iod_addrq.
1124 * Only used by SMB1
1125 */
1126 int
1127 smb1_iod_multirq(struct smb_rq *rqp)
1128 {
1129 struct smb_vc *vcp = rqp->sr_vc;
1130
1131 ASSERT(rqp->sr_flags & SMBR_MULTIPACKET);
1132
1133 if (vcp->vc_flags & SMBV_SMB2) {
1134 ASSERT(!"SMB2?");
1135 return (EINVAL);
1136 }
1137
1138 if (rqp->sr_flags & SMBR_INTERNAL)
1139 return (EINVAL);
1140
1141 if (vcp->vc_state != SMBIOD_ST_VCACTIVE) {
1142 SMBIODEBUG("bad vc_state=%d\n", vcp->vc_state);
1143 return (ENOTCONN);
1144 }
1145
1146 rw_enter(&vcp->iod_rqlock, RW_WRITER);
1147
1148 /* Already on iod_rqlist, just reset state. */
1149 rqp->sr_state = SMBRQ_NOTSENT;
1150 smb1_iod_sendrq(rqp);
1151
1152 rw_exit(&vcp->iod_rqlock);
1153
1154 return (0);
1155 }
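/*
 * Caller-side flow this supports, sketched as an assumption about
 * the shape of an SMB1 transaction loop (not copied from a real
 * caller; more_to_send() is a hypothetical stand-in):
 */
#if 0
	error = smb1_iod_addrq(rqp);		/* first send */
	while (error == 0 && more_to_send(rqp)) {
		error = smb_iod_waitrq(rqp);	/* interim response */
		if (error == 0)
			error = smb1_iod_multirq(rqp);	/* next send */
	}
#endif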
1156
1157 /*
1158 * Remove a request from the active list, and
1159 * wake up requests waiting to go active.
1160 *
1161 * Shared by SMB1 + SMB2
1162 *
1163 * The logic for how we limit active requests differs between
1164 * SMB1 and SMB2. With SMB1 it's a simple counter ioc_muxcnt.
1165 * With SMB2 we have a range of valid message IDs, and when we
1166 * retire the oldest request we need to keep track of what is
1167 * now the oldest message ID. In both cases, after we take a
1168 * request out of the list here, we should be able to wake up
1169 * a request waiting to get in the active list.
1170 */
1171 void
1172 smb_iod_removerq(struct smb_rq *rqp)
1173 {
1174 struct smb_rq *rqp2;
1175 struct smb_vc *vcp = rqp->sr_vc;
1176 boolean_t was_head = B_FALSE;
1177
1178 rw_enter(&vcp->iod_rqlock, RW_WRITER);
1179
1180 #ifdef QUEUEDEBUG
1181 /*
1182 * Make sure we have not already removed it.
1183 * See sys/queue.h QUEUEDEBUG_TAILQ_POSTREMOVE
1184 * XXX: Don't like the constant 1 here...
1185 */
1186 ASSERT(rqp->sr_link.tqe_next != (void *)1L);
1187 #endif
1188
1189 if (TAILQ_FIRST(&vcp->iod_rqlist) == rqp)
1190 was_head = B_TRUE;
1191 TAILQ_REMOVE(&vcp->iod_rqlist, rqp, sr_link);
1192 if (vcp->vc_flags & SMBV_SMB2) {
1193 rqp2 = TAILQ_FIRST(&vcp->iod_rqlist);
1194 if (was_head && rqp2 != NULL) {
1195 /* Do we still need this? */
1196 vcp->vc2_oldest_message_id =
1197 rqp2->sr2_messageid;
1198 }
1199 } else {
1200 ASSERT(vcp->iod_muxcnt > 0);
1201 vcp->iod_muxcnt--;
1202 }
1203
1204 rw_exit(&vcp->iod_rqlock);
1205
1206 /*
1207 * If there are requests waiting for "mux" slots,
1208 * wake one.
1209 */
1210 SMB_VC_LOCK(vcp);
1211 if (vcp->iod_muxwant != 0)
1212 cv_signal(&vcp->iod_muxwait);
1213 SMB_VC_UNLOCK(vcp);
1214 }
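/*
 * For reference, a sketch of the waiter side that pairs with the
 * cv_signal() above.  The real smb_iod_muxwait() is defined
 * elsewhere in this file; this is an assumption about its shape,
 * not a copy.  A zero return means the wait was interrupted,
 * which smb2_iod_addrq maps to EINTR.
 */
#if 0
static int
smb_iod_muxwait_sketch(struct smb_vc *vcp, boolean_t sig_ok)
{
	int rc = 1;

	SMB_VC_LOCK(vcp);
	vcp->iod_muxwant++;
	if (sig_ok)
		rc = cv_wait_sig(&vcp->iod_muxwait, &vcp->vc_lock);
	else
		cv_wait(&vcp->iod_muxwait, &vcp->vc_lock);
	vcp->iod_muxwant--;
	SMB_VC_UNLOCK(vcp);

	return (rc);
}
#endif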
1215
1216 /*
1217 * Wait for a request to complete.
1218 */
1219 int
1220 smb_iod_waitrq(struct smb_rq *rqp)
1221 {
1222 struct smb_vc *vcp = rqp->sr_vc;
1223 clock_t tr, tmo1, tmo2;
1224 int error;
1225
1226 if (rqp->sr_flags & SMBR_INTERNAL) {
1227 /* XXX - Do we ever take this path now? */
1228 return (smb_iod_waitrq_int(rqp));
1229 }
1230
1231 /*
1232 * Make sure this is NOT the IOD thread,
1233 * or the wait below will stop the reader.
1234 */
1235 ASSERT(curthread != vcp->iod_thr);
1236
1237 SMBRQ_LOCK(rqp);
1238
1239 /*
1240 * The request has been sent. Now wait for the response,
1241 * with the timeout specified for this request.
1242 * Compute all the deadlines now, so we effectively
1243 * start the timer(s) after the request is sent.
1244 */
1245 if (smb_timo_notice && (smb_timo_notice < rqp->sr_timo))
1246 tmo1 = SEC_TO_TICK(smb_timo_notice);
1247 else
1248 tmo1 = 0;
1249 tmo2 = ddi_get_lbolt() + SEC_TO_TICK(rqp->sr_timo);
1250
1251 /*
1252 * As above, we don't want to allow interrupt for some
1253 * requests like open, because we could miss a successful
1254 * response and therefore "leak" a FID. Such requests
1255 * are marked SMBR_NOINTR_RECV to prevent that.
1256 *
1257 * If "slow server" warnings are enabled, wait first
1258 * for the "notice" timeout, and warn if expired.
1259 */
1260 if (tmo1 && rqp->sr_rpgen == rqp->sr_rplast) {
1261 if (rqp->sr_flags & SMBR_NOINTR_RECV)
1262 tr = cv_reltimedwait(&rqp->sr_cond,
1263 &rqp->sr_lock, tmo1, TR_CLOCK_TICK);
1264 else
1265 tr = cv_reltimedwait_sig(&rqp->sr_cond,
1266 &rqp->sr_lock, tmo1, TR_CLOCK_TICK);
1267 if (tr == 0) {
1268 error = EINTR;
1269 goto out;
1270 }
1271 if (tr < 0) {
1272 DTRACE_PROBE1(smb_iod_waitrq1,
1273 (smb_rq_t *), rqp);
1274 }
1275 }
1276
1277 /*
1278 * Keep waiting until the tmo2 deadline expires.
1279 */
1280 while (rqp->sr_rpgen == rqp->sr_rplast) {
1281 if (rqp->sr_flags & SMBR_NOINTR_RECV)
1282 tr = cv_timedwait(&rqp->sr_cond,
1283 &rqp->sr_lock, tmo2);
1284 else
1285 tr = cv_timedwait_sig(&rqp->sr_cond,
1286 &rqp->sr_lock, tmo2);
1287 if (tr == 0) {
1288 error = EINTR;
1289 goto out;
1290 }
1291 if (tr < 0) {
1292 DTRACE_PROBE1(smb_iod_waitrq2,
1293 (smb_rq_t *), rqp);
1294 error = ETIME;
1295 goto out;
1296 }
1297 /* got wakeup */
1298 }
1299 error = rqp->sr_lerror;
1300 rqp->sr_rplast++;
1301
1302 out:
1303 SMBRQ_UNLOCK(rqp);
1304
1305 /*
1306 * MULTIPACKET requests must stay in the list.
1307 * They may need additional responses.
1308 */
1309 if ((rqp->sr_flags & SMBR_MULTIPACKET) == 0)
1310 smb_iod_removerq(rqp);
1311
1312 return (error);
1313 }
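/*
 * Typical caller pattern for the functions above (sketch only;
 * request construction and teardown live in smb_rq.c and are
 * elided here):
 */
#if 0
	error = smb2_iod_addrq(rqp);	/* assign MIDs, queue, send */
	if (error == 0)
		error = smb_iod_waitrq(rqp);	/* also removes rqp */
#endif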
1314
1315 /*
1316 * Internal variant of smb_iod_waitrq(), for use in
1317 * requests run by the IOD (reader) thread itself.
1318 * Block only long enough to receive one reply.
1319 */
1320 int
1321 smb_iod_waitrq_int(struct smb_rq *rqp)
1322 {
1323 struct smb_vc *vcp = rqp->sr_vc;
1324 int timeleft = rqp->sr_timo;
1325 int error;
1326
1327 ASSERT((rqp->sr_flags & SMBR_MULTIPACKET) == 0);
1328 again:
1329 error = smb_iod_recvall(vcp, B_TRUE);
1330 if (error == ETIME) {
1331 /* We waited SMB_NBTIMO sec. */
1332 timeleft -= SMB_NBTIMO;
1333 if (timeleft > 0)
1334 goto again;
1335 }
1336
1337 smb_iod_removerq(rqp);
1338 if (rqp->sr_state != SMBRQ_NOTIFIED)
1339 error = ETIME;
1340
1341 return (error);
1342 }
1343
1344 /*
1345 * Shut down all outstanding I/O requests on the specified share with
1346 * EIO; used when unmounting a share. (There shouldn't be any for a
1347 * non-forced unmount; if this is a forced unmount, we have to shut down
1348 * the requests as part of the unmount process.)
1349 */
1350 void
1351 smb_iod_shutdown_share(struct smb_share *ssp)
1352 {
1353 struct smb_vc *vcp = SSTOVC(ssp);
1354 struct smb_rq *rqp;
1355
1356 /*
1357 * Loop through the list of requests and shut down the ones
1358 * that are for the specified share.
1359 */
1360 rw_enter(&vcp->iod_rqlock, RW_READER);
1361 TAILQ_FOREACH(rqp, &vcp->iod_rqlist, sr_link) {
1362 if (rqp->sr_state != SMBRQ_NOTIFIED && rqp->sr_share == ssp)
1363 smb_iod_rqprocessed(rqp, EIO, 0);
1364 }
1365 rw_exit(&vcp->iod_rqlock);
1366 }
1367
1368 /*
1369 * Ioctl functions called by the user-level I/O Daemon (IOD)
1370 * to bring up and service a connection to some SMB server.
1371 */
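/*
 * From user space, the handlers below are driven roughly in this
 * order (a sketch of what smbiod does; handle setup, the ioctl
 * argument structures, and errno handling are elided):
 *
 *	SMBIOC_IOD_CONNECT	TCP connect
 *	SMBIOC_IOD_NEGOTIATE	SMB1/SMB2 negotiate
 *	SMBIOC_IOD_SSNSETUP	repeat while EINPROGRESS
 *	SMBIOC_IOD_WORK		become the reader; returns on disconnect
 *	SMBIOC_IOD_IDLE		wait for a reconnect request
 *	SMBIOC_IOD_RCFAIL	after a failed reconnect attempt
 */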
1372
1373 /*
1374 * Handle ioctl SMBIOC_IOD_CONNECT
1375 */
1376 int
1377 nsmb_iod_connect(struct smb_vc *vcp, cred_t *cr)
1378 {
1379 int err, val;
1380
1381 ASSERT(vcp->iod_thr == curthread);
1382
1383 if (vcp->vc_state != SMBIOD_ST_RECONNECT) {
1384 cmn_err(CE_NOTE, "iod_connect: bad state %d", vcp->vc_state);
1385 return (EINVAL);
1386 }
1387
1388 /*
1389 * Putting a TLI endpoint back in the right state for a new
1390 * connection is a bit tricky. In theory, this could be:
1391 * SMB_TRAN_DISCONNECT(vcp);
1392 * SMB_TRAN_UNBIND(vcp);
1393 * but that method often results in TOUTSTATE errors.
1394 * It's easier to just close it and open a new endpoint.
1395 */
1396 SMB_VC_LOCK(vcp);
1397 if (vcp->vc_tdata)
1398 SMB_TRAN_DONE(vcp);
1399 err = SMB_TRAN_CREATE(vcp, cr);
1400 SMB_VC_UNLOCK(vcp);
1401 if (err != 0)
1402 return (err);
1403
1404 /*
1405 * Set various options on this endpoint.
1406 * Keep going in spite of errors.
1407 */
1408 val = smb_tcpsndbuf;
1409 err = SMB_TRAN_SETPARAM(vcp, SMBTP_SNDBUF, &val);
1410 if (err != 0) {
1411 cmn_err(CE_NOTE, "iod_connect: setopt SNDBUF, err=%d", err);
1412 }
1413 val = smb_tcprcvbuf;
1414 err = SMB_TRAN_SETPARAM(vcp, SMBTP_RCVBUF, &val);
1415 if (err != 0) {
1416 cmn_err(CE_NOTE, "iod_connect: setopt RCVBUF, err=%d", err);
1417 }
1418 val = 1;
1419 err = SMB_TRAN_SETPARAM(vcp, SMBTP_KEEPALIVE, &val);
1420 if (err != 0) {
1421 cmn_err(CE_NOTE, "iod_connect: setopt KEEPALIVE, err=%d", err);
1422 }
1423 val = 1;
1424 err = SMB_TRAN_SETPARAM(vcp, SMBTP_TCP_NODELAY, &val);
1425 if (err != 0) {
1426 cmn_err(CE_NOTE, "iod_connect: setopt TCP_NODELAY, err=%d", err);
1427 }
1428 val = smb_connect_timeout * 1000;
1429 err = SMB_TRAN_SETPARAM(vcp, SMBTP_TCP_CON_TMO, &val);
1430 if (err != 0) {
1431 cmn_err(CE_NOTE, "iod_connect: setopt TCP con tmo, err=%d", err);
1432 }
1433
1434 /*
1435 * Bind and connect
1436 */
1437 err = SMB_TRAN_BIND(vcp, NULL);
1438 if (err != 0) {
1439 cmn_err(CE_NOTE, "iod_connect: t_kbind: err=%d", err);
1440 /* Continue on and try connect. */
1441 }
1442 err = SMB_TRAN_CONNECT(vcp, &vcp->vc_srvaddr.sa);
1443 /*
1444 * No cmn_err here, as connect failures are normal, e.g.
1445 * when a server has multiple addresses and only some are
1446 * routed for us. (libsmbfs tries them all)
1447 */
1448 if (err == 0) {
1449 SMB_VC_LOCK(vcp);
1450 smb_iod_newstate(vcp, SMBIOD_ST_CONNECTED);
1451 SMB_VC_UNLOCK(vcp);
1452 } /* else stay in state reconnect */
1453
1454 return (err);
1455 }
1456
1457 /*
1458 * Handle ioctl SMBIOC_IOD_NEGOTIATE
1459 * Do the whole SMB1/SMB2 negotiate
1460 *
1461 * This is where we send our first request to the server.
1462 * If this is the first time we're talking to this server
1463 * (meaning not a reconnect), then we don't know whether
1464 * the server supports SMB2, so we need to use the weird
1465 * SMB1-to-SMB2 negotiation. That's where we send an SMB1
1466 * negotiate including dialect "SMB 2.???" and if the
1467 * server supports SMB2 we get an SMB2 reply -- Yes, an
1468 * SMB2 reply to an SMB1 request. A strange protocol...
1469 *
1470 * If on the other hand we already know the server supports
1471 * SMB2 (because this is a reconnect) or if the client side
1472 * has disabled SMB1 entirely, we'll skip the SMB1 part.
1473 */
1474 int
1475 nsmb_iod_negotiate(struct smb_vc *vcp, cred_t *cr)
1476 {
1477 struct smb_sopt *sv = &vcp->vc_sopt;
1478 smb_cred_t scred;
1479 int err = 0;
1480
1481 ASSERT(vcp->iod_thr == curthread);
1482
1483 smb_credinit(&scred, cr);
1484
1485 if (vcp->vc_state != SMBIOD_ST_CONNECTED) {
1486 cmn_err(CE_NOTE, "iod_negotiate: bad state %d", vcp->vc_state);
1487 err = EINVAL;
1488 goto out;
1489 }
1490
1491 if (vcp->vc_maxver == 0 || vcp->vc_minver > vcp->vc_maxver) {
1492 err = EINVAL;
1493 goto out;
1494 }
1495
1496 /*
1497 * (Re)init negotiated values
1498 */
1499 bzero(sv, sizeof (*sv));
1500 vcp->vc2_next_message_id = 0;
1501 vcp->vc2_limit_message_id = 1;
1502 vcp->vc2_session_id = 0;
1503 vcp->vc_next_seq = 0;
1504
1505 /*
1506 * If this was a reconnect, get rid of the old MAC key
1507 * and session key.
1508 */
1509 SMB_VC_LOCK(vcp);
1510 if (vcp->vc_mackey != NULL) {
1511 kmem_free(vcp->vc_mackey, vcp->vc_mackeylen);
1512 vcp->vc_mackey = NULL;
1513 vcp->vc_mackeylen = 0;
1514 }
1515 if (vcp->vc_ssnkey != NULL) {
1516 kmem_free(vcp->vc_ssnkey, vcp->vc_ssnkeylen);
1517 vcp->vc_ssnkey = NULL;
1518 vcp->vc_ssnkeylen = 0;
1519 }
1520 SMB_VC_UNLOCK(vcp);
1521
1522 /*
1523 * If this is not an SMB2 reconnect (SMBV_SMB2 not set),
1524 * and if SMB1 is enabled, do the SMB1 negotiate. Then
1525 * if either SMB1-to-SMB2 negotiate tells us we should
1526 * switch to SMB2, or the local configuration has
1527 * disabled SMB1, set the SMBV_SMB2 flag.
1528 *
1529 * Note that vc_maxver is handled in smb_smb_negotiate
1530 * so we never get sv_proto == SMB_DIALECT_SMB2_FF when
1531 * the local configuration disables SMB2, and therefore
1532 * we won't set the SMBV_SMB2 flag.
1533 */
1534 if ((vcp->vc_flags & SMBV_SMB2) == 0) {
1535 if (vcp->vc_minver < SMB2_DIALECT_BASE) {
1536 /*
1537 * SMB1 is enabled
1538 */
1539 err = smb_smb_negotiate(vcp, &scred);
1540 if (err != 0)
1541 goto out;
1542 }
1543 /*
1544 * If SMB1-to-SMB2 negotiate told us we should
1545 * switch to SMB2, or if the local configuration
1546 * disables SMB1, set the SMB2 flag.
1547 */
1548 if (sv->sv_proto == SMB_DIALECT_SMB2_FF ||
1549 vcp->vc_minver >= SMB2_DIALECT_BASE) {
1550 /*
1551 * Switch this VC to SMB2.
1552 */
1553 SMB_VC_LOCK(vcp);
1554 vcp->vc_flags |= SMBV_SMB2;
1555 SMB_VC_UNLOCK(vcp);
1556 }
1557 }
1558
1559 /*
1560 * If this is an SMB2 reconnect (SMBV_SMB2 was set before this
1561 * function was called), or SMB1-to-SMB2 negotiate indicated
1562 * we should switch to SMB2, or we have SMB1 disabled (both
1563 * cases set SMBV_SMB2 above), then do SMB2 negotiate.
1564 */
1565 if ((vcp->vc_flags & SMBV_SMB2) != 0) {
1566 err = smb2_smb_negotiate(vcp, &scred);
1567 }
1568
1569 out:
1570 if (err == 0) {
1571 SMB_VC_LOCK(vcp);
1572 smb_iod_newstate(vcp, SMBIOD_ST_NEGOTIATED);
1573 SMB_VC_UNLOCK(vcp);
1574 }
1575 /*
1576 * (else) leave state as it was.
1577 * User-level will either close this handle (if connecting
1578 * for the first time) or call rcfail and then try again.
1579 */
1580
1581 smb_credrele(&scred);
1582
1583 return (err);
1584 }
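/*
 * The dialect decision above, condensed (sketch; mirrors rather
 * than replaces the logic in nsmb_iod_negotiate):
 */
#if 0
	boolean_t try_smb1 = ((vcp->vc_flags & SMBV_SMB2) == 0 &&
	    vcp->vc_minver < SMB2_DIALECT_BASE);
	boolean_t go_smb2 = (sv->sv_proto == SMB_DIALECT_SMB2_FF ||
	    vcp->vc_minver >= SMB2_DIALECT_BASE);
#endif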
1585
1586 /*
1587 * Handle ioctl SMBIOC_IOD_SSNSETUP
1588 * Do either SMB1 or SMB2 session setup (one call/reply)
1589 */
1590 int
1591 nsmb_iod_ssnsetup(struct smb_vc *vcp, cred_t *cr)
1592 {
1593 smb_cred_t scred;
1594 int err;
1595
1596 ASSERT(vcp->iod_thr == curthread);
1597
1598 switch (vcp->vc_state) {
1599 case SMBIOD_ST_NEGOTIATED:
1600 case SMBIOD_ST_AUTHCONT:
1601 break;
1602 default:
1603 return (EINVAL);
1604 }
1605
1606 smb_credinit(&scred, cr);
1607 if (vcp->vc_flags & SMBV_SMB2)
1608 err = smb2_smb_ssnsetup(vcp, &scred);
1609 else
1610 err = smb_smb_ssnsetup(vcp, &scred);
1611 smb_credrele(&scred);
1612
1613 SMB_VC_LOCK(vcp);
1614 switch (err) {
1615 case 0:
1616 smb_iod_newstate(vcp, SMBIOD_ST_AUTHOK);
1617 break;
1618 case EINPROGRESS: /* MORE_PROCESSING_REQUIRED */
1619 smb_iod_newstate(vcp, SMBIOD_ST_AUTHCONT);
1620 break;
1621 default:
1622 smb_iod_newstate(vcp, SMBIOD_ST_AUTHFAIL);
1623 break;
1624 }
1625 SMB_VC_UNLOCK(vcp);
1626
1627 return (err);
1628 }
1629
1630 static int
1631 smb_iod_logoff(struct smb_vc *vcp, cred_t *cr)
1632 {
1633 smb_cred_t scred;
1634 int err;
1635
1636 ASSERT(vcp->iod_thr == curthread);
1637
1638 smb_credinit(&scred, cr);
1639 if (vcp->vc_flags & SMBV_SMB2)
1640 err = smb2_smb_logoff(vcp, &scred);
1641 else
1642 err = smb_smb_logoff(vcp, &scred);
1643 smb_credrele(&scred);
1644
1645 return (err);
1646 }
1647
1648 /*
1649 * Handle ioctl SMBIOC_IOD_WORK
1650 *
1651 * The smbiod agent calls this after authentication to become
1652 * the reader for this session, so long as that's possible.
1653 * This should only return non-zero if we want that agent to
1654 * give up on this VC permanently.
1655 */
1656 /* ARGSUSED */
1657 int
1658 smb_iod_vc_work(struct smb_vc *vcp, int flags, cred_t *cr)
1659 {
1660 smbioc_ssn_work_t *wk = &vcp->vc_work;
1661 int err = 0;
1662
1663 /*
1664 * This is called by the one-and-only
1665 * IOD thread for this VC.
1666 */
1667 ASSERT(vcp->iod_thr == curthread);
1668
1669 /*
1670 * Should be in state...
1671 */
1672 if (vcp->vc_state != SMBIOD_ST_AUTHOK) {
1673 cmn_err(CE_NOTE, "iod_vc_work: bad state %d", vcp->vc_state);
1674 return (EINVAL);
1675 }
1676
1677 /*
1678 * Update the session key and initialize SMB signing.
1679 *
1680 * This implementation does not use multiple SMB sessions per
1681 * TCP connection (where only the first session key is used)
1682 * so we always have a new session key here. Sanity check the
1683 * length from user space. Normally 16 or 32.
1684 */
1685 if (wk->wk_u_ssnkey_len > 1024) {
1686 cmn_err(CE_NOTE, "iod_vc_work: ssn key too long");
1687 return (EINVAL);
1688 }
1689
1690 ASSERT(vcp->vc_ssnkey == NULL);
1691 SMB_VC_LOCK(vcp);
1692 if (wk->wk_u_ssnkey_len != 0 &&
1693 wk->wk_u_ssnkey_buf.lp_ptr != NULL) {
1694 vcp->vc_ssnkeylen = wk->wk_u_ssnkey_len;
1695 vcp->vc_ssnkey = kmem_alloc(vcp->vc_ssnkeylen, KM_SLEEP);
1696 if (ddi_copyin(wk->wk_u_ssnkey_buf.lp_ptr,
1697 vcp->vc_ssnkey, vcp->vc_ssnkeylen, flags) != 0) {
1698 err = EFAULT;
1699 }
1700 }
1701 SMB_VC_UNLOCK(vcp);
1702 if (err)
1703 return (err);
1704
1705 /*
1706 * If we have a session key, derive the MAC key for SMB signing.
1707 * If this was a NULL session, we might have no session key.
1708 */
1709 ASSERT(vcp->vc_mackey == NULL);
1710 if (vcp->vc_ssnkey != NULL) {
1711 if (vcp->vc_flags & SMBV_SMB2)
1712 err = smb2_sign_init(vcp);
1713 else
1714 err = smb_sign_init(vcp);
1715 if (err != 0)
1716 return (err);
1717 }
1718
1719 /*
1720 * Tell any enqueued requests they can start.
1721 */
1722 SMB_VC_LOCK(vcp);
1723 vcp->vc_genid++; /* possibly new connection */
1724 smb_iod_newstate(vcp, SMBIOD_ST_VCACTIVE);
1725 cv_broadcast(&vcp->vc_statechg);
1726 SMB_VC_UNLOCK(vcp);
1727
1728 /*
1729 * The above cv_broadcast should be sufficient to
1730 * get requests going again.
1731 *
1732 * If we have a callback function, run it.
1733 * Was: smb_iod_notify_connected()
1734 */
1735 if (fscb && fscb->fscb_connect)
1736 smb_vc_walkshares(vcp, fscb->fscb_connect);
1737
1738 /*
1739 * Run the "reader" loop. An error return here is normal
1740 * (i.e. when we need to reconnect) so ignore errors.
1741 * Note: This call updates the vc_state.
1742 */
1743 (void) smb_iod_recvall(vcp, B_FALSE);
1744
1745 /*
1746 * The reader loop returned, so we must have a
1747 * new state. (disconnected or reconnecting)
1748 *
1749 * Notify shares of the disconnect.
1750 * Was: smb_iod_notify_disconnect()
1751 */
1752 smb_vc_walkshares(vcp, smb_iod_share_disconnected);
1753
1754 /*
1755 * The reader loop function returns only when
1756 * there's been an error on the connection, or
1757 * this VC has no more references. It also
1758 * updates the state before it returns.
1759 *
1760 * Tell any requests to give up or restart.
1761 */
1762 smb_iod_invrq(vcp);
1763
1764 return (err);
1765 }
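/*
 * The session-key import in smb_iod_vc_work follows the usual
 * bounded-copyin pattern for variable-length user data; a
 * condensed sketch (same 1024-byte sanity limit as above, names
 * shortened for illustration):
 */
#if 0
	if (ulen > 1024)
		return (EINVAL);	/* validate before allocating */
	kbuf = kmem_alloc(ulen, KM_SLEEP);
	if (ddi_copyin(ubuf, kbuf, ulen, flags) != 0)
		err = EFAULT;		/* caller frees kbuf on error */
#endif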
1766
1767 /*
1768 * Handle ioctl SMBIOC_IOD_IDLE
1769 *
1770 * Wait around for someone to ask to use this VC again after the
1771 * TCP session has closed. When one of the connected trees adds a
1772 * request, smb_iod_reconnect will set vc_state to RECONNECT and
1773 * wake this cv_wait. When a VC ref. goes away in smb_vc_rele,
1774 * that also signals this wait so we can re-check whether we
1775 * now hold the last ref. on this VC (and can destroy it).
1776 */
1777 int
1778 smb_iod_vc_idle(struct smb_vc *vcp)
1779 {
1780 int err = 0;
1781 boolean_t destroy = B_FALSE;
1782
1783 /*
1784 * This is called by the one-and-only
1785 * IOD thread for this VC.
1786 */
1787 ASSERT(vcp->iod_thr == curthread);
1788
1789 /*
1790 * Should be in state...
1791 */
1792 if (vcp->vc_state != SMBIOD_ST_IDLE &&
1793 vcp->vc_state != SMBIOD_ST_RECONNECT) {
1794 cmn_err(CE_NOTE, "iod_vc_idle: bad state %d", vcp->vc_state);
1795 return (EINVAL);
1796 }
1797
1798 SMB_VC_LOCK(vcp);
1799
1800 while (vcp->vc_state == SMBIOD_ST_IDLE &&
1801 vcp->vc_co.co_usecount > 1) {
1802 if (cv_wait_sig(&vcp->iod_idle, &vcp->vc_lock) == 0) {
1803 err = EINTR;
1804 break;
1805 }
1806 }
1807 if (vcp->vc_state == SMBIOD_ST_IDLE &&
1808 vcp->vc_co.co_usecount == 1) {
1809 /*
1810 * We were woken because we now have the last ref.
1811 * Arrange for this VC to be destroyed now.
1812 * Set the "GONE" flag while holding the lock,
1813 * to prevent a race with new references.
1814 * The destroy happens after unlock.
1815 */
1816 vcp->vc_flags |= SMBV_GONE;
1817 destroy = B_TRUE;
1818 }
1819
1820 SMB_VC_UNLOCK(vcp);
1821
1822 if (destroy) {
1823 /* This sets vc_state = DEAD */
1824 smb_iod_disconnect(vcp);
1825 }
1826
1827 return (err);
1828 }
1829
1830 /*
1831 * Handle ioctl SMBIOC_IOD_RCFAIL
1832 *
1833 * After a failed reconnect attempt, smbiod will
1834 * call this to make current requests error out.
1835 */
1836 int
1837 smb_iod_vc_rcfail(struct smb_vc *vcp)
1838 {
1839 clock_t tr;
1840 int err = 0;
1841
1842 /*
1843 * This is called by the one-and-only
1844 * IOD thread for this VC.
1845 */
1846 ASSERT(vcp->iod_thr == curthread);
1847 SMB_VC_LOCK(vcp);
1848
1849 smb_iod_newstate(vcp, SMBIOD_ST_RCFAILED);
1850 cv_broadcast(&vcp->vc_statechg);
1851
1852 /*
1853 * Short wait here for two reasons:
1854 * (1) Give requests a chance to error out.
1855 * (2) Prevent immediate retry.
1856 */
1857 tr = cv_reltimedwait_sig(&vcp->iod_idle, &vcp->vc_lock,
1858 SEC_TO_TICK(5), TR_CLOCK_TICK);
1859 if (tr == 0)
1860 err = EINTR;
1861
1862 /*
1863 * Normally we'll switch to state IDLE here. However,
1864 * if something called smb_iod_reconnect() while we were
1865 * waiting above, we'll be in state RECONNECT already.
1866 * In that case, keep state RECONNECT, so we essentially
1867 * skip transition through state IDLE that would normally
1868 * happen next.
1869 */
1870 if (vcp->vc_state != SMBIOD_ST_RECONNECT) {
1871 smb_iod_newstate(vcp, SMBIOD_ST_IDLE);
1872 cv_broadcast(&vcp->vc_statechg);
1873 }
1874
1875 SMB_VC_UNLOCK(vcp);
1876
1877 return (err);
1878 }
1879
1880 /*
1881 * Ask the IOD to reconnect (if not already underway)
1882 * then wait for the reconnect to finish.
1883 */
1884 int
1885 smb_iod_reconnect(struct smb_vc *vcp)
1886 {
1887 int err = 0, rv;
1888
1889 SMB_VC_LOCK(vcp);
1890 again:
1891 switch (vcp->vc_state) {
1892
1893 case SMBIOD_ST_IDLE:
1894 /* Tell the IOD thread it's no longer IDLE. */
1895 smb_iod_newstate(vcp, SMBIOD_ST_RECONNECT);
1896 cv_signal(&vcp->iod_idle);
1897 /* FALLTHROUGH */
1898
1899 case SMBIOD_ST_RECONNECT:
1900 case SMBIOD_ST_CONNECTED:
1901 case SMBIOD_ST_NEGOTIATED:
1902 case SMBIOD_ST_AUTHCONT:
1903 case SMBIOD_ST_AUTHOK:
1904 /* Wait for the VC state to become ACTIVE. */
1905 rv = cv_wait_sig(&vcp->vc_statechg, &vcp->vc_lock);
1906 if (rv == 0) {
1907 err = EINTR;
1908 break;
1909 }
1910 goto again;
1911
1912 case SMBIOD_ST_VCACTIVE:
1913 err = 0; /* success! */
1914 break;
1915
1916 case SMBIOD_ST_AUTHFAIL:
1917 case SMBIOD_ST_RCFAILED:
1918 case SMBIOD_ST_DEAD:
1919 default:
1920 err = ENOTCONN;
1921 break;
1922 }
1923
1924 SMB_VC_UNLOCK(vcp);
1925 return (err);
1926 }
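/*
 * For reference, how the wait above treats each VC state
 * (a summary of the switch in smb_iod_reconnect):
 *
 *	IDLE				kick the IOD, then wait
 *	RECONNECT, CONNECTED,
 *	NEGOTIATED, AUTHCONT, AUTHOK	wait for a state change
 *	VCACTIVE			success
 *	AUTHFAIL, RCFAILED, DEAD	ENOTCONN
 */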