Print this page
5133 Upstream SMB client fixes: Nexenta SUP-538 and SUP-548
| Split |
Close |
| Expand all |
| Collapse all |
--- old/usr/src/uts/common/fs/smbclnt/netsmb/smb_iod.c
+++ new/usr/src/uts/common/fs/smbclnt/netsmb/smb_iod.c
1 1 /*
2 2 * Copyright (c) 2000-2001 Boris Popov
3 3 * All rights reserved.
4 4 *
5 5 * Redistribution and use in source and binary forms, with or without
6 6 * modification, are permitted provided that the following conditions
7 7 * are met:
8 8 * 1. Redistributions of source code must retain the above copyright
9 9 * notice, this list of conditions and the following disclaimer.
10 10 * 2. Redistributions in binary form must reproduce the above copyright
11 11 * notice, this list of conditions and the following disclaimer in the
12 12 * documentation and/or other materials provided with the distribution.
13 13 * 3. All advertising materials mentioning features or use of this software
14 14 * must display the following acknowledgement:
15 15 * This product includes software developed by Boris Popov.
16 16 * 4. Neither the name of the author nor the names of any co-contributors
17 17 * may be used to endorse or promote products derived from this software
18 18 * without specific prior written permission.
19 19 *
20 20 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
21 21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 23 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
24 24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 30 * SUCH DAMAGE.
31 31 *
32 32 * $Id: smb_iod.c,v 1.32 2005/02/12 00:17:09 lindak Exp $
33 33 */
34 34
35 35 /*
36 36 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
37 37 * Use is subject to license terms.
38 38 */
39 39
40 40 #ifdef DEBUG
41 41 /* See sys/queue.h */
42 42 #define QUEUEDEBUG 1
43 43 #endif
44 44
45 45 #include <sys/param.h>
46 46 #include <sys/systm.h>
47 47 #include <sys/atomic.h>
48 48 #include <sys/proc.h>
49 49 #include <sys/thread.h>
50 50 #include <sys/file.h>
51 51 #include <sys/kmem.h>
52 52 #include <sys/unistd.h>
53 53 #include <sys/mount.h>
54 54 #include <sys/vnode.h>
55 55 #include <sys/types.h>
56 56 #include <sys/ddi.h>
57 57 #include <sys/sunddi.h>
58 58 #include <sys/stream.h>
59 59 #include <sys/strsun.h>
60 60 #include <sys/time.h>
61 61 #include <sys/class.h>
62 62 #include <sys/disp.h>
63 63 #include <sys/cmn_err.h>
64 64 #include <sys/zone.h>
65 65 #include <sys/sdt.h>
66 66
67 67 #include <netsmb/smb_osdep.h>
68 68
69 69 #include <netsmb/smb.h>
70 70 #include <netsmb/smb_conn.h>
71 71 #include <netsmb/smb_rq.h>
72 72 #include <netsmb/smb_subr.h>
73 73 #include <netsmb/smb_tran.h>
74 74 #include <netsmb/smb_trantcp.h>
75 75
76 76 int smb_iod_send_echo(smb_vc_t *);
77 77
78 78 /*
79 79 * This is set/cleared when smbfs loads/unloads
80 80 * No locks should be necessary, because smbfs
81 81 * can't unload until all the mounts are gone.
82 82 */
83 83 static smb_fscb_t *fscb;
/*
 * Register (or clear, when cb == NULL) the file-system callback
 * vector used to notify smbfs of connection state changes.
 * Called when smbfs loads/unloads; see the comment above fscb
 * for why no locking is needed here.
 */
void
smb_fscb_set(smb_fscb_t *cb)
{
	fscb = cb;
}
89 89
90 90 static void
91 91 smb_iod_share_disconnected(smb_share_t *ssp)
92 92 {
93 93
94 94 smb_share_invalidate(ssp);
95 95
96 96 /* smbfs_dead() */
97 97 if (fscb && fscb->fscb_disconn) {
98 98 fscb->fscb_disconn(ssp);
99 99 }
100 100 }
101 101
102 102 /*
103 103 * State changes are important and infrequent.
104 104 * Make them easily observable via dtrace.
105 105 */
/*
 * Set the VC state.  Deliberately a function rather than an
 * in-line assignment so every transition can be observed via
 * a dtrace fbt entry probe (args: vcp, new state).
 * NOTE(review): callers appear to hold SMB_VC_LOCK when other
 * threads may read vc_state -- not verified for every caller.
 */
void
smb_iod_newstate(struct smb_vc *vcp, int state)
{
	vcp->vc_state = state;
}
111 111
/* Lock Held version of the next function. */
static inline void
smb_iod_rqprocessed_LH(
	struct smb_rq *rqp,
	int error,
	int flags)
{
	/*
	 * Mark the request "processed": record the error and any
	 * extra flags, bump sr_rpgen so waiters in smb_iod_waitrq
	 * (which compare sr_rpgen with sr_rplast) see the change,
	 * then wake them all.  Caller holds SMBRQ_LOCK(rqp).
	 */
	rqp->sr_flags |= flags;
	rqp->sr_lerror = error;
	rqp->sr_rpgen++;
	rqp->sr_state = SMBRQ_NOTIFIED;
	cv_broadcast(&rqp->sr_cond);
}
125 125
/*
 * Locking wrapper for smb_iod_rqprocessed_LH():
 * take the request lock, mark the request processed
 * (waking any waiters), then drop the lock.
 */
static void
smb_iod_rqprocessed(struct smb_rq *rqp, int error, int flags)
{
	SMBRQ_LOCK(rqp);
	smb_iod_rqprocessed_LH(rqp, error, flags);
	SMBRQ_UNLOCK(rqp);
}
137 137
138 138 static void
139 139 smb_iod_invrq(struct smb_vc *vcp)
140 140 {
141 141 struct smb_rq *rqp;
142 142
143 143 /*
144 144 * Invalidate all outstanding requests for this connection
145 145 */
146 146 rw_enter(&vcp->iod_rqlock, RW_READER);
147 147 TAILQ_FOREACH(rqp, &vcp->iod_rqlist, sr_link) {
148 148 smb_iod_rqprocessed(rqp, ENOTCONN, SMBR_RESTART);
149 149 }
150 150 rw_exit(&vcp->iod_rqlock);
151 151 }
152 152
153 153 /*
154 154 * Called by smb_vc_rele, smb_vc_kill, and by the driver
155 155 * close entry point if the IOD closes its dev handle.
156 156 *
157 157 * Forcibly kill the connection and IOD.
158 158 */
159 159 void
160 160 smb_iod_disconnect(struct smb_vc *vcp)
161 161 {
162 162
163 163 /*
164 164 * Inform everyone of the state change.
165 165 */
166 166 SMB_VC_LOCK(vcp);
167 167 if (vcp->vc_state != SMBIOD_ST_DEAD) {
168 168 smb_iod_newstate(vcp, SMBIOD_ST_DEAD);
169 169 cv_broadcast(&vcp->vc_statechg);
170 170 }
171 171 SMB_VC_UNLOCK(vcp);
172 172
173 173 /*
174 174 * Let's be safe here and avoid doing any
|
↓ open down ↓ |
174 lines elided |
↑ open up ↑ |
175 175 * call across the network while trying to
176 176 * shut things down. If we just disconnect,
177 177 * the server will take care of the logoff.
178 178 */
179 179 SMB_TRAN_DISCONNECT(vcp);
180 180
181 181 /*
182 182 * If we have an IOD, it should immediately notice
183 183 * that its connection has closed. But in case
184 184 * it doesn't, let's also send it a signal.
185 - * (but don't shoot our own foot!)
186 - * Note: the iod calls smb_iod_invrq on its way out.
187 185 */
186 + SMB_VC_LOCK(vcp);
188 187 if (vcp->iod_thr != NULL &&
189 188 vcp->iod_thr != curthread) {
190 189 tsignal(vcp->iod_thr, SIGKILL);
191 190 }
191 + SMB_VC_UNLOCK(vcp);
192 192 }
193 193
/*
 * Send one request.
 *
 * Called by _addrq (for internal requests)
 * and _sendall (via _addrq, _multirq, _waitrq)
 *
 * Returns zero on a successful send and on non-fatal
 * transport errors (the request will be retried later);
 * returns ENOTCONN when no further sends should be
 * attempted on this connection.
 */
static int
smb_iod_sendrq(struct smb_rq *rqp)
{
	struct smb_vc *vcp = rqp->sr_vc;
	mblk_t *m;
	int error;

	ASSERT(vcp);
	ASSERT(SEMA_HELD(&vcp->vc_sendlock));
	ASSERT(RW_READ_HELD(&vcp->iod_rqlock));

	/*
	 * Note: Anything special for SMBR_INTERNAL here?
	 */
	if (vcp->vc_state != SMBIOD_ST_VCACTIVE) {
		SMBIODEBUG("bad vc_state=%d\n", vcp->vc_state);
		return (ENOTCONN);
	}


	/*
	 * On the first send, set the MID and (maybe)
	 * the signing sequence numbers.  The increments
	 * here are serialized by vc_sendlock
	 */
	if (rqp->sr_sendcnt == 0) {

		rqp->sr_mid = vcp->vc_next_mid++;

		if (rqp->sr_rqflags2 & SMB_FLAGS2_SECURITY_SIGNATURE) {
			/*
			 * We're signing requests and verifying
			 * signatures on responses.  Set the
			 * sequence numbers of the request and
			 * response here, used in smb_rq_verify.
			 */
			rqp->sr_seqno = vcp->vc_next_seq++;
			rqp->sr_rseqno = vcp->vc_next_seq++;
		}

		/* Fill in UID, TID, MID, etc. */
		smb_rq_fillhdr(rqp);

		/*
		 * Sign the message now that we're finally done
		 * filling in the SMB header fields, etc.
		 */
		if (rqp->sr_rqflags2 & SMB_FLAGS2_SECURITY_SIGNATURE) {
			smb_rq_sign(rqp);
		}
	}
	if (rqp->sr_sendcnt++ >= 60/SMBSBTIMO) { /* one minute */
		smb_iod_rqprocessed(rqp, rqp->sr_lerror, SMBR_RESTART);
		/*
		 * If all attempts to send a request failed, then
		 * something is seriously hosed.
		 */
		return (ENOTCONN);
	}

	/*
	 * Replaced m_copym() with Solaris copymsg() which does the same
	 * work when we want to do a M_COPYALL.
	 * m = m_copym(rqp->sr_rq.mb_top, 0, M_COPYALL, 0);
	 *
	 * We send a copy so the original stays around for a
	 * possible retransmission by a later call here.
	 */
	m = copymsg(rqp->sr_rq.mb_top);

#ifdef DTRACE_PROBE
	DTRACE_PROBE2(smb_iod_sendrq,
	    (smb_rq_t *), rqp, (mblk_t *), m);
#else
	SMBIODEBUG("M:%04x, P:%04x, U:%04x, T:%04x\n", rqp->sr_mid, 0, 0, 0);
#endif
	m_dumpm(m);

	if (m != NULL) {
		error = SMB_TRAN_SEND(vcp, m);
		m = 0; /* consumed by SEND */
	} else
		error = ENOBUFS;

	rqp->sr_lerror = error;
	if (error == 0) {
		/* Sent OK; mark it and wake anyone in a send-wait. */
		SMBRQ_LOCK(rqp);
		rqp->sr_flags |= SMBR_SENT;
		rqp->sr_state = SMBRQ_SENT;
		if (rqp->sr_flags & SMBR_SENDWAIT)
			cv_broadcast(&rqp->sr_cond);
		SMBRQ_UNLOCK(rqp);
		return (0);
	}
	/*
	 * Check for fatal errors
	 */
	if (SMB_TRAN_FATAL(vcp, error)) {
		/*
		 * No further attempts should be made
		 */
		SMBSDEBUG("TRAN_SEND returned fatal error %d\n", error);
		return (ENOTCONN);
	}
	if (error)
		SMBSDEBUG("TRAN_SEND returned non-fatal error %d\n", error);

#ifdef APPLE
	/* If proc waiting on rqp was signaled... */
	if (smb_rq_intr(rqp))
		smb_iod_rqprocessed(rqp, EINTR, 0);
#endif

	return (0);
}
312 312
/*
 * Receive one SMB message from the transport and do basic
 * validation of the header signature.  Blocks in SMB_TRAN_RECV.
 * On success, returns zero and hands the message to the caller
 * via *mpp (ownership transfers); on failure any received
 * message is freed here and *mpp is untouched.
 */
static int
smb_iod_recv1(struct smb_vc *vcp, mblk_t **mpp)
{
	mblk_t *m;
	uchar_t *hp;
	int error;

top:
	m = NULL;
	error = SMB_TRAN_RECV(vcp, &m);
	if (error == EAGAIN)
		goto top;
	if (error)
		return (error);
	ASSERT(m);

	/*
	 * Make at least the SMB header contiguous.
	 * NOTE(review): assumes m_pullup consumes the chain on
	 * failure (BSD semantics) -- confirm in the compat shim.
	 */
	m = m_pullup(m, SMB_HDRLEN);
	if (m == NULL) {
		return (ENOSR);
	}

	/*
	 * Check the SMB header
	 */
	hp = mtod(m, uchar_t *);
	if (bcmp(hp, SMB_SIGNATURE, SMB_SIGLEN) != 0) {
		m_freem(m);
		return (EPROTO);
	}

	*mpp = m;
	return (0);
}
346 346
/*
 * Process incoming packets
 *
 * This is the "reader" loop, run by the IOD thread
 * while in state SMBIOD_ST_VCACTIVE.  The loop now
 * simply blocks in the socket recv until either a
 * message arrives, or a disconnect.
 *
 * Any non-zero error means the IOD should terminate.
 */
int
smb_iod_recvall(struct smb_vc *vcp)
{
	struct smb_rq *rqp;
	mblk_t *m;
	uchar_t *hp;
	ushort_t mid;
	int error = 0;
	int etime_count = 0; /* for "server not responding", etc. */

	for (;;) {
		/*
		 * Check whether someone "killed" this VC,
		 * or is asking the IOD to terminate.
		 */

		if (vcp->vc_state != SMBIOD_ST_VCACTIVE) {
			SMBIODEBUG("bad vc_state=%d\n", vcp->vc_state);
			error = 0;
			break;
		}

		if (vcp->iod_flags & SMBIOD_SHUTDOWN) {
			SMBIODEBUG("SHUTDOWN set\n");
			/* This IOD thread will terminate. */
			SMB_VC_LOCK(vcp);
			smb_iod_newstate(vcp, SMBIOD_ST_DEAD);
			cv_broadcast(&vcp->vc_statechg);
			SMB_VC_UNLOCK(vcp);
			error = EINTR;
			break;
		}

		m = NULL;
		error = smb_iod_recv1(vcp, &m);

		if (error == ETIME &&
		    vcp->iod_rqlist.tqh_first != NULL) {
			/*
			 * Nothing received for 15 seconds and
			 * we have requests in the queue.
			 */
			etime_count++;

			/*
			 * Once, at 15 sec. notify callbacks
			 * and print the warning message.
			 */
			if (etime_count == 1) {
				/* Was: smb_iod_notify_down(vcp); */
				if (fscb && fscb->fscb_down)
					smb_vc_walkshares(vcp,
					    fscb->fscb_down);
				zprintf(vcp->vc_zoneid,
				    "SMB server %s not responding\n",
				    vcp->vc_srvname);
			}

			/*
			 * At 30 sec. try sending an echo, and then
			 * once a minute thereafter.
			 * (etime_count & 3) == 2 is true at counts
			 * 2, 6, 10, ... i.e. 30s, 90s, 150s, ...
			 */
			if ((etime_count & 3) == 2) {
				(void) smb_iod_send_echo(vcp);
			}

			continue;
		} /* ETIME && requests in queue */

		if (error == ETIME) {
			/*
			 * If the IOD thread holds the last reference
			 * to this VC, let the IOD thread terminate.
			 * Unlocked peek first to avoid taking the
			 * lock on every idle timeout.
			 */
			if (vcp->vc_co.co_usecount > 1)
				continue;
			SMB_VC_LOCK(vcp);
			if (vcp->vc_co.co_usecount == 1) {
				smb_iod_newstate(vcp, SMBIOD_ST_DEAD);
				SMB_VC_UNLOCK(vcp);
				error = 0;
				break;
			}
			SMB_VC_UNLOCK(vcp);
			continue;
		} /* error == ETIME */

		if (error) {
			/*
			 * The recv. above returned some error
			 * we can't continue from i.e. ENOTCONN.
			 * It's dangerous to continue here.
			 * (possible infinite loop!)
			 *
			 * If we have requests enqueued, next
			 * state is reconnecting, else idle.
			 */
			int state;
			SMB_VC_LOCK(vcp);
			state = (vcp->iod_rqlist.tqh_first != NULL) ?
			    SMBIOD_ST_RECONNECT : SMBIOD_ST_IDLE;
			smb_iod_newstate(vcp, state);
			cv_broadcast(&vcp->vc_statechg);
			SMB_VC_UNLOCK(vcp);
			error = 0;
			break;
		}

		/*
		 * Received something.  Yea!
		 */
		if (etime_count) {
			etime_count = 0;

			zprintf(vcp->vc_zoneid, "SMB server %s OK\n",
			    vcp->vc_srvname);

			/* Was: smb_iod_notify_up(vcp); */
			if (fscb && fscb->fscb_up)
				smb_vc_walkshares(vcp, fscb->fscb_up);
		}

		/*
		 * Have an SMB packet.  The SMB header was
		 * checked in smb_iod_recv1().
		 * Find the request...
		 */
		hp = mtod(m, uchar_t *);
		/*LINTED*/
		mid = letohs(SMB_HDRMID(hp));
		SMBIODEBUG("mid %04x\n", (uint_t)mid);

		rw_enter(&vcp->iod_rqlock, RW_READER);
		TAILQ_FOREACH(rqp, &vcp->iod_rqlist, sr_link) {

			if (rqp->sr_mid != mid)
				continue;

			DTRACE_PROBE2(smb_iod_recvrq,
			    (smb_rq_t *), rqp, (mblk_t *), m);
			m_dumpm(m);

			SMBRQ_LOCK(rqp);
			if (rqp->sr_rp.md_top == NULL) {
				/* First response for this request. */
				md_initm(&rqp->sr_rp, m);
			} else {
				if (rqp->sr_flags & SMBR_MULTIPACKET) {
					md_append_record(&rqp->sr_rp, m);
				} else {
					SMBRQ_UNLOCK(rqp);
					SMBSDEBUG("duplicate response %d "
					    "(ignored)\n", mid);
					break;
				}
			}
			smb_iod_rqprocessed_LH(rqp, 0, 0);
			SMBRQ_UNLOCK(rqp);
			break;
		}

		if (rqp == NULL) {
			/* No matching request; drop the message. */
			int cmd = SMB_HDRCMD(hp);

			if (cmd != SMB_COM_ECHO)
				SMBSDEBUG("drop resp: mid %d, cmd %d\n",
				    (uint_t)mid, cmd);
			/* smb_printrqlist(vcp); */
			m_freem(m);
		}
		rw_exit(&vcp->iod_rqlock);

	}

	return (error);
}
532 532
533 533 /*
534 534 * The IOD receiver thread has requests pending and
535 535 * has not received anything in a while. Try to
536 536 * send an SMB echo request. It's tricky to do a
537 537 * send from the IOD thread because we can't block.
538 538 *
539 539 * Using tmo=SMBNOREPLYWAIT in the request
540 540 * so smb_rq_reply will skip smb_iod_waitrq.
541 541 * The smb_smb_echo call uses SMBR_INTERNAL
542 542 * to avoid calling smb_iod_sendall().
543 543 */
544 544 int
545 545 smb_iod_send_echo(smb_vc_t *vcp)
546 546 {
547 547 smb_cred_t scred;
548 548 int err;
549 549
550 550 smb_credinit(&scred, NULL);
551 551 err = smb_smb_echo(vcp, &scred, SMBNOREPLYWAIT);
552 552 smb_credrele(&scred);
553 553 return (err);
554 554 }
555 555
556 556 /*
557 557 * The IOD thread is now just a "reader",
558 558 * so no more smb_iod_request(). Yea!
559 559 */
560 560
/*
 * Place request in the queue, and send it now if possible.
 * Called with no locks held.
 *
 * Returns zero when the request has been queued (and possibly
 * sent); non-zero when it could not be queued (not connected,
 * or an internal request that failed to send).
 */
int
smb_iod_addrq(struct smb_rq *rqp)
{
	struct smb_vc *vcp = rqp->sr_vc;
	int error, save_newrq;

	ASSERT(rqp->sr_cred);

	/*
	 * State should be correct after the check in
	 * smb_rq_enqueue(), but we dropped locks...
	 */
	if (vcp->vc_state != SMBIOD_ST_VCACTIVE) {
		SMBIODEBUG("bad vc_state=%d\n", vcp->vc_state);
		return (ENOTCONN);
	}

	/*
	 * Requests from the IOD itself are marked _INTERNAL,
	 * and get some special treatment to avoid blocking
	 * the reader thread (so we don't deadlock).
	 * The request is not yet on the queue, so we can
	 * modify its state here without locks.
	 * Only thing using this now is ECHO.
	 */
	rqp->sr_owner = curthread;
	if (rqp->sr_owner == vcp->iod_thr) {
		rqp->sr_flags |= SMBR_INTERNAL;

		/*
		 * This is a request from the IOD thread.
		 * Always send directly from this thread.
		 * Note lock order: iod_rqlist, vc_sendlock
		 */
		rw_enter(&vcp->iod_rqlock, RW_WRITER);
		TAILQ_INSERT_HEAD(&vcp->iod_rqlist, rqp, sr_link);
		rw_downgrade(&vcp->iod_rqlock);

		/*
		 * Note: iod_sendrq expects vc_sendlock,
		 * so take that here, but carefully:
		 * Never block the IOD thread here.
		 */
		if (sema_tryp(&vcp->vc_sendlock) == 0) {
			SMBIODEBUG("sendlock busy\n");
			error = EAGAIN;
		} else {
			/* Have vc_sendlock */
			error = smb_iod_sendrq(rqp);
			sema_v(&vcp->vc_sendlock);
		}

		rw_exit(&vcp->iod_rqlock);

		/*
		 * In the non-error case, _removerq
		 * is done by either smb_rq_reply
		 * or smb_iod_waitrq.
		 */
		if (error)
			smb_iod_removerq(rqp);

		return (error);
	}

	/* Normal (non-IOD) request: enqueue at the tail. */
	rw_enter(&vcp->iod_rqlock, RW_WRITER);

	TAILQ_INSERT_TAIL(&vcp->iod_rqlist, rqp, sr_link);
	/* iod_rqlock/WRITER protects iod_newrq */
	save_newrq = vcp->iod_newrq;
	vcp->iod_newrq++;

	rw_exit(&vcp->iod_rqlock);

	/*
	 * Now send any requests that need to be sent,
	 * including the one we just put on the list.
	 * Only the thread that found iod_newrq==0
	 * needs to run the send loop.
	 */
	if (save_newrq == 0)
		smb_iod_sendall(vcp);

	return (0);
}
650 650
/*
 * Mark an SMBR_MULTIPACKET request as
 * needing another send.  Similar to the
 * "normal" part of smb_iod_addrq.
 */
int
smb_iod_multirq(struct smb_rq *rqp)
{
	struct smb_vc *vcp = rqp->sr_vc;
	int save_newrq;

	ASSERT(rqp->sr_flags & SMBR_MULTIPACKET);

	/* Internal (IOD-owned) requests can't be multi-packet. */
	if (rqp->sr_flags & SMBR_INTERNAL)
		return (EINVAL);

	if (vcp->vc_state != SMBIOD_ST_VCACTIVE) {
		SMBIODEBUG("bad vc_state=%d\n", vcp->vc_state);
		return (ENOTCONN);
	}

	rw_enter(&vcp->iod_rqlock, RW_WRITER);

	/* Already on iod_rqlist, just reset state. */
	rqp->sr_state = SMBRQ_NOTSENT;

	/* iod_rqlock/WRITER protects iod_newrq */
	save_newrq = vcp->iod_newrq;
	vcp->iod_newrq++;

	rw_exit(&vcp->iod_rqlock);

	/*
	 * Now send any requests that need to be sent,
	 * including the one we just marked NOTSENT.
	 * Only the thread that found iod_newrq==0
	 * needs to run the send loop.
	 */
	if (save_newrq == 0)
		smb_iod_sendall(vcp);

	return (0);
}
694 694
695 695
696 696 void
697 697 smb_iod_removerq(struct smb_rq *rqp)
698 698 {
699 699 struct smb_vc *vcp = rqp->sr_vc;
700 700
701 701 rw_enter(&vcp->iod_rqlock, RW_WRITER);
702 702 #ifdef QUEUEDEBUG
703 703 /*
704 704 * Make sure we have not already removed it.
705 705 * See sys/queue.h QUEUEDEBUG_TAILQ_POSTREMOVE
706 706 * XXX: Don't like the constant 1 here...
707 707 */
708 708 ASSERT(rqp->sr_link.tqe_next != (void *)1L);
709 709 #endif
710 710 TAILQ_REMOVE(&vcp->iod_rqlist, rqp, sr_link);
711 711 rw_exit(&vcp->iod_rqlock);
712 712 }
713 713
714 714
715 715
/*
 * Wait for a request to complete.
 *
 * For normal requests, we need to deal with
 * ioc_muxcnt dropping below vc_maxmux by
 * making arrangements to send more...
 *
 * Returns zero and the reply is ready in rqp->sr_rp,
 * or EINTR/ETIME/sr_lerror on failure.
 */
int
smb_iod_waitrq(struct smb_rq *rqp)
{
	struct smb_vc *vcp = rqp->sr_vc;
	clock_t tr, tmo1, tmo2;
	int error, rc;

	if (rqp->sr_flags & SMBR_INTERNAL) {
		/*
		 * Internal (IOD-owned) requests never block here;
		 * the reply, if any, is picked up by the reader.
		 */
		ASSERT((rqp->sr_flags & SMBR_MULTIPACKET) == 0);
		smb_iod_removerq(rqp);
		return (EAGAIN);
	}

	/*
	 * Make sure this is NOT the IOD thread,
	 * or the wait below will stop the reader.
	 */
	ASSERT(curthread != vcp->iod_thr);

	SMBRQ_LOCK(rqp);

	/*
	 * First, wait for the request to be sent.  Normally the send
	 * has already happened by the time we get here.  However, if
	 * we have more than maxmux entries in the request list, our
	 * request may not be sent until other requests complete.
	 * The wait in this case is due to local I/O demands, so
	 * we don't want the server response timeout to apply.
	 *
	 * If a request is allowed to interrupt this wait, then the
	 * request is cancelled and never sent OTW.  Some kinds of
	 * requests should never be cancelled (i.e. close) and those
	 * are marked SMBR_NOINTR_SEND so they either go eventually,
	 * or a connection close will terminate them with ENOTCONN.
	 */
	while (rqp->sr_state == SMBRQ_NOTSENT) {
		rqp->sr_flags |= SMBR_SENDWAIT;
		if (rqp->sr_flags & SMBR_NOINTR_SEND) {
			cv_wait(&rqp->sr_cond, &rqp->sr_lock);
			rc = 1;
		} else
			rc = cv_wait_sig(&rqp->sr_cond, &rqp->sr_lock);
		rqp->sr_flags &= ~SMBR_SENDWAIT;
		if (rc == 0) {
			/* cv_wait_sig: interrupted by a signal */
			SMBIODEBUG("EINTR in sendwait, rqp=%p\n", rqp);
			error = EINTR;
			goto out;
		}
	}

	/*
	 * The request has been sent.  Now wait for the response,
	 * with the timeout specified for this request.
	 * Compute all the deadlines now, so we effectively
	 * start the timer(s) after the request is sent.
	 * tmo1 is a relative "slow server" notice interval;
	 * tmo2 is the absolute (lbolt) response deadline.
	 */
	if (smb_timo_notice && (smb_timo_notice < rqp->sr_timo))
		tmo1 = SEC_TO_TICK(smb_timo_notice);
	else
		tmo1 = 0;
	tmo2 = ddi_get_lbolt() + SEC_TO_TICK(rqp->sr_timo);

	/*
	 * As above, we don't want to allow interrupt for some
	 * requests like open, because we could miss a successful
	 * response and therefore "leak" a FID.  Such requests
	 * are marked SMBR_NOINTR_RECV to prevent that.
	 *
	 * If "slow server" warnings are enabled, wait first
	 * for the "notice" timeout, and warn if expired.
	 */
	if (tmo1 && rqp->sr_rpgen == rqp->sr_rplast) {
		if (rqp->sr_flags & SMBR_NOINTR_RECV)
			tr = cv_reltimedwait(&rqp->sr_cond,
			    &rqp->sr_lock, tmo1, TR_CLOCK_TICK);
		else
			tr = cv_reltimedwait_sig(&rqp->sr_cond,
			    &rqp->sr_lock, tmo1, TR_CLOCK_TICK);
		if (tr == 0) {
			error = EINTR;
			goto out;
		}
		if (tr < 0) {
			/* Notice timeout expired; not fatal yet. */
#ifdef DTRACE_PROBE
			DTRACE_PROBE1(smb_iod_waitrq1,
			    (smb_rq_t *), rqp);
#endif
#ifdef NOT_YET
			/* Want this to go ONLY to the user. */
			uprintf("SMB server %s has not responded"
			    " to request %d after %d seconds..."
			    " (still waiting).\n", vcp->vc_srvname,
			    rqp->sr_mid, smb_timo_notice);
#endif
		}
	}

	/*
	 * Keep waiting until tmo2 is expired.
	 */
	while (rqp->sr_rpgen == rqp->sr_rplast) {
		if (rqp->sr_flags & SMBR_NOINTR_RECV)
			tr = cv_timedwait(&rqp->sr_cond,
			    &rqp->sr_lock, tmo2);
		else
			tr = cv_timedwait_sig(&rqp->sr_cond,
			    &rqp->sr_lock, tmo2);
		if (tr == 0) {
			error = EINTR;
			goto out;
		}
		if (tr < 0) {
			/* Response deadline expired; give up. */
#ifdef DTRACE_PROBE
			DTRACE_PROBE1(smb_iod_waitrq2,
			    (smb_rq_t *), rqp);
#endif
#ifdef NOT_YET
			/* Want this to go ONLY to the user. */
			uprintf("SMB server %s has not responded"
			    " to request %d after %d seconds..."
			    " (giving up).\n", vcp->vc_srvname,
			    rqp->sr_mid, rqp->sr_timo);
#endif
			error = ETIME;
			goto out;
		}
		/* got wakeup */
	}
	error = rqp->sr_lerror;
	/* Consume this notification (see smb_iod_rqprocessed_LH). */
	rqp->sr_rplast++;

out:
	SMBRQ_UNLOCK(rqp);

	/*
	 * MULTIPACKET request must stay in the list.
	 * They may need additional responses.
	 */
	if ((rqp->sr_flags & SMBR_MULTIPACKET) == 0)
		smb_iod_removerq(rqp);

	/*
	 * Some request has been completed.
	 * If we reached the mux limit,
	 * re-run the send loop...
	 */
	if (vcp->iod_muxfull)
		smb_iod_sendall(vcp);

	return (error);
}
874 874
875 875 /*
876 876 * Shutdown all outstanding I/O requests on the specified share with
877 877 * ENXIO; used when unmounting a share. (There shouldn't be any for a
878 878 * non-forced unmount; if this is a forced unmount, we have to shutdown
879 879 * the requests as part of the unmount process.)
880 880 */
881 881 void
882 882 smb_iod_shutdown_share(struct smb_share *ssp)
883 883 {
884 884 struct smb_vc *vcp = SSTOVC(ssp);
885 885 struct smb_rq *rqp;
886 886
887 887 /*
888 888 * Loop through the list of requests and shutdown the ones
889 889 * that are for the specified share.
890 890 */
891 891 rw_enter(&vcp->iod_rqlock, RW_READER);
892 892 TAILQ_FOREACH(rqp, &vcp->iod_rqlist, sr_link) {
893 893 if (rqp->sr_state != SMBRQ_NOTIFIED && rqp->sr_share == ssp)
894 894 smb_iod_rqprocessed(rqp, EIO, 0);
895 895 }
896 896 rw_exit(&vcp->iod_rqlock);
897 897 }
898 898
/*
 * Send all requests that need sending.
 * Called from _addrq, _multirq, _waitrq
 */
void
smb_iod_sendall(smb_vc_t *vcp)
{
	struct smb_rq *rqp;
	int error, muxcnt;

	/*
	 * Clear "newrq" to make sure threads adding
	 * new requests will run this function again.
	 */
	rw_enter(&vcp->iod_rqlock, RW_WRITER);
	vcp->iod_newrq = 0;

	/*
	 * We only read iod_rqlist, so downgrade rwlock.
	 * This allows the IOD to handle responses while
	 * some requesting thread may be blocked in send.
	 */
	rw_downgrade(&vcp->iod_rqlock);

	/*
	 * Serialize to prevent multiple senders.
	 * Note lock order: iod_rqlock, vc_sendlock
	 */
	sema_p(&vcp->vc_sendlock);

	/*
	 * Walk the list of requests and send when possible.
	 * We avoid having more than vc_maxmux requests
	 * outstanding to the server by traversing only
	 * vc_maxmux entries into this list.  Simple!
	 */
	ASSERT(vcp->vc_maxmux > 0);
	error = muxcnt = 0;
	TAILQ_FOREACH(rqp, &vcp->iod_rqlist, sr_link) {

		if (vcp->vc_state != SMBIOD_ST_VCACTIVE) {
			error = ENOTCONN; /* stop everything! */
			break;
		}

		if (rqp->sr_state == SMBRQ_NOTSENT) {
			error = smb_iod_sendrq(rqp);
			if (error)
				break;
		}

		if (++muxcnt == vcp->vc_maxmux) {
			SMBIODEBUG("muxcnt == vc_maxmux\n");
			break;
		}

	}
	/* Note: error is only used to break out of the loop. */

	/*
	 * If we have vc_maxmux requests outstanding,
	 * arrange for _waitrq to call _sendall as
	 * requests are completed.
	 */
	vcp->iod_muxfull =
	    (muxcnt < vcp->vc_maxmux) ? 0 : 1;

	sema_v(&vcp->vc_sendlock);
	rw_exit(&vcp->iod_rqlock);
}
968 968
/*
 * Main work for an IOD thread that has a connection up:
 * loan the transport file pointer to the transport module,
 * mark the VC active (waking queued requests), then run the
 * reader loop until disconnect.  Returns the reader loop's
 * error, or EBADF / transport error if setup fails.
 */
int
smb_iod_vc_work(struct smb_vc *vcp, cred_t *cr)
{
	struct file *fp = NULL;
	int err = 0;

	/*
	 * This is called by the one-and-only
	 * IOD thread for this VC.
	 */
	ASSERT(vcp->iod_thr == curthread);

	/*
	 * Get the network transport file pointer,
	 * and "loan" it to our transport module.
	 */
	if ((fp = getf(vcp->vc_tran_fd)) == NULL) {
		err = EBADF;
		goto out;
	}
	if ((err = SMB_TRAN_LOAN_FP(vcp, fp, cr)) != 0)
		goto out;

	/*
	 * In case of reconnect, tell any enqueued requests
	 * they can GO!
	 */
	SMB_VC_LOCK(vcp);
	vcp->vc_genid++;	/* possibly new connection */
	smb_iod_newstate(vcp, SMBIOD_ST_VCACTIVE);
	cv_broadcast(&vcp->vc_statechg);
	SMB_VC_UNLOCK(vcp);

	/*
	 * The above cv_broadcast should be sufficient to
	 * get requests going again.
	 *
	 * If we have a callback function, run it.
	 * Was: smb_iod_notify_connected()
	 */
	if (fscb && fscb->fscb_connect)
		smb_vc_walkshares(vcp, fscb->fscb_connect);

	/*
	 * Run the "reader" loop.
	 */
	err = smb_iod_recvall(vcp);

	/*
	 * The reader loop returned, so we must have a
	 * new state.  (disconnected or reconnecting)
	 *
	 * Notify shares of the disconnect.
	 * Was: smb_iod_notify_disconnect()
	 */
	smb_vc_walkshares(vcp, smb_iod_share_disconnected);

	/*
	 * The reader loop function returns only when
	 * there's been an error on the connection, or
	 * this VC has no more references.  It also
	 * updates the state before it returns.
	 *
	 * Tell any requests to give up or restart.
	 */
	smb_iod_invrq(vcp);

out:
	/* Recall the file descriptor loan. */
	(void) SMB_TRAN_LOAN_FP(vcp, NULL, cr);
	if (fp != NULL) {
		releasef(vcp->vc_tran_fd);
	}

	return (err);
}
1045 1045
/*
 * Wait around for someone to ask to use this VC.
 * If the VC has only the IOD reference, then
 * wait only a minute or so, then drop it.
 *
 * Returns zero when the IOD should terminate (state left
 * DEAD or changed by another thread), EINTR on signal.
 */
int
smb_iod_vc_idle(struct smb_vc *vcp)
{
	clock_t tr, delta = SEC_TO_TICK(15);
	int err = 0;

	/*
	 * This is called by the one-and-only
	 * IOD thread for this VC.
	 */
	ASSERT(vcp->iod_thr == curthread);

	SMB_VC_LOCK(vcp);
	while (vcp->vc_state == SMBIOD_ST_IDLE) {
		/* Wake every 15 sec. to re-check the refcount. */
		tr = cv_reltimedwait_sig(&vcp->iod_idle, &vcp->vc_lock,
		    delta, TR_CLOCK_TICK);
		if (tr == 0) {
			/* Interrupted by a signal. */
			err = EINTR;
			break;
		}
		if (tr < 0) {
			/* timeout */
			if (vcp->vc_co.co_usecount == 1) {
				/* Let this IOD terminate. */
				smb_iod_newstate(vcp, SMBIOD_ST_DEAD);
				/* nobody to cv_broadcast */
				break;
			}
		}
	}
	SMB_VC_UNLOCK(vcp);

	return (err);
}
1085 1085
/*
 * After a failed reconnect attempt, smbiod will
 * call this to make current requests error out.
 *
 * Transitions RECONNECT -> RCFAILED (briefly, so waiters
 * in smb_iod_reconnect see the failure) -> IDLE.
 */
int
smb_iod_vc_rcfail(struct smb_vc *vcp)
{
	clock_t tr;
	int err = 0;

	/*
	 * This is called by the one-and-only
	 * IOD thread for this VC.
	 */
	ASSERT(vcp->iod_thr == curthread);

	if (vcp->vc_state != SMBIOD_ST_RECONNECT)
		return (EINVAL);

	SMB_VC_LOCK(vcp);

	smb_iod_newstate(vcp, SMBIOD_ST_RCFAILED);
	cv_broadcast(&vcp->vc_statechg);

	/*
	 * Short wait here for two reasons:
	 * (1) Give requests a chance to error out.
	 * (2) Prevent immediate retry.
	 */
	tr = cv_reltimedwait_sig(&vcp->iod_idle, &vcp->vc_lock,
	    SEC_TO_TICK(5), TR_CLOCK_TICK);
	if (tr == 0)
		err = EINTR;

	/* Back to idle; the IOD will wait for new work. */
	smb_iod_newstate(vcp, SMBIOD_ST_IDLE);
	cv_broadcast(&vcp->vc_statechg);

	SMB_VC_UNLOCK(vcp);

	return (err);
}
1127 1127
/*
 * Ask the IOD to reconnect (if not already underway)
 * then wait for the reconnect to finish.
 *
 * Returns zero once the VC is active, EINTR on signal,
 * ENOTCONN if the reconnect failed or the VC is dead.
 */
int
smb_iod_reconnect(struct smb_vc *vcp)
{
	int err = 0, rv;

	SMB_VC_LOCK(vcp);
again:
	switch (vcp->vc_state) {

	case SMBIOD_ST_IDLE:
		/* Kick the IOD awake; it will attempt reconnect. */
		smb_iod_newstate(vcp, SMBIOD_ST_RECONNECT);
		cv_signal(&vcp->iod_idle);
		/* FALLTHROUGH */

	case SMBIOD_ST_RECONNECT:
		/* Wait for a state change, then re-evaluate. */
		rv = cv_wait_sig(&vcp->vc_statechg, &vcp->vc_lock);
		if (rv == 0) {
			err = EINTR;
			break;
		}
		goto again;

	case SMBIOD_ST_VCACTIVE:
		err = 0; /* success! */
		break;

	case SMBIOD_ST_RCFAILED:
	case SMBIOD_ST_DEAD:
	default:
		err = ENOTCONN;
		break;
	}

	SMB_VC_UNLOCK(vcp);
	return (err);
}
|
↓ open down ↓ |
966 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX