Print this page
NEX-14666 Need to provide SMB 2.1 Client
NEX-17187 panic in smbfs_acl_store
NEX-17231 smbfs create xattr files finds wrong file
NEX-17224 smbfs lookup EINVAL should be ENOENT
NEX-17260 SMB1 client fails to list directory after NEX-14666
Reviewed by: Evan Layton <evan.layton@nexenta.com>
Reviewed by: Matt Barden <matt.barden@nexenta.com>
Reviewed by: Rick McNeal <rick.mcneal@nexenta.com>
Reviewed by: Saso Kiselkov <saso.kiselkov@nexenta.com>
Reviewed by: Joyce McIntosh <joyce.mcintosh@nexenta.com>
and: (cleanup)
NEX-16824 SMB client connection setup rework
NEX-17232 SMB client reconnect failures
Reviewed by: Evan Layton <evan.layton@nexenta.com>
Reviewed by: Matt Barden <matt.barden@nexenta.com>
and: (improve debug)
| Split |
Close |
| Expand all |
| Collapse all |
--- old/usr/src/uts/common/fs/smbclnt/netsmb/smb_rq.c
+++ new/usr/src/uts/common/fs/smbclnt/netsmb/smb_rq.c
1 1 /*
2 2 * Copyright (c) 2000-2001, Boris Popov
3 3 * All rights reserved.
4 4 *
5 5 * Redistribution and use in source and binary forms, with or without
6 6 * modification, are permitted provided that the following conditions
7 7 * are met:
8 8 * 1. Redistributions of source code must retain the above copyright
9 9 * notice, this list of conditions and the following disclaimer.
10 10 * 2. Redistributions in binary form must reproduce the above copyright
11 11 * notice, this list of conditions and the following disclaimer in the
12 12 * documentation and/or other materials provided with the distribution.
13 13 * 3. All advertising materials mentioning features or use of this software
14 14 * must display the following acknowledgement:
15 15 * This product includes software developed by Boris Popov.
16 16 * 4. Neither the name of the author nor the names of any co-contributors
17 17 * may be used to endorse or promote products derived from this software
18 18 * without specific prior written permission.
19 19 *
20 20 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
21 21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 23 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
24 24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
↓ open down ↓ |
26 lines elided |
↑ open up ↑ |
27 27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 30 * SUCH DAMAGE.
31 31 *
32 32 * $Id: smb_rq.c,v 1.29 2005/02/11 01:44:17 lindak Exp $
33 33 */
34 34
35 35 /*
36 36 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
37 + * Portions Copyright (C) 2001 - 2013 Apple Inc. All rights reserved.
38 + * Copyright 2018 Nexenta Systems, Inc. All rights reserved.
37 39 */
38 40
39 41 #include <sys/param.h>
40 42 #include <sys/systm.h>
41 43 #include <sys/time.h>
42 44 #include <sys/kmem.h>
43 45 #include <sys/proc.h>
44 46 #include <sys/lock.h>
45 47 #include <sys/socket.h>
46 48 #include <sys/mount.h>
47 49 #include <sys/sunddi.h>
48 50 #include <sys/cmn_err.h>
49 51 #include <sys/sdt.h>
50 52
51 53 #include <netsmb/smb_osdep.h>
52 54
53 55 #include <netsmb/smb.h>
56 +#include <netsmb/smb2.h>
54 57 #include <netsmb/smb_conn.h>
55 58 #include <netsmb/smb_subr.h>
56 59 #include <netsmb/smb_tran.h>
57 60 #include <netsmb/smb_rq.h>
61 +#include <netsmb/smb2_rq.h>
58 62
59 63 /*
60 64 * How long to wait before restarting a request (after reconnect)
61 65 */
62 66 #define SMB_RCNDELAY 2 /* seconds */
63 67
64 68 /*
65 69 * leave this zero - we can't second guess server side effects of
66 70 * duplicate ops, this isn't nfs!
67 71 */
68 72 #define SMBMAXRESTARTS 0
69 73
70 74
71 75 static int smb_rq_reply(struct smb_rq *rqp);
76 +static int smb_rq_parsehdr(struct smb_rq *rqp);
72 77 static int smb_rq_enqueue(struct smb_rq *rqp);
73 -static int smb_rq_getenv(struct smb_connobj *layer,
74 - struct smb_vc **vcpp, struct smb_share **sspp);
75 78 static int smb_rq_new(struct smb_rq *rqp, uchar_t cmd);
76 79 static int smb_t2_reply(struct smb_t2rq *t2p);
77 80 static int smb_nt_reply(struct smb_ntrq *ntp);
78 81
79 82
80 83 /*
81 84 * Done with a request object. Free its contents.
82 85 * If it was allocated (SMBR_ALLOCED) free it too.
83 86 * Some of these are stack locals, not allocated.
84 87 *
85 88 * No locks here - this is the last ref.
86 89 */
87 90 void
88 91 smb_rq_done(struct smb_rq *rqp)
89 92 {
90 93
91 94 /*
92 95 * No smb_vc_rele() here - see smb_rq_init()
93 96 */
94 97 mb_done(&rqp->sr_rq);
95 98 md_done(&rqp->sr_rp);
96 99 mutex_destroy(&rqp->sr_lock);
97 100 cv_destroy(&rqp->sr_cond);
98 101 if (rqp->sr_flags & SMBR_ALLOCED)
|
↓ open down ↓ |
14 lines elided |
↑ open up ↑ |
99 102 kmem_free(rqp, sizeof (*rqp));
100 103 }
101 104
102 105 int
103 106 smb_rq_alloc(struct smb_connobj *layer, uchar_t cmd, struct smb_cred *scred,
104 107 struct smb_rq **rqpp)
105 108 {
106 109 struct smb_rq *rqp;
107 110 int error;
108 111
112 + // XXX kmem cache?
109 113 rqp = (struct smb_rq *)kmem_alloc(sizeof (struct smb_rq), KM_SLEEP);
110 114 if (rqp == NULL)
111 115 return (ENOMEM);
112 116 error = smb_rq_init(rqp, layer, cmd, scred);
113 117 if (error) {
114 118 smb_rq_done(rqp);
115 119 return (error);
116 120 }
117 121 rqp->sr_flags |= SMBR_ALLOCED;
118 122 *rqpp = rqp;
119 123 return (0);
120 124 }
121 125
122 126 int
123 127 smb_rq_init(struct smb_rq *rqp, struct smb_connobj *co, uchar_t cmd,
124 128 struct smb_cred *scred)
125 129 {
126 130 int error;
127 131
128 132 bzero(rqp, sizeof (*rqp));
129 133 mutex_init(&rqp->sr_lock, NULL, MUTEX_DRIVER, NULL);
130 134 cv_init(&rqp->sr_cond, NULL, CV_DEFAULT, NULL);
131 135
132 136 error = smb_rq_getenv(co, &rqp->sr_vc, &rqp->sr_share);
133 137 if (error)
134 138 return (error);
135 139
136 140 /*
137 141 * We copied a VC pointer (vcp) into rqp->sr_vc,
138 142 * but we do NOT do a smb_vc_hold here. Instead,
139 143 * the caller is responsible for the hold on the
|
↓ open down ↓ |
21 lines elided |
↑ open up ↑ |
140 144 * share or the VC as needed. For smbfs callers,
141 145 * the hold is on the share, via the smbfs mount.
142 146 * For nsmb ioctl callers, the hold is done when
143 147 * the driver handle gets VC or share references.
144 148 * This design avoids frequent hold/rele activity
145 149 * when creating and completing requests.
146 150 */
147 151
148 152 rqp->sr_rexmit = SMBMAXRESTARTS;
149 153 rqp->sr_cred = scred; /* Note: ref hold done by caller. */
150 - rqp->sr_pid = (uint16_t)ddi_get_pid();
151 154 error = smb_rq_new(rqp, cmd);
152 155
153 156 return (error);
154 157 }
155 158
156 159 static int
157 160 smb_rq_new(struct smb_rq *rqp, uchar_t cmd)
158 161 {
159 162 struct mbchain *mbp = &rqp->sr_rq;
160 163 struct smb_vc *vcp = rqp->sr_vc;
161 164 int error;
162 165
163 166 ASSERT(rqp != NULL);
164 167
165 168 rqp->sr_sendcnt = 0;
166 - rqp->sr_cmd = cmd;
167 169
168 170 mb_done(mbp);
169 171 md_done(&rqp->sr_rp);
170 172 error = mb_init(mbp);
171 173 if (error)
172 174 return (error);
173 175
174 - /*
175 - * Is this the right place to save the flags?
176 - */
177 - rqp->sr_rqflags = vcp->vc_hflags;
178 - rqp->sr_rqflags2 = vcp->vc_hflags2;
176 + if (vcp->vc_flags & SMBV_SMB2) {
177 + /*
178 + * SMB2 request initialization
179 + */
180 + rqp->sr2_command = cmd;
181 + rqp->sr2_creditcharge = 1;
182 + rqp->sr2_creditsrequested = 1;
183 + rqp->sr_pid = 0xFEFF; /* Made up, just like Windows */
184 + rqp->sr2_rqflags = 0;
185 + if ((vcp->vc_flags & SMBV_SIGNING) != 0 &&
186 + vcp->vc_mackey != NULL) {
187 + rqp->sr2_rqflags |= SMB2_FLAGS_SIGNED;
188 + }
179 189
180 - /*
181 - * The SMB header is filled in later by
182 - * smb_rq_fillhdr (see below)
183 - * Just reserve space here.
184 - */
185 - mb_put_mem(mbp, NULL, SMB_HDRLEN, MB_MZERO);
190 + /*
191 + * The SMB2 header is filled in later by
192 + * smb2_rq_fillhdr (see smb2_rq.c)
193 + * Just reserve space here.
194 + */
195 + mb_put_mem(mbp, NULL, SMB2_HDRLEN, MB_MZERO);
196 + } else {
197 + /*
198 + * SMB1 request initialization
199 + */
200 + rqp->sr_cmd = cmd;
201 + rqp->sr_pid = (uint32_t)ddi_get_pid();
202 + rqp->sr_rqflags = vcp->vc_hflags;
203 + rqp->sr_rqflags2 = vcp->vc_hflags2;
186 204
205 + /*
206 + * The SMB header is filled in later by
207 + * smb_rq_fillhdr (see below)
208 + * Just reserve space here.
209 + */
210 + mb_put_mem(mbp, NULL, SMB_HDRLEN, MB_MZERO);
211 + }
212 +
187 213 return (0);
188 214 }
189 215
190 216 /*
191 217 * Given a request with its body already composed,
192 218 * rewind to the start and fill in the SMB header.
193 - * This is called after the request is enqueued,
219 + * This is called when the request is enqueued,
194 220 * so we have the final MID, seq num. etc.
195 221 */
196 222 void
197 223 smb_rq_fillhdr(struct smb_rq *rqp)
198 224 {
199 225 struct mbchain mbtmp, *mbp = &mbtmp;
200 226 mblk_t *m;
201 227
202 228 /*
203 229 * Fill in the SMB header using a dup of the first mblk,
204 230 * which points at the same data but has its own wptr,
205 231 * so we can rewind without trashing the message.
206 232 */
207 233 m = dupb(rqp->sr_rq.mb_top);
208 234 m->b_wptr = m->b_rptr; /* rewind */
209 235 mb_initm(mbp, m);
|
↓ open down ↓ |
6 lines elided |
↑ open up ↑ |
210 236
211 237 mb_put_mem(mbp, SMB_SIGNATURE, 4, MB_MSYSTEM);
212 238 mb_put_uint8(mbp, rqp->sr_cmd);
213 239 mb_put_uint32le(mbp, 0); /* status */
214 240 mb_put_uint8(mbp, rqp->sr_rqflags);
215 241 mb_put_uint16le(mbp, rqp->sr_rqflags2);
216 242 mb_put_uint16le(mbp, 0); /* pid-high */
217 243 mb_put_mem(mbp, NULL, 8, MB_MZERO); /* MAC sig. (later) */
218 244 mb_put_uint16le(mbp, 0); /* reserved */
219 245 mb_put_uint16le(mbp, rqp->sr_rqtid);
220 - mb_put_uint16le(mbp, rqp->sr_pid);
246 + mb_put_uint16le(mbp, (uint16_t)rqp->sr_pid);
221 247 mb_put_uint16le(mbp, rqp->sr_rquid);
222 248 mb_put_uint16le(mbp, rqp->sr_mid);
223 249
224 250 /* This will free the mblk from dupb. */
225 251 mb_done(mbp);
226 252 }
227 253
228 254 int
229 255 smb_rq_simple(struct smb_rq *rqp)
230 256 {
231 257 return (smb_rq_simple_timed(rqp, smb_timo_default));
232 258 }
233 259
234 260 /*
235 261 * Simple request-reply exchange
236 262 */
237 263 int
238 264 smb_rq_simple_timed(struct smb_rq *rqp, int timeout)
239 265 {
240 266 int error = EINVAL;
241 267
242 268 for (; ; ) {
243 269 /*
244 270 * Don't send any new requests if force unmount is underway.
245 271 * This check was moved into smb_rq_enqueue.
246 272 */
247 273 rqp->sr_flags &= ~SMBR_RESTART;
248 274 rqp->sr_timo = timeout; /* in seconds */
249 275 rqp->sr_state = SMBRQ_NOTSENT;
250 276 error = smb_rq_enqueue(rqp);
251 277 if (error) {
252 278 break;
253 279 }
254 280 error = smb_rq_reply(rqp);
255 281 if (!error)
256 282 break;
257 283 if ((rqp->sr_flags & (SMBR_RESTART | SMBR_NORESTART)) !=
258 284 SMBR_RESTART)
259 285 break;
260 286 if (rqp->sr_rexmit <= 0)
261 287 break;
262 288 SMBRQ_LOCK(rqp);
263 289 if (rqp->sr_share) {
264 290 (void) cv_reltimedwait(&rqp->sr_cond, &(rqp)->sr_lock,
265 291 SEC_TO_TICK(SMB_RCNDELAY), TR_CLOCK_TICK);
266 292
267 293 } else {
268 294 delay(SEC_TO_TICK(SMB_RCNDELAY));
269 295 }
270 296 SMBRQ_UNLOCK(rqp);
271 297 rqp->sr_rexmit--;
272 298 }
273 299 return (error);
|
↓ open down ↓ |
43 lines elided |
↑ open up ↑ |
274 300 }
275 301
276 302
277 303 static int
278 304 smb_rq_enqueue(struct smb_rq *rqp)
279 305 {
280 306 struct smb_vc *vcp = rqp->sr_vc;
281 307 struct smb_share *ssp = rqp->sr_share;
282 308 int error = 0;
283 309
310 + ASSERT((vcp->vc_flags & SMBV_SMB2) == 0);
311 +
284 312 /*
285 313 * Normal requests may initiate a reconnect,
286 314 * and/or wait for state changes to finish.
287 315 * Some requests set the NORECONNECT flag
288 316 * to avoid all that (i.e. tree discon)
289 317 */
290 318 if (rqp->sr_flags & SMBR_NORECONNECT) {
291 319 if (vcp->vc_state != SMBIOD_ST_VCACTIVE) {
292 320 SMBSDEBUG("bad vc_state=%d\n", vcp->vc_state);
293 321 return (ENOTCONN);
294 322 }
295 323 if (ssp != NULL &&
296 324 ((ssp->ss_flags & SMBS_CONNECTED) == 0))
297 325 return (ENOTCONN);
298 326 goto ok_out;
299 327 }
300 328
301 329 /*
302 330 * If we're not connected, initiate a reconnect
303 331 * and/or wait for an existing one to finish.
304 332 */
305 333 if (vcp->vc_state != SMBIOD_ST_VCACTIVE) {
306 334 error = smb_iod_reconnect(vcp);
307 335 if (error != 0)
308 336 return (error);
309 337 }
310 338
311 339 /*
312 340 * If this request has a "share" object
313 341 * that needs a tree connect, do it now.
314 342 */
315 343 if (ssp != NULL && (ssp->ss_flags & SMBS_CONNECTED) == 0) {
316 344 error = smb_share_tcon(ssp, rqp->sr_cred);
317 345 if (error)
|
↓ open down ↓ |
24 lines elided |
↑ open up ↑ |
318 346 return (error);
319 347 }
320 348
321 349 /*
322 350 * We now know what UID + TID to use.
323 351 * Store them in the request.
324 352 */
325 353 ok_out:
326 354 rqp->sr_rquid = vcp->vc_smbuid;
327 355 rqp->sr_rqtid = ssp ? ssp->ss_tid : SMB_TID_UNKNOWN;
328 - error = smb_iod_addrq(rqp);
356 + error = smb1_iod_addrq(rqp);
329 357
330 358 return (error);
331 359 }
332 360
333 361 /*
362 + * Used by the IOD thread during connection setup,
363 + * and for smb_echo after network timeouts. Note that
364 + * unlike smb_rq_simple, callers must check sr_error.
365 + */
366 +int
367 +smb_rq_internal(struct smb_rq *rqp, int timeout)
368 +{
369 + struct smb_vc *vcp = rqp->sr_vc;
370 + int error;
371 +
372 + ASSERT((vcp->vc_flags & SMBV_SMB2) == 0);
373 +
374 + rqp->sr_flags &= ~SMBR_RESTART;
375 + rqp->sr_timo = timeout; /* in seconds */
376 + rqp->sr_state = SMBRQ_NOTSENT;
377 +
378 + /*
379 + * In-line smb_rq_enqueue(rqp) here, as we don't want it
380 + * trying to reconnect etc. for an internal request.
381 + */
382 + rqp->sr_rquid = vcp->vc_smbuid;
383 + rqp->sr_rqtid = SMB_TID_UNKNOWN;
384 + rqp->sr_flags |= SMBR_INTERNAL;
385 + error = smb1_iod_addrq(rqp);
386 + if (error != 0)
387 + return (error);
388 +
389 + /*
390 + * In-line a variant of smb_rq_reply(rqp) here as we may
391 + * need to do custom parsing for SMB1-to-SMB2 negotiate.
392 + */
393 + if (rqp->sr_timo == SMBNOREPLYWAIT) {
394 + smb_iod_removerq(rqp);
395 + return (0);
396 + }
397 +
398 + error = smb_iod_waitrq_int(rqp);
399 + if (error)
400 + return (error);
401 +
402 + /*
403 + * If the request was signed, validate the
404 + * signature on the response.
405 + */
406 + if (rqp->sr_rqflags2 & SMB_FLAGS2_SECURITY_SIGNATURE) {
407 + error = smb_rq_verify(rqp);
408 + if (error)
409 + return (error);
410 + }
411 +
412 + /*
413 + * Parse the SMB header.
414 + */
415 + error = smb_rq_parsehdr(rqp);
416 +
417 + /*
418 + * Skip the error translation smb_rq_reply does.
419 + * Callers of this expect "raw" NT status.
420 + */
421 +
422 + return (error);
423 +}
424 +
425 +/*
334 426 * Mark location of the word count, which is filled in later by
335 427 * smb_rq_wend(). Also initialize the counter that it uses
336 428 * to figure out what value to fill in.
337 429 *
338 430 * Note that the word count happens to be 8-bit.
339 431 */
340 432 void
341 433 smb_rq_wstart(struct smb_rq *rqp)
342 434 {
343 435 rqp->sr_wcount = mb_reserve(&rqp->sr_rq, sizeof (uint8_t));
344 436 rqp->sr_rq.mb_count = 0;
345 437 }
346 438
347 439 void
348 440 smb_rq_wend(struct smb_rq *rqp)
349 441 {
350 442 uint_t wcnt;
351 443
352 444 if (rqp->sr_wcount == NULL) {
353 445 SMBSDEBUG("no wcount\n");
354 446 return;
355 447 }
356 448 wcnt = rqp->sr_rq.mb_count;
357 449 if (wcnt > 0x1ff)
358 450 SMBSDEBUG("word count too large (%d)\n", wcnt);
359 451 if (wcnt & 1)
360 452 SMBSDEBUG("odd word count\n");
361 453 /* Fill in the word count (8-bits) */
362 454 *rqp->sr_wcount = (wcnt >> 1);
363 455 }
364 456
365 457 /*
366 458 * Mark location of the byte count, which is filled in later by
367 459 * smb_rq_bend(). Also initialize the counter that it uses
368 460 * to figure out what value to fill in.
369 461 *
370 462 * Note that the byte count happens to be 16-bit.
371 463 */
372 464 void
373 465 smb_rq_bstart(struct smb_rq *rqp)
374 466 {
375 467 rqp->sr_bcount = mb_reserve(&rqp->sr_rq, sizeof (uint16_t));
376 468 rqp->sr_rq.mb_count = 0;
377 469 }
378 470
379 471 void
380 472 smb_rq_bend(struct smb_rq *rqp)
381 473 {
382 474 uint_t bcnt;
383 475
384 476 if (rqp->sr_bcount == NULL) {
385 477 SMBSDEBUG("no bcount\n");
386 478 return;
387 479 }
388 480 bcnt = rqp->sr_rq.mb_count;
389 481 if (bcnt > 0xffff)
390 482 SMBSDEBUG("byte count too large (%d)\n", bcnt);
|
↓ open down ↓ |
47 lines elided |
↑ open up ↑ |
391 483 /*
392 484 * Fill in the byte count (16-bits)
393 485 * The pointer is char * type due to
394 486 * typical off-by-one alignment.
395 487 */
396 488 rqp->sr_bcount[0] = bcnt & 0xFF;
397 489 rqp->sr_bcount[1] = (bcnt >> 8);
398 490 }
399 491
400 492 int
401 -smb_rq_intr(struct smb_rq *rqp)
402 -{
403 - if (rqp->sr_flags & SMBR_INTR)
404 - return (EINTR);
405 -
406 - return (0);
407 -}
408 -
409 -static int
410 493 smb_rq_getenv(struct smb_connobj *co,
411 494 struct smb_vc **vcpp, struct smb_share **sspp)
412 495 {
413 496 struct smb_vc *vcp = NULL;
414 497 struct smb_share *ssp = NULL;
415 498 int error = EINVAL;
416 499
417 500 if (co->co_flags & SMBO_GONE) {
418 501 SMBSDEBUG("zombie CO\n");
419 502 error = EINVAL;
420 503 goto out;
421 504 }
422 505
423 506 switch (co->co_level) {
424 507 case SMBL_SHARE:
425 508 ssp = CPTOSS(co);
426 509 if ((co->co_flags & SMBO_GONE) ||
427 510 co->co_parent == NULL) {
428 511 SMBSDEBUG("zombie share %s\n", ssp->ss_name);
429 512 break;
430 513 }
431 514 /* instead of recursion... */
432 515 co = co->co_parent;
433 516 /* FALLTHROUGH */
434 517 case SMBL_VC:
435 518 vcp = CPTOVC(co);
436 519 if ((co->co_flags & SMBO_GONE) ||
437 520 co->co_parent == NULL) {
438 521 SMBSDEBUG("zombie VC %s\n", vcp->vc_srvname);
439 522 break;
440 523 }
441 524 error = 0;
442 525 break;
443 526
444 527 default:
445 528 SMBSDEBUG("invalid level %d passed\n", co->co_level);
446 529 }
447 530
448 531 out:
449 532 if (!error) {
|
↓ open down ↓ |
30 lines elided |
↑ open up ↑ |
450 533 if (vcpp)
451 534 *vcpp = vcp;
452 535 if (sspp)
453 536 *sspp = ssp;
454 537 }
455 538
456 539 return (error);
457 540 }
458 541
459 542 /*
460 - * Wait for reply on the request
543 + * Wait for a reply to this request, then parse it.
461 544 */
462 545 static int
463 546 smb_rq_reply(struct smb_rq *rqp)
464 547 {
465 - struct mdchain *mdp = &rqp->sr_rp;
466 - u_int8_t tb;
467 - int error, rperror = 0;
548 + int error;
468 549
469 550 if (rqp->sr_timo == SMBNOREPLYWAIT) {
470 551 smb_iod_removerq(rqp);
471 552 return (0);
472 553 }
473 554
474 555 error = smb_iod_waitrq(rqp);
475 556 if (error)
476 557 return (error);
477 558
478 559 /*
479 560 * If the request was signed, validate the
480 561 * signature on the response.
|
↓ open down ↓ |
3 lines elided |
↑ open up ↑ |
481 562 */
482 563 if (rqp->sr_rqflags2 & SMB_FLAGS2_SECURITY_SIGNATURE) {
483 564 error = smb_rq_verify(rqp);
484 565 if (error)
485 566 return (error);
486 567 }
487 568
488 569 /*
489 570 * Parse the SMB header
490 571 */
491 - error = md_get_uint32le(mdp, NULL);
492 - if (error)
572 + error = smb_rq_parsehdr(rqp);
573 + if (error != 0)
493 574 return (error);
494 - error = md_get_uint8(mdp, &tb);
495 - error = md_get_uint32le(mdp, &rqp->sr_error);
496 - error = md_get_uint8(mdp, &rqp->sr_rpflags);
497 - error = md_get_uint16le(mdp, &rqp->sr_rpflags2);
498 - if (rqp->sr_rpflags2 & SMB_FLAGS2_ERR_STATUS) {
575 +
576 + if (rqp->sr_error != 0) {
577 + if (rqp->sr_rpflags2 & SMB_FLAGS2_ERR_STATUS) {
578 + error = smb_maperr32(rqp->sr_error);
579 + } else {
580 + uint8_t errClass = rqp->sr_error & 0xff;
581 + uint16_t errCode = rqp->sr_error >> 16;
582 + /* Convert to NT status */
583 + rqp->sr_error = smb_doserr2status(errClass, errCode);
584 + error = smb_maperror(errClass, errCode);
585 + }
586 + }
587 +
588 + if (error != 0) {
499 589 /*
500 590 * Do a special check for STATUS_BUFFER_OVERFLOW;
501 591 * it's not an error.
502 592 */
503 593 if (rqp->sr_error == NT_STATUS_BUFFER_OVERFLOW) {
504 594 /*
505 595 * Don't report it as an error to our caller;
506 596 * they can look at rqp->sr_error if they
507 597 * need to know whether we got a
508 598 * STATUS_BUFFER_OVERFLOW.
509 - * XXX - should we do that for all errors
510 - * where (error & 0xC0000000) is 0x80000000,
511 - * i.e. all warnings?
512 599 */
513 - rperror = 0;
514 - } else
515 - rperror = smb_maperr32(rqp->sr_error);
600 + rqp->sr_flags |= SMBR_MOREDATA;
601 + error = 0;
602 + }
516 603 } else {
517 - rqp->sr_errclass = rqp->sr_error & 0xff;
518 - rqp->sr_serror = rqp->sr_error >> 16;
519 - rperror = smb_maperror(rqp->sr_errclass, rqp->sr_serror);
520 - }
521 - if (rperror == EMOREDATA) {
522 - rperror = E2BIG;
523 - rqp->sr_flags |= SMBR_MOREDATA;
524 - } else
525 604 rqp->sr_flags &= ~SMBR_MOREDATA;
605 + }
526 606
527 - error = md_get_uint32le(mdp, NULL);
528 - error = md_get_uint32le(mdp, NULL);
529 - error = md_get_uint32le(mdp, NULL);
607 + return (error);
608 +}
530 609
531 - error = md_get_uint16le(mdp, &rqp->sr_rptid);
532 - error = md_get_uint16le(mdp, &rqp->sr_rppid);
533 - error = md_get_uint16le(mdp, &rqp->sr_rpuid);
610 +/*
611 + * Parse the SMB header
612 + */
613 +static int
614 +smb_rq_parsehdr(struct smb_rq *rqp)
615 +{
616 + struct mdchain mdp_save;
617 + struct mdchain *mdp = &rqp->sr_rp;
618 + u_int8_t tb, sig[4];
619 + int error;
620 +
621 + /*
622 + * Parse the signature. The reader already checked that
623 + * the signature is valid. Here we just have to check
624 + * for SMB1-to-SMB2 negotiate. Caller handles an EPROTO
625 + * as a signal that we got an SMB2 reply. If we return
626 + * EPROTO, rewind the mdchain back where it was.
627 + */
628 + mdp_save = *mdp;
629 + error = md_get_mem(mdp, sig, 4, MB_MSYSTEM);
630 + if (error)
631 + return (error);
632 + if (sig[0] != SMB_HDR_V1) {
633 + if (rqp->sr_cmd == SMB_COM_NEGOTIATE) {
634 + *mdp = mdp_save;
635 + return (EPROTO);
636 + }
637 + return (EBADRPC);
638 + }
639 +
640 + /* Check cmd */
641 + error = md_get_uint8(mdp, &tb);
642 + if (tb != rqp->sr_cmd)
643 + return (EBADRPC);
644 +
645 + md_get_uint32le(mdp, &rqp->sr_error);
646 + md_get_uint8(mdp, &rqp->sr_rpflags);
647 + md_get_uint16le(mdp, &rqp->sr_rpflags2);
648 +
649 + /* Skip: pid-high(2), MAC sig(8), reserved(2) */
650 + md_get_mem(mdp, NULL, 12, MB_MSYSTEM);
651 +
652 + md_get_uint16le(mdp, &rqp->sr_rptid);
653 + md_get_uint16le(mdp, &rqp->sr_rppid);
654 + md_get_uint16le(mdp, &rqp->sr_rpuid);
534 655 error = md_get_uint16le(mdp, &rqp->sr_rpmid);
535 656
536 - return ((error) ? error : rperror);
657 + return (error);
537 658 }
538 659
539 660
540 661 #define ALIGN4(a) (((a) + 3) & ~3)
541 662
542 663 /*
543 664 * TRANS2 request implementation
544 665 * TRANS implementation is in the "t2" routines
545 666 * NT_TRANSACTION implementation is the separate "nt" stuff
546 667 */
547 668 int
548 669 smb_t2_alloc(struct smb_connobj *layer, ushort_t setup, struct smb_cred *scred,
549 670 struct smb_t2rq **t2pp)
550 671 {
551 672 struct smb_t2rq *t2p;
552 673 int error;
553 674
554 675 t2p = (struct smb_t2rq *)kmem_alloc(sizeof (*t2p), KM_SLEEP);
555 676 if (t2p == NULL)
556 677 return (ENOMEM);
557 678 error = smb_t2_init(t2p, layer, &setup, 1, scred);
558 679 t2p->t2_flags |= SMBT2_ALLOCED;
559 680 if (error) {
560 681 smb_t2_done(t2p);
561 682 return (error);
562 683 }
563 684 *t2pp = t2p;
564 685 return (0);
565 686 }
566 687
567 688 int
568 689 smb_nt_alloc(struct smb_connobj *layer, ushort_t fn, struct smb_cred *scred,
569 690 struct smb_ntrq **ntpp)
570 691 {
571 692 struct smb_ntrq *ntp;
572 693 int error;
573 694
574 695 ntp = (struct smb_ntrq *)kmem_alloc(sizeof (*ntp), KM_SLEEP);
575 696 if (ntp == NULL)
576 697 return (ENOMEM);
577 698 error = smb_nt_init(ntp, layer, fn, scred);
578 699 mutex_init(&ntp->nt_lock, NULL, MUTEX_DRIVER, NULL);
579 700 cv_init(&ntp->nt_cond, NULL, CV_DEFAULT, NULL);
580 701 ntp->nt_flags |= SMBT2_ALLOCED;
581 702 if (error) {
582 703 smb_nt_done(ntp);
583 704 return (error);
584 705 }
585 706 *ntpp = ntp;
586 707 return (0);
587 708 }
588 709
589 710 int
590 711 smb_t2_init(struct smb_t2rq *t2p, struct smb_connobj *source, ushort_t *setup,
591 712 int setupcnt, struct smb_cred *scred)
592 713 {
593 714 int i;
594 715 int error;
595 716
596 717 bzero(t2p, sizeof (*t2p));
597 718 mutex_init(&t2p->t2_lock, NULL, MUTEX_DRIVER, NULL);
598 719 cv_init(&t2p->t2_cond, NULL, CV_DEFAULT, NULL);
599 720
600 721 t2p->t2_source = source;
601 722 t2p->t2_setupcount = (u_int16_t)setupcnt;
602 723 t2p->t2_setupdata = t2p->t2_setup;
603 724 for (i = 0; i < setupcnt; i++)
604 725 t2p->t2_setup[i] = setup[i];
605 726 t2p->t2_fid = 0xffff;
606 727 t2p->t2_cred = scred;
607 728 t2p->t2_share = (source->co_level == SMBL_SHARE ?
608 729 CPTOSS(source) : NULL); /* for smb up/down */
609 730 error = smb_rq_getenv(source, &t2p->t2_vc, NULL);
610 731 if (error)
611 732 return (error);
612 733 return (0);
613 734 }
614 735
615 736 int
616 737 smb_nt_init(struct smb_ntrq *ntp, struct smb_connobj *source, ushort_t fn,
617 738 struct smb_cred *scred)
618 739 {
619 740 int error;
620 741
621 742 bzero(ntp, sizeof (*ntp));
622 743 ntp->nt_source = source;
623 744 ntp->nt_function = fn;
624 745 ntp->nt_cred = scred;
625 746 ntp->nt_share = (source->co_level == SMBL_SHARE ?
626 747 CPTOSS(source) : NULL); /* for smb up/down */
627 748 error = smb_rq_getenv(source, &ntp->nt_vc, NULL);
628 749 if (error)
629 750 return (error);
630 751 return (0);
631 752 }
632 753
633 754 void
634 755 smb_t2_done(struct smb_t2rq *t2p)
635 756 {
636 757 mb_done(&t2p->t2_tparam);
637 758 mb_done(&t2p->t2_tdata);
638 759 md_done(&t2p->t2_rparam);
639 760 md_done(&t2p->t2_rdata);
640 761 mutex_destroy(&t2p->t2_lock);
641 762 cv_destroy(&t2p->t2_cond);
642 763 if (t2p->t2_flags & SMBT2_ALLOCED)
643 764 kmem_free(t2p, sizeof (*t2p));
644 765 }
645 766
646 767 void
647 768 smb_nt_done(struct smb_ntrq *ntp)
648 769 {
649 770 mb_done(&ntp->nt_tsetup);
650 771 mb_done(&ntp->nt_tparam);
651 772 mb_done(&ntp->nt_tdata);
652 773 md_done(&ntp->nt_rparam);
653 774 md_done(&ntp->nt_rdata);
654 775 cv_destroy(&ntp->nt_cond);
655 776 mutex_destroy(&ntp->nt_lock);
656 777 if (ntp->nt_flags & SMBT2_ALLOCED)
657 778 kmem_free(ntp, sizeof (*ntp));
658 779 }
659 780
660 781 /*
661 782 * Extract data [offset,count] from mtop and add to mdp.
662 783 */
663 784 static int
664 785 smb_t2_placedata(mblk_t *mtop, u_int16_t offset, u_int16_t count,
665 786 struct mdchain *mdp)
666 787 {
667 788 mblk_t *n;
668 789
669 790 n = m_copym(mtop, offset, count, M_WAITOK);
670 791 if (n == NULL)
671 792 return (EBADRPC);
672 793
673 794 if (mdp->md_top == NULL) {
674 795 md_initm(mdp, n);
675 796 } else
676 797 m_cat(mdp->md_top, n);
677 798
678 799 return (0);
679 800 }
680 801
681 802 static int
682 803 smb_t2_reply(struct smb_t2rq *t2p)
683 804 {
684 805 struct mdchain *mdp;
685 806 struct smb_rq *rqp = t2p->t2_rq;
686 807 int error, error2, totpgot, totdgot;
687 808 u_int16_t totpcount, totdcount, pcount, poff, doff, pdisp, ddisp;
688 809 u_int16_t tmp, bc, dcount;
689 810 u_int8_t wc;
690 811
691 812 t2p->t2_flags &= ~SMBT2_MOREDATA;
692 813
693 814 error = smb_rq_reply(rqp);
694 815 if (rqp->sr_flags & SMBR_MOREDATA)
695 816 t2p->t2_flags |= SMBT2_MOREDATA;
696 817 t2p->t2_sr_errclass = rqp->sr_errclass;
697 818 t2p->t2_sr_serror = rqp->sr_serror;
698 819 t2p->t2_sr_error = rqp->sr_error;
699 820 t2p->t2_sr_rpflags2 = rqp->sr_rpflags2;
700 821 if (error && !(rqp->sr_flags & SMBR_MOREDATA))
701 822 return (error);
702 823 /*
703 824 * Now we have to get all subsequent responses, if any.
704 825 * The CIFS specification says that they can be misordered,
705 826 * which is weird.
706 827 * TODO: timo
707 828 */
708 829 totpgot = totdgot = 0;
709 830 totpcount = totdcount = 0xffff;
710 831 mdp = &rqp->sr_rp;
711 832 for (;;) {
712 833 DTRACE_PROBE2(smb_trans_reply,
713 834 (smb_rq_t *), rqp, (mblk_t *), mdp->md_top);
714 835 m_dumpm(mdp->md_top);
715 836
716 837 if ((error2 = md_get_uint8(mdp, &wc)) != 0)
717 838 break;
718 839 if (wc < 10) {
719 840 error2 = ENOENT;
720 841 break;
721 842 }
722 843 if ((error2 = md_get_uint16le(mdp, &tmp)) != 0)
723 844 break;
724 845 if (totpcount > tmp)
725 846 totpcount = tmp;
726 847 if ((error2 = md_get_uint16le(mdp, &tmp)) != 0)
727 848 break;
728 849 if (totdcount > tmp)
729 850 totdcount = tmp;
730 851 if ((error2 = md_get_uint16le(mdp, &tmp)) != 0 || /* reserved */
731 852 (error2 = md_get_uint16le(mdp, &pcount)) != 0 ||
732 853 (error2 = md_get_uint16le(mdp, &poff)) != 0 ||
733 854 (error2 = md_get_uint16le(mdp, &pdisp)) != 0)
734 855 break;
735 856 if (pcount != 0 && pdisp != totpgot) {
736 857 SMBSDEBUG("Can't handle misordered parameters %d:%d\n",
737 858 pdisp, totpgot);
738 859 error2 = EINVAL;
739 860 break;
740 861 }
741 862 if ((error2 = md_get_uint16le(mdp, &dcount)) != 0 ||
742 863 (error2 = md_get_uint16le(mdp, &doff)) != 0 ||
743 864 (error2 = md_get_uint16le(mdp, &ddisp)) != 0)
744 865 break;
745 866 if (dcount != 0 && ddisp != totdgot) {
746 867 SMBSDEBUG("Can't handle misordered data: dcount %d\n",
747 868 dcount);
748 869 error2 = EINVAL;
749 870 break;
750 871 }
751 872
752 873 /* XXX: Skip setup words? We don't save them? */
753 874 md_get_uint8(mdp, &wc); /* SetupCount */
754 875 md_get_uint8(mdp, NULL); /* Reserved2 */
755 876 tmp = wc;
756 877 while (tmp--)
757 878 md_get_uint16le(mdp, NULL);
758 879
759 880 if ((error2 = md_get_uint16le(mdp, &bc)) != 0)
760 881 break;
761 882
762 883 /*
763 884 * There are pad bytes here, and the poff value
764 885 * indicates where the next data are found.
765 886 * No need to guess at the padding size.
766 887 */
767 888 if (pcount) {
768 889 error2 = smb_t2_placedata(mdp->md_top, poff,
769 890 pcount, &t2p->t2_rparam);
770 891 if (error2)
771 892 break;
772 893 }
773 894 totpgot += pcount;
774 895
775 896 if (dcount) {
776 897 error2 = smb_t2_placedata(mdp->md_top, doff,
777 898 dcount, &t2p->t2_rdata);
778 899 if (error2)
779 900 break;
780 901 }
781 902 totdgot += dcount;
782 903
783 904 if (totpgot >= totpcount && totdgot >= totdcount) {
784 905 error2 = 0;
785 906 t2p->t2_flags |= SMBT2_ALLRECV;
786 907 break;
787 908 }
788 909 /*
789 910 * We're done with this reply, look for the next one.
790 911 */
791 912 SMBRQ_LOCK(rqp);
792 913 md_next_record(&rqp->sr_rp);
793 914 SMBRQ_UNLOCK(rqp);
794 915 error2 = smb_rq_reply(rqp);
795 916 if (rqp->sr_flags & SMBR_MOREDATA)
796 917 t2p->t2_flags |= SMBT2_MOREDATA;
797 918 if (!error2)
798 919 continue;
799 920 t2p->t2_sr_errclass = rqp->sr_errclass;
800 921 t2p->t2_sr_serror = rqp->sr_serror;
801 922 t2p->t2_sr_error = rqp->sr_error;
802 923 t2p->t2_sr_rpflags2 = rqp->sr_rpflags2;
803 924 error = error2;
804 925 if (!(rqp->sr_flags & SMBR_MOREDATA))
805 926 break;
806 927 }
807 928 return (error ? error : error2);
808 929 }
809 930
810 931 static int
811 932 smb_nt_reply(struct smb_ntrq *ntp)
812 933 {
813 934 struct mdchain *mdp;
814 935 struct smb_rq *rqp = ntp->nt_rq;
815 936 int error, error2;
816 937 u_int32_t totpcount, totdcount, pcount, poff, doff, pdisp, ddisp;
817 938 u_int32_t tmp, dcount, totpgot, totdgot;
818 939 u_int16_t bc;
819 940 u_int8_t wc;
820 941
821 942 ntp->nt_flags &= ~SMBT2_MOREDATA;
822 943
823 944 error = smb_rq_reply(rqp);
824 945 if (rqp->sr_flags & SMBR_MOREDATA)
825 946 ntp->nt_flags |= SMBT2_MOREDATA;
826 947 ntp->nt_sr_error = rqp->sr_error;
827 948 ntp->nt_sr_rpflags2 = rqp->sr_rpflags2;
828 949 if (error && !(rqp->sr_flags & SMBR_MOREDATA))
829 950 return (error);
830 951 /*
831 952 * Now we have to get all subsequent responses. The CIFS specification
832 953 * says that they can be misordered which is weird.
833 954 * TODO: timo
834 955 */
835 956 totpgot = totdgot = 0;
836 957 totpcount = totdcount = 0xffffffff;
837 958 mdp = &rqp->sr_rp;
838 959 for (;;) {
839 960 DTRACE_PROBE2(smb_trans_reply,
840 961 (smb_rq_t *), rqp, (mblk_t *), mdp->md_top);
841 962 m_dumpm(mdp->md_top);
842 963
843 964 if ((error2 = md_get_uint8(mdp, &wc)) != 0)
844 965 break;
845 966 if (wc < 18) {
846 967 error2 = ENOENT;
847 968 break;
848 969 }
849 970 md_get_mem(mdp, NULL, 3, MB_MSYSTEM); /* reserved */
850 971 if ((error2 = md_get_uint32le(mdp, &tmp)) != 0)
851 972 break;
852 973 if (totpcount > tmp)
853 974 totpcount = tmp;
854 975 if ((error2 = md_get_uint32le(mdp, &tmp)) != 0)
855 976 break;
856 977 if (totdcount > tmp)
857 978 totdcount = tmp;
858 979 if ((error2 = md_get_uint32le(mdp, &pcount)) != 0 ||
859 980 (error2 = md_get_uint32le(mdp, &poff)) != 0 ||
860 981 (error2 = md_get_uint32le(mdp, &pdisp)) != 0)
861 982 break;
862 983 if (pcount != 0 && pdisp != totpgot) {
863 984 SMBSDEBUG("Can't handle misordered parameters %d:%d\n",
864 985 pdisp, totpgot);
865 986 error2 = EINVAL;
866 987 break;
867 988 }
868 989 if ((error2 = md_get_uint32le(mdp, &dcount)) != 0 ||
869 990 (error2 = md_get_uint32le(mdp, &doff)) != 0 ||
870 991 (error2 = md_get_uint32le(mdp, &ddisp)) != 0)
871 992 break;
872 993 if (dcount != 0 && ddisp != totdgot) {
873 994 SMBSDEBUG("Can't handle misordered data: dcount %d\n",
874 995 dcount);
875 996 error2 = EINVAL;
876 997 break;
877 998 }
878 999
879 1000 /* XXX: Skip setup words? We don't save them? */
880 1001 md_get_uint8(mdp, &wc); /* SetupCount */
881 1002 tmp = wc;
882 1003 while (tmp--)
883 1004 md_get_uint16le(mdp, NULL);
884 1005
885 1006 if ((error2 = md_get_uint16le(mdp, &bc)) != 0)
886 1007 break;
887 1008
888 1009 /*
889 1010 * There are pad bytes here, and the poff value
890 1011 * indicates where the next data are found.
891 1012 * No need to guess at the padding size.
892 1013 */
893 1014 if (pcount) {
894 1015 error2 = smb_t2_placedata(mdp->md_top, poff, pcount,
895 1016 &ntp->nt_rparam);
896 1017 if (error2)
897 1018 break;
898 1019 }
899 1020 totpgot += pcount;
900 1021
901 1022 if (dcount) {
902 1023 error2 = smb_t2_placedata(mdp->md_top, doff, dcount,
903 1024 &ntp->nt_rdata);
904 1025 if (error2)
905 1026 break;
906 1027 }
907 1028 totdgot += dcount;
908 1029
909 1030 if (totpgot >= totpcount && totdgot >= totdcount) {
910 1031 error2 = 0;
911 1032 ntp->nt_flags |= SMBT2_ALLRECV;
912 1033 break;
913 1034 }
914 1035 /*
915 1036 * We're done with this reply, look for the next one.
916 1037 */
917 1038 SMBRQ_LOCK(rqp);
918 1039 md_next_record(&rqp->sr_rp);
919 1040 SMBRQ_UNLOCK(rqp);
920 1041 error2 = smb_rq_reply(rqp);
921 1042 if (rqp->sr_flags & SMBR_MOREDATA)
922 1043 ntp->nt_flags |= SMBT2_MOREDATA;
923 1044 if (!error2)
924 1045 continue;
925 1046 ntp->nt_sr_error = rqp->sr_error;
926 1047 ntp->nt_sr_rpflags2 = rqp->sr_rpflags2;
927 1048 error = error2;
928 1049 if (!(rqp->sr_flags & SMBR_MOREDATA))
929 1050 break;
930 1051 }
931 1052 return (error ? error : error2);
932 1053 }
933 1054
934 1055 /*
935 1056 * Perform a full round of TRANS2 request
936 1057 */
/*
 * Perform a full round of TRANS2 request
 *
 * Marshals the primary SMB_COM_TRANSACTION(2) request, sending as much
 * of the caller's parameter (t2_tparam) and data (t2_tdata) mbchains
 * as fits in vc_txmax, then sends SECONDARY requests for the remainder,
 * and finally collects the response via smb_t2_reply().
 *
 * Returns 0 on success or an errno value.  On failure (and no
 * SMBT2_MOREDATA) any partial reply chains are released and
 * SMBT2_RESTART may be set so the caller can retry.
 */
static int
smb_t2_request_int(struct smb_t2rq *t2p)
{
	struct smb_vc *vcp = t2p->t2_vc;
	struct smb_cred *scred = t2p->t2_cred;
	struct mbchain *mbp;
	struct mdchain *mdp, mbparam, mbdata;
	mblk_t *m;
	struct smb_rq *rqp;
	int totpcount, leftpcount, totdcount, leftdcount, len, txmax, i;
	int error, doff, poff, txdcount, txpcount, nmlen, nmsize;

	/* Measure the outgoing parameter chain (16-bit count on the wire). */
	m = t2p->t2_tparam.mb_top;
	if (m) {
		md_initm(&mbparam, m);	/* do not free it! */
		totpcount = m_fixhdr(m);
		if (totpcount > 0xffff)		/* maxvalue for ushort_t */
			return (EINVAL);
	} else
		totpcount = 0;
	/* Likewise for the outgoing data chain. */
	m = t2p->t2_tdata.mb_top;
	if (m) {
		md_initm(&mbdata, m);	/* do not free it! */
		totdcount = m_fixhdr(m);
		if (totdcount > 0xffff)
			return (EINVAL);
	} else
		totdcount = 0;
	leftdcount = totdcount;
	leftpcount = totpcount;
	txmax = vcp->vc_txmax;
	/* Named transactions use SMB_COM_TRANSACTION, unnamed TRANSACTION2. */
	error = smb_rq_alloc(t2p->t2_source, t2p->t_name ?
	    SMB_COM_TRANSACTION : SMB_COM_TRANSACTION2, scred, &rqp);
	if (error)
		return (error);
	rqp->sr_timo = smb_timo_default;
	rqp->sr_flags |= SMBR_MULTIPACKET;
	t2p->t2_rq = rqp;
	mbp = &rqp->sr_rq;
	smb_rq_wstart(rqp);
	mb_put_uint16le(mbp, totpcount);
	mb_put_uint16le(mbp, totdcount);
	mb_put_uint16le(mbp, t2p->t2_maxpcount);
	mb_put_uint16le(mbp, t2p->t2_maxdcount);
	mb_put_uint8(mbp, t2p->t2_maxscount);
	mb_put_uint8(mbp, 0);	/* reserved */
	mb_put_uint16le(mbp, 0);	/* flags */
	mb_put_uint32le(mbp, 0);	/* Timeout */
	mb_put_uint16le(mbp, 0);	/* reserved 2 */
	len = mb_fixhdr(mbp);

	/*
	 * Now we know the size of the trans overhead stuff:
	 * ALIGN4(len + 5 * 2 + setupcount * 2 + 2 + nmsize),
	 * where nmsize is the OTW size of the name, including
	 * the unicode null terminator and any alignment.
	 * Use this to decide which parts (and how much)
	 * can go into this request: params, data
	 */
	nmlen = t2p->t_name ? t2p->t_name_len : 0;
	nmsize = nmlen + 1; /* null term. */
	if (SMB_UNICODE_STRINGS(vcp)) {
		nmsize *= 2;
		/* we know put_dmem will need to align */
		nmsize += 1;
	}
	len = ALIGN4(len + 5 * 2 + t2p->t2_setupcount * 2 + 2 + nmsize);
	if (len + leftpcount > txmax) {
		/* Params alone overflow: send what fits, no data yet. */
		txpcount = min(leftpcount, txmax - len);
		poff = len;
		txdcount = 0;
		doff = 0;
	} else {
		/* All params fit; fill the rest with data. */
		txpcount = leftpcount;
		poff = txpcount ? len : 0;
		/*
		 * Other client traffic seems to "ALIGN2" here.  The extra
		 * 2 byte pad we use has no observed downside and may be
		 * required for some old servers(?)
		 */
		len = ALIGN4(len + txpcount);
		txdcount = min(leftdcount, txmax - len);
		doff = txdcount ? len : 0;
	}
	leftpcount -= txpcount;
	leftdcount -= txdcount;
	mb_put_uint16le(mbp, txpcount);
	mb_put_uint16le(mbp, poff);
	mb_put_uint16le(mbp, txdcount);
	mb_put_uint16le(mbp, doff);
	mb_put_uint8(mbp, t2p->t2_setupcount);
	mb_put_uint8(mbp, 0);
	for (i = 0; i < t2p->t2_setupcount; i++) {
		mb_put_uint16le(mbp, t2p->t2_setupdata[i]);
	}
	smb_rq_wend(rqp);
	smb_rq_bstart(rqp);
	if (t2p->t_name) {
		/* Put the string and terminating null. */
		error = smb_put_dmem(mbp, vcp, t2p->t_name, nmlen + 1,
		    SMB_CS_NONE, NULL);
	} else {
		/* nmsize accounts for padding, char size. */
		error = mb_put_mem(mbp, NULL, nmsize, MB_MZERO);
	}
	if (error)
		goto freerq;
	/* Pad to 4-byte alignment, then append this request's param bytes. */
	len = mb_fixhdr(mbp);
	if (txpcount) {
		mb_put_mem(mbp, NULL, ALIGN4(len) - len, MB_MZERO);
		error = md_get_mbuf(&mbparam, txpcount, &m);
		SMBSDEBUG("%d:%d:%d\n", error, txpcount, txmax);
		if (error)
			goto freerq;
		mb_put_mbuf(mbp, m);
	}
	/* Same for this request's data bytes. */
	len = mb_fixhdr(mbp);
	if (txdcount) {
		mb_put_mem(mbp, NULL, ALIGN4(len) - len, MB_MZERO);
		error = md_get_mbuf(&mbdata, txdcount, &m);
		if (error)
			goto freerq;
		mb_put_mbuf(mbp, m);
	}
	smb_rq_bend(rqp);	/* incredible, but thats it... */
	error = smb_rq_enqueue(rqp);
	if (error)
		goto freerq;
	if (leftpcount || leftdcount) {
		error = smb_rq_reply(rqp);
		if (error)
			goto bad;
		/*
		 * this is an interim response, ignore it.
		 */
		SMBRQ_LOCK(rqp);
		md_next_record(&rqp->sr_rp);
		SMBRQ_UNLOCK(rqp);
	}
	/* Send SECONDARY requests until all params and data have gone out. */
	while (leftpcount || leftdcount) {
		error = smb_rq_new(rqp, t2p->t_name ?
		    SMB_COM_TRANSACTION_SECONDARY :
		    SMB_COM_TRANSACTION2_SECONDARY);
		if (error)
			goto bad;
		mbp = &rqp->sr_rq;
		smb_rq_wstart(rqp);
		mb_put_uint16le(mbp, totpcount);
		mb_put_uint16le(mbp, totdcount);
		len = mb_fixhdr(mbp);
		/*
		 * now we have known packet size as
		 * ALIGN4(len + 7 * 2 + 2) for T2 request, and -2 for T one,
		 * and need to decide which parts should go into request
		 */
		len = ALIGN4(len + 6 * 2 + 2);
		if (t2p->t_name == NULL)
			len += 2;	/* room for the FID word below */
		if (len + leftpcount > txmax) {
			txpcount = min(leftpcount, txmax - len);
			poff = len;
			txdcount = 0;
			doff = 0;
		} else {
			txpcount = leftpcount;
			poff = txpcount ? len : 0;
			len = ALIGN4(len + txpcount);
			txdcount = min(leftdcount, txmax - len);
			doff = txdcount ? len : 0;
		}
		mb_put_uint16le(mbp, txpcount);
		mb_put_uint16le(mbp, poff);
		/* ParameterDisplacement: how much was sent already. */
		mb_put_uint16le(mbp, totpcount - leftpcount);
		mb_put_uint16le(mbp, txdcount);
		mb_put_uint16le(mbp, doff);
		mb_put_uint16le(mbp, totdcount - leftdcount);
		leftpcount -= txpcount;
		leftdcount -= txdcount;
		if (t2p->t_name == NULL)
			mb_put_uint16le(mbp, t2p->t2_fid);
		smb_rq_wend(rqp);
		smb_rq_bstart(rqp);
		mb_put_uint8(mbp, 0);	/* name */
		len = mb_fixhdr(mbp);
		if (txpcount) {
			mb_put_mem(mbp, NULL, ALIGN4(len) - len, MB_MZERO);
			error = md_get_mbuf(&mbparam, txpcount, &m);
			if (error)
				goto bad;
			mb_put_mbuf(mbp, m);
		}
		len = mb_fixhdr(mbp);
		if (txdcount) {
			mb_put_mem(mbp, NULL, ALIGN4(len) - len, MB_MZERO);
			error = md_get_mbuf(&mbdata, txdcount, &m);
			if (error)
				goto bad;
			mb_put_mbuf(mbp, m);
		}
		smb_rq_bend(rqp);
		error = smb1_iod_multirq(rqp);
		if (error)
			goto bad;
	}	/* while left params or data */
	error = smb_t2_reply(t2p);
	if (error && !(t2p->t2_flags & SMBT2_MOREDATA))
		goto bad;
	/* Rewind the received chains so the caller can parse from the top. */
	mdp = &t2p->t2_rdata;
	if (mdp->md_top) {
		md_initm(mdp, mdp->md_top);
	}
	mdp = &t2p->t2_rparam;
	if (mdp->md_top) {
		md_initm(mdp, mdp->md_top);
	}
bad:
	smb_iod_removerq(rqp);
freerq:
	if (error && !(t2p->t2_flags & SMBT2_MOREDATA)) {
		if (rqp->sr_flags & SMBR_RESTART)
			t2p->t2_flags |= SMBT2_RESTART;
		md_done(&t2p->t2_rparam);
		md_done(&t2p->t2_rdata);
	}
	smb_rq_done(rqp);
	return (error);
}
1164 1285
1165 1286
1166 1287 /*
1167 1288 * Perform a full round of NT_TRANSACTION request
1168 1289 */
/*
 * Perform a full round of NT_TRANSACTION request
 *
 * Marshals the primary SMB_COM_NT_TRANSACT request (setup words,
 * parameters, data), sends SECONDARY requests for whatever did not
 * fit in vc_txmax, then collects the response via smb_nt_reply().
 *
 * Returns 0 on success or an errno value.  On failure (and no
 * SMBT2_MOREDATA) partial reply chains are released and
 * SMBT2_RESTART may be set so the caller can retry.
 */
static int
smb_nt_request_int(struct smb_ntrq *ntp)
{
	struct smb_vc *vcp = ntp->nt_vc;
	struct smb_cred *scred = ntp->nt_cred;
	struct mbchain *mbp;
	struct mdchain *mdp, mbsetup, mbparam, mbdata;
	mblk_t *m;
	struct smb_rq *rqp;
	int totpcount, leftpcount, totdcount, leftdcount, len, txmax;
	int error, doff, poff, txdcount, txpcount;
	int totscount;

	/* Setup words: at most 0xff 16-bit words on the wire. */
	m = ntp->nt_tsetup.mb_top;
	if (m) {
		md_initm(&mbsetup, m);	/* do not free it! */
		totscount = m_fixhdr(m);
		if (totscount > 2 * 0xff)
			return (EINVAL);
	} else
		totscount = 0;
	/* Parameter and data counts are 32-bit in NT_TRANSACT. */
	m = ntp->nt_tparam.mb_top;
	if (m) {
		md_initm(&mbparam, m);	/* do not free it! */
		totpcount = m_fixhdr(m);
		if (totpcount > 0x7fffffff)
			return (EINVAL);
	} else
		totpcount = 0;
	m = ntp->nt_tdata.mb_top;
	if (m) {
		md_initm(&mbdata, m);	/* do not free it! */
		totdcount = m_fixhdr(m);
		if (totdcount > 0x7fffffff)
			return (EINVAL);
	} else
		totdcount = 0;
	leftdcount = totdcount;
	leftpcount = totpcount;
	txmax = vcp->vc_txmax;
	error = smb_rq_alloc(ntp->nt_source, SMB_COM_NT_TRANSACT, scred, &rqp);
	if (error)
		return (error);
	rqp->sr_timo = smb_timo_default;
	rqp->sr_flags |= SMBR_MULTIPACKET;
	ntp->nt_rq = rqp;
	mbp = &rqp->sr_rq;
	smb_rq_wstart(rqp);
	mb_put_uint8(mbp, ntp->nt_maxscount);
	mb_put_uint16le(mbp, 0);	/* reserved (flags?) */
	mb_put_uint32le(mbp, totpcount);
	mb_put_uint32le(mbp, totdcount);
	mb_put_uint32le(mbp, ntp->nt_maxpcount);
	mb_put_uint32le(mbp, ntp->nt_maxdcount);
	len = mb_fixhdr(mbp);
	/*
	 * now we have known packet size as
	 * ALIGN4(len + 4 * 4 + 1 + 2 + ((totscount+1)&~1) + 2),
	 * and need to decide which parts should go into the first request
	 */
	len = ALIGN4(len + 4 * 4 + 1 + 2 + ((totscount+1)&~1) + 2);
	if (len + leftpcount > txmax) {
		/* Params alone overflow: send what fits, no data yet. */
		txpcount = min(leftpcount, txmax - len);
		poff = len;
		txdcount = 0;
		doff = 0;
	} else {
		/* All params fit; fill the rest with data. */
		txpcount = leftpcount;
		poff = txpcount ? len : 0;
		len = ALIGN4(len + txpcount);
		txdcount = min(leftdcount, txmax - len);
		doff = txdcount ? len : 0;
	}
	leftpcount -= txpcount;
	leftdcount -= txdcount;
	mb_put_uint32le(mbp, txpcount);
	mb_put_uint32le(mbp, poff);
	mb_put_uint32le(mbp, txdcount);
	mb_put_uint32le(mbp, doff);
	mb_put_uint8(mbp, (totscount+1)/2);	/* SetupCount in words */
	mb_put_uint16le(mbp, ntp->nt_function);
	if (totscount) {
		error = md_get_mbuf(&mbsetup, totscount, &m);
		SMBSDEBUG("%d:%d:%d\n", error, totscount, txmax);
		if (error)
			goto freerq;
		mb_put_mbuf(mbp, m);
		if (totscount & 1)
			mb_put_uint8(mbp, 0);	/* setup is in words */
	}
	smb_rq_wend(rqp);
	smb_rq_bstart(rqp);
	/* Pad to 4-byte alignment, then append this request's param bytes. */
	len = mb_fixhdr(mbp);
	if (txpcount) {
		mb_put_mem(mbp, NULL, ALIGN4(len) - len, MB_MZERO);
		error = md_get_mbuf(&mbparam, txpcount, &m);
		SMBSDEBUG("%d:%d:%d\n", error, txpcount, txmax);
		if (error)
			goto freerq;
		mb_put_mbuf(mbp, m);
	}
	/* Same for this request's data bytes. */
	len = mb_fixhdr(mbp);
	if (txdcount) {
		mb_put_mem(mbp, NULL, ALIGN4(len) - len, MB_MZERO);
		error = md_get_mbuf(&mbdata, txdcount, &m);
		if (error)
			goto freerq;
		mb_put_mbuf(mbp, m);
	}
	smb_rq_bend(rqp);	/* incredible, but thats it... */
	error = smb_rq_enqueue(rqp);
	if (error)
		goto freerq;
	if (leftpcount || leftdcount) {
		error = smb_rq_reply(rqp);
		if (error)
			goto bad;
		/*
		 * this is an interim response, ignore it.
		 */
		SMBRQ_LOCK(rqp);
		md_next_record(&rqp->sr_rp);
		SMBRQ_UNLOCK(rqp);
	}
	/* Send SECONDARY requests until all params and data have gone out. */
	while (leftpcount || leftdcount) {
		error = smb_rq_new(rqp, SMB_COM_NT_TRANSACT_SECONDARY);
		if (error)
			goto bad;
		mbp = &rqp->sr_rq;
		smb_rq_wstart(rqp);
		mb_put_mem(mbp, NULL, 3, MB_MZERO);	/* reserved */
		mb_put_uint32le(mbp, totpcount);
		mb_put_uint32le(mbp, totdcount);
		len = mb_fixhdr(mbp);
		/*
		 * now we have known packet size as
		 * ALIGN4(len + 6 * 4 + 2)
		 * and need to decide which parts should go into request
		 */
		len = ALIGN4(len + 6 * 4 + 2);
		if (len + leftpcount > txmax) {
			txpcount = min(leftpcount, txmax - len);
			poff = len;
			txdcount = 0;
			doff = 0;
		} else {
			txpcount = leftpcount;
			poff = txpcount ? len : 0;
			len = ALIGN4(len + txpcount);
			txdcount = min(leftdcount, txmax - len);
			doff = txdcount ? len : 0;
		}
		mb_put_uint32le(mbp, txpcount);
		mb_put_uint32le(mbp, poff);
		/* ParameterDisplacement: how much was sent already. */
		mb_put_uint32le(mbp, totpcount - leftpcount);
		mb_put_uint32le(mbp, txdcount);
		mb_put_uint32le(mbp, doff);
		mb_put_uint32le(mbp, totdcount - leftdcount);
		leftpcount -= txpcount;
		leftdcount -= txdcount;
		smb_rq_wend(rqp);
		smb_rq_bstart(rqp);
		len = mb_fixhdr(mbp);
		if (txpcount) {
			mb_put_mem(mbp, NULL, ALIGN4(len) - len, MB_MZERO);
			error = md_get_mbuf(&mbparam, txpcount, &m);
			if (error)
				goto bad;
			mb_put_mbuf(mbp, m);
		}
		len = mb_fixhdr(mbp);
		if (txdcount) {
			mb_put_mem(mbp, NULL, ALIGN4(len) - len, MB_MZERO);
			error = md_get_mbuf(&mbdata, txdcount, &m);
			if (error)
				goto bad;
			mb_put_mbuf(mbp, m);
		}
		smb_rq_bend(rqp);
		error = smb1_iod_multirq(rqp);
		if (error)
			goto bad;
	}	/* while left params or data */
	error = smb_nt_reply(ntp);
	if (error && !(ntp->nt_flags & SMBT2_MOREDATA))
		goto bad;
	/* Rewind the received chains so the caller can parse from the top. */
	mdp = &ntp->nt_rdata;
	if (mdp->md_top) {
		md_initm(mdp, mdp->md_top);
	}
	mdp = &ntp->nt_rparam;
	if (mdp->md_top) {
		md_initm(mdp, mdp->md_top);
	}
bad:
	smb_iod_removerq(rqp);
freerq:
	if (error && !(ntp->nt_flags & SMBT2_MOREDATA)) {
		if (rqp->sr_flags & SMBR_RESTART)
			ntp->nt_flags |= SMBT2_RESTART;
		md_done(&ntp->nt_rparam);
		md_done(&ntp->nt_rdata);
	}
	smb_rq_done(rqp);
	return (error);
}
1375 1496
1376 1497 int
1377 1498 smb_t2_request(struct smb_t2rq *t2p)
1378 1499 {
1379 1500 int error = EINVAL, i;
1380 1501
1381 1502 for (i = 0; ; ) {
1382 1503 /*
1383 1504 * Don't send any new requests if force unmount is underway.
1384 1505 * This check was moved into smb_rq_enqueue, called by
1385 1506 * smb_t2_request_int()
1386 1507 */
1387 1508 t2p->t2_flags &= ~SMBT2_RESTART;
1388 1509 error = smb_t2_request_int(t2p);
1389 1510 if (!error)
1390 1511 break;
1391 1512 if ((t2p->t2_flags & (SMBT2_RESTART | SMBT2_NORESTART)) !=
1392 1513 SMBT2_RESTART)
1393 1514 break;
1394 1515 if (++i > SMBMAXRESTARTS)
1395 1516 break;
1396 1517 mutex_enter(&(t2p)->t2_lock);
1397 1518 if (t2p->t2_share) {
1398 1519 (void) cv_reltimedwait(&t2p->t2_cond, &(t2p)->t2_lock,
1399 1520 SEC_TO_TICK(SMB_RCNDELAY), TR_CLOCK_TICK);
1400 1521 } else {
1401 1522 delay(SEC_TO_TICK(SMB_RCNDELAY));
1402 1523 }
1403 1524 mutex_exit(&(t2p)->t2_lock);
1404 1525 }
1405 1526 return (error);
1406 1527 }
1407 1528
1408 1529
1409 1530 int
1410 1531 smb_nt_request(struct smb_ntrq *ntp)
1411 1532 {
1412 1533 int error = EINVAL, i;
1413 1534
1414 1535 for (i = 0; ; ) {
1415 1536 /*
1416 1537 * Don't send any new requests if force unmount is underway.
1417 1538 * This check was moved into smb_rq_enqueue, called by
1418 1539 * smb_nt_request_int()
1419 1540 */
1420 1541 ntp->nt_flags &= ~SMBT2_RESTART;
1421 1542 error = smb_nt_request_int(ntp);
1422 1543 if (!error)
1423 1544 break;
1424 1545 if ((ntp->nt_flags & (SMBT2_RESTART | SMBT2_NORESTART)) !=
1425 1546 SMBT2_RESTART)
1426 1547 break;
1427 1548 if (++i > SMBMAXRESTARTS)
1428 1549 break;
1429 1550 mutex_enter(&(ntp)->nt_lock);
|
↓ open down ↓ |
71 lines elided |
↑ open up ↑ |
1430 1551 if (ntp->nt_share) {
1431 1552 (void) cv_reltimedwait(&ntp->nt_cond, &(ntp)->nt_lock,
1432 1553 SEC_TO_TICK(SMB_RCNDELAY), TR_CLOCK_TICK);
1433 1554
1434 1555 } else {
1435 1556 delay(SEC_TO_TICK(SMB_RCNDELAY));
1436 1557 }
1437 1558 mutex_exit(&(ntp)->nt_lock);
1438 1559 }
1439 1560 return (error);
1561 +}
1562 +
/*
 * Run an SMB transact named pipe.
 * Note: send_mb is consumed.
 *
 * Issues TRANS_TRANSACT_NAMED_PIPE on the pipe identified by fid,
 * sending the contents of send_mb as the transaction data.  On
 * success, any returned data is handed to the caller in recv_md and
 * its length stored in *data_out_sz (which on entry holds the maximum
 * the caller will accept).  *more is set to 1 when the server reports
 * NT_STATUS_BUFFER_OVERFLOW, i.e. more pipe data remains; it is never
 * cleared here, so the caller must initialize it.
 *
 * Returns 0 or an errno value; *data_out_sz is zeroed on failure.
 */
int
smb_t2_xnp(struct smb_share *ssp, uint16_t fid,
    struct mbchain *send_mb, struct mdchain *recv_md,
    uint32_t *data_out_sz, /* max / returned */
    uint32_t *more, struct smb_cred *scrp)
{
	struct smb_t2rq *t2p = NULL;
	mblk_t *m;
	uint16_t setup[2];
	int err;

	setup[0] = TRANS_TRANSACT_NAMED_PIPE;
	setup[1] = fid;

	t2p = kmem_alloc(sizeof (*t2p), KM_SLEEP);
	err = smb_t2_init(t2p, SSTOCP(ssp), setup, 2, scrp);
	if (err) {
		*data_out_sz = 0;
		goto out;
	}

	t2p->t2_setupcount = 2;
	t2p->t2_setupdata = setup;

	/* Transaction name for the legacy SMB1 pipe protocol. */
	t2p->t_name = "\\PIPE\\";
	t2p->t_name_len = 6;

	t2p->t2_maxscount = 0;
	t2p->t2_maxpcount = 0;
	/*
	 * NOTE(review): silent narrowing to 16 bits; a *data_out_sz
	 * above 0xffff would wrap — presumably callers never pass
	 * that much.  TODO confirm.
	 */
	t2p->t2_maxdcount = (uint16_t)*data_out_sz;

	/* Transmit parameters (none) */

	/*
	 * Transmit data
	 *
	 * Copy the mb, and clear the source so we
	 * don't end up with a double free.
	 */
	t2p->t2_tdata = *send_mb;
	bzero(send_mb, sizeof (*send_mb));

	/*
	 * Run the request
	 */
	err = smb_t2_request(t2p);

	/* No returned parameters. */

	if (err == 0 && (m = t2p->t2_rdata.md_top) != NULL) {
		/*
		 * Received data
		 *
		 * Copy the mdchain, and clear the source so we
		 * don't end up with a double free.
		 */
		*data_out_sz = msgdsize(m);
		md_initm(recv_md, m);
		t2p->t2_rdata.md_top = NULL;
	} else {
		*data_out_sz = 0;
	}

	/* Server says more pipe data remains; caller should read again. */
	if (t2p->t2_sr_error == NT_STATUS_BUFFER_OVERFLOW)
		*more = 1;

out:
	if (t2p != NULL) {
		/* Note: t2p->t_name no longer allocated */
		smb_t2_done(t2p);
		kmem_free(t2p, sizeof (*t2p));
	}

	return (err);
}
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX