NEX-19225 SMB client 2.1 hits redzone panic
Reviewed by: Gordon Ross <gordon.ross@nexenta.com>
Reviewed by: Joyce McIntosh <joyce.mcintosh@nexenta.com>
NEX-14666 Need to provide SMB 2.1 Client
NEX-17187 panic in smbfs_acl_store
NEX-17231 smbfs create xattr files finds wrong file
NEX-17224 smbfs lookup EINVAL should be ENOENT
NEX-17260 SMB1 client fails to list directory after NEX-14666
Reviewed by: Evan Layton <evan.layton@nexenta.com>
Reviewed by: Matt Barden <matt.barden@nexenta.com>
Reviewed by: Rick McNeal <rick.mcneal@nexenta.com>
Reviewed by: Saso Kiselkov <saso.kiselkov@nexenta.com>
Reviewed by: Joyce McIntosh <joyce.mcintosh@nexenta.com>
and: (cleanup)
--- old/usr/src/uts/common/fs/smbclnt/netsmb/subr_mchain.c
+++ new/usr/src/uts/common/fs/smbclnt/netsmb/subr_mchain.c
1 1 /*
2 2 * Copyright (c) 2000, 2001 Boris Popov
3 3 * All rights reserved.
4 4 *
5 5 * Redistribution and use in source and binary forms, with or without
6 6 * modification, are permitted provided that the following conditions
7 7 * are met:
8 8 * 1. Redistributions of source code must retain the above copyright
9 9 * notice, this list of conditions and the following disclaimer.
10 10 * 2. Redistributions in binary form must reproduce the above copyright
11 11 * notice, this list of conditions and the following disclaimer in the
12 12 * documentation and/or other materials provided with the distribution.
13 13 * 3. All advertising materials mentioning features or use of this software
14 14 * must display the following acknowledgement:
15 15 * This product includes software developed by Boris Popov.
16 16 * 4. Neither the name of the author nor the names of any co-contributors
17 17 * may be used to endorse or promote products derived from this software
18 18 * without specific prior written permission.
19 19 *
20 20 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
21 21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 23 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
24 24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 30 * SUCH DAMAGE.
31 31 *
32 32 * $FreeBSD: src/sys/kern/subr_mchain.c,v 1.1 2001/02/24 15:44:29 bp Exp $
33 33 */
34 34
35 35 /*
36 - * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
37 36 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
38 37 * Use is subject to license terms.
38 + *
39 + * Copyright 2018 Nexenta Systems, Inc. All rights reserved.
39 40 */
40 41
41 42 #include <sys/param.h>
42 43 #include <sys/systm.h>
43 44 #include <sys/errno.h>
44 45 #include <sys/uio.h>
45 46 #include <sys/types.h>
46 47 #include <sys/stream.h>
47 48 #include <sys/strsun.h>
48 49 #include <sys/strsubr.h>
49 50 #include <sys/sunddi.h>
50 51 #include <sys/cmn_err.h>
51 52
52 53 #include <netsmb/smb_osdep.h>
53 54 #include <netsmb/mchain.h>
54 55
55 56 #include <netsmb/smb.h>
56 57 #include <netsmb/smb_conn.h>
57 58 #include <netsmb/smb_subr.h>
58 59
59 60 /* BEGIN CSTYLED */
60 61 /*
61 62 * BSD-style mbufs, vs SysV-style mblks:
62 63 * One big difference: the mbuf payload is:
63 64 * m_data ... (m_data + m_len)
64 65 * In Unix STREAMS, the mblk payload is:
65 66 * b_rptr ... b_wptr
66 67 *
67 68 * Here are some handy conversion notes:
68 69 *
69 70 * struct mbuf struct mblk
70 71 * m->m_next m->b_cont
71 72 * m->m_nextpkt m->b_next
72 73 * m->m_data m->b_rptr
73 74 * m->m_len MBLKL(m)
74 75 * m->m_dat[] m->b_datap->db_base
75 76 * &m->m_dat[MLEN] m->b_datap->db_lim
76 77 * M_TRAILINGSPACE(m) MBLKTAIL(m)
77 78 * m_freem(m) freemsg(m)
78 79 *
79 80 * Note that mbuf chains also have a special "packet" header,
80 81 * which has the length of the whole message. In STREAMS one
81 82 * typically just calls msgdsize(m) to get that.
82 83 */
83 84 /* END CSTYLED */
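For readers more used to BSD mbufs, here is a minimal sketch (illustrative only, not part of this change) of how the table above maps onto STREAMS in practice; the helper name is hypothetical, and msgdsize(9F) already computes this for a whole chain:

/*
 * Illustrative sketch only: walk an mblk chain the way one would
 * walk an mbuf chain, using the STREAMS equivalents listed above.
 */
static size_t
example_chain_length(mblk_t *m)
{
	size_t total = 0;

	for (; m != NULL; m = m->b_cont)	/* mbuf: m = m->m_next */
		total += MBLKL(m);		/* mbuf: m->m_len */
	return (total);
}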
84 85
85 86
86 87 /*
87 88 *
88 89 * MODULE_VERSION(libmchain, 1);
89 90 */
90 91
91 92 #ifdef __GNUC__
92 93 #define MBERROR(format, args...) printf("%s(%d): "format, \
93 94 __FUNCTION__, __LINE__, ## args)
94 95 #define MBPANIC(format, args...) printf("%s(%d): "format, \
95 96 __FUNCTION__, __LINE__, ## args)
96 97 #else
97 98 #define MBERROR(...) \
98 99 smb_errmsg(CE_NOTE, __func__, __VA_ARGS__)
99 100 #define MBPANIC(...) \
100 101 smb_errmsg(CE_PANIC, __func__, __VA_ARGS__)
101 102 #endif
102 103
103 104 /*
104 105 * MLEN: The smallest mblk we'll allocate.
105 106 *
106 107 * There's more to MLEN than you might think.
107 108 * Some ethernet drivers may send each mblk as a
108 109 * separate frame, so we want MLEN at least 1K.
109 110 * We could have used 1K here, but that might
110 111 * hurt transports that support larger frames.
111 112 * 4K fits nicely in 3 Ethernet frames (3 * 1500)
112 113 * leaving about 500 bytes for protocol headers.
113 114 */
114 115 #define MLEN 4096
115 116
117 +#if (MLEN < SMB2_HDRLEN)
118 +#error "MLEN can't fit a contiguous SMB2 header"
119 +#endif
116 120
117 121 /*
118 122 * Some UIO routines.
119 123 * Taken from Darwin sources.
120 124 */
121 125
122 126 /*
123 127 * uio_isuserspace - non zero value if the address space
124 128 * flag is for a user address space (could be 32 or 64 bit).
125 129 */
126 130 #define uio_isuserspace(uio) (uio->uio_segflg == UIO_USERSPACE)
127 131
128 132 /*
129 133 * uio_curriovbase - return the base address of the current iovec associated
130 134 * with the given uio_t. May return 0.
131 135 */
132 136 caddr_t
133 137 uio_curriovbase(uio_t *a_uio)
134 138 {
135 139 if (a_uio->uio_iovcnt < 1) {
136 140 return (0);
137 141 }
138 142 return ((caddr_t)((uintptr_t)a_uio->uio_iov->iov_base));
139 143 }
140 144
141 145 /*
142 146 * uio_curriovlen - return the length value of the current iovec associated
143 147 * with the given uio_t.
144 148 */
145 149 size_t
146 150 uio_curriovlen(uio_t *a_uio)
147 151 {
148 152 if (a_uio->uio_iovcnt < 1) {
149 153 return (0);
150 154 }
151 155 return ((size_t)a_uio->uio_iov->iov_len);
152 156 }
153 157
154 158
155 159 /*
156 160 * uio_update - update the given uio_t for a_count of completed IO.
157 161 * This call decrements the current iovec length and residual IO value
158 162 * and increments the current iovec base address and offset value.
159 163 * If the current iovec length is 0 then advance to the next
160 164 * iovec (if any).
161 165 * If the a_count passed in is 0, then only do the advancement
162 166 * over any 0-length iovecs.
163 167 */
164 168 void
165 169 uio_update(uio_t *a_uio, size_t a_count)
166 170 {
167 171 if (a_uio->uio_iovcnt < 1) {
168 172 return;
169 173 }
170 174
171 175 /*
172 176 * if a_count == 0, then we are asking to skip over
173 177 * any empty iovs
174 178 */
175 179 if (a_count) {
176 180 if (a_count > a_uio->uio_iov->iov_len) {
177 181 a_uio->uio_iov->iov_base += a_uio->uio_iov->iov_len;
178 182 a_uio->uio_iov->iov_len = 0;
179 183 } else {
180 184 a_uio->uio_iov->iov_base += a_count;
181 185 a_uio->uio_iov->iov_len -= a_count;
182 186 }
183 187 if (a_uio->uio_resid < 0) {
184 188 a_uio->uio_resid = 0;
185 189 }
186 190 if (a_count > (size_t)a_uio->uio_resid) {
187 191 a_uio->uio_loffset += a_uio->uio_resid;
188 192 a_uio->uio_resid = 0;
189 193 } else {
190 194 a_uio->uio_loffset += a_count;
191 195 a_uio->uio_resid -= a_count;
192 196 }
193 197 }
194 198 /*
195 199 * advance to next iovec if current one is totally consumed
196 200 */
197 201 while (a_uio->uio_iovcnt > 0 && a_uio->uio_iov->iov_len == 0) {
198 202 a_uio->uio_iovcnt--;
199 203 if (a_uio->uio_iovcnt > 0) {
200 204 a_uio->uio_iov++;
201 205 }
202 206 }
203 207 }
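A minimal sketch of the intended calling pattern, assuming a kernel-space uio (a user-space uio would need copyin() instead of bcopy()); the helper and buffer are hypothetical:

/*
 * Illustrative sketch only: copy out of the current iovec and then
 * advance the uio by the amount actually consumed.
 */
static void
example_consume(uio_t *uiop, caddr_t buf, size_t want)
{
	size_t n = uio_curriovlen(uiop);

	if (n > want)
		n = want;
	bcopy(uio_curriovbase(uiop), buf, n);	/* kernel-space uio assumed */
	uio_update(uiop, n);			/* advances iovec and resid */
}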
204 208
205 209 /*
206 210 * This is now used only to extend an existing mblk chain,
207 211 * so don't need to use allocb_cred_wait here.
208 212 */
209 213 /*ARGSUSED*/
210 214 mblk_t *
211 215 m_getblk(int size, int type)
212 216 {
213 217 mblk_t *mblk;
214 218 int error;
215 219
216 220 /* Make size at least MLEN. */
217 221 if (size < MLEN)
218 222 size = MLEN;
219 223 mblk = allocb_wait(size, BPRI_LO, STR_NOSIG, &error);
220 224 ASSERT(mblk);
221 225 return (mblk);
222 226 }
223 227
224 228 void
225 229 mb_done(struct mbchain *mbp)
226 230 {
227 231 if (mbp->mb_top) {
228 232 freemsg(mbp->mb_top);
229 233 mbp->mb_top = NULL;
230 234 }
231 235 /* Avoid dangling references */
232 236 mbp->mb_cur = NULL;
233 237 }
234 238
235 239 unsigned int
236 240 m_length(mblk_t *mblk)
237 241 {
238 242 uint64_t diff;
239 243
240 244 diff = (uintptr_t)mblk->b_datap->db_lim -
241 245 (uintptr_t)mblk->b_datap->db_base;
242 246 ASSERT(diff == (uint64_t)((unsigned int)diff));
243 247 return ((unsigned int)diff);
244 248 }
245 249
246 250 void
247 251 mb_initm(struct mbchain *mbp, mblk_t *m)
248 252 {
249 253 bzero(mbp, sizeof (*mbp));
250 254 mbp->mb_top = mbp->mb_cur = m;
251 255 }
252 256
253 257
254 258 int
255 259 mb_init(struct mbchain *mbp)
256 260 {
257 261 cred_t *cr;
258 262 mblk_t *mblk;
259 263 int error;
260 264
261 265 /*
262 266 * This message will be the head of a new mblk chain,
263 267 * so we'd like its db_credp set. If we extend this
264 268 * chain later, we'll just use allocb_wait()
265 269 */
266 270 cr = ddi_get_cred();
267 271 mblk = allocb_cred_wait(MLEN, STR_NOSIG, &error, cr, NOPID);
268 272
269 273 /*
270 274 * Leave room in this first mblk so we can
271 275 * prepend a 4-byte NetBIOS header.
272 276 * See smb_nbst_send()
273 277 */
274 278 mblk->b_wptr += 4;
275 279 mblk->b_rptr = mblk->b_wptr;
276 280
277 281 mb_initm(mbp, mblk);
278 282 return (0);
279 283 }
280 284
281 285
282 286 /*
283 287 * The mb_detach() function returns the value of the mbp->mb_top field
284 288 * and sets its value to NULL.
285 289 */
286 290
287 291 mblk_t *
288 292 mb_detach(struct mbchain *mbp)
289 293 {
290 294 mblk_t *m;
291 295
292 296 m = mbp->mb_top;
293 297 mbp->mb_top = mbp->mb_cur = NULL;
294 298 return (m);
295 299 }
296 300
297 301 /*
298 302 * Returns the length of the mblk_t data.
299 303 * Should be m_totlen() perhaps?
300 304 */
301 305 int
302 306 m_fixhdr(mblk_t *m0)
303 307 {
304 308 size_t dsz;
305 309
306 310 dsz = msgdsize(m0);
307 311 return ((int)dsz);
308 312 }
309 313
310 314 /*
311 315 * BSD code set the message header length here, and
312 316 * returned the length. We don't have that field, so
313 317 * just return the message length.
314 318 */
315 319 int
316 320 mb_fixhdr(struct mbchain *mbp)
317 321 {
318 322 return (m_fixhdr(mbp->mb_top));
319 323 }
320 324
321 325
322 326 /*
323 327 * Check whether an object of 'size' bytes fits at the current position,
324 328 * and allocate a new mbuf if not. Advance pointers and increase the
325 329 * length of the mbuf(s). Return a pointer to the object placeholder,
326 330 * or NULL if an error occurred. Note: size should be <= MLEN
327 331 */
328 332 void *
329 333 mb_reserve(struct mbchain *mbp, int size)
330 334 {
331 335 mblk_t *m, *mn;
332 336 void *bpos;
333 337
334 338 m = mbp->mb_cur;
335 339 /*
336 340 * If the requested size is more than the space left,
337 341 * allocate and append a new mblk.
338 342 */
339 343 if (MBLKTAIL(m) < size) {
340 344 mn = m_getblk(size, 1);
341 345 if (mn == NULL)
342 346 return (NULL);
343 347 mbp->mb_cur = m->b_cont = mn;
344 348 m = mn;
345 349 }
346 350 /*
347 351 * If 'size' bytes fits into the buffer, then
348 352 * 1. increment the write pointer to the size.
349 353 * 2. return the position from where the memory is reserved.
350 354 */
351 355 bpos = m->b_wptr;
352 356 m->b_wptr += size;
353 357 mbp->mb_count += size;
354 358 return (bpos);
355 359 }
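A common use of mb_reserve() is the "reserve now, patch later" pattern, for fields whose value is only known after the rest of the data has been marshalled. A minimal sketch with a hypothetical helper; bcopy() is used for the patch to avoid unaligned stores:

/*
 * Illustrative sketch only: reserve a 16-bit length field, append
 * the data, then patch the reserved slot.
 */
static int
example_put_counted(struct mbchain *mbp, const void *data, uint16_t len)
{
	void *lenp;
	uint16_t v;
	int error;

	if ((lenp = mb_reserve(mbp, sizeof (v))) == NULL)
		return (ENOBUFS);
	if ((error = mb_put_mem(mbp, data, len, MB_MSYSTEM)) != 0)
		return (error);
	v = htoles(len);
	bcopy(&v, lenp, sizeof (v));	/* slot stays valid after later puts */
	return (0);
}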
356 360
357 361 /*
358 362 * All mb_put_*() functions perform an actual copy of the data into mbuf
359 363 * chain. Functions which have le or be suffixes will perform conversion to
360 364 * the little- or big-endian data formats.
361 365 *
362 366 * Inline version of mb_put_mem(). Handles the easy case in-line,
363 367 * and calls mb_put_mem() if crossing mblk boundaries, etc.
364 368 *
365 369 * We build with -xspace, which causes these inline functions
366 370 * to not be inlined. Using macros instead for now.
367 371 */
368 372 #ifdef INLINE_WORKS
369 373
370 374 static inline int
371 375 mb_put_inline(struct mbchain *mbp, void *src, int size)
372 376 {
373 377 mblk_t *m = mbp->mb_cur;
374 378
375 379 if (m != NULL && size <= MBLKTAIL(m)) {
376 380 uchar_t *p = src;
377 381 int n = size;
378 382 while (n--)
379 383 *(m->b_wptr)++ = *p++;
380 384 mbp->mb_count += size;
381 385 return (0);
382 386 }
383 387 return (mb_put_mem(mbp, src, size, MB_MINLINE));
384 388 }
385 389 #define MB_PUT_INLINE(MBP, SRC, SZ) \
386 390 return (mb_put_inline(MBP, SRC, SZ))
387 391
388 392 #else /* INLINE_WORKS */
389 393
390 394 #define MB_PUT_INLINE(MBP, SRC, SZ) \
391 395 mblk_t *m = MBP->mb_cur; \
392 396 if (m != NULL && SZ <= MBLKTAIL(m)) { \
393 397 uchar_t *p = (void *) SRC; \
394 398 int n = SZ; \
395 399 while (n--) \
396 400 *(m->b_wptr)++ = *p++; \
397 401 MBP->mb_count += SZ; \
398 402 return (0); \
399 403 } \
400 404 return (mb_put_mem(MBP, SRC, SZ, MB_MINLINE))
401 405
402 406 #endif /* INLINE_WORKS */
403 407
404 408 /*
405 409 * Assumes total data length in previous mblks is EVEN.
406 410 * Might need to compute the offset from mb_top instead.
407 411 */
408 412 int
409 413 mb_put_padbyte(struct mbchain *mbp)
410 414 {
411 415 uintptr_t dst;
412 416 char v = 0;
413 417
414 418 dst = (uintptr_t)mbp->mb_cur->b_wptr;
415 419 /* only add padding if address is odd */
416 420 if (dst & 1) {
417 421 MB_PUT_INLINE(mbp, &v, sizeof (v));
418 422 }
419 423
420 424 return (0);
421 425 }
422 426
427 +/*
428 + * Adds padding to an 8-byte boundary.
429 + */
423 430 int
431 +mb_put_align8(struct mbchain *mbp)
432 +{
433 + static const char zeros[8] = { 0 };
434 + int pad_len = 0;
435 +
436 + if ((mbp->mb_count % 8) != 0) {
437 + pad_len = 8 - (mbp->mb_count % 8);
438 + MB_PUT_INLINE(mbp, zeros, pad_len);
439 + }
440 + return (0);
441 +}
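A minimal usage sketch: SMB2 variable-length sections are expected to start on 8-byte boundaries, so a caller pads before appending the next section. The helper and blob arguments here are hypothetical:

/*
 * Illustrative sketch only: align to 8 bytes, then append the
 * next variable-length section of a request.
 */
static int
example_put_section(struct mbchain *mbp, const void *blob, int bloblen)
{
	int error;

	if ((error = mb_put_align8(mbp)) != 0)
		return (error);
	return (mb_put_mem(mbp, blob, bloblen, MB_MSYSTEM));
}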
442 +
443 +int
424 444 mb_put_uint8(struct mbchain *mbp, u_int8_t x)
425 445 {
426 446 u_int8_t v = x;
427 447 MB_PUT_INLINE(mbp, &v, sizeof (v));
428 448 }
429 449
430 450 int
431 451 mb_put_uint16be(struct mbchain *mbp, u_int16_t x)
432 452 {
433 453 u_int16_t v = htobes(x);
434 454 MB_PUT_INLINE(mbp, &v, sizeof (v));
435 455 }
436 456
437 457 int
438 458 mb_put_uint16le(struct mbchain *mbp, u_int16_t x)
439 459 {
440 460 u_int16_t v = htoles(x);
441 461 MB_PUT_INLINE(mbp, &v, sizeof (v));
442 462 }
443 463
444 464 int
445 465 mb_put_uint32be(struct mbchain *mbp, u_int32_t x)
446 466 {
447 467 u_int32_t v = htobel(x);
448 468 MB_PUT_INLINE(mbp, &v, sizeof (v));
449 469 }
450 470
451 471 int
452 472 mb_put_uint32le(struct mbchain *mbp, u_int32_t x)
453 473 {
454 474 u_int32_t v = htolel(x);
455 475 MB_PUT_INLINE(mbp, &v, sizeof (v));
456 476 }
457 477
458 478 int
459 479 mb_put_uint64be(struct mbchain *mbp, u_int64_t x)
460 480 {
461 481 u_int64_t v = htobeq(x);
462 482 MB_PUT_INLINE(mbp, &v, sizeof (v));
463 483 }
464 484
465 485 int
466 486 mb_put_uint64le(struct mbchain *mbp, u_int64_t x)
467 487 {
468 488 u_int64_t v = htoleq(x);
469 489 MB_PUT_INLINE(mbp, &v, sizeof (v));
470 490 }
471 491
472 492 /*
473 493 * mb_put_mem() function copies size bytes of data specified by the source
474 494 * argument to an mbuf chain. The type argument specifies the method used
475 495 * to perform the copy.
476 496 */
477 497 int
478 498 mb_put_mem(struct mbchain *mbp, const void *vsrc, int size, int type)
479 499 {
480 500 mblk_t *n, *m = mbp->mb_cur;
481 501 c_caddr_t source = vsrc;
482 502 c_caddr_t src;
483 503 caddr_t dst;
484 504 uint64_t diff;
485 505 int cplen, mleft, count;
486 506
487 507 diff = MBLKTAIL(m);
488 508 ASSERT(diff == (uint64_t)((int)diff));
489 509 mleft = (int)diff;
490 510
491 511 while (size > 0) {
492 512 if (mleft == 0) {
493 513 if (m->b_cont == NULL) {
494 514 /*
495 515 * Changed m_getm() to m_getblk()
496 516 * with the requested size, so we
497 517 * don't need m_getm() anymore.
498 518 */
499 519 n = m_getblk(size, 1);
500 520 if (n == NULL)
501 521 return (ENOBUFS);
502 522 m->b_cont = n;
503 523 }
504 524 m = m->b_cont;
505 525 diff = MBLKTAIL(m);
506 526 ASSERT(diff == (uint64_t)((int)diff));
507 527 mleft = (int)diff;
508 528 continue;
509 529 }
510 530 cplen = mleft > size ? size : mleft;
511 531 dst = (caddr_t)m->b_wptr;
512 532 switch (type) {
513 533 case MB_MINLINE:
514 534 for (src = source, count = cplen; count; count--)
515 535 *dst++ = *src++;
516 536 break;
517 537 case MB_MSYSTEM:
518 538 bcopy(source, dst, cplen);
519 539 break;
520 540 case MB_MUSER:
521 541 if (copyin((void *)source, dst, cplen))
522 542 return (EFAULT);
523 543 break;
524 544 case MB_MZERO:
525 545 bzero(dst, cplen);
526 546 break;
527 547 }
528 548 size -= cplen;
529 549 source += cplen;
530 550 mleft -= cplen;
531 551 m->b_wptr += cplen;
532 552 mbp->mb_count += cplen;
533 553 }
534 554 mbp->mb_cur = m;
535 555 return (0);
536 556 }
537 557
538 558 /*
539 559 * Append an mblk to the chain.
560 + * Note: The mblk_t *m is consumed.
540 561 */
541 562 int
542 563 mb_put_mbuf(struct mbchain *mbp, mblk_t *m)
543 564 {
544 565 mblk_t *nm, *tail_mb;
545 566 size_t size;
546 567
547 568 /* See: linkb(9f) */
548 569 tail_mb = mbp->mb_cur;
549 570 while (tail_mb->b_cont != NULL)
550 571 tail_mb = tail_mb->b_cont;
551 572
552 573 /*
553 574 * Avoid small frags: Only link if the size of the
554 575 * new mbuf is larger than the space left in the last
555 576 * mblk of the chain (tail), otherwise just copy.
556 577 */
557 578 while (m != NULL) {
558 579 size = MBLKL(m);
559 580 if (size > MBLKTAIL(tail_mb)) {
560 581 /* Link */
561 582 tail_mb->b_cont = m;
562 583 mbp->mb_cur = m;
563 584 mbp->mb_count += msgdsize(m);
564 585 return (0);
565 586 }
566 587 /* Copy */
567 588 bcopy(m->b_rptr, tail_mb->b_wptr, size);
568 589 tail_mb->b_wptr += size;
569 590 mbp->mb_count += size;
570 591 nm = unlinkb(m);
571 592 freeb(m);
572 593 m = nm;
573 594 }
574 595
575 596 return (0);
576 597 }
577 598
578 599 /*
600 + * Put an mbchain into another mbchain
601 + * Leave sub_mbp untouched.
602 + */
603 +int
604 +mb_put_mbchain(struct mbchain *mbp, struct mbchain *sub_mbp)
605 +{
606 + mblk_t *m;
607 +
608 + if (sub_mbp == NULL)
609 + return (0);
610 +
611 + m = sub_mbp->mb_top;
612 + if (m == NULL)
613 + return (0);
614 +
615 + m = dupmsg(m);
616 + if (m == NULL)
617 + return (ENOSR);
618 +
619 + return (mb_put_mbuf(mbp, m));
620 +}
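A minimal sketch of how this might be used: marshal a sub-request into its own mbchain, splice it into the main request, and release the sub-chain afterwards (mb_put_mbchain() works on a dupmsg() copy, so sub is left intact). The helper is hypothetical:

/*
 * Illustrative sketch only: build a sub-chain, splice it into the
 * main request, then free the sub-chain.
 */
static int
example_compose(struct mbchain *req)
{
	struct mbchain sub;
	int error;

	(void) mb_init(&sub);
	(void) mb_put_uint32le(&sub, 0);	/* placeholder payload */
	error = mb_put_mbchain(req, &sub);
	mb_done(&sub);
	return (error);
}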
621 +
622 +/*
579 623 * copies a uio scatter/gather list to an mbuf chain.
580 624 */
581 625 int
582 626 mb_put_uio(struct mbchain *mbp, uio_t *uiop, size_t size)
583 627 {
584 628 size_t left;
585 629 int mtype, error;
586 630
587 631 mtype = (uio_isuserspace(uiop) ? MB_MUSER : MB_MSYSTEM);
588 632 while (size > 0 && uiop->uio_resid) {
589 633 if (uiop->uio_iovcnt <= 0 ||
590 634 uio_curriovbase(uiop) == USER_ADDR_NULL)
591 635 return (EFBIG);
592 636 left = uio_curriovlen(uiop);
593 637 if (left > size)
594 638 left = size;
595 639 error = mb_put_mem(mbp, CAST_DOWN(caddr_t,
596 640 uio_curriovbase(uiop)), left, mtype);
597 641 if (error)
598 642 return (error);
599 643 uio_update(uiop, left);
600 644 size -= left;
601 645 }
602 646 return (0);
603 647 }
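A minimal sketch of a write-style marshalling path: mb_put_uio() selects copyin() or bcopy() per iovec based on uio_segflg, so the same helper serves user and kernel callers. The leading length field and helper name are placeholders:

/*
 * Illustrative sketch only: put a length, then the caller's data
 * from a uio (user or kernel addresses).
 */
static int
example_put_write(struct mbchain *mbp, uio_t *uiop, uint32_t len)
{
	int error;

	if ((error = mb_put_uint32le(mbp, len)) != 0)
		return (error);
	return (mb_put_uio(mbp, uiop, (size_t)len));
}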
604 648
605 649 /*
606 650 * Routines for fetching data from an mbuf chain
607 651 */
608 652
609 653 void
610 654 md_initm(struct mdchain *mdp, mblk_t *m)
611 655 {
612 656 bzero(mdp, sizeof (*mdp));
613 657 mdp->md_top = mdp->md_cur = m;
614 658 mdp->md_pos = m->b_rptr;
615 659 }
616 660
617 661 void
618 662 md_done(struct mdchain *mdp)
619 663 {
620 664 mblk_t *m;
621 665
622 666 /*
623 667 * Deal with the fact that we can error out of
624 668 * smb_t2_reply or smb_nt_reply without using up
625 669 * all the "records" added by md_append_record().
626 670 */
627 671 while ((m = mdp->md_top) != NULL) {
628 672 mdp->md_top = m->b_next;
629 673 m->b_next = NULL;
630 674 freemsg(m);
631 675 }
632 676 /* Avoid dangling references */
633 677 mdp->md_cur = NULL;
634 678 mdp->md_pos = NULL;
635 679 }
636 680
637 681 /*
638 682 * Append a new message (separate mbuf chain).
639 683 * It is the caller's responsibility to prevent
640 684 * multiple calls to fetch/record routines.
641 685 * Note unusual use of mblk->b_next here.
642 686 */
643 687 void
644 688 md_append_record(struct mdchain *mdp, mblk_t *top)
645 689 {
646 690 mblk_t *m;
647 691
648 692 top->b_next = NULL;
649 693 if (mdp->md_top == NULL) {
650 694 md_initm(mdp, top);
651 695 return;
652 696 }
653 697 m = mdp->md_top;
654 698 /* Get to last message (not b_cont chain) */
655 699 while (m->b_next)
656 700 m = m->b_next;
657 701 m->b_next = top;
658 702 }
659 703
660 704 /*
661 705 * Advance mdp->md_top to the next message.
662 706 * Note unusual use of mblk->b_next here.
663 707 */
664 708 void
665 709 md_next_record(struct mdchain *mdp)
666 710 {
667 711 mblk_t *m, *top;
668 712
669 713 if ((top = mdp->md_top) == NULL)
670 714 return;
671 715
672 716 /*
673 717 * Get the next message, if any,
674 718 * stored by md_append_record.
675 719 * Note: NOT b_cont chain
676 720 */
677 721 m = top->b_next;
678 722 top->b_next = NULL;
679 723
680 724 /* Done with old "top". */
681 725 md_done(mdp);
682 726 if (m == NULL)
683 727 return;
684 728
685 729 /* Setup new "top". */
686 730 md_initm(mdp, m);
687 731 }
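A minimal sketch of the record interface: replies queued with md_append_record() are consumed one at a time, with md_next_record() freeing the remainder of the current record and advancing to the next. The field read here is only a placeholder:

/*
 * Illustrative sketch only: drain all queued records, looking at
 * the first 16-bit field of each.
 */
static void
example_drain_records(struct mdchain *mdp)
{
	uint16_t w;

	while (mdp->md_top != NULL) {
		(void) md_get_uint16le(mdp, &w);	/* first field */
		md_next_record(mdp);		/* free rest, advance */
	}
}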
688 732
689 733 /*
690 734 * Inline version of md_get_mem(). Handles the easy case in-line,
691 735 * and calls md_get_mem() if crossing mblk boundaries, etc.
692 736 */
693 737 #ifdef INLINE_WORKS /* see above */
694 738
695 739 static inline int
696 740 md_get_inline(struct mdchain *mdp, void *dst, int size)
697 741 {
698 742 mblk_t *m = mdp->md_cur;
699 743
700 744 if (m != NULL && mdp->md_pos + size <= m->b_wptr) {
701 745 uchar_t *p = dst;
702 746 int n = size;
703 747 while (n--)
704 748 *p++ = *(mdp->md_pos)++;
705 749 /* no md_count += size */
706 750 return (0);
707 751 }
708 752 return (md_get_mem(mdp, dst, size, MB_MINLINE));
709 753 }
710 754 #define MD_GET_INLINE(MDP, DST, SZ) \
711 755 error = md_get_inline(MDP, DST, SZ)
712 756
713 757 #else /* INLINE_WORKS */
714 758
715 759 /* Note, sets variable: error */
716 760 #define MD_GET_INLINE(MDP, DST, SZ) \
717 761 mblk_t *m = MDP->md_cur; \
718 762 if (m != NULL && MDP->md_pos + SZ <= m->b_wptr) { \
719 763 uchar_t *p = (void *) DST; \
720 764 int n = SZ; \
721 765 while (n--) \
722 766 *p++ = *(mdp->md_pos)++; \
723 767 /* no md_count += SZ */ \
724 768 error = 0; \
725 769 } else \
726 770 error = md_get_mem(MDP, DST, SZ, MB_MINLINE)
727 771
728 772 #endif /* INLINE_WORKS */
729 773
730 774
731 775 int
732 776 md_get_uint8(struct mdchain *mdp, u_int8_t *x)
733 777 {
734 778 uint8_t v;
735 779 int error;
736 780
737 781 MD_GET_INLINE(mdp, &v, sizeof (v));
738 782 if (x)
739 783 *x = v;
740 784 return (error);
741 785 }
742 786
743 787 int
744 788 md_get_uint16be(struct mdchain *mdp, u_int16_t *x) {
745 789 u_int16_t v;
746 790 int error;
747 791
748 792 MD_GET_INLINE(mdp, &v, sizeof (v));
749 793 if (x)
750 794 *x = betohs(v);
751 795 return (error);
752 796 }
753 797
754 798 int
755 799 md_get_uint16le(struct mdchain *mdp, u_int16_t *x)
756 800 {
757 801 u_int16_t v;
758 802 int error;
759 803
760 804 MD_GET_INLINE(mdp, &v, sizeof (v));
761 805 if (x)
762 806 *x = letohs(v);
763 807 return (error);
764 808 }
765 809
766 810 int
767 811 md_get_uint32be(struct mdchain *mdp, u_int32_t *x)
768 812 {
769 813 u_int32_t v;
770 814 int error;
771 815
772 816 MD_GET_INLINE(mdp, &v, sizeof (v));
773 817 if (x)
774 818 *x = betohl(v);
775 819 return (error);
776 820 }
777 821
778 822 int
779 823 md_get_uint32le(struct mdchain *mdp, u_int32_t *x)
780 824 {
781 825 u_int32_t v;
782 826 int error;
783 827
784 828 MD_GET_INLINE(mdp, &v, sizeof (v));
785 829 if (x)
786 830 *x = letohl(v);
787 831 return (error);
788 832 }
789 833
790 834 int
791 835 md_get_uint64be(struct mdchain *mdp, u_int64_t *x)
792 836 {
793 837 u_int64_t v;
794 838 int error;
795 839
796 840 MD_GET_INLINE(mdp, &v, sizeof (v));
797 841 if (x)
798 842 *x = betohq(v);
799 843 return (error);
800 844 }
801 845
802 846 int
803 847 md_get_uint64le(struct mdchain *mdp, u_int64_t *x)
804 848 {
805 849 u_int64_t v;
806 850 int error;
807 851
808 852 MD_GET_INLINE(mdp, &v, sizeof (v));
809 853 if (x)
810 854 *x = letohq(v);
811 855 return (error);
812 856 }
813 857
814 858 int
815 859 md_get_mem(struct mdchain *mdp, void *vdst, int size, int type)
816 860 {
817 861 mblk_t *m = mdp->md_cur;
818 862 caddr_t target = vdst;
819 863 unsigned char *s;
820 864 uint64_t diff;
821 865 int count;
822 866
823 867 while (size > 0) {
824 868 if (m == NULL) {
825 869 SMBSDEBUG("incomplete copy\n");
826 870 return (EBADRPC);
827 871 }
828 872
829 873 /*
830 874 * Offset in the current MBUF.
831 875 */
832 876 s = mdp->md_pos;
833 877 ASSERT((m->b_rptr <= s) && (s <= m->b_wptr));
834 878
835 879 /* Data remaining. */
836 880 diff = (uintptr_t)m->b_wptr - (uintptr_t)s;
837 881 ASSERT(diff == (uint64_t)((int)diff));
838 882 count = (int)diff;
839 883
840 884 /*
841 885 * Check if the no. of bytes remaining is less than
842 886 * the bytes requested.
843 887 */
844 888 if (count == 0) {
845 889 m = m->b_cont;
846 890 if (m) {
847 891 mdp->md_cur = m;
848 892 mdp->md_pos = s = m->b_rptr;
849 893 }
850 894 continue;
851 895 }
852 896 if (count > size)
853 897 count = size;
854 898 size -= count;
855 899 mdp->md_pos += count;
856 900 if (target == NULL)
857 901 continue;
858 902 switch (type) {
859 903 case MB_MUSER:
860 904 if (copyout(s, target, count))
861 905 return (EFAULT);
862 906 break;
863 907 case MB_MSYSTEM:
864 908 bcopy(s, target, count);
865 909 break;
866 910 case MB_MINLINE:
867 911 while (count--)
868 912 *target++ = *s++;
869 913 continue;
870 914 }
871 915 target += count;
872 916 }
873 917 return (0);
874 918 }
875 919
876 920 /*
877 921 * Get the next SIZE bytes as a separate mblk.
922 + * Advances position in mdp by SIZE.
878 923 */
879 924 int
880 925 md_get_mbuf(struct mdchain *mdp, int size, mblk_t **ret)
881 926 {
882 927 mblk_t *m, *rm;
883 928
884 929 unsigned char *s;
885 930 uint64_t diff;
886 931 int off;
887 932
888 933 /*
889 934 * Offset in the current MBUF.
890 935 */
891 936 m = mdp->md_cur;
892 937 s = mdp->md_pos;
893 938 ASSERT((m->b_rptr <= s) && (s <= m->b_wptr));
894 939 diff = (uintptr_t)s - (uintptr_t)m->b_rptr;
895 940 ASSERT(diff == (uint64_t)((int)diff));
896 941 off = (int)diff;
897 942
898 943 rm = m_copym(m, off, size, M_WAITOK);
899 944 if (rm == NULL)
900 945 return (EBADRPC);
946 + (void) md_get_mem(mdp, NULL, size, MB_MSYSTEM);
901 947
902 948 *ret = rm;
903 949 return (0);
904 950 }
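A minimal sketch of how md_get_mbuf() is typically used: the extracted bytes come back as their own message and the parse position moves past them, so decoding continues with whatever follows. The trailing field is a placeholder:

/*
 * Illustrative sketch only: pull "len" bytes out as a separate
 * mblk chain, then keep decoding after them.
 */
static int
example_get_payload(struct mdchain *mdp, int len, mblk_t **mpp)
{
	uint32_t trailer;
	int error;

	if ((error = md_get_mbuf(mdp, len, mpp)) != 0)
		return (error);
	return (md_get_uint32le(mdp, &trailer));
}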
905 951
906 952 int
907 953 md_get_uio(struct mdchain *mdp, uio_t *uiop, size_t size)
908 954 {
909 955 size_t left;
910 956 int mtype, error;
911 957
912 958 mtype = (uio_isuserspace(uiop) ? MB_MUSER : MB_MSYSTEM);
913 959 while (size > 0 && uiop->uio_resid) {
914 960 if (uiop->uio_iovcnt <= 0 ||
915 961 uio_curriovbase(uiop) == USER_ADDR_NULL)
916 962 return (EFBIG);
917 963 left = uio_curriovlen(uiop);
918 964 if (left > size)
919 965 left = size;
920 966 error = md_get_mem(mdp, CAST_DOWN(caddr_t,
921 967 uio_curriovbase(uiop)), left, mtype);
922 968 if (error)
923 969 return (error);
924 970 uio_update(uiop, left);
925 971 size -= left;
926 972 }
927 973 return (0);
928 974 }
929 975
930 976 /*
931 977 * Additions for Solaris
932 978 */
933 979
934 980 /*
935 981 * Concatenate mblk chain n to m:
936 982 * go to the end of the data in m,
937 983 * then link n via b_cont.
938 984 * See: linkb(9f)
939 985 */
940 986
941 987 void m_cat(
942 988 mblk_t *m,
943 989 mblk_t *n)
944 990 {
945 991 if (!n)
946 992 return;
947 993 while (m->b_cont) {
948 994 m = m->b_cont;
949 995 }
950 996 m->b_cont = n;
951 997 }
952 998
953 999 /*ARGSUSED*/
954 1000 mblk_t *
955 1001 m_copym(mblk_t *m, int off, int len, int wait)
956 1002 {
957 1003 mblk_t *n;
958 1004 size_t dsz;
959 1005 ssize_t adj;
960 1006
961 1007 dsz = msgdsize(m);
962 1008 if (len == M_COPYALL) {
963 1009 if (off > dsz)
964 1010 return (0);
965 1011 } else {
966 1012 if ((off + len) > dsz)
967 1013 return (0);
968 1014 }
969 1015
970 1016 if ((n = dupmsg(m)) == NULL)
971 1017 return (0);
972 1018
973 1019 /* trim from head */
974 1020 adj = off;
975 1021 if (!adjmsg(n, adj)) {
976 1022 freemsg(n);
977 1023 return (0);
978 1024 }
979 1025
980 1026 /* trim from tail */
981 1027 if (len != M_COPYALL) {
982 1028 dsz = msgdsize(n);
983 1029 ASSERT(len <= dsz);
984 1030 if (len < dsz) {
985 1031 adj = (ssize_t)len - (ssize_t)dsz;
986 1032 ASSERT(adj < 0);
987 1033 (void) adjmsg(n, adj);
988 1034 }
989 1035 }
990 1036
991 1037 return (n);
992 1038 }
993 1039
994 1040 /*
995 1041 * Get "rqlen" contiguous bytes into the first mblk of a chain.
996 1042 */
997 1043 mblk_t *
998 1044 m_pullup(
999 1045 mblk_t *m,
1000 1046 int rqlen)
1001 1047 {
1002 1048 ptrdiff_t diff;
1003 1049
1004 1050 diff = MBLKL(m);
1005 1051 ASSERT(diff == (ptrdiff_t)((int)diff));
1006 1052 if ((int)diff < rqlen) {
1007 1053 /* This should be rare. */
1008 1054 if (!pullupmsg(m, rqlen)) {
1009 1055 SMBSDEBUG("pullupmsg failed!\n");
1010 1056 freemsg(m);
1011 1057 return (NULL);
1012 1058 }
1013 1059 }
1014 1060 return (m);
1015 1061 }
1016 1062
1017 1063
1018 1064 /*
1019 1065 * m_split: split the mblk from the offset (len0) to the end.
1020 1066 * Partition an mbuf chain in two pieces, returning the tail --
1021 1067 * all but the first len0 bytes. In case of failure, it returns NULL and
1022 1068 * attempts to restore the chain to its original state.
1023 1069 * Similar to dupmsg() + adjmsg() on Solaris.
1024 1070 */
1025 1071 /*ARGSUSED*/
1026 1072 mblk_t *
1027 1073 m_split(
1028 1074 mblk_t *m0,
1029 1075 int len0,
1030 1076 int wait)
1031 1077 {
1032 1078 mblk_t *m, *n;
1033 1079 int mbl, len = len0;
1034 1080 ptrdiff_t diff;
1035 1081
1036 1082 #if 0 /* If life were simple, this would be: */
1037 1083 for (m = m0; m && len > MBLKL(m); m = m->b_cont)
1038 1084 len -= MBLKL(m);
1039 1085 #else /* but with LP64 and picky lint we have: */
1040 1086 for (m = m0; m; m = m->b_cont) {
1041 1087 diff = MBLKL(m);
1042 1088 ASSERT(diff == (ptrdiff_t)((int)diff));
1043 1089 mbl = (int)diff;
1044 1090 if (len <= mbl)
1045 1091 break;
1046 1092 len -= mbl;
1047 1093 }
1048 1094 #endif
1049 1095
1050 1096 if (m == 0)
1051 1097 return (0);
1052 1098
1053 1099 /* This is the one to split (dupb, adjust) */
1054 1100 if ((n = dupb(m)) == 0)
1055 1101 return (0);
1056 1102
1057 1103 ASSERT(len <= MBLKL(m));
1058 1104
1059 1105 m->b_wptr = m->b_rptr + len;
1060 1106 n->b_rptr += len;
1061 1107
1062 1108 /* Move any b_cont (tail) to the new head. */
1063 1109 n->b_cont = m->b_cont;
1064 1110 m->b_cont = NULL;
1065 1111
1066 1112 return (n);
1067 1113 }