1 /*
   2  * CDDL HEADER START
   3  *
   4  * The contents of this file are subject to the terms of the
   5  * Common Development and Distribution License (the "License").
   6  * You may not use this file except in compliance with the License.
   7  *
   8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
   9  * or http://www.opensolaris.org/os/licensing.
  10  * See the License for the specific language governing permissions
  11  * and limitations under the License.
  12  *
  13  * When distributing Covered Code, include this CDDL HEADER in each
  14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
  15  * If applicable, add the following below this CDDL HEADER, with the
  16  * fields enclosed by brackets "[]" replaced with your own identifying
  17  * information: Portions Copyright [yyyy] [name of copyright owner]
  18  *
  19  * CDDL HEADER END
  20  */
  21 
  22 /*
  23  * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
  24  * Copyright 2018 Nexenta Systems, Inc. All rights reserved.
  25  */
  26 
  27 #include <sys/param.h>
  28 #include <sys/types.h>
  29 #include <sys/tzfile.h>
  30 #include <sys/atomic.h>
  31 #include <sys/time.h>
  32 #include <sys/spl.h>
  33 #include <sys/random.h>
  34 #include <smbsrv/smb_kproto.h>
  35 #include <smbsrv/smb_fsops.h>
  36 #include <smbsrv/smbinfo.h>
  37 #include <smbsrv/smb_xdr.h>
  38 #include <smbsrv/smb_vops.h>
  39 #include <smbsrv/smb_idmap.h>
  40 
  41 #include <sys/sid.h>
  42 #include <sys/priv_names.h>
  43 #include <sys/bitmap.h>
  44 
  45 static kmem_cache_t     *smb_dtor_cache = NULL;
  46 
  47 static boolean_t smb_avl_hold(smb_avl_t *);
  48 static void smb_avl_rele(smb_avl_t *);
  49 
/* Accumulated leap-second adjustment applied in time conversions. */
time_t tzh_leapcnt = 0;

/* Forward declarations; definitions appear later in this file. */
struct tm
*smb_gmtime_r(time_t *clock, struct tm *result);

time_t
smb_timegm(struct tm *tm);

/*
 * Local definition of struct tm for kernel use; field meanings
 * follow the usual userland convention.
 */
struct	tm {
	int	tm_sec;		/* seconds after the minute [0..59] */
	int	tm_min;		/* minutes after the hour [0..59] */
	int	tm_hour;	/* hours since midnight [0..23] */
	int	tm_mday;	/* day of the month [1..31] */
	int	tm_mon;		/* months since January [0..11] */
	int	tm_year;	/* years since 1900 */
	int	tm_wday;	/* days since Sunday [0..6] */
	int	tm_yday;	/* days since January 1 [0..365] */
	int	tm_isdst;	/* daylight saving time flag */
};

/* Days per month in a non-leap year. */
static const int days_in_month[] = {
	31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31
};
  73 
  74 /*
  75  * Given a UTF-8 string (our internal form everywhere)
  76  * return either the Unicode (UTF-16) length in bytes,
  77  * or the OEM length in bytes.  Which we return is
  78  * determined by whether the client supports Unicode.
  79  * This length does NOT include the null.
  80  */
  81 int
  82 smb_ascii_or_unicode_strlen(struct smb_request *sr, char *str)
  83 {
  84         if (sr->session->dialect >= SMB_VERS_2_BASE ||
  85             (sr->smb_flg2 & SMB_FLAGS2_UNICODE) != 0)
  86                 return (smb_wcequiv_strlen(str));
  87         return (smb_sbequiv_strlen(str));
  88 }
  89 
  90 /*
  91  * Given a UTF-8 string (our internal form everywhere)
  92  * return either the Unicode (UTF-16) length in bytes,
  93  * or the OEM length in bytes.  Which we return is
  94  * determined by whether the client supports Unicode.
  95  * This length DOES include the null.
  96  */
  97 int
  98 smb_ascii_or_unicode_strlen_null(struct smb_request *sr, char *str)
  99 {
 100         if (sr->session->dialect >= SMB_VERS_2_BASE ||
 101             (sr->smb_flg2 & SMB_FLAGS2_UNICODE) != 0)
 102                 return (smb_wcequiv_strlen(str) + 2);
 103         return (smb_sbequiv_strlen(str) + 1);
 104 }
 105 
 106 int
 107 smb_ascii_or_unicode_null_len(struct smb_request *sr)
 108 {
 109         if (sr->session->dialect >= SMB_VERS_2_BASE ||
 110             (sr->smb_flg2 & SMB_FLAGS2_UNICODE) != 0)
 111                 return (2);
 112         return (1);
 113 }
 114 
 115 /*
 116  *
 117  * Convert old-style (DOS, LanMan) wildcard strings to NT style.
 118  * This should ONLY happen to patterns that come from old clients,
 119  * meaning dialect LANMAN2_1 etc. (dialect < NT_LM_0_12).
 120  *
 121  *      ? is converted to >
 122  *      * is converted to < if it is followed by .
 123  *      . is converted to " if it is followed by ? or * or end of pattern
 124  *
 125  * Note: modifies pattern in place.
 126  */
 127 void
 128 smb_convert_wildcards(char *pattern)
 129 {
 130         char    *p;
 131 
 132         for (p = pattern; *p != '\0'; p++) {
 133                 switch (*p) {
 134                 case '?':
 135                         *p = '>';
 136                         break;
 137                 case '*':
 138                         if (p[1] == '.')
 139                                 *p = '<';
 140                         break;
 141                 case '.':
 142                         if (p[1] == '?' || p[1] == '*' || p[1] == '\0')
 143                                 *p = '\"';
 144                         break;
 145                 }
 146         }
 147 }
 148 
 149 /*
 150  * smb_sattr_check
 151  *
 152  * Check file attributes against a search attribute (sattr) mask.
 153  *
 154  * Normal files, which includes READONLY and ARCHIVE, always pass
 155  * this check.  If the DIRECTORY, HIDDEN or SYSTEM special attributes
 156  * are set then they must appear in the search mask.  The special
 157  * attributes are inclusive, i.e. all special attributes that appear
 158  * in sattr must also appear in the file attributes for the check to
 159  * pass.
 160  *
 161  * The following examples show how this works:
 162  *
 163  *              fileA:  READONLY
 164  *              fileB:  0 (no attributes = normal file)
 165  *              fileC:  READONLY, ARCHIVE
 166  *              fileD:  HIDDEN
 167  *              fileE:  READONLY, HIDDEN, SYSTEM
 168  *              dirA:   DIRECTORY
 169  *
 170  * search attribute: 0
 171  *              Returns: fileA, fileB and fileC.
 172  * search attribute: HIDDEN
 173  *              Returns: fileA, fileB, fileC and fileD.
 174  * search attribute: SYSTEM
 175  *              Returns: fileA, fileB and fileC.
 176  * search attribute: DIRECTORY
 177  *              Returns: fileA, fileB, fileC and dirA.
 178  * search attribute: HIDDEN and SYSTEM
 179  *              Returns: fileA, fileB, fileC, fileD and fileE.
 180  *
 181  * Returns true if the file and sattr match; otherwise, returns false.
 182  */
 183 boolean_t
 184 smb_sattr_check(uint16_t dosattr, uint16_t sattr)
 185 {
 186         if ((dosattr & FILE_ATTRIBUTE_DIRECTORY) &&
 187             !(sattr & FILE_ATTRIBUTE_DIRECTORY))
 188                 return (B_FALSE);
 189 
 190         if ((dosattr & FILE_ATTRIBUTE_HIDDEN) &&
 191             !(sattr & FILE_ATTRIBUTE_HIDDEN))
 192                 return (B_FALSE);
 193 
 194         if ((dosattr & FILE_ATTRIBUTE_SYSTEM) &&
 195             !(sattr & FILE_ATTRIBUTE_SYSTEM))
 196                 return (B_FALSE);
 197 
 198         return (B_TRUE);
 199 }
 200 
 201 time_t
 202 smb_get_boottime(void)
 203 {
 204         extern time_t   boot_time;
 205         zone_t *z = curzone;
 206 
 207         /* Unfortunately, the GZ doesn't set zone_boot_time. */
 208         if (z->zone_id == GLOBAL_ZONEID)
 209                 return (boot_time);
 210 
 211         return (z->zone_boot_time);
 212 }
 213 
/*
 * smb_idpool_increment
 *
 * This function increments the ID pool by doubling the current size. This
 * function assumes the caller entered the mutex of the pool.
 *
 * Returns 0 on success, or -1 if the pool has reached SMB_IDPOOL_MAX_SIZE
 * or the larger bitmap could not be allocated (KM_NOSLEEP).
 */
static int
smb_idpool_increment(
    smb_idpool_t	*pool)
{
	uint8_t		*new_pool;
	uint32_t	new_size;

	ASSERT(pool->id_magic == SMB_IDPOOL_MAGIC);

	new_size = pool->id_size * 2;
	if (new_size <= SMB_IDPOOL_MAX_SIZE) {
		/* One bit per ID: the bitmap occupies new_size / 8 bytes. */
		new_pool = kmem_alloc(new_size / 8, KM_NOSLEEP);
		if (new_pool) {
			bzero(new_pool, new_size / 8);
			/* Preserve the allocation state of existing IDs. */
			bcopy(pool->id_pool, new_pool, pool->id_size / 8);
			kmem_free(pool->id_pool, pool->id_size / 8);
			pool->id_pool = new_pool;
			pool->id_free_counter += new_size - pool->id_size;
			pool->id_max_free_counter += new_size - pool->id_size;
			pool->id_size = new_size;
			pool->id_idx_msk = (new_size / 8) - 1;
			if (new_size >= SMB_IDPOOL_MAX_SIZE) {
				/* id -1 made unavailable */
				pool->id_pool[pool->id_idx_msk] = 0x80;
				pool->id_free_counter--;
				pool->id_max_free_counter--;
			}
			return (0);
		}
	}
	return (-1);
}
 252 
/*
 * smb_idpool_constructor
 *
 * This function initializes the pool structure provided.
 * The pool starts at SMB_IDPOOL_MIN_SIZE IDs and grows on demand
 * (see smb_idpool_increment).  ID 0 is reserved and never handed out;
 * allocation starts scanning at bit 1.  Always returns 0.
 */
int
smb_idpool_constructor(
    smb_idpool_t	*pool)
{

	ASSERT(pool->id_magic != SMB_IDPOOL_MAGIC);

	pool->id_size = SMB_IDPOOL_MIN_SIZE;
	pool->id_idx_msk = (SMB_IDPOOL_MIN_SIZE / 8) - 1;
	pool->id_free_counter = SMB_IDPOOL_MIN_SIZE - 1;
	pool->id_max_free_counter = SMB_IDPOOL_MIN_SIZE - 1;
	/* Start the scan position at bit 1 (ID 0 is reserved). */
	pool->id_bit = 0x02;
	pool->id_bit_idx = 1;
	pool->id_idx = 0;
	pool->id_pool = (uint8_t *)kmem_alloc((SMB_IDPOOL_MIN_SIZE / 8),
	    KM_SLEEP);
	bzero(pool->id_pool, (SMB_IDPOOL_MIN_SIZE / 8));
	/* -1 id made unavailable */
	pool->id_pool[0] = 0x01;		/* id 0 made unavailable */
	mutex_init(&pool->id_mutex, NULL, MUTEX_DEFAULT, NULL);
	pool->id_magic = SMB_IDPOOL_MAGIC;
	return (0);
}
 281 
/*
 * smb_idpool_destructor
 *
 * This function tears down and frees the resources associated with the
 * pool provided.  The free-counter assertion verifies that every ID
 * allocated from the pool has been returned before destruction.
 */
void
smb_idpool_destructor(
    smb_idpool_t	*pool)
{
	ASSERT(pool->id_magic == SMB_IDPOOL_MAGIC);
	ASSERT(pool->id_free_counter == pool->id_max_free_counter);
	/* Invalidate the magic so stale use trips the ASSERTs above. */
	pool->id_magic = (uint32_t)~SMB_IDPOOL_MAGIC;
	mutex_destroy(&pool->id_mutex);
	kmem_free(pool->id_pool, (size_t)(pool->id_size / 8));
}
 298 
/*
 * smb_idpool_alloc
 *
 * This function allocates an ID from the pool provided.
 *
 * The pool is a bitmap scanned circularly starting from the position
 * left by the previous allocation (id_idx/id_bit/id_bit_idx).  If the
 * pool is exhausted an attempt is made to grow it first.  On success
 * the new ID is stored in *id and 0 is returned; on failure (pool full
 * and cannot grow) -1 is returned.
 */
int
smb_idpool_alloc(
    smb_idpool_t	*pool,
    uint16_t		*id)
{
	uint32_t	i;
	uint8_t		bit;
	uint8_t		bit_idx;
	uint8_t		byte;

	ASSERT(pool->id_magic == SMB_IDPOOL_MAGIC);

	mutex_enter(&pool->id_mutex);
	if ((pool->id_free_counter == 0) && smb_idpool_increment(pool)) {
		mutex_exit(&pool->id_mutex);
		return (-1);
	}

	/* Scan at most id_size byte positions (circular walk). */
	i = pool->id_size;
	while (i) {
		bit = pool->id_bit;
		bit_idx = pool->id_bit_idx;
		byte = pool->id_pool[pool->id_idx];
		while (bit) {
			if (byte & bit) {
				/* This ID is taken; try the next bit. */
				bit = bit << 1;
				bit_idx++;
				continue;
			}
			/* Found a free bit: mark it allocated. */
			pool->id_pool[pool->id_idx] |= bit;
			*id = (uint16_t)(pool->id_idx * 8 + (uint32_t)bit_idx);
			pool->id_free_counter--;
			/*
			 * Leave position at next bit to allocate,
			 * so we don't keep re-using the last in an
			 * alloc/free/alloc/free sequence.  Doing
			 * that can confuse some SMB clients.
			 */
			if (bit & 0x80) {
				pool->id_bit = 1;
				pool->id_bit_idx = 0;
				pool->id_idx++;
				pool->id_idx &= pool->id_idx_msk;
			} else {
				pool->id_bit = (bit << 1);
				pool->id_bit_idx = bit_idx + 1;
				/* keep id_idx */
			}
			mutex_exit(&pool->id_mutex);
			return (0);
		}
		/* Byte exhausted; advance to the next byte (wrapping). */
		pool->id_bit = 1;
		pool->id_bit_idx = 0;
		pool->id_idx++;
		pool->id_idx &= pool->id_idx_msk;
		--i;
	}
	/*
	 * This section of code shouldn't be reached. If there are IDs
	 * available and none could be found there's a problem.
	 */
	ASSERT(0);
	mutex_exit(&pool->id_mutex);
	return (-1);
}
 369 
/*
 * smb_idpool_free
 *
 * This function frees the ID provided.  IDs 0 and 0xFFFF are reserved
 * and must never be freed.  Freeing an ID that is not currently
 * allocated is a caller bug (caught by the ASSERT below on DEBUG).
 */
void
smb_idpool_free(
    smb_idpool_t	*pool,
    uint16_t		id)
{
	ASSERT(pool->id_magic == SMB_IDPOOL_MAGIC);
	ASSERT(id != 0);
	ASSERT(id != 0xFFFF);

	mutex_enter(&pool->id_mutex);
	/* Clear the bit for this ID if it is currently set. */
	if (pool->id_pool[id >> 3] & (1 << (id & 7))) {
		pool->id_pool[id >> 3] &= ~(1 << (id & 7));
		pool->id_free_counter++;
		ASSERT(pool->id_free_counter <= pool->id_max_free_counter);
		mutex_exit(&pool->id_mutex);
		return;
	}
	/* Freeing a free ID. */
	ASSERT(0);
	mutex_exit(&pool->id_mutex);
}
 396 
/*
 * Initialize the llist delete queue object cache.
 * No-op if the cache has already been created.
 */
void
smb_llist_init(void)
{
	if (smb_dtor_cache != NULL)
		return;

	smb_dtor_cache = kmem_cache_create("smb_dtor_cache",
	    sizeof (smb_dtor_t), 8, NULL, NULL, NULL, NULL, NULL, 0);
}
 409 
/*
 * Destroy the llist delete queue object cache.
 * Safe to call when the cache was never created.
 */
void
smb_llist_fini(void)
{
	if (smb_dtor_cache != NULL) {
		kmem_cache_destroy(smb_dtor_cache);
		smb_dtor_cache = NULL;
	}
}
 421 
/*
 * smb_llist_constructor
 *
 * This function initializes a locked list: the list proper (protected
 * by ll_lock) plus a delete queue (protected by ll_mutex) of objects
 * waiting to be destroyed; see smb_llist_post/smb_llist_flush.
 *
 * size/offset describe the node type, as for list_create(9F).
 */
void
smb_llist_constructor(
    smb_llist_t	*ll,
    size_t	size,
    size_t	offset)
{
	rw_init(&ll->ll_lock, NULL, RW_DEFAULT, NULL);
	mutex_init(&ll->ll_mutex, NULL, MUTEX_DEFAULT, NULL);
	list_create(&ll->ll_list, size, offset);
	list_create(&ll->ll_deleteq, sizeof (smb_dtor_t),
	    offsetof(smb_dtor_t, dt_lnd));
	ll->ll_count = 0;
	ll->ll_wrop = 0;
	ll->ll_deleteq_count = 0;
	ll->ll_flushing = B_FALSE;
}
 443 
/*
 * Flush the delete queue and destroy a locked list.
 * The list and its delete queue must both be empty after the flush.
 */
void
smb_llist_destructor(
    smb_llist_t	*ll)
{
	smb_llist_flush(ll);

	ASSERT(ll->ll_count == 0);
	ASSERT(ll->ll_deleteq_count == 0);

	rw_destroy(&ll->ll_lock);
	list_destroy(&ll->ll_list);
	list_destroy(&ll->ll_deleteq);
	mutex_destroy(&ll->ll_mutex);
}
 461 
/*
 * Post an object to the delete queue.  The delete queue will be processed
 * during list exit or list destruction.  Objects are often posted for
 * deletion during list iteration (while the list is locked) but that is
 * not required, and an object can be posted at any time.
 *
 * dtorproc is invoked with the object when the queue is flushed.
 */
void
smb_llist_post(smb_llist_t *ll, void *object, smb_dtorproc_t dtorproc)
{
	smb_dtor_t	*dtor;

	ASSERT((object != NULL) && (dtorproc != NULL));

	dtor = kmem_cache_alloc(smb_dtor_cache, KM_SLEEP);
	bzero(dtor, sizeof (smb_dtor_t));
	dtor->dt_magic = SMB_DTOR_MAGIC;
	dtor->dt_object = object;
	dtor->dt_proc = dtorproc;

	mutex_enter(&ll->ll_mutex);
	list_insert_tail(&ll->ll_deleteq, dtor);
	++ll->ll_deleteq_count;
	mutex_exit(&ll->ll_mutex);
}
 486 
/* Acquire the list lock in the requested mode (RW_READER or RW_WRITER). */
void
smb_llist_enter(smb_llist_t *ll, krw_t mode)
{
	rw_enter(&ll->ll_lock, mode);
}
 492 
/*
 * Exit the list lock and process the delete queue.
 */
void
smb_llist_exit(smb_llist_t *ll)
{
	rw_exit(&ll->ll_lock);
	smb_llist_flush(ll);
}
 502 
/*
 * Flush the list delete queue.  The mutex is dropped across the destructor
 * call in case this leads to additional objects being posted to the delete
 * queue.
 *
 * ll_flushing prevents reentrant flushing when a destructor call
 * indirectly re-enters this function for the same list.
 */
void
smb_llist_flush(smb_llist_t *ll)
{
	smb_dtor_t    *dtor;

	mutex_enter(&ll->ll_mutex);
	if (ll->ll_flushing) {
		/* Another flush of this list is already in progress. */
		mutex_exit(&ll->ll_mutex);
		return;
	}
	ll->ll_flushing = B_TRUE;

	dtor = list_head(&ll->ll_deleteq);
	while (dtor != NULL) {
		SMB_DTOR_VALID(dtor);
		ASSERT((dtor->dt_object != NULL) && (dtor->dt_proc != NULL));
		list_remove(&ll->ll_deleteq, dtor);
		--ll->ll_deleteq_count;
		mutex_exit(&ll->ll_mutex);

		/* Destructor runs without the mutex; it may post more. */
		dtor->dt_proc(dtor->dt_object);

		dtor->dt_magic = (uint32_t)~SMB_DTOR_MAGIC;
		kmem_cache_free(smb_dtor_cache, dtor);
		mutex_enter(&ll->ll_mutex);
		dtor = list_head(&ll->ll_deleteq);
	}
	ll->ll_flushing = B_FALSE;

	mutex_exit(&ll->ll_mutex);
}
 539 
/*
 * smb_llist_upgrade
 *
 * This function tries to upgrade the lock of the locked list. It assumes the
 * lock has already been entered in RW_READER mode. It first tries using the
 * Solaris function rw_tryupgrade(). If that call fails the lock is released
 * and reentered in RW_WRITER mode. In that last case a window is opened during
 * which the contents of the list may have changed. The return code indicates
 * whether or not the list was modified when the lock was exited:
 * 0 means no write happened in the window (or no window was opened);
 * nonzero means the list may have been modified and callers must
 * re-validate any state gathered under the reader lock.
 */
int smb_llist_upgrade(
    smb_llist_t *ll)
{
	uint64_t	wrop;

	if (rw_tryupgrade(&ll->ll_lock) != 0) {
		return (0);
	}
	/* ll_wrop counts write operations; compare across the window. */
	wrop = ll->ll_wrop;
	rw_exit(&ll->ll_lock);
	rw_enter(&ll->ll_lock, RW_WRITER);
	return (wrop != ll->ll_wrop);
}
 563 
/*
 * smb_llist_insert_head
 *
 * This function inserts the object passed at the beginning of the list.
 * This function assumes the lock of the list has already been entered
 * (as writer).
 */
void
smb_llist_insert_head(
    smb_llist_t	*ll,
    void	*obj)
{
	list_insert_head(&ll->ll_list, obj);
	++ll->ll_wrop;
	++ll->ll_count;
}
 579 
/*
 * smb_llist_insert_tail
 *
 * This function appends the object passed to the end of the list.
 * This function assumes the lock of the list has already been entered
 * (as writer).
 */
void
smb_llist_insert_tail(
    smb_llist_t	*ll,
    void	*obj)
{
	list_insert_tail(&ll->ll_list, obj);
	++ll->ll_wrop;
	++ll->ll_count;
}
 596 
/*
 * smb_llist_remove
 *
 * This function removes the object passed from the list. This function
 * assumes the lock of the list has already been entered (as writer).
 */
void
smb_llist_remove(
    smb_llist_t	*ll,
    void	*obj)
{
	list_remove(&ll->ll_list, obj);
	++ll->ll_wrop;
	--ll->ll_count;
}
 612 
/*
 * smb_llist_get_count
 *
 * This function returns the number of elements in the specified list.
 */
uint32_t
smb_llist_get_count(
    smb_llist_t *ll)
{
	return (ll->ll_count);
}
 624 
/*
 * smb_slist_constructor
 *
 * Synchronized list constructor.  size/offset describe the node type,
 * as for list_create(9F).  sl_cv is used by smb_slist_wait_for_empty().
 */
void
smb_slist_constructor(
    smb_slist_t	*sl,
    size_t	size,
    size_t	offset)
{
	mutex_init(&sl->sl_mutex, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&sl->sl_cv, NULL, CV_DEFAULT, NULL);
	list_create(&sl->sl_list, size, offset);
	sl->sl_count = 0;
	sl->sl_waiting = B_FALSE;
}
 642 
/*
 * smb_slist_destructor
 *
 * Synchronized list destructor.  The list must be empty.
 */
void
smb_slist_destructor(
    smb_slist_t	*sl)
{
	VERIFY(sl->sl_count == 0);

	mutex_destroy(&sl->sl_mutex);
	cv_destroy(&sl->sl_cv);
	list_destroy(&sl->sl_list);
}
 658 
/*
 * smb_slist_insert_head
 *
 * This function inserts the object passed at the beginning of the list,
 * taking and dropping the list mutex itself.
 */
void
smb_slist_insert_head(
    smb_slist_t	*sl,
    void	*obj)
{
	mutex_enter(&sl->sl_mutex);
	list_insert_head(&sl->sl_list, obj);
	++sl->sl_count;
	mutex_exit(&sl->sl_mutex);
}
 674 
/*
 * smb_slist_insert_tail
 *
 * This function appends the object passed to the list, taking and
 * dropping the list mutex itself.
 */
void
smb_slist_insert_tail(
    smb_slist_t	*sl,
    void	*obj)
{
	mutex_enter(&sl->sl_mutex);
	list_insert_tail(&sl->sl_list, obj);
	++sl->sl_count;
	mutex_exit(&sl->sl_mutex);
}
 690 
/*
 * smb_slist_remove
 *
 * This function removes the object passed by the caller from the list,
 * waking any thread blocked in smb_slist_wait_for_empty() when the
 * removal empties the list.
 */
void
smb_slist_remove(
    smb_slist_t	*sl,
    void	*obj)
{
	mutex_enter(&sl->sl_mutex);
	list_remove(&sl->sl_list, obj);
	if ((--sl->sl_count == 0) && (sl->sl_waiting)) {
		sl->sl_waiting = B_FALSE;
		cv_broadcast(&sl->sl_cv);
	}
	mutex_exit(&sl->sl_mutex);
}
 709 
/*
 * smb_slist_move_tail
 *
 * This function transfers all the contents of the synchronized list to the
 * list_t provided. It returns the number of objects transferred.
 * Waiters on an empty list are woken since the source becomes empty.
 */
uint32_t
smb_slist_move_tail(
    list_t	*lst,
    smb_slist_t	*sl)
{
	uint32_t	rv;

	mutex_enter(&sl->sl_mutex);
	rv = sl->sl_count;
	if (sl->sl_count) {
		list_move_tail(lst, &sl->sl_list);
		sl->sl_count = 0;
		if (sl->sl_waiting) {
			sl->sl_waiting = B_FALSE;
			cv_broadcast(&sl->sl_cv);
		}
	}
	mutex_exit(&sl->sl_mutex);
	return (rv);
}
 736 
/*
 * smb_slist_obj_move
 *
 * This function moves an object from one list to the end of the other list.
 * It assumes the mutex of each list has been entered.  Both lists must use
 * the same node layout (checked by the ASSERTs).
 */
void
smb_slist_obj_move(
    smb_slist_t	*dst,
    smb_slist_t	*src,
    void	*obj)
{
	ASSERT(dst->sl_list.list_offset == src->sl_list.list_offset);
	ASSERT(dst->sl_list.list_size == src->sl_list.list_size);

	list_remove(&src->sl_list, obj);
	list_insert_tail(&dst->sl_list, obj);
	dst->sl_count++;
	src->sl_count--;
	/* Wake a waiter if the source list just became empty. */
	if ((src->sl_count == 0) && (src->sl_waiting)) {
		src->sl_waiting = B_FALSE;
		cv_broadcast(&src->sl_cv);
	}
}
 761 
/*
 * smb_slist_wait_for_empty
 *
 * This function waits (blocks) until the list is empty.  sl_waiting
 * tells the removal paths to broadcast sl_cv when the count drops
 * to zero.
 */
void
smb_slist_wait_for_empty(
    smb_slist_t	*sl)
{
	mutex_enter(&sl->sl_mutex);
	while (sl->sl_count) {
		sl->sl_waiting = B_TRUE;
		cv_wait(&sl->sl_cv, &sl->sl_mutex);
	}
	mutex_exit(&sl->sl_mutex);
}
 778 
/*
 * smb_slist_exit
 *
 * This function exits the mutex of the list and signals the condition
 * variable if the list is empty (waking smb_slist_wait_for_empty()
 * waiters).  The caller must hold sl_mutex.
 */
void
smb_slist_exit(smb_slist_t *sl)
{
	if ((sl->sl_count == 0) && (sl->sl_waiting)) {
		sl->sl_waiting = B_FALSE;
		cv_broadcast(&sl->sl_cv);
	}
	mutex_exit(&sl->sl_mutex);
}
 794 
 795 /* smb_thread_... moved to smb_thread.c */
 796 
/*
 * smb_rwx_init
 *
 * Initialize an smb_rwx: a readers/writer lock paired with a condition
 * variable and mutex (see smb_rwx_cvwait/smb_rwx_cvbcast).
 */
void
smb_rwx_init(
    smb_rwx_t	*rwx)
{
	bzero(rwx, sizeof (smb_rwx_t));
	cv_init(&rwx->rwx_cv, NULL, CV_DEFAULT, NULL);
	mutex_init(&rwx->rwx_mutex, NULL, MUTEX_DEFAULT, NULL);
	rw_init(&rwx->rwx_lock, NULL, RW_DEFAULT, NULL);
}
 809 
/*
 * smb_rwx_destroy
 *
 * Destroy the synchronization primitives created by smb_rwx_init.
 */
void
smb_rwx_destroy(
    smb_rwx_t	*rwx)
{
	mutex_destroy(&rwx->rwx_mutex);
	cv_destroy(&rwx->rwx_cv);
	rw_destroy(&rwx->rwx_lock);
}
 821 
/*
 * smb_rwx_rwenter
 *
 * Enter the rw lock in the requested mode (RW_READER or RW_WRITER).
 */
void
smb_rwx_rwenter(smb_rwx_t *rwx, krw_t mode)
{
	rw_enter(&rwx->rwx_lock, mode);
}
 830 
/*
 * smb_rwx_rwexit
 *
 * Exit the rw lock.
 */
void
smb_rwx_rwexit(
    smb_rwx_t	*rwx)
{
	rw_exit(&rwx->rwx_lock);
}
 840 
 841 
/*
 * smb_rwx_cvwait
 *
 * Wait on rwx->rw_cv, dropping the rw lock and retake after wakeup.
 * Assumes the smb_rwx lock was entered in RW_READER or RW_WRITER
 * mode. It will:
 *
 *	1) release the lock and save its current mode.
 *	2) wait until the condition variable is signaled.
 *	3) re-acquire the lock in the mode saved in (1).
 *
 * timeout == -1 means wait forever (cv_wait); otherwise it is a
 * relative tick count passed to cv_reltimedwait.  Returns 1 when
 * no timeout was used, otherwise cv_reltimedwait's return value.
 *
 * Lock order: rwlock, mutex
 */
int
smb_rwx_cvwait(
    smb_rwx_t	*rwx,
    clock_t	timeout)
{
	krw_t	mode;
	int	rc = 1;

	/* Remember how the lock was held so it can be re-entered. */
	if (rw_write_held(&rwx->rwx_lock)) {
		ASSERT(rw_owner(&rwx->rwx_lock) == curthread);
		mode = RW_WRITER;
	} else {
		ASSERT(rw_read_held(&rwx->rwx_lock));
		mode = RW_READER;
	}

	/* Take the mutex before dropping the rw lock (lock order). */
	mutex_enter(&rwx->rwx_mutex);
	rw_exit(&rwx->rwx_lock);

	rwx->rwx_waiting = B_TRUE;
	if (timeout == -1) {
		cv_wait(&rwx->rwx_cv, &rwx->rwx_mutex);
	} else {
		rc = cv_reltimedwait(&rwx->rwx_cv, &rwx->rwx_mutex,
		    timeout, TR_CLOCK_TICK);
	}
	mutex_exit(&rwx->rwx_mutex);

	rw_enter(&rwx->rwx_lock, mode);
	return (rc);
}
 886 
/*
 * smb_rwx_cvbcast
 *
 * Wake up threads waiting on rx_cv.
 * The rw lock may or may not be held.
 * The mutex MUST NOT be held.
 */
void
smb_rwx_cvbcast(smb_rwx_t *rwx)
{
	mutex_enter(&rwx->rwx_mutex);
	if (rwx->rwx_waiting) {
		rwx->rwx_waiting = B_FALSE;
		cv_broadcast(&rwx->rwx_cv);
	}
	mutex_exit(&rwx->rwx_mutex);
}
 904 
 905 /* smb_idmap_... moved to smb_idmap.c */
 906 
/*
 * Convert a unix timestruc_t to NT time: 100-nanosecond intervals
 * since 1601 (NT_TIME_BIAS is the 1601->1970 offset in那 units).
 * Time zero is special-cased and converts to NT time zero; see the
 * matching special case in smb_time_nt_to_unix.
 */
uint64_t
smb_time_unix_to_nt(timestruc_t *unix_time)
{
	uint64_t nt_time;

	if ((unix_time->tv_sec == 0) && (unix_time->tv_nsec == 0))
		return (0);

	nt_time = unix_time->tv_sec;
	nt_time *= 10000000;  /* seconds to 100ns */
	nt_time += unix_time->tv_nsec / 100;
	return (nt_time + NT_TIME_BIAS);
}
 920 
/*
 * Convert NT time (100ns intervals since 1601) to a unix timestruc_t.
 * NT times 0 and -1 (all ones) convert to unix time 0:0, matching the
 * special case in smb_time_unix_to_nt.
 */
void
smb_time_nt_to_unix(uint64_t nt_time, timestruc_t *unix_time)
{
	/*
	 * NOTE(review): 32-bit seconds truncates NT times after
	 * year 2106 -- confirm acceptable.
	 */
	uint32_t seconds;

	ASSERT(unix_time);

	if ((nt_time == 0) || (nt_time == -1)) {
		unix_time->tv_sec = 0;
		unix_time->tv_nsec = 0;
		return;
	}

	/*
	 * Can't represent times less than or equal NT_TIME_BIAS,
	 * so convert them to the oldest date we can store.
	 * Note that time zero is "special" being converted
	 * both directions as 0:0 (unix-to-nt, nt-to-unix).
	 */
	if (nt_time <= NT_TIME_BIAS) {
		unix_time->tv_sec = 0;
		unix_time->tv_nsec = 100;
		return;
	}

	nt_time -= NT_TIME_BIAS;
	seconds = nt_time / 10000000;
	unix_time->tv_sec = seconds;
	unix_time->tv_nsec = (nt_time  % 10000000) * 100;
}
 951 
 952 /*
 953  * smb_time_gmt_to_local, smb_time_local_to_gmt
 954  *
 955  * Apply the gmt offset to convert between local time and gmt
 956  */
 957 int32_t
 958 smb_time_gmt_to_local(smb_request_t *sr, int32_t gmt)
 959 {
 960         if ((gmt == 0) || (gmt == -1))
 961                 return (0);
 962 
 963         return (gmt - sr->sr_gmtoff);
 964 }
 965 
 966 int32_t
 967 smb_time_local_to_gmt(smb_request_t *sr, int32_t local)
 968 {
 969         if ((local == 0) || (local == -1))
 970                 return (0);
 971 
 972         return (local + sr->sr_gmtoff);
 973 }
 974 
 975 
 976 /*
 977  * smb_time_dos_to_unix
 978  *
 979  * Convert SMB_DATE & SMB_TIME values to a unix timestamp.
 980  *
 981  * A date/time field of 0 means that that server file system
 982  * assigned value need not be changed. The behaviour when the
 983  * date/time field is set to -1 is not documented but is
 984  * generally treated like 0.
 985  * If date or time is 0 or -1 the unix time is returned as 0
 986  * so that the caller can identify and handle this special case.
 987  */
 988 int32_t
 989 smb_time_dos_to_unix(int16_t date, int16_t time)
 990 {
 991         struct tm       atm;
 992 
 993         if (((date == 0) || (time == 0)) ||
 994             ((date == -1) || (time == -1))) {
 995                 return (0);
 996         }
 997 
 998         atm.tm_year = ((date >>  9) & 0x3F) + 80;
 999         atm.tm_mon  = ((date >>  5) & 0x0F) - 1;
1000         atm.tm_mday = ((date >>  0) & 0x1F);
1001         atm.tm_hour = ((time >> 11) & 0x1F);
1002         atm.tm_min  = ((time >>  5) & 0x3F);
1003         atm.tm_sec  = ((time >>  0) & 0x1F) << 1;
1004 
1005         return (smb_timegm(&atm));
1006 }
1007 
/*
 * Convert a unix timestamp to DOS date/time fields (inverse of
 * smb_time_dos_to_unix).  Date packs year-since-1980 (bits 15-9),
 * month (bits 8-5) and day (bits 4-0); time packs hour (bits 15-11),
 * minute (bits 10-5) and seconds/2 (bits 4-0).  A unix time of 0
 * produces 0:0.  Either output pointer may be NULL to skip it.
 */
void
smb_time_unix_to_dos(int32_t ux_time, int16_t *date_p, int16_t *time_p)
{
	struct tm	atm;
	int		i;
	time_t		tmp_time;

	if (ux_time == 0) {
		*date_p = 0;
		*time_p = 0;
		return;
	}

	tmp_time = (time_t)ux_time;
	(void) smb_gmtime_r(&tmp_time, &atm);

	if (date_p) {
		/* Pack year (<<9 total), month (<<5) and day. */
		i = 0;
		i += atm.tm_year - 80;
		i <<= 4;
		i += atm.tm_mon + 1;
		i <<= 5;
		i += atm.tm_mday;

		*date_p = (short)i;
	}
	if (time_p) {
		/* Pack hour (<<11 total), minute (<<5) and seconds/2. */
		i = 0;
		i += atm.tm_hour;
		i <<= 6;
		i += atm.tm_min;
		i <<= 5;
		i += atm.tm_sec >> 1;

		*time_p = (short)i;
	}
}
1045 
1046 
1047 /*
1048  * smb_gmtime_r
1049  *
1050  * Thread-safe version of smb_gmtime. Returns a null pointer if either
1051  * input parameter is a null pointer. Otherwise returns a pointer
1052  * to result.
1053  *
1054  * Day of the week calculation: the Epoch was a thursday.
1055  *
1056  * There are no timezone corrections so tm_isdst and tm_gmtoff are
1057  * always zero, and the zone is always WET.
1058  */
struct tm *
smb_gmtime_r(time_t *clock, struct tm *result)
{
	time_t tsec;
	int year;
	int month;
	int sec_per_month;

	if (clock == 0 || result == 0)
		return (0);

	bzero(result, sizeof (struct tm));
	tsec = *clock;
	tsec -= tzh_leapcnt;	/* back out accumulated leap seconds */

	/* Day of week: day 0 (the Epoch) was a Thursday. */
	result->tm_wday = tsec / SECSPERDAY;
	result->tm_wday = (result->tm_wday + TM_THURSDAY) % DAYSPERWEEK;

	/* Strip off whole years, counting up from 1970. */
	year = EPOCH_YEAR;
	while (tsec >= (isleap(year) ? (SECSPERDAY * DAYSPERLYEAR) :
	    (SECSPERDAY * DAYSPERNYEAR))) {
		if (isleap(year))
			tsec -= SECSPERDAY * DAYSPERLYEAR;
		else
			tsec -= SECSPERDAY * DAYSPERNYEAR;

		++year;
	}

	result->tm_year = year - TM_YEAR_BASE;	/* years since 1900 */
	result->tm_yday = tsec / SECSPERDAY;	/* day within the year */

	/* Strip off whole months; February gains a day in leap years. */
	for (month = TM_JANUARY; month <= TM_DECEMBER; ++month) {
		sec_per_month = days_in_month[month] * SECSPERDAY;

		if (month == TM_FEBRUARY && isleap(year))
			sec_per_month += SECSPERDAY;

		if (tsec < sec_per_month)
			break;

		tsec -= sec_per_month;
	}

	/* What remains is the day of the month and the time of day. */
	result->tm_mon = month;
	result->tm_mday = (tsec / SECSPERDAY) + 1;
	tsec %= SECSPERDAY;
	result->tm_sec = tsec % 60;
	tsec /= 60;
	result->tm_min = tsec % 60;
	tsec /= 60;
	result->tm_hour = (int)tsec;

	return (result);
}
1114 
1115 
1116 /*
1117  * smb_timegm
1118  *
1119  * Converts the broken-down time in tm to a time value, i.e. the number
1120  * of seconds since the Epoch (00:00:00 UTC, January 1, 1970). This is
1121  * not a POSIX or ANSI function. Per the man page, the input values of
1122  * tm_wday and tm_yday are ignored and, as the input data is assumed to
1123  * represent GMT, we force tm_isdst and tm_gmtoff to 0.
1124  *
1125  * Before returning the clock time, we use smb_gmtime_r to set up tm_wday
1126  * and tm_yday, and bring the other fields within normal range. I don't
1127  * think this is really how it should be done but it's convenient for
1128  * now.
1129  */
1130 time_t
1131 smb_timegm(struct tm *tm)
1132 {
1133         time_t tsec;
1134         int dd;
1135         int mm;
1136         int yy;
1137         int year;
1138 
1139         if (tm == 0)
1140                 return (-1);
1141 
1142         year = tm->tm_year + TM_YEAR_BASE;
1143         tsec = tzh_leapcnt;
1144 
1145         for (yy = EPOCH_YEAR; yy < year; ++yy) {
1146                 if (isleap(yy))
1147                         tsec += SECSPERDAY * DAYSPERLYEAR;
1148                 else
1149                         tsec += SECSPERDAY * DAYSPERNYEAR;
1150         }
1151 
1152         for (mm = TM_JANUARY; mm < tm->tm_mon; ++mm) {
1153                 dd = days_in_month[mm] * SECSPERDAY;
1154 
1155                 if (mm == TM_FEBRUARY && isleap(year))
1156                         dd += SECSPERDAY;
1157 
1158                 tsec += dd;
1159         }
1160 
1161         tsec += (tm->tm_mday - 1) * SECSPERDAY;
1162         tsec += tm->tm_sec;
1163         tsec += tm->tm_min * SECSPERMIN;
1164         tsec += tm->tm_hour * SECSPERHOUR;
1165 
1166         tm->tm_isdst = 0;
1167         (void) smb_gmtime_r(&tsec, tm);
1168         return (tsec);
1169 }
1170 
1171 /*
1172  * smb_pad_align
1173  *
1174  * Returns the number of bytes required to pad an offset to the
1175  * specified alignment.
1176  */
uint32_t
smb_pad_align(uint32_t offset, uint32_t align)
{
	/* Distance from offset up to the next multiple of align. */
	uint32_t rem = offset % align;

	return ((rem == 0) ? 0 : align - rem);
}
1187 
1188 /*
1189  * smb_panic
1190  *
1191  * Logs the file name, function name and line number passed in and panics the
1192  * system.
1193  */
void
smb_panic(char *file, const char *func, int line)
{
	/* CE_PANIC halts the system; this call does not return. */
	cmn_err(CE_PANIC, "%s:%s:%d\n", file, func, line);
}
1199 
1200 /*
1201  * Creates an AVL tree and initializes the given smb_avl_t
1202  * structure using the passed args
1203  */
1204 void
1205 smb_avl_create(smb_avl_t *avl, size_t size, size_t offset,
1206         const smb_avl_nops_t *ops)
1207 {
1208         ASSERT(avl);
1209         ASSERT(ops);
1210 
1211         rw_init(&avl->avl_lock, NULL, RW_DEFAULT, NULL);
1212         mutex_init(&avl->avl_mutex, NULL, MUTEX_DEFAULT, NULL);
1213 
1214         avl->avl_nops = ops;
1215         avl->avl_state = SMB_AVL_STATE_READY;
1216         avl->avl_refcnt = 0;
1217         (void) random_get_pseudo_bytes((uint8_t *)&avl->avl_sequence,
1218             sizeof (uint32_t));
1219 
1220         avl_create(&avl->avl_tree, ops->avln_cmp, size, offset);
1221 }
1222 
1223 /*
1224  * Destroys the specified AVL tree.
1225  * It waits for all the in-flight operations to finish
1226  * before destroying the AVL.
1227  */
void
smb_avl_destroy(smb_avl_t *avl)
{
	void *cookie = NULL;
	void *node;

	ASSERT(avl);

	/* Only one thread gets to move the AVL from READY to DESTROYING. */
	mutex_enter(&avl->avl_mutex);
	if (avl->avl_state != SMB_AVL_STATE_READY) {
		mutex_exit(&avl->avl_mutex);
		return;
	}

	avl->avl_state = SMB_AVL_STATE_DESTROYING;

	/*
	 * New holds now fail in smb_avl_hold(); wait for in-flight
	 * operations to drain.  smb_avl_rele() broadcasts avl_cv once
	 * the state is DESTROYING.
	 */
	while (avl->avl_refcnt > 0)
		(void) cv_wait(&avl->avl_cv, &avl->avl_mutex);
	mutex_exit(&avl->avl_mutex);

	/* Tear down remaining nodes through the registered destructor. */
	rw_enter(&avl->avl_lock, RW_WRITER);
	while ((node = avl_destroy_nodes(&avl->avl_tree, &cookie)) != NULL)
		avl->avl_nops->avln_destroy(node);

	avl_destroy(&avl->avl_tree);
	rw_exit(&avl->avl_lock);

	rw_destroy(&avl->avl_lock);

	mutex_destroy(&avl->avl_mutex);
	bzero(avl, sizeof (smb_avl_t));
}
1260 
1261 /*
1262  * Adds the given item to the AVL if it's
1263  * not already there.
1264  *
1265  * Returns:
1266  *
1267  *      ENOTACTIVE      AVL is not in READY state
1268  *      EEXIST          The item is already in AVL
1269  */
1270 int
1271 smb_avl_add(smb_avl_t *avl, void *item)
1272 {
1273         avl_index_t where;
1274 
1275         ASSERT(avl);
1276         ASSERT(item);
1277 
1278         if (!smb_avl_hold(avl))
1279                 return (ENOTACTIVE);
1280 
1281         rw_enter(&avl->avl_lock, RW_WRITER);
1282         if (avl_find(&avl->avl_tree, item, &where) != NULL) {
1283                 rw_exit(&avl->avl_lock);
1284                 smb_avl_rele(avl);
1285                 return (EEXIST);
1286         }
1287 
1288         avl_insert(&avl->avl_tree, item, where);
1289         avl->avl_sequence++;
1290         rw_exit(&avl->avl_lock);
1291 
1292         smb_avl_rele(avl);
1293         return (0);
1294 }
1295 
1296 /*
1297  * Removes the given item from the AVL.
1298  * If no reference is left on the item
1299  * it will also be destroyed by calling the
1300  * registered destroy operation.
1301  */
1302 void
1303 smb_avl_remove(smb_avl_t *avl, void *item)
1304 {
1305         avl_index_t where;
1306         void *rm_item;
1307 
1308         ASSERT(avl);
1309         ASSERT(item);
1310 
1311         if (!smb_avl_hold(avl))
1312                 return;
1313 
1314         rw_enter(&avl->avl_lock, RW_WRITER);
1315         if ((rm_item = avl_find(&avl->avl_tree, item, &where)) == NULL) {
1316                 rw_exit(&avl->avl_lock);
1317                 smb_avl_rele(avl);
1318                 return;
1319         }
1320 
1321         avl_remove(&avl->avl_tree, rm_item);
1322         if (avl->avl_nops->avln_rele(rm_item))
1323                 avl->avl_nops->avln_destroy(rm_item);
1324         avl->avl_sequence++;
1325         rw_exit(&avl->avl_lock);
1326 
1327         smb_avl_rele(avl);
1328 }
1329 
1330 /*
1331  * Looks up the AVL for the given item.
1332  * If the item is found a hold on the object
1333  * is taken before the pointer to it is
1334  * returned to the caller. The caller MUST
1335  * always call smb_avl_release() after it's done
1336  * using the returned object to release the hold
1337  * taken on the object.
1338  */
1339 void *
1340 smb_avl_lookup(smb_avl_t *avl, void *item)
1341 {
1342         void *node = NULL;
1343 
1344         ASSERT(avl);
1345         ASSERT(item);
1346 
1347         if (!smb_avl_hold(avl))
1348                 return (NULL);
1349 
1350         rw_enter(&avl->avl_lock, RW_READER);
1351         node = avl_find(&avl->avl_tree, item, NULL);
1352         if (node != NULL)
1353                 avl->avl_nops->avln_hold(node);
1354         rw_exit(&avl->avl_lock);
1355 
1356         if (node == NULL)
1357                 smb_avl_rele(avl);
1358 
1359         return (node);
1360 }
1361 
1362 /*
1363  * The hold on the given object is released.
1364  * This function MUST always be called after
1365  * smb_avl_lookup() and smb_avl_iterate() for
1366  * the returned object.
1367  *
1368  * If AVL is in DESTROYING state, the destroying
1369  * thread will be notified.
1370  */
void
smb_avl_release(smb_avl_t *avl, void *item)
{
	ASSERT(avl);
	ASSERT(item);

	/* Drop the node hold; destroy the node on the last release. */
	if (avl->avl_nops->avln_rele(item))
		avl->avl_nops->avln_destroy(item);

	/* Drop the AVL hold taken by smb_avl_lookup()/smb_avl_iterate(). */
	smb_avl_rele(avl);
}
1382 
1383 /*
1384  * Initializes the given cursor for the AVL.
1385  * The cursor will be used to iterate through the AVL
1386  */
1387 void
1388 smb_avl_iterinit(smb_avl_t *avl, smb_avl_cursor_t *cursor)
1389 {
1390         ASSERT(avl);
1391         ASSERT(cursor);
1392 
1393         cursor->avlc_next = NULL;
1394         cursor->avlc_sequence = avl->avl_sequence;
1395 }
1396 
1397 /*
1398  * Iterates through the AVL using the given cursor.
1399  * It always starts at the beginning and then returns
1400  * a pointer to the next object on each subsequent call.
1401  *
1402  * If a new object is added to or removed from the AVL
1403  * between two calls to this function, the iteration
1404  * will terminate prematurely.
1405  *
1406  * The caller MUST always call smb_avl_release() after it's
1407  * done using the returned object to release the hold taken
1408  * on the object.
1409  */
1410 void *
1411 smb_avl_iterate(smb_avl_t *avl, smb_avl_cursor_t *cursor)
1412 {
1413         void *node;
1414 
1415         ASSERT(avl);
1416         ASSERT(cursor);
1417 
1418         if (!smb_avl_hold(avl))
1419                 return (NULL);
1420 
1421         rw_enter(&avl->avl_lock, RW_READER);
1422         if (cursor->avlc_sequence != avl->avl_sequence) {
1423                 rw_exit(&avl->avl_lock);
1424                 smb_avl_rele(avl);
1425                 return (NULL);
1426         }
1427 
1428         if (cursor->avlc_next == NULL)
1429                 node = avl_first(&avl->avl_tree);
1430         else
1431                 node = AVL_NEXT(&avl->avl_tree, cursor->avlc_next);
1432 
1433         if (node != NULL)
1434                 avl->avl_nops->avln_hold(node);
1435 
1436         cursor->avlc_next = node;
1437         rw_exit(&avl->avl_lock);
1438 
1439         if (node == NULL)
1440                 smb_avl_rele(avl);
1441 
1442         return (node);
1443 }
1444 
1445 /*
1446  * Increments the AVL reference count in order to
1447  * prevent the avl from being destroyed while it's
1448  * being accessed.
1449  */
1450 static boolean_t
1451 smb_avl_hold(smb_avl_t *avl)
1452 {
1453         mutex_enter(&avl->avl_mutex);
1454         if (avl->avl_state != SMB_AVL_STATE_READY) {
1455                 mutex_exit(&avl->avl_mutex);
1456                 return (B_FALSE);
1457         }
1458         avl->avl_refcnt++;
1459         mutex_exit(&avl->avl_mutex);
1460 
1461         return (B_TRUE);
1462 }
1463 
1464 /*
1465  * Decrements the AVL reference count to release the
1466  * hold. If another thread is trying to destroy the
1467  * AVL and is waiting for the reference count to become
1468  * 0, it is signaled to wake up.
1469  */
static void
smb_avl_rele(smb_avl_t *avl)
{
	mutex_enter(&avl->avl_mutex);
	ASSERT(avl->avl_refcnt > 0);
	avl->avl_refcnt--;
	/* Wake a destroyer waiting in smb_avl_destroy() for refcnt == 0. */
	if (avl->avl_state == SMB_AVL_STATE_DESTROYING)
		cv_broadcast(&avl->avl_cv);
	mutex_exit(&avl->avl_mutex);
}
1480 
1481 /*
1482  * smb_latency_init
1483  */
1484 void
1485 smb_latency_init(smb_latency_t *lat)
1486 {
1487         bzero(lat, sizeof (*lat));
1488         mutex_init(&lat->ly_mutex, NULL, MUTEX_SPIN, (void *)ipltospl(SPL7));
1489 }
1490 
1491 /*
1492  * smb_latency_destroy
1493  */
void
smb_latency_destroy(smb_latency_t *lat)
{
	/* No dynamic state beyond the lock; callers ensure quiescence. */
	mutex_destroy(&lat->ly_mutex);
}
1499 
1500 /*
1501  * smb_latency_add_sample
1502  *
1503  * Uses the new sample to calculate the new mean and standard deviation. The
1504  * sample must be a scaled value.
1505  */
void
smb_latency_add_sample(smb_latency_t *lat, hrtime_t sample)
{
	hrtime_t	a_mean;
	hrtime_t	d_mean;

	mutex_enter(&lat->ly_mutex);
	/* "a" counter set (presumably the all-time aggregate — confirm). */
	lat->ly_a_nreq++;
	lat->ly_a_sum += sample;
	if (lat->ly_a_nreq != 0) {
		a_mean = lat->ly_a_sum / lat->ly_a_nreq;
		/*
		 * NOTE(review): "=" stores only the latest Welford term
		 * (x - mean_new) * (x - mean_old).  If ly_a_stddev is
		 * intended to be the running sum of squared deviations
		 * (Welford's online algorithm), this should be "+=" —
		 * verify against the kstat consumer before changing.
		 */
		lat->ly_a_stddev =
		    (sample - a_mean) * (sample - lat->ly_a_mean);
		lat->ly_a_mean = a_mean;
	}
	/* "d" counter set (presumably per-interval delta — confirm). */
	lat->ly_d_nreq++;
	lat->ly_d_sum += sample;
	if (lat->ly_d_nreq != 0) {
		d_mean = lat->ly_d_sum / lat->ly_d_nreq;
		lat->ly_d_stddev =
		    (sample - d_mean) * (sample - lat->ly_d_mean);
		lat->ly_d_mean = d_mean;
	}
	mutex_exit(&lat->ly_mutex);
}
1531 
1532 /*
1533  * smb_srqueue_init
1534  */
1535 void
1536 smb_srqueue_init(smb_srqueue_t *srq)
1537 {
1538         bzero(srq, sizeof (*srq));
1539         mutex_init(&srq->srq_mutex, NULL, MUTEX_SPIN, (void *)ipltospl(SPL7));
1540         srq->srq_wlastupdate = srq->srq_rlastupdate = gethrtime_unscaled();
1541 }
1542 
1543 /*
1544  * smb_srqueue_destroy
1545  */
void
smb_srqueue_destroy(smb_srqueue_t *srq)
{
	/* Counters need no teardown; only the lock must be destroyed. */
	mutex_destroy(&srq->srq_mutex);
}
1551 
1552 /*
1553  * smb_srqueue_waitq_enter
1554  */
1555 void
1556 smb_srqueue_waitq_enter(smb_srqueue_t *srq)
1557 {
1558         hrtime_t        new;
1559         hrtime_t        delta;
1560         uint32_t        wcnt;
1561 
1562         mutex_enter(&srq->srq_mutex);
1563         new = gethrtime_unscaled();
1564         delta = new - srq->srq_wlastupdate;
1565         srq->srq_wlastupdate = new;
1566         wcnt = srq->srq_wcnt++;
1567         if (wcnt != 0) {
1568                 srq->srq_wlentime += delta * wcnt;
1569                 srq->srq_wtime += delta;
1570         }
1571         mutex_exit(&srq->srq_mutex);
1572 }
1573 
1574 /*
1575  * smb_srqueue_runq_exit
1576  */
1577 void
1578 smb_srqueue_runq_exit(smb_srqueue_t *srq)
1579 {
1580         hrtime_t        new;
1581         hrtime_t        delta;
1582         uint32_t        rcnt;
1583 
1584         mutex_enter(&srq->srq_mutex);
1585         new = gethrtime_unscaled();
1586         delta = new - srq->srq_rlastupdate;
1587         srq->srq_rlastupdate = new;
1588         rcnt = srq->srq_rcnt--;
1589         ASSERT(rcnt > 0);
1590         srq->srq_rlentime += delta * rcnt;
1591         srq->srq_rtime += delta;
1592         mutex_exit(&srq->srq_mutex);
1593 }
1594 
1595 /*
1596  * smb_srqueue_waitq_to_runq
1597  */
void
smb_srqueue_waitq_to_runq(smb_srqueue_t *srq)
{
	hrtime_t	new;
	hrtime_t	delta;
	uint32_t	wcnt;
	uint32_t	rcnt;

	mutex_enter(&srq->srq_mutex);
	new = gethrtime_unscaled();
	/* Close out the wait-queue interval and remove one waiter. */
	delta = new - srq->srq_wlastupdate;
	srq->srq_wlastupdate = new;
	wcnt = srq->srq_wcnt--;	/* wcnt is the pre-decrement count */
	ASSERT(wcnt > 0);
	srq->srq_wlentime += delta * wcnt;
	srq->srq_wtime += delta;
	/* Close out the run-queue interval and add one runner. */
	delta = new - srq->srq_rlastupdate;
	srq->srq_rlastupdate = new;
	rcnt = srq->srq_rcnt++;	/* rcnt is the pre-increment count */
	if (rcnt != 0) {
		srq->srq_rlentime += delta * rcnt;
		srq->srq_rtime += delta;
	}
	mutex_exit(&srq->srq_mutex);
}
1623 
1624 /*
1625  * smb_srqueue_update
1626  *
1627  * Takes a snapshot of the smb_sr_stat_t structure passed in.
1628  */
void
smb_srqueue_update(smb_srqueue_t *srq, smb_kstat_utilization_t *kd)
{
	hrtime_t	delta;
	hrtime_t	snaptime;

	mutex_enter(&srq->srq_mutex);
	snaptime = gethrtime_unscaled();
	/* Bring the wait-queue accumulators up to the snapshot time. */
	delta = snaptime - srq->srq_wlastupdate;
	srq->srq_wlastupdate = snaptime;
	if (srq->srq_wcnt != 0) {
		srq->srq_wlentime += delta * srq->srq_wcnt;
		srq->srq_wtime += delta;
	}
	/* Likewise for the run-queue accumulators. */
	delta = snaptime - srq->srq_rlastupdate;
	srq->srq_rlastupdate = snaptime;
	if (srq->srq_rcnt != 0) {
		srq->srq_rlentime += delta * srq->srq_rcnt;
		srq->srq_rtime += delta;
	}
	/* Copy the counters out while still holding the lock. */
	kd->ku_rlentime = srq->srq_rlentime;
	kd->ku_rtime = srq->srq_rtime;
	kd->ku_wlentime = srq->srq_wlentime;
	kd->ku_wtime = srq->srq_wtime;
	mutex_exit(&srq->srq_mutex);
	/*
	 * Scale the private copies after dropping the spin lock;
	 * scalehrtime() touches only the snapshot, not srq itself.
	 */
	scalehrtime(&kd->ku_rlentime);
	scalehrtime(&kd->ku_rtime);
	scalehrtime(&kd->ku_wlentime);
	scalehrtime(&kd->ku_wtime);
}
1659 
1660 void
1661 smb_threshold_init(smb_cmd_threshold_t *ct, char *cmd,
1662     uint_t threshold, uint_t timeout)
1663 {
1664         bzero(ct, sizeof (smb_cmd_threshold_t));
1665         mutex_init(&ct->ct_mutex, NULL, MUTEX_DEFAULT, NULL);
1666         cv_init(&ct->ct_cond, NULL, CV_DEFAULT, NULL);
1667 
1668         ct->ct_cmd = cmd;
1669         ct->ct_threshold = threshold;
1670         ct->ct_timeout = timeout;
1671 }
1672 
void
smb_threshold_fini(smb_cmd_threshold_t *ct)
{
	/* Callers must have drained all waiters (smb_threshold_wake_all). */
	cv_destroy(&ct->ct_cond);
	mutex_destroy(&ct->ct_mutex);
}
1679 
1680 /*
1681  * This threshold mechanism is used to limit the number of simultaneous
1682  * named pipe connections, concurrent authentication conversations, etc.
1683  * Requests that would take us over the threshold wait until either the
1684  * resources are available (return zero) or timeout (return error).
1685  */
int
smb_threshold_enter(smb_cmd_threshold_t *ct)
{
	clock_t time, rem;

	/* Absolute deadline for cv_timedwait, in lbolt ticks. */
	time = MSEC_TO_TICK(ct->ct_timeout) + ddi_get_lbolt();
	mutex_enter(&ct->ct_mutex);

	/* Wait while the threshold is set and already fully used. */
	while (ct->ct_threshold != 0 &&
	    ct->ct_threshold <= ct->ct_active_cnt) {
		ct->ct_blocked_cnt++;
		rem = cv_timedwait(&ct->ct_cond, &ct->ct_mutex, time);
		ct->ct_blocked_cnt--;
		if (rem < 0) {
			/* Deadline passed without getting a slot. */
			mutex_exit(&ct->ct_mutex);
			return (ETIME);
		}
	}
	/*
	 * A zero threshold means the facility was shut down (see
	 * smb_threshold_wake_all(), which sets it to wake waiters).
	 */
	if (ct->ct_threshold == 0) {
		mutex_exit(&ct->ct_mutex);
		return (ECANCELED);
	}

	ASSERT3U(ct->ct_active_cnt, <, ct->ct_threshold);
	ct->ct_active_cnt++;

	mutex_exit(&ct->ct_mutex);
	return (0);
}
1715 
1716 void
1717 smb_threshold_exit(smb_cmd_threshold_t *ct)
1718 {
1719         mutex_enter(&ct->ct_mutex);
1720         ASSERT3U(ct->ct_active_cnt, >, 0);
1721         ct->ct_active_cnt--;
1722         if (ct->ct_blocked_cnt)
1723                 cv_signal(&ct->ct_cond);
1724         mutex_exit(&ct->ct_mutex);
1725 }
1726 
1727 void
1728 smb_threshold_wake_all(smb_cmd_threshold_t *ct)
1729 {
1730         mutex_enter(&ct->ct_mutex);
1731         ct->ct_threshold = 0;
1732         cv_broadcast(&ct->ct_cond);
1733         mutex_exit(&ct->ct_mutex);
1734 }
1735 
1736 /* taken from mod_hash_byptr */
1737 uint_t
1738 smb_hash_uint64(smb_hash_t *hash, uint64_t val)
1739 {
1740         uint64_t k = val >> hash->rshift;
1741         uint_t idx = ((uint_t)k) & (hash->num_buckets - 1);
1742 
1743         return (idx);
1744 }
1745 
1746 boolean_t
1747 smb_is_pow2(size_t n)
1748 {
1749         return ((n & (n - 1)) == 0);
1750 }
1751 
1752 smb_hash_t *
1753 smb_hash_create(size_t elemsz, size_t link_offset,
1754     uint32_t num_buckets)
1755 {
1756         smb_hash_t *hash = kmem_alloc(sizeof (*hash), KM_SLEEP);
1757         int i;
1758 
1759         if (!smb_is_pow2(num_buckets))
1760                 num_buckets = 1 << highbit(num_buckets);
1761 
1762         hash->rshift = highbit(elemsz);
1763         hash->num_buckets = num_buckets;
1764         hash->buckets = kmem_zalloc(num_buckets * sizeof (smb_bucket_t),
1765             KM_SLEEP);
1766         for (i = 0; i < num_buckets; i++)
1767                 smb_llist_constructor(&hash->buckets[i].b_list, elemsz,
1768                     link_offset);
1769         return (hash);
1770 }
1771 
1772 void
1773 smb_hash_destroy(smb_hash_t *hash)
1774 {
1775         int i;
1776 
1777         for (i = 0; i < hash->num_buckets; i++)
1778                 smb_llist_destructor(&hash->buckets[i].b_list);
1779 
1780         kmem_free(hash->buckets, hash->num_buckets * sizeof (smb_bucket_t));
1781         kmem_free(hash, sizeof (*hash));
1782 }