/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011 by Delphix. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc.  All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/spa_impl.h>
#include <sys/zio.h>
#include <sys/zio_checksum.h>
#include <sys/zio_compress.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/zap.h>
#include <sys/zil.h>
#include <sys/vdev_impl.h>
#include <sys/metaslab.h>
#include <sys/uberblock_impl.h>
#include <sys/txg.h>
#include <sys/avl.h>
#include <sys/unique.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_scan.h>
#include <sys/fs/zfs.h>
#include <sys/metaslab_impl.h>
#include <sys/arc.h>
#include <sys/ddt.h>
#include "zfs_prop.h"

/*
 * SPA locking
 *
 * There are four basic locks for managing spa_t structures:
 *
 * spa_namespace_lock (global mutex)
 *
 *      This lock must be acquired to do any of the following:
 *
 *              - Lookup a spa_t by name
 *              - Add or remove a spa_t from the namespace
 *              - Increase spa_refcount from non-zero
 *              - Check if spa_refcount is zero
 *              - Rename a spa_t
 *              - add/remove/attach/detach devices
 *              - Held for the duration of create/destroy/import/export
 *
 *      It does not need to handle recursion.  A create or destroy may
 *      reference objects (files or zvols) in other pools, but by
 *      definition they must have an existing reference, and will never need
 *      to lookup a spa_t by name.
 *
 * spa_refcount (per-spa refcount_t protected by mutex)
 *
 *      This reference count keeps track of any active users of the spa_t.  The
 *      spa_t cannot be destroyed or freed while this is non-zero.  Internally,
 *      the refcount is never really 'zero' - opening a pool implicitly keeps
 *      some references in the DMU.  Internally we check against spa_minref, but
 *      present the image of a zero/non-zero value to consumers.
 *
 * spa_config_lock[] (per-spa array of rwlocks)
 *
 *      This protects the spa_t from config changes, and must be held in
 *      the following circumstances:
 *
 *              - RW_READER to perform I/O to the spa
 *              - RW_WRITER to change the vdev config
 *
 * The locking order is fairly straightforward:
 *
 *              spa_namespace_lock      ->   spa_refcount
 *
 *      The namespace lock must be acquired to increase the refcount from 0
 *      or to check if it is zero.
 *
 *              spa_refcount            ->   spa_config_lock[]
 *
 *      There must be at least one valid reference on the spa_t to acquire
 *      the config lock.
 *
 *              spa_namespace_lock      ->   spa_config_lock[]
 *
 *      The namespace lock must always be taken before the config lock.
 *
 *
 * The spa_namespace_lock can be acquired directly and is globally visible.
 *
 * The namespace is manipulated using the following functions, all of which
 * require the spa_namespace_lock to be held.
 *
 *      spa_lookup()            Lookup a spa_t by name.
 *
 *      spa_add()               Create a new spa_t in the namespace.
 *
 *      spa_remove()            Remove a spa_t from the namespace.  This also
 *                              frees up any memory associated with the spa_t.
 *
 *      spa_next()              Returns the next spa_t in the system, or the
 *                              first if NULL is passed.
 *
 *      spa_evict_all()         Shutdown and remove all spa_t structures in
 *                              the system.
 *
 *      spa_guid_exists()       Determine whether a pool/device guid exists.
 *
 * The spa_refcount is manipulated using the following functions:
 *
 *      spa_open_ref()          Adds a reference to the given spa_t.  Must be
 *                              called with spa_namespace_lock held if the
 *                              refcount is currently zero.
 *
 *      spa_close()             Remove a reference from the spa_t.  This will
 *                              not free the spa_t or remove it from the
 *                              namespace.  No locking is required.
 *
 *      spa_refcount_zero()     Returns true if the refcount is currently
 *                              zero.  Must be called with spa_namespace_lock
 *                              held.
 *
 * The spa_config_lock[] is an array of rwlocks, ordered as follows:
 * SCL_CONFIG > SCL_STATE > SCL_ALLOC > SCL_ZIO > SCL_FREE > SCL_VDEV.
 * spa_config_lock[] is manipulated with spa_config_{enter,exit,held}().
 *
 * To read the configuration, it suffices to hold one of these locks as reader.
 * To modify the configuration, you must hold all locks as writer.  To modify
 * vdev state without altering the vdev tree's topology (e.g. online/offline),
 * you must hold SCL_STATE and SCL_ZIO as writer.
 *
 * We use these distinct config locks to avoid recursive lock entry.
 * For example, spa_sync() (which holds SCL_CONFIG as reader) induces
 * block allocations (SCL_ALLOC), which may require reading space maps
 * from disk (dmu_read() -> zio_read() -> SCL_ZIO).
 *
 * The spa config locks cannot be normal rwlocks because we need the
 * ability to hand off ownership.  For example, SCL_ZIO is acquired
 * by the issuing thread and later released by an interrupt thread.
 * They do, however, obey the usual write-wanted semantics to prevent
 * writer (i.e. system administrator) starvation.
 *
 * The lock acquisition rules are as follows:
 *
 * SCL_CONFIG
 *      Protects changes to the vdev tree topology, such as vdev
 *      add/remove/attach/detach.  Protects the dirty config list
 *      (spa_config_dirty_list) and the set of spares and l2arc devices.
 *
 * SCL_STATE
 *      Protects changes to pool state and vdev state, such as vdev
 *      online/offline/fault/degrade/clear.  Protects the dirty state list
 *      (spa_state_dirty_list) and global pool state (spa_state).
 *
 * SCL_ALLOC
 *      Protects changes to metaslab groups and classes.
 *      Held as reader by metaslab_alloc() and metaslab_claim().
 *
 * SCL_ZIO
 *      Held by bp-level zios (those which have no io_vd upon entry)
 *      to prevent changes to the vdev tree.  The bp-level zio implicitly
 *      protects all of its vdev child zios, which do not hold SCL_ZIO.
 *
 * SCL_FREE
 *      Protects changes to metaslab groups and classes.
 *      Held as reader by metaslab_free().  SCL_FREE is distinct from
 *      SCL_ALLOC, and lower than SCL_ZIO, so that we can safely free
 *      blocks in zio_done() while another i/o that holds either
 *      SCL_ALLOC or SCL_ZIO is waiting for this i/o to complete.
 *
 * SCL_VDEV
 *      Held as reader to prevent changes to the vdev tree during trivial
 *      inquiries such as bp_get_dsize().  SCL_VDEV is distinct from the
 *      other locks, and lower than all of them, to ensure that it's safe
 *      to acquire regardless of caller context.
 *
 * In addition, the following rules apply:
 *
 * (a)  spa_props_lock protects pool properties, spa_config and spa_config_list.
 *      The lock ordering is SCL_CONFIG > spa_props_lock.
 *
 * (b)  I/O operations on leaf vdevs.  For any zio operation that takes
 *      an explicit vdev_t argument -- such as zio_ioctl(), zio_read_phys(),
 *      or zio_write_phys() -- the caller must ensure that the config cannot
 *      change in the interim, and that the vdev cannot be reopened.
 *      SCL_STATE as reader suffices for both.
 *
 * The vdev configuration is protected by spa_vdev_enter() / spa_vdev_exit().
 *
 *      spa_vdev_enter()        Acquire the namespace lock and the config lock
 *                              for writing.
 *
 *      spa_vdev_exit()         Release the config lock, wait for all I/O
 *                              to complete, sync the updated configs to the
 *                              cache, and release the namespace lock.
 *
 * vdev state is protected by spa_vdev_state_enter() / spa_vdev_state_exit().
 * Like spa_vdev_enter/exit, these are convenience wrappers -- the actual
 * locking is, always, based on spa_namespace_lock and spa_config_lock[].
 *
 * spa_rename() is also implemented within this file since it requires
 * manipulation of the namespace.
 */
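
/*
 * To make the rules above concrete, here is a hedged sketch (illustrative
 * only; not an interface defined in this file) of how a consumer typically
 * finds a pool and brackets a trivial config inquiry:
 *
 *      mutex_enter(&spa_namespace_lock);
 *      spa = spa_lookup("tank");               // "tank" is a placeholder
 *      if (spa != NULL)
 *              spa_open_ref(spa, FTAG);        // refcount may be zero here
 *      mutex_exit(&spa_namespace_lock);
 *      if (spa != NULL) {
 *              spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
 *              ...                             // e.g. walk the vdev tree
 *              spa_config_exit(spa, SCL_VDEV, FTAG);
 *              spa_close(spa, FTAG);           // no locking required
 *      }
 */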

static avl_tree_t spa_namespace_avl;
kmutex_t spa_namespace_lock;
static kcondvar_t spa_namespace_cv;
static int spa_active_count;
int spa_max_replication_override = SPA_DVAS_PER_BP;

static kmutex_t spa_spare_lock;
static avl_tree_t spa_spare_avl;
static kmutex_t spa_l2cache_lock;
static avl_tree_t spa_l2cache_avl;

kmem_cache_t *spa_buffer_pool;
int spa_mode_global;

#ifdef ZFS_DEBUG
/* Everything except dprintf is on by default in debug builds */
int zfs_flags = ~ZFS_DEBUG_DPRINTF;
#else
int zfs_flags = 0;
#endif

/*
 * zfs_recover can be set to nonzero to attempt to recover from
 * otherwise-fatal errors, typically caused by on-disk corruption.  When
 * set, calls to zfs_panic_recover() will turn into warning messages.
 */
int zfs_recover = 0;


/*
 * ==========================================================================
 * SPA config locking
 * ==========================================================================
 */
static void
spa_config_lock_init(spa_t *spa)
{
        for (int i = 0; i < SCL_LOCKS; i++) {
                spa_config_lock_t *scl = &spa->spa_config_lock[i];
                mutex_init(&scl->scl_lock, NULL, MUTEX_DEFAULT, NULL);
                cv_init(&scl->scl_cv, NULL, CV_DEFAULT, NULL);
                refcount_create(&scl->scl_count);
                scl->scl_writer = NULL;
                scl->scl_write_wanted = 0;
        }
}

static void
spa_config_lock_destroy(spa_t *spa)
{
        for (int i = 0; i < SCL_LOCKS; i++) {
                spa_config_lock_t *scl = &spa->spa_config_lock[i];
                mutex_destroy(&scl->scl_lock);
                cv_destroy(&scl->scl_cv);
                refcount_destroy(&scl->scl_count);
                ASSERT(scl->scl_writer == NULL);
                ASSERT(scl->scl_write_wanted == 0);
        }
}

int
spa_config_tryenter(spa_t *spa, int locks, void *tag, krw_t rw)
{
        for (int i = 0; i < SCL_LOCKS; i++) {
                spa_config_lock_t *scl = &spa->spa_config_lock[i];
                if (!(locks & (1 << i)))
                        continue;
                mutex_enter(&scl->scl_lock);
                if (rw == RW_READER) {
                        if (scl->scl_writer || scl->scl_write_wanted) {
                                mutex_exit(&scl->scl_lock);
                                /* drop only the locks acquired so far */
                                spa_config_exit(spa, locks & ((1 << i) - 1),
                                    tag);
                                return (0);
                        }
                } else {
                        ASSERT(scl->scl_writer != curthread);
                        if (!refcount_is_zero(&scl->scl_count)) {
                                mutex_exit(&scl->scl_lock);
                                /* drop only the locks acquired so far */
                                spa_config_exit(spa, locks & ((1 << i) - 1),
                                    tag);
                                return (0);
                        }
                        scl->scl_writer = curthread;
                }
                (void) refcount_add(&scl->scl_count, tag);
                mutex_exit(&scl->scl_lock);
        }
        return (1);
}

void
spa_config_enter(spa_t *spa, int locks, void *tag, krw_t rw)
{
        int wlocks_held = 0;

        for (int i = 0; i < SCL_LOCKS; i++) {
                spa_config_lock_t *scl = &spa->spa_config_lock[i];
                if (scl->scl_writer == curthread)
                        wlocks_held |= (1 << i);
                if (!(locks & (1 << i)))
                        continue;
                mutex_enter(&scl->scl_lock);
                if (rw == RW_READER) {
                        while (scl->scl_writer || scl->scl_write_wanted) {
                                cv_wait(&scl->scl_cv, &scl->scl_lock);
                        }
                } else {
                        ASSERT(scl->scl_writer != curthread);
                        while (!refcount_is_zero(&scl->scl_count)) {
                                scl->scl_write_wanted++;
                                cv_wait(&scl->scl_cv, &scl->scl_lock);
                                scl->scl_write_wanted--;
                        }
                        scl->scl_writer = curthread;
                }
                (void) refcount_add(&scl->scl_count, tag);
                mutex_exit(&scl->scl_lock);
        }
        ASSERT(wlocks_held <= locks);
}

void
spa_config_exit(spa_t *spa, int locks, void *tag)
{
        for (int i = SCL_LOCKS - 1; i >= 0; i--) {
                spa_config_lock_t *scl = &spa->spa_config_lock[i];
                if (!(locks & (1 << i)))
                        continue;
                mutex_enter(&scl->scl_lock);
                ASSERT(!refcount_is_zero(&scl->scl_count));
                if (refcount_remove(&scl->scl_count, tag) == 0) {
                        ASSERT(scl->scl_writer == NULL ||
                            scl->scl_writer == curthread);
                        scl->scl_writer = NULL;      /* OK in either case */
                        cv_broadcast(&scl->scl_cv);
                }
                mutex_exit(&scl->scl_lock);
        }
}

int
spa_config_held(spa_t *spa, int locks, krw_t rw)
{
        int locks_held = 0;

        for (int i = 0; i < SCL_LOCKS; i++) {
                spa_config_lock_t *scl = &spa->spa_config_lock[i];
                if (!(locks & (1 << i)))
                        continue;
                if ((rw == RW_READER && !refcount_is_zero(&scl->scl_count)) ||
                    (rw == RW_WRITER && scl->scl_writer == curthread))
                        locks_held |= 1 << i;
        }

        return (locks_held);
}

/*
 * ==========================================================================
 * SPA namespace functions
 * ==========================================================================
 */

/*
 * Lookup the named spa_t in the AVL tree.  The spa_namespace_lock must be held.
 * Returns NULL if no matching spa_t is found.
 */
spa_t *
spa_lookup(const char *name)
{
        static spa_t search;    /* spa_t is large; don't allocate on stack */
        spa_t *spa;
        avl_index_t where;
        char c;
        char *cp;

        ASSERT(MUTEX_HELD(&spa_namespace_lock));

        /*
         * If it's a full dataset name, figure out the pool name and
         * just use that.
         */
        cp = strpbrk(name, "/@");
        if (cp) {
                c = *cp;
                *cp = '\0';
        }

        (void) strlcpy(search.spa_name, name, sizeof (search.spa_name));
        spa = avl_find(&spa_namespace_avl, &search, &where);

        if (cp)
                *cp = c;

        return (spa);
}
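
/*
 * For example (illustrative), both of these calls return the spa_t named
 * "tank", since the lookup truncates a dataset name at the first '/' or '@':
 *
 *      spa = spa_lookup("tank");
 *      spa = spa_lookup("tank/home@yesterday");
 */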

/*
 * Create an uninitialized spa_t with the given name.  Requires
 * spa_namespace_lock.  The caller must ensure that the spa_t doesn't already
 * exist by calling spa_lookup() first.
 */
spa_t *
spa_add(const char *name, nvlist_t *config, const char *altroot)
{
        spa_t *spa;
        spa_config_dirent_t *dp;

        ASSERT(MUTEX_HELD(&spa_namespace_lock));

        spa = kmem_zalloc(sizeof (spa_t), KM_SLEEP);

        mutex_init(&spa->spa_async_lock, NULL, MUTEX_DEFAULT, NULL);
        mutex_init(&spa->spa_errlist_lock, NULL, MUTEX_DEFAULT, NULL);
        mutex_init(&spa->spa_errlog_lock, NULL, MUTEX_DEFAULT, NULL);
        mutex_init(&spa->spa_history_lock, NULL, MUTEX_DEFAULT, NULL);
        mutex_init(&spa->spa_proc_lock, NULL, MUTEX_DEFAULT, NULL);
        mutex_init(&spa->spa_props_lock, NULL, MUTEX_DEFAULT, NULL);
        mutex_init(&spa->spa_scrub_lock, NULL, MUTEX_DEFAULT, NULL);
        mutex_init(&spa->spa_suspend_lock, NULL, MUTEX_DEFAULT, NULL);
        mutex_init(&spa->spa_vdev_top_lock, NULL, MUTEX_DEFAULT, NULL);

        cv_init(&spa->spa_async_cv, NULL, CV_DEFAULT, NULL);
        cv_init(&spa->spa_proc_cv, NULL, CV_DEFAULT, NULL);
        cv_init(&spa->spa_scrub_io_cv, NULL, CV_DEFAULT, NULL);
        cv_init(&spa->spa_suspend_cv, NULL, CV_DEFAULT, NULL);

        for (int t = 0; t < TXG_SIZE; t++)
                bplist_create(&spa->spa_free_bplist[t]);

        (void) strlcpy(spa->spa_name, name, sizeof (spa->spa_name));
        spa->spa_state = POOL_STATE_UNINITIALIZED;
        spa->spa_freeze_txg = UINT64_MAX;
        spa->spa_final_txg = UINT64_MAX;
        spa->spa_load_max_txg = UINT64_MAX;
        spa->spa_proc = &p0;
        spa->spa_proc_state = SPA_PROC_NONE;

        refcount_create(&spa->spa_refcount);
        spa_config_lock_init(spa);

        avl_add(&spa_namespace_avl, spa);

        /*
         * Set the alternate root, if there is one.
         */
        if (altroot) {
                spa->spa_root = spa_strdup(altroot);
                spa_active_count++;
        }

        /*
         * Every pool starts with the default cachefile
         */
        list_create(&spa->spa_config_list, sizeof (spa_config_dirent_t),
            offsetof(spa_config_dirent_t, scd_link));

        dp = kmem_zalloc(sizeof (spa_config_dirent_t), KM_SLEEP);
        dp->scd_path = altroot ? NULL : spa_strdup(spa_config_path);
        list_insert_head(&spa->spa_config_list, dp);

        VERIFY(nvlist_alloc(&spa->spa_load_info, NV_UNIQUE_NAME,
            KM_SLEEP) == 0);

        if (config != NULL)
                VERIFY(nvlist_dup(config, &spa->spa_config, 0) == 0);

        return (spa);
}

/*
 * Removes a spa_t from the namespace, freeing up any memory used.  Requires
 * spa_namespace_lock.  This is called only after the spa_t has been closed and
 * deactivated.
 */
void
spa_remove(spa_t *spa)
{
        spa_config_dirent_t *dp;

        ASSERT(MUTEX_HELD(&spa_namespace_lock));
        ASSERT(spa->spa_state == POOL_STATE_UNINITIALIZED);

        nvlist_free(spa->spa_config_splitting);

        avl_remove(&spa_namespace_avl, spa);
        cv_broadcast(&spa_namespace_cv);

        if (spa->spa_root) {
                spa_strfree(spa->spa_root);
                spa_active_count--;
        }

        while ((dp = list_head(&spa->spa_config_list)) != NULL) {
                list_remove(&spa->spa_config_list, dp);
                if (dp->scd_path != NULL)
                        spa_strfree(dp->scd_path);
                kmem_free(dp, sizeof (spa_config_dirent_t));
        }

        list_destroy(&spa->spa_config_list);

        nvlist_free(spa->spa_load_info);
        spa_config_set(spa, NULL);

        refcount_destroy(&spa->spa_refcount);

        spa_config_lock_destroy(spa);

        for (int t = 0; t < TXG_SIZE; t++)
                bplist_destroy(&spa->spa_free_bplist[t]);

        cv_destroy(&spa->spa_async_cv);
        cv_destroy(&spa->spa_proc_cv);
        cv_destroy(&spa->spa_scrub_io_cv);
        cv_destroy(&spa->spa_suspend_cv);

        mutex_destroy(&spa->spa_async_lock);
        mutex_destroy(&spa->spa_errlist_lock);
        mutex_destroy(&spa->spa_errlog_lock);
        mutex_destroy(&spa->spa_history_lock);
        mutex_destroy(&spa->spa_proc_lock);
        mutex_destroy(&spa->spa_props_lock);
        mutex_destroy(&spa->spa_scrub_lock);
        mutex_destroy(&spa->spa_suspend_lock);
        mutex_destroy(&spa->spa_vdev_top_lock);

        kmem_free(spa, sizeof (spa_t));
}

/*
 * Given a pool, return the next pool in the namespace, or NULL if there is
 * none.  If 'prev' is NULL, return the first pool.
 */
spa_t *
spa_next(spa_t *prev)
{
        ASSERT(MUTEX_HELD(&spa_namespace_lock));

        if (prev)
                return (AVL_NEXT(&spa_namespace_avl, prev));
        else
                return (avl_first(&spa_namespace_avl));
}

/*
 * ==========================================================================
 * SPA refcount functions
 * ==========================================================================
 */

/*
 * Add a reference to the given spa_t.  Must have at least one reference, or
 * have the namespace lock held.
 */
void
spa_open_ref(spa_t *spa, void *tag)
{
        ASSERT(refcount_count(&spa->spa_refcount) >= spa->spa_minref ||
            MUTEX_HELD(&spa_namespace_lock));
        (void) refcount_add(&spa->spa_refcount, tag);
}

/*
 * Remove a reference to the given spa_t.  Must have at least one reference, or
 * have the namespace lock held.
 */
void
spa_close(spa_t *spa, void *tag)
{
        ASSERT(refcount_count(&spa->spa_refcount) > spa->spa_minref ||
            MUTEX_HELD(&spa_namespace_lock));
        (void) refcount_remove(&spa->spa_refcount, tag);
}

/*
 * Check to see if the spa refcount is zero.  Must be called with
 * spa_namespace_lock held.  We really compare against spa_minref, which is the
 * number of references acquired when opening a pool.
 */
boolean_t
spa_refcount_zero(spa_t *spa)
{
        ASSERT(MUTEX_HELD(&spa_namespace_lock));

        return (refcount_count(&spa->spa_refcount) == spa->spa_minref);
}
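
/*
 * A hedged sketch of how an export path might use these functions
 * (illustrative only; the real export logic lives in spa.c):
 *
 *      mutex_enter(&spa_namespace_lock);
 *      spa_close(spa, FTAG);
 *      if (!spa_refcount_zero(spa)) {
 *              mutex_exit(&spa_namespace_lock);
 *              return (EBUSY);                 // pool is still in use
 *      }
 *      ...                                     // proceed with export
 */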

/*
 * ==========================================================================
 * SPA spare and l2cache tracking
 * ==========================================================================
 */

/*
 * Hot spares and cache devices are tracked using the same code below,
 * for 'auxiliary' devices.
 */

typedef struct spa_aux {
        uint64_t        aux_guid;
        uint64_t        aux_pool;
        avl_node_t      aux_avl;
        int             aux_count;
} spa_aux_t;

static int
spa_aux_compare(const void *a, const void *b)
{
        const spa_aux_t *sa = a;
        const spa_aux_t *sb = b;

        if (sa->aux_guid < sb->aux_guid)
                return (-1);
        else if (sa->aux_guid > sb->aux_guid)
                return (1);
        else
                return (0);
}

void
spa_aux_add(vdev_t *vd, avl_tree_t *avl)
{
        avl_index_t where;
        spa_aux_t search;
        spa_aux_t *aux;

        search.aux_guid = vd->vdev_guid;
        if ((aux = avl_find(avl, &search, &where)) != NULL) {
                aux->aux_count++;
        } else {
                aux = kmem_zalloc(sizeof (spa_aux_t), KM_SLEEP);
                aux->aux_guid = vd->vdev_guid;
                aux->aux_count = 1;
                avl_insert(avl, aux, where);
        }
}

void
spa_aux_remove(vdev_t *vd, avl_tree_t *avl)
{
        spa_aux_t search;
        spa_aux_t *aux;
        avl_index_t where;

        search.aux_guid = vd->vdev_guid;
        aux = avl_find(avl, &search, &where);

        ASSERT(aux != NULL);

        if (--aux->aux_count == 0) {
                avl_remove(avl, aux);
                kmem_free(aux, sizeof (spa_aux_t));
        } else if (aux->aux_pool == spa_guid(vd->vdev_spa)) {
                aux->aux_pool = 0ULL;
        }
}

boolean_t
spa_aux_exists(uint64_t guid, uint64_t *pool, int *refcnt, avl_tree_t *avl)
{
        spa_aux_t search, *found;

        search.aux_guid = guid;
        found = avl_find(avl, &search, NULL);

        if (pool) {
                if (found)
                        *pool = found->aux_pool;
                else
                        *pool = 0ULL;
        }

        if (refcnt) {
                if (found)
                        *refcnt = found->aux_count;
                else
                        *refcnt = 0;
        }

        return (found != NULL);
}

void
spa_aux_activate(vdev_t *vd, avl_tree_t *avl)
{
        spa_aux_t search, *found;
        avl_index_t where;

        search.aux_guid = vd->vdev_guid;
        found = avl_find(avl, &search, &where);
        ASSERT(found != NULL);
        ASSERT(found->aux_pool == 0ULL);

        found->aux_pool = spa_guid(vd->vdev_spa);
}

/*
 * Spares are tracked globally due to the following constraints:
 *
 *      - A spare may be part of multiple pools.
 *      - A spare may be added to a pool even if it's actively in use within
 *        another pool.
 *      - A spare in use in any pool can only be the source of a replacement if
 *        the target is a spare in the same pool.
 *
 * We keep track of all spares on the system through the use of a reference
 * counted AVL tree.  When a vdev is added as a spare, or used as a replacement
 * spare, then we bump the reference count in the AVL tree.  In addition, we set
 * the 'vdev_isspare' member to indicate that the device is a spare (active or
 * inactive).  When a spare is made active (used to replace a device in the
 * pool), we also keep track of which pool it's been made a part of.
 *
 * The 'spa_spare_lock' protects the AVL tree.  These functions are normally
 * called under the spa_namespace lock as part of vdev reconfiguration.  The
 * separate spare lock exists for the status query path, which does not need to
 * be completely consistent with respect to other vdev configuration changes.
 */

static int
spa_spare_compare(const void *a, const void *b)
{
        return (spa_aux_compare(a, b));
}

void
spa_spare_add(vdev_t *vd)
{
        mutex_enter(&spa_spare_lock);
        ASSERT(!vd->vdev_isspare);
        spa_aux_add(vd, &spa_spare_avl);
        vd->vdev_isspare = B_TRUE;
        mutex_exit(&spa_spare_lock);
}

void
spa_spare_remove(vdev_t *vd)
{
        mutex_enter(&spa_spare_lock);
        ASSERT(vd->vdev_isspare);
        spa_aux_remove(vd, &spa_spare_avl);
        vd->vdev_isspare = B_FALSE;
        mutex_exit(&spa_spare_lock);
}

boolean_t
spa_spare_exists(uint64_t guid, uint64_t *pool, int *refcnt)
{
        boolean_t found;

        mutex_enter(&spa_spare_lock);
        found = spa_aux_exists(guid, pool, refcnt, &spa_spare_avl);
        mutex_exit(&spa_spare_lock);

        return (found);
}

void
spa_spare_activate(vdev_t *vd)
{
        mutex_enter(&spa_spare_lock);
        ASSERT(vd->vdev_isspare);
        spa_aux_activate(vd, &spa_spare_avl);
        mutex_exit(&spa_spare_lock);
}

/*
 * Level 2 ARC devices are tracked globally for the same reasons as spares.
 * Cache devices currently only support one pool per cache device, and so
 * for these devices the aux reference count is currently unused beyond 1.
 */

static int
spa_l2cache_compare(const void *a, const void *b)
{
        return (spa_aux_compare(a, b));
}

void
spa_l2cache_add(vdev_t *vd)
{
        mutex_enter(&spa_l2cache_lock);
        ASSERT(!vd->vdev_isl2cache);
        spa_aux_add(vd, &spa_l2cache_avl);
        vd->vdev_isl2cache = B_TRUE;
        mutex_exit(&spa_l2cache_lock);
}

void
spa_l2cache_remove(vdev_t *vd)
{
        mutex_enter(&spa_l2cache_lock);
        ASSERT(vd->vdev_isl2cache);
        spa_aux_remove(vd, &spa_l2cache_avl);
        vd->vdev_isl2cache = B_FALSE;
        mutex_exit(&spa_l2cache_lock);
}

boolean_t
spa_l2cache_exists(uint64_t guid, uint64_t *pool)
{
        boolean_t found;

        mutex_enter(&spa_l2cache_lock);
        found = spa_aux_exists(guid, pool, NULL, &spa_l2cache_avl);
        mutex_exit(&spa_l2cache_lock);

        return (found);
}

void
spa_l2cache_activate(vdev_t *vd)
{
        mutex_enter(&spa_l2cache_lock);
        ASSERT(vd->vdev_isl2cache);
        spa_aux_activate(vd, &spa_l2cache_avl);
        mutex_exit(&spa_l2cache_lock);
}

/*
 * ==========================================================================
 * SPA vdev locking
 * ==========================================================================
 */

/*
 * Lock the given spa_t for the purpose of adding or removing a vdev.
 * Grabs the global spa_namespace_lock plus the spa config lock for writing.
 * It returns the next transaction group for the spa_t.
 */
uint64_t
spa_vdev_enter(spa_t *spa)
{
        mutex_enter(&spa->spa_vdev_top_lock);
        mutex_enter(&spa_namespace_lock);
        return (spa_vdev_config_enter(spa));
}

/*
 * Internal implementation for spa_vdev_enter().  Used when a vdev
 * operation requires multiple syncs (e.g. removing a device) while
 * keeping the spa_namespace_lock held.
 */
uint64_t
spa_vdev_config_enter(spa_t *spa)
{
        ASSERT(MUTEX_HELD(&spa_namespace_lock));

        spa_config_enter(spa, SCL_ALL, spa, RW_WRITER);

        return (spa_last_synced_txg(spa) + 1);
}

/*
 * Used in combination with spa_vdev_config_enter() to allow the syncing
 * of multiple transactions without releasing the spa_namespace_lock.
 */
void
spa_vdev_config_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error, char *tag)
{
        boolean_t config_changed = B_FALSE;

        ASSERT(MUTEX_HELD(&spa_namespace_lock));
        ASSERT(txg > spa_last_synced_txg(spa));

        spa->spa_pending_vdev = NULL;

        /*
         * Reassess the DTLs.
         */
        vdev_dtl_reassess(spa->spa_root_vdev, 0, 0, B_FALSE);

        if (error == 0 && !list_is_empty(&spa->spa_config_dirty_list)) {
                config_changed = B_TRUE;
                spa->spa_config_generation++;
        }

        /*
         * Verify the metaslab classes.
         */
        ASSERT(metaslab_class_validate(spa_normal_class(spa)) == 0);
        ASSERT(metaslab_class_validate(spa_log_class(spa)) == 0);

        spa_config_exit(spa, SCL_ALL, spa);

        /*
         * Panic the system if the specified tag requires it.  This
         * is useful for ensuring that configurations are updated
         * transactionally.
         */
        if (zio_injection_enabled)
                zio_handle_panic_injection(spa, tag, 0);

        /*
         * Note: this txg_wait_synced() is important because it ensures
         * that there won't be more than one config change per txg.
         * This allows us to use the txg as the generation number.
         */
        if (error == 0)
                txg_wait_synced(spa->spa_dsl_pool, txg);

        if (vd != NULL) {
                ASSERT(!vd->vdev_detached || vd->vdev_dtl_smo.smo_object == 0);
                spa_config_enter(spa, SCL_ALL, spa, RW_WRITER);
                vdev_free(vd);
                spa_config_exit(spa, SCL_ALL, spa);
        }

        /*
         * If the config changed, update the config cache.
         */
        if (config_changed)
                spa_config_sync(spa, B_FALSE, B_TRUE);
}

/*
 * Unlock the spa_t after adding or removing a vdev.  Besides undoing the
 * locking of spa_vdev_enter(), we also want to make sure the transactions have
 * synced to disk, and then update the global configuration cache with the new
 * information.
 */
int
spa_vdev_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error)
{
        spa_vdev_config_exit(spa, vd, txg, error, FTAG);
        mutex_exit(&spa_namespace_lock);
        mutex_exit(&spa->spa_vdev_top_lock);

        return (error);
}
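
/*
 * A typical vdev-level operation is therefore bracketed as in this hedged
 * sketch (illustrative only; compare the callers in spa.c):
 *
 *      txg = spa_vdev_enter(spa);
 *      ...                                     // modify the vdev tree
 *      return (spa_vdev_exit(spa, vd, txg, error));
 */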

/*
 * Lock the given spa_t for the purpose of changing vdev state.
 */
void
spa_vdev_state_enter(spa_t *spa, int oplocks)
{
        int locks = SCL_STATE_ALL | oplocks;

        /*
         * Root pools may need to read from the underlying devfs filesystem
         * when opening up a vdev.  Unfortunately if we're holding the
         * SCL_ZIO lock it will result in a deadlock when we try to issue
         * the read from the root filesystem.  Instead we "prefetch"
         * the associated vnodes that we need prior to opening the
         * underlying devices and cache them so that we can prevent
         * any I/O when we are doing the actual open.
         */
        if (spa_is_root(spa)) {
                int low = locks & ~(SCL_ZIO - 1);
                int high = locks & ~low;

                spa_config_enter(spa, high, spa, RW_WRITER);
                vdev_hold(spa->spa_root_vdev);
                spa_config_enter(spa, low, spa, RW_WRITER);
        } else {
                spa_config_enter(spa, locks, spa, RW_WRITER);
        }
        spa->spa_vdev_locks = locks;
}

int
spa_vdev_state_exit(spa_t *spa, vdev_t *vd, int error)
{
        boolean_t config_changed = B_FALSE;

        if (vd != NULL || error == 0)
                vdev_dtl_reassess(vd ? vd->vdev_top : spa->spa_root_vdev,
                    0, 0, B_FALSE);

        if (vd != NULL) {
                vdev_state_dirty(vd->vdev_top);
                config_changed = B_TRUE;
                spa->spa_config_generation++;
        }

        if (spa_is_root(spa))
                vdev_rele(spa->spa_root_vdev);

        ASSERT3U(spa->spa_vdev_locks, >=, SCL_STATE_ALL);
        spa_config_exit(spa, spa->spa_vdev_locks, spa);

        /*
         * If anything changed, wait for it to sync.  This ensures that,
         * from the system administrator's perspective, zpool(1M) commands
         * are synchronous.  This is important for things like zpool offline:
         * when the command completes, you expect no further I/O from ZFS.
         */
        if (vd != NULL)
                txg_wait_synced(spa->spa_dsl_pool, 0);

        /*
         * If the config changed, update the config cache.
         */
        if (config_changed) {
                mutex_enter(&spa_namespace_lock);
                spa_config_sync(spa, B_FALSE, B_TRUE);
                mutex_exit(&spa_namespace_lock);
        }

        return (error);
}
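
/*
 * Vdev state changes follow the same shape, as in this hedged sketch
 * (illustrative only; 'guid' and the lookup step are assumptions about
 * the caller, not part of these functions):
 *
 *      spa_vdev_state_enter(spa, SCL_NONE);
 *      vd = ...                                // resolve guid to a vdev_t
 *      ...                                     // online/offline/fault/clear
 *      return (spa_vdev_state_exit(spa, vd, error));
 */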

/*
 * ==========================================================================
 * Miscellaneous functions
 * ==========================================================================
 */

/*
 * Rename a spa_t.
 */
int
spa_rename(const char *name, const char *newname)
{
        spa_t *spa;
        int err;

        /*
         * Lookup the spa_t and grab the config lock for writing.  We need to
         * actually open the pool so that we can sync out the necessary labels.
         * It's OK to call spa_open() with the namespace lock held because we
         * allow recursive calls for other reasons.
         */
        mutex_enter(&spa_namespace_lock);
        if ((err = spa_open(name, &spa, FTAG)) != 0) {
                mutex_exit(&spa_namespace_lock);
                return (err);
        }

        spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);

        avl_remove(&spa_namespace_avl, spa);
        (void) strlcpy(spa->spa_name, newname, sizeof (spa->spa_name));
        avl_add(&spa_namespace_avl, spa);

        /*
         * Sync all labels to disk with the new names by marking the root vdev
         * dirty and waiting for it to sync.  It will pick up the new pool name
         * during the sync.
         */
        vdev_config_dirty(spa->spa_root_vdev);

        spa_config_exit(spa, SCL_ALL, FTAG);

        txg_wait_synced(spa->spa_dsl_pool, 0);

        /*
         * Sync the updated config cache.
         */
        spa_config_sync(spa, B_FALSE, B_TRUE);

        spa_close(spa, FTAG);

        mutex_exit(&spa_namespace_lock);

        return (0);
}

/*
 * Return the spa_t associated with given pool_guid, if it exists.  If
 * device_guid is non-zero, determine whether the pool exists *and* contains
 * a device with the specified device_guid.
 */
spa_t *
spa_by_guid(uint64_t pool_guid, uint64_t device_guid)
{
        spa_t *spa;
        avl_tree_t *t = &spa_namespace_avl;

        ASSERT(MUTEX_HELD(&spa_namespace_lock));

        for (spa = avl_first(t); spa != NULL; spa = AVL_NEXT(t, spa)) {
                if (spa->spa_state == POOL_STATE_UNINITIALIZED)
                        continue;
                if (spa->spa_root_vdev == NULL)
                        continue;
                if (spa_guid(spa) == pool_guid) {
                        if (device_guid == 0)
                                break;

                        if (vdev_lookup_by_guid(spa->spa_root_vdev,
                            device_guid) != NULL)
                                break;

                        /*
                         * Check any devices we may be in the process of adding.
                         */
                        if (spa->spa_pending_vdev) {
                                if (vdev_lookup_by_guid(spa->spa_pending_vdev,
                                    device_guid) != NULL)
                                        break;
                        }
                }
        }

        return (spa);
}

/*
 * Determine whether a pool with the given pool_guid exists.
 */
boolean_t
spa_guid_exists(uint64_t pool_guid, uint64_t device_guid)
{
        return (spa_by_guid(pool_guid, device_guid) != NULL);
}

char *
spa_strdup(const char *s)
{
        size_t len;
        char *new;

        len = strlen(s);
        new = kmem_alloc(len + 1, KM_SLEEP);
        bcopy(s, new, len);
        new[len] = '\0';

        return (new);
}

void
spa_strfree(char *s)
{
        kmem_free(s, strlen(s) + 1);
}

uint64_t
spa_get_random(uint64_t range)
{
        uint64_t r;

        ASSERT(range != 0);

        (void) random_get_pseudo_bytes((void *)&r, sizeof (uint64_t));

        return (r % range);
}

uint64_t
spa_generate_guid(spa_t *spa)
{
        uint64_t guid = spa_get_random(-1ULL);

        if (spa != NULL) {
                while (guid == 0 || spa_guid_exists(spa_guid(spa), guid))
                        guid = spa_get_random(-1ULL);
        } else {
                while (guid == 0 || spa_guid_exists(guid, 0))
                        guid = spa_get_random(-1ULL);
        }

        return (guid);
}

void
sprintf_blkptr(char *buf, const blkptr_t *bp)
{
        char *type = NULL;
        char *checksum = NULL;
        char *compress = NULL;

        if (bp != NULL) {
                type = dmu_ot[BP_GET_TYPE(bp)].ot_name;
                checksum = zio_checksum_table[BP_GET_CHECKSUM(bp)].ci_name;
                compress = zio_compress_table[BP_GET_COMPRESS(bp)].ci_name;
        }

        SPRINTF_BLKPTR(snprintf, ' ', buf, bp, type, checksum, compress);
}

void
spa_freeze(spa_t *spa)
{
        uint64_t freeze_txg = 0;

        spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
        if (spa->spa_freeze_txg == UINT64_MAX) {
                freeze_txg = spa_last_synced_txg(spa) + TXG_SIZE;
                spa->spa_freeze_txg = freeze_txg;
        }
        spa_config_exit(spa, SCL_ALL, FTAG);
        if (freeze_txg != 0)
                txg_wait_synced(spa_get_dsl(spa), freeze_txg);
}

void
zfs_panic_recover(const char *fmt, ...)
{
        va_list adx;

        va_start(adx, fmt);
        vcmn_err(zfs_recover ? CE_WARN : CE_PANIC, fmt, adx);
        va_end(adx);
}

/*
 * This is a stripped-down version of strtoull, suitable only for converting
 * lowercase hexadecimal numbers that don't overflow.
 */
uint64_t
strtonum(const char *str, char **nptr)
{
        uint64_t val = 0;
        char c;
        int digit;

        while ((c = *str) != '\0') {
                if (c >= '0' && c <= '9')
                        digit = c - '0';
                else if (c >= 'a' && c <= 'f')
                        digit = 10 + c - 'a';
                else
                        break;

                val *= 16;
                val += digit;

                str++;
        }

        if (nptr)
                *nptr = (char *)str;

        return (val);
}
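
/*
 * For example (illustrative), parsing a guid rendered in lowercase hex:
 *
 *      char *end;
 *      uint64_t v = strtonum("1a2b", &end);    // v == 0x1a2b, *end == '\0'
 *      (void) strtonum("123zzz", &end);        // stops early; *end == 'z'
 */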

/*
 * ==========================================================================
 * Accessor functions
 * ==========================================================================
 */

boolean_t
spa_shutting_down(spa_t *spa)
{
        return (spa->spa_async_suspended);
}

dsl_pool_t *
spa_get_dsl(spa_t *spa)
{
        return (spa->spa_dsl_pool);
}

blkptr_t *
spa_get_rootblkptr(spa_t *spa)
{
        return (&spa->spa_ubsync.ub_rootbp);
}

void
spa_set_rootblkptr(spa_t *spa, const blkptr_t *bp)
{
        spa->spa_uberblock.ub_rootbp = *bp;
}

void
spa_altroot(spa_t *spa, char *buf, size_t buflen)
{
        if (spa->spa_root == NULL)
                buf[0] = '\0';
        else
                (void) strncpy(buf, spa->spa_root, buflen);
}

int
spa_sync_pass(spa_t *spa)
{
        return (spa->spa_sync_pass);
}

char *
spa_name(spa_t *spa)
{
        return (spa->spa_name);
}

uint64_t
spa_guid(spa_t *spa)
{
        /*
         * If we fail to parse the config during spa_load(), we can go through
         * the error path (which posts an ereport) and end up here with no root
         * vdev.  We stash the original pool guid in 'spa_config_guid' to handle
         * this case.
         */
        if (spa->spa_root_vdev != NULL)
                return (spa->spa_root_vdev->vdev_guid);
        else
                return (spa->spa_config_guid);
}

uint64_t
spa_load_guid(spa_t *spa)
{
        /*
         * This is a GUID that exists solely as a reference for the
         * purposes of the arc.  It is generated at load time, and
         * is never written to persistent storage.
         */
        return (spa->spa_load_guid);
}

uint64_t
spa_last_synced_txg(spa_t *spa)
{
        return (spa->spa_ubsync.ub_txg);
}

uint64_t
spa_first_txg(spa_t *spa)
{
        return (spa->spa_first_txg);
}

uint64_t
spa_syncing_txg(spa_t *spa)
{
        return (spa->spa_syncing_txg);
}

pool_state_t
spa_state(spa_t *spa)
{
        return (spa->spa_state);
}

spa_load_state_t
spa_load_state(spa_t *spa)
{
        return (spa->spa_load_state);
}

uint64_t
spa_freeze_txg(spa_t *spa)
{
        return (spa->spa_freeze_txg);
}

/* ARGSUSED */
uint64_t
spa_get_asize(spa_t *spa, uint64_t lsize)
{
        /*
         * The worst case is single-sector max-parity RAID-Z blocks, in which
         * case the space requirement is exactly (VDEV_RAIDZ_MAXPARITY + 1)
         * times the size; so just assume that.  Add to this the fact that
         * we can have up to 3 DVAs per bp, and one more factor of 2 because
         * the block may be dittoed with up to 3 DVAs by ddt_sync().
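         * For example (worst case, illustrative): a 128K logical block may
         * reserve up to 128K * 4 * 3 * 2 = 3M of allocated space.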
         */
        return (lsize * (VDEV_RAIDZ_MAXPARITY + 1) * SPA_DVAS_PER_BP * 2);
}

uint64_t
spa_get_dspace(spa_t *spa)
{
        return (spa->spa_dspace);
}

void
spa_update_dspace(spa_t *spa)
{
        spa->spa_dspace = metaslab_class_get_dspace(spa_normal_class(spa)) +
            ddt_get_dedup_dspace(spa);
}

/*
 * Return the failure mode that has been set for this pool.  The default
 * behavior is to block all I/Os when a complete failure occurs.
 */
uint8_t
spa_get_failmode(spa_t *spa)
{
        return (spa->spa_failmode);
}

boolean_t
spa_suspended(spa_t *spa)
{
        return (spa->spa_suspended);
}

uint64_t
spa_version(spa_t *spa)
{
        return (spa->spa_ubsync.ub_version);
}

boolean_t
spa_deflate(spa_t *spa)
{
        return (spa->spa_deflate);
}

metaslab_class_t *
spa_normal_class(spa_t *spa)
{
        return (spa->spa_normal_class);
}

metaslab_class_t *
spa_log_class(spa_t *spa)
{
        return (spa->spa_log_class);
}

int
spa_max_replication(spa_t *spa)
{
        /*
         * As of SPA_VERSION == SPA_VERSION_DITTO_BLOCKS, we are able to
         * handle BPs with more than one DVA allocated.  Set our max
         * replication level accordingly.
         */
        if (spa_version(spa) < SPA_VERSION_DITTO_BLOCKS)
                return (1);
        return (MIN(SPA_DVAS_PER_BP, spa_max_replication_override));
}

int
spa_prev_software_version(spa_t *spa)
{
        return (spa->spa_prev_software_version);
}

uint64_t
dva_get_dsize_sync(spa_t *spa, const dva_t *dva)
{
        uint64_t asize = DVA_GET_ASIZE(dva);
        uint64_t dsize = asize;

        ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);

        if (asize != 0 && spa->spa_deflate) {
                vdev_t *vd = vdev_lookup_top(spa, DVA_GET_VDEV(dva));
                dsize = (asize >> SPA_MINBLOCKSHIFT) * vd->vdev_deflate_ratio;
        }

        return (dsize);
}

uint64_t
bp_get_dsize_sync(spa_t *spa, const blkptr_t *bp)
{
        uint64_t dsize = 0;

        for (int d = 0; d < SPA_DVAS_PER_BP; d++)
                dsize += dva_get_dsize_sync(spa, &bp->blk_dva[d]);

        return (dsize);
}

uint64_t
bp_get_dsize(spa_t *spa, const blkptr_t *bp)
{
        uint64_t dsize = 0;

        spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);

        for (int d = 0; d < SPA_DVAS_PER_BP; d++)
                dsize += dva_get_dsize_sync(spa, &bp->blk_dva[d]);

        spa_config_exit(spa, SCL_VDEV, FTAG);

        return (dsize);
}

/*
 * ==========================================================================
 * Initialization and Termination
 * ==========================================================================
 */

static int
spa_name_compare(const void *a1, const void *a2)
{
        const spa_t *s1 = a1;
        const spa_t *s2 = a2;
        int s;

        s = strcmp(s1->spa_name, s2->spa_name);
        if (s > 0)
                return (1);
        if (s < 0)
                return (-1);
        return (0);
}

int
spa_busy(void)
{
        return (spa_active_count);
}

void
spa_boot_init()
{
        spa_config_load();
}

void
spa_init(int mode)
{
        mutex_init(&spa_namespace_lock, NULL, MUTEX_DEFAULT, NULL);
        mutex_init(&spa_spare_lock, NULL, MUTEX_DEFAULT, NULL);
        mutex_init(&spa_l2cache_lock, NULL, MUTEX_DEFAULT, NULL);
        cv_init(&spa_namespace_cv, NULL, CV_DEFAULT, NULL);

        avl_create(&spa_namespace_avl, spa_name_compare, sizeof (spa_t),
            offsetof(spa_t, spa_avl));

        avl_create(&spa_spare_avl, spa_spare_compare, sizeof (spa_aux_t),
            offsetof(spa_aux_t, aux_avl));

        avl_create(&spa_l2cache_avl, spa_l2cache_compare, sizeof (spa_aux_t),
            offsetof(spa_aux_t, aux_avl));

        spa_mode_global = mode;

        refcount_init();
        unique_init();
        zio_init();
        dmu_init();
        zil_init();
        vdev_cache_stat_init();
        zfs_prop_init();
        zpool_prop_init();
        spa_config_load();
        l2arc_start();
}

void
spa_fini(void)
{
        l2arc_stop();

        spa_evict_all();

        vdev_cache_stat_fini();
        zil_fini();
        dmu_fini();
        zio_fini();
        unique_fini();
        refcount_fini();

        avl_destroy(&spa_namespace_avl);
        avl_destroy(&spa_spare_avl);
        avl_destroy(&spa_l2cache_avl);

        cv_destroy(&spa_namespace_cv);
        mutex_destroy(&spa_namespace_lock);
        mutex_destroy(&spa_spare_lock);
        mutex_destroy(&spa_l2cache_lock);
}

/*
 * Return whether this pool has slogs.  No locking needed.
 * It's not a problem if the wrong answer is returned as it's only for
 * performance and not correctness.
 */
boolean_t
spa_has_slogs(spa_t *spa)
{
        return (spa->spa_log_class->mc_rotor != NULL);
}

spa_log_state_t
spa_get_log_state(spa_t *spa)
{
        return (spa->spa_log_state);
}

void
spa_set_log_state(spa_t *spa, spa_log_state_t state)
{
        spa->spa_log_state = state;
}

boolean_t
spa_is_root(spa_t *spa)
{
        return (spa->spa_is_root);
}

boolean_t
spa_writeable(spa_t *spa)
{
        return (!!(spa->spa_mode & FWRITE));
}

int
spa_mode(spa_t *spa)
{
        return (spa->spa_mode);
}

uint64_t
spa_bootfs(spa_t *spa)
{
        return (spa->spa_bootfs);
}

uint64_t
spa_delegation(spa_t *spa)
{
        return (spa->spa_delegation);
}

objset_t *
spa_meta_objset(spa_t *spa)
{
        return (spa->spa_meta_objset);
}

enum zio_checksum
spa_dedup_checksum(spa_t *spa)
{
        return (spa->spa_dedup_checksum);
}

/*
 * Reset pool scan stat per scan pass (or reboot).
 */
void
spa_scan_stat_init(spa_t *spa)
{
        /* data not stored on disk */
        spa->spa_scan_pass_start = gethrestime_sec();
        spa->spa_scan_pass_exam = 0;
        vdev_scan_stat_init(spa->spa_root_vdev);
}

/*
 * Get scan stats for zpool status reports
 */
int
spa_scan_get_stats(spa_t *spa, pool_scan_stat_t *ps)
{
        dsl_scan_t *scn = spa->spa_dsl_pool ? spa->spa_dsl_pool->dp_scan : NULL;

        if (scn == NULL || scn->scn_phys.scn_func == POOL_SCAN_NONE)
                return (ENOENT);
        bzero(ps, sizeof (pool_scan_stat_t));

        /* data stored on disk */
        ps->pss_func = scn->scn_phys.scn_func;
        ps->pss_start_time = scn->scn_phys.scn_start_time;
        ps->pss_end_time = scn->scn_phys.scn_end_time;
        ps->pss_to_examine = scn->scn_phys.scn_to_examine;
        ps->pss_examined = scn->scn_phys.scn_examined;
        ps->pss_to_process = scn->scn_phys.scn_to_process;
        ps->pss_processed = scn->scn_phys.scn_processed;
        ps->pss_errors = scn->scn_phys.scn_errors;
        ps->pss_state = scn->scn_phys.scn_state;

        /* data not stored on disk */
        ps->pss_pass_start = spa->spa_scan_pass_start;
        ps->pss_pass_exam = spa->spa_scan_pass_exam;

        return (0);
}

boolean_t
spa_debug_enabled(spa_t *spa)
{
        return (spa->spa_debug);
}