1 /*
   2  * CDDL HEADER START
   3  *
   4  * The contents of this file are subject to the terms of the
   5  * Common Development and Distribution License (the "License").
   6  * You may not use this file except in compliance with the License.
   7  *
   8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
   9  * or http://www.opensolaris.org/os/licensing.
  10  * See the License for the specific language governing permissions
  11  * and limitations under the License.
  12  *
  13  * When distributing Covered Code, include this CDDL HEADER in each
  14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
  15  * If applicable, add the following below this CDDL HEADER, with the
  16  * fields enclosed by brackets "[]" replaced with your own identifying
  17  * information: Portions Copyright [yyyy] [name of copyright owner]
  18  *
  19  * CDDL HEADER END
  20  */
  21 
  22 /*
  23  * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
  24  * Copyright (c) 2011, 2014 by Delphix. All rights reserved.
  25  * Copyright (c) 2013, 2014, Nexenta Systems, Inc.  All rights reserved.
  26  */
  27 
  28 /*
  29  * SPA: Storage Pool Allocator
  30  *
  31  * This file contains all the routines used when modifying on-disk SPA state.
  32  * This includes opening, importing, destroying, exporting a pool, and syncing a
  33  * pool.
  34  */
  35 
  36 #include <sys/zfs_context.h>
  37 #include <sys/fm/fs/zfs.h>
  38 #include <sys/spa_impl.h>
  39 #include <sys/zio.h>
  40 #include <sys/zio_checksum.h>
  41 #include <sys/dmu.h>
  42 #include <sys/dmu_tx.h>
  43 #include <sys/zap.h>
  44 #include <sys/zil.h>
  45 #include <sys/ddt.h>
  46 #include <sys/vdev_impl.h>
  47 #include <sys/metaslab.h>
  48 #include <sys/metaslab_impl.h>
  49 #include <sys/uberblock_impl.h>
  50 #include <sys/txg.h>
  51 #include <sys/avl.h>
  52 #include <sys/dmu_traverse.h>
  53 #include <sys/dmu_objset.h>
  54 #include <sys/unique.h>
  55 #include <sys/dsl_pool.h>
  56 #include <sys/dsl_dataset.h>
  57 #include <sys/dsl_dir.h>
  58 #include <sys/dsl_prop.h>
  59 #include <sys/dsl_synctask.h>
  60 #include <sys/fs/zfs.h>
  61 #include <sys/arc.h>
  62 #include <sys/callb.h>
  63 #include <sys/systeminfo.h>
  64 #include <sys/spa_boot.h>
  65 #include <sys/zfs_ioctl.h>
  66 #include <sys/dsl_scan.h>
  67 #include <sys/zfeature.h>
  68 #include <sys/dsl_destroy.h>
  69 
  70 #ifdef  _KERNEL
  71 #include <sys/bootprops.h>
  72 #include <sys/callb.h>
  73 #include <sys/cpupart.h>
  74 #include <sys/pool.h>
  75 #include <sys/sysdc.h>
  76 #include <sys/zone.h>
  77 #endif  /* _KERNEL */
  78 
  79 #include "zfs_prop.h"
  80 #include "zfs_comutil.h"
  81 
  82 /*
  83  * The interval, in seconds, at which failed configuration cache file writes
  84  * should be retried.
  85  */
  86 static int zfs_ccw_retry_interval = 300;
  87 
  88 typedef enum zti_modes {
  89         ZTI_MODE_FIXED,                 /* value is # of threads (min 1) */
  90         ZTI_MODE_BATCH,                 /* cpu-intensive; value is ignored */
  91         ZTI_MODE_NULL,                  /* don't create a taskq */
  92         ZTI_NMODES
  93 } zti_modes_t;
  94 
  95 #define ZTI_P(n, q)     { ZTI_MODE_FIXED, (n), (q) }
  96 #define ZTI_BATCH       { ZTI_MODE_BATCH, 0, 1 }
  97 #define ZTI_NULL        { ZTI_MODE_NULL, 0, 0 }
  98 
  99 #define ZTI_N(n)        ZTI_P(n, 1)
 100 #define ZTI_ONE         ZTI_N(1)
 101 
 102 typedef struct zio_taskq_info {
 103         zti_modes_t zti_mode;
 104         uint_t zti_value;
 105         uint_t zti_count;
 106 } zio_taskq_info_t;
 107 
 108 static const char *const zio_taskq_types[ZIO_TASKQ_TYPES] = {
 109         "issue", "issue_high", "intr", "intr_high"
 110 };
 111 
 112 /*
 113  * This table defines the taskq settings for each ZFS I/O type. When
 114  * initializing a pool, we use this table to create an appropriately sized
 115  * taskq. Some operations are low volume and therefore have a small, static
 116  * number of threads assigned to their taskqs using the ZTI_N(#) or ZTI_ONE
 117  * macros. Other operations process a large amount of data; the ZTI_BATCH
 118  * macro causes us to create a taskq oriented for throughput. Some operations
 * are so high frequency and short-lived that the taskq itself can become a
 120  * point of lock contention. The ZTI_P(#, #) macro indicates that we need an
 121  * additional degree of parallelism specified by the number of threads per-
 122  * taskq and the number of taskqs; when dispatching an event in this case, the
 123  * particular taskq is chosen at random.
 124  *
 125  * The different taskq priorities are to handle the different contexts (issue
 126  * and interrupt) and then to reserve threads for ZIO_PRIORITY_NOW I/Os that
 127  * need to be handled with minimum delay.
 128  */
 129 const zio_taskq_info_t zio_taskqs[ZIO_TYPES][ZIO_TASKQ_TYPES] = {
 130         /* ISSUE        ISSUE_HIGH      INTR            INTR_HIGH */
 131         { ZTI_ONE,      ZTI_NULL,       ZTI_ONE,        ZTI_NULL }, /* NULL */
 132         { ZTI_N(8),     ZTI_NULL,       ZTI_P(12, 8),   ZTI_NULL }, /* READ */
 133         { ZTI_BATCH,    ZTI_N(5),       ZTI_N(8),       ZTI_N(5) }, /* WRITE */
 134         { ZTI_P(12, 8), ZTI_NULL,       ZTI_ONE,        ZTI_NULL }, /* FREE */
 135         { ZTI_ONE,      ZTI_NULL,       ZTI_ONE,        ZTI_NULL }, /* CLAIM */
 136         { ZTI_ONE,      ZTI_NULL,       ZTI_ONE,        ZTI_NULL }, /* IOCTL */
 137 };
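
/*
 * As an illustration of the table above, the READ row gives the issue context
 * a single taskq of 8 threads (ZTI_N(8)) and the interrupt context 8 taskqs
 * of 12 threads each (ZTI_P(12, 8)); the high-priority variants are not
 * created (ZTI_NULL).
 */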
 138 
 139 static void spa_sync_version(void *arg, dmu_tx_t *tx);
 140 static void spa_sync_props(void *arg, dmu_tx_t *tx);
 141 static boolean_t spa_has_active_shared_spare(spa_t *spa);
 142 static int spa_load_impl(spa_t *spa, uint64_t, nvlist_t *config,
 143     spa_load_state_t state, spa_import_type_t type, boolean_t mosconfig,
 144     char **ereport);
 145 static void spa_vdev_resilver_done(spa_t *spa);
 146 
 147 uint_t          zio_taskq_batch_pct = 75;       /* 1 thread per cpu in pset */
 148 id_t            zio_taskq_psrset_bind = PS_NONE;
 149 boolean_t       zio_taskq_sysdc = B_TRUE;       /* use SDC scheduling class */
 150 uint_t          zio_taskq_basedc = 80;          /* base duty cycle */
 151 
 152 boolean_t       spa_create_process = B_TRUE;    /* no process ==> no sysdc */
 153 extern int      zfs_sync_pass_deferred_free;
 154 
 155 /*
 156  * This (illegal) pool name is used when temporarily importing a spa_t in order
 157  * to get the vdev stats associated with the imported devices.
 158  */
 159 #define TRYIMPORT_NAME  "$import"
 160 
 161 /*
 162  * ==========================================================================
 163  * SPA properties routines
 164  * ==========================================================================
 165  */
 166 
 167 /*
 168  * Add a (source=src, propname=propval) list to an nvlist.
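 *
 * The entry added to "nvl" is keyed by the property name and is itself a
 * nested nvlist of the form { ZPROP_SOURCE = src, ZPROP_VALUE = strval or
 * intval }, which is what the code below constructs.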
 169  */
 170 static void
 171 spa_prop_add_list(nvlist_t *nvl, zpool_prop_t prop, char *strval,
 172     uint64_t intval, zprop_source_t src)
 173 {
 174         const char *propname = zpool_prop_to_name(prop);
 175         nvlist_t *propval;
 176 
 177         VERIFY(nvlist_alloc(&propval, NV_UNIQUE_NAME, KM_SLEEP) == 0);
 178         VERIFY(nvlist_add_uint64(propval, ZPROP_SOURCE, src) == 0);
 179 
 180         if (strval != NULL)
 181                 VERIFY(nvlist_add_string(propval, ZPROP_VALUE, strval) == 0);
 182         else
 183                 VERIFY(nvlist_add_uint64(propval, ZPROP_VALUE, intval) == 0);
 184 
 185         VERIFY(nvlist_add_nvlist(nvl, propname, propval) == 0);
 186         nvlist_free(propval);
 187 }
 188 
 189 /*
 190  * Get property values from the spa configuration.
 191  */
 192 static void
 193 spa_prop_get_config(spa_t *spa, nvlist_t **nvp)
 194 {
 195         vdev_t *rvd = spa->spa_root_vdev;
 196         dsl_pool_t *pool = spa->spa_dsl_pool;
 197         uint64_t size, alloc, cap, version;
 198         zprop_source_t src = ZPROP_SRC_NONE;
 199         spa_config_dirent_t *dp;
 200         metaslab_class_t *mc = spa_normal_class(spa);
 201 
 202         ASSERT(MUTEX_HELD(&spa->spa_props_lock));
 203 
 204         if (rvd != NULL) {
 205                 alloc = metaslab_class_get_alloc(spa_normal_class(spa));
 206                 size = metaslab_class_get_space(spa_normal_class(spa));
 207                 spa_prop_add_list(*nvp, ZPOOL_PROP_NAME, spa_name(spa), 0, src);
 208                 spa_prop_add_list(*nvp, ZPOOL_PROP_SIZE, NULL, size, src);
 209                 spa_prop_add_list(*nvp, ZPOOL_PROP_ALLOCATED, NULL, alloc, src);
 210                 spa_prop_add_list(*nvp, ZPOOL_PROP_FREE, NULL,
 211                     size - alloc, src);
 212 
 213                 spa_prop_add_list(*nvp, ZPOOL_PROP_FRAGMENTATION, NULL,
 214                     metaslab_class_fragmentation(mc), src);
 215                 spa_prop_add_list(*nvp, ZPOOL_PROP_EXPANDSZ, NULL,
 216                     metaslab_class_expandable_space(mc), src);
 217                 spa_prop_add_list(*nvp, ZPOOL_PROP_READONLY, NULL,
 218                     (spa_mode(spa) == FREAD), src);
 219 
 220                 cap = (size == 0) ? 0 : (alloc * 100 / size);
 221                 spa_prop_add_list(*nvp, ZPOOL_PROP_CAPACITY, NULL, cap, src);
 222 
 223                 spa_prop_add_list(*nvp, ZPOOL_PROP_DEDUPRATIO, NULL,
 224                     ddt_get_pool_dedup_ratio(spa), src);
 225 
 226                 spa_prop_add_list(*nvp, ZPOOL_PROP_HEALTH, NULL,
 227                     rvd->vdev_state, src);
 228 
 229                 version = spa_version(spa);
 230                 if (version == zpool_prop_default_numeric(ZPOOL_PROP_VERSION))
 231                         src = ZPROP_SRC_DEFAULT;
 232                 else
 233                         src = ZPROP_SRC_LOCAL;
 234                 spa_prop_add_list(*nvp, ZPOOL_PROP_VERSION, NULL, version, src);
 235         }
 236 
 237         if (pool != NULL) {
 238                 /*
                 * The $FREE directory was introduced in SPA_VERSION_DEADLISTS;
                 * when opening pools created before this version, dp_free_dir
                 * will be NULL.
 241                  */
 242                 if (pool->dp_free_dir != NULL) {
 243                         spa_prop_add_list(*nvp, ZPOOL_PROP_FREEING, NULL,
 244                             dsl_dir_phys(pool->dp_free_dir)->dd_used_bytes,
 245                             src);
 246                 } else {
 247                         spa_prop_add_list(*nvp, ZPOOL_PROP_FREEING,
 248                             NULL, 0, src);
 249                 }
 250 
 251                 if (pool->dp_leak_dir != NULL) {
 252                         spa_prop_add_list(*nvp, ZPOOL_PROP_LEAKED, NULL,
 253                             dsl_dir_phys(pool->dp_leak_dir)->dd_used_bytes,
 254                             src);
 255                 } else {
 256                         spa_prop_add_list(*nvp, ZPOOL_PROP_LEAKED,
 257                             NULL, 0, src);
 258                 }
 259         }
 260 
 261         spa_prop_add_list(*nvp, ZPOOL_PROP_GUID, NULL, spa_guid(spa), src);
 262 
 263         if (spa->spa_comment != NULL) {
 264                 spa_prop_add_list(*nvp, ZPOOL_PROP_COMMENT, spa->spa_comment,
 265                     0, ZPROP_SRC_LOCAL);
 266         }
 267 
 268         if (spa->spa_root != NULL)
 269                 spa_prop_add_list(*nvp, ZPOOL_PROP_ALTROOT, spa->spa_root,
 270                     0, ZPROP_SRC_LOCAL);
 271 
 272         if (spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_BLOCKS)) {
 273                 spa_prop_add_list(*nvp, ZPOOL_PROP_MAXBLOCKSIZE, NULL,
 274                     MIN(zfs_max_recordsize, SPA_MAXBLOCKSIZE), ZPROP_SRC_NONE);
 275         } else {
 276                 spa_prop_add_list(*nvp, ZPOOL_PROP_MAXBLOCKSIZE, NULL,
 277                     SPA_OLD_MAXBLOCKSIZE, ZPROP_SRC_NONE);
 278         }
 279 
 280         if ((dp = list_head(&spa->spa_config_list)) != NULL) {
 281                 if (dp->scd_path == NULL) {
 282                         spa_prop_add_list(*nvp, ZPOOL_PROP_CACHEFILE,
 283                             "none", 0, ZPROP_SRC_LOCAL);
 284                 } else if (strcmp(dp->scd_path, spa_config_path) != 0) {
 285                         spa_prop_add_list(*nvp, ZPOOL_PROP_CACHEFILE,
 286                             dp->scd_path, 0, ZPROP_SRC_LOCAL);
 287                 }
 288         }
 289 }
 290 
 291 /*
 292  * Get zpool property values.
 293  */
 294 int
 295 spa_prop_get(spa_t *spa, nvlist_t **nvp)
 296 {
 297         objset_t *mos = spa->spa_meta_objset;
 298         zap_cursor_t zc;
 299         zap_attribute_t za;
 300         int err;
 301 
 302         VERIFY(nvlist_alloc(nvp, NV_UNIQUE_NAME, KM_SLEEP) == 0);
 303 
 304         mutex_enter(&spa->spa_props_lock);
 305 
 306         /*
 307          * Get properties from the spa config.
 308          */
 309         spa_prop_get_config(spa, nvp);
 310 
 311         /* If no pool property object, no more prop to get. */
 312         if (mos == NULL || spa->spa_pool_props_object == 0) {
 313                 mutex_exit(&spa->spa_props_lock);
 314                 return (0);
 315         }
 316 
 317         /*
 318          * Get properties from the MOS pool property object.
 319          */
 320         for (zap_cursor_init(&zc, mos, spa->spa_pool_props_object);
 321             (err = zap_cursor_retrieve(&zc, &za)) == 0;
 322             zap_cursor_advance(&zc)) {
 323                 uint64_t intval = 0;
 324                 char *strval = NULL;
 325                 zprop_source_t src = ZPROP_SRC_DEFAULT;
 326                 zpool_prop_t prop;
 327 
 328                 if ((prop = zpool_name_to_prop(za.za_name)) == ZPROP_INVAL)
 329                         continue;
 330 
 331                 switch (za.za_integer_length) {
 332                 case 8:
 333                         /* integer property */
 334                         if (za.za_first_integer !=
 335                             zpool_prop_default_numeric(prop))
 336                                 src = ZPROP_SRC_LOCAL;
 337 
 338                         if (prop == ZPOOL_PROP_BOOTFS) {
 339                                 dsl_pool_t *dp;
 340                                 dsl_dataset_t *ds = NULL;
 341 
 342                                 dp = spa_get_dsl(spa);
 343                                 dsl_pool_config_enter(dp, FTAG);
 344                                 if (err = dsl_dataset_hold_obj(dp,
 345                                     za.za_first_integer, FTAG, &ds)) {
 346                                         dsl_pool_config_exit(dp, FTAG);
 347                                         break;
 348                                 }
 349 
 350                                 strval = kmem_alloc(
 351                                     MAXNAMELEN + strlen(MOS_DIR_NAME) + 1,
 352                                     KM_SLEEP);
 353                                 dsl_dataset_name(ds, strval);
 354                                 dsl_dataset_rele(ds, FTAG);
 355                                 dsl_pool_config_exit(dp, FTAG);
 356                         } else {
 357                                 strval = NULL;
 358                                 intval = za.za_first_integer;
 359                         }
 360 
 361                         spa_prop_add_list(*nvp, prop, strval, intval, src);
 362 
 363                         if (strval != NULL)
 364                                 kmem_free(strval,
 365                                     MAXNAMELEN + strlen(MOS_DIR_NAME) + 1);
 366 
 367                         break;
 368 
 369                 case 1:
 370                         /* string property */
 371                         strval = kmem_alloc(za.za_num_integers, KM_SLEEP);
 372                         err = zap_lookup(mos, spa->spa_pool_props_object,
 373                             za.za_name, 1, za.za_num_integers, strval);
 374                         if (err) {
 375                                 kmem_free(strval, za.za_num_integers);
 376                                 break;
 377                         }
 378                         spa_prop_add_list(*nvp, prop, strval, 0, src);
 379                         kmem_free(strval, za.za_num_integers);
 380                         break;
 381 
 382                 default:
 383                         break;
 384                 }
 385         }
 386         zap_cursor_fini(&zc);
 387         mutex_exit(&spa->spa_props_lock);
 388 out:
 389         if (err && err != ENOENT) {
 390                 nvlist_free(*nvp);
 391                 *nvp = NULL;
 392                 return (err);
 393         }
 394 
 395         return (0);
 396 }
 397 
 398 /*
 399  * Validate the given pool properties nvlist and modify the list
 400  * for the property values to be set.
 401  */
 402 static int
 403 spa_prop_validate(spa_t *spa, nvlist_t *props)
 404 {
 405         nvpair_t *elem;
 406         int error = 0, reset_bootfs = 0;
 407         uint64_t objnum = 0;
 408         boolean_t has_feature = B_FALSE;
 409 
 410         elem = NULL;
 411         while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
 412                 uint64_t intval;
 413                 char *strval, *slash, *check, *fname;
 414                 const char *propname = nvpair_name(elem);
 415                 zpool_prop_t prop = zpool_name_to_prop(propname);
 416 
 417                 switch (prop) {
 418                 case ZPROP_INVAL:
 419                         if (!zpool_prop_feature(propname)) {
 420                                 error = SET_ERROR(EINVAL);
 421                                 break;
 422                         }
 423 
 424                         /*
 425                          * Sanitize the input.
 426                          */
 427                         if (nvpair_type(elem) != DATA_TYPE_UINT64) {
 428                                 error = SET_ERROR(EINVAL);
 429                                 break;
 430                         }
 431 
 432                         if (nvpair_value_uint64(elem, &intval) != 0) {
 433                                 error = SET_ERROR(EINVAL);
 434                                 break;
 435                         }
 436 
 437                         if (intval != 0) {
 438                                 error = SET_ERROR(EINVAL);
 439                                 break;
 440                         }
 441 
 442                         fname = strchr(propname, '@') + 1;
 443                         if (zfeature_lookup_name(fname, NULL) != 0) {
 444                                 error = SET_ERROR(EINVAL);
 445                                 break;
 446                         }
 447 
 448                         has_feature = B_TRUE;
 449                         break;
 450 
 451                 case ZPOOL_PROP_VERSION:
 452                         error = nvpair_value_uint64(elem, &intval);
 453                         if (!error &&
 454                             (intval < spa_version(spa) ||
 455                             intval > SPA_VERSION_BEFORE_FEATURES ||
 456                             has_feature))
 457                                 error = SET_ERROR(EINVAL);
 458                         break;
 459 
 460                 case ZPOOL_PROP_DELEGATION:
 461                 case ZPOOL_PROP_AUTOREPLACE:
 462                 case ZPOOL_PROP_LISTSNAPS:
 463                 case ZPOOL_PROP_AUTOEXPAND:
 464                         error = nvpair_value_uint64(elem, &intval);
 465                         if (!error && intval > 1)
 466                                 error = SET_ERROR(EINVAL);
 467                         break;
 468 
 469                 case ZPOOL_PROP_BOOTFS:
 470                         /*
 471                          * If the pool version is less than SPA_VERSION_BOOTFS,
 472                          * or the pool is still being created (version == 0),
 473                          * the bootfs property cannot be set.
 474                          */
 475                         if (spa_version(spa) < SPA_VERSION_BOOTFS) {
 476                                 error = SET_ERROR(ENOTSUP);
 477                                 break;
 478                         }
 479 
 480                         /*
 481                          * Make sure the vdev config is bootable
 482                          */
 483                         if (!vdev_is_bootable(spa->spa_root_vdev)) {
 484                                 error = SET_ERROR(ENOTSUP);
 485                                 break;
 486                         }
 487 
 488                         reset_bootfs = 1;
 489 
 490                         error = nvpair_value_string(elem, &strval);
 491 
 492                         if (!error) {
 493                                 objset_t *os;
 494                                 uint64_t propval;
 495 
 496                                 if (strval == NULL || strval[0] == '\0') {
 497                                         objnum = zpool_prop_default_numeric(
 498                                             ZPOOL_PROP_BOOTFS);
 499                                         break;
 500                                 }
 501 
 502                                 if (error = dmu_objset_hold(strval, FTAG, &os))
 503                                         break;
 504 
 505                                 /*
 506                                  * Must be ZPL, and its property settings
 507                                  * must be supported by GRUB (compression
 508                                  * is not gzip, and large blocks are not used).
 509                                  */
 510 
 511                                 if (dmu_objset_type(os) != DMU_OST_ZFS) {
 512                                         error = SET_ERROR(ENOTSUP);
 513                                 } else if ((error =
 514                                     dsl_prop_get_int_ds(dmu_objset_ds(os),
 515                                     zfs_prop_to_name(ZFS_PROP_COMPRESSION),
 516                                     &propval)) == 0 &&
 517                                     !BOOTFS_COMPRESS_VALID(propval)) {
 518                                         error = SET_ERROR(ENOTSUP);
 519                                 } else if ((error =
 520                                     dsl_prop_get_int_ds(dmu_objset_ds(os),
 521                                     zfs_prop_to_name(ZFS_PROP_RECORDSIZE),
 522                                     &propval)) == 0 &&
 523                                     propval > SPA_OLD_MAXBLOCKSIZE) {
 524                                         error = SET_ERROR(ENOTSUP);
 525                                 } else {
 526                                         objnum = dmu_objset_id(os);
 527                                 }
 528                                 dmu_objset_rele(os, FTAG);
 529                         }
 530                         break;
 531 
 532                 case ZPOOL_PROP_FAILUREMODE:
 533                         error = nvpair_value_uint64(elem, &intval);
 534                         if (!error && (intval < ZIO_FAILURE_MODE_WAIT ||
 535                             intval > ZIO_FAILURE_MODE_PANIC))
 536                                 error = SET_ERROR(EINVAL);
 537 
 538                         /*
 539                          * This is a special case which only occurs when
 540                          * the pool has completely failed. This allows
 541                          * the user to change the in-core failmode property
 542                          * without syncing it out to disk (I/Os might
 543                          * currently be blocked). We do this by returning
 544                          * EIO to the caller (spa_prop_set) to trick it
 545                          * into thinking we encountered a property validation
 546                          * error.
 547                          */
 548                         if (!error && spa_suspended(spa)) {
 549                                 spa->spa_failmode = intval;
 550                                 error = SET_ERROR(EIO);
 551                         }
 552                         break;
 553 
 554                 case ZPOOL_PROP_CACHEFILE:
 555                         if ((error = nvpair_value_string(elem, &strval)) != 0)
 556                                 break;
 557 
 558                         if (strval[0] == '\0')
 559                                 break;
 560 
 561                         if (strcmp(strval, "none") == 0)
 562                                 break;
 563 
 564                         if (strval[0] != '/') {
 565                                 error = SET_ERROR(EINVAL);
 566                                 break;
 567                         }
 568 
 569                         slash = strrchr(strval, '/');
 570                         ASSERT(slash != NULL);
 571 
 572                         if (slash[1] == '\0' || strcmp(slash, "/.") == 0 ||
 573                             strcmp(slash, "/..") == 0)
 574                                 error = SET_ERROR(EINVAL);
 575                         break;
 576 
 577                 case ZPOOL_PROP_COMMENT:
 578                         if ((error = nvpair_value_string(elem, &strval)) != 0)
 579                                 break;
 580                         for (check = strval; *check != '\0'; check++) {
 581                                 /*
 582                                  * The kernel doesn't have an easy isprint()
 583                                  * check.  For this kernel check, we merely
 584                                  * check ASCII apart from DEL.  Fix this if
 585                                  * there is an easy-to-use kernel isprint().
 586                                  */
 587                                 if (*check >= 0x7f) {
 588                                         error = SET_ERROR(EINVAL);
 589                                         break;
 590                                 }
 592                         }
 593                         if (strlen(strval) > ZPROP_MAX_COMMENT)
                                error = SET_ERROR(E2BIG);
 595                         break;
 596 
 597                 case ZPOOL_PROP_DEDUPDITTO:
 598                         if (spa_version(spa) < SPA_VERSION_DEDUP)
 599                                 error = SET_ERROR(ENOTSUP);
 600                         else
 601                                 error = nvpair_value_uint64(elem, &intval);
 602                         if (error == 0 &&
 603                             intval != 0 && intval < ZIO_DEDUPDITTO_MIN)
 604                                 error = SET_ERROR(EINVAL);
 605                         break;
 606                 }
 607 
 608                 if (error)
 609                         break;
 610         }
 611 
 612         if (!error && reset_bootfs) {
 613                 error = nvlist_remove(props,
 614                     zpool_prop_to_name(ZPOOL_PROP_BOOTFS), DATA_TYPE_STRING);
 615 
 616                 if (!error) {
 617                         error = nvlist_add_uint64(props,
 618                             zpool_prop_to_name(ZPOOL_PROP_BOOTFS), objnum);
 619                 }
 620         }
 621 
 622         return (error);
 623 }
 624 
 625 void
 626 spa_configfile_set(spa_t *spa, nvlist_t *nvp, boolean_t need_sync)
 627 {
 628         char *cachefile;
 629         spa_config_dirent_t *dp;
 630 
 631         if (nvlist_lookup_string(nvp, zpool_prop_to_name(ZPOOL_PROP_CACHEFILE),
 632             &cachefile) != 0)
 633                 return;
 634 
 635         dp = kmem_alloc(sizeof (spa_config_dirent_t),
 636             KM_SLEEP);
 637 
 638         if (cachefile[0] == '\0')
 639                 dp->scd_path = spa_strdup(spa_config_path);
 640         else if (strcmp(cachefile, "none") == 0)
 641                 dp->scd_path = NULL;
 642         else
 643                 dp->scd_path = spa_strdup(cachefile);
 644 
 645         list_insert_head(&spa->spa_config_list, dp);
 646         if (need_sync)
 647                 spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
 648 }
 649 
 650 int
 651 spa_prop_set(spa_t *spa, nvlist_t *nvp)
 652 {
 653         int error;
 654         nvpair_t *elem = NULL;
 655         boolean_t need_sync = B_FALSE;
 656 
 657         if ((error = spa_prop_validate(spa, nvp)) != 0)
 658                 return (error);
 659 
 660         while ((elem = nvlist_next_nvpair(nvp, elem)) != NULL) {
 661                 zpool_prop_t prop = zpool_name_to_prop(nvpair_name(elem));
 662 
 663                 if (prop == ZPOOL_PROP_CACHEFILE ||
 664                     prop == ZPOOL_PROP_ALTROOT ||
 665                     prop == ZPOOL_PROP_READONLY)
 666                         continue;
 667 
 668                 if (prop == ZPOOL_PROP_VERSION || prop == ZPROP_INVAL) {
 669                         uint64_t ver;
 670 
 671                         if (prop == ZPOOL_PROP_VERSION) {
 672                                 VERIFY(nvpair_value_uint64(elem, &ver) == 0);
 673                         } else {
 674                                 ASSERT(zpool_prop_feature(nvpair_name(elem)));
 675                                 ver = SPA_VERSION_FEATURES;
 676                                 need_sync = B_TRUE;
 677                         }
 678 
 679                         /* Save time if the version is already set. */
 680                         if (ver == spa_version(spa))
 681                                 continue;
 682 
 683                         /*
 684                          * In addition to the pool directory object, we might
 685                          * create the pool properties object, the features for
 686                          * read object, the features for write object, or the
 687                          * feature descriptions object.
 688                          */
 689                         error = dsl_sync_task(spa->spa_name, NULL,
 690                             spa_sync_version, &ver,
 691                             6, ZFS_SPACE_CHECK_RESERVED);
 692                         if (error)
 693                                 return (error);
 694                         continue;
 695                 }
 696 
 697                 need_sync = B_TRUE;
 698                 break;
 699         }
 700 
 701         if (need_sync) {
 702                 return (dsl_sync_task(spa->spa_name, NULL, spa_sync_props,
 703                     nvp, 6, ZFS_SPACE_CHECK_RESERVED));
 704         }
 705 
 706         return (0);
 707 }
 708 
 709 /*
 710  * If the bootfs property value is dsobj, clear it.
 711  */
 712 void
 713 spa_prop_clear_bootfs(spa_t *spa, uint64_t dsobj, dmu_tx_t *tx)
 714 {
 715         if (spa->spa_bootfs == dsobj && spa->spa_pool_props_object != 0) {
 716                 VERIFY(zap_remove(spa->spa_meta_objset,
 717                     spa->spa_pool_props_object,
 718                     zpool_prop_to_name(ZPOOL_PROP_BOOTFS), tx) == 0);
 719                 spa->spa_bootfs = 0;
 720         }
 721 }
 722 
 723 /*ARGSUSED*/
 724 static int
 725 spa_change_guid_check(void *arg, dmu_tx_t *tx)
 726 {
 727         uint64_t *newguid = arg;
 728         spa_t *spa = dmu_tx_pool(tx)->dp_spa;
 729         vdev_t *rvd = spa->spa_root_vdev;
 730         uint64_t vdev_state;
 731 
 732         spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
 733         vdev_state = rvd->vdev_state;
 734         spa_config_exit(spa, SCL_STATE, FTAG);
 735 
 736         if (vdev_state != VDEV_STATE_HEALTHY)
 737                 return (SET_ERROR(ENXIO));
 738 
 739         ASSERT3U(spa_guid(spa), !=, *newguid);
 740 
 741         return (0);
 742 }
 743 
 744 static void
 745 spa_change_guid_sync(void *arg, dmu_tx_t *tx)
 746 {
 747         uint64_t *newguid = arg;
 748         spa_t *spa = dmu_tx_pool(tx)->dp_spa;
 749         uint64_t oldguid;
 750         vdev_t *rvd = spa->spa_root_vdev;
 751 
 752         oldguid = spa_guid(spa);
 753 
 754         spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
 755         rvd->vdev_guid = *newguid;
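        /*
         * vdev_guid_sum includes the root vdev's own guid, so adjust the
         * sum by the difference between the new and old guids.
         */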
 756         rvd->vdev_guid_sum += (*newguid - oldguid);
 757         vdev_config_dirty(rvd);
 758         spa_config_exit(spa, SCL_STATE, FTAG);
 759 
 760         spa_history_log_internal(spa, "guid change", tx, "old=%llu new=%llu",
 761             oldguid, *newguid);
 762 }
 763 
 764 /*
 765  * Change the GUID for the pool.  This is done so that we can later
 766  * re-import a pool built from a clone of our own vdevs.  We will modify
 767  * the root vdev's guid, our own pool guid, and then mark all of our
 768  * vdevs dirty.  Note that we must make sure that all our vdevs are
 769  * online when we do this, or else any vdevs that weren't present
 770  * would be orphaned from our pool.  We are also going to issue a
 771  * sysevent to update any watchers.
 772  */
 773 int
 774 spa_change_guid(spa_t *spa)
 775 {
 776         int error;
 777         uint64_t guid;
 778 
 779         mutex_enter(&spa->spa_vdev_top_lock);
 780         mutex_enter(&spa_namespace_lock);
 781         guid = spa_generate_guid(NULL);
 782 
 783         error = dsl_sync_task(spa->spa_name, spa_change_guid_check,
 784             spa_change_guid_sync, &guid, 5, ZFS_SPACE_CHECK_RESERVED);
 785 
 786         if (error == 0) {
 787                 spa_config_sync(spa, B_FALSE, B_TRUE);
 788                 spa_event_notify(spa, NULL, ESC_ZFS_POOL_REGUID);
 789         }
 790 
 791         mutex_exit(&spa_namespace_lock);
 792         mutex_exit(&spa->spa_vdev_top_lock);
 793 
 794         return (error);
 795 }
 796 
 797 /*
 798  * ==========================================================================
 799  * SPA state manipulation (open/create/destroy/import/export)
 800  * ==========================================================================
 801  */
 802 
 803 static int
 804 spa_error_entry_compare(const void *a, const void *b)
 805 {
 806         spa_error_entry_t *sa = (spa_error_entry_t *)a;
 807         spa_error_entry_t *sb = (spa_error_entry_t *)b;
 808         int ret;
 809 
 810         ret = bcmp(&sa->se_bookmark, &sb->se_bookmark,
 811             sizeof (zbookmark_phys_t));
 812 
 813         if (ret < 0)
 814                 return (-1);
 815         else if (ret > 0)
 816                 return (1);
 817         else
 818                 return (0);
 819 }
 820 
 821 /*
 822  * Utility function which retrieves copies of the current logs and
 823  * re-initializes them in the process.
 824  */
 825 void
 826 spa_get_errlists(spa_t *spa, avl_tree_t *last, avl_tree_t *scrub)
 827 {
 828         ASSERT(MUTEX_HELD(&spa->spa_errlist_lock));
 829 
 830         bcopy(&spa->spa_errlist_last, last, sizeof (avl_tree_t));
 831         bcopy(&spa->spa_errlist_scrub, scrub, sizeof (avl_tree_t));
 832 
 833         avl_create(&spa->spa_errlist_scrub,
 834             spa_error_entry_compare, sizeof (spa_error_entry_t),
 835             offsetof(spa_error_entry_t, se_avl));
 836         avl_create(&spa->spa_errlist_last,
 837             spa_error_entry_compare, sizeof (spa_error_entry_t),
 838             offsetof(spa_error_entry_t, se_avl));
 839 }
 840 
 841 static void
 842 spa_taskqs_init(spa_t *spa, zio_type_t t, zio_taskq_type_t q)
 843 {
 844         const zio_taskq_info_t *ztip = &zio_taskqs[t][q];
 845         enum zti_modes mode = ztip->zti_mode;
 846         uint_t value = ztip->zti_value;
 847         uint_t count = ztip->zti_count;
 848         spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
 849         char name[32];
 850         uint_t flags = 0;
 851         boolean_t batch = B_FALSE;
 852 
 853         if (mode == ZTI_MODE_NULL) {
 854                 tqs->stqs_count = 0;
 855                 tqs->stqs_taskq = NULL;
 856                 return;
 857         }
 858 
 859         ASSERT3U(count, >, 0);
 860 
 861         tqs->stqs_count = count;
 862         tqs->stqs_taskq = kmem_alloc(count * sizeof (taskq_t *), KM_SLEEP);
 863 
 864         switch (mode) {
 865         case ZTI_MODE_FIXED:
 866                 ASSERT3U(value, >=, 1);
 867                 value = MAX(value, 1);
 868                 break;
 869 
 870         case ZTI_MODE_BATCH:
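                /*
                 * With TASKQ_THREADS_CPU_PCT, the thread count used when the
                 * taskq is created below is interpreted as a percentage of
                 * online CPUs, so "value" becomes zio_taskq_batch_pct rather
                 * than an absolute thread count.
                 */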
 871                 batch = B_TRUE;
 872                 flags |= TASKQ_THREADS_CPU_PCT;
 873                 value = zio_taskq_batch_pct;
 874                 break;
 875 
 876         default:
 877                 panic("unrecognized mode for %s_%s taskq (%u:%u) in "
 878                     "spa_activate()",
 879                     zio_type_name[t], zio_taskq_types[q], mode, value);
 880                 break;
 881         }
 882 
 883         for (uint_t i = 0; i < count; i++) {
 884                 taskq_t *tq;
 885 
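                /*
                 * Taskq names combine the zio type and taskq type, with a
                 * per-taskq index appended when more than one taskq backs
                 * this type/queue pair (e.g. "<type>_intr_3").
                 */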
 886                 if (count > 1) {
 887                         (void) snprintf(name, sizeof (name), "%s_%s_%u",
 888                             zio_type_name[t], zio_taskq_types[q], i);
 889                 } else {
 890                         (void) snprintf(name, sizeof (name), "%s_%s",
 891                             zio_type_name[t], zio_taskq_types[q]);
 892                 }
 893 
 894                 if (zio_taskq_sysdc && spa->spa_proc != &p0) {
 895                         if (batch)
 896                                 flags |= TASKQ_DC_BATCH;
 897 
 898                         tq = taskq_create_sysdc(name, value, 50, INT_MAX,
 899                             spa->spa_proc, zio_taskq_basedc, flags);
 900                 } else {
 901                         pri_t pri = maxclsyspri;
 902                         /*
 903                          * The write issue taskq can be extremely CPU
 904                          * intensive.  Run it at slightly lower priority
 905                          * than the other taskqs.
 906                          */
 907                         if (t == ZIO_TYPE_WRITE && q == ZIO_TASKQ_ISSUE)
 908                                 pri--;
 909 
 910                         tq = taskq_create_proc(name, value, pri, 50,
 911                             INT_MAX, spa->spa_proc, flags);
 912                 }
 913 
 914                 tqs->stqs_taskq[i] = tq;
 915         }
 916 }
 917 
 918 static void
 919 spa_taskqs_fini(spa_t *spa, zio_type_t t, zio_taskq_type_t q)
 920 {
 921         spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
 922 
 923         if (tqs->stqs_taskq == NULL) {
 924                 ASSERT0(tqs->stqs_count);
 925                 return;
 926         }
 927 
 928         for (uint_t i = 0; i < tqs->stqs_count; i++) {
 929                 ASSERT3P(tqs->stqs_taskq[i], !=, NULL);
 930                 taskq_destroy(tqs->stqs_taskq[i]);
 931         }
 932 
 933         kmem_free(tqs->stqs_taskq, tqs->stqs_count * sizeof (taskq_t *));
 934         tqs->stqs_taskq = NULL;
 935 }
 936 
 937 /*
 938  * Dispatch a task to the appropriate taskq for the ZFS I/O type and priority.
 939  * Note that a type may have multiple discrete taskqs to avoid lock contention
 940  * on the taskq itself. In that case we choose which taskq at random by using
 941  * the low bits of gethrtime().
 942  */
 943 void
 944 spa_taskq_dispatch_ent(spa_t *spa, zio_type_t t, zio_taskq_type_t q,
 945     task_func_t *func, void *arg, uint_t flags, taskq_ent_t *ent)
 946 {
 947         spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
 948         taskq_t *tq;
 949 
 950         ASSERT3P(tqs->stqs_taskq, !=, NULL);
 951         ASSERT3U(tqs->stqs_count, !=, 0);
 952 
 953         if (tqs->stqs_count == 1) {
 954                 tq = tqs->stqs_taskq[0];
 955         } else {
 956                 tq = tqs->stqs_taskq[gethrtime() % tqs->stqs_count];
 957         }
 958 
 959         taskq_dispatch_ent(tq, func, arg, flags, ent);
 960 }
 961 
 962 static void
 963 spa_create_zio_taskqs(spa_t *spa)
 964 {
 965         for (int t = 0; t < ZIO_TYPES; t++) {
 966                 for (int q = 0; q < ZIO_TASKQ_TYPES; q++) {
 967                         spa_taskqs_init(spa, t, q);
 968                 }
 969         }
 970 }
 971 
 972 #ifdef _KERNEL
 973 static void
 974 spa_thread(void *arg)
 975 {
 976         callb_cpr_t cprinfo;
 977 
 978         spa_t *spa = arg;
 979         user_t *pu = PTOU(curproc);
 980 
 981         CALLB_CPR_INIT(&cprinfo, &spa->spa_proc_lock, callb_generic_cpr,
 982             spa->spa_name);
 983 
 984         ASSERT(curproc != &p0);
 985         (void) snprintf(pu->u_psargs, sizeof (pu->u_psargs),
 986             "zpool-%s", spa->spa_name);
 987         (void) strlcpy(pu->u_comm, pu->u_psargs, sizeof (pu->u_comm));
 988 
 989         /* bind this thread to the requested psrset */
 990         if (zio_taskq_psrset_bind != PS_NONE) {
 991                 pool_lock();
 992                 mutex_enter(&cpu_lock);
 993                 mutex_enter(&pidlock);
 994                 mutex_enter(&curproc->p_lock);
 995 
 996                 if (cpupart_bind_thread(curthread, zio_taskq_psrset_bind,
 997                     0, NULL, NULL) == 0)  {
 998                         curthread->t_bind_pset = zio_taskq_psrset_bind;
 999                 } else {
1000                         cmn_err(CE_WARN,
1001                             "Couldn't bind process for zfs pool \"%s\" to "
1002                             "pset %d\n", spa->spa_name, zio_taskq_psrset_bind);
1003                 }
1004 
1005                 mutex_exit(&curproc->p_lock);
1006                 mutex_exit(&pidlock);
1007                 mutex_exit(&cpu_lock);
1008                 pool_unlock();
1009         }
1010 
1011         if (zio_taskq_sysdc) {
1012                 sysdc_thread_enter(curthread, 100, 0);
1013         }
1014 
1015         spa->spa_proc = curproc;
1016         spa->spa_did = curthread->t_did;
1017 
1018         spa_create_zio_taskqs(spa);
1019 
1020         mutex_enter(&spa->spa_proc_lock);
1021         ASSERT(spa->spa_proc_state == SPA_PROC_CREATED);
1022 
1023         spa->spa_proc_state = SPA_PROC_ACTIVE;
1024         cv_broadcast(&spa->spa_proc_cv);
1025 
1026         CALLB_CPR_SAFE_BEGIN(&cprinfo);
1027         while (spa->spa_proc_state == SPA_PROC_ACTIVE)
1028                 cv_wait(&spa->spa_proc_cv, &spa->spa_proc_lock);
1029         CALLB_CPR_SAFE_END(&cprinfo, &spa->spa_proc_lock);
1030 
1031         ASSERT(spa->spa_proc_state == SPA_PROC_DEACTIVATE);
1032         spa->spa_proc_state = SPA_PROC_GONE;
1033         spa->spa_proc = &p0;
1034         cv_broadcast(&spa->spa_proc_cv);
1035         CALLB_CPR_EXIT(&cprinfo);   /* drops spa_proc_lock */
1036 
1037         mutex_enter(&curproc->p_lock);
1038         lwp_exit();
1039 }
1040 #endif
1041 
1042 /*
1043  * Activate an uninitialized pool.
1044  */
1045 static void
1046 spa_activate(spa_t *spa, int mode)
1047 {
1048         ASSERT(spa->spa_state == POOL_STATE_UNINITIALIZED);
1049 
1050         spa->spa_state = POOL_STATE_ACTIVE;
1051         spa->spa_mode = mode;
1052 
1053         spa->spa_normal_class = metaslab_class_create(spa, zfs_metaslab_ops);
1054         spa->spa_log_class = metaslab_class_create(spa, zfs_metaslab_ops);
1055 
1056         /* Try to create a covering process */
1057         mutex_enter(&spa->spa_proc_lock);
1058         ASSERT(spa->spa_proc_state == SPA_PROC_NONE);
1059         ASSERT(spa->spa_proc == &p0);
1060         spa->spa_did = 0;
1061 
1062         /* Only create a process if we're going to be around a while. */
1063         if (spa_create_process && strcmp(spa->spa_name, TRYIMPORT_NAME) != 0) {
1064                 if (newproc(spa_thread, (caddr_t)spa, syscid, maxclsyspri,
1065                     NULL, 0) == 0) {
1066                         spa->spa_proc_state = SPA_PROC_CREATED;
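                        /*
                         * Wait for spa_thread() to finish initializing (it
                         * creates the zio taskqs) and advance the state to
                         * SPA_PROC_ACTIVE.
                         */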
1067                         while (spa->spa_proc_state == SPA_PROC_CREATED) {
1068                                 cv_wait(&spa->spa_proc_cv,
1069                                     &spa->spa_proc_lock);
1070                         }
1071                         ASSERT(spa->spa_proc_state == SPA_PROC_ACTIVE);
1072                         ASSERT(spa->spa_proc != &p0);
1073                         ASSERT(spa->spa_did != 0);
1074                 } else {
1075 #ifdef _KERNEL
1076                         cmn_err(CE_WARN,
1077                             "Couldn't create process for zfs pool \"%s\"\n",
1078                             spa->spa_name);
1079 #endif
1080                 }
1081         }
1082         mutex_exit(&spa->spa_proc_lock);
1083 
1084         /* If we didn't create a process, we need to create our taskqs. */
1085         if (spa->spa_proc == &p0) {
1086                 spa_create_zio_taskqs(spa);
1087         }
1088 
1089         list_create(&spa->spa_config_dirty_list, sizeof (vdev_t),
1090             offsetof(vdev_t, vdev_config_dirty_node));
1091         list_create(&spa->spa_state_dirty_list, sizeof (vdev_t),
1092             offsetof(vdev_t, vdev_state_dirty_node));
1093 
1094         txg_list_create(&spa->spa_vdev_txg_list,
1095             offsetof(struct vdev, vdev_txg_node));
1096 
1097         avl_create(&spa->spa_errlist_scrub,
1098             spa_error_entry_compare, sizeof (spa_error_entry_t),
1099             offsetof(spa_error_entry_t, se_avl));
1100         avl_create(&spa->spa_errlist_last,
1101             spa_error_entry_compare, sizeof (spa_error_entry_t),
1102             offsetof(spa_error_entry_t, se_avl));
1103 }
1104 
1105 /*
1106  * Opposite of spa_activate().
1107  */
1108 static void
1109 spa_deactivate(spa_t *spa)
1110 {
1111         ASSERT(spa->spa_sync_on == B_FALSE);
1112         ASSERT(spa->spa_dsl_pool == NULL);
1113         ASSERT(spa->spa_root_vdev == NULL);
1114         ASSERT(spa->spa_async_zio_root == NULL);
1115         ASSERT(spa->spa_state != POOL_STATE_UNINITIALIZED);
1116 
1117         txg_list_destroy(&spa->spa_vdev_txg_list);
1118 
1119         list_destroy(&spa->spa_config_dirty_list);
1120         list_destroy(&spa->spa_state_dirty_list);
1121 
1122         for (int t = 0; t < ZIO_TYPES; t++) {
1123                 for (int q = 0; q < ZIO_TASKQ_TYPES; q++) {
1124                         spa_taskqs_fini(spa, t, q);
1125                 }
1126         }
1127 
1128         metaslab_class_destroy(spa->spa_normal_class);
1129         spa->spa_normal_class = NULL;
1130 
1131         metaslab_class_destroy(spa->spa_log_class);
1132         spa->spa_log_class = NULL;
1133 
1134         /*
1135          * If this was part of an import or the open otherwise failed, we may
1136          * still have errors left in the queues.  Empty them just in case.
1137          */
1138         spa_errlog_drain(spa);
1139 
1140         avl_destroy(&spa->spa_errlist_scrub);
1141         avl_destroy(&spa->spa_errlist_last);
1142 
1143         spa->spa_state = POOL_STATE_UNINITIALIZED;
1144 
1145         mutex_enter(&spa->spa_proc_lock);
1146         if (spa->spa_proc_state != SPA_PROC_NONE) {
1147                 ASSERT(spa->spa_proc_state == SPA_PROC_ACTIVE);
1148                 spa->spa_proc_state = SPA_PROC_DEACTIVATE;
1149                 cv_broadcast(&spa->spa_proc_cv);
1150                 while (spa->spa_proc_state == SPA_PROC_DEACTIVATE) {
1151                         ASSERT(spa->spa_proc != &p0);
1152                         cv_wait(&spa->spa_proc_cv, &spa->spa_proc_lock);
1153                 }
1154                 ASSERT(spa->spa_proc_state == SPA_PROC_GONE);
1155                 spa->spa_proc_state = SPA_PROC_NONE;
1156         }
1157         ASSERT(spa->spa_proc == &p0);
1158         mutex_exit(&spa->spa_proc_lock);
1159 
1160         /*
1161          * We want to make sure spa_thread() has actually exited the ZFS
1162          * module, so that the module can't be unloaded out from underneath
1163          * it.
1164          */
1165         if (spa->spa_did != 0) {
1166                 thread_join(spa->spa_did);
1167                 spa->spa_did = 0;
1168         }
1169 }
1170 
1171 /*
1172  * Verify a pool configuration, and construct the vdev tree appropriately.  This
1173  * will create all the necessary vdevs in the appropriate layout, with each vdev
1174  * in the CLOSED state.  This will prep the pool before open/creation/import.
1175  * All vdev validation is done by the vdev_alloc() routine.
1176  */
1177 static int
1178 spa_config_parse(spa_t *spa, vdev_t **vdp, nvlist_t *nv, vdev_t *parent,
1179     uint_t id, int atype)
1180 {
1181         nvlist_t **child;
1182         uint_t children;
1183         int error;
1184 
1185         if ((error = vdev_alloc(spa, vdp, nv, parent, id, atype)) != 0)
1186                 return (error);
1187 
1188         if ((*vdp)->vdev_ops->vdev_op_leaf)
1189                 return (0);
1190 
1191         error = nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
1192             &child, &children);
1193 
1194         if (error == ENOENT)
1195                 return (0);
1196 
1197         if (error) {
1198                 vdev_free(*vdp);
1199                 *vdp = NULL;
1200                 return (SET_ERROR(EINVAL));
1201         }
1202 
1203         for (int c = 0; c < children; c++) {
1204                 vdev_t *vd;
1205                 if ((error = spa_config_parse(spa, &vd, child[c], *vdp, c,
1206                     atype)) != 0) {
1207                         vdev_free(*vdp);
1208                         *vdp = NULL;
1209                         return (error);
1210                 }
1211         }
1212 
1213         ASSERT(*vdp != NULL);
1214 
1215         return (0);
1216 }
1217 
1218 /*
1219  * Opposite of spa_load().
1220  */
1221 static void
1222 spa_unload(spa_t *spa)
1223 {
1224         int i;
1225 
1226         ASSERT(MUTEX_HELD(&spa_namespace_lock));
1227 
1228         /*
1229          * Stop async tasks.
1230          */
1231         spa_async_suspend(spa);
1232 
1233         /*
1234          * Stop syncing.
1235          */
1236         if (spa->spa_sync_on) {
1237                 txg_sync_stop(spa->spa_dsl_pool);
1238                 spa->spa_sync_on = B_FALSE;
1239         }
1240 
1241         /*
1242          * Wait for any outstanding async I/O to complete.
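         * There is one async root zio per CPU; wait for each of them and then
         * free the array.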
1243          */
1244         if (spa->spa_async_zio_root != NULL) {
1245                 for (int i = 0; i < max_ncpus; i++)
1246                         (void) zio_wait(spa->spa_async_zio_root[i]);
1247                 kmem_free(spa->spa_async_zio_root, max_ncpus * sizeof (void *));
1248                 spa->spa_async_zio_root = NULL;
1249         }
1250 
1251         bpobj_close(&spa->spa_deferred_bpobj);
1252 
1253         spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
1254 
1255         /*
1256          * Close all vdevs.
1257          */
1258         if (spa->spa_root_vdev)
1259                 vdev_free(spa->spa_root_vdev);
1260         ASSERT(spa->spa_root_vdev == NULL);
1261 
1262         /*
1263          * Close the dsl pool.
1264          */
1265         if (spa->spa_dsl_pool) {
1266                 dsl_pool_close(spa->spa_dsl_pool);
1267                 spa->spa_dsl_pool = NULL;
1268                 spa->spa_meta_objset = NULL;
1269         }
1270 
1271         ddt_unload(spa);
1272 
1274         /*
1275          * Drop and purge level 2 cache
1276          */
1277         spa_l2cache_drop(spa);
1278 
1279         for (i = 0; i < spa->spa_spares.sav_count; i++)
1280                 vdev_free(spa->spa_spares.sav_vdevs[i]);
1281         if (spa->spa_spares.sav_vdevs) {
1282                 kmem_free(spa->spa_spares.sav_vdevs,
1283                     spa->spa_spares.sav_count * sizeof (void *));
1284                 spa->spa_spares.sav_vdevs = NULL;
1285         }
1286         if (spa->spa_spares.sav_config) {
1287                 nvlist_free(spa->spa_spares.sav_config);
1288                 spa->spa_spares.sav_config = NULL;
1289         }
1290         spa->spa_spares.sav_count = 0;
1291 
1292         for (i = 0; i < spa->spa_l2cache.sav_count; i++) {
1293                 vdev_clear_stats(spa->spa_l2cache.sav_vdevs[i]);
1294                 vdev_free(spa->spa_l2cache.sav_vdevs[i]);
1295         }
1296         if (spa->spa_l2cache.sav_vdevs) {
1297                 kmem_free(spa->spa_l2cache.sav_vdevs,
1298                     spa->spa_l2cache.sav_count * sizeof (void *));
1299                 spa->spa_l2cache.sav_vdevs = NULL;
1300         }
1301         if (spa->spa_l2cache.sav_config) {
1302                 nvlist_free(spa->spa_l2cache.sav_config);
1303                 spa->spa_l2cache.sav_config = NULL;
1304         }
1305         spa->spa_l2cache.sav_count = 0;
1306 
1307         spa->spa_async_suspended = 0;
1308 
1309         if (spa->spa_comment != NULL) {
1310                 spa_strfree(spa->spa_comment);
1311                 spa->spa_comment = NULL;
1312         }
1313 
1314         spa_config_exit(spa, SCL_ALL, FTAG);
1315 }
1316 
1317 /*
1318  * Load (or re-load) the current list of vdevs describing the active spares for
1319  * this pool.  When this is called, we have some form of basic information in
1320  * 'spa_spares.sav_config'.  We parse this into vdevs, try to open them, and
1321  * then re-generate a more complete list including status information.
1322  */
1323 static void
1324 spa_load_spares(spa_t *spa)
1325 {
1326         nvlist_t **spares;
1327         uint_t nspares;
1328         int i;
1329         vdev_t *vd, *tvd;
1330 
1331         ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
1332 
1333         /*
1334          * First, close and free any existing spare vdevs.
1335          */
1336         for (i = 0; i < spa->spa_spares.sav_count; i++) {
1337                 vd = spa->spa_spares.sav_vdevs[i];
1338 
                /* Undo the call to spa_spare_add() below */
1340                 if ((tvd = spa_lookup_by_guid(spa, vd->vdev_guid,
1341                     B_FALSE)) != NULL && tvd->vdev_isspare)
1342                         spa_spare_remove(tvd);
1343                 vdev_close(vd);
1344                 vdev_free(vd);
1345         }
1346 
1347         if (spa->spa_spares.sav_vdevs)
1348                 kmem_free(spa->spa_spares.sav_vdevs,
1349                     spa->spa_spares.sav_count * sizeof (void *));
1350 
1351         if (spa->spa_spares.sav_config == NULL)
1352                 nspares = 0;
1353         else
1354                 VERIFY(nvlist_lookup_nvlist_array(spa->spa_spares.sav_config,
1355                     ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);
1356 
1357         spa->spa_spares.sav_count = (int)nspares;
1358         spa->spa_spares.sav_vdevs = NULL;
1359 
1360         if (nspares == 0)
1361                 return;
1362 
1363         /*
1364          * Construct the array of vdevs, opening them to get status in the
 * process.  For each spare, there are potentially two different vdev_t
1366          * structures associated with it: one in the list of spares (used only
1367          * for basic validation purposes) and one in the active vdev
1368          * configuration (if it's spared in).  During this phase we open and
1369          * validate each vdev on the spare list.  If the vdev also exists in the
1370          * active configuration, then we also mark this vdev as an active spare.
1371          */
1372         spa->spa_spares.sav_vdevs = kmem_alloc(nspares * sizeof (void *),
1373             KM_SLEEP);
1374         for (i = 0; i < spa->spa_spares.sav_count; i++) {
1375                 VERIFY(spa_config_parse(spa, &vd, spares[i], NULL, 0,
1376                     VDEV_ALLOC_SPARE) == 0);
1377                 ASSERT(vd != NULL);
1378 
1379                 spa->spa_spares.sav_vdevs[i] = vd;
1380 
1381                 if ((tvd = spa_lookup_by_guid(spa, vd->vdev_guid,
1382                     B_FALSE)) != NULL) {
1383                         if (!tvd->vdev_isspare)
1384                                 spa_spare_add(tvd);
1385 
1386                         /*
1387                          * We only mark the spare active if we were successfully
1388                          * able to load the vdev.  Otherwise, importing a pool
1389                          * with a bad active spare would result in strange
                         * behavior, because multiple pools would think the spare
1391                          * is actively in use.
1392                          *
1393                          * There is a vulnerability here to an equally bizarre
1394                          * circumstance, where a dead active spare is later
1395                          * brought back to life (onlined or otherwise).  Given
1396                          * the rarity of this scenario, and the extra complexity
1397                          * it adds, we ignore the possibility.
1398                          */
1399                         if (!vdev_is_dead(tvd))
1400                                 spa_spare_activate(tvd);
1401                 }
1402 
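                /*
                 * Each spare is its own top-level vdev hanging off the
                 * spares aux list; record that and open it below so the
                 * regenerated config includes its current status.
                 */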
1403                 vd->vdev_top = vd;
1404                 vd->vdev_aux = &spa->spa_spares;
1405 
1406                 if (vdev_open(vd) != 0)
1407                         continue;
1408 
1409                 if (vdev_validate_aux(vd) == 0)
1410                         spa_spare_add(vd);
1411         }
1412 
1413         /*
1414          * Recompute the stashed list of spares, with status information
1415          * this time.
1416          */
1417         VERIFY(nvlist_remove(spa->spa_spares.sav_config, ZPOOL_CONFIG_SPARES,
1418             DATA_TYPE_NVLIST_ARRAY) == 0);
1419 
1420         spares = kmem_alloc(spa->spa_spares.sav_count * sizeof (void *),
1421             KM_SLEEP);
1422         for (i = 0; i < spa->spa_spares.sav_count; i++)
1423                 spares[i] = vdev_config_generate(spa,
1424                     spa->spa_spares.sav_vdevs[i], B_TRUE, VDEV_CONFIG_SPARE);
1425         VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config,
1426             ZPOOL_CONFIG_SPARES, spares, spa->spa_spares.sav_count) == 0);
1427         for (i = 0; i < spa->spa_spares.sav_count; i++)
1428                 nvlist_free(spares[i]);
1429         kmem_free(spares, spa->spa_spares.sav_count * sizeof (void *));
1430 }
1431 
1432 /*
1433  * Load (or re-load) the current list of vdevs describing the active l2cache for
1434  * this pool.  When this is called, we have some form of basic information in
1435  * 'spa_l2cache.sav_config'.  We parse this into vdevs, try to open them, and
1436  * then re-generate a more complete list including status information.
1437  * Devices which are already active have their details maintained, and are
1438  * not re-opened.
1439  */
1440 static void
1441 spa_load_l2cache(spa_t *spa)
1442 {
1443         nvlist_t **l2cache;
1444         uint_t nl2cache;
1445         int i, j, oldnvdevs;
1446         uint64_t guid;
1447         vdev_t *vd, **oldvdevs, **newvdevs;
1448         spa_aux_vdev_t *sav = &spa->spa_l2cache;
1449 
1450         ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
1451 
1452         if (sav->sav_config != NULL) {
1453                 VERIFY(nvlist_lookup_nvlist_array(sav->sav_config,
1454                     ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0);
1455                 newvdevs = kmem_alloc(nl2cache * sizeof (void *), KM_SLEEP);
1456         } else {
1457                 nl2cache = 0;
1458                 newvdevs = NULL;
1459         }
1460 
1461         oldvdevs = sav->sav_vdevs;
1462         oldnvdevs = sav->sav_count;
1463         sav->sav_vdevs = NULL;
1464         sav->sav_count = 0;
1465 
1466         /*
1467          * Process new nvlist of vdevs.
1468          */
1469         for (i = 0; i < nl2cache; i++) {
1470                 VERIFY(nvlist_lookup_uint64(l2cache[i], ZPOOL_CONFIG_GUID,
1471                     &guid) == 0);
1472 
1473                 newvdevs[i] = NULL;
1474                 for (j = 0; j < oldnvdevs; j++) {
1475                         vd = oldvdevs[j];
1476                         if (vd != NULL && guid == vd->vdev_guid) {
1477                                 /*
1478                                  * Retain previous vdev for add/remove ops.
1479                                  */
1480                                 newvdevs[i] = vd;
1481                                 oldvdevs[j] = NULL;
1482                                 break;
1483                         }
1484                 }
1485 
1486                 if (newvdevs[i] == NULL) {
1487                         /*
1488                          * Create new vdev
1489                          */
1490                         VERIFY(spa_config_parse(spa, &vd, l2cache[i], NULL, 0,
1491                             VDEV_ALLOC_L2CACHE) == 0);
1492                         ASSERT(vd != NULL);
1493                         newvdevs[i] = vd;
1494 
1495                         /*
1496                          * Commit this vdev as an l2cache device,
1497                          * even if it fails to open.
1498                          */
1499                         spa_l2cache_add(vd);
1500 
1501                         vd->vdev_top = vd;
1502                         vd->vdev_aux = sav;
1503 
1504                         spa_l2cache_activate(vd);
1505 
1506                         if (vdev_open(vd) != 0)
1507                                 continue;
1508 
1509                         (void) vdev_validate_aux(vd);
1510 
1511                         if (!vdev_is_dead(vd))
1512                                 l2arc_add_vdev(spa, vd);
1513                 }
1514         }
1515 
1516         /*
1517          * Purge vdevs that were dropped
1518          */
1519         for (i = 0; i < oldnvdevs; i++) {
1520                 uint64_t pool;
1521 
1522                 vd = oldvdevs[i];
1523                 if (vd != NULL) {
1524                         ASSERT(vd->vdev_isl2cache);
1525 
1526                         if (spa_l2cache_exists(vd->vdev_guid, &pool) &&
1527                             pool != 0ULL && l2arc_vdev_present(vd))
1528                                 l2arc_remove_vdev(vd);
1529                         vdev_clear_stats(vd);
1530                         vdev_free(vd);
1531                 }
1532         }
1533 
1534         if (oldvdevs)
1535                 kmem_free(oldvdevs, oldnvdevs * sizeof (void *));
1536 
1537         if (sav->sav_config == NULL)
1538                 goto out;
1539 
1540         sav->sav_vdevs = newvdevs;
1541         sav->sav_count = (int)nl2cache;
1542 
1543         /*
1544          * Recompute the stashed list of l2cache devices, with status
1545          * information this time.
1546          */
1547         VERIFY(nvlist_remove(sav->sav_config, ZPOOL_CONFIG_L2CACHE,
1548             DATA_TYPE_NVLIST_ARRAY) == 0);
1549 
1550         l2cache = kmem_alloc(sav->sav_count * sizeof (void *), KM_SLEEP);
1551         for (i = 0; i < sav->sav_count; i++)
1552                 l2cache[i] = vdev_config_generate(spa,
1553                     sav->sav_vdevs[i], B_TRUE, VDEV_CONFIG_L2CACHE);
1554         VERIFY(nvlist_add_nvlist_array(sav->sav_config,
1555             ZPOOL_CONFIG_L2CACHE, l2cache, sav->sav_count) == 0);
1556 out:
1557         for (i = 0; i < sav->sav_count; i++)
1558                 nvlist_free(l2cache[i]);
1559         if (sav->sav_count)
1560                 kmem_free(l2cache, sav->sav_count * sizeof (void *));
1561 }
1562 
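/*
 * Read a packed nvlist from the MOS.  The object's bonus buffer holds the
 * size of the packed data; we read and unpack it into '*value', which the
 * caller must eventually free with nvlist_free().
 */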
1563 static int
1564 load_nvlist(spa_t *spa, uint64_t obj, nvlist_t **value)
1565 {
1566         dmu_buf_t *db;
1567         char *packed = NULL;
1568         size_t nvsize = 0;
1569         int error;
1570         *value = NULL;
1571 
1572         VERIFY(0 == dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db));
1573         nvsize = *(uint64_t *)db->db_data;
1574         dmu_buf_rele(db, FTAG);
1575 
1576         packed = kmem_alloc(nvsize, KM_SLEEP);
1577         error = dmu_read(spa->spa_meta_objset, obj, 0, nvsize, packed,
1578             DMU_READ_PREFETCH);
1579         if (error == 0)
1580                 error = nvlist_unpack(packed, nvsize, value, 0);
1581         kmem_free(packed, nvsize);
1582 
1583         return (error);
1584 }
1585 
1586 /*
1587  * Recursively check the given vdev tree; post a sysevent for each leaf vdev
1588  * that could not be opened, so the autoreplace code knows it was removed.
1589  */
1590 static void
1591 spa_check_removed(vdev_t *vd)
1592 {
1593         for (int c = 0; c < vd->vdev_children; c++)
1594                 spa_check_removed(vd->vdev_child[c]);
1595 
1596         if (vd->vdev_ops->vdev_op_leaf && vdev_is_dead(vd) &&
1597             !vd->vdev_ishole) {
1598                 zfs_post_autoreplace(vd->vdev_spa, vd);
1599                 spa_event_notify(vd->vdev_spa, vd, ESC_ZFS_VDEV_CHECK);
1600         }
1601 }
1602 
1603 /*
1604  * Validate the current config against the MOS config
1605  */
1606 static boolean_t
1607 spa_config_valid(spa_t *spa, nvlist_t *config)
1608 {
1609         vdev_t *mrvd, *rvd = spa->spa_root_vdev;
1610         nvlist_t *nv;
1611 
1612         VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nv) == 0);
1613 
1614         spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
1615         VERIFY(spa_config_parse(spa, &mrvd, nv, NULL, 0, VDEV_ALLOC_LOAD) == 0);
1616 
1617         ASSERT3U(rvd->vdev_children, ==, mrvd->vdev_children);
1618 
1619         /*
1620          * If we're doing a normal import, then build up any additional
1621          * diagnostic information about missing devices in this config.
1622          * We'll pass this up to the user for further processing.
1623          */
1624         if (!(spa->spa_import_flags & ZFS_IMPORT_MISSING_LOG)) {
1625                 nvlist_t **child, *nv;
1626                 uint64_t idx = 0;
1627 
1628                 child = kmem_alloc(rvd->vdev_children * sizeof (nvlist_t **),
1629                     KM_SLEEP);
1630                 VERIFY(nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) == 0);
1631 
1632                 for (int c = 0; c < rvd->vdev_children; c++) {
1633                         vdev_t *tvd = rvd->vdev_child[c];
1634                         vdev_t *mtvd  = mrvd->vdev_child[c];
1635 
1636                         if (tvd->vdev_ops == &vdev_missing_ops &&
1637                             mtvd->vdev_ops != &vdev_missing_ops &&
1638                             mtvd->vdev_islog)
1639                                 child[idx++] = vdev_config_generate(spa, mtvd,
1640                                     B_FALSE, 0);
1641                 }
1642 
1643                 if (idx) {
1644                         VERIFY(nvlist_add_nvlist_array(nv,
1645                             ZPOOL_CONFIG_CHILDREN, child, idx) == 0);
1646                         VERIFY(nvlist_add_nvlist(spa->spa_load_info,
1647                             ZPOOL_CONFIG_MISSING_DEVICES, nv) == 0);
1648 
1649                         for (int i = 0; i < idx; i++)
1650                                 nvlist_free(child[i]);
1651                 }
1652                 nvlist_free(nv);
1653                 kmem_free(child, rvd->vdev_children * sizeof (nvlist_t **));
1654         }
1655 
1656         /*
1657          * Compare the root vdev tree with the information we have
1658          * from the MOS config (mrvd). Check each top-level vdev
1659          * with the corresponding MOS config top-level (mtvd).
1660          */
1661         for (int c = 0; c < rvd->vdev_children; c++) {
1662                 vdev_t *tvd = rvd->vdev_child[c];
1663                 vdev_t *mtvd  = mrvd->vdev_child[c];
1664 
1665                 /*
1666                  * Resolve any "missing" vdevs in the current configuration.
1667                  * If we find that the MOS config has more accurate information
1668                  * about the top-level vdev then use that vdev instead.
1669                  */
1670                 if (tvd->vdev_ops == &vdev_missing_ops &&
1671                     mtvd->vdev_ops != &vdev_missing_ops) {
1672 
1673                         if (!(spa->spa_import_flags & ZFS_IMPORT_MISSING_LOG))
1674                                 continue;
1675 
1676                         /*
1677                          * Device specific actions.
1678                          */
1679                         if (mtvd->vdev_islog) {
1680                                 spa_set_log_state(spa, SPA_LOG_CLEAR);
1681                         } else {
1682                                 /*
1683                                  * XXX - once we have 'readonly' pool
1684                                  * support we should be able to handle
1685                                  * missing data devices by transitioning
1686                                  * the pool to readonly.
1687                                  */
1688                                 continue;
1689                         }
1690 
1691                         /*
1692                          * Swap the missing vdev with the data we were
1693                          * able to obtain from the MOS config.
1694                          */
1695                         vdev_remove_child(rvd, tvd);
1696                         vdev_remove_child(mrvd, mtvd);
1697 
1698                         vdev_add_child(rvd, mtvd);
1699                         vdev_add_child(mrvd, tvd);
1700 
1701                         spa_config_exit(spa, SCL_ALL, FTAG);
1702                         vdev_load(mtvd);
1703                         spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
1704 
1705                         vdev_reopen(rvd);
1706                 } else if (mtvd->vdev_islog) {
1707                         /*
1708                          * Load the slog device's state from the MOS config
1709                          * since it's possible that the label does not
1710                          * contain the most up-to-date information.
1711                          */
1712                         vdev_load_log_state(tvd, mtvd);
1713                         vdev_reopen(tvd);
1714                 }
1715         }
1716         vdev_free(mrvd);
1717         spa_config_exit(spa, SCL_ALL, FTAG);
1718 
1719         /*
1720          * Ensure we were able to validate the config.
1721          */
1722         return (rvd->vdev_guid_sum == spa->spa_uberblock.ub_guid_sum);
1723 }
1724 
1725 /*
1726  * Check for missing log devices
1727  */
1728 static boolean_t
1729 spa_check_logs(spa_t *spa)
1730 {
1731         boolean_t rv = B_FALSE;
1732 
1733         switch (spa->spa_log_state) {
1734         case SPA_LOG_MISSING:
1735                 /* need to recheck in case slog has been restored */
1736         case SPA_LOG_UNKNOWN:
1737                 rv = (dmu_objset_find(spa->spa_name, zil_check_log_chain,
1738                     NULL, DS_FIND_CHILDREN) != 0);
1739                 if (rv)
1740                         spa_set_log_state(spa, SPA_LOG_MISSING);
1741                 break;
1742         }
1743         return (rv);
1744 }
1745 
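/*
 * Stop allocating from the log devices by passivating their metaslab groups.
 * Returns B_TRUE if at least one slog device was found.
 */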
1746 static boolean_t
1747 spa_passivate_log(spa_t *spa)
1748 {
1749         vdev_t *rvd = spa->spa_root_vdev;
1750         boolean_t slog_found = B_FALSE;
1751 
1752         ASSERT(spa_config_held(spa, SCL_ALLOC, RW_WRITER));
1753 
1754         if (!spa_has_slogs(spa))
1755                 return (B_FALSE);
1756 
1757         for (int c = 0; c < rvd->vdev_children; c++) {
1758                 vdev_t *tvd = rvd->vdev_child[c];
1759                 metaslab_group_t *mg = tvd->vdev_mg;
1760 
1761                 if (tvd->vdev_islog) {
1762                         metaslab_group_passivate(mg);
1763                         slog_found = B_TRUE;
1764                 }
1765         }
1766 
1767         return (slog_found);
1768 }
1769 
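/*
 * Resume allocations from the log devices by reactivating their metaslab
 * groups, undoing spa_passivate_log().
 */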
1770 static void
1771 spa_activate_log(spa_t *spa)
1772 {
1773         vdev_t *rvd = spa->spa_root_vdev;
1774 
1775         ASSERT(spa_config_held(spa, SCL_ALLOC, RW_WRITER));
1776 
1777         for (int c = 0; c < rvd->vdev_children; c++) {
1778                 vdev_t *tvd = rvd->vdev_child[c];
1779                 metaslab_group_t *mg = tvd->vdev_mg;
1780 
1781                 if (tvd->vdev_islog)
1782                         metaslab_group_activate(mg);
1783         }
1784 }
1785 
1786 int
1787 spa_offline_log(spa_t *spa)
1788 {
1789         int error;
1790 
1791         error = dmu_objset_find(spa_name(spa), zil_vdev_offline,
1792             NULL, DS_FIND_CHILDREN);
1793         if (error == 0) {
1794                 /*
1795                  * We successfully offlined the log device, sync out the
1796                  * current txg so that the "stubby" block can be removed
1797                  * by zil_sync().
1798                  */
1799                 txg_wait_synced(spa->spa_dsl_pool, 0);
1800         }
1801         return (error);
1802 }
1803 
1804 static void
1805 spa_aux_check_removed(spa_aux_vdev_t *sav)
1806 {
1807         for (int i = 0; i < sav->sav_count; i++)
1808                 spa_check_removed(sav->sav_vdevs[i]);
1809 }
1810 
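/*
 * Called from the i/o done path of ZIL claim reads; track the highest birth
 * txg of any claimed log block so spa_load_impl() knows how far to sync.
 */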
1811 void
1812 spa_claim_notify(zio_t *zio)
1813 {
1814         spa_t *spa = zio->io_spa;
1815 
1816         if (zio->io_error)
1817                 return;
1818 
1819         mutex_enter(&spa->spa_props_lock);       /* any mutex will do */
1820         if (spa->spa_claim_max_txg < zio->io_bp->blk_birth)
1821                 spa->spa_claim_max_txg = zio->io_bp->blk_birth;
1822         mutex_exit(&spa->spa_props_lock);
1823 }
1824 
1825 typedef struct spa_load_error {
1826         uint64_t        sle_meta_count;
1827         uint64_t        sle_data_count;
1828 } spa_load_error_t;
1829 
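/*
 * Completion callback for the verification reads issued by
 * spa_load_verify_cb(): classify the error as metadata or data, free the
 * buffer, and release an inflight slot for the throttle.
 */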
1830 static void
1831 spa_load_verify_done(zio_t *zio)
1832 {
1833         blkptr_t *bp = zio->io_bp;
1834         spa_load_error_t *sle = zio->io_private;
1835         dmu_object_type_t type = BP_GET_TYPE(bp);
1836         int error = zio->io_error;
1837         spa_t *spa = zio->io_spa;
1838 
1839         if (error) {
1840                 if ((BP_GET_LEVEL(bp) != 0 || DMU_OT_IS_METADATA(type)) &&
1841                     type != DMU_OT_INTENT_LOG)
1842                         atomic_inc_64(&sle->sle_meta_count);
1843                 else
1844                         atomic_inc_64(&sle->sle_data_count);
1845         }
1846         zio_data_buf_free(zio->io_data, zio->io_size);
1847 
1848         mutex_enter(&spa->spa_scrub_lock);
1849         spa->spa_scrub_inflight--;
1850         cv_broadcast(&spa->spa_scrub_io_cv);
1851         mutex_exit(&spa->spa_scrub_lock);
1852 }
1853 
1854 /*
1855  * Maximum number of concurrent scrub i/os to create while verifying
1856  * a pool during import.
1857  */
1858 int spa_load_verify_maxinflight = 10000;
1859 boolean_t spa_load_verify_metadata = B_TRUE;
1860 boolean_t spa_load_verify_data = B_TRUE;
1861 
1862 /*ARGSUSED*/
1863 static int
1864 spa_load_verify_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
1865     const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg)
1866 {
1867         if (BP_IS_HOLE(bp) || BP_IS_EMBEDDED(bp))
1868                 return (0);
1869         /*
1870          * Note: normally this routine is not called at all if
1871          * spa_load_verify_metadata is not set, since traversal never starts.
1872          * The check below handles the flag being cleared mid-traversal.
1873          */
1874         if (!spa_load_verify_metadata)
1875                 return (0);
1876         if (BP_GET_BUFC_TYPE(bp) == ARC_BUFC_DATA && !spa_load_verify_data)
1877                 return (0);
1878 
1879         zio_t *rio = arg;
1880         size_t size = BP_GET_PSIZE(bp);
1881         void *data = zio_data_buf_alloc(size);
1882 
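        /*
         * Throttle the number of inflight verification i/os;
         * spa_load_verify_done() decrements the count and signals the
         * cv as each read completes.
         */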
1883         mutex_enter(&spa->spa_scrub_lock);
1884         while (spa->spa_scrub_inflight >= spa_load_verify_maxinflight)
1885                 cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock);
1886         spa->spa_scrub_inflight++;
1887         mutex_exit(&spa->spa_scrub_lock);
1888 
1889         zio_nowait(zio_read(rio, spa, bp, data, size,
1890             spa_load_verify_done, rio->io_private, ZIO_PRIORITY_SCRUB,
1891             ZIO_FLAG_SPECULATIVE | ZIO_FLAG_CANFAIL |
1892             ZIO_FLAG_SCRUB | ZIO_FLAG_RAW, zb));
1893         return (0);
1894 }
1895 
1896 static int
1897 spa_load_verify(spa_t *spa)
1898 {
1899         zio_t *rio;
1900         spa_load_error_t sle = { 0 };
1901         zpool_rewind_policy_t policy;
1902         boolean_t verify_ok = B_FALSE;
1903         int error = 0;
1904 
1905         zpool_get_rewind_policy(spa->spa_config, &policy);
1906 
1907         if (policy.zrp_request & ZPOOL_NEVER_REWIND)
1908                 return (0);
1909 
1910         rio = zio_root(spa, NULL, &sle,
1911             ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE);
1912 
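        /*
         * Traverse the pool, issuing a speculative scrub read for each block
         * visited; spa_load_verify_done() tallies any errors into 'sle',
         * which is weighed against the rewind policy below.
         */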
1913         if (spa_load_verify_metadata) {
1914                 error = traverse_pool(spa, spa->spa_verify_min_txg,
1915                     TRAVERSE_PRE | TRAVERSE_PREFETCH_METADATA,
1916                     spa_load_verify_cb, rio);
1917         }
1918 
1919         (void) zio_wait(rio);
1920 
1921         spa->spa_load_meta_errors = sle.sle_meta_count;
1922         spa->spa_load_data_errors = sle.sle_data_count;
1923 
1924         if (!error && sle.sle_meta_count <= policy.zrp_maxmeta &&
1925             sle.sle_data_count <= policy.zrp_maxdata) {
1926                 int64_t loss = 0;
1927 
1928                 verify_ok = B_TRUE;
1929                 spa->spa_load_txg = spa->spa_uberblock.ub_txg;
1930                 spa->spa_load_txg_ts = spa->spa_uberblock.ub_timestamp;
1931 
1932                 loss = spa->spa_last_ubsync_txg_ts - spa->spa_load_txg_ts;
1933                 VERIFY(nvlist_add_uint64(spa->spa_load_info,
1934                     ZPOOL_CONFIG_LOAD_TIME, spa->spa_load_txg_ts) == 0);
1935                 VERIFY(nvlist_add_int64(spa->spa_load_info,
1936                     ZPOOL_CONFIG_REWIND_TIME, loss) == 0);
1937                 VERIFY(nvlist_add_uint64(spa->spa_load_info,
1938                     ZPOOL_CONFIG_LOAD_DATA_ERRORS, sle.sle_data_count) == 0);
1939         } else {
1940                 spa->spa_load_max_txg = spa->spa_uberblock.ub_txg;
1941         }
1942 
1943         if (error) {
1944                 if (error != ENXIO && error != EIO)
1945                         error = SET_ERROR(EIO);
1946                 return (error);
1947         }
1948 
1949         return (verify_ok ? 0 : EIO);
1950 }
1951 
1952 /*
1953  * Find a value in the pool props object.
1954  */
1955 static void
1956 spa_prop_find(spa_t *spa, zpool_prop_t prop, uint64_t *val)
1957 {
1958         (void) zap_lookup(spa->spa_meta_objset, spa->spa_pool_props_object,
1959             zpool_prop_to_name(prop), sizeof (uint64_t), 1, val);
1960 }
1961 
1962 /*
1963  * Find a value in the pool directory object.
1964  */
1965 static int
1966 spa_dir_prop(spa_t *spa, const char *name, uint64_t *val)
1967 {
1968         return (zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
1969             name, sizeof (uint64_t), 1, val));
1970 }
1971 
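/*
 * Mark the vdev as unopenable for the given reason and hand the error back
 * to the caller; used by spa_load_impl() to bail out of a failed load.
 */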
1972 static int
1973 spa_vdev_err(vdev_t *vdev, vdev_aux_t aux, int err)
1974 {
1975         vdev_set_state(vdev, B_TRUE, VDEV_STATE_CANT_OPEN, aux);
1976         return (err);
1977 }
1978 
1979 /*
1980  * Fix up config after a partly-completed split.  This is done with the
1981  * ZPOOL_CONFIG_SPLIT nvlist.  Both the splitting pool and the split-off
1982  * pool have that entry in their config, but only the splitting one contains
1983  * a list of all the guids of the vdevs that are being split off.
1984  *
1985  * This function determines what to do with that list: either rejoin
1986  * all the disks to the pool, or complete the splitting process.  To attempt
1987  * the rejoin, each disk that is offlined is marked online again, and
1988  * we do a reopen() call.  If the vdev label for every disk that was
1989  * marked online indicates it was successfully split off (VDEV_AUX_SPLIT_POOL)
1990  * then we call vdev_split() on each disk, and complete the split.
1991  *
1992  * Otherwise we leave the config alone, with all the vdevs in place in
1993  * the original pool.
1994  */
1995 static void
1996 spa_try_repair(spa_t *spa, nvlist_t *config)
1997 {
1998         uint_t extracted;
1999         uint64_t *glist;
2000         uint_t i, gcount;
2001         nvlist_t *nvl;
2002         vdev_t **vd;
2003         boolean_t attempt_reopen;
2004 
2005         if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_SPLIT, &nvl) != 0)
2006                 return;
2007 
2008         /* check that the config is complete */
2009         if (nvlist_lookup_uint64_array(nvl, ZPOOL_CONFIG_SPLIT_LIST,
2010             &glist, &gcount) != 0)
2011                 return;
2012 
2013         vd = kmem_zalloc(gcount * sizeof (vdev_t *), KM_SLEEP);
2014 
2015         /* attempt to online all the vdevs & validate */
2016         attempt_reopen = B_TRUE;
2017         for (i = 0; i < gcount; i++) {
2018                 if (glist[i] == 0)      /* vdev is hole */
2019                         continue;
2020 
2021                 vd[i] = spa_lookup_by_guid(spa, glist[i], B_FALSE);
2022                 if (vd[i] == NULL) {
2023                         /*
2024                          * Don't bother attempting to reopen the disks;
2025                          * just do the split.
2026                          */
2027                         attempt_reopen = B_FALSE;
2028                 } else {
2029                         /* attempt to re-online it */
2030                         vd[i]->vdev_offline = B_FALSE;
2031                 }
2032         }
2033 
2034         if (attempt_reopen) {
2035                 vdev_reopen(spa->spa_root_vdev);
2036 
2037                 /* check each device to see what state it's in */
2038                 for (extracted = 0, i = 0; i < gcount; i++) {
2039                         if (vd[i] != NULL &&
2040                             vd[i]->vdev_stat.vs_aux != VDEV_AUX_SPLIT_POOL)
2041                                 break;
2042                         ++extracted;
2043                 }
2044         }
2045 
2046         /*
2047          * If every disk has been moved to the new pool, or if we never
2048          * even attempted to look at them, then we split them off for
2049          * good.
2050          */
2051         if (!attempt_reopen || gcount == extracted) {
2052                 for (i = 0; i < gcount; i++)
2053                         if (vd[i] != NULL)
2054                                 vdev_split(vd[i]);
2055                 vdev_reopen(spa->spa_root_vdev);
2056         }
2057 
2058         kmem_free(vd, gcount * sizeof (vdev_t *));
2059 }
2060 
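/*
 * First stage of loading a pool: pull identifying information (guid, comment,
 * version, txg) out of the supplied config, then hand off to spa_load_impl()
 * and translate the result into the pool's load state and an ereport.
 */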
2061 static int
2062 spa_load(spa_t *spa, spa_load_state_t state, spa_import_type_t type,
2063     boolean_t mosconfig)
2064 {
2065         nvlist_t *config = spa->spa_config;
2066         char *ereport = FM_EREPORT_ZFS_POOL;
2067         char *comment;
2068         int error;
2069         uint64_t pool_guid;
2070         nvlist_t *nvl;
2071 
2072         if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, &pool_guid))
2073                 return (SET_ERROR(EINVAL));
2074 
2075         ASSERT(spa->spa_comment == NULL);
2076         if (nvlist_lookup_string(config, ZPOOL_CONFIG_COMMENT, &comment) == 0)
2077                 spa->spa_comment = spa_strdup(comment);
2078 
2079         /*
2080          * Versioning wasn't explicitly added to the label until later, so if
2081          * it's not present treat it as the initial version.
2082          */
2083         if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
2084             &spa->spa_ubsync.ub_version) != 0)
2085                 spa->spa_ubsync.ub_version = SPA_VERSION_INITIAL;
2086 
2087         (void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG,
2088             &spa->spa_config_txg);
2089 
2090         if ((state == SPA_LOAD_IMPORT || state == SPA_LOAD_TRYIMPORT) &&
2091             spa_guid_exists(pool_guid, 0)) {
2092                 error = SET_ERROR(EEXIST);
2093         } else {
2094                 spa->spa_config_guid = pool_guid;
2095 
2096                 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_SPLIT,
2097                     &nvl) == 0) {
2098                         VERIFY(nvlist_dup(nvl, &spa->spa_config_splitting,
2099                             KM_SLEEP) == 0);
2100                 }
2101 
2102                 nvlist_free(spa->spa_load_info);
2103                 spa->spa_load_info = fnvlist_alloc();
2104 
2105                 gethrestime(&spa->spa_loaded_ts);
2106                 error = spa_load_impl(spa, pool_guid, config, state, type,
2107                     mosconfig, &ereport);
2108         }
2109 
2110         spa->spa_minref = refcount_count(&spa->spa_refcount);
2111         if (error) {
2112                 if (error != EEXIST) {
2113                         spa->spa_loaded_ts.tv_sec = 0;
2114                         spa->spa_loaded_ts.tv_nsec = 0;
2115                 }
2116                 if (error != EBADF) {
2117                         zfs_ereport_post(ereport, spa, NULL, NULL, 0, 0);
2118                 }
2119         }
2120         spa->spa_load_state = error ? SPA_LOAD_ERROR : SPA_LOAD_NONE;
2121         spa->spa_ena = 0;
2122 
2123         return (error);
2124 }
2125 
2126 /*
2127  * Load an existing storage pool, using the pool's builtin spa_config as a
2128  * source of configuration information.
2129  */
2130 static int
2131 spa_load_impl(spa_t *spa, uint64_t pool_guid, nvlist_t *config,
2132     spa_load_state_t state, spa_import_type_t type, boolean_t mosconfig,
2133     char **ereport)
2134 {
2135         int error = 0;
2136         nvlist_t *nvroot = NULL;
2137         nvlist_t *label;
2138         vdev_t *rvd;
2139         uberblock_t *ub = &spa->spa_uberblock;
2140         uint64_t children, config_cache_txg = spa->spa_config_txg;
2141         int orig_mode = spa->spa_mode;
2142         int parse;
2143         uint64_t obj;
2144         boolean_t missing_feat_write = B_FALSE;
2145 
2146         /*
2147          * If this is an untrusted config, access the pool in read-only mode.
2148          * This prevents things like resilvering recently removed devices.
2149          */
2150         if (!mosconfig)
2151                 spa->spa_mode = FREAD;
2152 
2153         ASSERT(MUTEX_HELD(&spa_namespace_lock));
2154 
2155         spa->spa_load_state = state;
2156 
2157         if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nvroot))
2158                 return (SET_ERROR(EINVAL));
2159 
2160         parse = (type == SPA_IMPORT_EXISTING ?
2161             VDEV_ALLOC_LOAD : VDEV_ALLOC_SPLIT);
2162 
2163         /*
2164          * Create "The Godfather" zio to hold all async IOs
2165          */
2166         spa->spa_async_zio_root = kmem_alloc(max_ncpus * sizeof (void *),
2167             KM_SLEEP);
2168         for (int i = 0; i < max_ncpus; i++) {
2169                 spa->spa_async_zio_root[i] = zio_root(spa, NULL, NULL,
2170                     ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE |
2171                     ZIO_FLAG_GODFATHER);
2172         }
2173 
2174         /*
2175          * Parse the configuration into a vdev tree.  We explicitly set the
2176          * value that will be returned by spa_version() since parsing the
2177          * configuration requires knowing the version number.
2178          */
2179         spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
2180         error = spa_config_parse(spa, &rvd, nvroot, NULL, 0, parse);
2181         spa_config_exit(spa, SCL_ALL, FTAG);
2182 
2183         if (error != 0)
2184                 return (error);
2185 
2186         ASSERT(spa->spa_root_vdev == rvd);
2187 
2188         if (type != SPA_IMPORT_ASSEMBLE) {
2189                 ASSERT(spa_guid(spa) == pool_guid);
2190         }
2191 
2192         /*
2193          * Try to open all vdevs, loading each label in the process.
2194          */
2195         spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
2196         error = vdev_open(rvd);
2197         spa_config_exit(spa, SCL_ALL, FTAG);
2198         if (error != 0)
2199                 return (error);
2200 
2201         /*
2202          * We need to validate the vdev labels against the configuration that
2203          * we have in hand, which is dependent on the setting of mosconfig. If
2204          * mosconfig is true then we're validating the vdev labels based on
2205          * that config.  Otherwise, we're validating against the cached config
2206          * (zpool.cache) that was read when we loaded the zfs module, and then
2207          * later we will recursively call spa_load() and validate against
2208          * the vdev config.
2209          *
2210          * If we're assembling a new pool that's been split off from an
2211          * existing pool, the labels haven't yet been updated so we skip
2212          * validation for now.
2213          */
2214         if (type != SPA_IMPORT_ASSEMBLE) {
2215                 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
2216                 error = vdev_validate(rvd, mosconfig);
2217                 spa_config_exit(spa, SCL_ALL, FTAG);
2218 
2219                 if (error != 0)
2220                         return (error);
2221 
2222                 if (rvd->vdev_state <= VDEV_STATE_CANT_OPEN)
2223                         return (SET_ERROR(ENXIO));
2224         }
2225 
2226         /*
2227          * Find the best uberblock.
2228          */
2229         vdev_uberblock_load(rvd, ub, &label);
2230 
2231         /*
2232          * If we weren't able to find a single valid uberblock, return failure.
2233          */
2234         if (ub->ub_txg == 0) {
2235                 nvlist_free(label);
2236                 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, ENXIO));
2237         }
2238 
2239         /*
2240          * If the pool has an unsupported version we can't open it.
2241          */
2242         if (!SPA_VERSION_IS_SUPPORTED(ub->ub_version)) {
2243                 nvlist_free(label);
2244                 return (spa_vdev_err(rvd, VDEV_AUX_VERSION_NEWER, ENOTSUP));
2245         }
2246 
2247         if (ub->ub_version >= SPA_VERSION_FEATURES) {
2248                 nvlist_t *features;
2249 
2250                 /*
2251                  * If we weren't able to find what's necessary for reading the
2252                  * MOS in the label, return failure.
2253                  */
2254                 if (label == NULL || nvlist_lookup_nvlist(label,
2255                     ZPOOL_CONFIG_FEATURES_FOR_READ, &features) != 0) {
2256                         nvlist_free(label);
2257                         return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA,
2258                             ENXIO));
2259                 }
2260 
2261                 /*
2262                  * Update our in-core representation with the definitive values
2263                  * from the label.
2264                  */
2265                 nvlist_free(spa->spa_label_features);
2266                 VERIFY(nvlist_dup(features, &spa->spa_label_features, 0) == 0);
2267         }
2268 
2269         nvlist_free(label);
2270 
2271         /*
2272          * Look through entries in the label nvlist's features_for_read. If
2273          * there is a feature listed there which we don't understand then we
2274          * cannot open a pool.
2275          */
2276         if (ub->ub_version >= SPA_VERSION_FEATURES) {
2277                 nvlist_t *unsup_feat;
2278 
2279                 VERIFY(nvlist_alloc(&unsup_feat, NV_UNIQUE_NAME, KM_SLEEP) ==
2280                     0);
2281 
2282                 for (nvpair_t *nvp = nvlist_next_nvpair(spa->spa_label_features,
2283                     NULL); nvp != NULL;
2284                     nvp = nvlist_next_nvpair(spa->spa_label_features, nvp)) {
2285                         if (!zfeature_is_supported(nvpair_name(nvp))) {
2286                                 VERIFY(nvlist_add_string(unsup_feat,
2287                                     nvpair_name(nvp), "") == 0);
2288                         }
2289                 }
2290 
2291                 if (!nvlist_empty(unsup_feat)) {
2292                         VERIFY(nvlist_add_nvlist(spa->spa_load_info,
2293                             ZPOOL_CONFIG_UNSUP_FEAT, unsup_feat) == 0);
2294                         nvlist_free(unsup_feat);
2295                         return (spa_vdev_err(rvd, VDEV_AUX_UNSUP_FEAT,
2296                             ENOTSUP));
2297                 }
2298 
2299                 nvlist_free(unsup_feat);
2300         }
2301 
2302         /*
2303          * If the vdev guid sum doesn't match the uberblock, we have an
2304          * incomplete configuration.  We first check to see if the pool
2305          * is aware of the complete config (i.e. ZPOOL_CONFIG_VDEV_CHILDREN).
2306          * If it is, defer the vdev_guid_sum check till later so we
2307          * can handle missing vdevs.
2308          */
2309         if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_VDEV_CHILDREN,
2310             &children) != 0 && mosconfig && type != SPA_IMPORT_ASSEMBLE &&
2311             rvd->vdev_guid_sum != ub->ub_guid_sum)
2312                 return (spa_vdev_err(rvd, VDEV_AUX_BAD_GUID_SUM, ENXIO));
2313 
2314         if (type != SPA_IMPORT_ASSEMBLE && spa->spa_config_splitting) {
2315                 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
2316                 spa_try_repair(spa, config);
2317                 spa_config_exit(spa, SCL_ALL, FTAG);
2318                 nvlist_free(spa->spa_config_splitting);
2319                 spa->spa_config_splitting = NULL;
2320         }
2321 
2322         /*
2323          * Initialize internal SPA structures.
2324          */
2325         spa->spa_state = POOL_STATE_ACTIVE;
2326         spa->spa_ubsync = spa->spa_uberblock;
2327         spa->spa_verify_min_txg = spa->spa_extreme_rewind ?
2328             TXG_INITIAL - 1 : spa_last_synced_txg(spa) - TXG_DEFER_SIZE - 1;
2329         spa->spa_first_txg = spa->spa_last_ubsync_txg ?
2330             spa->spa_last_ubsync_txg : spa_last_synced_txg(spa) + 1;
2331         spa->spa_claim_max_txg = spa->spa_first_txg;
2332         spa->spa_prev_software_version = ub->ub_software_version;
2333 
2334         error = dsl_pool_init(spa, spa->spa_first_txg, &spa->spa_dsl_pool);
2335         if (error)
2336                 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2337         spa->spa_meta_objset = spa->spa_dsl_pool->dp_meta_objset;
2338 
2339         if (spa_dir_prop(spa, DMU_POOL_CONFIG, &spa->spa_config_object) != 0)
2340                 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2341 
2342         if (spa_version(spa) >= SPA_VERSION_FEATURES) {
2343                 boolean_t missing_feat_read = B_FALSE;
2344                 nvlist_t *unsup_feat, *enabled_feat;
2345 
2346                 if (spa_dir_prop(spa, DMU_POOL_FEATURES_FOR_READ,
2347                     &spa->spa_feat_for_read_obj) != 0) {
2348                         return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2349                 }
2350 
2351                 if (spa_dir_prop(spa, DMU_POOL_FEATURES_FOR_WRITE,
2352                     &spa->spa_feat_for_write_obj) != 0) {
2353                         return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2354                 }
2355 
2356                 if (spa_dir_prop(spa, DMU_POOL_FEATURE_DESCRIPTIONS,
2357                     &spa->spa_feat_desc_obj) != 0) {
2358                         return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2359                 }
2360 
2361                 enabled_feat = fnvlist_alloc();
2362                 unsup_feat = fnvlist_alloc();
2363 
2364                 if (!spa_features_check(spa, B_FALSE,
2365                     unsup_feat, enabled_feat))
2366                         missing_feat_read = B_TRUE;
2367 
2368                 if (spa_writeable(spa) || state == SPA_LOAD_TRYIMPORT) {
2369                         if (!spa_features_check(spa, B_TRUE,
2370                             unsup_feat, enabled_feat)) {
2371                                 missing_feat_write = B_TRUE;
2372                         }
2373                 }
2374 
2375                 fnvlist_add_nvlist(spa->spa_load_info,
2376                     ZPOOL_CONFIG_ENABLED_FEAT, enabled_feat);
2377 
2378                 if (!nvlist_empty(unsup_feat)) {
2379                         fnvlist_add_nvlist(spa->spa_load_info,
2380                             ZPOOL_CONFIG_UNSUP_FEAT, unsup_feat);
2381                 }
2382 
2383                 fnvlist_free(enabled_feat);
2384                 fnvlist_free(unsup_feat);
2385 
2386                 if (!missing_feat_read) {
2387                         fnvlist_add_boolean(spa->spa_load_info,
2388                             ZPOOL_CONFIG_CAN_RDONLY);
2389                 }
2390 
2391                 /*
2392                  * If the state is SPA_LOAD_TRYIMPORT, our objective is
2393                  * twofold: to determine whether the pool is available for
2394                  * import in read-write mode and (if it is not) whether the
2395                  * pool is available for import in read-only mode. If the pool
2396                  * is available for import in read-write mode, it is displayed
2397                  * as available in userland; if it is not available for import
2398                  * in read-only mode, it is displayed as unavailable in
2399                  * userland. If the pool is available for import in read-only
2400                  * mode but not read-write mode, it is displayed as unavailable
2401                  * in userland with a special note that the pool is actually
2402                  * available for open in read-only mode.
2403                  *
2404                  * As a result, if the state is SPA_LOAD_TRYIMPORT and we are
2405                  * missing a feature for write, we must first determine whether
2406                  * the pool can be opened read-only before returning to
2407                  * userland in order to know whether to display the
2408                  * abovementioned note.
2409                  */
2410                 if (missing_feat_read || (missing_feat_write &&
2411                     spa_writeable(spa))) {
2412                         return (spa_vdev_err(rvd, VDEV_AUX_UNSUP_FEAT,
2413                             ENOTSUP));
2414                 }
2415 
2416                 /*
2417                  * Load refcounts for ZFS features from disk into an in-memory
2418                  * cache during SPA initialization.
2419                  */
2420                 for (spa_feature_t i = 0; i < SPA_FEATURES; i++) {
2421                         uint64_t refcount;
2422 
2423                         error = feature_get_refcount_from_disk(spa,
2424                             &spa_feature_table[i], &refcount);
2425                         if (error == 0) {
2426                                 spa->spa_feat_refcount_cache[i] = refcount;
2427                         } else if (error == ENOTSUP) {
2428                                 spa->spa_feat_refcount_cache[i] =
2429                                     SPA_FEATURE_DISABLED;
2430                         } else {
2431                                 return (spa_vdev_err(rvd,
2432                                     VDEV_AUX_CORRUPT_DATA, EIO));
2433                         }
2434                 }
2435         }
2436 
2437         if (spa_feature_is_active(spa, SPA_FEATURE_ENABLED_TXG)) {
2438                 if (spa_dir_prop(spa, DMU_POOL_FEATURE_ENABLED_TXG,
2439                     &spa->spa_feat_enabled_txg_obj) != 0)
2440                         return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2441         }
2442 
2443         spa->spa_is_initializing = B_TRUE;
2444         error = dsl_pool_open(spa->spa_dsl_pool);
2445         spa->spa_is_initializing = B_FALSE;
2446         if (error != 0)
2447                 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2448 
2449         if (!mosconfig) {
2450                 uint64_t hostid;
2451                 nvlist_t *policy = NULL, *nvconfig;
2452 
2453                 if (load_nvlist(spa, spa->spa_config_object, &nvconfig) != 0)
2454                         return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2455 
2456                 if (!spa_is_root(spa) && nvlist_lookup_uint64(nvconfig,
2457                     ZPOOL_CONFIG_HOSTID, &hostid) == 0) {
2458                         char *hostname;
2459                         unsigned long myhostid = 0;
2460 
2461                         VERIFY(nvlist_lookup_string(nvconfig,
2462                             ZPOOL_CONFIG_HOSTNAME, &hostname) == 0);
2463 
2464 #ifdef  _KERNEL
2465                         myhostid = zone_get_hostid(NULL);
2466 #else   /* _KERNEL */
2467                         /*
2468                          * We're emulating the system's hostid in userland, so
2469                          * we can't use zone_get_hostid().
2470                          */
2471                         (void) ddi_strtoul(hw_serial, NULL, 10, &myhostid);
2472 #endif  /* _KERNEL */
2473                         if (hostid != 0 && myhostid != 0 &&
2474                             hostid != myhostid) {
2475                                 nvlist_free(nvconfig);
2476                                 cmn_err(CE_WARN, "pool '%s' could not be "
2477                                     "loaded as it was last accessed by "
2478                                     "another system (host: %s hostid: 0x%lx). "
2479                                     "See: http://illumos.org/msg/ZFS-8000-EY",
2480                                     spa_name(spa), hostname,
2481                                     (unsigned long)hostid);
2482                                 return (SET_ERROR(EBADF));
2483                         }
2484                 }
2485                 if (nvlist_lookup_nvlist(spa->spa_config,
2486                     ZPOOL_REWIND_POLICY, &policy) == 0)
2487                         VERIFY(nvlist_add_nvlist(nvconfig,
2488                             ZPOOL_REWIND_POLICY, policy) == 0);
2489 
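                /*
                 * Replace the untrusted config with the one we just read
                 * from the MOS, then restart the load using it as the
                 * trusted (mosconfig) configuration.
                 */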
2490                 spa_config_set(spa, nvconfig);
2491                 spa_unload(spa);
2492                 spa_deactivate(spa);
2493                 spa_activate(spa, orig_mode);
2494 
2495                 return (spa_load(spa, state, SPA_IMPORT_EXISTING, B_TRUE));
2496         }
2497 
2498         if (spa_dir_prop(spa, DMU_POOL_SYNC_BPOBJ, &obj) != 0)
2499                 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2500         error = bpobj_open(&spa->spa_deferred_bpobj, spa->spa_meta_objset, obj);
2501         if (error != 0)
2502                 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2503 
2504         /*
2505          * Load the bit that tells us to use the new accounting function
2506          * (raid-z deflation).  If we have an older pool, this will not
2507          * be present.
2508          */
2509         error = spa_dir_prop(spa, DMU_POOL_DEFLATE, &spa->spa_deflate);
2510         if (error != 0 && error != ENOENT)
2511                 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2512 
2513         error = spa_dir_prop(spa, DMU_POOL_CREATION_VERSION,
2514             &spa->spa_creation_version);
2515         if (error != 0 && error != ENOENT)
2516                 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2517 
2518         /*
2519          * Load the persistent error log.  If we have an older pool, this will
2520          * not be present.
2521          */
2522         error = spa_dir_prop(spa, DMU_POOL_ERRLOG_LAST, &spa->spa_errlog_last);
2523         if (error != 0 && error != ENOENT)
2524                 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2525 
2526         error = spa_dir_prop(spa, DMU_POOL_ERRLOG_SCRUB,
2527             &spa->spa_errlog_scrub);
2528         if (error != 0 && error != ENOENT)
2529                 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2530 
2531         /*
2532          * Load the history object.  If we have an older pool, this
2533          * will not be present.
2534          */
2535         error = spa_dir_prop(spa, DMU_POOL_HISTORY, &spa->spa_history);
2536         if (error != 0 && error != ENOENT)
2537                 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2538 
2539         /*
2540          * If we're assembling the pool from the split-off vdevs of
2541          * an existing pool, we don't want to attach the spares & cache
2542          * devices.
2543          */
2544 
2545         /*
2546          * Load any hot spares for this pool.
2547          */
2548         error = spa_dir_prop(spa, DMU_POOL_SPARES, &spa->spa_spares.sav_object);
2549         if (error != 0 && error != ENOENT)
2550                 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2551         if (error == 0 && type != SPA_IMPORT_ASSEMBLE) {
2552                 ASSERT(spa_version(spa) >= SPA_VERSION_SPARES);
2553                 if (load_nvlist(spa, spa->spa_spares.sav_object,
2554                     &spa->spa_spares.sav_config) != 0)
2555                         return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2556 
2557                 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
2558                 spa_load_spares(spa);
2559                 spa_config_exit(spa, SCL_ALL, FTAG);
2560         } else if (error == 0) {
2561                 spa->spa_spares.sav_sync = B_TRUE;
2562         }
2563 
2564         /*
2565          * Load any level 2 ARC devices for this pool.
2566          */
2567         error = spa_dir_prop(spa, DMU_POOL_L2CACHE,
2568             &spa->spa_l2cache.sav_object);
2569         if (error != 0 && error != ENOENT)
2570                 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2571         if (error == 0 && type != SPA_IMPORT_ASSEMBLE) {
2572                 ASSERT(spa_version(spa) >= SPA_VERSION_L2CACHE);
2573                 if (load_nvlist(spa, spa->spa_l2cache.sav_object,
2574                     &spa->spa_l2cache.sav_config) != 0)
2575                         return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2576 
2577                 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
2578                 spa_load_l2cache(spa);
2579                 spa_config_exit(spa, SCL_ALL, FTAG);
2580         } else if (error == 0) {
2581                 spa->spa_l2cache.sav_sync = B_TRUE;
2582         }
2583 
2584         spa->spa_delegation = zpool_prop_default_numeric(ZPOOL_PROP_DELEGATION);
2585 
2586         error = spa_dir_prop(spa, DMU_POOL_PROPS, &spa->spa_pool_props_object);
2587         if (error && error != ENOENT)
2588                 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2589 
2590         if (error == 0) {
2591                 uint64_t autoreplace;
2592 
2593                 spa_prop_find(spa, ZPOOL_PROP_BOOTFS, &spa->spa_bootfs);
2594                 spa_prop_find(spa, ZPOOL_PROP_AUTOREPLACE, &autoreplace);
2595                 spa_prop_find(spa, ZPOOL_PROP_DELEGATION, &spa->spa_delegation);
2596                 spa_prop_find(spa, ZPOOL_PROP_FAILUREMODE, &spa->spa_failmode);
2597                 spa_prop_find(spa, ZPOOL_PROP_AUTOEXPAND, &spa->spa_autoexpand);
2598                 spa_prop_find(spa, ZPOOL_PROP_DEDUPDITTO,
2599                     &spa->spa_dedup_ditto);
2600 
2601                 spa->spa_autoreplace = (autoreplace != 0);
2602         }
2603 
2604         /*
2605          * If the 'autoreplace' property is set, then post a resource notifying
2606          * the ZFS DE that it should not issue any faults for unopenable
2607          * devices.  We also iterate over the vdevs, and post a sysevent for any
2608          * unopenable vdevs so that the normal autoreplace handler can take
2609          * over.
2610          */
2611         if (spa->spa_autoreplace && state != SPA_LOAD_TRYIMPORT) {
2612                 spa_check_removed(spa->spa_root_vdev);
2613                 /*
2614                  * For the import case, this is done in spa_import(), because
2615                  * at this point we're using the spare definitions from
2616                  * the MOS config, not necessarily from the userland config.
2617                  */
2618                 if (state != SPA_LOAD_IMPORT) {
2619                         spa_aux_check_removed(&spa->spa_spares);
2620                         spa_aux_check_removed(&spa->spa_l2cache);
2621                 }
2622         }
2623 
2624         /*
2625          * Load the vdev state for all toplevel vdevs.
2626          */
2627         vdev_load(rvd);
2628 
2629         /*
2630          * Propagate the leaf DTLs we just loaded all the way up the tree.
2631          */
2632         spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
2633         vdev_dtl_reassess(rvd, 0, 0, B_FALSE);
2634         spa_config_exit(spa, SCL_ALL, FTAG);
2635 
2636         /*
2637          * Load the DDTs (dedup tables).
2638          */
2639         error = ddt_load(spa);
2640         if (error != 0)
2641                 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2642 
2643         spa_update_dspace(spa);
2644 
2645         /*
2646          * Validate the config, using the MOS config to fill in any
2647          * information which might be missing.  If we fail to validate
2648          * the config then declare the pool unfit for use. If we're
2649          * assembling a pool from a split, the log is not transferred
2650          * over.
2651          */
2652         if (type != SPA_IMPORT_ASSEMBLE) {
2653                 nvlist_t *nvconfig;
2654 
2655                 if (load_nvlist(spa, spa->spa_config_object, &nvconfig) != 0)
2656                         return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2657 
2658                 if (!spa_config_valid(spa, nvconfig)) {
2659                         nvlist_free(nvconfig);
2660                         return (spa_vdev_err(rvd, VDEV_AUX_BAD_GUID_SUM,
2661                             ENXIO));
2662                 }
2663                 nvlist_free(nvconfig);
2664 
2665                 /*
2666                  * Now that we've validated the config, check the state of the
2667                  * root vdev.  If it can't be opened, it indicates one or
2668                  * more toplevel vdevs are faulted.
2669                  */
2670                 if (rvd->vdev_state <= VDEV_STATE_CANT_OPEN)
2671                         return (SET_ERROR(ENXIO));
2672 
2673                 if (spa_check_logs(spa)) {
2674                         *ereport = FM_EREPORT_ZFS_LOG_REPLAY;
2675                         return (spa_vdev_err(rvd, VDEV_AUX_BAD_LOG, ENXIO));
2676                 }
2677         }
2678 
2679         if (missing_feat_write) {
2680                 ASSERT(state == SPA_LOAD_TRYIMPORT);
2681 
2682                 /*
2683                  * At this point, we know that we can open the pool in
2684                  * read-only mode but not read-write mode. We now have enough
2685                  * information and can return to userland.
2686                  */
2687                 return (spa_vdev_err(rvd, VDEV_AUX_UNSUP_FEAT, ENOTSUP));
2688         }
2689 
2690         /*
2691          * We've successfully opened the pool, verify that we're ready
2692          * to start pushing transactions.
2693          */
2694         if (state != SPA_LOAD_TRYIMPORT) {
2695                 if ((error = spa_load_verify(spa)) != 0)
2696                         return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA,
2697                             error));
2698         }
2699 
2700         if (spa_writeable(spa) && (state == SPA_LOAD_RECOVER ||
2701             spa->spa_load_max_txg == UINT64_MAX)) {
2702                 dmu_tx_t *tx;
2703                 int need_update = B_FALSE;
2704 
2705                 ASSERT(state != SPA_LOAD_TRYIMPORT);
2706 
2707                 /*
2708                  * Claim log blocks that haven't been committed yet.
2709                  * This must all happen in a single txg.
2710                  * Note: spa_claim_max_txg is updated by spa_claim_notify(),
2711                  * invoked from zil_claim_log_block()'s i/o done callback.
2712                  * Price of rollback is that we abandon the log.
2713                  */
2714                 spa->spa_claiming = B_TRUE;
2715 
2716                 tx = dmu_tx_create_assigned(spa_get_dsl(spa),
2717                     spa_first_txg(spa));
2718                 (void) dmu_objset_find(spa_name(spa),
2719                     zil_claim, tx, DS_FIND_CHILDREN);
2720                 dmu_tx_commit(tx);
2721 
2722                 spa->spa_claiming = B_FALSE;
2723 
2724                 spa_set_log_state(spa, SPA_LOG_GOOD);
2725                 spa->spa_sync_on = B_TRUE;
2726                 txg_sync_start(spa->spa_dsl_pool);
2727 
2728                 /*
2729                  * Wait for all claims to sync.  We sync up to the highest
2730                  * claimed log block birth time so that claimed log blocks
2731                  * don't appear to be from the future.  spa_claim_max_txg
2732                  * will have been set for us by either zil_check_log_chain()
2733                  * (invoked from spa_check_logs()) or zil_claim() above.
2734                  */
2735                 txg_wait_synced(spa->spa_dsl_pool, spa->spa_claim_max_txg);
2736 
2737                 /*
2738                  * If the config cache is stale, or we have uninitialized
2739                  * metaslabs (see spa_vdev_add()), then update the config.
2740                  *
2741                  * If this is a verbatim import, trust the current
2742                  * in-core spa_config and update the disk labels.
2743                  */
2744                 if (config_cache_txg != spa->spa_config_txg ||
2745                     state == SPA_LOAD_IMPORT ||
2746                     state == SPA_LOAD_RECOVER ||
2747                     (spa->spa_import_flags & ZFS_IMPORT_VERBATIM))
2748                         need_update = B_TRUE;
2749 
2750                 for (int c = 0; c < rvd->vdev_children; c++)
2751                         if (rvd->vdev_child[c]->vdev_ms_array == 0)
2752                                 need_update = B_TRUE;
2753 
2754                 /*
2755                  * Update the config cache asynchronously in case we're the
2756                  * root pool, in which case the config cache isn't writable yet.
2757                  */
2758                 if (need_update)
2759                         spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
2760 
2761                 /*
2762                  * Check all DTLs to see if anything needs resilvering.
2763                  */
2764                 if (!dsl_scan_resilvering(spa->spa_dsl_pool) &&
2765                     vdev_resilver_needed(rvd, NULL, NULL))
2766                         spa_async_request(spa, SPA_ASYNC_RESILVER);
2767 
2768                 /*
2769                  * Log the fact that we booted up (so that we can detect if
2770                  * we rebooted in the middle of an operation).
2771                  */
2772                 spa_history_log_version(spa, "open");
2773 
2774                 /*
2775                  * Delete any inconsistent datasets.
2776                  */
2777                 (void) dmu_objset_find(spa_name(spa),
2778                     dsl_destroy_inconsistent, NULL, DS_FIND_CHILDREN);
2779 
2780                 /*
2781                  * Clean up any stale temporary dataset userrefs.
2782                  */
2783                 dsl_pool_clean_tmp_userrefs(spa->spa_dsl_pool);
2784         }
2785 
2786         return (0);
2787 }
2788 
2789 static int
2790 spa_load_retry(spa_t *spa, spa_load_state_t state, int mosconfig)
2791 {
2792         int mode = spa->spa_mode;
2793 
2794         spa_unload(spa);
2795         spa_deactivate(spa);
2796 
2797         spa->spa_load_max_txg = spa->spa_uberblock.ub_txg - 1;
2798 
2799         spa_activate(spa, mode);
2800         spa_async_suspend(spa);
2801 
2802         return (spa_load(spa, state, SPA_IMPORT_EXISTING, mosconfig));
2803 }
2804 
2805 /*
2806  * If spa_load() fails, this function will try loading prior txgs.  If
2807  * 'state' is SPA_LOAD_RECOVER and one of these loads succeeds, the pool
2808  * will be rewound to that txg.  If 'state' is not SPA_LOAD_RECOVER, this
2809  * function will not rewind the pool and will return the same error as
2810  * spa_load().
2811  */
2812 static int
2813 spa_load_best(spa_t *spa, spa_load_state_t state, int mosconfig,
2814     uint64_t max_request, int rewind_flags)
2815 {
2816         nvlist_t *loadinfo = NULL;
2817         nvlist_t *config = NULL;
2818         int load_error, rewind_error;
2819         uint64_t safe_rewind_txg;
2820         uint64_t min_txg;
2821 
2822         if (spa->spa_load_txg && state == SPA_LOAD_RECOVER) {
2823                 spa->spa_load_max_txg = spa->spa_load_txg;
2824                 spa_set_log_state(spa, SPA_LOG_CLEAR);
2825         } else {
2826                 spa->spa_load_max_txg = max_request;
2827                 if (max_request != UINT64_MAX)
2828                         spa->spa_extreme_rewind = B_TRUE;
2829         }
2830 
2831         load_error = rewind_error = spa_load(spa, state, SPA_IMPORT_EXISTING,
2832             mosconfig);
2833         if (load_error == 0)
2834                 return (0);
2835 
2836         if (spa->spa_root_vdev != NULL)
2837                 config = spa_config_generate(spa, NULL, -1ULL, B_TRUE);
2838 
2839         spa->spa_last_ubsync_txg = spa->spa_uberblock.ub_txg;
2840         spa->spa_last_ubsync_txg_ts = spa->spa_uberblock.ub_timestamp;
2841 
2842         if (rewind_flags & ZPOOL_NEVER_REWIND) {
2843                 nvlist_free(config);
2844                 return (load_error);
2845         }
2846 
2847         if (state == SPA_LOAD_RECOVER) {
2848                 /* Price of rolling back is discarding txgs, including log */
2849                 spa_set_log_state(spa, SPA_LOG_CLEAR);
2850         } else {
2851                 /*
2852                  * If we aren't rolling back save the load info from our first
2853                  * import attempt so that we can restore it after attempting
2854                  * to rewind.
2855                  */
2856                 loadinfo = spa->spa_load_info;
2857                 spa->spa_load_info = fnvlist_alloc();
2858         }
2859 
2860         spa->spa_load_max_txg = spa->spa_last_ubsync_txg;
2861         safe_rewind_txg = spa->spa_last_ubsync_txg - TXG_DEFER_SIZE;
2862         min_txg = (rewind_flags & ZPOOL_EXTREME_REWIND) ?
2863             TXG_INITIAL : safe_rewind_txg;
2864 
2865         /*
2866          * Continue as long as we're finding errors, we're still within
2867          * the acceptable rewind range, and we're still finding uberblocks.
2868          */
2869         while (rewind_error && spa->spa_uberblock.ub_txg >= min_txg &&
2870             spa->spa_uberblock.ub_txg <= spa->spa_load_max_txg) {
2871                 if (spa->spa_load_max_txg < safe_rewind_txg)
2872                         spa->spa_extreme_rewind = B_TRUE;
2873                 rewind_error = spa_load_retry(spa, state, mosconfig);
2874         }
2875 
2876         spa->spa_extreme_rewind = B_FALSE;
2877         spa->spa_load_max_txg = UINT64_MAX;
2878 
2879         if (config && (rewind_error || state != SPA_LOAD_RECOVER))
2880                 spa_config_set(spa, config);
2881 
2882         if (state == SPA_LOAD_RECOVER) {
2883                 ASSERT3P(loadinfo, ==, NULL);
2884                 return (rewind_error);
2885         } else {
2886                 /* Store the rewind info as part of the initial load info */
2887                 fnvlist_add_nvlist(loadinfo, ZPOOL_CONFIG_REWIND_INFO,
2888                     spa->spa_load_info);
2889 
2890                 /* Restore the initial load info */
2891                 fnvlist_free(spa->spa_load_info);
2892                 spa->spa_load_info = loadinfo;
2893 
2894                 return (load_error);
2895         }
2896 }
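
/*
 * A worked example of the rewind window computed above, assuming the last
 * synced uberblock is txg 100 and TXG_DEFER_SIZE is 2:
 *
 *      safe_rewind_txg = 100 - TXG_DEFER_SIZE = 98;
 *      min_txg = (rewind_flags & ZPOOL_EXTREME_REWIND) ?
 *          TXG_INITIAL : safe_rewind_txg;
 *
 * A normal recovery may therefore roll back no further than txg 98, while an
 * extreme rewind may walk uberblocks all the way back to TXG_INITIAL.
 * ZPOOL_NEVER_REWIND skips the retry loop entirely and returns the original
 * spa_load() error.
 */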
2897 
2898 /*
2899  * Pool Open/Import
2900  *
2901  * The import case is identical to an open except that the configuration is
2902  * sent down from userland, instead of grabbed from the configuration cache.
2903  * For the case of an open, the pool configuration will exist in the
2904  * POOL_STATE_UNINITIALIZED state.
2905  *
2906  * The stats information (gen/count/ustats) is used to gather vdev statistics
2907  * while opening the pool, without having to keep around the spa_t in some
2908  * ambiguous state.
2909  */
2910 static int
2911 spa_open_common(const char *pool, spa_t **spapp, void *tag, nvlist_t *nvpolicy,
2912     nvlist_t **config)
2913 {
2914         spa_t *spa;
2915         spa_load_state_t state = SPA_LOAD_OPEN;
2916         int error;
2917         int locked = B_FALSE;
2918 
2919         *spapp = NULL;
2920 
2921         /*
2922          * As disgusting as this is, we need to support recursive calls to this
2923          * function because dsl_dir_open() is called during spa_load(), and ends
2924          * up calling spa_open() again.  The real fix is to figure out how to
2925          * avoid dsl_dir_open() calling this in the first place.
2926          */
2927         if (mutex_owner(&spa_namespace_lock) != curthread) {
2928                 mutex_enter(&spa_namespace_lock);
2929                 locked = B_TRUE;
2930         }
2931 
2932         if ((spa = spa_lookup(pool)) == NULL) {
2933                 if (locked)
2934                         mutex_exit(&spa_namespace_lock);
2935                 return (SET_ERROR(ENOENT));
2936         }
2937 
2938         if (spa->spa_state == POOL_STATE_UNINITIALIZED) {
2939                 zpool_rewind_policy_t policy;
2940 
2941                 zpool_get_rewind_policy(nvpolicy ? nvpolicy : spa->spa_config,
2942                     &policy);
2943                 if (policy.zrp_request & ZPOOL_DO_REWIND)
2944                         state = SPA_LOAD_RECOVER;
2945 
2946                 spa_activate(spa, spa_mode_global);
2947 
2948                 if (state != SPA_LOAD_RECOVER)
2949                         spa->spa_last_ubsync_txg = spa->spa_load_txg = 0;
2950 
2951                 error = spa_load_best(spa, state, B_FALSE, policy.zrp_txg,
2952                     policy.zrp_request);
2953 
2954                 if (error == EBADF) {
2955                         /*
2956                          * If vdev_validate() returns failure (indicated by
2957                          * EBADF), it means that one of the vdevs reports that
2958                          * the pool has been exported or destroyed.  If
2959                          * this is the case, the config cache is out of sync and
2960                          * we should remove the pool from the namespace.
2961                          */
2962                         spa_unload(spa);
2963                         spa_deactivate(spa);
2964                         spa_config_sync(spa, B_TRUE, B_TRUE);
2965                         spa_remove(spa);
2966                         if (locked)
2967                                 mutex_exit(&spa_namespace_lock);
2968                         return (SET_ERROR(ENOENT));
2969                 }
2970 
2971                 if (error) {
2972                         /*
2973                          * We can't open the pool, but we still have useful
2974                          * information: the state of each vdev after the
2975                          * attempted vdev_open().  Return this to the user.
2976                          */
2977                         if (config != NULL && spa->spa_config) {
2978                                 VERIFY(nvlist_dup(spa->spa_config, config,
2979                                     KM_SLEEP) == 0);
2980                                 VERIFY(nvlist_add_nvlist(*config,
2981                                     ZPOOL_CONFIG_LOAD_INFO,
2982                                     spa->spa_load_info) == 0);
2983                         }
2984                         spa_unload(spa);
2985                         spa_deactivate(spa);
2986                         spa->spa_last_open_failed = error;
2987                         if (locked)
2988                                 mutex_exit(&spa_namespace_lock);
2989                         *spapp = NULL;
2990                         return (error);
2991                 }
2992         }
2993 
2994         spa_open_ref(spa, tag);
2995 
2996         if (config != NULL)
2997                 *config = spa_config_generate(spa, NULL, -1ULL, B_TRUE);
2998 
2999         /*
3000          * If we've recovered the pool, pass back any information we
3001          * gathered while doing the load.
3002          */
3003         if (state == SPA_LOAD_RECOVER) {
3004                 VERIFY(nvlist_add_nvlist(*config, ZPOOL_CONFIG_LOAD_INFO,
3005                     spa->spa_load_info) == 0);
3006         }
3007 
3008         if (locked) {
3009                 spa->spa_last_open_failed = 0;
3010                 spa->spa_last_ubsync_txg = 0;
3011                 spa->spa_load_txg = 0;
3012                 mutex_exit(&spa_namespace_lock);
3013         }
3014 
3015         *spapp = spa;
3016 
3017         return (0);
3018 }
3019 
3020 int
3021 spa_open_rewind(const char *name, spa_t **spapp, void *tag, nvlist_t *policy,
3022     nvlist_t **config)
3023 {
3024         return (spa_open_common(name, spapp, tag, policy, config));
3025 }
3026 
3027 int
3028 spa_open(const char *name, spa_t **spapp, void *tag)
3029 {
3030         return (spa_open_common(name, spapp, tag, NULL, NULL));
3031 }
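
/*
 * A minimal sketch of the expected calling pattern for the functions above;
 * the pool name "tank" is only an example, and the real callers (the zfs
 * ioctl paths) do more elaborate error handling:
 *
 *      spa_t *spa;
 *      int error;
 *
 *      if ((error = spa_open("tank", &spa, FTAG)) != 0)
 *              return (error);         (e.g. ENOENT if no such pool exists)
 *      ... operate on the pool while the open reference is held ...
 *      spa_close(spa, FTAG);
 */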
3032 
3033 /*
3034  * Look up the given spa_t, incrementing the inject count in the process,
3035  * which prevents it from being exported or destroyed.
3036  */
3037 spa_t *
3038 spa_inject_addref(char *name)
3039 {
3040         spa_t *spa;
3041 
3042         mutex_enter(&spa_namespace_lock);
3043         if ((spa = spa_lookup(name)) == NULL) {
3044                 mutex_exit(&spa_namespace_lock);
3045                 return (NULL);
3046         }
3047         spa->spa_inject_ref++;
3048         mutex_exit(&spa_namespace_lock);
3049 
3050         return (spa);
3051 }
3052 
3053 void
3054 spa_inject_delref(spa_t *spa)
3055 {
3056         mutex_enter(&spa_namespace_lock);
3057         spa->spa_inject_ref--;
3058         mutex_exit(&spa_namespace_lock);
3059 }
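
/*
 * A minimal sketch of the pairing expected by the fault-injection code;
 * "tank" is only an example name.  While the inject reference is held,
 * spa_export_common() below will refuse to export or destroy the pool:
 *
 *      spa_t *spa = spa_inject_addref("tank");
 *      if (spa != NULL) {
 *              ... register or run an injection handler against the pool ...
 *              spa_inject_delref(spa);
 *      }
 */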
3060 
3061 /*
3062  * Add spare device information to the nvlist.
3063  */
3064 static void
3065 spa_add_spares(spa_t *spa, nvlist_t *config)
3066 {
3067         nvlist_t **spares;
3068         uint_t i, nspares;
3069         nvlist_t *nvroot;
3070         uint64_t guid;
3071         vdev_stat_t *vs;
3072         uint_t vsc;
3073         uint64_t pool;
3074 
3075         ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER));
3076 
3077         if (spa->spa_spares.sav_count == 0)
3078                 return;
3079 
3080         VERIFY(nvlist_lookup_nvlist(config,
3081             ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
3082         VERIFY(nvlist_lookup_nvlist_array(spa->spa_spares.sav_config,
3083             ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);
3084         if (nspares != 0) {
3085                 VERIFY(nvlist_add_nvlist_array(nvroot,
3086                     ZPOOL_CONFIG_SPARES, spares, nspares) == 0);
3087                 VERIFY(nvlist_lookup_nvlist_array(nvroot,
3088                     ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);
3089 
3090                 /*
3091                  * Go through and find any spares which have since been
3092                  * repurposed as active spares.  If this is the case, update
3093                  * their status appropriately.
3094                  */
3095                 for (i = 0; i < nspares; i++) {
3096                         VERIFY(nvlist_lookup_uint64(spares[i],
3097                             ZPOOL_CONFIG_GUID, &guid) == 0);
3098                         if (spa_spare_exists(guid, &pool, NULL) &&
3099                             pool != 0ULL) {
3100                                 VERIFY(nvlist_lookup_uint64_array(
3101                                     spares[i], ZPOOL_CONFIG_VDEV_STATS,
3102                                     (uint64_t **)&vs, &vsc) == 0);
3103                                 vs->vs_state = VDEV_STATE_CANT_OPEN;
3104                                 vs->vs_aux = VDEV_AUX_SPARED;
3105                         }
3106                 }
3107         }
3108 }
3109 
3110 /*
3111  * Add l2cache device information to the nvlist, including vdev stats.
3112  */
3113 static void
3114 spa_add_l2cache(spa_t *spa, nvlist_t *config)
3115 {
3116         nvlist_t **l2cache;
3117         uint_t i, j, nl2cache;
3118         nvlist_t *nvroot;
3119         uint64_t guid;
3120         vdev_t *vd;
3121         vdev_stat_t *vs;
3122         uint_t vsc;
3123 
3124         ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER));
3125 
3126         if (spa->spa_l2cache.sav_count == 0)
3127                 return;
3128 
3129         VERIFY(nvlist_lookup_nvlist(config,
3130             ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
3131         VERIFY(nvlist_lookup_nvlist_array(spa->spa_l2cache.sav_config,
3132             ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0);
3133         if (nl2cache != 0) {
3134                 VERIFY(nvlist_add_nvlist_array(nvroot,
3135                     ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0);
3136                 VERIFY(nvlist_lookup_nvlist_array(nvroot,
3137                     ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0);
3138 
3139                 /*
3140                  * Update level 2 cache device stats.
3141                  */
3142 
3143                 for (i = 0; i < nl2cache; i++) {
3144                         VERIFY(nvlist_lookup_uint64(l2cache[i],
3145                             ZPOOL_CONFIG_GUID, &guid) == 0);
3146 
3147                         vd = NULL;
3148                         for (j = 0; j < spa->spa_l2cache.sav_count; j++) {
3149                                 if (guid ==
3150                                     spa->spa_l2cache.sav_vdevs[j]->vdev_guid) {
3151                                         vd = spa->spa_l2cache.sav_vdevs[j];
3152                                         break;
3153                                 }
3154                         }
3155                         ASSERT(vd != NULL);
3156 
3157                         VERIFY(nvlist_lookup_uint64_array(l2cache[i],
3158                             ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs, &vsc)
3159                             == 0);
3160                         vdev_get_stats(vd, vs);
3161                 }
3162         }
3163 }
3164 
3165 static void
3166 spa_add_feature_stats(spa_t *spa, nvlist_t *config)
3167 {
3168         nvlist_t *features;
3169         zap_cursor_t zc;
3170         zap_attribute_t za;
3171 
3172         ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER));
3173         VERIFY(nvlist_alloc(&features, NV_UNIQUE_NAME, KM_SLEEP) == 0);
3174 
3175         if (spa->spa_feat_for_read_obj != 0) {
3176                 for (zap_cursor_init(&zc, spa->spa_meta_objset,
3177                     spa->spa_feat_for_read_obj);
3178                     zap_cursor_retrieve(&zc, &za) == 0;
3179                     zap_cursor_advance(&zc)) {
3180                         ASSERT(za.za_integer_length == sizeof (uint64_t) &&
3181                             za.za_num_integers == 1);
3182                         VERIFY3U(0, ==, nvlist_add_uint64(features, za.za_name,
3183                             za.za_first_integer));
3184                 }
3185                 zap_cursor_fini(&zc);
3186         }
3187 
3188         if (spa->spa_feat_for_write_obj != 0) {
3189                 for (zap_cursor_init(&zc, spa->spa_meta_objset,
3190                     spa->spa_feat_for_write_obj);
3191                     zap_cursor_retrieve(&zc, &za) == 0;
3192                     zap_cursor_advance(&zc)) {
3193                         ASSERT(za.za_integer_length == sizeof (uint64_t) &&
3194                             za.za_num_integers == 1);
3195                         VERIFY3U(0, ==, nvlist_add_uint64(features, za.za_name,
3196                             za.za_first_integer));
3197                 }
3198                 zap_cursor_fini(&zc);
3199         }
3200 
3201         VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_FEATURE_STATS,
3202             features) == 0);
3203         nvlist_free(features);
3204 }
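
/*
 * A sketch of the nvlist added to the config above; the feature names and
 * reference counts shown are examples only:
 *
 *      ZPOOL_CONFIG_FEATURE_STATS:
 *              "com.delphix:async_destroy" = 0
 *              "com.delphix:hole_birth" = 1
 *
 * A reference count of zero means the feature is enabled but not yet in use.
 */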
3205 
3206 int
3207 spa_get_stats(const char *name, nvlist_t **config,
3208     char *altroot, size_t buflen)
3209 {
3210         int error;
3211         spa_t *spa;
3212 
3213         *config = NULL;
3214         error = spa_open_common(name, &spa, FTAG, NULL, config);
3215 
3216         if (spa != NULL) {
3217                 /*
3218                  * This still leaves a window of inconsistency where the spares
3219                  * or l2cache devices could change and the config would be
3220                  * self-inconsistent.
3221                  */
3222                 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
3223 
3224                 if (*config != NULL) {
3225                         uint64_t loadtimes[2];
3226 
3227                         loadtimes[0] = spa->spa_loaded_ts.tv_sec;
3228                         loadtimes[1] = spa->spa_loaded_ts.tv_nsec;
3229                         VERIFY(nvlist_add_uint64_array(*config,
3230                             ZPOOL_CONFIG_LOADED_TIME, loadtimes, 2) == 0);
3231 
3232                         VERIFY(nvlist_add_uint64(*config,
3233                             ZPOOL_CONFIG_ERRCOUNT,
3234                             spa_get_errlog_size(spa)) == 0);
3235 
3236                         if (spa_suspended(spa))
3237                                 VERIFY(nvlist_add_uint64(*config,
3238                                     ZPOOL_CONFIG_SUSPENDED,
3239                                     spa->spa_failmode) == 0);
3240 
3241                         spa_add_spares(spa, *config);
3242                         spa_add_l2cache(spa, *config);
3243                         spa_add_feature_stats(spa, *config);
3244                 }
3245         }
3246 
3247         /*
3248          * We want to get the alternate root even for faulted pools, so we cheat
3249          * and call spa_lookup() directly.
3250          */
3251         if (altroot) {
3252                 if (spa == NULL) {
3253                         mutex_enter(&spa_namespace_lock);
3254                         spa = spa_lookup(name);
3255                         if (spa)
3256                                 spa_altroot(spa, altroot, buflen);
3257                         else
3258                                 altroot[0] = '\0';
3259                         spa = NULL;
3260                         mutex_exit(&spa_namespace_lock);
3261                 } else {
3262                         spa_altroot(spa, altroot, buflen);
3263                 }
3264         }
3265 
3266         if (spa != NULL) {
3267                 spa_config_exit(spa, SCL_CONFIG, FTAG);
3268                 spa_close(spa, FTAG);
3269         }
3270 
3271         return (error);
3272 }
3273 
3274 /*
3275  * Validate that the auxiliary device array is well formed.  We must have an
3276  * array of nvlists, each of which describes a valid leaf vdev.  If this is an
3277  * import (mode is VDEV_ALLOC_SPARE), then we allow corrupted spares to be
3278  * specified, as long as they are well-formed.
3279  */
3280 static int
3281 spa_validate_aux_devs(spa_t *spa, nvlist_t *nvroot, uint64_t crtxg, int mode,
3282     spa_aux_vdev_t *sav, const char *config, uint64_t version,
3283     vdev_labeltype_t label)
3284 {
3285         nvlist_t **dev;
3286         uint_t i, ndev;
3287         vdev_t *vd;
3288         int error;
3289 
3290         ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
3291 
3292         /*
3293          * It's acceptable to have no devs specified.
3294          */
3295         if (nvlist_lookup_nvlist_array(nvroot, config, &dev, &ndev) != 0)
3296                 return (0);
3297 
3298         if (ndev == 0)
3299                 return (SET_ERROR(EINVAL));
3300 
3301         /*
3302          * Make sure the pool is formatted with a version that supports this
3303          * device type.
3304          */
3305         if (spa_version(spa) < version)
3306                 return (SET_ERROR(ENOTSUP));
3307 
3308         /*
3309          * Set the pending device list so we correctly handle device in-use
3310          * checking.
3311          */
3312         sav->sav_pending = dev;
3313         sav->sav_npending = ndev;
3314 
3315         for (i = 0; i < ndev; i++) {
3316                 if ((error = spa_config_parse(spa, &vd, dev[i], NULL, 0,
3317                     mode)) != 0)
3318                         goto out;
3319 
3320                 if (!vd->vdev_ops->vdev_op_leaf) {
3321                         vdev_free(vd);
3322                         error = SET_ERROR(EINVAL);
3323                         goto out;
3324                 }
3325 
3326                 /*
3327                  * The L2ARC currently only supports disk devices in
3328                  * kernel context.  For user-level testing, we allow it.
3329                  */
3330 #ifdef _KERNEL
3331                 if ((strcmp(config, ZPOOL_CONFIG_L2CACHE) == 0) &&
3332                     strcmp(vd->vdev_ops->vdev_op_type, VDEV_TYPE_DISK) != 0) {
3333                         error = SET_ERROR(ENOTBLK);
3334                         vdev_free(vd);
3335                         goto out;
3336                 }
3337 #endif
3338                 vd->vdev_top = vd;
3339 
3340                 if ((error = vdev_open(vd)) == 0 &&
3341                     (error = vdev_label_init(vd, crtxg, label)) == 0) {
3342                         VERIFY(nvlist_add_uint64(dev[i], ZPOOL_CONFIG_GUID,
3343                             vd->vdev_guid) == 0);
3344                 }
3345 
3346                 vdev_free(vd);
3347 
3348                 if (error &&
3349                     (mode != VDEV_ALLOC_SPARE && mode != VDEV_ALLOC_L2CACHE))
3350                         goto out;
3351                 else
3352                         error = 0;
3353         }
3354 
3355 out:
3356         sav->sav_pending = NULL;
3357         sav->sav_npending = 0;
3358         return (error);
3359 }
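
/*
 * A sketch of the layout spa_validate_aux_devs() expects, with an assumed
 * device path; each entry of the ZPOOL_CONFIG_SPARES (or
 * ZPOOL_CONFIG_L2CACHE) array must describe a single leaf vdev:
 *
 *      ZPOOL_CONFIG_SPARES[0]:
 *              ZPOOL_CONFIG_TYPE = VDEV_TYPE_DISK
 *              ZPOOL_CONFIG_PATH = "/dev/dsk/c1t2d0s0"
 *              ZPOOL_CONFIG_GUID       (filled in by vdev_label_init() above)
 */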
3360 
3361 static int
3362 spa_validate_aux(spa_t *spa, nvlist_t *nvroot, uint64_t crtxg, int mode)
3363 {
3364         int error;
3365 
3366         ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
3367 
3368         if ((error = spa_validate_aux_devs(spa, nvroot, crtxg, mode,
3369             &spa->spa_spares, ZPOOL_CONFIG_SPARES, SPA_VERSION_SPARES,
3370             VDEV_LABEL_SPARE)) != 0) {
3371                 return (error);
3372         }
3373 
3374         return (spa_validate_aux_devs(spa, nvroot, crtxg, mode,
3375             &spa->spa_l2cache, ZPOOL_CONFIG_L2CACHE, SPA_VERSION_L2CACHE,
3376             VDEV_LABEL_L2CACHE));
3377 }
3378 
3379 static void
3380 spa_set_aux_vdevs(spa_aux_vdev_t *sav, nvlist_t **devs, int ndevs,
3381     const char *config)
3382 {
3383         int i;
3384 
3385         if (sav->sav_config != NULL) {
3386                 nvlist_t **olddevs;
3387                 uint_t oldndevs;
3388                 nvlist_t **newdevs;
3389 
3390                 /*
3391                  * Generate new dev list by concatenating with the
3392                  * current dev list.
3393                  */
3394                 VERIFY(nvlist_lookup_nvlist_array(sav->sav_config, config,
3395                     &olddevs, &oldndevs) == 0);
3396 
3397                 newdevs = kmem_alloc(sizeof (void *) *
3398                     (ndevs + oldndevs), KM_SLEEP);
3399                 for (i = 0; i < oldndevs; i++)
3400                         VERIFY(nvlist_dup(olddevs[i], &newdevs[i],
3401                             KM_SLEEP) == 0);
3402                 for (i = 0; i < ndevs; i++)
3403                         VERIFY(nvlist_dup(devs[i], &newdevs[i + oldndevs],
3404                             KM_SLEEP) == 0);
3405 
3406                 VERIFY(nvlist_remove(sav->sav_config, config,
3407                     DATA_TYPE_NVLIST_ARRAY) == 0);
3408 
3409                 VERIFY(nvlist_add_nvlist_array(sav->sav_config,
3410                     config, newdevs, ndevs + oldndevs) == 0);
3411                 for (i = 0; i < oldndevs + ndevs; i++)
3412                         nvlist_free(newdevs[i]);
3413                 kmem_free(newdevs, (oldndevs + ndevs) * sizeof (void *));
3414         } else {
3415                 /*
3416                  * Generate a new dev list.
3417                  */
3418                 VERIFY(nvlist_alloc(&sav->sav_config, NV_UNIQUE_NAME,
3419                     KM_SLEEP) == 0);
3420                 VERIFY(nvlist_add_nvlist_array(sav->sav_config, config,
3421                     devs, ndevs) == 0);
3422         }
3423 }
3424 
3425 /*
3426  * Stop and drop level 2 ARC devices
3427  */
3428 void
3429 spa_l2cache_drop(spa_t *spa)
3430 {
3431         vdev_t *vd;
3432         int i;
3433         spa_aux_vdev_t *sav = &spa->spa_l2cache;
3434 
3435         for (i = 0; i < sav->sav_count; i++) {
3436                 uint64_t pool;
3437 
3438                 vd = sav->sav_vdevs[i];
3439                 ASSERT(vd != NULL);
3440 
3441                 if (spa_l2cache_exists(vd->vdev_guid, &pool) &&
3442                     pool != 0ULL && l2arc_vdev_present(vd))
3443                         l2arc_remove_vdev(vd);
3444         }
3445 }
3446 
3447 /*
3448  * Pool Creation
3449  */
3450 int
3451 spa_create(const char *pool, nvlist_t *nvroot, nvlist_t *props,
3452     nvlist_t *zplprops)
3453 {
3454         spa_t *spa;
3455         char *altroot = NULL;
3456         vdev_t *rvd;
3457         dsl_pool_t *dp;
3458         dmu_tx_t *tx;
3459         int error = 0;
3460         uint64_t txg = TXG_INITIAL;
3461         nvlist_t **spares, **l2cache;
3462         uint_t nspares, nl2cache;
3463         uint64_t version, obj;
3464         boolean_t has_features;
3465 
3466         /*
3467          * If this pool already exists, return failure.
3468          */
3469         mutex_enter(&spa_namespace_lock);
3470         if (spa_lookup(pool) != NULL) {
3471                 mutex_exit(&spa_namespace_lock);
3472                 return (SET_ERROR(EEXIST));
3473         }
3474 
3475         /*
3476          * Allocate a new spa_t structure.
3477          */
3478         (void) nvlist_lookup_string(props,
3479             zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot);
3480         spa = spa_add(pool, NULL, altroot);
3481         spa_activate(spa, spa_mode_global);
3482 
3483         if (props && (error = spa_prop_validate(spa, props))) {
3484                 spa_deactivate(spa);
3485                 spa_remove(spa);
3486                 mutex_exit(&spa_namespace_lock);
3487                 return (error);
3488         }
3489 
3490         has_features = B_FALSE;
3491         for (nvpair_t *elem = nvlist_next_nvpair(props, NULL);
3492             elem != NULL; elem = nvlist_next_nvpair(props, elem)) {
3493                 if (zpool_prop_feature(nvpair_name(elem)))
3494                         has_features = B_TRUE;
3495         }
3496 
3497         if (has_features || nvlist_lookup_uint64(props,
3498             zpool_prop_to_name(ZPOOL_PROP_VERSION), &version) != 0) {
3499                 version = SPA_VERSION;
3500         }
3501         ASSERT(SPA_VERSION_IS_SUPPORTED(version));
3502 
3503         spa->spa_first_txg = txg;
3504         spa->spa_uberblock.ub_txg = txg - 1;
3505         spa->spa_uberblock.ub_version = version;
3506         spa->spa_ubsync = spa->spa_uberblock;
3507 
3508         /*
3509          * Create "The Godfather" zio to hold all async IOs
3510          */
3511         spa->spa_async_zio_root = kmem_alloc(max_ncpus * sizeof (void *),
3512             KM_SLEEP);
3513         for (int i = 0; i < max_ncpus; i++) {
3514                 spa->spa_async_zio_root[i] = zio_root(spa, NULL, NULL,
3515                     ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE |
3516                     ZIO_FLAG_GODFATHER);
3517         }
3518 
3519         /*
3520          * Create the root vdev.
3521          */
3522         spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
3523 
3524         error = spa_config_parse(spa, &rvd, nvroot, NULL, 0, VDEV_ALLOC_ADD);
3525 
3526         ASSERT(error != 0 || rvd != NULL);
3527         ASSERT(error != 0 || spa->spa_root_vdev == rvd);
3528 
3529         if (error == 0 && !zfs_allocatable_devs(nvroot))
3530                 error = SET_ERROR(EINVAL);
3531 
3532         if (error == 0 &&
3533             (error = vdev_create(rvd, txg, B_FALSE)) == 0 &&
3534             (error = spa_validate_aux(spa, nvroot, txg,
3535             VDEV_ALLOC_ADD)) == 0) {
3536                 for (int c = 0; c < rvd->vdev_children; c++) {
3537                         vdev_metaslab_set_size(rvd->vdev_child[c]);
3538                         vdev_expand(rvd->vdev_child[c], txg);
3539                 }
3540         }
3541 
3542         spa_config_exit(spa, SCL_ALL, FTAG);
3543 
3544         if (error != 0) {
3545                 spa_unload(spa);
3546                 spa_deactivate(spa);
3547                 spa_remove(spa);
3548                 mutex_exit(&spa_namespace_lock);
3549                 return (error);
3550         }
3551 
3552         /*
3553          * Get the list of spares, if specified.
3554          */
3555         if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
3556             &spares, &nspares) == 0) {
3557                 VERIFY(nvlist_alloc(&spa->spa_spares.sav_config, NV_UNIQUE_NAME,
3558                     KM_SLEEP) == 0);
3559                 VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config,
3560                     ZPOOL_CONFIG_SPARES, spares, nspares) == 0);
3561                 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
3562                 spa_load_spares(spa);
3563                 spa_config_exit(spa, SCL_ALL, FTAG);
3564                 spa->spa_spares.sav_sync = B_TRUE;
3565         }
3566 
3567         /*
3568          * Get the list of level 2 cache devices, if specified.
3569          */
3570         if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
3571             &l2cache, &nl2cache) == 0) {
3572                 VERIFY(nvlist_alloc(&spa->spa_l2cache.sav_config,
3573                     NV_UNIQUE_NAME, KM_SLEEP) == 0);
3574                 VERIFY(nvlist_add_nvlist_array(spa->spa_l2cache.sav_config,
3575                     ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0);
3576                 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
3577                 spa_load_l2cache(spa);
3578                 spa_config_exit(spa, SCL_ALL, FTAG);
3579                 spa->spa_l2cache.sav_sync = B_TRUE;
3580         }
3581 
3582         spa->spa_is_initializing = B_TRUE;
3583         spa->spa_dsl_pool = dp = dsl_pool_create(spa, zplprops, txg);
3584         spa->spa_meta_objset = dp->dp_meta_objset;
3585         spa->spa_is_initializing = B_FALSE;
3586 
3587         /*
3588          * Create DDTs (dedup tables).
3589          */
3590         ddt_create(spa);
3591 
3592         spa_update_dspace(spa);
3593 
3594         tx = dmu_tx_create_assigned(dp, txg);
3595 
3596         /*
3597          * Create the pool config object.
3598          */
3599         spa->spa_config_object = dmu_object_alloc(spa->spa_meta_objset,
3600             DMU_OT_PACKED_NVLIST, SPA_CONFIG_BLOCKSIZE,
3601             DMU_OT_PACKED_NVLIST_SIZE, sizeof (uint64_t), tx);
3602 
3603         if (zap_add(spa->spa_meta_objset,
3604             DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CONFIG,
3605             sizeof (uint64_t), 1, &spa->spa_config_object, tx) != 0) {
3606                 cmn_err(CE_PANIC, "failed to add pool config");
3607         }
3608 
3609         if (spa_version(spa) >= SPA_VERSION_FEATURES)
3610                 spa_feature_create_zap_objects(spa, tx);
3611 
3612         if (zap_add(spa->spa_meta_objset,
3613             DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CREATION_VERSION,
3614             sizeof (uint64_t), 1, &version, tx) != 0) {
3615                 cmn_err(CE_PANIC, "failed to add pool version");
3616         }
3617 
3618         /* Newly created pools with the right version are always deflated. */
3619         if (version >= SPA_VERSION_RAIDZ_DEFLATE) {
3620                 spa->spa_deflate = TRUE;
3621                 if (zap_add(spa->spa_meta_objset,
3622                     DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE,
3623                     sizeof (uint64_t), 1, &spa->spa_deflate, tx) != 0) {
3624                         cmn_err(CE_PANIC, "failed to add deflate");
3625                 }
3626         }
3627 
3628         /*
3629          * Create the deferred-free bpobj.  Turn off compression
3630          * because sync-to-convergence takes longer if the blocksize
3631          * keeps changing.
3632          */
3633         obj = bpobj_alloc(spa->spa_meta_objset, 1 << 14, tx);
3634         dmu_object_set_compress(spa->spa_meta_objset, obj,
3635             ZIO_COMPRESS_OFF, tx);
3636         if (zap_add(spa->spa_meta_objset,
3637             DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_SYNC_BPOBJ,
3638             sizeof (uint64_t), 1, &obj, tx) != 0) {
3639                 cmn_err(CE_PANIC, "failed to add bpobj");
3640         }
3641         VERIFY3U(0, ==, bpobj_open(&spa->spa_deferred_bpobj,
3642             spa->spa_meta_objset, obj));
3643 
3644         /*
3645          * Create the pool's history object.
3646          */
3647         if (version >= SPA_VERSION_ZPOOL_HISTORY)
3648                 spa_history_create_obj(spa, tx);
3649 
3650         /*
3651          * Set pool properties.
3652          */
3653         spa->spa_bootfs = zpool_prop_default_numeric(ZPOOL_PROP_BOOTFS);
3654         spa->spa_delegation = zpool_prop_default_numeric(ZPOOL_PROP_DELEGATION);
3655         spa->spa_failmode = zpool_prop_default_numeric(ZPOOL_PROP_FAILUREMODE);
3656         spa->spa_autoexpand = zpool_prop_default_numeric(ZPOOL_PROP_AUTOEXPAND);
3657 
3658         if (props != NULL) {
3659                 spa_configfile_set(spa, props, B_FALSE);
3660                 spa_sync_props(props, tx);
3661         }
3662 
3663         dmu_tx_commit(tx);
3664 
3665         spa->spa_sync_on = B_TRUE;
3666         txg_sync_start(spa->spa_dsl_pool);
3667 
3668         /*
3669          * We explicitly wait for the first transaction to complete so that our
3670          * bean counters are appropriately updated.
3671          */
3672         txg_wait_synced(spa->spa_dsl_pool, txg);
3673 
3674         spa_config_sync(spa, B_FALSE, B_TRUE);
3675 
3676         spa_history_log_version(spa, "create");
3677 
3678         spa->spa_minref = refcount_count(&spa->spa_refcount);
3679 
3680         mutex_exit(&spa_namespace_lock);
3681 
3682         return (0);
3683 }
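
/*
 * A sketch of the vdev tree described by 'nvroot' for a simple mirrored
 * pool; the device paths are examples only, and the zfs ioctl path normally
 * builds this nvlist (along with props and zplprops) from the zpool(1M)
 * command line:
 *
 *      nvroot:
 *              ZPOOL_CONFIG_TYPE = VDEV_TYPE_ROOT
 *              ZPOOL_CONFIG_CHILDREN[0]:
 *                      ZPOOL_CONFIG_TYPE = VDEV_TYPE_MIRROR
 *                      ZPOOL_CONFIG_CHILDREN[0]:
 *                              ZPOOL_CONFIG_TYPE = VDEV_TYPE_DISK
 *                              ZPOOL_CONFIG_PATH = "/dev/dsk/c1t0d0s0"
 *                      ZPOOL_CONFIG_CHILDREN[1]:
 *                              ZPOOL_CONFIG_TYPE = VDEV_TYPE_DISK
 *                              ZPOOL_CONFIG_PATH = "/dev/dsk/c1t1d0s0"
 */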
3684 
3685 #ifdef _KERNEL
3686 /*
3687  * Get the root pool information from the root disk, then import the root pool
3688  * during the system boot up time.
3689  */
3690 extern int vdev_disk_read_rootlabel(char *, char *, nvlist_t **);
3691 
3692 static nvlist_t *
3693 spa_generate_rootconf(char *devpath, char *devid, uint64_t *guid)
3694 {
3695         nvlist_t *config;
3696         nvlist_t *nvtop, *nvroot;
3697         uint64_t pgid;
3698 
3699         if (vdev_disk_read_rootlabel(devpath, devid, &config) != 0)
3700                 return (NULL);
3701 
3702         /*
3703          * Add this top-level vdev to the child array.
3704          */
3705         VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
3706             &nvtop) == 0);
3707         VERIFY(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
3708             &pgid) == 0);
3709         VERIFY(nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID, guid) == 0);
3710 
3711         /*
3712          * Put this pool's top-level vdevs into a root vdev.
3713          */
3714         VERIFY(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, KM_SLEEP) == 0);
3715         VERIFY(nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE,
3716             VDEV_TYPE_ROOT) == 0);
3717         VERIFY(nvlist_add_uint64(nvroot, ZPOOL_CONFIG_ID, 0ULL) == 0);
3718         VERIFY(nvlist_add_uint64(nvroot, ZPOOL_CONFIG_GUID, pgid) == 0);
3719         VERIFY(nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
3720             &nvtop, 1) == 0);
3721 
3722         /*
3723          * Replace the existing vdev_tree with the new root vdev in
3724          * this pool's configuration (remove the old, add the new).
3725          */
3726         VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, nvroot) == 0);
3727         nvlist_free(nvroot);
3728         return (config);
3729 }
3730 
3731 /*
3732  * Walk the vdev tree and see if we can find a device with "better"
3733  * configuration. A configuration is "better" if the label on that
3734  * device has a more recent txg.
3735  */
3736 static void
3737 spa_alt_rootvdev(vdev_t *vd, vdev_t **avd, uint64_t *txg)
3738 {
3739         for (int c = 0; c < vd->vdev_children; c++)
3740                 spa_alt_rootvdev(vd->vdev_child[c], avd, txg);
3741 
3742         if (vd->vdev_ops->vdev_op_leaf) {
3743                 nvlist_t *label;
3744                 uint64_t label_txg;
3745 
3746                 if (vdev_disk_read_rootlabel(vd->vdev_physpath, vd->vdev_devid,
3747                     &label) != 0)
3748                         return;
3749 
3750                 VERIFY(nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_TXG,
3751                     &label_txg) == 0);
3752 
3753                 /*
3754                  * Do we have a better boot device?
3755                  */
3756                 if (label_txg > *txg) {
3757                         *txg = label_txg;
3758                         *avd = vd;
3759                 }
3760                 nvlist_free(label);
3761         }
3762 }
3763 
3764 /*
3765  * Import a root pool.
3766  *
3767  * For x86, devpath_list will consist of devid and/or physpath name of
3768  * the vdev (e.g. "id1,sd@SSEAGATE..." or "/pci@1f,0/ide@d/disk@0,0:a").
3769  * The GRUB "findroot" command will return the vdev we should boot.
3770  *
3771  * For SPARC, devpath_list consists of the physpath name of the booting
3772  * device, whether the root pool is a single-device pool or a mirrored pool.
3773  * e.g.
3774  *      "/pci@1f,0/ide@d/disk@0,0:a"
3775  */
3776 int
3777 spa_import_rootpool(char *devpath, char *devid)
3778 {
3779         spa_t *spa;
3780         vdev_t *rvd, *bvd, *avd = NULL;
3781         nvlist_t *config, *nvtop;
3782         uint64_t guid, txg;
3783         char *pname;
3784         int error;
3785 
3786         /*
3787          * Read the label from the boot device and generate a configuration.
3788          */
3789         config = spa_generate_rootconf(devpath, devid, &guid);
3790 #if defined(_OBP) && defined(_KERNEL)
3791         if (config == NULL) {
3792                 if (strstr(devpath, "/iscsi/ssd") != NULL) {
3793                         /* iscsi boot */
3794                         get_iscsi_bootpath_phy(devpath);
3795                         config = spa_generate_rootconf(devpath, devid, &guid);
3796                 }
3797         }
3798 #endif
3799         if (config == NULL) {
3800                 cmn_err(CE_NOTE, "Cannot read the pool label from '%s'",
3801                     devpath);
3802                 return (SET_ERROR(EIO));
3803         }
3804 
3805         VERIFY(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
3806             &pname) == 0);
3807         VERIFY(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG, &txg) == 0);
3808 
3809         mutex_enter(&spa_namespace_lock);
3810         if ((spa = spa_lookup(pname)) != NULL) {
3811                 /*
3812                  * Remove the existing root pool from the namespace so that we
3813                  * can replace it with the correct config we just read in.
3814                  */
3815                 spa_remove(spa);
3816         }
3817 
3818         spa = spa_add(pname, config, NULL);
3819         spa->spa_is_root = B_TRUE;
3820         spa->spa_import_flags = ZFS_IMPORT_VERBATIM;
3821 
3822         /*
3823          * Build up a vdev tree based on the boot device's label config.
3824          */
3825         VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
3826             &nvtop) == 0);
3827         spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
3828         error = spa_config_parse(spa, &rvd, nvtop, NULL, 0,
3829             VDEV_ALLOC_ROOTPOOL);
3830         spa_config_exit(spa, SCL_ALL, FTAG);
3831         if (error) {
3832                 mutex_exit(&spa_namespace_lock);
3833                 nvlist_free(config);
3834                 cmn_err(CE_NOTE, "Can not parse the config for pool '%s'",
3835                     pname);
3836                 return (error);
3837         }
3838 
3839         /*
3840          * Get the boot vdev.
3841          */
3842         if ((bvd = vdev_lookup_by_guid(rvd, guid)) == NULL) {
3843                 cmn_err(CE_NOTE, "Can not find the boot vdev for guid %llu",
3844                     (u_longlong_t)guid);
3845                 error = SET_ERROR(ENOENT);
3846                 goto out;
3847         }
3848 
3849         /*
3850          * Determine if there is a better boot device.
3851          */
3852         avd = bvd;
3853         spa_alt_rootvdev(rvd, &avd, &txg);
3854         if (avd != bvd) {
3855                 cmn_err(CE_NOTE, "The boot device is 'degraded'. Please "
3856                     "try booting from '%s'", avd->vdev_path);
3857                 error = SET_ERROR(EINVAL);
3858                 goto out;
3859         }
3860 
3861         /*
3862          * If the boot device is part of a spare vdev then ensure that
3863          * we're booting off the active spare.
3864          */
3865         if (bvd->vdev_parent->vdev_ops == &vdev_spare_ops &&
3866             !bvd->vdev_isspare) {
3867                 cmn_err(CE_NOTE, "The boot device is currently spared. Please "
3868                     "try booting from '%s'",
3869                     bvd->vdev_parent->
3870                     vdev_child[bvd->vdev_parent->vdev_children - 1]->vdev_path);
3871                 error = SET_ERROR(EINVAL);
3872                 goto out;
3873         }
3874 
3875         error = 0;
3876 out:
3877         spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
3878         vdev_free(rvd);
3879         spa_config_exit(spa, SCL_ALL, FTAG);
3880         mutex_exit(&spa_namespace_lock);
3881 
3882         nvlist_free(config);
3883         return (error);
3884 }
3885 
3886 #endif  /* _KERNEL */
3887 
3888 /*
3889  * Import a non-root pool into the system.
3890  */
3891 int
3892 spa_import(const char *pool, nvlist_t *config, nvlist_t *props, uint64_t flags)
3893 {
3894         spa_t *spa;
3895         char *altroot = NULL;
3896         spa_load_state_t state = SPA_LOAD_IMPORT;
3897         zpool_rewind_policy_t policy;
3898         uint64_t mode = spa_mode_global;
3899         uint64_t readonly = B_FALSE;
3900         int error;
3901         nvlist_t *nvroot;
3902         nvlist_t **spares, **l2cache;
3903         uint_t nspares, nl2cache;
3904 
3905         /*
3906          * If a pool with this name exists, return failure.
3907          */
3908         mutex_enter(&spa_namespace_lock);
3909         if (spa_lookup(pool) != NULL) {
3910                 mutex_exit(&spa_namespace_lock);
3911                 return (SET_ERROR(EEXIST));
3912         }
3913 
3914         /*
3915          * Create and initialize the spa structure.
3916          */
3917         (void) nvlist_lookup_string(props,
3918             zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot);
3919         (void) nvlist_lookup_uint64(props,
3920             zpool_prop_to_name(ZPOOL_PROP_READONLY), &readonly);
3921         if (readonly)
3922                 mode = FREAD;
3923         spa = spa_add(pool, config, altroot);
3924         spa->spa_import_flags = flags;
3925 
3926         /*
3927          * Verbatim import - Take a pool and insert it into the namespace
3928          * as if it had been loaded at boot.
3929          */
3930         if (spa->spa_import_flags & ZFS_IMPORT_VERBATIM) {
3931                 if (props != NULL)
3932                         spa_configfile_set(spa, props, B_FALSE);
3933 
3934                 spa_config_sync(spa, B_FALSE, B_TRUE);
3935 
3936                 mutex_exit(&spa_namespace_lock);
3937                 return (0);
3938         }
3939 
3940         spa_activate(spa, mode);
3941 
3942         /*
3943          * Don't start async tasks until we know everything is healthy.
3944          */
3945         spa_async_suspend(spa);
3946 
3947         zpool_get_rewind_policy(config, &policy);
3948         if (policy.zrp_request & ZPOOL_DO_REWIND)
3949                 state = SPA_LOAD_RECOVER;
3950 
3951         /*
3952          * Pass off the heavy lifting to spa_load().  Pass TRUE for mosconfig
3953          * because the user-supplied config is actually the one to trust when
3954          * doing an import.
3955          */
3956         if (state != SPA_LOAD_RECOVER)
3957                 spa->spa_last_ubsync_txg = spa->spa_load_txg = 0;
3958 
3959         error = spa_load_best(spa, state, B_TRUE, policy.zrp_txg,
3960             policy.zrp_request);
3961 
3962         /*
3963          * Propagate anything learned while loading the pool and pass it
3964          * back to the caller (e.g. rewind info, missing devices, etc).
3965          */
3966         VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_LOAD_INFO,
3967             spa->spa_load_info) == 0);
3968 
3969         spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
3970         /*
3971          * Toss any existing sparelist, as it doesn't have any validity
3972          * anymore, and conflicts with spa_has_spare().
3973          */
3974         if (spa->spa_spares.sav_config) {
3975                 nvlist_free(spa->spa_spares.sav_config);
3976                 spa->spa_spares.sav_config = NULL;
3977                 spa_load_spares(spa);
3978         }
3979         if (spa->spa_l2cache.sav_config) {
3980                 nvlist_free(spa->spa_l2cache.sav_config);
3981                 spa->spa_l2cache.sav_config = NULL;
3982                 spa_load_l2cache(spa);
3983         }
3984 
3985         VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
3986             &nvroot) == 0);
3987         if (error == 0)
3988                 error = spa_validate_aux(spa, nvroot, -1ULL,
3989                     VDEV_ALLOC_SPARE);
3990         if (error == 0)
3991                 error = spa_validate_aux(spa, nvroot, -1ULL,
3992                     VDEV_ALLOC_L2CACHE);
3993         spa_config_exit(spa, SCL_ALL, FTAG);
3994 
3995         if (props != NULL)
3996                 spa_configfile_set(spa, props, B_FALSE);
3997 
3998         if (error != 0 || (props && spa_writeable(spa) &&
3999             (error = spa_prop_set(spa, props)))) {
4000                 spa_unload(spa);
4001                 spa_deactivate(spa);
4002                 spa_remove(spa);
4003                 mutex_exit(&spa_namespace_lock);
4004                 return (error);
4005         }
4006 
4007         spa_async_resume(spa);
4008 
4009         /*
4010          * Override any spares and level 2 cache devices as specified by
4011          * the user, as these may have correct device names/devids, etc.
4012          */
4013         if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
4014             &spares, &nspares) == 0) {
4015                 if (spa->spa_spares.sav_config)
4016                         VERIFY(nvlist_remove(spa->spa_spares.sav_config,
4017                             ZPOOL_CONFIG_SPARES, DATA_TYPE_NVLIST_ARRAY) == 0);
4018                 else
4019                         VERIFY(nvlist_alloc(&spa->spa_spares.sav_config,
4020                             NV_UNIQUE_NAME, KM_SLEEP) == 0);
4021                 VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config,
4022                     ZPOOL_CONFIG_SPARES, spares, nspares) == 0);
4023                 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
4024                 spa_load_spares(spa);
4025                 spa_config_exit(spa, SCL_ALL, FTAG);
4026                 spa->spa_spares.sav_sync = B_TRUE;
4027         }
4028         if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
4029             &l2cache, &nl2cache) == 0) {
4030                 if (spa->spa_l2cache.sav_config)
4031                         VERIFY(nvlist_remove(spa->spa_l2cache.sav_config,
4032                             ZPOOL_CONFIG_L2CACHE, DATA_TYPE_NVLIST_ARRAY) == 0);
4033                 else
4034                         VERIFY(nvlist_alloc(&spa->spa_l2cache.sav_config,
4035                             NV_UNIQUE_NAME, KM_SLEEP) == 0);
4036                 VERIFY(nvlist_add_nvlist_array(spa->spa_l2cache.sav_config,
4037                     ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0);
4038                 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
4039                 spa_load_l2cache(spa);
4040                 spa_config_exit(spa, SCL_ALL, FTAG);
4041                 spa->spa_l2cache.sav_sync = B_TRUE;
4042         }
4043 
4044         /*
4045          * Check for any removed devices.
4046          */
4047         if (spa->spa_autoreplace) {
4048                 spa_aux_check_removed(&spa->spa_spares);
4049                 spa_aux_check_removed(&spa->spa_l2cache);
4050         }
4051 
4052         if (spa_writeable(spa)) {
4053                 /*
4054                  * Update the config cache to include the newly-imported pool.
4055                  */
4056                 spa_config_update(spa, SPA_CONFIG_UPDATE_POOL);
4057         }
4058 
4059         /*
4060          * It's possible that the pool was expanded while it was exported.
4061          * We kick off an async task to handle this for us.
4062          */
4063         spa_async_request(spa, SPA_ASYNC_AUTOEXPAND);
4064 
4065         mutex_exit(&spa_namespace_lock);
4066         spa_history_log_version(spa, "import");
4067 
4068         return (0);
4069 }
4070 
4071 nvlist_t *
4072 spa_tryimport(nvlist_t *tryconfig)
4073 {
4074         nvlist_t *config = NULL;
4075         char *poolname;
4076         spa_t *spa;
4077         uint64_t state;
4078         int error;
4079 
4080         if (nvlist_lookup_string(tryconfig, ZPOOL_CONFIG_POOL_NAME, &poolname))
4081                 return (NULL);
4082 
4083         if (nvlist_lookup_uint64(tryconfig, ZPOOL_CONFIG_POOL_STATE, &state))
4084                 return (NULL);
4085 
4086         /*
4087          * Create and initialize the spa structure.
4088          */
4089         mutex_enter(&spa_namespace_lock);
4090         spa = spa_add(TRYIMPORT_NAME, tryconfig, NULL);
4091         spa_activate(spa, FREAD);
4092 
4093         /*
4094          * Pass off the heavy lifting to spa_load().
4095          * Pass TRUE for mosconfig because the user-supplied config
4096          * is actually the one to trust when doing an import.
4097          */
4098         error = spa_load(spa, SPA_LOAD_TRYIMPORT, SPA_IMPORT_EXISTING, B_TRUE);
4099 
4100         /*
4101          * If 'tryconfig' was at least parsable, return the current config.
4102          */
4103         if (spa->spa_root_vdev != NULL) {
4104                 config = spa_config_generate(spa, NULL, -1ULL, B_TRUE);
4105                 VERIFY(nvlist_add_string(config, ZPOOL_CONFIG_POOL_NAME,
4106                     poolname) == 0);
4107                 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_STATE,
4108                     state) == 0);
4109                 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_TIMESTAMP,
4110                     spa->spa_uberblock.ub_timestamp) == 0);
4111                 VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_LOAD_INFO,
4112                     spa->spa_load_info) == 0);
4113 
4114                 /*
4115                  * If the bootfs property exists on this pool then we
4116                  * copy it out so that external consumers can tell which
4117                  * pools are bootable.
4118                  */
4119                 if ((!error || error == EEXIST) && spa->spa_bootfs) {
4120                         char *tmpname = kmem_alloc(MAXPATHLEN, KM_SLEEP);
4121 
4122                         /*
4123                          * We have to play games with the name since the
4124                          * pool was opened as TRYIMPORT_NAME.
4125                          */
4126                         if (dsl_dsobj_to_dsname(spa_name(spa),
4127                             spa->spa_bootfs, tmpname) == 0) {
4128                                 char *cp;
4129                                 char *dsname = kmem_alloc(MAXPATHLEN, KM_SLEEP);
4130 
4131                                 cp = strchr(tmpname, '/');
4132                                 if (cp == NULL) {
4133                                         (void) strlcpy(dsname, tmpname,
4134                                             MAXPATHLEN);
4135                                 } else {
4136                                         (void) snprintf(dsname, MAXPATHLEN,
4137                                             "%s/%s", poolname, ++cp);
4138                                 }
4139                                 VERIFY(nvlist_add_string(config,
4140                                     ZPOOL_CONFIG_BOOTFS, dsname) == 0);
4141                                 kmem_free(dsname, MAXPATHLEN);
4142                         }
4143                         kmem_free(tmpname, MAXPATHLEN);
4144                 }
4145 
4146                 /*
4147                  * Add the list of hot spares and level 2 cache devices.
4148                  */
4149                 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
4150                 spa_add_spares(spa, config);
4151                 spa_add_l2cache(spa, config);
4152                 spa_config_exit(spa, SCL_CONFIG, FTAG);
4153         }
4154 
4155         spa_unload(spa);
4156         spa_deactivate(spa);
4157         spa_remove(spa);
4158         mutex_exit(&spa_namespace_lock);
4159 
4160         return (config);
4161 }
4162 
4163 /*
4164  * Pool export/destroy
4165  *
4166  * The act of destroying or exporting a pool is very simple.  We make sure there
4167  * is no more pending I/O and any references to the pool are gone.  Then, we
4168  * update the pool state and sync all the labels to disk, removing the
4169  * configuration from the cache afterwards. If the 'hardforce' flag is set, then
4170  * we don't sync the labels or remove the configuration cache.
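 *
 * Illustratively, 'zpool destroy <pool>' ends up in spa_destroy() and
 * 'zpool export [-f] <pool>' in spa_export(); both are thin wrappers
 * around spa_export_common() below.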
4171  */
4172 static int
4173 spa_export_common(char *pool, int new_state, nvlist_t **oldconfig,
4174     boolean_t force, boolean_t hardforce)
4175 {
4176         spa_t *spa;
4177 
4178         if (oldconfig)
4179                 *oldconfig = NULL;
4180 
4181         if (!(spa_mode_global & FWRITE))
4182                 return (SET_ERROR(EROFS));
4183 
4184         mutex_enter(&spa_namespace_lock);
4185         if ((spa = spa_lookup(pool)) == NULL) {
4186                 mutex_exit(&spa_namespace_lock);
4187                 return (SET_ERROR(ENOENT));
4188         }
4189 
4190         /*
4191          * Put a hold on the pool, drop the namespace lock, stop async tasks,
4192          * reacquire the namespace lock, and see if we can export.
4193          */
4194         spa_open_ref(spa, FTAG);
4195         mutex_exit(&spa_namespace_lock);
4196         spa_async_suspend(spa);
4197         mutex_enter(&spa_namespace_lock);
4198         spa_close(spa, FTAG);
4199 
4200         /*
4201          * The pool will be in core if it's openable,
4202          * in which case we can modify its state.
4203          */
4204         if (spa->spa_state != POOL_STATE_UNINITIALIZED && spa->spa_sync_on) {
4205                 /*
4206                  * Objsets may be open only because they're dirty, so we
4207                  * have to force a txg sync before checking spa_refcount.
4208                  */
4209                 txg_wait_synced(spa->spa_dsl_pool, 0);
4210 
4211                 /*
4212                  * A pool cannot be exported or destroyed if there are active
4213                  * references.  If we are resetting a pool, allow references by
4214                  * fault injection handlers.
4215                  */
4216                 if (!spa_refcount_zero(spa) ||
4217                     (spa->spa_inject_ref != 0 &&
4218                     new_state != POOL_STATE_UNINITIALIZED)) {
4219                         spa_async_resume(spa);
4220                         mutex_exit(&spa_namespace_lock);
4221                         return (SET_ERROR(EBUSY));
4222                 }
4223 
4224                 /*
4225                  * A pool cannot be exported if it has an active shared spare.
4226                  * This is to prevent other pools stealing the active spare
4227                  * from an exported pool.  The user may still force the
4228                  * export if they so choose.
4229                  */
4230                 if (!force && new_state == POOL_STATE_EXPORTED &&
4231                     spa_has_active_shared_spare(spa)) {
4232                         spa_async_resume(spa);
4233                         mutex_exit(&spa_namespace_lock);
4234                         return (SET_ERROR(EXDEV));
4235                 }
4236 
4237                 /*
4238                  * We want this to be reflected on every label,
4239                  * so mark them all dirty.  spa_unload() will do the
4240                  * final sync that pushes these changes out.
4241                  */
4242                 if (new_state != POOL_STATE_UNINITIALIZED && !hardforce) {
4243                         spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
4244                         spa->spa_state = new_state;
4245                         spa->spa_final_txg = spa_last_synced_txg(spa) +
4246                             TXG_DEFER_SIZE + 1;
4247                         vdev_config_dirty(spa->spa_root_vdev);
4248                         spa_config_exit(spa, SCL_ALL, FTAG);
4249                 }
4250         }
4251 
4252         spa_event_notify(spa, NULL, ESC_ZFS_POOL_DESTROY);
4253 
4254         if (spa->spa_state != POOL_STATE_UNINITIALIZED) {
4255                 spa_unload(spa);
4256                 spa_deactivate(spa);
4257         }
4258 
4259         if (oldconfig && spa->spa_config)
4260                 VERIFY(nvlist_dup(spa->spa_config, oldconfig, 0) == 0);
4261 
4262         if (new_state != POOL_STATE_UNINITIALIZED) {
4263                 if (!hardforce)
4264                         spa_config_sync(spa, B_TRUE, B_TRUE);
4265                 spa_remove(spa);
4266         }
4267         mutex_exit(&spa_namespace_lock);
4268 
4269         return (0);
4270 }
4271 
4272 /*
4273  * Destroy a storage pool.
4274  */
4275 int
4276 spa_destroy(char *pool)
4277 {
4278         return (spa_export_common(pool, POOL_STATE_DESTROYED, NULL,
4279             B_FALSE, B_FALSE));
4280 }
4281 
4282 /*
4283  * Export a storage pool.
4284  */
4285 int
4286 spa_export(char *pool, nvlist_t **oldconfig, boolean_t force,
4287     boolean_t hardforce)
4288 {
4289         return (spa_export_common(pool, POOL_STATE_EXPORTED, oldconfig,
4290             force, hardforce));
4291 }
4292 
4293 /*
4294  * Similar to spa_export(), this unloads the spa_t without actually removing it
4295  * from the namespace in any way.
4296  */
4297 int
4298 spa_reset(char *pool)
4299 {
4300         return (spa_export_common(pool, POOL_STATE_UNINITIALIZED, NULL,
4301             B_FALSE, B_FALSE));
4302 }
4303 
4304 /*
4305  * ==========================================================================
4306  * Device manipulation
4307  * ==========================================================================
4308  */
4309 
4310 /*
4311  * Add a device to a storage pool.
4312  */
4313 int
4314 spa_vdev_add(spa_t *spa, nvlist_t *nvroot)
4315 {
4316         uint64_t txg, id;
4317         int error;
4318         vdev_t *rvd = spa->spa_root_vdev;
4319         vdev_t *vd, *tvd;
4320         nvlist_t **spares, **l2cache;
4321         uint_t nspares, nl2cache;
4322 
4323         ASSERT(spa_writeable(spa));
4324 
4325         txg = spa_vdev_enter(spa);
4326 
4327         if ((error = spa_config_parse(spa, &vd, nvroot, NULL, 0,
4328             VDEV_ALLOC_ADD)) != 0)
4329                 return (spa_vdev_exit(spa, NULL, txg, error));
4330 
4331         spa->spa_pending_vdev = vd;  /* spa_vdev_exit() will clear this */
4332 
4333         if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, &spares,
4334             &nspares) != 0)
4335                 nspares = 0;
4336 
4337         if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE, &l2cache,
4338             &nl2cache) != 0)
4339                 nl2cache = 0;
4340 
4341         if (vd->vdev_children == 0 && nspares == 0 && nl2cache == 0)
4342                 return (spa_vdev_exit(spa, vd, txg, EINVAL));
4343 
4344         if (vd->vdev_children != 0 &&
4345             (error = vdev_create(vd, txg, B_FALSE)) != 0)
4346                 return (spa_vdev_exit(spa, vd, txg, error));
4347 
4348         /*
4349          * We must validate the spares and l2cache devices after checking the
4350          * children.  Otherwise, vdev_inuse() will blindly overwrite the spare.
4351          */
4352         if ((error = spa_validate_aux(spa, nvroot, txg, VDEV_ALLOC_ADD)) != 0)
4353                 return (spa_vdev_exit(spa, vd, txg, error));
4354 
4355         /*
4356          * Transfer each new top-level vdev from vd to rvd.
4357          */
4358         for (int c = 0; c < vd->vdev_children; c++) {
4359 
4360                 /*
4361                  * Set the vdev id to the first hole, if one exists.
4362                  */
4363                 for (id = 0; id < rvd->vdev_children; id++) {
4364                         if (rvd->vdev_child[id]->vdev_ishole) {
4365                                 vdev_free(rvd->vdev_child[id]);
4366                                 break;
4367                         }
4368                 }
4369                 tvd = vd->vdev_child[c];
4370                 vdev_remove_child(vd, tvd);
4371                 tvd->vdev_id = id;
4372                 vdev_add_child(rvd, tvd);
4373                 vdev_config_dirty(tvd);
4374         }
4375 
4376         if (nspares != 0) {
4377                 spa_set_aux_vdevs(&spa->spa_spares, spares, nspares,
4378                     ZPOOL_CONFIG_SPARES);
4379                 spa_load_spares(spa);
4380                 spa->spa_spares.sav_sync = B_TRUE;
4381         }
4382 
4383         if (nl2cache != 0) {
4384                 spa_set_aux_vdevs(&spa->spa_l2cache, l2cache, nl2cache,
4385                     ZPOOL_CONFIG_L2CACHE);
4386                 spa_load_l2cache(spa);
4387                 spa->spa_l2cache.sav_sync = B_TRUE;
4388         }
4389 
4390         /*
4391          * We have to be careful when adding new vdevs to an existing pool.
4392          * If other threads start allocating from these vdevs before we
4393          * sync the config cache, and we lose power, then upon reboot we may
4394          * fail to open the pool because there are DVAs that the config cache
4395          * can't translate.  Therefore, we first add the vdevs without
4396          * initializing metaslabs; sync the config cache (via spa_vdev_exit());
4397          * and then let spa_config_update() initialize the new metaslabs.
4398          *
4399          * spa_load() checks for added-but-not-initialized vdevs, so that
4400          * if we lose power at any point in this sequence, the remaining
4401          * steps will be completed the next time we load the pool.
4402          */
4403         (void) spa_vdev_exit(spa, vd, txg, 0);
4404 
4405         mutex_enter(&spa_namespace_lock);
4406         spa_config_update(spa, SPA_CONFIG_UPDATE_POOL);
4407         mutex_exit(&spa_namespace_lock);
4408 
4409         return (0);
4410 }
4411 
4412 /*
4413  * Attach a device to a mirror.  The arguments are the path to any device
4414  * in the mirror, and the nvroot for the new device.  If the path specifies
4415  * a device that is not mirrored, we automatically insert the mirror vdev.
4416  *
4417  * If 'replacing' is specified, the new device is intended to replace the
4418  * existing device; in this case the two devices are made into their own
4419  * mirror using the 'replacing' vdev, which is functionally identical to
4420  * the mirror vdev (it actually reuses all the same ops) but has a few
4421  * extra rules: you can't attach to it after it's been created, and upon
4422  * completion of resilvering, the first disk (the one being replaced)
4423  * is automatically detached.
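 *
 * For example (illustrative only), attaching B to a plain, unmirrored
 * disk A turns
 *
 *	root -> A	into	root -> mirror -> (A, B)
 *
 * while replacing A with B yields root -> replacing -> (A, B), and A is
 * detached automatically once B finishes resilvering.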
4424  */
4425 int
4426 spa_vdev_attach(spa_t *spa, uint64_t guid, nvlist_t *nvroot, int replacing)
4427 {
4428         uint64_t txg, dtl_max_txg;
4429         vdev_t *rvd = spa->spa_root_vdev;
4430         vdev_t *oldvd, *newvd, *newrootvd, *pvd, *tvd;
4431         vdev_ops_t *pvops;
4432         char *oldvdpath, *newvdpath;
4433         int newvd_isspare;
4434         int error;
4435 
4436         ASSERT(spa_writeable(spa));
4437 
4438         txg = spa_vdev_enter(spa);
4439 
4440         oldvd = spa_lookup_by_guid(spa, guid, B_FALSE);
4441 
4442         if (oldvd == NULL)
4443                 return (spa_vdev_exit(spa, NULL, txg, ENODEV));
4444 
4445         if (!oldvd->vdev_ops->vdev_op_leaf)
4446                 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
4447 
4448         pvd = oldvd->vdev_parent;
4449 
4450         if ((error = spa_config_parse(spa, &newrootvd, nvroot, NULL, 0,
4451             VDEV_ALLOC_ATTACH)) != 0)
4452                 return (spa_vdev_exit(spa, NULL, txg, EINVAL));
4453 
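        /* the incoming nvroot must describe exactly one new leaf vdev */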
4454         if (newrootvd->vdev_children != 1)
4455                 return (spa_vdev_exit(spa, newrootvd, txg, EINVAL));
4456 
4457         newvd = newrootvd->vdev_child[0];
4458 
4459         if (!newvd->vdev_ops->vdev_op_leaf)
4460                 return (spa_vdev_exit(spa, newrootvd, txg, EINVAL));
4461 
4462         if ((error = vdev_create(newrootvd, txg, replacing)) != 0)
4463                 return (spa_vdev_exit(spa, newrootvd, txg, error));
4464 
4465         /*
4466          * Spares can't replace logs
4467          */
4468         if (oldvd->vdev_top->vdev_islog && newvd->vdev_isspare)
4469                 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
4470 
4471         if (!replacing) {
4472                 /*
4473                  * For attach, the only allowable parent is a mirror or the root
4474                  * vdev.
4475                  */
4476                 if (pvd->vdev_ops != &vdev_mirror_ops &&
4477                     pvd->vdev_ops != &vdev_root_ops)
4478                         return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
4479 
4480                 pvops = &vdev_mirror_ops;
4481         } else {
4482                 /*
4483                  * Active hot spares can only be replaced by inactive hot
4484                  * spares.
4485                  */
4486                 if (pvd->vdev_ops == &vdev_spare_ops &&
4487                     oldvd->vdev_isspare &&
4488                     !spa_has_spare(spa, newvd->vdev_guid))
4489                         return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
4490 
4491                 /*
4492                  * If the source is a hot spare, and the parent isn't already a
4493                  * spare, then we want to create a new hot spare.  Otherwise, we
4494                  * want to create a replacing vdev.  The user is not allowed to
4495                  * attach to a spared vdev child unless the 'isspare' state is
4496                  * the same (spare replaces spare, non-spare replaces
4497                  * non-spare).
4498                  */
4499                 if (pvd->vdev_ops == &vdev_replacing_ops &&
4500                     spa_version(spa) < SPA_VERSION_MULTI_REPLACE) {
4501                         return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
4502                 } else if (pvd->vdev_ops == &vdev_spare_ops &&
4503                     newvd->vdev_isspare != oldvd->vdev_isspare) {
4504                         return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
4505                 }
4506 
4507                 if (newvd->vdev_isspare)
4508                         pvops = &vdev_spare_ops;
4509                 else
4510                         pvops = &vdev_replacing_ops;
4511         }
4512 
4513         /*
4514          * Make sure the new device is big enough.
4515          */
4516         if (newvd->vdev_asize < vdev_get_min_asize(oldvd))
4517                 return (spa_vdev_exit(spa, newrootvd, txg, EOVERFLOW));
4518 
4519         /*
4520          * The new device cannot have a higher alignment requirement
4521          * than the top-level vdev.
4522          */
4523         if (newvd->vdev_ashift > oldvd->vdev_top->vdev_ashift)
4524                 return (spa_vdev_exit(spa, newrootvd, txg, EDOM));
4525 
4526         /*
4527          * If this is an in-place replacement, update oldvd's path and devid
4528          * to make it distinguishable from newvd, and unopenable from now on.
4529          */
4530         if (strcmp(oldvd->vdev_path, newvd->vdev_path) == 0) {
4531                 spa_strfree(oldvd->vdev_path);
4532                 oldvd->vdev_path = kmem_alloc(strlen(newvd->vdev_path) + 5,
4533                     KM_SLEEP);
4534                 (void) sprintf(oldvd->vdev_path, "%s/%s",
4535                     newvd->vdev_path, "old");
4536                 if (oldvd->vdev_devid != NULL) {
4537                         spa_strfree(oldvd->vdev_devid);
4538                         oldvd->vdev_devid = NULL;
4539                 }
4540         }
4541 
4542         /* mark the device being resilvered */
4543         newvd->vdev_resilver_txg = txg;
4544 
4545         /*
4546          * If the parent is not a mirror, or if we're replacing, insert the new
4547          * mirror/replacing/spare vdev above oldvd.
4548          */
4549         if (pvd->vdev_ops != pvops)
4550                 pvd = vdev_add_parent(oldvd, pvops);
4551 
4552         ASSERT(pvd->vdev_top->vdev_parent == rvd);
4553         ASSERT(pvd->vdev_ops == pvops);
4554         ASSERT(oldvd->vdev_parent == pvd);
4555 
4556         /*
4557          * Extract the new device from its root and add it to pvd.
4558          */
4559         vdev_remove_child(newrootvd, newvd);
4560         newvd->vdev_id = pvd->vdev_children;
4561         newvd->vdev_crtxg = oldvd->vdev_crtxg;
4562         vdev_add_child(pvd, newvd);
4563 
4564         tvd = newvd->vdev_top;
4565         ASSERT(pvd->vdev_top == tvd);
4566         ASSERT(tvd->vdev_parent == rvd);
4567 
4568         vdev_config_dirty(tvd);
4569 
4570         /*
4571          * Set newvd's DTL to [TXG_INITIAL, dtl_max_txg) so that we account
4572          * for any dmu_sync-ed blocks.  It will propagate upward when
4573          * spa_vdev_exit() calls vdev_dtl_reassess().
4574          */
4575         dtl_max_txg = txg + TXG_CONCURRENT_STATES;
4576 
4577         vdev_dtl_dirty(newvd, DTL_MISSING, TXG_INITIAL,
4578             dtl_max_txg - TXG_INITIAL);
4579 
4580         if (newvd->vdev_isspare) {
4581                 spa_spare_activate(newvd);
4582                 spa_event_notify(spa, newvd, ESC_ZFS_VDEV_SPARE);
4583         }
4584 
4585         oldvdpath = spa_strdup(oldvd->vdev_path);
4586         newvdpath = spa_strdup(newvd->vdev_path);
4587         newvd_isspare = newvd->vdev_isspare;
4588 
4589         /*
4590          * Mark newvd's DTL dirty in this txg.
4591          */
4592         vdev_dirty(tvd, VDD_DTL, newvd, txg);
4593 
4594         /*
4595          * Schedule the resilver to restart in the future. We do this to
4596          * ensure that dmu_sync-ed blocks have been stitched into the
4597          * respective datasets.
4598          */
4599         dsl_resilver_restart(spa->spa_dsl_pool, dtl_max_txg);
4600 
4601         /*
4602          * Commit the config
4603          */
4604         (void) spa_vdev_exit(spa, newrootvd, dtl_max_txg, 0);
4605 
4606         spa_history_log_internal(spa, "vdev attach", NULL,
4607             "%s vdev=%s %s vdev=%s",
4608             replacing && newvd_isspare ? "spare in" :
4609             replacing ? "replace" : "attach", newvdpath,
4610             replacing ? "for" : "to", oldvdpath);
4611 
4612         spa_strfree(oldvdpath);
4613         spa_strfree(newvdpath);
4614 
4615         if (spa->spa_bootfs)
4616                 spa_event_notify(spa, newvd, ESC_ZFS_BOOTFS_VDEV_ATTACH);
4617 
4618         return (0);
4619 }
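
/*
 * Illustrative caller sketch (not part of this file): assuming 'nvroot' is
 * an nvlist describing the single new leaf device and 'guid' identifies the
 * device being attached to or replaced, the ioctl path is expected to drive
 * an attach roughly as follows:
 *
 *	spa_t *spa;
 *	int error = spa_open(poolname, &spa, FTAG);
 *	if (error == 0) {
 *		error = spa_vdev_attach(spa, guid, nvroot, replacing);
 *		spa_close(spa, FTAG);
 *	}
 */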
4620 
4621 /*
4622  * Detach a device from a mirror or replacing vdev.
4623  *
4624  * If 'replace_done' is specified, only detach if the parent
4625  * is a replacing vdev.
4626  */
4627 int
4628 spa_vdev_detach(spa_t *spa, uint64_t guid, uint64_t pguid, int replace_done)
4629 {
4630         uint64_t txg;
4631         int error;
4632         vdev_t *rvd = spa->spa_root_vdev;
4633         vdev_t *vd, *pvd, *cvd, *tvd;
4634         boolean_t unspare = B_FALSE;
4635         uint64_t unspare_guid = 0;
4636         char *vdpath;
4637 
4638         ASSERT(spa_writeable(spa));
4639 
4640         txg = spa_vdev_enter(spa);
4641 
4642         vd = spa_lookup_by_guid(spa, guid, B_FALSE);
4643 
4644         if (vd == NULL)
4645                 return (spa_vdev_exit(spa, NULL, txg, ENODEV));
4646 
4647         if (!vd->vdev_ops->vdev_op_leaf)
4648                 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
4649 
4650         pvd = vd->vdev_parent;
4651 
4652         /*
4653          * If the parent/child relationship is not as expected, don't do it.
4654          * Consider M(A,R(B,C)) -- that is, a mirror of A with a replacing
4655          * vdev that's replacing B with C.  The user's intent in replacing
4656          * is to go from M(A,B) to M(A,C).  If the user decides to cancel
4657          * the replace by detaching C, the expected behavior is to end up
4658          * M(A,B).  But suppose that right after deciding to detach C,
4659          * the replacement of B completes.  We would have M(A,C), and then
4660          * ask to detach C, which would leave us with just A -- not what
4661          * the user wanted.  To prevent this, we make sure that the
4662          * parent/child relationship hasn't changed -- in this example,
4663          * that C's parent is still the replacing vdev R.
4664          */
4665         if (pvd->vdev_guid != pguid && pguid != 0)
4666                 return (spa_vdev_exit(spa, NULL, txg, EBUSY));
4667 
4668         /*
4669          * A replace_done detach requires a 'replacing' or 'spare' parent.
4670          */
4671         if (replace_done && pvd->vdev_ops != &vdev_replacing_ops &&
4672             pvd->vdev_ops != &vdev_spare_ops)
4673                 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
4674 
4675         ASSERT(pvd->vdev_ops != &vdev_spare_ops ||
4676             spa_version(spa) >= SPA_VERSION_SPARES);
4677 
4678         /*
4679          * Only mirror, replacing, and spare vdevs support detach.
4680          */
4681         if (pvd->vdev_ops != &vdev_replacing_ops &&
4682             pvd->vdev_ops != &vdev_mirror_ops &&
4683             pvd->vdev_ops != &vdev_spare_ops)
4684                 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
4685 
4686         /*
4687          * If this device has the only valid copy of some data,
4688          * we cannot safely detach it.
4689          */
4690         if (vdev_dtl_required(vd))
4691                 return (spa_vdev_exit(spa, NULL, txg, EBUSY));
4692 
4693         ASSERT(pvd->vdev_children >= 2);
4694 
4695         /*
4696          * If we are detaching the second disk from a replacing vdev, then
4697          * check to see if we changed the original vdev's path to have "/old"
4698          * at the end in spa_vdev_attach().  If so, undo that change now.
4699          */
4700         if (pvd->vdev_ops == &vdev_replacing_ops && vd->vdev_id > 0 &&
4701             vd->vdev_path != NULL) {
4702                 size_t len = strlen(vd->vdev_path);
4703 
4704                 for (int c = 0; c < pvd->vdev_children; c++) {
4705                         cvd = pvd->vdev_child[c];
4706 
4707                         if (cvd == vd || cvd->vdev_path == NULL)
4708                                 continue;
4709 
4710                         if (strncmp(cvd->vdev_path, vd->vdev_path, len) == 0 &&
4711                             strcmp(cvd->vdev_path + len, "/old") == 0) {
4712                                 spa_strfree(cvd->vdev_path);
4713                                 cvd->vdev_path = spa_strdup(vd->vdev_path);
4714                                 break;
4715                         }
4716                 }
4717         }
4718 
4719         /*
4720          * If we are detaching the original disk from a spare, then it implies
4721          * that the spare should become a real disk, and be removed from the
4722          * active spare list for the pool.
4723          */
4724         if (pvd->vdev_ops == &vdev_spare_ops &&
4725             vd->vdev_id == 0 &&
4726             pvd->vdev_child[pvd->vdev_children - 1]->vdev_isspare)
4727                 unspare = B_TRUE;
4728 
4729         /*
4730          * Erase the disk labels so the disk can be used for other things.
4731          * This must be done after all other error cases are handled,
4732          * but before we disembowel vd (so we can still do I/O to it).
4733          * But if we can't do it, don't treat the error as fatal --
4734          * it may be that the unwritability of the disk is the reason
4735          * it's being detached!
4736          */
4737         error = vdev_label_init(vd, 0, VDEV_LABEL_REMOVE);
4738 
4739         /*
4740          * Remove vd from its parent and compact the parent's children.
4741          */
4742         vdev_remove_child(pvd, vd);
4743         vdev_compact_children(pvd);
4744 
4745         /*
4746          * Remember one of the remaining children so we can get tvd below.
4747          */
4748         cvd = pvd->vdev_child[pvd->vdev_children - 1];
4749 
4750         /*
4751          * If we need to remove the remaining child from the list of hot spares,
4752          * do it now, marking the vdev as no longer a spare in the process.
4753          * We must do this before vdev_remove_parent(), because that can
4754          * change the GUID if it creates a new toplevel GUID.  For a similar
4755          * reason, we must remove the spare now, in the same txg as the detach;
4756          * otherwise someone could attach a new sibling, change the GUID, and
4757          * the subsequent attempt to spa_vdev_remove(unspare_guid) would fail.
4758          */
4759         if (unspare) {
4760                 ASSERT(cvd->vdev_isspare);
4761                 spa_spare_remove(cvd);
4762                 unspare_guid = cvd->vdev_guid;
4763                 (void) spa_vdev_remove(spa, unspare_guid, B_TRUE);
4764                 cvd->vdev_unspare = B_TRUE;
4765         }
4766 
4767         /*
4768          * If the parent mirror/replacing vdev only has one child,
4769          * the parent is no longer needed.  Remove it from the tree.
4770          */
4771         if (pvd->vdev_children == 1) {
4772                 if (pvd->vdev_ops == &vdev_spare_ops)
4773                         cvd->vdev_unspare = B_FALSE;
4774                 vdev_remove_parent(cvd);
4775         }
4776 
4777 
4778         /*
4779          * We don't set tvd until now because the parent we just removed
4780          * may have been the previous top-level vdev.
4781          */
4782         tvd = cvd->vdev_top;
4783         ASSERT(tvd->vdev_parent == rvd);
4784 
4785         /*
4786          * Reevaluate the parent vdev state.
4787          */
4788         vdev_propagate_state(cvd);
4789 
4790         /*
4791          * If the 'autoexpand' property is set on the pool then automatically
4792          * try to expand the size of the pool. For example if the device we
4793          * just detached was smaller than the others, it may be possible to
4794          * add metaslabs (i.e. grow the pool). We need to reopen the vdev
4795          * first so that we can obtain the updated sizes of the leaf vdevs.
4796          */
4797         if (spa->spa_autoexpand) {
4798                 vdev_reopen(tvd);
4799                 vdev_expand(tvd, txg);
4800         }
4801 
4802         vdev_config_dirty(tvd);
4803 
4804         /*
4805          * Mark vd's DTL as dirty in this txg.  vdev_dtl_sync() will see that
4806          * vd->vdev_detached is set and free vd's DTL object in syncing context.
4807          * But first make sure we're not on any *other* txg's DTL list, to
4808          * prevent vd from being accessed after it's freed.
4809          */
4810         vdpath = spa_strdup(vd->vdev_path);
4811         for (int t = 0; t < TXG_SIZE; t++)
4812                 (void) txg_list_remove_this(&tvd->vdev_dtl_list, vd, t);
4813         vd->vdev_detached = B_TRUE;
4814         vdev_dirty(tvd, VDD_DTL, vd, txg);
4815 
4816         spa_event_notify(spa, vd, ESC_ZFS_VDEV_REMOVE);
4817 
4818         /* hang on to the spa before we release the lock */
4819         spa_open_ref(spa, FTAG);
4820 
4821         error = spa_vdev_exit(spa, vd, txg, 0);
4822 
4823         spa_history_log_internal(spa, "detach", NULL,
4824             "vdev=%s", vdpath);
4825         spa_strfree(vdpath);
4826 
4827         /*
4828          * If this was the removal of the original device in a hot spare vdev,
4829          * then we want to go through and remove the device from the hot spare
4830          * list of every other pool.
4831          */
4832         if (unspare) {
4833                 spa_t *altspa = NULL;
4834 
4835                 mutex_enter(&spa_namespace_lock);
4836                 while ((altspa = spa_next(altspa)) != NULL) {
4837                         if (altspa->spa_state != POOL_STATE_ACTIVE ||
4838                             altspa == spa)
4839                                 continue;
4840 
4841                         spa_open_ref(altspa, FTAG);
4842                         mutex_exit(&spa_namespace_lock);
4843                         (void) spa_vdev_remove(altspa, unspare_guid, B_TRUE);
4844                         mutex_enter(&spa_namespace_lock);
4845                         spa_close(altspa, FTAG);
4846                 }
4847                 mutex_exit(&spa_namespace_lock);
4848 
4849                 /* search the rest of the vdevs for spares to remove */
4850                 spa_vdev_resilver_done(spa);
4851         }
4852 
4853         /* all done with the spa; OK to release */
4854         mutex_enter(&spa_namespace_lock);
4855         spa_close(spa, FTAG);
4856         mutex_exit(&spa_namespace_lock);
4857 
4858         return (error);
4859 }
4860 
4861 /*
4862  * Split a set of devices from their mirrors, and create a new pool from them.
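 *
 * For example (illustrative only), splitting a pool built from two 2-way
 * mirrors detaches one side of each mirror and assembles the two detached
 * disks into a new, independent pool that can then be imported elsewhere.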
4863  */
4864 int
4865 spa_vdev_split_mirror(spa_t *spa, char *newname, nvlist_t *config,
4866     nvlist_t *props, boolean_t exp)
4867 {
4868         int error = 0;
4869         uint64_t txg, *glist;
4870         spa_t *newspa;
4871         uint_t c, children, lastlog;
4872         nvlist_t **child, *nvl, *tmp;
4873         dmu_tx_t *tx;
4874         char *altroot = NULL;
4875         vdev_t *rvd, **vml = NULL;                      /* vdev modify list */
4876         boolean_t activate_slog;
4877 
4878         ASSERT(spa_writeable(spa));
4879 
4880         txg = spa_vdev_enter(spa);
4881 
4882         /* clear the log and flush everything up to now */
4883         activate_slog = spa_passivate_log(spa);
4884         (void) spa_vdev_config_exit(spa, NULL, txg, 0, FTAG);
4885         error = spa_offline_log(spa);
4886         txg = spa_vdev_config_enter(spa);
4887 
4888         if (activate_slog)
4889                 spa_activate_log(spa);
4890 
4891         if (error != 0)
4892                 return (spa_vdev_exit(spa, NULL, txg, error));
4893 
4894         /* check new spa name before going any further */
4895         if (spa_lookup(newname) != NULL)
4896                 return (spa_vdev_exit(spa, NULL, txg, EEXIST));
4897 
4898         /*
4899          * scan through all the children to ensure they're all mirrors
4900          */
4901         if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nvl) != 0 ||
4902             nvlist_lookup_nvlist_array(nvl, ZPOOL_CONFIG_CHILDREN, &child,
4903             &children) != 0)
4904                 return (spa_vdev_exit(spa, NULL, txg, EINVAL));
4905 
4906         /* first, check to ensure we've got the right child count */
4907         rvd = spa->spa_root_vdev;
4908         lastlog = 0;
4909         for (c = 0; c < rvd->vdev_children; c++) {
4910                 vdev_t *vd = rvd->vdev_child[c];
4911 
4912                 /* don't count the holes & logs as children */
4913                 if (vd->vdev_islog || vd->vdev_ishole) {
4914                         if (lastlog == 0)
4915                                 lastlog = c;
4916                         continue;
4917                 }
4918 
4919                 lastlog = 0;
4920         }
4921         if (children != (lastlog != 0 ? lastlog : rvd->vdev_children))
4922                 return (spa_vdev_exit(spa, NULL, txg, EINVAL));
4923 
4924         /* next, ensure no spare or cache devices are part of the split */
4925         if (nvlist_lookup_nvlist(nvl, ZPOOL_CONFIG_SPARES, &tmp) == 0 ||
4926             nvlist_lookup_nvlist(nvl, ZPOOL_CONFIG_L2CACHE, &tmp) == 0)
4927                 return (spa_vdev_exit(spa, NULL, txg, EINVAL));
4928 
4929         vml = kmem_zalloc(children * sizeof (vdev_t *), KM_SLEEP);
4930         glist = kmem_zalloc(children * sizeof (uint64_t), KM_SLEEP);
4931 
4932         /* then, loop over each vdev and validate it */
4933         for (c = 0; c < children; c++) {
4934                 uint64_t is_hole = 0;
4935 
4936                 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,
4937                     &is_hole);
4938 
4939                 if (is_hole != 0) {
4940                         if (spa->spa_root_vdev->vdev_child[c]->vdev_ishole ||
4941                             spa->spa_root_vdev->vdev_child[c]->vdev_islog) {
4942                                 continue;
4943                         } else {
4944                                 error = SET_ERROR(EINVAL);
4945                                 break;
4946                         }
4947                 }
4948 
4949                 /* which disk is going to be split? */
4950                 if (nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_GUID,
4951                     &glist[c]) != 0) {
4952                         error = SET_ERROR(EINVAL);
4953                         break;
4954                 }
4955 
4956                 /* look it up in the spa */
4957                 vml[c] = spa_lookup_by_guid(spa, glist[c], B_FALSE);
4958                 if (vml[c] == NULL) {
4959                         error = SET_ERROR(ENODEV);
4960                         break;
4961                 }
4962 
4963                 /* make sure there's nothing stopping the split */
4964                 if (vml[c]->vdev_parent->vdev_ops != &vdev_mirror_ops ||
4965                     vml[c]->vdev_islog ||
4966                     vml[c]->vdev_ishole ||
4967                     vml[c]->vdev_isspare ||
4968                     vml[c]->vdev_isl2cache ||
4969                     !vdev_writeable(vml[c]) ||
4970                     vml[c]->vdev_children != 0 ||
4971                     vml[c]->vdev_state != VDEV_STATE_HEALTHY ||
4972                     c != spa->spa_root_vdev->vdev_child[c]->vdev_id) {
4973                         error = SET_ERROR(EINVAL);
4974                         break;
4975                 }
4976 
4977                 if (vdev_dtl_required(vml[c])) {
4978                         error = SET_ERROR(EBUSY);
4979                         break;
4980                 }
4981 
4982                 /* we need certain info from the top level */
4983                 VERIFY(nvlist_add_uint64(child[c], ZPOOL_CONFIG_METASLAB_ARRAY,
4984                     vml[c]->vdev_top->vdev_ms_array) == 0);
4985                 VERIFY(nvlist_add_uint64(child[c], ZPOOL_CONFIG_METASLAB_SHIFT,
4986                     vml[c]->vdev_top->vdev_ms_shift) == 0);
4987                 VERIFY(nvlist_add_uint64(child[c], ZPOOL_CONFIG_ASIZE,
4988                     vml[c]->vdev_top->vdev_asize) == 0);
4989                 VERIFY(nvlist_add_uint64(child[c], ZPOOL_CONFIG_ASHIFT,
4990                     vml[c]->vdev_top->vdev_ashift) == 0);
4991         }
4992 
4993         if (error != 0) {
4994                 kmem_free(vml, children * sizeof (vdev_t *));
4995                 kmem_free(glist, children * sizeof (uint64_t));
4996                 return (spa_vdev_exit(spa, NULL, txg, error));
4997         }
4998 
4999         /* stop writers from using the disks */
5000         for (c = 0; c < children; c++) {
5001                 if (vml[c] != NULL)
5002                         vml[c]->vdev_offline = B_TRUE;
5003         }
5004         vdev_reopen(spa->spa_root_vdev);
5005 
5006         /*
5007          * Temporarily record the splitting vdevs in the spa config.  This
5008          * will disappear once the config is regenerated.
5009          */
5010         VERIFY(nvlist_alloc(&nvl, NV_UNIQUE_NAME, KM_SLEEP) == 0);
5011         VERIFY(nvlist_add_uint64_array(nvl, ZPOOL_CONFIG_SPLIT_LIST,
5012             glist, children) == 0);
5013         kmem_free(glist, children * sizeof (uint64_t));
5014 
5015         mutex_enter(&spa->spa_props_lock);
5016         VERIFY(nvlist_add_nvlist(spa->spa_config, ZPOOL_CONFIG_SPLIT,
5017             nvl) == 0);
5018         mutex_exit(&spa->spa_props_lock);
5019         spa->spa_config_splitting = nvl;
5020         vdev_config_dirty(spa->spa_root_vdev);
5021 
5022         /* configure and create the new pool */
5023         VERIFY(nvlist_add_string(config, ZPOOL_CONFIG_POOL_NAME, newname) == 0);
5024         VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_STATE,
5025             exp ? POOL_STATE_EXPORTED : POOL_STATE_ACTIVE) == 0);
5026         VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_VERSION,
5027             spa_version(spa)) == 0);
5028         VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_TXG,
5029             spa->spa_config_txg) == 0);
5030         VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_GUID,
5031             spa_generate_guid(NULL)) == 0);
5032         (void) nvlist_lookup_string(props,
5033             zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot);
5034 
5035         /* add the new pool to the namespace */
5036         newspa = spa_add(newname, config, altroot);
5037         newspa->spa_config_txg = spa->spa_config_txg;
5038         spa_set_log_state(newspa, SPA_LOG_CLEAR);
5039 
5040         /* release the spa config lock, retaining the namespace lock */
5041         spa_vdev_config_exit(spa, NULL, txg, 0, FTAG);
5042 
5043         if (zio_injection_enabled)
5044                 zio_handle_panic_injection(spa, FTAG, 1);
5045 
5046         spa_activate(newspa, spa_mode_global);
5047         spa_async_suspend(newspa);
5048 
5049         /* create the new pool from the disks of the original pool */
5050         error = spa_load(newspa, SPA_LOAD_IMPORT, SPA_IMPORT_ASSEMBLE, B_TRUE);
5051         if (error)
5052                 goto out;
5053 
5054         /* if that worked, generate a real config for the new pool */
5055         if (newspa->spa_root_vdev != NULL) {
5056                 VERIFY(nvlist_alloc(&newspa->spa_config_splitting,
5057                     NV_UNIQUE_NAME, KM_SLEEP) == 0);
5058                 VERIFY(nvlist_add_uint64(newspa->spa_config_splitting,
5059                     ZPOOL_CONFIG_SPLIT_GUID, spa_guid(spa)) == 0);
5060                 spa_config_set(newspa, spa_config_generate(newspa, NULL, -1ULL,
5061                     B_TRUE));
5062         }
5063 
5064         /* set the props */
5065         if (props != NULL) {
5066                 spa_configfile_set(newspa, props, B_FALSE);
5067                 error = spa_prop_set(newspa, props);
5068                 if (error)
5069                         goto out;
5070         }
5071 
5072         /* flush everything */
5073         txg = spa_vdev_config_enter(newspa);
5074         vdev_config_dirty(newspa->spa_root_vdev);
5075         (void) spa_vdev_config_exit(newspa, NULL, txg, 0, FTAG);
5076 
5077         if (zio_injection_enabled)
5078                 zio_handle_panic_injection(spa, FTAG, 2);
5079 
5080         spa_async_resume(newspa);
5081 
5082         /* finally, update the original pool's config */
5083         txg = spa_vdev_config_enter(spa);
5084         tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
5085         error = dmu_tx_assign(tx, TXG_WAIT);
5086         if (error != 0)
5087                 dmu_tx_abort(tx);
5088         for (c = 0; c < children; c++) {
5089                 if (vml[c] != NULL) {
5090                         vdev_split(vml[c]);
5091                         if (error == 0)
5092                                 spa_history_log_internal(spa, "detach", tx,
5093                                     "vdev=%s", vml[c]->vdev_path);
5094                         vdev_free(vml[c]);
5095                 }
5096         }
5097         vdev_config_dirty(spa->spa_root_vdev);
5098         spa->spa_config_splitting = NULL;
5099         nvlist_free(nvl);
5100         if (error == 0)
5101                 dmu_tx_commit(tx);
5102         (void) spa_vdev_exit(spa, NULL, txg, 0);
5103 
5104         if (zio_injection_enabled)
5105                 zio_handle_panic_injection(spa, FTAG, 3);
5106 
5107         /* split is complete; log a history record */
5108         spa_history_log_internal(newspa, "split", NULL,
5109             "from pool %s", spa_name(spa));
5110 
5111         kmem_free(vml, children * sizeof (vdev_t *));
5112 
5113         /* if we're not going to mount the filesystems in userland, export */
5114         if (exp)
5115                 error = spa_export_common(newname, POOL_STATE_EXPORTED, NULL,
5116                     B_FALSE, B_FALSE);
5117 
5118         return (error);
5119 
5120 out:
5121         spa_unload(newspa);
5122         spa_deactivate(newspa);
5123         spa_remove(newspa);
5124 
5125         txg = spa_vdev_config_enter(spa);
5126 
5127         /* re-online all offlined disks */
5128         for (c = 0; c < children; c++) {
5129                 if (vml[c] != NULL)
5130                         vml[c]->vdev_offline = B_FALSE;
5131         }
5132         vdev_reopen(spa->spa_root_vdev);
5133 
5134         nvlist_free(spa->spa_config_splitting);
5135         spa->spa_config_splitting = NULL;
5136         (void) spa_vdev_exit(spa, NULL, txg, error);
5137 
5138         kmem_free(vml, children * sizeof (vdev_t *));
5139         return (error);
5140 }
5141 
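/*
 * Look up the nvlist whose ZPOOL_CONFIG_GUID matches 'target_guid' in the
 * 'count'-element array 'nvpp'; return NULL if there is no such entry.
 */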
5142 static nvlist_t *
5143 spa_nvlist_lookup_by_guid(nvlist_t **nvpp, int count, uint64_t target_guid)
5144 {
5145         for (int i = 0; i < count; i++) {
5146                 uint64_t guid;
5147 
5148                 VERIFY(nvlist_lookup_uint64(nvpp[i], ZPOOL_CONFIG_GUID,
5149                     &guid) == 0);
5150 
5151                 if (guid == target_guid)
5152                         return (nvpp[i]);
5153         }
5154 
5155         return (NULL);
5156 }
5157 
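/*
 * Rebuild the named aux device array (spares or l2cache) in 'config' with
 * 'dev_to_remove' omitted.
 */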
5158 static void
5159 spa_vdev_remove_aux(nvlist_t *config, char *name, nvlist_t **dev, int count,
5160         nvlist_t *dev_to_remove)
5161 {
5162         nvlist_t **newdev = NULL;
5163 
5164         if (count > 1)
5165                 newdev = kmem_alloc((count - 1) * sizeof (void *), KM_SLEEP);
5166 
5167         for (int i = 0, j = 0; i < count; i++) {
5168                 if (dev[i] == dev_to_remove)
5169                         continue;
5170                 VERIFY(nvlist_dup(dev[i], &newdev[j++], KM_SLEEP) == 0);
5171         }
5172 
5173         VERIFY(nvlist_remove(config, name, DATA_TYPE_NVLIST_ARRAY) == 0);
5174         VERIFY(nvlist_add_nvlist_array(config, name, newdev, count - 1) == 0);
5175 
5176         for (int i = 0; i < count - 1; i++)
5177                 nvlist_free(newdev[i]);
5178 
5179         if (count > 1)
5180                 kmem_free(newdev, (count - 1) * sizeof (void *));
5181 }
5182 
5183 /*
5184  * Evacuate the device.
5185  */
5186 static int
5187 spa_vdev_remove_evacuate(spa_t *spa, vdev_t *vd)
5188 {
5189         uint64_t txg;
5190         int error = 0;
5191 
5192         ASSERT(MUTEX_HELD(&spa_namespace_lock));
5193         ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0);
5194         ASSERT(vd == vd->vdev_top);
5195 
5196         /*
5197          * Evacuate the device.  We don't hold the config lock as writer
5198          * since we need to do I/O but we do keep the
5199          * spa_namespace_lock held.  Once this completes the device
5200          * should no longer have any blocks allocated on it.
5201          */
5202         if (vd->vdev_islog) {
5203                 if (vd->vdev_stat.vs_alloc != 0)
5204                         error = spa_offline_log(spa);
5205         } else {
5206                 error = SET_ERROR(ENOTSUP);
5207         }
5208 
5209         if (error)
5210                 return (error);
5211 
5212         /*
5213          * The evacuation succeeded.  Remove any remaining MOS metadata
5214          * associated with this vdev, and wait for these changes to sync.
5215          */
5216         ASSERT0(vd->vdev_stat.vs_alloc);
5217         txg = spa_vdev_config_enter(spa);
5218         vd->vdev_removing = B_TRUE;
5219         vdev_dirty_leaves(vd, VDD_DTL, txg);
5220         vdev_config_dirty(vd);
5221         spa_vdev_config_exit(spa, NULL, txg, 0, FTAG);
5222 
5223         return (0);
5224 }
5225 
5226 /*
5227  * Complete the removal by cleaning up the namespace.
5228  */
5229 static void
5230 spa_vdev_remove_from_namespace(spa_t *spa, vdev_t *vd)
5231 {
5232         vdev_t *rvd = spa->spa_root_vdev;
5233         uint64_t id = vd->vdev_id;
5234         boolean_t last_vdev = (id == (rvd->vdev_children - 1));
5235 
5236         ASSERT(MUTEX_HELD(&spa_namespace_lock));
5237         ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
5238         ASSERT(vd == vd->vdev_top);
5239 
5240         /*
5241          * Only remove devices that are empty.
5242          */
5243         if (vd->vdev_stat.vs_alloc != 0)
5244                 return;
5245 
5246         (void) vdev_label_init(vd, 0, VDEV_LABEL_REMOVE);
5247 
5248         if (list_link_active(&vd->vdev_state_dirty_node))
5249                 vdev_state_clean(vd);
5250         if (list_link_active(&vd->vdev_config_dirty_node))
5251                 vdev_config_clean(vd);
5252 
5253         vdev_free(vd);
5254 
5255         if (last_vdev) {
5256                 vdev_compact_children(rvd);
5257         } else {
5258                 vd = vdev_alloc_common(spa, id, 0, &vdev_hole_ops);
5259                 vdev_add_child(rvd, vd);
5260         }
5261         vdev_config_dirty(rvd);
5262 
5263         /*
5264          * Reassess the health of our root vdev.
5265          */
5266         vdev_reopen(rvd);
5267 }
5268 
5269 /*
5270  * Remove a device from the pool.
5271  *
5272  * Removing a device from the vdev namespace requires several steps
5273  * and can take a significant amount of time.  As a result we use
5274  * the spa_vdev_config_[enter/exit] functions which allow us to
5275  * grab and release the spa_config_lock while still holding the namespace
5276  * lock.  During each step the configuration is synced out.
5277  *
5278  * Currently, this supports removing only hot spares, slogs, and level 2 ARC
5279  * devices.
5280  */
5281 int
5282 spa_vdev_remove(spa_t *spa, uint64_t guid, boolean_t unspare)
5283 {
5284         vdev_t *vd;
5285         metaslab_group_t *mg;
5286         nvlist_t **spares, **l2cache, *nv;
5287         uint64_t txg = 0;
5288         uint_t nspares, nl2cache;
5289         int error = 0;
5290         boolean_t locked = MUTEX_HELD(&spa_namespace_lock);
5291 
5292         ASSERT(spa_writeable(spa));
5293 
5294         if (!locked)
5295                 txg = spa_vdev_enter(spa);
5296 
5297         vd = spa_lookup_by_guid(spa, guid, B_FALSE);
5298 
5299         if (spa->spa_spares.sav_vdevs != NULL &&
5300             nvlist_lookup_nvlist_array(spa->spa_spares.sav_config,
5301             ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0 &&
5302             (nv = spa_nvlist_lookup_by_guid(spares, nspares, guid)) != NULL) {
5303                 /*
5304                  * Only remove the hot spare if it's not currently in use
5305                  * in this pool.
5306                  */
5307                 if (vd == NULL || unspare) {
5308                         spa_vdev_remove_aux(spa->spa_spares.sav_config,
5309                             ZPOOL_CONFIG_SPARES, spares, nspares, nv);
5310                         spa_load_spares(spa);
5311                         spa->spa_spares.sav_sync = B_TRUE;
5312                 } else {
5313                         error = SET_ERROR(EBUSY);
5314                 }
5315         } else if (spa->spa_l2cache.sav_vdevs != NULL &&
5316             nvlist_lookup_nvlist_array(spa->spa_l2cache.sav_config,
5317             ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0 &&
5318             (nv = spa_nvlist_lookup_by_guid(l2cache, nl2cache, guid)) != NULL) {
5319                 /*
5320                  * Cache devices can always be removed.
5321                  */
5322                 spa_vdev_remove_aux(spa->spa_l2cache.sav_config,
5323                     ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache, nv);
5324                 spa_load_l2cache(spa);
5325                 spa->spa_l2cache.sav_sync = B_TRUE;
5326         } else if (vd != NULL && vd->vdev_islog) {
5327                 ASSERT(!locked);
5328                 ASSERT(vd == vd->vdev_top);
5329 
5330                 mg = vd->vdev_mg;
5331 
5332                 /*
5333                  * Stop allocating from this vdev.
5334                  */
5335                 metaslab_group_passivate(mg);
5336 
5337                 /*
5338                  * Wait for the youngest allocations and frees to sync,
5339                  * and then wait for the deferral of those frees to finish.
5340                  */
5341                 spa_vdev_config_exit(spa, NULL,
5342                     txg + TXG_CONCURRENT_STATES + TXG_DEFER_SIZE, 0, FTAG);
5343 
5344                 /*
5345                  * Attempt to evacuate the vdev.
5346                  */
5347                 error = spa_vdev_remove_evacuate(spa, vd);
5348 
5349                 txg = spa_vdev_config_enter(spa);
5350 
5351                 /*
5352                  * If we couldn't evacuate the vdev, unwind.
5353                  */
5354                 if (error) {
5355                         metaslab_group_activate(mg);
5356                         return (spa_vdev_exit(spa, NULL, txg, error));
5357                 }
5358 
5359                 /*
5360                  * Clean up the vdev namespace.
5361                  */
5362                 spa_vdev_remove_from_namespace(spa, vd);
5363 
5364         } else if (vd != NULL) {
5365                 /*
5366                  * Normal vdevs cannot be removed (yet).
5367                  */
5368                 error = SET_ERROR(ENOTSUP);
5369         } else {
5370                 /*
5371                  * There is no vdev of any kind with the specified guid.
5372                  */
5373                 error = SET_ERROR(ENOENT);
5374         }
5375 
5376         if (!locked)
5377                 return (spa_vdev_exit(spa, NULL, txg, error));
5378 
5379         return (error);
5380 }
5381 
5382 /*
5383  * Find any device that's done replacing, or a vdev marked 'unspare' that's
5384  * currently spared, so we can detach it.
5385  */
5386 static vdev_t *
5387 spa_vdev_resilver_done_hunt(vdev_t *vd)
5388 {
5389         vdev_t *newvd, *oldvd;
5390 
5391         for (int c = 0; c < vd->vdev_children; c++) {
5392                 oldvd = spa_vdev_resilver_done_hunt(vd->vdev_child[c]);
5393                 if (oldvd != NULL)
5394                         return (oldvd);
5395         }
5396 
5397         /*
5398          * Check for a completed replacement.  We always consider the first
5399          * vdev in the list to be the oldest vdev, and the last one to be
5400          * the newest (see spa_vdev_attach() for how that works).  In
5401          * the case where the newest vdev is faulted, we will not automatically
5402          * remove it after a resilver completes.  This is OK as it will require
5403          * user intervention to determine which disk the admin wishes to keep.
5404          */
5405         if (vd->vdev_ops == &vdev_replacing_ops) {
5406                 ASSERT(vd->vdev_children > 1);
5407 
5408                 newvd = vd->vdev_child[vd->vdev_children - 1];
5409                 oldvd = vd->vdev_child[0];
5410 
5411                 if (vdev_dtl_empty(newvd, DTL_MISSING) &&
5412                     vdev_dtl_empty(newvd, DTL_OUTAGE) &&
5413                     !vdev_dtl_required(oldvd))
5414                         return (oldvd);
5415         }
5416 
5417         /*
5418          * Check for a completed resilver with the 'unspare' flag set.
5419          */
5420         if (vd->vdev_ops == &vdev_spare_ops) {
5421                 vdev_t *first = vd->vdev_child[0];
5422                 vdev_t *last = vd->vdev_child[vd->vdev_children - 1];
5423 
5424                 if (last->vdev_unspare) {
5425                         oldvd = first;
5426                         newvd = last;
5427                 } else if (first->vdev_unspare) {
5428                         oldvd = last;
5429                         newvd = first;
5430                 } else {
5431                         oldvd = NULL;
5432                 }
5433 
5434                 if (oldvd != NULL &&
5435                     vdev_dtl_empty(newvd, DTL_MISSING) &&
5436                     vdev_dtl_empty(newvd, DTL_OUTAGE) &&
5437                     !vdev_dtl_required(oldvd))
5438                         return (oldvd);
5439 
5440                 /*
5441                  * If two or more spares are attached to a disk,
5442                  * and those spares are not required, then we want to
5443                  * attempt to free them up now so that they can be used
5444                  * by other pools.  Once we're back down to a single
5445                  * disk+spare, we stop removing them.
5446                  */
5447                 if (vd->vdev_children > 2) {
5448                         newvd = vd->vdev_child[1];
5449 
5450                         if (newvd->vdev_isspare && last->vdev_isspare &&
5451                             vdev_dtl_empty(last, DTL_MISSING) &&
5452                             vdev_dtl_empty(last, DTL_OUTAGE) &&
5453                             !vdev_dtl_required(newvd))
5454                                 return (newvd);
5455                 }
5456         }
5457 
5458         return (NULL);
5459 }
5460 
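/*
 * Detach each device found by spa_vdev_resilver_done_hunt() above: completed
 * replacements, and hot spares that are no longer needed.
 */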
5461 static void
5462 spa_vdev_resilver_done(spa_t *spa)
5463 {
5464         vdev_t *vd, *pvd, *ppvd;
5465         uint64_t guid, sguid, pguid, ppguid;
5466 
5467         spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
5468 
5469         while ((vd = spa_vdev_resilver_done_hunt(spa->spa_root_vdev)) != NULL) {
5470                 pvd = vd->vdev_parent;
5471                 ppvd = pvd->vdev_parent;
5472                 guid = vd->vdev_guid;
5473                 pguid = pvd->vdev_guid;
5474                 ppguid = ppvd->vdev_guid;
5475                 sguid = 0;
5476                 /*
5477                  * If we have just finished replacing a hot spared device, then
5478                  * we need to detach the parent's first child (the original hot
5479                  * spare) as well.
5480                  */
5481                 if (ppvd->vdev_ops == &vdev_spare_ops && pvd->vdev_id == 0 &&
5482                     ppvd->vdev_children == 2) {
5483                         ASSERT(pvd->vdev_ops == &vdev_replacing_ops);
5484                         sguid = ppvd->vdev_child[1]->vdev_guid;
5485                 }
5486                 ASSERT(vd->vdev_resilver_txg == 0 || !vdev_dtl_required(vd));
5487 
5488                 spa_config_exit(spa, SCL_ALL, FTAG);
5489                 if (spa_vdev_detach(spa, guid, pguid, B_TRUE) != 0)
5490                         return;
5491                 if (sguid && spa_vdev_detach(spa, sguid, ppguid, B_TRUE) != 0)
5492                         return;
5493                 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
5494         }
5495 
5496         spa_config_exit(spa, SCL_ALL, FTAG);
5497 }
5498 
5499 /*
5500  * Update the stored path or FRU for this vdev.
5501  */
5502 int
5503 spa_vdev_set_common(spa_t *spa, uint64_t guid, const char *value,
5504     boolean_t ispath)
5505 {
5506         vdev_t *vd;
5507         boolean_t sync = B_FALSE;
5508 
5509         ASSERT(spa_writeable(spa));
5510 
5511         spa_vdev_state_enter(spa, SCL_ALL);
5512 
5513         if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
5514                 return (spa_vdev_state_exit(spa, NULL, ENOENT));
5515 
5516         if (!vd->vdev_ops->vdev_op_leaf)
5517                 return (spa_vdev_state_exit(spa, NULL, ENOTSUP));
5518 
5519         if (ispath) {
5520                 if (strcmp(value, vd->vdev_path) != 0) {
5521                         spa_strfree(vd->vdev_path);
5522                         vd->vdev_path = spa_strdup(value);
5523                         sync = B_TRUE;
5524                 }
5525         } else {
5526                 if (vd->vdev_fru == NULL) {
5527                         vd->vdev_fru = spa_strdup(value);
5528                         sync = B_TRUE;
5529                 } else if (strcmp(value, vd->vdev_fru) != 0) {
5530                         spa_strfree(vd->vdev_fru);
5531                         vd->vdev_fru = spa_strdup(value);
5532                         sync = B_TRUE;
5533                 }
5534         }
5535 
5536         return (spa_vdev_state_exit(spa, sync ? vd : NULL, 0));
5537 }
5538 
5539 int
5540 spa_vdev_setpath(spa_t *spa, uint64_t guid, const char *newpath)
5541 {
5542         return (spa_vdev_set_common(spa, guid, newpath, B_TRUE));
5543 }
5544 
5545 int
5546 spa_vdev_setfru(spa_t *spa, uint64_t guid, const char *newfru)
5547 {
5548         return (spa_vdev_set_common(spa, guid, newfru, B_FALSE));
5549 }
5550 
5551 /*
5552  * ==========================================================================
5553  * SPA Scanning
5554  * ==========================================================================
5555  */
5556 
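     /*
      * Stop an in-progress scan.  An active resilver cannot be cancelled,
      * so EBUSY is returned in that case.
      */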
5557 int
5558 spa_scan_stop(spa_t *spa)
5559 {
5560         ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0);
5561         if (dsl_scan_resilvering(spa->spa_dsl_pool))
5562                 return (SET_ERROR(EBUSY));
5563         return (dsl_scan_cancel(spa->spa_dsl_pool));
5564 }
5565 
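     /*
      * Start a scan (scrub or resilver) of the pool.  A resilver request is
      * completed immediately if no writeable leaf vdev actually needs
      * resilvering.
      */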
5566 int
5567 spa_scan(spa_t *spa, pool_scan_func_t func)
5568 {
5569         ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0);
5570 
5571         if (func >= POOL_SCAN_FUNCS || func == POOL_SCAN_NONE)
5572                 return (SET_ERROR(ENOTSUP));
5573 
5574         /*
5575          * If a resilver was requested, but there is no DTL on a
5576          * writeable leaf device, we have nothing to do.
5577          */
5578         if (func == POOL_SCAN_RESILVER &&
5579             !vdev_resilver_needed(spa->spa_root_vdev, NULL, NULL)) {
5580                 spa_async_request(spa, SPA_ASYNC_RESILVER_DONE);
5581                 return (0);
5582         }
5583 
5584         return (dsl_scan(spa->spa_dsl_pool, func));
5585 }
5586 
5587 /*
5588  * ==========================================================================
5589  * SPA async task processing
5590  * ==========================================================================
5591  */
5592 
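     /*
      * Walk the vdev tree rooted at 'vd' and transition any device flagged
      * with vdev_remove_wanted to the REMOVED state, clearing its error
      * counters.
      */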
5593 static void
5594 spa_async_remove(spa_t *spa, vdev_t *vd)
5595 {
5596         if (vd->vdev_remove_wanted) {
5597                 vd->vdev_remove_wanted = B_FALSE;
5598                 vd->vdev_delayed_close = B_FALSE;
5599                 vdev_set_state(vd, B_FALSE, VDEV_STATE_REMOVED, VDEV_AUX_NONE);
5600 
5601                 /*
5602                  * We want to clear the stats, but we don't want to do a full
5603                  * vdev_clear() as that will cause us to throw away
5604                  * degraded/faulted state as well as attempt to reopen the
5605                  * device, all of which is a waste.
5606                  */
5607                 vd->vdev_stat.vs_read_errors = 0;
5608                 vd->vdev_stat.vs_write_errors = 0;
5609                 vd->vdev_stat.vs_checksum_errors = 0;
5610 
5611                 vdev_state_dirty(vd->vdev_top);
5612         }
5613 
5614         for (int c = 0; c < vd->vdev_children; c++)
5615                 spa_async_remove(spa, vd->vdev_child[c]);
5616 }
5617 
5618 static void
5619 spa_async_probe(spa_t *spa, vdev_t *vd)
5620 {
5621         if (vd->vdev_probe_wanted) {
5622                 vd->vdev_probe_wanted = B_FALSE;
5623                 vdev_reopen(vd);        /* vdev_open() does the actual probe */
5624         }
5625 
5626         for (int c = 0; c < vd->vdev_children; c++)
5627                 spa_async_probe(spa, vd->vdev_child[c]);
5628 }
5629 
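     /*
      * If autoexpand is enabled for the pool, post a device-change (DLE)
      * sysevent for every leaf vdev below 'vd' so that the device's new
      * capacity can be noticed.
      */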
5630 static void
5631 spa_async_autoexpand(spa_t *spa, vdev_t *vd)
5632 {
5633         sysevent_id_t eid;
5634         nvlist_t *attr;
5635         char *physpath;
5636 
5637         if (!spa->spa_autoexpand)
5638                 return;
5639 
5640         for (int c = 0; c < vd->vdev_children; c++) {
5641                 vdev_t *cvd = vd->vdev_child[c];
5642                 spa_async_autoexpand(spa, cvd);
5643         }
5644 
5645         if (!vd->vdev_ops->vdev_op_leaf || vd->vdev_physpath == NULL)
5646                 return;
5647 
5648         physpath = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
5649         (void) snprintf(physpath, MAXPATHLEN, "/devices%s", vd->vdev_physpath);
5650 
5651         VERIFY(nvlist_alloc(&attr, NV_UNIQUE_NAME, KM_SLEEP) == 0);
5652         VERIFY(nvlist_add_string(attr, DEV_PHYS_PATH, physpath) == 0);
5653 
5654         (void) ddi_log_sysevent(zfs_dip, SUNW_VENDOR, EC_DEV_STATUS,
5655             ESC_DEV_DLE, attr, &eid, DDI_SLEEP);
5656 
5657         nvlist_free(attr);
5658         kmem_free(physpath, MAXPATHLEN);
5659 }
5660 
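     /*
      * Async thread body: consume the pending task bits and handle each
      * request (config update, device removal, autoexpand, probe, resilver
      * completion, resilver restart), then signal completion and exit.
      */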
5661 static void
5662 spa_async_thread(spa_t *spa)
5663 {
5664         int tasks;
5665 
5666         ASSERT(spa->spa_sync_on);
5667 
5668         mutex_enter(&spa->spa_async_lock);
5669         tasks = spa->spa_async_tasks;
5670         spa->spa_async_tasks = 0;
5671         mutex_exit(&spa->spa_async_lock);
5672 
5673         /*
5674          * See if the config needs to be updated.
5675          */
5676         if (tasks & SPA_ASYNC_CONFIG_UPDATE) {
5677                 uint64_t old_space, new_space;
5678 
5679                 mutex_enter(&spa_namespace_lock);
5680                 old_space = metaslab_class_get_space(spa_normal_class(spa));
5681                 spa_config_update(spa, SPA_CONFIG_UPDATE_POOL);
5682                 new_space = metaslab_class_get_space(spa_normal_class(spa));
5683                 mutex_exit(&spa_namespace_lock);
5684 
5685                 /*
5686                  * If the pool grew as a result of the config update,
5687                  * then log an internal history event.
5688                  */
5689                 if (new_space != old_space) {
5690                         spa_history_log_internal(spa, "vdev online", NULL,
5691                             "pool '%s' size: %llu(+%llu)",
5692                             spa_name(spa), new_space, new_space - old_space);
5693                 }
5694         }
5695 
5696         /*
5697          * See if any devices need to be marked REMOVED.
5698          */
5699         if (tasks & SPA_ASYNC_REMOVE) {
5700                 spa_vdev_state_enter(spa, SCL_NONE);
5701                 spa_async_remove(spa, spa->spa_root_vdev);
5702                 for (int i = 0; i < spa->spa_l2cache.sav_count; i++)
5703                         spa_async_remove(spa, spa->spa_l2cache.sav_vdevs[i]);
5704                 for (int i = 0; i < spa->spa_spares.sav_count; i++)
5705                         spa_async_remove(spa, spa->spa_spares.sav_vdevs[i]);
5706                 (void) spa_vdev_state_exit(spa, NULL, 0);
5707         }
5708 
5709         if ((tasks & SPA_ASYNC_AUTOEXPAND) && !spa_suspended(spa)) {
5710                 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
5711                 spa_async_autoexpand(spa, spa->spa_root_vdev);
5712                 spa_config_exit(spa, SCL_CONFIG, FTAG);
5713         }
5714 
5715         /*
5716          * See if any devices need to be probed.
5717          */
5718         if (tasks & SPA_ASYNC_PROBE) {
5719                 spa_vdev_state_enter(spa, SCL_NONE);
5720                 spa_async_probe(spa, spa->spa_root_vdev);
5721                 (void) spa_vdev_state_exit(spa, NULL, 0);
5722         }
5723 
5724         /*
5725          * If any devices are done replacing, detach them.
5726          */
5727         if (tasks & SPA_ASYNC_RESILVER_DONE)
5728                 spa_vdev_resilver_done(spa);
5729 
5730         /*
5731          * Kick off a resilver.
5732          */
5733         if (tasks & SPA_ASYNC_RESILVER)
5734                 dsl_resilver_restart(spa->spa_dsl_pool, 0);
5735 
5736         /*
5737          * Let the world know that we're done.
5738          */
5739         mutex_enter(&spa->spa_async_lock);
5740         spa->spa_async_thread = NULL;
5741         cv_broadcast(&spa->spa_async_cv);
5742         mutex_exit(&spa->spa_async_lock);
5743         thread_exit();
5744 }
5745 
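     /*
      * Block dispatch of new async tasks and wait for any running async
      * thread to finish.  Balanced by spa_async_resume().
      */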
5746 void
5747 spa_async_suspend(spa_t *spa)
5748 {
5749         mutex_enter(&spa->spa_async_lock);
5750         spa->spa_async_suspended++;
5751         while (spa->spa_async_thread != NULL)
5752                 cv_wait(&spa->spa_async_cv, &spa->spa_async_lock);
5753         mutex_exit(&spa->spa_async_lock);
5754 }
5755 
5756 void
5757 spa_async_resume(spa_t *spa)
5758 {
5759         mutex_enter(&spa->spa_async_lock);
5760         ASSERT(spa->spa_async_suspended != 0);
5761         spa->spa_async_suspended--;
5762         mutex_exit(&spa->spa_async_lock);
5763 }
5764 
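     /*
      * Determine whether there is async work worth dispatching.  A pending
      * config cache update is ignored while we are still within
      * zfs_ccw_retry_interval of the last failed config cache write.
      */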
5765 static boolean_t
5766 spa_async_tasks_pending(spa_t *spa)
5767 {
5768         uint_t non_config_tasks;
5769         uint_t config_task;
5770         boolean_t config_task_suspended;
5771 
5772         non_config_tasks = spa->spa_async_tasks & ~SPA_ASYNC_CONFIG_UPDATE;
5773         config_task = spa->spa_async_tasks & SPA_ASYNC_CONFIG_UPDATE;
5774         if (spa->spa_ccw_fail_time == 0) {
5775                 config_task_suspended = B_FALSE;
5776         } else {
5777                 config_task_suspended =
5778                     (gethrtime() - spa->spa_ccw_fail_time) <
5779                     (zfs_ccw_retry_interval * NANOSEC);
5780         }
5781 
5782         return (non_config_tasks || (config_task && !config_task_suspended));
5783 }
5784 
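     /*
      * Create the async thread if work is pending, async processing is not
      * suspended, and no async thread is already running.
      */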
5785 static void
5786 spa_async_dispatch(spa_t *spa)
5787 {
5788         mutex_enter(&spa->spa_async_lock);
5789         if (spa_async_tasks_pending(spa) &&
5790             !spa->spa_async_suspended &&
5791             spa->spa_async_thread == NULL &&
5792             rootdir != NULL)
5793                 spa->spa_async_thread = thread_create(NULL, 0,
5794                     spa_async_thread, spa, 0, &p0, TS_RUN, maxclsyspri);
5795         mutex_exit(&spa->spa_async_lock);
5796 }
5797 
5798 void
5799 spa_async_request(spa_t *spa, int task)
5800 {
5801         zfs_dbgmsg("spa=%s async request task=%u", spa->spa_name, task);
5802         mutex_enter(&spa->spa_async_lock);
5803         spa->spa_async_tasks |= task;
5804         mutex_exit(&spa->spa_async_lock);
5805 }
5806 
5807 /*
5808  * ==========================================================================
5809  * SPA syncing routines
5810  * ==========================================================================
5811  */
5812 
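     /*
      * Callbacks used while syncing frees: bpobj_enqueue_cb defers a block
      * pointer into a bpobj, while spa_free_sync_cb frees it immediately via
      * zio_free_sync().
      */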
5813 static int
5814 bpobj_enqueue_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
5815 {
5816         bpobj_t *bpo = arg;
5817         bpobj_enqueue(bpo, bp, tx);
5818         return (0);
5819 }
5820 
5821 static int
5822 spa_free_sync_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
5823 {
5824         zio_t *zio = arg;
5825 
5826         zio_nowait(zio_free_sync(zio, zio->io_spa, dmu_tx_get_txg(tx), bp,
5827             zio->io_flags));
5828         return (0);
5829 }
5830 
5831 /*
5832  * Note: this simple function is not inlined to make it easier to dtrace the
5833  * amount of time spent syncing frees.
5834  */
5835 static void
5836 spa_sync_frees(spa_t *spa, bplist_t *bpl, dmu_tx_t *tx)
5837 {
5838         zio_t *zio = zio_root(spa, NULL, NULL, 0);
5839         bplist_iterate(bpl, spa_free_sync_cb, zio, tx);
5840         VERIFY(zio_wait(zio) == 0);
5841 }
5842 
5843 /*
5844  * Note: this simple function is not inlined to make it easier to dtrace the
5845  * amount of time spent syncing deferred frees.
5846  */
5847 static void
5848 spa_sync_deferred_frees(spa_t *spa, dmu_tx_t *tx)
5849 {
5850         zio_t *zio = zio_root(spa, NULL, NULL, 0);
5851         VERIFY3U(bpobj_iterate(&spa->spa_deferred_bpobj,
5852             spa_free_sync_cb, zio, tx), ==, 0);
5853         VERIFY0(zio_wait(zio));
5854 }
5855 
5856 
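     /*
      * Pack 'nv' and write it into packed-nvlist object 'obj', padding the
      * write out to SPA_CONFIG_BLOCKSIZE and recording the packed size in
      * the object's bonus buffer.
      */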
5857 static void
5858 spa_sync_nvlist(spa_t *spa, uint64_t obj, nvlist_t *nv, dmu_tx_t *tx)
5859 {
5860         char *packed = NULL;
5861         size_t bufsize;
5862         size_t nvsize = 0;
5863         dmu_buf_t *db;
5864 
5865         VERIFY(nvlist_size(nv, &nvsize, NV_ENCODE_XDR) == 0);
5866 
5867         /*
5868          * Write full (SPA_CONFIG_BLOCKSIZE) blocks of configuration
5869          * information.  This avoids the dmu_buf_will_dirty() path and
5870          * saves us a pre-read to get data we don't actually care about.
5871          */
5872         bufsize = P2ROUNDUP((uint64_t)nvsize, SPA_CONFIG_BLOCKSIZE);
5873         packed = kmem_alloc(bufsize, KM_SLEEP);
5874 
5875         VERIFY(nvlist_pack(nv, &packed, &nvsize, NV_ENCODE_XDR,
5876             KM_SLEEP) == 0);
5877         bzero(packed + nvsize, bufsize - nvsize);
5878 
5879         dmu_write(spa->spa_meta_objset, obj, 0, bufsize, packed, tx);
5880 
5881         kmem_free(packed, bufsize);
5882 
5883         VERIFY(0 == dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db));
5884         dmu_buf_will_dirty(db, tx);
5885         *(uint64_t *)db->db_data = nvsize;
5886         dmu_buf_rele(db, FTAG);
5887 }
5888 
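     /*
      * Sync the nvlist describing an auxiliary vdev class (spares or
      * l2cache) into its MOS packed-nvlist object, allocating that object on
      * first use.
      */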
5889 static void
5890 spa_sync_aux_dev(spa_t *spa, spa_aux_vdev_t *sav, dmu_tx_t *tx,
5891     const char *config, const char *entry)
5892 {
5893         nvlist_t *nvroot;
5894         nvlist_t **list;
5895         int i;
5896 
5897         if (!sav->sav_sync)
5898                 return;
5899 
5900         /*
5901          * Update the MOS nvlist describing the list of available devices.
5902          * spa_validate_aux() will have already made sure this nvlist is
5903          * valid and the vdevs are labeled appropriately.
5904          */
5905         if (sav->sav_object == 0) {
5906                 sav->sav_object = dmu_object_alloc(spa->spa_meta_objset,
5907                     DMU_OT_PACKED_NVLIST, 1 << 14, DMU_OT_PACKED_NVLIST_SIZE,
5908                     sizeof (uint64_t), tx);
5909                 VERIFY(zap_update(spa->spa_meta_objset,
5910                     DMU_POOL_DIRECTORY_OBJECT, entry, sizeof (uint64_t), 1,
5911                     &sav->sav_object, tx) == 0);
5912         }
5913 
5914         VERIFY(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, KM_SLEEP) == 0);
5915         if (sav->sav_count == 0) {
5916                 VERIFY(nvlist_add_nvlist_array(nvroot, config, NULL, 0) == 0);
5917         } else {
5918                 list = kmem_alloc(sav->sav_count * sizeof (void *), KM_SLEEP);
5919                 for (i = 0; i < sav->sav_count; i++)
5920                         list[i] = vdev_config_generate(spa, sav->sav_vdevs[i],
5921                             B_FALSE, VDEV_CONFIG_L2CACHE);
5922                 VERIFY(nvlist_add_nvlist_array(nvroot, config, list,
5923                     sav->sav_count) == 0);
5924                 for (i = 0; i < sav->sav_count; i++)
5925                         nvlist_free(list[i]);
5926                 kmem_free(list, sav->sav_count * sizeof (void *));
5927         }
5928 
5929         spa_sync_nvlist(spa, sav->sav_object, nvroot, tx);
5930         nvlist_free(nvroot);
5931 
5932         sav->sav_sync = B_FALSE;
5933 }
5934 
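     /*
      * Write the dirty pool configuration into the MOS config object for
      * this txg and stash it in spa_config_syncing until the txg commits.
      */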
5935 static void
5936 spa_sync_config_object(spa_t *spa, dmu_tx_t *tx)
5937 {
5938         nvlist_t *config;
5939 
5940         if (list_is_empty(&spa->spa_config_dirty_list))
5941                 return;
5942 
5943         spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
5944 
5945         config = spa_config_generate(spa, spa->spa_root_vdev,
5946             dmu_tx_get_txg(tx), B_FALSE);
5947 
5948         /*
5949          * If we're upgrading the spa version then make sure that
5950          * the config object gets updated with the correct version.
5951          */
5952         if (spa->spa_ubsync.ub_version < spa->spa_uberblock.ub_version)
5953                 fnvlist_add_uint64(config, ZPOOL_CONFIG_VERSION,
5954                     spa->spa_uberblock.ub_version);
5955 
5956         spa_config_exit(spa, SCL_STATE, FTAG);
5957 
5958         if (spa->spa_config_syncing)
5959                 nvlist_free(spa->spa_config_syncing);
5960         spa->spa_config_syncing = config;
5961 
5962         spa_sync_nvlist(spa, spa->spa_config_object, config, tx);
5963 }
5964 
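     /*
      * Sync task that updates the pool's on-disk SPA version and dirties the
      * vdev configuration so that new labels are written.
      */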
5965 static void
5966 spa_sync_version(void *arg, dmu_tx_t *tx)
5967 {
5968         uint64_t *versionp = arg;
5969         uint64_t version = *versionp;
5970         spa_t *spa = dmu_tx_pool(tx)->dp_spa;
5971 
5972         /*
5973          * Setting the version is special cased when first creating the pool.
5974          */
5975         ASSERT(tx->tx_txg != TXG_INITIAL);
5976 
5977         ASSERT(SPA_VERSION_IS_SUPPORTED(version));
5978         ASSERT(version >= spa_version(spa));
5979 
5980         spa->spa_uberblock.ub_version = version;
5981         vdev_config_dirty(spa->spa_root_vdev);
5982         spa_history_log_internal(spa, "set", tx, "version=%lld", version);
5983 }
5984 
5985 /*
5986  * Set zpool properties.
5987  */
5988 static void
5989 spa_sync_props(void *arg, dmu_tx_t *tx)
5990 {
5991         nvlist_t *nvp = arg;
5992         spa_t *spa = dmu_tx_pool(tx)->dp_spa;
5993         objset_t *mos = spa->spa_meta_objset;
5994         nvpair_t *elem = NULL;
5995 
5996         mutex_enter(&spa->spa_props_lock);
5997 
5998         while ((elem = nvlist_next_nvpair(nvp, elem))) {
5999                 uint64_t intval;
6000                 char *strval, *fname;
6001                 zpool_prop_t prop;
6002                 const char *propname;
6003                 zprop_type_t proptype;
6004                 spa_feature_t fid;
6005 
6006                 switch (prop = zpool_name_to_prop(nvpair_name(elem))) {
6007                 case ZPROP_INVAL:
6008                         /*
6009                          * We checked this earlier in spa_prop_validate().
6010                          */
6011                         ASSERT(zpool_prop_feature(nvpair_name(elem)));
6012 
6013                         fname = strchr(nvpair_name(elem), '@') + 1;
6014                         VERIFY0(zfeature_lookup_name(fname, &fid));
6015 
6016                         spa_feature_enable(spa, fid, tx);
6017                         spa_history_log_internal(spa, "set", tx,
6018                             "%s=enabled", nvpair_name(elem));
6019                         break;
6020 
6021                 case ZPOOL_PROP_VERSION:
6022                         intval = fnvpair_value_uint64(elem);
6023                         /*
6024                          * The version is synced separately before other
6025                          * properties and should be correct by now.
6026                          */
6027                         ASSERT3U(spa_version(spa), >=, intval);
6028                         break;
6029 
6030                 case ZPOOL_PROP_ALTROOT:
6031                         /*
6032                          * 'altroot' is a non-persistent property. It should
6033                          * have been set temporarily at creation or import time.
6034                          */
6035                         ASSERT(spa->spa_root != NULL);
6036                         break;
6037 
6038                 case ZPOOL_PROP_READONLY:
6039                 case ZPOOL_PROP_CACHEFILE:
6040                         /*
6041                          * 'readonly' and 'cachefile' are also non-persistent
6042                          * properties.
6043                          */
6044                         break;
6045                 case ZPOOL_PROP_COMMENT:
6046                         strval = fnvpair_value_string(elem);
6047                         if (spa->spa_comment != NULL)
6048                                 spa_strfree(spa->spa_comment);
6049                         spa->spa_comment = spa_strdup(strval);
6050                         /*
6051                          * We need to dirty the configuration on all the vdevs
6052                          * so that their labels get updated.  It's unnecessary
6053                          * to do this for pool creation since the vdevs'
6054                          * configuration has already been dirtied.
6055                          */
6056                         if (tx->tx_txg != TXG_INITIAL)
6057                                 vdev_config_dirty(spa->spa_root_vdev);
6058                         spa_history_log_internal(spa, "set", tx,
6059                             "%s=%s", nvpair_name(elem), strval);
6060                         break;
6061                 default:
6062                         /*
6063                          * Set pool property values in the poolprops MOS object.
6064                          */
6065                         if (spa->spa_pool_props_object == 0) {
6066                                 spa->spa_pool_props_object =
6067                                     zap_create_link(mos, DMU_OT_POOL_PROPS,
6068                                     DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_PROPS,
6069                                     tx);
6070                         }
6071 
6072                         /* normalize the property name */
6073                         propname = zpool_prop_to_name(prop);
6074                         proptype = zpool_prop_get_type(prop);
6075 
6076                         if (nvpair_type(elem) == DATA_TYPE_STRING) {
6077                                 ASSERT(proptype == PROP_TYPE_STRING);
6078                                 strval = fnvpair_value_string(elem);
6079                                 VERIFY0(zap_update(mos,
6080                                     spa->spa_pool_props_object, propname,
6081                                     1, strlen(strval) + 1, strval, tx));
6082                                 spa_history_log_internal(spa, "set", tx,
6083                                     "%s=%s", nvpair_name(elem), strval);
6084                         } else if (nvpair_type(elem) == DATA_TYPE_UINT64) {
6085                                 intval = fnvpair_value_uint64(elem);
6086 
6087                                 if (proptype == PROP_TYPE_INDEX) {
6088                                         const char *unused;
6089                                         VERIFY0(zpool_prop_index_to_string(
6090                                             prop, intval, &unused));
6091                                 }
6092                                 VERIFY0(zap_update(mos,
6093                                     spa->spa_pool_props_object, propname,
6094                                     8, 1, &intval, tx));
6095                                 spa_history_log_internal(spa, "set", tx,
6096                                     "%s=%lld", nvpair_name(elem), intval);
6097                         } else {
6098                                 ASSERT(0); /* not allowed */
6099                         }
6100 
6101                         switch (prop) {
6102                         case ZPOOL_PROP_DELEGATION:
6103                                 spa->spa_delegation = intval;
6104                                 break;
6105                         case ZPOOL_PROP_BOOTFS:
6106                                 spa->spa_bootfs = intval;
6107                                 break;
6108                         case ZPOOL_PROP_FAILUREMODE:
6109                                 spa->spa_failmode = intval;
6110                                 break;
6111                         case ZPOOL_PROP_AUTOEXPAND:
6112                                 spa->spa_autoexpand = intval;
6113                                 if (tx->tx_txg != TXG_INITIAL)
6114                                         spa_async_request(spa,
6115                                             SPA_ASYNC_AUTOEXPAND);
6116                                 break;
6117                         case ZPOOL_PROP_DEDUPDITTO:
6118                                 spa->spa_dedup_ditto = intval;
6119                                 break;
6120                         default:
6121                                 break;
6122                         }
6123                 }
6124 
6125         }
6126 
6127         mutex_exit(&spa->spa_props_lock);
6128 }
6129 
6130 /*
6131  * Perform one-time upgrade on-disk changes.  spa_version() does not
6132  * reflect the new version this txg, so there must be no changes this
6133  * txg to anything that the upgrade code depends on after it executes.
6134  * Therefore this must be called after dsl_pool_sync() does the sync
6135  * tasks.
6136  */
6137 static void
6138 spa_sync_upgrades(spa_t *spa, dmu_tx_t *tx)
6139 {
6140         dsl_pool_t *dp = spa->spa_dsl_pool;
6141 
6142         ASSERT(spa->spa_sync_pass == 1);
6143 
6144         rrw_enter(&dp->dp_config_rwlock, RW_WRITER, FTAG);
6145 
6146         if (spa->spa_ubsync.ub_version < SPA_VERSION_ORIGIN &&
6147             spa->spa_uberblock.ub_version >= SPA_VERSION_ORIGIN) {
6148                 dsl_pool_create_origin(dp, tx);
6149 
6150                 /* Keeping the origin open increases spa_minref */
6151                 spa->spa_minref += 3;
6152         }
6153 
6154         if (spa->spa_ubsync.ub_version < SPA_VERSION_NEXT_CLONES &&
6155             spa->spa_uberblock.ub_version >= SPA_VERSION_NEXT_CLONES) {
6156                 dsl_pool_upgrade_clones(dp, tx);
6157         }
6158 
6159         if (spa->spa_ubsync.ub_version < SPA_VERSION_DIR_CLONES &&
6160             spa->spa_uberblock.ub_version >= SPA_VERSION_DIR_CLONES) {
6161                 dsl_pool_upgrade_dir_clones(dp, tx);
6162 
6163                 /* Keeping the freedir open increases spa_minref */
6164                 spa->spa_minref += 3;
6165         }
6166 
6167         if (spa->spa_ubsync.ub_version < SPA_VERSION_FEATURES &&
6168             spa->spa_uberblock.ub_version >= SPA_VERSION_FEATURES) {
6169                 spa_feature_create_zap_objects(spa, tx);
6170         }
6171 
6172         /*
6173          * The LZ4_COMPRESS feature's behaviour was changed to
6174          * activate_on_enable when the ability to use lz4 compression for
6175          * metadata was added.  Old pools that have this feature enabled
6176          * must be upgraded to have this feature active.
6177          */
6178         if (spa->spa_uberblock.ub_version >= SPA_VERSION_FEATURES) {
6179                 boolean_t lz4_en = spa_feature_is_enabled(spa,
6180                     SPA_FEATURE_LZ4_COMPRESS);
6181                 boolean_t lz4_ac = spa_feature_is_active(spa,
6182                     SPA_FEATURE_LZ4_COMPRESS);
6183 
6184                 if (lz4_en && !lz4_ac)
6185                         spa_feature_incr(spa, SPA_FEATURE_LZ4_COMPRESS, tx);
6186         }
6187         rrw_exit(&dp->dp_config_rwlock, FTAG);
6188 }
6189 
6190 /*
6191  * Sync the specified transaction group.  New blocks may be dirtied as
6192  * part of the process, so we iterate until it converges.
6193  */
6194 void
6195 spa_sync(spa_t *spa, uint64_t txg)
6196 {
6197         dsl_pool_t *dp = spa->spa_dsl_pool;
6198         objset_t *mos = spa->spa_meta_objset;
6199         bplist_t *free_bpl = &spa->spa_free_bplist[txg & TXG_MASK];
6200         vdev_t *rvd = spa->spa_root_vdev;
6201         vdev_t *vd;
6202         dmu_tx_t *tx;
6203         int error;
6204 
6205         VERIFY(spa_writeable(spa));
6206 
6207         /*
6208          * Lock out configuration changes.
6209          */
6210         spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
6211 
6212         spa->spa_syncing_txg = txg;
6213         spa->spa_sync_pass = 0;
6214 
6215         /*
6216          * If there are any pending vdev state changes, convert them
6217          * into config changes that go out with this transaction group.
6218          */
6219         spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
6220         while (list_head(&spa->spa_state_dirty_list) != NULL) {
6221                 /*
6222                  * We need the write lock here because, for aux vdevs,
6223                  * calling vdev_config_dirty() modifies sav_config.
6224                  * This is ugly and will become unnecessary when we
6225                  * eliminate the aux vdev wart by integrating all vdevs
6226                  * into the root vdev tree.
6227                  */
6228                 spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
6229                 spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_WRITER);
6230                 while ((vd = list_head(&spa->spa_state_dirty_list)) != NULL) {
6231                         vdev_state_clean(vd);
6232                         vdev_config_dirty(vd);
6233                 }
6234                 spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
6235                 spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_READER);
6236         }
6237         spa_config_exit(spa, SCL_STATE, FTAG);
6238 
6239         tx = dmu_tx_create_assigned(dp, txg);
6240 
6241         spa->spa_sync_starttime = gethrtime();
6242         VERIFY(cyclic_reprogram(spa->spa_deadman_cycid,
6243             spa->spa_sync_starttime + spa->spa_deadman_synctime));
6244 
6245         /*
6246          * If we are upgrading to SPA_VERSION_RAIDZ_DEFLATE this txg,
6247          * set spa_deflate if we have no raid-z vdevs.
6248          */
6249         if (spa->spa_ubsync.ub_version < SPA_VERSION_RAIDZ_DEFLATE &&
6250             spa->spa_uberblock.ub_version >= SPA_VERSION_RAIDZ_DEFLATE) {
6251                 int i;
6252 
6253                 for (i = 0; i < rvd->vdev_children; i++) {
6254                         vd = rvd->vdev_child[i];
6255                         if (vd->vdev_deflate_ratio != SPA_MINBLOCKSIZE)
6256                                 break;
6257                 }
6258                 if (i == rvd->vdev_children) {
6259                         spa->spa_deflate = TRUE;
6260                         VERIFY(0 == zap_add(spa->spa_meta_objset,
6261                             DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE,
6262                             sizeof (uint64_t), 1, &spa->spa_deflate, tx));
6263                 }
6264         }
6265 
6266         /*
6267          * Iterate to convergence.
6268          */
6269         do {
6270                 int pass = ++spa->spa_sync_pass;
6271 
6272                 spa_sync_config_object(spa, tx);
6273                 spa_sync_aux_dev(spa, &spa->spa_spares, tx,
6274                     ZPOOL_CONFIG_SPARES, DMU_POOL_SPARES);
6275                 spa_sync_aux_dev(spa, &spa->spa_l2cache, tx,
6276                     ZPOOL_CONFIG_L2CACHE, DMU_POOL_L2CACHE);
6277                 spa_errlog_sync(spa, txg);
6278                 dsl_pool_sync(dp, txg);
6279 
6280                 if (pass < zfs_sync_pass_deferred_free) {
6281                         spa_sync_frees(spa, free_bpl, tx);
6282                 } else {
6283                         /*
6284                          * We cannot defer frees in pass 1, because
6285                          * we sync the deferred frees later in pass 1.
6286                          */
6287                         ASSERT3U(pass, >, 1);
6288                         bplist_iterate(free_bpl, bpobj_enqueue_cb,
6289                             &spa->spa_deferred_bpobj, tx);
6290                 }
6291 
6292                 ddt_sync(spa, txg);
6293                 dsl_scan_sync(dp, tx);
6294 
6295                 while (vd = txg_list_remove(&spa->spa_vdev_txg_list, txg))
6296                         vdev_sync(vd, txg);
6297 
6298                 if (pass == 1) {
6299                         spa_sync_upgrades(spa, tx);
6300                         ASSERT3U(txg, >=,
6301                             spa->spa_uberblock.ub_rootbp.blk_birth);
6302                         /*
6303                          * Note: We need to check if the MOS is dirty
6304                          * because we could have marked the MOS dirty
6305                          * without updating the uberblock (e.g. if we
6306                          * have sync tasks but no dirty user data).  We
6307                          * need to check the uberblock's rootbp because
6308                          * it is updated if we have synced out dirty
6309                          * data (though in this case the MOS will most
6310                          * likely also be dirty due to second order
6311                          * effects, we don't want to rely on that here).
6312                          */
6313                         if (spa->spa_uberblock.ub_rootbp.blk_birth < txg &&
6314                             !dmu_objset_is_dirty(mos, txg)) {
6315                                 /*
6316                                  * Nothing changed on the first pass,
6317                                  * therefore this TXG is a no-op.  Avoid
6318                                  * syncing deferred frees, so that we
6319                                  * can keep this TXG as a no-op.
6320                                  */
6321                                 ASSERT(txg_list_empty(&dp->dp_dirty_datasets,
6322                                     txg));
6323                                 ASSERT(txg_list_empty(&dp->dp_dirty_dirs, txg));
6324                                 ASSERT(txg_list_empty(&dp->dp_sync_tasks, txg));
6325                                 break;
6326                         }
6327                         spa_sync_deferred_frees(spa, tx);
6328                 }
6329 
6330         } while (dmu_objset_is_dirty(mos, txg));
6331 
6332         /*
6333          * Rewrite the vdev configuration (which includes the uberblock)
6334          * to commit the transaction group.
6335          *
6336          * If there are no dirty vdevs, we sync the uberblock to a few
6337          * random top-level vdevs that are known to be visible in the
6338          * config cache (see spa_vdev_add() for a complete description).
6339          * If there *are* dirty vdevs, sync the uberblock to all vdevs.
6340          */
6341         for (;;) {
6342                 /*
6343                  * We hold SCL_STATE to prevent vdev open/close/etc.
6344                  * while we're attempting to write the vdev labels.
6345                  */
6346                 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
6347 
6348                 if (list_is_empty(&spa->spa_config_dirty_list)) {
6349                         vdev_t *svd[SPA_DVAS_PER_BP];
6350                         int svdcount = 0;
6351                         int children = rvd->vdev_children;
6352                         int c0 = spa_get_random(children);
6353 
6354                         for (int c = 0; c < children; c++) {
6355                                 vd = rvd->vdev_child[(c0 + c) % children];
6356                                 if (vd->vdev_ms_array == 0 || vd->vdev_islog)
6357                                         continue;
6358                                 svd[svdcount++] = vd;
6359                                 if (svdcount == SPA_DVAS_PER_BP)
6360                                         break;
6361                         }
6362                         error = vdev_config_sync(svd, svdcount, txg, B_FALSE);
6363                         if (error != 0)
6364                                 error = vdev_config_sync(svd, svdcount, txg,
6365                                     B_TRUE);
6366                 } else {
6367                         error = vdev_config_sync(rvd->vdev_child,
6368                             rvd->vdev_children, txg, B_FALSE);
6369                         if (error != 0)
6370                                 error = vdev_config_sync(rvd->vdev_child,
6371                                     rvd->vdev_children, txg, B_TRUE);
6372                 }
6373 
6374                 if (error == 0)
6375                         spa->spa_last_synced_guid = rvd->vdev_guid;
6376 
6377                 spa_config_exit(spa, SCL_STATE, FTAG);
6378 
6379                 if (error == 0)
6380                         break;
6381                 zio_suspend(spa, NULL);
6382                 zio_resume_wait(spa);
6383         }
6384         dmu_tx_commit(tx);
6385 
6386         VERIFY(cyclic_reprogram(spa->spa_deadman_cycid, CY_INFINITY));
6387 
6388         /*
6389          * Clear the dirty config list.
6390          */
6391         while ((vd = list_head(&spa->spa_config_dirty_list)) != NULL)
6392                 vdev_config_clean(vd);
6393 
6394         /*
6395          * Now that the new config has synced transactionally,
6396          * let it become visible to the config cache.
6397          */
6398         if (spa->spa_config_syncing != NULL) {
6399                 spa_config_set(spa, spa->spa_config_syncing);
6400                 spa->spa_config_txg = txg;
6401                 spa->spa_config_syncing = NULL;
6402         }
6403 
6404         spa->spa_ubsync = spa->spa_uberblock;
6405 
6406         dsl_pool_sync_done(dp, txg);
6407 
6408         /*
6409          * Update usable space statistics.
6410          */
6411         while (vd = txg_list_remove(&spa->spa_vdev_txg_list, TXG_CLEAN(txg)))
6412                 vdev_sync_done(vd, txg);
6413 
6414         spa_update_dspace(spa);
6415 
6416         /*
6417          * It had better be the case that we didn't dirty anything
6418          * since vdev_config_sync().
6419          */
6420         ASSERT(txg_list_empty(&dp->dp_dirty_datasets, txg));
6421         ASSERT(txg_list_empty(&dp->dp_dirty_dirs, txg));
6422         ASSERT(txg_list_empty(&spa->spa_vdev_txg_list, txg));
6423 
6424         spa->spa_sync_pass = 0;
6425 
6426         spa_config_exit(spa, SCL_CONFIG, FTAG);
6427 
6428         spa_handle_ignored_writes(spa);
6429 
6430         /*
6431          * If any async tasks have been requested, kick them off.
6432          */
6433         spa_async_dispatch(spa);
6434 }
6435 
6436 /*
6437  * Sync all pools.  We don't want to hold the namespace lock across these
6438  * operations, so we take a reference on the spa_t and drop the lock during the
6439  * sync.
6440  */
6441 void
6442 spa_sync_allpools(void)
6443 {
6444         spa_t *spa = NULL;
6445         mutex_enter(&spa_namespace_lock);
6446         while ((spa = spa_next(spa)) != NULL) {
6447                 if (spa_state(spa) != POOL_STATE_ACTIVE ||
6448                     !spa_writeable(spa) || spa_suspended(spa))
6449                         continue;
6450                 spa_open_ref(spa, FTAG);
6451                 mutex_exit(&spa_namespace_lock);
6452                 txg_wait_synced(spa_get_dsl(spa), 0);
6453                 mutex_enter(&spa_namespace_lock);
6454                 spa_close(spa, FTAG);
6455         }
6456         mutex_exit(&spa_namespace_lock);
6457 }
6458 
6459 /*
6460  * ==========================================================================
6461  * Miscellaneous routines
6462  * ==========================================================================
6463  */
6464 
6465 /*
6466  * Remove all pools in the system.
6467  */
6468 void
6469 spa_evict_all(void)
6470 {
6471         spa_t *spa;
6472 
6473         /*
6474          * Remove all cached state.  All pools should be closed now,
6475          * so every spa in the AVL tree should be unreferenced.
6476          */
6477         mutex_enter(&spa_namespace_lock);
6478         while ((spa = spa_next(NULL)) != NULL) {
6479                 /*
6480                  * Stop async tasks.  The async thread may need to detach
6481                  * a device that's been replaced, which requires grabbing
6482                  * spa_namespace_lock, so we must drop it here.
6483                  */
6484                 spa_open_ref(spa, FTAG);
6485                 mutex_exit(&spa_namespace_lock);
6486                 spa_async_suspend(spa);
6487                 mutex_enter(&spa_namespace_lock);
6488                 spa_close(spa, FTAG);
6489 
6490                 if (spa->spa_state != POOL_STATE_UNINITIALIZED) {
6491                         spa_unload(spa);
6492                         spa_deactivate(spa);
6493                 }
6494                 spa_remove(spa);
6495         }
6496         mutex_exit(&spa_namespace_lock);
6497 }
6498 
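     /*
      * Look up a vdev by guid in the root vdev tree; if 'aux' is set, also
      * search the l2cache and spare lists.
      */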
6499 vdev_t *
6500 spa_lookup_by_guid(spa_t *spa, uint64_t guid, boolean_t aux)
6501 {
6502         vdev_t *vd;
6503         int i;
6504 
6505         if ((vd = vdev_lookup_by_guid(spa->spa_root_vdev, guid)) != NULL)
6506                 return (vd);
6507 
6508         if (aux) {
6509                 for (i = 0; i < spa->spa_l2cache.sav_count; i++) {
6510                         vd = spa->spa_l2cache.sav_vdevs[i];
6511                         if (vd->vdev_guid == guid)
6512                                 return (vd);
6513                 }
6514 
6515                 for (i = 0; i < spa->spa_spares.sav_count; i++) {
6516                         vd = spa->spa_spares.sav_vdevs[i];
6517                         if (vd->vdev_guid == guid)
6518                                 return (vd);
6519                 }
6520         }
6521 
6522         return (NULL);
6523 }
6524 
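     /*
      * Raise the pool's on-disk SPA version, dirty the vdev configuration,
      * and wait for the change to sync out.
      */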
6525 void
6526 spa_upgrade(spa_t *spa, uint64_t version)
6527 {
6528         ASSERT(spa_writeable(spa));
6529 
6530         spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
6531 
6532         /*
6533          * This should only be called for a non-faulted pool, and since a
6534          * future version would result in an unopenable pool, this shouldn't be
6535          * possible.
6536          */
6537         ASSERT(SPA_VERSION_IS_SUPPORTED(spa->spa_uberblock.ub_version));
6538         ASSERT3U(version, >=, spa->spa_uberblock.ub_version);
6539 
6540         spa->spa_uberblock.ub_version = version;
6541         vdev_config_dirty(spa->spa_root_vdev);
6542 
6543         spa_config_exit(spa, SCL_ALL, FTAG);
6544 
6545         txg_wait_synced(spa_get_dsl(spa), 0);
6546 }
6547 
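     /*
      * Return B_TRUE if the given guid is one of this pool's spares,
      * including spares whose addition is still pending.
      */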
6548 boolean_t
6549 spa_has_spare(spa_t *spa, uint64_t guid)
6550 {
6551         int i;
6552         uint64_t spareguid;
6553         spa_aux_vdev_t *sav = &spa->spa_spares;
6554 
6555         for (i = 0; i < sav->sav_count; i++)
6556                 if (sav->sav_vdevs[i]->vdev_guid == guid)
6557                         return (B_TRUE);
6558 
6559         for (i = 0; i < sav->sav_npending; i++) {
6560                 if (nvlist_lookup_uint64(sav->sav_pending[i], ZPOOL_CONFIG_GUID,
6561                     &spareguid) == 0 && spareguid == guid)
6562                         return (B_TRUE);
6563         }
6564 
6565         return (B_FALSE);
6566 }
6567 
6568 /*
6569  * Check if a pool has an active shared spare device.
6570  * Note: an active spare's refcount is 2: as a spare and as a replacing vdev.
6571  */
6572 static boolean_t
6573 spa_has_active_shared_spare(spa_t *spa)
6574 {
6575         int i, refcnt;
6576         uint64_t pool;
6577         spa_aux_vdev_t *sav = &spa->spa_spares;
6578 
6579         for (i = 0; i < sav->sav_count; i++) {
6580                 if (spa_spare_exists(sav->sav_vdevs[i]->vdev_guid, &pool,
6581                     &refcnt) && pool != 0ULL && pool == spa_guid(spa) &&
6582                     refcnt > 2)
6583                         return (B_TRUE);
6584         }
6585 
6586         return (B_FALSE);
6587 }
6588 
6589 /*
6590  * Post a sysevent corresponding to the given event.  The 'name' must be one of
6591  * the event definitions in sys/sysevent/eventdefs.h.  The payload will be
6592  * filled in from the spa and (optionally) the vdev.  This doesn't do anything
6593  * in the userland libzpool, as we don't want consumers to misinterpret ztest
6594  * or zdb as real changes.
6595  */
6596 void
6597 spa_event_notify(spa_t *spa, vdev_t *vd, const char *name)
6598 {
6599 #ifdef _KERNEL
6600         sysevent_t              *ev;
6601         sysevent_attr_list_t    *attr = NULL;
6602         sysevent_value_t        value;
6603         sysevent_id_t           eid;
6604 
6605         ev = sysevent_alloc(EC_ZFS, (char *)name, SUNW_KERN_PUB "zfs",
6606             SE_SLEEP);
6607 
6608         value.value_type = SE_DATA_TYPE_STRING;
6609         value.value.sv_string = spa_name(spa);
6610         if (sysevent_add_attr(&attr, ZFS_EV_POOL_NAME, &value, SE_SLEEP) != 0)
6611                 goto done;
6612 
6613         value.value_type = SE_DATA_TYPE_UINT64;
6614         value.value.sv_uint64 = spa_guid(spa);
6615         if (sysevent_add_attr(&attr, ZFS_EV_POOL_GUID, &value, SE_SLEEP) != 0)
6616                 goto done;
6617 
6618         if (vd) {
6619                 value.value_type = SE_DATA_TYPE_UINT64;
6620                 value.value.sv_uint64 = vd->vdev_guid;
6621                 if (sysevent_add_attr(&attr, ZFS_EV_VDEV_GUID, &value,
6622                     SE_SLEEP) != 0)
6623                         goto done;
6624 
6625                 if (vd->vdev_path) {
6626                         value.value_type = SE_DATA_TYPE_STRING;
6627                         value.value.sv_string = vd->vdev_path;
6628                         if (sysevent_add_attr(&attr, ZFS_EV_VDEV_PATH,
6629                             &value, SE_SLEEP) != 0)
6630                                 goto done;
6631                 }
6632         }
6633 
6634         if (sysevent_attach_attributes(ev, attr) != 0)
6635                 goto done;
6636         attr = NULL;
6637 
6638         (void) log_sysevent(ev, SE_SLEEP, &eid);
6639 
6640 done:
6641         if (attr)
6642                 sysevent_free_attr(attr);
6643         sysevent_free(ev);
6644 #endif
6645 }