/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2016 by Delphix. All rights reserved.
 */

#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/txg.h>
#include <sys/vdev_impl.h>
#include <sys/refcount.h>
#include <sys/metaslab_impl.h>
#include <sys/dsl_synctask.h>
#include <sys/zap.h>
#include <sys/dmu_tx.h>

/*
 * Maximum number of metaslabs per group that can be initialized
 * simultaneously.
 */
int max_initialize_ms = 3;

/*
 * Value that is written to disk during initialization.
 */
uint64_t zfs_initialize_value = 0xdeadbeefdeadbeefULL;

/* maximum number of I/Os outstanding per leaf vdev */
int zfs_initialize_limit = 1;

/* size of initializing writes; default 1MiB, see zfs_remove_max_segment */
uint64_t zfs_initialize_chunk_size = 1024 * 1024;

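/*
 * Determine whether the initializing thread should exit: the caller has
 * requested an exit, the vdev is no longer writeable, or it is being
 * detached or removed.
 */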
static boolean_t
vdev_initialize_should_stop(vdev_t *vd)
{
        return (vd->vdev_initialize_exit_wanted || !vdev_writeable(vd) ||
            vd->vdev_detached || vd->vdev_top->vdev_removing);
}

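/*
 * Sync task that records the initializing state and the last offset
 * written in the vdev's leaf ZAP.
 */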
static void
vdev_initialize_zap_update_sync(void *arg, dmu_tx_t *tx)
{
        /*
         * We pass in the guid instead of the vdev_t since the vdev may
         * have been freed prior to the sync task being processed. This
         * happens when a vdev is detached as we call spa_config_vdev_exit(),
         * stop the initializing thread, schedule the sync task, and free
         * the vdev. Later when the scheduled sync task is invoked, it would
         * find that the vdev has been freed.
         */
        uint64_t guid = *(uint64_t *)arg;
        uint64_t txg = dmu_tx_get_txg(tx);
        kmem_free(arg, sizeof (uint64_t));

        vdev_t *vd = spa_lookup_by_guid(tx->tx_pool->dp_spa, guid, B_FALSE);
        if (vd == NULL || vd->vdev_top->vdev_removing || !vdev_is_concrete(vd))
                return;

        uint64_t last_offset = vd->vdev_initialize_offset[txg & TXG_MASK];
        vd->vdev_initialize_offset[txg & TXG_MASK] = 0;

        VERIFY(vd->vdev_leaf_zap != 0);

        objset_t *mos = vd->vdev_spa->spa_meta_objset;

        if (last_offset > 0) {
                vd->vdev_initialize_last_offset = last_offset;
                VERIFY0(zap_update(mos, vd->vdev_leaf_zap,
                    VDEV_LEAF_ZAP_INITIALIZE_LAST_OFFSET,
                    sizeof (last_offset), 1, &last_offset, tx));
        }
        if (vd->vdev_initialize_action_time > 0) {
                uint64_t val = (uint64_t)vd->vdev_initialize_action_time;
                VERIFY0(zap_update(mos, vd->vdev_leaf_zap,
                    VDEV_LEAF_ZAP_INITIALIZE_ACTION_TIME, sizeof (val),
                    1, &val, tx));
        }

        uint64_t initialize_state = vd->vdev_initialize_state;
        VERIFY0(zap_update(mos, vd->vdev_leaf_zap,
            VDEV_LEAF_ZAP_INITIALIZE_STATE, sizeof (initialize_state), 1,
            &initialize_state, tx));
}

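/*
 * Transition the vdev to a new initializing state, schedule a sync task
 * to persist the change, and log it to the pool history.
 */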
static void
vdev_initialize_change_state(vdev_t *vd, vdev_initializing_state_t new_state)
{
        ASSERT(MUTEX_HELD(&vd->vdev_initialize_lock));
        spa_t *spa = vd->vdev_spa;

        if (new_state == vd->vdev_initialize_state)
                return;

        /*
         * Copy the vd's guid; it will be freed by the sync task.
         */
        uint64_t *guid = kmem_zalloc(sizeof (uint64_t), KM_SLEEP);
        *guid = vd->vdev_guid;

        /*
         * If we're suspending, preserve the original start time.
         */
        if (vd->vdev_initialize_state != VDEV_INITIALIZE_SUSPENDED) {
                vd->vdev_initialize_action_time = gethrestime_sec();
        }
        vd->vdev_initialize_state = new_state;

        dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
        VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
        dsl_sync_task_nowait(spa_get_dsl(spa), vdev_initialize_zap_update_sync,
            guid, 2, ZFS_SPACE_CHECK_RESERVED, tx);

        switch (new_state) {
        case VDEV_INITIALIZE_ACTIVE:
                spa_history_log_internal(spa, "initialize", tx,
                    "vdev=%s activated", vd->vdev_path);
                break;
        case VDEV_INITIALIZE_SUSPENDED:
                spa_history_log_internal(spa, "initialize", tx,
                    "vdev=%s suspended", vd->vdev_path);
                break;
        case VDEV_INITIALIZE_CANCELED:
                spa_history_log_internal(spa, "initialize", tx,
                    "vdev=%s canceled", vd->vdev_path);
                break;
        case VDEV_INITIALIZE_COMPLETE:
                spa_history_log_internal(spa, "initialize", tx,
                    "vdev=%s complete", vd->vdev_path);
                break;
        default:
                panic("invalid state %llu", (unsigned long long)new_state);
        }

        dmu_tx_commit(tx);
}

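/*
 * Completion callback for initializing writes: account the bytes written
 * (or, if the vdev became unavailable, roll the txg's offset back), wake
 * any waiters on the inflight limit, and drop the SCL_STATE_ALL hold
 * taken in vdev_initialize_write().
 */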
static void
vdev_initialize_cb(zio_t *zio)
{
        vdev_t *vd = zio->io_vd;
        mutex_enter(&vd->vdev_initialize_io_lock);
        if (zio->io_error == ENXIO && !vdev_writeable(vd)) {
                /*
                 * The I/O failed because the vdev was unavailable; roll the
                 * last offset back. (This works because spa_sync waits on
                 * spa_txg_zio before it runs sync tasks.)
                 */
                uint64_t *off =
                    &vd->vdev_initialize_offset[zio->io_txg & TXG_MASK];
                *off = MIN(*off, zio->io_offset);
        } else {
                /*
                 * Since initializing is best-effort, we ignore I/O errors and
                 * rely on vdev_probe to determine if the errors are more
                 * critical.
                 */
                if (zio->io_error != 0)
                        vd->vdev_stat.vs_initialize_errors++;

                vd->vdev_initialize_bytes_done += zio->io_orig_size;
        }
        ASSERT3U(vd->vdev_initialize_inflight, >, 0);
        vd->vdev_initialize_inflight--;
        cv_broadcast(&vd->vdev_initialize_io_cv);
        mutex_exit(&vd->vdev_initialize_io_lock);

        spa_config_exit(vd->vdev_spa, SCL_STATE_ALL, vd);
}

/* Takes care of physical writing and limiting # of concurrent ZIOs. */
static int
vdev_initialize_write(vdev_t *vd, uint64_t start, uint64_t size, abd_t *data)
{
        spa_t *spa = vd->vdev_spa;

        /* Limit inflight initializing I/Os */
        mutex_enter(&vd->vdev_initialize_io_lock);
        while (vd->vdev_initialize_inflight >= zfs_initialize_limit) {
                cv_wait(&vd->vdev_initialize_io_cv,
                    &vd->vdev_initialize_io_lock);
        }
        vd->vdev_initialize_inflight++;
        mutex_exit(&vd->vdev_initialize_io_lock);

        dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
        VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
        uint64_t txg = dmu_tx_get_txg(tx);

        spa_config_enter(spa, SCL_STATE_ALL, vd, RW_READER);
        mutex_enter(&vd->vdev_initialize_lock);

        if (vd->vdev_initialize_offset[txg & TXG_MASK] == 0) {
                uint64_t *guid = kmem_zalloc(sizeof (uint64_t), KM_SLEEP);
                *guid = vd->vdev_guid;

                /* This is the first write of this txg. */
                dsl_sync_task_nowait(spa_get_dsl(spa),
                    vdev_initialize_zap_update_sync, guid, 2,
                    ZFS_SPACE_CHECK_RESERVED, tx);
        }

        /*
         * We know the vdev struct will still be around since all
         * consumers of vdev_free must stop the initialization first.
         */
        if (vdev_initialize_should_stop(vd)) {
                mutex_enter(&vd->vdev_initialize_io_lock);
                ASSERT3U(vd->vdev_initialize_inflight, >, 0);
                vd->vdev_initialize_inflight--;
                mutex_exit(&vd->vdev_initialize_io_lock);
                spa_config_exit(vd->vdev_spa, SCL_STATE_ALL, vd);
                mutex_exit(&vd->vdev_initialize_lock);
                dmu_tx_commit(tx);
                return (SET_ERROR(EINTR));
        }
        mutex_exit(&vd->vdev_initialize_lock);

        vd->vdev_initialize_offset[txg & TXG_MASK] = start + size;
        zio_nowait(zio_write_phys(spa->spa_txg_zio[txg & TXG_MASK], vd, start,
            size, data, ZIO_CHECKSUM_OFF, vdev_initialize_cb, NULL,
            ZIO_PRIORITY_INITIALIZING, ZIO_FLAG_CANFAIL, B_FALSE));
        /* vdev_initialize_cb releases SCL_STATE_ALL */

        dmu_tx_commit(tx);

        return (0);
}

/*
 * Translate a logical range to the physical range for the specified vdev_t.
 * This function is initially called with a leaf vdev and will walk each
 * parent vdev until it reaches a top-level vdev. Once the top-level is
 * reached the physical range is initialized and the recursive function
 * begins to unwind. As it unwinds it calls the parent's vdev-specific
 * translation function to do the real conversion.
 */
void
vdev_xlate(vdev_t *vd, const range_seg_t *logical_rs, range_seg_t *physical_rs)
{
        /*
         * Walk up the vdev tree
         */
        if (vd != vd->vdev_top) {
                vdev_xlate(vd->vdev_parent, logical_rs, physical_rs);
        } else {
                /*
                 * We've reached the top-level vdev, initialize the
                 * physical range to the logical range and start to
                 * unwind.
                 */
                physical_rs->rs_start = logical_rs->rs_start;
                physical_rs->rs_end = logical_rs->rs_end;
                return;
        }

        vdev_t *pvd = vd->vdev_parent;
        ASSERT3P(pvd, !=, NULL);
        ASSERT3P(pvd->vdev_ops->vdev_op_xlate, !=, NULL);

        /*
         * As this recursive function unwinds, translate the logical
         * range into its physical components by calling the
         * vdev-specific translate function.
         */
        range_seg_t intermediate = { 0 };
        pvd->vdev_ops->vdev_op_xlate(vd, physical_rs, &intermediate);

        physical_rs->rs_start = intermediate.rs_start;
        physical_rs->rs_end = intermediate.rs_end;
}

/*
 * Callback to fill each ABD chunk with zfs_initialize_value. len must be
 * divisible by sizeof (uint64_t), and buf must be 8-byte aligned. The ABD
 * allocation will guarantee these for us.
 */
/* ARGSUSED */
static int
vdev_initialize_block_fill(void *buf, size_t len, void *unused)
{
        ASSERT0(len % sizeof (uint64_t));
        for (uint64_t i = 0; i < len; i += sizeof (uint64_t)) {
                *(uint64_t *)((char *)(buf) + i) = zfs_initialize_value;
        }
        return (0);
}

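/*
 * Allocate an ABD of zfs_initialize_chunk_size bytes, pre-filled with
 * the initialize pattern.
 */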
static abd_t *
vdev_initialize_block_alloc(void)
{
        /* Allocate ABD for filler data */
        abd_t *data = abd_alloc_for_io(zfs_initialize_chunk_size, B_FALSE);

        ASSERT0(zfs_initialize_chunk_size % sizeof (uint64_t));
        (void) abd_iterate_func(data, 0, zfs_initialize_chunk_size,
            vdev_initialize_block_fill, NULL);

        return (data);
}

static void
vdev_initialize_block_free(abd_t *data)
{
        abd_free(data);
}

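/*
 * Issue initializing writes for every range in the vdev's initialize
 * tree, splitting each range into chunks no larger than
 * zfs_initialize_chunk_size.
 */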
static int
vdev_initialize_ranges(vdev_t *vd, abd_t *data)
{
        avl_tree_t *rt = &vd->vdev_initialize_tree->rt_root;

        for (range_seg_t *rs = avl_first(rt); rs != NULL;
            rs = AVL_NEXT(rt, rs)) {
                uint64_t size = rs->rs_end - rs->rs_start;

                /* Split range into legally-sized physical chunks */
                uint64_t writes_required =
                    ((size - 1) / zfs_initialize_chunk_size) + 1;

                for (uint64_t w = 0; w < writes_required; w++) {
                        int error;

                        error = vdev_initialize_write(vd,
                            VDEV_LABEL_START_SIZE + rs->rs_start +
                            (w * zfs_initialize_chunk_size),
                            MIN(size - (w * zfs_initialize_chunk_size),
                            zfs_initialize_chunk_size), data);
                        if (error != 0)
                                return (error);
                }
        }
        return (0);
}

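/*
 * Wait until no other thread is updating the metaslab group's count of
 * initializing metaslabs.
 */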
static void
vdev_initialize_mg_wait(metaslab_group_t *mg)
{
        ASSERT(MUTEX_HELD(&mg->mg_ms_initialize_lock));
        while (mg->mg_initialize_updating) {
                cv_wait(&mg->mg_ms_initialize_cv, &mg->mg_ms_initialize_lock);
        }
}

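/*
 * Claim one of the metaslab group's initializing slots, blocking while
 * max_initialize_ms metaslabs in the group are already being
 * initialized. Caller must have set mg_initialize_updating.
 */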
static void
vdev_initialize_mg_mark(metaslab_group_t *mg)
{
        ASSERT(MUTEX_HELD(&mg->mg_ms_initialize_lock));
        ASSERT(mg->mg_initialize_updating);

        while (mg->mg_ms_initializing >= max_initialize_ms) {
                cv_wait(&mg->mg_ms_initialize_cv, &mg->mg_ms_initialize_lock);
        }
        mg->mg_ms_initializing++;
        ASSERT3U(mg->mg_ms_initializing, <=, max_initialize_ms);
}

/*
 * Mark the metaslab as being initialized to prevent any allocations
 * on this metaslab. We must also track how many metaslabs are currently
 * being initialized within a metaslab group and limit them to prevent
 * allocation failures from occurring because all metaslabs are being
 * initialized.
 */
static void
vdev_initialize_ms_mark(metaslab_t *msp)
{
        ASSERT(!MUTEX_HELD(&msp->ms_lock));
        metaslab_group_t *mg = msp->ms_group;

        mutex_enter(&mg->mg_ms_initialize_lock);

        /*
         * To keep an accurate count of how many threads are initializing
         * a specific metaslab group, we only allow one thread to mark
         * the metaslab group at a time. This ensures that the value of
         * mg_ms_initializing will be accurate when we decide to mark a
         * metaslab group as being initialized. To do this we force all
         * other threads to wait until the metaslab group's
         * mg_initialize_updating flag is no longer set.
         */
        vdev_initialize_mg_wait(mg);
        mg->mg_initialize_updating = B_TRUE;
        if (msp->ms_initializing == 0) {
                vdev_initialize_mg_mark(mg);
        }
        mutex_enter(&msp->ms_lock);
        msp->ms_initializing++;
        mutex_exit(&msp->ms_lock);

        mg->mg_initialize_updating = B_FALSE;
        cv_broadcast(&mg->mg_ms_initialize_cv);
        mutex_exit(&mg->mg_ms_initialize_lock);
}

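/*
 * Drop our initializing reference on the metaslab and, if it was the
 * last one, release the metaslab group's slot.
 */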
static void
vdev_initialize_ms_unmark(metaslab_t *msp)
{
        ASSERT(!MUTEX_HELD(&msp->ms_lock));
        metaslab_group_t *mg = msp->ms_group;
        mutex_enter(&mg->mg_ms_initialize_lock);
        mutex_enter(&msp->ms_lock);
        if (--msp->ms_initializing == 0) {
                mg->mg_ms_initializing--;
                cv_broadcast(&mg->mg_ms_initialize_cv);
        }
        mutex_exit(&msp->ms_lock);
        mutex_exit(&mg->mg_ms_initialize_lock);
}

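/*
 * Estimate how many bytes this vdev will initialize in total and how
 * many have been written so far, by comparing each metaslab's free
 * space (translated to physical offsets) against
 * vdev_initialize_last_offset.
 */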
static void
vdev_initialize_calculate_progress(vdev_t *vd)
{
        ASSERT(spa_config_held(vd->vdev_spa, SCL_CONFIG, RW_READER) ||
            spa_config_held(vd->vdev_spa, SCL_CONFIG, RW_WRITER));
        ASSERT(vd->vdev_leaf_zap != 0);

        vd->vdev_initialize_bytes_est = 0;
        vd->vdev_initialize_bytes_done = 0;

        for (uint64_t i = 0; i < vd->vdev_top->vdev_ms_count; i++) {
                metaslab_t *msp = vd->vdev_top->vdev_ms[i];
                mutex_enter(&msp->ms_lock);

                uint64_t ms_free = msp->ms_size -
                    space_map_allocated(msp->ms_sm);

                if (vd->vdev_top->vdev_ops == &vdev_raidz_ops)
                        ms_free /= vd->vdev_top->vdev_children;

                /*
                 * Convert the metaslab range to a physical range
                 * on our vdev. We use this to determine if we are
                 * in the middle of this metaslab range.
                 */
                range_seg_t logical_rs, physical_rs;
                logical_rs.rs_start = msp->ms_start;
                logical_rs.rs_end = msp->ms_start + msp->ms_size;
                vdev_xlate(vd, &logical_rs, &physical_rs);

                if (vd->vdev_initialize_last_offset <= physical_rs.rs_start) {
                        vd->vdev_initialize_bytes_est += ms_free;
                        mutex_exit(&msp->ms_lock);
                        continue;
                } else if (vd->vdev_initialize_last_offset >
                    physical_rs.rs_end) {
                        vd->vdev_initialize_bytes_done += ms_free;
                        vd->vdev_initialize_bytes_est += ms_free;
                        mutex_exit(&msp->ms_lock);
                        continue;
                }

                /*
                 * If we get here, we're in the middle of initializing this
                 * metaslab. Load it and walk the free tree for more accurate
                 * progress estimation.
                 */
                VERIFY0(metaslab_load(msp));

                for (range_seg_t *rs = avl_first(&msp->ms_allocatable->rt_root);
                    rs; rs = AVL_NEXT(&msp->ms_allocatable->rt_root, rs)) {
                        logical_rs.rs_start = rs->rs_start;
                        logical_rs.rs_end = rs->rs_end;
                        vdev_xlate(vd, &logical_rs, &physical_rs);

                        uint64_t size = physical_rs.rs_end -
                            physical_rs.rs_start;
                        vd->vdev_initialize_bytes_est += size;
                        if (vd->vdev_initialize_last_offset >
                            physical_rs.rs_end) {
                                vd->vdev_initialize_bytes_done += size;
                        } else if (vd->vdev_initialize_last_offset >
                            physical_rs.rs_start &&
                            vd->vdev_initialize_last_offset <
                            physical_rs.rs_end) {
                                vd->vdev_initialize_bytes_done +=
                                    vd->vdev_initialize_last_offset -
                                    physical_rs.rs_start;
                        }
                }
                mutex_exit(&msp->ms_lock);
        }
}

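/*
 * When resuming an active or suspended initialization, read the last
 * offset written from the leaf ZAP, then recompute progress.
 */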
static void
vdev_initialize_load(vdev_t *vd)
{
        ASSERT(spa_config_held(vd->vdev_spa, SCL_CONFIG, RW_READER) ||
            spa_config_held(vd->vdev_spa, SCL_CONFIG, RW_WRITER));
        ASSERT(vd->vdev_leaf_zap != 0);

        if (vd->vdev_initialize_state == VDEV_INITIALIZE_ACTIVE ||
            vd->vdev_initialize_state == VDEV_INITIALIZE_SUSPENDED) {
                int err = zap_lookup(vd->vdev_spa->spa_meta_objset,
                    vd->vdev_leaf_zap, VDEV_LEAF_ZAP_INITIALIZE_LAST_OFFSET,
                    sizeof (vd->vdev_initialize_last_offset), 1,
                    &vd->vdev_initialize_last_offset);
                ASSERT(err == 0 || err == ENOENT);
        }

        vdev_initialize_calculate_progress(vd);
}

/*
 * Convert the logical range into a physical range and add it to our
 * AVL tree.
 */
void
vdev_initialize_range_add(void *arg, uint64_t start, uint64_t size)
{
        vdev_t *vd = arg;
        range_seg_t logical_rs, physical_rs;
        logical_rs.rs_start = start;
        logical_rs.rs_end = start + size;

        ASSERT(vd->vdev_ops->vdev_op_leaf);
        vdev_xlate(vd, &logical_rs, &physical_rs);

        IMPLY(vd->vdev_top == vd,
            logical_rs.rs_start == physical_rs.rs_start);
        IMPLY(vd->vdev_top == vd,
            logical_rs.rs_end == physical_rs.rs_end);

        /* Only add segments that we have not visited yet */
        if (physical_rs.rs_end <= vd->vdev_initialize_last_offset)
                return;

        /* Pick up where we left off mid-range. */
        if (vd->vdev_initialize_last_offset > physical_rs.rs_start) {
                zfs_dbgmsg("range write: vd %s changed (%llu, %llu) to "
                    "(%llu, %llu)", vd->vdev_path,
                    (u_longlong_t)physical_rs.rs_start,
                    (u_longlong_t)physical_rs.rs_end,
                    (u_longlong_t)vd->vdev_initialize_last_offset,
                    (u_longlong_t)physical_rs.rs_end);
                ASSERT3U(physical_rs.rs_end, >,
                    vd->vdev_initialize_last_offset);
                physical_rs.rs_start = vd->vdev_initialize_last_offset;
        }
        ASSERT3U(physical_rs.rs_end, >=, physical_rs.rs_start);

        /*
         * With raidz, it's possible that the logical range does not live on
         * this leaf vdev. We only add the physical range to this vdev's tree
         * if it has a length greater than 0.
         */
        if (physical_rs.rs_end > physical_rs.rs_start) {
                range_tree_add(vd->vdev_initialize_tree, physical_rs.rs_start,
                    physical_rs.rs_end - physical_rs.rs_start);
        } else {
                ASSERT3U(physical_rs.rs_end, ==, physical_rs.rs_start);
        }
}

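/*
 * Body of the initializing thread: for each of the top-level vdev's
 * metaslabs, collect the free ranges that map to this leaf, issue
 * pattern writes over them, and persist the final state before exiting.
 */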
static void
vdev_initialize_thread(void *arg)
{
        vdev_t *vd = arg;
        spa_t *spa = vd->vdev_spa;
        int error = 0;
        uint64_t ms_count = 0;

        ASSERT(vdev_is_concrete(vd));
        spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);

        vd->vdev_initialize_last_offset = 0;
        vdev_initialize_load(vd);

        abd_t *deadbeef = vdev_initialize_block_alloc();

        vd->vdev_initialize_tree = range_tree_create(NULL, NULL);

        for (uint64_t i = 0; !vd->vdev_detached &&
            i < vd->vdev_top->vdev_ms_count; i++) {
                metaslab_t *msp = vd->vdev_top->vdev_ms[i];

                /*
                 * If we've expanded the top-level vdev or it's our
                 * first pass, calculate our progress.
                 */
                if (vd->vdev_top->vdev_ms_count != ms_count) {
                        vdev_initialize_calculate_progress(vd);
                        ms_count = vd->vdev_top->vdev_ms_count;
                }

                vdev_initialize_ms_mark(msp);
                mutex_enter(&msp->ms_lock);
                VERIFY0(metaslab_load(msp));

                range_tree_walk(msp->ms_allocatable, vdev_initialize_range_add,
                    vd);
                mutex_exit(&msp->ms_lock);

                spa_config_exit(spa, SCL_CONFIG, FTAG);
                error = vdev_initialize_ranges(vd, deadbeef);
                vdev_initialize_ms_unmark(msp);
                spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);

                range_tree_vacate(vd->vdev_initialize_tree, NULL, NULL);
                if (error != 0)
                        break;
        }

        spa_config_exit(spa, SCL_CONFIG, FTAG);
        mutex_enter(&vd->vdev_initialize_io_lock);
        while (vd->vdev_initialize_inflight > 0) {
                cv_wait(&vd->vdev_initialize_io_cv,
                    &vd->vdev_initialize_io_lock);
        }
        mutex_exit(&vd->vdev_initialize_io_lock);

        range_tree_destroy(vd->vdev_initialize_tree);
        vdev_initialize_block_free(deadbeef);
        vd->vdev_initialize_tree = NULL;

        mutex_enter(&vd->vdev_initialize_lock);
        if (!vd->vdev_initialize_exit_wanted && vdev_writeable(vd)) {
                vdev_initialize_change_state(vd, VDEV_INITIALIZE_COMPLETE);
        }
        ASSERT(vd->vdev_initialize_thread != NULL ||
            vd->vdev_initialize_inflight == 0);

        /*
         * Drop the vdev_initialize_lock while we sync out the
         * txg since it's possible that a device might be trying to
         * come online and must check to see if it needs to restart an
         * initialization. That thread will be holding the spa_config_lock
         * which would prevent the txg_wait_synced from completing.
         */
        mutex_exit(&vd->vdev_initialize_lock);
        txg_wait_synced(spa_get_dsl(spa), 0);
        mutex_enter(&vd->vdev_initialize_lock);

        vd->vdev_initialize_thread = NULL;
        cv_broadcast(&vd->vdev_initialize_cv);
        mutex_exit(&vd->vdev_initialize_lock);
}

/*
 * Initiates initialization of a device. Caller must hold
 * vdev_initialize_lock. Device must be a leaf and not already be
 * initializing.
 */
void
vdev_initialize(vdev_t *vd)
{
        ASSERT(MUTEX_HELD(&vd->vdev_initialize_lock));
        ASSERT(vd->vdev_ops->vdev_op_leaf);
        ASSERT(vdev_is_concrete(vd));
        ASSERT3P(vd->vdev_initialize_thread, ==, NULL);
        ASSERT(!vd->vdev_detached);
        ASSERT(!vd->vdev_initialize_exit_wanted);
        ASSERT(!vd->vdev_top->vdev_removing);

        vdev_initialize_change_state(vd, VDEV_INITIALIZE_ACTIVE);
        vd->vdev_initialize_thread = thread_create(NULL, 0,
            vdev_initialize_thread, vd, 0, &p0, TS_RUN, maxclsyspri);
}

/*
 * Stop initializing a device, with the resultant initializing state being
 * tgt_state. Blocks until the initializing thread has exited.
 * Caller must hold vdev_initialize_lock and must not be writing to the spa
 * config, as the initializing thread may try to enter the config as a reader
 * before exiting.
 */
void
vdev_initialize_stop(vdev_t *vd, vdev_initializing_state_t tgt_state)
{
        spa_t *spa = vd->vdev_spa;
        ASSERT(!spa_config_held(spa, SCL_CONFIG | SCL_STATE, RW_WRITER));

        ASSERT(MUTEX_HELD(&vd->vdev_initialize_lock));
        ASSERT(vd->vdev_ops->vdev_op_leaf);
        ASSERT(vdev_is_concrete(vd));

        /*
         * Allow cancel requests to proceed even if the initialize thread
         * has stopped.
         */
        if (vd->vdev_initialize_thread == NULL &&
            tgt_state != VDEV_INITIALIZE_CANCELED) {
                return;
        }

        vdev_initialize_change_state(vd, tgt_state);
        vd->vdev_initialize_exit_wanted = B_TRUE;
        while (vd->vdev_initialize_thread != NULL)
                cv_wait(&vd->vdev_initialize_cv, &vd->vdev_initialize_lock);

        ASSERT3P(vd->vdev_initialize_thread, ==, NULL);
        vd->vdev_initialize_exit_wanted = B_FALSE;
}

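/*
 * Recursively descend the vdev tree, stopping initialization on every
 * concrete leaf.
 */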
static void
vdev_initialize_stop_all_impl(vdev_t *vd, vdev_initializing_state_t tgt_state)
{
        if (vd->vdev_ops->vdev_op_leaf && vdev_is_concrete(vd)) {
                mutex_enter(&vd->vdev_initialize_lock);
                vdev_initialize_stop(vd, tgt_state);
                mutex_exit(&vd->vdev_initialize_lock);
                return;
        }

        for (uint64_t i = 0; i < vd->vdev_children; i++) {
                vdev_initialize_stop_all_impl(vd->vdev_child[i], tgt_state);
        }
}

/*
 * Convenience function to stop initialization of a vdev tree and set all
 * initialize thread pointers to NULL.
 */
void
vdev_initialize_stop_all(vdev_t *vd, vdev_initializing_state_t tgt_state)
{
        vdev_initialize_stop_all_impl(vd, tgt_state);

        if (vd->vdev_spa->spa_sync_on) {
                /* Make sure that our state has been synced to disk */
                txg_wait_synced(spa_get_dsl(vd->vdev_spa), 0);
        }
}

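/*
 * Walk the vdev tree and, for each leaf with a ZAP, read back the
 * persisted initializing state; resume an active initialization, or
 * just reload progress for reporting if it is suspended or offline.
 */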
void
vdev_initialize_restart(vdev_t *vd)
{
        ASSERT(MUTEX_HELD(&spa_namespace_lock));
        ASSERT(!spa_config_held(vd->vdev_spa, SCL_ALL, RW_WRITER));

        if (vd->vdev_leaf_zap != 0) {
                mutex_enter(&vd->vdev_initialize_lock);
                uint64_t initialize_state = VDEV_INITIALIZE_NONE;
                int err = zap_lookup(vd->vdev_spa->spa_meta_objset,
                    vd->vdev_leaf_zap, VDEV_LEAF_ZAP_INITIALIZE_STATE,
                    sizeof (initialize_state), 1, &initialize_state);
                ASSERT(err == 0 || err == ENOENT);
                vd->vdev_initialize_state = initialize_state;

                uint64_t timestamp = 0;
                err = zap_lookup(vd->vdev_spa->spa_meta_objset,
                    vd->vdev_leaf_zap, VDEV_LEAF_ZAP_INITIALIZE_ACTION_TIME,
                    sizeof (timestamp), 1, &timestamp);
                ASSERT(err == 0 || err == ENOENT);
                vd->vdev_initialize_action_time = (time_t)timestamp;

                if (vd->vdev_initialize_state == VDEV_INITIALIZE_SUSPENDED ||
                    vd->vdev_offline) {
                        /* load progress for reporting, but don't resume */
                        vdev_initialize_load(vd);
                } else if (vd->vdev_initialize_state ==
                    VDEV_INITIALIZE_ACTIVE && vdev_writeable(vd)) {
                        vdev_initialize(vd);
                }

                mutex_exit(&vd->vdev_initialize_lock);
        }

        for (uint64_t i = 0; i < vd->vdev_children; i++) {
                vdev_initialize_restart(vd->vdev_child[i]);
        }
}