10592 misc. metaslab and vdev related ZoL bug fixes
Portions contributed by: Jerry Jelinek <jerry.jelinek@joyent.com>
Reviewed by: Brian Behlendorf <behlendorf1@llnl.gov>
Reviewed by: Giuseppe Di Natale <guss80@gmail.com>
Reviewed by: George Melikov <mail@gmelikov.ru>
Reviewed by: Paul Dagnelie <pcd@delphix.com>
Reviewed by: Matt Ahrens <mahrens@delphix.com>
Reviewed by: Pavel Zakharov <pavel.zakharov@delphix.com>
Reviewed by: Tony Hutter <hutter2@llnl.gov>
Reviewed by: Kody Kantor <kody.kantor@joyent.com>
Approved by: Dan McDonald <danmcd@joyent.com>
*** 281,299 ****
for (uint64_t i = 0; i < vd->vdev_ms_count; i++) {
metaslab_t *ms = vd->vdev_ms[i];
if (ms->ms_sm == NULL)
continue;
- /*
- * Sync tasks happen before metaslab_sync(), therefore
- * smp_alloc and sm_alloc must be the same.
- */
- ASSERT3U(space_map_allocated(ms->ms_sm), ==,
- ms->ms_sm->sm_phys->smp_alloc);
-
spa->spa_removing_phys.sr_to_copy +=
! space_map_allocated(ms->ms_sm);
/*
* Space which we are freeing this txg does not need to
* be copied.
*/
--- 281,292 ----
for (uint64_t i = 0; i < vd->vdev_ms_count; i++) {
metaslab_t *ms = vd->vdev_ms[i];
if (ms->ms_sm == NULL)
continue;
spa->spa_removing_phys.sr_to_copy +=
! metaslab_allocated_space(ms);
/*
* Space which we are freeing this txg does not need to
* be copied.
*/
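This hunk moves the sr_to_copy accounting from space_map_allocated(ms->ms_sm) to the new metaslab_allocated_space() accessor, which is also why the smp_alloc assertion above it could be dropped. A minimal sketch of the accessor, assuming the illumos port mirrors its ZoL counterpart in caching the allocated byte count on the metaslab itself:

    /*
     * Sketch only: ms_allocated_space is assumed to be the metaslab's
     * cached copy of the space map's allocated bytes, maintained by
     * metaslab_sync()/metaslab_sync_done().
     */
    uint64_t
    metaslab_allocated_space(metaslab_t *msp)
    {
            return (msp->ms_allocated_space);
    }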
*** 1399,1425 ****
* would have modified the space map) will wait for us
* to finish loading the spacemap, and then take the
* appropriate action (see free_from_removing_vdev()).
*/
if (msp->ms_sm != NULL) {
! space_map_t *sm = NULL;
- /*
- * We have to open a new space map here, because
- * ms_sm's sm_length and sm_alloc may not reflect
- * what's in the object contents, if we are in between
- * metaslab_sync() and metaslab_sync_done().
- */
- VERIFY0(space_map_open(&sm,
- spa->spa_dsl_pool->dp_meta_objset,
- msp->ms_sm->sm_object, msp->ms_sm->sm_start,
- msp->ms_sm->sm_size, msp->ms_sm->sm_shift));
- space_map_update(sm);
- VERIFY0(space_map_load(sm, svr->svr_allocd_segs,
- SM_ALLOC));
- space_map_close(sm);
-
range_tree_walk(msp->ms_freeing,
range_tree_remove, svr->svr_allocd_segs);
/*
* When we are resuming from a paused removal (i.e.
--- 1392,1404 ----
* would have modified the space map) will wait for us
* to finish loading the spacemap, and then take the
* appropriate action (see free_from_removing_vdev()).
*/
if (msp->ms_sm != NULL) {
! VERIFY0(space_map_load(msp->ms_sm,
! svr->svr_allocd_segs, SM_ALLOC));
range_tree_walk(msp->ms_freeing,
range_tree_remove, svr->svr_allocd_segs);
/*
* When we are resuming from a paused removal (i.e.
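The old code here opened a throwaway space_map_t on the same object because ms_sm's in-core sm_length and sm_alloc could lag the object contents between metaslab_sync() and metaslab_sync_done(). Once those in-core copies are gone, loading through ms_sm directly is safe. A hedged sketch of the load path under that assumption, with space_map_load() covering exactly the synced length of the object:

    /*
     * Sketch only: space_map_load_length() and this wrapper are assumed
     * from the ZoL change being ported; space_map_length() is taken to
     * read the dbuf-backed phys structure, so the load stops at the
     * synced portion of the object.
     */
    int
    space_map_load(space_map_t *sm, range_tree_t *rt, maptype_t maptype)
    {
            return (space_map_load_length(sm, rt, maptype,
                space_map_length(sm)));
    }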
*** 1609,1628 ****
for (int i = 0; i < TXG_DEFER_SIZE; i++)
ASSERT0(range_tree_space(msp->ms_defer[i]));
ASSERT0(range_tree_space(msp->ms_freed));
if (msp->ms_sm != NULL) {
- /*
- * Assert that the in-core spacemap has the same
- * length as the on-disk one, so we can use the
- * existing in-core spacemap to load it from disk.
- */
- ASSERT3U(msp->ms_sm->sm_alloc, ==,
- msp->ms_sm->sm_phys->smp_alloc);
- ASSERT3U(msp->ms_sm->sm_length, ==,
- msp->ms_sm->sm_phys->smp_objsize);
-
mutex_enter(&svr->svr_lock);
VERIFY0(space_map_load(msp->ms_sm,
svr->svr_allocd_segs, SM_ALLOC));
range_tree_walk(msp->ms_freeing,
range_tree_remove, svr->svr_allocd_segs);
--- 1588,1597 ----
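The assertions dropped in this hunk compared the in-core sm_alloc and sm_length against the on-disk smp_alloc and smp_objsize before reusing the in-core space map. With the duplicate in-core fields removed, the accessors presumably read the phys structure directly, leaving nothing to drift out of sync. A sketch of what they would reduce to, again assuming the port follows the ZoL counterpart:

    uint64_t
    space_map_allocated(space_map_t *sm)
    {
            return (sm->sm_phys->smp_alloc);
    }

    uint64_t
    space_map_length(space_map_t *sm)
    {
            return (sm->sm_phys->smp_objsize);
    }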
*** 1711,1723 ****
}
return (error);
}
- /*
- * Called every sync pass of every txg if there's a svr.
- */
void
svr_sync(spa_t *spa, dmu_tx_t *tx)
{
spa_vdev_removal_t *svr = spa->spa_vdev_removal;
int txgoff = dmu_tx_get_txg(tx) & TXG_MASK;
--- 1680,1689 ----
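The comment deleted above promised that svr_sync() runs only when a removal is active. The likely reason it no longer holds, judging from the ZoL change being ported (this webrev does not show the spa.c side), is that spa_sync() now calls svr_sync() unconditionally and the function returns early on its own; a sketch under that assumption:

    void
    svr_sync(spa_t *spa, dmu_tx_t *tx)
    {
            spa_vdev_removal_t *svr = spa->spa_vdev_removal;
            int txgoff = dmu_tx_get_txg(tx) & TXG_MASK;

            /* Assumed early return: nothing to do without an active removal. */
            if (svr == NULL)
                    return;

            /* ... per-txg bookkeeping for the copied/freed segments ... */
    }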
*** 1777,1786 ****
--- 1743,1753 ----
spa_t *spa = vd->vdev_spa;
int error = 0;
ASSERT(vd->vdev_islog);
ASSERT(vd == vd->vdev_top);
+ ASSERT(MUTEX_HELD(&spa_namespace_lock));
/*
* Stop allocating from this vdev.
*/
metaslab_group_passivate(mg);
*** 1791,1809 ****
*/
spa_vdev_config_exit(spa, NULL,
*txg + TXG_CONCURRENT_STATES + TXG_DEFER_SIZE, 0, FTAG);
/*
! * Evacuate the device. We don't hold the config lock as writer
! * since we need to do I/O but we do keep the
* spa_namespace_lock held. Once this completes the device
* should no longer have any blocks allocated on it.
*/
! if (vd->vdev_islog) {
if (vd->vdev_stat.vs_alloc != 0)
error = spa_reset_logs(spa);
- }
*txg = spa_vdev_config_enter(spa);
if (error != 0) {
metaslab_group_activate(mg);
--- 1758,1775 ----
*/
spa_vdev_config_exit(spa, NULL,
*txg + TXG_CONCURRENT_STATES + TXG_DEFER_SIZE, 0, FTAG);
/*
! * Evacuate the device. We don't hold the config lock as
! * writer since we need to do I/O but we do keep the
* spa_namespace_lock held. Once this completes the device
* should no longer have any blocks allocated on it.
*/
! ASSERT(MUTEX_HELD(&spa_namespace_lock));
if (vd->vdev_stat.vs_alloc != 0)
error = spa_reset_logs(spa);
*txg = spa_vdev_config_enter(spa);
if (error != 0) {
metaslab_group_activate(mg);
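Both of the preceding hunks trade a redundant vd->vdev_islog test (already asserted at function entry) for an assertion that spa_namespace_lock is held, documenting the locking contract instead of re-checking an invariant. The lock is expected to come from the caller; roughly, as a hypothetical sketch of the call path:

    /*
     * Hypothetical caller sketch: spa_vdev_enter() acquires
     * spa_namespace_lock before any removal work starts, which is what
     * the new ASSERT(MUTEX_HELD(&spa_namespace_lock)) relies on.
     */
    uint64_t txg = spa_vdev_enter(spa);
    error = spa_vdev_remove_log(vd, &txg);
    error = spa_vdev_exit(spa, NULL, txg, error);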
*** 1818,1827 ****
--- 1784,1795 ----
vd->vdev_removing = B_TRUE;
vdev_dirty_leaves(vd, VDD_DTL, *txg);
vdev_config_dirty(vd);
+ vdev_metaslab_fini(vd);
+
spa_history_log_internal(spa, "vdev remove", NULL,
"%s vdev %llu (log) %s", spa_name(spa), vd->vdev_id,
(vd->vdev_path != NULL) ? vd->vdev_path : "-");
/* Make sure these changes are sync'ed */
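Calling vdev_metaslab_fini() here tears down the log device's in-core metaslab state as soon as it has been evacuated, rather than leaving it around until the vdev itself is freed. From memory of the illumos sources rather than this webrev, the function passivates the group and finalizes each metaslab, roughly:

    /* Simplified sketch; the real function also handles other state. */
    void
    vdev_metaslab_fini(vdev_t *vd)
    {
            if (vd->vdev_ms != NULL) {
                    uint64_t count = vd->vdev_ms_count;

                    metaslab_group_passivate(vd->vdev_mg);
                    for (uint64_t m = 0; m < count; m++) {
                            if (vd->vdev_ms[m] != NULL)
                                    metaslab_fini(vd->vdev_ms[m]);
                    }
                    kmem_free(vd->vdev_ms, count * sizeof (metaslab_t *));
                    vd->vdev_ms = NULL;
                    vd->vdev_ms_count = 0;
            }
    }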
*** 1847,1856 ****
--- 1815,1826 ----
if (list_link_active(&vd->vdev_state_dirty_node))
vdev_state_clean(vd);
if (list_link_active(&vd->vdev_config_dirty_node))
vdev_config_clean(vd);
+ ASSERT0(vd->vdev_stat.vs_alloc);
+
/*
* Clean up the vdev namespace.
*/
vdev_remove_make_hole_and_free(vd);