@@ -663,41 +663,40 @@
 	space_map_t *prev_obsolete_sm = NULL;
 
 	ASSERT3U(vd->vdev_id, ==, scip->scip_vdev);
 	ASSERT(scip->scip_next_mapping_object != 0);
 	ASSERT(scip->scip_prev_obsolete_sm_object != 0);
 	ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);
 
 	for (int i = 0; i < TXG_SIZE; i++) {
 		/*
 		 * The list must start out empty in order for the
 		 * _commit_sync() sync task to be properly registered
 		 * on the first call to _commit_entry(); so it's wise
 		 * to double check and ensure we actually are starting
 		 * with empty lists.
 		 */
 		ASSERT(list_is_empty(&sci->sci_new_mapping_entries[i]));
 	}
 
 	VERIFY0(space_map_open(&prev_obsolete_sm, spa->spa_meta_objset,
 	    scip->scip_prev_obsolete_sm_object, 0, vd->vdev_asize, 0));
-	space_map_update(prev_obsolete_sm);
 	counts = vdev_indirect_mapping_load_obsolete_counts(old_mapping);
 	if (prev_obsolete_sm != NULL) {
 		vdev_indirect_mapping_load_obsolete_spacemap(old_mapping,
 		    counts, prev_obsolete_sm);
 	}
 	space_map_close(prev_obsolete_sm);
 
 	/*
 	 * Generate new mapping. Determine what index to continue from
 	 * based on the max offset that we've already written in the
 	 * new mapping.
 	 */
 	uint64_t max_offset =
 	    vdev_indirect_mapping_max_offset(sci->sci_new_mapping);
 	if (max_offset == 0) {
 		/* We haven't written anything to the new mapping yet. */
 		start_index = 0;
 	} else {
 		/*
 		 * Pick up from where we left off. _entry_for_offset()
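
The resume logic above restarts an interrupted condense at the first old-mapping entry at or beyond vdev_indirect_mapping_max_offset() of the new mapping. Below is a minimal user-space sketch of that lookup; mapping_entry_t and entry_index_for_offset() are hypothetical, simplified stand-ins for the OpenZFS structures, not the real implementation.

#include <stdint.h>
#include <stdio.h>

typedef struct {
	uint64_t me_offset;	/* source offset covered by this entry */
	uint64_t me_size;	/* length mapped by this entry */
} mapping_entry_t;

/*
 * Return the index of the first entry whose offset is >= max_offset.
 * Entries are sorted by me_offset, so a binary search suffices.
 */
static uint64_t
entry_index_for_offset(const mapping_entry_t *m, uint64_t count,
    uint64_t max_offset)
{
	if (max_offset == 0)
		return (0);	/* nothing copied yet; start at the top */

	uint64_t lo = 0, hi = count;
	while (lo < hi) {
		uint64_t mid = lo + (hi - lo) / 2;
		if (m[mid].me_offset < max_offset)
			lo = mid + 1;
		else
			hi = mid;
	}
	return (lo);
}

int
main(void)
{
	mapping_entry_t m[] = {
		{ 0, 512 }, { 512, 1024 }, { 1536, 256 }, { 1792, 512 }
	};
	/* A prior pass already copied everything below offset 1536. */
	printf("resume at index %llu\n",
	    (unsigned long long)entry_index_for_offset(m, 4, 1536));
	return (0);
}

Searching instead of rescanning keeps the restart cost logarithmic in the number of mapping entries, so work committed before an interruption is never repeated.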
@@ -816,50 +815,48 @@
 	ASSERT3U(vic->vic_mapping_object, !=, 0);
 	ASSERT(range_tree_space(vd->vdev_obsolete_segments) > 0);
 	ASSERT(vd->vdev_removing || vd->vdev_ops == &vdev_indirect_ops);
 	ASSERT(spa_feature_is_enabled(spa, SPA_FEATURE_OBSOLETE_COUNTS));
 
 	if (vdev_obsolete_sm_object(vd) == 0) {
 		uint64_t obsolete_sm_object =
 		    space_map_alloc(spa->spa_meta_objset,
 		    vdev_standard_sm_blksz, tx);
 
 		ASSERT(vd->vdev_top_zap != 0);
 		VERIFY0(zap_add(vd->vdev_spa->spa_meta_objset, vd->vdev_top_zap,
 		    VDEV_TOP_ZAP_INDIRECT_OBSOLETE_SM,
 		    sizeof (obsolete_sm_object), 1, &obsolete_sm_object, tx));
 		ASSERT3U(vdev_obsolete_sm_object(vd), !=, 0);
 
 		spa_feature_incr(spa, SPA_FEATURE_OBSOLETE_COUNTS, tx);
 		VERIFY0(space_map_open(&vd->vdev_obsolete_sm,
 		    spa->spa_meta_objset, obsolete_sm_object,
 		    0, vd->vdev_asize, 0));
-		space_map_update(vd->vdev_obsolete_sm);
 	}
 
 	ASSERT(vd->vdev_obsolete_sm != NULL);
 	ASSERT3U(vdev_obsolete_sm_object(vd), ==,
 	    space_map_object(vd->vdev_obsolete_sm));
 
 	space_map_write(vd->vdev_obsolete_sm,
 	    vd->vdev_obsolete_segments, SM_ALLOC, SM_NO_VDEVID, tx);
-	space_map_update(vd->vdev_obsolete_sm);
 	range_tree_vacate(vd->vdev_obsolete_segments, NULL, NULL);
 }
 
 int
 spa_condense_init(spa_t *spa)
 {
 	int error = zap_lookup(spa->spa_meta_objset,
 	    DMU_POOL_DIRECTORY_OBJECT,
 	    DMU_POOL_CONDENSING_INDIRECT, sizeof (uint64_t),
 	    sizeof (spa->spa_condensing_indirect_phys) / sizeof (uint64_t),
 	    &spa->spa_condensing_indirect_phys);
 	if (error == 0) {
 		if (spa_writeable(spa)) {
 			spa->spa_condensing_indirect =
 			    spa_condensing_indirect_create(spa);
 		}
 		return (0);
 	} else if (error == ENOENT) {
 		return (0);
 	} else {
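
The second hunk lazily creates the vdev's obsolete space map: if the object number recorded in the vdev's top-level ZAP is still zero, it allocates a space map, persists the object number under VDEV_TOP_ZAP_INDIRECT_OBSOLETE_SM, and only then opens it; every later call finds and reuses the same object. A minimal sketch of that create-on-first-use idiom follows; fake_zap_obsolete_sm and sync_obsolete() are hypothetical, with a plain global standing in for the durable ZAP entry.

#include <stdint.h>
#include <stdio.h>

static uint64_t fake_zap_obsolete_sm;	/* 0 means "not created yet" */
static uint64_t next_object = 100;	/* fake object-number allocator */

static uint64_t
obsolete_sm_object(void)
{
	return (fake_zap_obsolete_sm);
}

static void
sync_obsolete(void)
{
	if (obsolete_sm_object() == 0) {
		uint64_t obj = next_object++;	/* "space_map_alloc()" */
		fake_zap_obsolete_sm = obj;	/* "zap_add()" */
	}
	/* From here on, all callers see the same object. */
	printf("writing to space map object %llu\n",
	    (unsigned long long)obsolete_sm_object());
}

int
main(void)
{
	sync_obsolete();	/* creates object 100, then writes */
	sync_obsolete();	/* reuses object 100 */
	return (0);
}

In the real code both the allocation and the zap_add() take the same tx, so the object and the pointer to it become durable together: after a crash either both exist or neither does.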
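
Finally, spa_condense_init() treats a missing DMU_POOL_CONDENSING_INDIRECT entry as "no condense in progress": zap_lookup() returning 0 means resume the interrupted condense, ENOENT means a clean slate, and anything else is a genuine failure. A compact sketch of that three-way pattern, with a stubbed lookup (lookup_condense_state() is hypothetical and simply pretends the entry is absent):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

static int
lookup_condense_state(uint64_t *phys, size_t len)
{
	(void) phys; (void) len;
	return (ENOENT);	/* pretend no state is recorded on disk */
}

static int
condense_init(void)
{
	uint64_t phys[8];
	int error = lookup_condense_state(phys, sizeof (phys));
	if (error == 0) {
		/* State exists: resume the interrupted condense. */
		printf("resuming condense\n");
		return (0);
	} else if (error == ENOENT) {
		/* Nothing was in progress; that is not an error. */
		return (0);
	}
	/* Any other errno is a real failure and is passed up. */
	return (error);
}

int
main(void)
{
	printf("condense_init: %d\n", condense_init());
	return (0);
}

Treating ENOENT as success means a pool that has never condensed an indirect vdev pays nothing at open time; only an unexpected errno propagates to the caller.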