#include <sys/dsl_dir.h>
#include <sys/dmu_traverse.h>
#include <sys/dsl_scan.h>
#include <sys/dmu_objset.h>
#include <sys/zap.h>
#include <sys/zfeature.h>
#include <sys/zfs_ioctl.h>
#include <sys/dsl_deleg.h>
#include <sys/dmu_impl.h>

typedef struct dmu_snapshots_destroy_arg {
	nvlist_t *dsda_snaps;		/* snapshots requested for destroy */
	nvlist_t *dsda_successful_snaps; /* out: snaps that passed the check */
	boolean_t dsda_defer;		/* B_TRUE for deferred destroy */
	nvlist_t *dsda_errlist;		/* out: snapshot name -> errno */
} dmu_snapshots_destroy_arg_t;
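
/*
 * For context, a caller-side sketch (not from this file) of how the
 * dsda_snaps nvlist is conventionally assembled: snapshot names as
 * nvlist keys with boolean values, the form lzc_destroy_snaps() hands
 * down.  The helper and the snapshot names here are hypothetical;
 * fnvlist_*() are the standard nvpair convenience wrappers.
 */
static void
example_build_destroy_args(dmu_snapshots_destroy_arg_t *dsda)
{
	dsda->dsda_snaps = fnvlist_alloc();
	fnvlist_add_boolean(dsda->dsda_snaps, "pool/fs@snap1");
	fnvlist_add_boolean(dsda->dsda_snaps, "pool/fs@snap2");

	dsda->dsda_successful_snaps = fnvlist_alloc();
	dsda->dsda_errlist = fnvlist_alloc();
	dsda->dsda_defer = B_FALSE;	/* immediate, not deferred, destroy */
}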

int
dsl_destroy_snapshot_check_impl(dsl_dataset_t *ds, boolean_t defer)
{
	if (!ds->ds_is_snapshot)
		return (SET_ERROR(EINVAL));

	if (dsl_dataset_long_held(ds))
		return (SET_ERROR(EBUSY));

	/*
	 * Only allow deferred destroy on pools that support it.
	 * NOTE: deferred destroy is only supported on snapshots.
	 */
	if (defer) {
		if (spa_version(ds->ds_dir->dd_pool->dp_spa) <
		    SPA_VERSION_USERREFS)
			return (SET_ERROR(ENOTSUP));
		return (0);
	}

	/*
	 * If this snapshot has an elevated user reference count,
	 * we can't destroy it yet.
	 */
 
		/* Move blocks to be freed to pool's free list. */
		dsl_deadlist_move_bpobj(&ds_next->ds_deadlist,
		    &dp->dp_free_bpobj, dsl_dataset_phys(ds)->ds_prev_snap_txg,
		    tx);
		dsl_dir_diduse_space(tx->tx_pool->dp_free_dir,
		    DD_USED_HEAD, used, comp, uncomp, tx);

		/* Merge our deadlist into next's and free it. */
		dsl_deadlist_merge(&ds_next->ds_deadlist,
		    dsl_dataset_phys(ds)->ds_deadlist_obj, tx);
	}
	dsl_deadlist_close(&ds->ds_deadlist);
	dsl_deadlist_free(mos, dsl_dataset_phys(ds)->ds_deadlist_obj, tx);
	dmu_buf_will_dirty(ds->ds_dbuf, tx);
	dsl_dataset_phys(ds)->ds_deadlist_obj = 0;

	/* Collapse range in clone heads. */
	dsl_dataset_remove_clones_key(ds,
	    dsl_dataset_phys(ds)->ds_creation_txg, tx);

	if (ds_next->ds_is_snapshot) {
		dsl_dataset_t *ds_nextnext;

		/*
		 * Update next's unique to include blocks which
		 * were previously shared by only this snapshot
		 * and it.  Those blocks will be born after the
		 * prev snap and before this snap, and will have
		 * died after the next snap and before the one
		 * after that (i.e. be on the snap-after-next's
		 * deadlist).
		 */
		VERIFY0(dsl_dataset_hold_obj(dp,
		    dsl_dataset_phys(ds_next)->ds_next_snap_obj,
		    FTAG, &ds_nextnext));
		dsl_deadlist_space_range(&ds_nextnext->ds_deadlist,
		    dsl_dataset_phys(ds)->ds_prev_snap_txg,
		    dsl_dataset_phys(ds)->ds_creation_txg,
		    &used, &comp, &uncomp);
		dsl_dataset_phys(ds_next)->ds_unique_bytes += used;
		dsl_dataset_rele(ds_nextnext, FTAG);
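
		/*
		 * Worked example with made-up txgs: prev snap created at
		 * txg 100, this snap at txg 200, next at txg 300, and the
		 * snap after next at txg 400.  A block born at txg 150 and
		 * freed at txg 350 was referenced by only this snapshot
		 * and next; it sits on the snap-after-next's deadlist with
		 * a birth txg inside (100, 200], so the
		 * dsl_deadlist_space_range() call above counts it, and its
		 * bytes are credited to next's ds_unique_bytes.
		 */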
 
	ka.ds = ds;
	ka.tx = tx;
	VERIFY0(traverse_dataset(ds,
	    dsl_dataset_phys(ds)->ds_prev_snap_txg, TRAVERSE_POST,
	    kill_blkptr, &ka));
	ASSERT(!DS_UNIQUE_IS_ACCURATE(ds) ||
	    dsl_dataset_phys(ds)->ds_unique_bytes == 0);
}
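
/*
 * For reference, the shape of a traverse_dataset() callback such as
 * kill_blkptr above: a minimal sketch (hypothetical name, assuming the
 * blkptr_cb_t signature of this code's vintage) that simply counts the
 * allocated block pointers it visits via the arg cookie.
 */
static int
example_count_blkptr(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
    const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg)
{
	uint64_t *countp = arg;

	if (bp == NULL || BP_IS_HOLE(bp))
		return (0);	/* no allocated block at this bookmark */

	(*countp)++;
	return (0);
}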

typedef struct dsl_destroy_head_arg {
	const char *ddha_name;
} dsl_destroy_head_arg_t;

int
dsl_destroy_head_check_impl(dsl_dataset_t *ds, int expected_holds)
{
	int error;
	uint64_t count;
	objset_t *mos;

	/* Catch misuse in debug builds; fail gracefully in production. */
	ASSERT(!ds->ds_is_snapshot);
	if (ds->ds_is_snapshot)
		return (SET_ERROR(EINVAL));

	if (refcount_count(&ds->ds_longholds) != expected_holds)
		return (SET_ERROR(EBUSY));

	mos = ds->ds_dir->dd_pool->dp_meta_objset;

	/*
	 * Can't delete a head dataset if there are snapshots of it.
	 * (Except if the only snapshots are from the branch we cloned
	 * from.)
	 */
	if (ds->ds_prev != NULL &&
	    dsl_dataset_phys(ds->ds_prev)->ds_next_snap_obj == ds->ds_object)
		return (SET_ERROR(EBUSY));

	/*
	 * Can't delete if there are children of this fs.
	 */
	error = zap_count(mos,
 