1 /*
   2  * CDDL HEADER START
   3  *
   4  * The contents of this file are subject to the terms of the
   5  * Common Development and Distribution License (the "License").
   6  * You may not use this file except in compliance with the License.
   7  *
   8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
   9  * or http://www.opensolaris.org/os/licensing.
  10  * See the License for the specific language governing permissions
  11  * and limitations under the License.
  12  *
  13  * When distributing Covered Code, include this CDDL HEADER in each
  14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
  15  * If applicable, add the following below this CDDL HEADER, with the
  16  * fields enclosed by brackets "[]" replaced with your own identifying
  17  * information: Portions Copyright [yyyy] [name of copyright owner]
  18  *
  19  * CDDL HEADER END
  20  */
  21 /*
  22  * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
  23  * Copyright (c) 2012, 2014 by Delphix. All rights reserved.
  24  * Copyright (c) 2013 Martin Matuska. All rights reserved.
  25  * Copyright (c) 2014 Joyent, Inc. All rights reserved.
  26  */
  27 
  28 #include <sys/dmu.h>
  29 #include <sys/dmu_objset.h>
  30 #include <sys/dmu_tx.h>
  31 #include <sys/dsl_dataset.h>
  32 #include <sys/dsl_dir.h>
  33 #include <sys/dsl_prop.h>
  34 #include <sys/dsl_synctask.h>
  35 #include <sys/dsl_deleg.h>
  36 #include <sys/dmu_impl.h>
  37 #include <sys/spa.h>
  38 #include <sys/metaslab.h>
  39 #include <sys/zap.h>
  40 #include <sys/zio.h>
  41 #include <sys/arc.h>
  42 #include <sys/sunddi.h>
  43 #include <sys/zfeature.h>
  44 #include <sys/policy.h>
  45 #include <sys/zfs_znode.h>
  46 #include "zfs_namecheck.h"
  47 #include "zfs_prop.h"
  48 
  49 /*
  50  * Filesystem and Snapshot Limits
  51  * ------------------------------
  52  *
  53  * These limits are used to restrict the number of filesystems and/or snapshots
  54  * that can be created at a given level in the tree or below. A typical
  55  * use-case is with a delegated dataset where the administrator wants to ensure
  56  * that a user within the zone is not creating too many additional filesystems
  57  * or snapshots, even though they're not exceeding their space quota.
  58  *
  59  * The filesystem and snapshot counts are stored as extensible properties. This
  60  * capability is controlled by a feature flag and must be enabled to be used.
  61  * Once enabled, the feature is not active until the first limit is set. At
  62  * that point, future operations to create/destroy filesystems or snapshots
  63  * will validate and update the counts.
  64  *
  65  * Because the count properties will not exist before the feature is active,
  66  * the counts are updated when a limit is first set on an uninitialized
  67  * dsl_dir node in the tree (The filesystem/snapshot count on a node includes
  68  * all of the nested filesystems/snapshots. Thus, a new leaf node has a
  69  * filesystem count of 0 and a snapshot count of 0. Non-existent filesystem and
  70  * snapshot count properties on a node indicate uninitialized counts on that
  71  * node.) When first setting a limit on an uninitialized node, the code starts
  72  * at the filesystem with the new limit and descends into all sub-filesystems
  73  * to add the count properties.
  74  *
  75  * In practice this is lightweight since a limit is typically set when the
  76  * filesystem is created and thus has no children. Once valid, changing the
  77  * limit value won't require a re-traversal since the counts are already valid.
  78  * When recursively fixing the counts, if a node with a limit is encountered
  79  * during the descent, the counts are known to be valid and there is no need to
  80  * descend into that filesystem's children. The counts on filesystems above the
  81  * one with the new limit will still be uninitialized, unless a limit is
  82  * eventually set on one of those filesystems. The counts are always recursively
  83  * updated when a limit is set on a dataset, unless there is already a limit.
  84  * When a new limit value is set on a filesystem with an existing limit, it is
  85  * possible for the new limit to be less than the current count at that level
  86  * since a user who can change the limit is also allowed to exceed the limit.
  87  *
  88  * Once the feature is active, then whenever a filesystem or snapshot is
  89  * created, the code recurses up the tree, validating the new count against the
  90  * limit at each initialized level. In practice, most levels will not have a
  91  * limit set. If there is a limit at any initialized level up the tree, the
  92  * check must pass or the creation will fail. Likewise, when a filesystem or
  93  * snapshot is destroyed, the counts are recursively adjusted all the way up
  94  * the initialized nodes in the tree. Renaming a filesystem to a different point
  95  * in the tree will first validate, then update the counts on each branch up to
  96  * the common ancestor. A receive will also validate the counts and then update
  97  * them.
  98  *
  99  * An exception to the above behavior is that the limit is not enforced if the
 100  * user has permission to modify the limit. This is primarily so that
 101  * recursive snapshots in the global zone always work. We want to prevent a
 102  * denial-of-service in which a lower level delegated dataset could max out its
 103  * limit and thus block recursive snapshots from being taken in the global zone.
 104  * Because of this, it is possible for the snapshot count to be over the limit
 105  * and snapshots taken in the global zone could cause a lower level dataset to
 106  * hit or exceed its limit. The administrator taking the global zone recursive
 107  * snapshot should be aware of this side-effect and behave accordingly.
 108  * For consistency, the filesystem limit is also not enforced if the user can
 109  * modify the limit.
 110  *
 111  * The filesystem and snapshot limits are validated by dsl_fs_ss_limit_check()
 112  * and updated by dsl_fs_ss_count_adjust(). A new limit value is set up in
 113  * dsl_dir_activate_fs_ss_limit() and the counts are adjusted, if necessary, by
 114  * dsl_dir_init_fs_ss_count().
 115  *
 116  * There is a special case when we receive a filesystem that already exists. In
 117  * this case a temporary clone name of %X is created (see dmu_recv_begin). We
 118  * never update the filesystem counts for temporary clones.
 119  *
 120  * Likewise, we do not update the snapshot counts for temporary snapshots,
 121  * such as those created by zfs diff.
 122  */
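
/*
 * Illustrative example (hypothetical pool layout, made-up numbers):
 *
 *      tank            no limit, counts uninitialized
 *      tank/proj       filesystem_limit=3, filesystem count=2
 *      tank/proj/a     counts 0/0
 *      tank/proj/b     counts 0/0
 *
 * Setting the limit on tank/proj initialized the counts on tank/proj and
 * everything below it; tank itself stays uninitialized.  Creating
 * tank/proj/c validates upward: tank/proj has count 2 and limit 3, so
 * 2 + 1 <= 3 passes, and the walk stops at tank because its count is
 * uninitialized.  A later attempt to create a fourth child of tank/proj
 * would fail with EDQUOT, unless the caller has permission to change the
 * limit as described above.
 */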
 123 
 124 extern inline dsl_dir_phys_t *dsl_dir_phys(dsl_dir_t *dd);
 125 
 126 static uint64_t dsl_dir_space_towrite(dsl_dir_t *dd);
 127 
 128 /* ARGSUSED */
 129 static void
 130 dsl_dir_evict(dmu_buf_t *db, void *arg)
 131 {
 132         dsl_dir_t *dd = arg;
 133         dsl_pool_t *dp = dd->dd_pool;
 134         int t;
 135 
 136         for (t = 0; t < TXG_SIZE; t++) {
 137                 ASSERT(!txg_list_member(&dp->dp_dirty_dirs, dd, t));
 138                 ASSERT(dd->dd_tempreserved[t] == 0);
 139                 ASSERT(dd->dd_space_towrite[t] == 0);
 140         }
 141 
 142         if (dd->dd_parent)
 143                 dsl_dir_rele(dd->dd_parent, dd);
 144 
 145         spa_close(dd->dd_pool->dp_spa, dd);
 146 
 147         /*
 148          * The props callback list should have been cleaned up by
 149          * objset_evict().
 150          */
 151         list_destroy(&dd->dd_prop_cbs);
 152         mutex_destroy(&dd->dd_lock);
 153         kmem_free(dd, sizeof (dsl_dir_t));
 154 }
 155 
 156 int
 157 dsl_dir_hold_obj(dsl_pool_t *dp, uint64_t ddobj,
 158     const char *tail, void *tag, dsl_dir_t **ddp)
 159 {
 160         dmu_buf_t *dbuf;
 161         dsl_dir_t *dd;
 162         int err;
 163 
 164         ASSERT(dsl_pool_config_held(dp));
 165 
 166         err = dmu_bonus_hold(dp->dp_meta_objset, ddobj, tag, &dbuf);
 167         if (err != 0)
 168                 return (err);
 169         dd = dmu_buf_get_user(dbuf);
 170 #ifdef ZFS_DEBUG
 171         {
 172                 dmu_object_info_t doi;
 173                 dmu_object_info_from_db(dbuf, &doi);
 174                 ASSERT3U(doi.doi_bonus_type, ==, DMU_OT_DSL_DIR);
 175                 ASSERT3U(doi.doi_bonus_size, >=, sizeof (dsl_dir_phys_t));
 176         }
 177 #endif
 178         if (dd == NULL) {
 179                 dsl_dir_t *winner;
 180 
 181                 dd = kmem_zalloc(sizeof (dsl_dir_t), KM_SLEEP);
 182                 dd->dd_object = ddobj;
 183                 dd->dd_dbuf = dbuf;
 184                 dd->dd_pool = dp;
 185                 mutex_init(&dd->dd_lock, NULL, MUTEX_DEFAULT, NULL);
 186 
 187                 list_create(&dd->dd_prop_cbs, sizeof (dsl_prop_cb_record_t),
 188                     offsetof(dsl_prop_cb_record_t, cbr_node));
 189 
 190                 dsl_dir_snap_cmtime_update(dd);
 191 
 192                 if (dsl_dir_phys(dd)->dd_parent_obj) {
 193                         err = dsl_dir_hold_obj(dp,
 194                             dsl_dir_phys(dd)->dd_parent_obj, NULL, dd,
 195                             &dd->dd_parent);
 196                         if (err != 0)
 197                                 goto errout;
 198                         if (tail) {
 199 #ifdef ZFS_DEBUG
 200                                 uint64_t foundobj;
 201 
 202                                 err = zap_lookup(dp->dp_meta_objset,
 203                                     dsl_dir_phys(dd->dd_parent)->
 204                                     dd_child_dir_zapobj, tail,
 205                                     sizeof (foundobj), 1, &foundobj);
 206                                 ASSERT(err || foundobj == ddobj);
 207 #endif
 208                                 (void) strcpy(dd->dd_myname, tail);
 209                         } else {
 210                                 err = zap_value_search(dp->dp_meta_objset,
 211                                     dsl_dir_phys(dd->dd_parent)->
 212                                     dd_child_dir_zapobj,
 213                                     ddobj, 0, dd->dd_myname);
 214                         }
 215                         if (err != 0)
 216                                 goto errout;
 217                 } else {
 218                         (void) strcpy(dd->dd_myname, spa_name(dp->dp_spa));
 219                 }
 220 
 221                 if (dsl_dir_is_clone(dd)) {
 222                         dmu_buf_t *origin_bonus;
 223                         dsl_dataset_phys_t *origin_phys;
 224 
 225                         /*
 226                          * We can't open the origin dataset, because
 227                          * that would require opening this dsl_dir.
 228                          * Just look at its phys directly instead.
 229                          */
 230                         err = dmu_bonus_hold(dp->dp_meta_objset,
 231                             dsl_dir_phys(dd)->dd_origin_obj, FTAG,
 232                             &origin_bonus);
 233                         if (err != 0)
 234                                 goto errout;
 235                         origin_phys = origin_bonus->db_data;
 236                         dd->dd_origin_txg =
 237                             origin_phys->ds_creation_txg;
 238                         dmu_buf_rele(origin_bonus, FTAG);
 239                 }
 240 
 241                 winner = dmu_buf_set_user_ie(dbuf, dd, dsl_dir_evict);
 242                 if (winner) {
 243                         if (dd->dd_parent)
 244                                 dsl_dir_rele(dd->dd_parent, dd);
 245                         mutex_destroy(&dd->dd_lock);
 246                         kmem_free(dd, sizeof (dsl_dir_t));
 247                         dd = winner;
 248                 } else {
 249                         spa_open_ref(dp->dp_spa, dd);
 250                 }
 251         }
 252 
 253         /*
 254          * The dsl_dir_t has both open-to-close and instantiate-to-evict
 255          * holds on the spa.  We need the open-to-close holds because
 256          * otherwise the spa_refcnt wouldn't change when we open a
 257          * dir which the spa also has open, so we could incorrectly
 258          * think it was OK to unload/export/destroy the pool.  We need
 259          * the instantiate-to-evict hold because the dsl_dir_t has a
 260          * pointer to the dd_pool, which has a pointer to the spa_t.
 261          */
 262         spa_open_ref(dp->dp_spa, tag);
 263         ASSERT3P(dd->dd_pool, ==, dp);
 264         ASSERT3U(dd->dd_object, ==, ddobj);
 265         ASSERT3P(dd->dd_dbuf, ==, dbuf);
 266         *ddp = dd;
 267         return (0);
 268 
 269 errout:
 270         if (dd->dd_parent)
 271                 dsl_dir_rele(dd->dd_parent, dd);
 272         mutex_destroy(&dd->dd_lock);
 273         kmem_free(dd, sizeof (dsl_dir_t));
 274         dmu_buf_rele(dbuf, tag);
 275         return (err);
 276 }
 277 
 278 void
 279 dsl_dir_rele(dsl_dir_t *dd, void *tag)
 280 {
 281         dprintf_dd(dd, "%s\n", "");
 282         spa_close(dd->dd_pool->dp_spa, tag);
 283         dmu_buf_rele(dd->dd_dbuf, tag);
 284 }
 285 
 286 /* buf must be long enough (MAXNAMELEN + strlen(MOS_DIR_NAME) + 1 should do) */
 287 void
 288 dsl_dir_name(dsl_dir_t *dd, char *buf)
 289 {
 290         if (dd->dd_parent) {
 291                 dsl_dir_name(dd->dd_parent, buf);
 292                 (void) strcat(buf, "/");
 293         } else {
 294                 buf[0] = '\0';
 295         }
 296         if (!MUTEX_HELD(&dd->dd_lock)) {
 297                 /*
 298                  * recursive mutex so that we can use
 299                  * dprintf_dd() with dd_lock held
 300                  */
 301                 mutex_enter(&dd->dd_lock);
 302                 (void) strcat(buf, dd->dd_myname);
 303                 mutex_exit(&dd->dd_lock);
 304         } else {
 305                 (void) strcat(buf, dd->dd_myname);
 306         }
 307 }
 308 
 309 /* Calculate name length, avoiding all the strcat calls of dsl_dir_name */
 310 int
 311 dsl_dir_namelen(dsl_dir_t *dd)
 312 {
 313         int result = 0;
 314 
 315         if (dd->dd_parent) {
 316                 /* parent's name + 1 for the "/" */
 317                 result = dsl_dir_namelen(dd->dd_parent) + 1;
 318         }
 319 
 320         if (!MUTEX_HELD(&dd->dd_lock)) {
 321                 /* see dsl_dir_name */
 322                 mutex_enter(&dd->dd_lock);
 323                 result += strlen(dd->dd_myname);
 324                 mutex_exit(&dd->dd_lock);
 325         } else {
 326                 result += strlen(dd->dd_myname);
 327         }
 328 
 329         return (result);
 330 }
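
/*
 * Usage sketch (hypothetical caller, shown only to illustrate the buffer
 * rule stated above dsl_dir_name()):
 *
 *      int len = dsl_dir_namelen(dd) + 1;
 *      char *name = kmem_alloc(len, KM_SLEEP);
 *      dsl_dir_name(dd, name);
 *      ...
 *      kmem_free(name, len);
 *
 * A caller with a fixed buffer can instead size it as
 * MAXNAMELEN + strlen(MOS_DIR_NAME) + 1, per the comment above.
 */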
 331 
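/*
 * Copy the first component of "path" (up to the next '/' or '@') into
 * "component" and point *nextp at the remainder: just past a '/', at the
 * '@' when a snapshot name follows, or NULL if this was the last
 * component.  Returns ENOENT for an empty name, EINVAL for a malformed
 * name, and ENAMETOOLONG if the component does not fit in MAXNAMELEN.
 */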
 332 static int
 333 getcomponent(const char *path, char *component, const char **nextp)
 334 {
 335         char *p;
 336 
 337         if ((path == NULL) || (path[0] == '\0'))
 338                 return (SET_ERROR(ENOENT));
 339         /* This would be a good place to reserve some namespace... */
 340         p = strpbrk(path, "/@");
 341         if (p && (p[1] == '/' || p[1] == '@')) {
 342                 /* two separators in a row */
 343                 return (SET_ERROR(EINVAL));
 344         }
 345         if (p == NULL || p == path) {
 346                 /*
 347                  * if the first thing is an @ or /, it had better be an
 348                  * @ and it had better not have any more ats or slashes,
 349                  * and it had better have something after the @.
 350                  */
 351                 if (p != NULL &&
 352                     (p[0] != '@' || strpbrk(path+1, "/@") || p[1] == '\0'))
 353                         return (SET_ERROR(EINVAL));
 354                 if (strlen(path) >= MAXNAMELEN)
 355                         return (SET_ERROR(ENAMETOOLONG));
 356                 (void) strcpy(component, path);
 357                 p = NULL;
 358         } else if (p[0] == '/') {
 359                 if (p - path >= MAXNAMELEN)
 360                         return (SET_ERROR(ENAMETOOLONG));
 361                 (void) strncpy(component, path, p - path);
 362                 component[p - path] = '\0';
 363                 p++;
 364         } else if (p[0] == '@') {
 365                 /*
 366                  * if the next separator is an @, there better not be
 367                  * any more slashes.
 368                  */
 369                 if (strchr(path, '/'))
 370                         return (SET_ERROR(EINVAL));
 371                 if (p - path >= MAXNAMELEN)
 372                         return (SET_ERROR(ENAMETOOLONG));
 373                 (void) strncpy(component, path, p - path);
 374                 component[p - path] = '\0';
 375         } else {
 376                 panic("invalid p=%p", (void *)p);
 377         }
 378         *nextp = p;
 379         return (0);
 380 }
 381 
 382 /*
 383  * Return the dsl_dir_t, and possibly the last component which couldn't
 384  * be found in *tail.  The name must be in the specified dsl_pool_t.  This
 385  * thread must hold the dp_config_rwlock for the pool.  Returns an error if the
 386  * path is bogus, or if tail==NULL and we couldn't parse the whole name.
 387  * (*tail)[0] == '@' means that the last component is a snapshot.
 388  */
 389 int
 390 dsl_dir_hold(dsl_pool_t *dp, const char *name, void *tag,
 391     dsl_dir_t **ddp, const char **tailp)
 392 {
 393         char buf[MAXNAMELEN];
 394         const char *spaname, *next, *nextnext = NULL;
 395         int err;
 396         dsl_dir_t *dd;
 397         uint64_t ddobj;
 398 
 399         err = getcomponent(name, buf, &next);
 400         if (err != 0)
 401                 return (err);
 402 
 403         /* Make sure the name is in the specified pool. */
 404         spaname = spa_name(dp->dp_spa);
 405         if (strcmp(buf, spaname) != 0)
 406                 return (SET_ERROR(EINVAL));
 407 
 408         ASSERT(dsl_pool_config_held(dp));
 409 
 410         err = dsl_dir_hold_obj(dp, dp->dp_root_dir_obj, NULL, tag, &dd);
 411         if (err != 0) {
 412                 return (err);
 413         }
 414 
 415         while (next != NULL) {
 416                 dsl_dir_t *child_ds;
 417                 err = getcomponent(next, buf, &nextnext);
 418                 if (err != 0)
 419                         break;
 420                 ASSERT(next[0] != '\0');
 421                 if (next[0] == '@')
 422                         break;
 423                 dprintf("looking up %s in obj%lld\n",
 424                     buf, dsl_dir_phys(dd)->dd_child_dir_zapobj);
 425 
 426                 err = zap_lookup(dp->dp_meta_objset,
 427                     dsl_dir_phys(dd)->dd_child_dir_zapobj,
 428                     buf, sizeof (ddobj), 1, &ddobj);
 429                 if (err != 0) {
 430                         if (err == ENOENT)
 431                                 err = 0;
 432                         break;
 433                 }
 434 
 435                 err = dsl_dir_hold_obj(dp, ddobj, buf, tag, &child_ds);
 436                 if (err != 0)
 437                         break;
 438                 dsl_dir_rele(dd, tag);
 439                 dd = child_ds;
 440                 next = nextnext;
 441         }
 442 
 443         if (err != 0) {
 444                 dsl_dir_rele(dd, tag);
 445                 return (err);
 446         }
 447 
 448         /*
 449          * It's an error if there's more than one component left, or
 450          * tailp==NULL and there's any component left.
 451          */
 452         if (next != NULL &&
 453             (tailp == NULL || (nextnext && nextnext[0] != '\0'))) {
 454                 /* bad path name */
 455                 dsl_dir_rele(dd, tag);
 456                 dprintf("next=%p (%s) tail=%p\n", next, next?next:"", tailp);
 457                 err = SET_ERROR(ENOENT);
 458         }
 459         if (tailp != NULL)
 460                 *tailp = next;
 461         *ddp = dd;
 462         return (err);
 463 }
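
/*
 * Usage sketch (hypothetical caller, for illustration only; assumes dp is
 * the relevant dsl_pool_t and the pool config lock is held, as required
 * above): holding "tank/home@yesterday" returns the dsl_dir_t for
 * tank/home with *tailp pointing at "@yesterday", while holding
 * "tank/home" sets *tailp to NULL.
 *
 *      dsl_dir_t *dd;
 *      const char *tail;
 *      int err = dsl_dir_hold(dp, "tank/home@yesterday", FTAG, &dd, &tail);
 *      if (err == 0) {
 *              ASSERT(tail != NULL && tail[0] == '@');
 *              dsl_dir_rele(dd, FTAG);
 *      }
 */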
 464 
 465 /*
 466  * If the counts are already initialized for this filesystem and its
 467  * descendants then do nothing, otherwise initialize the counts.
 468  *
 469  * The counts on this filesystem, and those below, may be uninitialized due to
 470  * either the use of a pre-existing pool which did not support the
 471  * filesystem/snapshot limit feature, or one in which the feature had not yet
 472  * been enabled.
 473  *
 474  * Recursively descend the filesystem tree and update the filesystem/snapshot
 475  * counts on each filesystem below, then update the cumulative count on the
 476  * current filesystem. If the filesystem already has a count set on it,
 477  * then we know that its counts, and the counts on the filesystems below it,
 478  * are already correct, so we don't have to update this filesystem.
 479  */
 480 static void
 481 dsl_dir_init_fs_ss_count(dsl_dir_t *dd, dmu_tx_t *tx)
 482 {
 483         uint64_t my_fs_cnt = 0;
 484         uint64_t my_ss_cnt = 0;
 485         dsl_pool_t *dp = dd->dd_pool;
 486         objset_t *os = dp->dp_meta_objset;
 487         zap_cursor_t *zc;
 488         zap_attribute_t *za;
 489         dsl_dataset_t *ds;
 490 
 491         ASSERT(spa_feature_is_active(dp->dp_spa, SPA_FEATURE_FS_SS_LIMIT));
 492         ASSERT(dsl_pool_config_held(dp));
 493         ASSERT(dmu_tx_is_syncing(tx));
 494 
 495         dsl_dir_zapify(dd, tx);
 496 
 497         /*
 498          * If the filesystem count has already been initialized then we
 499          * don't need to recurse down any further.
 500          */
 501         if (zap_contains(os, dd->dd_object, DD_FIELD_FILESYSTEM_COUNT) == 0)
 502                 return;
 503 
 504         zc = kmem_alloc(sizeof (zap_cursor_t), KM_SLEEP);
 505         za = kmem_alloc(sizeof (zap_attribute_t), KM_SLEEP);
 506 
 507         /* Iterate my child dirs */
 508         for (zap_cursor_init(zc, os, dsl_dir_phys(dd)->dd_child_dir_zapobj);
 509             zap_cursor_retrieve(zc, za) == 0; zap_cursor_advance(zc)) {
 510                 dsl_dir_t *chld_dd;
 511                 uint64_t count;
 512 
 513                 VERIFY0(dsl_dir_hold_obj(dp, za->za_first_integer, NULL, FTAG,
 514                     &chld_dd));
 515 
 516                 /*
 517                  * Ignore hidden ($FREE, $MOS & $ORIGIN) objsets and
 518                  * temporary datasets.
 519                  */
 520                 if (chld_dd->dd_myname[0] == '$' ||
 521                     chld_dd->dd_myname[0] == '%') {
 522                         dsl_dir_rele(chld_dd, FTAG);
 523                         continue;
 524                 }
 525 
 526                 my_fs_cnt++;    /* count this child */
 527 
 528                 dsl_dir_init_fs_ss_count(chld_dd, tx);
 529 
 530                 VERIFY0(zap_lookup(os, chld_dd->dd_object,
 531                     DD_FIELD_FILESYSTEM_COUNT, sizeof (count), 1, &count));
 532                 my_fs_cnt += count;
 533                 VERIFY0(zap_lookup(os, chld_dd->dd_object,
 534                     DD_FIELD_SNAPSHOT_COUNT, sizeof (count), 1, &count));
 535                 my_ss_cnt += count;
 536 
 537                 dsl_dir_rele(chld_dd, FTAG);
 538         }
 539         zap_cursor_fini(zc);
 540         /* Count my snapshots (we counted children's snapshots above) */
 541         VERIFY0(dsl_dataset_hold_obj(dd->dd_pool,
 542             dsl_dir_phys(dd)->dd_head_dataset_obj, FTAG, &ds));
 543 
 544         for (zap_cursor_init(zc, os, dsl_dataset_phys(ds)->ds_snapnames_zapobj);
 545             zap_cursor_retrieve(zc, za) == 0;
 546             zap_cursor_advance(zc)) {
 547                 /* Don't count temporary snapshots */
 548                 if (za->za_name[0] != '%')
 549                         my_ss_cnt++;
 550         }
 551         zap_cursor_fini(zc);
 552 
 553         dsl_dataset_rele(ds, FTAG);
 554 
 555         kmem_free(zc, sizeof (zap_cursor_t));
 556         kmem_free(za, sizeof (zap_attribute_t));
 557 
 558         /* we're in a sync task, update counts */
 559         dmu_buf_will_dirty(dd->dd_dbuf, tx);
 560         VERIFY0(zap_add(os, dd->dd_object, DD_FIELD_FILESYSTEM_COUNT,
 561             sizeof (my_fs_cnt), 1, &my_fs_cnt, tx));
 562         VERIFY0(zap_add(os, dd->dd_object, DD_FIELD_SNAPSHOT_COUNT,
 563             sizeof (my_ss_cnt), 1, &my_ss_cnt, tx));
 564 }
 565 
 566 static int
 567 dsl_dir_actv_fs_ss_limit_check(void *arg, dmu_tx_t *tx)
 568 {
 569         char *ddname = (char *)arg;
 570         dsl_pool_t *dp = dmu_tx_pool(tx);
 571         dsl_dataset_t *ds;
 572         dsl_dir_t *dd;
 573         int error;
 574 
 575         error = dsl_dataset_hold(dp, ddname, FTAG, &ds);
 576         if (error != 0)
 577                 return (error);
 578 
 579         if (!spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_FS_SS_LIMIT)) {
 580                 dsl_dataset_rele(ds, FTAG);
 581                 return (SET_ERROR(ENOTSUP));
 582         }
 583 
 584         dd = ds->ds_dir;
 585         if (spa_feature_is_active(dp->dp_spa, SPA_FEATURE_FS_SS_LIMIT) &&
 586             dsl_dir_is_zapified(dd) &&
 587             zap_contains(dp->dp_meta_objset, dd->dd_object,
 588             DD_FIELD_FILESYSTEM_COUNT) == 0) {
 589                 dsl_dataset_rele(ds, FTAG);
 590                 return (SET_ERROR(EALREADY));
 591         }
 592 
 593         dsl_dataset_rele(ds, FTAG);
 594         return (0);
 595 }
 596 
 597 static void
 598 dsl_dir_actv_fs_ss_limit_sync(void *arg, dmu_tx_t *tx)
 599 {
 600         char *ddname = (char *)arg;
 601         dsl_pool_t *dp = dmu_tx_pool(tx);
 602         dsl_dataset_t *ds;
 603         spa_t *spa;
 604 
 605         VERIFY0(dsl_dataset_hold(dp, ddname, FTAG, &ds));
 606 
 607         spa = dsl_dataset_get_spa(ds);
 608 
 609         if (!spa_feature_is_active(spa, SPA_FEATURE_FS_SS_LIMIT)) {
 610                 /*
 611                  * Since the feature was not active and we're now setting a
 612                  * limit, increment the feature-active counter so that the
 613                  * feature becomes active for the first time.
 614                  *
 615                  * We are already in a sync task so we can update the MOS.
 616                  */
 617                 spa_feature_incr(spa, SPA_FEATURE_FS_SS_LIMIT, tx);
 618         }
 619 
 620         /*
 621          * Since we are now setting a non-UINT64_MAX limit on the filesystem,
 622          * we need to ensure the counts are correct. Descend down the tree from
 623          * this point and update all of the counts to be accurate.
 624          */
 625         dsl_dir_init_fs_ss_count(ds->ds_dir, tx);
 626 
 627         dsl_dataset_rele(ds, FTAG);
 628 }
 629 
 630 /*
 631  * Make sure the feature is enabled and activate it if necessary.
 632  * Since we're setting a limit, ensure the on-disk counts are valid.
 633  * This is only called by the ioctl path when setting a limit value.
 634  *
 635  * We do not need to validate the new limit, since users who can change the
 636  * limit are also allowed to exceed the limit.
 637  */
 638 int
 639 dsl_dir_activate_fs_ss_limit(const char *ddname)
 640 {
 641         int error;
 642 
 643         error = dsl_sync_task(ddname, dsl_dir_actv_fs_ss_limit_check,
 644             dsl_dir_actv_fs_ss_limit_sync, (void *)ddname, 0,
 645             ZFS_SPACE_CHECK_RESERVED);
 646 
 647         if (error == EALREADY)
 648                 error = 0;
 649 
 650         return (error);
 651 }
 652 
 653 /*
 654  * Used to determine if the filesystem_limit or snapshot_limit should be
 655  * enforced. We allow the limit to be exceeded if the user has permission to
 656  * write the property value. We pass in the creds that we got in the open
 657  * context since we will always be the GZ root in syncing context. We also have
 658  * to handle the case where we are allowed to change the limit on the current
 659  * dataset, but there may be another limit in the tree above.
 660  *
 661  * We can never modify these two properties within a non-global zone. In
 662  * addition, the other checks are modeled on zfs_secpolicy_write_perms. We
 663  * can't use that function since we are already holding the dp_config_rwlock.
 664  * Also, we already have the dd, and dealing with snapshots is simpler
 665  * in this code.
 666  */
 667 
 668 typedef enum {
 669         ENFORCE_ALWAYS,         /* enforce the limit at this level */
 670         ENFORCE_NEVER,          /* no limit applies to this request */
 671         ENFORCE_ABOVE           /* skip this level, but check levels above */
 672 } enforce_res_t;
 673 
 674 static enforce_res_t
 675 dsl_enforce_ds_ss_limits(dsl_dir_t *dd, zfs_prop_t prop, cred_t *cr)
 676 {
 677         enforce_res_t enforce = ENFORCE_ALWAYS;
 678         uint64_t obj;
 679         dsl_dataset_t *ds;
 680         uint64_t zoned;
 681 
 682         ASSERT(prop == ZFS_PROP_FILESYSTEM_LIMIT ||
 683             prop == ZFS_PROP_SNAPSHOT_LIMIT);
 684 
 685 #ifdef _KERNEL
 686         if (crgetzoneid(cr) != GLOBAL_ZONEID)
 687                 return (ENFORCE_ALWAYS);
 688 
 689         if (secpolicy_zfs(cr) == 0)
 690                 return (ENFORCE_NEVER);
 691 #endif
 692 
 693         if ((obj = dsl_dir_phys(dd)->dd_head_dataset_obj) == 0)
 694                 return (ENFORCE_ALWAYS);
 695 
 696         ASSERT(dsl_pool_config_held(dd->dd_pool));
 697 
 698         if (dsl_dataset_hold_obj(dd->dd_pool, obj, FTAG, &ds) != 0)
 699                 return (ENFORCE_ALWAYS);
 700 
 701         if (dsl_prop_get_ds(ds, "zoned", 8, 1, &zoned, NULL) || zoned) {
 702                 /* Only root can access zoned fs's from the GZ */
 703                 enforce = ENFORCE_ALWAYS;
 704         } else {
 705                 if (dsl_deleg_access_impl(ds, zfs_prop_to_name(prop), cr) == 0)
 706                         enforce = ENFORCE_ABOVE;
 707         }
 708 
 709         dsl_dataset_rele(ds, FTAG);
 710         return (enforce);
 711 }
 712 
 713 /*
 714  * Check if adding additional child filesystem(s) would exceed any filesystem
 715  * limits or adding additional snapshot(s) would exceed any snapshot limits.
 716  * The prop argument indicates which limit to check.
 717  *
 718  * Note that all filesystem limits up to the root (or the highest
 719  * initialized) filesystem or the given ancestor must be satisfied.
 720  */
 721 int
 722 dsl_fs_ss_limit_check(dsl_dir_t *dd, uint64_t delta, zfs_prop_t prop,
 723     dsl_dir_t *ancestor, cred_t *cr)
 724 {
 725         objset_t *os = dd->dd_pool->dp_meta_objset;
 726         uint64_t limit, count;
 727         char *count_prop;
 728         enforce_res_t enforce;
 729         int err = 0;
 730 
 731         ASSERT(dsl_pool_config_held(dd->dd_pool));
 732         ASSERT(prop == ZFS_PROP_FILESYSTEM_LIMIT ||
 733             prop == ZFS_PROP_SNAPSHOT_LIMIT);
 734 
 735         /*
 736          * If we're allowed to change the limit, don't enforce the limit;
 737          * e.g. this can happen if a snapshot is taken by an administrative
 738          * user in the global zone (i.e. a recursive snapshot by root).
 739          * However, we must handle the case of delegated permissions where we
 740          * are allowed to change the limit on the current dataset, but there
 741          * is another limit in the tree above.
 742          */
 743         enforce = dsl_enforce_ds_ss_limits(dd, prop, cr);
 744         if (enforce == ENFORCE_NEVER)
 745                 return (0);
 746 
 747         /*
 748          * e.g. if renaming a dataset with no snapshots, count adjustment
 749          * is 0.
 750          */
 751         if (delta == 0)
 752                 return (0);
 753 
 754         if (prop == ZFS_PROP_SNAPSHOT_LIMIT) {
 755                 /*
 756                  * We don't enforce the limit for temporary snapshots. This is
 757                  * indicated by a NULL cred_t argument.
 758                  */
 759                 if (cr == NULL)
 760                         return (0);
 761 
 762                 count_prop = DD_FIELD_SNAPSHOT_COUNT;
 763         } else {
 764                 count_prop = DD_FIELD_FILESYSTEM_COUNT;
 765         }
 766 
 767         /*
 768          * If an ancestor has been provided, stop checking the limit once we
 769          * hit that dir. We need this during rename so that the moved dataset is
 770          * not counted against the common ancestor, where the net change is zero.
 771          */
 772         if (ancestor == dd)
 773                 return (0);
 774 
 775         /*
 776          * If we hit an uninitialized node while recursing up the tree, we can
 777          * stop since we know there is no limit here (or above). The counts are
 778          * not valid on this node and we know we won't touch this node's counts.
 779          */
 780         if (!dsl_dir_is_zapified(dd) || zap_lookup(os, dd->dd_object,
 781             count_prop, sizeof (count), 1, &count) == ENOENT)
 782                 return (0);
 783 
 784         err = dsl_prop_get_dd(dd, zfs_prop_to_name(prop), 8, 1, &limit, NULL,
 785             B_FALSE);
 786         if (err != 0)
 787                 return (err);
 788 
 789         /* Is there a limit which we've hit? */
 790         if (enforce == ENFORCE_ALWAYS && (count + delta) > limit)
 791                 return (SET_ERROR(EDQUOT));
 792 
 793         if (dd->dd_parent != NULL)
 794                 err = dsl_fs_ss_limit_check(dd->dd_parent, delta, prop,
 795                     ancestor, cr);
 796 
 797         return (err);
 798 }
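
/*
 * Worked example (hypothetical): renaming a childless, snapshotless
 * filesystem from tank/a/x to tank/b/x adds one filesystem under tank/b.
 * The rename path checks the new parent with delta=1 and the common
 * ancestor (tank) as the "ancestor" argument, so the walk above stops at
 * tank, whose net count does not change; the counts are then decremented
 * up the tank/a branch and incremented up the tank/b branch by
 * dsl_fs_ss_count_adjust().
 */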
 799 
 800 /*
 801  * Adjust the filesystem or snapshot count for the specified dsl_dir_t and all
 802  * parents. When a new filesystem/snapshot is created, increment the count on
 803  * all parents, and when a filesystem/snapshot is destroyed, decrement the
 804  * count.
 805  */
 806 void
 807 dsl_fs_ss_count_adjust(dsl_dir_t *dd, int64_t delta, const char *prop,
 808     dmu_tx_t *tx)
 809 {
 810         int err;
 811         objset_t *os = dd->dd_pool->dp_meta_objset;
 812         uint64_t count;
 813 
 814         ASSERT(dsl_pool_config_held(dd->dd_pool));
 815         ASSERT(dmu_tx_is_syncing(tx));
 816         ASSERT(strcmp(prop, DD_FIELD_FILESYSTEM_COUNT) == 0 ||
 817             strcmp(prop, DD_FIELD_SNAPSHOT_COUNT) == 0);
 818 
 819         /*
 820          * When we receive an incremental stream into a filesystem that already
 821          * exists, a temporary clone is created.  We don't count this temporary
 822          * clone, whose name begins with a '%'. We also ignore hidden ($FREE,
 823          * $MOS & $ORIGIN) objsets.
 824          */
 825         if ((dd->dd_myname[0] == '%' || dd->dd_myname[0] == '$') &&
 826             strcmp(prop, DD_FIELD_FILESYSTEM_COUNT) == 0)
 827                 return;
 828 
 829         /*
 830          * e.g. if renaming a dataset with no snapshots, count adjustment is 0
 831          */
 832         if (delta == 0)
 833                 return;
 834 
 835         /*
 836          * If we hit an uninitialized node while recursing up the tree, we can
 837          * stop since we know the counts are not valid on this node and we
 838          * know we shouldn't touch this node's counts. An uninitialized count
 839          * on the node indicates that either the feature has not yet been
 840          * activated or there are no limits on this part of the tree.
 841          */
 842         if (!dsl_dir_is_zapified(dd) || (err = zap_lookup(os, dd->dd_object,
 843             prop, sizeof (count), 1, &count)) == ENOENT)
 844                 return;
 845         VERIFY0(err);
 846 
 847         count += delta;
 848         /* Use a signed verify to make sure we're not neg. */
 849         VERIFY3S(count, >=, 0);
 850 
 851         VERIFY0(zap_update(os, dd->dd_object, prop, sizeof (count), 1, &count,
 852             tx));
 853 
 854         /* Roll up this additional count into our ancestors */
 855         if (dd->dd_parent != NULL)
 856                 dsl_fs_ss_count_adjust(dd->dd_parent, delta, prop, tx);
 857 }
 858 
 859 uint64_t
 860 dsl_dir_create_sync(dsl_pool_t *dp, dsl_dir_t *pds, const char *name,
 861     dmu_tx_t *tx)
 862 {
 863         objset_t *mos = dp->dp_meta_objset;
 864         uint64_t ddobj;
 865         dsl_dir_phys_t *ddphys;
 866         dmu_buf_t *dbuf;
 867 
 868         ddobj = dmu_object_alloc(mos, DMU_OT_DSL_DIR, 0,
 869             DMU_OT_DSL_DIR, sizeof (dsl_dir_phys_t), tx);
 870         if (pds) {
 871                 VERIFY(0 == zap_add(mos, dsl_dir_phys(pds)->dd_child_dir_zapobj,
 872                     name, sizeof (uint64_t), 1, &ddobj, tx));
 873         } else {
 874                 /* it's the root dir */
 875                 VERIFY(0 == zap_add(mos, DMU_POOL_DIRECTORY_OBJECT,
 876                     DMU_POOL_ROOT_DATASET, sizeof (uint64_t), 1, &ddobj, tx));
 877         }
 878         VERIFY(0 == dmu_bonus_hold(mos, ddobj, FTAG, &dbuf));
 879         dmu_buf_will_dirty(dbuf, tx);
 880         ddphys = dbuf->db_data;
 881 
 882         ddphys->dd_creation_time = gethrestime_sec();
 883         if (pds) {
 884                 ddphys->dd_parent_obj = pds->dd_object;
 885 
 886                 /* update the filesystem counts */
 887                 dsl_fs_ss_count_adjust(pds, 1, DD_FIELD_FILESYSTEM_COUNT, tx);
 888         }
 889         ddphys->dd_props_zapobj = zap_create(mos,
 890             DMU_OT_DSL_PROPS, DMU_OT_NONE, 0, tx);
 891         ddphys->dd_child_dir_zapobj = zap_create(mos,
 892             DMU_OT_DSL_DIR_CHILD_MAP, DMU_OT_NONE, 0, tx);
 893         if (spa_version(dp->dp_spa) >= SPA_VERSION_USED_BREAKDOWN)
 894                 ddphys->dd_flags |= DD_FLAG_USED_BREAKDOWN;
 895         dmu_buf_rele(dbuf, FTAG);
 896 
 897         return (ddobj);
 898 }
 899 
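/*
 * A dir counts as a clone if it has an origin, unless that origin is the
 * pool-wide $ORIGIN snapshot (dp_origin_snap).
 */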
 900 boolean_t
 901 dsl_dir_is_clone(dsl_dir_t *dd)
 902 {
 903         return (dsl_dir_phys(dd)->dd_origin_obj &&
 904             (dd->dd_pool->dp_origin_snap == NULL ||
 905             dsl_dir_phys(dd)->dd_origin_obj !=
 906             dd->dd_pool->dp_origin_snap->ds_object));
 907 }
 908 
 909 void
 910 dsl_dir_stats(dsl_dir_t *dd, nvlist_t *nv)
 911 {
 912         mutex_enter(&dd->dd_lock);
 913         dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USED,
 914             dsl_dir_phys(dd)->dd_used_bytes);
 915         dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_QUOTA,
 916             dsl_dir_phys(dd)->dd_quota);
 917         dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_RESERVATION,
 918             dsl_dir_phys(dd)->dd_reserved);
 919         dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_COMPRESSRATIO,
 920             dsl_dir_phys(dd)->dd_compressed_bytes == 0 ? 100 :
 921             (dsl_dir_phys(dd)->dd_uncompressed_bytes * 100 /
 922             dsl_dir_phys(dd)->dd_compressed_bytes));
 923         dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_LOGICALUSED,
 924             dsl_dir_phys(dd)->dd_uncompressed_bytes);
 925         if (dsl_dir_phys(dd)->dd_flags & DD_FLAG_USED_BREAKDOWN) {
 926                 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USEDSNAP,
 927                     dsl_dir_phys(dd)->dd_used_breakdown[DD_USED_SNAP]);
 928                 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USEDDS,
 929                     dsl_dir_phys(dd)->dd_used_breakdown[DD_USED_HEAD]);
 930                 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USEDREFRESERV,
 931                     dsl_dir_phys(dd)->dd_used_breakdown[DD_USED_REFRSRV]);
 932                 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USEDCHILD,
 933                     dsl_dir_phys(dd)->dd_used_breakdown[DD_USED_CHILD] +
 934                     dsl_dir_phys(dd)->dd_used_breakdown[DD_USED_CHILD_RSRV]);
 935         }
 936         mutex_exit(&dd->dd_lock);
 937 
 938         if (dsl_dir_is_zapified(dd)) {
 939                 uint64_t count;
 940                 objset_t *os = dd->dd_pool->dp_meta_objset;
 941 
 942                 if (zap_lookup(os, dd->dd_object, DD_FIELD_FILESYSTEM_COUNT,
 943                     sizeof (count), 1, &count) == 0) {
 944                         dsl_prop_nvlist_add_uint64(nv,
 945                             ZFS_PROP_FILESYSTEM_COUNT, count);
 946                 }
 947                 if (zap_lookup(os, dd->dd_object, DD_FIELD_SNAPSHOT_COUNT,
 948                     sizeof (count), 1, &count) == 0) {
 949                         dsl_prop_nvlist_add_uint64(nv,
 950                             ZFS_PROP_SNAPSHOT_COUNT, count);
 951                 }
 952         }
 953 
 954         if (dsl_dir_is_clone(dd)) {
 955                 dsl_dataset_t *ds;
 956                 char buf[MAXNAMELEN];
 957 
 958                 VERIFY0(dsl_dataset_hold_obj(dd->dd_pool,
 959                     dsl_dir_phys(dd)->dd_origin_obj, FTAG, &ds));
 960                 dsl_dataset_name(ds, buf);
 961                 dsl_dataset_rele(ds, FTAG);
 962                 dsl_prop_nvlist_add_string(nv, ZFS_PROP_ORIGIN, buf);
 963         }
 964 }
 965 
 966 void
 967 dsl_dir_dirty(dsl_dir_t *dd, dmu_tx_t *tx)
 968 {
 969         dsl_pool_t *dp = dd->dd_pool;
 970 
 971         ASSERT(dsl_dir_phys(dd));
 972 
 973         if (txg_list_add(&dp->dp_dirty_dirs, dd, tx->tx_txg)) {
 974                 /* up the hold count until we can be written out */
 975                 dmu_buf_add_ref(dd->dd_dbuf, dd);
 976         }
 977 }
 978 
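/*
 * A dsl_dir is charged to its parent for MAX(used, reserved) bytes, i.e.
 * its reservation acts as a floor.  Given the dir's current usage ("used")
 * and a proposed change ("delta"), return how much the amount charged to
 * the parent actually changes.
 */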
 979 static int64_t
 980 parent_delta(dsl_dir_t *dd, uint64_t used, int64_t delta)
 981 {
 982         uint64_t old_accounted = MAX(used, dsl_dir_phys(dd)->dd_reserved);
 983         uint64_t new_accounted =
 984             MAX(used + delta, dsl_dir_phys(dd)->dd_reserved);
 985         return (new_accounted - old_accounted);
 986 }
 987 
 988 void
 989 dsl_dir_sync(dsl_dir_t *dd, dmu_tx_t *tx)
 990 {
 991         ASSERT(dmu_tx_is_syncing(tx));
 992 
 993         mutex_enter(&dd->dd_lock);
 994         ASSERT0(dd->dd_tempreserved[tx->tx_txg&TXG_MASK]);
 995         dprintf_dd(dd, "txg=%llu towrite=%lluK\n", tx->tx_txg,
 996             dd->dd_space_towrite[tx->tx_txg&TXG_MASK] / 1024);
 997         dd->dd_space_towrite[tx->tx_txg&TXG_MASK] = 0;
 998         mutex_exit(&dd->dd_lock);
 999 
1000         /* release the hold from dsl_dir_dirty */
1001         dmu_buf_rele(dd->dd_dbuf, dd);
1002 }
1003 
1004 static uint64_t
1005 dsl_dir_space_towrite(dsl_dir_t *dd)
1006 {
1007         uint64_t space = 0;
1008         int i;
1009 
1010         ASSERT(MUTEX_HELD(&dd->dd_lock));
1011 
1012         for (i = 0; i < TXG_SIZE; i++) {
1013                 space += dd->dd_space_towrite[i&TXG_MASK];
1014                 ASSERT3U(dd->dd_space_towrite[i&TXG_MASK], >=, 0);
1015         }
1016         return (space);
1017 }
1018 
1019 /*
1020  * How much space would dd have available if ancestor had delta applied
1021  * to it?  If ondiskonly is set, we're only interested in what's
1022  * on-disk, not estimated pending changes.
1023  */
1024 uint64_t
1025 dsl_dir_space_available(dsl_dir_t *dd,
1026     dsl_dir_t *ancestor, int64_t delta, int ondiskonly)
1027 {
1028         uint64_t parentspace, myspace, quota, used;
1029 
1030         /*
1031          * If there are no restrictions otherwise, assume we have
1032          * unlimited space available.
1033          */
1034         quota = UINT64_MAX;
1035         parentspace = UINT64_MAX;
1036 
1037         if (dd->dd_parent != NULL) {
1038                 parentspace = dsl_dir_space_available(dd->dd_parent,
1039                     ancestor, delta, ondiskonly);
1040         }
1041 
1042         mutex_enter(&dd->dd_lock);
1043         if (dsl_dir_phys(dd)->dd_quota != 0)
1044                 quota = dsl_dir_phys(dd)->dd_quota;
1045         used = dsl_dir_phys(dd)->dd_used_bytes;
1046         if (!ondiskonly)
1047                 used += dsl_dir_space_towrite(dd);
1048 
1049         if (dd->dd_parent == NULL) {
1050                 uint64_t poolsize = dsl_pool_adjustedsize(dd->dd_pool, FALSE);
1051                 quota = MIN(quota, poolsize);
1052         }
1053 
1054         if (dsl_dir_phys(dd)->dd_reserved > used && parentspace != UINT64_MAX) {
1055                 /*
1056                  * We have some space reserved, in addition to what our
1057                  * parent gave us.
1058                  */
1059                 parentspace += dsl_dir_phys(dd)->dd_reserved - used;
1060         }
1061 
1062         if (dd == ancestor) {
1063                 ASSERT(delta <= 0);
1064                 ASSERT(used >= -delta);
1065                 used += delta;
1066                 if (parentspace != UINT64_MAX)
1067                         parentspace -= delta;
1068         }
1069 
1070         if (used > quota) {
1071                 /* over quota */
1072                 myspace = 0;
1073         } else {
1074                 /*
1075                  * the lesser of the space provided by our parent and
1076                  * the space left in our quota
1077                  */
1078                 myspace = MIN(parentspace, quota - used);
1079         }
1080 
1081         mutex_exit(&dd->dd_lock);
1082 
1083         return (myspace);
1084 }
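
/*
 * Example of the arithmetic above (made-up numbers): with quota=100M,
 * used=60M and 30M offered by the parent, the dir reports
 * MIN(30M, 100M - 60M) = 30M available.  If the same dir also had
 * reserved=80M, the 20M of unused reservation is first added to the
 * parent's contribution (30M + 20M = 50M), giving MIN(50M, 40M) = 40M.
 */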
1085 
1086 struct tempreserve {
1087         list_node_t tr_node;
1088         dsl_dir_t *tr_ds;
1089         uint64_t tr_size;
1090 };
1091 
1092 static int
1093 dsl_dir_tempreserve_impl(dsl_dir_t *dd, uint64_t asize, boolean_t netfree,
1094     boolean_t ignorequota, boolean_t checkrefquota, list_t *tr_list,
1095     dmu_tx_t *tx, boolean_t first)
1096 {
1097         uint64_t txg = tx->tx_txg;
1098         uint64_t est_inflight, used_on_disk, quota, parent_rsrv;
1099         uint64_t deferred = 0;
1100         struct tempreserve *tr;
1101         int retval = EDQUOT;
1102         int txgidx = txg & TXG_MASK;
1103         int i;
1104         uint64_t ref_rsrv = 0;
1105 
1106         ASSERT3U(txg, !=, 0);
1107         ASSERT3S(asize, >, 0);
1108 
1109         mutex_enter(&dd->dd_lock);
1110 
1111         /*
1112          * Check against the dsl_dir's quota.  We don't add in the delta
1113          * when checking for over-quota because they get one free hit.
1114          */
1115         est_inflight = dsl_dir_space_towrite(dd);
1116         for (i = 0; i < TXG_SIZE; i++)
1117                 est_inflight += dd->dd_tempreserved[i];
1118         used_on_disk = dsl_dir_phys(dd)->dd_used_bytes;
1119 
1120         /*
1121          * On the first iteration, fetch the dataset's used-on-disk and
1122          * refreservation values. Also, if checkrefquota is set, test if
1123          * allocating this space would exceed the dataset's refquota.
1124          */
1125         if (first && tx->tx_objset) {
1126                 int error;
1127                 dsl_dataset_t *ds = tx->tx_objset->os_dsl_dataset;
1128 
1129                 error = dsl_dataset_check_quota(ds, checkrefquota,
1130                     asize, est_inflight, &used_on_disk, &ref_rsrv);
1131                 if (error) {
1132                         mutex_exit(&dd->dd_lock);
1133                         return (error);
1134                 }
1135         }
1136 
1137         /*
1138          * If this transaction will result in a net free of space,
1139          * we want to let it through.
1140          */
1141         if (ignorequota || netfree || dsl_dir_phys(dd)->dd_quota == 0)
1142                 quota = UINT64_MAX;
1143         else
1144                 quota = dsl_dir_phys(dd)->dd_quota;
1145 
1146         /*
1147          * Adjust the quota against the actual pool size at the root
1148          * minus any outstanding deferred frees.
1149          * To ensure that it's possible to remove files from a full
1150          * pool without inducing transient overcommits, we throttle
1151          * netfree transactions against a quota that is slightly larger,
1152          * but still within the pool's allocation slop.  In cases where
1153          * we're very close to full, this will allow a steady trickle of
1154          * removes to get through.
1155          */
1156         if (dd->dd_parent == NULL) {
1157                 spa_t *spa = dd->dd_pool->dp_spa;
1158                 uint64_t poolsize = dsl_pool_adjustedsize(dd->dd_pool, netfree);
1159                 deferred = metaslab_class_get_deferred(spa_normal_class(spa));
1160                 if (poolsize - deferred < quota) {
1161                         quota = poolsize - deferred;
1162                         retval = ENOSPC;
1163                 }
1164         }
1165 
1166         /*
1167          * If they are requesting more space, and our current estimate
1168          * is over quota, they get to try again unless the actual
1169          * on-disk is over quota and there are no pending changes (which
1170          * may free up space for us).
1171          */
1172         if (used_on_disk + est_inflight >= quota) {
1173                 if (est_inflight > 0 || used_on_disk < quota ||
1174                     (retval == ENOSPC && used_on_disk < quota + deferred))
1175                         retval = ERESTART;
1176                 dprintf_dd(dd, "failing: used=%lluK inflight = %lluK "
1177                     "quota=%lluK tr=%lluK err=%d\n",
1178                     used_on_disk>>10, est_inflight>>10,
1179                     quota>>10, asize>>10, retval);
1180                 mutex_exit(&dd->dd_lock);
1181                 return (SET_ERROR(retval));
1182         }
1183 
1184         /* We need to up our estimated delta before dropping dd_lock */
1185         dd->dd_tempreserved[txgidx] += asize;
1186 
1187         parent_rsrv = parent_delta(dd, used_on_disk + est_inflight,
1188             asize - ref_rsrv);
1189         mutex_exit(&dd->dd_lock);
1190 
1191         tr = kmem_zalloc(sizeof (struct tempreserve), KM_SLEEP);
1192         tr->tr_ds = dd;
1193         tr->tr_size = asize;
1194         list_insert_tail(tr_list, tr);
1195 
1196         /* see if it's OK with our parent */
1197         if (dd->dd_parent && parent_rsrv) {
1198                 boolean_t ismos = (dsl_dir_phys(dd)->dd_head_dataset_obj == 0);
1199 
1200                 return (dsl_dir_tempreserve_impl(dd->dd_parent,
1201                     parent_rsrv, netfree, ismos, TRUE, tr_list, tx, FALSE));
1202         } else {
1203                 return (0);
1204         }
1205 }
1206 
1207 /*
1208  * Reserve space in this dsl_dir, to be used in this tx's txg.
1209  * After the space has been dirtied (and dsl_dir_willuse_space()
1210  * has been called), the reservation should be canceled, using
1211  * dsl_dir_tempreserve_clear().
1212  */
1213 int
1214 dsl_dir_tempreserve_space(dsl_dir_t *dd, uint64_t lsize, uint64_t asize,
1215     uint64_t fsize, uint64_t usize, void **tr_cookiep, dmu_tx_t *tx)
1216 {
1217         int err;
1218         list_t *tr_list;
1219 
1220         if (asize == 0) {
1221                 *tr_cookiep = NULL;
1222                 return (0);
1223         }
1224 
1225         tr_list = kmem_alloc(sizeof (list_t), KM_SLEEP);
1226         list_create(tr_list, sizeof (struct tempreserve),
1227             offsetof(struct tempreserve, tr_node));
1228         ASSERT3S(asize, >, 0);
1229         ASSERT3S(fsize, >=, 0);
1230 
1231         err = arc_tempreserve_space(lsize, tx->tx_txg);
1232         if (err == 0) {
1233                 struct tempreserve *tr;
1234 
1235                 tr = kmem_zalloc(sizeof (struct tempreserve), KM_SLEEP);
1236                 tr->tr_size = lsize;
1237                 list_insert_tail(tr_list, tr);
1238         } else {
1239                 if (err == EAGAIN) {
1240                         /*
1241                          * If arc_memory_throttle() detected that pageout
1242                          * is running and we are low on memory, we delay new
1243                          * non-pageout transactions to give pageout an
1244                          * advantage.
1245                          *
1246                          * It is unfortunate to be delaying while the caller's
1247                          * locks are held.
1248                          */
1249                         txg_delay(dd->dd_pool, tx->tx_txg,
1250                             MSEC2NSEC(10), MSEC2NSEC(10));
1251                         err = SET_ERROR(ERESTART);
1252                 }
1253         }
1254 
1255         if (err == 0) {
1256                 err = dsl_dir_tempreserve_impl(dd, asize, fsize >= asize,
1257                     FALSE, asize > usize, tr_list, tx, TRUE);
1258         }
1259 
1260         if (err != 0)
1261                 dsl_dir_tempreserve_clear(tr_list, tx);
1262         else
1263                 *tr_cookiep = tr_list;
1264 
1265         return (err);
1266 }
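
/*
 * Usage sketch (hypothetical; in practice the DMU transaction code makes
 * these calls): a successful reservation is paired with a later
 * dsl_dir_tempreserve_clear() for the same txg.
 *
 *      void *cookie;
 *      int err = dsl_dir_tempreserve_space(dd, lsize, asize, fsize, usize,
 *          &cookie, tx);
 *      if (err == 0) {
 *              ... dirty the space ...
 *              dsl_dir_tempreserve_clear(cookie, tx);
 *      }
 */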
1267 
1268 /*
1269  * Clear a temporary reservation that we previously made with
1270  * dsl_dir_tempreserve_space().
1271  */
1272 void
1273 dsl_dir_tempreserve_clear(void *tr_cookie, dmu_tx_t *tx)
1274 {
1275         int txgidx = tx->tx_txg & TXG_MASK;
1276         list_t *tr_list = tr_cookie;
1277         struct tempreserve *tr;
1278 
1279         ASSERT3U(tx->tx_txg, !=, 0);
1280 
1281         if (tr_cookie == NULL)
1282                 return;
1283 
1284         while ((tr = list_head(tr_list)) != NULL) {
1285                 if (tr->tr_ds) {
1286                         mutex_enter(&tr->tr_ds->dd_lock);
1287                         ASSERT3U(tr->tr_ds->dd_tempreserved[txgidx], >=,
1288                             tr->tr_size);
1289                         tr->tr_ds->dd_tempreserved[txgidx] -= tr->tr_size;
1290                         mutex_exit(&tr->tr_ds->dd_lock);
1291                 } else {
1292                         arc_tempreserve_clear(tr->tr_size);
1293                 }
1294                 list_remove(tr_list, tr);
1295                 kmem_free(tr, sizeof (struct tempreserve));
1296         }
1297 
1298         kmem_free(tr_list, sizeof (list_t));
1299 }
1300 
1301 /*
1302  * This should be called from open context when we think we're going to write
1303  * or free space, for example when dirtying data. Be conservative; it's okay
1304  * to write less space or free more, but we don't want to write more or free
1305  * less than the amount specified.
1306  */
1307 void
1308 dsl_dir_willuse_space(dsl_dir_t *dd, int64_t space, dmu_tx_t *tx)
1309 {
1310         int64_t parent_space;
1311         uint64_t est_used;
1312 
1313         mutex_enter(&dd->dd_lock);
1314         if (space > 0)
1315                 dd->dd_space_towrite[tx->tx_txg & TXG_MASK] += space;
1316 
1317         est_used = dsl_dir_space_towrite(dd) + dsl_dir_phys(dd)->dd_used_bytes;
1318         parent_space = parent_delta(dd, est_used, space);
1319         mutex_exit(&dd->dd_lock);
1320 
1321         /* Make sure that we clean up dd_space_to* */
1322         dsl_dir_dirty(dd, tx);
1323 
1324         /* XXX this is potentially expensive and unnecessary... */
1325         if (parent_space && dd->dd_parent)
1326                 dsl_dir_willuse_space(dd->dd_parent, parent_space, tx);
1327 }
1328 
1329 /* call from syncing context when we actually write/free space for this dd */
1330 void
1331 dsl_dir_diduse_space(dsl_dir_t *dd, dd_used_t type,
1332     int64_t used, int64_t compressed, int64_t uncompressed, dmu_tx_t *tx)
1333 {
1334         int64_t accounted_delta;
1335 
1336         /*
1337          * dsl_dataset_set_refreservation_sync_impl() calls this with
1338          * dd_lock held, so that it can atomically update
1339          * ds->ds_reserved and the dsl_dir accounting, so that
1340          * dsl_dataset_check_quota() can see dataset and dir accounting
1341          * consistently.
1342          */
1343         boolean_t needlock = !MUTEX_HELD(&dd->dd_lock);
1344 
1345         ASSERT(dmu_tx_is_syncing(tx));
1346         ASSERT(type < DD_USED_NUM);
1347 
1348         dmu_buf_will_dirty(dd->dd_dbuf, tx);
1349 
1350         if (needlock)
1351                 mutex_enter(&dd->dd_lock);
1352         accounted_delta =
1353             parent_delta(dd, dsl_dir_phys(dd)->dd_used_bytes, used);
1354         ASSERT(used >= 0 || dsl_dir_phys(dd)->dd_used_bytes >= -used);
1355         ASSERT(compressed >= 0 ||
1356             dsl_dir_phys(dd)->dd_compressed_bytes >= -compressed);
1357         ASSERT(uncompressed >= 0 ||
1358             dsl_dir_phys(dd)->dd_uncompressed_bytes >= -uncompressed);
1359         dsl_dir_phys(dd)->dd_used_bytes += used;
1360         dsl_dir_phys(dd)->dd_uncompressed_bytes += uncompressed;
1361         dsl_dir_phys(dd)->dd_compressed_bytes += compressed;
1362 
1363         if (dsl_dir_phys(dd)->dd_flags & DD_FLAG_USED_BREAKDOWN) {
1364                 ASSERT(used > 0 ||
1365                     dsl_dir_phys(dd)->dd_used_breakdown[type] >= -used);
1366                 dsl_dir_phys(dd)->dd_used_breakdown[type] += used;
1367 #ifdef DEBUG
1368                 dd_used_t t;
1369                 uint64_t u = 0;
1370                 for (t = 0; t < DD_USED_NUM; t++)
1371                         u += dsl_dir_phys(dd)->dd_used_breakdown[t];
1372                 ASSERT3U(u, ==, dsl_dir_phys(dd)->dd_used_bytes);
1373 #endif
1374         }
1375         if (needlock)
1376                 mutex_exit(&dd->dd_lock);
1377 
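             /*
              * Roll the change up into our parent.  accounted_delta is the
              * portion of "used" that changes our charge against the parent
              * (i.e. the change in MAX(used, reserved)); the remainder was
              * already covered by our reservation and thus already charged
              * to the parent as DD_USED_CHILD_RSRV, so it is moved from
              * DD_USED_CHILD_RSRV to DD_USED_CHILD rather than added again.
              */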
1378         if (dd->dd_parent != NULL) {
1379                 dsl_dir_diduse_space(dd->dd_parent, DD_USED_CHILD,
1380                     accounted_delta, compressed, uncompressed, tx);
1381                 dsl_dir_transfer_space(dd->dd_parent,
1382                     used - accounted_delta,
1383                     DD_USED_CHILD_RSRV, DD_USED_CHILD, tx);
1384         }
1385 }
1386 
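     /*
      * Move "delta" bytes of the used-space breakdown from oldtype to
      * newtype.  This is a no-op if delta is zero or if the breakdown is
      * not being maintained (DD_FLAG_USED_BREAKDOWN is not set).
      * Syncing context only.
      */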
1387 void
1388 dsl_dir_transfer_space(dsl_dir_t *dd, int64_t delta,
1389     dd_used_t oldtype, dd_used_t newtype, dmu_tx_t *tx)
1390 {
1391         ASSERT(dmu_tx_is_syncing(tx));
1392         ASSERT(oldtype < DD_USED_NUM);
1393         ASSERT(newtype < DD_USED_NUM);
1394 
1395         if (delta == 0 ||
1396             !(dsl_dir_phys(dd)->dd_flags & DD_FLAG_USED_BREAKDOWN))
1397                 return;
1398 
1399         dmu_buf_will_dirty(dd->dd_dbuf, tx);
1400         mutex_enter(&dd->dd_lock);
1401         ASSERT(delta > 0 ?
1402             dsl_dir_phys(dd)->dd_used_breakdown[oldtype] >= delta :
1403             dsl_dir_phys(dd)->dd_used_breakdown[newtype] >= -delta);
1404         ASSERT(dsl_dir_phys(dd)->dd_used_bytes >= ABS(delta));
1405         dsl_dir_phys(dd)->dd_used_breakdown[oldtype] -= delta;
1406         dsl_dir_phys(dd)->dd_used_breakdown[newtype] += delta;
1407         mutex_exit(&dd->dd_lock);
1408 }
1409 
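     /* Argument block shared by the quota and reservation sync tasks. */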
1410 typedef struct dsl_dir_set_qr_arg {
1411         const char *ddsqra_name;
1412         zprop_source_t ddsqra_source;
1413         uint64_t ddsqra_value;
1414 } dsl_dir_set_qr_arg_t;
1415 
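     /*
      * Check that the proposed quota is not below this dsl_dir's
      * reservation or its current usage plus pending writes.  A quota of
      * zero ("none") is always accepted.
      */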
1416 static int
1417 dsl_dir_set_quota_check(void *arg, dmu_tx_t *tx)
1418 {
1419         dsl_dir_set_qr_arg_t *ddsqra = arg;
1420         dsl_pool_t *dp = dmu_tx_pool(tx);
1421         dsl_dataset_t *ds;
1422         int error;
1423         uint64_t towrite, newval;
1424 
1425         error = dsl_dataset_hold(dp, ddsqra->ddsqra_name, FTAG, &ds);
1426         if (error != 0)
1427                 return (error);
1428 
1429         error = dsl_prop_predict(ds->ds_dir, "quota",
1430             ddsqra->ddsqra_source, ddsqra->ddsqra_value, &newval);
1431         if (error != 0) {
1432                 dsl_dataset_rele(ds, FTAG);
1433                 return (error);
1434         }
1435 
1436         if (newval == 0) {
1437                 dsl_dataset_rele(ds, FTAG);
1438                 return (0);
1439         }
1440 
1441         mutex_enter(&ds->ds_dir->dd_lock);
1442         /*
1443          * If we are doing the preliminary check in open context and
1444          * there are pending changes, don't fail it: the pending changes
1445          * could under-estimate the amount of space about to be freed,
1446          * and the check will run again in syncing context.
1447          */
1448         towrite = dsl_dir_space_towrite(ds->ds_dir);
1449         if ((dmu_tx_is_syncing(tx) || towrite == 0) &&
1450             (newval < dsl_dir_phys(ds->ds_dir)->dd_reserved ||
1451             newval < dsl_dir_phys(ds->ds_dir)->dd_used_bytes + towrite)) {
1452                 error = SET_ERROR(ENOSPC);
1453         }
1454         mutex_exit(&ds->ds_dir->dd_lock);
1455         dsl_dataset_rele(ds, FTAG);
1456         return (error);
1457 }
1458 
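     /*
      * Apply the new quota: resolve the effective property value and store
      * it in the on-disk dd_quota.
      */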
1459 static void
1460 dsl_dir_set_quota_sync(void *arg, dmu_tx_t *tx)
1461 {
1462         dsl_dir_set_qr_arg_t *ddsqra = arg;
1463         dsl_pool_t *dp = dmu_tx_pool(tx);
1464         dsl_dataset_t *ds;
1465         uint64_t newval;
1466 
1467         VERIFY0(dsl_dataset_hold(dp, ddsqra->ddsqra_name, FTAG, &ds));
1468 
1469         if (spa_version(dp->dp_spa) >= SPA_VERSION_RECVD_PROPS) {
1470                 dsl_prop_set_sync_impl(ds, zfs_prop_to_name(ZFS_PROP_QUOTA),
1471                     ddsqra->ddsqra_source, sizeof (ddsqra->ddsqra_value), 1,
1472                     &ddsqra->ddsqra_value, tx);
1473 
1474                 VERIFY0(dsl_prop_get_int_ds(ds,
1475                     zfs_prop_to_name(ZFS_PROP_QUOTA), &newval));
1476         } else {
1477                 newval = ddsqra->ddsqra_value;
1478                 spa_history_log_internal_ds(ds, "set", tx, "%s=%lld",
1479                     zfs_prop_to_name(ZFS_PROP_QUOTA), (longlong_t)newval);
1480         }
1481 
1482         dmu_buf_will_dirty(ds->ds_dir->dd_dbuf, tx);
1483         mutex_enter(&ds->ds_dir->dd_lock);
1484         dsl_dir_phys(ds->ds_dir)->dd_quota = newval;
1485         mutex_exit(&ds->ds_dir->dd_lock);
1486         dsl_dataset_rele(ds, FTAG);
1487 }
1488 
1489 int
1490 dsl_dir_set_quota(const char *ddname, zprop_source_t source, uint64_t quota)
1491 {
1492         dsl_dir_set_qr_arg_t ddsqra;
1493 
1494         ddsqra.ddsqra_name = ddname;
1495         ddsqra.ddsqra_source = source;
1496         ddsqra.ddsqra_value = quota;
1497 
1498         return (dsl_sync_task(ddname, dsl_dir_set_quota_check,
1499             dsl_dir_set_quota_sync, &ddsqra, 0, ZFS_SPACE_CHECK_NONE));
1500 }
1501 
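     /*
      * Check that the increase in reserved space does not exceed the space
      * available from the parent, and that the new reservation does not
      * exceed an existing quota.
      */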
1502 int
1503 dsl_dir_set_reservation_check(void *arg, dmu_tx_t *tx)
1504 {
1505         dsl_dir_set_qr_arg_t *ddsqra = arg;
1506         dsl_pool_t *dp = dmu_tx_pool(tx);
1507         dsl_dataset_t *ds;
1508         dsl_dir_t *dd;
1509         uint64_t newval, used, avail;
1510         int error;
1511 
1512         error = dsl_dataset_hold(dp, ddsqra->ddsqra_name, FTAG, &ds);
1513         if (error != 0)
1514                 return (error);
1515         dd = ds->ds_dir;
1516 
1517         /*
1518          * If we are doing the preliminary check in open context, the
1519          * space estimates may be inaccurate.
1520          */
1521         if (!dmu_tx_is_syncing(tx)) {
1522                 dsl_dataset_rele(ds, FTAG);
1523                 return (0);
1524         }
1525 
1526         error = dsl_prop_predict(ds->ds_dir,
1527             zfs_prop_to_name(ZFS_PROP_RESERVATION),
1528             ddsqra->ddsqra_source, ddsqra->ddsqra_value, &newval);
1529         if (error != 0) {
1530                 dsl_dataset_rele(ds, FTAG);
1531                 return (error);
1532         }
1533 
1534         mutex_enter(&dd->dd_lock);
1535         used = dsl_dir_phys(dd)->dd_used_bytes;
1536         mutex_exit(&dd->dd_lock);
1537 
1538         if (dd->dd_parent) {
1539                 avail = dsl_dir_space_available(dd->dd_parent,
1540                     NULL, 0, FALSE);
1541         } else {
1542                 avail = dsl_pool_adjustedsize(dd->dd_pool, B_FALSE) - used;
1543         }
1544 
1545         if (MAX(used, newval) > MAX(used, dsl_dir_phys(dd)->dd_reserved)) {
1546                 uint64_t delta = MAX(used, newval) -
1547                     MAX(used, dsl_dir_phys(dd)->dd_reserved);
1548 
1549                 if (delta > avail ||
1550                     (dsl_dir_phys(dd)->dd_quota > 0 &&
1551                     newval > dsl_dir_phys(dd)->dd_quota))
1552                         error = SET_ERROR(ENOSPC);
1553         }
1554 
1555         dsl_dataset_rele(ds, FTAG);
1556         return (error);
1557 }
1558 
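     /*
      * Update dd_reserved and roll the resulting change in this dsl_dir's
      * charge against its parent (MAX(used, reserved)) up into the parent's
      * DD_USED_CHILD_RSRV accounting.
      */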
1559 void
1560 dsl_dir_set_reservation_sync_impl(dsl_dir_t *dd, uint64_t value, dmu_tx_t *tx)
1561 {
1562         uint64_t used;
1563         int64_t delta;
1564 
1565         dmu_buf_will_dirty(dd->dd_dbuf, tx);
1566 
1567         mutex_enter(&dd->dd_lock);
1568         used = dsl_dir_phys(dd)->dd_used_bytes;
1569         delta = MAX(used, value) - MAX(used, dsl_dir_phys(dd)->dd_reserved);
1570         dsl_dir_phys(dd)->dd_reserved = value;
1571 
1572         if (dd->dd_parent != NULL) {
1573                 /* Roll up this additional usage into our ancestors */
1574                 dsl_dir_diduse_space(dd->dd_parent, DD_USED_CHILD_RSRV,
1575                     delta, 0, 0, tx);
1576         }
1577         mutex_exit(&dd->dd_lock);
1578 }
1579 
1580 
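     /*
      * Apply the new reservation: resolve the effective property value and
      * update the dsl_dir accounting via
      * dsl_dir_set_reservation_sync_impl().
      */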
1581 static void
1582 dsl_dir_set_reservation_sync(void *arg, dmu_tx_t *tx)
1583 {
1584         dsl_dir_set_qr_arg_t *ddsqra = arg;
1585         dsl_pool_t *dp = dmu_tx_pool(tx);
1586         dsl_dataset_t *ds;
1587         uint64_t newval;
1588 
1589         VERIFY0(dsl_dataset_hold(dp, ddsqra->ddsqra_name, FTAG, &ds));
1590 
1591         if (spa_version(dp->dp_spa) >= SPA_VERSION_RECVD_PROPS) {
1592                 dsl_prop_set_sync_impl(ds,
1593                     zfs_prop_to_name(ZFS_PROP_RESERVATION),
1594                     ddsqra->ddsqra_source, sizeof (ddsqra->ddsqra_value), 1,
1595                     &ddsqra->ddsqra_value, tx);
1596 
1597                 VERIFY0(dsl_prop_get_int_ds(ds,
1598                     zfs_prop_to_name(ZFS_PROP_RESERVATION), &newval));
1599         } else {
1600                 newval = ddsqra->ddsqra_value;
1601                 spa_history_log_internal_ds(ds, "set", tx, "%s=%lld",
1602                     zfs_prop_to_name(ZFS_PROP_RESERVATION),
1603                     (longlong_t)newval);
1604         }
1605 
1606         dsl_dir_set_reservation_sync_impl(ds->ds_dir, newval, tx);
1607         dsl_dataset_rele(ds, FTAG);
1608 }
1609 
1610 int
1611 dsl_dir_set_reservation(const char *ddname, zprop_source_t source,
1612     uint64_t reservation)
1613 {
1614         dsl_dir_set_qr_arg_t ddsqra;
1615 
1616         ddsqra.ddsqra_name = ddname;
1617         ddsqra.ddsqra_source = source;
1618         ddsqra.ddsqra_value = reservation;
1619 
1620         return (dsl_sync_task(ddname, dsl_dir_set_reservation_check,
1621             dsl_dir_set_reservation_sync, &ddsqra, 0, ZFS_SPACE_CHECK_NONE));
1622 }
1623 
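     /*
      * Return the closest common ancestor of ds1 and ds2 by walking their
      * dd_parent chains (either argument may itself be the ancestor).
      * For example, the common ancestor of the dirs for a/b and a/c is a.
      */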
1624 static dsl_dir_t *
1625 closest_common_ancestor(dsl_dir_t *ds1, dsl_dir_t *ds2)
1626 {
1627         for (; ds1; ds1 = ds1->dd_parent) {
1628                 dsl_dir_t *dd;
1629                 for (dd = ds2; dd; dd = dd->dd_parent) {
1630                         if (ds1 == dd)
1631                                 return (dd);
1632                 }
1633         }
1634         return (NULL);
1635 }
1636 
1637 /*
1638  * If delta is applied to dd, how much of that delta would be applied to
1639  * ancestor?  Syncing context only.
1640  */
1641 static int64_t
1642 would_change(dsl_dir_t *dd, int64_t delta, dsl_dir_t *ancestor)
1643 {
1644         if (dd == ancestor)
1645                 return (delta);
1646 
1647         mutex_enter(&dd->dd_lock);
1648         delta = parent_delta(dd, dsl_dir_phys(dd)->dd_used_bytes, delta);
1649         mutex_exit(&dd->dd_lock);
1650         return (would_change(dd->dd_parent, delta, ancestor));
1651 }
1652 
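     /* Argument block for the rename sync task. */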
1653 typedef struct dsl_dir_rename_arg {
1654         const char *ddra_oldname;
1655         const char *ddra_newname;
1656         cred_t *ddra_cred;
1657 } dsl_dir_rename_arg_t;
1658 
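     /*
      * dmu_objset_find_dp() callback: fail with ENAMETOOLONG if growing
      * this dataset's name by the rename's length delta would exceed
      * MAXNAMELEN.
      */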
1659 /* ARGSUSED */
1660 static int
1661 dsl_valid_rename(dsl_pool_t *dp, dsl_dataset_t *ds, void *arg)
1662 {
1663         int *deltap = arg;
1664         char namebuf[MAXNAMELEN];
1665 
1666         dsl_dataset_name(ds, namebuf);
1667 
1668         if (strlen(namebuf) + *deltap >= MAXNAMELEN)
1669                 return (SET_ERROR(ENAMETOOLONG));
1670         return (0);
1671 }
1672 
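     /*
      * Rename checks: the source must exist, the new parent must exist in
      * the same pool, the new name must not already be in use, every
      * descendant name must still fit if the name is growing, and when
      * moving to a new parent the destination must have enough space and
      * headroom under any filesystem and snapshot limits.
      */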
1673 static int
1674 dsl_dir_rename_check(void *arg, dmu_tx_t *tx)
1675 {
1676         dsl_dir_rename_arg_t *ddra = arg;
1677         dsl_pool_t *dp = dmu_tx_pool(tx);
1678         dsl_dir_t *dd, *newparent;
1679         const char *mynewname;
1680         int error;
1681         int delta = strlen(ddra->ddra_newname) - strlen(ddra->ddra_oldname);
1682 
1683         /* target dir should exist */
1684         error = dsl_dir_hold(dp, ddra->ddra_oldname, FTAG, &dd, NULL);
1685         if (error != 0)
1686                 return (error);
1687 
1688         /* new parent should exist */
1689         error = dsl_dir_hold(dp, ddra->ddra_newname, FTAG,
1690             &newparent, &mynewname);
1691         if (error != 0) {
1692                 dsl_dir_rele(dd, FTAG);
1693                 return (error);
1694         }
1695 
1696         /* can't rename to different pool */
1697         if (dd->dd_pool != newparent->dd_pool) {
1698                 dsl_dir_rele(newparent, FTAG);
1699                 dsl_dir_rele(dd, FTAG);
1700                 return (SET_ERROR(ENXIO));
1701         }
1702 
1703         /* new name should not already exist */
1704         if (mynewname == NULL) {
1705                 dsl_dir_rele(newparent, FTAG);
1706                 dsl_dir_rele(dd, FTAG);
1707                 return (SET_ERROR(EEXIST));
1708         }
1709 
1710         /* if the name length is growing, validate child name lengths */
1711         if (delta > 0) {
1712                 error = dmu_objset_find_dp(dp, dd->dd_object, dsl_valid_rename,
1713                     &delta, DS_FIND_CHILDREN | DS_FIND_SNAPSHOTS);
1714                 if (error != 0) {
1715                         dsl_dir_rele(newparent, FTAG);
1716                         dsl_dir_rele(dd, FTAG);
1717                         return (error);
1718                 }
1719         }
1720 
1721         if (dmu_tx_is_syncing(tx)) {
1722                 if (spa_feature_is_active(dp->dp_spa,
1723                     SPA_FEATURE_FS_SS_LIMIT)) {
1724                         /*
1725                          * Although this is the check function and we don't
1726                          * normally make on-disk changes in check functions,
1727                          * we need to do that here.
1728                          *
1729                          * Ensure this portion of the tree's counts have been
1730                          * initialized in case the new parent has limits set.
1731                          */
1732                         dsl_dir_init_fs_ss_count(dd, tx);
1733                 }
1734         }
1735 
1736         if (newparent != dd->dd_parent) {
1737                 /* is there enough space? */
1738                 uint64_t myspace =
1739                     MAX(dsl_dir_phys(dd)->dd_used_bytes,
1740                     dsl_dir_phys(dd)->dd_reserved);
1741                 objset_t *os = dd->dd_pool->dp_meta_objset;
1742                 uint64_t fs_cnt = 0;
1743                 uint64_t ss_cnt = 0;
1744 
1745                 if (dsl_dir_is_zapified(dd)) {
1746                         int err;
1747 
1748                         err = zap_lookup(os, dd->dd_object,
1749                             DD_FIELD_FILESYSTEM_COUNT, sizeof (fs_cnt), 1,
1750                             &fs_cnt);
1751                         if (err != ENOENT && err != 0) {
1752                                 dsl_dir_rele(newparent, FTAG);
1753                                 dsl_dir_rele(dd, FTAG);
1754                                 return (err);
1755                         }
1756 
1757                         /*
1758                          * have to add 1 for the filesystem itself that we're
1759                          * moving
1760                          */
1761                         fs_cnt++;
1762 
1763                         err = zap_lookup(os, dd->dd_object,
1764                             DD_FIELD_SNAPSHOT_COUNT, sizeof (ss_cnt), 1,
1765                             &ss_cnt);
1766                         if (err != ENOENT && err != 0) {
1767                                 dsl_dir_rele(newparent, FTAG);
1768                                 dsl_dir_rele(dd, FTAG);
1769                                 return (err);
1770                         }
1771                 }
1772 
1773                 /* no rename into our descendant */
1774                 if (closest_common_ancestor(dd, newparent) == dd) {
1775                         dsl_dir_rele(newparent, FTAG);
1776                         dsl_dir_rele(dd, FTAG);
1777                         return (SET_ERROR(EINVAL));
1778                 }
1779 
1780                 error = dsl_dir_transfer_possible(dd->dd_parent,
1781                     newparent, fs_cnt, ss_cnt, myspace, ddra->ddra_cred);
1782                 if (error != 0) {
1783                         dsl_dir_rele(newparent, FTAG);
1784                         dsl_dir_rele(dd, FTAG);
1785                         return (error);
1786                 }
1787         }
1788 
1789         dsl_dir_rele(newparent, FTAG);
1790         dsl_dir_rele(dd, FTAG);
1791         return (0);
1792 }
1793 
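     /*
      * Rename sync: transfer the space accounting and filesystem/snapshot
      * counts to the new parent if it changed, move the child-dir ZAP entry
      * from the old parent to the new one, and repoint dd_parent_obj at the
      * new parent.
      */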
1794 static void
1795 dsl_dir_rename_sync(void *arg, dmu_tx_t *tx)
1796 {
1797         dsl_dir_rename_arg_t *ddra = arg;
1798         dsl_pool_t *dp = dmu_tx_pool(tx);
1799         dsl_dir_t *dd, *newparent;
1800         const char *mynewname;
1801         int error;
1802         objset_t *mos = dp->dp_meta_objset;
1803 
1804         VERIFY0(dsl_dir_hold(dp, ddra->ddra_oldname, FTAG, &dd, NULL));
1805         VERIFY0(dsl_dir_hold(dp, ddra->ddra_newname, FTAG, &newparent,
1806             &mynewname));
1807 
1808         /* Log this before we change the name. */
1809         spa_history_log_internal_dd(dd, "rename", tx,
1810             "-> %s", ddra->ddra_newname);
1811 
1812         if (newparent != dd->dd_parent) {
1813                 objset_t *os = dd->dd_pool->dp_meta_objset;
1814                 uint64_t fs_cnt = 0;
1815                 uint64_t ss_cnt = 0;
1816 
1817                 /*
1818                  * We already made sure the dd counts were initialized in the
1819                  * check function.
1820                  */
1821                 if (spa_feature_is_active(dp->dp_spa,
1822                     SPA_FEATURE_FS_SS_LIMIT)) {
1823                         VERIFY0(zap_lookup(os, dd->dd_object,
1824                             DD_FIELD_FILESYSTEM_COUNT, sizeof (fs_cnt), 1,
1825                             &fs_cnt));
1826                         /* add 1 for the filesystem itself that we're moving */
1827                         fs_cnt++;
1828 
1829                         VERIFY0(zap_lookup(os, dd->dd_object,
1830                             DD_FIELD_SNAPSHOT_COUNT, sizeof (ss_cnt), 1,
1831                             &ss_cnt));
1832                 }
1833 
1834                 dsl_fs_ss_count_adjust(dd->dd_parent, -fs_cnt,
1835                     DD_FIELD_FILESYSTEM_COUNT, tx);
1836                 dsl_fs_ss_count_adjust(newparent, fs_cnt,
1837                     DD_FIELD_FILESYSTEM_COUNT, tx);
1838 
1839                 dsl_fs_ss_count_adjust(dd->dd_parent, -ss_cnt,
1840                     DD_FIELD_SNAPSHOT_COUNT, tx);
1841                 dsl_fs_ss_count_adjust(newparent, ss_cnt,
1842                     DD_FIELD_SNAPSHOT_COUNT, tx);
1843 
1844                 dsl_dir_diduse_space(dd->dd_parent, DD_USED_CHILD,
1845                     -dsl_dir_phys(dd)->dd_used_bytes,
1846                     -dsl_dir_phys(dd)->dd_compressed_bytes,
1847                     -dsl_dir_phys(dd)->dd_uncompressed_bytes, tx);
1848                 dsl_dir_diduse_space(newparent, DD_USED_CHILD,
1849                     dsl_dir_phys(dd)->dd_used_bytes,
1850                     dsl_dir_phys(dd)->dd_compressed_bytes,
1851                     dsl_dir_phys(dd)->dd_uncompressed_bytes, tx);
1852 
1853                 if (dsl_dir_phys(dd)->dd_reserved >
1854                     dsl_dir_phys(dd)->dd_used_bytes) {
1855                         uint64_t unused_rsrv = dsl_dir_phys(dd)->dd_reserved -
1856                             dsl_dir_phys(dd)->dd_used_bytes;
1857 
1858                         dsl_dir_diduse_space(dd->dd_parent, DD_USED_CHILD_RSRV,
1859                             -unused_rsrv, 0, 0, tx);
1860                         dsl_dir_diduse_space(newparent, DD_USED_CHILD_RSRV,
1861                             unused_rsrv, 0, 0, tx);
1862                 }
1863         }
1864 
1865         dmu_buf_will_dirty(dd->dd_dbuf, tx);
1866 
1867         /* remove from old parent zapobj */
1868         error = zap_remove(mos,
1869             dsl_dir_phys(dd->dd_parent)->dd_child_dir_zapobj,
1870             dd->dd_myname, tx);
1871         ASSERT0(error);
1872 
1873         (void) strcpy(dd->dd_myname, mynewname);
1874         dsl_dir_rele(dd->dd_parent, dd);
1875         dsl_dir_phys(dd)->dd_parent_obj = newparent->dd_object;
1876         VERIFY0(dsl_dir_hold_obj(dp,
1877             newparent->dd_object, NULL, dd, &dd->dd_parent));
1878 
1879         /* add to new parent zapobj */
1880         VERIFY0(zap_add(mos, dsl_dir_phys(newparent)->dd_child_dir_zapobj,
1881             dd->dd_myname, 8, 1, &dd->dd_object, tx));
1882 
1883         dsl_prop_notify_all(dd);
1884 
1885         dsl_dir_rele(newparent, FTAG);
1886         dsl_dir_rele(dd, FTAG);
1887 }
1888 
1889 int
1890 dsl_dir_rename(const char *oldname, const char *newname)
1891 {
1892         dsl_dir_rename_arg_t ddra;
1893 
1894         ddra.ddra_oldname = oldname;
1895         ddra.ddra_newname = newname;
1896         ddra.ddra_cred = CRED();
1897 
1898         return (dsl_sync_task(oldname,
1899             dsl_dir_rename_check, dsl_dir_rename_sync, &ddra,
1900             3, ZFS_SPACE_CHECK_RESERVED));
1901 }
1902 
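     /*
      * Determine whether "space" bytes, fs_cnt filesystems, and ss_cnt
      * snapshots can move from sdd to tdd without exceeding the
      * destination's available space or its filesystem/snapshot limits.
      * Only the directories below the closest common ancestor are affected,
      * so the checks stop there.
      */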
1903 int
1904 dsl_dir_transfer_possible(dsl_dir_t *sdd, dsl_dir_t *tdd,
1905     uint64_t fs_cnt, uint64_t ss_cnt, uint64_t space, cred_t *cr)
1906 {
1907         dsl_dir_t *ancestor;
1908         int64_t adelta;
1909         uint64_t avail;
1910         int err;
1911 
1912         ancestor = closest_common_ancestor(sdd, tdd);
1913         adelta = would_change(sdd, -space, ancestor);
1914         avail = dsl_dir_space_available(tdd, ancestor, adelta, FALSE);
1915         if (avail < space)
1916                 return (SET_ERROR(ENOSPC));
1917 
1918         err = dsl_fs_ss_limit_check(tdd, fs_cnt, ZFS_PROP_FILESYSTEM_LIMIT,
1919             ancestor, cr);
1920         if (err != 0)
1921                 return (err);
1922         err = dsl_fs_ss_limit_check(tdd, ss_cnt, ZFS_PROP_SNAPSHOT_LIMIT,
1923             ancestor, cr);
1924         if (err != 0)
1925                 return (err);
1926 
1927         return (0);
1928 }
1929 
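     /*
      * Return this dsl_dir's snapshot-directory change time
      * (dd_snap_cmtime), maintained by dsl_dir_snap_cmtime_update() below.
      */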
1930 timestruc_t
1931 dsl_dir_snap_cmtime(dsl_dir_t *dd)
1932 {
1933         timestruc_t t;
1934 
1935         mutex_enter(&dd->dd_lock);
1936         t = dd->dd_snap_cmtime;
1937         mutex_exit(&dd->dd_lock);
1938 
1939         return (t);
1940 }
1941 
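     /*
      * Record the current time as the snapshot-directory change time.
      */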
1942 void
1943 dsl_dir_snap_cmtime_update(dsl_dir_t *dd)
1944 {
1945         timestruc_t t;
1946 
1947         gethrestime(&t);
1948         mutex_enter(&dd->dd_lock);
1949         dd->dd_snap_cmtime = t;
1950         mutex_exit(&dd->dd_lock);
1951 }
1952 
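     /*
      * Convert this dsl_dir's object in the MOS into a ZAP object so that
      * extensible properties (such as the filesystem and snapshot counts)
      * can be stored on it.
      */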
1953 void
1954 dsl_dir_zapify(dsl_dir_t *dd, dmu_tx_t *tx)
1955 {
1956         objset_t *mos = dd->dd_pool->dp_meta_objset;
1957         dmu_object_zapify(mos, dd->dd_object, DMU_OT_DSL_DIR, tx);
1958 }
1959 
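     /*
      * Return B_TRUE if this dsl_dir's object has been converted to a ZAP
      * (i.e. dsl_dir_zapify() has been called on it).
      */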
1960 boolean_t
1961 dsl_dir_is_zapified(dsl_dir_t *dd)
1962 {
1963         dmu_object_info_t doi;
1964 
1965         dmu_object_info_from_db(dd->dd_dbuf, &doi);
1966         return (doi.doi_type == DMU_OTN_ZAP_METADATA);
1967 }