/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2017 by Delphix. All rights reserved.
 * Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
 * Copyright (c) 2013, Joyent, Inc. All rights reserved.
 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
 * Copyright (c) 2015, STRATO AG, Inc. All rights reserved.
 * Copyright (c) 2014 Integros [integros.com]
 * Copyright 2017 Nexenta Systems, Inc.
 */

/* Portions Copyright 2010 Robert Milkowski */

#include <sys/cred.h>
#include <sys/zfs_context.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_synctask.h>
#include <sys/dsl_deleg.h>
#include <sys/dnode.h>
#include <sys/dbuf.h>
#include <sys/zvol.h>
#include <sys/dmu_tx.h>
#include <sys/zap.h>
#include <sys/zil.h>
#include <sys/dmu_impl.h>
#include <sys/zfs_ioctl.h>
#include <sys/sa.h>
#include <sys/zfs_onexit.h>
#include <sys/dsl_destroy.h>
#include <sys/vdev.h>
#include <sys/zfeature.h>

/*
 * Needed to close a window in dnode_move() that allows the objset to be freed
 * before it can be safely accessed.
 */
krwlock_t os_lock;

/*
 * Tunable to override the maximum number of threads used to parallelize
 * dmu_objset_find_dp(), needed to speed up the import of pools with many
 * datasets.
 * Default is 4 times the number of leaf vdevs.
 */
int dmu_find_threads = 0;

/*
 * Backfill lower metadnode objects after this many have been freed.
 * Backfilling negatively impacts object creation rates, so only do it
 * if there are enough holes to fill.
 */
int dmu_rescan_dnode_threshold = 131072;
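
/*
 * Illustrative only: on illumos, integer tunables such as the two above
 * are typically set from /etc/system (the values here are hypothetical):
 *
 *      set zfs:dmu_find_threads = 16
 *      set zfs:dmu_rescan_dnode_threshold = 0x20000
 *
 * They can also be patched in a live kernel with mdb -kw; treat both
 * mechanisms as platform-specific rather than guaranteed interfaces.
 */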

static void dmu_objset_find_dp_cb(void *arg);

void
dmu_objset_init(void)
{
        rw_init(&os_lock, NULL, RW_DEFAULT, NULL);
}

void
dmu_objset_fini(void)
{
        rw_destroy(&os_lock);
}

spa_t *
dmu_objset_spa(objset_t *os)
{
        return (os->os_spa);
}

zilog_t *
dmu_objset_zil(objset_t *os)
{
        return (os->os_zil);
}

dsl_pool_t *
dmu_objset_pool(objset_t *os)
{
        dsl_dataset_t *ds;

        if ((ds = os->os_dsl_dataset) != NULL && ds->ds_dir)
                return (ds->ds_dir->dd_pool);
        else
                return (spa_get_dsl(os->os_spa));
}

dsl_dataset_t *
dmu_objset_ds(objset_t *os)
{
        return (os->os_dsl_dataset);
}

dmu_objset_type_t
dmu_objset_type(objset_t *os)
{
        return (os->os_phys->os_type);
}

void
dmu_objset_name(objset_t *os, char *buf)
{
        dsl_dataset_name(os->os_dsl_dataset, buf);
}

uint64_t
dmu_objset_id(objset_t *os)
{
        dsl_dataset_t *ds = os->os_dsl_dataset;

        return (ds ? ds->ds_object : 0);
}

zfs_sync_type_t
dmu_objset_syncprop(objset_t *os)
{
        return (os->os_sync);
}

zfs_logbias_op_t
dmu_objset_logbias(objset_t *os)
{
        return (os->os_logbias);
}

static void
checksum_changed_cb(void *arg, uint64_t newval)
{
        objset_t *os = arg;

        /*
         * Inheritance should have been done by now.
         */
        ASSERT(newval != ZIO_CHECKSUM_INHERIT);

        os->os_checksum = zio_checksum_select(newval, ZIO_CHECKSUM_ON_VALUE);
}

static void
compression_changed_cb(void *arg, uint64_t newval)
{
        objset_t *os = arg;

        /*
         * Inheritance and range checking should have been done by now.
         */
        ASSERT(newval != ZIO_COMPRESS_INHERIT);

        os->os_compress = zio_compress_select(os->os_spa, newval,
            ZIO_COMPRESS_ON);
}

static void
copies_changed_cb(void *arg, uint64_t newval)
{
        objset_t *os = arg;

        /*
         * Inheritance and range checking should have been done by now.
         */
        ASSERT(newval > 0);
        ASSERT(newval <= spa_max_replication(os->os_spa));

        os->os_copies = newval;
}

static void
dedup_changed_cb(void *arg, uint64_t newval)
{
        objset_t *os = arg;
        spa_t *spa = os->os_spa;
        enum zio_checksum checksum;

        /*
         * Inheritance should have been done by now.
         */
        ASSERT(newval != ZIO_CHECKSUM_INHERIT);

        checksum = zio_checksum_dedup_select(spa, newval, ZIO_CHECKSUM_OFF);

        os->os_dedup_checksum = checksum & ZIO_CHECKSUM_MASK;
        os->os_dedup_verify = !!(checksum & ZIO_CHECKSUM_VERIFY);
}
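
/*
 * For example, a "dedup=sha256,verify" property value arrives here as
 * SHA-256 with the ZIO_CHECKSUM_VERIFY bit set, so the masking above
 * leaves os_dedup_checksum == ZIO_CHECKSUM_SHA256 and sets
 * os_dedup_verify to B_TRUE.
 */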

static void
primary_cache_changed_cb(void *arg, uint64_t newval)
{
        objset_t *os = arg;

        /*
         * Inheritance and range checking should have been done by now.
         */
        ASSERT(newval == ZFS_CACHE_ALL || newval == ZFS_CACHE_NONE ||
            newval == ZFS_CACHE_METADATA);

        os->os_primary_cache = newval;
}

static void
secondary_cache_changed_cb(void *arg, uint64_t newval)
{
        objset_t *os = arg;

        /*
         * Inheritance and range checking should have been done by now.
         */
        ASSERT(newval == ZFS_CACHE_ALL || newval == ZFS_CACHE_NONE ||
            newval == ZFS_CACHE_METADATA);

        os->os_secondary_cache = newval;
}

static void
sync_changed_cb(void *arg, uint64_t newval)
{
        objset_t *os = arg;

        /*
         * Inheritance and range checking should have been done by now.
         */
        ASSERT(newval == ZFS_SYNC_STANDARD || newval == ZFS_SYNC_ALWAYS ||
            newval == ZFS_SYNC_DISABLED);

        os->os_sync = newval;
        if (os->os_zil)
                zil_set_sync(os->os_zil, newval);
}

static void
redundant_metadata_changed_cb(void *arg, uint64_t newval)
{
        objset_t *os = arg;

        /*
         * Inheritance and range checking should have been done by now.
         */
        ASSERT(newval == ZFS_REDUNDANT_METADATA_ALL ||
            newval == ZFS_REDUNDANT_METADATA_MOST);

        os->os_redundant_metadata = newval;
}

static void
logbias_changed_cb(void *arg, uint64_t newval)
{
        objset_t *os = arg;

        ASSERT(newval == ZFS_LOGBIAS_LATENCY ||
            newval == ZFS_LOGBIAS_THROUGHPUT);
        os->os_logbias = newval;
        if (os->os_zil)
                zil_set_logbias(os->os_zil, newval);
}

static void
recordsize_changed_cb(void *arg, uint64_t newval)
{
        objset_t *os = arg;

        os->os_recordsize = newval;
}

void
dmu_objset_byteswap(void *buf, size_t size)
{
        objset_phys_t *osp = buf;

        ASSERT(size == OBJSET_OLD_PHYS_SIZE || size == sizeof (objset_phys_t));
        dnode_byteswap(&osp->os_meta_dnode);
        byteswap_uint64_array(&osp->os_zil_header, sizeof (zil_header_t));
        osp->os_type = BSWAP_64(osp->os_type);
        osp->os_flags = BSWAP_64(osp->os_flags);
        if (size == sizeof (objset_phys_t)) {
                dnode_byteswap(&osp->os_userused_dnode);
                dnode_byteswap(&osp->os_groupused_dnode);
        }
}

/*
 * The hash is a CRC-based hash of the objset_t pointer and the object number.
 */
static uint64_t
dnode_hash(const objset_t *os, uint64_t obj)
{
        uintptr_t osv = (uintptr_t)os;
        uint64_t crc = -1ULL;

        ASSERT(zfs_crc64_table[128] == ZFS_CRC64_POLY);
        /*
         * The low 6 bits of the pointer don't have much entropy, because
         * the objset_t is larger than 2^6 bytes long.
         */
        crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (osv >> 6)) & 0xFF];
        crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (obj >> 0)) & 0xFF];
        crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (obj >> 8)) & 0xFF];
        crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (obj >> 16)) & 0xFF];

        crc ^= (osv>>14) ^ (obj>>24);

        return (crc);
}

unsigned int
dnode_multilist_index_func(multilist_t *ml, void *obj)
{
        dnode_t *dn = obj;
        return (dnode_hash(dn->dn_objset, dn->dn_object) %
            multilist_get_num_sublists(ml));
}

/*
 * Instantiates the objset_t in-memory structure corresponding to the
 * objset_phys_t that's pointed to by the specified blkptr_t.
 */
int
dmu_objset_open_impl(spa_t *spa, dsl_dataset_t *ds, blkptr_t *bp,
    objset_t **osp)
{
        objset_t *os;
        int i, err;

        ASSERT(ds == NULL || MUTEX_HELD(&ds->ds_opening_lock));

        /*
         * The $ORIGIN dataset (if it exists) doesn't have an associated
         * objset, so there's no reason to open it. The $ORIGIN dataset
         * will not exist on pools older than SPA_VERSION_ORIGIN.
         */
        if (ds != NULL && spa_get_dsl(spa) != NULL &&
            spa_get_dsl(spa)->dp_origin_snap != NULL) {
                ASSERT3P(ds->ds_dir, !=,
                    spa_get_dsl(spa)->dp_origin_snap->ds_dir);
        }

        os = kmem_zalloc(sizeof (objset_t), KM_SLEEP);
        os->os_dsl_dataset = ds;
        os->os_spa = spa;
        os->os_rootbp = bp;
        if (!BP_IS_HOLE(os->os_rootbp)) {
                arc_flags_t aflags = ARC_FLAG_WAIT;
                zbookmark_phys_t zb;
                SET_BOOKMARK(&zb, ds ? ds->ds_object : DMU_META_OBJSET,
                    ZB_ROOT_OBJECT, ZB_ROOT_LEVEL, ZB_ROOT_BLKID);

                if (DMU_OS_IS_L2CACHEABLE(os))
                        aflags |= ARC_FLAG_L2CACHE;

                dprintf_bp(os->os_rootbp, "reading %s", "");
                err = arc_read(NULL, spa, os->os_rootbp,
                    arc_getbuf_func, &os->os_phys_buf,
                    ZIO_PRIORITY_SYNC_READ, ZIO_FLAG_CANFAIL, &aflags, &zb);
                if (err != 0) {
                        kmem_free(os, sizeof (objset_t));
                        /* convert checksum errors into IO errors */
                        if (err == ECKSUM)
                                err = SET_ERROR(EIO);
                        return (err);
                }

                /* Increase the blocksize if we are permitted. */
                if (spa_version(spa) >= SPA_VERSION_USERSPACE &&
                    arc_buf_size(os->os_phys_buf) < sizeof (objset_phys_t)) {
                        arc_buf_t *buf = arc_alloc_buf(spa, &os->os_phys_buf,
                            ARC_BUFC_METADATA, sizeof (objset_phys_t));
                        bzero(buf->b_data, sizeof (objset_phys_t));
                        bcopy(os->os_phys_buf->b_data, buf->b_data,
                            arc_buf_size(os->os_phys_buf));
                        arc_buf_destroy(os->os_phys_buf, &os->os_phys_buf);
                        os->os_phys_buf = buf;
                }

                os->os_phys = os->os_phys_buf->b_data;
                os->os_flags = os->os_phys->os_flags;
        } else {
                int size = spa_version(spa) >= SPA_VERSION_USERSPACE ?
                    sizeof (objset_phys_t) : OBJSET_OLD_PHYS_SIZE;
                os->os_phys_buf = arc_alloc_buf(spa, &os->os_phys_buf,
                    ARC_BUFC_METADATA, size);
                os->os_phys = os->os_phys_buf->b_data;
                bzero(os->os_phys, size);
        }

        /*
         * Note: the changed_cb will be called once before the register
         * func returns, thus changing the checksum/compression from the
         * default (fletcher2/off).  Snapshots don't need to know about
         * checksum/compression/copies.
         */
        if (ds != NULL) {
                boolean_t needlock = B_FALSE;

                /*
                 * Note: it's valid to open the objset if the dataset is
                 * long-held, in which case the pool_config lock will not
                 * be held.
                 */
                if (!dsl_pool_config_held(dmu_objset_pool(os))) {
                        needlock = B_TRUE;
                        dsl_pool_config_enter(dmu_objset_pool(os), FTAG);
                }
                err = dsl_prop_register(ds,
                    zfs_prop_to_name(ZFS_PROP_PRIMARYCACHE),
                    primary_cache_changed_cb, os);
                if (err == 0) {
                        err = dsl_prop_register(ds,
                            zfs_prop_to_name(ZFS_PROP_SECONDARYCACHE),
                            secondary_cache_changed_cb, os);
                }
                if (!ds->ds_is_snapshot) {
                        if (err == 0) {
                                err = dsl_prop_register(ds,
                                    zfs_prop_to_name(ZFS_PROP_CHECKSUM),
                                    checksum_changed_cb, os);
                        }
                        if (err == 0) {
                                err = dsl_prop_register(ds,
                                    zfs_prop_to_name(ZFS_PROP_COMPRESSION),
                                    compression_changed_cb, os);
                        }
                        if (err == 0) {
                                err = dsl_prop_register(ds,
                                    zfs_prop_to_name(ZFS_PROP_COPIES),
                                    copies_changed_cb, os);
                        }
                        if (err == 0) {
                                err = dsl_prop_register(ds,
                                    zfs_prop_to_name(ZFS_PROP_DEDUP),
                                    dedup_changed_cb, os);
                        }
                        if (err == 0) {
                                err = dsl_prop_register(ds,
                                    zfs_prop_to_name(ZFS_PROP_LOGBIAS),
                                    logbias_changed_cb, os);
                        }
                        if (err == 0) {
                                err = dsl_prop_register(ds,
                                    zfs_prop_to_name(ZFS_PROP_SYNC),
                                    sync_changed_cb, os);
                        }
                        if (err == 0) {
                                err = dsl_prop_register(ds,
                                    zfs_prop_to_name(
                                    ZFS_PROP_REDUNDANT_METADATA),
                                    redundant_metadata_changed_cb, os);
                        }
                        if (err == 0) {
                                err = dsl_prop_register(ds,
                                    zfs_prop_to_name(ZFS_PROP_RECORDSIZE),
                                    recordsize_changed_cb, os);
                        }
                }
                if (needlock)
                        dsl_pool_config_exit(dmu_objset_pool(os), FTAG);
                if (err != 0) {
                        arc_buf_destroy(os->os_phys_buf, &os->os_phys_buf);
                        kmem_free(os, sizeof (objset_t));
                        return (err);
                }
        } else {
                /* It's the meta-objset. */
                os->os_checksum = ZIO_CHECKSUM_FLETCHER_4;
                os->os_compress = ZIO_COMPRESS_ON;
                os->os_copies = spa_max_replication(spa);
                os->os_dedup_checksum = ZIO_CHECKSUM_OFF;
                os->os_dedup_verify = B_FALSE;
                os->os_logbias = ZFS_LOGBIAS_LATENCY;
                os->os_sync = ZFS_SYNC_STANDARD;
                os->os_primary_cache = ZFS_CACHE_ALL;
                os->os_secondary_cache = ZFS_CACHE_ALL;
        }

        if (ds == NULL || !ds->ds_is_snapshot)
                os->os_zil_header = os->os_phys->os_zil_header;
        os->os_zil = zil_alloc(os, &os->os_zil_header);

        for (i = 0; i < TXG_SIZE; i++) {
                os->os_dirty_dnodes[i] = multilist_create(sizeof (dnode_t),
                    offsetof(dnode_t, dn_dirty_link[i]),
                    dnode_multilist_index_func);
        }
        list_create(&os->os_dnodes, sizeof (dnode_t),
            offsetof(dnode_t, dn_link));
        list_create(&os->os_downgraded_dbufs, sizeof (dmu_buf_impl_t),
            offsetof(dmu_buf_impl_t, db_link));

        mutex_init(&os->os_lock, NULL, MUTEX_DEFAULT, NULL);
        mutex_init(&os->os_userused_lock, NULL, MUTEX_DEFAULT, NULL);
        mutex_init(&os->os_obj_lock, NULL, MUTEX_DEFAULT, NULL);
        mutex_init(&os->os_user_ptr_lock, NULL, MUTEX_DEFAULT, NULL);

        dnode_special_open(os, &os->os_phys->os_meta_dnode,
            DMU_META_DNODE_OBJECT, &os->os_meta_dnode);
        if (arc_buf_size(os->os_phys_buf) >= sizeof (objset_phys_t)) {
                dnode_special_open(os, &os->os_phys->os_userused_dnode,
                    DMU_USERUSED_OBJECT, &os->os_userused_dnode);
                dnode_special_open(os, &os->os_phys->os_groupused_dnode,
                    DMU_GROUPUSED_OBJECT, &os->os_groupused_dnode);
        }

        *osp = os;
        return (0);
}

int
dmu_objset_from_ds(dsl_dataset_t *ds, objset_t **osp)
{
        int err = 0;

        /*
         * We shouldn't be doing anything with dsl_dataset_t's unless the
         * pool_config lock is held, or the dataset is long-held.
         */
        ASSERT(dsl_pool_config_held(ds->ds_dir->dd_pool) ||
            dsl_dataset_long_held(ds));

        mutex_enter(&ds->ds_opening_lock);
        if (ds->ds_objset == NULL) {
                objset_t *os;
                rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
                err = dmu_objset_open_impl(dsl_dataset_get_spa(ds),
                    ds, dsl_dataset_get_blkptr(ds), &os);
                rrw_exit(&ds->ds_bp_rwlock, FTAG);

                if (err == 0) {
                        mutex_enter(&ds->ds_lock);
                        ASSERT(ds->ds_objset == NULL);
                        ds->ds_objset = os;
                        mutex_exit(&ds->ds_lock);
                }
        }
        *osp = ds->ds_objset;
        mutex_exit(&ds->ds_opening_lock);
        return (err);
}

/*
 * Holds the pool while the objset is held.  Therefore only one objset
 * can be held at a time.
 */
int
dmu_objset_hold(const char *name, void *tag, objset_t **osp)
{
        dsl_pool_t *dp;
        dsl_dataset_t *ds;
        int err;

        err = dsl_pool_hold(name, tag, &dp);
        if (err != 0)
                return (err);
        err = dsl_dataset_hold(dp, name, tag, &ds);
        if (err != 0) {
                dsl_pool_rele(dp, tag);
                return (err);
        }

        err = dmu_objset_from_ds(ds, osp);
        if (err != 0) {
                dsl_dataset_rele(ds, tag);
                dsl_pool_rele(dp, tag);
        }

        return (err);
}
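
/*
 * Minimal usage sketch (illustrative; the dataset name is hypothetical
 * and error handling is trimmed):
 *
 *      objset_t *os;
 *      int err = dmu_objset_hold("tank/fs", FTAG, &os);
 *      if (err == 0) {
 *              ... short-lived, read-only inspection of os ...
 *              dmu_objset_rele(os, FTAG);
 *      }
 *
 * Because the pool stays held for the lifetime of the hold, such holds
 * should be short-lived.
 */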

static int
dmu_objset_own_impl(dsl_dataset_t *ds, dmu_objset_type_t type,
    boolean_t readonly, void *tag, objset_t **osp)
{
        int err;

        err = dmu_objset_from_ds(ds, osp);
        if (err != 0) {
                dsl_dataset_disown(ds, tag);
        } else if (type != DMU_OST_ANY && type != (*osp)->os_phys->os_type) {
                dsl_dataset_disown(ds, tag);
                return (SET_ERROR(EINVAL));
        } else if (!readonly && dsl_dataset_is_snapshot(ds)) {
                dsl_dataset_disown(ds, tag);
                return (SET_ERROR(EROFS));
        }
        return (err);
}

/*
 * dsl_pool must not be held when this is called.
 * Upon successful return, there will be a longhold on the dataset,
 * and the dsl_pool will not be held.
 */
int
dmu_objset_own(const char *name, dmu_objset_type_t type,
    boolean_t readonly, void *tag, objset_t **osp)
{
        dsl_pool_t *dp;
        dsl_dataset_t *ds;
        int err;

        err = dsl_pool_hold(name, FTAG, &dp);
        if (err != 0)
                return (err);
        err = dsl_dataset_own(dp, name, tag, &ds);
        if (err != 0) {
                dsl_pool_rele(dp, FTAG);
                return (err);
        }
        err = dmu_objset_own_impl(ds, type, readonly, tag, osp);
        dsl_pool_rele(dp, FTAG);

        return (err);
}
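
/*
 * Minimal usage sketch (illustrative; the name is hypothetical):
 *
 *      objset_t *os;
 *      int err = dmu_objset_own("tank/fs", DMU_OST_ZFS, B_FALSE, FTAG, &os);
 *      if (err == 0) {
 *              ... long-lived access; the dataset is long-held while the
 *              ... pool itself is not held ...
 *              dmu_objset_disown(os, FTAG);
 *      }
 */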

int
dmu_objset_own_obj(dsl_pool_t *dp, uint64_t obj, dmu_objset_type_t type,
    boolean_t readonly, void *tag, objset_t **osp)
{
        dsl_dataset_t *ds;
        int err;

        err = dsl_dataset_own_obj(dp, obj, tag, &ds);
        if (err != 0)
                return (err);

        return (dmu_objset_own_impl(ds, type, readonly, tag, osp));
}

void
dmu_objset_rele(objset_t *os, void *tag)
{
        dsl_pool_t *dp = dmu_objset_pool(os);
        dsl_dataset_rele(os->os_dsl_dataset, tag);
        dsl_pool_rele(dp, tag);
}

/*
 * When we are called, os MUST refer to an objset associated with a dataset
 * that is owned by 'tag'; that is, is held and long held by 'tag' and ds_owner
 * == tag.  We will then release and reacquire ownership of the dataset while
 * holding the pool config_rwlock so that no intervening namespace or
 * ownership changes can occur.
 *
 * This exists solely to accommodate zfs_ioc_userspace_upgrade()'s desire to
 * release the hold on its dataset and acquire a new one on the dataset of the
 * same name so that it can be partially torn down and reconstructed.
 */
void
dmu_objset_refresh_ownership(objset_t *os, void *tag)
{
        dsl_pool_t *dp;
        dsl_dataset_t *ds, *newds;
        char name[ZFS_MAX_DATASET_NAME_LEN];

        ds = os->os_dsl_dataset;
        VERIFY3P(ds, !=, NULL);
        VERIFY3P(ds->ds_owner, ==, tag);
        VERIFY(dsl_dataset_long_held(ds));

        dsl_dataset_name(ds, name);
        dp = dmu_objset_pool(os);
        dsl_pool_config_enter(dp, FTAG);
        dmu_objset_disown(os, tag);
        VERIFY0(dsl_dataset_own(dp, name, tag, &newds));
        VERIFY3P(newds, ==, os->os_dsl_dataset);
        dsl_pool_config_exit(dp, FTAG);
}

void
dmu_objset_disown(objset_t *os, void *tag)
{
        dsl_dataset_disown(os->os_dsl_dataset, tag);
}

void
dmu_objset_evict_dbufs(objset_t *os)
{
        dnode_t dn_marker;
        dnode_t *dn;

        mutex_enter(&os->os_lock);
        dn = list_head(&os->os_dnodes);
        while (dn != NULL) {
                /*
                 * Skip dnodes without holds.  We have to do this dance
                 * because dnode_add_ref() only works if there is already a
                 * hold.  If the dnode has no holds, then it has no dbufs.
                 */
                if (dnode_add_ref(dn, FTAG)) {
                        list_insert_after(&os->os_dnodes, dn, &dn_marker);
                        mutex_exit(&os->os_lock);

                        dnode_evict_dbufs(dn);
                        dnode_rele(dn, FTAG);

                        mutex_enter(&os->os_lock);
                        dn = list_next(&os->os_dnodes, &dn_marker);
                        list_remove(&os->os_dnodes, &dn_marker);
                } else {
                        dn = list_next(&os->os_dnodes, dn);
                }
        }
        mutex_exit(&os->os_lock);

        if (DMU_USERUSED_DNODE(os) != NULL) {
                dnode_evict_dbufs(DMU_GROUPUSED_DNODE(os));
                dnode_evict_dbufs(DMU_USERUSED_DNODE(os));
        }
        dnode_evict_dbufs(DMU_META_DNODE(os));
}

/*
 * Objset eviction processing is split into two pieces.
 * The first marks the objset as evicting, evicts any dbufs that
 * have a refcount of zero, and then queues up the objset for the
 * second phase of eviction.  Once os->os_dnodes has been cleared by
 * dnode_buf_pageout()->dnode_destroy(), the second phase is executed.
 * The second phase closes the special dnodes, dequeues the objset from
 * the list of those undergoing eviction, and finally frees the objset.
 *
 * NOTE: Due to asynchronous eviction processing (invocation of
 *       dnode_buf_pageout()), it is possible for the meta dnode for the
 *       objset to have no holds even though os->os_dnodes is not empty.
 */
void
dmu_objset_evict(objset_t *os)
{
        dsl_dataset_t *ds = os->os_dsl_dataset;

        for (int t = 0; t < TXG_SIZE; t++)
                ASSERT(!dmu_objset_is_dirty(os, t));

        if (ds)
                dsl_prop_unregister_all(ds, os);

        if (os->os_sa)
                sa_tear_down(os);

        dmu_objset_evict_dbufs(os);

        mutex_enter(&os->os_lock);
        spa_evicting_os_register(os->os_spa, os);
        if (list_is_empty(&os->os_dnodes)) {
                mutex_exit(&os->os_lock);
                dmu_objset_evict_done(os);
        } else {
                mutex_exit(&os->os_lock);
        }
}

void
dmu_objset_evict_done(objset_t *os)
{
        ASSERT3P(list_head(&os->os_dnodes), ==, NULL);

        dnode_special_close(&os->os_meta_dnode);
        if (DMU_USERUSED_DNODE(os)) {
                dnode_special_close(&os->os_userused_dnode);
                dnode_special_close(&os->os_groupused_dnode);
        }
        zil_free(os->os_zil);

        arc_buf_destroy(os->os_phys_buf, &os->os_phys_buf);

        /*
         * This is a barrier to prevent the objset from going away in
         * dnode_move() until we can safely ensure that the objset is still in
         * use. We consider the objset valid before the barrier and invalid
         * after the barrier.
         */
        rw_enter(&os_lock, RW_READER);
        rw_exit(&os_lock);

        mutex_destroy(&os->os_lock);
        mutex_destroy(&os->os_userused_lock);
        mutex_destroy(&os->os_obj_lock);
        mutex_destroy(&os->os_user_ptr_lock);
        for (int i = 0; i < TXG_SIZE; i++) {
                multilist_destroy(os->os_dirty_dnodes[i]);
        }
        spa_evicting_os_deregister(os->os_spa, os);
        kmem_free(os, sizeof (objset_t));
}

timestruc_t
dmu_objset_snap_cmtime(objset_t *os)
{
        return (dsl_dir_snap_cmtime(os->os_dsl_dataset->ds_dir));
}

/* called from dsl for meta-objset */
objset_t *
dmu_objset_create_impl(spa_t *spa, dsl_dataset_t *ds, blkptr_t *bp,
    dmu_objset_type_t type, dmu_tx_t *tx)
{
        objset_t *os;
        dnode_t *mdn;

        ASSERT(dmu_tx_is_syncing(tx));

        if (ds != NULL)
                VERIFY0(dmu_objset_from_ds(ds, &os));
        else
                VERIFY0(dmu_objset_open_impl(spa, NULL, bp, &os));

        mdn = DMU_META_DNODE(os);

        dnode_allocate(mdn, DMU_OT_DNODE, 1 << DNODE_BLOCK_SHIFT,
            DN_MAX_INDBLKSHIFT, DMU_OT_NONE, 0, tx);

        /*
         * We don't want to have to increase the meta-dnode's nlevels
         * later, because then we could do it in quiescing context while
         * we are also accessing it in open context.
         *
         * This precaution is not necessary for the MOS (ds == NULL),
         * because the MOS is only updated in syncing context.
         * This is most fortunate: the MOS is the only objset that
         * needs to be synced multiple times as spa_sync() iterates
         * to convergence, so minimizing its dn_nlevels matters.
         */
        if (ds != NULL) {
                int levels = 1;

                /*
                 * Determine the number of levels necessary for the meta-dnode
                 * to contain DN_MAX_OBJECT dnodes.  Note that in order to
                 * ensure that we do not overflow 64 bits, there has to be
                 * a nlevels that gives us a number of blocks > DN_MAX_OBJECT
                 * but < 2^64.  Therefore,
                 * (mdn->dn_indblkshift - SPA_BLKPTRSHIFT) (10) must be
                 * less than (64 - log2(DN_MAX_OBJECT)) (16).
                 */
                while ((uint64_t)mdn->dn_nblkptr <<
                    (mdn->dn_datablkshift - DNODE_SHIFT +
                    (levels - 1) * (mdn->dn_indblkshift - SPA_BLKPTRSHIFT)) <
                    DN_MAX_OBJECT)
                        levels++;

                mdn->dn_next_nlevels[tx->tx_txg & TXG_MASK] =
                    mdn->dn_nlevels = levels;
        }
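
        /*
         * Worked example (assuming the typical constants: dn_nblkptr = 3,
         * DNODE_BLOCK_SHIFT = 14, DNODE_SHIFT = 9, dn_indblkshift = 17,
         * SPA_BLKPTRSHIFT = 7): level 1 covers 3 * 2^(14-9) = 96 dnodes,
         * and each additional level multiplies that by 2^(17-7) = 1024,
         * so 96 * 1024^(levels-1) first exceeds DN_MAX_OBJECT (2^48) at
         * levels = 6.
         */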

        ASSERT(type != DMU_OST_NONE);
        ASSERT(type != DMU_OST_ANY);
        ASSERT(type < DMU_OST_NUMTYPES);
        os->os_phys->os_type = type;
        if (dmu_objset_userused_enabled(os)) {
                os->os_phys->os_flags |= OBJSET_FLAG_USERACCOUNTING_COMPLETE;
                os->os_flags = os->os_phys->os_flags;
        }

        dsl_dataset_dirty(ds, tx);

        return (os);
}

typedef struct dmu_objset_create_arg {
        const char *doca_name;
        cred_t *doca_cred;
        void (*doca_userfunc)(objset_t *os, void *arg,
            cred_t *cr, dmu_tx_t *tx);
        void *doca_userarg;
        dmu_objset_type_t doca_type;
        uint64_t doca_flags;
} dmu_objset_create_arg_t;

/*ARGSUSED*/
static int
dmu_objset_create_check(void *arg, dmu_tx_t *tx)
{
        dmu_objset_create_arg_t *doca = arg;
        dsl_pool_t *dp = dmu_tx_pool(tx);
        dsl_dir_t *pdd;
        const char *tail;
        int error;

        if (strchr(doca->doca_name, '@') != NULL)
                return (SET_ERROR(EINVAL));

        if (strlen(doca->doca_name) >= ZFS_MAX_DATASET_NAME_LEN)
                return (SET_ERROR(ENAMETOOLONG));

        error = dsl_dir_hold(dp, doca->doca_name, FTAG, &pdd, &tail);
        if (error != 0)
                return (error);
        if (tail == NULL) {
                dsl_dir_rele(pdd, FTAG);
                return (SET_ERROR(EEXIST));
        }
        error = dsl_fs_ss_limit_check(pdd, 1, ZFS_PROP_FILESYSTEM_LIMIT, NULL,
            doca->doca_cred);
        dsl_dir_rele(pdd, FTAG);

        return (error);
}

static void
dmu_objset_create_sync(void *arg, dmu_tx_t *tx)
{
        dmu_objset_create_arg_t *doca = arg;
        dsl_pool_t *dp = dmu_tx_pool(tx);
        dsl_dir_t *pdd;
        const char *tail;
        dsl_dataset_t *ds;
        uint64_t obj;
        blkptr_t *bp;
        objset_t *os;

        VERIFY0(dsl_dir_hold(dp, doca->doca_name, FTAG, &pdd, &tail));

        obj = dsl_dataset_create_sync(pdd, tail, NULL, doca->doca_flags,
            doca->doca_cred, tx);

        VERIFY0(dsl_dataset_hold_obj(pdd->dd_pool, obj, FTAG, &ds));
        rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
        bp = dsl_dataset_get_blkptr(ds);
        os = dmu_objset_create_impl(pdd->dd_pool->dp_spa,
            ds, bp, doca->doca_type, tx);
        rrw_exit(&ds->ds_bp_rwlock, FTAG);

        if (doca->doca_userfunc != NULL) {
                doca->doca_userfunc(os, doca->doca_userarg,
                    doca->doca_cred, tx);
        }

        spa_history_log_internal_ds(ds, "create", tx, "");
        dsl_dataset_rele(ds, FTAG);
        dsl_dir_rele(pdd, FTAG);
}

int
dmu_objset_create(const char *name, dmu_objset_type_t type, uint64_t flags,
    void (*func)(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx), void *arg)
{
        dmu_objset_create_arg_t doca;

        doca.doca_name = name;
        doca.doca_cred = CRED();
        doca.doca_flags = flags;
        doca.doca_userfunc = func;
        doca.doca_userarg = arg;
        doca.doca_type = type;

        return (dsl_sync_task(name,
            dmu_objset_create_check, dmu_objset_create_sync, &doca,
            5, ZFS_SPACE_CHECK_NORMAL));
}
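
/*
 * Illustrative call (hypothetical names); a caller such as the ZPL would
 * pass its own callback to initialize the new objset inside the same tx:
 *
 *      error = dmu_objset_create("tank/newfs", DMU_OST_ZFS, 0,
 *          setup_cb, setup_arg);
 */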

typedef struct dmu_objset_clone_arg {
        const char *doca_clone;
        const char *doca_origin;
        cred_t *doca_cred;
} dmu_objset_clone_arg_t;

/*ARGSUSED*/
static int
dmu_objset_clone_check(void *arg, dmu_tx_t *tx)
{
        dmu_objset_clone_arg_t *doca = arg;
        dsl_dir_t *pdd;
        const char *tail;
        int error;
        dsl_dataset_t *origin;
        dsl_pool_t *dp = dmu_tx_pool(tx);

        if (strchr(doca->doca_clone, '@') != NULL)
                return (SET_ERROR(EINVAL));

        if (strlen(doca->doca_clone) >= ZFS_MAX_DATASET_NAME_LEN)
                return (SET_ERROR(ENAMETOOLONG));

        error = dsl_dir_hold(dp, doca->doca_clone, FTAG, &pdd, &tail);
        if (error != 0)
                return (error);
        if (tail == NULL) {
                dsl_dir_rele(pdd, FTAG);
                return (SET_ERROR(EEXIST));
        }

        error = dsl_fs_ss_limit_check(pdd, 1, ZFS_PROP_FILESYSTEM_LIMIT, NULL,
            doca->doca_cred);
        if (error != 0) {
                dsl_dir_rele(pdd, FTAG);
                return (SET_ERROR(EDQUOT));
        }
        dsl_dir_rele(pdd, FTAG);

        error = dsl_dataset_hold(dp, doca->doca_origin, FTAG, &origin);
        if (error != 0)
                return (error);

        /* You can only clone snapshots, not the head datasets. */
        if (!origin->ds_is_snapshot) {
                dsl_dataset_rele(origin, FTAG);
                return (SET_ERROR(EINVAL));
        }
        dsl_dataset_rele(origin, FTAG);

        return (0);
}

static void
dmu_objset_clone_sync(void *arg, dmu_tx_t *tx)
{
        dmu_objset_clone_arg_t *doca = arg;
        dsl_pool_t *dp = dmu_tx_pool(tx);
        dsl_dir_t *pdd;
        const char *tail;
        dsl_dataset_t *origin, *ds;
        uint64_t obj;
        char namebuf[ZFS_MAX_DATASET_NAME_LEN];

        VERIFY0(dsl_dir_hold(dp, doca->doca_clone, FTAG, &pdd, &tail));
        VERIFY0(dsl_dataset_hold(dp, doca->doca_origin, FTAG, &origin));

        obj = dsl_dataset_create_sync(pdd, tail, origin, 0,
            doca->doca_cred, tx);

        VERIFY0(dsl_dataset_hold_obj(pdd->dd_pool, obj, FTAG, &ds));
        dsl_dataset_name(origin, namebuf);
        spa_history_log_internal_ds(ds, "clone", tx,
            "origin=%s (%llu)", namebuf, origin->ds_object);
        dsl_dataset_rele(ds, FTAG);
        dsl_dataset_rele(origin, FTAG);
        dsl_dir_rele(pdd, FTAG);
}

int
dmu_objset_clone(const char *clone, const char *origin)
{
        dmu_objset_clone_arg_t doca;

        doca.doca_clone = clone;
        doca.doca_origin = origin;
        doca.doca_cred = CRED();

        return (dsl_sync_task(clone,
            dmu_objset_clone_check, dmu_objset_clone_sync, &doca,
            5, ZFS_SPACE_CHECK_NORMAL));
}
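
/*
 * Illustrative call (hypothetical names); per the check above, the
 * origin must name a snapshot:
 *
 *      error = dmu_objset_clone("tank/clone", "tank/fs@snap");
 */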

static int
dmu_objset_remap_indirects_impl(objset_t *os, uint64_t last_removed_txg)
{
        int error = 0;
        uint64_t object = 0;
        while ((error = dmu_object_next(os, &object, B_FALSE, 0)) == 0) {
                error = dmu_object_remap_indirects(os, object,
                    last_removed_txg);
                /*
                 * If the ZPL removed the object before we managed to dnode_hold
                 * it, we would get an ENOENT. If the ZPL declares its intent
                 * to remove the object (dnode_free) before we manage to
                 * dnode_hold it, we would get an EEXIST. In either case, we
                 * want to continue remapping the other objects in the objset;
                 * in all other cases, we want to break early.
                 */
                if (error != 0 && error != ENOENT && error != EEXIST) {
                        break;
                }
        }
        if (error == ESRCH) {
                error = 0;
        }
        return (error);
}

int
dmu_objset_remap_indirects(const char *fsname)
{
        int error = 0;
        objset_t *os = NULL;
        uint64_t last_removed_txg;
        uint64_t remap_start_txg;
        dsl_dir_t *dd;

        error = dmu_objset_hold(fsname, FTAG, &os);
        if (error != 0) {
                return (error);
        }
        dd = dmu_objset_ds(os)->ds_dir;

        if (!spa_feature_is_enabled(dmu_objset_spa(os),
            SPA_FEATURE_OBSOLETE_COUNTS)) {
                dmu_objset_rele(os, FTAG);
                return (SET_ERROR(ENOTSUP));
        }

        if (dsl_dataset_is_snapshot(dmu_objset_ds(os))) {
                dmu_objset_rele(os, FTAG);
                return (SET_ERROR(EINVAL));
        }

        /*
         * If there has not been a removal, we're done.
         */
        last_removed_txg = spa_get_last_removal_txg(dmu_objset_spa(os));
        if (last_removed_txg == -1ULL) {
                dmu_objset_rele(os, FTAG);
                return (0);
        }

        /*
         * If we have remapped since the last removal, we're done.
         */
        if (dsl_dir_is_zapified(dd)) {
                uint64_t last_remap_txg;
                if (zap_lookup(spa_meta_objset(dmu_objset_spa(os)),
                    dd->dd_object, DD_FIELD_LAST_REMAP_TXG,
                    sizeof (last_remap_txg), 1, &last_remap_txg) == 0 &&
                    last_remap_txg > last_removed_txg) {
                        dmu_objset_rele(os, FTAG);
                        return (0);
                }
        }

        dsl_dataset_long_hold(dmu_objset_ds(os), FTAG);
        dsl_pool_rele(dmu_objset_pool(os), FTAG);

        remap_start_txg = spa_last_synced_txg(dmu_objset_spa(os));
        error = dmu_objset_remap_indirects_impl(os, last_removed_txg);
        if (error == 0) {
                /*
                 * We update the last_remap_txg to be the start txg so that
                 * we can guarantee that every block older than last_remap_txg
                 * that can be remapped has been remapped.
                 */
                error = dsl_dir_update_last_remap_txg(dd, remap_start_txg);
        }

        dsl_dataset_long_rele(dmu_objset_ds(os), FTAG);
        dsl_dataset_rele(dmu_objset_ds(os), FTAG);

        return (error);
}

int
dmu_objset_snapshot_one(const char *fsname, const char *snapname)
{
        int err;
        char *longsnap = kmem_asprintf("%s@%s", fsname, snapname);
        nvlist_t *snaps = fnvlist_alloc();

        fnvlist_add_boolean(snaps, longsnap);
        strfree(longsnap);
        err = dsl_dataset_snapshot(snaps, NULL, NULL);
        fnvlist_free(snaps);
        return (err);
}
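
/*
 * Illustrative call (hypothetical names); this is a convenience wrapper
 * that builds the single-entry nvlist expected by dsl_dataset_snapshot():
 *
 *      error = dmu_objset_snapshot_one("tank/fs", "today");
 */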

static void
dmu_objset_sync_dnodes(multilist_sublist_t *list, dmu_tx_t *tx)
{
        dnode_t *dn;

        while ((dn = multilist_sublist_head(list)) != NULL) {
                ASSERT(dn->dn_object != DMU_META_DNODE_OBJECT);
                ASSERT(dn->dn_dbuf->db_data_pending);
                /*
                 * Initialize dn_zio outside dnode_sync() because the
                 * meta-dnode needs to set it outside dnode_sync().
                 */
                dn->dn_zio = dn->dn_dbuf->db_data_pending->dr_zio;
                ASSERT(dn->dn_zio);

                ASSERT3U(dn->dn_nlevels, <=, DN_MAX_LEVELS);
                multilist_sublist_remove(list, dn);

                multilist_t *newlist = dn->dn_objset->os_synced_dnodes;
                if (newlist != NULL) {
                        (void) dnode_add_ref(dn, newlist);
                        multilist_insert(newlist, dn);
                }

                dnode_sync(dn, tx);
        }
}

/* ARGSUSED */
static void
dmu_objset_write_ready(zio_t *zio, arc_buf_t *abuf, void *arg)
{
        blkptr_t *bp = zio->io_bp;
        objset_t *os = arg;
        dnode_phys_t *dnp = &os->os_phys->os_meta_dnode;

        ASSERT(!BP_IS_EMBEDDED(bp));
        ASSERT3U(BP_GET_TYPE(bp), ==, DMU_OT_OBJSET);
        ASSERT0(BP_GET_LEVEL(bp));

        /*
         * Update rootbp fill count: it should be the number of objects
         * allocated in the object set (not counting the "special"
         * objects that are stored in the objset_phys_t -- the meta
         * dnode and user/group accounting objects).
         */
        bp->blk_fill = 0;
        for (int i = 0; i < dnp->dn_nblkptr; i++)
                bp->blk_fill += BP_GET_FILL(&dnp->dn_blkptr[i]);
        if (os->os_dsl_dataset != NULL)
                rrw_enter(&os->os_dsl_dataset->ds_bp_rwlock, RW_WRITER, FTAG);
        *os->os_rootbp = *bp;
        if (os->os_dsl_dataset != NULL)
                rrw_exit(&os->os_dsl_dataset->ds_bp_rwlock, FTAG);
}

/* ARGSUSED */
static void
dmu_objset_write_done(zio_t *zio, arc_buf_t *abuf, void *arg)
{
        blkptr_t *bp = zio->io_bp;
        blkptr_t *bp_orig = &zio->io_bp_orig;
        objset_t *os = arg;

        if (zio->io_flags & ZIO_FLAG_IO_REWRITE) {
                ASSERT(BP_EQUAL(bp, bp_orig));
        } else {
                dsl_dataset_t *ds = os->os_dsl_dataset;
                dmu_tx_t *tx = os->os_synctx;

                (void) dsl_dataset_block_kill(ds, bp_orig, tx, B_TRUE);
                dsl_dataset_block_born(ds, bp, tx);
        }
        kmem_free(bp, sizeof (*bp));
}

typedef struct sync_dnodes_arg {
        multilist_t *sda_list;
        int sda_sublist_idx;
        multilist_t *sda_newlist;
        dmu_tx_t *sda_tx;
} sync_dnodes_arg_t;

static void
sync_dnodes_task(void *arg)
{
        sync_dnodes_arg_t *sda = arg;

        multilist_sublist_t *ms =
            multilist_sublist_lock(sda->sda_list, sda->sda_sublist_idx);

        dmu_objset_sync_dnodes(ms, sda->sda_tx);

        multilist_sublist_unlock(ms);

        kmem_free(sda, sizeof (*sda));
}


/* called from dsl */
void
dmu_objset_sync(objset_t *os, zio_t *pio, dmu_tx_t *tx)
{
        int txgoff;
        zbookmark_phys_t zb;
        zio_prop_t zp;
        zio_t *zio;
        list_t *list;
        dbuf_dirty_record_t *dr;
        blkptr_t *blkptr_copy = kmem_alloc(sizeof (*os->os_rootbp), KM_SLEEP);
        *blkptr_copy = *os->os_rootbp;

        dprintf_ds(os->os_dsl_dataset, "txg=%llu\n", tx->tx_txg);

        ASSERT(dmu_tx_is_syncing(tx));
        /* XXX the write_done callback should really give us the tx... */
        os->os_synctx = tx;

        if (os->os_dsl_dataset == NULL) {
                /*
                 * This is the MOS.  If we have upgraded,
                 * spa_max_replication() could change, so reset
                 * os_copies here.
                 */
                os->os_copies = spa_max_replication(os->os_spa);
        }

        /*
         * Create the root block IO
         */
        SET_BOOKMARK(&zb, os->os_dsl_dataset ?
            os->os_dsl_dataset->ds_object : DMU_META_OBJSET,
            ZB_ROOT_OBJECT, ZB_ROOT_LEVEL, ZB_ROOT_BLKID);
        arc_release(os->os_phys_buf, &os->os_phys_buf);

        dmu_write_policy(os, NULL, 0, 0, &zp);

        zio = arc_write(pio, os->os_spa, tx->tx_txg,
            blkptr_copy, os->os_phys_buf, DMU_OS_IS_L2CACHEABLE(os),
            &zp, dmu_objset_write_ready, NULL, NULL, dmu_objset_write_done,
            os, ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_MUSTSUCCEED, &zb);

        /*
         * Sync special dnodes - the parent IO for the sync is the root block
         */
        DMU_META_DNODE(os)->dn_zio = zio;
        dnode_sync(DMU_META_DNODE(os), tx);

        os->os_phys->os_flags = os->os_flags;

        if (DMU_USERUSED_DNODE(os) &&
            DMU_USERUSED_DNODE(os)->dn_type != DMU_OT_NONE) {
                DMU_USERUSED_DNODE(os)->dn_zio = zio;
                dnode_sync(DMU_USERUSED_DNODE(os), tx);
                DMU_GROUPUSED_DNODE(os)->dn_zio = zio;
                dnode_sync(DMU_GROUPUSED_DNODE(os), tx);
        }

        txgoff = tx->tx_txg & TXG_MASK;

        if (dmu_objset_userused_enabled(os)) {
                /*
                 * We must create the list here because it uses the
                 * dn_dirty_link[] of this txg.  But it may already
                 * exist because we call dsl_dataset_sync() twice per txg.
                 */
                if (os->os_synced_dnodes == NULL) {
                        os->os_synced_dnodes =
                            multilist_create(sizeof (dnode_t),
                            offsetof(dnode_t, dn_dirty_link[txgoff]),
                            dnode_multilist_index_func);
                } else {
                        ASSERT3U(os->os_synced_dnodes->ml_offset, ==,
                            offsetof(dnode_t, dn_dirty_link[txgoff]));
                }
        }

        for (int i = 0;
            i < multilist_get_num_sublists(os->os_dirty_dnodes[txgoff]); i++) {
                sync_dnodes_arg_t *sda = kmem_alloc(sizeof (*sda), KM_SLEEP);
                sda->sda_list = os->os_dirty_dnodes[txgoff];
                sda->sda_sublist_idx = i;
                sda->sda_tx = tx;
                (void) taskq_dispatch(dmu_objset_pool(os)->dp_sync_taskq,
                    sync_dnodes_task, sda, 0);
                /* callback frees sda */
        }
        taskq_wait(dmu_objset_pool(os)->dp_sync_taskq);

        list = &DMU_META_DNODE(os)->dn_dirty_records[txgoff];
        while ((dr = list_head(list)) != NULL) {
                ASSERT0(dr->dr_dbuf->db_level);
                list_remove(list, dr);
                if (dr->dr_zio)
                        zio_nowait(dr->dr_zio);
        }

        /* Enable dnode backfill if enough objects have been freed. */
        if (os->os_freed_dnodes >= dmu_rescan_dnode_threshold) {
                os->os_rescan_dnodes = B_TRUE;
                os->os_freed_dnodes = 0;
        }

        /*
         * Free intent log blocks up to this tx.
         */
        zil_sync(os->os_zil, tx);
        os->os_phys->os_zil_header = os->os_zil_header;
        zio_nowait(zio);
}

boolean_t
dmu_objset_is_dirty(objset_t *os, uint64_t txg)
{
        return (!multilist_is_empty(os->os_dirty_dnodes[txg & TXG_MASK]));
}

static objset_used_cb_t *used_cbs[DMU_OST_NUMTYPES];

void
dmu_objset_register_type(dmu_objset_type_t ost, objset_used_cb_t *cb)
{
        used_cbs[ost] = cb;
}
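
/*
 * Each objset type registers one callback at module init time; for
 * example (assumed, mirroring the ZPL's registration elsewhere in the
 * tree):
 *
 *      dmu_objset_register_type(DMU_OST_ZFS, zfs_space_delta_cb);
 */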

boolean_t
dmu_objset_userused_enabled(objset_t *os)
{
        return (spa_version(os->os_spa) >= SPA_VERSION_USERSPACE &&
            used_cbs[os->os_phys->os_type] != NULL &&
            DMU_USERUSED_DNODE(os) != NULL);
}

typedef struct userquota_node {
        uint64_t uqn_id;
        int64_t uqn_delta;
        avl_node_t uqn_node;
} userquota_node_t;

typedef struct userquota_cache {
        avl_tree_t uqc_user_deltas;
        avl_tree_t uqc_group_deltas;
} userquota_cache_t;

static int
userquota_compare(const void *l, const void *r)
{
        const userquota_node_t *luqn = l;
        const userquota_node_t *ruqn = r;

        if (luqn->uqn_id < ruqn->uqn_id)
                return (-1);
        if (luqn->uqn_id > ruqn->uqn_id)
                return (1);
        return (0);
}

static void
do_userquota_cacheflush(objset_t *os, userquota_cache_t *cache, dmu_tx_t *tx)
{
        void *cookie;
        userquota_node_t *uqn;

        ASSERT(dmu_tx_is_syncing(tx));

        cookie = NULL;
        while ((uqn = avl_destroy_nodes(&cache->uqc_user_deltas,
            &cookie)) != NULL) {
                /*
                 * os_userused_lock protects against concurrent calls to
                 * zap_increment_int().  It's needed because zap_increment_int()
                 * is not thread-safe (i.e. not atomic).
                 */
                mutex_enter(&os->os_userused_lock);
                VERIFY0(zap_increment_int(os, DMU_USERUSED_OBJECT,
                    uqn->uqn_id, uqn->uqn_delta, tx));
                mutex_exit(&os->os_userused_lock);
                kmem_free(uqn, sizeof (*uqn));
        }
        avl_destroy(&cache->uqc_user_deltas);

        cookie = NULL;
        while ((uqn = avl_destroy_nodes(&cache->uqc_group_deltas,
            &cookie)) != NULL) {
                mutex_enter(&os->os_userused_lock);
                VERIFY0(zap_increment_int(os, DMU_GROUPUSED_OBJECT,
                    uqn->uqn_id, uqn->uqn_delta, tx));
                mutex_exit(&os->os_userused_lock);
                kmem_free(uqn, sizeof (*uqn));
        }
        avl_destroy(&cache->uqc_group_deltas);
}

static void
userquota_update_cache(avl_tree_t *avl, uint64_t id, int64_t delta)
{
        userquota_node_t search = { .uqn_id = id };
        avl_index_t idx;

        userquota_node_t *uqn = avl_find(avl, &search, &idx);
        if (uqn == NULL) {
                uqn = kmem_zalloc(sizeof (*uqn), KM_SLEEP);
                uqn->uqn_id = id;
                avl_insert(avl, uqn, idx);
        }
        uqn->uqn_delta += delta;
}

static void
do_userquota_update(userquota_cache_t *cache, uint64_t used, uint64_t flags,
    uint64_t user, uint64_t group, boolean_t subtract)
{
        if ((flags & DNODE_FLAG_USERUSED_ACCOUNTED)) {
                int64_t delta = DNODE_SIZE + used;
                if (subtract)
                        delta = -delta;

                userquota_update_cache(&cache->uqc_user_deltas, user, delta);
                userquota_update_cache(&cache->uqc_group_deltas, group, delta);
        }
}
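
/*
 * For example, freeing an object that accounted for 1024 bytes subtracts
 * DNODE_SIZE + 1024 from both the owning user's and the owning group's
 * cached deltas; do_userquota_cacheflush() later folds those deltas into
 * the on-disk ZAPs.
 */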
1497 
1498 typedef struct userquota_updates_arg {
1499         objset_t *uua_os;
1500         int uua_sublist_idx;
1501         dmu_tx_t *uua_tx;
1502 } userquota_updates_arg_t;
1503 
1504 static void
1505 userquota_updates_task(void *arg)
1506 {
1507         userquota_updates_arg_t *uua = arg;
1508         objset_t *os = uua->uua_os;
1509         dmu_tx_t *tx = uua->uua_tx;
1510         dnode_t *dn;
1511         userquota_cache_t cache = { 0 };
1512 
1513         multilist_sublist_t *list =
1514             multilist_sublist_lock(os->os_synced_dnodes, uua->uua_sublist_idx);
1515 
1516         ASSERT(multilist_sublist_head(list) == NULL ||
1517             dmu_objset_userused_enabled(os));
1518         avl_create(&cache.uqc_user_deltas, userquota_compare,
1519             sizeof (userquota_node_t), offsetof(userquota_node_t, uqn_node));
1520         avl_create(&cache.uqc_group_deltas, userquota_compare,
1521             sizeof (userquota_node_t), offsetof(userquota_node_t, uqn_node));
1522 
1523         while ((dn = multilist_sublist_head(list)) != NULL) {
1524                 int flags;
1525                 ASSERT(!DMU_OBJECT_IS_SPECIAL(dn->dn_object));
1526                 ASSERT(dn->dn_phys->dn_type == DMU_OT_NONE ||
1527                     dn->dn_phys->dn_flags &
1528                     DNODE_FLAG_USERUSED_ACCOUNTED);
1529 
1530                 flags = dn->dn_id_flags;
1531                 ASSERT(flags);
1532                 if (flags & DN_ID_OLD_EXIST)  {
1533                         do_userquota_update(&cache,
1534                             dn->dn_oldused, dn->dn_oldflags,
1535                             dn->dn_olduid, dn->dn_oldgid, B_TRUE);
1536                 }
1537                 if (flags & DN_ID_NEW_EXIST) {
1538                         do_userquota_update(&cache,
1539                             DN_USED_BYTES(dn->dn_phys),
1540                             dn->dn_phys->dn_flags,  dn->dn_newuid,
1541                             dn->dn_newgid, B_FALSE);
1542                 }
1543 
1544                 mutex_enter(&dn->dn_mtx);
1545                 dn->dn_oldused = 0;
1546                 dn->dn_oldflags = 0;
1547                 if (dn->dn_id_flags & DN_ID_NEW_EXIST) {
1548                         dn->dn_olduid = dn->dn_newuid;
1549                         dn->dn_oldgid = dn->dn_newgid;
1550                         dn->dn_id_flags |= DN_ID_OLD_EXIST;
1551                         if (dn->dn_bonuslen == 0)
1552                                 dn->dn_id_flags |= DN_ID_CHKED_SPILL;
1553                         else
1554                                 dn->dn_id_flags |= DN_ID_CHKED_BONUS;
1555                 }
1556                 dn->dn_id_flags &= ~(DN_ID_NEW_EXIST);
1557                 mutex_exit(&dn->dn_mtx);
1558 
1559                 multilist_sublist_remove(list, dn);
1560                 dnode_rele(dn, os->os_synced_dnodes);
1561         }
1562         do_userquota_cacheflush(os, &cache, tx);
1563         multilist_sublist_unlock(list);
1564         kmem_free(uua, sizeof (*uua));
1565 }
1566 
1567 void
1568 dmu_objset_do_userquota_updates(objset_t *os, dmu_tx_t *tx)
1569 {
1570         if (!dmu_objset_userused_enabled(os))
1571                 return;
1572 
1573         /* Allocate the user/groupused objects if necessary. */
1574         if (DMU_USERUSED_DNODE(os)->dn_type == DMU_OT_NONE) {
1575                 VERIFY0(zap_create_claim(os,
1576                     DMU_USERUSED_OBJECT,
1577                     DMU_OT_USERGROUP_USED, DMU_OT_NONE, 0, tx));
1578                 VERIFY0(zap_create_claim(os,
1579                     DMU_GROUPUSED_OBJECT,
1580                     DMU_OT_USERGROUP_USED, DMU_OT_NONE, 0, tx));
1581         }
1582 
1583         for (int i = 0;
1584             i < multilist_get_num_sublists(os->os_synced_dnodes); i++) {
1585                 userquota_updates_arg_t *uua =
1586                     kmem_alloc(sizeof (*uua), KM_SLEEP);
1587                 uua->uua_os = os;
1588                 uua->uua_sublist_idx = i;
1589                 uua->uua_tx = tx;
1590                 /* note: caller does taskq_wait() */
1591                 (void) taskq_dispatch(dmu_objset_pool(os)->dp_sync_taskq,
1592                     userquota_updates_task, uua, 0);
1593                 /* callback frees uua */
1594         }
1595 }
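
/*
 * Hedged usage sketch: as noted above, the updates run asynchronously on
 * dp_sync_taskq and the caller is responsible for draining the taskq
 * before depending on the results, roughly:
 *
 *	dmu_objset_do_userquota_updates(os, tx);
 *	taskq_wait(dmu_objset_pool(os)->dp_sync_taskq);
 */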
1596 
/*
 * Returns a pointer to the data from which to find the uid/gid.
 *
 * If a dirty record for the transaction group that is syncing can't
 * be found, NULL is returned.  In the NULL case it is assumed that
 * the uid/gid aren't changing.
 */
1604 static void *
1605 dmu_objset_userquota_find_data(dmu_buf_impl_t *db, dmu_tx_t *tx)
1606 {
1607         dbuf_dirty_record_t *dr, **drp;
1608         void *data;
1609 
1610         if (db->db_dirtycnt == 0)
1611                 return (db->db.db_data);  /* Nothing is changing */
1612 
1613         for (drp = &db->db_last_dirty; (dr = *drp) != NULL; drp = &dr->dr_next)
1614                 if (dr->dr_txg == tx->tx_txg)
1615                         break;
1616 
1617         if (dr == NULL) {
1618                 data = NULL;
1619         } else {
1620                 dnode_t *dn;
1621 
1622                 DB_DNODE_ENTER(dr->dr_dbuf);
1623                 dn = DB_DNODE(dr->dr_dbuf);
1624 
1625                 if (dn->dn_bonuslen == 0 &&
1626                     dr->dr_dbuf->db_blkid == DMU_SPILL_BLKID)
1627                         data = dr->dt.dl.dr_data->b_data;
1628                 else
1629                         data = dr->dt.dl.dr_data;
1630 
1631                 DB_DNODE_EXIT(dr->dr_dbuf);
1632         }
1633 
1634         return (data);
1635 }
1636 
1637 void
1638 dmu_objset_userquota_get_ids(dnode_t *dn, boolean_t before, dmu_tx_t *tx)
1639 {
1640         objset_t *os = dn->dn_objset;
1641         void *data = NULL;
1642         dmu_buf_impl_t *db = NULL;
1643         uint64_t *user = NULL;
1644         uint64_t *group = NULL;
1645         int flags = dn->dn_id_flags;
1646         int error;
1647         boolean_t have_spill = B_FALSE;
1648 
1649         if (!dmu_objset_userused_enabled(dn->dn_objset))
1650                 return;
1651 
1652         if (before && (flags & (DN_ID_CHKED_BONUS|DN_ID_OLD_EXIST|
1653             DN_ID_CHKED_SPILL)))
1654                 return;
1655 
1656         if (before && dn->dn_bonuslen != 0)
1657                 data = DN_BONUS(dn->dn_phys);
1658         else if (!before && dn->dn_bonuslen != 0) {
1659                 if (dn->dn_bonus) {
1660                         db = dn->dn_bonus;
1661                         mutex_enter(&db->db_mtx);
1662                         data = dmu_objset_userquota_find_data(db, tx);
1663                 } else {
1664                         data = DN_BONUS(dn->dn_phys);
1665                 }
	} else if (dn->dn_bonuslen == 0 && dn->dn_bonustype == DMU_OT_SA) {
		int rf = 0;

		if (RW_WRITE_HELD(&dn->dn_struct_rwlock))
			rf |= DB_RF_HAVESTRUCT;
		error = dmu_spill_hold_by_dnode(dn,
		    rf | DB_RF_MUST_SUCCEED,
		    FTAG, (dmu_buf_t **)&db);
		ASSERT(error == 0);
		mutex_enter(&db->db_mtx);
		data = (before) ? db->db.db_data :
		    dmu_objset_userquota_find_data(db, tx);
		have_spill = B_TRUE;
1679         } else {
1680                 mutex_enter(&dn->dn_mtx);
1681                 dn->dn_id_flags |= DN_ID_CHKED_BONUS;
1682                 mutex_exit(&dn->dn_mtx);
1683                 return;
1684         }
1685 
1686         if (before) {
1687                 ASSERT(data);
1688                 user = &dn->dn_olduid;
1689                 group = &dn->dn_oldgid;
1690         } else if (data) {
1691                 user = &dn->dn_newuid;
1692                 group = &dn->dn_newgid;
1693         }
1694 
	/*
	 * We must always call the callback in case the object type has
	 * changed and that type isn't an object type we track.
	 */
1699         error = used_cbs[os->os_phys->os_type](dn->dn_bonustype, data,
1700             user, group);
1701 
	/*
	 * Preserve the existing uid/gid when the callback can't determine
	 * what the new uid/gid are and the callback returned EEXIST.
	 * The EEXIST error tells us to just use the existing uid/gid.
	 * If we don't know what the old values are then just assign
	 * them to 0, since a new file is being created.
	 */
1709         if (!before && data == NULL && error == EEXIST) {
1710                 if (flags & DN_ID_OLD_EXIST) {
1711                         dn->dn_newuid = dn->dn_olduid;
1712                         dn->dn_newgid = dn->dn_oldgid;
1713                 } else {
1714                         dn->dn_newuid = 0;
1715                         dn->dn_newgid = 0;
1716                 }
1717                 error = 0;
1718         }
1719 
1720         if (db)
1721                 mutex_exit(&db->db_mtx);
1722 
1723         mutex_enter(&dn->dn_mtx);
1724         if (error == 0 && before)
1725                 dn->dn_id_flags |= DN_ID_OLD_EXIST;
1726         if (error == 0 && !before)
1727                 dn->dn_id_flags |= DN_ID_NEW_EXIST;
1728 
1729         if (have_spill) {
1730                 dn->dn_id_flags |= DN_ID_CHKED_SPILL;
1731         } else {
1732                 dn->dn_id_flags |= DN_ID_CHKED_BONUS;
1733         }
1734         mutex_exit(&dn->dn_mtx);
1735         if (have_spill)
1736                 dmu_buf_rele((dmu_buf_t *)db, FTAG);
1737 }
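
/*
 * Sketch of the used_cbs[] contract relied on above (the callback name and
 * decoding are hypothetical; the real callbacks are registered by the
 * objset consumer, e.g. the ZPL).  The callback derives the ids from the
 * bonus/spill data; returning EEXIST means "can't tell, keep the existing
 * uid/gid", which the caller handles above:
 *
 *	static int
 *	example_space_delta_cb(dmu_object_type_t bonustype, void *data,
 *	    uint64_t *userp, uint64_t *groupp)
 *	{
 *		if (bonustype != DMU_OT_ZNODE && bonustype != DMU_OT_SA)
 *			return (SET_ERROR(ENOENT));
 *		if (data == NULL)
 *			return (SET_ERROR(EEXIST));
 *		// ...decode uid/gid from data into *userp / *groupp...
 *		return (0);
 *	}
 */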
1738 
1739 boolean_t
1740 dmu_objset_userspace_present(objset_t *os)
1741 {
1742         return (os->os_phys->os_flags &
1743             OBJSET_FLAG_USERACCOUNTING_COMPLETE);
1744 }
1745 
1746 int
1747 dmu_objset_userspace_upgrade(objset_t *os)
1748 {
1749         uint64_t obj;
1750         int err = 0;
1751 
1752         if (dmu_objset_userspace_present(os))
1753                 return (0);
1754         if (!dmu_objset_userused_enabled(os))
1755                 return (SET_ERROR(ENOTSUP));
1756         if (dmu_objset_is_snapshot(os))
1757                 return (SET_ERROR(EINVAL));
1758 
	/*
	 * We simply need to mark every object dirty, so that it will be
	 * synced out and accounted.  If this is called concurrently, or
	 * if we already did some work before crashing, that's fine, since
	 * we track each object's accounted state independently.
	 */
1766 
1767         for (obj = 0; err == 0; err = dmu_object_next(os, &obj, FALSE, 0)) {
1768                 dmu_tx_t *tx;
1769                 dmu_buf_t *db;
1770                 int objerr;
1771 
1772                 if (issig(JUSTLOOKING) && issig(FORREAL))
1773                         return (SET_ERROR(EINTR));
1774 
1775                 objerr = dmu_bonus_hold(os, obj, FTAG, &db);
1776                 if (objerr != 0)
1777                         continue;
1778                 tx = dmu_tx_create(os);
1779                 dmu_tx_hold_bonus(tx, obj);
1780                 objerr = dmu_tx_assign(tx, TXG_WAIT);
1781                 if (objerr != 0) {
1782                         dmu_tx_abort(tx);
1783                         continue;
1784                 }
1785                 dmu_buf_will_dirty(db, tx);
1786                 dmu_buf_rele(db, FTAG);
1787                 dmu_tx_commit(tx);
1788         }
1789 
1790         os->os_flags |= OBJSET_FLAG_USERACCOUNTING_COMPLETE;
1791         txg_wait_synced(dmu_objset_pool(os), 0);
1792         return (0);
1793 }
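
/*
 * Hedged usage sketch: callers typically gate the upgrade on the
 * completion flag, e.g.:
 *
 *	if (!dmu_objset_userspace_present(os))
 *		error = dmu_objset_userspace_upgrade(os);
 */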
1794 
1795 void
1796 dmu_objset_space(objset_t *os, uint64_t *refdbytesp, uint64_t *availbytesp,
1797     uint64_t *usedobjsp, uint64_t *availobjsp)
1798 {
1799         dsl_dataset_space(os->os_dsl_dataset, refdbytesp, availbytesp,
1800             usedobjsp, availobjsp);
1801 }
1802 
1803 uint64_t
1804 dmu_objset_fsid_guid(objset_t *os)
1805 {
1806         return (dsl_dataset_fsid_guid(os->os_dsl_dataset));
1807 }
1808 
1809 void
1810 dmu_objset_fast_stat(objset_t *os, dmu_objset_stats_t *stat)
1811 {
1812         stat->dds_type = os->os_phys->os_type;
1813         if (os->os_dsl_dataset)
1814                 dsl_dataset_fast_stat(os->os_dsl_dataset, stat);
1815 }
1816 
1817 void
1818 dmu_objset_stats(objset_t *os, nvlist_t *nv)
1819 {
1820         ASSERT(os->os_dsl_dataset ||
1821             os->os_phys->os_type == DMU_OST_META);
1822 
1823         if (os->os_dsl_dataset != NULL)
1824                 dsl_dataset_stats(os->os_dsl_dataset, nv);
1825 
1826         dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_TYPE,
1827             os->os_phys->os_type);
1828         dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USERACCOUNTING,
1829             dmu_objset_userspace_present(os));
1830 }
1831 
1832 int
1833 dmu_objset_is_snapshot(objset_t *os)
1834 {
1835         if (os->os_dsl_dataset != NULL)
1836                 return (os->os_dsl_dataset->ds_is_snapshot);
1837         else
1838                 return (B_FALSE);
1839 }
1840 
1841 int
1842 dmu_snapshot_realname(objset_t *os, char *name, char *real, int maxlen,
1843     boolean_t *conflict)
1844 {
1845         dsl_dataset_t *ds = os->os_dsl_dataset;
1846         uint64_t ignored;
1847 
1848         if (dsl_dataset_phys(ds)->ds_snapnames_zapobj == 0)
1849                 return (SET_ERROR(ENOENT));
1850 
1851         return (zap_lookup_norm(ds->ds_dir->dd_pool->dp_meta_objset,
1852             dsl_dataset_phys(ds)->ds_snapnames_zapobj, name, 8, 1, &ignored,
1853             MT_NORMALIZE, real, maxlen, conflict));
1854 }
1855 
1856 int
1857 dmu_snapshot_list_next(objset_t *os, int namelen, char *name,
1858     uint64_t *idp, uint64_t *offp, boolean_t *case_conflict)
1859 {
1860         dsl_dataset_t *ds = os->os_dsl_dataset;
1861         zap_cursor_t cursor;
1862         zap_attribute_t attr;
1863 
1864         ASSERT(dsl_pool_config_held(dmu_objset_pool(os)));
1865 
1866         if (dsl_dataset_phys(ds)->ds_snapnames_zapobj == 0)
1867                 return (SET_ERROR(ENOENT));
1868 
1869         zap_cursor_init_serialized(&cursor,
1870             ds->ds_dir->dd_pool->dp_meta_objset,
1871             dsl_dataset_phys(ds)->ds_snapnames_zapobj, *offp);
1872 
1873         if (zap_cursor_retrieve(&cursor, &attr) != 0) {
1874                 zap_cursor_fini(&cursor);
1875                 return (SET_ERROR(ENOENT));
1876         }
1877 
1878         if (strlen(attr.za_name) + 1 > namelen) {
1879                 zap_cursor_fini(&cursor);
1880                 return (SET_ERROR(ENAMETOOLONG));
1881         }
1882 
1883         (void) strcpy(name, attr.za_name);
1884         if (idp)
1885                 *idp = attr.za_first_integer;
1886         if (case_conflict)
1887                 *case_conflict = attr.za_normalization_conflict;
1888         zap_cursor_advance(&cursor);
1889         *offp = zap_cursor_serialize(&cursor);
1890         zap_cursor_fini(&cursor);
1891 
1892         return (0);
1893 }
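
/*
 * Hedged usage sketch: callers drive the iteration with the serialized
 * cursor cookie, starting at 0, until ENOENT (the dsl_pool config lock
 * must be held, per the ASSERT above):
 *
 *	uint64_t cookie = 0;
 *	char snapname[ZFS_MAX_DATASET_NAME_LEN];
 *
 *	while (dmu_snapshot_list_next(os, sizeof (snapname), snapname,
 *	    NULL, &cookie, NULL) == 0) {
 *		// ...use snapname...
 *	}
 *
 * dmu_dir_list_next() below follows the same cookie convention for
 * child filesystems.
 */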
1894 
1895 int
1896 dmu_dir_list_next(objset_t *os, int namelen, char *name,
1897     uint64_t *idp, uint64_t *offp)
1898 {
1899         dsl_dir_t *dd = os->os_dsl_dataset->ds_dir;
1900         zap_cursor_t cursor;
1901         zap_attribute_t attr;
1902 
1903         /* there is no next dir on a snapshot! */
1904         if (os->os_dsl_dataset->ds_object !=
1905             dsl_dir_phys(dd)->dd_head_dataset_obj)
1906                 return (SET_ERROR(ENOENT));
1907 
1908         zap_cursor_init_serialized(&cursor,
1909             dd->dd_pool->dp_meta_objset,
1910             dsl_dir_phys(dd)->dd_child_dir_zapobj, *offp);
1911 
1912         if (zap_cursor_retrieve(&cursor, &attr) != 0) {
1913                 zap_cursor_fini(&cursor);
1914                 return (SET_ERROR(ENOENT));
1915         }
1916 
1917         if (strlen(attr.za_name) + 1 > namelen) {
1918                 zap_cursor_fini(&cursor);
1919                 return (SET_ERROR(ENAMETOOLONG));
1920         }
1921 
1922         (void) strcpy(name, attr.za_name);
1923         if (idp)
1924                 *idp = attr.za_first_integer;
1925         zap_cursor_advance(&cursor);
1926         *offp = zap_cursor_serialize(&cursor);
1927         zap_cursor_fini(&cursor);
1928 
1929         return (0);
1930 }
1931 
1932 typedef struct dmu_objset_find_ctx {
1933         taskq_t         *dc_tq;
1934         dsl_pool_t      *dc_dp;
1935         uint64_t        dc_ddobj;
1936         char            *dc_ddname; /* last component of ddobj's name */
1937         int             (*dc_func)(dsl_pool_t *, dsl_dataset_t *, void *);
1938         void            *dc_arg;
1939         int             dc_flags;
1940         kmutex_t        *dc_error_lock;
1941         int             *dc_error;
1942 } dmu_objset_find_ctx_t;
1943 
1944 static void
1945 dmu_objset_find_dp_impl(dmu_objset_find_ctx_t *dcp)
1946 {
1947         dsl_pool_t *dp = dcp->dc_dp;
1948         dsl_dir_t *dd;
1949         dsl_dataset_t *ds;
1950         zap_cursor_t zc;
1951         zap_attribute_t *attr;
1952         uint64_t thisobj;
1953         int err = 0;
1954 
1955         /* don't process if there already was an error */
1956         if (*dcp->dc_error != 0)
1957                 goto out;
1958 
1959         /*
1960          * Note: passing the name (dc_ddname) here is optional, but it
1961          * improves performance because we don't need to call
1962          * zap_value_search() to determine the name.
1963          */
1964         err = dsl_dir_hold_obj(dp, dcp->dc_ddobj, dcp->dc_ddname, FTAG, &dd);
1965         if (err != 0)
1966                 goto out;
1967 
1968         /* Don't visit hidden ($MOS & $ORIGIN) objsets. */
1969         if (dd->dd_myname[0] == '$') {
1970                 dsl_dir_rele(dd, FTAG);
1971                 goto out;
1972         }
1973 
1974         thisobj = dsl_dir_phys(dd)->dd_head_dataset_obj;
1975         attr = kmem_alloc(sizeof (zap_attribute_t), KM_SLEEP);
1976 
1977         /*
1978          * Iterate over all children.
1979          */
1980         if (dcp->dc_flags & DS_FIND_CHILDREN) {
1981                 for (zap_cursor_init(&zc, dp->dp_meta_objset,
1982                     dsl_dir_phys(dd)->dd_child_dir_zapobj);
1983                     zap_cursor_retrieve(&zc, attr) == 0;
1984                     (void) zap_cursor_advance(&zc)) {
1985                         ASSERT3U(attr->za_integer_length, ==,
1986                             sizeof (uint64_t));
1987                         ASSERT3U(attr->za_num_integers, ==, 1);
1988 
1989                         dmu_objset_find_ctx_t *child_dcp =
1990                             kmem_alloc(sizeof (*child_dcp), KM_SLEEP);
1991                         *child_dcp = *dcp;
1992                         child_dcp->dc_ddobj = attr->za_first_integer;
1993                         child_dcp->dc_ddname = spa_strdup(attr->za_name);
1994                         if (dcp->dc_tq != NULL)
1995                                 (void) taskq_dispatch(dcp->dc_tq,
1996                                     dmu_objset_find_dp_cb, child_dcp, TQ_SLEEP);
1997                         else
1998                                 dmu_objset_find_dp_impl(child_dcp);
1999                 }
2000                 zap_cursor_fini(&zc);
2001         }
2002 
2003         /*
2004          * Iterate over all snapshots.
2005          */
2006         if (dcp->dc_flags & DS_FIND_SNAPSHOTS) {
2007                 dsl_dataset_t *ds;
2008                 err = dsl_dataset_hold_obj(dp, thisobj, FTAG, &ds);
2009 
2010                 if (err == 0) {
2011                         uint64_t snapobj;
2012 
2013                         snapobj = dsl_dataset_phys(ds)->ds_snapnames_zapobj;
2014                         dsl_dataset_rele(ds, FTAG);
2015 
2016                         for (zap_cursor_init(&zc, dp->dp_meta_objset, snapobj);
2017                             zap_cursor_retrieve(&zc, attr) == 0;
2018                             (void) zap_cursor_advance(&zc)) {
2019                                 ASSERT3U(attr->za_integer_length, ==,
2020                                     sizeof (uint64_t));
2021                                 ASSERT3U(attr->za_num_integers, ==, 1);
2022 
2023                                 err = dsl_dataset_hold_obj(dp,
2024                                     attr->za_first_integer, FTAG, &ds);
2025                                 if (err != 0)
2026                                         break;
2027                                 err = dcp->dc_func(dp, ds, dcp->dc_arg);
2028                                 dsl_dataset_rele(ds, FTAG);
2029                                 if (err != 0)
2030                                         break;
2031                         }
2032                         zap_cursor_fini(&zc);
2033                 }
2034         }
2035 
2036         kmem_free(attr, sizeof (zap_attribute_t));
2037 
2038         if (err != 0) {
2039                 dsl_dir_rele(dd, FTAG);
2040                 goto out;
2041         }
2042 
2043         /*
2044          * Apply to self.
2045          */
2046         err = dsl_dataset_hold_obj(dp, thisobj, FTAG, &ds);
2047 
2048         /*
2049          * Note: we hold the dir while calling dsl_dataset_hold_obj() so
2050          * that the dir will remain cached, and we won't have to re-instantiate
2051          * it (which could be expensive due to finding its name via
2052          * zap_value_search()).
2053          */
2054         dsl_dir_rele(dd, FTAG);
2055         if (err != 0)
2056                 goto out;
2057         err = dcp->dc_func(dp, ds, dcp->dc_arg);
2058         dsl_dataset_rele(ds, FTAG);
2059 
2060 out:
2061         if (err != 0) {
2062                 mutex_enter(dcp->dc_error_lock);
2063                 /* only keep first error */
2064                 if (*dcp->dc_error == 0)
2065                         *dcp->dc_error = err;
2066                 mutex_exit(dcp->dc_error_lock);
2067         }
2068 
2069         if (dcp->dc_ddname != NULL)
2070                 spa_strfree(dcp->dc_ddname);
2071         kmem_free(dcp, sizeof (*dcp));
2072 }
2073 
2074 static void
2075 dmu_objset_find_dp_cb(void *arg)
2076 {
2077         dmu_objset_find_ctx_t *dcp = arg;
2078         dsl_pool_t *dp = dcp->dc_dp;
2079 
	/*
	 * We need to take the pool_config lock here, as there are several
	 * asserts of pool_config_held down the stack.  Taking the lock via
	 * dsl_pool_config_enter is risky, as it might be stalled by a
	 * pending writer.  This would deadlock, as the write lock can
	 * only be granted when our parent thread gives up the lock.
	 * The _prio interface gives us priority over a pending writer.
	 */
2088         dsl_pool_config_enter_prio(dp, FTAG);
2089 
2090         dmu_objset_find_dp_impl(dcp);
2091 
2092         dsl_pool_config_exit(dp, FTAG);
2093 }
2094 
2095 /*
2096  * Find objsets under and including ddobj, call func(ds) on each.
2097  * The order for the enumeration is completely undefined.
2098  * func is called with dsl_pool_config held.
2099  */
2100 int
2101 dmu_objset_find_dp(dsl_pool_t *dp, uint64_t ddobj,
2102     int func(dsl_pool_t *, dsl_dataset_t *, void *), void *arg, int flags)
2103 {
2104         int error = 0;
2105         taskq_t *tq = NULL;
2106         int ntasks;
2107         dmu_objset_find_ctx_t *dcp;
2108         kmutex_t err_lock;
2109 
2110         mutex_init(&err_lock, NULL, MUTEX_DEFAULT, NULL);
2111         dcp = kmem_alloc(sizeof (*dcp), KM_SLEEP);
2112         dcp->dc_tq = NULL;
2113         dcp->dc_dp = dp;
2114         dcp->dc_ddobj = ddobj;
2115         dcp->dc_ddname = NULL;
2116         dcp->dc_func = func;
2117         dcp->dc_arg = arg;
2118         dcp->dc_flags = flags;
2119         dcp->dc_error_lock = &err_lock;
2120         dcp->dc_error = &error;
2121 
	if ((flags & DS_FIND_SERIALIZE) || dsl_pool_config_held_writer(dp)) {
		/*
		 * If a write lock is held we can't make use of
		 * parallelism, because down the stack the worker threads
		 * assert the lock via dsl_pool_config_held.  With a read
		 * lock this is solved by taking a read lock in each
		 * worker thread, which isn't possible with a writer lock.
		 * So we fall back to the synchronous path here.
		 * In the future it might be possible to get some magic
		 * into dsl_pool_config_held so that it returns true for
		 * the worker threads, allowing a single lock held from
		 * this thread to suffice.  For now, stay single threaded.
		 */
2136                 dmu_objset_find_dp_impl(dcp);
2137                 mutex_destroy(&err_lock);
2138 
2139                 return (error);
2140         }
2141 
2142         ntasks = dmu_find_threads;
2143         if (ntasks == 0)
2144                 ntasks = vdev_count_leaves(dp->dp_spa) * 4;
2145         tq = taskq_create("dmu_objset_find", ntasks, minclsyspri, ntasks,
2146             INT_MAX, 0);
2147         if (tq == NULL) {
2148                 kmem_free(dcp, sizeof (*dcp));
2149                 mutex_destroy(&err_lock);
2150 
2151                 return (SET_ERROR(ENOMEM));
2152         }
2153         dcp->dc_tq = tq;
2154 
2155         /* dcp will be freed by task */
2156         (void) taskq_dispatch(tq, dmu_objset_find_dp_cb, dcp, TQ_SLEEP);
2157 
	/*
	 * PORTING: this code relies on the property of taskq_wait to wait
	 * until no more tasks are queued and no more tasks are active. As
	 * we always queue new tasks from within other tasks, taskq_wait
	 * reliably waits for the full recursion to finish, even though we
	 * enqueue new tasks after taskq_wait has been called.
	 * On platforms other than illumos, taskq_wait may not have this
	 * property.
	 */
2167         taskq_wait(tq);
2168         taskq_destroy(tq);
2169         mutex_destroy(&err_lock);
2170 
2171         return (error);
2172 }
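
/*
 * Hedged usage sketch for dmu_objset_find_dp(): the callback runs with
 * the pool config lock held and is handed a held dataset, so it should
 * do its work and return without releasing either.  A hypothetical
 * counting callback:
 *
 *	static int
 *	count_cb(dsl_pool_t *dp, dsl_dataset_t *ds, void *arg)
 *	{
 *		(*(uint64_t *)arg)++;
 *		return (0);
 *	}
 *
 *	uint64_t count = 0;
 *	error = dmu_objset_find_dp(dp, ddobj, count_cb, &count,
 *	    DS_FIND_CHILDREN | DS_FIND_SNAPSHOTS);
 */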
2173 
2174 /*
2175  * Find all objsets under name, and for each, call 'func(child_name, arg)'.
2176  * The dp_config_rwlock must not be held when this is called, and it
2177  * will not be held when the callback is called.
2178  * Therefore this function should only be used when the pool is not changing
2179  * (e.g. in syncing context), or the callback can deal with the possible races.
2180  */
2181 static int
2182 dmu_objset_find_impl(spa_t *spa, const char *name,
2183     int func(const char *, void *), void *arg, int flags)
2184 {
2185         dsl_dir_t *dd;
2186         dsl_pool_t *dp = spa_get_dsl(spa);
2187         dsl_dataset_t *ds;
2188         zap_cursor_t zc;
2189         zap_attribute_t *attr;
2190         char *child;
2191         uint64_t thisobj;
2192         int err;
2193 
2194         dsl_pool_config_enter(dp, FTAG);
2195 
2196         err = dsl_dir_hold(dp, name, FTAG, &dd, NULL);
2197         if (err != 0) {
2198                 dsl_pool_config_exit(dp, FTAG);
2199                 return (err);
2200         }
2201 
2202         /* Don't visit hidden ($MOS & $ORIGIN) objsets. */
2203         if (dd->dd_myname[0] == '$') {
2204                 dsl_dir_rele(dd, FTAG);
2205                 dsl_pool_config_exit(dp, FTAG);
2206                 return (0);
2207         }
2208 
2209         thisobj = dsl_dir_phys(dd)->dd_head_dataset_obj;
2210         attr = kmem_alloc(sizeof (zap_attribute_t), KM_SLEEP);
2211 
2212         /*
2213          * Iterate over all children.
2214          */
2215         if (flags & DS_FIND_CHILDREN) {
2216                 for (zap_cursor_init(&zc, dp->dp_meta_objset,
2217                     dsl_dir_phys(dd)->dd_child_dir_zapobj);
2218                     zap_cursor_retrieve(&zc, attr) == 0;
2219                     (void) zap_cursor_advance(&zc)) {
2220                         ASSERT3U(attr->za_integer_length, ==,
2221                             sizeof (uint64_t));
2222                         ASSERT3U(attr->za_num_integers, ==, 1);
2223 
2224                         child = kmem_asprintf("%s/%s", name, attr->za_name);
2225                         dsl_pool_config_exit(dp, FTAG);
2226                         err = dmu_objset_find_impl(spa, child,
2227                             func, arg, flags);
2228                         dsl_pool_config_enter(dp, FTAG);
2229                         strfree(child);
2230                         if (err != 0)
2231                                 break;
2232                 }
2233                 zap_cursor_fini(&zc);
2234 
2235                 if (err != 0) {
2236                         dsl_dir_rele(dd, FTAG);
2237                         dsl_pool_config_exit(dp, FTAG);
2238                         kmem_free(attr, sizeof (zap_attribute_t));
2239                         return (err);
2240                 }
2241         }
2242 
2243         /*
2244          * Iterate over all snapshots.
2245          */
2246         if (flags & DS_FIND_SNAPSHOTS) {
2247                 err = dsl_dataset_hold_obj(dp, thisobj, FTAG, &ds);
2248 
2249                 if (err == 0) {
2250                         uint64_t snapobj;
2251 
2252                         snapobj = dsl_dataset_phys(ds)->ds_snapnames_zapobj;
2253                         dsl_dataset_rele(ds, FTAG);
2254 
2255                         for (zap_cursor_init(&zc, dp->dp_meta_objset, snapobj);
2256                             zap_cursor_retrieve(&zc, attr) == 0;
2257                             (void) zap_cursor_advance(&zc)) {
2258                                 ASSERT3U(attr->za_integer_length, ==,
2259                                     sizeof (uint64_t));
2260                                 ASSERT3U(attr->za_num_integers, ==, 1);
2261 
2262                                 child = kmem_asprintf("%s@%s",
2263                                     name, attr->za_name);
2264                                 dsl_pool_config_exit(dp, FTAG);
2265                                 err = func(child, arg);
2266                                 dsl_pool_config_enter(dp, FTAG);
2267                                 strfree(child);
2268                                 if (err != 0)
2269                                         break;
2270                         }
2271                         zap_cursor_fini(&zc);
2272                 }
2273         }
2274 
2275         dsl_dir_rele(dd, FTAG);
2276         kmem_free(attr, sizeof (zap_attribute_t));
2277         dsl_pool_config_exit(dp, FTAG);
2278 
2279         if (err != 0)
2280                 return (err);
2281 
2282         /* Apply to self. */
2283         return (func(name, arg));
2284 }
2285 
2286 /*
2287  * See comment above dmu_objset_find_impl().
2288  */
2289 int
2290 dmu_objset_find(char *name, int func(const char *, void *), void *arg,
2291     int flags)
2292 {
2293         spa_t *spa;
2294         int error;
2295 
2296         error = spa_open(name, &spa, FTAG);
2297         if (error != 0)
2298                 return (error);
2299         error = dmu_objset_find_impl(spa, name, func, arg, flags);
2300         spa_close(spa, FTAG);
2301         return (error);
2302 }
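
/*
 * Hedged usage sketch for the name-based variant (callback name is
 * hypothetical); each dataset's full name is passed to the callback,
 * and a nonzero return aborts the walk:
 *
 *	static int
 *	name_cb(const char *name, void *arg)
 *	{
 *		(*(uint64_t *)arg)++;
 *		return (0);
 *	}
 *
 *	uint64_t n = 0;
 *	error = dmu_objset_find("tank", name_cb, &n, DS_FIND_CHILDREN);
 */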
2303 
2304 void
2305 dmu_objset_set_user(objset_t *os, void *user_ptr)
2306 {
2307         ASSERT(MUTEX_HELD(&os->os_user_ptr_lock));
2308         os->os_user_ptr = user_ptr;
2309 }
2310 
2311 void *
2312 dmu_objset_get_user(objset_t *os)
2313 {
2314         ASSERT(MUTEX_HELD(&os->os_user_ptr_lock));
2315         return (os->os_user_ptr);
2316 }
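
/*
 * Hedged usage sketch: both accessors assert os_user_ptr_lock, so a
 * consumer brackets them with that lock ("my_state" is hypothetical):
 *
 *	mutex_enter(&os->os_user_ptr_lock);
 *	dmu_objset_set_user(os, my_state);
 *	mutex_exit(&os->os_user_ptr_lock);
 */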
2317 
/*
 * Determine the name of the filesystem, given the name of a snapshot.
 * buf must be at least ZFS_MAX_DATASET_NAME_LEN bytes.
 */
2322 int
2323 dmu_fsname(const char *snapname, char *buf)
2324 {
2325         char *atp = strchr(snapname, '@');
2326         if (atp == NULL)
2327                 return (SET_ERROR(EINVAL));
2328         if (atp - snapname >= ZFS_MAX_DATASET_NAME_LEN)
2329                 return (SET_ERROR(ENAMETOOLONG));
2330         (void) strlcpy(buf, snapname, atp - snapname + 1);
2331         return (0);
2332 }
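
/*
 * Example: for snapname "tank/home@yesterday", the above copies
 * "tank/home" into buf; the strlcpy() bound of atp - snapname + 1
 * truncates at the '@' while leaving room for the terminating NUL.
 */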
2333 
/*
 * Call this when we think we're going to write/free space in open
 * context, to track the amount of dirty data in the open txg, which is
 * also the amount of memory that cannot be evicted until this txg syncs.
 */
2339 void
2340 dmu_objset_willuse_space(objset_t *os, int64_t space, dmu_tx_t *tx)
2341 {
2342         dsl_dataset_t *ds = os->os_dsl_dataset;
2343         int64_t aspace = spa_get_worst_case_asize(os->os_spa, space);
2344 
2345         if (ds != NULL) {
2346                 dsl_dir_willuse_space(ds->ds_dir, aspace, tx);
2347                 dsl_pool_dirty_space(dmu_tx_pool(tx), space, tx);
2348         }
2349 }
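
/*
 * Hedged usage sketch: a writer in open context passes the logical byte
 * count before dirtying buffers (the function inflates it to a worst-case
 * allocated size internally):
 *
 *	dmu_objset_willuse_space(os, nbytes, tx);
 */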