/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2014 by Delphix. All rights reserved.
 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/dbuf.h>
#include <sys/dnode.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dataset.h>
#include <sys/spa.h>
#include <sys/range_tree.h>
#include <sys/zfeature.h>

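/*
 * Add a level of indirection to the dnode: create a new top-level indirect
 * block, copy the dnode's existing block pointers into it, and re-parent any
 * cached child dbufs onto the new indirect buffer.
 */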
static void
dnode_increase_indirection(dnode_t *dn, dmu_tx_t *tx)
{
        dmu_buf_impl_t *db;
        int txgoff = tx->tx_txg & TXG_MASK;
        int nblkptr = dn->dn_phys->dn_nblkptr;
        int old_toplvl = dn->dn_phys->dn_nlevels - 1;
        int new_level = dn->dn_next_nlevels[txgoff];
        int i;

        rw_enter(&dn->dn_struct_rwlock, RW_WRITER);

        /* this dnode can't be paged out because it's dirty */
        ASSERT(dn->dn_phys->dn_type != DMU_OT_NONE);
        ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock));
        ASSERT(new_level > 1 && dn->dn_phys->dn_nlevels > 0);

        db = dbuf_hold_level(dn, dn->dn_phys->dn_nlevels, 0, FTAG);
        ASSERT(db != NULL);

        dn->dn_phys->dn_nlevels = new_level;
        dprintf("os=%p obj=%llu, increase to %d\n", dn->dn_objset,
            dn->dn_object, dn->dn_phys->dn_nlevels);

        /* check for existing blkptrs in the dnode */
        for (i = 0; i < nblkptr; i++)
                if (!BP_IS_HOLE(&dn->dn_phys->dn_blkptr[i]))
                        break;
        if (i != nblkptr) {
                /* transfer dnode's block pointers to new indirect block */
                (void) dbuf_read(db, NULL, DB_RF_MUST_SUCCEED|DB_RF_HAVESTRUCT);
                ASSERT(db->db.db_data);
                ASSERT(arc_released(db->db_buf));
                ASSERT3U(sizeof (blkptr_t) * nblkptr, <=, db->db.db_size);
                bcopy(dn->dn_phys->dn_blkptr, db->db.db_data,
                    sizeof (blkptr_t) * nblkptr);
                arc_buf_freeze(db->db_buf);
        }

        /* set dbuf's parent pointers to new indirect buf */
        for (i = 0; i < nblkptr; i++) {
                dmu_buf_impl_t *child =
                    dbuf_find(dn->dn_objset, dn->dn_object, old_toplvl, i);

                if (child == NULL)
                        continue;
#ifdef  DEBUG
                DB_DNODE_ENTER(child);
                ASSERT3P(DB_DNODE(child), ==, dn);
                DB_DNODE_EXIT(child);
#endif  /* DEBUG */
                if (child->db_parent && child->db_parent != dn->dn_dbuf) {
                        ASSERT(child->db_parent->db_level == db->db_level);
                        ASSERT(child->db_blkptr !=
                            &dn->dn_phys->dn_blkptr[child->db_blkid]);
                        mutex_exit(&child->db_mtx);
                        continue;
                }
                ASSERT(child->db_parent == NULL ||
                    child->db_parent == dn->dn_dbuf);

                child->db_parent = db;
                dbuf_add_ref(db, child);
                if (db->db.db_data)
                        child->db_blkptr = (blkptr_t *)db->db.db_data + i;
                else
                        child->db_blkptr = NULL;
                dprintf_dbuf_bp(child, child->db_blkptr,
                    "changed db_blkptr to new indirect %s", "");

                mutex_exit(&child->db_mtx);
        }

        bzero(dn->dn_phys->dn_blkptr, sizeof (blkptr_t) * nblkptr);

        dbuf_rele(db, FTAG);

        rw_exit(&dn->dn_struct_rwlock);
}

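/*
 * Free the blocks pointed to by the given array of block pointers and charge
 * the freed space back against the dnode.  Each freed bp is zeroed; if the
 * hole_birth feature is active, the resulting hole keeps its logical size,
 * type, level, and birth txg (see the comment below).
 */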
static void
free_blocks(dnode_t *dn, blkptr_t *bp, int num, dmu_tx_t *tx)
{
        dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset;
        uint64_t bytesfreed = 0;

        dprintf("ds=%p obj=%llx num=%d\n", ds, dn->dn_object, num);

        for (int i = 0; i < num; i++, bp++) {
                if (BP_IS_HOLE(bp))
                        continue;

                bytesfreed += dsl_dataset_block_kill(ds, bp, tx, B_FALSE);
                ASSERT3U(bytesfreed, <=, DN_USED_BYTES(dn->dn_phys));

                /*
                 * Save some useful information on the holes being
                 * punched, including logical size, type, and indirection
                 * level.  Retaining the birth time lets zfs send detect
                 * when holes were punched, reducing the number of free
                 * records it has to transmit.
                 */

                uint64_t lsize = BP_GET_LSIZE(bp);
                dmu_object_type_t type = BP_GET_TYPE(bp);
                uint64_t lvl = BP_GET_LEVEL(bp);

                bzero(bp, sizeof (blkptr_t));

                if (spa_feature_is_active(dn->dn_objset->os_spa,
                    SPA_FEATURE_HOLE_BIRTH)) {
                        BP_SET_LSIZE(bp, lsize);
                        BP_SET_TYPE(bp, type);
                        BP_SET_LEVEL(bp, lvl);
                        BP_SET_BIRTH(bp, dmu_tx_get_txg(tx), 0);
                }
        }
        dnode_diduse_space(dn, -bytesfreed);
}

#ifdef ZFS_DEBUG
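/*
 * Debug-only check: verify that the level-0 blocks covered by [start, end]
 * under this level-1 indirect dbuf have actually been zeroed by the free.
 */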
static void
free_verify(dmu_buf_impl_t *db, uint64_t start, uint64_t end, dmu_tx_t *tx)
{
        int off, num;
        int i, err, epbs;
        uint64_t txg = tx->tx_txg;
        dnode_t *dn;

        DB_DNODE_ENTER(db);
        dn = DB_DNODE(db);
        epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
        off = start - (db->db_blkid * 1<<epbs);
        num = end - start + 1;

        ASSERT3U(off, >=, 0);
        ASSERT3U(num, >=, 0);
        ASSERT3U(db->db_level, >, 0);
        ASSERT3U(db->db.db_size, ==, 1 << dn->dn_phys->dn_indblkshift);
        ASSERT3U(off+num, <=, db->db.db_size >> SPA_BLKPTRSHIFT);
        ASSERT(db->db_blkptr != NULL);

        for (i = off; i < off+num; i++) {
                uint64_t *buf;
                dmu_buf_impl_t *child;
                dbuf_dirty_record_t *dr;
                int j;

                ASSERT(db->db_level == 1);

                rw_enter(&dn->dn_struct_rwlock, RW_READER);
                err = dbuf_hold_impl(dn, db->db_level-1,
                    (db->db_blkid << epbs) + i, TRUE, FTAG, &child);
                rw_exit(&dn->dn_struct_rwlock);
                if (err == ENOENT)
                        continue;
                ASSERT(err == 0);
                ASSERT(child->db_level == 0);
                dr = child->db_last_dirty;
                while (dr && dr->dr_txg > txg)
                        dr = dr->dr_next;
                ASSERT(dr == NULL || dr->dr_txg == txg);

                /* data_old better be zeroed */
                if (dr) {
                        buf = dr->dt.dl.dr_data->b_data;
                        for (j = 0; j < child->db.db_size >> 3; j++) {
                                if (buf[j] != 0) {
                                        panic("freed data not zero: "
                                            "child=%p i=%d off=%d num=%d\n",
                                            (void *)child, i, off, num);
                                }
                        }
                }

                /*
                 * db_data better be zeroed unless it's dirty in a
                 * future txg.
                 */
                mutex_enter(&child->db_mtx);
                buf = child->db.db_data;
                if (buf != NULL && child->db_state != DB_FILL &&
                    child->db_last_dirty == NULL) {
                        for (j = 0; j < child->db.db_size >> 3; j++) {
                                if (buf[j] != 0) {
                                        panic("freed data not zero: "
                                            "child=%p i=%d off=%d num=%d\n",
                                            (void *)child, i, off, num);
                                }
                        }
                }
                mutex_exit(&child->db_mtx);

                dbuf_rele(child, FTAG);
        }
        DB_DNODE_EXIT(db);
}
#endif

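/*
 * Free the portion of the range [blkid, blkid + nblks) that is covered by
 * this indirect dbuf, recursing into lower-level indirect blocks as needed.
 * If every block pointer in this block ends up being a hole, free the
 * indirect block itself as well.
 */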
static void
free_children(dmu_buf_impl_t *db, uint64_t blkid, uint64_t nblks,
    dmu_tx_t *tx)
{
        dnode_t *dn;
        blkptr_t *bp;
        dmu_buf_impl_t *subdb;
        uint64_t start, end, dbstart, dbend, i;
        int epbs, shift;

        /*
         * There is a small possibility that this block will not be cached:
         *   1 - if level > 1 and there are no children with level <= 1
         *   2 - if this block was evicted since we read it from
         *       dmu_tx_hold_free().
         */
        if (db->db_state != DB_CACHED)
                (void) dbuf_read(db, NULL, DB_RF_MUST_SUCCEED);

        dbuf_release_bp(db);
        bp = db->db.db_data;

        DB_DNODE_ENTER(db);
        dn = DB_DNODE(db);
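        /*
         * Determine which of this block's pointers fall within the range
         * being freed, clamping [start, end] to the pointers this
         * indirect block actually covers.
         */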
        epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
        shift = (db->db_level - 1) * epbs;
        dbstart = db->db_blkid << epbs;
        start = blkid >> shift;
        if (dbstart < start) {
                bp += start - dbstart;
        } else {
                start = dbstart;
        }
        dbend = ((db->db_blkid + 1) << epbs) - 1;
        end = (blkid + nblks - 1) >> shift;
        if (dbend <= end)
                end = dbend;

        ASSERT3U(start, <=, end);

        if (db->db_level == 1) {
                FREE_VERIFY(db, start, end, tx);
                free_blocks(dn, bp, end-start+1, tx);
        } else {
                for (i = start; i <= end; i++, bp++) {
                        if (BP_IS_HOLE(bp))
                                continue;
                        rw_enter(&dn->dn_struct_rwlock, RW_READER);
                        VERIFY0(dbuf_hold_impl(dn, db->db_level - 1,
                            i, B_TRUE, FTAG, &subdb));
                        rw_exit(&dn->dn_struct_rwlock);
                        ASSERT3P(bp, ==, subdb->db_blkptr);

                        free_children(subdb, blkid, nblks, tx);
                        dbuf_rele(subdb, FTAG);
                }
        }

        /* If this whole block is free, free ourselves too. */
        for (i = 0, bp = db->db.db_data; i < 1 << epbs; i++, bp++) {
                if (!BP_IS_HOLE(bp))
                        break;
        }
        if (i == 1 << epbs) {
                /* didn't find any non-holes */
                bzero(db->db.db_data, db->db.db_size);
                free_blocks(dn, db->db_blkptr, 1, tx);
        } else {
                /*
                 * Partial block free; must be marked dirty so that it
                 * will be written out.
                 */
                ASSERT(db->db_dirtycnt > 0);
        }

        DB_DNODE_EXIT(db);
        arc_buf_freeze(db->db_buf);
}

/*
 * Traverse the indicated range of the provided file
 * and "free" all the blocks contained there.
 */
static void
dnode_sync_free_range_impl(dnode_t *dn, uint64_t blkid, uint64_t nblks,
    dmu_tx_t *tx)
{
        blkptr_t *bp = dn->dn_phys->dn_blkptr;
        int dnlevel = dn->dn_phys->dn_nlevels;
        boolean_t trunc = B_FALSE;

        if (blkid > dn->dn_phys->dn_maxblkid)
                return;

        ASSERT(dn->dn_phys->dn_maxblkid < UINT64_MAX);
        if (blkid + nblks > dn->dn_phys->dn_maxblkid) {
                nblks = dn->dn_phys->dn_maxblkid - blkid + 1;
                trunc = B_TRUE;
        }

        /* There are no indirect blocks in the object */
        if (dnlevel == 1) {
                if (blkid >= dn->dn_phys->dn_nblkptr) {
                        /* this range was never made persistent */
                        return;
                }
                ASSERT3U(blkid + nblks, <=, dn->dn_phys->dn_nblkptr);
                free_blocks(dn, bp + blkid, nblks, tx);
        } else {
                int shift = (dnlevel - 1) *
                    (dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT);
                int start = blkid >> shift;
                int end = (blkid + nblks - 1) >> shift;
                dmu_buf_impl_t *db;

                ASSERT(start < dn->dn_phys->dn_nblkptr);
                bp += start;
                for (int i = start; i <= end; i++, bp++) {
                        if (BP_IS_HOLE(bp))
                                continue;
                        rw_enter(&dn->dn_struct_rwlock, RW_READER);
                        VERIFY0(dbuf_hold_impl(dn, dnlevel - 1, i,
                            TRUE, FTAG, &db));
                        rw_exit(&dn->dn_struct_rwlock);

                        free_children(db, blkid, nblks, tx);
                        dbuf_rele(db, FTAG);
                }
        }

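        /*
         * If the free extended to the end of the object, pull dn_maxblkid
         * back to just before the freed range.  The ASSERT below uses
         * dnode_next_offset() to verify that no allocated blocks remain
         * past the new end of the object.
         */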
        if (trunc) {
                dn->dn_phys->dn_maxblkid = blkid == 0 ? 0 : blkid - 1;

                uint64_t off = (dn->dn_phys->dn_maxblkid + 1) *
                    (dn->dn_phys->dn_datablkszsec << SPA_MINBLOCKSHIFT);
                ASSERT(off < dn->dn_phys->dn_maxblkid ||
                    dn->dn_phys->dn_maxblkid == 0 ||
                    dnode_next_offset(dn, 0, &off, 1, 1, 0) != 0);
        }
}

typedef struct dnode_sync_free_range_arg {
        dnode_t *dsfra_dnode;
        dmu_tx_t *dsfra_tx;
} dnode_sync_free_range_arg_t;

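/*
 * range_tree_vacate() callback: free one range of blocks.  The caller holds
 * dn_mtx, so drop it across the actual free and re-acquire it on the way out.
 */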
static void
dnode_sync_free_range(void *arg, uint64_t blkid, uint64_t nblks)
{
        dnode_sync_free_range_arg_t *dsfra = arg;
        dnode_t *dn = dsfra->dsfra_dnode;

        mutex_exit(&dn->dn_mtx);
        dnode_sync_free_range_impl(dn, blkid, nblks, dsfra->dsfra_tx);
        mutex_enter(&dn->dn_mtx);
}

/*
 * Try to kick all the dnode's dbufs out of the cache...
 */
void
dnode_evict_dbufs(dnode_t *dn)
{
        dmu_buf_impl_t db_marker;
        dmu_buf_impl_t *db, *db_next;

        mutex_enter(&dn->dn_dbufs_mtx);
        for (db = avl_first(&dn->dn_dbufs); db != NULL; db = db_next) {

#ifdef  DEBUG
                DB_DNODE_ENTER(db);
                ASSERT3P(DB_DNODE(db), ==, dn);
                DB_DNODE_EXIT(db);
#endif  /* DEBUG */

                mutex_enter(&db->db_mtx);
                if (db->db_state != DB_EVICTING &&
                    refcount_is_zero(&db->db_holds)) {
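                        /*
                         * Insert a marker before this dbuf so we can find
                         * our place in dn_dbufs again after dbuf_clear()
                         * removes the evicted dbuf from the tree.
                         */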
                        db_marker.db_level = db->db_level;
                        db_marker.db_blkid = db->db_blkid;
                        db_marker.db_state = DB_SEARCH;
                        avl_insert_here(&dn->dn_dbufs, &db_marker, db,
                            AVL_BEFORE);

                        dbuf_clear(db);

                        db_next = AVL_NEXT(&dn->dn_dbufs, &db_marker);
                        avl_remove(&dn->dn_dbufs, &db_marker);
                } else {
                        db->db_pending_evict = TRUE;
                        mutex_exit(&db->db_mtx);
                        db_next = AVL_NEXT(&dn->dn_dbufs, db);
                }
        }
        mutex_exit(&dn->dn_dbufs_mtx);

        dnode_evict_bonus(dn);
}

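/*
 * Evict the dnode's bonus buffer if nothing holds it; otherwise mark it to
 * be evicted once the last hold is released.
 */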
void
dnode_evict_bonus(dnode_t *dn)
{
        rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
        if (dn->dn_bonus != NULL) {
                if (refcount_is_zero(&dn->dn_bonus->db_holds)) {
                        mutex_enter(&dn->dn_bonus->db_mtx);
                        dbuf_evict(dn->dn_bonus);
                        dn->dn_bonus = NULL;
                } else {
                        dn->dn_bonus->db_pending_evict = TRUE;
                }
        }
        rw_exit(&dn->dn_struct_rwlock);
}

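/*
 * Discard all dirty records on the given list, dropping the holds that were
 * taken when the dbufs were dirtied.  Used when the dnode itself is being
 * freed, so none of this dirty data needs to reach disk.
 */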
static void
dnode_undirty_dbufs(list_t *list)
{
        dbuf_dirty_record_t *dr;

        while ((dr = list_head(list)) != NULL) {
                dmu_buf_impl_t *db = dr->dr_dbuf;
                uint64_t txg = dr->dr_txg;

                if (db->db_level != 0)
                        dnode_undirty_dbufs(&dr->dt.di.dr_children);

                mutex_enter(&db->db_mtx);
                /* XXX - use dbuf_undirty()? */
                list_remove(list, dr);
                ASSERT(db->db_last_dirty == dr);
                db->db_last_dirty = NULL;
                db->db_dirtycnt -= 1;
                if (db->db_level == 0) {
                        ASSERT(db->db_blkid == DMU_BONUS_BLKID ||
                            dr->dt.dl.dr_data == db->db_buf);
                        dbuf_unoverride(dr);
                } else {
                        mutex_destroy(&dr->dt.di.dr_mtx);
                        list_destroy(&dr->dt.di.dr_children);
                }
                kmem_free(dr, sizeof (dbuf_dirty_record_t));
                dbuf_rele_and_unlock(db, (void *)(uintptr_t)txg);
        }
}

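/*
 * Called from dnode_sync() when the object is being freed in this txg:
 * discard any remaining dirty state, evict the dnode's dbufs, and zero the
 * on-disk dnode so the object number can be reused.
 */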
static void
dnode_sync_free(dnode_t *dn, dmu_tx_t *tx)
{
        int txgoff = tx->tx_txg & TXG_MASK;

        ASSERT(dmu_tx_is_syncing(tx));

        /*
         * Our contents should have been freed in dnode_sync() by the
         * free range record inserted by the caller of dnode_free().
         */
        ASSERT0(DN_USED_BYTES(dn->dn_phys));
        ASSERT(BP_IS_HOLE(dn->dn_phys->dn_blkptr));

        dnode_undirty_dbufs(&dn->dn_dirty_records[txgoff]);
        dnode_evict_dbufs(dn);

        /*
         * XXX - It would be nice to assert this, but we may still
         * have residual holds from async evictions from the arc...
         *
         * zfs_obj_to_path() also depends on this being
         * commented out.
         *
         * ASSERT3U(refcount_count(&dn->dn_holds), ==, 1);
         */

        /* Undirty next bits */
        dn->dn_next_nlevels[txgoff] = 0;
        dn->dn_next_indblkshift[txgoff] = 0;
        dn->dn_next_blksz[txgoff] = 0;

        /* ASSERT(blkptrs are zero); */
        ASSERT(dn->dn_phys->dn_type != DMU_OT_NONE);
        ASSERT(dn->dn_type != DMU_OT_NONE);

        ASSERT(dn->dn_free_txg > 0);
        if (dn->dn_allocated_txg != dn->dn_free_txg)
                dmu_buf_will_dirty(&dn->dn_dbuf->db, tx);
        bzero(dn->dn_phys, sizeof (dnode_phys_t));

        mutex_enter(&dn->dn_mtx);
        dn->dn_type = DMU_OT_NONE;
        dn->dn_maxblkid = 0;
        dn->dn_allocated_txg = 0;
        dn->dn_free_txg = 0;
        dn->dn_have_spill = B_FALSE;
        mutex_exit(&dn->dn_mtx);

        ASSERT(dn->dn_object != DMU_META_DNODE_OBJECT);

        dnode_rele(dn, (void *)(uintptr_t)tx->tx_txg);
        /*
         * Now that we've released our hold, the dnode may
         * be evicted, so we mustn't access it.
         */
}

/*
 * Write out the dnode's dirty buffers.
 */
void
dnode_sync(dnode_t *dn, dmu_tx_t *tx)
{
        dnode_phys_t *dnp = dn->dn_phys;
        int txgoff = tx->tx_txg & TXG_MASK;
        list_t *list = &dn->dn_dirty_records[txgoff];
        static const dnode_phys_t zerodn = { 0 };
        boolean_t kill_spill = B_FALSE;

        ASSERT(dmu_tx_is_syncing(tx));
        ASSERT(dnp->dn_type != DMU_OT_NONE || dn->dn_allocated_txg);
        ASSERT(dnp->dn_type != DMU_OT_NONE ||
            bcmp(dnp, &zerodn, DNODE_SIZE) == 0);
        DNODE_VERIFY(dn);

        ASSERT(dn->dn_dbuf == NULL || arc_released(dn->dn_dbuf->db_buf));

        if (dmu_objset_userused_enabled(dn->dn_objset) &&
            !DMU_OBJECT_IS_SPECIAL(dn->dn_object)) {
                mutex_enter(&dn->dn_mtx);
                dn->dn_oldused = DN_USED_BYTES(dn->dn_phys);
                dn->dn_oldflags = dn->dn_phys->dn_flags;
                dn->dn_phys->dn_flags |= DNODE_FLAG_USERUSED_ACCOUNTED;
                mutex_exit(&dn->dn_mtx);
                dmu_objset_userquota_get_ids(dn, B_FALSE, tx);
        } else {
                /* Once we account for it, we should always account for it. */
                ASSERT(!(dn->dn_phys->dn_flags &
                    DNODE_FLAG_USERUSED_ACCOUNTED));
        }

        mutex_enter(&dn->dn_mtx);
        if (dn->dn_allocated_txg == tx->tx_txg) {
                /* The dnode is newly allocated or reallocated */
                if (dnp->dn_type == DMU_OT_NONE) {
                        /* this is a first alloc, not a realloc */
                        dnp->dn_nlevels = 1;
                        dnp->dn_nblkptr = dn->dn_nblkptr;
                }

                dnp->dn_type = dn->dn_type;
                dnp->dn_bonustype = dn->dn_bonustype;
                dnp->dn_bonuslen = dn->dn_bonuslen;
        }
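        /*
         * Sanity check: the first block pointer's logical size should match
         * the data block size for a single-level object, or the indirect
         * block size for a multi-level object.
         */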
        ASSERT(dnp->dn_nlevels > 1 ||
            BP_IS_HOLE(&dnp->dn_blkptr[0]) ||
            BP_IS_EMBEDDED(&dnp->dn_blkptr[0]) ||
            BP_GET_LSIZE(&dnp->dn_blkptr[0]) ==
            dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT);
        ASSERT(dnp->dn_nlevels < 2 ||
            BP_IS_HOLE(&dnp->dn_blkptr[0]) ||
            BP_GET_LSIZE(&dnp->dn_blkptr[0]) == 1 << dnp->dn_indblkshift);

        if (dn->dn_next_type[txgoff] != 0) {
                dnp->dn_type = dn->dn_type;
                dn->dn_next_type[txgoff] = 0;
        }

        if (dn->dn_next_blksz[txgoff] != 0) {
                ASSERT(P2PHASE(dn->dn_next_blksz[txgoff],
                    SPA_MINBLOCKSIZE) == 0);
                ASSERT(BP_IS_HOLE(&dnp->dn_blkptr[0]) ||
                    dn->dn_maxblkid == 0 || list_head(list) != NULL ||
                    dn->dn_next_blksz[txgoff] >> SPA_MINBLOCKSHIFT ==
                    dnp->dn_datablkszsec ||
                    range_tree_space(dn->dn_free_ranges[txgoff]) != 0);
                dnp->dn_datablkszsec =
                    dn->dn_next_blksz[txgoff] >> SPA_MINBLOCKSHIFT;
                dn->dn_next_blksz[txgoff] = 0;
        }

        if (dn->dn_next_bonuslen[txgoff] != 0) {
                if (dn->dn_next_bonuslen[txgoff] == DN_ZERO_BONUSLEN)
                        dnp->dn_bonuslen = 0;
                else
                        dnp->dn_bonuslen = dn->dn_next_bonuslen[txgoff];
                ASSERT(dnp->dn_bonuslen <= DN_MAX_BONUSLEN);
                dn->dn_next_bonuslen[txgoff] = 0;
        }

        if (dn->dn_next_bonustype[txgoff] != 0) {
                ASSERT(DMU_OT_IS_VALID(dn->dn_next_bonustype[txgoff]));
                dnp->dn_bonustype = dn->dn_next_bonustype[txgoff];
                dn->dn_next_bonustype[txgoff] = 0;
        }

        boolean_t freeing_dnode = dn->dn_free_txg > 0 &&
            dn->dn_free_txg <= tx->tx_txg;

        /*
         * Remove the spill block if we have been explicitly asked to
         * remove it, or if the object is being removed.
         */
        if (dn->dn_rm_spillblk[txgoff] || freeing_dnode) {
                if (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR)
                        kill_spill = B_TRUE;
                dn->dn_rm_spillblk[txgoff] = 0;
        }

        if (dn->dn_next_indblkshift[txgoff] != 0) {
                ASSERT(dnp->dn_nlevels == 1);
                dnp->dn_indblkshift = dn->dn_next_indblkshift[txgoff];
                dn->dn_next_indblkshift[txgoff] = 0;
        }

        /*
         * Just take the live (open-context) values for checksum and compress.
         * Strictly speaking it's a future leak, but nothing bad happens if we
         * start using the new checksum or compress algorithm a little early.
         */
        dnp->dn_checksum = dn->dn_checksum;
        dnp->dn_compress = dn->dn_compress;

        mutex_exit(&dn->dn_mtx);

        if (kill_spill) {
                free_blocks(dn, &dn->dn_phys->dn_spill, 1, tx);
                mutex_enter(&dn->dn_mtx);
                dnp->dn_flags &= ~DNODE_FLAG_SPILL_BLKPTR;
                mutex_exit(&dn->dn_mtx);
        }

        /* process all the "freed" ranges in the file */
        if (dn->dn_free_ranges[txgoff] != NULL) {
                dnode_sync_free_range_arg_t dsfra;
                dsfra.dsfra_dnode = dn;
                dsfra.dsfra_tx = tx;
                mutex_enter(&dn->dn_mtx);
                range_tree_vacate(dn->dn_free_ranges[txgoff],
                    dnode_sync_free_range, &dsfra);
                range_tree_destroy(dn->dn_free_ranges[txgoff]);
                dn->dn_free_ranges[txgoff] = NULL;
                mutex_exit(&dn->dn_mtx);
        }

        if (freeing_dnode) {
                dnode_sync_free(dn, tx);
                return;
        }

        if (dn->dn_next_nlevels[txgoff]) {
                dnode_increase_indirection(dn, tx);
                dn->dn_next_nlevels[txgoff] = 0;
        }

        if (dn->dn_next_nblkptr[txgoff]) {
                /* this should only happen on a realloc */
                ASSERT(dn->dn_allocated_txg == tx->tx_txg);
                if (dn->dn_next_nblkptr[txgoff] > dnp->dn_nblkptr) {
                        /* zero the new blkptrs we are gaining */
                        bzero(dnp->dn_blkptr + dnp->dn_nblkptr,
                            sizeof (blkptr_t) *
                            (dn->dn_next_nblkptr[txgoff] - dnp->dn_nblkptr));
#ifdef ZFS_DEBUG
                } else {
                        int i;
                        ASSERT(dn->dn_next_nblkptr[txgoff] < dnp->dn_nblkptr);
                        /* the blkptrs we are losing better be unallocated */
                        for (i = dn->dn_next_nblkptr[txgoff];
                            i < dnp->dn_nblkptr; i++)
                                ASSERT(BP_IS_HOLE(&dnp->dn_blkptr[i]));
#endif
                }
                mutex_enter(&dn->dn_mtx);
                dnp->dn_nblkptr = dn->dn_next_nblkptr[txgoff];
                dn->dn_next_nblkptr[txgoff] = 0;
                mutex_exit(&dn->dn_mtx);
        }

        dbuf_sync_list(list, tx);

        if (!DMU_OBJECT_IS_SPECIAL(dn->dn_object)) {
                ASSERT3P(list_head(list), ==, NULL);
                dnode_rele(dn, (void *)(uintptr_t)tx->tx_txg);
        }

        /*
         * Although we have dropped our reference to the dnode, it
         * can't be evicted until it's written, and we haven't yet
         * initiated the IO for the dnode's dbuf.
         */
}