28 
  29 #include <sys/dmu.h>
  30 #include <sys/dmu_impl.h>
  31 #include <sys/dmu_tx.h>
  32 #include <sys/dbuf.h>
  33 #include <sys/dnode.h>
  34 #include <sys/zfs_context.h>
  35 #include <sys/dmu_objset.h>
  36 #include <sys/dmu_traverse.h>
  37 #include <sys/dsl_dataset.h>
  38 #include <sys/dsl_dir.h>
  39 #include <sys/dsl_pool.h>
  40 #include <sys/dsl_synctask.h>
  41 #include <sys/dsl_prop.h>
  42 #include <sys/dmu_zfetch.h>
  43 #include <sys/zfs_ioctl.h>
  44 #include <sys/zap.h>
  45 #include <sys/zio_checksum.h>
  46 #include <sys/zio_compress.h>
  47 #include <sys/sa.h>
  48 #include <sys/zfeature.h>
  49 #include <sys/abd.h>
  50 #ifdef _KERNEL
  51 #include <sys/vmsystm.h>
  52 #include <sys/zfs_znode.h>
  53 #endif
  54 
  55 /*
  56  * Enable/disable nopwrite feature.
  57  */
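/*
 * When nopwrite takes effect, a rewrite whose data already matches the
 * existing on-disk block (as verified by a strong checksum) skips
 * allocating and writing a new block; see the checks in dmu_write_policy()
 * and the comment in zio_nop_write() for the exact conditions.
 */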
  58 int zfs_nopwrite_enabled = 1;
  59 
  60 /*
  61  * Tunable to control percentage of dirtied blocks from frees in one TXG.
  62  * After this threshold is crossed, additional dirty blocks from frees
  63  * wait until the next TXG.
  64  * A value of zero will disable this throttle.
  65  */
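/*
 * For example, with zfs_dirty_data_max set to 4 GB and the default value
 * of 30, roughly 1.2 GB of frees may be dirtied before
 * dmu_free_long_range_impl() waits for the next TXG to open (illustrative
 * figures only).
 */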
  66 uint32_t zfs_per_txg_dirty_frees_percent = 30;
  67 
  68 /*
  69  * This can be used for testing, to ensure that certain actions happen
  70  * while in the middle of a remap (which might otherwise complete too
  71  * quickly).
  72  */
  73 int zfs_object_remap_one_indirect_delay_ticks = 0;
  74 
  75 const dmu_object_type_info_t dmu_ot[DMU_OT_NUMTYPES] = {
  76         {       DMU_BSWAP_UINT8,        TRUE,   "unallocated"           },
  77         {       DMU_BSWAP_ZAP,          TRUE,   "object directory"      },
  78         {       DMU_BSWAP_UINT64,       TRUE,   "object array"          },
  79         {       DMU_BSWAP_UINT8,        TRUE,   "packed nvlist"         },
  80         {       DMU_BSWAP_UINT64,       TRUE,   "packed nvlist size"    },
  81         {       DMU_BSWAP_UINT64,       TRUE,   "bpobj"                 },
  82         {       DMU_BSWAP_UINT64,       TRUE,   "bpobj header"          },
  83         {       DMU_BSWAP_UINT64,       TRUE,   "SPA space map header"  },
  84         {       DMU_BSWAP_UINT64,       TRUE,   "SPA space map"         },
  85         {       DMU_BSWAP_UINT64,       TRUE,   "ZIL intent log"        },
  86         {       DMU_BSWAP_DNODE,        TRUE,   "DMU dnode"             },
  87         {       DMU_BSWAP_OBJSET,       TRUE,   "DMU objset"            },
  88         {       DMU_BSWAP_UINT64,       TRUE,   "DSL directory"         },
  89         {       DMU_BSWAP_ZAP,          TRUE,   "DSL directory child map"},
  90         {       DMU_BSWAP_ZAP,          TRUE,   "DSL dataset snap map"  },
  91         {       DMU_BSWAP_ZAP,          TRUE,   "DSL props"             },
  92         {       DMU_BSWAP_UINT64,       TRUE,   "DSL dataset"           },
  93         {       DMU_BSWAP_ZNODE,        TRUE,   "ZFS znode"             },
  94         {       DMU_BSWAP_OLDACL,       TRUE,   "ZFS V0 ACL"            },
  95         {       DMU_BSWAP_UINT8,        FALSE,  "ZFS plain file"        },
  96         {       DMU_BSWAP_ZAP,          TRUE,   "ZFS directory"         },
  97         {       DMU_BSWAP_ZAP,          TRUE,   "ZFS master node"       },
  98         {       DMU_BSWAP_ZAP,          TRUE,   "ZFS delete queue"      },
  99         {       DMU_BSWAP_UINT8,        FALSE,  "zvol object"           },
 100         {       DMU_BSWAP_ZAP,          TRUE,   "zvol prop"             },
 101         {       DMU_BSWAP_UINT8,        FALSE,  "other uint8[]"         },
 102         {       DMU_BSWAP_UINT64,       FALSE,  "other uint64[]"        },
 103         {       DMU_BSWAP_ZAP,          TRUE,   "other ZAP"             },
 104         {       DMU_BSWAP_ZAP,          TRUE,   "persistent error log"  },
 105         {       DMU_BSWAP_UINT8,        TRUE,   "SPA history"           },
 106         {       DMU_BSWAP_UINT64,       TRUE,   "SPA history offsets"   },
 107         {       DMU_BSWAP_ZAP,          TRUE,   "Pool properties"       },
 108         {       DMU_BSWAP_ZAP,          TRUE,   "DSL permissions"       },
 109         {       DMU_BSWAP_ACL,          TRUE,   "ZFS ACL"               },
 110         {       DMU_BSWAP_UINT8,        TRUE,   "ZFS SYSACL"            },
 111         {       DMU_BSWAP_UINT8,        TRUE,   "FUID table"            },
 112         {       DMU_BSWAP_UINT64,       TRUE,   "FUID table size"       },
 113         {       DMU_BSWAP_ZAP,          TRUE,   "DSL dataset next clones"},
 114         {       DMU_BSWAP_ZAP,          TRUE,   "scan work queue"       },
 115         {       DMU_BSWAP_ZAP,          TRUE,   "ZFS user/group used"   },
 116         {       DMU_BSWAP_ZAP,          TRUE,   "ZFS user/group quota"  },
 117         {       DMU_BSWAP_ZAP,          TRUE,   "snapshot refcount tags"},
 118         {       DMU_BSWAP_ZAP,          TRUE,   "DDT ZAP algorithm"     },
 119         {       DMU_BSWAP_ZAP,          TRUE,   "DDT statistics"        },
 120         {       DMU_BSWAP_UINT8,        TRUE,   "System attributes"     },
 121         {       DMU_BSWAP_ZAP,          TRUE,   "SA master node"        },
 122         {       DMU_BSWAP_ZAP,          TRUE,   "SA attr registration"  },
 123         {       DMU_BSWAP_ZAP,          TRUE,   "SA attr layouts"       },
 124         {       DMU_BSWAP_ZAP,          TRUE,   "scan translations"     },
 125         {       DMU_BSWAP_UINT8,        FALSE,  "deduplicated block"    },
 126         {       DMU_BSWAP_ZAP,          TRUE,   "DSL deadlist map"      },
 127         {       DMU_BSWAP_UINT64,       TRUE,   "DSL deadlist map hdr"  },
 128         {       DMU_BSWAP_ZAP,          TRUE,   "DSL dir clones"        },
 129         {       DMU_BSWAP_UINT64,       TRUE,   "bpobj subobj"          }
 130 };
 131 
 132 const dmu_object_byteswap_info_t dmu_ot_byteswap[DMU_BSWAP_NUMFUNCS] = {
 133         {       byteswap_uint8_array,   "uint8"         },
 134         {       byteswap_uint16_array,  "uint16"        },
 135         {       byteswap_uint32_array,  "uint32"        },
 136         {       byteswap_uint64_array,  "uint64"        },
 137         {       zap_byteswap,           "zap"           },
 138         {       dnode_buf_byteswap,     "dnode"         },
 139         {       dmu_objset_byteswap,    "objset"        },
 140         {       zfs_znode_byteswap,     "znode"         },
 141         {       zfs_oldacl_byteswap,    "oldacl"        },
 142         {       zfs_acl_byteswap,       "acl"           }
 143 };
 144 
 145 int
 146 dmu_buf_hold_noread_by_dnode(dnode_t *dn, uint64_t offset,
 147     void *tag, dmu_buf_t **dbp)
 148 {
 149         uint64_t blkid;
 
 695                 err = dnode_next_offset(dn,
 696                     DNODE_FIND_BACKWARDS, start, 2, 1, 0);
 697 
 698                 /* if there are no indirect blocks before start, we are done */
 699                 if (err == ESRCH) {
 700                         *start = minimum;
 701                         break;
 702                 } else if (err != 0) {
 703                         return (err);
 704                 }
 705 
 706                 /* set start to the beginning of this L1 indirect */
 707                 *start = P2ALIGN(*start, iblkrange);
 708         }
 709         if (*start < minimum)
 710                 *start = minimum;
 711         return (0);
 712 }
 713 
 714 /*
 715  * If this objset is of type DMU_OST_ZFS, return true if the vfs's unmounted
 716  * flag is set; otherwise return false.  Used below in
 717  * dmu_free_long_range_impl() to abort when the file system is unmounting.
 718  */
 719 /*ARGSUSED*/
 720 static boolean_t
 721 dmu_objset_zfs_unmounting(objset_t *os)
 722 {
 723 #ifdef _KERNEL
 724         if (dmu_objset_type(os) == DMU_OST_ZFS)
 725                 return (zfs_get_vfs_flag_unmounted(os));
 726 #endif
 727         return (B_FALSE);
 728 }
 729 
 730 static int
 731 dmu_free_long_range_impl(objset_t *os, dnode_t *dn, uint64_t offset,
 732     uint64_t length)
 733 {
 734         uint64_t object_size = (dn->dn_maxblkid + 1) * dn->dn_datablksz;
 735         int err;
 736         uint64_t dirty_frees_threshold;
 737         dsl_pool_t *dp = dmu_objset_pool(os);
 738 
 739         if (offset >= object_size)
 740                 return (0);
 741 
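        /*
         * A setting above 100 falls back to a threshold of one quarter of
         * zfs_dirty_data_max; a setting of zero yields a threshold of zero,
         * which disables the throttle (see the check below).
         */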
 742         if (zfs_per_txg_dirty_frees_percent <= 100)
 743                 dirty_frees_threshold =
 744                     zfs_per_txg_dirty_frees_percent * zfs_dirty_data_max / 100;
 745         else
 746                 dirty_frees_threshold = zfs_dirty_data_max / 4;
 747 
 748         if (length == DMU_OBJECT_END || offset + length > object_size)
 749                 length = object_size - offset;
 750 
 751         while (length != 0) {
 752                 uint64_t chunk_end, chunk_begin, chunk_len;
 753                 uint64_t long_free_dirty_all_txgs = 0;
 754                 dmu_tx_t *tx;
 755 
 756                 if (dmu_objset_zfs_unmounting(dn->dn_objset))
 757                         return (SET_ERROR(EINTR));
 758 
 759                 chunk_end = chunk_begin = offset + length;
 760 
 761                 /* move chunk_begin backwards to the beginning of this chunk */
 762                 err = get_next_chunk(dn, &chunk_begin, offset);
 763                 if (err)
 764                         return (err);
 765                 ASSERT3U(chunk_begin, >=, offset);
 766                 ASSERT3U(chunk_begin, <=, chunk_end);
 767 
 768                 chunk_len = chunk_end - chunk_begin;
 769 
 770                 mutex_enter(&dp->dp_lock);
 771                 for (int t = 0; t < TXG_SIZE; t++) {
 772                         long_free_dirty_all_txgs +=
 773                             dp->dp_long_free_dirty_pertxg[t];
 774                 }
 775                 mutex_exit(&dp->dp_lock);
 776 
 777                 /*
 
 779                  * the next TXG to open before freeing more chunks if
 780                  * we have reached the threshold of frees
 781                  */
 782                 if (dirty_frees_threshold != 0 &&
 783                     long_free_dirty_all_txgs >= dirty_frees_threshold) {
 784                         txg_wait_open(dp, 0);
 785                         continue;
 786                 }
 787 
 788                 tx = dmu_tx_create(os);
 789                 dmu_tx_hold_free(tx, dn->dn_object, chunk_begin, chunk_len);
 790 
 791                 /*
 792                  * Mark this transaction as typically resulting in a net
 793                  * reduction in space used.
 794                  */
 795                 dmu_tx_mark_netfree(tx);
 796                 err = dmu_tx_assign(tx, TXG_WAIT);
 797                 if (err) {
 798                         dmu_tx_abort(tx);
 799                         return (err);
 800                 }
 801 
 802                 mutex_enter(&dp->dp_lock);
 803                 dp->dp_long_free_dirty_pertxg[dmu_tx_get_txg(tx) & TXG_MASK] +=
 804                     chunk_len;
 805                 mutex_exit(&dp->dp_lock);
 806                 DTRACE_PROBE3(free__long__range,
 807                     uint64_t, long_free_dirty_all_txgs, uint64_t, chunk_len,
 808                     uint64_t, dmu_tx_get_txg(tx));
 809                 dnode_free_range(dn, chunk_begin, chunk_len, tx);
 810                 dmu_tx_commit(tx);
 811 
 812                 length -= chunk_len;
 813         }
 814         return (0);
 815 }
 816 
 817 int
 818 dmu_free_long_range(objset_t *os, uint64_t object,
 
1004         dmu_write_impl(dbp, numbufs, offset, size, buf, tx);
1005         dmu_buf_rele_array(dbp, numbufs, FTAG);
1006 }
1007 
1008 void
1009 dmu_write_by_dnode(dnode_t *dn, uint64_t offset, uint64_t size,
1010     const void *buf, dmu_tx_t *tx)
1011 {
1012         dmu_buf_t **dbp;
1013         int numbufs;
1014 
1015         if (size == 0)
1016                 return;
1017 
1018         VERIFY0(dmu_buf_hold_array_by_dnode(dn, offset, size,
1019             FALSE, FTAG, &numbufs, &dbp, DMU_READ_PREFETCH));
1020         dmu_write_impl(dbp, numbufs, offset, size, buf, tx);
1021         dmu_buf_rele_array(dbp, numbufs, FTAG);
1022 }
1023 
1024 static int
1025 dmu_object_remap_one_indirect(objset_t *os, dnode_t *dn,
1026     uint64_t last_removal_txg, uint64_t offset)
1027 {
1028         uint64_t l1blkid = dbuf_whichblock(dn, 1, offset);
1029         int err = 0;
1030 
1031         rw_enter(&dn->dn_struct_rwlock, RW_READER);
1032         dmu_buf_impl_t *dbuf = dbuf_hold_level(dn, 1, l1blkid, FTAG);
1033         ASSERT3P(dbuf, !=, NULL);
1034 
1035         /*
1036          * If the block hasn't been written yet, this default will ensure
1037          * we don't try to remap it.
1038          */
1039         uint64_t birth = UINT64_MAX;
1040         ASSERT3U(last_removal_txg, !=, UINT64_MAX);
1041         if (dbuf->db_blkptr != NULL)
1042                 birth = dbuf->db_blkptr->blk_birth;
1043         rw_exit(&dn->dn_struct_rwlock);
1044 
1045         /*
1046          * If this L1 was already written after the last removal, then we've
1047          * already tried to remap it.
1048          */
1049         if (birth <= last_removal_txg &&
1050             dbuf_read(dbuf, NULL, DB_RF_MUST_SUCCEED) == 0 &&
1051             dbuf_can_remap(dbuf)) {
1052                 dmu_tx_t *tx = dmu_tx_create(os);
1053                 dmu_tx_hold_remap_l1indirect(tx, dn->dn_object);
1054                 err = dmu_tx_assign(tx, TXG_WAIT);
1055                 if (err == 0) {
1056                         (void) dbuf_dirty(dbuf, tx);
1057                         dmu_tx_commit(tx);
1058                 } else {
1059                         dmu_tx_abort(tx);
1060                 }
1061         }
1062 
1063         dbuf_rele(dbuf, FTAG);
1064 
1065         delay(zfs_object_remap_one_indirect_delay_ticks);
1066 
1067         return (err);
1068 }
1069 
1070 /*
1071  * Remap all blockpointers in the object, if possible, so that they reference
1072  * only concrete vdevs.
1073  *
1074  * To do this, iterate over the L0 blockpointers and remap any that reference
1075  * an indirect vdev. Note that we only examine L0 blockpointers; since we
 1076  * cannot guarantee that we can remap all blockpointers anyway (due to split
1077  * blocks), we do not want to make the code unnecessarily complicated to
1078  * catch the unlikely case that there is an L1 block on an indirect vdev that
1079  * contains no indirect blockpointers.
1080  */
1081 int
1082 dmu_object_remap_indirects(objset_t *os, uint64_t object,
1083     uint64_t last_removal_txg)
1084 {
1085         uint64_t offset, l1span;
1086         int err;
1087         dnode_t *dn;
1088 
1089         err = dnode_hold(os, object, FTAG, &dn);
1090         if (err != 0) {
1091                 return (err);
1092         }
1093 
1094         if (dn->dn_nlevels <= 1) {
1095                 if (issig(JUSTLOOKING) && issig(FORREAL)) {
1096                         err = SET_ERROR(EINTR);
1097                 }
1098 
1099                 /*
1100                  * If the dnode has no indirect blocks, we cannot dirty them.
1101                  * We still want to remap the blkptr(s) in the dnode if
1102                  * appropriate, so mark it as dirty.
1103                  */
1104                 if (err == 0 && dnode_needs_remap(dn)) {
1105                         dmu_tx_t *tx = dmu_tx_create(os);
1106                         dmu_tx_hold_bonus(tx, dn->dn_object);
1107                         if ((err = dmu_tx_assign(tx, TXG_WAIT)) == 0) {
1108                                 dnode_setdirty(dn, tx);
1109                                 dmu_tx_commit(tx);
1110                         } else {
1111                                 dmu_tx_abort(tx);
1112                         }
1113                 }
1114 
1115                 dnode_rele(dn, FTAG);
1116                 return (err);
1117         }
1118 
1119         offset = 0;
1120         l1span = 1ULL << (dn->dn_indblkshift - SPA_BLKPTRSHIFT +
1121             dn->dn_datablkshift);
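        /*
         * For example, with 128K indirect blocks (dn_indblkshift = 17),
         * 128-byte block pointers (SPA_BLKPTRSHIFT = 7) and 128K data
         * blocks (dn_datablkshift = 17), each L1 indirect covers
         * 1 << (17 - 7 + 17) = 128 MB of file data (illustrative values).
         */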
1122         /*
1123          * Find the next L1 indirect that is not a hole.
1124          */
1125         while (dnode_next_offset(dn, 0, &offset, 2, 1, 0) == 0) {
1126                 if (issig(JUSTLOOKING) && issig(FORREAL)) {
1127                         err = SET_ERROR(EINTR);
1128                         break;
1129                 }
1130                 if ((err = dmu_object_remap_one_indirect(os, dn,
1131                     last_removal_txg, offset)) != 0) {
1132                         break;
1133                 }
1134                 offset += l1span;
1135         }
1136 
1137         dnode_rele(dn, FTAG);
1138         return (err);
1139 }
1140 
1141 void
1142 dmu_prealloc(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
1143     dmu_tx_t *tx)
1144 {
1145         dmu_buf_t **dbp;
1146         int numbufs, i;
1147 
1148         if (size == 0)
1149                 return;
1150 
1151         VERIFY(0 == dmu_buf_hold_array(os, object, offset, size,
1152             FALSE, FTAG, &numbufs, &dbp));
1153 
1154         for (i = 0; i < numbufs; i++) {
1155                 dmu_buf_t *db = dbp[i];
1156 
1157                 dmu_buf_will_not_fill(db, tx);
1158         }
1159         dmu_buf_rele_array(dbp, numbufs, FTAG);
1160 }
 
1670                 } else if (!BP_IS_EMBEDDED(bp)) {
1671                         ASSERT(BP_GET_LEVEL(bp) == 0);
1672                         bp->blk_fill = 1;
1673                 }
1674         }
1675 }
1676 
1677 static void
1678 dmu_sync_late_arrival_ready(zio_t *zio)
1679 {
1680         dmu_sync_ready(zio, NULL, zio->io_private);
1681 }
1682 
1683 /* ARGSUSED */
1684 static void
1685 dmu_sync_done(zio_t *zio, arc_buf_t *buf, void *varg)
1686 {
1687         dmu_sync_arg_t *dsa = varg;
1688         dbuf_dirty_record_t *dr = dsa->dsa_dr;
1689         dmu_buf_impl_t *db = dr->dr_dbuf;
1690 
1691         mutex_enter(&db->db_mtx);
1692         ASSERT(dr->dt.dl.dr_override_state == DR_IN_DMU_SYNC);
1693         if (zio->io_error == 0) {
1694                 dr->dt.dl.dr_nopwrite = !!(zio->io_flags & ZIO_FLAG_NOPWRITE);
1695                 if (dr->dt.dl.dr_nopwrite) {
1696                         blkptr_t *bp = zio->io_bp;
1697                         blkptr_t *bp_orig = &zio->io_bp_orig;
1698                         uint8_t chksum = BP_GET_CHECKSUM(bp_orig);
1699 
1700                         ASSERT(BP_EQUAL(bp, bp_orig));
1701                         VERIFY(BP_EQUAL(bp, db->db_blkptr));
1702                         ASSERT(zio->io_prop.zp_compress != ZIO_COMPRESS_OFF);
1703                         ASSERT(zio_checksum_table[chksum].ci_flags &
1704                             ZCHECKSUM_FLAG_NOPWRITE);
1705                 }
1706                 dr->dt.dl.dr_overridden_by = *zio->io_bp;
1707                 dr->dt.dl.dr_override_state = DR_OVERRIDDEN;
1708                 dr->dt.dl.dr_copies = zio->io_prop.zp_copies;
1709 
1710                 /*
 
1720                 if (BP_IS_HOLE(&dr->dt.dl.dr_overridden_by) &&
1721                     dr->dt.dl.dr_overridden_by.blk_birth == 0)
1722                         BP_ZERO(&dr->dt.dl.dr_overridden_by);
1723         } else {
1724                 dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN;
1725         }
1726         cv_broadcast(&db->db_changed);
1727         mutex_exit(&db->db_mtx);
1728 
1729         dsa->dsa_done(dsa->dsa_zgd, zio->io_error);
1730 
1731         kmem_free(dsa, sizeof (*dsa));
1732 }
1733 
1734 static void
1735 dmu_sync_late_arrival_done(zio_t *zio)
1736 {
1737         blkptr_t *bp = zio->io_bp;
1738         dmu_sync_arg_t *dsa = zio->io_private;
1739         blkptr_t *bp_orig = &zio->io_bp_orig;
1740 
1741         if (zio->io_error == 0 && !BP_IS_HOLE(bp)) {
1742                 ASSERT(!(zio->io_flags & ZIO_FLAG_NOPWRITE));
1743                 ASSERT(BP_IS_HOLE(bp_orig) || !BP_EQUAL(bp, bp_orig));
1744                 ASSERT(zio->io_bp->blk_birth == zio->io_txg);
1745                 ASSERT(zio->io_txg > spa_syncing_txg(zio->io_spa));
1746                 zio_free(zio->io_spa, zio->io_txg, zio->io_bp);
1747         }
1748 
1749         dmu_tx_commit(dsa->dsa_tx);
1750 
1751         dsa->dsa_done(dsa->dsa_zgd, zio->io_error);
1752 
1753         abd_put(zio->io_abd);
1754         kmem_free(dsa, sizeof (*dsa));
1755 }
1756 
1757 static int
1758 dmu_sync_late_arrival(zio_t *pio, objset_t *os, dmu_sync_cb_t *done, zgd_t *zgd,
1759     zio_prop_t *zp, zbookmark_phys_t *zb)
1760 {
1761         dmu_sync_arg_t *dsa;
1762         dmu_tx_t *tx;
1763 
1764         tx = dmu_tx_create(os);
1765         dmu_tx_hold_space(tx, zgd->zgd_db->db_size);
1766         if (dmu_tx_assign(tx, TXG_WAIT) != 0) {
1767                 dmu_tx_abort(tx);
 1768                 /* Make zl_get_data do txg_wait_synced() */
1769                 return (SET_ERROR(EIO));
1770         }
1771 
1772         /*
1773          * In order to prevent the zgd's lwb from being free'd prior to
1774          * dmu_sync_late_arrival_done() being called, we have to ensure
1775          * the lwb's "max txg" takes this tx's txg into account.
1776          */
1777         zil_lwb_add_txg(zgd->zgd_lwb, dmu_tx_get_txg(tx));
1778 
1779         dsa = kmem_alloc(sizeof (dmu_sync_arg_t), KM_SLEEP);
 
1792          * dirty in a future txg).
1793          *
1794          * Then dbuf_write_ready() sets bp_blkptr to the location we will write.
1795          * We can not nopwrite against it because although the BP will not
1796          * (typically) be changed, the data has not yet been persisted to this
1797          * location.
1798          *
1799          * Finally, when dbuf_write_done() is called, it is theoretically
1800          * possible to always nopwrite, because the data that was written in
1801          * this txg is the same data that we are trying to write.  However we
1802          * would need to check that this dbuf is not dirty in any future
1803          * txg's (as we do in the normal dmu_sync() path). For simplicity, we
1804          * don't nopwrite in this case.
1805          */
1806         zp->zp_nopwrite = B_FALSE;
1807 
1808         zio_nowait(zio_write(pio, os->os_spa, dmu_tx_get_txg(tx), zgd->zgd_bp,
1809             abd_get_from_buf(zgd->zgd_db->db_data, zgd->zgd_db->db_size),
1810             zgd->zgd_db->db_size, zgd->zgd_db->db_size, zp,
1811             dmu_sync_late_arrival_ready, NULL, NULL, dmu_sync_late_arrival_done,
1812             dsa, ZIO_PRIORITY_SYNC_WRITE, ZIO_FLAG_CANFAIL, zb));
1813 
1814         return (0);
1815 }
1816 
1817 /*
1818  * Intent log support: sync the block associated with db to disk.
1819  * N.B. and XXX: the caller is responsible for making sure that the
1820  * data isn't changing while dmu_sync() is writing it.
1821  *
1822  * Return values:
1823  *
1824  *      EEXIST: this txg has already been synced, so there's nothing to do.
1825  *              The caller should not log the write.
1826  *
1827  *      ENOENT: the block was dbuf_free_range()'d, so there's nothing to do.
1828  *              The caller should not log the write.
1829  *
1830  *      EALREADY: this block is already in the process of being synced.
1831  *              The caller should track its progress (somehow).
1832  *
1833  *      EIO: could not do the I/O.
1834  *              The caller should do a txg_wait_synced().
1835  *
1836  *      0: the I/O has been initiated.
1837  *              The caller should log this blkptr in the done callback.
1838  *              It is possible that the I/O will fail, in which case
1839  *              the error will be reported to the done callback and
1840  *              propagated to pio from zio_done().
1841  */
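/*
 * A hypothetical caller sketch based on the return values above
 * (illustrative only; not the actual ZIL get-data code):
 *
 *	error = dmu_sync(pio, txg, done_cb, zgd);
 *	if (error == EEXIST || error == ENOENT)
 *		do not log the write;
 *	else if (error == EIO)
 *		txg_wait_synced(dmu_objset_pool(os), txg);
 *	else if (error == 0)
 *		log the blkptr delivered to done_cb;
 */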
1842 int
1843 dmu_sync(zio_t *pio, uint64_t txg, dmu_sync_cb_t *done, zgd_t *zgd)
1844 {
1845         dmu_buf_impl_t *db = (dmu_buf_impl_t *)zgd->zgd_db;
1846         objset_t *os = db->db_objset;
1847         dsl_dataset_t *ds = os->os_dsl_dataset;
1848         dbuf_dirty_record_t *dr;
1849         dmu_sync_arg_t *dsa;
1850         zbookmark_phys_t zb;
1851         zio_prop_t zp;
1852         dnode_t *dn;
1853 
1854         ASSERT(pio != NULL);
1855         ASSERT(txg != 0);
1856 
1857         SET_BOOKMARK(&zb, ds->ds_object,
1858             db->db.db_object, db->db_level, db->db_blkid);
1859 
1860         DB_DNODE_ENTER(db);
1861         dn = DB_DNODE(db);
1862         dmu_write_policy(os, dn, db->db_level, WP_DMU_SYNC, &zp);
1863         DB_DNODE_EXIT(db);
1864 
1865         /*
1866          * If we're frozen (running ziltest), we always need to generate a bp.
1867          */
1868         if (txg > spa_freeze_txg(os->os_spa))
1869                 return (dmu_sync_late_arrival(pio, os, done, zgd, &zp, &zb));
1870 
1871         /*
1872          * Grabbing db_mtx now provides a barrier between dbuf_sync_leaf()
1873          * and us.  If we determine that this txg is not yet syncing,
1874          * but it begins to sync a moment later, that's OK because the
1875          * sync thread will block in dbuf_sync_leaf() until we drop db_mtx.
1876          */
1877         mutex_enter(&db->db_mtx);
1878 
1879         if (txg <= spa_last_synced_txg(os->os_spa)) {
1880                 /*
1881                  * This txg has already synced.  There's nothing to do.
1882                  */
1883                 mutex_exit(&db->db_mtx);
1884                 return (SET_ERROR(EEXIST));
1885         }
1886 
1887         if (txg <= spa_syncing_txg(os->os_spa)) {
1888                 /*
1889                  * This txg is currently syncing, so we can't mess with
1890                  * the dirty record anymore; just write a new log block.
1891                  */
1892                 mutex_exit(&db->db_mtx);
1893                 return (dmu_sync_late_arrival(pio, os, done, zgd, &zp, &zb));
1894         }
1895 
1896         dr = db->db_last_dirty;
1897         while (dr && dr->dr_txg != txg)
1898                 dr = dr->dr_next;
1899 
1900         if (dr == NULL) {
1901                 /*
1902                  * There's no dr for this dbuf, so it must have been freed.
1903                  * There's no need to log writes to freed blocks, so we're done.
1904                  */
1905                 mutex_exit(&db->db_mtx);
1906                 return (SET_ERROR(ENOENT));
1907         }
1908 
1909         ASSERT(dr->dr_next == NULL || dr->dr_next->dr_txg < txg);
1910 
1911         if (db->db_blkptr != NULL) {
1912                 /*
1913                  * We need to fill in zgd_bp with the current blkptr so that
 
1959                  * or this buffer has already been synced.  It could not
1960                  * have been dirtied since, or we would have cleared the state.
1961                  */
1962                 mutex_exit(&db->db_mtx);
1963                 return (SET_ERROR(EALREADY));
1964         }
1965 
1966         ASSERT(dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN);
1967         dr->dt.dl.dr_override_state = DR_IN_DMU_SYNC;
1968         mutex_exit(&db->db_mtx);
1969 
1970         dsa = kmem_alloc(sizeof (dmu_sync_arg_t), KM_SLEEP);
1971         dsa->dsa_dr = dr;
1972         dsa->dsa_done = done;
1973         dsa->dsa_zgd = zgd;
1974         dsa->dsa_tx = NULL;
1975 
1976         zio_nowait(arc_write(pio, os->os_spa, txg,
1977             zgd->zgd_bp, dr->dt.dl.dr_data, DBUF_IS_L2CACHEABLE(db),
1978             &zp, dmu_sync_ready, NULL, NULL, dmu_sync_done, dsa,
1979             ZIO_PRIORITY_SYNC_WRITE, ZIO_FLAG_CANFAIL, &zb));
1980 
1981         return (0);
1982 }
1983 
1984 int
1985 dmu_object_set_blocksize(objset_t *os, uint64_t object, uint64_t size, int ibs,
1986     dmu_tx_t *tx)
1987 {
1988         dnode_t *dn;
1989         int err;
1990 
1991         err = dnode_hold(os, object, FTAG, &dn);
1992         if (err)
1993                 return (err);
1994         err = dnode_set_blksz(dn, size, ibs, tx);
1995         dnode_rele(dn, FTAG);
1996         return (err);
1997 }
1998 
1999 void
 
2125                  */
2126                 if (dedup_checksum != ZIO_CHECKSUM_OFF) {
2127                         dedup = (wp & WP_DMU_SYNC) ? B_FALSE : B_TRUE;
2128                         if (!(zio_checksum_table[checksum].ci_flags &
2129                             ZCHECKSUM_FLAG_DEDUP))
2130                                 dedup_verify = B_TRUE;
2131                 }
2132 
2133                 /*
 2134          * Enable nopwrite if we have a secure enough checksum
2135                  * algorithm (see comment in zio_nop_write) and
2136                  * compression is enabled.  We don't enable nopwrite if
2137                  * dedup is enabled as the two features are mutually
2138                  * exclusive.
2139                  */
2140                 nopwrite = (!dedup && (zio_checksum_table[checksum].ci_flags &
2141                     ZCHECKSUM_FLAG_NOPWRITE) &&
2142                     compress != ZIO_COMPRESS_OFF && zfs_nopwrite_enabled);
2143         }
2144 
2145         zp->zp_checksum = checksum;
2146         zp->zp_compress = compress;
2147         ASSERT3U(zp->zp_compress, !=, ZIO_COMPRESS_INHERIT);
2148 
2149         zp->zp_type = (wp & WP_SPILL) ? dn->dn_bonustype : type;
2150         zp->zp_level = level;
2151         zp->zp_copies = MIN(copies, spa_max_replication(os->os_spa));
2152         zp->zp_dedup = dedup;
2153         zp->zp_dedup_verify = dedup && dedup_verify;
2154         zp->zp_nopwrite = nopwrite;
2155 }
2156 
2157 int
2158 dmu_offset_next(objset_t *os, uint64_t object, boolean_t hole, uint64_t *off)
2159 {
2160         dnode_t *dn;
2161         int err;
2162 
2163         /*
2164          * Sync any current changes before
2165          * we go trundling through the block pointers.
2166          */
2167         err = dmu_object_wait_synced(os, object);
2168         if (err) {
2169                 return (err);
2170         }
2171 
2172         err = dnode_hold(os, object, FTAG, &dn);
2173         if (err) {
2174                 return (err);
 
 | 
 
 
  28 
  29 #include <sys/dmu.h>
  30 #include <sys/dmu_impl.h>
  31 #include <sys/dmu_tx.h>
  32 #include <sys/dbuf.h>
  33 #include <sys/dnode.h>
  34 #include <sys/zfs_context.h>
  35 #include <sys/dmu_objset.h>
  36 #include <sys/dmu_traverse.h>
  37 #include <sys/dsl_dataset.h>
  38 #include <sys/dsl_dir.h>
  39 #include <sys/dsl_pool.h>
  40 #include <sys/dsl_synctask.h>
  41 #include <sys/dsl_prop.h>
  42 #include <sys/dmu_zfetch.h>
  43 #include <sys/zfs_ioctl.h>
  44 #include <sys/zap.h>
  45 #include <sys/zio_checksum.h>
  46 #include <sys/zio_compress.h>
  47 #include <sys/sa.h>
  48 #include <sys/spa_impl.h>
  49 #include <sys/zfeature.h>
  50 #include <sys/abd.h>
  51 #ifdef _KERNEL
  52 #include <sys/vmsystm.h>
  53 #include <sys/zfs_znode.h>
  54 #include <sys/zfs_vfsops.h>
  55 #endif
  56 #include <sys/special.h>
  57 
  58 /*
  59  * Enable/disable nopwrite feature.
  60  */
  61 int zfs_nopwrite_enabled = 1;
  62 
  63 /*
  64  * Tunable to control percentage of dirtied blocks from frees in one TXG.
  65  * After this threshold is crossed, additional dirty blocks from frees
  66  * wait until the next TXG.
  67  * A value of zero will disable this throttle.
  68  */
  69 uint32_t zfs_per_txg_dirty_frees_percent = 30;
  70 
  71 const dmu_object_type_info_t dmu_ot[DMU_OT_NUMTYPES] = {
  72         { DMU_BSWAP_UINT8,  TRUE,  FALSE,  "unallocated"                },
  73         { DMU_BSWAP_ZAP,    TRUE,  TRUE,   "object directory"           },
  74         { DMU_BSWAP_UINT64, TRUE,  TRUE,   "object array"               },
  75         { DMU_BSWAP_UINT8,  TRUE,  FALSE,  "packed nvlist"              },
  76         { DMU_BSWAP_UINT64, TRUE,  FALSE,  "packed nvlist size"         },
  77         { DMU_BSWAP_UINT64, TRUE,  FALSE,  "bpobj"                      },
  78         { DMU_BSWAP_UINT64, TRUE,  FALSE,  "bpobj header"               },
  79         { DMU_BSWAP_UINT64, TRUE,  FALSE,  "SPA space map header"       },
  80         { DMU_BSWAP_UINT64, TRUE,  FALSE,  "SPA space map"              },
  81         { DMU_BSWAP_UINT64, TRUE,  FALSE,  "ZIL intent log"             },
  82         { DMU_BSWAP_DNODE,  TRUE,  FALSE,  "DMU dnode"                  },
  83         { DMU_BSWAP_OBJSET, TRUE,  TRUE,   "DMU objset"                 },
  84         { DMU_BSWAP_UINT64, TRUE,  TRUE,   "DSL directory"              },
  85         { DMU_BSWAP_ZAP,    TRUE,  TRUE,   "DSL directory child map"    },
  86         { DMU_BSWAP_ZAP,    TRUE,  TRUE,   "DSL dataset snap map"       },
  87         { DMU_BSWAP_ZAP,    TRUE,  TRUE,   "DSL props"                  },
  88         { DMU_BSWAP_UINT64, TRUE,  TRUE,   "DSL dataset"                },
  89         { DMU_BSWAP_ZNODE,  TRUE,  FALSE,  "ZFS znode"                  },
  90         { DMU_BSWAP_OLDACL, TRUE,  FALSE,  "ZFS V0 ACL"                 },
  91         { DMU_BSWAP_UINT8,  FALSE, FALSE,  "ZFS plain file"             },
  92         { DMU_BSWAP_ZAP,    TRUE,  FALSE,  "ZFS directory"              },
  93         { DMU_BSWAP_ZAP,    TRUE,  FALSE,  "ZFS master node"            },
  94         { DMU_BSWAP_ZAP,    TRUE,  FALSE,  "ZFS delete queue"           },
  95         { DMU_BSWAP_UINT8,  FALSE, FALSE,  "zvol object"                },
  96         { DMU_BSWAP_ZAP,    TRUE,  TRUE,   "zvol prop"                  },
  97         { DMU_BSWAP_UINT8,  FALSE, FALSE,  "other uint8[]"              },
  98         { DMU_BSWAP_UINT64, FALSE, FALSE,  "other uint64[]"             },
  99         { DMU_BSWAP_ZAP,    TRUE,  FALSE,  "other ZAP"                  },
 100         { DMU_BSWAP_ZAP,    TRUE,  FALSE,  "persistent error log"       },
 101         { DMU_BSWAP_UINT8,  TRUE,  FALSE,  "SPA history"                },
 102         { DMU_BSWAP_UINT64, TRUE,  FALSE,  "SPA history offsets"        },
 103         { DMU_BSWAP_ZAP,    TRUE,  TRUE,   "Pool properties"            },
 104         { DMU_BSWAP_ZAP,    TRUE,  TRUE,   "DSL permissions"            },
 105         { DMU_BSWAP_ACL,    TRUE,  FALSE,  "ZFS ACL"                    },
 106         { DMU_BSWAP_UINT8,  TRUE,  FALSE,  "ZFS SYSACL"                 },
 107         { DMU_BSWAP_UINT8,  TRUE,  FALSE,  "FUID table"                 },
 108         { DMU_BSWAP_UINT64, TRUE,  FALSE,  "FUID table size"            },
 109         { DMU_BSWAP_ZAP,    TRUE,  TRUE,   "DSL dataset next clones"    },
 110         { DMU_BSWAP_ZAP,    TRUE,  FALSE,  "scan work queue"            },
 111         { DMU_BSWAP_ZAP,    TRUE,  FALSE,  "ZFS user/group used"        },
 112         { DMU_BSWAP_ZAP,    TRUE,  FALSE,  "ZFS user/group quota"       },
 113         { DMU_BSWAP_ZAP,    TRUE,  TRUE,   "snapshot refcount tags"     },
 114         { DMU_BSWAP_ZAP,    TRUE,  FALSE,  "DDT ZAP algorithm"          },
 115         { DMU_BSWAP_ZAP,    TRUE,  FALSE,  "DDT statistics"             },
 116         { DMU_BSWAP_UINT8,  TRUE,  FALSE,  "System attributes"          },
 117         { DMU_BSWAP_ZAP,    TRUE,  FALSE,  "SA master node"             },
 118         { DMU_BSWAP_ZAP,    TRUE,  FALSE,  "SA attr registration"       },
 119         { DMU_BSWAP_ZAP,    TRUE,  FALSE,  "SA attr layouts"            },
 120         { DMU_BSWAP_ZAP,    TRUE,  FALSE,  "scan translations"          },
 121         { DMU_BSWAP_UINT8,  FALSE, FALSE,  "deduplicated block"         },
 122         { DMU_BSWAP_ZAP,    TRUE,  TRUE,   "DSL deadlist map"           },
 123         { DMU_BSWAP_UINT64, TRUE,  TRUE,   "DSL deadlist map hdr"       },
 124         { DMU_BSWAP_ZAP,    TRUE,  TRUE,   "DSL dir clones"             },
 125         { DMU_BSWAP_UINT64, TRUE,  FALSE,  "bpobj subobj"               }
 126 };
 127 
 128 const dmu_object_byteswap_info_t dmu_ot_byteswap[DMU_BSWAP_NUMFUNCS] = {
 129         {       byteswap_uint8_array,   "uint8"         },
 130         {       byteswap_uint16_array,  "uint16"        },
 131         {       byteswap_uint32_array,  "uint32"        },
 132         {       byteswap_uint64_array,  "uint64"        },
 133         {       zap_byteswap,           "zap"           },
 134         {       dnode_buf_byteswap,     "dnode"         },
 135         {       dmu_objset_byteswap,    "objset"        },
 136         {       zfs_znode_byteswap,     "znode"         },
 137         {       zfs_oldacl_byteswap,    "oldacl"        },
 138         {       zfs_acl_byteswap,       "acl"           }
 139 };
 140 
 141 int
 142 dmu_buf_hold_noread_by_dnode(dnode_t *dn, uint64_t offset,
 143     void *tag, dmu_buf_t **dbp)
 144 {
 145         uint64_t blkid;
 
 691                 err = dnode_next_offset(dn,
 692                     DNODE_FIND_BACKWARDS, start, 2, 1, 0);
 693 
 694                 /* if there are no indirect blocks before start, we are done */
 695                 if (err == ESRCH) {
 696                         *start = minimum;
 697                         break;
 698                 } else if (err != 0) {
 699                         return (err);
 700                 }
 701 
 702                 /* set start to the beginning of this L1 indirect */
 703                 *start = P2ALIGN(*start, iblkrange);
 704         }
 705         if (*start < minimum)
 706                 *start = minimum;
 707         return (0);
 708 }
 709 
 710 /*
 711  * If this dnode is in a ZFS object set, return true if the vfs's
 712  * unmounted flag is set or the zfsvfs is currently suspended;
 713  * otherwise return false.
 714  */
 715 /*ARGSUSED*/
 716 static boolean_t
 717 dmu_dnode_fs_unmounting_or_suspended(dnode_t *freeing_dn)
 718 {
 719 #ifdef _KERNEL
 720         boolean_t busy = B_FALSE;
 721         objset_t *os = freeing_dn->dn_objset;
 722         zfsvfs_t *zfsvfs;
 723 
 724         if (dmu_objset_type(os) == DMU_OST_ZFS) {
 725                 mutex_enter(&os->os_user_ptr_lock);
 726                 zfsvfs = dmu_objset_get_user(os);
 727                 if (zfsvfs != NULL && zfsvfs->z_vfs != NULL &&
 728                     ((zfsvfs->z_vfs->vfs_flag & VFS_UNMOUNTED) ||
 729                      zfsvfs->z_busy))
 730                         busy = B_TRUE;
 731                 mutex_exit(&os->os_user_ptr_lock);
 732         }
 733 
 734         return (busy);
 735 #else
 736         return (B_FALSE);
 737 #endif
 738 }
 739 
 740 static int
 741 dmu_free_long_range_impl(objset_t *os, dnode_t *dn, uint64_t offset,
 742     uint64_t length)
 743 {
 744         uint64_t object_size = (dn->dn_maxblkid + 1) * dn->dn_datablksz;
 745         int err;
 746         uint64_t dirty_frees_threshold;
 747         dsl_pool_t *dp = dmu_objset_pool(os);
 748 
 749         if (offset >= object_size)
 750                 return (0);
 751 
 752         if (zfs_per_txg_dirty_frees_percent <= 100)
 753                 dirty_frees_threshold =
 754                     zfs_per_txg_dirty_frees_percent * zfs_dirty_data_max / 100;
 755         else
 756                 dirty_frees_threshold = zfs_dirty_data_max / 4;
 757 
 758         if (length == DMU_OBJECT_END && offset == 0)
 759                 dnode_evict_dbufs(dn, 0);
 760 
 761         if (length == DMU_OBJECT_END || offset + length > object_size)
 762                 length = object_size - offset;
 763 
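        /*
         * The entire requested range is accounted in dp_long_freeing_total
         * up front; the early-return paths below subtract part of it back
         * out when the free is cut short.
         */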
 764         mutex_enter(&dp->dp_lock);
 765         dp->dp_long_freeing_total += length;
 766         mutex_exit(&dp->dp_lock);
 767 
 768         while (length != 0) {
 769                 uint64_t chunk_end, chunk_begin, chunk_len;
 770                 uint64_t long_free_dirty_all_txgs = 0;
 771                 dmu_tx_t *tx;
 772 
 773                 if (dmu_dnode_fs_unmounting_or_suspended(dn)) {
 774                         mutex_enter(&dp->dp_lock);
 775                         dp->dp_long_freeing_total -= length;
 776                         mutex_exit(&dp->dp_lock);
 777 
 778                         return (SET_ERROR(EINTR));
 779                 }
 780 
 781                 chunk_end = chunk_begin = offset + length;
 782 
 783                 /* move chunk_begin backwards to the beginning of this chunk */
 784                 err = get_next_chunk(dn, &chunk_begin, offset);
 785                 if (err)
 786                         return (err);
 787                 ASSERT3U(chunk_begin, >=, offset);
 788                 ASSERT3U(chunk_begin, <=, chunk_end);
 789 
 790                 chunk_len = chunk_end - chunk_begin;
 791 
 792                 mutex_enter(&dp->dp_lock);
 793                 for (int t = 0; t < TXG_SIZE; t++) {
 794                         long_free_dirty_all_txgs +=
 795                             dp->dp_long_free_dirty_pertxg[t];
 796                 }
 797                 mutex_exit(&dp->dp_lock);
 798 
 799                 /*
 
 801                  * the next TXG to open before freeing more chunks if
 802                  * we have reached the threshold of frees
 803                  */
 804                 if (dirty_frees_threshold != 0 &&
 805                     long_free_dirty_all_txgs >= dirty_frees_threshold) {
 806                         txg_wait_open(dp, 0);
 807                         continue;
 808                 }
 809 
 810                 tx = dmu_tx_create(os);
 811                 dmu_tx_hold_free(tx, dn->dn_object, chunk_begin, chunk_len);
 812 
 813                 /*
 814                  * Mark this transaction as typically resulting in a net
 815                  * reduction in space used.
 816                  */
 817                 dmu_tx_mark_netfree(tx);
 818                 err = dmu_tx_assign(tx, TXG_WAIT);
 819                 if (err) {
 820                         dmu_tx_abort(tx);
 821                         mutex_enter(&dp->dp_lock);
 822                         dp->dp_long_freeing_total -= length - chunk_len;
 823                         mutex_exit(&dp->dp_lock);
 824                         return (err);
 825                 }
 826 
 827                 mutex_enter(&dp->dp_lock);
 828                 dp->dp_long_free_dirty_pertxg[dmu_tx_get_txg(tx) & TXG_MASK] +=
 829                     chunk_len;
 830                 mutex_exit(&dp->dp_lock);
 831                 DTRACE_PROBE3(free__long__range,
 832                     uint64_t, long_free_dirty_all_txgs, uint64_t, chunk_len,
 833                     uint64_t, dmu_tx_get_txg(tx));
 834                 dnode_free_range(dn, chunk_begin, chunk_len, tx);
 835                 dmu_tx_commit(tx);
 836 
 837                 length -= chunk_len;
 838         }
 839         return (0);
 840 }
 841 
 842 int
 843 dmu_free_long_range(objset_t *os, uint64_t object,
 
1029         dmu_write_impl(dbp, numbufs, offset, size, buf, tx);
1030         dmu_buf_rele_array(dbp, numbufs, FTAG);
1031 }
1032 
1033 void
1034 dmu_write_by_dnode(dnode_t *dn, uint64_t offset, uint64_t size,
1035     const void *buf, dmu_tx_t *tx)
1036 {
1037         dmu_buf_t **dbp;
1038         int numbufs;
1039 
1040         if (size == 0)
1041                 return;
1042 
1043         VERIFY0(dmu_buf_hold_array_by_dnode(dn, offset, size,
1044             FALSE, FTAG, &numbufs, &dbp, DMU_READ_PREFETCH));
1045         dmu_write_impl(dbp, numbufs, offset, size, buf, tx);
1046         dmu_buf_rele_array(dbp, numbufs, FTAG);
1047 }
1048 
1049 void
1050 dmu_prealloc(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
1051     dmu_tx_t *tx)
1052 {
1053         dmu_buf_t **dbp;
1054         int numbufs, i;
1055 
1056         if (size == 0)
1057                 return;
1058 
1059         VERIFY(0 == dmu_buf_hold_array(os, object, offset, size,
1060             FALSE, FTAG, &numbufs, &dbp));
1061 
1062         for (i = 0; i < numbufs; i++) {
1063                 dmu_buf_t *db = dbp[i];
1064 
1065                 dmu_buf_will_not_fill(db, tx);
1066         }
1067         dmu_buf_rele_array(dbp, numbufs, FTAG);
1068 }
 
1578                 } else if (!BP_IS_EMBEDDED(bp)) {
1579                         ASSERT(BP_GET_LEVEL(bp) == 0);
1580                         bp->blk_fill = 1;
1581                 }
1582         }
1583 }
1584 
1585 static void
1586 dmu_sync_late_arrival_ready(zio_t *zio)
1587 {
1588         dmu_sync_ready(zio, NULL, zio->io_private);
1589 }
1590 
1591 /* ARGSUSED */
1592 static void
1593 dmu_sync_done(zio_t *zio, arc_buf_t *buf, void *varg)
1594 {
1595         dmu_sync_arg_t *dsa = varg;
1596         dbuf_dirty_record_t *dr = dsa->dsa_dr;
1597         dmu_buf_impl_t *db = dr->dr_dbuf;
1598         zgd_t *zgd = dsa->dsa_zgd;
1599 
1600         /*
1601          * Record the vdev(s) backing this blkptr so they can be flushed after
1602          * the writes for the lwb have completed.
1603          */
1604         if (zio->io_error == 0) {
1605                 zil_lwb_add_block(zgd->zgd_lwb, zgd->zgd_bp);
1606         }
1607 
1608         mutex_enter(&db->db_mtx);
1609         ASSERT(dr->dt.dl.dr_override_state == DR_IN_DMU_SYNC);
1610         if (zio->io_error == 0) {
1611                 dr->dt.dl.dr_nopwrite = !!(zio->io_flags & ZIO_FLAG_NOPWRITE);
1612                 if (dr->dt.dl.dr_nopwrite) {
1613                         blkptr_t *bp = zio->io_bp;
1614                         blkptr_t *bp_orig = &zio->io_bp_orig;
1615                         uint8_t chksum = BP_GET_CHECKSUM(bp_orig);
1616 
1617                         ASSERT(BP_EQUAL(bp, bp_orig));
1618                         VERIFY(BP_EQUAL(bp, db->db_blkptr));
1619                         ASSERT(zio->io_prop.zp_compress != ZIO_COMPRESS_OFF);
1620                         ASSERT(zio_checksum_table[chksum].ci_flags &
1621                             ZCHECKSUM_FLAG_NOPWRITE);
1622                 }
1623                 dr->dt.dl.dr_overridden_by = *zio->io_bp;
1624                 dr->dt.dl.dr_override_state = DR_OVERRIDDEN;
1625                 dr->dt.dl.dr_copies = zio->io_prop.zp_copies;
1626 
1627                 /*
 
1637                 if (BP_IS_HOLE(&dr->dt.dl.dr_overridden_by) &&
1638                     dr->dt.dl.dr_overridden_by.blk_birth == 0)
1639                         BP_ZERO(&dr->dt.dl.dr_overridden_by);
1640         } else {
1641                 dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN;
1642         }
1643         cv_broadcast(&db->db_changed);
1644         mutex_exit(&db->db_mtx);
1645 
1646         dsa->dsa_done(dsa->dsa_zgd, zio->io_error);
1647 
1648         kmem_free(dsa, sizeof (*dsa));
1649 }
1650 
1651 static void
1652 dmu_sync_late_arrival_done(zio_t *zio)
1653 {
1654         blkptr_t *bp = zio->io_bp;
1655         dmu_sync_arg_t *dsa = zio->io_private;
1656         blkptr_t *bp_orig = &zio->io_bp_orig;
1657         zgd_t *zgd = dsa->dsa_zgd;
1658 
1659         if (zio->io_error == 0) {
1660                 /*
1661                  * Record the vdev(s) backing this blkptr so they can be
1662                  * flushed after the writes for the lwb have completed.
1663                  */
1664                 zil_lwb_add_block(zgd->zgd_lwb, zgd->zgd_bp);
1665 
1666                 if (!BP_IS_HOLE(bp)) {
1667                         ASSERT(!(zio->io_flags & ZIO_FLAG_NOPWRITE));
1668                         ASSERT(BP_IS_HOLE(bp_orig) || !BP_EQUAL(bp, bp_orig));
1669                         ASSERT(zio->io_bp->blk_birth == zio->io_txg);
1670                         ASSERT(zio->io_txg > spa_syncing_txg(zio->io_spa));
1671                         zio_free(zio->io_spa, zio->io_txg, zio->io_bp);
1672                 }
1673         }
1674 
1675         dmu_tx_commit(dsa->dsa_tx);
1676 
1677         dsa->dsa_done(dsa->dsa_zgd, zio->io_error);
1678 
1679         abd_put(zio->io_abd);
1680         kmem_free(dsa, sizeof (*dsa));
1681 }
1682 
1683 static int
1684 dmu_sync_late_arrival(zio_t *pio, objset_t *os, dmu_sync_cb_t *done, zgd_t *zgd,
1685     zio_prop_t *zp, zbookmark_phys_t *zb, const zio_smartcomp_info_t *sc)
1686 {
1687         dmu_sync_arg_t *dsa;
1688         dmu_tx_t *tx;
1689 
1690         tx = dmu_tx_create(os);
1691         dmu_tx_hold_space(tx, zgd->zgd_db->db_size);
1692         if (dmu_tx_assign(tx, TXG_WAIT) != 0) {
1693                 dmu_tx_abort(tx);
 1694                 /* Make zl_get_data do txg_wait_synced() */
1695                 return (SET_ERROR(EIO));
1696         }
1697 
1698         /*
1699          * In order to prevent the zgd's lwb from being free'd prior to
1700          * dmu_sync_late_arrival_done() being called, we have to ensure
1701          * the lwb's "max txg" takes this tx's txg into account.
1702          */
1703         zil_lwb_add_txg(zgd->zgd_lwb, dmu_tx_get_txg(tx));
1704 
1705         dsa = kmem_alloc(sizeof (dmu_sync_arg_t), KM_SLEEP);
 
1718          * dirty in a future txg).
1719          *
1720          * Then dbuf_write_ready() sets bp_blkptr to the location we will write.
1721          * We can not nopwrite against it because although the BP will not
1722          * (typically) be changed, the data has not yet been persisted to this
1723          * location.
1724          *
1725          * Finally, when dbuf_write_done() is called, it is theoretically
1726          * possible to always nopwrite, because the data that was written in
1727          * this txg is the same data that we are trying to write.  However we
1728          * would need to check that this dbuf is not dirty in any future
1729          * txg's (as we do in the normal dmu_sync() path). For simplicity, we
1730          * don't nopwrite in this case.
1731          */
1732         zp->zp_nopwrite = B_FALSE;
1733 
1734         zio_nowait(zio_write(pio, os->os_spa, dmu_tx_get_txg(tx), zgd->zgd_bp,
1735             abd_get_from_buf(zgd->zgd_db->db_data, zgd->zgd_db->db_size),
1736             zgd->zgd_db->db_size, zgd->zgd_db->db_size, zp,
1737             dmu_sync_late_arrival_ready, NULL, NULL, dmu_sync_late_arrival_done,
1738             dsa, ZIO_PRIORITY_SYNC_WRITE, ZIO_FLAG_CANFAIL, zb, sc));
1739 
1740         return (0);
1741 }
1742 
1743 /*
1744  * Intent log support: sync the block associated with db to disk.
1745  * N.B. and XXX: the caller is responsible for making sure that the
1746  * data isn't changing while dmu_sync() is writing it.
1747  *
1748  * Return values:
1749  *
1750  *      EEXIST: this txg has already been synced, so there's nothing to do.
1751  *              The caller should not log the write.
1752  *
1753  *      ENOENT: the block was dbuf_free_range()'d, so there's nothing to do.
1754  *              The caller should not log the write.
1755  *
1756  *      EALREADY: this block is already in the process of being synced.
1757  *              The caller should track its progress (somehow).
1758  *
1759  *      EIO: could not do the I/O.
1760  *              The caller should do a txg_wait_synced().
1761  *
1762  *      0: the I/O has been initiated.
1763  *              The caller should log this blkptr in the done callback.
1764  *              It is possible that the I/O will fail, in which case
1765  *              the error will be reported to the done callback and
1766  *              propagated to pio from zio_done().
1767  */
1768 
1769 int
1770 dmu_sync(zio_t *pio, uint64_t txg, dmu_sync_cb_t *done, zgd_t *zgd)
1771 {
1772         dmu_buf_impl_t *db = (dmu_buf_impl_t *)zgd->zgd_db;
1773         objset_t *os = db->db_objset;
1774         dsl_dataset_t *ds = os->os_dsl_dataset;
1775         dbuf_dirty_record_t *dr;
1776         dmu_sync_arg_t *dsa;
1777         zbookmark_phys_t zb;
1778         zio_prop_t zp;
1779         dnode_t *dn;
1780         int flags = 0;
1781         zio_smartcomp_info_t sc;
1782 
1783         ASSERT(pio != NULL);
1784         ASSERT(txg != 0);
1785 
1786         SET_BOOKMARK(&zb, ds->ds_object,
1787             db->db.db_object, db->db_level, db->db_blkid);
1788 
1789         /* write to special only if proper conditions hold */
1790         if (spa_write_data_to_special(os->os_spa, os))
1791                 WP_SET_SPECIALCLASS(flags, B_TRUE);
1792 
1793         DB_DNODE_ENTER(db);
1794         dn = DB_DNODE(db);
1795         dmu_write_policy(os, dn, db->db_level, flags | WP_DMU_SYNC, &zp);
1796         dnode_setup_zio_smartcomp(db, &sc);
1797         DB_DNODE_EXIT(db);
1798 
1799         /*
1800          * If we're frozen (running ziltest), we always need to generate a bp.
1801          */
1802         if (txg > spa_freeze_txg(os->os_spa))
1803                 return (dmu_sync_late_arrival(pio, os, done, zgd, &zp, &zb,
1804                     &sc));
1805 
1806         /*
1807          * Grabbing db_mtx now provides a barrier between dbuf_sync_leaf()
1808          * and us.  If we determine that this txg is not yet syncing,
1809          * but it begins to sync a moment later, that's OK because the
1810          * sync thread will block in dbuf_sync_leaf() until we drop db_mtx.
1811          */
1812         mutex_enter(&db->db_mtx);
1813 
1814         if (txg <= spa_last_synced_txg(os->os_spa)) {
1815                 /*
1816                  * This txg has already synced.  There's nothing to do.
1817                  */
1818                 mutex_exit(&db->db_mtx);
1819                 return (SET_ERROR(EEXIST));
1820         }
1821 
1822         if (txg <= spa_syncing_txg(os->os_spa)) {
1823                 /*
1824                  * This txg is currently syncing, so we can't mess with
1825                  * the dirty record anymore; just write a new log block.
1826                  */
1827                 mutex_exit(&db->db_mtx);
1828                 return (dmu_sync_late_arrival(pio, os, done, zgd, &zp, &zb,
1829                     &sc));
1830         }
1831 
1832         dr = db->db_last_dirty;
1833         while (dr && dr->dr_txg != txg)
1834                 dr = dr->dr_next;
1835 
1836         if (dr == NULL) {
1837                 /*
1838                  * There's no dr for this dbuf, so it must have been freed.
1839                  * There's no need to log writes to freed blocks, so we're done.
1840                  */
1841                 mutex_exit(&db->db_mtx);
1842                 return (SET_ERROR(ENOENT));
1843         }
1844 
1845         ASSERT(dr->dr_next == NULL || dr->dr_next->dr_txg < txg);
1846 
1847         if (db->db_blkptr != NULL) {
1848                 /*
1849                  * We need to fill in zgd_bp with the current blkptr so that
 
1895                  * or this buffer has already been synced.  It could not
1896                  * have been dirtied since, or we would have cleared the state.
1897                  */
1898                 mutex_exit(&db->db_mtx);
1899                 return (SET_ERROR(EALREADY));
1900         }
1901 
1902         ASSERT(dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN);
1903         dr->dt.dl.dr_override_state = DR_IN_DMU_SYNC;
1904         mutex_exit(&db->db_mtx);
1905 
1906         dsa = kmem_alloc(sizeof (dmu_sync_arg_t), KM_SLEEP);
1907         dsa->dsa_dr = dr;
1908         dsa->dsa_done = done;
1909         dsa->dsa_zgd = zgd;
1910         dsa->dsa_tx = NULL;
1911 
1912         zio_nowait(arc_write(pio, os->os_spa, txg,
1913             zgd->zgd_bp, dr->dt.dl.dr_data, DBUF_IS_L2CACHEABLE(db),
1914             &zp, dmu_sync_ready, NULL, NULL, dmu_sync_done, dsa,
1915             ZIO_PRIORITY_SYNC_WRITE, ZIO_FLAG_CANFAIL, &zb, &sc));
1916 
1917         return (0);
1918 }
1919 
1920 int
1921 dmu_object_set_blocksize(objset_t *os, uint64_t object, uint64_t size, int ibs,
1922     dmu_tx_t *tx)
1923 {
1924         dnode_t *dn;
1925         int err;
1926 
1927         err = dnode_hold(os, object, FTAG, &dn);
1928         if (err)
1929                 return (err);
1930         err = dnode_set_blksz(dn, size, ibs, tx);
1931         dnode_rele(dn, FTAG);
1932         return (err);
1933 }
1934 
1935 void
 
2061                  */
2062                 if (dedup_checksum != ZIO_CHECKSUM_OFF) {
2063                         dedup = (wp & WP_DMU_SYNC) ? B_FALSE : B_TRUE;
2064                         if (!(zio_checksum_table[checksum].ci_flags &
2065                             ZCHECKSUM_FLAG_DEDUP))
2066                                 dedup_verify = B_TRUE;
2067                 }
2068 
2069                 /*
 2070          * Enable nopwrite if we have a secure enough checksum
2071                  * algorithm (see comment in zio_nop_write) and
2072                  * compression is enabled.  We don't enable nopwrite if
2073                  * dedup is enabled as the two features are mutually
2074                  * exclusive.
2075                  */
2076                 nopwrite = (!dedup && (zio_checksum_table[checksum].ci_flags &
2077                     ZCHECKSUM_FLAG_NOPWRITE) &&
2078                     compress != ZIO_COMPRESS_OFF && zfs_nopwrite_enabled);
2079         }
2080 
2081         zp->zp_usesc = WP_GET_SPECIALCLASS(wp);
2082         zp->zp_checksum = checksum;
2083         zp->zp_compress = compress;
2084         ASSERT3U(zp->zp_compress, !=, ZIO_COMPRESS_INHERIT);
2085 
2086         zp->zp_type = (wp & WP_SPILL) ? dn->dn_bonustype : type;
2087         zp->zp_level = level;
2088         zp->zp_copies = MIN(copies, spa_max_replication(os->os_spa));
2089         zp->zp_dedup = dedup;
2090         zp->zp_dedup_verify = dedup && dedup_verify;
2091         zp->zp_metadata = ismd;
2092         zp->zp_nopwrite = nopwrite;
2093         zp->zp_zpl_meta_to_special = os->os_zpl_meta_to_special;
2094         zp->zp_usewbc = (zp->zp_usesc &&
2095             os->os_wbc_mode == ZFS_WBC_MODE_ON && !ismd);
2096 
 2097         /* explicitly control the number of copies for DDT */
2098         if (DMU_OT_IS_DDT_META(type) &&
2099             os->os_spa->spa_ddt_meta_copies > 0) {
2100                 zp->zp_copies =
2101                     MIN(os->os_spa->spa_ddt_meta_copies,
2102                     spa_max_replication(os->os_spa));
2103         }
2104 
2105         DTRACE_PROBE2(dmu_wp, boolean_t, zp->zp_metadata,
2106             boolean_t, zp->zp_usesc);
2107 }
2108 
2109 int
2110 dmu_offset_next(objset_t *os, uint64_t object, boolean_t hole, uint64_t *off)
2111 {
2112         dnode_t *dn;
2113         int err;
2114 
2115         /*
2116          * Sync any current changes before
2117          * we go trundling through the block pointers.
2118          */
2119         err = dmu_object_wait_synced(os, object);
2120         if (err) {
2121                 return (err);
2122         }
2123 
2124         err = dnode_hold(os, object, FTAG, &dn);
2125         if (err) {
2126                 return (err);
 
 |