#include <sys/zio.h>
#include <sys/dmu_zfetch.h>
#include <sys/range_tree.h>
#include <sys/zfs_project.h>

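/*
 * Counters for dnode hold/alloc/move events.  These are exported through
 * the "dnodestats" kstat (on Linux, typically visible under
 * /proc/spl/kstat/zfs/dnodestats).
 */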
dnode_stats_t dnode_stats = {
	{ "dnode_hold_dbuf_hold", KSTAT_DATA_UINT64 },
	{ "dnode_hold_dbuf_read", KSTAT_DATA_UINT64 },
	{ "dnode_hold_alloc_hits", KSTAT_DATA_UINT64 },
	{ "dnode_hold_alloc_misses", KSTAT_DATA_UINT64 },
	{ "dnode_hold_alloc_interior", KSTAT_DATA_UINT64 },
	{ "dnode_hold_alloc_lock_retry", KSTAT_DATA_UINT64 },
	{ "dnode_hold_alloc_lock_misses", KSTAT_DATA_UINT64 },
	{ "dnode_hold_alloc_type_none", KSTAT_DATA_UINT64 },
	{ "dnode_hold_free_hits", KSTAT_DATA_UINT64 },
	{ "dnode_hold_free_misses", KSTAT_DATA_UINT64 },
	{ "dnode_hold_free_lock_misses", KSTAT_DATA_UINT64 },
	{ "dnode_hold_free_lock_retry", KSTAT_DATA_UINT64 },
	{ "dnode_hold_free_overflow", KSTAT_DATA_UINT64 },
	{ "dnode_hold_free_refcount", KSTAT_DATA_UINT64 },
	{ "dnode_free_interior_lock_retry", KSTAT_DATA_UINT64 },
	{ "dnode_allocate", KSTAT_DATA_UINT64 },
	{ "dnode_reallocate", KSTAT_DATA_UINT64 },
	{ "dnode_buf_evict", KSTAT_DATA_UINT64 },
	{ "dnode_alloc_next_chunk", KSTAT_DATA_UINT64 },
	{ "dnode_alloc_race", KSTAT_DATA_UINT64 },
	{ "dnode_alloc_next_block", KSTAT_DATA_UINT64 },
	{ "dnode_move_invalid", KSTAT_DATA_UINT64 },
	{ "dnode_move_recheck1", KSTAT_DATA_UINT64 },
	{ "dnode_move_recheck2", KSTAT_DATA_UINT64 },
	{ "dnode_move_special", KSTAT_DATA_UINT64 },
	{ "dnode_move_handle", KSTAT_DATA_UINT64 },
	{ "dnode_move_rwlock", KSTAT_DATA_UINT64 },
	{ "dnode_move_active", KSTAT_DATA_UINT64 },
};

static kstat_t *dnode_ksp;
static kmem_cache_t *dnode_cache;

static dnode_phys_t dnode_phys_zero;

/* ... */

		zrl_destroy(&dnh->dnh_zrlock);
		dnh->dnh_dnode = DN_SLOT_UNINIT;
	}
	kmem_free(dnc, sizeof (dnode_children_t) +
	    dnc->dnc_count * sizeof (dnode_handle_t));
}

/*
 * When the DNODE_MUST_BE_FREE flag is set, the "slots" parameter is used
 * to ensure the hole at the specified object offset is large enough to
 * hold the dnode being created. The slots parameter is also used to ensure
 * a dnode does not span multiple dnode blocks. In both of these cases, if
 * a failure occurs, ENOSPC is returned. Keep in mind, these failure cases
 * are only possible when using DNODE_MUST_BE_FREE.
 *
 * If the DNODE_MUST_BE_ALLOCATED flag is set, "slots" must be 0.
 * dnode_hold_impl() will check if the requested dnode is already consumed
 * as an extra dnode slot by a large dnode, in which case it returns
 * ENOENT.
 *
 * If the DNODE_DRY_RUN flag is set, we don't actually hold the dnode, just
 * return whether the hold would succeed or not. tag and dnp should be set
 * to NULL in this case.
 *
 * errors:
 * EINVAL - Invalid object number or flags.
 * ENOSPC - Hole too small to fulfill "slots" request (DNODE_MUST_BE_FREE)
 * EEXIST - Refers to an allocated dnode (DNODE_MUST_BE_FREE)
 *        - Refers to a freeing dnode (DNODE_MUST_BE_FREE)
 *        - Refers to an interior dnode slot (DNODE_MUST_BE_ALLOCATED)
 * ENOENT - The requested dnode is not allocated (DNODE_MUST_BE_ALLOCATED)
 *        - The requested dnode is being freed (DNODE_MUST_BE_ALLOCATED)
 * EIO    - I/O error when reading the meta dnode dbuf.
 *
 * This function can succeed even for free dnodes.
 */
int
dnode_hold_impl(objset_t *os, uint64_t object, int flag, int slots,
    void *tag, dnode_t **dnp)
{
	int epb, idx, err;
	int drop_struct_lock = FALSE;
	int type;
	uint64_t blk;
	dnode_t *mdn, *dn;
	dmu_buf_impl_t *db;
	dnode_children_t *dnc;
	dnode_phys_t *dn_block;
	dnode_handle_t *dnh;

	ASSERT(!(flag & DNODE_MUST_BE_ALLOCATED) || (slots == 0));
	ASSERT(!(flag & DNODE_MUST_BE_FREE) || (slots > 0));
	IMPLY(flag & DNODE_DRY_RUN, (tag == NULL) && (dnp == NULL));

	/*
	 * If you are holding the spa config lock as writer, you shouldn't
	 * be asking the DMU to do *anything* unless it's the root pool,
	 * which may require us to read from the root filesystem while
	 * holding some (not all) of the locks as writer.
	 */
	ASSERT(spa_config_held(os->os_spa, SCL_ALL, RW_WRITER) == 0 ||
	    (spa_is_root(os->os_spa) &&
	    spa_config_held(os->os_spa, SCL_STATE, RW_WRITER)));

	ASSERT((flag & DNODE_MUST_BE_ALLOCATED) || (flag & DNODE_MUST_BE_FREE));

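	/*
	 * The per-user, per-group, and per-project accounting objects are
	 * special: their dnodes live in the objset header rather than in
	 * the meta dnode, so they are handled up front.
	 */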
	if (object == DMU_USERUSED_OBJECT || object == DMU_GROUPUSED_OBJECT ||
	    object == DMU_PROJECTUSED_OBJECT) {
		if (object == DMU_USERUSED_OBJECT)
			dn = DMU_USERUSED_DNODE(os);
		else if (object == DMU_GROUPUSED_OBJECT)
			dn = DMU_GROUPUSED_DNODE(os);
		else
			dn = DMU_PROJECTUSED_DNODE(os);
		if (dn == NULL)
			return (SET_ERROR(ENOENT));
		type = dn->dn_type;
		if ((flag & DNODE_MUST_BE_ALLOCATED) && type == DMU_OT_NONE)
			return (SET_ERROR(ENOENT));
		if ((flag & DNODE_MUST_BE_FREE) && type != DMU_OT_NONE)
			return (SET_ERROR(EEXIST));
		DNODE_VERIFY(dn);
		/* Don't actually hold if dry run, just return 0 */
		if (!(flag & DNODE_DRY_RUN)) {
			(void) zfs_refcount_add(&dn->dn_holds, tag);
			*dnp = dn;
		}
		return (0);
	}

	if (object == 0 || object >= DN_MAX_OBJECT)
		return (SET_ERROR(EINVAL));

	mdn = DMU_META_DNODE(os);
	ASSERT(mdn->dn_object == DMU_META_DNODE_OBJECT);

	DNODE_VERIFY(mdn);

	if (!RW_WRITE_HELD(&mdn->dn_struct_rwlock)) {
		rw_enter(&mdn->dn_struct_rwlock, RW_READER);
		drop_struct_lock = TRUE;
	}

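	/* Find the meta dnode block that holds this object's dnode_phys_t. */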
	blk = dbuf_whichblock(mdn, 0, object * sizeof (dnode_phys_t));

	db = dbuf_hold(mdn, blk, FTAG);
	if (drop_struct_lock)
		rw_exit(&mdn->dn_struct_rwlock);

	/* ... */

			/*
			 * Someone else won the race and called dnode_create()
			 * after we checked DN_SLOT_IS_PTR() above but before
			 * we acquired the lock.
			 */
			if (DN_SLOT_IS_PTR(dnh->dnh_dnode)) {
				DNODE_STAT_BUMP(dnode_hold_alloc_lock_misses);
				dn = dnh->dnh_dnode;
			} else {
				dn = dnode_create(os, dn_block + idx, db,
				    object, dnh);
			}
		}

		mutex_enter(&dn->dn_mtx);
		if (dn->dn_type == DMU_OT_NONE || dn->dn_free_txg != 0) {
			DNODE_STAT_BUMP(dnode_hold_alloc_type_none);
			mutex_exit(&dn->dn_mtx);
			dnode_slots_rele(dnc, idx, slots);
			dbuf_rele(db, FTAG);
			return (SET_ERROR(ENOENT));
		}

		/* Don't actually hold if dry run, just return 0 */
		if (flag & DNODE_DRY_RUN) {
			mutex_exit(&dn->dn_mtx);
			dnode_slots_rele(dnc, idx, slots);
			dbuf_rele(db, FTAG);
			return (0);
		}

		DNODE_STAT_BUMP(dnode_hold_alloc_hits);
	} else if (flag & DNODE_MUST_BE_FREE) {

		if (idx + slots - 1 >= DNODES_PER_BLOCK) {
			DNODE_STAT_BUMP(dnode_hold_free_overflow);
			dbuf_rele(db, FTAG);
			return (SET_ERROR(ENOSPC));
		}

		while (dn == DN_SLOT_UNINIT) {
			dnode_slots_hold(dnc, idx, slots);

			if (!dnode_check_slots_free(dnc, idx, slots)) {
				DNODE_STAT_BUMP(dnode_hold_free_misses);
				dnode_slots_rele(dnc, idx, slots);
				dbuf_rele(db, FTAG);
				return (SET_ERROR(ENOSPC));
			}

			dnode_slots_rele(dnc, idx, slots);

			/* ... */

				dnode_reclaim_slots(dnc, idx + 1, slots - 1);

			dnh = &dnc->dnc_children[idx];
			if (DN_SLOT_IS_PTR(dnh->dnh_dnode)) {
				dn = dnh->dnh_dnode;
			} else {
				dn = dnode_create(os, dn_block + idx, db,
				    object, dnh);
			}
		}

		mutex_enter(&dn->dn_mtx);
		if (!zfs_refcount_is_zero(&dn->dn_holds) || dn->dn_free_txg) {
			DNODE_STAT_BUMP(dnode_hold_free_refcount);
			mutex_exit(&dn->dn_mtx);
			dnode_slots_rele(dnc, idx, slots);
			dbuf_rele(db, FTAG);
			return (SET_ERROR(EEXIST));
		}

		/* Don't actually hold if dry run, just return 0 */
		if (flag & DNODE_DRY_RUN) {
			mutex_exit(&dn->dn_mtx);
			dnode_slots_rele(dnc, idx, slots);
			dbuf_rele(db, FTAG);
			return (0);
		}

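		/*
		 * Mark any extra slots consumed by this multi-slot dnode
		 * as interior slots; a later hold on an interior slot
		 * fails rather than returning a dnode.
		 */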
		dnode_set_slots(dnc, idx + 1, slots - 1, DN_SLOT_INTERIOR);
		DNODE_STAT_BUMP(dnode_hold_free_hits);
	} else {
		dbuf_rele(db, FTAG);
		return (SET_ERROR(EINVAL));
	}

	ASSERT0(dn->dn_free_txg);

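	/*
	 * The first hold on a dnode also takes a hold on the containing
	 * dbuf, keeping the dnode buffer in memory for as long as the
	 * dnode is held; that dbuf hold is dropped when the last dnode
	 * hold is released (see dnode_rele_and_unlock() below).
	 */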
	if (zfs_refcount_add(&dn->dn_holds, tag) == 1)
		dbuf_add_ref(db, dnh);

	mutex_exit(&dn->dn_mtx);

	/* Now we can rely on the hold to prevent the dnode from moving. */
	dnode_slots_rele(dnc, idx, slots);

	DNODE_VERIFY(dn);
	ASSERT3P(dn->dn_dbuf, ==, db);
	ASSERT3U(dn->dn_object, ==, object);
	dbuf_rele(db, FTAG);

	*dnp = dn;
	return (0);
}

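/*
 * Example (illustrative sketch, not part of the original file): a typical
 * caller holds an allocated dnode, operates on it, and drops the hold.
 * Errors follow the table above dnode_hold_impl(); "do_work" is a
 * hypothetical placeholder for the caller's own logic.
 *
 *	dnode_t *dn;
 *	int err = dnode_hold_impl(os, object, DNODE_MUST_BE_ALLOCATED,
 *	    0, FTAG, &dn);
 *	if (err != 0)
 *		return (err);	// ENOENT, EEXIST, EIO, ...
 *	do_work(dn);
 *	dnode_rele(dn, FTAG);
 */
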
/*
 * Return held dnode if the object is allocated, NULL if not.
 */

/* ... */

	/*
	 * ... anyone calling dnode_rele() or dbuf_rele() without some
	 * other direct or indirect hold on the dnode must first drop the
	 * dnode handle.
	 */
	ASSERT(refs > 0 || dnh->dnh_zrlock.zr_owner != curthread);

	/* NOTE: the DNODE_DNODE does not have a dn_dbuf */
	if (refs == 0 && db != NULL) {
		/*
		 * Another thread could add a hold to the dnode handle in
		 * dnode_hold_impl() while holding the parent dbuf. Since the
		 * hold on the parent dbuf prevents the handle from being
		 * destroyed, the hold on the handle is OK. We can't yet assert
		 * that the handle has zero references, but that will be
		 * asserted anyway when the handle gets destroyed.
		 */
		mutex_enter(&db->db_mtx);
		dbuf_rele_and_unlock(db, dnh, evicting);
	}
}

/*
 * Test whether we can create a dnode at the specified location.
 */
int
dnode_try_claim(objset_t *os, uint64_t object, int slots)
{
	return (dnode_hold_impl(os, object, DNODE_MUST_BE_FREE | DNODE_DRY_RUN,
	    slots, NULL, NULL));
}

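/*
 * Example (illustrative sketch): probing whether a 2-slot dnode could be
 * created at a given object number before committing to it. A zero return
 * means the requested run of slots is currently free; a nonzero result
 * (e.g. ENOSPC or EEXIST) means it is not. Because DNODE_DRY_RUN takes no
 * hold, the answer is only advisory unless the caller prevents concurrent
 * allocation by other means.
 *
 *	if (dnode_try_claim(os, object, 2) == 0) {
 *		// object begins a free run of at least 2 slots
 *	}
 */
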
void
dnode_setdirty(dnode_t *dn, dmu_tx_t *tx)
{
	objset_t *os = dn->dn_objset;
	uint64_t txg = tx->tx_txg;

	if (DMU_OBJECT_IS_SPECIAL(dn->dn_object)) {
		dsl_dataset_dirty(os->os_dsl_dataset, tx);
		return;
	}

	DNODE_VERIFY(dn);

#ifdef ZFS_DEBUG
	mutex_enter(&dn->dn_mtx);
	ASSERT(dn->dn_phys->dn_type || dn->dn_allocated_txg);
	ASSERT(dn->dn_free_txg == 0 || dn->dn_free_txg >= txg);
	mutex_exit(&dn->dn_mtx);
#endif

	/* ... */