457 lwb->lwb_sz = BP_GET_LSIZE(bp) - sizeof (zil_chain_t);
458 }
459
460 mutex_enter(&zilog->zl_lock);
461 list_insert_tail(&zilog->zl_lwb_list, lwb);
462 mutex_exit(&zilog->zl_lock);
463
464 return (lwb);
465 }
466
467 /*
468 * Called when we create in-memory log transactions so that we know
469 * to cleanup the itxs at the end of spa_sync().
470 */
/*
 * Mark this zilog dirty in the given txg by queueing it on the pool's
 * dp_dirty_zilogs txg-list.  Snapshots are immutable, so attempting to
 * dirty one is a fatal logic error (panic).
 *
 * txg_list_add() returns nonzero only when the zilog was not already on
 * the list for this txg, so the dbuf hold below is taken at most once
 * per txg; presumably the matching release happens after spa_sync()
 * processes dp_dirty_zilogs — not visible in this chunk, confirm in the
 * sync path.
 */
471 void
472 zilog_dirty(zilog_t *zilog, uint64_t txg)
473 {
474 dsl_pool_t *dp = zilog->zl_dmu_pool;
475 dsl_dataset_t *ds = dmu_objset_ds(zilog->zl_os);
476
477 if (dsl_dataset_is_snapshot(ds))
478 panic("dirtying snapshot!");
479
480 if (txg_list_add(&dp->dp_dirty_zilogs, zilog, txg)) {
481 /* up the hold count until we can be written out */
482 dmu_buf_add_ref(ds->ds_dbuf, zilog);
483 }
484 }
485
/*
 * Return B_TRUE if this zilog is currently queued on the pool's
 * dp_dirty_zilogs list in any of the TXG_SIZE in-flight transaction
 * groups, B_FALSE otherwise.  Purely a membership probe; does not
 * modify any state.
 */
486 boolean_t
487 zilog_is_dirty(zilog_t *zilog)
488 {
489 dsl_pool_t *dp = zilog->zl_dmu_pool;
490
/* Probe every slot of the txg ring; dirty in any open txg counts. */
491 for (int t = 0; t < TXG_SIZE; t++) {
492 if (txg_list_member(&dp->dp_dirty_zilogs, zilog, t))
493 return (B_TRUE);
494 }
495 return (B_FALSE);
496 }
497
|
457 lwb->lwb_sz = BP_GET_LSIZE(bp) - sizeof (zil_chain_t);
458 }
459
460 mutex_enter(&zilog->zl_lock);
461 list_insert_tail(&zilog->zl_lwb_list, lwb);
462 mutex_exit(&zilog->zl_lock);
463
464 return (lwb);
465 }
466
467 /*
468 * Called when we create in-memory log transactions so that we know
469 * to cleanup the itxs at the end of spa_sync().
470 */
/*
 * Mark this zilog dirty in the given txg by queueing it on the pool's
 * dp_dirty_zilogs txg-list.  Snapshots are immutable, so attempting to
 * dirty one is a fatal logic error (panic); this variant reads the
 * cached ds_is_snapshot boolean directly rather than calling an
 * accessor.
 *
 * txg_list_add() returns nonzero only when the zilog was not already on
 * the list for this txg, so the dbuf hold below is taken at most once
 * per txg; presumably the matching release happens after spa_sync()
 * processes dp_dirty_zilogs — not visible in this chunk, confirm in the
 * sync path.
 */
471 void
472 zilog_dirty(zilog_t *zilog, uint64_t txg)
473 {
474 dsl_pool_t *dp = zilog->zl_dmu_pool;
475 dsl_dataset_t *ds = dmu_objset_ds(zilog->zl_os);
476
477 if (ds->ds_is_snapshot)
478 panic("dirtying snapshot!");
479
480 if (txg_list_add(&dp->dp_dirty_zilogs, zilog, txg)) {
481 /* up the hold count until we can be written out */
482 dmu_buf_add_ref(ds->ds_dbuf, zilog);
483 }
484 }
485
/*
 * Return B_TRUE if this zilog is currently queued on the pool's
 * dp_dirty_zilogs list in any of the TXG_SIZE in-flight transaction
 * groups, B_FALSE otherwise.  Purely a membership probe; does not
 * modify any state.
 */
486 boolean_t
487 zilog_is_dirty(zilog_t *zilog)
488 {
489 dsl_pool_t *dp = zilog->zl_dmu_pool;
490
/* Probe every slot of the txg ring; dirty in any open txg counts. */
491 for (int t = 0; t < TXG_SIZE; t++) {
492 if (txg_list_member(&dp->dp_dirty_zilogs, zilog, t))
493 return (B_TRUE);
494 }
495 return (B_FALSE);
496 }
497
|