NEX-18589 checksum errors on SSD-based pool
Reviewed by: Roman Strashkin <roman.strashkin@nexenta.com>
Reviewed by: Sanjay Nadkarni <sanjay.nadkarni@nexenta.com>
NEX-9554 dsl_scan.c internals contain some confusingly similar function names for handling the dataset and block sorting queues
Reviewed by: Yuri Pankov <yuri.pankov@nexenta.com>
Reviewed by: Roman Strashkin <roman.strashkin@nexenta.com>
NEX-9562 Attaching a vdev while resilver/scrub is running causes panic
Reviewed by: Roman Strashkin <roman.strashkin@nexenta.com>
Reviewed by: Sanjay Nadkarni <sanjay.nadkarni@nexenta.com>
NEX-6088 ZFS scrub/resilver take excessively long due to issuing lots of random IO
Reviewed by: Roman Strashkin <roman.strashkin@nexenta.com>
Reviewed by: Sanjay Nadkarni <sanjay.nadkarni@nexenta.com>
@@ -18,11 +18,11 @@
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012, 2017 by Delphix. All rights reserved.
+ * Copyright (c) 2012, 2014 by Delphix. All rights reserved.
* Copyright (c) 2017 Datto Inc.
*/
#ifndef _SYS_DSL_SCAN_H
#define _SYS_DSL_SCAN_H
@@ -115,18 +115,61 @@
/* for freeing blocks */
boolean_t scn_is_bptree;
boolean_t scn_async_destroying;
boolean_t scn_async_stalled;
- uint64_t scn_async_block_min_time_ms;
-
- /* for debugging / information */
uint64_t scn_visited_this_txg;
dsl_scan_phys_t scn_phys;
+ dsl_scan_phys_t scn_phys_cached;
+
+ /*
+ * With multi-threaded sync, we need to make sure scn_queue
+ * access is properly serialized.
+ */
+ kmutex_t scn_queue_lock;
+ avl_tree_t scn_queue;
+
+ /*
+ * These signal how much work is pending from the scanner to the
+ * reader. Whenever the queue of zios grows, scn_bytes_pending grows
+ * by the corresponding amount. Once a read for a block has been issued
+ * (whether in-order or out-of-order later on), scn_bytes_issued is
+ * incremented by the amount of data consumed from scn_bytes_pending.
+ * After a scan has completed, scn_bytes_pending will be 0 and
+ * scn_bytes_issued will hold the total amount of data read (see the
+ * accounting sketch following this hunk).
+ *
+ * Lock ordering:
+ * scn_status_lock may only be held on its own or AFTER grabbing
+ * a vdev_scan_queue_lock, never BEFORE vdev_scan_queue_lock.
+ */
+ kmutex_t scn_status_lock;
+ uint64_t scn_bytes_pending;
+ uint64_t scn_bytes_issued;
+
+ boolean_t scn_clearing;
+ boolean_t scn_checkpointing;
+ uint64_t scn_last_checkpoint;
+ taskq_t *scn_taskq;
+
+ uint64_t scn_last_queue_run_time;
+ uint64_t scn_last_dequeue_limit;
+
+ /* protects scn_is_sorted and scn_done_ds */
+ kmutex_t scn_sorted_lock;
+ /*
+ * Flag denoting whether we're running an out-of-order sorting scan
+ * or a legacy non-sorting inline scan. This changes our checking
+ * behavior.
+ */
+ boolean_t scn_is_sorted;
} dsl_scan_t;
+typedef struct dsl_scan_io_queue dsl_scan_io_queue_t;
+
+void dsl_scan_global_init(void);
+void dsl_scan_global_fini(void);
+
int dsl_scan_init(struct dsl_pool *dp, uint64_t txg);
void dsl_scan_fini(struct dsl_pool *dp);
void dsl_scan_sync(struct dsl_pool *, dmu_tx_t *);
int dsl_scan_cancel(struct dsl_pool *);
int dsl_scan(struct dsl_pool *, pool_scan_func_t);
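
To make the new synchronization comments concrete, here is a minimal
sketch of how the fields above are meant to interact. It is
illustrative only, not code from this change: scan_account_issued_sketch,
scan_queue_insert_sketch, and the per-vdev vdev_scan_queue_lock field
are assumptions drawn from the comments; only the dsl_scan_t members
come from the struct itself.

/*
 * Illustrative sketch only -- not part of this change.
 */
static void
scan_queue_insert_sketch(dsl_scan_t *scn, void *node)
{
	/*
	 * With multi-threaded sync, the AVL queue is only touched
	 * while holding scn_queue_lock.
	 */
	mutex_enter(&scn->scn_queue_lock);
	avl_add(&scn->scn_queue, node);
	mutex_exit(&scn->scn_queue_lock);
}

static void
scan_account_issued_sketch(dsl_scan_t *scn, vdev_t *vd, uint64_t size)
{
	if (!scn->scn_is_sorted) {
		/* Legacy inline scan: no queuing or byte accounting. */
		return;
	}

	/*
	 * Documented lock order: a vdev_scan_queue_lock (assumed
	 * per-vdev field) is taken BEFORE scn_status_lock, never the
	 * reverse.
	 */
	mutex_enter(&vd->vdev_scan_queue_lock);
	mutex_enter(&scn->scn_status_lock);
	scn->scn_bytes_pending -= size;	/* a queued read was issued... */
	scn->scn_bytes_issued += size;	/* ...so move the bytes over */
	mutex_exit(&scn->scn_status_lock);
	mutex_exit(&vd->vdev_scan_queue_lock);
}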
@@ -140,10 +183,13 @@
void dsl_scan_ds_destroyed(struct dsl_dataset *ds, struct dmu_tx *tx);
void dsl_scan_ds_snapshotted(struct dsl_dataset *ds, struct dmu_tx *tx);
void dsl_scan_ds_clone_swapped(struct dsl_dataset *ds1, struct dsl_dataset *ds2,
struct dmu_tx *tx);
boolean_t dsl_scan_active(dsl_scan_t *scn);
+void dsl_scan_freed(spa_t *spa, const blkptr_t *bp);
+void dsl_scan_io_queue_destroy(dsl_scan_io_queue_t *queue);
+void dsl_scan_io_queue_vdev_xfer(vdev_t *svd, vdev_t *tvd);
boolean_t dsl_scan_is_paused_scrub(const dsl_scan_t *scn);
#ifdef __cplusplus
}
#endif
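
The new entry points are easiest to read with their intended call
sites in mind. The following is a sketch under assumed callers, not
the actual integration in this change: dsl_scan_freed() lets the SPA
prune a just-freed block from the sorted scan queues, and
dsl_scan_io_queue_vdev_xfer() hands the per-vdev queue from a source
vdev to its replacement so a running scrub/resilver survives a vdev
attach (the panic fixed by NEX-9562).

/*
 * Illustrative sketch only -- assumed call sites, not code from this
 * change.
 */
static void
on_vdev_attach_sketch(vdev_t *svd, vdev_t *tvd)
{
	/*
	 * Hand any queued scan I/Os from the old top-level vdev to the
	 * new one so the in-flight scrub/resilver keeps running.
	 */
	dsl_scan_io_queue_vdev_xfer(svd, tvd);
}

static void
on_block_free_sketch(spa_t *spa, const blkptr_t *bp)
{
	/* Prune the freed block from the sorted scan queues. */
	dsl_scan_freed(spa, bp);
}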