1 /*
2 * CDDL HEADER START
3 *
4 * This file and its contents are supplied under the terms of the
5 * Common Development and Distribution License ("CDDL"), version 1.0.
6 * You may only use this file in accordance with the terms of version
7 * 1.0 of the CDDL.
8 *
9 * A full copy of the text of the CDDL should have accompanied this
10 * source. A copy of the CDDL is also available via the Internet at
11 * http://www.illumos.org/license/CDDL.
12 *
13 * CDDL HEADER END
14 */
15
16 /*
17 * Copyright (c) 2014, 2017 by Delphix. All rights reserved.
18 */
19
20 #include <sys/zfs_context.h>
21 #include <sys/spa.h>
22 #include <sys/spa_impl.h>
23 #include <sys/vdev_impl.h>
24 #include <sys/fs/zfs.h>
25 #include <sys/zio.h>
26 #include <sys/zio_checksum.h>
27 #include <sys/metaslab.h>
28 #include <sys/refcount.h>
29 #include <sys/dmu.h>
30 #include <sys/vdev_indirect_mapping.h>
31 #include <sys/dmu_tx.h>
32 #include <sys/dsl_synctask.h>
33 #include <sys/zap.h>
34 #include <sys/abd.h>
35 #include <sys/zthr.h>
36
37 /*
38 * An indirect vdev corresponds to a vdev that has been removed. Since
39 * we cannot rewrite block pointers of snapshots, etc., we keep a
40 * mapping from old location on the removed device to the new location
41 * on another device in the pool and use this mapping whenever we need
42 * to access the DVA. Unfortunately, this mapping did not respect
43 * logical block boundaries when it was first created, and so a DVA on
44 * this indirect vdev may be "split" into multiple sections that each
45 * map to a different location. As a consequence, not all DVAs can be
46 * translated to an equivalent new DVA. Instead we must provide a
47 * "vdev_remap" operation that executes a callback on each contiguous
48 * segment of the new location. This function is used in multiple ways:
49 *
50 * - i/os to this vdev use the callback to determine where the
51 * data is now located, and issue child i/os for each segment's new
52 * location.
53 *
54 * - frees and claims to this vdev use the callback to free or claim
55 * each mapped segment. (Note that we don't actually need to claim
56 * log blocks on indirect vdevs, because we don't allocate to
57 * removing vdevs. However, zdb uses zio_claim() for its leak
58 * detection.)
59 */
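
/*
 * Illustrative sketch only, not part of the implementation: a hypothetical
 * vdev_remap callback that tallies the total concrete size mapped for a DVA
 * could look like the following, using the callback signature expected by
 * vdev_indirect_remap() (defined later in this file). The name
 * count_segments_cb, the variable "dva", and the uint64_t accumulator are
 * assumptions for the example; the early return skips the intermediate
 * indirect vdevs on which the callback is also invoked.
 *
 *	static void
 *	count_segments_cb(uint64_t split_offset, vdev_t *vd, uint64_t offset,
 *	    uint64_t size, void *arg)
 *	{
 *		uint64_t *total = arg;
 *
 *		if (vd->vdev_ops == &vdev_indirect_ops)
 *			return;
 *		*total += size;
 *	}
 *
 *	uint64_t total = 0;
 *	vdev_indirect_remap(vd, DVA_GET_OFFSET(dva), DVA_GET_ASIZE(dva),
 *	    count_segments_cb, &total);
 */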
60
61 /*
62 * "Big theory statement" for how we mark blocks obsolete.
63 *
64 * When a block on an indirect vdev is freed or remapped, a section of
65 * that vdev's mapping may no longer be referenced (aka "obsolete"). We
66 * keep track of how much of each mapping entry is obsolete. When
67 * an entry becomes completely obsolete, we can remove it, thus reducing
68 * the memory used by the mapping. The complete picture of obsolescence
69 * is given by the following data structures, described below:
70 * - the entry-specific obsolete count
71 * - the vdev-specific obsolete spacemap
72 * - the pool-specific obsolete bpobj
73 *
74 * == On disk data structures used ==
75 *
76 * We track the obsolete space for the pool using several objects. Each
77 * of these objects is created on demand and freed when no longer
78 * needed, and is assumed to be empty if it does not exist.
79 * SPA_FEATURE_OBSOLETE_COUNTS includes the count of these objects.
80 *
81 * - Each vic_mapping_object (associated with an indirect vdev) can
82 * have a vimp_counts_object. This is an array of uint32_t's
83 * with the same number of entries as the vic_mapping_object. When
84 * the mapping is condensed, entries from the vic_obsolete_sm_object
85 * (see below) are folded into the counts. Therefore, each
86 * obsolete_counts entry tells us the number of bytes in the
87 * corresponding mapping entry that were not referenced when the
88 * mapping was last condensed.
89 *
90 * - Each indirect or removing vdev can have a vic_obsolete_sm_object.
91 * This is a space map containing an alloc entry for every DVA that
92 * has been obsoleted since the last time this indirect vdev was
93 * condensed. We use this object in order to improve performance
94 * when marking a DVA as obsolete. Instead of modifying an arbitrary
95 * offset of the vimp_counts_object, we only need to append an entry
96 * to the end of this object. When a DVA becomes obsolete, it is
97 * added to the obsolete space map. This happens when the DVA is
98 * freed, remapped and not referenced by a snapshot, or the last
99 * snapshot referencing it is destroyed.
100 *
101 * - Each dataset can have a ds_remap_deadlist object. This is a
102 * deadlist object containing all blocks that were remapped in this
103 * dataset but referenced in a previous snapshot. Blocks can *only*
104 * appear on this list if they were remapped (dsl_dataset_block_remapped);
105 * blocks that were killed in a head dataset are put on the normal
106 * ds_deadlist and marked obsolete when they are freed.
107 *
108 * - The pool can have a dp_obsolete_bpobj. This is a list of blocks
109 * in the pool that need to be marked obsolete. When a snapshot is
110 * destroyed, we move some of the ds_remap_deadlist to the obsolete
111 * bpobj (see dsl_destroy_snapshot_handle_remaps()). We then
112 * asynchronously process the obsolete bpobj, moving its entries to
113 * the specific vdevs' obsolete space maps.
114 *
115 * == Summary of how we mark blocks as obsolete ==
116 *
117 * - When freeing a block: if any DVA is on an indirect vdev, append to
118 * vic_obsolete_sm_object.
119 * - When remapping a block, add dva to ds_remap_deadlist (if prev snap
120 * references; otherwise append to vic_obsolete_sm_object).
121 * - When freeing a snapshot: move parts of ds_remap_deadlist to
122 * dp_obsolete_bpobj (same algorithm as ds_deadlist).
123 * - When syncing the spa: process dp_obsolete_bpobj, moving ranges to
124 * individual vdev's vic_obsolete_sm_object.
125 */
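
/*
 * As a rough illustration (not an exact call graph), when a single block on
 * an indirect vdev is freed and the obsolete counts feature is enabled, the
 * range flows through the structures above via the functions defined later
 * in this file approximately as follows:
 *
 *	vdev_indirect_mark_obsolete(vd, offset, size)
 *		adds [offset, offset + size) to the in-memory range tree
 *		vd->vdev_obsolete_segments and dirties the vdev for the
 *		syncing txg
 *
 *	vdev_indirect_sync_obsolete(vd, tx)
 *		creates and opens vd->vdev_obsolete_sm on first use, appends
 *		the accumulated segments to it with space_map_write(), and
 *		vacates vd->vdev_obsolete_segments
 */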
126
127 /*
128 * "Big theory statement" for how we condense indirect vdevs.
129 *
130 * Condensing an indirect vdev's mapping is the process of determining
131 * the precise counts of obsolete space for each mapping entry (by
132 * integrating the obsolete spacemap into the obsolete counts) and
133 * writing out a new mapping that contains only referenced entries.
134 *
135 * We condense a vdev when we expect the mapping to shrink (see
136 * vdev_indirect_should_condense()), but only perform one condense at a
137 * time to limit the memory usage. In addition, we use a separate
138 * open-context thread (spa_condense_indirect_thread) to incrementally
139 * create the new mapping object in a way that minimizes the impact on
140 * the rest of the system.
141 *
142 * == Generating a new mapping ==
143 *
144 * To generate a new mapping, we follow these steps:
145 *
146 * 1. Save the old obsolete space map and create a new mapping object
147 * (see spa_condense_indirect_start_sync()). This initializes the
148 * spa_condensing_indirect_phys with the "previous obsolete space map",
149 * which is now read only. Newly obsolete DVAs will be added to a
150 * new (initially empty) obsolete space map, and will not be
151 * considered as part of this condense operation.
152 *
153 * 2. Construct in memory the precise counts of obsolete space for each
154 * mapping entry, by incorporating the obsolete space map into the
155 * counts. (See vdev_indirect_mapping_load_obsolete_{counts,spacemap}().)
156 *
157 * 3. Iterate through each mapping entry, writing to the new mapping any
158 * entries that are not completely obsolete (i.e. which don't have
159 * obsolete count == mapping length). (See
160 * spa_condense_indirect_generate_new_mapping().)
161 *
162 * 4. Destroy the old mapping object and switch over to the new one
163 * (spa_condense_indirect_complete_sync).
164 *
165 * == Restarting from failure ==
166 *
167 * To restart the condense when we import/open the pool, we must start
168 * at the 2nd step above: reconstruct the precise counts in memory,
169 * based on the space map + counts. Then in the 3rd step, we start
170 * iterating where we left off: at vimp_max_offset of the new mapping
171 * object.
172 */
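
/*
 * For orientation, the steps above correspond approximately to the following
 * functions (all called from, or defined, later in this file):
 *
 *	step 1:	spa_condense_indirect_start_sync()		(sync task)
 *	step 2:	vdev_indirect_mapping_load_obsolete_counts() and
 *		vdev_indirect_mapping_load_obsolete_spacemap(), called from
 *		spa_condense_indirect_thread()		(open-context zthr)
 *	step 3:	spa_condense_indirect_generate_new_mapping()
 *	step 4:	spa_condense_indirect_complete_sync()		(sync task)
 */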
173
174 boolean_t zfs_condense_indirect_vdevs_enable = B_TRUE;
175
176 /*
177 * Condense if at least this percent of the bytes in the mapping is
178 * obsolete. With the default of 25%, the amount of space mapped
179 * will be reduced to 1% of its original size after at most 16
180 * condenses. Higher values will condense less often (causing less
181 * i/o); lower values will reduce the mapping size more quickly.
182 */
183 int zfs_indirect_condense_obsolete_pct = 25;
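
/*
 * Worked arithmetic for the figure above: a condense is triggered only once
 * at least zfs_indirect_condense_obsolete_pct (25%) of the mapped bytes are
 * obsolete, so each condense removes at least 25% of the bytes mapped at
 * that time. After n condenses at most 0.75^n of the original mapping
 * remains, and 0.75^16 is approximately 0.01, i.e. about 1%.
 */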
184
185 /*
186 * Condense if the obsolete space map takes up more than this amount of
187 * space on disk (logically). This limits the amount of disk space
188 * consumed by the obsolete space map; the default of 1GB is small enough
189 * that we typically don't mind "wasting" it.
190 */
191 uint64_t zfs_condense_max_obsolete_bytes = 1024 * 1024 * 1024;
192
193 /*
194 * Don't bother condensing if the mapping uses less than this amount of
195 * memory. The default of 128KB is considered a "trivial" amount of
196 * memory and not worth reducing.
197 */
198 uint64_t zfs_condense_min_mapping_bytes = 128 * 1024;
199
200 /*
201 * This is used by the test suite so that it can ensure that certain
202 * actions happen while in the middle of a condense (which might otherwise
203 * complete too quickly). If used to reduce the performance impact of
204 * condensing in production, a maximum value of 1 should be sufficient.
205 */
206 int zfs_condense_indirect_commit_entry_delay_ticks = 0;
207
208 /*
209 * If an indirect split block contains more than this many possible unique
210 * combinations when being reconstructed, consider it too computationally
211 * expensive to check them all. Instead, try at most this many randomly-selected
212 * combinations each time the block is accessed. This allows all segment
213 * copies to participate fairly in the reconstruction when all combinations
214 * cannot be checked and prevents repeated use of one bad copy.
215 */
216 int zfs_reconstruct_indirect_combinations_max = 256;
217
219 /*
220 * If nonzero, 1 in this many blocks has its segment copies artificially
221 * damaged in order to validate reconstruction. Used by ztest.
222 */
223 unsigned long zfs_reconstruct_indirect_damage_fraction = 0;
224
225 /*
226 * The indirect_child_t represents the vdev that we will read from, when we
227 * need to read all copies of the data (e.g. for scrub or reconstruction).
228 * For plain (non-mirror) top-level vdevs (i.e. is_vdev is not a mirror),
229 * ic_vdev is the same as is_vdev. However, for mirror top-level vdevs,
230 * ic_vdev is a child of the mirror.
231 */
232 typedef struct indirect_child {
233 abd_t *ic_data;
234 vdev_t *ic_vdev;
235
236 /*
237 * ic_duplicate is NULL when the ic_data contents are unique; when the
238 * contents are determined to be a duplicate, it references the primary child.
239 */
240 struct indirect_child *ic_duplicate;
241 list_node_t ic_node; /* node on is_unique_child */
242 } indirect_child_t;
243
244 /*
245 * The indirect_split_t represents one mapped segment of an i/o to the
246 * indirect vdev. For non-split (contiguously-mapped) blocks, there will be
247 * only one indirect_split_t, with is_split_offset==0 and is_size==io_size.
248 * For split blocks, there will be several of these.
249 */
250 typedef struct indirect_split {
251 list_node_t is_node; /* link on iv_splits */
252
253 /*
254 * is_split_offset is the offset into the i/o.
255 * This is the sum of the previous splits' is_size's.
256 */
257 uint64_t is_split_offset;
258
259 vdev_t *is_vdev; /* top-level vdev */
260 uint64_t is_target_offset; /* offset on is_vdev */
261 uint64_t is_size;
262 int is_children; /* number of entries in is_child[] */
263 int is_unique_children; /* number of entries in is_unique_child */
264 list_t is_unique_child;
265
266 /*
267 * is_good_child is the child that we are currently using to
268 * attempt reconstruction.
269 */
270 indirect_child_t *is_good_child;
271
272 indirect_child_t is_child[1]; /* variable-length */
273 } indirect_split_t;
274
275 /*
276 * The indirect_vsd_t is associated with each i/o to the indirect vdev.
277 * It is the "Vdev-Specific Data" in the zio_t's io_vsd.
278 */
279 typedef struct indirect_vsd {
280 boolean_t iv_split_block;
281 boolean_t iv_reconstruct;
282 uint64_t iv_unique_combinations;
283 uint64_t iv_attempts;
284 uint64_t iv_attempts_max;
285
286 list_t iv_splits; /* list of indirect_split_t's */
287 } indirect_vsd_t;
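
/*
 * Hypothetical example of how the structures above fit together for one
 * read whose mapping was split into two segments, the first on a 2-way
 * mirror and the second on a plain (non-mirror) top-level vdev:
 *
 *	indirect_vsd_t (zio->io_vsd)
 *	    iv_splits:
 *		indirect_split_t A: is_split_offset = 0, is_size = X,
 *		    is_vdev = the mirror, is_children = 2,
 *		    is_child[0].ic_vdev = first mirror child,
 *		    is_child[1].ic_vdev = second mirror child
 *		indirect_split_t B: is_split_offset = X, is_size = io_size - X,
 *		    is_vdev = the plain vdev, is_children = 1,
 *		    is_child[0].ic_vdev = the plain vdev (same as is_vdev)
 */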
288
289 static void
290 vdev_indirect_map_free(zio_t *zio)
291 {
292 indirect_vsd_t *iv = zio->io_vsd;
293
294 indirect_split_t *is;
295 while ((is = list_head(&iv->iv_splits)) != NULL) {
296 for (int c = 0; c < is->is_children; c++) {
297 indirect_child_t *ic = &is->is_child[c];
298 if (ic->ic_data != NULL)
299 abd_free(ic->ic_data);
300 }
301 list_remove(&iv->iv_splits, is);
302
303 indirect_child_t *ic;
304 while ((ic = list_head(&is->is_unique_child)) != NULL)
305 list_remove(&is->is_unique_child, ic);
306
307 list_destroy(&is->is_unique_child);
308
309 kmem_free(is,
310 offsetof(indirect_split_t, is_child[is->is_children]));
311 }
312 kmem_free(iv, sizeof (*iv));
313 }
314
315 static const zio_vsd_ops_t vdev_indirect_vsd_ops = {
316 vdev_indirect_map_free,
317 zio_vsd_default_cksum_report
318 };

319 /*
320 * Mark the given offset and size as being obsolete.
321 */
322 void
323 vdev_indirect_mark_obsolete(vdev_t *vd, uint64_t offset, uint64_t size)
324 {
325 spa_t *spa = vd->vdev_spa;
326
327 ASSERT3U(vd->vdev_indirect_config.vic_mapping_object, !=, 0);
328 ASSERT(vd->vdev_removing || vd->vdev_ops == &vdev_indirect_ops);
329 ASSERT(size > 0);
330 VERIFY(vdev_indirect_mapping_entry_for_offset(
331 vd->vdev_indirect_mapping, offset) != NULL);
332
333 if (spa_feature_is_enabled(spa, SPA_FEATURE_OBSOLETE_COUNTS)) {
334 mutex_enter(&vd->vdev_obsolete_lock);
335 range_tree_add(vd->vdev_obsolete_segments, offset, size);
336 mutex_exit(&vd->vdev_obsolete_lock);
337 vdev_dirty(vd, 0, NULL, spa_syncing_txg(spa));
338 }
339 }
340
341 /*
342 * Mark the DVA vdev_id:offset:size as being obsolete in the given tx. This
343 * wrapper is provided because the DMU does not know about vdev_t's and
344 * cannot directly call vdev_indirect_mark_obsolete.
345 */
346 void
347 spa_vdev_indirect_mark_obsolete(spa_t *spa, uint64_t vdev_id, uint64_t offset,
348 uint64_t size, dmu_tx_t *tx)
349 {
350 vdev_t *vd = vdev_lookup_top(spa, vdev_id);
351 ASSERT(dmu_tx_is_syncing(tx));
352
353 /* The DMU can only remap indirect vdevs. */
354 ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);
355 vdev_indirect_mark_obsolete(vd, offset, size);
356 }
357
358 static spa_condensing_indirect_t *
359 spa_condensing_indirect_create(spa_t *spa)
360 {
361 spa_condensing_indirect_phys_t *scip =
362 &spa->spa_condensing_indirect_phys;
363 spa_condensing_indirect_t *sci = kmem_zalloc(sizeof (*sci), KM_SLEEP);
364 objset_t *mos = spa->spa_meta_objset;
365
366 for (int i = 0; i < TXG_SIZE; i++) {
367 list_create(&sci->sci_new_mapping_entries[i],
368 sizeof (vdev_indirect_mapping_entry_t),
369 offsetof(vdev_indirect_mapping_entry_t, vime_node));
370 }
371
372 sci->sci_new_mapping =
373 vdev_indirect_mapping_open(mos, scip->scip_next_mapping_object);
374
375 return (sci);
376 }
377
378 static void
379 spa_condensing_indirect_destroy(spa_condensing_indirect_t *sci)
380 {
381 for (int i = 0; i < TXG_SIZE; i++)
382 list_destroy(&sci->sci_new_mapping_entries[i]);
383
384 if (sci->sci_new_mapping != NULL)
385 vdev_indirect_mapping_close(sci->sci_new_mapping);
386
387 kmem_free(sci, sizeof (*sci));
388 }
389
390 boolean_t
391 vdev_indirect_should_condense(vdev_t *vd)
392 {
393 vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
394 spa_t *spa = vd->vdev_spa;
395
396 ASSERT(dsl_pool_sync_context(spa->spa_dsl_pool));
397
398 if (!zfs_condense_indirect_vdevs_enable)
399 return (B_FALSE);
400
401 /*
402 * We can only condense one indirect vdev at a time.
403 */
404 if (spa->spa_condensing_indirect != NULL)
405 return (B_FALSE);
406
407 if (spa_shutting_down(spa))
408 return (B_FALSE);
409
410 /*
411 * The mapping object size must not change while we are
412 * condensing, so we can only condense indirect vdevs
413 * (not vdevs that are still in the middle of being removed).
414 */
415 if (vd->vdev_ops != &vdev_indirect_ops)
416 return (B_FALSE);
417
418 /*
419 * If nothing new has been marked obsolete, there is no
420 * point in condensing.
421 */
422 if (vd->vdev_obsolete_sm == NULL) {
423 ASSERT0(vdev_obsolete_sm_object(vd));
424 return (B_FALSE);
425 }
426
427 ASSERT(vd->vdev_obsolete_sm != NULL);
428
429 ASSERT3U(vdev_obsolete_sm_object(vd), ==,
430 space_map_object(vd->vdev_obsolete_sm));
431
432 uint64_t bytes_mapped = vdev_indirect_mapping_bytes_mapped(vim);
433 uint64_t bytes_obsolete = space_map_allocated(vd->vdev_obsolete_sm);
434 uint64_t mapping_size = vdev_indirect_mapping_size(vim);
435 uint64_t obsolete_sm_size = space_map_length(vd->vdev_obsolete_sm);
436
437 ASSERT3U(bytes_obsolete, <=, bytes_mapped);
438
439 /*
440 * If a high percentage of the bytes that are mapped have become
441 * obsolete, condense (unless the mapping is already small enough).
442 * This has a good chance of reducing the amount of memory used
443 * by the mapping.
444 */
445 if (bytes_obsolete * 100 / bytes_mapped >=
446 zfs_indirect_condense_obsolete_pct &&
447 mapping_size > zfs_condense_min_mapping_bytes) {
448 zfs_dbgmsg("should condense vdev %llu because obsolete "
449 "spacemap covers %d%% of %lluMB mapping",
450 (u_longlong_t)vd->vdev_id,
451 (int)(bytes_obsolete * 100 / bytes_mapped),
452 (u_longlong_t)bytes_mapped / 1024 / 1024);
453 return (B_TRUE);
454 }
455
456 /*
457 * If the obsolete space map takes up too much space on disk,
458 * condense in order to free up this disk space.
459 */
460 if (obsolete_sm_size >= zfs_condense_max_obsolete_bytes) {
461 zfs_dbgmsg("should condense vdev %llu because obsolete sm "
462 "length %lluMB >= max size %lluMB",
463 (u_longlong_t)vd->vdev_id,
464 (u_longlong_t)obsolete_sm_size / 1024 / 1024,
465 (u_longlong_t)zfs_condense_max_obsolete_bytes /
466 1024 / 1024);
467 return (B_TRUE);
468 }
469
470 return (B_FALSE);
471 }
472
473 /*
474 * This sync task completes (finishes) a condense, deleting the old
475 * mapping and replacing it with the new one.
476 */
477 static void
478 spa_condense_indirect_complete_sync(void *arg, dmu_tx_t *tx)
479 {
480 spa_condensing_indirect_t *sci = arg;
481 spa_t *spa = dmu_tx_pool(tx)->dp_spa;
482 spa_condensing_indirect_phys_t *scip =
483 &spa->spa_condensing_indirect_phys;
484 vdev_t *vd = vdev_lookup_top(spa, scip->scip_vdev);
485 vdev_indirect_config_t *vic = &vd->vdev_indirect_config;
486 objset_t *mos = spa->spa_meta_objset;
487 vdev_indirect_mapping_t *old_mapping = vd->vdev_indirect_mapping;
488 uint64_t old_count = vdev_indirect_mapping_num_entries(old_mapping);
489 uint64_t new_count =
490 vdev_indirect_mapping_num_entries(sci->sci_new_mapping);
491
492 ASSERT(dmu_tx_is_syncing(tx));
493 ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);
494 ASSERT3P(sci, ==, spa->spa_condensing_indirect);
495 for (int i = 0; i < TXG_SIZE; i++) {
496 ASSERT(list_is_empty(&sci->sci_new_mapping_entries[i]));
497 }
498 ASSERT(vic->vic_mapping_object != 0);
499 ASSERT3U(vd->vdev_id, ==, scip->scip_vdev);
500 ASSERT(scip->scip_next_mapping_object != 0);
501 ASSERT(scip->scip_prev_obsolete_sm_object != 0);
502
503 /*
504 * Reset vdev_indirect_mapping to refer to the new object.
505 */
506 rw_enter(&vd->vdev_indirect_rwlock, RW_WRITER);
507 vdev_indirect_mapping_close(vd->vdev_indirect_mapping);
508 vd->vdev_indirect_mapping = sci->sci_new_mapping;
509 rw_exit(&vd->vdev_indirect_rwlock);
510
511 sci->sci_new_mapping = NULL;
512 vdev_indirect_mapping_free(mos, vic->vic_mapping_object, tx);
513 vic->vic_mapping_object = scip->scip_next_mapping_object;
514 scip->scip_next_mapping_object = 0;
515
516 space_map_free_obj(mos, scip->scip_prev_obsolete_sm_object, tx);
517 spa_feature_decr(spa, SPA_FEATURE_OBSOLETE_COUNTS, tx);
518 scip->scip_prev_obsolete_sm_object = 0;
519
520 scip->scip_vdev = 0;
521
522 VERIFY0(zap_remove(mos, DMU_POOL_DIRECTORY_OBJECT,
523 DMU_POOL_CONDENSING_INDIRECT, tx));
524 spa_condensing_indirect_destroy(spa->spa_condensing_indirect);
525 spa->spa_condensing_indirect = NULL;
526
527 zfs_dbgmsg("finished condense of vdev %llu in txg %llu: "
528 "new mapping object %llu has %llu entries "
529 "(was %llu entries)",
530 vd->vdev_id, dmu_tx_get_txg(tx), vic->vic_mapping_object,
531 new_count, old_count);
532
533 vdev_config_dirty(spa->spa_root_vdev);
534 }
535
536 /*
537 * This sync task appends entries to the new mapping object.
538 */
539 static void
540 spa_condense_indirect_commit_sync(void *arg, dmu_tx_t *tx)
541 {
542 spa_condensing_indirect_t *sci = arg;
543 uint64_t txg = dmu_tx_get_txg(tx);
544 spa_t *spa = dmu_tx_pool(tx)->dp_spa;
545
546 ASSERT(dmu_tx_is_syncing(tx));
547 ASSERT3P(sci, ==, spa->spa_condensing_indirect);
548
549 vdev_indirect_mapping_add_entries(sci->sci_new_mapping,
550 &sci->sci_new_mapping_entries[txg & TXG_MASK], tx);
551 ASSERT(list_is_empty(&sci->sci_new_mapping_entries[txg & TXG_MASK]));
552 }
553
554 /*
555 * Open-context function to add one entry to the new mapping. The new
556 * entry will be remembered and written from syncing context.
557 */
558 static void
559 spa_condense_indirect_commit_entry(spa_t *spa,
560 vdev_indirect_mapping_entry_phys_t *vimep, uint32_t count)
561 {
562 spa_condensing_indirect_t *sci = spa->spa_condensing_indirect;
563
564 ASSERT3U(count, <, DVA_GET_ASIZE(&vimep->vimep_dst));
565
566 dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
567 dmu_tx_hold_space(tx, sizeof (*vimep) + sizeof (count));
568 VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
569 int txgoff = dmu_tx_get_txg(tx) & TXG_MASK;
570
571 /*
572 * If we are the first entry committed this txg, kick off the sync
573 * task to write to the MOS on our behalf.
574 */
575 if (list_is_empty(&sci->sci_new_mapping_entries[txgoff])) {
576 dsl_sync_task_nowait(dmu_tx_pool(tx),
577 spa_condense_indirect_commit_sync, sci,
578 0, ZFS_SPACE_CHECK_NONE, tx);
579 }
580
581 vdev_indirect_mapping_entry_t *vime =
582 kmem_alloc(sizeof (*vime), KM_SLEEP);
583 vime->vime_mapping = *vimep;
584 vime->vime_obsolete_count = count;
585 list_insert_tail(&sci->sci_new_mapping_entries[txgoff], vime);
586
587 dmu_tx_commit(tx);
588 }
589
590 static void
591 spa_condense_indirect_generate_new_mapping(vdev_t *vd,
592 uint32_t *obsolete_counts, uint64_t start_index, zthr_t *zthr)
593 {
594 spa_t *spa = vd->vdev_spa;
595 uint64_t mapi = start_index;
596 vdev_indirect_mapping_t *old_mapping = vd->vdev_indirect_mapping;
597 uint64_t old_num_entries =
598 vdev_indirect_mapping_num_entries(old_mapping);
599
600 ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);
601 ASSERT3U(vd->vdev_id, ==, spa->spa_condensing_indirect_phys.scip_vdev);
602
603 zfs_dbgmsg("starting condense of vdev %llu from index %llu",
604 (u_longlong_t)vd->vdev_id,
605 (u_longlong_t)mapi);
606
607 while (mapi < old_num_entries) {
608
609 if (zthr_iscancelled(zthr)) {
610 zfs_dbgmsg("pausing condense of vdev %llu "
611 "at index %llu", (u_longlong_t)vd->vdev_id,
612 (u_longlong_t)mapi);
613 break;
614 }
615
616 vdev_indirect_mapping_entry_phys_t *entry =
617 &old_mapping->vim_entries[mapi];
618 uint64_t entry_size = DVA_GET_ASIZE(&entry->vimep_dst);
619 ASSERT3U(obsolete_counts[mapi], <=, entry_size);
620 if (obsolete_counts[mapi] < entry_size) {
621 spa_condense_indirect_commit_entry(spa, entry,
622 obsolete_counts[mapi]);
623
624 /*
625 * This delay may be requested for testing, debugging,
626 * or performance reasons.
627 */
628 delay(zfs_condense_indirect_commit_entry_delay_ticks);
629 }
630
631 mapi++;
632 }
633 }
634
635 /* ARGSUSED */
636 static boolean_t
637 spa_condense_indirect_thread_check(void *arg, zthr_t *zthr)
638 {
639 spa_t *spa = arg;
640
641 return (spa->spa_condensing_indirect != NULL);
642 }
643
644 /* ARGSUSED */
645 static int
646 spa_condense_indirect_thread(void *arg, zthr_t *zthr)
647 {
648 spa_t *spa = arg;
649 vdev_t *vd;
650
651 ASSERT3P(spa->spa_condensing_indirect, !=, NULL);
652 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
653 vd = vdev_lookup_top(spa, spa->spa_condensing_indirect_phys.scip_vdev);
654 ASSERT3P(vd, !=, NULL);
655 spa_config_exit(spa, SCL_VDEV, FTAG);
656
657 spa_condensing_indirect_t *sci = spa->spa_condensing_indirect;
658 spa_condensing_indirect_phys_t *scip =
659 &spa->spa_condensing_indirect_phys;
660 uint32_t *counts;
661 uint64_t start_index;
662 vdev_indirect_mapping_t *old_mapping = vd->vdev_indirect_mapping;
663 space_map_t *prev_obsolete_sm = NULL;
664
665 ASSERT3U(vd->vdev_id, ==, scip->scip_vdev);
666 ASSERT(scip->scip_next_mapping_object != 0);
667 ASSERT(scip->scip_prev_obsolete_sm_object != 0);
668 ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);
669
670 for (int i = 0; i < TXG_SIZE; i++) {
671 /*
672 * The list must start out empty in order for the
673 * _commit_sync() sync task to be properly registered
674 * on the first call to _commit_entry(); so it's wise
675 * to double check and ensure we actually are starting
676 * with empty lists.
677 */
678 ASSERT(list_is_empty(&sci->sci_new_mapping_entries[i]));
679 }
680
681 VERIFY0(space_map_open(&prev_obsolete_sm, spa->spa_meta_objset,
682 scip->scip_prev_obsolete_sm_object, 0, vd->vdev_asize, 0));
683 space_map_update(prev_obsolete_sm);
684 counts = vdev_indirect_mapping_load_obsolete_counts(old_mapping);
685 if (prev_obsolete_sm != NULL) {
686 vdev_indirect_mapping_load_obsolete_spacemap(old_mapping,
687 counts, prev_obsolete_sm);
688 }
689 space_map_close(prev_obsolete_sm);
690
691 /*
692 * Generate new mapping. Determine what index to continue from
693 * based on the max offset that we've already written in the
694 * new mapping.
695 */
696 uint64_t max_offset =
697 vdev_indirect_mapping_max_offset(sci->sci_new_mapping);
698 if (max_offset == 0) {
699 /* We haven't written anything to the new mapping yet. */
700 start_index = 0;
701 } else {
702 /*
703 * Pick up from where we left off. _entry_for_offset()
704 * returns a pointer into the vim_entries array. If
705 * max_offset is greater than any of the mappings
706 * contained in the table, NULL will be returned and
707 * that indicates we've exhausted our iteration of the
708 * old_mapping.
709 */
710
711 vdev_indirect_mapping_entry_phys_t *entry =
712 vdev_indirect_mapping_entry_for_offset_or_next(old_mapping,
713 max_offset);
714
715 if (entry == NULL) {
716 /*
717 * We've already written the whole new mapping.
718 * This special value will cause us to skip the
719 * generate_new_mapping step and just do the sync
720 * task to complete the condense.
721 */
722 start_index = UINT64_MAX;
723 } else {
724 start_index = entry - old_mapping->vim_entries;
725 ASSERT3U(start_index, <,
726 vdev_indirect_mapping_num_entries(old_mapping));
727 }
728 }
729
730 spa_condense_indirect_generate_new_mapping(vd, counts,
731 start_index, zthr);
732
733 vdev_indirect_mapping_free_obsolete_counts(old_mapping, counts);
734
735 /*
736 * If the zthr has received a cancellation signal while running
737 * in generate_new_mapping() or at any point after that, then bail
738 * early. We don't want to complete the condense if the spa is
739 * shutting down.
740 */
741 if (zthr_iscancelled(zthr))
742 return (0);
743
744 VERIFY0(dsl_sync_task(spa_name(spa), NULL,
745 spa_condense_indirect_complete_sync, sci, 0,
746 ZFS_SPACE_CHECK_EXTRA_RESERVED));
747
748 return (0);
749 }
750
751 /*
752 * Sync task to begin the condensing process.
753 */
754 void
755 spa_condense_indirect_start_sync(vdev_t *vd, dmu_tx_t *tx)
756 {
757 spa_t *spa = vd->vdev_spa;
758 spa_condensing_indirect_phys_t *scip =
759 &spa->spa_condensing_indirect_phys;
760
761 ASSERT0(scip->scip_next_mapping_object);
762 ASSERT0(scip->scip_prev_obsolete_sm_object);
763 ASSERT0(scip->scip_vdev);
764 ASSERT(dmu_tx_is_syncing(tx));
765 ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);
766 ASSERT(spa_feature_is_active(spa, SPA_FEATURE_OBSOLETE_COUNTS));
767 ASSERT(vdev_indirect_mapping_num_entries(vd->vdev_indirect_mapping));
768
769 uint64_t obsolete_sm_obj = vdev_obsolete_sm_object(vd);
770 ASSERT(obsolete_sm_obj != 0);
771
772 scip->scip_vdev = vd->vdev_id;
773 scip->scip_next_mapping_object =
774 vdev_indirect_mapping_alloc(spa->spa_meta_objset, tx);
775
776 scip->scip_prev_obsolete_sm_object = obsolete_sm_obj;
777
778 /*
779 * We don't need to allocate a new space map object, since
780 * vdev_indirect_sync_obsolete will allocate one when needed.
781 */
782 space_map_close(vd->vdev_obsolete_sm);
783 vd->vdev_obsolete_sm = NULL;
784 VERIFY0(zap_remove(spa->spa_meta_objset, vd->vdev_top_zap,
785 VDEV_TOP_ZAP_INDIRECT_OBSOLETE_SM, tx));
786
787 VERIFY0(zap_add(spa->spa_dsl_pool->dp_meta_objset,
788 DMU_POOL_DIRECTORY_OBJECT,
789 DMU_POOL_CONDENSING_INDIRECT, sizeof (uint64_t),
790 sizeof (*scip) / sizeof (uint64_t), scip, tx));
791
792 ASSERT3P(spa->spa_condensing_indirect, ==, NULL);
793 spa->spa_condensing_indirect = spa_condensing_indirect_create(spa);
794
795 zfs_dbgmsg("starting condense of vdev %llu in txg %llu: "
796 "posm=%llu nm=%llu",
797 vd->vdev_id, dmu_tx_get_txg(tx),
798 (u_longlong_t)scip->scip_prev_obsolete_sm_object,
799 (u_longlong_t)scip->scip_next_mapping_object);
800
801 zthr_wakeup(spa->spa_condense_zthr);
802 }
803
804 /*
805 * Sync to the given vdev's obsolete space map any segments that are no longer
806 * referenced as of the given txg.
807 *
808 * If the obsolete space map doesn't exist yet, create and open it.
809 */
810 void
811 vdev_indirect_sync_obsolete(vdev_t *vd, dmu_tx_t *tx)
812 {
813 spa_t *spa = vd->vdev_spa;
814 vdev_indirect_config_t *vic = &vd->vdev_indirect_config;
815
816 ASSERT3U(vic->vic_mapping_object, !=, 0);
817 ASSERT(range_tree_space(vd->vdev_obsolete_segments) > 0);
818 ASSERT(vd->vdev_removing || vd->vdev_ops == &vdev_indirect_ops);
819 ASSERT(spa_feature_is_enabled(spa, SPA_FEATURE_OBSOLETE_COUNTS));
820
821 if (vdev_obsolete_sm_object(vd) == 0) {
822 uint64_t obsolete_sm_object =
823 space_map_alloc(spa->spa_meta_objset,
824 vdev_standard_sm_blksz, tx);
825
826 ASSERT(vd->vdev_top_zap != 0);
827 VERIFY0(zap_add(vd->vdev_spa->spa_meta_objset, vd->vdev_top_zap,
828 VDEV_TOP_ZAP_INDIRECT_OBSOLETE_SM,
829 sizeof (obsolete_sm_object), 1, &obsolete_sm_object, tx));
830 ASSERT3U(vdev_obsolete_sm_object(vd), !=, 0);
831
832 spa_feature_incr(spa, SPA_FEATURE_OBSOLETE_COUNTS, tx);
833 VERIFY0(space_map_open(&vd->vdev_obsolete_sm,
834 spa->spa_meta_objset, obsolete_sm_object,
835 0, vd->vdev_asize, 0));
836 space_map_update(vd->vdev_obsolete_sm);
837 }
838
839 ASSERT(vd->vdev_obsolete_sm != NULL);
840 ASSERT3U(vdev_obsolete_sm_object(vd), ==,
841 space_map_object(vd->vdev_obsolete_sm));
842
843 space_map_write(vd->vdev_obsolete_sm,
844 vd->vdev_obsolete_segments, SM_ALLOC, SM_NO_VDEVID, tx);
845 space_map_update(vd->vdev_obsolete_sm);
846 range_tree_vacate(vd->vdev_obsolete_segments, NULL, NULL);
847 }
848
849 int
850 spa_condense_init(spa_t *spa)
851 {
852 int error = zap_lookup(spa->spa_meta_objset,
853 DMU_POOL_DIRECTORY_OBJECT,
854 DMU_POOL_CONDENSING_INDIRECT, sizeof (uint64_t),
855 sizeof (spa->spa_condensing_indirect_phys) / sizeof (uint64_t),
856 &spa->spa_condensing_indirect_phys);
857 if (error == 0) {
858 if (spa_writeable(spa)) {
859 spa->spa_condensing_indirect =
860 spa_condensing_indirect_create(spa);
861 }
862 return (0);
863 } else if (error == ENOENT) {
864 return (0);
865 } else {
866 return (error);
867 }
868 }
869
870 void
871 spa_condense_fini(spa_t *spa)
872 {
873 if (spa->spa_condensing_indirect != NULL) {
874 spa_condensing_indirect_destroy(spa->spa_condensing_indirect);
875 spa->spa_condensing_indirect = NULL;
876 }
877 }
878
879 void
880 spa_start_indirect_condensing_thread(spa_t *spa)
881 {
882 ASSERT3P(spa->spa_condense_zthr, ==, NULL);
883 spa->spa_condense_zthr = zthr_create(spa_condense_indirect_thread_check,
884 spa_condense_indirect_thread, spa);
885 }
886
887 /*
888 * Gets the obsolete spacemap object from the vdev's ZAP.
889 * Returns the spacemap object, or 0 if it wasn't in the ZAP or the ZAP doesn't
890 * exist yet.
891 */
892 int
893 vdev_obsolete_sm_object(vdev_t *vd)
894 {
895 ASSERT0(spa_config_held(vd->vdev_spa, SCL_ALL, RW_WRITER));
896 if (vd->vdev_top_zap == 0) {
897 return (0);
898 }
899
900 uint64_t sm_obj = 0;
901 int err = zap_lookup(vd->vdev_spa->spa_meta_objset, vd->vdev_top_zap,
902 VDEV_TOP_ZAP_INDIRECT_OBSOLETE_SM, sizeof (sm_obj), 1, &sm_obj);
903
904 ASSERT(err == 0 || err == ENOENT);
905
906 return (sm_obj);
907 }
908
909 boolean_t
910 vdev_obsolete_counts_are_precise(vdev_t *vd)
911 {
912 ASSERT0(spa_config_held(vd->vdev_spa, SCL_ALL, RW_WRITER));
913 if (vd->vdev_top_zap == 0) {
914 return (B_FALSE);
915 }
916
917 uint64_t val = 0;
918 int err = zap_lookup(vd->vdev_spa->spa_meta_objset, vd->vdev_top_zap,
919 VDEV_TOP_ZAP_OBSOLETE_COUNTS_ARE_PRECISE, sizeof (val), 1, &val);
920
921 ASSERT(err == 0 || err == ENOENT);
922
923 return (val != 0);
924 }
925
926 /* ARGSUSED */
927 static void
928 vdev_indirect_close(vdev_t *vd)
929 {
930 }
931
932 /* ARGSUSED */
933 static int
934 vdev_indirect_open(vdev_t *vd, uint64_t *psize, uint64_t *max_psize,
935 uint64_t *ashift)
936 {
937 *psize = *max_psize = vd->vdev_asize +
938 VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE;
939 *ashift = vd->vdev_ashift;
940 return (0);
941 }
942
943 typedef struct remap_segment {
944 vdev_t *rs_vd;
945 uint64_t rs_offset;
946 uint64_t rs_asize;
947 uint64_t rs_split_offset;
948 list_node_t rs_node;
949 } remap_segment_t;
950
951 remap_segment_t *
952 rs_alloc(vdev_t *vd, uint64_t offset, uint64_t asize, uint64_t split_offset)
953 {
954 remap_segment_t *rs = kmem_alloc(sizeof (remap_segment_t), KM_SLEEP);
955 rs->rs_vd = vd;
956 rs->rs_offset = offset;
957 rs->rs_asize = asize;
958 rs->rs_split_offset = split_offset;
959 return (rs);
960 }
961
962 /*
963 * Given an indirect vdev and an extent on that vdev, this function duplicates the
964 * physical entries of the indirect mapping that correspond to the extent
965 * to a new array and returns a pointer to it. In addition, copied_entries
966 * is populated with the number of mapping entries that were duplicated.
967 *
968 * Note that the function assumes that the caller holds vdev_indirect_rwlock.
969 * This ensures that the mapping won't change due to condensing as we
970 * copy over its contents.
971 *
972 * Finally, since we are doing an allocation, it is up to the caller to
973 * free the array allocated in this function.
974 */
975 vdev_indirect_mapping_entry_phys_t *
976 vdev_indirect_mapping_duplicate_adjacent_entries(vdev_t *vd, uint64_t offset,
977 uint64_t asize, uint64_t *copied_entries)
978 {
979 vdev_indirect_mapping_entry_phys_t *duplicate_mappings = NULL;
980 vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
981 uint64_t entries = 0;
982
983 ASSERT(RW_READ_HELD(&vd->vdev_indirect_rwlock));
984
985 vdev_indirect_mapping_entry_phys_t *first_mapping =
986 vdev_indirect_mapping_entry_for_offset(vim, offset);
987 ASSERT3P(first_mapping, !=, NULL);
988
989 vdev_indirect_mapping_entry_phys_t *m = first_mapping;
990 while (asize > 0) {
991 uint64_t size = DVA_GET_ASIZE(&m->vimep_dst);
992
993 ASSERT3U(offset, >=, DVA_MAPPING_GET_SRC_OFFSET(m));
994 ASSERT3U(offset, <, DVA_MAPPING_GET_SRC_OFFSET(m) + size);
995
996 uint64_t inner_offset = offset - DVA_MAPPING_GET_SRC_OFFSET(m);
997 uint64_t inner_size = MIN(asize, size - inner_offset);
998
999 offset += inner_size;
1000 asize -= inner_size;
1001 entries++;
1002 m++;
1003 }
1004
1005 size_t copy_length = entries * sizeof (*first_mapping);
1006 duplicate_mappings = kmem_alloc(copy_length, KM_SLEEP);
1007 bcopy(first_mapping, duplicate_mappings, copy_length);
1008 *copied_entries = entries;
1009
1010 return (duplicate_mappings);
1011 }
1012
1013 /*
1014 * Goes through the relevant indirect mappings until it hits a concrete vdev
1015 * and issues the callback. On the way to the concrete vdev, if any other
1016 * indirect vdevs are encountered, then the callback will also be called on
1017 * each of those indirect vdevs. For example, if the segment is mapped to
1018 * segment A on indirect vdev 1, and then segment A on indirect vdev 1 is
1019 * mapped to segment B on concrete vdev 2, then the callback will be called on
1020 * both vdev 1 and vdev 2.
1021 *
1022 * While the callback passed to vdev_indirect_remap() is called on every vdev
1023 * the function encounters, certain callbacks only care about concrete vdevs.
1024 * These types of callbacks should return immediately and explicitly when they
1025 * are called on an indirect vdev.
1026 *
1027 * Because there is a possibility that a DVA section in the indirect device
1028 * has been split into multiple sections in our mapping, we keep track
1029 * of the relevant contiguous segments of the new location (remap_segment_t)
1030 * in a stack. This way we can call the callback for each of the new sections
1031 * created by a single section of the indirect device. Note though, that in
1032 * this scenario the callbacks in each split block won't occur in-order in
1033 * terms of offset, so callers should not make any assumptions about that.
1034 *
1035 * For callbacks that don't handle split blocks and immediately return when
1036 * they encounter them (as is the case for remap_blkptr_cb), the caller can
1037 * assume that its callback will be applied from the first indirect vdev
1038 * encountered to the last one and then the concrete vdev, in that order.
1039 */
1040 static void
1041 vdev_indirect_remap(vdev_t *vd, uint64_t offset, uint64_t asize,
1042 void (*func)(uint64_t, vdev_t *, uint64_t, uint64_t, void *), void *arg)
1043 {
1044 list_t stack;
1045 spa_t *spa = vd->vdev_spa;
1046
1047 list_create(&stack, sizeof (remap_segment_t),
1048 offsetof(remap_segment_t, rs_node));
1049
1050 for (remap_segment_t *rs = rs_alloc(vd, offset, asize, 0);
1051 rs != NULL; rs = list_remove_head(&stack)) {
1052 vdev_t *v = rs->rs_vd;
1053 uint64_t num_entries = 0;
1054
1055 ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);
1056 ASSERT(rs->rs_asize > 0);
1057
1058 /*
1059 * Note: As this function can be called from open context
1060 * (e.g. zio_read()), we need the following rwlock to
1061 * prevent the mapping from being changed by condensing.
1062 *
1063 * So we grab the lock and we make a copy of the entries
1064 * that are relevant to the extent that we are working on.
1065 * Once that is done, we drop the lock and iterate over
1066 * our copy of the mapping. Once we are done with
1067 * the remap segment and we free it, we also free our copy
1068 * of the indirect mapping entries that are relevant to it.
1069 *
1070 * This way we don't need to wait until the function is
1071 * finished with a segment to condense it. In addition, we
1072 * don't need a recursive rwlock for the case that a call to
1073 * vdev_indirect_remap() needs to call itself (through the
1074 * codepath of its callback) for the same vdev in the middle
1075 * of its execution.
1076 */
1077 rw_enter(&v->vdev_indirect_rwlock, RW_READER);
1078 vdev_indirect_mapping_t *vim = v->vdev_indirect_mapping;
1079 ASSERT3P(vim, !=, NULL);
1080
1081 vdev_indirect_mapping_entry_phys_t *mapping =
1082 vdev_indirect_mapping_duplicate_adjacent_entries(v,
1083 rs->rs_offset, rs->rs_asize, &num_entries);
1084 ASSERT3P(mapping, !=, NULL);
1085 ASSERT3U(num_entries, >, 0);
1086 rw_exit(&v->vdev_indirect_rwlock);
1087
1088 for (uint64_t i = 0; i < num_entries; i++) {
1089 /*
1090 * Note: the vdev_indirect_mapping can not change
1091 * while we are running. It only changes while the
1092 * removal is in progress, and then only from syncing
1093 * context. While a removal is in progress, this
1094 * function is only called for frees, which also only
1095 * happen from syncing context.
1096 */
1097 vdev_indirect_mapping_entry_phys_t *m = &mapping[i];
1098
1099 ASSERT3P(m, !=, NULL);
1100 ASSERT3U(rs->rs_asize, >, 0);
1101
1102 uint64_t size = DVA_GET_ASIZE(&m->vimep_dst);
1103 uint64_t dst_offset = DVA_GET_OFFSET(&m->vimep_dst);
1104 uint64_t dst_vdev = DVA_GET_VDEV(&m->vimep_dst);
1105
1106 ASSERT3U(rs->rs_offset, >=,
1107 DVA_MAPPING_GET_SRC_OFFSET(m));
1108 ASSERT3U(rs->rs_offset, <,
1109 DVA_MAPPING_GET_SRC_OFFSET(m) + size);
1110 ASSERT3U(dst_vdev, !=, v->vdev_id);
1111
1112 uint64_t inner_offset = rs->rs_offset -
1113 DVA_MAPPING_GET_SRC_OFFSET(m);
1114 uint64_t inner_size =
1115 MIN(rs->rs_asize, size - inner_offset);
1116
1117 vdev_t *dst_v = vdev_lookup_top(spa, dst_vdev);
1118 ASSERT3P(dst_v, !=, NULL);
1119
1120 if (dst_v->vdev_ops == &vdev_indirect_ops) {
1121 list_insert_head(&stack,
1122 rs_alloc(dst_v, dst_offset + inner_offset,
1123 inner_size, rs->rs_split_offset));
1124
1125 }
1126
1127 if ((zfs_flags & ZFS_DEBUG_INDIRECT_REMAP) &&
1128 IS_P2ALIGNED(inner_size, 2 * SPA_MINBLOCKSIZE)) {
1129 /*
1130 * Note: This clause exists solely for
1131 * testing purposes. We use it to ensure that
1132 * split blocks work and that the callbacks
1133 * using them yield the same result if issued
1134 * in reverse order.
1135 */
1136 uint64_t inner_half = inner_size / 2;
1137
1138 func(rs->rs_split_offset + inner_half, dst_v,
1139 dst_offset + inner_offset + inner_half,
1140 inner_half, arg);
1141
1142 func(rs->rs_split_offset, dst_v,
1143 dst_offset + inner_offset,
1144 inner_half, arg);
1145 } else {
1146 func(rs->rs_split_offset, dst_v,
1147 dst_offset + inner_offset,
1148 inner_size, arg);
1149 }
1150
1151 rs->rs_offset += inner_size;
1152 rs->rs_asize -= inner_size;
1153 rs->rs_split_offset += inner_size;
1154 }
1155 VERIFY0(rs->rs_asize);
1156
1157 kmem_free(mapping, num_entries * sizeof (*mapping));
1158 kmem_free(rs, sizeof (remap_segment_t));
1159 }
1160 list_destroy(&stack);
1161 }
1162
1163 static void
1164 vdev_indirect_child_io_done(zio_t *zio)
1165 {
1166 zio_t *pio = zio->io_private;
1167
1168 mutex_enter(&pio->io_lock);
1169 pio->io_error = zio_worst_error(pio->io_error, zio->io_error);
1170 mutex_exit(&pio->io_lock);
1171
1172 abd_put(zio->io_abd);
1173 }
1174
1175 /*
1176 * This is a callback for vdev_indirect_remap() which allocates an
1177 * indirect_split_t for each split segment and adds it to iv_splits.
1178 */
1179 static void
1180 vdev_indirect_gather_splits(uint64_t split_offset, vdev_t *vd, uint64_t offset,
1181 uint64_t size, void *arg)
1182 {
1183 zio_t *zio = arg;
1184 indirect_vsd_t *iv = zio->io_vsd;
1185
1186 ASSERT3P(vd, !=, NULL);
1187
1188 if (vd->vdev_ops == &vdev_indirect_ops)
1189 return;
1190
1191 int n = 1;
1192 if (vd->vdev_ops == &vdev_mirror_ops)
1193 n = vd->vdev_children;
1194
1195 indirect_split_t *is =
1196 kmem_zalloc(offsetof(indirect_split_t, is_child[n]), KM_SLEEP);
1197
1198 is->is_children = n;
1199 is->is_size = size;
1200 is->is_split_offset = split_offset;
1201 is->is_target_offset = offset;
1202 is->is_vdev = vd;
1203 list_create(&is->is_unique_child, sizeof (indirect_child_t),
1204 offsetof(indirect_child_t, ic_node));
1205
1206 /*
1207 * Note that we only consider multiple copies of the data for
1208 * *mirror* vdevs. We don't for "replacing" or "spare" vdevs, even
1209 * though they use the same ops as mirror, because there's only one
1210 * "good" copy under the replacing/spare.
1211 */
1212 if (vd->vdev_ops == &vdev_mirror_ops) {
1213 for (int i = 0; i < n; i++) {
1214 is->is_child[i].ic_vdev = vd->vdev_child[i];
1215 list_link_init(&is->is_child[i].ic_node);
1216 }
1217 } else {
1218 is->is_child[0].ic_vdev = vd;
1219 }
1220
1221 list_insert_tail(&iv->iv_splits, is);
1222 }
1223
1224 static void
1225 vdev_indirect_read_split_done(zio_t *zio)
1226 {
1227 indirect_child_t *ic = zio->io_private;
1228
1229 if (zio->io_error != 0) {
1230 /*
1231 * Clear ic_data to indicate that we do not have data for this
1232 * child.
1233 */
1234 abd_free(ic->ic_data);
1235 ic->ic_data = NULL;
1236 }
1237 }
1238
1239 /*
1240 * Issue reads for all copies (mirror children) of all splits.
1241 */
1242 static void
1243 vdev_indirect_read_all(zio_t *zio)
1244 {
1245 indirect_vsd_t *iv = zio->io_vsd;
1246
1247 for (indirect_split_t *is = list_head(&iv->iv_splits);
1248 is != NULL; is = list_next(&iv->iv_splits, is)) {
1249 for (int i = 0; i < is->is_children; i++) {
1250 indirect_child_t *ic = &is->is_child[i];
1251
1252 if (!vdev_readable(ic->ic_vdev))
1253 continue;
1254
1255 /*
1256 * Note, we may read from a child whose DTL
1257 * indicates that the data may not be present here.
1258 * While this might result in a few i/os that will
1259 * likely return incorrect data, it simplifies the
1260 * code since we can treat scrub and resilver
1261 * identically. (The incorrect data will be
1262 * detected and ignored when we verify the
1263 * checksum.)
1264 */
1265
1266 ic->ic_data = abd_alloc_sametype(zio->io_abd,
1267 is->is_size);
1268 ic->ic_duplicate = NULL;
1269
1270 zio_nowait(zio_vdev_child_io(zio, NULL,
1271 ic->ic_vdev, is->is_target_offset, ic->ic_data,
1272 is->is_size, zio->io_type, zio->io_priority, 0,
1273 vdev_indirect_read_split_done, ic));
1274 }
1275 }
1276 iv->iv_reconstruct = B_TRUE;
1277 }
1278
1279 static void
1280 vdev_indirect_io_start(zio_t *zio)
1281 {
1282 spa_t *spa = zio->io_spa;
1283 indirect_vsd_t *iv = kmem_zalloc(sizeof (*iv), KM_SLEEP);
1284 list_create(&iv->iv_splits,
1285 sizeof (indirect_split_t), offsetof(indirect_split_t, is_node));
1286
1287 zio->io_vsd = iv;
1288 zio->io_vsd_ops = &vdev_indirect_vsd_ops;
1289
1290 ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);
1291 if (zio->io_type != ZIO_TYPE_READ) {
1292 ASSERT3U(zio->io_type, ==, ZIO_TYPE_WRITE);
1293 /*
1294 * Note: this code can handle other kinds of writes,
1295 * but we don't expect them.
1296 */
1297 ASSERT((zio->io_flags & (ZIO_FLAG_SELF_HEAL |
1298 ZIO_FLAG_RESILVER | ZIO_FLAG_INDUCE_DAMAGE)) != 0);
1299 }
1300
1301 vdev_indirect_remap(zio->io_vd, zio->io_offset, zio->io_size,
1302 vdev_indirect_gather_splits, zio);
1303
1304 indirect_split_t *first = list_head(&iv->iv_splits);
1305 if (first->is_size == zio->io_size) {
1306 /*
1307 * This is not a split block; we are pointing to the entire
1308 * data, which will checksum the same as the original data.
1309 * Pass the BP down so that the child i/o can verify the
1310 * checksum, and try a different location if available
1311 * (e.g. on a mirror).
1312 *
1313 * While this special case could be handled the same as the
1314 * general (split block) case, doing it this way ensures
1315 * that the vast majority of blocks on indirect vdevs
1316 * (which are not split) are handled identically to blocks
1317 * on non-indirect vdevs. This allows us to be less strict
1318 * about performance in the general (but rare) case.
1319 */
1320 ASSERT0(first->is_split_offset);
1321 ASSERT3P(list_next(&iv->iv_splits, first), ==, NULL);
1322 zio_nowait(zio_vdev_child_io(zio, zio->io_bp,
1323 first->is_vdev, first->is_target_offset,
1324 abd_get_offset(zio->io_abd, 0),
1325 zio->io_size, zio->io_type, zio->io_priority, 0,
1326 vdev_indirect_child_io_done, zio));
1327 } else {
1328 iv->iv_split_block = B_TRUE;
1329 if (zio->io_flags & (ZIO_FLAG_SCRUB | ZIO_FLAG_RESILVER)) {
1330 /*
1331 * Read all copies. Note that for simplicity,
1332 * we don't bother consulting the DTL in the
1333 * resilver case.
1334 */
1335 vdev_indirect_read_all(zio);
1336 } else {
1337 /*
1338 * Read one copy of each split segment, from the
1339 * top-level vdev. Since we don't know the
1340 * checksum of each split individually, the child
1341 * zio can't ensure that we get the right data.
1342 * E.g. if it's a mirror, it will just read from a
1343 * random (healthy) leaf vdev. We have to verify
1344 * the checksum in vdev_indirect_io_done().
1345 */
1346 for (indirect_split_t *is = list_head(&iv->iv_splits);
1347 is != NULL; is = list_next(&iv->iv_splits, is)) {
1348 zio_nowait(zio_vdev_child_io(zio, NULL,
1349 is->is_vdev, is->is_target_offset,
1350 abd_get_offset(zio->io_abd,
1351 is->is_split_offset),
1352 is->is_size, zio->io_type,
1353 zio->io_priority, 0,
1354 vdev_indirect_child_io_done, zio));
1355 }
1356 }
1357 }
1358
1359 zio_execute(zio);
1360 }
1361
1362 /*
1363 * Report a checksum error for a child.
1364 */
1365 static void
1366 vdev_indirect_checksum_error(zio_t *zio,
1367 indirect_split_t *is, indirect_child_t *ic)
1368 {
1369 vdev_t *vd = ic->ic_vdev;
1370
1371 if (zio->io_flags & ZIO_FLAG_SPECULATIVE)
1372 return;
1373
1374 mutex_enter(&vd->vdev_stat_lock);
1375 vd->vdev_stat.vs_checksum_errors++;
1376 mutex_exit(&vd->vdev_stat_lock);
1377
1378 zio_bad_cksum_t zbc = { 0 };
1379 void *bad_buf = abd_borrow_buf_copy(ic->ic_data, is->is_size);
1380 abd_t *good_abd = is->is_good_child->ic_data;
1381 void *good_buf = abd_borrow_buf_copy(good_abd, is->is_size);
1382 zfs_ereport_post_checksum(zio->io_spa, vd, zio,
1383 is->is_target_offset, is->is_size, good_buf, bad_buf, &zbc);
1384 abd_return_buf(ic->ic_data, bad_buf, is->is_size);
1385 abd_return_buf(good_abd, good_buf, is->is_size);
1386 }
1387
1388 /*
1389 * Issue repair i/os for any incorrect copies. We do this by comparing
1390 * each split segment's correct data (is_good_child's ic_data) with each
1391 * other copy of the data. If they differ, then we overwrite the bad data
1392 * with the good copy. Note that we do this without regard for the DTL's,
1393 * which simplifies this code and also issues the optimal number of writes
1394 * (based on which copies actually read bad data, as opposed to which we
1395 * think might be wrong). For the same reason, we always use
1396 * ZIO_FLAG_SELF_HEAL, to bypass the DTL check in zio_vdev_io_start().
1397 */
1398 static void
1399 vdev_indirect_repair(zio_t *zio)
1400 {
1401 indirect_vsd_t *iv = zio->io_vsd;
1402
1403 enum zio_flag flags = ZIO_FLAG_IO_REPAIR;
1404
1405 if (!(zio->io_flags & (ZIO_FLAG_SCRUB | ZIO_FLAG_RESILVER)))
1406 flags |= ZIO_FLAG_SELF_HEAL;
1407
1408 if (!spa_writeable(zio->io_spa))
1409 return;
1410
1411 for (indirect_split_t *is = list_head(&iv->iv_splits);
1412 is != NULL; is = list_next(&iv->iv_splits, is)) {
1413 for (int c = 0; c < is->is_children; c++) {
1414 indirect_child_t *ic = &is->is_child[c];
1415 if (ic == is->is_good_child)
1416 continue;
1417 if (ic->ic_data == NULL)
1418 continue;
1419 if (ic->ic_duplicate == is->is_good_child)
1420 continue;
1421
1422 zio_nowait(zio_vdev_child_io(zio, NULL,
1423 ic->ic_vdev, is->is_target_offset,
1424 is->is_good_child->ic_data, is->is_size,
1425 ZIO_TYPE_WRITE, ZIO_PRIORITY_ASYNC_WRITE,
1426 ZIO_FLAG_IO_REPAIR | ZIO_FLAG_SELF_HEAL,
1427 NULL, NULL));
1428
1429 vdev_indirect_checksum_error(zio, is, ic);
1430 }
1431 }
1432 }
1433
1434 /*
1435 * Report checksum errors on all children that we read from.
1436 */
1437 static void
1438 vdev_indirect_all_checksum_errors(zio_t *zio)
1439 {
1440 indirect_vsd_t *iv = zio->io_vsd;
1441
1442 if (zio->io_flags & ZIO_FLAG_SPECULATIVE)
1443 return;
1444
1445 for (indirect_split_t *is = list_head(&iv->iv_splits);
1446 is != NULL; is = list_next(&iv->iv_splits, is)) {
1447 for (int c = 0; c < is->is_children; c++) {
1448 indirect_child_t *ic = &is->is_child[c];
1449
1450 if (ic->ic_data == NULL)
1451 continue;
1452
1453 vdev_t *vd = ic->ic_vdev;
1454
1455 mutex_enter(&vd->vdev_stat_lock);
1456 vd->vdev_stat.vs_checksum_errors++;
1457 mutex_exit(&vd->vdev_stat_lock);
1458
1459 zfs_ereport_post_checksum(zio->io_spa, vd, zio,
1460 is->is_target_offset, is->is_size,
1461 NULL, NULL, NULL);
1462 }
1463 }
1464 }
1465
1466 /*
1467 * Copy data from all the splits to the main zio, then validate the checksum.
1468 * If the checksum is successfully validated, return success.
1469 */
1470 static int
1471 vdev_indirect_splits_checksum_validate(indirect_vsd_t *iv, zio_t *zio)
1472 {
1473 zio_bad_cksum_t zbc;
1474
1475 for (indirect_split_t *is = list_head(&iv->iv_splits);
1476 is != NULL; is = list_next(&iv->iv_splits, is)) {
1477
1478 ASSERT3P(is->is_good_child->ic_data, !=, NULL);
1479 ASSERT3P(is->is_good_child->ic_duplicate, ==, NULL);
1480
1481 abd_copy_off(zio->io_abd, is->is_good_child->ic_data,
1482 is->is_split_offset, 0, is->is_size);
1483 }
1484
1485 return (zio_checksum_error(zio, &zbc));
1486 }
1487
1488 /*
1489 * There are relatively few possible combinations, so it is feasible to check
1490 * them all deterministically. We do this by setting each split's good_child
1491 * to the next unique version; on reaching the end of a split's list we wrap
1492 * around and carry into the next split (like counting, where each digit's
1493 * base is that split's is_unique_children; see the worked example below).
1494 */
1495 static int
1496 vdev_indirect_splits_enumerate_all(indirect_vsd_t *iv, zio_t *zio)
1497 {
1498 boolean_t more = B_TRUE;
1499
1500 iv->iv_attempts = 0;
1501
1502 for (indirect_split_t *is = list_head(&iv->iv_splits);
1503 is != NULL; is = list_next(&iv->iv_splits, is))
1504 is->is_good_child = list_head(&is->is_unique_child);
1505
1506 while (more == B_TRUE) {
1507 iv->iv_attempts++;
1508 more = B_FALSE;
1509
1510 if (vdev_indirect_splits_checksum_validate(iv, zio) == 0)
1511 return (0);
1512
1513 for (indirect_split_t *is = list_head(&iv->iv_splits);
1514 is != NULL; is = list_next(&iv->iv_splits, is)) {
1515 is->is_good_child = list_next(&is->is_unique_child,
1516 is->is_good_child);
1517 if (is->is_good_child != NULL) {
1518 more = B_TRUE;
1519 break;
1520 }
1521
1522 is->is_good_child = list_head(&is->is_unique_child);
1523 }
1524 }
1525
1526 ASSERT3S(iv->iv_attempts, <=, iv->iv_unique_combinations);
1527
1528 return (SET_ERROR(ECKSUM));
1529 }
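
/*
 * Worked example of the enumeration above, with hypothetical counts: if the
 * first split has 2 unique children and the second has 3, then
 * iv_unique_combinations is 6 and the (first, second) good-child selections
 * are tried in this order (indices into each split's is_unique_child list),
 * low "digit" first:
 *
 *	(0,0) (1,0) (0,1) (1,1) (0,2) (1,2)
 */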
1530
1531 /*
1532 * There are too many combinations to try all of them in a reasonable amount
1533 * of time. So try a fixed number of random combinations from the unique
1534 * split versions, after which we'll consider the block unrecoverable.
1535 */
1536 static int
1537 vdev_indirect_splits_enumerate_randomly(indirect_vsd_t *iv, zio_t *zio)
1538 {
1539 iv->iv_attempts = 0;
1540
1541 while (iv->iv_attempts < iv->iv_attempts_max) {
1542 iv->iv_attempts++;
1543
1544 for (indirect_split_t *is = list_head(&iv->iv_splits);
1545 is != NULL; is = list_next(&iv->iv_splits, is)) {
1546 indirect_child_t *ic = list_head(&is->is_unique_child);
1547 int children = is->is_unique_children;
1548
1549 for (int i = spa_get_random(children); i > 0; i--)
1550 ic = list_next(&is->is_unique_child, ic);
1551
1552 ASSERT3P(ic, !=, NULL);
1553 is->is_good_child = ic;
1554 }
1555
1556 if (vdev_indirect_splits_checksum_validate(iv, zio) == 0)
1557 return (0);
1558 }
1559
1560 return (SET_ERROR(ECKSUM));
1561 }
1562
1563 /*
1564 * This is a validation function for reconstruction. It randomly selects
1565 * a good combination, if one can be found, and then it intentionally
1566 * damages all other segment copies by zeroing them. This forces the
1567 * reconstruction algorithm to locate the one remaining known good copy.
1568 */
1569 static int
1570 vdev_indirect_splits_damage(indirect_vsd_t *iv, zio_t *zio)
1571 {
1572 /* Presume all the copies are unique for initial selection. */
1573 for (indirect_split_t *is = list_head(&iv->iv_splits);
1574 is != NULL; is = list_next(&iv->iv_splits, is)) {
1575 is->is_unique_children = 0;
1576
1577 for (int i = 0; i < is->is_children; i++) {
1578 indirect_child_t *ic = &is->is_child[i];
1579 if (ic->ic_data != NULL) {
1580 is->is_unique_children++;
1581 list_insert_tail(&is->is_unique_child, ic);
1582 }
1583 }
1584 }
1585
1586 /*
1587 * Set each is_good_child to a randomly-selected child which
1588 * is known to contain validated data.
1589 */
1590 int error = vdev_indirect_splits_enumerate_randomly(iv, zio);
1591 if (error)
1592 goto out;
1593
1594 /*
1595 * Damage all but the known good copy of each split by zeroing them. This
1596 * leaves at most two unique copies per indirect_split_t (the good copy and
1597 * the identical zeroed copies); both may need to be checked to reconstruct
1598 * the block. Set iv->iv_attempts_max such that all unique combinations
1599 * will be enumerated, but limit the damage to at most 16 indirect splits.
1600 */
1601 iv->iv_attempts_max = 1;
1602
1603 for (indirect_split_t *is = list_head(&iv->iv_splits);
1604 is != NULL; is = list_next(&iv->iv_splits, is)) {
1605 for (int c = 0; c < is->is_children; c++) {
1606 indirect_child_t *ic = &is->is_child[c];
1607
1608 if (ic == is->is_good_child)
1609 continue;
1610 if (ic->ic_data == NULL)
1611 continue;
1612
1613 abd_zero(ic->ic_data, ic->ic_data->abd_size);
1614 }
1615
1616 iv->iv_attempts_max *= 2;
1617 if (iv->iv_attempts_max > (1ULL << 16)) {
1618 iv->iv_attempts_max = UINT64_MAX;
1619 break;
1620 }
1621 }
1622
1623 out:
1624 /* Empty the unique children lists so they can be reconstructed. */
1625 for (indirect_split_t *is = list_head(&iv->iv_splits);
1626 is != NULL; is = list_next(&iv->iv_splits, is)) {
1627 indirect_child_t *ic;
1628 while ((ic = list_head(&is->is_unique_child)) != NULL)
1629 list_remove(&is->is_unique_child, ic);
1630
1631 is->is_unique_children = 0;
1632 }
1633
1634 return (error);
1635 }
1636
1637 /*
1638 * This function is called when we have read all copies of the data and need
1639 * to try to find a combination of copies that gives us the right checksum.
1640 *
1641 * If we pointed to any mirror vdevs, this effectively does the job of the
1642 * mirror. The mirror vdev code can't do its own job because we don't know
1643 * the checksum of each split segment individually.
1644 *
1645 * We have to try every unique combination of copies of split segments, until
1646 * we find one that checksums correctly. Duplicate segment copies are first
1647 * identified and later skipped during reconstruction. This optimization
1648 * reduces the search space and ensures that of the remaining combinations
1649 * at most one is correct.
1650 *
1651 * When the total number of combinations is small they can all be checked.
1652 * For example, if we have 3 segments in the split, and each points to a
1653 * 2-way mirror with unique copies, we will have the following pieces of data:
1654 *
1655 * | mirror child
1656 * split | [0] [1]
1657 * ======|=====================
1658 * A | data_A_0 data_A_1
1659 * B | data_B_0 data_B_1
1660 * C | data_C_0 data_C_1
1661 *
1662 * We will try the following (mirror children)^(number of splits) (2^3=8)
1663 * combinations, which is similar to bitwise-little-endian counting in
1664 * binary. In general each "digit" corresponds to a split segment, and the
1665 * base of each digit is that split's is_unique_children, which can be
1666 * different for each digit.
1667 *
1668 * "low bit" "high bit"
1669 * v v
1670 * data_A_0 data_B_0 data_C_0
1671 * data_A_1 data_B_0 data_C_0
1672 * data_A_0 data_B_1 data_C_0
1673 * data_A_1 data_B_1 data_C_0
1674 * data_A_0 data_B_0 data_C_1
1675 * data_A_1 data_B_0 data_C_1
1676 * data_A_0 data_B_1 data_C_1
1677 * data_A_1 data_B_1 data_C_1
1678 *
1679 * Note that the split segments may be on the same or different top-level
1680 * vdevs. In either case, we may need to try lots of combinations (see
1681 * zfs_reconstruct_indirect_combinations_max). This ensures that if a mirror
1682 * has small silent errors on all of its children, we can still reconstruct
1683 * the correct data, as long as those errors are at sufficiently-separated
1684 * offsets (specifically, separated by the largest block size - default of
1685 * 128KB, but up to 16MB).
1686 */
1687 static void
1688 vdev_indirect_reconstruct_io_done(zio_t *zio)
1689 {
1690 indirect_vsd_t *iv = zio->io_vsd;
1691 boolean_t known_good = B_FALSE;
1692 int error;
1693
1694 iv->iv_unique_combinations = 1;
1695 iv->iv_attempts_max = UINT64_MAX;
1696
1697 if (zfs_reconstruct_indirect_combinations_max > 0)
1698 iv->iv_attempts_max = zfs_reconstruct_indirect_combinations_max;
1699
1700 /*
1701 * If zfs_reconstruct_indirect_damage_fraction is nonzero, 1 in that many
1702 * blocks is damaged in order to validate reconstruction when there are
1703 * split segments with damaged copies. known_good will be B_TRUE when
 * reconstruction is known to be possible.
1704 */
1705 if (zfs_reconstruct_indirect_damage_fraction != 0 &&
1706 spa_get_random(zfs_reconstruct_indirect_damage_fraction) == 0)
1707 known_good = (vdev_indirect_splits_damage(iv, zio) == 0);
1708
1709 /*
1710 * Determine the unique children for a split segment and add them
1711 * to the is_unique_child list. By restricting reconstruction
1712 * to these children, only unique combinations will be considered.
1713 * This can vastly reduce the search space when there are a large
1714 * number of indirect splits.
1715 */
1716 for (indirect_split_t *is = list_head(&iv->iv_splits);
1717 is != NULL; is = list_next(&iv->iv_splits, is)) {
1718 is->is_unique_children = 0;
1719
1720 for (int i = 0; i < is->is_children; i++) {
1721 indirect_child_t *ic_i = &is->is_child[i];
1722
1723 if (ic_i->ic_data == NULL ||
1724 ic_i->ic_duplicate != NULL)
1725 continue;
1726
1727 for (int j = i + 1; j < is->is_children; j++) {
1728 indirect_child_t *ic_j = &is->is_child[j];
1729
1730 if (ic_j->ic_data == NULL ||
1731 ic_j->ic_duplicate != NULL)
1732 continue;
1733
1734 if (abd_cmp(ic_i->ic_data, ic_j->ic_data,
1735 is->is_size) == 0) {
1736 ic_j->ic_duplicate = ic_i;
1737 }
1738 }
1739
1740 is->is_unique_children++;
1741 list_insert_tail(&is->is_unique_child, ic_i);
1742 }
1743
1744 /* Reconstruction is impossible, no valid children */
1745 EQUIV(list_is_empty(&is->is_unique_child),
1746 is->is_unique_children == 0);
1747 if (list_is_empty(&is->is_unique_child)) {
1748 zio->io_error = EIO;
1749 vdev_indirect_all_checksum_errors(zio);
1750 zio_checksum_verified(zio);
1751 return;
1752 }
1753
1754 iv->iv_unique_combinations *= is->is_unique_children;
1755 }
1756
1757 if (iv->iv_unique_combinations <= iv->iv_attempts_max)
1758 error = vdev_indirect_splits_enumerate_all(iv, zio);
1759 else
1760 error = vdev_indirect_splits_enumerate_randomly(iv, zio);
1761
1762 if (error != 0) {
1763 /* All attempted combinations failed. */
1764 ASSERT3B(known_good, ==, B_FALSE);
1765 zio->io_error = error;
1766 vdev_indirect_all_checksum_errors(zio);
1767 } else {
1768 /*
1769 * The checksum has been successfully validated. Issue
1770 * repair I/Os to any copies of splits which don't match
1771 * the validated version.
1772 */
1773 ASSERT0(vdev_indirect_splits_checksum_validate(iv, zio));
1774 vdev_indirect_repair(zio);
1775 zio_checksum_verified(zio);
1776 }
1777 }
1778
1779 static void
1780 vdev_indirect_io_done(zio_t *zio)
1781 {
1782 indirect_vsd_t *iv = zio->io_vsd;
1783
1784 if (iv->iv_reconstruct) {
1785 /*
1786 * We have read all copies of the data (e.g. from mirrors),
1787 * either because this was a scrub/resilver, or because the
1788 * one-copy read didn't checksum correctly.
1789 */
1790 vdev_indirect_reconstruct_io_done(zio);
1791 return;
1792 }
1793
1794 if (!iv->iv_split_block) {
1795 /*
1796 * This was not a split block, so we passed the BP down,
1797 * and the checksum was handled by the (one) child zio.
1798 */
1799 return;
1800 }
1801
1802 zio_bad_cksum_t zbc;
1803 int ret = zio_checksum_error(zio, &zbc);
1804 if (ret == 0) {
1805 zio_checksum_verified(zio);
1806 return;
1807 }
1808
1809 /*
1810 * The checksum didn't match. Read all copies of all splits, and
1811 * then we will try to reconstruct. The next time
1812 * vdev_indirect_io_done() is called, iv_reconstruct will be set.
1813 */
1814 vdev_indirect_read_all(zio);
1815
1816 zio_vdev_io_redone(zio);
1817 }
1818
1819 vdev_ops_t vdev_indirect_ops = {
1820 vdev_indirect_open,
1821 vdev_indirect_close,
1822 vdev_default_asize,
1823 vdev_indirect_io_start,
1824 vdev_indirect_io_done,
1825 NULL,
1826 NULL,
1827 NULL,
1828 vdev_indirect_remap,
1829 NULL,
1830 VDEV_TYPE_INDIRECT, /* name of this vdev type */
1831 B_FALSE /* leaf vdev */
1832 };