1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
25 * Copyright (c) 2011 by Delphix. All rights reserved.
26 */
27
28 /*
29 * This file contains all the routines used when modifying on-disk SPA state.
30 * This includes opening, importing, destroying, exporting a pool, and syncing a
31 * pool.
32 */
33
34 #include <sys/zfs_context.h>
35 #include <sys/fm/fs/zfs.h>
36 #include <sys/spa_impl.h>
37 #include <sys/zio.h>
38 #include <sys/zio_checksum.h>
39 #include <sys/dmu.h>
40 #include <sys/dmu_tx.h>
41 #include <sys/zap.h>
42 #include <sys/zil.h>
43 #include <sys/ddt.h>
44 #include <sys/vdev_impl.h>
45 #include <sys/metaslab.h>
46 #include <sys/metaslab_impl.h>
47 #include <sys/uberblock_impl.h>
48 #include <sys/txg.h>
49 #include <sys/avl.h>
50 #include <sys/dmu_traverse.h>
51 #include <sys/dmu_objset.h>
52 #include <sys/unique.h>
53 #include <sys/dsl_pool.h>
54 #include <sys/dsl_dataset.h>
55 #include <sys/dsl_dir.h>
56 #include <sys/dsl_prop.h>
57 #include <sys/dsl_synctask.h>
58 #include <sys/fs/zfs.h>
59 #include <sys/arc.h>
60 #include <sys/callb.h>
61 #include <sys/systeminfo.h>
62 #include <sys/spa_boot.h>
63 #include <sys/zfs_ioctl.h>
64 #include <sys/dsl_scan.h>
65
66 #ifdef _KERNEL
67 #include <sys/bootprops.h>
68 #include <sys/callb.h>
69 #include <sys/cpupart.h>
70 #include <sys/pool.h>
71 #include <sys/sysdc.h>
72 #include <sys/zone.h>
73 #endif /* _KERNEL */
74
75 #include "zfs_prop.h"
76 #include "zfs_comutil.h"
77
78 typedef enum zti_modes {
79 zti_mode_fixed, /* value is # of threads (min 1) */
80 zti_mode_online_percent, /* value is % of online CPUs */
81 zti_mode_batch, /* cpu-intensive; value is ignored */
82 zti_mode_null, /* don't create a taskq */
83 zti_nmodes
84 } zti_modes_t;
85
86 #define ZTI_FIX(n) { zti_mode_fixed, (n) }
87 #define ZTI_PCT(n) { zti_mode_online_percent, (n) }
88 #define ZTI_BATCH { zti_mode_batch, 0 }
89 #define ZTI_NULL { zti_mode_null, 0 }
90
91 #define ZTI_ONE ZTI_FIX(1)
92
93 typedef struct zio_taskq_info {
94 enum zti_modes zti_mode;
95 uint_t zti_value;
96 } zio_taskq_info_t;
97
98 static const char *const zio_taskq_types[ZIO_TASKQ_TYPES] = {
99 "issue", "issue_high", "intr", "intr_high"
100 };
101
102 /*
103 * Define the taskq threads for the following I/O types:
104 * NULL, READ, WRITE, FREE, CLAIM, and IOCTL
105 */
106 const zio_taskq_info_t zio_taskqs[ZIO_TYPES][ZIO_TASKQ_TYPES] = {
107 /* ISSUE ISSUE_HIGH INTR INTR_HIGH */
108 { ZTI_ONE, ZTI_NULL, ZTI_ONE, ZTI_NULL },
109 { ZTI_FIX(8), ZTI_NULL, ZTI_BATCH, ZTI_NULL },
110 { ZTI_BATCH, ZTI_FIX(5), ZTI_FIX(8), ZTI_FIX(5) },
111 { ZTI_FIX(100), ZTI_NULL, ZTI_ONE, ZTI_NULL },
112 { ZTI_ONE, ZTI_NULL, ZTI_ONE, ZTI_NULL },
113 { ZTI_ONE, ZTI_NULL, ZTI_ONE, ZTI_NULL },
114 };
115
116 static dsl_syncfunc_t spa_sync_props;
117 static boolean_t spa_has_active_shared_spare(spa_t *spa);
static int spa_load_impl(spa_t *spa, uint64_t pool_guid, nvlist_t *config,
119 spa_load_state_t state, spa_import_type_t type, boolean_t mosconfig,
120 char **ereport);
121 static void spa_vdev_resilver_done(spa_t *spa);
122
123 uint_t zio_taskq_batch_pct = 100; /* 1 thread per cpu in pset */
124 id_t zio_taskq_psrset_bind = PS_NONE;
125 boolean_t zio_taskq_sysdc = B_TRUE; /* use SDC scheduling class */
126 uint_t zio_taskq_basedc = 80; /* base duty cycle */
127
128 boolean_t spa_create_process = B_TRUE; /* no process ==> no sysdc */
129
130 /*
131 * This (illegal) pool name is used when temporarily importing a spa_t in order
132 * to get the vdev stats associated with the imported devices.
133 */
134 #define TRYIMPORT_NAME "$import"
135
136 /*
137 * ==========================================================================
138 * SPA properties routines
139 * ==========================================================================
140 */
141
142 /*
 * Add a pool property (with its value and source) to an nvlist.
144 */
145 static void
146 spa_prop_add_list(nvlist_t *nvl, zpool_prop_t prop, char *strval,
147 uint64_t intval, zprop_source_t src)
148 {
149 const char *propname = zpool_prop_to_name(prop);
150 nvlist_t *propval;
151
152 VERIFY(nvlist_alloc(&propval, NV_UNIQUE_NAME, KM_SLEEP) == 0);
153 VERIFY(nvlist_add_uint64(propval, ZPROP_SOURCE, src) == 0);
154
155 if (strval != NULL)
156 VERIFY(nvlist_add_string(propval, ZPROP_VALUE, strval) == 0);
157 else
158 VERIFY(nvlist_add_uint64(propval, ZPROP_VALUE, intval) == 0);
159
160 VERIFY(nvlist_add_nvlist(nvl, propname, propval) == 0);
161 nvlist_free(propval);
162 }
163
164 /*
165 * Get property values from the spa configuration.
166 */
167 static void
168 spa_prop_get_config(spa_t *spa, nvlist_t **nvp)
169 {
170 uint64_t size;
171 uint64_t alloc;
172 uint64_t cap, version;
173 zprop_source_t src = ZPROP_SRC_NONE;
174 spa_config_dirent_t *dp;
175
176 ASSERT(MUTEX_HELD(&spa->spa_props_lock));
177
178 if (spa->spa_root_vdev != NULL) {
179 alloc = metaslab_class_get_alloc(spa_normal_class(spa));
180 size = metaslab_class_get_space(spa_normal_class(spa));
181 spa_prop_add_list(*nvp, ZPOOL_PROP_NAME, spa_name(spa), 0, src);
182 spa_prop_add_list(*nvp, ZPOOL_PROP_SIZE, NULL, size, src);
183 spa_prop_add_list(*nvp, ZPOOL_PROP_ALLOCATED, NULL, alloc, src);
184 spa_prop_add_list(*nvp, ZPOOL_PROP_FREE, NULL,
185 size - alloc, src);
186 spa_prop_add_list(*nvp, ZPOOL_PROP_READONLY, NULL,
187 (spa_mode(spa) == FREAD), src);
188
189 cap = (size == 0) ? 0 : (alloc * 100 / size);
190 spa_prop_add_list(*nvp, ZPOOL_PROP_CAPACITY, NULL, cap, src);
191
192 spa_prop_add_list(*nvp, ZPOOL_PROP_DEDUPRATIO, NULL,
193 ddt_get_pool_dedup_ratio(spa), src);
194
195 spa_prop_add_list(*nvp, ZPOOL_PROP_HEALTH, NULL,
196 spa->spa_root_vdev->vdev_state, src);
197
198 version = spa_version(spa);
199 if (version == zpool_prop_default_numeric(ZPOOL_PROP_VERSION))
200 src = ZPROP_SRC_DEFAULT;
201 else
202 src = ZPROP_SRC_LOCAL;
203 spa_prop_add_list(*nvp, ZPOOL_PROP_VERSION, NULL, version, src);
204 }
205
206 spa_prop_add_list(*nvp, ZPOOL_PROP_GUID, NULL, spa_guid(spa), src);
207
208 if (spa->spa_root != NULL)
209 spa_prop_add_list(*nvp, ZPOOL_PROP_ALTROOT, spa->spa_root,
210 0, ZPROP_SRC_LOCAL);
211
212 if ((dp = list_head(&spa->spa_config_list)) != NULL) {
213 if (dp->scd_path == NULL) {
214 spa_prop_add_list(*nvp, ZPOOL_PROP_CACHEFILE,
215 "none", 0, ZPROP_SRC_LOCAL);
216 } else if (strcmp(dp->scd_path, spa_config_path) != 0) {
217 spa_prop_add_list(*nvp, ZPOOL_PROP_CACHEFILE,
218 dp->scd_path, 0, ZPROP_SRC_LOCAL);
219 }
220 }
221 }
222
223 /*
224 * Get zpool property values.
225 */
226 int
227 spa_prop_get(spa_t *spa, nvlist_t **nvp)
228 {
229 objset_t *mos = spa->spa_meta_objset;
230 zap_cursor_t zc;
231 zap_attribute_t za;
232 int err;
233
234 VERIFY(nvlist_alloc(nvp, NV_UNIQUE_NAME, KM_SLEEP) == 0);
235
236 mutex_enter(&spa->spa_props_lock);
237
238 /*
239 * Get properties from the spa config.
240 */
241 spa_prop_get_config(spa, nvp);
242
	/* No pool property object means there are no more props to get. */
244 if (mos == NULL || spa->spa_pool_props_object == 0) {
245 mutex_exit(&spa->spa_props_lock);
246 return (0);
247 }
248
249 /*
250 * Get properties from the MOS pool property object.
251 */
252 for (zap_cursor_init(&zc, mos, spa->spa_pool_props_object);
253 (err = zap_cursor_retrieve(&zc, &za)) == 0;
254 zap_cursor_advance(&zc)) {
255 uint64_t intval = 0;
256 char *strval = NULL;
257 zprop_source_t src = ZPROP_SRC_DEFAULT;
258 zpool_prop_t prop;
259
260 if ((prop = zpool_name_to_prop(za.za_name)) == ZPROP_INVAL)
261 continue;
262
263 switch (za.za_integer_length) {
264 case 8:
265 /* integer property */
266 if (za.za_first_integer !=
267 zpool_prop_default_numeric(prop))
268 src = ZPROP_SRC_LOCAL;
269
270 if (prop == ZPOOL_PROP_BOOTFS) {
271 dsl_pool_t *dp;
272 dsl_dataset_t *ds = NULL;
273
274 dp = spa_get_dsl(spa);
275 rw_enter(&dp->dp_config_rwlock, RW_READER);
				if ((err = dsl_dataset_hold_obj(dp,
				    za.za_first_integer, FTAG, &ds)) != 0) {
278 rw_exit(&dp->dp_config_rwlock);
279 break;
280 }
281
282 strval = kmem_alloc(
283 MAXNAMELEN + strlen(MOS_DIR_NAME) + 1,
284 KM_SLEEP);
285 dsl_dataset_name(ds, strval);
286 dsl_dataset_rele(ds, FTAG);
287 rw_exit(&dp->dp_config_rwlock);
288 } else {
289 strval = NULL;
290 intval = za.za_first_integer;
291 }
292
293 spa_prop_add_list(*nvp, prop, strval, intval, src);
294
295 if (strval != NULL)
296 kmem_free(strval,
297 MAXNAMELEN + strlen(MOS_DIR_NAME) + 1);
298
299 break;
300
301 case 1:
302 /* string property */
303 strval = kmem_alloc(za.za_num_integers, KM_SLEEP);
304 err = zap_lookup(mos, spa->spa_pool_props_object,
305 za.za_name, 1, za.za_num_integers, strval);
306 if (err) {
307 kmem_free(strval, za.za_num_integers);
308 break;
309 }
310 spa_prop_add_list(*nvp, prop, strval, 0, src);
311 kmem_free(strval, za.za_num_integers);
312 break;
313
314 default:
315 break;
316 }
317 }
318 zap_cursor_fini(&zc);
319 mutex_exit(&spa->spa_props_lock);
320 out:
321 if (err && err != ENOENT) {
322 nvlist_free(*nvp);
323 *nvp = NULL;
324 return (err);
325 }
326
327 return (0);
328 }
329
330 /*
331 * Validate the given pool properties nvlist and modify the list
332 * for the property values to be set.
333 */
334 static int
335 spa_prop_validate(spa_t *spa, nvlist_t *props)
336 {
337 nvpair_t *elem;
338 int error = 0, reset_bootfs = 0;
339 uint64_t objnum;
340
341 elem = NULL;
342 while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
343 zpool_prop_t prop;
344 char *propname, *strval;
345 uint64_t intval;
346 objset_t *os;
347 char *slash;
348
349 propname = nvpair_name(elem);
350
351 if ((prop = zpool_name_to_prop(propname)) == ZPROP_INVAL)
352 return (EINVAL);
353
354 switch (prop) {
355 case ZPOOL_PROP_VERSION:
356 error = nvpair_value_uint64(elem, &intval);
357 if (!error &&
358 (intval < spa_version(spa) || intval > SPA_VERSION))
359 error = EINVAL;
360 break;
361
362 case ZPOOL_PROP_DELEGATION:
363 case ZPOOL_PROP_AUTOREPLACE:
364 case ZPOOL_PROP_LISTSNAPS:
365 case ZPOOL_PROP_AUTOEXPAND:
366 error = nvpair_value_uint64(elem, &intval);
367 if (!error && intval > 1)
368 error = EINVAL;
369 break;
370
371 case ZPOOL_PROP_BOOTFS:
372 /*
373 * If the pool version is less than SPA_VERSION_BOOTFS,
374 * or the pool is still being created (version == 0),
375 * the bootfs property cannot be set.
376 */
377 if (spa_version(spa) < SPA_VERSION_BOOTFS) {
378 error = ENOTSUP;
379 break;
380 }
381
382 /*
383 * Make sure the vdev config is bootable
384 */
385 if (!vdev_is_bootable(spa->spa_root_vdev)) {
386 error = ENOTSUP;
387 break;
388 }
389
390 reset_bootfs = 1;
391
392 error = nvpair_value_string(elem, &strval);
393
394 if (!error) {
395 uint64_t compress;
396
397 if (strval == NULL || strval[0] == '\0') {
398 objnum = zpool_prop_default_numeric(
399 ZPOOL_PROP_BOOTFS);
400 break;
401 }
402
				error = dmu_objset_hold(strval, FTAG, &os);
				if (error != 0)
					break;
405
406 /* Must be ZPL and not gzip compressed. */
407
408 if (dmu_objset_type(os) != DMU_OST_ZFS) {
409 error = ENOTSUP;
410 } else if ((error = dsl_prop_get_integer(strval,
411 zfs_prop_to_name(ZFS_PROP_COMPRESSION),
412 &compress, NULL)) == 0 &&
413 !BOOTFS_COMPRESS_VALID(compress)) {
414 error = ENOTSUP;
415 } else {
416 objnum = dmu_objset_id(os);
417 }
418 dmu_objset_rele(os, FTAG);
419 }
420 break;
421
422 case ZPOOL_PROP_FAILUREMODE:
423 error = nvpair_value_uint64(elem, &intval);
424 if (!error && (intval < ZIO_FAILURE_MODE_WAIT ||
425 intval > ZIO_FAILURE_MODE_PANIC))
426 error = EINVAL;
427
428 /*
429 * This is a special case which only occurs when
430 * the pool has completely failed. This allows
431 * the user to change the in-core failmode property
432 * without syncing it out to disk (I/Os might
433 * currently be blocked). We do this by returning
434 * EIO to the caller (spa_prop_set) to trick it
435 * into thinking we encountered a property validation
436 * error.
437 */
438 if (!error && spa_suspended(spa)) {
439 spa->spa_failmode = intval;
440 error = EIO;
441 }
442 break;
443
444 case ZPOOL_PROP_CACHEFILE:
445 if ((error = nvpair_value_string(elem, &strval)) != 0)
446 break;
447
448 if (strval[0] == '\0')
449 break;
450
451 if (strcmp(strval, "none") == 0)
452 break;
453
454 if (strval[0] != '/') {
455 error = EINVAL;
456 break;
457 }
458
459 slash = strrchr(strval, '/');
460 ASSERT(slash != NULL);
461
462 if (slash[1] == '\0' || strcmp(slash, "/.") == 0 ||
463 strcmp(slash, "/..") == 0)
464 error = EINVAL;
465 break;
466
467 case ZPOOL_PROP_DEDUPDITTO:
468 if (spa_version(spa) < SPA_VERSION_DEDUP)
469 error = ENOTSUP;
470 else
471 error = nvpair_value_uint64(elem, &intval);
472 if (error == 0 &&
473 intval != 0 && intval < ZIO_DEDUPDITTO_MIN)
474 error = EINVAL;
475 break;
476 }
477
478 if (error)
479 break;
480 }
481
482 if (!error && reset_bootfs) {
483 error = nvlist_remove(props,
484 zpool_prop_to_name(ZPOOL_PROP_BOOTFS), DATA_TYPE_STRING);
485
486 if (!error) {
487 error = nvlist_add_uint64(props,
488 zpool_prop_to_name(ZPOOL_PROP_BOOTFS), objnum);
489 }
490 }
491
492 return (error);
493 }
494
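/*
 * Update the in-core cachefile setting from the given property nvlist and,
 * if requested, schedule an async config update to rewrite the cache files.
 */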
495 void
496 spa_configfile_set(spa_t *spa, nvlist_t *nvp, boolean_t need_sync)
497 {
498 char *cachefile;
499 spa_config_dirent_t *dp;
500
501 if (nvlist_lookup_string(nvp, zpool_prop_to_name(ZPOOL_PROP_CACHEFILE),
502 &cachefile) != 0)
503 return;
504
505 dp = kmem_alloc(sizeof (spa_config_dirent_t),
506 KM_SLEEP);
507
508 if (cachefile[0] == '\0')
509 dp->scd_path = spa_strdup(spa_config_path);
510 else if (strcmp(cachefile, "none") == 0)
511 dp->scd_path = NULL;
512 else
513 dp->scd_path = spa_strdup(cachefile);
514
515 list_insert_head(&spa->spa_config_list, dp);
516 if (need_sync)
517 spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
518 }
519
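/*
 * Validate the given pool properties and, if any of them require on-disk
 * changes, apply them via the spa_sync_props() sync task.
 */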
520 int
521 spa_prop_set(spa_t *spa, nvlist_t *nvp)
522 {
523 int error;
524 nvpair_t *elem;
525 boolean_t need_sync = B_FALSE;
526 zpool_prop_t prop;
527
528 if ((error = spa_prop_validate(spa, nvp)) != 0)
529 return (error);
530
531 elem = NULL;
532 while ((elem = nvlist_next_nvpair(nvp, elem)) != NULL) {
533 if ((prop = zpool_name_to_prop(
534 nvpair_name(elem))) == ZPROP_INVAL)
535 return (EINVAL);
536
537 if (prop == ZPOOL_PROP_CACHEFILE ||
538 prop == ZPOOL_PROP_ALTROOT ||
539 prop == ZPOOL_PROP_READONLY)
540 continue;
541
542 need_sync = B_TRUE;
543 break;
544 }
545
546 if (need_sync)
547 return (dsl_sync_task_do(spa_get_dsl(spa), NULL, spa_sync_props,
548 spa, nvp, 3));
549 else
550 return (0);
551 }
552
553 /*
554 * If the bootfs property value is dsobj, clear it.
555 */
556 void
557 spa_prop_clear_bootfs(spa_t *spa, uint64_t dsobj, dmu_tx_t *tx)
558 {
559 if (spa->spa_bootfs == dsobj && spa->spa_pool_props_object != 0) {
560 VERIFY(zap_remove(spa->spa_meta_objset,
561 spa->spa_pool_props_object,
562 zpool_prop_to_name(ZPOOL_PROP_BOOTFS), tx) == 0);
563 spa->spa_bootfs = 0;
564 }
565 }
566
567 /*
568 * Change the GUID for the pool. This is done so that we can later
569 * re-import a pool built from a clone of our own vdevs. We will modify
570 * the root vdev's guid, our own pool guid, and then mark all of our
571 * vdevs dirty. Note that we must make sure that all our vdevs are
572 * online when we do this, or else any vdevs that weren't present
573 * would be orphaned from our pool. We are also going to issue a
574 * sysevent to update any watchers.
575 */
576 int
577 spa_change_guid(spa_t *spa)
578 {
579 uint64_t oldguid, newguid;
580 uint64_t txg;
581
582 if (!(spa_mode_global & FWRITE))
583 return (EROFS);
584
585 txg = spa_vdev_enter(spa);
586
587 if (spa->spa_root_vdev->vdev_state != VDEV_STATE_HEALTHY)
588 return (spa_vdev_exit(spa, NULL, txg, ENXIO));
589
590 oldguid = spa_guid(spa);
591 newguid = spa_generate_guid(NULL);
592 ASSERT3U(oldguid, !=, newguid);
593
594 spa->spa_root_vdev->vdev_guid = newguid;
595 spa->spa_root_vdev->vdev_guid_sum += (newguid - oldguid);
596
597 vdev_config_dirty(spa->spa_root_vdev);
598
599 spa_event_notify(spa, NULL, ESC_ZFS_POOL_REGUID);
600
601 return (spa_vdev_exit(spa, NULL, txg, 0));
602 }
603
604 /*
605 * ==========================================================================
606 * SPA state manipulation (open/create/destroy/import/export)
607 * ==========================================================================
608 */
609
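/*
 * Comparison function for the AVL trees of error list entries, ordered
 * by bookmark.
 */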
610 static int
611 spa_error_entry_compare(const void *a, const void *b)
612 {
613 spa_error_entry_t *sa = (spa_error_entry_t *)a;
614 spa_error_entry_t *sb = (spa_error_entry_t *)b;
615 int ret;
616
617 ret = bcmp(&sa->se_bookmark, &sb->se_bookmark,
618 sizeof (zbookmark_t));
619
620 if (ret < 0)
621 return (-1);
622 else if (ret > 0)
623 return (1);
624 else
625 return (0);
626 }
627
628 /*
 * Utility function which retrieves copies of the current error lists and
 * re-initializes them in the process.
631 */
632 void
633 spa_get_errlists(spa_t *spa, avl_tree_t *last, avl_tree_t *scrub)
634 {
635 ASSERT(MUTEX_HELD(&spa->spa_errlist_lock));
636
637 bcopy(&spa->spa_errlist_last, last, sizeof (avl_tree_t));
638 bcopy(&spa->spa_errlist_scrub, scrub, sizeof (avl_tree_t));
639
640 avl_create(&spa->spa_errlist_scrub,
641 spa_error_entry_compare, sizeof (spa_error_entry_t),
642 offsetof(spa_error_entry_t, se_avl));
643 avl_create(&spa->spa_errlist_last,
644 spa_error_entry_compare, sizeof (spa_error_entry_t),
645 offsetof(spa_error_entry_t, se_avl));
646 }
647
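/*
 * Create a single zio taskq for the pool, sized according to the given
 * zti mode and value; returns NULL when no taskq is needed.
 */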
648 static taskq_t *
649 spa_taskq_create(spa_t *spa, const char *name, enum zti_modes mode,
650 uint_t value)
651 {
652 uint_t flags = 0;
653 boolean_t batch = B_FALSE;
654
655 switch (mode) {
656 case zti_mode_null:
657 return (NULL); /* no taskq needed */
658
659 case zti_mode_fixed:
660 ASSERT3U(value, >=, 1);
661 value = MAX(value, 1);
662 break;
663
664 case zti_mode_batch:
665 batch = B_TRUE;
666 flags |= TASKQ_THREADS_CPU_PCT;
667 value = zio_taskq_batch_pct;
668 break;
669
670 case zti_mode_online_percent:
671 flags |= TASKQ_THREADS_CPU_PCT;
672 break;
673
674 default:
675 panic("unrecognized mode for %s taskq (%u:%u) in "
676 "spa_activate()",
677 name, mode, value);
678 break;
679 }
680
681 if (zio_taskq_sysdc && spa->spa_proc != &p0) {
682 if (batch)
683 flags |= TASKQ_DC_BATCH;
684
685 return (taskq_create_sysdc(name, value, 50, INT_MAX,
686 spa->spa_proc, zio_taskq_basedc, flags));
687 }
688 return (taskq_create_proc(name, value, maxclsyspri, 50, INT_MAX,
689 spa->spa_proc, flags));
690 }
691
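/*
 * Create the full set of zio taskqs for this pool, one per combination of
 * zio type and taskq type, as described by the zio_taskqs table.
 */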
692 static void
693 spa_create_zio_taskqs(spa_t *spa)
694 {
695 for (int t = 0; t < ZIO_TYPES; t++) {
696 for (int q = 0; q < ZIO_TASKQ_TYPES; q++) {
697 const zio_taskq_info_t *ztip = &zio_taskqs[t][q];
698 enum zti_modes mode = ztip->zti_mode;
699 uint_t value = ztip->zti_value;
700 char name[32];
701
702 (void) snprintf(name, sizeof (name),
703 "%s_%s", zio_type_name[t], zio_taskq_types[q]);
704
705 spa->spa_zio_taskq[t][q] =
706 spa_taskq_create(spa, name, mode, value);
707 }
708 }
709 }
710
711 #ifdef _KERNEL
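/*
 * Main function of the per-pool "zpool-<name>" process: bind to the
 * requested processor set, enter the SDC scheduling class if enabled,
 * create the zio taskqs, and then wait until the pool is deactivated.
 */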
712 static void
713 spa_thread(void *arg)
714 {
715 callb_cpr_t cprinfo;
716
717 spa_t *spa = arg;
718 user_t *pu = PTOU(curproc);
719
720 CALLB_CPR_INIT(&cprinfo, &spa->spa_proc_lock, callb_generic_cpr,
721 spa->spa_name);
722
723 ASSERT(curproc != &p0);
724 (void) snprintf(pu->u_psargs, sizeof (pu->u_psargs),
725 "zpool-%s", spa->spa_name);
726 (void) strlcpy(pu->u_comm, pu->u_psargs, sizeof (pu->u_comm));
727
728 /* bind this thread to the requested psrset */
729 if (zio_taskq_psrset_bind != PS_NONE) {
730 pool_lock();
731 mutex_enter(&cpu_lock);
732 mutex_enter(&pidlock);
733 mutex_enter(&curproc->p_lock);
734
735 if (cpupart_bind_thread(curthread, zio_taskq_psrset_bind,
736 0, NULL, NULL) == 0) {
737 curthread->t_bind_pset = zio_taskq_psrset_bind;
738 } else {
739 cmn_err(CE_WARN,
740 "Couldn't bind process for zfs pool \"%s\" to "
741 "pset %d\n", spa->spa_name, zio_taskq_psrset_bind);
742 }
743
744 mutex_exit(&curproc->p_lock);
745 mutex_exit(&pidlock);
746 mutex_exit(&cpu_lock);
747 pool_unlock();
748 }
749
750 if (zio_taskq_sysdc) {
751 sysdc_thread_enter(curthread, 100, 0);
752 }
753
754 spa->spa_proc = curproc;
755 spa->spa_did = curthread->t_did;
756
757 spa_create_zio_taskqs(spa);
758
759 mutex_enter(&spa->spa_proc_lock);
760 ASSERT(spa->spa_proc_state == SPA_PROC_CREATED);
761
762 spa->spa_proc_state = SPA_PROC_ACTIVE;
763 cv_broadcast(&spa->spa_proc_cv);
764
765 CALLB_CPR_SAFE_BEGIN(&cprinfo);
766 while (spa->spa_proc_state == SPA_PROC_ACTIVE)
767 cv_wait(&spa->spa_proc_cv, &spa->spa_proc_lock);
768 CALLB_CPR_SAFE_END(&cprinfo, &spa->spa_proc_lock);
769
770 ASSERT(spa->spa_proc_state == SPA_PROC_DEACTIVATE);
771 spa->spa_proc_state = SPA_PROC_GONE;
772 spa->spa_proc = &p0;
773 cv_broadcast(&spa->spa_proc_cv);
774 CALLB_CPR_EXIT(&cprinfo); /* drops spa_proc_lock */
775
776 mutex_enter(&curproc->p_lock);
777 lwp_exit();
778 }
779 #endif
780
781 /*
782 * Activate an uninitialized pool.
783 */
784 static void
785 spa_activate(spa_t *spa, int mode)
786 {
787 ASSERT(spa->spa_state == POOL_STATE_UNINITIALIZED);
788
789 spa->spa_state = POOL_STATE_ACTIVE;
790 spa->spa_mode = mode;
791
792 spa->spa_normal_class = metaslab_class_create(spa, zfs_metaslab_ops);
793 spa->spa_log_class = metaslab_class_create(spa, zfs_metaslab_ops);
794
795 /* Try to create a covering process */
796 mutex_enter(&spa->spa_proc_lock);
797 ASSERT(spa->spa_proc_state == SPA_PROC_NONE);
798 ASSERT(spa->spa_proc == &p0);
799 spa->spa_did = 0;
800
801 /* Only create a process if we're going to be around a while. */
802 if (spa_create_process && strcmp(spa->spa_name, TRYIMPORT_NAME) != 0) {
803 if (newproc(spa_thread, (caddr_t)spa, syscid, maxclsyspri,
804 NULL, 0) == 0) {
805 spa->spa_proc_state = SPA_PROC_CREATED;
806 while (spa->spa_proc_state == SPA_PROC_CREATED) {
807 cv_wait(&spa->spa_proc_cv,
808 &spa->spa_proc_lock);
809 }
810 ASSERT(spa->spa_proc_state == SPA_PROC_ACTIVE);
811 ASSERT(spa->spa_proc != &p0);
812 ASSERT(spa->spa_did != 0);
813 } else {
814 #ifdef _KERNEL
815 cmn_err(CE_WARN,
816 "Couldn't create process for zfs pool \"%s\"\n",
817 spa->spa_name);
818 #endif
819 }
820 }
821 mutex_exit(&spa->spa_proc_lock);
822
823 /* If we didn't create a process, we need to create our taskqs. */
824 if (spa->spa_proc == &p0) {
825 spa_create_zio_taskqs(spa);
826 }
827
828 list_create(&spa->spa_config_dirty_list, sizeof (vdev_t),
829 offsetof(vdev_t, vdev_config_dirty_node));
830 list_create(&spa->spa_state_dirty_list, sizeof (vdev_t),
831 offsetof(vdev_t, vdev_state_dirty_node));
832
833 txg_list_create(&spa->spa_vdev_txg_list,
834 offsetof(struct vdev, vdev_txg_node));
835
836 avl_create(&spa->spa_errlist_scrub,
837 spa_error_entry_compare, sizeof (spa_error_entry_t),
838 offsetof(spa_error_entry_t, se_avl));
839 avl_create(&spa->spa_errlist_last,
840 spa_error_entry_compare, sizeof (spa_error_entry_t),
841 offsetof(spa_error_entry_t, se_avl));
842 }
843
844 /*
845 * Opposite of spa_activate().
846 */
847 static void
848 spa_deactivate(spa_t *spa)
849 {
850 ASSERT(spa->spa_sync_on == B_FALSE);
851 ASSERT(spa->spa_dsl_pool == NULL);
852 ASSERT(spa->spa_root_vdev == NULL);
853 ASSERT(spa->spa_async_zio_root == NULL);
854 ASSERT(spa->spa_state != POOL_STATE_UNINITIALIZED);
855
856 txg_list_destroy(&spa->spa_vdev_txg_list);
857
858 list_destroy(&spa->spa_config_dirty_list);
859 list_destroy(&spa->spa_state_dirty_list);
860
861 for (int t = 0; t < ZIO_TYPES; t++) {
862 for (int q = 0; q < ZIO_TASKQ_TYPES; q++) {
863 if (spa->spa_zio_taskq[t][q] != NULL)
864 taskq_destroy(spa->spa_zio_taskq[t][q]);
865 spa->spa_zio_taskq[t][q] = NULL;
866 }
867 }
868
869 metaslab_class_destroy(spa->spa_normal_class);
870 spa->spa_normal_class = NULL;
871
872 metaslab_class_destroy(spa->spa_log_class);
873 spa->spa_log_class = NULL;
874
875 /*
876 * If this was part of an import or the open otherwise failed, we may
877 * still have errors left in the queues. Empty them just in case.
878 */
879 spa_errlog_drain(spa);
880
881 avl_destroy(&spa->spa_errlist_scrub);
882 avl_destroy(&spa->spa_errlist_last);
883
884 spa->spa_state = POOL_STATE_UNINITIALIZED;
885
886 mutex_enter(&spa->spa_proc_lock);
887 if (spa->spa_proc_state != SPA_PROC_NONE) {
888 ASSERT(spa->spa_proc_state == SPA_PROC_ACTIVE);
889 spa->spa_proc_state = SPA_PROC_DEACTIVATE;
890 cv_broadcast(&spa->spa_proc_cv);
891 while (spa->spa_proc_state == SPA_PROC_DEACTIVATE) {
892 ASSERT(spa->spa_proc != &p0);
893 cv_wait(&spa->spa_proc_cv, &spa->spa_proc_lock);
894 }
895 ASSERT(spa->spa_proc_state == SPA_PROC_GONE);
896 spa->spa_proc_state = SPA_PROC_NONE;
897 }
898 ASSERT(spa->spa_proc == &p0);
899 mutex_exit(&spa->spa_proc_lock);
900
901 /*
902 * We want to make sure spa_thread() has actually exited the ZFS
903 * module, so that the module can't be unloaded out from underneath
904 * it.
905 */
906 if (spa->spa_did != 0) {
907 thread_join(spa->spa_did);
908 spa->spa_did = 0;
909 }
910 }
911
912 /*
913 * Verify a pool configuration, and construct the vdev tree appropriately. This
914 * will create all the necessary vdevs in the appropriate layout, with each vdev
915 * in the CLOSED state. This will prep the pool before open/creation/import.
916 * All vdev validation is done by the vdev_alloc() routine.
917 */
918 static int
919 spa_config_parse(spa_t *spa, vdev_t **vdp, nvlist_t *nv, vdev_t *parent,
920 uint_t id, int atype)
921 {
922 nvlist_t **child;
923 uint_t children;
924 int error;
925
926 if ((error = vdev_alloc(spa, vdp, nv, parent, id, atype)) != 0)
927 return (error);
928
929 if ((*vdp)->vdev_ops->vdev_op_leaf)
930 return (0);
931
932 error = nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
933 &child, &children);
934
935 if (error == ENOENT)
936 return (0);
937
938 if (error) {
939 vdev_free(*vdp);
940 *vdp = NULL;
941 return (EINVAL);
942 }
943
944 for (int c = 0; c < children; c++) {
945 vdev_t *vd;
946 if ((error = spa_config_parse(spa, &vd, child[c], *vdp, c,
947 atype)) != 0) {
948 vdev_free(*vdp);
949 *vdp = NULL;
950 return (error);
951 }
952 }
953
954 ASSERT(*vdp != NULL);
955
956 return (0);
957 }
958
959 /*
960 * Opposite of spa_load().
961 */
962 static void
963 spa_unload(spa_t *spa)
964 {
965 int i;
966
967 ASSERT(MUTEX_HELD(&spa_namespace_lock));
968
969 /*
970 * Stop async tasks.
971 */
972 spa_async_suspend(spa);
973
974 /*
975 * Stop syncing.
976 */
977 if (spa->spa_sync_on) {
978 txg_sync_stop(spa->spa_dsl_pool);
979 spa->spa_sync_on = B_FALSE;
980 }
981
982 /*
983 * Wait for any outstanding async I/O to complete.
984 */
985 if (spa->spa_async_zio_root != NULL) {
986 (void) zio_wait(spa->spa_async_zio_root);
987 spa->spa_async_zio_root = NULL;
988 }
989
990 bpobj_close(&spa->spa_deferred_bpobj);
991
992 /*
993 * Close the dsl pool.
994 */
995 if (spa->spa_dsl_pool) {
996 dsl_pool_close(spa->spa_dsl_pool);
997 spa->spa_dsl_pool = NULL;
998 spa->spa_meta_objset = NULL;
999 }
1000
1001 ddt_unload(spa);
1002
1003 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
1004
1005 /*
1006 * Drop and purge level 2 cache
1007 */
1008 spa_l2cache_drop(spa);
1009
1010 /*
1011 * Close all vdevs.
1012 */
1013 if (spa->spa_root_vdev)
1014 vdev_free(spa->spa_root_vdev);
1015 ASSERT(spa->spa_root_vdev == NULL);
1016
1017 for (i = 0; i < spa->spa_spares.sav_count; i++)
1018 vdev_free(spa->spa_spares.sav_vdevs[i]);
1019 if (spa->spa_spares.sav_vdevs) {
1020 kmem_free(spa->spa_spares.sav_vdevs,
1021 spa->spa_spares.sav_count * sizeof (void *));
1022 spa->spa_spares.sav_vdevs = NULL;
1023 }
1024 if (spa->spa_spares.sav_config) {
1025 nvlist_free(spa->spa_spares.sav_config);
1026 spa->spa_spares.sav_config = NULL;
1027 }
1028 spa->spa_spares.sav_count = 0;
1029
1030 for (i = 0; i < spa->spa_l2cache.sav_count; i++)
1031 vdev_free(spa->spa_l2cache.sav_vdevs[i]);
1032 if (spa->spa_l2cache.sav_vdevs) {
1033 kmem_free(spa->spa_l2cache.sav_vdevs,
1034 spa->spa_l2cache.sav_count * sizeof (void *));
1035 spa->spa_l2cache.sav_vdevs = NULL;
1036 }
1037 if (spa->spa_l2cache.sav_config) {
1038 nvlist_free(spa->spa_l2cache.sav_config);
1039 spa->spa_l2cache.sav_config = NULL;
1040 }
1041 spa->spa_l2cache.sav_count = 0;
1042
1043 spa->spa_async_suspended = 0;
1044
1045 spa_config_exit(spa, SCL_ALL, FTAG);
1046 }
1047
1048 /*
1049 * Load (or re-load) the current list of vdevs describing the active spares for
1050 * this pool. When this is called, we have some form of basic information in
1051 * 'spa_spares.sav_config'. We parse this into vdevs, try to open them, and
1052 * then re-generate a more complete list including status information.
1053 */
1054 static void
1055 spa_load_spares(spa_t *spa)
1056 {
1057 nvlist_t **spares;
1058 uint_t nspares;
1059 int i;
1060 vdev_t *vd, *tvd;
1061
1062 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
1063
1064 /*
1065 * First, close and free any existing spare vdevs.
1066 */
1067 for (i = 0; i < spa->spa_spares.sav_count; i++) {
1068 vd = spa->spa_spares.sav_vdevs[i];
1069
1070 /* Undo the call to spa_activate() below */
1071 if ((tvd = spa_lookup_by_guid(spa, vd->vdev_guid,
1072 B_FALSE)) != NULL && tvd->vdev_isspare)
1073 spa_spare_remove(tvd);
1074 vdev_close(vd);
1075 vdev_free(vd);
1076 }
1077
1078 if (spa->spa_spares.sav_vdevs)
1079 kmem_free(spa->spa_spares.sav_vdevs,
1080 spa->spa_spares.sav_count * sizeof (void *));
1081
1082 if (spa->spa_spares.sav_config == NULL)
1083 nspares = 0;
1084 else
1085 VERIFY(nvlist_lookup_nvlist_array(spa->spa_spares.sav_config,
1086 ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);
1087
1088 spa->spa_spares.sav_count = (int)nspares;
1089 spa->spa_spares.sav_vdevs = NULL;
1090
1091 if (nspares == 0)
1092 return;
1093
1094 /*
1095 * Construct the array of vdevs, opening them to get status in the
	 * process. For each spare, there are potentially two different vdev_t
1097 * structures associated with it: one in the list of spares (used only
1098 * for basic validation purposes) and one in the active vdev
1099 * configuration (if it's spared in). During this phase we open and
1100 * validate each vdev on the spare list. If the vdev also exists in the
1101 * active configuration, then we also mark this vdev as an active spare.
1102 */
1103 spa->spa_spares.sav_vdevs = kmem_alloc(nspares * sizeof (void *),
1104 KM_SLEEP);
1105 for (i = 0; i < spa->spa_spares.sav_count; i++) {
1106 VERIFY(spa_config_parse(spa, &vd, spares[i], NULL, 0,
1107 VDEV_ALLOC_SPARE) == 0);
1108 ASSERT(vd != NULL);
1109
1110 spa->spa_spares.sav_vdevs[i] = vd;
1111
1112 if ((tvd = spa_lookup_by_guid(spa, vd->vdev_guid,
1113 B_FALSE)) != NULL) {
1114 if (!tvd->vdev_isspare)
1115 spa_spare_add(tvd);
1116
1117 /*
1118 * We only mark the spare active if we were successfully
1119 * able to load the vdev. Otherwise, importing a pool
1120 * with a bad active spare would result in strange
		 * behavior, because multiple pools would think the spare
1122 * is actively in use.
1123 *
1124 * There is a vulnerability here to an equally bizarre
1125 * circumstance, where a dead active spare is later
1126 * brought back to life (onlined or otherwise). Given
1127 * the rarity of this scenario, and the extra complexity
1128 * it adds, we ignore the possibility.
1129 */
1130 if (!vdev_is_dead(tvd))
1131 spa_spare_activate(tvd);
1132 }
1133
1134 vd->vdev_top = vd;
1135 vd->vdev_aux = &spa->spa_spares;
1136
1137 if (vdev_open(vd) != 0)
1138 continue;
1139
1140 if (vdev_validate_aux(vd) == 0)
1141 spa_spare_add(vd);
1142 }
1143
1144 /*
1145 * Recompute the stashed list of spares, with status information
1146 * this time.
1147 */
1148 VERIFY(nvlist_remove(spa->spa_spares.sav_config, ZPOOL_CONFIG_SPARES,
1149 DATA_TYPE_NVLIST_ARRAY) == 0);
1150
1151 spares = kmem_alloc(spa->spa_spares.sav_count * sizeof (void *),
1152 KM_SLEEP);
1153 for (i = 0; i < spa->spa_spares.sav_count; i++)
1154 spares[i] = vdev_config_generate(spa,
1155 spa->spa_spares.sav_vdevs[i], B_TRUE, VDEV_CONFIG_SPARE);
1156 VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config,
1157 ZPOOL_CONFIG_SPARES, spares, spa->spa_spares.sav_count) == 0);
1158 for (i = 0; i < spa->spa_spares.sav_count; i++)
1159 nvlist_free(spares[i]);
1160 kmem_free(spares, spa->spa_spares.sav_count * sizeof (void *));
1161 }
1162
1163 /*
1164 * Load (or re-load) the current list of vdevs describing the active l2cache for
1165 * this pool. When this is called, we have some form of basic information in
1166 * 'spa_l2cache.sav_config'. We parse this into vdevs, try to open them, and
1167 * then re-generate a more complete list including status information.
1168 * Devices which are already active have their details maintained, and are
1169 * not re-opened.
1170 */
1171 static void
1172 spa_load_l2cache(spa_t *spa)
1173 {
1174 nvlist_t **l2cache;
1175 uint_t nl2cache;
1176 int i, j, oldnvdevs;
1177 uint64_t guid;
1178 vdev_t *vd, **oldvdevs, **newvdevs;
1179 spa_aux_vdev_t *sav = &spa->spa_l2cache;
1180
1181 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
1182
1183 if (sav->sav_config != NULL) {
1184 VERIFY(nvlist_lookup_nvlist_array(sav->sav_config,
1185 ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0);
1186 newvdevs = kmem_alloc(nl2cache * sizeof (void *), KM_SLEEP);
1187 } else {
1188 nl2cache = 0;
1189 }
1190
1191 oldvdevs = sav->sav_vdevs;
1192 oldnvdevs = sav->sav_count;
1193 sav->sav_vdevs = NULL;
1194 sav->sav_count = 0;
1195
1196 /*
1197 * Process new nvlist of vdevs.
1198 */
1199 for (i = 0; i < nl2cache; i++) {
1200 VERIFY(nvlist_lookup_uint64(l2cache[i], ZPOOL_CONFIG_GUID,
1201 &guid) == 0);
1202
1203 newvdevs[i] = NULL;
1204 for (j = 0; j < oldnvdevs; j++) {
1205 vd = oldvdevs[j];
1206 if (vd != NULL && guid == vd->vdev_guid) {
1207 /*
1208 * Retain previous vdev for add/remove ops.
1209 */
1210 newvdevs[i] = vd;
1211 oldvdevs[j] = NULL;
1212 break;
1213 }
1214 }
1215
1216 if (newvdevs[i] == NULL) {
1217 /*
1218 * Create new vdev
1219 */
1220 VERIFY(spa_config_parse(spa, &vd, l2cache[i], NULL, 0,
1221 VDEV_ALLOC_L2CACHE) == 0);
1222 ASSERT(vd != NULL);
1223 newvdevs[i] = vd;
1224
1225 /*
1226 * Commit this vdev as an l2cache device,
1227 * even if it fails to open.
1228 */
1229 spa_l2cache_add(vd);
1230
1231 vd->vdev_top = vd;
1232 vd->vdev_aux = sav;
1233
1234 spa_l2cache_activate(vd);
1235
1236 if (vdev_open(vd) != 0)
1237 continue;
1238
1239 (void) vdev_validate_aux(vd);
1240
1241 if (!vdev_is_dead(vd))
1242 l2arc_add_vdev(spa, vd);
1243 }
1244 }
1245
1246 /*
1247 * Purge vdevs that were dropped
1248 */
1249 for (i = 0; i < oldnvdevs; i++) {
1250 uint64_t pool;
1251
1252 vd = oldvdevs[i];
1253 if (vd != NULL) {
1254 if (spa_l2cache_exists(vd->vdev_guid, &pool) &&
1255 pool != 0ULL && l2arc_vdev_present(vd))
1256 l2arc_remove_vdev(vd);
1257 (void) vdev_close(vd);
1258 spa_l2cache_remove(vd);
1259 }
1260 }
1261
1262 if (oldvdevs)
1263 kmem_free(oldvdevs, oldnvdevs * sizeof (void *));
1264
1265 if (sav->sav_config == NULL)
1266 goto out;
1267
1268 sav->sav_vdevs = newvdevs;
1269 sav->sav_count = (int)nl2cache;
1270
1271 /*
1272 * Recompute the stashed list of l2cache devices, with status
1273 * information this time.
1274 */
1275 VERIFY(nvlist_remove(sav->sav_config, ZPOOL_CONFIG_L2CACHE,
1276 DATA_TYPE_NVLIST_ARRAY) == 0);
1277
1278 l2cache = kmem_alloc(sav->sav_count * sizeof (void *), KM_SLEEP);
1279 for (i = 0; i < sav->sav_count; i++)
1280 l2cache[i] = vdev_config_generate(spa,
1281 sav->sav_vdevs[i], B_TRUE, VDEV_CONFIG_L2CACHE);
1282 VERIFY(nvlist_add_nvlist_array(sav->sav_config,
1283 ZPOOL_CONFIG_L2CACHE, l2cache, sav->sav_count) == 0);
1284 out:
1285 for (i = 0; i < sav->sav_count; i++)
1286 nvlist_free(l2cache[i]);
1287 if (sav->sav_count)
1288 kmem_free(l2cache, sav->sav_count * sizeof (void *));
1289 }
1290
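/*
 * Read a packed nvlist from the given MOS object and unpack it into 'value'.
 */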
1291 static int
1292 load_nvlist(spa_t *spa, uint64_t obj, nvlist_t **value)
1293 {
1294 dmu_buf_t *db;
1295 char *packed = NULL;
1296 size_t nvsize = 0;
1297 int error;
1298 *value = NULL;
1299
1300 VERIFY(0 == dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db));
1301 nvsize = *(uint64_t *)db->db_data;
1302 dmu_buf_rele(db, FTAG);
1303
1304 packed = kmem_alloc(nvsize, KM_SLEEP);
1305 error = dmu_read(spa->spa_meta_objset, obj, 0, nvsize, packed,
1306 DMU_READ_PREFETCH);
1307 if (error == 0)
1308 error = nvlist_unpack(packed, nvsize, value, 0);
1309 kmem_free(packed, nvsize);
1310
1311 return (error);
1312 }
1313
1314 /*
1315 * Checks to see if the given vdev could not be opened, in which case we post a
1316 * sysevent to notify the autoreplace code that the device has been removed.
1317 */
1318 static void
1319 spa_check_removed(vdev_t *vd)
1320 {
1321 for (int c = 0; c < vd->vdev_children; c++)
1322 spa_check_removed(vd->vdev_child[c]);
1323
1324 if (vd->vdev_ops->vdev_op_leaf && vdev_is_dead(vd)) {
1325 zfs_post_autoreplace(vd->vdev_spa, vd);
1326 spa_event_notify(vd->vdev_spa, vd, ESC_ZFS_VDEV_CHECK);
1327 }
1328 }
1329
1330 /*
1331 * Validate the current config against the MOS config
1332 */
1333 static boolean_t
1334 spa_config_valid(spa_t *spa, nvlist_t *config)
1335 {
1336 vdev_t *mrvd, *rvd = spa->spa_root_vdev;
1337 nvlist_t *nv;
1338
1339 VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nv) == 0);
1340
1341 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
1342 VERIFY(spa_config_parse(spa, &mrvd, nv, NULL, 0, VDEV_ALLOC_LOAD) == 0);
1343
1344 ASSERT3U(rvd->vdev_children, ==, mrvd->vdev_children);
1345
1346 /*
1347 * If we're doing a normal import, then build up any additional
1348 * diagnostic information about missing devices in this config.
1349 * We'll pass this up to the user for further processing.
1350 */
1351 if (!(spa->spa_import_flags & ZFS_IMPORT_MISSING_LOG)) {
1352 nvlist_t **child, *nv;
1353 uint64_t idx = 0;
1354
1355 child = kmem_alloc(rvd->vdev_children * sizeof (nvlist_t **),
1356 KM_SLEEP);
1357 VERIFY(nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) == 0);
1358
1359 for (int c = 0; c < rvd->vdev_children; c++) {
1360 vdev_t *tvd = rvd->vdev_child[c];
1361 vdev_t *mtvd = mrvd->vdev_child[c];
1362
1363 if (tvd->vdev_ops == &vdev_missing_ops &&
1364 mtvd->vdev_ops != &vdev_missing_ops &&
1365 mtvd->vdev_islog)
1366 child[idx++] = vdev_config_generate(spa, mtvd,
1367 B_FALSE, 0);
1368 }
1369
1370 if (idx) {
1371 VERIFY(nvlist_add_nvlist_array(nv,
1372 ZPOOL_CONFIG_CHILDREN, child, idx) == 0);
1373 VERIFY(nvlist_add_nvlist(spa->spa_load_info,
1374 ZPOOL_CONFIG_MISSING_DEVICES, nv) == 0);
1375
1376 for (int i = 0; i < idx; i++)
1377 nvlist_free(child[i]);
1378 }
1379 nvlist_free(nv);
		kmem_free(child, rvd->vdev_children * sizeof (nvlist_t **));
1381 }
1382
1383 /*
1384 * Compare the root vdev tree with the information we have
1385 * from the MOS config (mrvd). Check each top-level vdev
1386 * with the corresponding MOS config top-level (mtvd).
1387 */
1388 for (int c = 0; c < rvd->vdev_children; c++) {
1389 vdev_t *tvd = rvd->vdev_child[c];
1390 vdev_t *mtvd = mrvd->vdev_child[c];
1391
1392 /*
1393 * Resolve any "missing" vdevs in the current configuration.
1394 * If we find that the MOS config has more accurate information
		 * about the top-level vdev, then use that vdev instead.
1396 */
1397 if (tvd->vdev_ops == &vdev_missing_ops &&
1398 mtvd->vdev_ops != &vdev_missing_ops) {
1399
1400 if (!(spa->spa_import_flags & ZFS_IMPORT_MISSING_LOG))
1401 continue;
1402
1403 /*
1404 * Device specific actions.
1405 */
1406 if (mtvd->vdev_islog) {
1407 spa_set_log_state(spa, SPA_LOG_CLEAR);
1408 } else {
1409 /*
1410 * XXX - once we have 'readonly' pool
1411 * support we should be able to handle
1412 * missing data devices by transitioning
1413 * the pool to readonly.
1414 */
1415 continue;
1416 }
1417
1418 /*
1419 * Swap the missing vdev with the data we were
1420 * able to obtain from the MOS config.
1421 */
1422 vdev_remove_child(rvd, tvd);
1423 vdev_remove_child(mrvd, mtvd);
1424
1425 vdev_add_child(rvd, mtvd);
1426 vdev_add_child(mrvd, tvd);
1427
1428 spa_config_exit(spa, SCL_ALL, FTAG);
1429 vdev_load(mtvd);
1430 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
1431
1432 vdev_reopen(rvd);
1433 } else if (mtvd->vdev_islog) {
1434 /*
1435 * Load the slog device's state from the MOS config
1436 * since it's possible that the label does not
1437 * contain the most up-to-date information.
1438 */
1439 vdev_load_log_state(tvd, mtvd);
1440 vdev_reopen(tvd);
1441 }
1442 }
1443 vdev_free(mrvd);
1444 spa_config_exit(spa, SCL_ALL, FTAG);
1445
1446 /*
1447 * Ensure we were able to validate the config.
1448 */
1449 return (rvd->vdev_guid_sum == spa->spa_uberblock.ub_guid_sum);
1450 }
1451
1452 /*
1453 * Check for missing log devices
1454 */
1455 static int
1456 spa_check_logs(spa_t *spa)
1457 {
1458 switch (spa->spa_log_state) {
1459 case SPA_LOG_MISSING:
1460 /* need to recheck in case slog has been restored */
1461 case SPA_LOG_UNKNOWN:
1462 if (dmu_objset_find(spa->spa_name, zil_check_log_chain, NULL,
1463 DS_FIND_CHILDREN)) {
1464 spa_set_log_state(spa, SPA_LOG_MISSING);
1465 return (1);
1466 }
1467 break;
1468 }
1469 return (0);
1470 }
1471
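/*
 * Passivate the metaslab groups of all log (slog) top-level vdevs so that
 * no further allocations are directed to them. Returns B_TRUE if at least
 * one slog device was found.
 */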
1472 static boolean_t
1473 spa_passivate_log(spa_t *spa)
1474 {
1475 vdev_t *rvd = spa->spa_root_vdev;
1476 boolean_t slog_found = B_FALSE;
1477
1478 ASSERT(spa_config_held(spa, SCL_ALLOC, RW_WRITER));
1479
1480 if (!spa_has_slogs(spa))
1481 return (B_FALSE);
1482
1483 for (int c = 0; c < rvd->vdev_children; c++) {
1484 vdev_t *tvd = rvd->vdev_child[c];
1485 metaslab_group_t *mg = tvd->vdev_mg;
1486
1487 if (tvd->vdev_islog) {
1488 metaslab_group_passivate(mg);
1489 slog_found = B_TRUE;
1490 }
1491 }
1492
1493 return (slog_found);
1494 }
1495
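/*
 * Reactivate the metaslab groups of all log top-level vdevs; the opposite
 * of spa_passivate_log().
 */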
1496 static void
1497 spa_activate_log(spa_t *spa)
1498 {
1499 vdev_t *rvd = spa->spa_root_vdev;
1500
1501 ASSERT(spa_config_held(spa, SCL_ALLOC, RW_WRITER));
1502
1503 for (int c = 0; c < rvd->vdev_children; c++) {
1504 vdev_t *tvd = rvd->vdev_child[c];
1505 metaslab_group_t *mg = tvd->vdev_mg;
1506
1507 if (tvd->vdev_islog)
1508 metaslab_group_activate(mg);
1509 }
1510 }
1511
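/*
 * Offline the intent logs of all datasets in the pool and wait for the
 * current txg to sync so the "stubby" ZIL blocks can be reclaimed.
 */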
1512 int
1513 spa_offline_log(spa_t *spa)
1514 {
1515 int error = 0;
1516
1517 if ((error = dmu_objset_find(spa_name(spa), zil_vdev_offline,
1518 NULL, DS_FIND_CHILDREN)) == 0) {
1519
1520 /*
		 * We successfully offlined the log device; sync out the
1522 * current txg so that the "stubby" block can be removed
1523 * by zil_sync().
1524 */
1525 txg_wait_synced(spa->spa_dsl_pool, 0);
1526 }
1527 return (error);
1528 }
1529
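/*
 * Apply spa_check_removed() to each auxiliary (spare or l2cache) vdev.
 */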
1530 static void
1531 spa_aux_check_removed(spa_aux_vdev_t *sav)
1532 {
1533 for (int i = 0; i < sav->sav_count; i++)
1534 spa_check_removed(sav->sav_vdevs[i]);
1535 }
1536
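/*
 * Completion callback for intent-log claim I/Os; records the highest block
 * birth txg seen in spa_claim_max_txg.
 */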
1537 void
1538 spa_claim_notify(zio_t *zio)
1539 {
1540 spa_t *spa = zio->io_spa;
1541
1542 if (zio->io_error)
1543 return;
1544
1545 mutex_enter(&spa->spa_props_lock); /* any mutex will do */
1546 if (spa->spa_claim_max_txg < zio->io_bp->blk_birth)
1547 spa->spa_claim_max_txg = zio->io_bp->blk_birth;
1548 mutex_exit(&spa->spa_props_lock);
1549 }
1550
1551 typedef struct spa_load_error {
1552 uint64_t sle_meta_count;
1553 uint64_t sle_data_count;
1554 } spa_load_error_t;
1555
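/*
 * Completion callback for the verification reads issued by
 * spa_load_verify_cb(); counts metadata and data errors separately.
 */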
1556 static void
1557 spa_load_verify_done(zio_t *zio)
1558 {
1559 blkptr_t *bp = zio->io_bp;
1560 spa_load_error_t *sle = zio->io_private;
1561 dmu_object_type_t type = BP_GET_TYPE(bp);
1562 int error = zio->io_error;
1563
1564 if (error) {
1565 if ((BP_GET_LEVEL(bp) != 0 || dmu_ot[type].ot_metadata) &&
1566 type != DMU_OT_INTENT_LOG)
1567 atomic_add_64(&sle->sle_meta_count, 1);
1568 else
1569 atomic_add_64(&sle->sle_data_count, 1);
1570 }
1571 zio_data_buf_free(zio->io_data, zio->io_size);
1572 }
1573
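/*
 * Pool-traversal callback used by spa_load_verify(): issue a speculative
 * scrub read of each block under the root "rio" zio.
 */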
1574 /*ARGSUSED*/
1575 static int
1576 spa_load_verify_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
1577 arc_buf_t *pbuf, const zbookmark_t *zb, const dnode_phys_t *dnp, void *arg)
1578 {
1579 if (bp != NULL) {
1580 zio_t *rio = arg;
1581 size_t size = BP_GET_PSIZE(bp);
1582 void *data = zio_data_buf_alloc(size);
1583
1584 zio_nowait(zio_read(rio, spa, bp, data, size,
1585 spa_load_verify_done, rio->io_private, ZIO_PRIORITY_SCRUB,
1586 ZIO_FLAG_SPECULATIVE | ZIO_FLAG_CANFAIL |
1587 ZIO_FLAG_SCRUB | ZIO_FLAG_RAW, zb));
1588 }
1589 return (0);
1590 }
1591
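/*
 * Traverse the pool, reading every block reachable as of the verification
 * txg, and decide (based on the rewind policy and the error counts) whether
 * the load should be considered successful.
 */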
1592 static int
1593 spa_load_verify(spa_t *spa)
1594 {
1595 zio_t *rio;
1596 spa_load_error_t sle = { 0 };
1597 zpool_rewind_policy_t policy;
1598 boolean_t verify_ok = B_FALSE;
1599 int error;
1600
1601 zpool_get_rewind_policy(spa->spa_config, &policy);
1602
1603 if (policy.zrp_request & ZPOOL_NEVER_REWIND)
1604 return (0);
1605
1606 rio = zio_root(spa, NULL, &sle,
1607 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE);
1608
1609 error = traverse_pool(spa, spa->spa_verify_min_txg,
1610 TRAVERSE_PRE | TRAVERSE_PREFETCH, spa_load_verify_cb, rio);
1611
1612 (void) zio_wait(rio);
1613
1614 spa->spa_load_meta_errors = sle.sle_meta_count;
1615 spa->spa_load_data_errors = sle.sle_data_count;
1616
1617 if (!error && sle.sle_meta_count <= policy.zrp_maxmeta &&
1618 sle.sle_data_count <= policy.zrp_maxdata) {
1619 int64_t loss = 0;
1620
1621 verify_ok = B_TRUE;
1622 spa->spa_load_txg = spa->spa_uberblock.ub_txg;
1623 spa->spa_load_txg_ts = spa->spa_uberblock.ub_timestamp;
1624
1625 loss = spa->spa_last_ubsync_txg_ts - spa->spa_load_txg_ts;
1626 VERIFY(nvlist_add_uint64(spa->spa_load_info,
1627 ZPOOL_CONFIG_LOAD_TIME, spa->spa_load_txg_ts) == 0);
1628 VERIFY(nvlist_add_int64(spa->spa_load_info,
1629 ZPOOL_CONFIG_REWIND_TIME, loss) == 0);
1630 VERIFY(nvlist_add_uint64(spa->spa_load_info,
1631 ZPOOL_CONFIG_LOAD_DATA_ERRORS, sle.sle_data_count) == 0);
1632 } else {
1633 spa->spa_load_max_txg = spa->spa_uberblock.ub_txg;
1634 }
1635
1636 if (error) {
1637 if (error != ENXIO && error != EIO)
1638 error = EIO;
1639 return (error);
1640 }
1641
1642 return (verify_ok ? 0 : EIO);
1643 }
1644
1645 /*
1646 * Find a value in the pool props object.
1647 */
1648 static void
1649 spa_prop_find(spa_t *spa, zpool_prop_t prop, uint64_t *val)
1650 {
1651 (void) zap_lookup(spa->spa_meta_objset, spa->spa_pool_props_object,
1652 zpool_prop_to_name(prop), sizeof (uint64_t), 1, val);
1653 }
1654
1655 /*
1656 * Find a value in the pool directory object.
1657 */
1658 static int
1659 spa_dir_prop(spa_t *spa, const char *name, uint64_t *val)
1660 {
1661 return (zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
1662 name, sizeof (uint64_t), 1, val));
1663 }
1664
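/*
 * Mark the given vdev as unopenable with the specified aux state and
 * return the supplied error; convenience wrapper used by the load path.
 */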
1665 static int
1666 spa_vdev_err(vdev_t *vdev, vdev_aux_t aux, int err)
1667 {
1668 vdev_set_state(vdev, B_TRUE, VDEV_STATE_CANT_OPEN, aux);
1669 return (err);
1670 }
1671
1672 /*
1673 * Fix up config after a partly-completed split. This is done with the
1674 * ZPOOL_CONFIG_SPLIT nvlist. Both the splitting pool and the split-off
1675 * pool have that entry in their config, but only the splitting one contains
1676 * a list of all the guids of the vdevs that are being split off.
1677 *
1678 * This function determines what to do with that list: either rejoin
1679 * all the disks to the pool, or complete the splitting process. To attempt
1680 * the rejoin, each disk that is offlined is marked online again, and
1681 * we do a reopen() call. If the vdev label for every disk that was
1682 * marked online indicates it was successfully split off (VDEV_AUX_SPLIT_POOL)
1683 * then we call vdev_split() on each disk, and complete the split.
1684 *
1685 * Otherwise we leave the config alone, with all the vdevs in place in
1686 * the original pool.
1687 */
1688 static void
1689 spa_try_repair(spa_t *spa, nvlist_t *config)
1690 {
1691 uint_t extracted;
1692 uint64_t *glist;
1693 uint_t i, gcount;
1694 nvlist_t *nvl;
1695 vdev_t **vd;
1696 boolean_t attempt_reopen;
1697
1698 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_SPLIT, &nvl) != 0)
1699 return;
1700
1701 /* check that the config is complete */
1702 if (nvlist_lookup_uint64_array(nvl, ZPOOL_CONFIG_SPLIT_LIST,
1703 &glist, &gcount) != 0)
1704 return;
1705
1706 vd = kmem_zalloc(gcount * sizeof (vdev_t *), KM_SLEEP);
1707
1708 /* attempt to online all the vdevs & validate */
1709 attempt_reopen = B_TRUE;
1710 for (i = 0; i < gcount; i++) {
1711 if (glist[i] == 0) /* vdev is hole */
1712 continue;
1713
1714 vd[i] = spa_lookup_by_guid(spa, glist[i], B_FALSE);
1715 if (vd[i] == NULL) {
1716 /*
1717 * Don't bother attempting to reopen the disks;
1718 * just do the split.
1719 */
1720 attempt_reopen = B_FALSE;
1721 } else {
1722 /* attempt to re-online it */
1723 vd[i]->vdev_offline = B_FALSE;
1724 }
1725 }
1726
1727 if (attempt_reopen) {
1728 vdev_reopen(spa->spa_root_vdev);
1729
1730 /* check each device to see what state it's in */
1731 for (extracted = 0, i = 0; i < gcount; i++) {
1732 if (vd[i] != NULL &&
1733 vd[i]->vdev_stat.vs_aux != VDEV_AUX_SPLIT_POOL)
1734 break;
1735 ++extracted;
1736 }
1737 }
1738
1739 /*
1740 * If every disk has been moved to the new pool, or if we never
1741 * even attempted to look at them, then we split them off for
1742 * good.
1743 */
1744 if (!attempt_reopen || gcount == extracted) {
1745 for (i = 0; i < gcount; i++)
1746 if (vd[i] != NULL)
1747 vdev_split(vd[i]);
1748 vdev_reopen(spa->spa_root_vdev);
1749 }
1750
1751 kmem_free(vd, gcount * sizeof (vdev_t *));
1752 }
1753
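/*
 * Load the pool described by spa_config: extract the pool guid, version and
 * txg, stash any split information, call spa_load_impl() to do the real
 * work, and post an ereport if the load fails.
 */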
1754 static int
1755 spa_load(spa_t *spa, spa_load_state_t state, spa_import_type_t type,
1756 boolean_t mosconfig)
1757 {
1758 nvlist_t *config = spa->spa_config;
1759 char *ereport = FM_EREPORT_ZFS_POOL;
1760 int error;
1761 uint64_t pool_guid;
1762 nvlist_t *nvl;
1763
1764 if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, &pool_guid))
1765 return (EINVAL);
1766
1767 /*
1768 * Versioning wasn't explicitly added to the label until later, so if
1769 * it's not present treat it as the initial version.
1770 */
1771 if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
1772 &spa->spa_ubsync.ub_version) != 0)
1773 spa->spa_ubsync.ub_version = SPA_VERSION_INITIAL;
1774
1775 (void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG,
1776 &spa->spa_config_txg);
1777
1778 if ((state == SPA_LOAD_IMPORT || state == SPA_LOAD_TRYIMPORT) &&
1779 spa_guid_exists(pool_guid, 0)) {
1780 error = EEXIST;
1781 } else {
1782 spa->spa_config_guid = pool_guid;
1783
1784 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_SPLIT,
1785 &nvl) == 0) {
1786 VERIFY(nvlist_dup(nvl, &spa->spa_config_splitting,
1787 KM_SLEEP) == 0);
1788 }
1789
1790 gethrestime(&spa->spa_loaded_ts);
1791 error = spa_load_impl(spa, pool_guid, config, state, type,
1792 mosconfig, &ereport);
1793 }
1794
1795 spa->spa_minref = refcount_count(&spa->spa_refcount);
1796 if (error) {
1797 if (error != EEXIST) {
1798 spa->spa_loaded_ts.tv_sec = 0;
1799 spa->spa_loaded_ts.tv_nsec = 0;
1800 }
1801 if (error != EBADF) {
1802 zfs_ereport_post(ereport, spa, NULL, NULL, 0, 0);
1803 }
1804 }
1805 spa->spa_load_state = error ? SPA_LOAD_ERROR : SPA_LOAD_NONE;
1806 spa->spa_ena = 0;
1807
1808 return (error);
1809 }
1810
1811 /*
1812 * Load an existing storage pool, using the pool's builtin spa_config as a
1813 * source of configuration information.
1814 */
1815 static int
1816 spa_load_impl(spa_t *spa, uint64_t pool_guid, nvlist_t *config,
1817 spa_load_state_t state, spa_import_type_t type, boolean_t mosconfig,
1818 char **ereport)
1819 {
1820 int error = 0;
1821 nvlist_t *nvroot = NULL;
1822 vdev_t *rvd;
1823 uberblock_t *ub = &spa->spa_uberblock;
1824 uint64_t children, config_cache_txg = spa->spa_config_txg;
1825 int orig_mode = spa->spa_mode;
1826 int parse;
1827 uint64_t obj;
1828
1829 /*
1830 * If this is an untrusted config, access the pool in read-only mode.
1831 * This prevents things like resilvering recently removed devices.
1832 */
1833 if (!mosconfig)
1834 spa->spa_mode = FREAD;
1835
1836 ASSERT(MUTEX_HELD(&spa_namespace_lock));
1837
1838 spa->spa_load_state = state;
1839
1840 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nvroot))
1841 return (EINVAL);
1842
1843 parse = (type == SPA_IMPORT_EXISTING ?
1844 VDEV_ALLOC_LOAD : VDEV_ALLOC_SPLIT);
1845
1846 /*
1847 * Create "The Godfather" zio to hold all async IOs
1848 */
1849 spa->spa_async_zio_root = zio_root(spa, NULL, NULL,
1850 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE | ZIO_FLAG_GODFATHER);
1851
1852 /*
1853 * Parse the configuration into a vdev tree. We explicitly set the
1854 * value that will be returned by spa_version() since parsing the
1855 * configuration requires knowing the version number.
1856 */
1857 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
1858 error = spa_config_parse(spa, &rvd, nvroot, NULL, 0, parse);
1859 spa_config_exit(spa, SCL_ALL, FTAG);
1860
1861 if (error != 0)
1862 return (error);
1863
1864 ASSERT(spa->spa_root_vdev == rvd);
1865
1866 if (type != SPA_IMPORT_ASSEMBLE) {
1867 ASSERT(spa_guid(spa) == pool_guid);
1868 }
1869
1870 /*
1871 * Try to open all vdevs, loading each label in the process.
1872 */
1873 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
1874 error = vdev_open(rvd);
1875 spa_config_exit(spa, SCL_ALL, FTAG);
1876 if (error != 0)
1877 return (error);
1878
1879 /*
1880 * We need to validate the vdev labels against the configuration that
1881 * we have in hand, which is dependent on the setting of mosconfig. If
1882 * mosconfig is true then we're validating the vdev labels based on
1883 * that config. Otherwise, we're validating against the cached config
1884 * (zpool.cache) that was read when we loaded the zfs module, and then
1885 * later we will recursively call spa_load() and validate against
1886 * the vdev config.
1887 *
1888 * If we're assembling a new pool that's been split off from an
1889 * existing pool, the labels haven't yet been updated so we skip
1890 * validation for now.
1891 */
1892 if (type != SPA_IMPORT_ASSEMBLE) {
1893 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
1894 error = vdev_validate(rvd);
1895 spa_config_exit(spa, SCL_ALL, FTAG);
1896
1897 if (error != 0)
1898 return (error);
1899
1900 if (rvd->vdev_state <= VDEV_STATE_CANT_OPEN)
1901 return (ENXIO);
1902 }
1903
1904 /*
1905 * Find the best uberblock.
1906 */
1907 vdev_uberblock_load(NULL, rvd, ub);
1908
1909 /*
1910 * If we weren't able to find a single valid uberblock, return failure.
1911 */
1912 if (ub->ub_txg == 0)
1913 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, ENXIO));
1914
1915 /*
1916 * If the pool is newer than the code, we can't open it.
1917 */
1918 if (ub->ub_version > SPA_VERSION)
1919 return (spa_vdev_err(rvd, VDEV_AUX_VERSION_NEWER, ENOTSUP));
1920
1921 /*
1922 * If the vdev guid sum doesn't match the uberblock, we have an
1923 * incomplete configuration. We first check to see if the pool
	 * is aware of the complete config (i.e. ZPOOL_CONFIG_VDEV_CHILDREN).
1925 * If it is, defer the vdev_guid_sum check till later so we
1926 * can handle missing vdevs.
1927 */
1928 if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_VDEV_CHILDREN,
1929 &children) != 0 && mosconfig && type != SPA_IMPORT_ASSEMBLE &&
1930 rvd->vdev_guid_sum != ub->ub_guid_sum)
1931 return (spa_vdev_err(rvd, VDEV_AUX_BAD_GUID_SUM, ENXIO));
1932
1933 if (type != SPA_IMPORT_ASSEMBLE && spa->spa_config_splitting) {
1934 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
1935 spa_try_repair(spa, config);
1936 spa_config_exit(spa, SCL_ALL, FTAG);
1937 nvlist_free(spa->spa_config_splitting);
1938 spa->spa_config_splitting = NULL;
1939 }
1940
1941 /*
1942 * Initialize internal SPA structures.
1943 */
1944 spa->spa_state = POOL_STATE_ACTIVE;
1945 spa->spa_ubsync = spa->spa_uberblock;
1946 spa->spa_verify_min_txg = spa->spa_extreme_rewind ?
1947 TXG_INITIAL - 1 : spa_last_synced_txg(spa) - TXG_DEFER_SIZE - 1;
1948 spa->spa_first_txg = spa->spa_last_ubsync_txg ?
1949 spa->spa_last_ubsync_txg : spa_last_synced_txg(spa) + 1;
1950 spa->spa_claim_max_txg = spa->spa_first_txg;
1951 spa->spa_prev_software_version = ub->ub_software_version;
1952
1953 error = dsl_pool_open(spa, spa->spa_first_txg, &spa->spa_dsl_pool);
1954 if (error)
1955 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
1956 spa->spa_meta_objset = spa->spa_dsl_pool->dp_meta_objset;
1957
1958 if (spa_dir_prop(spa, DMU_POOL_CONFIG, &spa->spa_config_object) != 0)
1959 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
1960
1961 if (!mosconfig) {
1962 uint64_t hostid;
1963 nvlist_t *policy = NULL, *nvconfig;
1964
1965 if (load_nvlist(spa, spa->spa_config_object, &nvconfig) != 0)
1966 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
1967
1968 if (!spa_is_root(spa) && nvlist_lookup_uint64(nvconfig,
1969 ZPOOL_CONFIG_HOSTID, &hostid) == 0) {
1970 char *hostname;
1971 unsigned long myhostid = 0;
1972
1973 VERIFY(nvlist_lookup_string(nvconfig,
1974 ZPOOL_CONFIG_HOSTNAME, &hostname) == 0);
1975
1976 #ifdef _KERNEL
1977 myhostid = zone_get_hostid(NULL);
1978 #else /* _KERNEL */
1979 /*
1980 * We're emulating the system's hostid in userland, so
1981 * we can't use zone_get_hostid().
1982 */
1983 (void) ddi_strtoul(hw_serial, NULL, 10, &myhostid);
1984 #endif /* _KERNEL */
1985 if (hostid != 0 && myhostid != 0 &&
1986 hostid != myhostid) {
1987 nvlist_free(nvconfig);
1988 cmn_err(CE_WARN, "pool '%s' could not be "
1989 "loaded as it was last accessed by "
1990 "another system (host: %s hostid: 0x%lx). "
1991 "See: http://www.sun.com/msg/ZFS-8000-EY",
1992 spa_name(spa), hostname,
1993 (unsigned long)hostid);
1994 return (EBADF);
1995 }
1996 }
1997 if (nvlist_lookup_nvlist(spa->spa_config,
1998 ZPOOL_REWIND_POLICY, &policy) == 0)
1999 VERIFY(nvlist_add_nvlist(nvconfig,
2000 ZPOOL_REWIND_POLICY, policy) == 0);
2001
2002 spa_config_set(spa, nvconfig);
2003 spa_unload(spa);
2004 spa_deactivate(spa);
2005 spa_activate(spa, orig_mode);
2006
2007 return (spa_load(spa, state, SPA_IMPORT_EXISTING, B_TRUE));
2008 }
2009
2010 if (spa_dir_prop(spa, DMU_POOL_SYNC_BPOBJ, &obj) != 0)
2011 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2012 error = bpobj_open(&spa->spa_deferred_bpobj, spa->spa_meta_objset, obj);
2013 if (error != 0)
2014 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2015
2016 /*
2017 * Load the bit that tells us to use the new accounting function
2018 * (raid-z deflation). If we have an older pool, this will not
2019 * be present.
2020 */
2021 error = spa_dir_prop(spa, DMU_POOL_DEFLATE, &spa->spa_deflate);
2022 if (error != 0 && error != ENOENT)
2023 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2024
2025 error = spa_dir_prop(spa, DMU_POOL_CREATION_VERSION,
2026 &spa->spa_creation_version);
2027 if (error != 0 && error != ENOENT)
2028 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2029
2030 /*
2031 * Load the persistent error log. If we have an older pool, this will
2032 * not be present.
2033 */
2034 error = spa_dir_prop(spa, DMU_POOL_ERRLOG_LAST, &spa->spa_errlog_last);
2035 if (error != 0 && error != ENOENT)
2036 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2037
2038 error = spa_dir_prop(spa, DMU_POOL_ERRLOG_SCRUB,
2039 &spa->spa_errlog_scrub);
2040 if (error != 0 && error != ENOENT)
2041 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2042
2043 /*
2044 * Load the history object. If we have an older pool, this
2045 * will not be present.
2046 */
2047 error = spa_dir_prop(spa, DMU_POOL_HISTORY, &spa->spa_history);
2048 if (error != 0 && error != ENOENT)
2049 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2050
2051 /*
2052 * If we're assembling the pool from the split-off vdevs of
2053 * an existing pool, we don't want to attach the spares & cache
2054 * devices.
2055 */
2056
2057 /*
2058 * Load any hot spares for this pool.
2059 */
2060 error = spa_dir_prop(spa, DMU_POOL_SPARES, &spa->spa_spares.sav_object);
2061 if (error != 0 && error != ENOENT)
2062 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2063 if (error == 0 && type != SPA_IMPORT_ASSEMBLE) {
2064 ASSERT(spa_version(spa) >= SPA_VERSION_SPARES);
2065 if (load_nvlist(spa, spa->spa_spares.sav_object,
2066 &spa->spa_spares.sav_config) != 0)
2067 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2068
2069 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
2070 spa_load_spares(spa);
2071 spa_config_exit(spa, SCL_ALL, FTAG);
2072 } else if (error == 0) {
2073 spa->spa_spares.sav_sync = B_TRUE;
2074 }
2075
2076 /*
2077 * Load any level 2 ARC devices for this pool.
2078 */
2079 error = spa_dir_prop(spa, DMU_POOL_L2CACHE,
2080 &spa->spa_l2cache.sav_object);
2081 if (error != 0 && error != ENOENT)
2082 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2083 if (error == 0 && type != SPA_IMPORT_ASSEMBLE) {
2084 ASSERT(spa_version(spa) >= SPA_VERSION_L2CACHE);
2085 if (load_nvlist(spa, spa->spa_l2cache.sav_object,
2086 &spa->spa_l2cache.sav_config) != 0)
2087 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2088
2089 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
2090 spa_load_l2cache(spa);
2091 spa_config_exit(spa, SCL_ALL, FTAG);
2092 } else if (error == 0) {
2093 spa->spa_l2cache.sav_sync = B_TRUE;
2094 }
2095
2096 spa->spa_delegation = zpool_prop_default_numeric(ZPOOL_PROP_DELEGATION);
2097
2098 error = spa_dir_prop(spa, DMU_POOL_PROPS, &spa->spa_pool_props_object);
2099 if (error && error != ENOENT)
2100 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2101
2102 if (error == 0) {
2103 uint64_t autoreplace;
2104
2105 spa_prop_find(spa, ZPOOL_PROP_BOOTFS, &spa->spa_bootfs);
2106 spa_prop_find(spa, ZPOOL_PROP_AUTOREPLACE, &autoreplace);
2107 spa_prop_find(spa, ZPOOL_PROP_DELEGATION, &spa->spa_delegation);
2108 spa_prop_find(spa, ZPOOL_PROP_FAILUREMODE, &spa->spa_failmode);
2109 spa_prop_find(spa, ZPOOL_PROP_AUTOEXPAND, &spa->spa_autoexpand);
2110 spa_prop_find(spa, ZPOOL_PROP_DEDUPDITTO,
2111 &spa->spa_dedup_ditto);
2112
2113 spa->spa_autoreplace = (autoreplace != 0);
2114 }
2115
2116 /*
2117 * If the 'autoreplace' property is set, then post a resource notifying
2118 * the ZFS DE that it should not issue any faults for unopenable
2119 * devices. We also iterate over the vdevs, and post a sysevent for any
2120 * unopenable vdevs so that the normal autoreplace handler can take
2121 * over.
2122 */
2123 if (spa->spa_autoreplace && state != SPA_LOAD_TRYIMPORT) {
2124 spa_check_removed(spa->spa_root_vdev);
2125 /*
2126 * For the import case, this is done in spa_import(), because
2127 * at this point we're using the spare definitions from
2128 * the MOS config, not necessarily from the userland config.
2129 */
2130 if (state != SPA_LOAD_IMPORT) {
2131 spa_aux_check_removed(&spa->spa_spares);
2132 spa_aux_check_removed(&spa->spa_l2cache);
2133 }
2134 }
2135
2136 /*
2137 * Load the vdev state for all toplevel vdevs.
2138 */
2139 vdev_load(rvd);
2140
2141 /*
2142 * Propagate the leaf DTLs we just loaded all the way up the tree.
2143 */
2144 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
2145 vdev_dtl_reassess(rvd, 0, 0, B_FALSE);
2146 spa_config_exit(spa, SCL_ALL, FTAG);
2147
2148 /*
2149 * Load the DDTs (dedup tables).
2150 */
2151 error = ddt_load(spa);
2152 if (error != 0)
2153 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2154
2155 spa_update_dspace(spa);
2156
2157 /*
2158 * Validate the config, using the MOS config to fill in any
2159 * information which might be missing. If we fail to validate
2160 * the config then declare the pool unfit for use. If we're
2161 * assembling a pool from a split, the log is not transferred
2162 * over.
2163 */
2164 if (type != SPA_IMPORT_ASSEMBLE) {
2165 nvlist_t *nvconfig;
2166
2167 if (load_nvlist(spa, spa->spa_config_object, &nvconfig) != 0)
2168 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2169
2170 if (!spa_config_valid(spa, nvconfig)) {
2171 nvlist_free(nvconfig);
2172 return (spa_vdev_err(rvd, VDEV_AUX_BAD_GUID_SUM,
2173 ENXIO));
2174 }
2175 nvlist_free(nvconfig);
2176
2177 /*
2178 * Now that we've validated the config, check the state of the
2179 * root vdev. If it can't be opened, it indicates one or
2180 * more toplevel vdevs are faulted.
2181 */
2182 if (rvd->vdev_state <= VDEV_STATE_CANT_OPEN)
2183 return (ENXIO);
2184
2185 if (spa_check_logs(spa)) {
2186 *ereport = FM_EREPORT_ZFS_LOG_REPLAY;
2187 return (spa_vdev_err(rvd, VDEV_AUX_BAD_LOG, ENXIO));
2188 }
2189 }
2190
2191 /*
2192 * We've successfully opened the pool; verify that we're ready
2193 * to start pushing transactions.
2194 */
2195 if (state != SPA_LOAD_TRYIMPORT) {
2196 if ((error = spa_load_verify(spa)) != 0)
2197 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA,
2198 error));
2199 }
2200
2201 if (spa_writeable(spa) && (state == SPA_LOAD_RECOVER ||
2202 spa->spa_load_max_txg == UINT64_MAX)) {
2203 dmu_tx_t *tx;
2204 int need_update = B_FALSE;
2205
2206 ASSERT(state != SPA_LOAD_TRYIMPORT);
2207
2208 /*
2209 * Claim log blocks that haven't been committed yet.
2210 * This must all happen in a single txg.
2211 * Note: spa_claim_max_txg is updated by spa_claim_notify(),
2212 * invoked from zil_claim_log_block()'s i/o done callback.
2213 * Price of rollback is that we abandon the log.
2214 */
2215 spa->spa_claiming = B_TRUE;
2216
2217 tx = dmu_tx_create_assigned(spa_get_dsl(spa),
2218 spa_first_txg(spa));
2219 (void) dmu_objset_find(spa_name(spa),
2220 zil_claim, tx, DS_FIND_CHILDREN);
2221 dmu_tx_commit(tx);
2222
2223 spa->spa_claiming = B_FALSE;
2224
2225 spa_set_log_state(spa, SPA_LOG_GOOD);
2226 spa->spa_sync_on = B_TRUE;
2227 txg_sync_start(spa->spa_dsl_pool);
2228
2229 /*
2230 * Wait for all claims to sync. We sync up to the highest
2231 * claimed log block birth time so that claimed log blocks
2232 * don't appear to be from the future. spa_claim_max_txg
2233 * will have been set for us by either zil_check_log_chain()
2234 * (invoked from spa_check_logs()) or zil_claim() above.
2235 */
2236 txg_wait_synced(spa->spa_dsl_pool, spa->spa_claim_max_txg);
2237
2238 /*
2239 * If the config cache is stale, or we have uninitialized
2240 * metaslabs (see spa_vdev_add()), then update the config.
2241 *
2242 * If this is a verbatim import, trust the current
2243 * in-core spa_config and update the disk labels.
2244 */
2245 if (config_cache_txg != spa->spa_config_txg ||
2246 state == SPA_LOAD_IMPORT ||
2247 state == SPA_LOAD_RECOVER ||
2248 (spa->spa_import_flags & ZFS_IMPORT_VERBATIM))
2249 need_update = B_TRUE;
2250
2251 for (int c = 0; c < rvd->vdev_children; c++)
2252 if (rvd->vdev_child[c]->vdev_ms_array == 0)
2253 need_update = B_TRUE;
2254
2255 /*
2256 * Update the config cache asynchronously in case we're the
2257 * root pool, in which case the config cache isn't writable yet.
2258 */
2259 if (need_update)
2260 spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
2261
2262 /*
2263 * Check all DTLs to see if anything needs resilvering.
2264 */
2265 if (!dsl_scan_resilvering(spa->spa_dsl_pool) &&
2266 vdev_resilver_needed(rvd, NULL, NULL))
2267 spa_async_request(spa, SPA_ASYNC_RESILVER);
2268
2269 /*
2270 * Delete any inconsistent datasets.
2271 */
2272 (void) dmu_objset_find(spa_name(spa),
2273 dsl_destroy_inconsistent, NULL, DS_FIND_CHILDREN);
2274
2275 /*
2276 * Clean up any stale temporary dataset userrefs.
2277 */
2278 dsl_pool_clean_tmp_userrefs(spa->spa_dsl_pool);
2279 }
2280
2281 return (0);
2282 }
2283
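/*
 * Tear down the partially-loaded pool, back spa_load_max_txg off by one,
 * and retry spa_load() against the existing on-disk state. Used by
 * spa_load_best() when rewinding to an earlier txg.
 */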
2284 static int
2285 spa_load_retry(spa_t *spa, spa_load_state_t state, int mosconfig)
2286 {
2287 int mode = spa->spa_mode;
2288
2289 spa_unload(spa);
2290 spa_deactivate(spa);
2291
2292 spa->spa_load_max_txg--;
2293
2294 spa_activate(spa, mode);
2295 spa_async_suspend(spa);
2296
2297 return (spa_load(spa, state, SPA_IMPORT_EXISTING, mosconfig));
2298 }
2299
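/*
 * Attempt to load the pool at the requested txg. If that fails and the
 * rewind policy allows it, step backwards through progressively older
 * uberblocks (within the acceptable rewind window) until the load
 * succeeds or we run out of candidates.
 */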
2300 static int
2301 spa_load_best(spa_t *spa, spa_load_state_t state, int mosconfig,
2302 uint64_t max_request, int rewind_flags)
2303 {
2304 nvlist_t *config = NULL;
2305 int load_error, rewind_error;
2306 uint64_t safe_rewind_txg;
2307 uint64_t min_txg;
2308
2309 if (spa->spa_load_txg && state == SPA_LOAD_RECOVER) {
2310 spa->spa_load_max_txg = spa->spa_load_txg;
2311 spa_set_log_state(spa, SPA_LOG_CLEAR);
2312 } else {
2313 spa->spa_load_max_txg = max_request;
2314 }
2315
2316 load_error = rewind_error = spa_load(spa, state, SPA_IMPORT_EXISTING,
2317 mosconfig);
2318 if (load_error == 0)
2319 return (0);
2320
2321 if (spa->spa_root_vdev != NULL)
2322 config = spa_config_generate(spa, NULL, -1ULL, B_TRUE);
2323
2324 spa->spa_last_ubsync_txg = spa->spa_uberblock.ub_txg;
2325 spa->spa_last_ubsync_txg_ts = spa->spa_uberblock.ub_timestamp;
2326
2327 if (rewind_flags & ZPOOL_NEVER_REWIND) {
2328 nvlist_free(config);
2329 return (load_error);
2330 }
2331
2332 /* Price of rolling back is discarding txgs, including log */
2333 if (state == SPA_LOAD_RECOVER)
2334 spa_set_log_state(spa, SPA_LOG_CLEAR);
2335
2336 spa->spa_load_max_txg = spa->spa_last_ubsync_txg;
2337 safe_rewind_txg = spa->spa_last_ubsync_txg - TXG_DEFER_SIZE;
2338 min_txg = (rewind_flags & ZPOOL_EXTREME_REWIND) ?
2339 TXG_INITIAL : safe_rewind_txg;
2340
2341 /*
2342 * Continue as long as we're finding errors, we're still within
2343 * the acceptable rewind range, and we're still finding uberblocks
2344 */
2345 while (rewind_error && spa->spa_uberblock.ub_txg >= min_txg &&
2346 spa->spa_uberblock.ub_txg <= spa->spa_load_max_txg) {
2347 if (spa->spa_load_max_txg < safe_rewind_txg)
2348 spa->spa_extreme_rewind = B_TRUE;
2349 rewind_error = spa_load_retry(spa, state, mosconfig);
2350 }
2351
2352 spa->spa_extreme_rewind = B_FALSE;
2353 spa->spa_load_max_txg = UINT64_MAX;
2354
2355 if (config && (rewind_error || state != SPA_LOAD_RECOVER))
2356 spa_config_set(spa, config);
2357
2358 return (state == SPA_LOAD_RECOVER ? rewind_error : load_error);
2359 }
2360
2361 /*
2362 * Pool Open/Import
2363 *
2364 * The import case is identical to an open except that the configuration is sent
2365 * down from userland, instead of grabbed from the configuration cache. For the
2366 * case of an open, the pool configuration will exist in the
2367 * POOL_STATE_UNINITIALIZED state.
2368 *
2369 * The stats information (gen/count/ustats) is used to gather vdev statistics at
2370 * the same time we open the pool, without having to keep around the spa_t in some
2371 * ambiguous state.
2372 */
2373 static int
2374 spa_open_common(const char *pool, spa_t **spapp, void *tag, nvlist_t *nvpolicy,
2375 nvlist_t **config)
2376 {
2377 spa_t *spa;
2378 spa_load_state_t state = SPA_LOAD_OPEN;
2379 int error;
2380 int locked = B_FALSE;
2381
2382 *spapp = NULL;
2383
2384 /*
2385 * As disgusting as this is, we need to support recursive calls to this
2386 * function because dsl_dir_open() is called during spa_load(), and ends
2387 * up calling spa_open() again. The real fix is to figure out how to
2388 * avoid dsl_dir_open() calling this in the first place.
2389 */
2390 if (mutex_owner(&spa_namespace_lock) != curthread) {
2391 mutex_enter(&spa_namespace_lock);
2392 locked = B_TRUE;
2393 }
2394
2395 if ((spa = spa_lookup(pool)) == NULL) {
2396 if (locked)
2397 mutex_exit(&spa_namespace_lock);
2398 return (ENOENT);
2399 }
2400
2401 if (spa->spa_state == POOL_STATE_UNINITIALIZED) {
2402 zpool_rewind_policy_t policy;
2403
2404 zpool_get_rewind_policy(nvpolicy ? nvpolicy : spa->spa_config,
2405 &policy);
2406 if (policy.zrp_request & ZPOOL_DO_REWIND)
2407 state = SPA_LOAD_RECOVER;
2408
2409 spa_activate(spa, spa_mode_global);
2410
2411 if (state != SPA_LOAD_RECOVER)
2412 spa->spa_last_ubsync_txg = spa->spa_load_txg = 0;
2413
2414 error = spa_load_best(spa, state, B_FALSE, policy.zrp_txg,
2415 policy.zrp_request);
2416
2417 if (error == EBADF) {
2418 /*
2419 * If vdev_validate() returns failure (indicated by
2420 * EBADF), it means that one of the vdevs reports that
2421 * the pool has been exported or destroyed. If
2422 * this is the case, the config cache is out of sync and
2423 * we should remove the pool from the namespace.
2424 */
2425 spa_unload(spa);
2426 spa_deactivate(spa);
2427 spa_config_sync(spa, B_TRUE, B_TRUE);
2428 spa_remove(spa);
2429 if (locked)
2430 mutex_exit(&spa_namespace_lock);
2431 return (ENOENT);
2432 }
2433
2434 if (error) {
2435 /*
2436 * We can't open the pool, but we still have useful
2437 * information: the state of each vdev after the
2438 * attempted vdev_open(). Return this to the user.
2439 */
2440 if (config != NULL && spa->spa_config) {
2441 VERIFY(nvlist_dup(spa->spa_config, config,
2442 KM_SLEEP) == 0);
2443 VERIFY(nvlist_add_nvlist(*config,
2444 ZPOOL_CONFIG_LOAD_INFO,
2445 spa->spa_load_info) == 0);
2446 }
2447 spa_unload(spa);
2448 spa_deactivate(spa);
2449 spa->spa_last_open_failed = error;
2450 if (locked)
2451 mutex_exit(&spa_namespace_lock);
2452 *spapp = NULL;
2453 return (error);
2454 }
2455 }
2456
2457 spa_open_ref(spa, tag);
2458
2459 if (config != NULL)
2460 *config = spa_config_generate(spa, NULL, -1ULL, B_TRUE);
2461
2462 /*
2463 * If we've recovered the pool, pass back any information we
2464 * gathered while doing the load.
2465 */
2466 if (state == SPA_LOAD_RECOVER) {
2467 VERIFY(nvlist_add_nvlist(*config, ZPOOL_CONFIG_LOAD_INFO,
2468 spa->spa_load_info) == 0);
2469 }
2470
2471 if (locked) {
2472 spa->spa_last_open_failed = 0;
2473 spa->spa_last_ubsync_txg = 0;
2474 spa->spa_load_txg = 0;
2475 mutex_exit(&spa_namespace_lock);
2476 }
2477
2478 *spapp = spa;
2479
2480 return (0);
2481 }
2482
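/*
 * Open the named pool, applying the given rewind policy and returning
 * its configuration.
 */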
2483 int
2484 spa_open_rewind(const char *name, spa_t **spapp, void *tag, nvlist_t *policy,
2485 nvlist_t **config)
2486 {
2487 return (spa_open_common(name, spapp, tag, policy, config));
2488 }
2489
2490 int
2491 spa_open(const char *name, spa_t **spapp, void *tag)
2492 {
2493 return (spa_open_common(name, spapp, tag, NULL, NULL));
2494 }
2495
2496 /*
2497 * Lookup the given spa_t, incrementing the inject count in the process,
2498 * preventing it from being exported or destroyed.
2499 */
2500 spa_t *
2501 spa_inject_addref(char *name)
2502 {
2503 spa_t *spa;
2504
2505 mutex_enter(&spa_namespace_lock);
2506 if ((spa = spa_lookup(name)) == NULL) {
2507 mutex_exit(&spa_namespace_lock);
2508 return (NULL);
2509 }
2510 spa->spa_inject_ref++;
2511 mutex_exit(&spa_namespace_lock);
2512
2513 return (spa);
2514 }
2515
2516 void
2517 spa_inject_delref(spa_t *spa)
2518 {
2519 mutex_enter(&spa_namespace_lock);
2520 spa->spa_inject_ref--;
2521 mutex_exit(&spa_namespace_lock);
2522 }
2523
2524 /*
2525 * Add hot spare device information to the nvlist.
2526 */
2527 static void
2528 spa_add_spares(spa_t *spa, nvlist_t *config)
2529 {
2530 nvlist_t **spares;
2531 uint_t i, nspares;
2532 nvlist_t *nvroot;
2533 uint64_t guid;
2534 vdev_stat_t *vs;
2535 uint_t vsc;
2536 uint64_t pool;
2537
2538 ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER));
2539
2540 if (spa->spa_spares.sav_count == 0)
2541 return;
2542
2543 VERIFY(nvlist_lookup_nvlist(config,
2544 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
2545 VERIFY(nvlist_lookup_nvlist_array(spa->spa_spares.sav_config,
2546 ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);
2547 if (nspares != 0) {
2548 VERIFY(nvlist_add_nvlist_array(nvroot,
2549 ZPOOL_CONFIG_SPARES, spares, nspares) == 0);
2550 VERIFY(nvlist_lookup_nvlist_array(nvroot,
2551 ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);
2552
2553 /*
2554 * Go through and find any spares which have since been
2555 * repurposed as an active spare. If this is the case, update
2556 * their status appropriately.
2557 */
2558 for (i = 0; i < nspares; i++) {
2559 VERIFY(nvlist_lookup_uint64(spares[i],
2560 ZPOOL_CONFIG_GUID, &guid) == 0);
2561 if (spa_spare_exists(guid, &pool, NULL) &&
2562 pool != 0ULL) {
2563 VERIFY(nvlist_lookup_uint64_array(
2564 spares[i], ZPOOL_CONFIG_VDEV_STATS,
2565 (uint64_t **)&vs, &vsc) == 0);
2566 vs->vs_state = VDEV_STATE_CANT_OPEN;
2567 vs->vs_aux = VDEV_AUX_SPARED;
2568 }
2569 }
2570 }
2571 }
2572
2573 /*
2574 * Add l2cache device information to the nvlist, including vdev stats.
2575 */
2576 static void
2577 spa_add_l2cache(spa_t *spa, nvlist_t *config)
2578 {
2579 nvlist_t **l2cache;
2580 uint_t i, j, nl2cache;
2581 nvlist_t *nvroot;
2582 uint64_t guid;
2583 vdev_t *vd;
2584 vdev_stat_t *vs;
2585 uint_t vsc;
2586
2587 ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER));
2588
2589 if (spa->spa_l2cache.sav_count == 0)
2590 return;
2591
2592 VERIFY(nvlist_lookup_nvlist(config,
2593 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
2594 VERIFY(nvlist_lookup_nvlist_array(spa->spa_l2cache.sav_config,
2595 ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0);
2596 if (nl2cache != 0) {
2597 VERIFY(nvlist_add_nvlist_array(nvroot,
2598 ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0);
2599 VERIFY(nvlist_lookup_nvlist_array(nvroot,
2600 ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0);
2601
2602 /*
2603 * Update level 2 cache device stats.
2604 */
2605
2606 for (i = 0; i < nl2cache; i++) {
2607 VERIFY(nvlist_lookup_uint64(l2cache[i],
2608 ZPOOL_CONFIG_GUID, &guid) == 0);
2609
2610 vd = NULL;
2611 for (j = 0; j < spa->spa_l2cache.sav_count; j++) {
2612 if (guid ==
2613 spa->spa_l2cache.sav_vdevs[j]->vdev_guid) {
2614 vd = spa->spa_l2cache.sav_vdevs[j];
2615 break;
2616 }
2617 }
2618 ASSERT(vd != NULL);
2619
2620 VERIFY(nvlist_lookup_uint64_array(l2cache[i],
2621 ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs, &vsc)
2622 == 0);
2623 vdev_get_stats(vd, vs);
2624 }
2625 }
2626 }
2627
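/*
 * Return the named pool's current configuration, augmented with spare,
 * l2cache, error count, and load-time information, and fill in its
 * alternate root (if any) even when the pool is faulted.
 */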
2628 int
2629 spa_get_stats(const char *name, nvlist_t **config, char *altroot, size_t buflen)
2630 {
2631 int error;
2632 spa_t *spa;
2633
2634 *config = NULL;
2635 error = spa_open_common(name, &spa, FTAG, NULL, config);
2636
2637 if (spa != NULL) {
2638 /*
2639 * This still leaves a window of inconsistency where the spares
2640 * or l2cache devices could change and the config would be
2641 * self-inconsistent.
2642 */
2643 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
2644
2645 if (*config != NULL) {
2646 uint64_t loadtimes[2];
2647
2648 loadtimes[0] = spa->spa_loaded_ts.tv_sec;
2649 loadtimes[1] = spa->spa_loaded_ts.tv_nsec;
2650 VERIFY(nvlist_add_uint64_array(*config,
2651 ZPOOL_CONFIG_LOADED_TIME, loadtimes, 2) == 0);
2652
2653 VERIFY(nvlist_add_uint64(*config,
2654 ZPOOL_CONFIG_ERRCOUNT,
2655 spa_get_errlog_size(spa)) == 0);
2656
2657 if (spa_suspended(spa))
2658 VERIFY(nvlist_add_uint64(*config,
2659 ZPOOL_CONFIG_SUSPENDED,
2660 spa->spa_failmode) == 0);
2661
2662 spa_add_spares(spa, *config);
2663 spa_add_l2cache(spa, *config);
2664 }
2665 }
2666
2667 /*
2668 * We want to get the alternate root even for faulted pools, so we cheat
2669 * and call spa_lookup() directly.
2670 */
2671 if (altroot) {
2672 if (spa == NULL) {
2673 mutex_enter(&spa_namespace_lock);
2674 spa = spa_lookup(name);
2675 if (spa)
2676 spa_altroot(spa, altroot, buflen);
2677 else
2678 altroot[0] = '\0';
2679 spa = NULL;
2680 mutex_exit(&spa_namespace_lock);
2681 } else {
2682 spa_altroot(spa, altroot, buflen);
2683 }
2684 }
2685
2686 if (spa != NULL) {
2687 spa_config_exit(spa, SCL_CONFIG, FTAG);
2688 spa_close(spa, FTAG);
2689 }
2690
2691 return (error);
2692 }
2693
2694 /*
2695 * Validate that the auxiliary device array is well formed. We must have an
2696 * array of nvlists, each of which describes a valid leaf vdev. If this is an
2697 * import (mode is VDEV_ALLOC_SPARE), then we allow corrupted spares to be
2698 * specified, as long as they are well-formed.
2699 */
2700 static int
2701 spa_validate_aux_devs(spa_t *spa, nvlist_t *nvroot, uint64_t crtxg, int mode,
2702 spa_aux_vdev_t *sav, const char *config, uint64_t version,
2703 vdev_labeltype_t label)
2704 {
2705 nvlist_t **dev;
2706 uint_t i, ndev;
2707 vdev_t *vd;
2708 int error;
2709
2710 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
2711
2712 /*
2713 * It's acceptable to have no devs specified.
2714 */
2715 if (nvlist_lookup_nvlist_array(nvroot, config, &dev, &ndev) != 0)
2716 return (0);
2717
2718 if (ndev == 0)
2719 return (EINVAL);
2720
2721 /*
2722 * Make sure the pool is formatted with a version that supports this
2723 * device type.
2724 */
2725 if (spa_version(spa) < version)
2726 return (ENOTSUP);
2727
2728 /*
2729 * Set the pending device list so we correctly handle device in-use
2730 * checking.
2731 */
2732 sav->sav_pending = dev;
2733 sav->sav_npending = ndev;
2734
2735 for (i = 0; i < ndev; i++) {
2736 if ((error = spa_config_parse(spa, &vd, dev[i], NULL, 0,
2737 mode)) != 0)
2738 goto out;
2739
2740 if (!vd->vdev_ops->vdev_op_leaf) {
2741 vdev_free(vd);
2742 error = EINVAL;
2743 goto out;
2744 }
2745
2746 /*
2747 * The L2ARC currently only supports disk devices in
2748 * kernel context. For user-level testing, we allow it.
2749 */
2750 #ifdef _KERNEL
2751 if ((strcmp(config, ZPOOL_CONFIG_L2CACHE) == 0) &&
2752 strcmp(vd->vdev_ops->vdev_op_type, VDEV_TYPE_DISK) != 0) {
2753 error = ENOTBLK;
2754 goto out;
2755 }
2756 #endif
2757 vd->vdev_top = vd;
2758
2759 if ((error = vdev_open(vd)) == 0 &&
2760 (error = vdev_label_init(vd, crtxg, label)) == 0) {
2761 VERIFY(nvlist_add_uint64(dev[i], ZPOOL_CONFIG_GUID,
2762 vd->vdev_guid) == 0);
2763 }
2764
2765 vdev_free(vd);
2766
2767 if (error &&
2768 (mode != VDEV_ALLOC_SPARE && mode != VDEV_ALLOC_L2CACHE))
2769 goto out;
2770 else
2771 error = 0;
2772 }
2773
2774 out:
2775 sav->sav_pending = NULL;
2776 sav->sav_npending = 0;
2777 return (error);
2778 }
2779
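/*
 * Validate both the hot spare and level 2 cache device lists in 'nvroot'.
 */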
2780 static int
2781 spa_validate_aux(spa_t *spa, nvlist_t *nvroot, uint64_t crtxg, int mode)
2782 {
2783 int error;
2784
2785 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
2786
2787 if ((error = spa_validate_aux_devs(spa, nvroot, crtxg, mode,
2788 &spa->spa_spares, ZPOOL_CONFIG_SPARES, SPA_VERSION_SPARES,
2789 VDEV_LABEL_SPARE)) != 0) {
2790 return (error);
2791 }
2792
2793 return (spa_validate_aux_devs(spa, nvroot, crtxg, mode,
2794 &spa->spa_l2cache, ZPOOL_CONFIG_L2CACHE, SPA_VERSION_L2CACHE,
2795 VDEV_LABEL_L2CACHE));
2796 }
2797
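/*
 * Replace the named aux device list (spares or l2cache) in sav_config
 * with the concatenation of the existing list, if any, and 'devs'.
 */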
2798 static void
2799 spa_set_aux_vdevs(spa_aux_vdev_t *sav, nvlist_t **devs, int ndevs,
2800 const char *config)
2801 {
2802 int i;
2803
2804 if (sav->sav_config != NULL) {
2805 nvlist_t **olddevs;
2806 uint_t oldndevs;
2807 nvlist_t **newdevs;
2808
2809 /*
2810 * Generate new dev list by concatenating with the
2811 * current dev list.
2812 */
2813 VERIFY(nvlist_lookup_nvlist_array(sav->sav_config, config,
2814 &olddevs, &oldndevs) == 0);
2815
2816 newdevs = kmem_alloc(sizeof (void *) *
2817 (ndevs + oldndevs), KM_SLEEP);
2818 for (i = 0; i < oldndevs; i++)
2819 VERIFY(nvlist_dup(olddevs[i], &newdevs[i],
2820 KM_SLEEP) == 0);
2821 for (i = 0; i < ndevs; i++)
2822 VERIFY(nvlist_dup(devs[i], &newdevs[i + oldndevs],
2823 KM_SLEEP) == 0);
2824
2825 VERIFY(nvlist_remove(sav->sav_config, config,
2826 DATA_TYPE_NVLIST_ARRAY) == 0);
2827
2828 VERIFY(nvlist_add_nvlist_array(sav->sav_config,
2829 config, newdevs, ndevs + oldndevs) == 0);
2830 for (i = 0; i < oldndevs + ndevs; i++)
2831 nvlist_free(newdevs[i]);
2832 kmem_free(newdevs, (oldndevs + ndevs) * sizeof (void *));
2833 } else {
2834 /*
2835 * Generate a new dev list.
2836 */
2837 VERIFY(nvlist_alloc(&sav->sav_config, NV_UNIQUE_NAME,
2838 KM_SLEEP) == 0);
2839 VERIFY(nvlist_add_nvlist_array(sav->sav_config, config,
2840 devs, ndevs) == 0);
2841 }
2842 }
2843
2844 /*
2845 * Stop and drop level 2 ARC devices
2846 */
2847 void
2848 spa_l2cache_drop(spa_t *spa)
2849 {
2850 vdev_t *vd;
2851 int i;
2852 spa_aux_vdev_t *sav = &spa->spa_l2cache;
2853
2854 for (i = 0; i < sav->sav_count; i++) {
2855 uint64_t pool;
2856
2857 vd = sav->sav_vdevs[i];
2858 ASSERT(vd != NULL);
2859
2860 if (spa_l2cache_exists(vd->vdev_guid, &pool) &&
2861 pool != 0ULL && l2arc_vdev_present(vd))
2862 l2arc_remove_vdev(vd);
2863 if (vd->vdev_isl2cache)
2864 spa_l2cache_remove(vd);
2865 vdev_clear_stats(vd);
2866 (void) vdev_close(vd);
2867 }
2868 }
2869
2870 /*
2871 * Pool Creation
2872 */
2873 int
2874 spa_create(const char *pool, nvlist_t *nvroot, nvlist_t *props,
2875 const char *history_str, nvlist_t *zplprops)
2876 {
2877 spa_t *spa;
2878 char *altroot = NULL;
2879 vdev_t *rvd;
2880 dsl_pool_t *dp;
2881 dmu_tx_t *tx;
2882 int error = 0;
2883 uint64_t txg = TXG_INITIAL;
2884 nvlist_t **spares, **l2cache;
2885 uint_t nspares, nl2cache;
2886 uint64_t version, obj;
2887
2888 /*
2889 * If this pool already exists, return failure.
2890 */
2891 mutex_enter(&spa_namespace_lock);
2892 if (spa_lookup(pool) != NULL) {
2893 mutex_exit(&spa_namespace_lock);
2894 return (EEXIST);
2895 }
2896
2897 /*
2898 * Allocate a new spa_t structure.
2899 */
2900 (void) nvlist_lookup_string(props,
2901 zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot);
2902 spa = spa_add(pool, NULL, altroot);
2903 spa_activate(spa, spa_mode_global);
2904
2905 if (props && (error = spa_prop_validate(spa, props))) {
2906 spa_deactivate(spa);
2907 spa_remove(spa);
2908 mutex_exit(&spa_namespace_lock);
2909 return (error);
2910 }
2911
2912 if (nvlist_lookup_uint64(props, zpool_prop_to_name(ZPOOL_PROP_VERSION),
2913 &version) != 0)
2914 version = SPA_VERSION;
2915 ASSERT(version <= SPA_VERSION);
2916
2917 spa->spa_first_txg = txg;
2918 spa->spa_uberblock.ub_txg = txg - 1;
2919 spa->spa_uberblock.ub_version = version;
2920 spa->spa_ubsync = spa->spa_uberblock;
2921
2922 /*
2923 * Create "The Godfather" zio to hold all async IOs
2924 */
2925 spa->spa_async_zio_root = zio_root(spa, NULL, NULL,
2926 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE | ZIO_FLAG_GODFATHER);
2927
2928 /*
2929 * Create the root vdev.
2930 */
2931 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
2932
2933 error = spa_config_parse(spa, &rvd, nvroot, NULL, 0, VDEV_ALLOC_ADD);
2934
2935 ASSERT(error != 0 || rvd != NULL);
2936 ASSERT(error != 0 || spa->spa_root_vdev == rvd);
2937
2938 if (error == 0 && !zfs_allocatable_devs(nvroot))
2939 error = EINVAL;
2940
2941 if (error == 0 &&
2942 (error = vdev_create(rvd, txg, B_FALSE)) == 0 &&
2943 (error = spa_validate_aux(spa, nvroot, txg,
2944 VDEV_ALLOC_ADD)) == 0) {
2945 for (int c = 0; c < rvd->vdev_children; c++) {
2946 vdev_metaslab_set_size(rvd->vdev_child[c]);
2947 vdev_expand(rvd->vdev_child[c], txg);
2948 }
2949 }
2950
2951 spa_config_exit(spa, SCL_ALL, FTAG);
2952
2953 if (error != 0) {
2954 spa_unload(spa);
2955 spa_deactivate(spa);
2956 spa_remove(spa);
2957 mutex_exit(&spa_namespace_lock);
2958 return (error);
2959 }
2960
2961 /*
2962 * Get the list of spares, if specified.
2963 */
2964 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
2965 &spares, &nspares) == 0) {
2966 VERIFY(nvlist_alloc(&spa->spa_spares.sav_config, NV_UNIQUE_NAME,
2967 KM_SLEEP) == 0);
2968 VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config,
2969 ZPOOL_CONFIG_SPARES, spares, nspares) == 0);
2970 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
2971 spa_load_spares(spa);
2972 spa_config_exit(spa, SCL_ALL, FTAG);
2973 spa->spa_spares.sav_sync = B_TRUE;
2974 }
2975
2976 /*
2977 * Get the list of level 2 cache devices, if specified.
2978 */
2979 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
2980 &l2cache, &nl2cache) == 0) {
2981 VERIFY(nvlist_alloc(&spa->spa_l2cache.sav_config,
2982 NV_UNIQUE_NAME, KM_SLEEP) == 0);
2983 VERIFY(nvlist_add_nvlist_array(spa->spa_l2cache.sav_config,
2984 ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0);
2985 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
2986 spa_load_l2cache(spa);
2987 spa_config_exit(spa, SCL_ALL, FTAG);
2988 spa->spa_l2cache.sav_sync = B_TRUE;
2989 }
2990
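/*
 * Create the DSL pool; this also creates the meta-objset (MOS) used below.
 */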
2991 spa->spa_dsl_pool = dp = dsl_pool_create(spa, zplprops, txg);
2992 spa->spa_meta_objset = dp->dp_meta_objset;
2993
2994 /*
2995 * Create DDTs (dedup tables).
2996 */
2997 ddt_create(spa);
2998
2999 spa_update_dspace(spa);
3000
3001 tx = dmu_tx_create_assigned(dp, txg);
3002
3003 /*
3004 * Create the pool config object.
3005 */
3006 spa->spa_config_object = dmu_object_alloc(spa->spa_meta_objset,
3007 DMU_OT_PACKED_NVLIST, SPA_CONFIG_BLOCKSIZE,
3008 DMU_OT_PACKED_NVLIST_SIZE, sizeof (uint64_t), tx);
3009
3010 if (zap_add(spa->spa_meta_objset,
3011 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CONFIG,
3012 sizeof (uint64_t), 1, &spa->spa_config_object, tx) != 0) {
3013 cmn_err(CE_PANIC, "failed to add pool config");
3014 }
3015
3016 if (zap_add(spa->spa_meta_objset,
3017 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CREATION_VERSION,
3018 sizeof (uint64_t), 1, &version, tx) != 0) {
3019 cmn_err(CE_PANIC, "failed to add pool version");
3020 }
3021
3022 /* Newly created pools with the right version are always deflated. */
3023 if (version >= SPA_VERSION_RAIDZ_DEFLATE) {
3024 spa->spa_deflate = TRUE;
3025 if (zap_add(spa->spa_meta_objset,
3026 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE,
3027 sizeof (uint64_t), 1, &spa->spa_deflate, tx) != 0) {
3028 cmn_err(CE_PANIC, "failed to add deflate");
3029 }
3030 }
3031
3032 /*
3033 * Create the deferred-free bpobj. Turn off compression
3034 * because sync-to-convergence takes longer if the blocksize
3035 * keeps changing.
3036 */
3037 obj = bpobj_alloc(spa->spa_meta_objset, 1 << 14, tx);
3038 dmu_object_set_compress(spa->spa_meta_objset, obj,
3039 ZIO_COMPRESS_OFF, tx);
3040 if (zap_add(spa->spa_meta_objset,
3041 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_SYNC_BPOBJ,
3042 sizeof (uint64_t), 1, &obj, tx) != 0) {
3043 cmn_err(CE_PANIC, "failed to add bpobj");
3044 }
3045 VERIFY3U(0, ==, bpobj_open(&spa->spa_deferred_bpobj,
3046 spa->spa_meta_objset, obj));
3047
3048 /*
3049 * Create the pool's history object.
3050 */
3051 if (version >= SPA_VERSION_ZPOOL_HISTORY)
3052 spa_history_create_obj(spa, tx);
3053
3054 /*
3055 * Set pool properties.
3056 */
3057 spa->spa_bootfs = zpool_prop_default_numeric(ZPOOL_PROP_BOOTFS);
3058 spa->spa_delegation = zpool_prop_default_numeric(ZPOOL_PROP_DELEGATION);
3059 spa->spa_failmode = zpool_prop_default_numeric(ZPOOL_PROP_FAILUREMODE);
3060 spa->spa_autoexpand = zpool_prop_default_numeric(ZPOOL_PROP_AUTOEXPAND);
3061
3062 if (props != NULL) {
3063 spa_configfile_set(spa, props, B_FALSE);
3064 spa_sync_props(spa, props, tx);
3065 }
3066
3067 dmu_tx_commit(tx);
3068
3069 spa->spa_sync_on = B_TRUE;
3070 txg_sync_start(spa->spa_dsl_pool);
3071
3072 /*
3073 * We explicitly wait for the first transaction to complete so that our
3074 * bean counters are appropriately updated.
3075 */
3076 txg_wait_synced(spa->spa_dsl_pool, txg);
3077
3078 spa_config_sync(spa, B_FALSE, B_TRUE);
3079
3080 if (version >= SPA_VERSION_ZPOOL_HISTORY && history_str != NULL)
3081 (void) spa_history_log(spa, history_str, LOG_CMD_POOL_CREATE);
3082 spa_history_log_version(spa, LOG_POOL_CREATE);
3083
3084 spa->spa_minref = refcount_count(&spa->spa_refcount);
3085
3086 mutex_exit(&spa_namespace_lock);
3087
3088 return (0);
3089 }
3090
3091 #ifdef _KERNEL
3092 /*
3093 * Get the root pool information from the root disk, then import the root pool
3094 * at system boot time.
3095 */
3096 extern int vdev_disk_read_rootlabel(char *, char *, nvlist_t **);
3097
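/*
 * Read the label from the given boot device and turn it into a complete
 * pool configuration by wrapping the device's top-level vdev in a new
 * root vdev.
 */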
3098 static nvlist_t *
3099 spa_generate_rootconf(char *devpath, char *devid, uint64_t *guid)
3100 {
3101 nvlist_t *config;
3102 nvlist_t *nvtop, *nvroot;
3103 uint64_t pgid;
3104
3105 if (vdev_disk_read_rootlabel(devpath, devid, &config) != 0)
3106 return (NULL);
3107
3108 /*
3109 * Add this top-level vdev to the child array.
3110 */
3111 VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
3112 &nvtop) == 0);
3113 VERIFY(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
3114 &pgid) == 0);
3115 VERIFY(nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID, guid) == 0);
3116
3117 /*
3118 * Put this pool's top-level vdevs into a root vdev.
3119 */
3120 VERIFY(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, KM_SLEEP) == 0);
3121 VERIFY(nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE,
3122 VDEV_TYPE_ROOT) == 0);
3123 VERIFY(nvlist_add_uint64(nvroot, ZPOOL_CONFIG_ID, 0ULL) == 0);
3124 VERIFY(nvlist_add_uint64(nvroot, ZPOOL_CONFIG_GUID, pgid) == 0);
3125 VERIFY(nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
3126 &nvtop, 1) == 0);
3127
3128 /*
3129 * Replace the existing vdev_tree with the new root vdev in
3130 * this pool's configuration (remove the old, add the new).
3131 */
3132 VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, nvroot) == 0);
3133 nvlist_free(nvroot);
3134 return (config);
3135 }
3136
3137 /*
3138 * Walk the vdev tree and see if we can find a device with "better"
3139 * configuration. A configuration is "better" if the label on that
3140 * device has a more recent txg.
3141 */
3142 static void
3143 spa_alt_rootvdev(vdev_t *vd, vdev_t **avd, uint64_t *txg)
3144 {
3145 for (int c = 0; c < vd->vdev_children; c++)
3146 spa_alt_rootvdev(vd->vdev_child[c], avd, txg);
3147
3148 if (vd->vdev_ops->vdev_op_leaf) {
3149 nvlist_t *label;
3150 uint64_t label_txg;
3151
3152 if (vdev_disk_read_rootlabel(vd->vdev_physpath, vd->vdev_devid,
3153 &label) != 0)
3154 return;
3155
3156 VERIFY(nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_TXG,
3157 &label_txg) == 0);
3158
3159 /*
3160 * Do we have a better boot device?
3161 */
3162 if (label_txg > *txg) {
3163 *txg = label_txg;
3164 *avd = vd;
3165 }
3166 nvlist_free(label);
3167 }
3168 }
3169
3170 /*
3171 * Import a root pool.
3172 *
3173 * For x86, devpath_list will consist of devid and/or physpath name of
3174 * the vdev (e.g. "id1,sd@SSEAGATE..." or "/pci@1f,0/ide@d/disk@0,0:a").
3175 * The GRUB "findroot" command will return the vdev we should boot.
3176 *
3177 * For Sparc, devpath_list consists of the physpath name of the booting device,
3178 * whether the root pool is a single-device pool or a mirrored pool.
3179 * e.g.
3180 * "/pci@1f,0/ide@d/disk@0,0:a"
3181 */
3182 int
3183 spa_import_rootpool(char *devpath, char *devid)
3184 {
3185 spa_t *spa;
3186 vdev_t *rvd, *bvd, *avd = NULL;
3187 nvlist_t *config, *nvtop;
3188 uint64_t guid, txg;
3189 char *pname;
3190 int error;
3191
3192 /*
3193 * Read the label from the boot device and generate a configuration.
3194 */
3195 config = spa_generate_rootconf(devpath, devid, &guid);
3196 #if defined(_OBP) && defined(_KERNEL)
3197 if (config == NULL) {
3198 if (strstr(devpath, "/iscsi/ssd") != NULL) {
3199 /* iscsi boot */
3200 get_iscsi_bootpath_phy(devpath);
3201 config = spa_generate_rootconf(devpath, devid, &guid);
3202 }
3203 }
3204 #endif
3205 if (config == NULL) {
3206 cmn_err(CE_NOTE, "Can not read the pool label from '%s'",
3207 devpath);
3208 return (EIO);
3209 }
3210
3211 VERIFY(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
3212 &pname) == 0);
3213 VERIFY(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG, &txg) == 0);
3214
3215 mutex_enter(&spa_namespace_lock);
3216 if ((spa = spa_lookup(pname)) != NULL) {
3217 /*
3218 * Remove the existing root pool from the namespace so that we
3219 * can replace it with the correct config we just read in.
3220 */
3221 spa_remove(spa);
3222 }
3223
3224 spa = spa_add(pname, config, NULL);
3225 spa->spa_is_root = B_TRUE;
3226 spa->spa_import_flags = ZFS_IMPORT_VERBATIM;
3227
3228 /*
3229 * Build up a vdev tree based on the boot device's label config.
3230 */
3231 VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
3232 &nvtop) == 0);
3233 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
3234 error = spa_config_parse(spa, &rvd, nvtop, NULL, 0,
3235 VDEV_ALLOC_ROOTPOOL);
3236 spa_config_exit(spa, SCL_ALL, FTAG);
3237 if (error) {
3238 mutex_exit(&spa_namespace_lock);
3239 nvlist_free(config);
3240 cmn_err(CE_NOTE, "Can not parse the config for pool '%s'",
3241 pname);
3242 return (error);
3243 }
3244
3245 /*
3246 * Get the boot vdev.
3247 */
3248 if ((bvd = vdev_lookup_by_guid(rvd, guid)) == NULL) {
3249 cmn_err(CE_NOTE, "Can not find the boot vdev for guid %llu",
3250 (u_longlong_t)guid);
3251 error = ENOENT;
3252 goto out;
3253 }
3254
3255 /*
3256 * Determine if there is a better boot device.
3257 */
3258 avd = bvd;
3259 spa_alt_rootvdev(rvd, &avd, &txg);
3260 if (avd != bvd) {
3261 cmn_err(CE_NOTE, "The boot device is 'degraded'. Please "
3262 "try booting from '%s'", avd->vdev_path);
3263 error = EINVAL;
3264 goto out;
3265 }
3266
3267 /*
3268 * If the boot device is part of a spare vdev then ensure that
3269 * we're booting off the active spare.
3270 */
3271 if (bvd->vdev_parent->vdev_ops == &vdev_spare_ops &&
3272 !bvd->vdev_isspare) {
3273 cmn_err(CE_NOTE, "The boot device is currently spared. Please "
3274 "try booting from '%s'",
3275 bvd->vdev_parent->
3276 vdev_child[bvd->vdev_parent->vdev_children - 1]->vdev_path);
3277 error = EINVAL;
3278 goto out;
3279 }
3280
3281 error = 0;
3282 spa_history_log_version(spa, LOG_POOL_IMPORT);
3283 out:
3284 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
3285 vdev_free(rvd);
3286 spa_config_exit(spa, SCL_ALL, FTAG);
3287 mutex_exit(&spa_namespace_lock);
3288
3289 nvlist_free(config);
3290 return (error);
3291 }
3292
3293 #endif
3294
3295 /*
3296 * Import a non-root pool into the system.
3297 */
3298 int
3299 spa_import(const char *pool, nvlist_t *config, nvlist_t *props, uint64_t flags)
3300 {
3301 spa_t *spa;
3302 char *altroot = NULL;
3303 spa_load_state_t state = SPA_LOAD_IMPORT;
3304 zpool_rewind_policy_t policy;
3305 uint64_t mode = spa_mode_global;
3306 uint64_t readonly = B_FALSE;
3307 int error;
3308 nvlist_t *nvroot;
3309 nvlist_t **spares, **l2cache;
3310 uint_t nspares, nl2cache;
3311
3312 /*
3313 * If a pool with this name exists, return failure.
3314 */
3315 mutex_enter(&spa_namespace_lock);
3316 if (spa_lookup(pool) != NULL) {
3317 mutex_exit(&spa_namespace_lock);
3318 return (EEXIST);
3319 }
3320
3321 /*
3322 * Create and initialize the spa structure.
3323 */
3324 (void) nvlist_lookup_string(props,
3325 zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot);
3326 (void) nvlist_lookup_uint64(props,
3327 zpool_prop_to_name(ZPOOL_PROP_READONLY), &readonly);
3328 if (readonly)
3329 mode = FREAD;
3330 spa = spa_add(pool, config, altroot);
3331 spa->spa_import_flags = flags;
3332
3333 /*
3334 * Verbatim import - Take a pool and insert it into the namespace
3335 * as if it had been loaded at boot.
3336 */
3337 if (spa->spa_import_flags & ZFS_IMPORT_VERBATIM) {
3338 if (props != NULL)
3339 spa_configfile_set(spa, props, B_FALSE);
3340
3341 spa_config_sync(spa, B_FALSE, B_TRUE);
3342
3343 mutex_exit(&spa_namespace_lock);
3344 spa_history_log_version(spa, LOG_POOL_IMPORT);
3345
3346 return (0);
3347 }
3348
3349 spa_activate(spa, mode);
3350
3351 /*
3352 * Don't start async tasks until we know everything is healthy.
3353 */
3354 spa_async_suspend(spa);
3355
3356 zpool_get_rewind_policy(config, &policy);
3357 if (policy.zrp_request & ZPOOL_DO_REWIND)
3358 state = SPA_LOAD_RECOVER;
3359
3360 /*
3361 * Pass off the heavy lifting to spa_load(). Pass TRUE for mosconfig
3362 * because the user-supplied config is actually the one to trust when
3363 * doing an import.
3364 */
3365 if (state != SPA_LOAD_RECOVER)
3366 spa->spa_last_ubsync_txg = spa->spa_load_txg = 0;
3367
3368 error = spa_load_best(spa, state, B_TRUE, policy.zrp_txg,
3369 policy.zrp_request);
3370
3371 /*
3372 * Propagate anything learned while loading the pool and pass it
3373 * back to caller (i.e. rewind info, missing devices, etc).
3374 */
3375 VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_LOAD_INFO,
3376 spa->spa_load_info) == 0);
3377
3378 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
3379 /*
3380 * Toss any existing sparelist, as it doesn't have any validity
3381 * anymore, and conflicts with spa_has_spare().
3382 */
3383 if (spa->spa_spares.sav_config) {
3384 nvlist_free(spa->spa_spares.sav_config);
3385 spa->spa_spares.sav_config = NULL;
3386 spa_load_spares(spa);
3387 }
3388 if (spa->spa_l2cache.sav_config) {
3389 nvlist_free(spa->spa_l2cache.sav_config);
3390 spa->spa_l2cache.sav_config = NULL;
3391 spa_load_l2cache(spa);
3392 }
3393
3394 VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
3395 &nvroot) == 0);
3396 if (error == 0)
3397 error = spa_validate_aux(spa, nvroot, -1ULL,
3398 VDEV_ALLOC_SPARE);
3399 if (error == 0)
3400 error = spa_validate_aux(spa, nvroot, -1ULL,
3401 VDEV_ALLOC_L2CACHE);
3402 spa_config_exit(spa, SCL_ALL, FTAG);
3403
3404 if (props != NULL)
3405 spa_configfile_set(spa, props, B_FALSE);
3406
3407 if (error != 0 || (props && spa_writeable(spa) &&
3408 (error = spa_prop_set(spa, props)))) {
3409 spa_unload(spa);
3410 spa_deactivate(spa);
3411 spa_remove(spa);
3412 mutex_exit(&spa_namespace_lock);
3413 return (error);
3414 }
3415
3416 spa_async_resume(spa);
3417
3418 /*
3419 * Override any spares and level 2 cache devices as specified by
3420 * the user, as these may have correct device names/devids, etc.
3421 */
3422 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
3423 &spares, &nspares) == 0) {
3424 if (spa->spa_spares.sav_config)
3425 VERIFY(nvlist_remove(spa->spa_spares.sav_config,
3426 ZPOOL_CONFIG_SPARES, DATA_TYPE_NVLIST_ARRAY) == 0);
3427 else
3428 VERIFY(nvlist_alloc(&spa->spa_spares.sav_config,
3429 NV_UNIQUE_NAME, KM_SLEEP) == 0);
3430 VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config,
3431 ZPOOL_CONFIG_SPARES, spares, nspares) == 0);
3432 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
3433 spa_load_spares(spa);
3434 spa_config_exit(spa, SCL_ALL, FTAG);
3435 spa->spa_spares.sav_sync = B_TRUE;
3436 }
3437 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
3438 &l2cache, &nl2cache) == 0) {
3439 if (spa->spa_l2cache.sav_config)
3440 VERIFY(nvlist_remove(spa->spa_l2cache.sav_config,
3441 ZPOOL_CONFIG_L2CACHE, DATA_TYPE_NVLIST_ARRAY) == 0);
3442 else
3443 VERIFY(nvlist_alloc(&spa->spa_l2cache.sav_config,
3444 NV_UNIQUE_NAME, KM_SLEEP) == 0);
3445 VERIFY(nvlist_add_nvlist_array(spa->spa_l2cache.sav_config,
3446 ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0);
3447 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
3448 spa_load_l2cache(spa);
3449 spa_config_exit(spa, SCL_ALL, FTAG);
3450 spa->spa_l2cache.sav_sync = B_TRUE;
3451 }
3452
3453 /*
3454 * Check for any removed devices.
3455 */
3456 if (spa->spa_autoreplace) {
3457 spa_aux_check_removed(&spa->spa_spares);
3458 spa_aux_check_removed(&spa->spa_l2cache);
3459 }
3460
3461 if (spa_writeable(spa)) {
3462 /*
3463 * Update the config cache to include the newly-imported pool.
3464 */
3465 spa_config_update(spa, SPA_CONFIG_UPDATE_POOL);
3466 }
3467
3468 /*
3469 * It's possible that the pool was expanded while it was exported.
3470 * We kick off an async task to handle this for us.
3471 */
3472 spa_async_request(spa, SPA_ASYNC_AUTOEXPAND);
3473
3474 mutex_exit(&spa_namespace_lock);
3475 spa_history_log_version(spa, LOG_POOL_IMPORT);
3476
3477 return (0);
3478 }
3479
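/*
 * Probe the pool described by 'tryconfig' without actually importing it.
 * Returns the generated configuration (including spare, l2cache, and
 * bootfs information) if 'tryconfig' was at least parsable; otherwise NULL.
 */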
3480 nvlist_t *
3481 spa_tryimport(nvlist_t *tryconfig)
3482 {
3483 nvlist_t *config = NULL;
3484 char *poolname;
3485 spa_t *spa;
3486 uint64_t state;
3487 int error;
3488
3489 if (nvlist_lookup_string(tryconfig, ZPOOL_CONFIG_POOL_NAME, &poolname))
3490 return (NULL);
3491
3492 if (nvlist_lookup_uint64(tryconfig, ZPOOL_CONFIG_POOL_STATE, &state))
3493 return (NULL);
3494
3495 /*
3496 * Create and initialize the spa structure.
3497 */
3498 mutex_enter(&spa_namespace_lock);
3499 spa = spa_add(TRYIMPORT_NAME, tryconfig, NULL);
3500 spa_activate(spa, FREAD);
3501
3502 /*
3503 * Pass off the heavy lifting to spa_load().
3504 * Pass TRUE for mosconfig because the user-supplied config
3505 * is actually the one to trust when doing an import.
3506 */
3507 error = spa_load(spa, SPA_LOAD_TRYIMPORT, SPA_IMPORT_EXISTING, B_TRUE);
3508
3509 /*
3510 * If 'tryconfig' was at least parsable, return the current config.
3511 */
3512 if (spa->spa_root_vdev != NULL) {
3513 config = spa_config_generate(spa, NULL, -1ULL, B_TRUE);
3514 VERIFY(nvlist_add_string(config, ZPOOL_CONFIG_POOL_NAME,
3515 poolname) == 0);
3516 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_STATE,
3517 state) == 0);
3518 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_TIMESTAMP,
3519 spa->spa_uberblock.ub_timestamp) == 0);
3520
3521 /*
3522 * If the bootfs property exists on this pool then we
3523 * copy it out so that external consumers can tell which
3524 * pools are bootable.
3525 */
3526 if ((!error || error == EEXIST) && spa->spa_bootfs) {
3527 char *tmpname = kmem_alloc(MAXPATHLEN, KM_SLEEP);
3528
3529 /*
3530 * We have to play games with the name since the
3531 * pool was opened as TRYIMPORT_NAME.
3532 */
3533 if (dsl_dsobj_to_dsname(spa_name(spa),
3534 spa->spa_bootfs, tmpname) == 0) {
3535 char *cp;
3536 char *dsname = kmem_alloc(MAXPATHLEN, KM_SLEEP);
3537
3538 cp = strchr(tmpname, '/');
3539 if (cp == NULL) {
3540 (void) strlcpy(dsname, tmpname,
3541 MAXPATHLEN);
3542 } else {
3543 (void) snprintf(dsname, MAXPATHLEN,
3544 "%s/%s", poolname, ++cp);
3545 }
3546 VERIFY(nvlist_add_string(config,
3547 ZPOOL_CONFIG_BOOTFS, dsname) == 0);
3548 kmem_free(dsname, MAXPATHLEN);
3549 }
3550 kmem_free(tmpname, MAXPATHLEN);
3551 }
3552
3553 /*
3554 * Add the list of hot spares and level 2 cache devices.
3555 */
3556 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
3557 spa_add_spares(spa, config);
3558 spa_add_l2cache(spa, config);
3559 spa_config_exit(spa, SCL_CONFIG, FTAG);
3560 }
3561
3562 spa_unload(spa);
3563 spa_deactivate(spa);
3564 spa_remove(spa);
3565 mutex_exit(&spa_namespace_lock);
3566
3567 return (config);
3568 }
3569
3570 /*
3571 * Pool export/destroy
3572 *
3573 * The act of destroying or exporting a pool is very simple. We make sure there
3574 * is no more pending I/O and any references to the pool are gone. Then, we
3575 * update the pool state and sync all the labels to disk, removing the
3576 * configuration from the cache afterwards. If the 'hardforce' flag is set, then
3577 * we don't sync the labels or remove the configuration cache.
3578 */
3579 static int
3580 spa_export_common(char *pool, int new_state, nvlist_t **oldconfig,
3581 boolean_t force, boolean_t hardforce)
3582 {
3583 spa_t *spa;
3584
3585 if (oldconfig)
3586 *oldconfig = NULL;
3587
3588 if (!(spa_mode_global & FWRITE))
3589 return (EROFS);
3590
3591 mutex_enter(&spa_namespace_lock);
3592 if ((spa = spa_lookup(pool)) == NULL) {
3593 mutex_exit(&spa_namespace_lock);
3594 return (ENOENT);
3595 }
3596
3597 /*
3598 * Put a hold on the pool, drop the namespace lock, stop async tasks,
3599 * reacquire the namespace lock, and see if we can export.
3600 */
3601 spa_open_ref(spa, FTAG);
3602 mutex_exit(&spa_namespace_lock);
3603 spa_async_suspend(spa);
3604 mutex_enter(&spa_namespace_lock);
3605 spa_close(spa, FTAG);
3606
3607 /*
3608 * The pool will be in core if it's openable,
3609 * in which case we can modify its state.
3610 */
3611 if (spa->spa_state != POOL_STATE_UNINITIALIZED && spa->spa_sync_on) {
3612 /*
3613 * Objsets may be open only because they're dirty, so we
3614 * have to force it to sync before checking spa_refcnt.
3615 */
3616 txg_wait_synced(spa->spa_dsl_pool, 0);
3617
3618 /*
3619 * A pool cannot be exported or destroyed if there are active
3620 * references. If we are resetting a pool, allow references by
3621 * fault injection handlers.
3622 */
3623 if (!spa_refcount_zero(spa) ||
3624 (spa->spa_inject_ref != 0 &&
3625 new_state != POOL_STATE_UNINITIALIZED)) {
3626 spa_async_resume(spa);
3627 mutex_exit(&spa_namespace_lock);
3628 return (EBUSY);
3629 }
3630
3631 /*
3632 * A pool cannot be exported if it has an active shared spare.
3633 * This is to prevent other pools stealing the active spare
3634 * from an exported pool. If the user insists (force), such a
3635 * pool can still be exported.
3636 */
3637 if (!force && new_state == POOL_STATE_EXPORTED &&
3638 spa_has_active_shared_spare(spa)) {
3639 spa_async_resume(spa);
3640 mutex_exit(&spa_namespace_lock);
3641 return (EXDEV);
3642 }
3643
3644 /*
3645 * We want this to be reflected on every label,
3646 * so mark them all dirty. spa_unload() will do the
3647 * final sync that pushes these changes out.
3648 */
3649 if (new_state != POOL_STATE_UNINITIALIZED && !hardforce) {
3650 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
3651 spa->spa_state = new_state;
3652 spa->spa_final_txg = spa_last_synced_txg(spa) +
3653 TXG_DEFER_SIZE + 1;
3654 vdev_config_dirty(spa->spa_root_vdev);
3655 spa_config_exit(spa, SCL_ALL, FTAG);
3656 }
3657 }
3658
3659 spa_event_notify(spa, NULL, ESC_ZFS_POOL_DESTROY);
3660
3661 if (spa->spa_state != POOL_STATE_UNINITIALIZED) {
3662 spa_unload(spa);
3663 spa_deactivate(spa);
3664 }
3665
3666 if (oldconfig && spa->spa_config)
3667 VERIFY(nvlist_dup(spa->spa_config, oldconfig, 0) == 0);
3668
3669 if (new_state != POOL_STATE_UNINITIALIZED) {
3670 if (!hardforce)
3671 spa_config_sync(spa, B_TRUE, B_TRUE);
3672 spa_remove(spa);
3673 }
3674 mutex_exit(&spa_namespace_lock);
3675
3676 return (0);
3677 }
3678
3679 /*
3680 * Destroy a storage pool.
3681 */
3682 int
3683 spa_destroy(char *pool)
3684 {
3685 return (spa_export_common(pool, POOL_STATE_DESTROYED, NULL,
3686 B_FALSE, B_FALSE));
3687 }
3688
3689 /*
3690 * Export a storage pool.
3691 */
3692 int
3693 spa_export(char *pool, nvlist_t **oldconfig, boolean_t force,
3694 boolean_t hardforce)
3695 {
3696 return (spa_export_common(pool, POOL_STATE_EXPORTED, oldconfig,
3697 force, hardforce));
3698 }
3699
3700 /*
3701 * Similar to spa_export(), this unloads the spa_t without actually removing it
3702 * from the namespace in any way.
3703 */
3704 int
3705 spa_reset(char *pool)
3706 {
3707 return (spa_export_common(pool, POOL_STATE_UNINITIALIZED, NULL,
3708 B_FALSE, B_FALSE));
3709 }
3710
3711 /*
3712 * ==========================================================================
3713 * Device manipulation
3714 * ==========================================================================
3715 */
3716
3717 /*
3718 * Add a device to a storage pool.
3719 */
3720 int
3721 spa_vdev_add(spa_t *spa, nvlist_t *nvroot)
3722 {
3723 uint64_t txg, id;
3724 int error;
3725 vdev_t *rvd = spa->spa_root_vdev;
3726 vdev_t *vd, *tvd;
3727 nvlist_t **spares, **l2cache;
3728 uint_t nspares, nl2cache;
3729
3730 ASSERT(spa_writeable(spa));
3731
3732 txg = spa_vdev_enter(spa);
3733
3734 if ((error = spa_config_parse(spa, &vd, nvroot, NULL, 0,
3735 VDEV_ALLOC_ADD)) != 0)
3736 return (spa_vdev_exit(spa, NULL, txg, error));
3737
3738 spa->spa_pending_vdev = vd; /* spa_vdev_exit() will clear this */
3739
3740 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, &spares,
3741 &nspares) != 0)
3742 nspares = 0;
3743
3744 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE, &l2cache,
3745 &nl2cache) != 0)
3746 nl2cache = 0;
3747
3748 if (vd->vdev_children == 0 && nspares == 0 && nl2cache == 0)
3749 return (spa_vdev_exit(spa, vd, txg, EINVAL));
3750
3751 if (vd->vdev_children != 0 &&
3752 (error = vdev_create(vd, txg, B_FALSE)) != 0)
3753 return (spa_vdev_exit(spa, vd, txg, error));
3754
3755 /*
3756 * We must validate the spares and l2cache devices after checking the
3757 * children. Otherwise, vdev_inuse() will blindly overwrite the spare.
3758 */
3759 if ((error = spa_validate_aux(spa, nvroot, txg, VDEV_ALLOC_ADD)) != 0)
3760 return (spa_vdev_exit(spa, vd, txg, error));
3761
3762 /*
3763 * Transfer each new top-level vdev from vd to rvd.
3764 */
3765 for (int c = 0; c < vd->vdev_children; c++) {
3766
3767 /*
3768 * Set the vdev id to the first hole, if one exists.
3769 */
3770 for (id = 0; id < rvd->vdev_children; id++) {
3771 if (rvd->vdev_child[id]->vdev_ishole) {
3772 vdev_free(rvd->vdev_child[id]);
3773 break;
3774 }
3775 }
3776 tvd = vd->vdev_child[c];
3777 vdev_remove_child(vd, tvd);
3778 tvd->vdev_id = id;
3779 vdev_add_child(rvd, tvd);
3780 vdev_config_dirty(tvd);
3781 }
3782
3783 if (nspares != 0) {
3784 spa_set_aux_vdevs(&spa->spa_spares, spares, nspares,
3785 ZPOOL_CONFIG_SPARES);
3786 spa_load_spares(spa);
3787 spa->spa_spares.sav_sync = B_TRUE;
3788 }
3789
3790 if (nl2cache != 0) {
3791 spa_set_aux_vdevs(&spa->spa_l2cache, l2cache, nl2cache,
3792 ZPOOL_CONFIG_L2CACHE);
3793 spa_load_l2cache(spa);
3794 spa->spa_l2cache.sav_sync = B_TRUE;
3795 }
3796
3797 /*
3798 * We have to be careful when adding new vdevs to an existing pool.
3799 * If other threads start allocating from these vdevs before we
3800 * sync the config cache, and we lose power, then upon reboot we may
3801 * fail to open the pool because there are DVAs that the config cache
3802 * can't translate. Therefore, we first add the vdevs without
3803 * initializing metaslabs; sync the config cache (via spa_vdev_exit());
3804 * and then let spa_config_update() initialize the new metaslabs.
3805 *
3806 * spa_load() checks for added-but-not-initialized vdevs, so that
3807 * if we lose power at any point in this sequence, the remaining
3808 * steps will be completed the next time we load the pool.
3809 */
3810 (void) spa_vdev_exit(spa, vd, txg, 0);
3811
3812 mutex_enter(&spa_namespace_lock);
3813 spa_config_update(spa, SPA_CONFIG_UPDATE_POOL);
3814 mutex_exit(&spa_namespace_lock);
3815
3816 return (0);
3817 }
3818
3819 /*
3820 * Attach a device to a mirror. The arguments are the guid of any device
3821 * in the mirror, and the nvroot for the new device. If the guid specifies
3822 * a device that is not mirrored, we automatically insert the mirror vdev.
3823 *
3824 * If 'replacing' is specified, the new device is intended to replace the
3825 * existing device; in this case the two devices are made into their own
3826 * mirror using the 'replacing' vdev, which is functionally identical to
3827 * the mirror vdev (it actually reuses all the same ops) but has a few
3828 * extra rules: you can't attach to it after it's been created, and upon
3829 * completion of resilvering, the first disk (the one being replaced)
3830 * is automatically detached.
3831 */
3832 int
3833 spa_vdev_attach(spa_t *spa, uint64_t guid, nvlist_t *nvroot, int replacing)
3834 {
3835 uint64_t txg, dtl_max_txg;
3836 vdev_t *rvd = spa->spa_root_vdev;
3837 vdev_t *oldvd, *newvd, *newrootvd, *pvd, *tvd;
3838 vdev_ops_t *pvops;
3839 char *oldvdpath, *newvdpath;
3840 int newvd_isspare;
3841 int error;
3842
3843 ASSERT(spa_writeable(spa));
3844
3845 txg = spa_vdev_enter(spa);
3846
3847 oldvd = spa_lookup_by_guid(spa, guid, B_FALSE);
3848
3849 if (oldvd == NULL)
3850 return (spa_vdev_exit(spa, NULL, txg, ENODEV));
3851
3852 if (!oldvd->vdev_ops->vdev_op_leaf)
3853 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
3854
3855 pvd = oldvd->vdev_parent;
3856
3857 if ((error = spa_config_parse(spa, &newrootvd, nvroot, NULL, 0,
3858 VDEV_ALLOC_ADD)) != 0)
3859 return (spa_vdev_exit(spa, NULL, txg, EINVAL));
3860
3861 if (newrootvd->vdev_children != 1)
3862 return (spa_vdev_exit(spa, newrootvd, txg, EINVAL));
3863
3864 newvd = newrootvd->vdev_child[0];
3865
3866 if (!newvd->vdev_ops->vdev_op_leaf)
3867 return (spa_vdev_exit(spa, newrootvd, txg, EINVAL));
3868
3869 if ((error = vdev_create(newrootvd, txg, replacing)) != 0)
3870 return (spa_vdev_exit(spa, newrootvd, txg, error));
3871
3872 /*
3873 * Spares can't replace logs
3874 */
3875 if (oldvd->vdev_top->vdev_islog && newvd->vdev_isspare)
3876 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
3877
3878 if (!replacing) {
3879 /*
3880 * For attach, the only allowable parent is a mirror or the root
3881 * vdev.
3882 */
3883 if (pvd->vdev_ops != &vdev_mirror_ops &&
3884 pvd->vdev_ops != &vdev_root_ops)
3885 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
3886
3887 pvops = &vdev_mirror_ops;
3888 } else {
3889 /*
3890 * Active hot spares can only be replaced by inactive hot
3891 * spares.
3892 */
3893 if (pvd->vdev_ops == &vdev_spare_ops &&
3894 oldvd->vdev_isspare &&
3895 !spa_has_spare(spa, newvd->vdev_guid))
3896 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
3897
3898 /*
3899 * If the source is a hot spare, and the parent isn't already a
3900 * spare, then we want to create a new hot spare. Otherwise, we
3901 * want to create a replacing vdev. The user is not allowed to
3902 * attach to a spared vdev child unless the 'isspare' state is
3903 * the same (spare replaces spare, non-spare replaces
3904 * non-spare).
3905 */
3906 if (pvd->vdev_ops == &vdev_replacing_ops &&
3907 spa_version(spa) < SPA_VERSION_MULTI_REPLACE) {
3908 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
3909 } else if (pvd->vdev_ops == &vdev_spare_ops &&
3910 newvd->vdev_isspare != oldvd->vdev_isspare) {
3911 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
3912 }
3913
3914 if (newvd->vdev_isspare)
3915 pvops = &vdev_spare_ops;
3916 else
3917 pvops = &vdev_replacing_ops;
3918 }
3919
3920 /*
3921 * Make sure the new device is big enough.
3922 */
3923 if (newvd->vdev_asize < vdev_get_min_asize(oldvd))
3924 return (spa_vdev_exit(spa, newrootvd, txg, EOVERFLOW));
3925
3926 /*
3927 * The new device cannot have a higher alignment requirement
3928 * than the top-level vdev.
3929 */
3930 if (newvd->vdev_ashift > oldvd->vdev_top->vdev_ashift)
3931 return (spa_vdev_exit(spa, newrootvd, txg, EDOM));
3932
3933 /*
3934 * If this is an in-place replacement, update oldvd's path and devid
3935 * to make it distinguishable from newvd, and unopenable from now on.
3936 */
3937 if (strcmp(oldvd->vdev_path, newvd->vdev_path) == 0) {
3938 spa_strfree(oldvd->vdev_path);
3939 oldvd->vdev_path = kmem_alloc(strlen(newvd->vdev_path) + 5,
3940 KM_SLEEP);
3941 (void) sprintf(oldvd->vdev_path, "%s/%s",
3942 newvd->vdev_path, "old");
3943 if (oldvd->vdev_devid != NULL) {
3944 spa_strfree(oldvd->vdev_devid);
3945 oldvd->vdev_devid = NULL;
3946 }
3947 }
3948
3949 /* mark the device being resilvered */
3950 newvd->vdev_resilvering = B_TRUE;
3951
3952 /*
3953 * If the parent is not a mirror, or if we're replacing, insert the new
3954 * mirror/replacing/spare vdev above oldvd.
3955 */
3956 if (pvd->vdev_ops != pvops)
3957 pvd = vdev_add_parent(oldvd, pvops);
3958
3959 ASSERT(pvd->vdev_top->vdev_parent == rvd);
3960 ASSERT(pvd->vdev_ops == pvops);
3961 ASSERT(oldvd->vdev_parent == pvd);
3962
3963 /*
3964 * Extract the new device from its root and add it to pvd.
3965 */
3966 vdev_remove_child(newrootvd, newvd);
3967 newvd->vdev_id = pvd->vdev_children;
3968 newvd->vdev_crtxg = oldvd->vdev_crtxg;
3969 vdev_add_child(pvd, newvd);
3970
3971 tvd = newvd->vdev_top;
3972 ASSERT(pvd->vdev_top == tvd);
3973 ASSERT(tvd->vdev_parent == rvd);
3974
3975 vdev_config_dirty(tvd);
3976
3977 /*
3978 * Set newvd's DTL to [TXG_INITIAL, dtl_max_txg) so that we account
3979 * for any dmu_sync-ed blocks. It will propagate upward when
3980 * spa_vdev_exit() calls vdev_dtl_reassess().
3981 */
3982 dtl_max_txg = txg + TXG_CONCURRENT_STATES;
3983
3984 vdev_dtl_dirty(newvd, DTL_MISSING, TXG_INITIAL,
3985 dtl_max_txg - TXG_INITIAL);
3986
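	/* if the new device is a hot spare, mark it active and notify listeners */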
3987 if (newvd->vdev_isspare) {
3988 spa_spare_activate(newvd);
3989 spa_event_notify(spa, newvd, ESC_ZFS_VDEV_SPARE);
3990 }
3991
3992 oldvdpath = spa_strdup(oldvd->vdev_path);
3993 newvdpath = spa_strdup(newvd->vdev_path);
3994 newvd_isspare = newvd->vdev_isspare;
3995
3996 /*
3997 * Mark newvd's DTL dirty in this txg.
3998 */
3999 vdev_dirty(tvd, VDD_DTL, newvd, txg);
4000
4001 /*
4002 * Restart the resilver
4003 */
4004 dsl_resilver_restart(spa->spa_dsl_pool, dtl_max_txg);
4005
4006 /*
4007 * Commit the config
4008 */
4009 (void) spa_vdev_exit(spa, newrootvd, dtl_max_txg, 0);
4010
4011 spa_history_log_internal(LOG_POOL_VDEV_ATTACH, spa, NULL,
4012 "%s vdev=%s %s vdev=%s",
4013 replacing && newvd_isspare ? "spare in" :
4014 replacing ? "replace" : "attach", newvdpath,
4015 replacing ? "for" : "to", oldvdpath);
4016
4017 spa_strfree(oldvdpath);
4018 spa_strfree(newvdpath);
4019
4020 if (spa->spa_bootfs)
4021 spa_event_notify(spa, newvd, ESC_ZFS_BOOTFS_VDEV_ATTACH);
4022
4023 return (0);
4024 }
4025
4026 /*
4027 * Detach a device from a mirror or replacing vdev.
4028 * If 'replace_done' is specified, only detach if the parent
4029 * is a replacing vdev.
4030 */
4031 int
4032 spa_vdev_detach(spa_t *spa, uint64_t guid, uint64_t pguid, int replace_done)
4033 {
4034 uint64_t txg;
4035 int error;
4036 vdev_t *rvd = spa->spa_root_vdev;
4037 vdev_t *vd, *pvd, *cvd, *tvd;
4038 boolean_t unspare = B_FALSE;
4039 uint64_t unspare_guid;
4040 char *vdpath;
4041
4042 ASSERT(spa_writeable(spa));
4043
4044 txg = spa_vdev_enter(spa);
4045
4046 vd = spa_lookup_by_guid(spa, guid, B_FALSE);
4047
4048 if (vd == NULL)
4049 return (spa_vdev_exit(spa, NULL, txg, ENODEV));
4050
4051 if (!vd->vdev_ops->vdev_op_leaf)
4052 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
4053
4054 pvd = vd->vdev_parent;
4055
4056 /*
4057 * If the parent/child relationship is not as expected, don't do it.
4058 * Consider M(A,R(B,C)) -- that is, a mirror of A with a replacing
4059 * vdev that's replacing B with C. The user's intent in replacing
4060 * is to go from M(A,B) to M(A,C). If the user decides to cancel
4061 * the replace by detaching C, the expected behavior is to end up
4062 * M(A,B). But suppose that right after deciding to detach C,
4063 * the replacement of B completes. We would have M(A,C), and then
4064 * ask to detach C, which would leave us with just A -- not what
4065 * the user wanted. To prevent this, we make sure that the
4066 * parent/child relationship hasn't changed -- in this example,
4067 * that C's parent is still the replacing vdev R.
4068 */
4069 if (pvd->vdev_guid != pguid && pguid != 0)
4070 return (spa_vdev_exit(spa, NULL, txg, EBUSY));
4071
4072 /*
4073 * Only 'replacing' or 'spare' vdevs can be replaced.
4074 */
4075 if (replace_done && pvd->vdev_ops != &vdev_replacing_ops &&
4076 pvd->vdev_ops != &vdev_spare_ops)
4077 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
4078
4079 ASSERT(pvd->vdev_ops != &vdev_spare_ops ||
4080 spa_version(spa) >= SPA_VERSION_SPARES);
4081
4082 /*
4083 * Only mirror, replacing, and spare vdevs support detach.
4084 */
4085 if (pvd->vdev_ops != &vdev_replacing_ops &&
4086 pvd->vdev_ops != &vdev_mirror_ops &&
4087 pvd->vdev_ops != &vdev_spare_ops)
4088 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
4089
4090 /*
4091 * If this device has the only valid copy of some data,
4092 * we cannot safely detach it.
4093 */
4094 if (vdev_dtl_required(vd))
4095 return (spa_vdev_exit(spa, NULL, txg, EBUSY));
4096
4097 ASSERT(pvd->vdev_children >= 2);
4098
4099 /*
4100 * If we are detaching the second disk from a replacing vdev, then
4101 * check to see if we changed the original vdev's path to have "/old"
4102 * at the end in spa_vdev_attach(). If so, undo that change now.
4103 */
4104 if (pvd->vdev_ops == &vdev_replacing_ops && vd->vdev_id > 0 &&
4105 vd->vdev_path != NULL) {
4106 size_t len = strlen(vd->vdev_path);
4107
4108 for (int c = 0; c < pvd->vdev_children; c++) {
4109 cvd = pvd->vdev_child[c];
4110
4111 if (cvd == vd || cvd->vdev_path == NULL)
4112 continue;
4113
4114 if (strncmp(cvd->vdev_path, vd->vdev_path, len) == 0 &&
4115 strcmp(cvd->vdev_path + len, "/old") == 0) {
4116 spa_strfree(cvd->vdev_path);
4117 cvd->vdev_path = spa_strdup(vd->vdev_path);
4118 break;
4119 }
4120 }
4121 }
4122
4123 /*
4124 * If we are detaching the original disk from a spare, then it implies
4125 * that the spare should become a real disk, and be removed from the
4126 * active spare list for the pool.
4127 */
4128 if (pvd->vdev_ops == &vdev_spare_ops &&
4129 vd->vdev_id == 0 &&
4130 pvd->vdev_child[pvd->vdev_children - 1]->vdev_isspare)
4131 unspare = B_TRUE;
4132
4133 /*
4134 * Erase the disk labels so the disk can be used for other things.
4135 * This must be done after all other error cases are handled,
4136 * but before we disembowel vd (so we can still do I/O to it).
4137 * But if we can't do it, don't treat the error as fatal --
4138 * it may be that the unwritability of the disk is the reason
4139 * it's being detached!
4140 */
4141 error = vdev_label_init(vd, 0, VDEV_LABEL_REMOVE);
4142
4143 /*
4144 * Remove vd from its parent and compact the parent's children.
4145 */
4146 vdev_remove_child(pvd, vd);
4147 vdev_compact_children(pvd);
4148
4149 /*
4150 * Remember one of the remaining children so we can get tvd below.
4151 */
4152 cvd = pvd->vdev_child[pvd->vdev_children - 1];
4153
4154 /*
4155 * If we need to remove the remaining child from the list of hot spares,
4156 * do it now, marking the vdev as no longer a spare in the process.
4157 * We must do this before vdev_remove_parent(), because that can
4158 * change the GUID if it creates a new toplevel GUID. For a similar
4159 * reason, we must remove the spare now, in the same txg as the detach;
4160 * otherwise someone could attach a new sibling, change the GUID, and
4161 * the subsequent attempt to spa_vdev_remove(unspare_guid) would fail.
4162 */
4163 if (unspare) {
4164 ASSERT(cvd->vdev_isspare);
4165 spa_spare_remove(cvd);
4166 unspare_guid = cvd->vdev_guid;
4167 (void) spa_vdev_remove(spa, unspare_guid, B_TRUE);
4168 cvd->vdev_unspare = B_TRUE;
4169 }
4170
4171 /*
4172 * If the parent mirror/replacing vdev only has one child,
4173 * the parent is no longer needed. Remove it from the tree.
4174 */
4175 if (pvd->vdev_children == 1) {
4176 if (pvd->vdev_ops == &vdev_spare_ops)
4177 cvd->vdev_unspare = B_FALSE;
4178 vdev_remove_parent(cvd);
4179 cvd->vdev_resilvering = B_FALSE;
4180 }
4181

4183 /*
4184 * We don't set tvd until now because the parent we just removed
4185 * may have been the previous top-level vdev.
4186 */
4187 tvd = cvd->vdev_top;
4188 ASSERT(tvd->vdev_parent == rvd);
4189
4190 /*
4191 * Reevaluate the parent vdev state.
4192 */
4193 vdev_propagate_state(cvd);
4194
4195 /*
4196 * If the 'autoexpand' property is set on the pool then automatically
4197 * try to expand the size of the pool. For example if the device we
4198 * just detached was smaller than the others, it may be possible to
4199 * add metaslabs (i.e. grow the pool). We need to reopen the vdev
4200 * first so that we can obtain the updated sizes of the leaf vdevs.
4201 */
4202 if (spa->spa_autoexpand) {
4203 vdev_reopen(tvd);
4204 vdev_expand(tvd, txg);
4205 }
4206
4207 vdev_config_dirty(tvd);
4208
4209 /*
4210 * Mark vd's DTL as dirty in this txg. vdev_dtl_sync() will see that
4211 * vd->vdev_detached is set and free vd's DTL object in syncing context.
4212 * But first make sure we're not on any *other* txg's DTL list, to
4213 * prevent vd from being accessed after it's freed.
4214 */
4215 vdpath = spa_strdup(vd->vdev_path);
4216 for (int t = 0; t < TXG_SIZE; t++)
4217 (void) txg_list_remove_this(&tvd->vdev_dtl_list, vd, t);
4218 vd->vdev_detached = B_TRUE;
4219 vdev_dirty(tvd, VDD_DTL, vd, txg);
4220
4221 spa_event_notify(spa, vd, ESC_ZFS_VDEV_REMOVE);
4222
4223 /* hang on to the spa before we release the lock */
4224 spa_open_ref(spa, FTAG);
4225
4226 error = spa_vdev_exit(spa, vd, txg, 0);
4227
4228 spa_history_log_internal(LOG_POOL_VDEV_DETACH, spa, NULL,
4229 "vdev=%s", vdpath);
4230 spa_strfree(vdpath);
4231
4232 /*
4233 * If this was the removal of the original device in a hot spare vdev,
4234 * then we want to go through and remove the device from the hot spare
4235 * list of every other pool.
4236 */
4237 if (unspare) {
4238 spa_t *altspa = NULL;
4239
4240 mutex_enter(&spa_namespace_lock);
4241 while ((altspa = spa_next(altspa)) != NULL) {
4242 if (altspa->spa_state != POOL_STATE_ACTIVE ||
4243 altspa == spa)
4244 continue;
4245
4246 spa_open_ref(altspa, FTAG);
4247 mutex_exit(&spa_namespace_lock);
4248 (void) spa_vdev_remove(altspa, unspare_guid, B_TRUE);
4249 mutex_enter(&spa_namespace_lock);
4250 spa_close(altspa, FTAG);
4251 }
4252 mutex_exit(&spa_namespace_lock);
4253
4254 /* search the rest of the vdevs for spares to remove */
4255 spa_vdev_resilver_done(spa);
4256 }
4257
4258 /* all done with the spa; OK to release */
4259 mutex_enter(&spa_namespace_lock);
4260 spa_close(spa, FTAG);
4261 mutex_exit(&spa_namespace_lock);
4262
4263 return (error);
4264 }
4265
4266 /*
4267 * Split a set of devices from their mirrors, and create a new pool from them.
4268 */
4269 int
4270 spa_vdev_split_mirror(spa_t *spa, char *newname, nvlist_t *config,
4271 nvlist_t *props, boolean_t exp)
4272 {
4273 int error = 0;
4274 uint64_t txg, *glist;
4275 spa_t *newspa;
4276 uint_t c, children, lastlog;
4277 nvlist_t **child, *nvl, *tmp;
4278 dmu_tx_t *tx;
4279 char *altroot = NULL;
4280 vdev_t *rvd, **vml = NULL; /* vdev modify list */
4281 boolean_t activate_slog;
4282
4283 ASSERT(spa_writeable(spa));
4284
4285 txg = spa_vdev_enter(spa);
4286
4287 /* clear the log and flush everything up to now */
4288 activate_slog = spa_passivate_log(spa);
4289 (void) spa_vdev_config_exit(spa, NULL, txg, 0, FTAG);
4290 error = spa_offline_log(spa);
4291 txg = spa_vdev_config_enter(spa);
4292
4293 if (activate_slog)
4294 spa_activate_log(spa);
4295
4296 if (error != 0)
4297 return (spa_vdev_exit(spa, NULL, txg, error));
4298
4299 /* check new spa name before going any further */
4300 if (spa_lookup(newname) != NULL)
4301 return (spa_vdev_exit(spa, NULL, txg, EEXIST));
4302
4303 /*
4304 * scan through all the children to ensure they're all mirrors
4305 */
4306 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nvl) != 0 ||
4307 nvlist_lookup_nvlist_array(nvl, ZPOOL_CONFIG_CHILDREN, &child,
4308 &children) != 0)
4309 return (spa_vdev_exit(spa, NULL, txg, EINVAL));
4310
4311 /* first, check to ensure we've got the right child count */
4312 rvd = spa->spa_root_vdev;
4313 lastlog = 0;
4314 for (c = 0; c < rvd->vdev_children; c++) {
4315 vdev_t *vd = rvd->vdev_child[c];
4316
4317 /* don't count the holes & logs as children */
4318 if (vd->vdev_islog || vd->vdev_ishole) {
4319 if (lastlog == 0)
4320 lastlog = c;
4321 continue;
4322 }
4323
4324 lastlog = 0;
4325 }
4326 if (children != (lastlog != 0 ? lastlog : rvd->vdev_children))
4327 return (spa_vdev_exit(spa, NULL, txg, EINVAL));
4328
4329 /* next, ensure no spare or cache devices are part of the split */
4330 if (nvlist_lookup_nvlist(nvl, ZPOOL_CONFIG_SPARES, &tmp) == 0 ||
4331 nvlist_lookup_nvlist(nvl, ZPOOL_CONFIG_L2CACHE, &tmp) == 0)
4332 return (spa_vdev_exit(spa, NULL, txg, EINVAL));
4333
4334 vml = kmem_zalloc(children * sizeof (vdev_t *), KM_SLEEP);
4335 glist = kmem_zalloc(children * sizeof (uint64_t), KM_SLEEP);
4336
4337 /* then, loop over each vdev and validate it */
4338 for (c = 0; c < children; c++) {
4339 uint64_t is_hole = 0;
4340
4341 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,
4342 &is_hole);
4343
4344 if (is_hole != 0) {
4345 if (spa->spa_root_vdev->vdev_child[c]->vdev_ishole ||
4346 spa->spa_root_vdev->vdev_child[c]->vdev_islog) {
4347 continue;
4348 } else {
4349 error = EINVAL;
4350 break;
4351 }
4352 }
4353
4354 /* which disk is going to be split? */
4355 if (nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_GUID,
4356 &glist[c]) != 0) {
4357 error = EINVAL;
4358 break;
4359 }
4360
4361 /* look it up in the spa */
4362 vml[c] = spa_lookup_by_guid(spa, glist[c], B_FALSE);
4363 if (vml[c] == NULL) {
4364 error = ENODEV;
4365 break;
4366 }
4367
4368 /* make sure there's nothing stopping the split */
4369 if (vml[c]->vdev_parent->vdev_ops != &vdev_mirror_ops ||
4370 vml[c]->vdev_islog ||
4371 vml[c]->vdev_ishole ||
4372 vml[c]->vdev_isspare ||
4373 vml[c]->vdev_isl2cache ||
4374 !vdev_writeable(vml[c]) ||
4375 vml[c]->vdev_children != 0 ||
4376 vml[c]->vdev_state != VDEV_STATE_HEALTHY ||
4377 c != spa->spa_root_vdev->vdev_child[c]->vdev_id) {
4378 error = EINVAL;
4379 break;
4380 }
4381
4382 if (vdev_dtl_required(vml[c])) {
4383 error = EBUSY;
4384 break;
4385 }
4386
4387 /* we need certain info from the top level */
4388 VERIFY(nvlist_add_uint64(child[c], ZPOOL_CONFIG_METASLAB_ARRAY,
4389 vml[c]->vdev_top->vdev_ms_array) == 0);
4390 VERIFY(nvlist_add_uint64(child[c], ZPOOL_CONFIG_METASLAB_SHIFT,
4391 vml[c]->vdev_top->vdev_ms_shift) == 0);
4392 VERIFY(nvlist_add_uint64(child[c], ZPOOL_CONFIG_ASIZE,
4393 vml[c]->vdev_top->vdev_asize) == 0);
4394 VERIFY(nvlist_add_uint64(child[c], ZPOOL_CONFIG_ASHIFT,
4395 vml[c]->vdev_top->vdev_ashift) == 0);
4396 }
4397
4398 if (error != 0) {
4399 kmem_free(vml, children * sizeof (vdev_t *));
4400 kmem_free(glist, children * sizeof (uint64_t));
4401 return (spa_vdev_exit(spa, NULL, txg, error));
4402 }
4403
4404 /* stop writers from using the disks */
4405 for (c = 0; c < children; c++) {
4406 if (vml[c] != NULL)
4407 vml[c]->vdev_offline = B_TRUE;
4408 }
4409 vdev_reopen(spa->spa_root_vdev);
4410
4411 /*
4412 * Temporarily record the splitting vdevs in the spa config. This
4413 * will disappear once the config is regenerated.
4414 */
4415 VERIFY(nvlist_alloc(&nvl, NV_UNIQUE_NAME, KM_SLEEP) == 0);
4416 VERIFY(nvlist_add_uint64_array(nvl, ZPOOL_CONFIG_SPLIT_LIST,
4417 glist, children) == 0);
4418 kmem_free(glist, children * sizeof (uint64_t));
4419
4420 mutex_enter(&spa->spa_props_lock);
4421 VERIFY(nvlist_add_nvlist(spa->spa_config, ZPOOL_CONFIG_SPLIT,
4422 nvl) == 0);
4423 mutex_exit(&spa->spa_props_lock);
4424 spa->spa_config_splitting = nvl;
4425 vdev_config_dirty(spa->spa_root_vdev);
4426
4427 /* configure and create the new pool */
4428 VERIFY(nvlist_add_string(config, ZPOOL_CONFIG_POOL_NAME, newname) == 0);
4429 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_STATE,
4430 exp ? POOL_STATE_EXPORTED : POOL_STATE_ACTIVE) == 0);
4431 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_VERSION,
4432 spa_version(spa)) == 0);
4433 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_TXG,
4434 spa->spa_config_txg) == 0);
4435 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_GUID,
4436 spa_generate_guid(NULL)) == 0);
4437 (void) nvlist_lookup_string(props,
4438 zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot);
4439
4440 /* add the new pool to the namespace */
4441 newspa = spa_add(newname, config, altroot);
4442 newspa->spa_config_txg = spa->spa_config_txg;
4443 spa_set_log_state(newspa, SPA_LOG_CLEAR);
4444
4445 /* release the spa config lock, retaining the namespace lock */
4446 spa_vdev_config_exit(spa, NULL, txg, 0, FTAG);
4447
4448 if (zio_injection_enabled)
4449 zio_handle_panic_injection(spa, FTAG, 1);
4450
4451 spa_activate(newspa, spa_mode_global);
4452 spa_async_suspend(newspa);
4453
4454 /* create the new pool from the disks of the original pool */
4455 error = spa_load(newspa, SPA_LOAD_IMPORT, SPA_IMPORT_ASSEMBLE, B_TRUE);
4456 if (error)
4457 goto out;
4458
4459 /* if that worked, generate a real config for the new pool */
4460 if (newspa->spa_root_vdev != NULL) {
4461 VERIFY(nvlist_alloc(&newspa->spa_config_splitting,
4462 NV_UNIQUE_NAME, KM_SLEEP) == 0);
4463 VERIFY(nvlist_add_uint64(newspa->spa_config_splitting,
4464 ZPOOL_CONFIG_SPLIT_GUID, spa_guid(spa)) == 0);
4465 spa_config_set(newspa, spa_config_generate(newspa, NULL, -1ULL,
4466 B_TRUE));
4467 }
4468
4469 /* set the props */
4470 if (props != NULL) {
4471 spa_configfile_set(newspa, props, B_FALSE);
4472 error = spa_prop_set(newspa, props);
4473 if (error)
4474 goto out;
4475 }
4476
4477 /* flush everything */
4478 txg = spa_vdev_config_enter(newspa);
4479 vdev_config_dirty(newspa->spa_root_vdev);
4480 (void) spa_vdev_config_exit(newspa, NULL, txg, 0, FTAG);
4481
4482 if (zio_injection_enabled)
4483 zio_handle_panic_injection(spa, FTAG, 2);
4484
4485 spa_async_resume(newspa);
4486
4487 /* finally, update the original pool's config */
4488 txg = spa_vdev_config_enter(spa);
4489 tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
4490 error = dmu_tx_assign(tx, TXG_WAIT);
4491 if (error != 0)
4492 dmu_tx_abort(tx);
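	/* split off each vdev from the original pool, log the detach, and free it */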
4493 for (c = 0; c < children; c++) {
4494 if (vml[c] != NULL) {
4495 vdev_split(vml[c]);
4496 if (error == 0)
4497 spa_history_log_internal(LOG_POOL_VDEV_DETACH,
4498 spa, tx, "vdev=%s",
4499 vml[c]->vdev_path);
4500 vdev_free(vml[c]);
4501 }
4502 }
4503 vdev_config_dirty(spa->spa_root_vdev);
4504 spa->spa_config_splitting = NULL;
4505 nvlist_free(nvl);
4506 if (error == 0)
4507 dmu_tx_commit(tx);
4508 (void) spa_vdev_exit(spa, NULL, txg, 0);
4509
4510 if (zio_injection_enabled)
4511 zio_handle_panic_injection(spa, FTAG, 3);
4512
4513 /* split is complete; log a history record */
4514 spa_history_log_internal(LOG_POOL_SPLIT, newspa, NULL,
4515 "split new pool %s from pool %s", newname, spa_name(spa));
4516
4517 kmem_free(vml, children * sizeof (vdev_t *));
4518
4519 /* if we're not going to mount the filesystems in userland, export */
4520 if (exp)
4521 error = spa_export_common(newname, POOL_STATE_EXPORTED, NULL,
4522 B_FALSE, B_FALSE);
4523
4524 return (error);
4525
4526 out:
4527 spa_unload(newspa);
4528 spa_deactivate(newspa);
4529 spa_remove(newspa);
4530
4531 txg = spa_vdev_config_enter(spa);
4532
4533 /* re-online all offlined disks */
4534 for (c = 0; c < children; c++) {
4535 if (vml[c] != NULL)
4536 vml[c]->vdev_offline = B_FALSE;
4537 }
4538 vdev_reopen(spa->spa_root_vdev);
4539
4540 nvlist_free(spa->spa_config_splitting);
4541 spa->spa_config_splitting = NULL;
4542 (void) spa_vdev_exit(spa, NULL, txg, error);
4543
4544 kmem_free(vml, children * sizeof (vdev_t *));
4545 return (error);
4546 }
4547
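/*
 * Search an array of device nvlists for the entry whose ZPOOL_CONFIG_GUID
 * matches target_guid; returns NULL if no entry matches.
 */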
4548 static nvlist_t *
4549 spa_nvlist_lookup_by_guid(nvlist_t **nvpp, int count, uint64_t target_guid)
4550 {
4551 for (int i = 0; i < count; i++) {
4552 uint64_t guid;
4553
4554 VERIFY(nvlist_lookup_uint64(nvpp[i], ZPOOL_CONFIG_GUID,
4555 &guid) == 0);
4556
4557 if (guid == target_guid)
4558 return (nvpp[i]);
4559 }
4560
4561 return (NULL);
4562 }
4563
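/*
 * Rebuild the nvlist array 'name' in 'config' with 'dev_to_remove' omitted.
 */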
4564 static void
4565 spa_vdev_remove_aux(nvlist_t *config, char *name, nvlist_t **dev, int count,
4566 nvlist_t *dev_to_remove)
4567 {
4568 nvlist_t **newdev = NULL;
4569
4570 if (count > 1)
4571 newdev = kmem_alloc((count - 1) * sizeof (void *), KM_SLEEP);
4572
4573 for (int i = 0, j = 0; i < count; i++) {
4574 if (dev[i] == dev_to_remove)
4575 continue;
4576 VERIFY(nvlist_dup(dev[i], &newdev[j++], KM_SLEEP) == 0);
4577 }
4578
4579 VERIFY(nvlist_remove(config, name, DATA_TYPE_NVLIST_ARRAY) == 0);
4580 VERIFY(nvlist_add_nvlist_array(config, name, newdev, count - 1) == 0);
4581
4582 for (int i = 0; i < count - 1; i++)
4583 nvlist_free(newdev[i]);
4584
4585 if (count > 1)
4586 kmem_free(newdev, (count - 1) * sizeof (void *));
4587 }
4588
4589 /*
4590 * Evacuate the device.
4591 */
4592 static int
4593 spa_vdev_remove_evacuate(spa_t *spa, vdev_t *vd)
4594 {
4595 uint64_t txg;
4596 int error = 0;
4597
4598 ASSERT(MUTEX_HELD(&spa_namespace_lock));
4599 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0);
4600 ASSERT(vd == vd->vdev_top);
4601
4602 /*
4603 * Evacuate the device. We don't hold the config lock as writer
4604	 * since we need to do I/O, but we do keep the
4605	 * spa_namespace_lock held.  Once this completes, the device
4606 * should no longer have any blocks allocated on it.
4607 */
4608 if (vd->vdev_islog) {
4609 if (vd->vdev_stat.vs_alloc != 0)
4610 error = spa_offline_log(spa);
4611 } else {
4612 error = ENOTSUP;
4613 }
4614
4615 if (error)
4616 return (error);
4617
4618 /*
4619 * The evacuation succeeded. Remove any remaining MOS metadata
4620 * associated with this vdev, and wait for these changes to sync.
4621 */
4622 ASSERT3U(vd->vdev_stat.vs_alloc, ==, 0);
4623 txg = spa_vdev_config_enter(spa);
4624 vd->vdev_removing = B_TRUE;
4625 vdev_dirty(vd, 0, NULL, txg);
4626 vdev_config_dirty(vd);
4627 spa_vdev_config_exit(spa, NULL, txg, 0, FTAG);
4628
4629 return (0);
4630 }
4631
4632 /*
4633 * Complete the removal by cleaning up the namespace.
4634 */
4635 static void
4636 spa_vdev_remove_from_namespace(spa_t *spa, vdev_t *vd)
4637 {
4638 vdev_t *rvd = spa->spa_root_vdev;
4639 uint64_t id = vd->vdev_id;
4640 boolean_t last_vdev = (id == (rvd->vdev_children - 1));
4641
4642 ASSERT(MUTEX_HELD(&spa_namespace_lock));
4643 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
4644 ASSERT(vd == vd->vdev_top);
4645
4646 /*
4647	 * Only remove the vdev if it is empty.
4648 */
4649 if (vd->vdev_stat.vs_alloc != 0)
4650 return;
4651
4652 (void) vdev_label_init(vd, 0, VDEV_LABEL_REMOVE);
4653
4654 if (list_link_active(&vd->vdev_state_dirty_node))
4655 vdev_state_clean(vd);
4656 if (list_link_active(&vd->vdev_config_dirty_node))
4657 vdev_config_clean(vd);
4658
4659 vdev_free(vd);
4660
4661 if (last_vdev) {
4662 vdev_compact_children(rvd);
4663 } else {
4664 vd = vdev_alloc_common(spa, id, 0, &vdev_hole_ops);
4665 vdev_add_child(rvd, vd);
4666 }
4667 vdev_config_dirty(rvd);
4668
4669 /*
4670 * Reassess the health of our root vdev.
4671 */
4672 vdev_reopen(rvd);
4673 }
4674
4675 /*
4676 * Remove a device from the pool -
4677 *
4678 * Removing a device from the vdev namespace requires several steps
4679 * and can take a significant amount of time.  As a result, we use
4680 * the spa_vdev_config_[enter/exit] functions, which allow us to
4681 * grab and release the spa_config_lock while still holding the namespace
4682 * lock. During each step the configuration is synced out.
4683 */
4684
4685 /*
4686 * Remove a device from the pool. Currently, this supports removing only hot
4687 * spares, slogs, and level 2 ARC devices.
4688 */
4689 int
4690 spa_vdev_remove(spa_t *spa, uint64_t guid, boolean_t unspare)
4691 {
4692 vdev_t *vd;
4693 metaslab_group_t *mg;
4694 nvlist_t **spares, **l2cache, *nv;
4695 uint64_t txg = 0;
4696 uint_t nspares, nl2cache;
4697 int error = 0;
4698 boolean_t locked = MUTEX_HELD(&spa_namespace_lock);
4699
4700 ASSERT(spa_writeable(spa));
4701
4702 if (!locked)
4703 txg = spa_vdev_enter(spa);
4704
4705 vd = spa_lookup_by_guid(spa, guid, B_FALSE);
4706
4707 if (spa->spa_spares.sav_vdevs != NULL &&
4708 nvlist_lookup_nvlist_array(spa->spa_spares.sav_config,
4709 ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0 &&
4710 (nv = spa_nvlist_lookup_by_guid(spares, nspares, guid)) != NULL) {
4711 /*
4712 * Only remove the hot spare if it's not currently in use
4713 * in this pool.
4714 */
4715 if (vd == NULL || unspare) {
4716 spa_vdev_remove_aux(spa->spa_spares.sav_config,
4717 ZPOOL_CONFIG_SPARES, spares, nspares, nv);
4718 spa_load_spares(spa);
4719 spa->spa_spares.sav_sync = B_TRUE;
4720 } else {
4721 error = EBUSY;
4722 }
4723 } else if (spa->spa_l2cache.sav_vdevs != NULL &&
4724 nvlist_lookup_nvlist_array(spa->spa_l2cache.sav_config,
4725 ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0 &&
4726 (nv = spa_nvlist_lookup_by_guid(l2cache, nl2cache, guid)) != NULL) {
4727 /*
4728 * Cache devices can always be removed.
4729 */
4730 spa_vdev_remove_aux(spa->spa_l2cache.sav_config,
4731 ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache, nv);
4732 spa_load_l2cache(spa);
4733 spa->spa_l2cache.sav_sync = B_TRUE;
4734 } else if (vd != NULL && vd->vdev_islog) {
4735 ASSERT(!locked);
4736 ASSERT(vd == vd->vdev_top);
4737
4738 /*
4739 * XXX - Once we have bp-rewrite this should
4740 * become the common case.
4741 */
4742
4743 mg = vd->vdev_mg;
4744
4745 /*
4746 * Stop allocating from this vdev.
4747 */
4748 metaslab_group_passivate(mg);
4749
4750 /*
4751 * Wait for the youngest allocations and frees to sync,
4752 * and then wait for the deferral of those frees to finish.
4753 */
4754 spa_vdev_config_exit(spa, NULL,
4755 txg + TXG_CONCURRENT_STATES + TXG_DEFER_SIZE, 0, FTAG);
4756
4757 /*
4758 * Attempt to evacuate the vdev.
4759 */
4760 error = spa_vdev_remove_evacuate(spa, vd);
4761
4762 txg = spa_vdev_config_enter(spa);
4763
4764 /*
4765 * If we couldn't evacuate the vdev, unwind.
4766 */
4767 if (error) {
4768 metaslab_group_activate(mg);
4769 return (spa_vdev_exit(spa, NULL, txg, error));
4770 }
4771
4772 /*
4773 * Clean up the vdev namespace.
4774 */
4775 spa_vdev_remove_from_namespace(spa, vd);
4776
4777 } else if (vd != NULL) {
4778 /*
4779 * Normal vdevs cannot be removed (yet).
4780 */
4781 error = ENOTSUP;
4782 } else {
4783 /*
4784 * There is no vdev of any kind with the specified guid.
4785 */
4786 error = ENOENT;
4787 }
4788
4789 if (!locked)
4790 return (spa_vdev_exit(spa, NULL, txg, error));
4791
4792 return (error);
4793 }
4794
4795 /*
4796 * Find any device that's done replacing, or a vdev marked 'unspare' that's
4797 * currently spared, so we can detach it.
4798 */
4799 static vdev_t *
4800 spa_vdev_resilver_done_hunt(vdev_t *vd)
4801 {
4802 vdev_t *newvd, *oldvd;
4803
4804 for (int c = 0; c < vd->vdev_children; c++) {
4805 oldvd = spa_vdev_resilver_done_hunt(vd->vdev_child[c]);
4806 if (oldvd != NULL)
4807 return (oldvd);
4808 }
4809
4810 /*
4811 * Check for a completed replacement. We always consider the first
4812 * vdev in the list to be the oldest vdev, and the last one to be
4813 * the newest (see spa_vdev_attach() for how that works). In
4814 * the case where the newest vdev is faulted, we will not automatically
4815 * remove it after a resilver completes. This is OK as it will require
4816 * user intervention to determine which disk the admin wishes to keep.
4817 */
4818 if (vd->vdev_ops == &vdev_replacing_ops) {
4819 ASSERT(vd->vdev_children > 1);
4820
4821 newvd = vd->vdev_child[vd->vdev_children - 1];
4822 oldvd = vd->vdev_child[0];
4823
4824 if (vdev_dtl_empty(newvd, DTL_MISSING) &&
4825 vdev_dtl_empty(newvd, DTL_OUTAGE) &&
4826 !vdev_dtl_required(oldvd))
4827 return (oldvd);
4828 }
4829
4830 /*
4831 * Check for a completed resilver with the 'unspare' flag set.
4832 */
4833 if (vd->vdev_ops == &vdev_spare_ops) {
4834 vdev_t *first = vd->vdev_child[0];
4835 vdev_t *last = vd->vdev_child[vd->vdev_children - 1];
4836
4837 if (last->vdev_unspare) {
4838 oldvd = first;
4839 newvd = last;
4840 } else if (first->vdev_unspare) {
4841 oldvd = last;
4842 newvd = first;
4843 } else {
4844 oldvd = NULL;
4845 }
4846
4847 if (oldvd != NULL &&
4848 vdev_dtl_empty(newvd, DTL_MISSING) &&
4849 vdev_dtl_empty(newvd, DTL_OUTAGE) &&
4850 !vdev_dtl_required(oldvd))
4851 return (oldvd);
4852
4853 /*
4854 * If there are more than two spares attached to a disk,
4855 * and those spares are not required, then we want to
4856 * attempt to free them up now so that they can be used
4857 * by other pools. Once we're back down to a single
4858 * disk+spare, we stop removing them.
4859 */
4860 if (vd->vdev_children > 2) {
4861 newvd = vd->vdev_child[1];
4862
4863 if (newvd->vdev_isspare && last->vdev_isspare &&
4864 vdev_dtl_empty(last, DTL_MISSING) &&
4865 vdev_dtl_empty(last, DTL_OUTAGE) &&
4866 !vdev_dtl_required(newvd))
4867 return (newvd);
4868 }
4869 }
4870
4871 return (NULL);
4872 }
4873
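/*
 * Detach any devices that have finished resilvering (see
 * spa_vdev_resilver_done_hunt()), including the original hot spare once a
 * spared device has completed its replacement.
 */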
4874 static void
4875 spa_vdev_resilver_done(spa_t *spa)
4876 {
4877 vdev_t *vd, *pvd, *ppvd;
4878 uint64_t guid, sguid, pguid, ppguid;
4879
4880 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
4881
4882 while ((vd = spa_vdev_resilver_done_hunt(spa->spa_root_vdev)) != NULL) {
4883 pvd = vd->vdev_parent;
4884 ppvd = pvd->vdev_parent;
4885 guid = vd->vdev_guid;
4886 pguid = pvd->vdev_guid;
4887 ppguid = ppvd->vdev_guid;
4888 sguid = 0;
4889 /*
4890 * If we have just finished replacing a hot spared device, then
4891 * we need to detach the parent's first child (the original hot
4892 * spare) as well.
4893 */
4894 if (ppvd->vdev_ops == &vdev_spare_ops && pvd->vdev_id == 0 &&
4895 ppvd->vdev_children == 2) {
4896 ASSERT(pvd->vdev_ops == &vdev_replacing_ops);
4897 sguid = ppvd->vdev_child[1]->vdev_guid;
4898 }
4899 spa_config_exit(spa, SCL_ALL, FTAG);
4900 if (spa_vdev_detach(spa, guid, pguid, B_TRUE) != 0)
4901 return;
4902 if (sguid && spa_vdev_detach(spa, sguid, ppguid, B_TRUE) != 0)
4903 return;
4904 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
4905 }
4906
4907 spa_config_exit(spa, SCL_ALL, FTAG);
4908 }
4909
4910 /*
4911 * Update the stored path or FRU for this vdev.
4912 */
4913 int
4914 spa_vdev_set_common(spa_t *spa, uint64_t guid, const char *value,
4915 boolean_t ispath)
4916 {
4917 vdev_t *vd;
4918 boolean_t sync = B_FALSE;
4919
4920 ASSERT(spa_writeable(spa));
4921
4922 spa_vdev_state_enter(spa, SCL_ALL);
4923
4924 if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
4925 return (spa_vdev_state_exit(spa, NULL, ENOENT));
4926
4927 if (!vd->vdev_ops->vdev_op_leaf)
4928 return (spa_vdev_state_exit(spa, NULL, ENOTSUP));
4929
4930 if (ispath) {
4931 if (strcmp(value, vd->vdev_path) != 0) {
4932 spa_strfree(vd->vdev_path);
4933 vd->vdev_path = spa_strdup(value);
4934 sync = B_TRUE;
4935 }
4936 } else {
4937 if (vd->vdev_fru == NULL) {
4938 vd->vdev_fru = spa_strdup(value);
4939 sync = B_TRUE;
4940 } else if (strcmp(value, vd->vdev_fru) != 0) {
4941 spa_strfree(vd->vdev_fru);
4942 vd->vdev_fru = spa_strdup(value);
4943 sync = B_TRUE;
4944 }
4945 }
4946
4947 return (spa_vdev_state_exit(spa, sync ? vd : NULL, 0));
4948 }
4949
4950 int
4951 spa_vdev_setpath(spa_t *spa, uint64_t guid, const char *newpath)
4952 {
4953 return (spa_vdev_set_common(spa, guid, newpath, B_TRUE));
4954 }
4955
4956 int
4957 spa_vdev_setfru(spa_t *spa, uint64_t guid, const char *newfru)
4958 {
4959 return (spa_vdev_set_common(spa, guid, newfru, B_FALSE));
4960 }
4961
4962 /*
4963 * ==========================================================================
4964 * SPA Scanning
4965 * ==========================================================================
4966 */
4967
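/*
 * Stop an in-progress scan.  A resilver cannot be stopped this way.
 */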
4968 int
4969 spa_scan_stop(spa_t *spa)
4970 {
4971 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0);
4972 if (dsl_scan_resilvering(spa->spa_dsl_pool))
4973 return (EBUSY);
4974 return (dsl_scan_cancel(spa->spa_dsl_pool));
4975 }
4976
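/*
 * Start a scrub or resilver of the pool.
 */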
4977 int
4978 spa_scan(spa_t *spa, pool_scan_func_t func)
4979 {
4980 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0);
4981
4982 if (func >= POOL_SCAN_FUNCS || func == POOL_SCAN_NONE)
4983 return (ENOTSUP);
4984
4985 /*
4986 * If a resilver was requested, but there is no DTL on a
4987 * writeable leaf device, we have nothing to do.
4988 */
4989 if (func == POOL_SCAN_RESILVER &&
4990 !vdev_resilver_needed(spa->spa_root_vdev, NULL, NULL)) {
4991 spa_async_request(spa, SPA_ASYNC_RESILVER_DONE);
4992 return (0);
4993 }
4994
4995 return (dsl_scan(spa->spa_dsl_pool, func));
4996 }
4997
4998 /*
4999 * ==========================================================================
5000 * SPA async task processing
5001 * ==========================================================================
5002 */
5003
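/*
 * Walk the vdev tree and transition any vdev with vdev_remove_wanted set
 * to the REMOVED state, clearing its error counters along the way.
 */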
5004 static void
5005 spa_async_remove(spa_t *spa, vdev_t *vd)
5006 {
5007 if (vd->vdev_remove_wanted) {
5008 vd->vdev_remove_wanted = B_FALSE;
5009 vd->vdev_delayed_close = B_FALSE;
5010 vdev_set_state(vd, B_FALSE, VDEV_STATE_REMOVED, VDEV_AUX_NONE);
5011
5012 /*
5013 * We want to clear the stats, but we don't want to do a full
5014 * vdev_clear() as that will cause us to throw away
5015 * degraded/faulted state as well as attempt to reopen the
5016 * device, all of which is a waste.
5017 */
5018 vd->vdev_stat.vs_read_errors = 0;
5019 vd->vdev_stat.vs_write_errors = 0;
5020 vd->vdev_stat.vs_checksum_errors = 0;
5021
5022 vdev_state_dirty(vd->vdev_top);
5023 }
5024
5025 for (int c = 0; c < vd->vdev_children; c++)
5026 spa_async_remove(spa, vd->vdev_child[c]);
5027 }
5028
5029 static void
5030 spa_async_probe(spa_t *spa, vdev_t *vd)
5031 {
5032 if (vd->vdev_probe_wanted) {
5033 vd->vdev_probe_wanted = B_FALSE;
5034 vdev_reopen(vd); /* vdev_open() does the actual probe */
5035 }
5036
5037 for (int c = 0; c < vd->vdev_children; c++)
5038 spa_async_probe(spa, vd->vdev_child[c]);
5039 }
5040
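/*
 * Post an ESC_DEV_DLE sysevent for each leaf vdev's physical path so the
 * underlying device can be expanded when 'autoexpand' is enabled.
 */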
5041 static void
5042 spa_async_autoexpand(spa_t *spa, vdev_t *vd)
5043 {
5044 sysevent_id_t eid;
5045 nvlist_t *attr;
5046 char *physpath;
5047
5048 if (!spa->spa_autoexpand)
5049 return;
5050
5051 for (int c = 0; c < vd->vdev_children; c++) {
5052 vdev_t *cvd = vd->vdev_child[c];
5053 spa_async_autoexpand(spa, cvd);
5054 }
5055
5056 if (!vd->vdev_ops->vdev_op_leaf || vd->vdev_physpath == NULL)
5057 return;
5058
5059 physpath = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
5060 (void) snprintf(physpath, MAXPATHLEN, "/devices%s", vd->vdev_physpath);
5061
5062 VERIFY(nvlist_alloc(&attr, NV_UNIQUE_NAME, KM_SLEEP) == 0);
5063 VERIFY(nvlist_add_string(attr, DEV_PHYS_PATH, physpath) == 0);
5064
5065 (void) ddi_log_sysevent(zfs_dip, SUNW_VENDOR, EC_DEV_STATUS,
5066 ESC_DEV_DLE, attr, &eid, DDI_SLEEP);
5067
5068 nvlist_free(attr);
5069 kmem_free(physpath, MAXPATHLEN);
5070 }
5071
5072 static void
5073 spa_async_thread(spa_t *spa)
5074 {
5075 int tasks;
5076
5077 ASSERT(spa->spa_sync_on);
5078
5079 mutex_enter(&spa->spa_async_lock);
5080 tasks = spa->spa_async_tasks;
5081 spa->spa_async_tasks = 0;
5082 mutex_exit(&spa->spa_async_lock);
5083
5084 /*
5085 * See if the config needs to be updated.
5086 */
5087 if (tasks & SPA_ASYNC_CONFIG_UPDATE) {
5088 uint64_t old_space, new_space;
5089
5090 mutex_enter(&spa_namespace_lock);
5091 old_space = metaslab_class_get_space(spa_normal_class(spa));
5092 spa_config_update(spa, SPA_CONFIG_UPDATE_POOL);
5093 new_space = metaslab_class_get_space(spa_normal_class(spa));
5094 mutex_exit(&spa_namespace_lock);
5095
5096 /*
5097 * If the pool grew as a result of the config update,
5098 * then log an internal history event.
5099 */
5100 if (new_space != old_space) {
5101 spa_history_log_internal(LOG_POOL_VDEV_ONLINE,
5102 spa, NULL,
5103 "pool '%s' size: %llu(+%llu)",
5104 spa_name(spa), new_space, new_space - old_space);
5105 }
5106 }
5107
5108 /*
5109 * See if any devices need to be marked REMOVED.
5110 */
5111 if (tasks & SPA_ASYNC_REMOVE) {
5112 spa_vdev_state_enter(spa, SCL_NONE);
5113 spa_async_remove(spa, spa->spa_root_vdev);
5114 for (int i = 0; i < spa->spa_l2cache.sav_count; i++)
5115 spa_async_remove(spa, spa->spa_l2cache.sav_vdevs[i]);
5116 for (int i = 0; i < spa->spa_spares.sav_count; i++)
5117 spa_async_remove(spa, spa->spa_spares.sav_vdevs[i]);
5118 (void) spa_vdev_state_exit(spa, NULL, 0);
5119 }
5120
5121 if ((tasks & SPA_ASYNC_AUTOEXPAND) && !spa_suspended(spa)) {
5122 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
5123 spa_async_autoexpand(spa, spa->spa_root_vdev);
5124 spa_config_exit(spa, SCL_CONFIG, FTAG);
5125 }
5126
5127 /*
5128 * See if any devices need to be probed.
5129 */
5130 if (tasks & SPA_ASYNC_PROBE) {
5131 spa_vdev_state_enter(spa, SCL_NONE);
5132 spa_async_probe(spa, spa->spa_root_vdev);
5133 (void) spa_vdev_state_exit(spa, NULL, 0);
5134 }
5135
5136 /*
5137 * If any devices are done replacing, detach them.
5138 */
5139 if (tasks & SPA_ASYNC_RESILVER_DONE)
5140 spa_vdev_resilver_done(spa);
5141
5142 /*
5143 * Kick off a resilver.
5144 */
5145 if (tasks & SPA_ASYNC_RESILVER)
5146 dsl_resilver_restart(spa->spa_dsl_pool, 0);
5147
5148 /*
5149 * Let the world know that we're done.
5150 */
5151 mutex_enter(&spa->spa_async_lock);
5152 spa->spa_async_thread = NULL;
5153 cv_broadcast(&spa->spa_async_cv);
5154 mutex_exit(&spa->spa_async_lock);
5155 thread_exit();
5156 }
5157
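/*
 * Prevent new async tasks from being dispatched and wait for any
 * in-flight async thread to finish.
 */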
5158 void
5159 spa_async_suspend(spa_t *spa)
5160 {
5161 mutex_enter(&spa->spa_async_lock);
5162 spa->spa_async_suspended++;
5163 while (spa->spa_async_thread != NULL)
5164 cv_wait(&spa->spa_async_cv, &spa->spa_async_lock);
5165 mutex_exit(&spa->spa_async_lock);
5166 }
5167
5168 void
5169 spa_async_resume(spa_t *spa)
5170 {
5171 mutex_enter(&spa->spa_async_lock);
5172 ASSERT(spa->spa_async_suspended != 0);
5173 spa->spa_async_suspended--;
5174 mutex_exit(&spa->spa_async_lock);
5175 }
5176
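/*
 * Spawn the async thread if there are pending tasks, we are not suspended,
 * and the root filesystem is writeable.
 */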
5177 static void
5178 spa_async_dispatch(spa_t *spa)
5179 {
5180 mutex_enter(&spa->spa_async_lock);
5181 if (spa->spa_async_tasks && !spa->spa_async_suspended &&
5182 spa->spa_async_thread == NULL &&
5183 rootdir != NULL && !vn_is_readonly(rootdir))
5184 spa->spa_async_thread = thread_create(NULL, 0,
5185 spa_async_thread, spa, 0, &p0, TS_RUN, maxclsyspri);
5186 mutex_exit(&spa->spa_async_lock);
5187 }
5188
5189 void
5190 spa_async_request(spa_t *spa, int task)
5191 {
5192 zfs_dbgmsg("spa=%s async request task=%u", spa->spa_name, task);
5193 mutex_enter(&spa->spa_async_lock);
5194 spa->spa_async_tasks |= task;
5195 mutex_exit(&spa->spa_async_lock);
5196 }
5197
5198 /*
5199 * ==========================================================================
5200 * SPA syncing routines
5201 * ==========================================================================
5202 */
5203
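/*
 * Callback that defers a free by appending the block pointer to the bpobj
 * passed in 'arg'.
 */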
5204 static int
5205 bpobj_enqueue_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
5206 {
5207 bpobj_t *bpo = arg;
5208 bpobj_enqueue(bpo, bp, tx);
5209 return (0);
5210 }
5211
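/*
 * Callback that frees a block immediately by issuing a free zio as a child
 * of the root zio passed in 'arg'.
 */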
5212 static int
5213 spa_free_sync_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
5214 {
5215 zio_t *zio = arg;
5216
5217 zio_nowait(zio_free_sync(zio, zio->io_spa, dmu_tx_get_txg(tx), bp,
5218 zio->io_flags));
5219 return (0);
5220 }
5221
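/*
 * Pack 'nv' with XDR encoding into whole SPA_CONFIG_BLOCKSIZE blocks,
 * write it to object 'obj', and record the packed size in the object's
 * bonus buffer.
 */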
5222 static void
5223 spa_sync_nvlist(spa_t *spa, uint64_t obj, nvlist_t *nv, dmu_tx_t *tx)
5224 {
5225 char *packed = NULL;
5226 size_t bufsize;
5227 size_t nvsize = 0;
5228 dmu_buf_t *db;
5229
5230 VERIFY(nvlist_size(nv, &nvsize, NV_ENCODE_XDR) == 0);
5231
5232 /*
5233 * Write full (SPA_CONFIG_BLOCKSIZE) blocks of configuration
5234 * information. This avoids the dbuf_will_dirty() path and
5235 * saves us a pre-read to get data we don't actually care about.
5236 */
5237 bufsize = P2ROUNDUP(nvsize, SPA_CONFIG_BLOCKSIZE);
5238 packed = kmem_alloc(bufsize, KM_SLEEP);
5239
5240 VERIFY(nvlist_pack(nv, &packed, &nvsize, NV_ENCODE_XDR,
5241 KM_SLEEP) == 0);
5242 bzero(packed + nvsize, bufsize - nvsize);
5243
5244 dmu_write(spa->spa_meta_objset, obj, 0, bufsize, packed, tx);
5245
5246 kmem_free(packed, bufsize);
5247
5248 VERIFY(0 == dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db));
5249 dmu_buf_will_dirty(db, tx);
5250 *(uint64_t *)db->db_data = nvsize;
5251 dmu_buf_rele(db, FTAG);
5252 }
5253
5254 static void
5255 spa_sync_aux_dev(spa_t *spa, spa_aux_vdev_t *sav, dmu_tx_t *tx,
5256 const char *config, const char *entry)
5257 {
5258 nvlist_t *nvroot;
5259 nvlist_t **list;
5260 int i;
5261
5262 if (!sav->sav_sync)
5263 return;
5264
5265 /*
5266 * Update the MOS nvlist describing the list of available devices.
5267 * spa_validate_aux() will have already made sure this nvlist is
5268 * valid and the vdevs are labeled appropriately.
5269 */
5270 if (sav->sav_object == 0) {
5271 sav->sav_object = dmu_object_alloc(spa->spa_meta_objset,
5272 DMU_OT_PACKED_NVLIST, 1 << 14, DMU_OT_PACKED_NVLIST_SIZE,
5273 sizeof (uint64_t), tx);
5274 VERIFY(zap_update(spa->spa_meta_objset,
5275 DMU_POOL_DIRECTORY_OBJECT, entry, sizeof (uint64_t), 1,
5276 &sav->sav_object, tx) == 0);
5277 }
5278
5279 VERIFY(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, KM_SLEEP) == 0);
5280 if (sav->sav_count == 0) {
5281 VERIFY(nvlist_add_nvlist_array(nvroot, config, NULL, 0) == 0);
5282 } else {
5283 list = kmem_alloc(sav->sav_count * sizeof (void *), KM_SLEEP);
5284 for (i = 0; i < sav->sav_count; i++)
5285 list[i] = vdev_config_generate(spa, sav->sav_vdevs[i],
5286 B_FALSE, VDEV_CONFIG_L2CACHE);
5287 VERIFY(nvlist_add_nvlist_array(nvroot, config, list,
5288 sav->sav_count) == 0);
5289 for (i = 0; i < sav->sav_count; i++)
5290 nvlist_free(list[i]);
5291 kmem_free(list, sav->sav_count * sizeof (void *));
5292 }
5293
5294 spa_sync_nvlist(spa, sav->sav_object, nvroot, tx);
5295 nvlist_free(nvroot);
5296
5297 sav->sav_sync = B_FALSE;
5298 }
5299
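/*
 * If any top-level vdev configs are dirty, regenerate the pool config and
 * write it to the MOS config object; the new config is held in
 * spa_config_syncing until the txg commits.
 */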
5300 static void
5301 spa_sync_config_object(spa_t *spa, dmu_tx_t *tx)
5302 {
5303 nvlist_t *config;
5304
5305 if (list_is_empty(&spa->spa_config_dirty_list))
5306 return;
5307
5308 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
5309
5310 config = spa_config_generate(spa, spa->spa_root_vdev,
5311 dmu_tx_get_txg(tx), B_FALSE);
5312
5313 spa_config_exit(spa, SCL_STATE, FTAG);
5314
5315 if (spa->spa_config_syncing)
5316 nvlist_free(spa->spa_config_syncing);
5317 spa->spa_config_syncing = config;
5318
5319 spa_sync_nvlist(spa, spa->spa_config_object, config, tx);
5320 }
5321
5322 /*
5323 * Set zpool properties.
5324 */
5325 static void
5326 spa_sync_props(void *arg1, void *arg2, dmu_tx_t *tx)
5327 {
5328 spa_t *spa = arg1;
5329 objset_t *mos = spa->spa_meta_objset;
5330 nvlist_t *nvp = arg2;
5331 nvpair_t *elem;
5332 uint64_t intval;
5333 char *strval;
5334 zpool_prop_t prop;
5335 const char *propname;
5336 zprop_type_t proptype;
5337
5338 mutex_enter(&spa->spa_props_lock);
5339
5340 elem = NULL;
5341 while ((elem = nvlist_next_nvpair(nvp, elem))) {
5342 switch (prop = zpool_name_to_prop(nvpair_name(elem))) {
5343 case ZPOOL_PROP_VERSION:
5344 /*
5345 * Only set version for non-zpool-creation cases
5346 * (set/import). spa_create() needs special care
5347 * for version setting.
5348 */
5349 if (tx->tx_txg != TXG_INITIAL) {
5350 VERIFY(nvpair_value_uint64(elem,
5351 &intval) == 0);
5352 ASSERT(intval <= SPA_VERSION);
5353 ASSERT(intval >= spa_version(spa));
5354 spa->spa_uberblock.ub_version = intval;
5355 vdev_config_dirty(spa->spa_root_vdev);
5356 }
5357 break;
5358
5359 case ZPOOL_PROP_ALTROOT:
5360 /*
5361 * 'altroot' is a non-persistent property. It should
5362 * have been set temporarily at creation or import time.
5363 */
5364 ASSERT(spa->spa_root != NULL);
5365 break;
5366
5367 case ZPOOL_PROP_READONLY:
5368 case ZPOOL_PROP_CACHEFILE:
5369 /*
5370			 * 'readonly' and 'cachefile' are also non-persistent
5371 * properties.
5372 */
5373 break;
5374 default:
5375 /*
5376 * Set pool property values in the poolprops mos object.
5377 */
5378 if (spa->spa_pool_props_object == 0) {
5379 VERIFY((spa->spa_pool_props_object =
5380 zap_create(mos, DMU_OT_POOL_PROPS,
5381 DMU_OT_NONE, 0, tx)) > 0);
5382
5383 VERIFY(zap_update(mos,
5384 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_PROPS,
5385 8, 1, &spa->spa_pool_props_object, tx)
5386 == 0);
5387 }
5388
5389 /* normalize the property name */
5390 propname = zpool_prop_to_name(prop);
5391 proptype = zpool_prop_get_type(prop);
5392
5393 if (nvpair_type(elem) == DATA_TYPE_STRING) {
5394 ASSERT(proptype == PROP_TYPE_STRING);
5395 VERIFY(nvpair_value_string(elem, &strval) == 0);
5396 VERIFY(zap_update(mos,
5397 spa->spa_pool_props_object, propname,
5398 1, strlen(strval) + 1, strval, tx) == 0);
5399
5400 } else if (nvpair_type(elem) == DATA_TYPE_UINT64) {
5401 VERIFY(nvpair_value_uint64(elem, &intval) == 0);
5402
5403 if (proptype == PROP_TYPE_INDEX) {
5404 const char *unused;
5405 VERIFY(zpool_prop_index_to_string(
5406 prop, intval, &unused) == 0);
5407 }
5408 VERIFY(zap_update(mos,
5409 spa->spa_pool_props_object, propname,
5410 8, 1, &intval, tx) == 0);
5411 } else {
5412 ASSERT(0); /* not allowed */
5413 }
5414
5415 switch (prop) {
5416 case ZPOOL_PROP_DELEGATION:
5417 spa->spa_delegation = intval;
5418 break;
5419 case ZPOOL_PROP_BOOTFS:
5420 spa->spa_bootfs = intval;
5421 break;
5422 case ZPOOL_PROP_FAILUREMODE:
5423 spa->spa_failmode = intval;
5424 break;
5425 case ZPOOL_PROP_AUTOEXPAND:
5426 spa->spa_autoexpand = intval;
5427 if (tx->tx_txg != TXG_INITIAL)
5428 spa_async_request(spa,
5429 SPA_ASYNC_AUTOEXPAND);
5430 break;
5431 case ZPOOL_PROP_DEDUPDITTO:
5432 spa->spa_dedup_ditto = intval;
5433 break;
5434 default:
5435 break;
5436 }
5437 }
5438
5439 /* log internal history if this is not a zpool create */
5440 if (spa_version(spa) >= SPA_VERSION_ZPOOL_HISTORY &&
5441 tx->tx_txg != TXG_INITIAL) {
5442 spa_history_log_internal(LOG_POOL_PROPSET,
5443 spa, tx, "%s %lld %s",
5444 nvpair_name(elem), intval, spa_name(spa));
5445 }
5446 }
5447
5448 mutex_exit(&spa->spa_props_lock);
5449 }
5450
5451 /*
5452 * Perform one-time upgrade on-disk changes. spa_version() does not
5453 * reflect the new version this txg, so there must be no changes this
5454 * txg to anything that the upgrade code depends on after it executes.
5455 * Therefore this must be called after dsl_pool_sync() does the sync
5456 * tasks.
5457 */
5458 static void
5459 spa_sync_upgrades(spa_t *spa, dmu_tx_t *tx)
5460 {
5461 dsl_pool_t *dp = spa->spa_dsl_pool;
5462
5463 ASSERT(spa->spa_sync_pass == 1);
5464
5465 if (spa->spa_ubsync.ub_version < SPA_VERSION_ORIGIN &&
5466 spa->spa_uberblock.ub_version >= SPA_VERSION_ORIGIN) {
5467 dsl_pool_create_origin(dp, tx);
5468
5469 /* Keeping the origin open increases spa_minref */
5470 spa->spa_minref += 3;
5471 }
5472
5473 if (spa->spa_ubsync.ub_version < SPA_VERSION_NEXT_CLONES &&
5474 spa->spa_uberblock.ub_version >= SPA_VERSION_NEXT_CLONES) {
5475 dsl_pool_upgrade_clones(dp, tx);
5476 }
5477
5478 if (spa->spa_ubsync.ub_version < SPA_VERSION_DIR_CLONES &&
5479 spa->spa_uberblock.ub_version >= SPA_VERSION_DIR_CLONES) {
5480 dsl_pool_upgrade_dir_clones(dp, tx);
5481
5482 /* Keeping the freedir open increases spa_minref */
5483 spa->spa_minref += 3;
5484 }
5485 }
5486
5487 /*
5488 * Sync the specified transaction group. New blocks may be dirtied as
5489 * part of the process, so we iterate until it converges.
5490 */
5491 void
5492 spa_sync(spa_t *spa, uint64_t txg)
5493 {
5494 dsl_pool_t *dp = spa->spa_dsl_pool;
5495 objset_t *mos = spa->spa_meta_objset;
5496 bpobj_t *defer_bpo = &spa->spa_deferred_bpobj;
5497 bplist_t *free_bpl = &spa->spa_free_bplist[txg & TXG_MASK];
5498 vdev_t *rvd = spa->spa_root_vdev;
5499 vdev_t *vd;
5500 dmu_tx_t *tx;
5501 int error;
5502
5503 VERIFY(spa_writeable(spa));
5504
5505 /*
5506 * Lock out configuration changes.
5507 */
5508 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
5509
5510 spa->spa_syncing_txg = txg;
5511 spa->spa_sync_pass = 0;
5512
5513 /*
5514 * If there are any pending vdev state changes, convert them
5515 * into config changes that go out with this transaction group.
5516 */
5517 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
5518 while (list_head(&spa->spa_state_dirty_list) != NULL) {
5519 /*
5520 * We need the write lock here because, for aux vdevs,
5521 * calling vdev_config_dirty() modifies sav_config.
5522 * This is ugly and will become unnecessary when we
5523 * eliminate the aux vdev wart by integrating all vdevs
5524 * into the root vdev tree.
5525 */
5526 spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
5527 spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_WRITER);
5528 while ((vd = list_head(&spa->spa_state_dirty_list)) != NULL) {
5529 vdev_state_clean(vd);
5530 vdev_config_dirty(vd);
5531 }
5532 spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
5533 spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_READER);
5534 }
5535 spa_config_exit(spa, SCL_STATE, FTAG);
5536
5537 tx = dmu_tx_create_assigned(dp, txg);
5538
5539 /*
5540 * If we are upgrading to SPA_VERSION_RAIDZ_DEFLATE this txg,
5541 * set spa_deflate if we have no raid-z vdevs.
5542 */
5543 if (spa->spa_ubsync.ub_version < SPA_VERSION_RAIDZ_DEFLATE &&
5544 spa->spa_uberblock.ub_version >= SPA_VERSION_RAIDZ_DEFLATE) {
5545 int i;
5546
5547 for (i = 0; i < rvd->vdev_children; i++) {
5548 vd = rvd->vdev_child[i];
5549 if (vd->vdev_deflate_ratio != SPA_MINBLOCKSIZE)
5550 break;
5551 }
5552 if (i == rvd->vdev_children) {
5553 spa->spa_deflate = TRUE;
5554 VERIFY(0 == zap_add(spa->spa_meta_objset,
5555 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE,
5556 sizeof (uint64_t), 1, &spa->spa_deflate, tx));
5557 }
5558 }
5559
5560 /*
5561 * If anything has changed in this txg, or if someone is waiting
5562	 * for this txg to sync (e.g., spa_vdev_remove()), push the
5563 * deferred frees from the previous txg. If not, leave them
5564 * alone so that we don't generate work on an otherwise idle
5565 * system.
5566 */
5567 if (!txg_list_empty(&dp->dp_dirty_datasets, txg) ||
5568 !txg_list_empty(&dp->dp_dirty_dirs, txg) ||
5569 !txg_list_empty(&dp->dp_sync_tasks, txg) ||
5570 ((dsl_scan_active(dp->dp_scan) ||
5571 txg_sync_waiting(dp)) && !spa_shutting_down(spa))) {
5572 zio_t *zio = zio_root(spa, NULL, NULL, 0);
5573 VERIFY3U(bpobj_iterate(defer_bpo,
5574 spa_free_sync_cb, zio, tx), ==, 0);
5575 VERIFY3U(zio_wait(zio), ==, 0);
5576 }
5577
5578 /*
5579 * Iterate to convergence.
5580 */
5581 do {
5582 int pass = ++spa->spa_sync_pass;
5583
5584 spa_sync_config_object(spa, tx);
5585 spa_sync_aux_dev(spa, &spa->spa_spares, tx,
5586 ZPOOL_CONFIG_SPARES, DMU_POOL_SPARES);
5587 spa_sync_aux_dev(spa, &spa->spa_l2cache, tx,
5588 ZPOOL_CONFIG_L2CACHE, DMU_POOL_L2CACHE);
5589 spa_errlog_sync(spa, txg);
5590 dsl_pool_sync(dp, txg);
5591
5592 if (pass <= SYNC_PASS_DEFERRED_FREE) {
5593 zio_t *zio = zio_root(spa, NULL, NULL, 0);
5594 bplist_iterate(free_bpl, spa_free_sync_cb,
5595 zio, tx);
5596 VERIFY(zio_wait(zio) == 0);
5597 } else {
5598 bplist_iterate(free_bpl, bpobj_enqueue_cb,
5599 defer_bpo, tx);
5600 }
5601
5602 ddt_sync(spa, txg);
5603 dsl_scan_sync(dp, tx);
5604
5605 while (vd = txg_list_remove(&spa->spa_vdev_txg_list, txg))
5606 vdev_sync(vd, txg);
5607
5608 if (pass == 1)
5609 spa_sync_upgrades(spa, tx);
5610
5611 } while (dmu_objset_is_dirty(mos, txg));
5612
5613 /*
5614 * Rewrite the vdev configuration (which includes the uberblock)
5615 * to commit the transaction group.
5616 *
5617 * If there are no dirty vdevs, we sync the uberblock to a few
5618 * random top-level vdevs that are known to be visible in the
5619 * config cache (see spa_vdev_add() for a complete description).
5620 * If there *are* dirty vdevs, sync the uberblock to all vdevs.
5621 */
5622 for (;;) {
5623 /*
5624 * We hold SCL_STATE to prevent vdev open/close/etc.
5625 * while we're attempting to write the vdev labels.
5626 */
5627 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
5628
5629 if (list_is_empty(&spa->spa_config_dirty_list)) {
5630 vdev_t *svd[SPA_DVAS_PER_BP];
5631 int svdcount = 0;
5632 int children = rvd->vdev_children;
5633 int c0 = spa_get_random(children);
5634
5635 for (int c = 0; c < children; c++) {
5636 vd = rvd->vdev_child[(c0 + c) % children];
5637 if (vd->vdev_ms_array == 0 || vd->vdev_islog)
5638 continue;
5639 svd[svdcount++] = vd;
5640 if (svdcount == SPA_DVAS_PER_BP)
5641 break;
5642 }
5643 error = vdev_config_sync(svd, svdcount, txg, B_FALSE);
5644 if (error != 0)
5645 error = vdev_config_sync(svd, svdcount, txg,
5646 B_TRUE);
5647 } else {
5648 error = vdev_config_sync(rvd->vdev_child,
5649 rvd->vdev_children, txg, B_FALSE);
5650 if (error != 0)
5651 error = vdev_config_sync(rvd->vdev_child,
5652 rvd->vdev_children, txg, B_TRUE);
5653 }
5654
5655 spa_config_exit(spa, SCL_STATE, FTAG);
5656
5657 if (error == 0)
5658 break;
5659 zio_suspend(spa, NULL);
5660 zio_resume_wait(spa);
5661 }
5662 dmu_tx_commit(tx);
5663
5664 /*
5665 * Clear the dirty config list.
5666 */
5667 while ((vd = list_head(&spa->spa_config_dirty_list)) != NULL)
5668 vdev_config_clean(vd);
5669
5670 /*
5671 * Now that the new config has synced transactionally,
5672 * let it become visible to the config cache.
5673 */
5674 if (spa->spa_config_syncing != NULL) {
5675 spa_config_set(spa, spa->spa_config_syncing);
5676 spa->spa_config_txg = txg;
5677 spa->spa_config_syncing = NULL;
5678 }
5679
5680 spa->spa_ubsync = spa->spa_uberblock;
5681
5682 dsl_pool_sync_done(dp, txg);
5683
5684 /*
5685 * Update usable space statistics.
5686 */
5687 while (vd = txg_list_remove(&spa->spa_vdev_txg_list, TXG_CLEAN(txg)))
5688 vdev_sync_done(vd, txg);
5689
5690 spa_update_dspace(spa);
5691
5692 /*
5693 * It had better be the case that we didn't dirty anything
5694 * since vdev_config_sync().
5695 */
5696 ASSERT(txg_list_empty(&dp->dp_dirty_datasets, txg));
5697 ASSERT(txg_list_empty(&dp->dp_dirty_dirs, txg));
5698 ASSERT(txg_list_empty(&spa->spa_vdev_txg_list, txg));
5699
5700 spa->spa_sync_pass = 0;
5701
5702 spa_config_exit(spa, SCL_CONFIG, FTAG);
5703
5704 spa_handle_ignored_writes(spa);
5705
5706 /*
5707 * If any async tasks have been requested, kick them off.
5708 */
5709 spa_async_dispatch(spa);
5710 }
5711
/*
 * Sync all pools. We don't want to hold the namespace lock across these
 * operations, so we take a reference on the spa_t and drop the lock during the
 * sync.
 */
void
spa_sync_allpools(void)
{
	spa_t *spa = NULL;
	mutex_enter(&spa_namespace_lock);
	while ((spa = spa_next(spa)) != NULL) {
		if (spa_state(spa) != POOL_STATE_ACTIVE ||
		    !spa_writeable(spa) || spa_suspended(spa))
			continue;
		spa_open_ref(spa, FTAG);
		mutex_exit(&spa_namespace_lock);
		txg_wait_synced(spa_get_dsl(spa), 0);
		mutex_enter(&spa_namespace_lock);
		spa_close(spa, FTAG);
	}
	mutex_exit(&spa_namespace_lock);
}

/*
 * ==========================================================================
 * Miscellaneous routines
 * ==========================================================================
 */

/*
 * Remove all pools in the system.
 */
void
spa_evict_all(void)
{
	spa_t *spa;

	/*
	 * Remove all cached state. All pools should be closed now,
	 * so every spa in the AVL tree should be unreferenced.
	 */
	mutex_enter(&spa_namespace_lock);
	while ((spa = spa_next(NULL)) != NULL) {
		/*
		 * Stop async tasks. The async thread may need to detach
		 * a device that's been replaced, which requires grabbing
		 * spa_namespace_lock, so we must drop it here.
		 */
		spa_open_ref(spa, FTAG);
		mutex_exit(&spa_namespace_lock);
		spa_async_suspend(spa);
		mutex_enter(&spa_namespace_lock);
		spa_close(spa, FTAG);

		if (spa->spa_state != POOL_STATE_UNINITIALIZED) {
			spa_unload(spa);
			spa_deactivate(spa);
		}
		spa_remove(spa);
	}
	mutex_exit(&spa_namespace_lock);
}

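/*
 * Look up a vdev in this pool by guid. The main vdev tree is searched first;
 * if 'aux' is set, the L2ARC and spare auxiliary vdev lists are searched as
 * well. Returns NULL if no matching vdev is found.
 */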
vdev_t *
spa_lookup_by_guid(spa_t *spa, uint64_t guid, boolean_t aux)
{
	vdev_t *vd;
	int i;

	if ((vd = vdev_lookup_by_guid(spa->spa_root_vdev, guid)) != NULL)
		return (vd);

	if (aux) {
		for (i = 0; i < spa->spa_l2cache.sav_count; i++) {
			vd = spa->spa_l2cache.sav_vdevs[i];
			if (vd->vdev_guid == guid)
				return (vd);
		}

		for (i = 0; i < spa->spa_spares.sav_count; i++) {
			vd = spa->spa_spares.sav_vdevs[i];
			if (vd->vdev_guid == guid)
				return (vd);
		}
	}

	return (NULL);
}

void
spa_upgrade(spa_t *spa, uint64_t version)
{
	ASSERT(spa_writeable(spa));

	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);

	/*
	 * This should only be called for a non-faulted pool, and since a
	 * pool with a version newer than SPA_VERSION would be unopenable,
	 * the current version should never exceed SPA_VERSION here.
	 */
	ASSERT(spa->spa_uberblock.ub_version <= SPA_VERSION);
	ASSERT(version >= spa->spa_uberblock.ub_version);

	spa->spa_uberblock.ub_version = version;
	vdev_config_dirty(spa->spa_root_vdev);

	spa_config_exit(spa, SCL_ALL, FTAG);

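	/*
	 * Wait for the dirtied config to be written out so that the new
	 * on-disk version takes effect before we return.
	 */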
	txg_wait_synced(spa_get_dsl(spa), 0);
}

boolean_t
spa_has_spare(spa_t *spa, uint64_t guid)
{
	int i;
	uint64_t spareguid;
	spa_aux_vdev_t *sav = &spa->spa_spares;

	for (i = 0; i < sav->sav_count; i++)
		if (sav->sav_vdevs[i]->vdev_guid == guid)
			return (B_TRUE);

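	/*
	 * Also check spares that are still being added; at this point they
	 * exist only as pending nvlist configs, not as vdevs.
	 */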
	for (i = 0; i < sav->sav_npending; i++) {
		if (nvlist_lookup_uint64(sav->sav_pending[i], ZPOOL_CONFIG_GUID,
		    &spareguid) == 0 && spareguid == guid)
			return (B_TRUE);
	}

	return (B_FALSE);
}

/*
 * Check if a pool has an active shared spare device.
 * Note: the reference count of an active spare is 2: once as a spare and
 * once as a replacement.
 */
static boolean_t
spa_has_active_shared_spare(spa_t *spa)
{
	int i, refcnt;
	uint64_t pool;
	spa_aux_vdev_t *sav = &spa->spa_spares;

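	/*
	 * An active spare is referenced twice by its own pool (see the note
	 * above), so a reference count greater than two means some other
	 * pool also has this spare configured, i.e. it is shared.
	 */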
	for (i = 0; i < sav->sav_count; i++) {
		if (spa_spare_exists(sav->sav_vdevs[i]->vdev_guid, &pool,
		    &refcnt) && pool != 0ULL && pool == spa_guid(spa) &&
		    refcnt > 2)
			return (B_TRUE);
	}

	return (B_FALSE);
}

/*
 * Post a sysevent corresponding to the given event. The 'name' must be one of
 * the event definitions in sys/sysevent/eventdefs.h. The payload will be
 * filled in from the spa and (optionally) the vdev. This doesn't do anything
 * in the userland libzpool, as we don't want consumers to misinterpret ztest
 * or zdb as real changes.
 */
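/*
 * Illustrative only (a hypothetical call site, not part of this file):
 *
 *	spa_event_notify(spa, vd, ESC_ZFS_VDEV_REMOVE);
 *
 * would post an EC_ZFS / ESC_ZFS_VDEV_REMOVE sysevent carrying the pool name,
 * pool guid, and the vdev's guid and path.
 */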
void
spa_event_notify(spa_t *spa, vdev_t *vd, const char *name)
{
#ifdef _KERNEL
	sysevent_t *ev;
	sysevent_attr_list_t *attr = NULL;
	sysevent_value_t value;
	sysevent_id_t eid;

	ev = sysevent_alloc(EC_ZFS, (char *)name, SUNW_KERN_PUB "zfs",
	    SE_SLEEP);

	value.value_type = SE_DATA_TYPE_STRING;
	value.value.sv_string = spa_name(spa);
	if (sysevent_add_attr(&attr, ZFS_EV_POOL_NAME, &value, SE_SLEEP) != 0)
		goto done;

	value.value_type = SE_DATA_TYPE_UINT64;
	value.value.sv_uint64 = spa_guid(spa);
	if (sysevent_add_attr(&attr, ZFS_EV_POOL_GUID, &value, SE_SLEEP) != 0)
		goto done;

	if (vd) {
		value.value_type = SE_DATA_TYPE_UINT64;
		value.value.sv_uint64 = vd->vdev_guid;
		if (sysevent_add_attr(&attr, ZFS_EV_VDEV_GUID, &value,
		    SE_SLEEP) != 0)
			goto done;

		if (vd->vdev_path) {
			value.value_type = SE_DATA_TYPE_STRING;
			value.value.sv_string = vd->vdev_path;
			if (sysevent_add_attr(&attr, ZFS_EV_VDEV_PATH,
			    &value, SE_SLEEP) != 0)
				goto done;
		}
	}

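	/*
	 * Attach the attribute list to the event; once attached it is owned
	 * by the event, so clear 'attr' below to keep the error path from
	 * freeing it a second time.
	 */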
	if (sysevent_attach_attributes(ev, attr) != 0)
		goto done;
	attr = NULL;

	(void) log_sysevent(ev, SE_SLEEP, &eid);

done:
	if (attr)
		sysevent_free_attr(attr);
	sysevent_free(ev);
#endif
}