/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012 by Delphix. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/spa_impl.h>
#include <sys/zio.h>
#include <sys/zio_checksum.h>
#include <sys/zio_compress.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/zap.h>
#include <sys/zil.h>
#include <sys/vdev_impl.h>
#include <sys/metaslab.h>
#include <sys/uberblock_impl.h>
#include <sys/txg.h>
#include <sys/avl.h>
#include <sys/unique.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_scan.h>
#include <sys/fs/zfs.h>
#include <sys/metaslab_impl.h>
#include <sys/arc.h>
#include <sys/ddt.h>
#include "zfs_prop.h"
#include "zfeature_common.h"

/*
 * SPA locking
 *
 * There are four basic locks for managing spa_t structures:
 *
 * spa_namespace_lock (global mutex)
 *
 *	This lock must be acquired to do any of the following:
 *
 *	- Lookup a spa_t by name
 *	- Add or remove a spa_t from the namespace
 *	- Increase spa_refcount from zero
 *	- Check if spa_refcount is zero
 *	- Rename a spa_t
 *	- add/remove/attach/detach devices
 *	- Held for the duration of create/destroy/import/export
 *
 *	It does not need to handle recursion. A create or destroy may
 *	reference objects (files or zvols) in other pools, but by
 *	definition they must have an existing reference, and will never need
 *	to lookup a spa_t by name.
 *
 * spa_refcount (per-spa refcount_t protected by mutex)
 *
 *	This reference count keeps track of any active users of the spa_t. The
 *	spa_t cannot be destroyed or freed while this is non-zero. Internally,
 *	the refcount is never really 'zero' - opening a pool implicitly keeps
 *	some references in the DMU. Internally we check against spa_minref, but
 *	present the image of a zero/non-zero value to consumers.
 *
 * spa_config_lock[] (per-spa array of rwlocks)
 *
 *	This protects the spa_t from config changes, and must be held in
 *	the following circumstances:
 *
 *	- RW_READER to perform I/O to the spa
 *	- RW_WRITER to change the vdev config
 *
 * The locking order is fairly straightforward:
 *
 *		spa_namespace_lock	->	spa_refcount
 *
 *	The namespace lock must be acquired to increase the refcount from 0
 *	or to check if it is zero.
 *
 *		spa_refcount		->	spa_config_lock[]
 *
 *	There must be at least one valid reference on the spa_t to acquire
 *	the config lock.
 *
 *		spa_namespace_lock	->	spa_config_lock[]
 *
 *	The namespace lock must always be taken before the config lock.
 *
 *
 * The spa_namespace_lock can be acquired directly and is globally visible.
 *
 * The namespace is manipulated using the following functions, all of which
 * require the spa_namespace_lock to be held.
 *
 *	spa_lookup()		Lookup a spa_t by name.
 *
 *	spa_add()		Create a new spa_t in the namespace.
 *
 *	spa_remove()		Remove a spa_t from the namespace. This also
 *				frees up any memory associated with the spa_t.
 *
 *	spa_next()		Returns the next spa_t in the system, or the
 *				first if NULL is passed.
 *
 *	spa_evict_all()		Shutdown and remove all spa_t structures in
 *				the system.
 *
 *	spa_guid_exists()	Determine whether a pool/device guid exists.
 *
 * The spa_refcount is manipulated using the following functions:
 *
 *	spa_open_ref()		Adds a reference to the given spa_t. Must be
 *				called with spa_namespace_lock held if the
 *				refcount is currently zero.
 *
 *	spa_close()		Remove a reference from the spa_t. This will
 *				not free the spa_t or remove it from the
 *				namespace. No locking is required.
 *
 *	spa_refcount_zero()	Returns true if the refcount is currently
 *				zero. Must be called with spa_namespace_lock
 *				held.
 *
 * The spa_config_lock[] is an array of rwlocks, ordered as follows:
 * SCL_CONFIG > SCL_STATE > SCL_ALLOC > SCL_ZIO > SCL_FREE > SCL_VDEV.
 * spa_config_lock[] is manipulated with spa_config_{enter,exit,held}().
 *
 * To read the configuration, it suffices to hold one of these locks as reader.
 * To modify the configuration, you must hold all locks as writer. To modify
 * vdev state without altering the vdev tree's topology (e.g. online/offline),
 * you must hold SCL_STATE and SCL_ZIO as writer.
 *
 * We use these distinct config locks to avoid recursive lock entry.
 * For example, spa_sync() (which holds SCL_CONFIG as reader) induces
 * block allocations (SCL_ALLOC), which may require reading space maps
 * from disk (dmu_read() -> zio_read() -> SCL_ZIO).
 *
 * The spa config locks cannot be normal rwlocks because we need the
 * ability to hand off ownership. For example, SCL_ZIO is acquired
 * by the issuing thread and later released by an interrupt thread.
 * They do, however, obey the usual write-wanted semantics to prevent
 * writer (i.e. system administrator) starvation.
 *
 * The lock acquisition rules are as follows:
 *
 * SCL_CONFIG
 *	Protects changes to the vdev tree topology, such as vdev
 *	add/remove/attach/detach. Protects the dirty config list
 *	(spa_config_dirty_list) and the set of spares and l2arc devices.
 *
 * SCL_STATE
 *	Protects changes to pool state and vdev state, such as vdev
 *	online/offline/fault/degrade/clear. Protects the dirty state list
 *	(spa_state_dirty_list) and global pool state (spa_state).
 *
 * SCL_ALLOC
 *	Protects changes to metaslab groups and classes.
 *	Held as reader by metaslab_alloc() and metaslab_claim().
 *
 * SCL_ZIO
 *	Held by bp-level zios (those which have no io_vd upon entry)
 *	to prevent changes to the vdev tree. The bp-level zio implicitly
 *	protects all of its vdev child zios, which do not hold SCL_ZIO.
 *
 * SCL_FREE
 *	Protects changes to metaslab groups and classes.
 *	Held as reader by metaslab_free(). SCL_FREE is distinct from
 *	SCL_ALLOC, and lower than SCL_ZIO, so that we can safely free
 *	blocks in zio_done() while another i/o that holds either
 *	SCL_ALLOC or SCL_ZIO is waiting for this i/o to complete.
 *
 * SCL_VDEV
 *	Held as reader to prevent changes to the vdev tree during trivial
 *	inquiries such as bp_get_dsize(). SCL_VDEV is distinct from the
 *	other locks, and lower than all of them, to ensure that it's safe
 *	to acquire regardless of caller context.
 *
 * In addition, the following rules apply:
 *
 * (a)	spa_props_lock protects pool properties, spa_config and spa_config_list.
 *	The lock ordering is SCL_CONFIG > spa_props_lock.
 *
 * (b)	I/O operations on leaf vdevs. For any zio operation that takes
 *	an explicit vdev_t argument -- such as zio_ioctl(), zio_read_phys(),
 *	or zio_write_phys() -- the caller must ensure that the config cannot
 *	change in the interim, and that the vdev cannot be reopened.
 *	SCL_STATE as reader suffices for both.
 *
 * The vdev configuration is protected by spa_vdev_enter() / spa_vdev_exit().
 *
 *	spa_vdev_enter()	Acquire the namespace lock and the config lock
 *				for writing.
 *
 *	spa_vdev_exit()		Release the config lock, wait for all I/O
 *				to complete, sync the updated configs to the
 *				cache, and release the namespace lock.
 *
 * vdev state is protected by spa_vdev_state_enter() / spa_vdev_state_exit().
 * Like spa_vdev_enter/exit, these are convenience wrappers -- the actual
 * locking is, always, based on spa_namespace_lock and spa_config_lock[].
 *
 * spa_rename() is also implemented within this file since it requires
 * manipulation of the namespace.
 */
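
/*
 * As an illustration only (a sketch of the ordering above, not a verbatim
 * caller from this file), a reader that needs a stable view of the
 * configuration takes the locks in the documented order:
 *
 *	spa_open_ref(spa, FTAG);	(refcount before config lock)
 *	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
 *	... inspect the vdev tree ...
 *	spa_config_exit(spa, SCL_VDEV, FTAG);
 *	spa_close(spa, FTAG);
 */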

static avl_tree_t spa_namespace_avl;
kmutex_t spa_namespace_lock;
static kcondvar_t spa_namespace_cv;
static int spa_active_count;
int spa_max_replication_override = SPA_DVAS_PER_BP;

static kmutex_t spa_spare_lock;
static avl_tree_t spa_spare_avl;
static kmutex_t spa_l2cache_lock;
static avl_tree_t spa_l2cache_avl;

kmem_cache_t *spa_buffer_pool;
int spa_mode_global;

#ifdef ZFS_DEBUG
/* Everything except dprintf is on by default in debug builds */
int zfs_flags = ~ZFS_DEBUG_DPRINTF;
#else
int zfs_flags = 0;
#endif

/*
 * zfs_recover can be set to nonzero to attempt to recover from
 * otherwise-fatal errors, typically caused by on-disk corruption. When
 * set, calls to zfs_panic_recover() will turn into warning messages.
 */
int zfs_recover = 0;

/*
 * ==========================================================================
 * SPA config locking
 * ==========================================================================
 */
static void
spa_config_lock_init(spa_t *spa)
{
	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		mutex_init(&scl->scl_lock, NULL, MUTEX_DEFAULT, NULL);
		cv_init(&scl->scl_cv, NULL, CV_DEFAULT, NULL);
		refcount_create(&scl->scl_count);
		scl->scl_writer = NULL;
		scl->scl_write_wanted = 0;
	}
}

static void
spa_config_lock_destroy(spa_t *spa)
{
	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		mutex_destroy(&scl->scl_lock);
		cv_destroy(&scl->scl_cv);
		refcount_destroy(&scl->scl_count);
		ASSERT(scl->scl_writer == NULL);
		ASSERT(scl->scl_write_wanted == 0);
	}
}

int
spa_config_tryenter(spa_t *spa, int locks, void *tag, krw_t rw)
{
	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		if (!(locks & (1 << i)))
			continue;
		mutex_enter(&scl->scl_lock);
		if (rw == RW_READER) {
			if (scl->scl_writer || scl->scl_write_wanted) {
				mutex_exit(&scl->scl_lock);
				spa_config_exit(spa, locks & ((1 << i) - 1),
				    tag);
				return (0);
			}
		} else {
			ASSERT(scl->scl_writer != curthread);
			if (!refcount_is_zero(&scl->scl_count)) {
				mutex_exit(&scl->scl_lock);
				spa_config_exit(spa, locks & ((1 << i) - 1),
				    tag);
				return (0);
			}
			scl->scl_writer = curthread;
		}
		(void) refcount_add(&scl->scl_count, tag);
		mutex_exit(&scl->scl_lock);
	}
	return (1);
}
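
/*
 * Illustrative use of the non-blocking variant (a sketch, not a caller
 * from this file): on failure, spa_config_tryenter() has already dropped
 * whatever locks it acquired, so the caller can simply retry or bail out:
 *
 *	if (!spa_config_tryenter(spa, SCL_ZIO, FTAG, RW_READER))
 *		return (B_FALSE);	(locks unavailable; try again later)
 *	...
 *	spa_config_exit(spa, SCL_ZIO, FTAG);
 */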

void
spa_config_enter(spa_t *spa, int locks, void *tag, krw_t rw)
{
	int wlocks_held = 0;

	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		if (scl->scl_writer == curthread)
			wlocks_held |= (1 << i);
		if (!(locks & (1 << i)))
			continue;
		mutex_enter(&scl->scl_lock);
		if (rw == RW_READER) {
			while (scl->scl_writer || scl->scl_write_wanted) {
				cv_wait(&scl->scl_cv, &scl->scl_lock);
			}
		} else {
			ASSERT(scl->scl_writer != curthread);
			while (!refcount_is_zero(&scl->scl_count)) {
				scl->scl_write_wanted++;
				cv_wait(&scl->scl_cv, &scl->scl_lock);
				scl->scl_write_wanted--;
			}
			scl->scl_writer = curthread;
		}
		(void) refcount_add(&scl->scl_count, tag);
		mutex_exit(&scl->scl_lock);
	}
	ASSERT(wlocks_held <= locks);
}

void
spa_config_exit(spa_t *spa, int locks, void *tag)
{
	for (int i = SCL_LOCKS - 1; i >= 0; i--) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		if (!(locks & (1 << i)))
			continue;
		mutex_enter(&scl->scl_lock);
		ASSERT(!refcount_is_zero(&scl->scl_count));
		if (refcount_remove(&scl->scl_count, tag) == 0) {
			ASSERT(scl->scl_writer == NULL ||
			    scl->scl_writer == curthread);
			scl->scl_writer = NULL;	/* OK in either case */
			cv_broadcast(&scl->scl_cv);
		}
		mutex_exit(&scl->scl_lock);
	}
}

int
spa_config_held(spa_t *spa, int locks, krw_t rw)
{
	int locks_held = 0;

	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		if (!(locks & (1 << i)))
			continue;
		if ((rw == RW_READER && !refcount_is_zero(&scl->scl_count)) ||
		    (rw == RW_WRITER && scl->scl_writer == curthread))
			locks_held |= 1 << i;
	}

	return (locks_held);
}
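
/*
 * Illustrative only: spa_config_held() is typically used in ASSERTs to
 * verify a caller's locking contract, e.g.:
 *
 *	ASSERT(spa_config_held(spa, SCL_VDEV, RW_READER) != 0);
 */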

/*
 * ==========================================================================
 * SPA namespace functions
 * ==========================================================================
 */

/*
 * Lookup the named spa_t in the AVL tree. The spa_namespace_lock must be held.
 * Returns NULL if no matching spa_t is found.
 */
spa_t *
spa_lookup(const char *name)
{
	static spa_t search;	/* spa_t is large; don't allocate on stack */
	spa_t *spa;
	avl_index_t where;
	char c;
	char *cp;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	/*
	 * If it's a full dataset name, figure out the pool name and
	 * just use that.
	 */
	cp = strpbrk(name, "/@");
	if (cp) {
		c = *cp;
		*cp = '\0';
	}

	(void) strlcpy(search.spa_name, name, sizeof (search.spa_name));
	spa = avl_find(&spa_namespace_avl, &search, &where);

	if (cp)
		*cp = c;

	return (spa);
}
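
/*
 * For example (hypothetical name): spa_lookup("tank/home@snap") truncates
 * the string at the first '/' or '@', searches the namespace for the pool
 * "tank", and restores the caller's string before returning.
 */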

/*
 * Create an uninitialized spa_t with the given name. Requires
 * spa_namespace_lock. The caller must ensure that the spa_t doesn't already
 * exist by calling spa_lookup() first.
 */
spa_t *
spa_add(const char *name, nvlist_t *config, const char *altroot)
{
	spa_t *spa;
	spa_config_dirent_t *dp;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	spa = kmem_zalloc(sizeof (spa_t), KM_SLEEP);

	mutex_init(&spa->spa_async_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_errlist_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_errlog_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_history_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_proc_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_props_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_scrub_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_suspend_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_vdev_top_lock, NULL, MUTEX_DEFAULT, NULL);

	cv_init(&spa->spa_async_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&spa->spa_proc_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&spa->spa_scrub_io_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&spa->spa_suspend_cv, NULL, CV_DEFAULT, NULL);

	for (int t = 0; t < TXG_SIZE; t++)
		bplist_create(&spa->spa_free_bplist[t]);

	(void) strlcpy(spa->spa_name, name, sizeof (spa->spa_name));
	spa->spa_state = POOL_STATE_UNINITIALIZED;
	spa->spa_freeze_txg = UINT64_MAX;
	spa->spa_final_txg = UINT64_MAX;
	spa->spa_load_max_txg = UINT64_MAX;
	spa->spa_proc = &p0;
	spa->spa_proc_state = SPA_PROC_NONE;

	refcount_create(&spa->spa_refcount);
	spa_config_lock_init(spa);

	avl_add(&spa_namespace_avl, spa);

	/*
	 * Set the alternate root, if there is one.
	 */
	if (altroot) {
		spa->spa_root = spa_strdup(altroot);
		spa_active_count++;
	}

	/*
	 * Every pool starts with the default cachefile
	 */
	list_create(&spa->spa_config_list, sizeof (spa_config_dirent_t),
	    offsetof(spa_config_dirent_t, scd_link));

	dp = kmem_zalloc(sizeof (spa_config_dirent_t), KM_SLEEP);
	dp->scd_path = altroot ? NULL : spa_strdup(spa_config_path);
	list_insert_head(&spa->spa_config_list, dp);

	VERIFY(nvlist_alloc(&spa->spa_load_info, NV_UNIQUE_NAME,
	    KM_SLEEP) == 0);

	if (config != NULL) {
		nvlist_t *features;

		if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_FEATURES_FOR_READ,
		    &features) == 0) {
			VERIFY(nvlist_dup(features, &spa->spa_label_features,
			    0) == 0);
		}

		VERIFY(nvlist_dup(config, &spa->spa_config, 0) == 0);
	}

	if (spa->spa_label_features == NULL) {
		VERIFY(nvlist_alloc(&spa->spa_label_features, NV_UNIQUE_NAME,
		    KM_SLEEP) == 0);
	}

	return (spa);
}

/*
 * Removes a spa_t from the namespace, freeing up any memory used. Requires
 * spa_namespace_lock. This is called only after the spa_t has been closed and
 * deactivated.
 */
void
spa_remove(spa_t *spa)
{
	spa_config_dirent_t *dp;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));
	ASSERT(spa->spa_state == POOL_STATE_UNINITIALIZED);

	nvlist_free(spa->spa_config_splitting);

	avl_remove(&spa_namespace_avl, spa);
	cv_broadcast(&spa_namespace_cv);

	if (spa->spa_root) {
		spa_strfree(spa->spa_root);
		spa_active_count--;
	}

	while ((dp = list_head(&spa->spa_config_list)) != NULL) {
		list_remove(&spa->spa_config_list, dp);
		if (dp->scd_path != NULL)
			spa_strfree(dp->scd_path);
		kmem_free(dp, sizeof (spa_config_dirent_t));
	}

	list_destroy(&spa->spa_config_list);

	nvlist_free(spa->spa_label_features);
	nvlist_free(spa->spa_load_info);
	spa_config_set(spa, NULL);

	refcount_destroy(&spa->spa_refcount);

	spa_config_lock_destroy(spa);

	for (int t = 0; t < TXG_SIZE; t++)
		bplist_destroy(&spa->spa_free_bplist[t]);

	cv_destroy(&spa->spa_async_cv);
	cv_destroy(&spa->spa_proc_cv);
	cv_destroy(&spa->spa_scrub_io_cv);
	cv_destroy(&spa->spa_suspend_cv);

	mutex_destroy(&spa->spa_async_lock);
	mutex_destroy(&spa->spa_errlist_lock);
	mutex_destroy(&spa->spa_errlog_lock);
	mutex_destroy(&spa->spa_history_lock);
	mutex_destroy(&spa->spa_proc_lock);
	mutex_destroy(&spa->spa_props_lock);
	mutex_destroy(&spa->spa_scrub_lock);
	mutex_destroy(&spa->spa_suspend_lock);
	mutex_destroy(&spa->spa_vdev_top_lock);

	kmem_free(spa, sizeof (spa_t));
}

/*
 * Given a pool, return the next pool in the namespace, or NULL if there is
 * none. If 'prev' is NULL, return the first pool.
 */
spa_t *
spa_next(spa_t *prev)
{
	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	if (prev)
		return (AVL_NEXT(&spa_namespace_avl, prev));
	else
		return (avl_first(&spa_namespace_avl));
}

/*
 * ==========================================================================
 * SPA refcount functions
 * ==========================================================================
 */

/*
 * Add a reference to the given spa_t. Must have at least one reference, or
 * have the namespace lock held.
 */
void
spa_open_ref(spa_t *spa, void *tag)
{
	ASSERT(refcount_count(&spa->spa_refcount) >= spa->spa_minref ||
	    MUTEX_HELD(&spa_namespace_lock));
	(void) refcount_add(&spa->spa_refcount, tag);
}

/*
 * Remove a reference to the given spa_t. Must have at least one reference, or
 * have the namespace lock held.
 */
void
spa_close(spa_t *spa, void *tag)
{
	ASSERT(refcount_count(&spa->spa_refcount) > spa->spa_minref ||
	    MUTEX_HELD(&spa_namespace_lock));
	(void) refcount_remove(&spa->spa_refcount, tag);
}

/*
 * Check to see if the spa refcount is zero. Must be called with
 * spa_namespace_lock held. We really compare against spa_minref, which is the
 * number of references acquired when opening a pool.
 */
boolean_t
spa_refcount_zero(spa_t *spa)
{
	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	return (refcount_count(&spa->spa_refcount) == spa->spa_minref);
}
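
/*
 * Illustrative open/close pattern under the rules above (a sketch, not a
 * specific caller from this file):
 *
 *	mutex_enter(&spa_namespace_lock);
 *	if ((spa = spa_lookup(name)) != NULL)
 *		spa_open_ref(spa, FTAG);	(lock held: refcount may be 0)
 *	mutex_exit(&spa_namespace_lock);
 *	...
 *	spa_close(spa, FTAG);			(no locking required)
 */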

/*
 * ==========================================================================
 * SPA spare and l2cache tracking
 * ==========================================================================
 */

/*
 * Hot spares and cache devices are tracked using the same code below,
 * for 'auxiliary' devices.
 */

typedef struct spa_aux {
	uint64_t	aux_guid;
	uint64_t	aux_pool;
	avl_node_t	aux_avl;
	int		aux_count;
} spa_aux_t;

static int
spa_aux_compare(const void *a, const void *b)
{
	const spa_aux_t *sa = a;
	const spa_aux_t *sb = b;

	if (sa->aux_guid < sb->aux_guid)
		return (-1);
	else if (sa->aux_guid > sb->aux_guid)
		return (1);
	else
		return (0);
}

void
spa_aux_add(vdev_t *vd, avl_tree_t *avl)
{
	avl_index_t where;
	spa_aux_t search;
	spa_aux_t *aux;

	search.aux_guid = vd->vdev_guid;
	if ((aux = avl_find(avl, &search, &where)) != NULL) {
		aux->aux_count++;
	} else {
		aux = kmem_zalloc(sizeof (spa_aux_t), KM_SLEEP);
		aux->aux_guid = vd->vdev_guid;
		aux->aux_count = 1;
		avl_insert(avl, aux, where);
	}
}

void
spa_aux_remove(vdev_t *vd, avl_tree_t *avl)
{
	spa_aux_t search;
	spa_aux_t *aux;
	avl_index_t where;

	search.aux_guid = vd->vdev_guid;
	aux = avl_find(avl, &search, &where);

	ASSERT(aux != NULL);

	if (--aux->aux_count == 0) {
		avl_remove(avl, aux);
		kmem_free(aux, sizeof (spa_aux_t));
	} else if (aux->aux_pool == spa_guid(vd->vdev_spa)) {
		aux->aux_pool = 0ULL;
	}
}

boolean_t
spa_aux_exists(uint64_t guid, uint64_t *pool, int *refcnt, avl_tree_t *avl)
{
	spa_aux_t search, *found;

	search.aux_guid = guid;
	found = avl_find(avl, &search, NULL);

	if (pool) {
		if (found)
			*pool = found->aux_pool;
		else
			*pool = 0ULL;
	}

	if (refcnt) {
		if (found)
			*refcnt = found->aux_count;
		else
			*refcnt = 0;
	}

	return (found != NULL);
}

void
spa_aux_activate(vdev_t *vd, avl_tree_t *avl)
{
	spa_aux_t search, *found;
	avl_index_t where;

	search.aux_guid = vd->vdev_guid;
	found = avl_find(avl, &search, &where);
	ASSERT(found != NULL);
	ASSERT(found->aux_pool == 0ULL);

	found->aux_pool = spa_guid(vd->vdev_spa);
}

/*
 * Spares are tracked globally due to the following constraints:
 *
 *	- A spare may be part of multiple pools.
 *	- A spare may be added to a pool even if it's actively in use within
 *	  another pool.
 *	- A spare in use in any pool can only be the source of a replacement if
 *	  the target is a spare in the same pool.
 *
 * We keep track of all spares on the system through the use of a reference
 * counted AVL tree. When a vdev is added as a spare, or used as a replacement
 * spare, then we bump the reference count in the AVL tree. In addition, we set
 * the 'vdev_isspare' member to indicate that the device is a spare (active or
 * inactive). When a spare is made active (used to replace a device in the
 * pool), we also keep track of which pool it's been made a part of.
 *
 * The 'spa_spare_lock' protects the AVL tree. These functions are normally
 * called under the spa_namespace lock as part of vdev reconfiguration. The
 * separate spare lock exists for the status query path, which does not need to
 * be completely consistent with respect to other vdev configuration changes.
 */
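
/*
 * For example (a hypothetical check, for illustration only): import code
 * can ask whether a device is already an active spare in some other pool:
 *
 *	uint64_t pool;
 *
 *	if (spa_spare_exists(vd->vdev_guid, &pool, NULL) &&
 *	    pool != 0ULL && pool != spa_guid(spa))
 *		... the device is busy as a spare elsewhere ...
 */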

static int
spa_spare_compare(const void *a, const void *b)
{
	return (spa_aux_compare(a, b));
}

void
spa_spare_add(vdev_t *vd)
{
	mutex_enter(&spa_spare_lock);
	ASSERT(!vd->vdev_isspare);
	spa_aux_add(vd, &spa_spare_avl);
	vd->vdev_isspare = B_TRUE;
	mutex_exit(&spa_spare_lock);
}

void
spa_spare_remove(vdev_t *vd)
{
	mutex_enter(&spa_spare_lock);
	ASSERT(vd->vdev_isspare);
	spa_aux_remove(vd, &spa_spare_avl);
	vd->vdev_isspare = B_FALSE;
	mutex_exit(&spa_spare_lock);
}

boolean_t
spa_spare_exists(uint64_t guid, uint64_t *pool, int *refcnt)
{
	boolean_t found;

	mutex_enter(&spa_spare_lock);
	found = spa_aux_exists(guid, pool, refcnt, &spa_spare_avl);
	mutex_exit(&spa_spare_lock);

	return (found);
}

void
spa_spare_activate(vdev_t *vd)
{
	mutex_enter(&spa_spare_lock);
	ASSERT(vd->vdev_isspare);
	spa_aux_activate(vd, &spa_spare_avl);
	mutex_exit(&spa_spare_lock);
}

/*
 * Level 2 ARC devices are tracked globally for the same reasons as spares.
 * Cache devices currently only support one pool per cache device, and so
 * for these devices the aux reference count is currently unused beyond 1.
 */

static int
spa_l2cache_compare(const void *a, const void *b)
{
	return (spa_aux_compare(a, b));
}

void
spa_l2cache_add(vdev_t *vd)
{
	mutex_enter(&spa_l2cache_lock);
	ASSERT(!vd->vdev_isl2cache);
	spa_aux_add(vd, &spa_l2cache_avl);
	vd->vdev_isl2cache = B_TRUE;
	mutex_exit(&spa_l2cache_lock);
}

void
spa_l2cache_remove(vdev_t *vd)
{
	mutex_enter(&spa_l2cache_lock);
	ASSERT(vd->vdev_isl2cache);
	spa_aux_remove(vd, &spa_l2cache_avl);
	vd->vdev_isl2cache = B_FALSE;
	mutex_exit(&spa_l2cache_lock);
}

boolean_t
spa_l2cache_exists(uint64_t guid, uint64_t *pool)
{
	boolean_t found;

	mutex_enter(&spa_l2cache_lock);
	found = spa_aux_exists(guid, pool, NULL, &spa_l2cache_avl);
	mutex_exit(&spa_l2cache_lock);

	return (found);
}

void
spa_l2cache_activate(vdev_t *vd)
{
	mutex_enter(&spa_l2cache_lock);
	ASSERT(vd->vdev_isl2cache);
	spa_aux_activate(vd, &spa_l2cache_avl);
	mutex_exit(&spa_l2cache_lock);
}

/*
 * ==========================================================================
 * SPA vdev locking
 * ==========================================================================
 */

/*
 * Lock the given spa_t for the purpose of adding or removing a vdev.
 * Grabs the global spa_namespace_lock plus the spa config lock for writing.
 * It returns the next transaction group for the spa_t.
 */
uint64_t
spa_vdev_enter(spa_t *spa)
{
	mutex_enter(&spa->spa_vdev_top_lock);
	mutex_enter(&spa_namespace_lock);
	return (spa_vdev_config_enter(spa));
}

/*
 * Internal implementation for spa_vdev_enter(). Used when a vdev
 * operation requires multiple syncs (i.e. removing a device) while
 * keeping the spa_namespace_lock held.
 */
uint64_t
spa_vdev_config_enter(spa_t *spa)
{
	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	spa_config_enter(spa, SCL_ALL, spa, RW_WRITER);

	return (spa_last_synced_txg(spa) + 1);
}

/*
 * Used in combination with spa_vdev_config_enter() to allow the syncing
 * of multiple transactions without releasing the spa_namespace_lock.
 */
void
spa_vdev_config_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error, char *tag)
{
	boolean_t config_changed = B_FALSE;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));
	ASSERT(txg > spa_last_synced_txg(spa));

	spa->spa_pending_vdev = NULL;

	/*
	 * Reassess the DTLs.
	 */
	vdev_dtl_reassess(spa->spa_root_vdev, 0, 0, B_FALSE);

	if (error == 0 && !list_is_empty(&spa->spa_config_dirty_list)) {
		config_changed = B_TRUE;
		spa->spa_config_generation++;
	}

	/*
	 * Verify the metaslab classes.
	 */
	ASSERT(metaslab_class_validate(spa_normal_class(spa)) == 0);
	ASSERT(metaslab_class_validate(spa_log_class(spa)) == 0);

	spa_config_exit(spa, SCL_ALL, spa);

	/*
	 * Panic the system if the specified tag requires it. This
	 * is useful for ensuring that configurations are updated
	 * transactionally.
	 */
	if (zio_injection_enabled)
		zio_handle_panic_injection(spa, tag, 0);

	/*
	 * Note: this txg_wait_synced() is important because it ensures
	 * that there won't be more than one config change per txg.
	 * This allows us to use the txg as the generation number.
	 */
	if (error == 0)
		txg_wait_synced(spa->spa_dsl_pool, txg);

	if (vd != NULL) {
		ASSERT(!vd->vdev_detached || vd->vdev_dtl_smo.smo_object == 0);
		spa_config_enter(spa, SCL_ALL, spa, RW_WRITER);
		vdev_free(vd);
		spa_config_exit(spa, SCL_ALL, spa);
	}

	/*
	 * If the config changed, update the config cache.
	 */
	if (config_changed)
		spa_config_sync(spa, B_FALSE, B_TRUE);
}

/*
 * Unlock the spa_t after adding or removing a vdev. Besides undoing the
 * locking of spa_vdev_enter(), we also want to make sure the transactions have
 * synced to disk, and then update the global configuration cache with the new
 * information.
 */
int
spa_vdev_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error)
{
	spa_vdev_config_exit(spa, vd, txg, error, FTAG);
	mutex_exit(&spa_namespace_lock);
	mutex_exit(&spa->spa_vdev_top_lock);

	return (error);
}
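
/*
 * Typical add/remove pattern (an illustration only; see e.g. spa_vdev_add()
 * in spa.c for a real caller):
 *
 *	txg = spa_vdev_enter(spa);
 *	... create or detach vdevs, dirty the config ...
 *	return (spa_vdev_exit(spa, vd, txg, error));
 */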

/*
 * Lock the given spa_t for the purpose of changing vdev state.
 */
void
spa_vdev_state_enter(spa_t *spa, int oplocks)
{
	int locks = SCL_STATE_ALL | oplocks;

	/*
	 * Root pools may need to read from the underlying devfs filesystem
	 * when opening up a vdev. Unfortunately if we're holding the
	 * SCL_ZIO lock it will result in a deadlock when we try to issue
	 * the read from the root filesystem. Instead we "prefetch"
	 * the associated vnodes that we need prior to opening the
	 * underlying devices and cache them so that we can prevent
	 * any I/O when we are doing the actual open.
	 */
	if (spa_is_root(spa)) {
		int low = locks & ~(SCL_ZIO - 1);
		int high = locks & ~low;

		spa_config_enter(spa, high, spa, RW_WRITER);
		vdev_hold(spa->spa_root_vdev);
		spa_config_enter(spa, low, spa, RW_WRITER);
	} else {
		spa_config_enter(spa, locks, spa, RW_WRITER);
	}
	spa->spa_vdev_locks = locks;
}

int
spa_vdev_state_exit(spa_t *spa, vdev_t *vd, int error)
{
	boolean_t config_changed = B_FALSE;

	if (vd != NULL || error == 0)
		vdev_dtl_reassess(vd ? vd->vdev_top : spa->spa_root_vdev,
		    0, 0, B_FALSE);

	if (vd != NULL) {
		vdev_state_dirty(vd->vdev_top);
		config_changed = B_TRUE;
		spa->spa_config_generation++;
	}

	if (spa_is_root(spa))
		vdev_rele(spa->spa_root_vdev);

	ASSERT3U(spa->spa_vdev_locks, >=, SCL_STATE_ALL);
	spa_config_exit(spa, spa->spa_vdev_locks, spa);

	/*
	 * If anything changed, wait for it to sync. This ensures that,
	 * from the system administrator's perspective, zpool(1M) commands
	 * are synchronous. This is important for things like zpool offline:
	 * when the command completes, you expect no further I/O from ZFS.
	 */
	if (vd != NULL)
		txg_wait_synced(spa->spa_dsl_pool, 0);

	/*
	 * If the config changed, update the config cache.
	 */
	if (config_changed) {
		mutex_enter(&spa_namespace_lock);
		spa_config_sync(spa, B_FALSE, B_TRUE);
		mutex_exit(&spa_namespace_lock);
	}

	return (error);
}
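
/*
 * Typical state-change pattern (an illustration only, not a verbatim
 * caller):
 *
 *	spa_vdev_state_enter(spa, SCL_NONE);
 *	... look up the vdev and change its state ...
 *	return (spa_vdev_state_exit(spa, vd, error));
 */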

/*
 * ==========================================================================
 * Miscellaneous functions
 * ==========================================================================
 */

void
spa_activate_mos_feature(spa_t *spa, const char *feature)
{
	(void) nvlist_add_boolean(spa->spa_label_features, feature);
	vdev_config_dirty(spa->spa_root_vdev);
}

void
spa_deactivate_mos_feature(spa_t *spa, const char *feature)
{
	(void) nvlist_remove_all(spa->spa_label_features, feature);
	vdev_config_dirty(spa->spa_root_vdev);
}

/*
 * Rename a spa_t.
 */
int
spa_rename(const char *name, const char *newname)
{
	spa_t *spa;
	int err;

	/*
	 * Lookup the spa_t and grab the config lock for writing. We need to
	 * actually open the pool so that we can sync out the necessary labels.
	 * It's OK to call spa_open() with the namespace lock held because we
	 * allow recursive calls for other reasons.
	 */
	mutex_enter(&spa_namespace_lock);
	if ((err = spa_open(name, &spa, FTAG)) != 0) {
		mutex_exit(&spa_namespace_lock);
		return (err);
	}

	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);

	avl_remove(&spa_namespace_avl, spa);
	(void) strlcpy(spa->spa_name, newname, sizeof (spa->spa_name));
	avl_add(&spa_namespace_avl, spa);

	/*
	 * Sync all labels to disk with the new names by marking the root vdev
	 * dirty and waiting for it to sync. It will pick up the new pool name
	 * during the sync.
	 */
	vdev_config_dirty(spa->spa_root_vdev);

	spa_config_exit(spa, SCL_ALL, FTAG);

	txg_wait_synced(spa->spa_dsl_pool, 0);

	/*
	 * Sync the updated config cache.
	 */
	spa_config_sync(spa, B_FALSE, B_TRUE);

	spa_close(spa, FTAG);

	mutex_exit(&spa_namespace_lock);

	return (0);
}

/*
 * Return the spa_t associated with given pool_guid, if it exists. If
 * device_guid is non-zero, determine whether the pool exists *and* contains
 * a device with the specified device_guid.
 */
spa_t *
spa_by_guid(uint64_t pool_guid, uint64_t device_guid)
{
	spa_t *spa;
	avl_tree_t *t = &spa_namespace_avl;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	for (spa = avl_first(t); spa != NULL; spa = AVL_NEXT(t, spa)) {
		if (spa->spa_state == POOL_STATE_UNINITIALIZED)
			continue;
		if (spa->spa_root_vdev == NULL)
			continue;
		if (spa_guid(spa) == pool_guid) {
			if (device_guid == 0)
				break;

			if (vdev_lookup_by_guid(spa->spa_root_vdev,
			    device_guid) != NULL)
				break;

			/*
			 * Check any devices we may be in the process
			 * of adding.
			 */
			if (spa->spa_pending_vdev) {
				if (vdev_lookup_by_guid(spa->spa_pending_vdev,
				    device_guid) != NULL)
					break;
			}
		}
	}

	return (spa);
}

/*
 * Determine whether a pool with the given pool_guid exists.
 */
boolean_t
spa_guid_exists(uint64_t pool_guid, uint64_t device_guid)
{
	return (spa_by_guid(pool_guid, device_guid) != NULL);
}

char *
spa_strdup(const char *s)
{
	size_t len;
	char *new;

	len = strlen(s);
	new = kmem_alloc(len + 1, KM_SLEEP);
	bcopy(s, new, len);
	new[len] = '\0';

	return (new);
}

void
spa_strfree(char *s)
{
	kmem_free(s, strlen(s) + 1);
}

uint64_t
spa_get_random(uint64_t range)
{
	uint64_t r;

	ASSERT(range != 0);

	(void) random_get_pseudo_bytes((void *)&r, sizeof (uint64_t));

	return (r % range);
}

uint64_t
spa_generate_guid(spa_t *spa)
{
	uint64_t guid = spa_get_random(-1ULL);

	if (spa != NULL) {
		while (guid == 0 || spa_guid_exists(spa_guid(spa), guid))
			guid = spa_get_random(-1ULL);
	} else {
		while (guid == 0 || spa_guid_exists(guid, 0))
			guid = spa_get_random(-1ULL);
	}

	return (guid);
}

void
sprintf_blkptr(char *buf, const blkptr_t *bp)
{
	char type[256];
	char *checksum = NULL;
	char *compress = NULL;

	if (bp != NULL) {
		if (BP_GET_TYPE(bp) & DMU_OT_NEWTYPE) {
			dmu_object_byteswap_t bswap =
			    DMU_OT_BYTESWAP(BP_GET_TYPE(bp));
			(void) snprintf(type, sizeof (type), "bswap %s %s",
			    DMU_OT_IS_METADATA(BP_GET_TYPE(bp)) ?
			    "metadata" : "data",
			    dmu_ot_byteswap[bswap].ob_name);
		} else {
			(void) strlcpy(type, dmu_ot[BP_GET_TYPE(bp)].ot_name,
			    sizeof (type));
		}
		checksum = zio_checksum_table[BP_GET_CHECKSUM(bp)].ci_name;
		compress = zio_compress_table[BP_GET_COMPRESS(bp)].ci_name;
	}

	SPRINTF_BLKPTR(snprintf, ' ', buf, bp, type, checksum, compress);
}

void
spa_freeze(spa_t *spa)
{
	uint64_t freeze_txg = 0;

	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
	if (spa->spa_freeze_txg == UINT64_MAX) {
		freeze_txg = spa_last_synced_txg(spa) + TXG_SIZE;
		spa->spa_freeze_txg = freeze_txg;
	}
	spa_config_exit(spa, SCL_ALL, FTAG);
	if (freeze_txg != 0)
		txg_wait_synced(spa_get_dsl(spa), freeze_txg);
}

void
zfs_panic_recover(const char *fmt, ...)
{
	va_list adx;

	va_start(adx, fmt);
	vcmn_err(zfs_recover ? CE_WARN : CE_PANIC, fmt, adx);
	va_end(adx);
}

/*
 * This is a stripped-down version of strtoull, suitable only for converting
 * lowercase hexadecimal numbers that don't overflow.
 */
uint64_t
strtonum(const char *str, char **nptr)
{
	uint64_t val = 0;
	char c;
	int digit;

	while ((c = *str) != '\0') {
		if (c >= '0' && c <= '9')
			digit = c - '0';
		else if (c >= 'a' && c <= 'f')
			digit = 10 + c - 'a';
		else
			break;

		val *= 16;
		val += digit;

		str++;
	}

	if (nptr)
		*nptr = (char *)str;

	return (val);
}
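
/*
 * For example, strtonum("2a", &end) returns 0x2a (decimal 42) and leaves
 * end pointing at the terminating NUL; parsing stops at the first
 * character outside [0-9a-f].
 */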

/*
 * ==========================================================================
 * Accessor functions
 * ==========================================================================
 */

boolean_t
spa_shutting_down(spa_t *spa)
{
	return (spa->spa_async_suspended);
}

dsl_pool_t *
spa_get_dsl(spa_t *spa)
{
	return (spa->spa_dsl_pool);
}

boolean_t
spa_is_initializing(spa_t *spa)
{
	return (spa->spa_is_initializing);
}

blkptr_t *
spa_get_rootblkptr(spa_t *spa)
{
	return (&spa->spa_ubsync.ub_rootbp);
}

void
spa_set_rootblkptr(spa_t *spa, const blkptr_t *bp)
{
	spa->spa_uberblock.ub_rootbp = *bp;
}

void
spa_altroot(spa_t *spa, char *buf, size_t buflen)
{
	if (spa->spa_root == NULL)
		buf[0] = '\0';
	else
		(void) strncpy(buf, spa->spa_root, buflen);
}

int
spa_sync_pass(spa_t *spa)
{
	return (spa->spa_sync_pass);
}

char *
spa_name(spa_t *spa)
{
	return (spa->spa_name);
}

uint64_t
spa_guid(spa_t *spa)
{
	/*
	 * If we fail to parse the config during spa_load(), we can go through
	 * the error path (which posts an ereport) and end up here with no root
	 * vdev. We stash the original pool guid in 'spa_config_guid' to handle
	 * this case.
	 */
	if (spa->spa_root_vdev != NULL)
		return (spa->spa_root_vdev->vdev_guid);
	else
		return (spa->spa_config_guid);
}

uint64_t
spa_load_guid(spa_t *spa)
{
	/*
	 * This is a GUID that exists solely as a reference for the
	 * purposes of the arc. It is generated at load time, and
	 * is never written to persistent storage.
	 */
	return (spa->spa_load_guid);
}

uint64_t
spa_last_synced_txg(spa_t *spa)
{
	return (spa->spa_ubsync.ub_txg);
}

uint64_t
spa_first_txg(spa_t *spa)
{
	return (spa->spa_first_txg);
}

uint64_t
spa_syncing_txg(spa_t *spa)
{
	return (spa->spa_syncing_txg);
}

pool_state_t
spa_state(spa_t *spa)
{
	return (spa->spa_state);
}

spa_load_state_t
spa_load_state(spa_t *spa)
{
	return (spa->spa_load_state);
}

uint64_t
spa_freeze_txg(spa_t *spa)
{
	return (spa->spa_freeze_txg);
}

/* ARGSUSED */
uint64_t
spa_get_asize(spa_t *spa, uint64_t lsize)
{
	/*
	 * The worst case is single-sector max-parity RAID-Z blocks, in which
	 * case the space requirement is exactly (VDEV_RAIDZ_MAXPARITY + 1)
	 * times the size; so just assume that. Add to this the fact that
	 * we can have up to 3 DVAs per bp, and one more factor of 2 because
	 * the block may be dittoed with up to 3 DVAs by ddt_sync().
	 */
	return (lsize * (VDEV_RAIDZ_MAXPARITY + 1) * SPA_DVAS_PER_BP * 2);
}
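
/*
 * Worked example (assuming VDEV_RAIDZ_MAXPARITY == 3 and
 * SPA_DVAS_PER_BP == 3): a 4K logical block is charged
 * 4K * 4 * 3 * 2 = 96K in the worst case.
 */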

uint64_t
spa_get_dspace(spa_t *spa)
{
	return (spa->spa_dspace);
}

void
spa_update_dspace(spa_t *spa)
{
	spa->spa_dspace = metaslab_class_get_dspace(spa_normal_class(spa)) +
	    ddt_get_dedup_dspace(spa);
}

/*
 * Return the failure mode that has been set to this pool. The default
 * behavior will be to block all I/Os when a complete failure occurs.
 */
uint8_t
spa_get_failmode(spa_t *spa)
{
	return (spa->spa_failmode);
}

boolean_t
spa_suspended(spa_t *spa)
{
	return (spa->spa_suspended);
}

uint64_t
spa_version(spa_t *spa)
{
	return (spa->spa_ubsync.ub_version);
}

boolean_t
spa_deflate(spa_t *spa)
{
	return (spa->spa_deflate);
}

metaslab_class_t *
spa_normal_class(spa_t *spa)
{
	return (spa->spa_normal_class);
}

metaslab_class_t *
spa_log_class(spa_t *spa)
{
	return (spa->spa_log_class);
}

int
spa_max_replication(spa_t *spa)
{
	/*
	 * As of SPA_VERSION == SPA_VERSION_DITTO_BLOCKS, we are able to
	 * handle BPs with more than one DVA allocated. Set our max
	 * replication level accordingly.
	 */
	if (spa_version(spa) < SPA_VERSION_DITTO_BLOCKS)
		return (1);
	return (MIN(SPA_DVAS_PER_BP, spa_max_replication_override));
}

int
spa_prev_software_version(spa_t *spa)
{
	return (spa->spa_prev_software_version);
}

uint64_t
dva_get_dsize_sync(spa_t *spa, const dva_t *dva)
{
	uint64_t asize = DVA_GET_ASIZE(dva);
	uint64_t dsize = asize;

	ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);

	if (asize != 0 && spa->spa_deflate) {
		vdev_t *vd = vdev_lookup_top(spa, DVA_GET_VDEV(dva));
		dsize = (asize >> SPA_MINBLOCKSHIFT) * vd->vdev_deflate_ratio;
	}

	return (dsize);
}

uint64_t
bp_get_dsize_sync(spa_t *spa, const blkptr_t *bp)
{
	uint64_t dsize = 0;

	for (int d = 0; d < SPA_DVAS_PER_BP; d++)
		dsize += dva_get_dsize_sync(spa, &bp->blk_dva[d]);

	return (dsize);
}

uint64_t
bp_get_dsize(spa_t *spa, const blkptr_t *bp)
{
	uint64_t dsize = 0;

	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);

	for (int d = 0; d < SPA_DVAS_PER_BP; d++)
		dsize += dva_get_dsize_sync(spa, &bp->blk_dva[d]);

	spa_config_exit(spa, SCL_VDEV, FTAG);

	return (dsize);
}

/*
 * ==========================================================================
 * Initialization and Termination
 * ==========================================================================
 */

static int
spa_name_compare(const void *a1, const void *a2)
{
	const spa_t *s1 = a1;
	const spa_t *s2 = a2;
	int s;

	s = strcmp(s1->spa_name, s2->spa_name);
	if (s > 0)
		return (1);
	if (s < 0)
		return (-1);
	return (0);
}

int
spa_busy(void)
{
	return (spa_active_count);
}

void
spa_boot_init()
{
	spa_config_load();
}

void
spa_init(int mode)
{
	mutex_init(&spa_namespace_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa_spare_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa_l2cache_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&spa_namespace_cv, NULL, CV_DEFAULT, NULL);

	avl_create(&spa_namespace_avl, spa_name_compare, sizeof (spa_t),
	    offsetof(spa_t, spa_avl));

	avl_create(&spa_spare_avl, spa_spare_compare, sizeof (spa_aux_t),
	    offsetof(spa_aux_t, aux_avl));

	avl_create(&spa_l2cache_avl, spa_l2cache_compare, sizeof (spa_aux_t),
	    offsetof(spa_aux_t, aux_avl));

	spa_mode_global = mode;

	refcount_init();
	unique_init();
	zio_init();
	dmu_init();
	zil_init();
	vdev_cache_stat_init();
	zfs_prop_init();
	zpool_prop_init();
	zpool_feature_init();
	spa_config_load();
	l2arc_start();
}

void
spa_fini(void)
{
	l2arc_stop();

	spa_evict_all();

	vdev_cache_stat_fini();
	zil_fini();
	dmu_fini();
	zio_fini();
	unique_fini();
	refcount_fini();

	avl_destroy(&spa_namespace_avl);
	avl_destroy(&spa_spare_avl);
	avl_destroy(&spa_l2cache_avl);

	cv_destroy(&spa_namespace_cv);
	mutex_destroy(&spa_namespace_lock);
	mutex_destroy(&spa_spare_lock);
	mutex_destroy(&spa_l2cache_lock);
}

/*
 * Return whether this pool has slogs. No locking needed.
 * It's not a problem if the wrong answer is returned as it's only for
 * performance and not correctness.
 */
boolean_t
spa_has_slogs(spa_t *spa)
{
	return (spa->spa_log_class->mc_rotor != NULL);
}

spa_log_state_t
spa_get_log_state(spa_t *spa)
{
	return (spa->spa_log_state);
}

void
spa_set_log_state(spa_t *spa, spa_log_state_t state)
{
	spa->spa_log_state = state;
}

boolean_t
spa_is_root(spa_t *spa)
{
	return (spa->spa_is_root);
}

boolean_t
spa_writeable(spa_t *spa)
{
	return (!!(spa->spa_mode & FWRITE));
}

int
spa_mode(spa_t *spa)
{
	return (spa->spa_mode);
}

uint64_t
spa_bootfs(spa_t *spa)
{
	return (spa->spa_bootfs);
}

uint64_t
spa_delegation(spa_t *spa)
{
	return (spa->spa_delegation);
}

objset_t *
spa_meta_objset(spa_t *spa)
{
	return (spa->spa_meta_objset);
}

enum zio_checksum
spa_dedup_checksum(spa_t *spa)
{
	return (spa->spa_dedup_checksum);
}

/*
 * Reset pool scan stat per scan pass (or reboot).
 */
void
spa_scan_stat_init(spa_t *spa)
{
	/* data not stored on disk */
	spa->spa_scan_pass_start = gethrestime_sec();
	spa->spa_scan_pass_exam = 0;
	vdev_scan_stat_init(spa->spa_root_vdev);
}

/*
 * Get scan stats for zpool status reports
 */
int
spa_scan_get_stats(spa_t *spa, pool_scan_stat_t *ps)
{
	dsl_scan_t *scn = spa->spa_dsl_pool ? spa->spa_dsl_pool->dp_scan : NULL;

	if (scn == NULL || scn->scn_phys.scn_func == POOL_SCAN_NONE)
		return (ENOENT);
	bzero(ps, sizeof (pool_scan_stat_t));

	/* data stored on disk */
	ps->pss_func = scn->scn_phys.scn_func;
	ps->pss_start_time = scn->scn_phys.scn_start_time;
	ps->pss_end_time = scn->scn_phys.scn_end_time;
	ps->pss_to_examine = scn->scn_phys.scn_to_examine;
	ps->pss_examined = scn->scn_phys.scn_examined;
	ps->pss_to_process = scn->scn_phys.scn_to_process;
	ps->pss_processed = scn->scn_phys.scn_processed;
	ps->pss_errors = scn->scn_phys.scn_errors;
	ps->pss_state = scn->scn_phys.scn_state;

	/* data not stored on disk */
	ps->pss_pass_start = spa->spa_scan_pass_start;
	ps->pss_pass_exam = spa->spa_scan_pass_exam;

	return (0);
}

boolean_t
spa_debug_enabled(spa_t *spa)
{
	return (spa->spa_debug);
}