/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2014 by Delphix. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/spa_impl.h>
#include <sys/spa_boot.h>
#include <sys/zio.h>
#include <sys/zio_checksum.h>
#include <sys/zio_compress.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/zap.h>
#include <sys/zil.h>
#include <sys/vdev_impl.h>
#include <sys/metaslab.h>
#include <sys/uberblock_impl.h>
#include <sys/txg.h>
#include <sys/avl.h>
#include <sys/unique.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_scan.h>
#include <sys/fs/zfs.h>
#include <sys/metaslab_impl.h>
#include <sys/arc.h>
#include <sys/ddt.h>
#include "zfs_prop.h"
#include "zfeature_common.h"

/*
 * SPA locking
 *
 * There are four basic locks for managing spa_t structures:
 *
 * spa_namespace_lock (global mutex)
 *
 *	This lock must be acquired to do any of the following:
 *
 *	- Lookup a spa_t by name
 *	- Add or remove a spa_t from the namespace
 *	- Increase spa_refcount from zero
 *	- Check if spa_refcount is zero
 *	- Rename a spa_t
 *	- add/remove/attach/detach devices
 *	- Held for the duration of create/destroy/import/export
 *
 *	It does not need to handle recursion. A create or destroy may
 *	reference objects (files or zvols) in other pools, but by
 *	definition they must have an existing reference, and will never need
 *	to lookup a spa_t by name.
 *
 * spa_refcount (per-spa refcount_t protected by mutex)
 *
 *	This reference count keeps track of any active users of the spa_t. The
 *	spa_t cannot be destroyed or freed while this is non-zero. Internally,
 *	the refcount is never really 'zero' - opening a pool implicitly keeps
 *	some references in the DMU. Internally we check against spa_minref, but
 *	present the image of a zero/non-zero value to consumers.
 *
 * spa_config_lock[] (per-spa array of rwlocks)
 *
 *	This protects the spa_t from config changes, and must be held in
 *	the following circumstances:
 *
 *	- RW_READER to perform I/O to the spa
 *	- RW_WRITER to change the vdev config
 *
 * The locking order is fairly straightforward:
 *
 * spa_namespace_lock -> spa_refcount
 *
 *	The namespace lock must be acquired to increase the refcount from 0
 *	or to check if it is zero.
 *
 * spa_refcount -> spa_config_lock[]
 *
 *	There must be at least one valid reference on the spa_t to acquire
 *	the config lock.
 *
 * spa_namespace_lock -> spa_config_lock[]
 *
 *	The namespace lock must always be taken before the config lock.
 *
 *
 * The spa_namespace_lock can be acquired directly and is globally visible.
 *
 * The namespace is manipulated using the following functions, all of which
 * require the spa_namespace_lock to be held.
 *
 *	spa_lookup()		Lookup a spa_t by name.
 *
 *	spa_add()		Create a new spa_t in the namespace.
 *
 *	spa_remove()		Remove a spa_t from the namespace. This also
 *				frees up any memory associated with the spa_t.
 *
 *	spa_next()		Returns the next spa_t in the system, or the
 *				first if NULL is passed.
 *
 *	spa_evict_all()		Shutdown and remove all spa_t structures in
 *				the system.
 *
 *	spa_guid_exists()	Determine whether a pool/device guid exists.
 *
 * The spa_refcount is manipulated using the following functions:
 *
 *	spa_open_ref()		Adds a reference to the given spa_t. Must be
 *				called with spa_namespace_lock held if the
 *				refcount is currently zero.
 *
 *	spa_close()		Remove a reference from the spa_t. This will
 *				not free the spa_t or remove it from the
 *				namespace. No locking is required.
 *
 *	spa_refcount_zero()	Returns true if the refcount is currently
 *				zero. Must be called with spa_namespace_lock
 *				held.
 *
 * The spa_config_lock[] is an array of rwlocks, ordered as follows:
 * SCL_CONFIG > SCL_STATE > SCL_ALLOC > SCL_ZIO > SCL_FREE > SCL_VDEV.
 * spa_config_lock[] is manipulated with spa_config_{enter,exit,held}().
 *
 * To read the configuration, it suffices to hold one of these locks as reader.
 * To modify the configuration, you must hold all locks as writer. To modify
 * vdev state without altering the vdev tree's topology (e.g. online/offline),
 * you must hold SCL_STATE and SCL_ZIO as writer.
 *
 * We use these distinct config locks to avoid recursive lock entry.
 * For example, spa_sync() (which holds SCL_CONFIG as reader) induces
 * block allocations (SCL_ALLOC), which may require reading space maps
 * from disk (dmu_read() -> zio_read() -> SCL_ZIO).
 *
 * The spa config locks cannot be normal rwlocks because we need the
 * ability to hand off ownership. For example, SCL_ZIO is acquired
 * by the issuing thread and later released by an interrupt thread.
 * They do, however, obey the usual write-wanted semantics to prevent
 * writer (i.e. system administrator) starvation.
 *
 * The lock acquisition rules are as follows:
 *
 * SCL_CONFIG
 *	Protects changes to the vdev tree topology, such as vdev
 *	add/remove/attach/detach. Protects the dirty config list
 *	(spa_config_dirty_list) and the set of spares and l2arc devices.
 *
 * SCL_STATE
 *	Protects changes to pool state and vdev state, such as vdev
 *	online/offline/fault/degrade/clear. Protects the dirty state list
 *	(spa_state_dirty_list) and global pool state (spa_state).
 *
 * SCL_ALLOC
 *	Protects changes to metaslab groups and classes.
 *	Held as reader by metaslab_alloc() and metaslab_claim().
 *
 * SCL_ZIO
 *	Held by bp-level zios (those which have no io_vd upon entry)
 *	to prevent changes to the vdev tree. The bp-level zio implicitly
 *	protects all of its vdev child zios, which do not hold SCL_ZIO.
 *
 * SCL_FREE
 *	Protects changes to metaslab groups and classes.
 *	Held as reader by metaslab_free(). SCL_FREE is distinct from
 *	SCL_ALLOC, and lower than SCL_ZIO, so that we can safely free
 *	blocks in zio_done() while another i/o that holds either
 *	SCL_ALLOC or SCL_ZIO is waiting for this i/o to complete.
 *
 * SCL_VDEV
 *	Held as reader to prevent changes to the vdev tree during trivial
 *	inquiries such as bp_get_dsize(). SCL_VDEV is distinct from the
 *	other locks, and lower than all of them, to ensure that it's safe
 *	to acquire regardless of caller context.
 *
 * In addition, the following rules apply:
 *
 * (a)	spa_props_lock protects pool properties, spa_config and spa_config_list.
 *	The lock ordering is SCL_CONFIG > spa_props_lock.
 *
 * (b)	I/O operations on leaf vdevs. For any zio operation that takes
 *	an explicit vdev_t argument -- such as zio_ioctl(), zio_read_phys(),
 *	or zio_write_phys() -- the caller must ensure that the config cannot
 *	change in the interim, and that the vdev cannot be reopened.
 *	SCL_STATE as reader suffices for both.
 *
 * The vdev configuration is protected by spa_vdev_enter() / spa_vdev_exit().
 *
 *	spa_vdev_enter()	Acquire the namespace lock and the config lock
 *				for writing.
 *
 *	spa_vdev_exit()		Release the config lock, wait for all I/O
 *				to complete, sync the updated configs to the
 *				cache, and release the namespace lock.
 *
 * vdev state is protected by spa_vdev_state_enter() / spa_vdev_state_exit().
 * Like spa_vdev_enter/exit, these are convenience wrappers -- the actual
 * locking is, always, based on spa_namespace_lock and spa_config_lock[].
 *
 * spa_rename() is also implemented within this file since it requires
 * manipulation of the namespace.
 */
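
/*
 * For illustration only, a hypothetical reader-side caller of the config
 * locks might look like this (the real call sites live elsewhere, e.g. in
 * spa.c, zio.c and vdev.c):
 *
 *	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
 *	... trivial vdev tree inquiry, e.g. vdev_lookup_top(spa, vdev) ...
 *	spa_config_exit(spa, SCL_VDEV, FTAG);
 */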

static avl_tree_t spa_namespace_avl;
kmutex_t spa_namespace_lock;
static kcondvar_t spa_namespace_cv;
static int spa_active_count;
int spa_max_replication_override = SPA_DVAS_PER_BP;

static kmutex_t spa_spare_lock;
static avl_tree_t spa_spare_avl;
static kmutex_t spa_l2cache_lock;
static avl_tree_t spa_l2cache_avl;

kmem_cache_t *spa_buffer_pool;
int spa_mode_global;

#ifdef ZFS_DEBUG
/* Everything except dprintf and spa is on by default in debug builds */
int zfs_flags = ~(ZFS_DEBUG_DPRINTF | ZFS_DEBUG_SPA);
#else
int zfs_flags = 0;
#endif

/*
 * zfs_recover can be set to nonzero to attempt to recover from
 * otherwise-fatal errors, typically caused by on-disk corruption. When
 * set, calls to zfs_panic_recover() will turn into warning messages.
 * This should only be used as a last resort, as it typically results
 * in leaked space, or worse.
 */
boolean_t zfs_recover = B_FALSE;

/*
 * If destroy encounters an EIO while reading metadata (e.g. indirect
 * blocks), space referenced by the missing metadata can not be freed.
 * Normally this causes the background destroy to become "stalled", as
 * it is unable to make forward progress. While in this stalled state,
 * all remaining space to free from the error-encountering filesystem is
 * "temporarily leaked". Set this flag to cause it to ignore the EIO,
 * permanently leak the space from indirect blocks that can not be read,
 * and continue to free everything else that it can.
 *
 * The default, "stalling" behavior is useful if the storage partially
 * fails (i.e. some but not all i/os fail), and then later recovers. In
 * this case, we will be able to continue pool operations while it is
 * partially failed, and when it recovers, we can continue to free the
 * space, with no leaks. However, note that this case is actually
 * fairly rare.
 *
 * Typically pools either (a) fail completely (but perhaps temporarily,
 * e.g. a top-level vdev going offline), or (b) have localized,
 * permanent errors (e.g. disk returns the wrong data due to bit flip or
 * firmware bug). In case (a), this setting does not matter because the
 * pool will be suspended and the sync thread will not be able to make
 * forward progress regardless. In case (b), because the error is
 * permanent, the best we can do is leak the minimum amount of space,
 * which is what setting this flag will do. Therefore, it is reasonable
 * for this flag to normally be set, but we chose the more conservative
 * approach of not setting it, so that there is no possibility of
 * leaking space in the "partial temporary" failure case.
 */
boolean_t zfs_free_leak_on_eio = B_FALSE;

/*
 * Expiration time in milliseconds. This value has two meanings. First, it is
 * used to determine when the spa_deadman() logic should fire. By default the
 * spa_deadman() will fire if spa_sync() has not completed in 1000 seconds.
 * Secondly, the value determines if an I/O is considered "hung". Any I/O that
 * has not completed in zfs_deadman_synctime_ms is considered "hung", resulting
 * in a system panic.
 */
uint64_t zfs_deadman_synctime_ms = 1000000ULL;

/*
 * Check time in milliseconds. This defines the frequency at which we check
 * for hung I/O.
 */
uint64_t zfs_deadman_checktime_ms = 5000ULL;

/*
 * Override the zfs deadman behavior via /etc/system. By default the
 * deadman is enabled except on VMware and sparc deployments.
 */
int zfs_deadman_enabled = -1;

/*
 * The worst case is single-sector max-parity RAID-Z blocks, in which
 * case the space requirement is exactly (VDEV_RAIDZ_MAXPARITY + 1)
 * times the size; so just assume that. Add to this the fact that
 * we can have up to 3 DVAs per bp, and one more factor of 2 because
 * the block may be dittoed with up to 3 DVAs by ddt_sync(). All together,
 * the worst case is:
 *	(VDEV_RAIDZ_MAXPARITY + 1) * SPA_DVAS_PER_BP * 2 == 24
 */
int spa_asize_inflation = 24;
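
/*
 * For example, under this worst-case factor a 128KB logical block may be
 * charged up to 128KB * 24 = 3MB of allocated space by spa_get_asize()
 * below, which simply returns lsize * spa_asize_inflation.
 */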

/*
 * Normally, we don't allow the last 3.125% (1/(2^spa_slop_shift)) of space in
 * the pool to be consumed. This ensures that we don't run the pool
 * completely out of space, due to unaccounted changes (e.g. to the MOS).
 * It also limits the worst-case time to allocate space. If we have
 * less than this amount of free space, most ZPL operations (e.g. write,
 * create) will return ENOSPC.
 *
 * Certain operations (e.g. file removal, most administrative actions) can
 * use half the slop space. They will only return ENOSPC if less than half
 * the slop space is free. Typically, once the pool has less than the slop
 * space free, the user will use these operations to free up space in the pool.
 * These are the operations that call dsl_pool_adjustedsize() with the netfree
 * argument set to TRUE.
 *
 * A very restricted set of operations are always permitted, regardless of
 * the amount of free space. These are the operations that call
 * dsl_sync_task(ZFS_SPACE_CHECK_NONE), e.g. "zfs destroy". If these
 * operations result in a net increase in the amount of space used,
 * it is possible to run the pool completely out of space, causing it to
 * be permanently read-only.
 *
 * See also the comments in zfs_space_check_t.
 */
int spa_slop_shift = 5;
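
/*
 * For example, with the default spa_slop_shift of 5, a pool with 1TB of
 * dspace reserves 1TB / 32 = 32GB of slop space; see spa_get_slop_space()
 * below for the exact computation.
 */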

/*
 * ==========================================================================
 * SPA config locking
 * ==========================================================================
 */
static void
spa_config_lock_init(spa_t *spa)
{
	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		mutex_init(&scl->scl_lock, NULL, MUTEX_DEFAULT, NULL);
		cv_init(&scl->scl_cv, NULL, CV_DEFAULT, NULL);
		refcount_create_untracked(&scl->scl_count);
		scl->scl_writer = NULL;
		scl->scl_write_wanted = 0;
	}
}

static void
spa_config_lock_destroy(spa_t *spa)
{
	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		mutex_destroy(&scl->scl_lock);
		cv_destroy(&scl->scl_cv);
		refcount_destroy(&scl->scl_count);
		ASSERT(scl->scl_writer == NULL);
		ASSERT(scl->scl_write_wanted == 0);
	}
}

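/*
 * Non-blocking variant of spa_config_enter(): attempt to take the given
 * locks in the given mode. Returns 1 if all requested locks were acquired;
 * otherwise drops whatever was acquired so far and returns 0.
 */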
int
spa_config_tryenter(spa_t *spa, int locks, void *tag, krw_t rw)
{
	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		if (!(locks & (1 << i)))
			continue;
		mutex_enter(&scl->scl_lock);
		if (rw == RW_READER) {
			if (scl->scl_writer || scl->scl_write_wanted) {
				mutex_exit(&scl->scl_lock);
				spa_config_exit(spa, locks ^ (1 << i), tag);
				return (0);
			}
		} else {
			ASSERT(scl->scl_writer != curthread);
			if (!refcount_is_zero(&scl->scl_count)) {
				mutex_exit(&scl->scl_lock);
				spa_config_exit(spa, locks ^ (1 << i), tag);
				return (0);
			}
			scl->scl_writer = curthread;
		}
		(void) refcount_add(&scl->scl_count, tag);
		mutex_exit(&scl->scl_lock);
	}
	return (1);
}

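/*
 * Acquire the given subset of config locks (a bitmask of SCL_* values) as
 * reader or writer. Readers block while a writer holds or is waiting for a
 * lock; writers block until the lock's refcount drops to zero. Holds are
 * recorded by tag rather than by thread, which is what permits the
 * ownership hand-off described in the block comment above.
 */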
void
spa_config_enter(spa_t *spa, int locks, void *tag, krw_t rw)
{
	int wlocks_held = 0;

	ASSERT3U(SCL_LOCKS, <, sizeof (wlocks_held) * NBBY);

	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		if (scl->scl_writer == curthread)
			wlocks_held |= (1 << i);
		if (!(locks & (1 << i)))
			continue;
		mutex_enter(&scl->scl_lock);
		if (rw == RW_READER) {
			while (scl->scl_writer || scl->scl_write_wanted) {
				cv_wait(&scl->scl_cv, &scl->scl_lock);
			}
		} else {
			ASSERT(scl->scl_writer != curthread);
			while (!refcount_is_zero(&scl->scl_count)) {
				scl->scl_write_wanted++;
				cv_wait(&scl->scl_cv, &scl->scl_lock);
				scl->scl_write_wanted--;
			}
			scl->scl_writer = curthread;
		}
		(void) refcount_add(&scl->scl_count, tag);
		mutex_exit(&scl->scl_lock);
	}
	ASSERT(wlocks_held <= locks);
}

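/*
 * Release the given subset of config locks. The last reference out clears
 * any writer and wakes all waiters. Because releases are matched by tag
 * rather than by thread, a lock may be released by a different thread than
 * the one that acquired it.
 */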
void
spa_config_exit(spa_t *spa, int locks, void *tag)
{
	for (int i = SCL_LOCKS - 1; i >= 0; i--) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		if (!(locks & (1 << i)))
			continue;
		mutex_enter(&scl->scl_lock);
		ASSERT(!refcount_is_zero(&scl->scl_count));
		if (refcount_remove(&scl->scl_count, tag) == 0) {
			ASSERT(scl->scl_writer == NULL ||
			    scl->scl_writer == curthread);
			scl->scl_writer = NULL;	/* OK in either case */
			cv_broadcast(&scl->scl_cv);
		}
		mutex_exit(&scl->scl_lock);
	}
}

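/*
 * Return the subset of 'locks' currently held in the given mode. For
 * RW_READER a lock is counted if anyone holds it (readers are not tracked
 * per thread); for RW_WRITER, only if curthread is the writer.
 */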
int
spa_config_held(spa_t *spa, int locks, krw_t rw)
{
	int locks_held = 0;

	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		if (!(locks & (1 << i)))
			continue;
		if ((rw == RW_READER && !refcount_is_zero(&scl->scl_count)) ||
		    (rw == RW_WRITER && scl->scl_writer == curthread))
			locks_held |= 1 << i;
	}

	return (locks_held);
}

/*
 * ==========================================================================
 * SPA namespace functions
 * ==========================================================================
 */

/*
 * Lookup the named spa_t in the AVL tree. The spa_namespace_lock must be held.
 * Returns NULL if no matching spa_t is found.
 */
spa_t *
spa_lookup(const char *name)
{
	static spa_t search;	/* spa_t is large; don't allocate on stack */
	spa_t *spa;
	avl_index_t where;
	char *cp;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	(void) strlcpy(search.spa_name, name, sizeof (search.spa_name));

	/*
	 * If it's a full dataset name, figure out the pool name and
	 * just use that.
	 */
	cp = strpbrk(search.spa_name, "/@#");
	if (cp != NULL)
		*cp = '\0';

	spa = avl_find(&spa_namespace_avl, &search, &where);

	return (spa);
}

/*
 * Fires when spa_sync has not completed within zfs_deadman_synctime_ms.
 * If the zfs_deadman_enabled flag is set then it inspects all vdev queues
 * looking for potentially hung I/Os.
 */
void
spa_deadman(void *arg)
{
	spa_t *spa = arg;

	/*
	 * Disable the deadman timer if the pool is suspended.
	 */
	if (spa_suspended(spa)) {
		VERIFY(cyclic_reprogram(spa->spa_deadman_cycid, CY_INFINITY));
		return;
	}

	zfs_dbgmsg("slow spa_sync: started %llu seconds ago, calls %llu",
	    (gethrtime() - spa->spa_sync_starttime) / NANOSEC,
	    ++spa->spa_deadman_calls);
	if (zfs_deadman_enabled)
		vdev_deadman(spa->spa_root_vdev);
}

/*
 * Create an uninitialized spa_t with the given name. Requires
 * spa_namespace_lock. The caller must ensure that the spa_t doesn't already
 * exist by calling spa_lookup() first.
 */
spa_t *
spa_add(const char *name, nvlist_t *config, const char *altroot)
{
	spa_t *spa;
	spa_config_dirent_t *dp;
	cyc_handler_t hdlr;
	cyc_time_t when;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	spa = kmem_zalloc(sizeof (spa_t), KM_SLEEP);

	mutex_init(&spa->spa_async_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_errlist_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_errlog_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_history_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_proc_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_props_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_scrub_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_suspend_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_vdev_top_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_iokstat_lock, NULL, MUTEX_DEFAULT, NULL);

	cv_init(&spa->spa_async_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&spa->spa_proc_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&spa->spa_scrub_io_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&spa->spa_suspend_cv, NULL, CV_DEFAULT, NULL);

	for (int t = 0; t < TXG_SIZE; t++)
		bplist_create(&spa->spa_free_bplist[t]);

	(void) strlcpy(spa->spa_name, name, sizeof (spa->spa_name));
	spa->spa_state = POOL_STATE_UNINITIALIZED;
	spa->spa_freeze_txg = UINT64_MAX;
	spa->spa_final_txg = UINT64_MAX;
	spa->spa_load_max_txg = UINT64_MAX;
	spa->spa_proc = &p0;
	spa->spa_proc_state = SPA_PROC_NONE;

	hdlr.cyh_func = spa_deadman;
	hdlr.cyh_arg = spa;
	hdlr.cyh_level = CY_LOW_LEVEL;

	spa->spa_deadman_synctime = MSEC2NSEC(zfs_deadman_synctime_ms);

	/*
	 * This determines how often we need to check for hung I/Os after
	 * the cyclic has already fired. Since checking for hung I/Os is
	 * an expensive operation we don't want to check too frequently.
	 * Instead wait for 5 seconds before checking again.
	 */
	when.cyt_interval = MSEC2NSEC(zfs_deadman_checktime_ms);
	when.cyt_when = CY_INFINITY;
	mutex_enter(&cpu_lock);
	spa->spa_deadman_cycid = cyclic_add(&hdlr, &when);
	mutex_exit(&cpu_lock);

	refcount_create(&spa->spa_refcount);
	spa_config_lock_init(spa);

	avl_add(&spa_namespace_avl, spa);

	/*
	 * Set the alternate root, if there is one.
	 */
	if (altroot) {
		spa->spa_root = spa_strdup(altroot);
		spa_active_count++;
	}

	/*
	 * Every pool starts with the default cachefile
	 */
	list_create(&spa->spa_config_list, sizeof (spa_config_dirent_t),
	    offsetof(spa_config_dirent_t, scd_link));

	dp = kmem_zalloc(sizeof (spa_config_dirent_t), KM_SLEEP);
	dp->scd_path = altroot ? NULL : spa_strdup(spa_config_path);
	list_insert_head(&spa->spa_config_list, dp);

	VERIFY(nvlist_alloc(&spa->spa_load_info, NV_UNIQUE_NAME,
	    KM_SLEEP) == 0);

	if (config != NULL) {
		nvlist_t *features;

		if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_FEATURES_FOR_READ,
		    &features) == 0) {
			VERIFY(nvlist_dup(features, &spa->spa_label_features,
			    0) == 0);
		}

		VERIFY(nvlist_dup(config, &spa->spa_config, 0) == 0);
	}

	if (spa->spa_label_features == NULL) {
		VERIFY(nvlist_alloc(&spa->spa_label_features, NV_UNIQUE_NAME,
		    KM_SLEEP) == 0);
	}

	spa->spa_iokstat = kstat_create("zfs", 0, name,
	    "disk", KSTAT_TYPE_IO, 1, 0);
	if (spa->spa_iokstat) {
		spa->spa_iokstat->ks_lock = &spa->spa_iokstat_lock;
		kstat_install(spa->spa_iokstat);
	}

	spa->spa_debug = ((zfs_flags & ZFS_DEBUG_SPA) != 0);

	/*
	 * As a pool is being created, treat all features as disabled by
	 * setting SPA_FEATURE_DISABLED for all entries in the feature
	 * refcount cache.
	 */
	for (int i = 0; i < SPA_FEATURES; i++) {
		spa->spa_feat_refcount_cache[i] = SPA_FEATURE_DISABLED;
	}

	return (spa);
}

/*
 * Removes a spa_t from the namespace, freeing up any memory used. Requires
 * spa_namespace_lock. This is called only after the spa_t has been closed and
 * deactivated.
 */
void
spa_remove(spa_t *spa)
{
	spa_config_dirent_t *dp;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));
	ASSERT(spa->spa_state == POOL_STATE_UNINITIALIZED);

	nvlist_free(spa->spa_config_splitting);

	avl_remove(&spa_namespace_avl, spa);
	cv_broadcast(&spa_namespace_cv);

	if (spa->spa_root) {
		spa_strfree(spa->spa_root);
		spa_active_count--;
	}

	while ((dp = list_head(&spa->spa_config_list)) != NULL) {
		list_remove(&spa->spa_config_list, dp);
		if (dp->scd_path != NULL)
			spa_strfree(dp->scd_path);
		kmem_free(dp, sizeof (spa_config_dirent_t));
	}

	list_destroy(&spa->spa_config_list);

	nvlist_free(spa->spa_label_features);
	nvlist_free(spa->spa_load_info);
	spa_config_set(spa, NULL);

	mutex_enter(&cpu_lock);
	if (spa->spa_deadman_cycid != CYCLIC_NONE)
		cyclic_remove(spa->spa_deadman_cycid);
	mutex_exit(&cpu_lock);
	spa->spa_deadman_cycid = CYCLIC_NONE;

	refcount_destroy(&spa->spa_refcount);

	spa_config_lock_destroy(spa);

	kstat_delete(spa->spa_iokstat);
	spa->spa_iokstat = NULL;

	for (int t = 0; t < TXG_SIZE; t++)
		bplist_destroy(&spa->spa_free_bplist[t]);

	cv_destroy(&spa->spa_async_cv);
	cv_destroy(&spa->spa_proc_cv);
	cv_destroy(&spa->spa_scrub_io_cv);
	cv_destroy(&spa->spa_suspend_cv);

	mutex_destroy(&spa->spa_async_lock);
	mutex_destroy(&spa->spa_errlist_lock);
	mutex_destroy(&spa->spa_errlog_lock);
	mutex_destroy(&spa->spa_history_lock);
	mutex_destroy(&spa->spa_proc_lock);
	mutex_destroy(&spa->spa_props_lock);
	mutex_destroy(&spa->spa_scrub_lock);
	mutex_destroy(&spa->spa_suspend_lock);
	mutex_destroy(&spa->spa_vdev_top_lock);
	mutex_destroy(&spa->spa_iokstat_lock);

	kmem_free(spa, sizeof (spa_t));
}

/*
 * Given a pool, return the next pool in the namespace, or NULL if there is
 * none. If 'prev' is NULL, return the first pool.
 */
spa_t *
spa_next(spa_t *prev)
{
	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	if (prev)
		return (AVL_NEXT(&spa_namespace_avl, prev));
	else
		return (avl_first(&spa_namespace_avl));
}

/*
 * ==========================================================================
 * SPA refcount functions
 * ==========================================================================
 */

/*
 * Add a reference to the given spa_t. Must have at least one reference, or
 * have the namespace lock held.
 */
void
spa_open_ref(spa_t *spa, void *tag)
{
	ASSERT(refcount_count(&spa->spa_refcount) >= spa->spa_minref ||
	    MUTEX_HELD(&spa_namespace_lock));
	(void) refcount_add(&spa->spa_refcount, tag);
}

/*
 * Remove a reference to the given spa_t. Must have at least one reference, or
 * have the namespace lock held.
 */
void
spa_close(spa_t *spa, void *tag)
{
	ASSERT(refcount_count(&spa->spa_refcount) > spa->spa_minref ||
	    MUTEX_HELD(&spa_namespace_lock));
	(void) refcount_remove(&spa->spa_refcount, tag);
}

/*
 * Check to see if the spa refcount is zero. Must be called with
 * spa_namespace_lock held. We really compare against spa_minref, which is the
 * number of references acquired when opening a pool.
 */
boolean_t
spa_refcount_zero(spa_t *spa)
{
	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	return (refcount_count(&spa->spa_refcount) == spa->spa_minref);
}

/*
 * ==========================================================================
 * SPA spare and l2cache tracking
 * ==========================================================================
 */

/*
 * Hot spares and cache devices are tracked using the same code below,
 * for 'auxiliary' devices.
 */

typedef struct spa_aux {
	uint64_t	aux_guid;
	uint64_t	aux_pool;
	avl_node_t	aux_avl;
	int		aux_count;
} spa_aux_t;

static int
spa_aux_compare(const void *a, const void *b)
{
	const spa_aux_t *sa = a;
	const spa_aux_t *sb = b;

	if (sa->aux_guid < sb->aux_guid)
		return (-1);
	else if (sa->aux_guid > sb->aux_guid)
		return (1);
	else
		return (0);
}

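/*
 * Add a reference to the given aux device, creating the AVL entry on
 * first use.
 */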
void
spa_aux_add(vdev_t *vd, avl_tree_t *avl)
{
	avl_index_t where;
	spa_aux_t search;
	spa_aux_t *aux;

	search.aux_guid = vd->vdev_guid;
	if ((aux = avl_find(avl, &search, &where)) != NULL) {
		aux->aux_count++;
	} else {
		aux = kmem_zalloc(sizeof (spa_aux_t), KM_SLEEP);
		aux->aux_guid = vd->vdev_guid;
		aux->aux_count = 1;
		avl_insert(avl, aux, where);
	}
}

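/*
 * Drop a reference to the given aux device, freeing the AVL entry when the
 * last reference goes away. If the device was active in vd's pool, clear
 * its pool association instead.
 */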
void
spa_aux_remove(vdev_t *vd, avl_tree_t *avl)
{
	spa_aux_t search;
	spa_aux_t *aux;
	avl_index_t where;

	search.aux_guid = vd->vdev_guid;
	aux = avl_find(avl, &search, &where);

	ASSERT(aux != NULL);

	if (--aux->aux_count == 0) {
		avl_remove(avl, aux);
		kmem_free(aux, sizeof (spa_aux_t));
	} else if (aux->aux_pool == spa_guid(vd->vdev_spa)) {
		aux->aux_pool = 0ULL;
	}
}

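/*
 * Determine whether an aux device with the given guid exists, optionally
 * returning the pool it is active in and its reference count.
 */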
boolean_t
spa_aux_exists(uint64_t guid, uint64_t *pool, int *refcnt, avl_tree_t *avl)
{
	spa_aux_t search, *found;

	search.aux_guid = guid;
	found = avl_find(avl, &search, NULL);

	if (pool) {
		if (found)
			*pool = found->aux_pool;
		else
			*pool = 0ULL;
	}

	if (refcnt) {
		if (found)
			*refcnt = found->aux_count;
		else
			*refcnt = 0;
	}

	return (found != NULL);
}

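/*
 * Mark the aux device with vd's guid as active in vd's pool. The device
 * must already be tracked and not active elsewhere.
 */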
void
spa_aux_activate(vdev_t *vd, avl_tree_t *avl)
{
	spa_aux_t search, *found;
	avl_index_t where;

	search.aux_guid = vd->vdev_guid;
	found = avl_find(avl, &search, &where);
	ASSERT(found != NULL);
	ASSERT(found->aux_pool == 0ULL);

	found->aux_pool = spa_guid(vd->vdev_spa);
}

/*
 * Spares are tracked globally due to the following constraints:
 *
 *	- A spare may be part of multiple pools.
 *	- A spare may be added to a pool even if it's actively in use within
 *	  another pool.
 *	- A spare in use in any pool can only be the source of a replacement if
 *	  the target is a spare in the same pool.
 *
 * We keep track of all spares on the system through the use of a reference
 * counted AVL tree. When a vdev is added as a spare, or used as a replacement
 * spare, then we bump the reference count in the AVL tree. In addition, we set
 * the 'vdev_isspare' member to indicate that the device is a spare (active or
 * inactive). When a spare is made active (used to replace a device in the
 * pool), we also keep track of which pool it's been made a part of.
 *
 * The 'spa_spare_lock' protects the AVL tree. These functions are normally
 * called under the spa_namespace lock as part of vdev reconfiguration. The
 * separate spare lock exists for the status query path, which does not need to
 * be completely consistent with respect to other vdev configuration changes.
 */

static int
spa_spare_compare(const void *a, const void *b)
{
	return (spa_aux_compare(a, b));
}

void
spa_spare_add(vdev_t *vd)
{
	mutex_enter(&spa_spare_lock);
	ASSERT(!vd->vdev_isspare);
	spa_aux_add(vd, &spa_spare_avl);
	vd->vdev_isspare = B_TRUE;
	mutex_exit(&spa_spare_lock);
}

void
spa_spare_remove(vdev_t *vd)
{
	mutex_enter(&spa_spare_lock);
	ASSERT(vd->vdev_isspare);
	spa_aux_remove(vd, &spa_spare_avl);
	vd->vdev_isspare = B_FALSE;
	mutex_exit(&spa_spare_lock);
}

boolean_t
spa_spare_exists(uint64_t guid, uint64_t *pool, int *refcnt)
{
	boolean_t found;

	mutex_enter(&spa_spare_lock);
	found = spa_aux_exists(guid, pool, refcnt, &spa_spare_avl);
	mutex_exit(&spa_spare_lock);

	return (found);
}

void
spa_spare_activate(vdev_t *vd)
{
	mutex_enter(&spa_spare_lock);
	ASSERT(vd->vdev_isspare);
	spa_aux_activate(vd, &spa_spare_avl);
	mutex_exit(&spa_spare_lock);
}

/*
 * Level 2 ARC devices are tracked globally for the same reasons as spares.
 * Cache devices currently only support one pool per cache device, and so
 * for these devices the aux reference count is currently unused beyond 1.
 */

static int
spa_l2cache_compare(const void *a, const void *b)
{
	return (spa_aux_compare(a, b));
}

void
spa_l2cache_add(vdev_t *vd)
{
	mutex_enter(&spa_l2cache_lock);
	ASSERT(!vd->vdev_isl2cache);
	spa_aux_add(vd, &spa_l2cache_avl);
	vd->vdev_isl2cache = B_TRUE;
	mutex_exit(&spa_l2cache_lock);
}

void
spa_l2cache_remove(vdev_t *vd)
{
	mutex_enter(&spa_l2cache_lock);
	ASSERT(vd->vdev_isl2cache);
	spa_aux_remove(vd, &spa_l2cache_avl);
	vd->vdev_isl2cache = B_FALSE;
	mutex_exit(&spa_l2cache_lock);
}

boolean_t
spa_l2cache_exists(uint64_t guid, uint64_t *pool)
{
	boolean_t found;

	mutex_enter(&spa_l2cache_lock);
	found = spa_aux_exists(guid, pool, NULL, &spa_l2cache_avl);
	mutex_exit(&spa_l2cache_lock);

	return (found);
}

void
spa_l2cache_activate(vdev_t *vd)
{
	mutex_enter(&spa_l2cache_lock);
	ASSERT(vd->vdev_isl2cache);
	spa_aux_activate(vd, &spa_l2cache_avl);
	mutex_exit(&spa_l2cache_lock);
}

/*
 * ==========================================================================
 * SPA vdev locking
 * ==========================================================================
 */

/*
 * Lock the given spa_t for the purpose of adding or removing a vdev.
 * Grabs the global spa_namespace_lock plus the spa config lock for writing.
 * It returns the next transaction group for the spa_t.
 */
uint64_t
spa_vdev_enter(spa_t *spa)
{
	mutex_enter(&spa->spa_vdev_top_lock);
	mutex_enter(&spa_namespace_lock);
	return (spa_vdev_config_enter(spa));
}

/*
 * Internal implementation for spa_vdev_enter(). Used when a vdev
 * operation requires multiple syncs (i.e. removing a device) while
 * keeping the spa_namespace_lock held.
 */
uint64_t
spa_vdev_config_enter(spa_t *spa)
{
	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	spa_config_enter(spa, SCL_ALL, spa, RW_WRITER);

	return (spa_last_synced_txg(spa) + 1);
}

/*
 * Used in combination with spa_vdev_config_enter() to allow the syncing
 * of multiple transactions without releasing the spa_namespace_lock.
 */
void
spa_vdev_config_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error, char *tag)
{
	boolean_t config_changed = B_FALSE;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));
	ASSERT(txg > spa_last_synced_txg(spa));

	spa->spa_pending_vdev = NULL;

	/*
	 * Reassess the DTLs.
	 */
	vdev_dtl_reassess(spa->spa_root_vdev, 0, 0, B_FALSE);

	if (error == 0 && !list_is_empty(&spa->spa_config_dirty_list)) {
		config_changed = B_TRUE;
		spa->spa_config_generation++;
	}

	/*
	 * Verify the metaslab classes.
	 */
	ASSERT(metaslab_class_validate(spa_normal_class(spa)) == 0);
	ASSERT(metaslab_class_validate(spa_log_class(spa)) == 0);

	spa_config_exit(spa, SCL_ALL, spa);

	/*
	 * Panic the system if the specified tag requires it. This
	 * is useful for ensuring that configurations are updated
	 * transactionally.
	 */
	if (zio_injection_enabled)
		zio_handle_panic_injection(spa, tag, 0);

	/*
	 * Note: this txg_wait_synced() is important because it ensures
	 * that there won't be more than one config change per txg.
	 * This allows us to use the txg as the generation number.
	 */
	if (error == 0)
		txg_wait_synced(spa->spa_dsl_pool, txg);

	if (vd != NULL) {
		ASSERT(!vd->vdev_detached || vd->vdev_dtl_sm == NULL);
		spa_config_enter(spa, SCL_ALL, spa, RW_WRITER);
		vdev_free(vd);
		spa_config_exit(spa, SCL_ALL, spa);
	}

	/*
	 * If the config changed, update the config cache.
	 */
	if (config_changed)
		spa_config_sync(spa, B_FALSE, B_TRUE);
}

/*
 * Unlock the spa_t after adding or removing a vdev. Besides undoing the
 * locking of spa_vdev_enter(), we also want to make sure the transactions have
 * synced to disk, and then update the global configuration cache with the new
 * information.
 */
int
spa_vdev_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error)
{
	spa_vdev_config_exit(spa, vd, txg, error, FTAG);
	mutex_exit(&spa_namespace_lock);
	mutex_exit(&spa->spa_vdev_top_lock);

	return (error);
}

/*
 * Lock the given spa_t for the purpose of changing vdev state.
 */
void
spa_vdev_state_enter(spa_t *spa, int oplocks)
{
	int locks = SCL_STATE_ALL | oplocks;

	/*
	 * Root pools may need to read from the underlying devfs filesystem
	 * when opening up a vdev. Unfortunately if we're holding the
	 * SCL_ZIO lock it will result in a deadlock when we try to issue
	 * the read from the root filesystem. Instead we "prefetch"
	 * the associated vnodes that we need prior to opening the
	 * underlying devices and cache them so that we can prevent
	 * any I/O when we are doing the actual open.
	 */
	if (spa_is_root(spa)) {
		int low = locks & ~(SCL_ZIO - 1);
		int high = locks & ~low;

		spa_config_enter(spa, high, spa, RW_WRITER);
		vdev_hold(spa->spa_root_vdev);
		spa_config_enter(spa, low, spa, RW_WRITER);
	} else {
		spa_config_enter(spa, locks, spa, RW_WRITER);
	}
	spa->spa_vdev_locks = locks;
}

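/*
 * Undo spa_vdev_state_enter(): release the config locks taken above, wait
 * for any state change to sync, and update the config cache if necessary.
 */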
int
spa_vdev_state_exit(spa_t *spa, vdev_t *vd, int error)
{
	boolean_t config_changed = B_FALSE;

	if (vd != NULL || error == 0)
		vdev_dtl_reassess(vd ? vd->vdev_top : spa->spa_root_vdev,
		    0, 0, B_FALSE);

	if (vd != NULL) {
		vdev_state_dirty(vd->vdev_top);
		config_changed = B_TRUE;
		spa->spa_config_generation++;
	}

	if (spa_is_root(spa))
		vdev_rele(spa->spa_root_vdev);

	ASSERT3U(spa->spa_vdev_locks, >=, SCL_STATE_ALL);
	spa_config_exit(spa, spa->spa_vdev_locks, spa);

	/*
	 * If anything changed, wait for it to sync. This ensures that,
	 * from the system administrator's perspective, zpool(1M) commands
	 * are synchronous. This is important for things like zpool offline:
	 * when the command completes, you expect no further I/O from ZFS.
	 */
	if (vd != NULL)
		txg_wait_synced(spa->spa_dsl_pool, 0);

	/*
	 * If the config changed, update the config cache.
	 */
	if (config_changed) {
		mutex_enter(&spa_namespace_lock);
		spa_config_sync(spa, B_FALSE, B_TRUE);
		mutex_exit(&spa_namespace_lock);
	}

	return (error);
}

/*
 * ==========================================================================
 * Miscellaneous functions
 * ==========================================================================
 */

void
spa_activate_mos_feature(spa_t *spa, const char *feature, dmu_tx_t *tx)
{
	if (!nvlist_exists(spa->spa_label_features, feature)) {
		fnvlist_add_boolean(spa->spa_label_features, feature);
		/*
		 * When we are creating the pool (tx_txg==TXG_INITIAL), we can't
		 * dirty the vdev config because the SCL_CONFIG lock is not
		 * held. Thankfully, in this case we don't need to dirty the
		 * config because it will be written out anyway when we finish
		 * creating the pool.
		 */
		if (tx->tx_txg != TXG_INITIAL)
			vdev_config_dirty(spa->spa_root_vdev);
	}
}

void
spa_deactivate_mos_feature(spa_t *spa, const char *feature)
{
	if (nvlist_remove_all(spa->spa_label_features, feature) == 0)
		vdev_config_dirty(spa->spa_root_vdev);
}

/*
 * Rename a spa_t.
 */
int
spa_rename(const char *name, const char *newname)
{
	spa_t *spa;
	int err;

	/*
	 * Lookup the spa_t and grab the config lock for writing. We need to
	 * actually open the pool so that we can sync out the necessary labels.
	 * It's OK to call spa_open() with the namespace lock held because we
	 * allow recursive calls for other reasons.
	 */
	mutex_enter(&spa_namespace_lock);
	if ((err = spa_open(name, &spa, FTAG)) != 0) {
		mutex_exit(&spa_namespace_lock);
		return (err);
	}

	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);

	avl_remove(&spa_namespace_avl, spa);
	(void) strlcpy(spa->spa_name, newname, sizeof (spa->spa_name));
	avl_add(&spa_namespace_avl, spa);

	/*
	 * Sync all labels to disk with the new names by marking the root vdev
	 * dirty and waiting for it to sync. It will pick up the new pool name
	 * during the sync.
	 */
	vdev_config_dirty(spa->spa_root_vdev);

	spa_config_exit(spa, SCL_ALL, FTAG);

	txg_wait_synced(spa->spa_dsl_pool, 0);

	/*
	 * Sync the updated config cache.
	 */
	spa_config_sync(spa, B_FALSE, B_TRUE);

	spa_close(spa, FTAG);

	mutex_exit(&spa_namespace_lock);

	return (0);
}

/*
 * Return the spa_t associated with given pool_guid, if it exists. If
 * device_guid is non-zero, determine whether the pool exists *and* contains
 * a device with the specified device_guid.
 */
spa_t *
spa_by_guid(uint64_t pool_guid, uint64_t device_guid)
{
	spa_t *spa;
	avl_tree_t *t = &spa_namespace_avl;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	for (spa = avl_first(t); spa != NULL; spa = AVL_NEXT(t, spa)) {
		if (spa->spa_state == POOL_STATE_UNINITIALIZED)
			continue;
		if (spa->spa_root_vdev == NULL)
			continue;
		if (spa_guid(spa) == pool_guid) {
			if (device_guid == 0)
				break;

			if (vdev_lookup_by_guid(spa->spa_root_vdev,
			    device_guid) != NULL)
				break;

			/*
			 * Check any devices we may be in the process of adding.
			 */
			if (spa->spa_pending_vdev) {
				if (vdev_lookup_by_guid(spa->spa_pending_vdev,
				    device_guid) != NULL)
					break;
			}
		}
	}

	return (spa);
}

/*
 * Determine whether a pool with the given pool_guid exists.
 */
boolean_t
spa_guid_exists(uint64_t pool_guid, uint64_t device_guid)
{
	return (spa_by_guid(pool_guid, device_guid) != NULL);
}

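/*
 * Allocate a copy of the given string. The result must be released with
 * spa_strfree(), which frees strlen() + 1 bytes.
 */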
char *
spa_strdup(const char *s)
{
	size_t len;
	char *new;

	len = strlen(s);
	new = kmem_alloc(len + 1, KM_SLEEP);
	bcopy(s, new, len);
	new[len] = '\0';

	return (new);
}

void
spa_strfree(char *s)
{
	kmem_free(s, strlen(s) + 1);
}

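/*
 * Return a pseudo-random 64-bit value in the range [0, range).
 */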
uint64_t
spa_get_random(uint64_t range)
{
	uint64_t r;

	ASSERT(range != 0);

	(void) random_get_pseudo_bytes((void *)&r, sizeof (uint64_t));

	return (r % range);
}

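/*
 * Generate a nonzero guid. If a spa_t is given, retry until the guid is
 * not already in use by a device in that pool; otherwise, retry until no
 * existing pool has that guid.
 */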
uint64_t
spa_generate_guid(spa_t *spa)
{
	uint64_t guid = spa_get_random(-1ULL);

	if (spa != NULL) {
		while (guid == 0 || spa_guid_exists(spa_guid(spa), guid))
			guid = spa_get_random(-1ULL);
	} else {
		while (guid == 0 || spa_guid_exists(guid, 0))
			guid = spa_get_random(-1ULL);
	}

	return (guid);
}

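/*
 * Format the given block pointer into 'buf' for debugging output,
 * decoding its type, checksum and compression settings.
 */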
void
snprintf_blkptr(char *buf, size_t buflen, const blkptr_t *bp)
{
	char type[256];
	char *checksum = NULL;
	char *compress = NULL;

	if (bp != NULL) {
		if (BP_GET_TYPE(bp) & DMU_OT_NEWTYPE) {
			dmu_object_byteswap_t bswap =
			    DMU_OT_BYTESWAP(BP_GET_TYPE(bp));
			(void) snprintf(type, sizeof (type), "bswap %s %s",
			    DMU_OT_IS_METADATA(BP_GET_TYPE(bp)) ?
			    "metadata" : "data",
			    dmu_ot_byteswap[bswap].ob_name);
		} else {
			(void) strlcpy(type, dmu_ot[BP_GET_TYPE(bp)].ot_name,
			    sizeof (type));
		}
		if (!BP_IS_EMBEDDED(bp)) {
			checksum =
			    zio_checksum_table[BP_GET_CHECKSUM(bp)].ci_name;
		}
		compress = zio_compress_table[BP_GET_COMPRESS(bp)].ci_name;
	}

	SNPRINTF_BLKPTR(snprintf, ' ', buf, buflen, bp, type, checksum,
	    compress);
}

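/*
 * Freeze the pool at a txg one full TXG_SIZE window in the future and wait
 * for that txg to sync. Only the first call has any effect, since
 * spa_freeze_txg is set exactly once.
 */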
void
spa_freeze(spa_t *spa)
{
	uint64_t freeze_txg = 0;

	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
	if (spa->spa_freeze_txg == UINT64_MAX) {
		freeze_txg = spa_last_synced_txg(spa) + TXG_SIZE;
		spa->spa_freeze_txg = freeze_txg;
	}
	spa_config_exit(spa, SCL_ALL, FTAG);
	if (freeze_txg != 0)
		txg_wait_synced(spa_get_dsl(spa), freeze_txg);
}

void
zfs_panic_recover(const char *fmt, ...)
{
	va_list adx;

	va_start(adx, fmt);
	vcmn_err(zfs_recover ? CE_WARN : CE_PANIC, fmt, adx);
	va_end(adx);
}

/*
 * This is a stripped-down version of strtoull, suitable only for converting
 * lowercase hexadecimal numbers that don't overflow.
 */
uint64_t
strtonum(const char *str, char **nptr)
{
	uint64_t val = 0;
	char c;
	int digit;

	while ((c = *str) != '\0') {
		if (c >= '0' && c <= '9')
			digit = c - '0';
		else if (c >= 'a' && c <= 'f')
			digit = 10 + c - 'a';
		else
			break;

		val *= 16;
		val += digit;

		str++;
	}

	if (nptr)
		*nptr = (char *)str;

	return (val);
}

/*
 * ==========================================================================
 * Accessor functions
 * ==========================================================================
 */

boolean_t
spa_shutting_down(spa_t *spa)
{
	return (spa->spa_async_suspended);
}

dsl_pool_t *
spa_get_dsl(spa_t *spa)
{
	return (spa->spa_dsl_pool);
}

boolean_t
spa_is_initializing(spa_t *spa)
{
	return (spa->spa_is_initializing);
}

blkptr_t *
spa_get_rootblkptr(spa_t *spa)
{
	return (&spa->spa_ubsync.ub_rootbp);
}

void
spa_set_rootblkptr(spa_t *spa, const blkptr_t *bp)
{
	spa->spa_uberblock.ub_rootbp = *bp;
}

void
spa_altroot(spa_t *spa, char *buf, size_t buflen)
{
	if (spa->spa_root == NULL)
		buf[0] = '\0';
	else
		(void) strncpy(buf, spa->spa_root, buflen);
}

int
spa_sync_pass(spa_t *spa)
{
	return (spa->spa_sync_pass);
}

char *
spa_name(spa_t *spa)
{
	return (spa->spa_name);
}

uint64_t
spa_guid(spa_t *spa)
{
	dsl_pool_t *dp = spa_get_dsl(spa);
	uint64_t guid;

	/*
	 * If we fail to parse the config during spa_load(), we can go through
	 * the error path (which posts an ereport) and end up here with no root
	 * vdev. We stash the original pool guid in 'spa_config_guid' to handle
	 * this case.
	 */
	if (spa->spa_root_vdev == NULL)
		return (spa->spa_config_guid);

	guid = spa->spa_last_synced_guid != 0 ?
	    spa->spa_last_synced_guid : spa->spa_root_vdev->vdev_guid;

	/*
	 * Return the most recently synced out guid unless we're
	 * in syncing context.
	 */
	if (dp && dsl_pool_sync_context(dp))
		return (spa->spa_root_vdev->vdev_guid);
	else
		return (guid);
}

uint64_t
spa_load_guid(spa_t *spa)
{
	/*
	 * This is a GUID that exists solely as a reference for the
	 * purposes of the arc. It is generated at load time, and
	 * is never written to persistent storage.
	 */
	return (spa->spa_load_guid);
}

uint64_t
spa_last_synced_txg(spa_t *spa)
{
	return (spa->spa_ubsync.ub_txg);
}

uint64_t
spa_first_txg(spa_t *spa)
{
	return (spa->spa_first_txg);
}

uint64_t
spa_syncing_txg(spa_t *spa)
{
	return (spa->spa_syncing_txg);
}

pool_state_t
spa_state(spa_t *spa)
{
	return (spa->spa_state);
}

spa_load_state_t
spa_load_state(spa_t *spa)
{
	return (spa->spa_load_state);
}

uint64_t
spa_freeze_txg(spa_t *spa)
{
	return (spa->spa_freeze_txg);
}

/* ARGSUSED */
uint64_t
spa_get_asize(spa_t *spa, uint64_t lsize)
{
	return (lsize * spa_asize_inflation);
}

/*
 * Return the amount of slop space in bytes. It is 1/32 of the pool (3.125%),
 * or at least 32MB.
 *
 * See the comment above spa_slop_shift for details.
 */
uint64_t
spa_get_slop_space(spa_t *spa)
{
	uint64_t space = spa_get_dspace(spa);
	return (MAX(space >> spa_slop_shift, SPA_MINDEVSIZE >> 1));
}

uint64_t
spa_get_dspace(spa_t *spa)
{
	return (spa->spa_dspace);
}

void
spa_update_dspace(spa_t *spa)
{
	spa->spa_dspace = metaslab_class_get_dspace(spa_normal_class(spa)) +
	    ddt_get_dedup_dspace(spa);
}

/*
 * Return the failure mode that has been set for this pool. The default
 * behavior will be to block all I/Os when a complete failure occurs.
 */
uint8_t
spa_get_failmode(spa_t *spa)
{
	return (spa->spa_failmode);
}

boolean_t
spa_suspended(spa_t *spa)
{
	return (spa->spa_suspended);
}

uint64_t
spa_version(spa_t *spa)
{
	return (spa->spa_ubsync.ub_version);
}

boolean_t
spa_deflate(spa_t *spa)
{
	return (spa->spa_deflate);
}

metaslab_class_t *
spa_normal_class(spa_t *spa)
{
	return (spa->spa_normal_class);
}

metaslab_class_t *
spa_log_class(spa_t *spa)
{
	return (spa->spa_log_class);
}

int
spa_max_replication(spa_t *spa)
{
	/*
	 * As of SPA_VERSION == SPA_VERSION_DITTO_BLOCKS, we are able to
	 * handle BPs with more than one DVA allocated. Set our max
	 * replication level accordingly.
	 */
	if (spa_version(spa) < SPA_VERSION_DITTO_BLOCKS)
		return (1);
	return (MIN(SPA_DVAS_PER_BP, spa_max_replication_override));
}

int
spa_prev_software_version(spa_t *spa)
{
	return (spa->spa_prev_software_version);
}

uint64_t
spa_deadman_synctime(spa_t *spa)
{
	return (spa->spa_deadman_synctime);
}

uint64_t
dva_get_dsize_sync(spa_t *spa, const dva_t *dva)
{
	uint64_t asize = DVA_GET_ASIZE(dva);
	uint64_t dsize = asize;

	ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);

	if (asize != 0 && spa->spa_deflate) {
		vdev_t *vd = vdev_lookup_top(spa, DVA_GET_VDEV(dva));
		dsize = (asize >> SPA_MINBLOCKSHIFT) * vd->vdev_deflate_ratio;
	}

	return (dsize);
}

uint64_t
bp_get_dsize_sync(spa_t *spa, const blkptr_t *bp)
{
	uint64_t dsize = 0;

	for (int d = 0; d < BP_GET_NDVAS(bp); d++)
		dsize += dva_get_dsize_sync(spa, &bp->blk_dva[d]);

	return (dsize);
}

uint64_t
bp_get_dsize(spa_t *spa, const blkptr_t *bp)
{
	uint64_t dsize = 0;

	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);

	for (int d = 0; d < BP_GET_NDVAS(bp); d++)
		dsize += dva_get_dsize_sync(spa, &bp->blk_dva[d]);

	spa_config_exit(spa, SCL_VDEV, FTAG);

	return (dsize);
}

/*
 * ==========================================================================
 * Initialization and Termination
 * ==========================================================================
 */

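/*
 * AVL comparison function for the pool namespace: order spa_t structures
 * by name.
 */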
static int
spa_name_compare(const void *a1, const void *a2)
{
	const spa_t *s1 = a1;
	const spa_t *s2 = a2;
	int s;

	s = strcmp(s1->spa_name, s2->spa_name);
	if (s > 0)
		return (1);
	if (s < 0)
		return (-1);
	return (0);
}

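/*
 * Return spa_active_count, i.e. the number of pools in the namespace that
 * were created with an alternate root (see spa_add() and spa_remove()).
 */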
int
spa_busy(void)
{
	return (spa_active_count);
}

void
spa_boot_init()
{
	spa_config_load();
}

void
spa_init(int mode)
{
	mutex_init(&spa_namespace_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa_spare_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa_l2cache_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&spa_namespace_cv, NULL, CV_DEFAULT, NULL);

	avl_create(&spa_namespace_avl, spa_name_compare, sizeof (spa_t),
	    offsetof(spa_t, spa_avl));

	avl_create(&spa_spare_avl, spa_spare_compare, sizeof (spa_aux_t),
	    offsetof(spa_aux_t, aux_avl));

	avl_create(&spa_l2cache_avl, spa_l2cache_compare, sizeof (spa_aux_t),
	    offsetof(spa_aux_t, aux_avl));

	spa_mode_global = mode;

#ifdef _KERNEL
	spa_arch_init();
#else
	if (spa_mode_global != FREAD && dprintf_find_string("watch")) {
		arc_procfd = open("/proc/self/ctl", O_WRONLY);
		if (arc_procfd == -1) {
			perror("could not enable watchpoints: "
			    "opening /proc/self/ctl failed: ");
		} else {
			arc_watch = B_TRUE;
		}
	}
#endif

	refcount_init();
	unique_init();
	range_tree_init();
	zio_init();
	dmu_init();
	zil_init();
	vdev_cache_stat_init();
	zfs_prop_init();
	zpool_prop_init();
	zpool_feature_init();
	spa_config_load();
	l2arc_start();
}

void
spa_fini(void)
{
	l2arc_stop();

	spa_evict_all();

	vdev_cache_stat_fini();
	zil_fini();
	dmu_fini();
	zio_fini();
	range_tree_fini();
	unique_fini();
	refcount_fini();

	avl_destroy(&spa_namespace_avl);
	avl_destroy(&spa_spare_avl);
	avl_destroy(&spa_l2cache_avl);

	cv_destroy(&spa_namespace_cv);
	mutex_destroy(&spa_namespace_lock);
	mutex_destroy(&spa_spare_lock);
	mutex_destroy(&spa_l2cache_lock);
}

/*
 * Return whether this pool has slogs. No locking needed.
 * It's not a problem if the wrong answer is returned as it's only for
 * performance and not correctness.
 */
boolean_t
spa_has_slogs(spa_t *spa)
{
	return (spa->spa_log_class->mc_rotor != NULL);
}

spa_log_state_t
spa_get_log_state(spa_t *spa)
{
	return (spa->spa_log_state);
}

void
spa_set_log_state(spa_t *spa, spa_log_state_t state)
{
	spa->spa_log_state = state;
}

boolean_t
spa_is_root(spa_t *spa)
{
	return (spa->spa_is_root);
}

boolean_t
spa_writeable(spa_t *spa)
{
	return (!!(spa->spa_mode & FWRITE));
}

/*
 * Returns true if there is a pending sync task in any of the current
 * syncing txg, the current quiescing txg, or the current open txg.
 */
boolean_t
spa_has_pending_synctask(spa_t *spa)
{
	return (!txg_all_lists_empty(&spa->spa_dsl_pool->dp_sync_tasks));
}

int
spa_mode(spa_t *spa)
{
	return (spa->spa_mode);
}

uint64_t
spa_bootfs(spa_t *spa)
{
	return (spa->spa_bootfs);
}

uint64_t
spa_delegation(spa_t *spa)
{
	return (spa->spa_delegation);
}

objset_t *
spa_meta_objset(spa_t *spa)
{
	return (spa->spa_meta_objset);
}

enum zio_checksum
spa_dedup_checksum(spa_t *spa)
{
	return (spa->spa_dedup_checksum);
}

/*
 * Reset pool scan stat per scan pass (or reboot).
 */
void
spa_scan_stat_init(spa_t *spa)
{
	/* data not stored on disk */
	spa->spa_scan_pass_start = gethrestime_sec();
	spa->spa_scan_pass_exam = 0;
	vdev_scan_stat_init(spa->spa_root_vdev);
}

/*
 * Get scan stats for zpool status reports
 */
int
spa_scan_get_stats(spa_t *spa, pool_scan_stat_t *ps)
{
	dsl_scan_t *scn = spa->spa_dsl_pool ? spa->spa_dsl_pool->dp_scan : NULL;

	if (scn == NULL || scn->scn_phys.scn_func == POOL_SCAN_NONE)
		return (SET_ERROR(ENOENT));
	bzero(ps, sizeof (pool_scan_stat_t));

	/* data stored on disk */
	ps->pss_func = scn->scn_phys.scn_func;
	ps->pss_start_time = scn->scn_phys.scn_start_time;
	ps->pss_end_time = scn->scn_phys.scn_end_time;
	ps->pss_to_examine = scn->scn_phys.scn_to_examine;
	ps->pss_examined = scn->scn_phys.scn_examined;
	ps->pss_to_process = scn->scn_phys.scn_to_process;
	ps->pss_processed = scn->scn_phys.scn_processed;
	ps->pss_errors = scn->scn_phys.scn_errors;
	ps->pss_state = scn->scn_phys.scn_state;

	/* data not stored on disk */
	ps->pss_pass_start = spa->spa_scan_pass_start;
	ps->pss_pass_exam = spa->spa_scan_pass_exam;

	return (0);
}

boolean_t
spa_debug_enabled(spa_t *spa)
{
	return (spa->spa_debug);
}

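/*
 * Return the largest block size supported by this pool, which depends on
 * whether the large_blocks feature is enabled.
 */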
int
spa_maxblocksize(spa_t *spa)
{
	if (spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_BLOCKS))
		return (SPA_MAXBLOCKSIZE);
	else
		return (SPA_OLD_MAXBLOCKSIZE);
}