NEX-7397 Hotspare didn't kick in automatically when one of the drives in the pool went "Faulty" (is_ssd fix)
Reviewed by: Saso Kiselkov <saso.kiselkov@nexenta.com>
Reviewed by: Yuri Pankov <yuri.pankov@nexenta.com>
NEX-2846 Enable Automatic/Intelligent Hot Sparing capability
Reviewed by: Jeffry Molanus <jeffry.molanus@nexenta.com>
Reviewed by: Roman Strashkin <roman.strashkin@nexenta.com>
Reviewed by: Saso Kiselkov <saso.kiselkov@nexenta.com>
6414 vdev_config_sync could be simpler
Reviewed by: George Wilson <george.wilson@delphix.com>
Reviewed by: Matthew Ahrens <mahrens@delphix.com>
Approved by: Robert Mustacchi <rm@joyent.com>
6368 Remove superfluous statement
Reviewed-by: Ned Bass <bass6@llnl.gov>
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Reviewed-by: Matthew Ahrens <mahrens@delphix.com>
Reviewed by: Dan Kimmel <dan.kimmel@delphix.com>
Reviewed by: Will Andrews <will@freebsd.org>
Approved by: Robert Mustacchi <rm@joyent.com>
6386 Fix function call with uninitialized value in vdev_inuse
Reviewed by: Brian Behlendorf <behlendorf1@llnl.gov>
Reviewed by: Matthew Ahrens <mahrens@delphix.com>
Reviewed by: Dan Kimmel <dan.kimmel@delphix.com>
Approved by: Robert Mustacchi <rm@joyent.com>
6328 Fix cstyle errors in zfs codebase (fix studio)
6328 Fix cstyle errors in zfs codebase
Reviewed by: Matthew Ahrens <mahrens@delphix.com>
Reviewed by: Alex Reece <alex@delphix.com>
Reviewed by: Richard Elling <Richard.Elling@RichardElling.com>
Reviewed by: Jorgen Lundman <lundman@lundman.net>
Approved by: Robert Mustacchi <rm@joyent.com>
NEX-3984 On-demand TRIM
Reviewed by: Alek Pinchuk <alek@nexenta.com>
Reviewed by: Josef 'Jeff' Sipek <josef.sipek@nexenta.com>
Conflicts:
usr/src/common/zfs/zpool_prop.c
usr/src/uts/common/sys/fs/zfs.h
NEX-3541 Implement persistent L2ARC
Reviewed by: Alek Pinchuk <alek.pinchuk@nexenta.com>
Reviewed by: Josef Sipek <josef.sipek@nexenta.com>
Conflicts:
usr/src/uts/common/fs/zfs/sys/spa.h
4121 vdev_label_init should treat request as succeeded when pool is read only
Reviewed by: Christopher Siden <christopher.siden@delphix.com>
Reviewed by: Matthew Ahrens <mahrens@delphix.com>
Reviewed by: Saso Kiselkov <skiselkov.ml@gmail.com>
Approved by: Richard Lowe <richlowe@richlowe.net>
Fixup merge results
re #12585 rb4049 ZFS++ work port - refactoring to improve separation of open/closed code, bug fixes, performance improvements - open code
Bug 11205: add missing libzfs_closed_stubs.c to fix opensource-only build.
ZFS plus work: special vdevs, cos, cos/vdev properties
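
Editor's note: the diff below makes vdev_config_generate() emit three new vdev config nvlist keys (ZPOOL_CONFIG_IS_SSD for the hot-spare is_ssd fix, ZPOOL_CONFIG_L2CACHE_PERSISTENT for persistent L2ARC, and ZPOOL_CONFIG_TRIM_PROG for on-demand TRIM progress). The following is a minimal consumer-side sketch, not part of the webrev; it assumes the macro names come from this tree's sys/fs/zfs.h and only uses standard libnvpair lookups.

	#include <stdio.h>
	#include <sys/types.h>
	#include <libnvpair.h>
	#include <sys/fs/zfs.h>

	/*
	 * Illustrative only: print the Nexenta-specific keys from a vdev
	 * config nvlist, matching how the diff below adds them
	 * (boolean_value for IS_SSD and L2CACHE_PERSISTENT, uint64 for
	 * TRIM_PROG).
	 */
	static void
	print_nexenta_vdev_keys(nvlist_t *nv)
	{
		boolean_t is_ssd, l2pers;
		uint64_t trim_prog;

		/* B_TRUE when the leaf vdev was detected as an SSD */
		if (nvlist_lookup_boolean_value(nv, ZPOOL_CONFIG_IS_SSD,
		    &is_ssd) == 0)
			(void) printf("is_ssd: %d\n", is_ssd);

		/* present on L2ARC vdev configs to advertise persistency */
		if (nvlist_lookup_boolean_value(nv,
		    ZPOOL_CONFIG_L2CACHE_PERSISTENT, &l2pers) == 0)
			(void) printf("persistent l2arc: %d\n", l2pers);

		/* per-leaf-vdev TRIM progress, only added when getstats */
		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_TRIM_PROG,
		    &trim_prog) == 0)
			(void) printf("trim progress: %llu\n",
			    (u_longlong_t)trim_prog);
	}
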
*** 19,29 ****
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
! * Copyright (c) 2012, 2016 by Delphix. All rights reserved.
*/
/*
* Virtual Device Labels
* ---------------------
--- 19,30 ----
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
! * Copyright (c) 2012, 2015 by Delphix. All rights reserved.
! * Copyright 2016 Nexenta Systems, Inc. All rights reserved.
*/
/*
* Virtual Device Labels
* ---------------------
*** 141,151 ****
#include <sys/zap.h>
#include <sys/vdev.h>
#include <sys/vdev_impl.h>
#include <sys/uberblock_impl.h>
#include <sys/metaslab.h>
- #include <sys/metaslab_impl.h>
#include <sys/zio.h>
#include <sys/dsl_scan.h>
#include <sys/abd.h>
#include <sys/fs/zfs.h>
--- 142,151 ----
*** 215,225 ****
nvlist_t *
vdev_config_generate(spa_t *spa, vdev_t *vd, boolean_t getstats,
vdev_config_flag_t flags)
{
nvlist_t *nv = NULL;
- vdev_indirect_config_t *vic = &vd->vdev_indirect_config;
nv = fnvlist_alloc();
fnvlist_add_string(nv, ZPOOL_CONFIG_TYPE, vd->vdev_ops->vdev_op_type);
if (!(flags & (VDEV_CONFIG_SPARE | VDEV_CONFIG_L2CACHE)))
--- 215,224 ----
*** 263,273 ****
if (vd->vdev_wholedisk != -1ULL)
fnvlist_add_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
vd->vdev_wholedisk);
! if (vd->vdev_not_present && !(flags & VDEV_CONFIG_MISSING))
fnvlist_add_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT, 1);
if (vd->vdev_isspare)
fnvlist_add_uint64(nv, ZPOOL_CONFIG_IS_SPARE, 1);
--- 262,272 ----
if (vd->vdev_wholedisk != -1ULL)
fnvlist_add_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
vd->vdev_wholedisk);
! if (vd->vdev_not_present)
fnvlist_add_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT, 1);
if (vd->vdev_isspare)
fnvlist_add_uint64(nv, ZPOOL_CONFIG_IS_SPARE, 1);
*** 279,314 ****
vd->vdev_ms_shift);
fnvlist_add_uint64(nv, ZPOOL_CONFIG_ASHIFT, vd->vdev_ashift);
fnvlist_add_uint64(nv, ZPOOL_CONFIG_ASIZE,
vd->vdev_asize);
fnvlist_add_uint64(nv, ZPOOL_CONFIG_IS_LOG, vd->vdev_islog);
! if (vd->vdev_removing) {
fnvlist_add_uint64(nv, ZPOOL_CONFIG_REMOVING,
vd->vdev_removing);
}
- }
if (vd->vdev_dtl_sm != NULL) {
fnvlist_add_uint64(nv, ZPOOL_CONFIG_DTL,
space_map_object(vd->vdev_dtl_sm));
}
- if (vic->vic_mapping_object != 0) {
- fnvlist_add_uint64(nv, ZPOOL_CONFIG_INDIRECT_OBJECT,
- vic->vic_mapping_object);
- }
-
- if (vic->vic_births_object != 0) {
- fnvlist_add_uint64(nv, ZPOOL_CONFIG_INDIRECT_BIRTHS,
- vic->vic_births_object);
- }
-
- if (vic->vic_prev_indirect_vdev != UINT64_MAX) {
- fnvlist_add_uint64(nv, ZPOOL_CONFIG_PREV_INDIRECT_VDEV,
- vic->vic_prev_indirect_vdev);
- }
-
if (vd->vdev_crtxg)
fnvlist_add_uint64(nv, ZPOOL_CONFIG_CREATE_TXG, vd->vdev_crtxg);
if (flags & VDEV_CONFIG_MOS) {
if (vd->vdev_leaf_zap != 0) {
--- 278,306 ----
vd->vdev_ms_shift);
fnvlist_add_uint64(nv, ZPOOL_CONFIG_ASHIFT, vd->vdev_ashift);
fnvlist_add_uint64(nv, ZPOOL_CONFIG_ASIZE,
vd->vdev_asize);
fnvlist_add_uint64(nv, ZPOOL_CONFIG_IS_LOG, vd->vdev_islog);
! fnvlist_add_uint64(nv, ZPOOL_CONFIG_IS_SPECIAL,
! vd->vdev_isspecial);
! if (vd->vdev_removing)
fnvlist_add_uint64(nv, ZPOOL_CONFIG_REMOVING,
vd->vdev_removing);
}
+ if (flags & VDEV_CONFIG_L2CACHE)
+ /* indicate that we support L2ARC persistency */
+ VERIFY(nvlist_add_boolean_value(nv,
+ ZPOOL_CONFIG_L2CACHE_PERSISTENT, B_TRUE) == 0);
+
+ fnvlist_add_boolean_value(nv, ZPOOL_CONFIG_IS_SSD, vd->vdev_is_ssd);
+
if (vd->vdev_dtl_sm != NULL) {
fnvlist_add_uint64(nv, ZPOOL_CONFIG_DTL,
space_map_object(vd->vdev_dtl_sm));
}
if (vd->vdev_crtxg)
fnvlist_add_uint64(nv, ZPOOL_CONFIG_CREATE_TXG, vd->vdev_crtxg);
if (flags & VDEV_CONFIG_MOS) {
if (vd->vdev_leaf_zap != 0) {
*** 324,402 ****
}
}
if (getstats) {
vdev_stat_t vs;
vdev_get_stats(vd, &vs);
fnvlist_add_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
(uint64_t *)&vs, sizeof (vs) / sizeof (uint64_t));
/* provide either current or previous scan information */
- pool_scan_stat_t ps;
if (spa_scan_get_stats(spa, &ps) == 0) {
fnvlist_add_uint64_array(nv,
ZPOOL_CONFIG_SCAN_STATS, (uint64_t *)&ps,
sizeof (pool_scan_stat_t) / sizeof (uint64_t));
}
-
- pool_removal_stat_t prs;
- if (spa_removal_get_stats(spa, &prs) == 0) {
- fnvlist_add_uint64_array(nv,
- ZPOOL_CONFIG_REMOVAL_STATS, (uint64_t *)&prs,
- sizeof (prs) / sizeof (uint64_t));
}
- /*
- * Note: this can be called from open context
- * (spa_get_stats()), so we need the rwlock to prevent
- * the mapping from being changed by condensing.
- */
- rw_enter(&vd->vdev_indirect_rwlock, RW_READER);
- if (vd->vdev_indirect_mapping != NULL) {
- ASSERT(vd->vdev_indirect_births != NULL);
- vdev_indirect_mapping_t *vim =
- vd->vdev_indirect_mapping;
- fnvlist_add_uint64(nv, ZPOOL_CONFIG_INDIRECT_SIZE,
- vdev_indirect_mapping_size(vim));
- }
- rw_exit(&vd->vdev_indirect_rwlock);
- if (vd->vdev_mg != NULL &&
- vd->vdev_mg->mg_fragmentation != ZFS_FRAG_INVALID) {
- /*
- * Compute approximately how much memory would be used
- * for the indirect mapping if this device were to
- * be removed.
- *
- * Note: If the frag metric is invalid, then not
- * enough metaslabs have been converted to have
- * histograms.
- */
- uint64_t seg_count = 0;
-
- /*
- * There are the same number of allocated segments
- * as free segments, so we will have at least one
- * entry per free segment.
- */
- for (int i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++) {
- seg_count += vd->vdev_mg->mg_histogram[i];
- }
-
- /*
- * The maximum length of a mapping is SPA_MAXBLOCKSIZE,
- * so we need at least one entry per SPA_MAXBLOCKSIZE
- * of allocated data.
- */
- seg_count += vd->vdev_stat.vs_alloc / SPA_MAXBLOCKSIZE;
-
- fnvlist_add_uint64(nv, ZPOOL_CONFIG_INDIRECT_SIZE,
- seg_count *
- sizeof (vdev_indirect_mapping_entry_phys_t));
- }
- }
-
if (!vd->vdev_ops->vdev_op_leaf) {
nvlist_t **child;
int c, idx;
ASSERT(!vd->vdev_ishole);
--- 316,339 ----
}
}
if (getstats) {
vdev_stat_t vs;
+ pool_scan_stat_t ps;
vdev_get_stats(vd, &vs);
fnvlist_add_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
(uint64_t *)&vs, sizeof (vs) / sizeof (uint64_t));
/* provide either current or previous scan information */
if (spa_scan_get_stats(spa, &ps) == 0) {
fnvlist_add_uint64_array(nv,
ZPOOL_CONFIG_SCAN_STATS, (uint64_t *)&ps,
sizeof (pool_scan_stat_t) / sizeof (uint64_t));
}
}
if (!vd->vdev_ops->vdev_op_leaf) {
nvlist_t **child;
int c, idx;
ASSERT(!vd->vdev_ishole);
*** 464,474 ****
--- 401,417 ----
if (vd->vdev_splitting && vd->vdev_orig_guid != 0LL) {
fnvlist_add_uint64(nv, ZPOOL_CONFIG_ORIG_GUID,
vd->vdev_orig_guid);
}
+
+ /* grab per-leaf-vdev trim stats */
+ if (getstats) {
+ fnvlist_add_uint64(nv, ZPOOL_CONFIG_TRIM_PROG,
+ vd->vdev_trim_prog);
}
+ }
return (nv);
}
/*
*** 487,500 ****
array = kmem_alloc(rvd->vdev_children * sizeof (uint64_t), KM_SLEEP);
for (c = 0, idx = 0; c < rvd->vdev_children; c++) {
vdev_t *tvd = rvd->vdev_child[c];
! if (tvd->vdev_ishole) {
array[idx++] = c;
}
- }
if (idx) {
VERIFY(nvlist_add_uint64_array(config, ZPOOL_CONFIG_HOLE_ARRAY,
array, idx) == 0);
}
--- 430,442 ----
array = kmem_alloc(rvd->vdev_children * sizeof (uint64_t), KM_SLEEP);
for (c = 0, idx = 0; c < rvd->vdev_children; c++) {
vdev_t *tvd = rvd->vdev_child[c];
! if (tvd->vdev_ishole)
array[idx++] = c;
}
if (idx) {
VERIFY(nvlist_add_uint64_array(config, ZPOOL_CONFIG_HOLE_ARRAY,
array, idx) == 0);
}
*** 1055,1078 ****
* It's possible that the best uberblock was discovered on a label
* that has a configuration which was written in a future txg.
* Search all labels on this vdev to find the configuration that
* matches the txg for our uberblock.
*/
! if (cb.ubl_vd != NULL) {
! vdev_dbgmsg(cb.ubl_vd, "best uberblock found for spa %s. "
! "txg %llu", spa->spa_name, (u_longlong_t)ub->ub_txg);
!
*config = vdev_label_read_config(cb.ubl_vd, ub->ub_txg);
- if (*config == NULL && spa->spa_extreme_rewind) {
- vdev_dbgmsg(cb.ubl_vd, "failed to read label config. "
- "Trying again without txg restrictions.");
- *config = vdev_label_read_config(cb.ubl_vd, UINT64_MAX);
- }
- if (*config == NULL) {
- vdev_dbgmsg(cb.ubl_vd, "failed to read label config");
- }
- }
spa_config_exit(spa, SCL_ALL, FTAG);
}
/*
* On success, increment root zio's count of good writes.
--- 997,1008 ----
* It's possible that the best uberblock was discovered on a label
* that has a configuration which was written in a future txg.
* Search all labels on this vdev to find the configuration that
* matches the txg for our uberblock.
*/
! if (cb.ubl_vd != NULL)
*config = vdev_label_read_config(cb.ubl_vd, ub->ub_txg);
spa_config_exit(spa, SCL_ALL, FTAG);
}
/*
* On success, increment root zio's count of good writes.
*** 1091,1101 ****
* Write the uberblock to all labels of all leaves of the specified vdev.
*/
static void
vdev_uberblock_sync(zio_t *zio, uberblock_t *ub, vdev_t *vd, int flags)
{
! for (uint64_t c = 0; c < vd->vdev_children; c++)
vdev_uberblock_sync(zio, ub, vd->vdev_child[c], flags);
if (!vd->vdev_ops->vdev_op_leaf)
return;
--- 1021,1031 ----
* Write the uberblock to all labels of all leaves of the specified vdev.
*/
static void
vdev_uberblock_sync(zio_t *zio, uberblock_t *ub, vdev_t *vd, int flags)
{
! for (int c = 0; c < vd->vdev_children; c++)
vdev_uberblock_sync(zio, ub, vd->vdev_child[c], flags);
if (!vd->vdev_ops->vdev_op_leaf)
return;
*** 1138,1152 ****
* are no longer needed (because the new uberblocks and the even
* labels are safely on disk), so it is safe to overwrite them.
*/
zio = zio_root(spa, NULL, NULL, flags);
! for (int v = 0; v < svdcount; v++) {
! if (vdev_writeable(svd[v])) {
zio_flush(zio, svd[v]);
- }
- }
(void) zio_wait(zio);
return (good_writes >= 1 ? 0 : EIO);
}
--- 1068,1079 ----
* are no longer needed (because the new uberblocks and the even
* labels are safely on disk), so it is safe to overwrite them.
*/
zio = zio_root(spa, NULL, NULL, flags);
! for (int v = 0; v < svdcount; v++)
zio_flush(zio, svd[v]);
(void) zio_wait(zio);
return (good_writes >= 1 ? 0 : EIO);
}