1 /*
   2  * CDDL HEADER START
   3  *
   4  * The contents of this file are subject to the terms of the
   5  * Common Development and Distribution License (the "License").
   6  * You may not use this file except in compliance with the License.
   7  *
   8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
   9  * or http://www.opensolaris.org/os/licensing.
  10  * See the License for the specific language governing permissions
  11  * and limitations under the License.
  12  *
  13  * When distributing Covered Code, include this CDDL HEADER in each
  14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
  15  * If applicable, add the following below this CDDL HEADER, with the
  16  * fields enclosed by brackets "[]" replaced with your own identifying
  17  * information: Portions Copyright [yyyy] [name of copyright owner]
  18  *
  19  * CDDL HEADER END
  20  */
  21 /*
  22  * Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved.
  23  */
  24 
  25 /*
  26  * The ZFS retire agent is responsible for managing hot spares across all pools.
  27  * When we see a device fault or a device removal, we try to open the associated
  28  * pool and look for any hot spares.  We iterate over any available hot spares
  29  * and attempt a 'zpool replace' for each one.
  30  *
  31  * For vdevs diagnosed as faulty, the agent is also responsible for proactively
  32  * marking the vdev FAULTY (for I/O errors) or DEGRADED (for checksum errors).
  33  */
  34 
  35 #include <fm/fmd_api.h>
  36 #include <sys/fs/zfs.h>
  37 #include <sys/fm/protocol.h>
  38 #include <sys/fm/fs/zfs.h>
  39 #include <libzfs.h>
  40 #include <fm/libtopo.h>
  41 #include <string.h>
  42 
  43 typedef struct zfs_retire_repaired {
  44         struct zfs_retire_repaired      *zrr_next;
  45         uint64_t                        zrr_pool;
  46         uint64_t                        zrr_vdev;
  47 } zfs_retire_repaired_t;
  48 
  49 typedef struct zfs_retire_data {
  50         libzfs_handle_t                 *zrd_hdl;
  51         zfs_retire_repaired_t           *zrd_repaired;
  52 } zfs_retire_data_t;
  53 
  54 static void
  55 zfs_retire_clear_data(fmd_hdl_t *hdl, zfs_retire_data_t *zdp)
  56 {
  57         zfs_retire_repaired_t *zrp;
  58 
  59         while ((zrp = zdp->zrd_repaired) != NULL) {
  60                 zdp->zrd_repaired = zrp->zrr_next;
  61                 fmd_hdl_free(hdl, zrp, sizeof (zfs_retire_repaired_t));
  62         }
  63 }
  64 
  65 /*
  66  * Find a pool with a matching GUID.
  67  */
  68 typedef struct find_cbdata {
  69         uint64_t        cb_guid;
  70         const char      *cb_fru;
  71         zpool_handle_t  *cb_zhp;
  72         nvlist_t        *cb_vdev;
  73 } find_cbdata_t;
  74 
  75 static int
  76 find_pool(zpool_handle_t *zhp, void *data)
  77 {
  78         find_cbdata_t *cbp = data;
  79 
  80         if (cbp->cb_guid ==
  81             zpool_get_prop_int(zhp, ZPOOL_PROP_GUID, NULL)) {
  82                 cbp->cb_zhp = zhp;
  83                 return (1);
  84         }
  85 
  86         zpool_close(zhp);
  87         return (0);
  88 }
  89 
  90 /*
   91  * Find a vdev within a tree with a matching FRU or GUID.
  92  */
  93 static nvlist_t *
  94 find_vdev(libzfs_handle_t *zhdl, nvlist_t *nv, const char *search_fru,
  95     uint64_t search_guid)
  96 {
  97         uint64_t guid;
  98         nvlist_t **child;
  99         uint_t c, children;
 100         nvlist_t *ret;
 101         char *fru;
 102 
 103         if (search_fru != NULL) {
 104                 if (nvlist_lookup_string(nv, ZPOOL_CONFIG_FRU, &fru) == 0 &&
 105                     libzfs_fru_compare(zhdl, fru, search_fru))
 106                         return (nv);
 107         } else {
 108                 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) == 0 &&
 109                     guid == search_guid)
 110                         return (nv);
 111         }
 112 
 113         if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
 114             &child, &children) != 0)
 115                 return (NULL);
 116 
 117         for (c = 0; c < children; c++) {
 118                 if ((ret = find_vdev(zhdl, child[c], search_fru,
 119                     search_guid)) != NULL)
 120                         return (ret);
 121         }
 122 
 123         if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
 124             &child, &children) != 0)
 125                 return (NULL);
 126 
 127         for (c = 0; c < children; c++) {
 128                 if ((ret = find_vdev(zhdl, child[c], search_fru,
 129                     search_guid)) != NULL)
 130                         return (ret);
 131         }
 132 
 133         return (NULL);
 134 }
 135 
 136 /*
 137  * Given a (pool, vdev) GUID pair, find the matching pool and vdev.
 138  */
 139 static zpool_handle_t *
 140 find_by_guid(libzfs_handle_t *zhdl, uint64_t pool_guid, uint64_t vdev_guid,
 141     nvlist_t **vdevp)
 142 {
 143         find_cbdata_t cb;
 144         zpool_handle_t *zhp;
 145         nvlist_t *config, *nvroot;
 146 
 147         /*
 148          * Find the corresponding pool and make sure the vdev still exists.
 149          */
 150         cb.cb_guid = pool_guid;
 151         if (zpool_iter(zhdl, find_pool, &cb) != 1)
 152                 return (NULL);
 153 
 154         zhp = cb.cb_zhp;
 155         config = zpool_get_config(zhp, NULL);
 156         if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
 157             &nvroot) != 0) {
 158                 zpool_close(zhp);
 159                 return (NULL);
 160         }
 161 
 162         if (vdev_guid != 0) {
 163                 if ((*vdevp = find_vdev(zhdl, nvroot, NULL,
 164                     vdev_guid)) == NULL) {
 165                         zpool_close(zhp);
 166                         return (NULL);
 167                 }
 168         }
 169 
 170         return (zhp);
 171 }
 172 
 173 static int
 174 search_pool(zpool_handle_t *zhp, void *data)
 175 {
 176         find_cbdata_t *cbp = data;
 177         nvlist_t *config;
 178         nvlist_t *nvroot;
 179 
 180         config = zpool_get_config(zhp, NULL);
 181         if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
 182             &nvroot) != 0) {
 183                 zpool_close(zhp);
 184                 return (0);
 185         }
 186 
 187         if ((cbp->cb_vdev = find_vdev(zpool_get_handle(zhp), nvroot,
 188             cbp->cb_fru, 0)) != NULL) {
 189                 cbp->cb_zhp = zhp;
 190                 return (1);
 191         }
 192 
 193         zpool_close(zhp);
 194         return (0);
 195 }
 196 
 197 /*
 198  * Given a FRU FMRI, find the matching pool and vdev.
 199  */
 200 static zpool_handle_t *
 201 find_by_fru(libzfs_handle_t *zhdl, const char *fru, nvlist_t **vdevp)
 202 {
 203         find_cbdata_t cb;
 204 
 205         cb.cb_fru = fru;
 206         cb.cb_zhp = NULL;
 207         if (zpool_iter(zhdl, search_pool, &cb) != 1)
 208                 return (NULL);
 209 
 210         *vdevp = cb.cb_vdev;
 211         return (cb.cb_zhp);
 212 }
 213 
 214 /*
 215  * Given a vdev, attempt to replace it with every known spare until one
 216  * succeeds.
 217  */
 218 static void
 219 replace_with_spare(fmd_hdl_t *hdl, zpool_handle_t *zhp, nvlist_t *vdev)
 220 {
 221         nvlist_t *config, *nvroot, *replacement;
 222         nvlist_t **spares;
 223         uint_t s, nspares;
 224         char *dev_name;
 225 
 226         config = zpool_get_config(zhp, NULL);
 227         if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
 228             &nvroot) != 0)
 229                 return;
 230 
 231         /*
 232          * Find out if there are any hot spares available in the pool.
 233          */
 234         if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
 235             &spares, &nspares) != 0)
 236                 return;
 237 
 238         replacement = fmd_nvl_alloc(hdl, FMD_SLEEP);
 239 
 240         (void) nvlist_add_string(replacement, ZPOOL_CONFIG_TYPE,
 241             VDEV_TYPE_ROOT);
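              /*
               * zpool_vdev_attach() expects the new device as the sole child
               * of a root vdev; the child slot below is filled in with each
               * candidate spare in turn.
               */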
 242 
 243         dev_name = zpool_vdev_name(NULL, zhp, vdev, B_FALSE);
 244 
 245         /*
 246          * Try to replace each spare, ending when we successfully
 247          * replace it.
 248          */
 249         for (s = 0; s < nspares; s++) {
 250                 char *spare_name;
 251 
 252                 if (nvlist_lookup_string(spares[s], ZPOOL_CONFIG_PATH,
 253                     &spare_name) != 0)
 254                         continue;
 255 
 256                 (void) nvlist_add_nvlist_array(replacement,
 257                     ZPOOL_CONFIG_CHILDREN, &spares[s], 1);
 258 
 259                 if (zpool_vdev_attach(zhp, dev_name, spare_name,
 260                     replacement, B_TRUE) == 0)
 261                         break;
 262         }
 263 
 264         free(dev_name);
 265         nvlist_free(replacement);
 266 }
 267 
 268 /*
 269  * Repair this vdev if we had diagnosed a 'fault.fs.zfs.device' and
  270  * the ASRU is now usable.  ZFS has found the device to be present and
 271  * functioning.
 272  */
 273 /*ARGSUSED*/
 274 void
 275 zfs_vdev_repair(fmd_hdl_t *hdl, nvlist_t *nvl)
 276 {
 277         zfs_retire_data_t *zdp = fmd_hdl_getspecific(hdl);
 278         zfs_retire_repaired_t *zrp;
 279         uint64_t pool_guid, vdev_guid;
 280         nvlist_t *asru;
 281 
 282         if (nvlist_lookup_uint64(nvl, FM_EREPORT_PAYLOAD_ZFS_POOL_GUID,
 283             &pool_guid) != 0 || nvlist_lookup_uint64(nvl,
 284             FM_EREPORT_PAYLOAD_ZFS_VDEV_GUID, &vdev_guid) != 0)
 285                 return;
 
 330             asru, FMD_HAS_FAULT_ASRU, NULL)) {
 331                 topo_hdl_t *thp;
 332                 char *fmri = NULL;
 333                 int err;
 334 
 335                 thp = fmd_hdl_topo_hold(hdl, TOPO_VERSION);
 336                 if (topo_fmri_nvl2str(thp, asru, &fmri, &err) == 0)
 337                         (void) fmd_repair_asru(hdl, fmri);
 338                 fmd_hdl_topo_rele(hdl, thp);
 339 
 340                 topo_hdl_strfree(thp, fmri);
 341         }
 342         nvlist_free(asru);
 343         zrp = fmd_hdl_alloc(hdl, sizeof (zfs_retire_repaired_t), FMD_SLEEP);
 344         zrp->zrr_next = zdp->zrd_repaired;
 345         zrp->zrr_pool = pool_guid;
 346         zrp->zrr_vdev = vdev_guid;
 347         zdp->zrd_repaired = zrp;
 348 }
 349 
 350 /*ARGSUSED*/
 351 static void
 352 zfs_retire_recv(fmd_hdl_t *hdl, fmd_event_t *ep, nvlist_t *nvl,
 353     const char *class)
 354 {
 355         uint64_t pool_guid, vdev_guid;
 356         zpool_handle_t *zhp;
 357         nvlist_t *resource, *fault, *fru;
 358         nvlist_t **faults;
 359         uint_t f, nfaults;
 360         zfs_retire_data_t *zdp = fmd_hdl_getspecific(hdl);
 361         libzfs_handle_t *zhdl = zdp->zrd_hdl;
 362         boolean_t fault_device, degrade_device;
 363         boolean_t is_repair;
 364         char *scheme, *fmri;
 365         nvlist_t *vdev;
 366         char *uuid;
 367         int repair_done = 0;
 368         boolean_t retire;
 369         boolean_t is_disk;
 370         vdev_aux_t aux;
 371         topo_hdl_t *thp;
 372         int err;
 373 
 374         /*
 375          * If this is a resource notifying us of device removal, then simply
 376          * check for an available spare and continue.
 377          */
 378         if (strcmp(class, "resource.fs.zfs.removed") == 0) {
 379                 if (nvlist_lookup_uint64(nvl, FM_EREPORT_PAYLOAD_ZFS_POOL_GUID,
 380                     &pool_guid) != 0 ||
 381                     nvlist_lookup_uint64(nvl, FM_EREPORT_PAYLOAD_ZFS_VDEV_GUID,
 382                     &vdev_guid) != 0)
 383                         return;
 384 
 385                 if ((zhp = find_by_guid(zhdl, pool_guid, vdev_guid,
 386                     &vdev)) == NULL)
 387                         return;
 388 
 389                 if (fmd_prop_get_int32(hdl, "spare_on_remove"))
 390                         replace_with_spare(hdl, zhp, vdev);
 391                 zpool_close(zhp);
 392                 return;
 393         }
 394 
 395         if (strcmp(class, FM_LIST_RESOLVED_CLASS) == 0)
 396                 return;
 397 
 398         if (strcmp(class, "resource.fs.zfs.statechange") == 0 ||
 399             strcmp(class,
 400             "resource.sysevent.EC_zfs.ESC_ZFS_vdev_remove") == 0) {
 401                 zfs_vdev_repair(hdl, nvl);
 402                 return;
 403         }
 404 
 405         zfs_retire_clear_data(hdl, zdp);
 
 409         else
 410                 is_repair = B_FALSE;
 411 
 412         /*
 413          * We subscribe to zfs faults as well as all repair events.
 414          */
 415         if (nvlist_lookup_nvlist_array(nvl, FM_SUSPECT_FAULT_LIST,
 416             &faults, &nfaults) != 0)
 417                 return;
 418 
 419         for (f = 0; f < nfaults; f++) {
 420                 fault = faults[f];
 421 
 422                 fault_device = B_FALSE;
 423                 degrade_device = B_FALSE;
 424                 is_disk = B_FALSE;
 425 
 426                 if (nvlist_lookup_boolean_value(fault, FM_SUSPECT_RETIRE,
 427                     &retire) == 0 && retire == 0)
 428                         continue;
 429 
 430                 if (fmd_nvl_class_match(hdl, fault,
 431                     "fault.io.disk.ssm-wearout") &&
 432                     fmd_prop_get_int32(hdl, "ssm_wearout_skip_retire") ==
 433                     FMD_B_TRUE) {
 434                         fmd_hdl_debug(hdl, "zfs-retire: ignoring SSM fault");
 435                         continue;
 436                 }
 437 
 438                 /*
 439                  * While we subscribe to fault.fs.zfs.*, we only take action
 440                  * for faults targeting a specific vdev (open failure or SERD
 441                  * failure).  We also subscribe to fault.io.* events, so that
 442                  * faulty disks will be faulted in the ZFS configuration.
 443                  */
 444                 if (fmd_nvl_class_match(hdl, fault, "fault.fs.zfs.vdev.io")) {
 445                         fault_device = B_TRUE;
 446                 } else if (fmd_nvl_class_match(hdl, fault,
 447                     "fault.fs.zfs.vdev.checksum")) {
 448                         degrade_device = B_TRUE;
 449                 } else if (fmd_nvl_class_match(hdl, fault,
 450                     "fault.fs.zfs.device")) {
 451                         fault_device = B_FALSE;
 452                 } else if (fmd_nvl_class_match(hdl, fault, "fault.io.*")) {
 453                         is_disk = B_TRUE;
 454                         fault_device = B_TRUE;
 455                 } else {
 456                         continue;
 457                 }
 458 
 459                 if (is_disk) {
 460                         /*
 461                          * This is a disk fault.  Lookup the FRU, convert it to
 462                          * an FMRI string, and attempt to find a matching vdev.
 463                          */
 464                         if (nvlist_lookup_nvlist(fault, FM_FAULT_FRU,
 465                             &fru) != 0 ||
 466                             nvlist_lookup_string(fru, FM_FMRI_SCHEME,
 467                             &scheme) != 0)
 468                                 continue;
 469 
 470                         if (strcmp(scheme, FM_FMRI_SCHEME_HC) != 0)
 471                                 continue;
 472 
 473                         thp = fmd_hdl_topo_hold(hdl, TOPO_VERSION);
 474                         if (topo_fmri_nvl2str(thp, fru, &fmri, &err) != 0) {
 475                                 fmd_hdl_topo_rele(hdl, thp);
 476                                 continue;
 477                         }
 478 
 479                         zhp = find_by_fru(zhdl, fmri, &vdev);
 480                         topo_hdl_strfree(thp, fmri);
 481                         fmd_hdl_topo_rele(hdl, thp);
 482 
 483                         if (zhp == NULL)
 484                                 continue;
 485 
 486                         (void) nvlist_lookup_uint64(vdev,
 487                             ZPOOL_CONFIG_GUID, &vdev_guid);
 488                         aux = VDEV_AUX_EXTERNAL;
 489                 } else {
 490                         /*
 491                          * This is a ZFS fault.  Lookup the resource, and
 492                          * attempt to find the matching vdev.
 493                          */
 494                         if (nvlist_lookup_nvlist(fault, FM_FAULT_RESOURCE,
 495                             &resource) != 0 ||
 496                             nvlist_lookup_string(resource, FM_FMRI_SCHEME,
 497                             &scheme) != 0)
 498                                 continue;
 499 
 500                         if (strcmp(scheme, FM_FMRI_SCHEME_ZFS) != 0)
 501                                 continue;
 502 
 503                         if (nvlist_lookup_uint64(resource, FM_FMRI_ZFS_POOL,
 504                             &pool_guid) != 0)
 505                                 continue;
 506 
 507                         if (nvlist_lookup_uint64(resource, FM_FMRI_ZFS_VDEV,
 508                             &vdev_guid) != 0) {
 509                                 if (is_repair)
 510                                         vdev_guid = 0;
 511                                 else
 512                                         continue;
 513                         }
 514 
 515                         if ((zhp = find_by_guid(zhdl, pool_guid, vdev_guid,
 516                             &vdev)) == NULL)
 517                                 continue;
 518 
 519                         aux = VDEV_AUX_ERR_EXCEEDED;
 520                 }
 521 
 522                 if (vdev_guid == 0) {
 523                         /*
 524                          * For pool-level repair events, clear the entire pool.
 525                          */
 526                         (void) zpool_clear(zhp, NULL, NULL);
 527                         zpool_close(zhp);
 528                         continue;
 529                 }
 530 
 531                 /*
 532                  * If this is a repair event, then mark the vdev as repaired and
 533                  * continue.
 534                  */
 535                 if (is_repair) {
 536                         repair_done = 1;
 537                         (void) zpool_vdev_clear(zhp, vdev_guid);
 538                         zpool_close(zhp);
 539                         continue;
 540                 }
 541 
 542                 /*
 543                  * Actively fault the device if needed.
 544                  */
 545                 if (fault_device)
 546                         (void) zpool_vdev_fault(zhp, vdev_guid, aux);
 547                 if (degrade_device)
 548                         (void) zpool_vdev_degrade(zhp, vdev_guid, aux);
 549 
 550                 /*
 551                  * Attempt to substitute a hot spare.
 552                  */
 553                 replace_with_spare(hdl, zhp, vdev);
 554                 zpool_close(zhp);
 555         }
 556 
 557         if (strcmp(class, FM_LIST_REPAIRED_CLASS) == 0 && repair_done &&
 558             nvlist_lookup_string(nvl, FM_SUSPECT_UUID, &uuid) == 0)
 559                 fmd_case_uuresolved(hdl, uuid);
 560 }
 561 
 562 static const fmd_hdl_ops_t fmd_ops = {
 563         zfs_retire_recv,        /* fmdo_recv */
 564         NULL,                   /* fmdo_timeout */
 565         NULL,                   /* fmdo_close */
 566         NULL,                   /* fmdo_stats */
 567         NULL,                   /* fmdo_gc */
 568 };
 569 
 570 static const fmd_prop_t fmd_props[] = {
 571         { "spare_on_remove", FMD_TYPE_BOOL, "true" },
 572         { "ssm_wearout_skip_retire", FMD_TYPE_BOOL, "true"},
 573         { NULL, 0, NULL }
 574 };
 575 
 576 static const fmd_hdl_info_t fmd_info = {
 577         "ZFS Retire Agent", "1.0", &fmd_ops, fmd_props
 578 };
 579 
 580 void
 581 _fmd_init(fmd_hdl_t *hdl)
 582 {
 583         zfs_retire_data_t *zdp;
 584         libzfs_handle_t *zhdl;
 585 
 586         if ((zhdl = libzfs_init()) == NULL)
 587                 return;
 588 
 589         if (fmd_hdl_register(hdl, FMD_API_VERSION, &fmd_info) != 0) {
 590                 libzfs_fini(zhdl);
 591                 return;
 592         }
 593 
 594         zdp = fmd_hdl_zalloc(hdl, sizeof (zfs_retire_data_t), FMD_SLEEP);
 595         zdp->zrd_hdl = zhdl;
 596 
 597         fmd_hdl_setspecific(hdl, zdp);
}
   1 /*
   2  * CDDL HEADER START
   3  *
   4  * The contents of this file are subject to the terms of the
   5  * Common Development and Distribution License (the "License").
   6  * You may not use this file except in compliance with the License.
   7  *
   8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
   9  * or http://www.opensolaris.org/os/licensing.
  10  * See the License for the specific language governing permissions
  11  * and limitations under the License.
  12  *
  13  * When distributing Covered Code, include this CDDL HEADER in each
  14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
  15  * If applicable, add the following below this CDDL HEADER, with the
  16  * fields enclosed by brackets "[]" replaced with your own identifying
  17  * information: Portions Copyright [yyyy] [name of copyright owner]
  18  *
  19  * CDDL HEADER END
  20  */
  21 
  22 /*
  23  * Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved.
  24  * Copyright 2017 Nexenta Systems, Inc.
  25  */
  26 
  27 /*
  28  * The ZFS retire agent is responsible for managing hot spares across all pools.
  29  * When we see a device fault or a device removal, we try to open the associated
  30  * pool and look for any hot spares.  We iterate over any available hot spares
  31  * and attempt a 'zpool replace' for each one.
  32  *
  33  * For vdevs diagnosed as faulty, the agent is also responsible for proactively
  34  * marking the vdev FAULTY (for I/O errors) or DEGRADED (for checksum errors).
  35  */
  36 
  37 #include <fm/fmd_api.h>
  38 #include <sys/fs/zfs.h>
  39 #include <sys/fm/protocol.h>
  40 #include <sys/fm/fs/zfs.h>
  41 #include <libzfs.h>
  42 #include <fm/libtopo.h>
  43 #include <string.h>
  44 #include <sys/int_fmtio.h>
   45 #include <devid.h>
      #include <assert.h>
      #include <limits.h>
      #include <sys/param.h>
      #include <libdevinfo.h>
  46 
  47 typedef struct zfs_retire_repaired {
  48         struct zfs_retire_repaired      *zrr_next;
  49         uint64_t                        zrr_pool;
  50         uint64_t                        zrr_vdev;
  51 } zfs_retire_repaired_t;
  52 
  53 typedef struct zfs_retire_data {
  54         libzfs_handle_t                 *zrd_hdl;
  55         zfs_retire_repaired_t           *zrd_repaired;
  56 } zfs_retire_data_t;
  57 
  58 static void
  59 zfs_retire_clear_data(fmd_hdl_t *hdl, zfs_retire_data_t *zdp)
  60 {
  61         zfs_retire_repaired_t *zrp;
  62 
  63         while ((zrp = zdp->zrd_repaired) != NULL) {
  64                 zdp->zrd_repaired = zrp->zrr_next;
  65                 fmd_hdl_free(hdl, zrp, sizeof (zfs_retire_repaired_t));
  66         }
  67 }
  68 
  69 /*
  70  * Find a pool with a matching GUID.
  71  */
  72 typedef struct find_cbdata {
  73         fmd_hdl_t       *cb_hdl;
  74         uint64_t        cb_guid;
  75         const char      *cb_fru;
  76         ddi_devid_t     cb_devid;
  77         zpool_handle_t  *cb_zhp;
  78         nvlist_t        *cb_vdev;
  79 } find_cbdata_t;
  80 
  81 static int
  82 find_pool(zpool_handle_t *zhp, void *data)
  83 {
  84         find_cbdata_t *cbp = data;
  85 
  86         if (cbp->cb_guid ==
  87             zpool_get_prop_int(zhp, ZPOOL_PROP_GUID, NULL)) {
  88                 cbp->cb_zhp = zhp;
  89                 return (1);
  90         }
  91 
  92         zpool_close(zhp);
  93         return (0);
  94 }
  95 
  96 /*
   97  * Find a vdev within a tree with a matching FRU, devid, or GUID.
  98  */
  99 static nvlist_t *
 100 find_vdev(fmd_hdl_t *hdl, libzfs_handle_t *zhdl, nvlist_t *nv,
 101     const char *search_fru, ddi_devid_t search_devid, uint64_t search_guid)
 102 {
 103         uint64_t guid;
 104         nvlist_t **child;
 105         uint_t c, children;
 106         nvlist_t *ret;
 107         char *fru, *devidstr, *path;
 108         ddi_devid_t devid;
 109 
 110         if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0)
 111                 fmd_hdl_debug(hdl, "find_vdev: vdev path: %s", path);
 112 
 113         if (search_fru != NULL &&
 114             nvlist_lookup_string(nv, ZPOOL_CONFIG_FRU, &fru) == 0) {
 115                 fmd_hdl_debug(hdl, "find_vdev: found fru: %s", fru);
 116                 if (libzfs_fru_compare(zhdl, fru, search_fru))
 117                         return (nv);
 118         }
 119 
 120         if (search_devid != NULL &&
 121             nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &devidstr) == 0) {
 122                 fmd_hdl_debug(hdl, "find_vdev: found devid: %s", devidstr);
 123 
 124                 if (devid_str_decode(devidstr, &devid, NULL) == 0) {
 125                         if (devid_compare(search_devid, devid) == 0) {
 126                                 devid_free(devid);
 127                                 return (nv);
 128                         }
 129 
 130                         devid_free(devid);
 131                 }
 132         }
 133 
 134         if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) == 0 &&
 135             guid == search_guid)
 136                 return (nv);
 137 
 138         if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
 139             &child, &children) != 0)
 140                 return (NULL);
 141 
 142         for (c = 0; c < children; c++) {
 143                 if ((ret = find_vdev(hdl, zhdl, child[c], search_fru,
 144                     search_devid, search_guid)) != NULL)
 145                         return (ret);
 146         }
 147 
 148         if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
 149             &child, &children) != 0)
 150                 return (NULL);
 151 
 152         for (c = 0; c < children; c++) {
 153                 if ((ret = find_vdev(hdl, zhdl, child[c], search_fru,
 154                     search_devid, search_guid)) != NULL)
 155                         return (ret);
 156         }
 157 
 158         return (NULL);
 159 }
 160 
 161 /*
 162  * Given a (pool, vdev) GUID pair, find the matching pool and vdev.
 163  */
 164 static zpool_handle_t *
 165 find_by_guid(fmd_hdl_t *hdl, libzfs_handle_t *zhdl, uint64_t pool_guid,
 166     uint64_t vdev_guid, nvlist_t **vdevp)
 167 {
 168         find_cbdata_t cb;
 169         zpool_handle_t *zhp;
 170         nvlist_t *config, *nvroot;
 171 
 172         /*
 173          * Find the corresponding pool and make sure the vdev still exists.
 174          */
 175         cb.cb_guid = pool_guid;
 176         if (zpool_iter(zhdl, find_pool, &cb) != 1)
 177                 return (NULL);
 178 
 179         zhp = cb.cb_zhp;
 180         config = zpool_get_config(zhp, NULL);
 181         if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
 182             &nvroot) != 0) {
 183                 zpool_close(zhp);
 184                 return (NULL);
 185         }
 186 
 187         if (vdev_guid != 0) {
 188                 if ((*vdevp = find_vdev(hdl, zhdl, nvroot, NULL, NULL,
 189                     vdev_guid)) == NULL) {
 190                         zpool_close(zhp);
 191                         return (NULL);
 192                 }
 193         }
 194 
 195         return (zhp);
 196 }
 197 
 198 static int
 199 search_pool(zpool_handle_t *zhp, void *data)
 200 {
 201         find_cbdata_t *cbp = data;
 202         nvlist_t *config;
 203         nvlist_t *nvroot;
 204 
 205         config = zpool_get_config(zhp, NULL);
 206         if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
 207             &nvroot) != 0) {
 208                 zpool_close(zhp);
 209                 fmd_hdl_debug(cbp->cb_hdl, "search_pool: "
 210                     "unable to get vdev tree");
 211                 return (0);
 212         }
 213 
 214         if ((cbp->cb_vdev = find_vdev(cbp->cb_hdl, zpool_get_handle(zhp),
 215             nvroot, cbp->cb_fru, cbp->cb_devid, cbp->cb_guid)) != NULL) {
 216                 cbp->cb_zhp = zhp;
 217                 return (1);
 218         }
 219 
 220         zpool_close(zhp);
 221         return (0);
 222 }
 223 
 224 /*
 225  * Given a FRU FMRI, devid, or guid: find the matching pool and vdev.
 226  */
 227 static zpool_handle_t *
 228 find_by_anything(fmd_hdl_t *hdl, libzfs_handle_t *zhdl, const char *fru,
 229     ddi_devid_t devid, uint64_t guid, nvlist_t **vdevp)
 230 {
 231         find_cbdata_t cb;
 232 
 233         (void) memset(&cb, 0, sizeof (cb));
 234         cb.cb_hdl = hdl;
 235         cb.cb_fru = fru;
 236         cb.cb_devid = devid;
 237         cb.cb_guid = guid;
 238         cb.cb_zhp = NULL;
 239 
 240         if (zpool_iter(zhdl, search_pool, &cb) != 1)
 241                 return (NULL);
 242 
 243         *vdevp = cb.cb_vdev;
 244         return (cb.cb_zhp);
 245 }
 246 
 247 /*
 248  * Create a solved FMD case and add the fault to it
 249  */
 250 static void
 251 generate_fault(fmd_hdl_t *hdl, nvlist_t *vdev, char *faultname)
 252 {
 253         char *devid, *fdevid, *physpath, *s;
 254         fmd_case_t *c;
 255         fmd_hdl_topo_node_info_t *node;
 256         nvlist_t *fault = NULL;
 257         uint64_t wd;
 258 
 259         assert(hdl != NULL);
 260         assert(vdev != NULL);
 261         assert(faultname != NULL);
 262 
 263         if (nvlist_lookup_string(vdev, ZPOOL_CONFIG_PHYS_PATH,
 264             &physpath) != 0 ||
 265             nvlist_lookup_uint64(vdev, ZPOOL_CONFIG_WHOLE_DISK, &wd) != 0)
 266                 return;
 267 
 268         if (nvlist_lookup_string(vdev, ZPOOL_CONFIG_DEVID,
 269             &devid) == 0) {
 270                 fdevid = strdup(devid);
 271         } else {
 272                 fdevid = devid_str_from_path(physpath);
 273         }
 274         if (fdevid == NULL) {
 275                 fmd_hdl_debug(hdl, "%s: failed to get devid", __func__);
 276                 return;
 277         }
 278 
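              /*
               * For whole-disk vdevs the devid string carries a trailing
               * minor-name component after the last '/'; strip it before
               * using the devid to look up the topo node and key the case.
               */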
 279         if (wd && (s = strrchr(fdevid, '/')) != NULL)
 280                 *s = '\0';
 281 
 282         c = fmd_case_open(hdl, NULL);
 283         if ((node = fmd_hdl_topo_node_get_by_devid(hdl, fdevid)) == NULL) {
 284                 fault = fmd_nvl_create_fault(hdl, faultname, 100, NULL, vdev,
 285                     NULL);
 286         } else {
 287                 fault = fmd_nvl_create_fault(hdl, faultname, 100,
 288                     node->resource, node->fru, node->resource);
 289                 nvlist_free(node->fru);
 290                 nvlist_free(node->resource);
 291                 fmd_hdl_free(hdl, node,
 292                     sizeof (fmd_hdl_topo_node_info_t));
 293         }
 294         fmd_case_add_suspect(hdl, c, fault);
 295         fmd_case_setspecific(hdl, c, fdevid);
 296         fmd_case_solve(hdl, c);
 297 
 298         devid_str_free(fdevid);
 299         fmd_hdl_debug(hdl, "%s: dispatched %s", __func__, faultname);
 300 }
 301 
 302 /*
 303  * Determine if the FRU fields for the spare and the failed device match.
 304  */
 305 static boolean_t
 306 match_fru(fmd_hdl_t *hdl, char *ffru, nvlist_t *spare)
 307 {
 308         char *sfru;
 309         boolean_t ret = B_FALSE;
 310 
 311         if (nvlist_lookup_string(spare, ZPOOL_CONFIG_FRU, &sfru) != 0) {
 312                 fmd_hdl_debug(hdl, "%s: spare FRU not set", __func__);
 313                 return (B_FALSE);
 314         }
 315 
 316         /* We match on enclosure only at the moment */
 317         ret = libzfs_fru_cmp_enclosure(ffru, sfru);
 318         if (!ret)
 319                 fmd_hdl_debug(hdl, "%s: enclosure not matched", __func__);
 320 
 321         return (ret);
 322 }
 323 
 324 static boolean_t
 325 do_replace(zpool_handle_t *zhp, const char *fpath, const char *spath,
 326     nvlist_t *spare)
 327 {
 328         nvlist_t *nvroot;
 329         boolean_t ret = B_FALSE;
 330 
 331         if (nvlist_alloc(&nvroot, NV_UNIQUE_NAME, 0) != 0)
 332                 return (B_FALSE);
 333 
 334         if (nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE, VDEV_TYPE_ROOT) != 0 ||
 335             nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
 336             &spare, 1) != 0)
 337                 goto fail;
 338 
 339         ret = (zpool_vdev_attach(zhp, fpath, spath, nvroot, B_TRUE) == 0);
 340 
 341 fail:
 342         nvlist_free(nvroot);
 343         return (ret);
 344 }
 345 
 346 /*
 347  * Attempt to replace failed device with spare.
 348  *
 349  * Spare selection is done in the following order:
  350  * - If the failed device has the sparegroup property set, look for spares that
  351  *   belong to the same sparegroup.  If no suitable spare is found, skip the
  352  *   spares that have the sparegroup property set while doing other match types.
  353  * - If the failed device has a FRU set, look for spares in the same enclosure.
 354  * - Finally, try using any available spare.
 355  *
 356  * Note that all match types do a media-type match first, so that we don't
  357  * replace an HDD with an SSD or vice versa.
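       *
       * The loop below makes up to three passes over the spares list (via
       * the 'again' label): a sparegroup match, then a FRU (enclosure)
       * match, and finally any spare of the same media type.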
 358  */
 359 static void
 360 replace_with_spare(fmd_hdl_t *hdl, zpool_handle_t *zhp, nvlist_t *vdev)
 361 {
 362         nvlist_t *config, *nvroot, **spares;
 363         uint_t i, nspares;
 364         boolean_t uu1, uu2, log;
 365         char *devpath;
 366         char fdevpath[PATH_MAX];        /* devpath of failed device */
 367         char *ffru = NULL;              /* FRU of failed device */
 368         char fsg[MAXNAMELEN];           /* sparegroup of failed device */
 369         boolean_t use_sg = B_FALSE;     /* do sparegroup matching */
 370         boolean_t done_sg = B_FALSE;    /* done sparegroup matching */
 371         boolean_t use_fru = B_FALSE;    /* do FRU matching */
 372         boolean_t done_fru = B_FALSE;   /* done FRU matching */
 373         boolean_t fssd = B_FALSE;       /* failed device is SSD */
 374         uint64_t wd;
 375 
 376         if ((config = zpool_get_config(zhp, NULL)) == NULL ||
 377             nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nvroot) != 0)
 378                 return;
 379 
 380         /* Check if there are any hot spares available in the pool */
 381         if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, &spares,
 382             &nspares) != 0) {
 383                 fmd_hdl_debug(hdl, "%s: no spares found", __func__);
 384                 return;
 385         }
 386 
 387         if (nvlist_lookup_string(vdev, ZPOOL_CONFIG_PATH, &devpath) != 0 ||
 388             nvlist_lookup_uint64(vdev, ZPOOL_CONFIG_WHOLE_DISK, &wd) != 0 ||
 389             nvlist_lookup_boolean_value(vdev, ZPOOL_CONFIG_IS_SSD, &fssd) != 0)
 390                 return;
 391         (void) strlcpy(fdevpath, devpath, sizeof (fdevpath));
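              /*
               * Whole-disk vdevs store their path with a trailing slice
               * suffix (e.g. "s0"); chop the last two characters to get the
               * device name used for property lookups and the attach below.
               */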
 392         if (wd)
 393                 fdevpath[strlen(fdevpath) - 2] = '\0';
 394 
 395         /* Spares can't replace log devices */
 396         (void) zpool_find_vdev(zhp, fdevpath, &uu1, &uu2, &log, NULL);
 397         if (log)
 398                 return;
 399 
 400         /* Check if we should do sparegroup matching */
 401         if (vdev_get_prop(zhp, fdevpath, VDEV_PROP_SPAREGROUP, fsg,
 402             sizeof (fsg)) == 0 && strcmp(fsg, "-") != 0)
 403                 use_sg = B_TRUE;
 404 
 405         use_fru = (fmd_prop_get_int32(hdl, "fru_compare") == FMD_B_TRUE);
 406         /* Disable FRU matching if failed device doesn't have FRU set */
 407         if (nvlist_lookup_string(vdev, ZPOOL_CONFIG_FRU, &ffru) != 0)
 408                 use_fru = B_FALSE;
 409 
 410 again:
 411         /* Go through the spares list */
 412         for (i = 0; i < nspares; i++) {
 413                 char sdevpath[PATH_MAX];        /* devpath of spare */
 414                 char ssg[MAXNAMELEN];           /* sparegroup of spare */
 415                 boolean_t sssd = B_FALSE;       /* spare is SSD */
 416                 boolean_t ssg_set = B_FALSE;
 417 
 418                 if (nvlist_lookup_string(spares[i], ZPOOL_CONFIG_PATH,
 419                     &devpath) != 0 ||
 420                     nvlist_lookup_uint64(spares[i], ZPOOL_CONFIG_WHOLE_DISK,
 421                     &wd) != 0)
 422                         continue;
 423 
 424                 (void) strlcpy(sdevpath, devpath, sizeof (sdevpath));
 425                 if (wd)
 426                         sdevpath[strlen(sdevpath) - 2] = '\0';
 427 
 428                 /* Don't swap HDD for SSD and vice versa */
 429                 if (nvlist_lookup_boolean_value(spares[i], ZPOOL_CONFIG_IS_SSD,
 430                     &sssd) != 0 || fssd != sssd) {
 431                         continue;
 432                 }
 433 
 434                 /* Get the sparegroup property for the spare */
 435                 if (vdev_get_prop(zhp, sdevpath, VDEV_PROP_SPAREGROUP, ssg,
 436                     sizeof (ssg)) == 0 && strcmp(ssg, "-") != 0)
 437                         ssg_set = B_TRUE;
 438 
 439                 if (use_sg) {
 440                         if (!ssg_set || strcmp(fsg, ssg) != 0)
 441                                 continue;
  442                         /* Found spare in the same group */
 443                         if (do_replace(zhp, fdevpath, sdevpath, spares[i]))
 444                                 return;
 445                         continue;
 446                 }
 447 
 448                 /*
 449                  * If we tried matching on sparegroup and have not found
 450                  * any suitable spare, skip all spares with sparegroup
 451                  * set.
 452                  */
 453                 if (done_sg && ssg_set)
 454                         continue;
 455 
 456                 if (use_fru) {
 457                         if (!match_fru(hdl, ffru, spares[i]))
 458                                 continue;
 459                         /* Found spare with matching FRU */
 460                         if (do_replace(zhp, fdevpath, sdevpath, spares[i]))
 461                                 return;
 462                         continue;
 463                 }
 464 
 465                 /*
  466                  * sparegroup and FRU matching were either not used or didn't
  467                  * find a suitable spare, so use the first available one.
 468                  */
 469                 if (do_replace(zhp, fdevpath, sdevpath, spares[i])) {
  470                         /* If we tried intelligent sparing, generate a fault */
 471                         if (done_sg || done_fru) {
 472                                 generate_fault(hdl, vdev,
 473                                     "fault.fs.zfs.vdev.dumb_spared");
 474                         }
 475                         return;
 476                 }
 477         }
 478 
 479         if (use_sg) {
 480                 done_sg = B_TRUE;
 481                 use_sg = B_FALSE;
 482                 goto again;
 483         } else if (use_fru) {
 484                 done_fru = B_TRUE;
 485                 use_fru = B_FALSE;
 486                 goto again;
 487         }
 488 
 489         generate_fault(hdl, vdev, "fault.fs.zfs.vdev.not_spared");
 490 }
 491 
 492 /*
 493  * Repair this vdev if we had diagnosed a 'fault.fs.zfs.device' and
  494  * the ASRU is now usable.  ZFS has found the device to be present and
 495  * functioning.
 496  */
 497 /*ARGSUSED*/
 498 void
 499 zfs_vdev_repair(fmd_hdl_t *hdl, nvlist_t *nvl)
 500 {
 501         zfs_retire_data_t *zdp = fmd_hdl_getspecific(hdl);
 502         zfs_retire_repaired_t *zrp;
 503         uint64_t pool_guid, vdev_guid;
 504         nvlist_t *asru;
 505 
 506         if (nvlist_lookup_uint64(nvl, FM_EREPORT_PAYLOAD_ZFS_POOL_GUID,
 507             &pool_guid) != 0 || nvlist_lookup_uint64(nvl,
 508             FM_EREPORT_PAYLOAD_ZFS_VDEV_GUID, &vdev_guid) != 0)
 509                 return;
 
 554             asru, FMD_HAS_FAULT_ASRU, NULL)) {
 555                 topo_hdl_t *thp;
 556                 char *fmri = NULL;
 557                 int err;
 558 
 559                 thp = fmd_hdl_topo_hold(hdl, TOPO_VERSION);
 560                 if (topo_fmri_nvl2str(thp, asru, &fmri, &err) == 0)
 561                         (void) fmd_repair_asru(hdl, fmri);
 562                 fmd_hdl_topo_rele(hdl, thp);
 563 
 564                 topo_hdl_strfree(thp, fmri);
 565         }
 566         nvlist_free(asru);
 567         zrp = fmd_hdl_alloc(hdl, sizeof (zfs_retire_repaired_t), FMD_SLEEP);
 568         zrp->zrr_next = zdp->zrd_repaired;
 569         zrp->zrr_pool = pool_guid;
 570         zrp->zrr_vdev = vdev_guid;
 571         zdp->zrd_repaired = zrp;
 572 }
 573 
 574 static int
 575 zfs_get_vdev_state(fmd_hdl_t *hdl, libzfs_handle_t *zhdl, zpool_handle_t *zhp,
 576     uint64_t vdev_guid, nvlist_t **vdev)
 577 {
 578         nvlist_t *config, *nvroot;
 579         vdev_stat_t *vs;
 580         uint_t cnt;
 581         boolean_t missing;
 582 
 583         if (zpool_refresh_stats(zhp, &missing) != 0 ||
 584             missing != B_FALSE) {
 585                 fmd_hdl_debug(hdl, "zfs_get_vdev_state: can't refresh stats");
 586                 return (VDEV_STATE_UNKNOWN);
 587         }
 588 
 589         config = zpool_get_config(zhp, NULL);
 590         if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
 591             &nvroot) != 0) {
 592                 fmd_hdl_debug(hdl, "zfs_get_vdev_state: can't get vdev tree");
 593                 return (VDEV_STATE_UNKNOWN);
 594         }
 595 
 596         *vdev = find_vdev(hdl, zhdl, nvroot, NULL, NULL, vdev_guid);
 597 
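              /*
               * If the vdev wasn't found, *vdev is NULL and the stats lookup
               * below fails, so we return VDEV_STATE_UNKNOWN.
               */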
 598         if (nvlist_lookup_uint64_array(*vdev, ZPOOL_CONFIG_VDEV_STATS,
 599             (uint64_t **)&vs, &cnt) != 0) {
 600                 fmd_hdl_debug(hdl, "zfs_get_vdev_state: can't get vdev stats");
 601                 return (VDEV_STATE_UNKNOWN);
 602         }
 603 
 604         return (vs->vs_state);
 605 }
 606 
 607 int
 608 zfs_retire_device(fmd_hdl_t *hdl, char *path, boolean_t retire)
 609 {
 610         di_retire_t drt = {0};
 611         int err;
 612 
 613         drt.rt_abort = (void (*)(void *, const char *, ...))fmd_hdl_abort;
 614         drt.rt_debug = (void (*)(void *, const char *, ...))fmd_hdl_debug;
 615         drt.rt_hdl = hdl;
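              /* Route libdevinfo retire/unretire messages through fmd's handlers. */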
 616 
 617         fmd_hdl_debug(hdl, "zfs_retire_device: "
 618             "attempting to %sretire %s", retire ? "" : "un", path);
 619 
 620         err = retire ?
 621             di_retire_device(path, &drt, 0) :
 622             di_unretire_device(path, &drt);
 623 
 624         if (err != 0)
  625                 fmd_hdl_debug(hdl, "zfs_retire_device: "
 626                     "di_%sretire_device failed: %d %s",
 627                     retire ? "" : "un", err, path);
 628 
 629         return (err);
 630 }
 631 
 632 /*ARGSUSED*/
 633 static void
 634 zfs_retire_recv(fmd_hdl_t *hdl, fmd_event_t *ep, nvlist_t *nvl,
 635     const char *class)
 636 {
 637         uint64_t pool_guid, vdev_guid;
 638         zpool_handle_t *zhp;
 639         nvlist_t *resource, *fault, *fru, *asru;
 640         nvlist_t **faults;
 641         uint_t f, nfaults;
 642         zfs_retire_data_t *zdp = fmd_hdl_getspecific(hdl);
 643         libzfs_handle_t *zhdl = zdp->zrd_hdl;
 644         boolean_t fault_device, degrade_device;
 645         boolean_t is_repair;
 646         char *scheme = NULL, *fmri = NULL, *devidstr = NULL, *path = NULL;
  647         ddi_devid_t devid = NULL;
 648         nvlist_t *vdev;
 649         char *uuid;
 650         int repair_done = 0;
 651         boolean_t retire;
 652         boolean_t is_disk;
 653         boolean_t retire_device = B_FALSE;
 654         vdev_aux_t aux;
 655         topo_hdl_t *thp = NULL;
 656         int err;
 657 
 658         /*
 659          * If this is a resource notifying us of device removal, then simply
 660          * check for an available spare and continue.
 661          */
 662         if (strcmp(class, "resource.fs.zfs.removed") == 0) {
 663                 if (nvlist_lookup_uint64(nvl, FM_EREPORT_PAYLOAD_ZFS_POOL_GUID,
 664                     &pool_guid) != 0 ||
 665                     nvlist_lookup_uint64(nvl, FM_EREPORT_PAYLOAD_ZFS_VDEV_GUID,
 666                     &vdev_guid) != 0)
 667                         return;
 668 
 669                 if ((zhp = find_by_guid(hdl, zhdl, pool_guid, vdev_guid,
 670                     &vdev)) == NULL)
 671                         return;
 672 
 673                 if (fmd_prop_get_int32(hdl, "spare_on_remove"))
 674                         replace_with_spare(hdl, zhp, vdev);
 675                 zpool_close(zhp);
 676                 return;
 677         }
 678 
 679         if (strcmp(class, FM_LIST_RESOLVED_CLASS) == 0)
 680                 return;
 681 
 682         if (strcmp(class, "resource.fs.zfs.statechange") == 0 ||
 683             strcmp(class,
 684             "resource.sysevent.EC_zfs.ESC_ZFS_vdev_remove") == 0) {
 685                 zfs_vdev_repair(hdl, nvl);
 686                 return;
 687         }
 688 
 689         zfs_retire_clear_data(hdl, zdp);
 
 693         else
 694                 is_repair = B_FALSE;
 695 
 696         /*
 697          * We subscribe to zfs faults as well as all repair events.
 698          */
 699         if (nvlist_lookup_nvlist_array(nvl, FM_SUSPECT_FAULT_LIST,
 700             &faults, &nfaults) != 0)
 701                 return;
 702 
 703         for (f = 0; f < nfaults; f++) {
 704                 fault = faults[f];
 705 
  706                 fault_device = B_FALSE;
  707                 degrade_device = B_FALSE;
  708                 is_disk = B_FALSE;
                      retire_device = B_FALSE;
                      path = NULL;
 709 
 710                 if (nvlist_lookup_boolean_value(fault, FM_SUSPECT_RETIRE,
 711                     &retire) == 0 && retire == 0)
 712                         continue;
 713                 if (fmd_nvl_class_match(hdl, fault, "fault.io.disk.slow-io") &&
 714                     fmd_prop_get_int32(hdl, "slow_io_skip_retire") ==
 715                     FMD_B_TRUE) {
 716                         fmd_hdl_debug(hdl, "ignoring slow io fault");
 717                         continue;
 718                 }
 719 
 720                 if (fmd_nvl_class_match(hdl, fault,
 721                     "fault.io.disk.ssm-wearout") &&
 722                     fmd_prop_get_int32(hdl, "ssm_wearout_skip_retire") ==
 723                     FMD_B_TRUE) {
 724                         fmd_hdl_debug(hdl, "zfs-retire: ignoring SSM fault");
 725                         continue;
 726                 }
 727 
 736                 /*
 737                  * While we subscribe to fault.fs.zfs.*, we only take action
 738                  * for faults targeting a specific vdev (open failure or SERD
 739                  * failure).  We also subscribe to fault.io.* events, so that
 740                  * faulty disks will be faulted in the ZFS configuration.
 741                  */
 742                 if (fmd_nvl_class_match(hdl, fault, "fault.fs.zfs.vdev.io")) {
 743                         fault_device = B_TRUE;
 744                 } else if (fmd_nvl_class_match(hdl, fault,
 745                     "fault.fs.zfs.vdev.checksum")) {
 746                         degrade_device = B_TRUE;
 747                 } else if (fmd_nvl_class_match(hdl, fault,
 748                     "fault.fs.zfs.vdev.timeout")) {
 749                         fault_device = B_TRUE;
 750                 } else if (fmd_nvl_class_match(hdl, fault,
 751                     "fault.fs.zfs.device")) {
 752                         fault_device = B_FALSE;
 753                 } else if (fmd_nvl_class_match(hdl, fault, "fault.io.disk.*") ||
 754                     fmd_nvl_class_match(hdl, fault, "fault.io.scsi.*")) {
 755                         is_disk = B_TRUE;
 756                         fault_device = B_TRUE;
 757                 } else {
 758                         continue;
 759                 }
 760 
 761                 if (is_disk) {
 762                         /*
 763                          * This is a disk fault.  Lookup the FRU and ASRU,
 764                          * convert them to FMRI and devid strings, and attempt
 765                          * to find a matching vdev. If no vdev is found, the
 766                          * device might still be retired/unretired.
 767                          */
 768                         if (nvlist_lookup_nvlist(fault, FM_FAULT_FRU,
 769                             &fru) != 0 ||
 770                             nvlist_lookup_string(fru, FM_FMRI_SCHEME,
 771                             &scheme) != 0) {
 772                                 fmd_hdl_debug(hdl,
 773                                     "zfs_retire_recv: unable to get FRU");
 774                                 goto nofru;
 775                         }
 776 
 777                         if (strcmp(scheme, FM_FMRI_SCHEME_HC) != 0) {
 778                                 fmd_hdl_debug(hdl,
 779                                     "zfs_retire_recv: not hc scheme: %s",
 780                                     scheme);
 781                                 goto nofru;
 782                         }
 783 
 784                         thp = fmd_hdl_topo_hold(hdl, TOPO_VERSION);
 785                         if (topo_fmri_nvl2str(thp, fru, &fmri, &err) != 0) {
 786                                 fmd_hdl_topo_rele(hdl, thp);
 787                                 fmd_hdl_debug(hdl,
 788                                     "zfs_retire_recv: unable to get FMRI");
 789                                 goto nofru;
 790                         }
 791 
 792                         fmd_hdl_debug(hdl, "zfs_retire_recv: got FMRI %s",
 793                             fmri);
 794 
 795                 nofru:
 796                         if (nvlist_lookup_nvlist(fault, FM_FAULT_ASRU,
 797                             &asru) != 0 ||
 798                             nvlist_lookup_string(asru, FM_FMRI_SCHEME,
 799                             &scheme) != 0) {
 800                                 fmd_hdl_debug(hdl,
 801                                     "zfs_retire_recv: unable to get ASRU");
 802                                 goto nodevid;
 803                         }
 804 
 805                         if (strcmp(scheme, FM_FMRI_SCHEME_DEV) != 0) {
 806                                 fmd_hdl_debug(hdl,
 807                                     "zfs_retire_recv: not dev scheme: %s",
 808                                     scheme);
 809                                 goto nodevid;
 810                         }
 811 
 812                         if (nvlist_lookup_string(asru, FM_FMRI_DEV_ID,
 813                             &devidstr) != 0) {
 814                                 fmd_hdl_debug(hdl,
 815                                     "zfs_retire_recv: couldn't get devid");
 816                                 goto nodevid;
 817                         }
 818 
 819                         fmd_hdl_debug(hdl, "zfs_retire_recv: got devid %s",
 820                             devidstr);
 821 
 822                         if (devid_str_decode(devidstr, &devid, NULL) != 0) {
 823                                 fmd_hdl_debug(hdl,
 824                                     "zfs_retire_recv: devid_str_decode failed");
 825                                 goto nodevid;
 826                         }
 827 
 828                         if (nvlist_lookup_string(asru, FM_FMRI_DEV_PATH,
 829                             &path) != 0) {
 830                                 fmd_hdl_debug(hdl,
 831                                     "zfs_retire_recv: couldn't get path, "
 832                                     "won't be able to retire device");
 833                                 goto nodevid;
 834                         }
 835 
 836                         fmd_hdl_debug(hdl, "zfs_retire_recv: got path %s",
 837                             path);
 838 
 839                 nodevid:
 840                         zhp = find_by_anything(hdl, zhdl, fmri, devid, 0,
 841                             &vdev);
  842                         if (fmri) {
  843                                 topo_hdl_strfree(thp, fmri);
  844                                 fmd_hdl_topo_rele(hdl, thp);
                                      fmri = NULL;
  845                         }
  846                         if (devid) {
  847                                 devid_free(devid);
                                      devid = NULL;
                              }
 848 
 849                         if (zhp == NULL) {
 850                                 fmd_hdl_debug(hdl, "zfs_retire_recv: no zhp");
 851                                 if (path != NULL)
 852                                         (void) zfs_retire_device(hdl, path,
 853                                             !is_repair);
 854                                 continue;
 855                         }
 856 
 857                         (void) nvlist_lookup_uint64(vdev, ZPOOL_CONFIG_GUID,
 858                             &vdev_guid);
 859 
 860                         fmd_hdl_debug(hdl, "zfs_retire_recv: found vdev GUID: %"
 861                             PRIx64, vdev_guid);
 862 
 863                         aux = VDEV_AUX_EXTERNAL;
 864                 } else {
 865                         /*
 866                          * This is a ZFS fault.  Lookup the resource, and
 867                          * attempt to find the matching vdev.
 868                          */
 869                         if (nvlist_lookup_nvlist(fault, FM_FAULT_RESOURCE,
 870                             &resource) != 0 ||
 871                             nvlist_lookup_string(resource, FM_FMRI_SCHEME,
 872                             &scheme) != 0)
 873                                 continue;
 874 
 875                         if (strcmp(scheme, FM_FMRI_SCHEME_ZFS) != 0)
 876                                 continue;
 877 
 878                         if (nvlist_lookup_uint64(resource, FM_FMRI_ZFS_POOL,
 879                             &pool_guid) != 0)
 880                                 continue;
 881 
 882                         if (nvlist_lookup_uint64(resource, FM_FMRI_ZFS_VDEV,
 883                             &vdev_guid) != 0) {
 884                                 if (is_repair)
 885                                         vdev_guid = 0;
 886                                 else
 887                                         continue;
 888                         }
 889 
 890                         if ((zhp = find_by_guid(hdl, zhdl, pool_guid, vdev_guid,
 891                             &vdev)) == NULL)
 892                                 continue;
 893 
 894                         if (fmd_nvl_class_match(hdl, fault,
 895                             "fault.fs.zfs.vdev.open_failed"))
 896                                 aux = VDEV_AUX_OPEN_FAILED;
 897                         else
 898                                 aux = VDEV_AUX_ERR_EXCEEDED;
 899                 }
 900 
 901                 if (vdev_guid == 0) {
 902                         /*
 903                          * For pool-level repair events, clear the entire pool.
 904                          */
 905                         (void) zpool_clear(zhp, NULL, NULL);
 906                         zpool_close(zhp);
 907                         continue;
 908                 }
 909 
 910                 /*
 911                  * If this is a repair event, then mark the vdev as repaired and
 912                  * continue.
 913                  */
 914                 if (is_repair) {
 915                         if (is_disk && path != NULL &&
 916                             zfs_retire_device(hdl, path, B_FALSE) != 0)
 917                                 continue;
 918 
 919                         repair_done = 1;
 920                         (void) zpool_vdev_clear(zhp, vdev_guid);
 921                         zpool_close(zhp);
 922                         continue;
 923                 }
 924 
 925                 /*
 926                  * Actively fault the device if needed.
 927                  */
 928                 if (fault_device) {
 929                         (void) zpool_vdev_fault(zhp, vdev_guid, aux);
 930 
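                              /*
                               * Only retire the underlying device if ZFS
                               * actually moved the vdev to the FAULTED state.
                               */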
 931                         if (zfs_get_vdev_state(hdl, zhdl, zhp, vdev_guid, &vdev)
 932                             == VDEV_STATE_FAULTED)
 933                                 retire_device = B_TRUE;
 934                 }
 935 
 936                 if (degrade_device)
 937                         (void) zpool_vdev_degrade(zhp, vdev_guid, aux);
 938 
 939                 /*
 940                  * Attempt to substitute a hot spare.
 941                  */
 942                 replace_with_spare(hdl, zhp, vdev);
 943                 zpool_close(zhp);
 944 
 945                 if (is_disk && retire_device && path != NULL)
 946                         (void) zfs_retire_device(hdl, path, B_TRUE);
 947         }
 948 
 949         if (strcmp(class, FM_LIST_REPAIRED_CLASS) == 0 && repair_done &&
 950             nvlist_lookup_string(nvl, FM_SUSPECT_UUID, &uuid) == 0)
 951                 fmd_case_uuresolved(hdl, uuid);
 952 }
 953 
 954 static const fmd_hdl_ops_t fmd_ops = {
 955         zfs_retire_recv,        /* fmdo_recv */
 956         NULL,                   /* fmdo_timeout */
 957         NULL,                   /* fmdo_close */
 958         NULL,                   /* fmdo_stats */
 959         NULL,                   /* fmdo_gc */
 960 };
 961 
 962 static const fmd_prop_t fmd_props[] = {
 963         { "spare_on_remove", FMD_TYPE_BOOL, "true" },
 964         { "slow_io_skip_retire", FMD_TYPE_BOOL, "true"},
 965         { "ssm_wearout_skip_retire", FMD_TYPE_BOOL, "true"},
 966         { "fru_compare", FMD_TYPE_BOOL, "true"},
 967         { NULL, 0, NULL }
 968 };
 969 
 970 static const fmd_hdl_info_t fmd_info = {
 971         "ZFS Retire Agent", "1.1", &fmd_ops, fmd_props
 972 };
 973 
 974 void
 975 _fmd_init(fmd_hdl_t *hdl)
 976 {
 977         zfs_retire_data_t *zdp;
 978         libzfs_handle_t *zhdl;
 979 
 980         if ((zhdl = libzfs_init()) == NULL)
 981                 return;
 982 
 983         if (fmd_hdl_register(hdl, FMD_API_VERSION, &fmd_info) != 0) {
 984                 libzfs_fini(zhdl);
 985                 return;
 986         }
 987 
 988         zdp = fmd_hdl_zalloc(hdl, sizeof (zfs_retire_data_t), FMD_SLEEP);
 989         zdp->zrd_hdl = zhdl;
 990 
 991         fmd_hdl_setspecific(hdl, zdp);
}