/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012 by Delphix. All rights reserved.
 * Copyright 2017 Nexenta Systems, Inc.
 */

/*
 * ZFS syseventd module.
 *
 * The purpose of this module is to process ZFS-related events.
 *
 * EC_DEV_ADD
 *  ESC_DISK            Search for associated vdevs matching devid, physpath,
 *                      or FRU, and appropriately online or replace the device.
 *
 * EC_DEV_STATUS
 *  ESC_DEV_DLE         Device capacity dynamically changed.  Process the
 *                      change according to the 'autoexpand' property.
 *
 * EC_ZFS
 *  ESC_ZFS_VDEV_CHECK  This event indicates that a device failed to open
 *                      during pool load, but the autoreplace property was
 *                      set.  In this case the associated FMA fault was
 *                      deferred until the module had a chance to process the
 *                      autoreplace logic.  If the device could not be
 *                      replaced, then the second online attempt will trigger
 *                      the FMA fault that was skipped earlier.
 *  ESC_ZFS_VDEV_ADD
 *  ESC_ZFS_VDEV_ATTACH
 *  ESC_ZFS_VDEV_CLEAR
 *  ESC_ZFS_VDEV_ONLINE
 *  ESC_ZFS_POOL_CREATE
 *  ESC_ZFS_POOL_IMPORT All of the above events trigger an update of the FRU
 *                      for all associated devices.
 */

#include <alloca.h>
#include <devid.h>
#include <fcntl.h>
#include <libnvpair.h>
#include <libsysevent.h>
#include <libzfs.h>
#include <limits.h>
#include <stdlib.h>
#include <string.h>
#include <sys/list.h>
#include <sys/sunddi.h>
#include <sys/fs/zfs.h>
#include <sys/sysevent/eventdefs.h>
#include <sys/sysevent/dev.h>
#include <thread_pool.h>
#include <unistd.h>
#include "syseventd.h"

#if defined(__i386) || defined(__amd64)
#define	WD_MINOR	":q"
#elif defined(__sparc)
#define	WD_MINOR	":c"
#else
#error Unknown architecture
#endif

#define	DEVICE_PREFIX	"/devices"

typedef void (*zfs_process_func_t)(zpool_handle_t *, nvlist_t *, const char *);

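/*
 * Module-global state: the shared libzfs handle, the list of pools that were
 * unavailable when the module started, the thread pool used to re-enable
 * their datasets, a flag noting that the initial enumeration has finished,
 * and the thread performing that enumeration.
 */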
libzfs_handle_t *g_zfshdl;
list_t g_pool_list;
tpool_t *g_tpool;
boolean_t g_enumeration_done;
thread_t g_zfs_tid;

typedef struct unavailpool {
	zpool_handle_t	*uap_zhp;
	list_node_t	uap_node;
} unavailpool_t;

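/*
 * Return the state of the top-level vdev for the given pool, as recorded in
 * the pool's vdev statistics.
 */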
int
zfs_toplevel_state(zpool_handle_t *zhp)
{
	nvlist_t *nvroot;
	vdev_stat_t *vs;
	unsigned int c;

	verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
	    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
	verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_VDEV_STATS,
	    (uint64_t **)&vs, &c) == 0);
	return (vs->vs_state);
}

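/*
 * zpool_iter() callback: remember pools whose top-level vdev is neither
 * healthy nor degraded so their datasets can be enabled once the device
 * becomes available; close the handles of healthy pools.
 */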
static int
zfs_unavail_pool(zpool_handle_t *zhp, void *data)
{
	if (zfs_toplevel_state(zhp) < VDEV_STATE_DEGRADED) {
		unavailpool_t *uap;

		/* If allocation fails, just skip tracking this pool */
		if ((uap = malloc(sizeof (unavailpool_t))) == NULL) {
			zpool_close(zhp);
			return (0);
		}
		uap->uap_zhp = zhp;
		list_insert_tail((list_t *)data, uap);
	} else {
		zpool_close(zhp);
	}
	return (0);
}

/*
 * The device associated with the given vdev (matched by devid, physical path,
 * or FRU) has been added to the system.
 */
static void
zfs_process_add(zpool_handle_t *zhp, nvlist_t *vdev, const char *newrawpath)
{
	vdev_state_t newstate;
	nvlist_t *nvroot = NULL, *newvd = NULL;
	uint64_t wholedisk = 0ULL;
	uint64_t offline = 0ULL;
	boolean_t avail_spare, l2cache;
	const char *zc_type = ZPOOL_CONFIG_CHILDREN;
	char *devpath;			/* current /dev path */
	char *physpath;			/* current /devices node */
	char fullpath[PATH_MAX];	/* current /dev path without slice */
	char fullphyspath[PATH_MAX];	/* full /devices phys path */
	char newdevpath[PATH_MAX];	/* new /dev path */
	char newphyspath[PATH_MAX];	/* new /devices node */
	char diskname[PATH_MAX];	/* disk device without /dev and slice */
	const char *adevid = NULL;	/* devid to attach */
	const char *adevpath;		/* /dev path to attach */
	const char *aphyspath = NULL;	/* /devices node to attach */
	zpool_boot_label_t boot_type;
	uint64_t boot_size;

	if (nvlist_lookup_string(vdev, ZPOOL_CONFIG_PATH, &devpath) != 0)
		return;
	(void) nvlist_lookup_string(vdev, ZPOOL_CONFIG_PHYS_PATH, &physpath);
	(void) nvlist_lookup_uint64(vdev, ZPOOL_CONFIG_WHOLE_DISK, &wholedisk);
	(void) nvlist_lookup_uint64(vdev, ZPOOL_CONFIG_OFFLINE, &offline);

	/* Do nothing if vdev is explicitly marked offline */
	if (offline)
		return;

	(void) strlcpy(fullpath, devpath, sizeof (fullpath));
	/* Chop off the slice for whole disks */
	if (wholedisk)
		fullpath[strlen(fullpath) - 2] = '\0';

	/*
	 * The device could still have a valid label, so first attempt to
	 * online the device, undoing any spare operation.  If the online
	 * succeeds and the new state is either HEALTHY or DEGRADED, we are
	 * done.
	 */
	if (zpool_vdev_online(zhp, fullpath,
	    ZFS_ONLINE_CHECKREMOVE | ZFS_ONLINE_UNSPARE, &newstate) == 0 &&
	    (newstate == VDEV_STATE_HEALTHY || newstate == VDEV_STATE_DEGRADED))
		return;

	/*
	 * If the pool doesn't have the autoreplace property set or this is a
	 * non-whole-disk vdev, there's nothing else we can do, so attempt a
	 * true online (without the unspare flag), which will trigger an FMA
	 * fault.
	 */
	if (zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOREPLACE, NULL) == 0 ||
	    !wholedisk) {
		(void) zpool_vdev_online(zhp, fullpath, ZFS_ONLINE_FORCEFAULT,
		    &newstate);
		return;
	}

	/*
	 * Attempt to replace the device.
	 *
	 * If newrawpath is set (not NULL), then we matched by FRU and need to
	 * use the new /dev and /devices paths for the attach.
	 *
	 * First, construct the short disk name to label, chopping off any
	 * leading /dev path and slice (which newrawpath doesn't include).
	 */
	if (newrawpath != NULL) {
		(void) strlcpy(diskname, newrawpath +
		    strlen(ZFS_RDISK_ROOTD), sizeof (diskname));
	} else {
		(void) strlcpy(diskname, fullpath +
		    strlen(ZFS_DISK_ROOTD), sizeof (diskname));
	}

	/* Write out the label */
	if (zpool_is_bootable(zhp))
		boot_type = ZPOOL_COPY_BOOT_LABEL;
	else
		boot_type = ZPOOL_NO_BOOT_LABEL;

	boot_size = zpool_get_prop_int(zhp, ZPOOL_PROP_BOOTSIZE, NULL);
	if (zpool_label_disk(g_zfshdl, zhp, diskname, boot_type, boot_size,
	    NULL) != 0) {
		syseventd_print(9, "%s: failed to write the label\n", __func__);
		return;
	}

	/* Determine the "path" and "physpath" to be used for the attach */
	if (newrawpath != NULL) {
		/* Construct newdevpath from newrawpath */
		(void) snprintf(newdevpath, sizeof (newdevpath), "%s%s%s",
		    ZFS_DISK_ROOTD, newrawpath + strlen(ZFS_RDISK_ROOTD),
		    (boot_size > 0) ? "s1" : "s0");
		/* Use the replacing vdev's "path" and "physpath" */
		adevpath = newdevpath;
		/* Resolve the /dev path to a /devices node */
		if (realpath(newdevpath, newphyspath) == NULL)
			goto fail;
		aphyspath = newphyspath + strlen(DEVICE_PREFIX);
	} else {
		/* Use the original vdev's "path" and "physpath" */
		adevpath = devpath;
		aphyspath = physpath;
	}

	/* Construct the new devid */
	(void) snprintf(fullphyspath, sizeof (fullphyspath), "%s%s",
	    DEVICE_PREFIX, aphyspath);
	adevid = devid_str_from_path(fullphyspath);

	/*
	 * Check if the replaced vdev is an "available" (not swapped-in) spare
	 * or l2cache device.
	 */
	(void) zpool_find_vdev(zhp, fullpath, &avail_spare, &l2cache, NULL,
	    NULL);
	if (avail_spare)
		zc_type = ZPOOL_CONFIG_SPARES;
	else if (l2cache)
		zc_type = ZPOOL_CONFIG_L2CACHE;

	/* Construct the root vdev */
	if (nvlist_alloc(&nvroot, NV_UNIQUE_NAME, 0) != 0 ||
	    nvlist_alloc(&newvd, NV_UNIQUE_NAME, 0) != 0)
		goto fail;

	if (nvlist_add_string(newvd, ZPOOL_CONFIG_TYPE, VDEV_TYPE_DISK) != 0 ||
	    (adevid != NULL &&
	    nvlist_add_string(newvd, ZPOOL_CONFIG_DEVID, adevid) != 0) ||
	    nvlist_add_string(newvd, ZPOOL_CONFIG_PATH, adevpath) != 0 ||
	    (aphyspath != NULL &&
	    nvlist_add_string(newvd, ZPOOL_CONFIG_PHYS_PATH, aphyspath) != 0) ||
	    nvlist_add_uint64(newvd, ZPOOL_CONFIG_WHOLE_DISK, wholedisk) != 0 ||
	    nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE, VDEV_TYPE_ROOT) != 0 ||
	    nvlist_add_nvlist_array(nvroot, zc_type, &newvd, 1) != 0)
		goto fail;

	if (avail_spare || l2cache) {
		/*
		 * For spares/l2cache, we need to explicitly remove the device
		 * and add the new one.
		 */
		(void) zpool_vdev_remove(zhp, fullpath);
		(void) zpool_add(zhp, nvroot);
	} else {
		/* Do the replace for regular vdevs */
		(void) zpool_vdev_attach(zhp, fullpath, adevpath, nvroot,
		    B_TRUE);
	}

fail:
	if (adevid != NULL)
		devid_str_free((char *)adevid);
	nvlist_free(newvd);
	nvlist_free(nvroot);
}

/*
 * Utility functions to find a vdev matching given criteria.
 */
typedef struct dev_data {
	const char		*dd_compare;
	const char		*dd_prop;
	const char		*dd_devpath;
	zfs_process_func_t	dd_func;
	int			(*dd_cmp_func)(libzfs_handle_t *, const char *,
				    const char *, size_t);
	boolean_t		dd_found;
	uint64_t		dd_pool_guid;
	uint64_t		dd_vdev_guid;
} dev_data_t;

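/*
 * Recursively walk a vdev tree (including spare and l2cache children) and
 * invoke dd_func on every leaf vdev that matches the requested guid and/or
 * the requested property value.
 */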
static void
zfs_iter_vdev(zpool_handle_t *zhp, nvlist_t *nvl, void *data)
{
	dev_data_t *dp = data;
	boolean_t nested = B_FALSE;
	char *cmp_str;
	nvlist_t **cnvl, **snvl, **lnvl;
	uint_t i, nc, ns, nl;
	uint64_t guid;

	/* Iterate over child vdevs */
	if (nvlist_lookup_nvlist_array(nvl, ZPOOL_CONFIG_CHILDREN,
	    &cnvl, &nc) == 0) {
		for (i = 0; i < nc; i++)
			zfs_iter_vdev(zhp, cnvl[i], data);
		nested = B_TRUE;
	}
	/* Iterate over spare vdevs */
	if (nvlist_lookup_nvlist_array(nvl, ZPOOL_CONFIG_SPARES,
	    &snvl, &ns) == 0) {
		for (i = 0; i < ns; i++)
			zfs_iter_vdev(zhp, snvl[i], data);
		nested = B_TRUE;
	}
	/* Iterate over l2cache vdevs */
	if (nvlist_lookup_nvlist_array(nvl, ZPOOL_CONFIG_L2CACHE,
	    &lnvl, &nl) == 0) {
		for (i = 0; i < nl; i++)
			zfs_iter_vdev(zhp, lnvl[i], data);
		nested = B_TRUE;
	}

	if (nested)
		return;

	if (dp->dd_vdev_guid != 0 && (nvlist_lookup_uint64(nvl,
	    ZPOOL_CONFIG_GUID, &guid) != 0 || guid != dp->dd_vdev_guid))
		return;

	if (dp->dd_compare != NULL && (nvlist_lookup_string(nvl, dp->dd_prop,
	    &cmp_str) != 0 || dp->dd_cmp_func(g_zfshdl, dp->dd_compare, cmp_str,
	    strlen(dp->dd_compare)) != 0))
		return;

	dp->dd_found = B_TRUE;
	(dp->dd_func)(zhp, nvl, dp->dd_devpath);
}

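/*
 * Thread-pool task: mount and share the datasets of a pool that has become
 * available again, then release the pool handle.
 */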
void
zfs_enable_ds(void *arg)
{
	unavailpool_t *pool = (unavailpool_t *)arg;

	(void) zpool_enable_datasets(pool->uap_zhp, NULL, 0);
	zpool_close(pool->uap_zhp);
	free(pool);
}

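/*
 * zpool_iter() callback: walk the vdev tree of every pool (or only of the
 * pool matching dd_pool_guid, if set), and hand pools that have recovered to
 * at least a DEGRADED state over to zfs_enable_ds().
 */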
static int
zfs_iter_pool(zpool_handle_t *zhp, void *data)
{
	nvlist_t *config, *nvl;
	dev_data_t *dp = data;
	uint64_t pool_guid;
	unavailpool_t *pool;

	if ((config = zpool_get_config(zhp, NULL)) != NULL) {
		if (dp->dd_pool_guid == 0 ||
		    (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
		    &pool_guid) == 0 && pool_guid == dp->dd_pool_guid)) {
			(void) nvlist_lookup_nvlist(config,
			    ZPOOL_CONFIG_VDEV_TREE, &nvl);
			zfs_iter_vdev(zhp, nvl, data);
		}
	}
	if (g_enumeration_done) {
		for (pool = list_head(&g_pool_list); pool != NULL;
		    pool = list_next(&g_pool_list, pool)) {

			if (strcmp(zpool_get_name(zhp),
			    zpool_get_name(pool->uap_zhp)) != 0)
				continue;
			if (zfs_toplevel_state(zhp) >= VDEV_STATE_DEGRADED) {
				list_remove(&g_pool_list, pool);
				(void) tpool_dispatch(g_tpool, zfs_enable_ds,
				    pool);
				break;
			}
		}
	}

	zpool_close(zhp);
	return (0);
}

/*
 * Wrap strncmp() so it can be used as the comparison function for
 * devid_iter() and physpath_iter().
 */
/* ARGSUSED */
static int
strncmp_wrap(libzfs_handle_t *hdl, const char *a, const char *b, size_t len)
{
	return (strncmp(a, b, len));
}

/*
 * Given a physical device path, iterate over all (pool, vdev) pairs which
 * correspond to the given path's FRU.
 */
static boolean_t
devfru_iter(const char *devpath, const char *physpath, zfs_process_func_t func)
{
	dev_data_t data = { 0 };
	const char *fru;

	/*
	 * We need to refresh the FRU cache, otherwise we won't find the newly
	 * inserted disk.
	 */
	libzfs_fru_refresh(g_zfshdl);

	fru = libzfs_fru_lookup(g_zfshdl, physpath);
	if (fru == NULL)
		return (B_FALSE);

	data.dd_compare = fru;
	data.dd_func = func;
	data.dd_cmp_func = libzfs_fru_cmp_slot;
	data.dd_prop = ZPOOL_CONFIG_FRU;
	data.dd_found = B_FALSE;
	data.dd_devpath = devpath;

	(void) zpool_iter(g_zfshdl, zfs_iter_pool, &data);

	return (data.dd_found);
}

/*
 * Given a physical device path, iterate over all (pool, vdev) pairs which
 * correspond to the given path.
 */
/*ARGSUSED*/
static boolean_t
physpath_iter(const char *devpath, const char *physpath,
    zfs_process_func_t func)
{
	dev_data_t data = { 0 };

	data.dd_compare = physpath;
	data.dd_func = func;
	data.dd_cmp_func = strncmp_wrap;
	data.dd_prop = ZPOOL_CONFIG_PHYS_PATH;
	data.dd_found = B_FALSE;
	data.dd_devpath = NULL;

	(void) zpool_iter(g_zfshdl, zfs_iter_pool, &data);

	return (data.dd_found);
}

/*
 * Given a physical device path, construct the corresponding devid and iterate
 * over all (pool, vdev) pairs whose devid matches it.
 */
/*ARGSUSED*/
static boolean_t
devid_iter(const char *devpath, const char *physpath, zfs_process_func_t func)
{
	char fullphyspath[PATH_MAX];
	char *devidstr;
	char *s;
	dev_data_t data = { 0 };

	/* Try to open a known minor node */
	(void) snprintf(fullphyspath, sizeof (fullphyspath), "%s%s%s",
	    DEVICE_PREFIX, physpath, WD_MINOR);

	devidstr = devid_str_from_path(fullphyspath);
	if (devidstr == NULL)
		return (B_FALSE);
	/* Chop off the minor node name, keeping the trailing '/' */
	if ((s = strrchr(devidstr, '/')) != NULL)
		*(s + 1) = '\0';

	data.dd_compare = devidstr;
	data.dd_func = func;
	data.dd_cmp_func = strncmp_wrap;
	data.dd_prop = ZPOOL_CONFIG_DEVID;
	data.dd_found = B_FALSE;
	data.dd_devpath = NULL;

	(void) zpool_iter(g_zfshdl, zfs_iter_pool, &data);

	devid_str_free(devidstr);

	return (data.dd_found);
}

/*
 * This function is called when we receive a devfs add event.
 */
static int
zfs_deliver_add(nvlist_t *nvl)
{
	char *devpath, *physpath;

	if (nvlist_lookup_string(nvl, DEV_NAME, &devpath) != 0 ||
	    nvlist_lookup_string(nvl, DEV_PHYS_PATH, &physpath) != 0)
		return (-1);

	/*
	 * Iterate over all vdevs with a matching devid, then those with a
	 * matching /devices path, and finally those with a matching FRU slot
	 * number, only paying attention to vdevs marked as whole disks.
	 */
	if (!devid_iter(devpath, physpath, zfs_process_add) &&
	    !physpath_iter(devpath, physpath, zfs_process_add) &&
	    !devfru_iter(devpath, physpath, zfs_process_add)) {
		syseventd_print(9, "%s: match failed devpath=%s physpath=%s\n",
		    __func__, devpath, physpath);
	}

	return (0);
}

/*
 * Called when we receive a VDEV_CHECK event, which indicates a device could
 * not be opened during initial pool open, but the autoreplace property was
 * set on the pool.  In this case, we treat it as if it were an add event.
 */
static int
zfs_deliver_check(nvlist_t *nvl)
{
	dev_data_t data = { 0 };

	if (nvlist_lookup_uint64(nvl, ZFS_EV_POOL_GUID,
	    &data.dd_pool_guid) != 0 ||
	    nvlist_lookup_uint64(nvl, ZFS_EV_VDEV_GUID,
	    &data.dd_vdev_guid) != 0 ||
	    data.dd_vdev_guid == 0)
		return (0);

	data.dd_func = zfs_process_add;

	(void) zpool_iter(g_zfshdl, zfs_iter_pool, &data);

	return (0);
}

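/*
 * zpool_iter() callback for ESC_DEV_DLE events: locate the vdev matching the
 * given physical path, reopen the pool (for whole-disk vdevs) so the kernel
 * can see the expanded size, and online the device if the pool's 'autoexpand'
 * property is set.  Returns 1 to stop the iteration once the device is found.
 */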
static int
zfsdle_vdev_online(zpool_handle_t *zhp, void *data)
{
	char *devname = data;
	boolean_t avail_spare, l2cache;
	vdev_state_t newstate;
	nvlist_t *tgt;

	syseventd_print(9, "%s: searching for %s in pool %s\n", __func__,
	    devname, zpool_get_name(zhp));

	if ((tgt = zpool_find_vdev_by_physpath(zhp, devname,
	    &avail_spare, &l2cache, NULL)) != NULL) {
		char *path, fullpath[MAXPATHLEN];
		uint64_t wholedisk = 0ULL;

		verify(nvlist_lookup_string(tgt, ZPOOL_CONFIG_PATH,
		    &path) == 0);
		verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_WHOLE_DISK,
		    &wholedisk) == 0);

		(void) strlcpy(fullpath, path, sizeof (fullpath));
		if (wholedisk) {
			fullpath[strlen(fullpath) - 2] = '\0';

			/*
			 * We need to reopen the pool associated with this
			 * device so that the kernel can update the size
			 * of the expanded device.
			 */
			(void) zpool_reopen(zhp);
		}

		if (zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOEXPAND, NULL)) {
			syseventd_print(9, "%s: setting device '%s' to ONLINE "
			    "state in pool %s\n", __func__, fullpath,
			    zpool_get_name(zhp));
			if (zpool_get_state(zhp) != POOL_STATE_UNAVAIL)
				(void) zpool_vdev_online(zhp, fullpath, 0,
				    &newstate);
		}
		zpool_close(zhp);
		return (1);
	}
	zpool_close(zhp);
	return (0);
}

/*
 * This function is called for each vdev of a pool for which any of the
 * following events was received:
 *  - ESC_ZFS_vdev_add
 *  - ESC_ZFS_vdev_attach
 *  - ESC_ZFS_vdev_clear
 *  - ESC_ZFS_vdev_online
 *  - ESC_ZFS_pool_create
 *  - ESC_ZFS_pool_import
 * It updates the vdev's FRU property if it is out of date.
 */
/*ARGSUSED*/
static void
zfs_update_vdev_fru(zpool_handle_t *zhp, nvlist_t *vdev, const char *devpath)
{
	char *physpath, *cptr, *oldfru = NULL;
	const char *newfru;
	uint64_t vdev_guid;

	(void) nvlist_lookup_uint64(vdev, ZPOOL_CONFIG_GUID, &vdev_guid);
	(void) nvlist_lookup_string(vdev, ZPOOL_CONFIG_PHYS_PATH, &physpath);
	(void) nvlist_lookup_string(vdev, ZPOOL_CONFIG_FRU, &oldfru);

	/* Remove the :<slice> suffix from physpath */
	cptr = strrchr(physpath, ':');
	if (cptr != NULL)
		*cptr = '\0';

	newfru = libzfs_fru_lookup(g_zfshdl, physpath);
	if (newfru == NULL) {
		syseventd_print(9, "%s: physpath=%s newFRU=<none>\n", __func__,
		    physpath);
		return;
	}

	/* Do nothing if the FRU hasn't changed */
	if (oldfru != NULL && libzfs_fru_compare(g_zfshdl, oldfru, newfru)) {
		syseventd_print(9, "%s: physpath=%s newFRU=<unchanged>\n",
		    __func__, physpath);
		return;
	}

	syseventd_print(9, "%s: physpath=%s newFRU=%s\n", __func__, physpath,
	    newfru);

	(void) zpool_fru_set(zhp, vdev_guid, newfru);
}

/*
 * This function handles the following events:
 *  - ESC_ZFS_vdev_add
 *  - ESC_ZFS_vdev_attach
 *  - ESC_ZFS_vdev_clear
 *  - ESC_ZFS_vdev_online
 *  - ESC_ZFS_pool_create
 *  - ESC_ZFS_pool_import
 * It will iterate over the pool vdevs to update the FRU property.
 */
int
zfs_deliver_update(nvlist_t *nvl)
{
	dev_data_t dd = { 0 };
	char *pname;
	zpool_handle_t *zhp;
	nvlist_t *config, *vdev;

	if (nvlist_lookup_string(nvl, "pool_name", &pname) != 0) {
		syseventd_print(9, "%s: no pool name\n", __func__);
		return (-1);
	}

	/*
	 * If this event was triggered by a pool export or destroy we cannot
	 * open the pool.  This is not an error, just return 0 as we don't
	 * care about these events.
	 */
	zhp = zpool_open_canfail(g_zfshdl, pname);
	if (zhp == NULL)
		return (0);

	config = zpool_get_config(zhp, NULL);
	if (config == NULL) {
		syseventd_print(9, "%s: failed to get pool config for %s\n",
		    __func__, pname);
		zpool_close(zhp);
		return (-1);
	}

	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &vdev) != 0) {
		syseventd_print(0, "%s: failed to get vdev tree for %s\n",
		    __func__, pname);
		zpool_close(zhp);
		return (-1);
	}

	libzfs_fru_refresh(g_zfshdl);

	dd.dd_func = zfs_update_vdev_fru;
	zfs_iter_vdev(zhp, vdev, &dd);

	zpool_close(zhp);
	return (0);
}

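/*
 * Handle an ESC_DEV_DLE (dynamic LUN expansion) event: strip the /devices
 * prefix from the supplied physical path and search all pools for the
 * affected device.
 */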
int
zfs_deliver_dle(nvlist_t *nvl)
{
	char *physpath;

	if (nvlist_lookup_string(nvl, DEV_PHYS_PATH, &physpath) != 0) {
		syseventd_print(9, "%s: no physpath\n", __func__);
		return (-1);
	}
	if (strncmp(physpath, DEVICE_PREFIX, strlen(DEVICE_PREFIX)) != 0) {
		syseventd_print(9, "%s: invalid device '%s'\n", __func__,
		    physpath);
		return (-1);
	}

	/*
	 * We try to find the device using the physical path that has been
	 * supplied.  We need to strip off the /devices prefix before starting
	 * our search.
	 */
	physpath += strlen(DEVICE_PREFIX);
	if (zpool_iter(g_zfshdl, zfsdle_vdev_online, physpath) != 1) {
		syseventd_print(9, "%s: device '%s' not found\n",
		    __func__, physpath);
		return (1);
	}
	return (0);
}

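/*
 * syseventd entry point: classify the incoming event and dispatch it to the
 * appropriate handler above.
 */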
/*ARGSUSED*/
static int
zfs_deliver_event(sysevent_t *ev, int unused)
{
	const char *class = sysevent_get_class_name(ev);
	const char *subclass = sysevent_get_subclass_name(ev);
	nvlist_t *nvl;
	int ret;
	boolean_t is_check = B_FALSE;
	boolean_t is_dle = B_FALSE;
	boolean_t is_update = B_FALSE;

	if (strcmp(class, EC_DEV_ADD) == 0) {
		/* We're only interested in disk additions */
		if (strcmp(subclass, ESC_DISK) != 0)
			return (0);
	} else if (strcmp(class, EC_ZFS) == 0) {
		if (strcmp(subclass, ESC_ZFS_VDEV_CHECK) == 0) {
			/*
			 * This event signifies that a device failed to open
			 * during pool load, but the 'autoreplace' property was
			 * set, so we should pretend it's just been added.
			 */
			is_check = B_TRUE;
		} else if ((strcmp(subclass, ESC_ZFS_VDEV_ADD) == 0) ||
		    (strcmp(subclass, ESC_ZFS_VDEV_ATTACH) == 0) ||
		    (strcmp(subclass, ESC_ZFS_VDEV_CLEAR) == 0) ||
		    (strcmp(subclass, ESC_ZFS_VDEV_ONLINE) == 0) ||
		    (strcmp(subclass, ESC_ZFS_POOL_CREATE) == 0) ||
		    (strcmp(subclass, ESC_ZFS_POOL_IMPORT) == 0)) {
			/*
			 * When we receive these events we check the pool
			 * configuration and update the vdev FRUs if necessary.
			 */
			is_update = B_TRUE;
		}
	} else if (strcmp(class, EC_DEV_STATUS) == 0 &&
	    strcmp(subclass, ESC_DEV_DLE) == 0) {
		is_dle = B_TRUE;
	} else {
		return (0);
	}

	if (sysevent_get_attr_list(ev, &nvl) != 0)
		return (-1);

	if (is_dle)
		ret = zfs_deliver_dle(nvl);
	else if (is_update)
		ret = zfs_deliver_update(nvl);
	else if (is_check)
		ret = zfs_deliver_check(nvl);
	else
		ret = zfs_deliver_add(nvl);

	nvlist_free(nvl);
	return (ret);
}

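/*
 * Thread entry point: enumerate all pools, collect those that are currently
 * unavailable, and create the thread pool used to re-enable their datasets.
 */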
/*ARGSUSED*/
void *
zfs_enum_pools(void *arg)
{
	(void) zpool_iter(g_zfshdl, zfs_unavail_pool, (void *)&g_pool_list);
	if (!list_is_empty(&g_pool_list))
		g_tpool = tpool_create(1, sysconf(_SC_NPROCESSORS_ONLN),
		    0, NULL);
	g_enumeration_done = B_TRUE;
	return (NULL);
}

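/*
 * Module ops vector returned to syseventd by slm_init(); zfs_deliver_event()
 * is the event delivery callback.
 */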
static struct slm_mod_ops zfs_mod_ops = {
	SE_MAJOR_VERSION, SE_MINOR_VERSION, 10, zfs_deliver_event
};

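/*
 * Module initialization: open a libzfs handle and kick off the asynchronous
 * enumeration of unavailable pools.
 */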
struct slm_mod_ops *
slm_init()
{
	if ((g_zfshdl = libzfs_init()) == NULL)
		return (NULL);
	/*
	 * collect a list of unavailable pools (asynchronously,
	 * since this can take a while)
	 */
	list_create(&g_pool_list, sizeof (struct unavailpool),
	    offsetof(struct unavailpool, uap_node));
	if (thr_create(NULL, 0, zfs_enum_pools, NULL, 0, &g_zfs_tid) != 0)
		return (NULL);
	return (&zfs_mod_ops);
}

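/*
 * Module teardown: wait for the enumeration thread and any dataset-enable
 * tasks to finish, release any pools still on the unavailable list, and
 * close the libzfs handle.
 */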
void
slm_fini()
{
	unavailpool_t *pool;

	(void) thr_join(g_zfs_tid, NULL, NULL);
	if (g_tpool != NULL) {
		tpool_wait(g_tpool);
		tpool_destroy(g_tpool);
	}
	while ((pool = (list_head(&g_pool_list))) != NULL) {
		list_remove(&g_pool_list, pool);
		zpool_close(pool->uap_zhp);
		free(pool);
	}
	list_destroy(&g_pool_list);
	libzfs_fini(g_zfshdl);
}