/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2011 by Delphix. All rights reserved.
 */

/*
 * Pool import support functions.
 *
 * To import a pool, we rely on reading the configuration information from the
 * ZFS label of each device.  If we successfully read the label, then we
 * organize the configuration information in the following hierarchy:
 *
 *      pool guid -> toplevel vdev guid -> label txg
 *
 * Duplicate entries matching this same tuple will be discarded.  Once we have
 * examined every device, we pick the best label txg config for each toplevel
 * vdev.  We then arrange these toplevel vdevs into a complete pool config, and
 * update any paths that have changed.  Finally, we attempt to import the pool
 * using our derived config, and record the results.
 */

#include <ctype.h>
#include <devid.h>
#include <dirent.h>
#include <errno.h>
#include <libintl.h>
#include <stddef.h>
#include <stdlib.h>
#include <string.h>
#include <sys/stat.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/vtoc.h>
#include <sys/dktp/fdisk.h>
#include <sys/efi_partition.h>
#include <thread_pool.h>

#include <sys/vdev_impl.h>

#include "libzfs.h"
#include "libzfs_impl.h"

/*
 * Intermediate structures used to gather configuration information.
 */
typedef struct config_entry {
        uint64_t                ce_txg;
        nvlist_t                *ce_config;
        struct config_entry     *ce_next;
} config_entry_t;

typedef struct vdev_entry {
        uint64_t                ve_guid;
        config_entry_t          *ve_configs;
        struct vdev_entry       *ve_next;
} vdev_entry_t;

typedef struct pool_entry {
        uint64_t                pe_guid;
        vdev_entry_t            *pe_vdevs;
        struct pool_entry       *pe_next;
} pool_entry_t;

typedef struct name_entry {
        char                    *ne_name;
        uint64_t                ne_guid;
        struct name_entry       *ne_next;
} name_entry_t;

typedef struct pool_list {
        pool_entry_t            *pools;
        name_entry_t            *names;
} pool_list_t;

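/*
 * Derive the devid string for the given device path by combining the
 * device's devid with its minor name.  Returns NULL if the device
 * cannot be opened or has no devid.
 */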
static char *
get_devid(const char *path)
{
        int fd;
        ddi_devid_t devid;
        char *minor, *ret;

        if ((fd = open(path, O_RDONLY)) < 0)
                return (NULL);

        minor = NULL;
        ret = NULL;
        if (devid_get(fd, &devid) == 0) {
                if (devid_get_minor_name(fd, &minor) == 0)
                        ret = devid_str_encode(devid, minor);
                if (minor != NULL)
                        devid_str_free(minor);
                devid_free(devid);
        }
        (void) close(fd);

        return (ret);
}

/*
 * Go through and fix up any path and/or devid information for the given vdev
 * configuration.
 */
static int
fix_paths(nvlist_t *nv, name_entry_t *names)
{
        nvlist_t **child;
        uint_t c, children;
        uint64_t guid;
        name_entry_t *ne, *best;
        char *path, *devid;
        int matched;

        if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
            &child, &children) == 0) {
                for (c = 0; c < children; c++)
                        if (fix_paths(child[c], names) != 0)
                                return (-1);
                return (0);
        }

        /*
         * This is a leaf (file or disk) vdev.  In either case, go through
         * the name list and see if we find a matching guid.  If so, replace
         * the path and see if we can calculate a new devid.
         *
         * There may be multiple names associated with a particular guid, in
         * which case we have overlapping slices or multiple paths to the same
         * disk.  If this is the case, then we want to pick the path that is
         * the most similar to the original, where "most similar" is the number
         * of matching characters starting from the end of the path.  This will
         * preserve slice numbers even if the disks have been reorganized, and
         * will also catch preferred disk names if multiple paths exist.
         */
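        /*
         * For example (hypothetical device names): if the original path
         * was /dev/dsk/c0t0d0s0 and the name list offers
         * /dev/dsk/c1t0d0s0 and /dev/dsk/c0t0d0s2, the first candidate
         * wins, since it shares the six-character suffix "t0d0s0" and
         * thus preserves the slice number.
         */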
        verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) == 0);
        if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) != 0)
                path = NULL;

        matched = 0;
        best = NULL;
        for (ne = names; ne != NULL; ne = ne->ne_next) {
                if (ne->ne_guid == guid) {
                        const char *src, *dst;
                        int count;

                        if (path == NULL) {
                                best = ne;
                                break;
                        }

                        src = ne->ne_name + strlen(ne->ne_name) - 1;
                        dst = path + strlen(path) - 1;
                        for (count = 0; src >= ne->ne_name && dst >= path;
                            src--, dst--, count++)
                                if (*src != *dst)
                                        break;

                        /*
                         * At this point, 'count' is the number of characters
                         * matched from the end.
                         */
                        if (count > matched || best == NULL) {
                                best = ne;
                                matched = count;
                        }
                }
        }

        if (best == NULL)
                return (0);

        if (nvlist_add_string(nv, ZPOOL_CONFIG_PATH, best->ne_name) != 0)
                return (-1);

        if ((devid = get_devid(best->ne_name)) == NULL) {
                (void) nvlist_remove_all(nv, ZPOOL_CONFIG_DEVID);
        } else {
                if (nvlist_add_string(nv, ZPOOL_CONFIG_DEVID, devid) != 0) {
                        devid_str_free(devid);
                        return (-1);
                }
                devid_str_free(devid);
        }

        return (0);
}

/*
 * Add the given configuration to the list of known devices.
 */
static int
add_config(libzfs_handle_t *hdl, pool_list_t *pl, const char *path,
    nvlist_t *config)
{
        uint64_t pool_guid, vdev_guid, top_guid, txg, state;
        pool_entry_t *pe;
        vdev_entry_t *ve;
        config_entry_t *ce;
        name_entry_t *ne;

        /*
         * If this is a hot spare not currently in use, or a level 2 cache
         * device, add it to the list of names to translate, but don't do
         * anything else.
         */
        if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
            &state) == 0 &&
            (state == POOL_STATE_SPARE || state == POOL_STATE_L2CACHE) &&
            nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID, &vdev_guid) == 0) {
                if ((ne = zfs_alloc(hdl, sizeof (name_entry_t))) == NULL)
                        return (-1);

                if ((ne->ne_name = zfs_strdup(hdl, path)) == NULL) {
                        free(ne);
                        return (-1);
                }
                ne->ne_guid = vdev_guid;
                ne->ne_next = pl->names;
                pl->names = ne;
                return (0);
        }

        /*
         * If we have a valid config but cannot read any of these fields, then
         * it means we have a half-initialized label.  In vdev_label_init()
         * we write a label with txg == 0 so that we can identify the device
         * in case the user refers to the same disk later on.  If we fail to
         * create the pool, we'll be left with a label in this state
         * which should not be considered part of a valid pool.
         */
        if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
            &pool_guid) != 0 ||
            nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID,
            &vdev_guid) != 0 ||
            nvlist_lookup_uint64(config, ZPOOL_CONFIG_TOP_GUID,
            &top_guid) != 0 ||
            nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG,
            &txg) != 0 || txg == 0) {
                nvlist_free(config);
                return (0);
        }

        /*
         * First, see if we know about this pool.  If not, then add it to the
         * list of known pools.
         */
        for (pe = pl->pools; pe != NULL; pe = pe->pe_next) {
                if (pe->pe_guid == pool_guid)
                        break;
        }

        if (pe == NULL) {
                if ((pe = zfs_alloc(hdl, sizeof (pool_entry_t))) == NULL) {
                        nvlist_free(config);
                        return (-1);
                }
                pe->pe_guid = pool_guid;
                pe->pe_next = pl->pools;
                pl->pools = pe;
        }

        /*
         * Second, see if we know about this toplevel vdev.  Add it if it's
         * missing.
         */
        for (ve = pe->pe_vdevs; ve != NULL; ve = ve->ve_next) {
                if (ve->ve_guid == top_guid)
                        break;
        }

        if (ve == NULL) {
                if ((ve = zfs_alloc(hdl, sizeof (vdev_entry_t))) == NULL) {
                        nvlist_free(config);
                        return (-1);
                }
                ve->ve_guid = top_guid;
                ve->ve_next = pe->pe_vdevs;
                pe->pe_vdevs = ve;
        }

        /*
         * Third, see if we have a config with a matching transaction group.  If
         * so, then we do nothing.  Otherwise, add it to the list of known
         * configs.
         */
        for (ce = ve->ve_configs; ce != NULL; ce = ce->ce_next) {
                if (ce->ce_txg == txg)
                        break;
        }

        if (ce == NULL) {
                if ((ce = zfs_alloc(hdl, sizeof (config_entry_t))) == NULL) {
                        nvlist_free(config);
                        return (-1);
                }
                ce->ce_txg = txg;
                ce->ce_config = config;
                ce->ce_next = ve->ve_configs;
                ve->ve_configs = ce;
        } else {
                nvlist_free(config);
        }

        /*
         * At this point we've successfully added our config to the list of
         * known configs.  The last thing to do is add the vdev guid -> path
         * mappings so that we can fix up the configuration as necessary before
         * doing the import.
         */
        if ((ne = zfs_alloc(hdl, sizeof (name_entry_t))) == NULL)
                return (-1);

        if ((ne->ne_name = zfs_strdup(hdl, path)) == NULL) {
                free(ne);
                return (-1);
        }

        ne->ne_guid = vdev_guid;
        ne->ne_next = pl->names;
        pl->names = ne;

        return (0);
}

/*
 * Determine whether the named pool is active and, if so, whether its GUID
 * matches the one given; the result is returned in 'isactive'.
 */
static int
pool_active(libzfs_handle_t *hdl, const char *name, uint64_t guid,
    boolean_t *isactive)
{
        zpool_handle_t *zhp;
        uint64_t theguid;

        if (zpool_open_silent(hdl, name, &zhp) != 0)
                return (-1);

        if (zhp == NULL) {
                *isactive = B_FALSE;
                return (0);
        }

        verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_POOL_GUID,
            &theguid) == 0);

        zpool_close(zhp);

        *isactive = (theguid == guid);
        return (0);
}

static nvlist_t *
refresh_config(libzfs_handle_t *hdl, nvlist_t *config)
{
        nvlist_t *nvl;
        zfs_cmd_t zc = { 0 };
        int err;

        if (zcmd_write_conf_nvlist(hdl, &zc, config) != 0)
                return (NULL);

        if (zcmd_alloc_dst_nvlist(hdl, &zc,
            zc.zc_nvlist_conf_size * 2) != 0) {
                zcmd_free_nvlists(&zc);
                return (NULL);
        }

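        /*
         * The kernel returns ENOMEM when the destination buffer is too
         * small for the packed tryimport result, so keep growing the
         * buffer and retrying until it fits.
         */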
        while ((err = ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_TRYIMPORT,
            &zc)) != 0 && errno == ENOMEM) {
                if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
                        zcmd_free_nvlists(&zc);
                        return (NULL);
                }
        }

        if (err) {
                zcmd_free_nvlists(&zc);
                return (NULL);
        }

        if (zcmd_read_dst_nvlist(hdl, &zc, &nvl) != 0) {
                zcmd_free_nvlists(&zc);
                return (NULL);
        }

        zcmd_free_nvlists(&zc);
        return (nvl);
}

/*
 * Determine if the vdev id is a hole in the namespace.
 */
boolean_t
vdev_is_hole(uint64_t *hole_array, uint_t holes, uint_t id)
{
        for (int c = 0; c < holes; c++) {

                /* Top-level is a hole */
                if (hole_array[c] == id)
                        return (B_TRUE);
        }
        return (B_FALSE);
}

/*
 * Convert our list of pools into the definitive set of configurations.  We
 * start by picking the best config for each toplevel vdev.  Once that's done,
 * we assemble the toplevel vdevs into a full config for the pool.  We make a
 * pass to fix up any incorrect paths, and then add it to the main list to
 * return to the user.
 */
static nvlist_t *
get_configs(libzfs_handle_t *hdl, pool_list_t *pl, boolean_t active_ok)
{
        pool_entry_t *pe;
        vdev_entry_t *ve;
        config_entry_t *ce;
        nvlist_t *ret = NULL, *config = NULL, *tmp, *nvtop, *nvroot;
        nvlist_t **spares, **l2cache;
        uint_t i, nspares, nl2cache;
        boolean_t config_seen;
        uint64_t best_txg;
        char *name, *hostname, *comment;
        uint64_t version, guid;
        uint_t children = 0;
        nvlist_t **child = NULL;
        uint_t holes;
        uint64_t *hole_array, max_id;
        uint_t c;
        boolean_t isactive;
        uint64_t hostid;
        nvlist_t *nvl;
        boolean_t found_one = B_FALSE;
        boolean_t valid_top_config = B_FALSE;

        if (nvlist_alloc(&ret, 0, 0) != 0)
                goto nomem;

        for (pe = pl->pools; pe != NULL; pe = pe->pe_next) {
                uint64_t id, max_txg = 0;

                if (nvlist_alloc(&config, NV_UNIQUE_NAME, 0) != 0)
                        goto nomem;
                config_seen = B_FALSE;

                /*
                 * Iterate over all toplevel vdevs.  Grab the pool configuration
                 * from the first one we find, and then go through the rest and
                 * add them as necessary to the 'vdevs' member of the config.
                 */
                for (ve = pe->pe_vdevs; ve != NULL; ve = ve->ve_next) {

                        /*
                         * Determine the best configuration for this vdev by
                         * selecting the config with the latest transaction
                         * group.
                         */
                        best_txg = 0;
                        for (ce = ve->ve_configs; ce != NULL;
                            ce = ce->ce_next) {

                                if (ce->ce_txg > best_txg) {
                                        tmp = ce->ce_config;
                                        best_txg = ce->ce_txg;
                                }
                        }

                        /*
                         * We rely on the fact that the config with the
                         * highest txg contains the most up-to-date
                         * information about the valid top-level vdevs
                         * in the namespace.
                         */
                        if (best_txg > max_txg) {
                                (void) nvlist_remove(config,
                                    ZPOOL_CONFIG_VDEV_CHILDREN,
                                    DATA_TYPE_UINT64);
                                (void) nvlist_remove(config,
                                    ZPOOL_CONFIG_HOLE_ARRAY,
                                    DATA_TYPE_UINT64_ARRAY);

                                max_txg = best_txg;
                                hole_array = NULL;
                                holes = 0;
                                max_id = 0;
                                valid_top_config = B_FALSE;

                                if (nvlist_lookup_uint64(tmp,
                                    ZPOOL_CONFIG_VDEV_CHILDREN, &max_id) == 0) {
                                        verify(nvlist_add_uint64(config,
                                            ZPOOL_CONFIG_VDEV_CHILDREN,
                                            max_id) == 0);
                                        valid_top_config = B_TRUE;
                                }

                                if (nvlist_lookup_uint64_array(tmp,
                                    ZPOOL_CONFIG_HOLE_ARRAY, &hole_array,
                                    &holes) == 0) {
                                        verify(nvlist_add_uint64_array(config,
                                            ZPOOL_CONFIG_HOLE_ARRAY,
                                            hole_array, holes) == 0);
                                }
                        }

                        if (!config_seen) {
                                /*
                                 * Copy the relevant pieces of data to the pool
                                 * configuration:
                                 *
                                 *      version
                                 *      pool guid
                                 *      name
                                 *      comment (if available)
                                 *      pool state
                                 *      hostid (if available)
                                 *      hostname (if available)
                                 */
                                uint64_t state;

                                verify(nvlist_lookup_uint64(tmp,
                                    ZPOOL_CONFIG_VERSION, &version) == 0);
                                if (nvlist_add_uint64(config,
                                    ZPOOL_CONFIG_VERSION, version) != 0)
                                        goto nomem;
                                verify(nvlist_lookup_uint64(tmp,
                                    ZPOOL_CONFIG_POOL_GUID, &guid) == 0);
                                if (nvlist_add_uint64(config,
                                    ZPOOL_CONFIG_POOL_GUID, guid) != 0)
                                        goto nomem;
                                verify(nvlist_lookup_string(tmp,
                                    ZPOOL_CONFIG_POOL_NAME, &name) == 0);
                                if (nvlist_add_string(config,
                                    ZPOOL_CONFIG_POOL_NAME, name) != 0)
                                        goto nomem;

                                /*
                                 * COMMENT is optional; don't bail if it's not
                                 * there.  Instead, set it to NULL.
                                 */
                                if (nvlist_lookup_string(tmp,
                                    ZPOOL_CONFIG_COMMENT, &comment) != 0)
                                        comment = NULL;
                                else if (nvlist_add_string(config,
                                    ZPOOL_CONFIG_COMMENT, comment) != 0)
                                        goto nomem;

                                verify(nvlist_lookup_uint64(tmp,
                                    ZPOOL_CONFIG_POOL_STATE, &state) == 0);
                                if (nvlist_add_uint64(config,
                                    ZPOOL_CONFIG_POOL_STATE, state) != 0)
                                        goto nomem;

                                hostid = 0;
                                if (nvlist_lookup_uint64(tmp,
                                    ZPOOL_CONFIG_HOSTID, &hostid) == 0) {
                                        if (nvlist_add_uint64(config,
                                            ZPOOL_CONFIG_HOSTID, hostid) != 0)
                                                goto nomem;
                                        verify(nvlist_lookup_string(tmp,
                                            ZPOOL_CONFIG_HOSTNAME,
                                            &hostname) == 0);
                                        if (nvlist_add_string(config,
                                            ZPOOL_CONFIG_HOSTNAME,
                                            hostname) != 0)
                                                goto nomem;
                                }

                                config_seen = B_TRUE;
                        }

                        /*
                         * Add this top-level vdev to the child array.
                         */
                        verify(nvlist_lookup_nvlist(tmp,
                            ZPOOL_CONFIG_VDEV_TREE, &nvtop) == 0);
                        verify(nvlist_lookup_uint64(nvtop, ZPOOL_CONFIG_ID,
                            &id) == 0);

                        if (id >= children) {
                                nvlist_t **newchild;

                                newchild = zfs_alloc(hdl, (id + 1) *
                                    sizeof (nvlist_t *));
                                if (newchild == NULL)
                                        goto nomem;

                                for (c = 0; c < children; c++)
                                        newchild[c] = child[c];

                                free(child);
                                child = newchild;
                                children = id + 1;
                        }
                        if (nvlist_dup(nvtop, &child[id], 0) != 0)
                                goto nomem;

                }

                /*
                 * If we have information about all the top-levels then
                 * clean up the nvlist which we've constructed. This
                 * means removing any extraneous devices that are
                 * beyond the valid range or adding devices to the end
                 * of our array which appear to be missing.
                 */
                if (valid_top_config) {
                        if (max_id < children) {
                                for (c = max_id; c < children; c++)
                                        nvlist_free(child[c]);
                                children = max_id;
                        } else if (max_id > children) {
                                nvlist_t **newchild;

                                newchild = zfs_alloc(hdl, (max_id) *
                                    sizeof (nvlist_t *));
                                if (newchild == NULL)
                                        goto nomem;

                                for (c = 0; c < children; c++)
                                        newchild[c] = child[c];

                                free(child);
                                child = newchild;
                                children = max_id;
                        }
                }

                verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
                    &guid) == 0);

                /*
                 * The vdev namespace may contain holes as a result of
                 * device removal. We must add them back into the vdev
                 * tree before we process any missing devices.
                 */
                if (holes > 0) {
                        ASSERT(valid_top_config);

                        for (c = 0; c < children; c++) {
                                nvlist_t *holey;

                                if (child[c] != NULL ||
                                    !vdev_is_hole(hole_array, holes, c))
                                        continue;

                                if (nvlist_alloc(&holey, NV_UNIQUE_NAME,
                                    0) != 0)
                                        goto nomem;

                                /*
                                 * Holes in the namespace are treated as
                                 * "hole" top-level vdevs and have a
                                 * special flag set on them.
                                 */
                                if (nvlist_add_string(holey,
                                    ZPOOL_CONFIG_TYPE,
                                    VDEV_TYPE_HOLE) != 0 ||
                                    nvlist_add_uint64(holey,
                                    ZPOOL_CONFIG_ID, c) != 0 ||
                                    nvlist_add_uint64(holey,
                                    ZPOOL_CONFIG_GUID, 0ULL) != 0)
                                        goto nomem;
                                child[c] = holey;
                        }
                }

                /*
                 * Look for any missing top-level vdevs.  If this is the case,
                 * create a faked up 'missing' vdev as a placeholder.  We cannot
                 * simply compress the child array, because the kernel performs
                 * certain checks to make sure the vdev IDs match their location
                 * in the configuration.
                 */
                for (c = 0; c < children; c++) {
                        if (child[c] == NULL) {
                                nvlist_t *missing;
                                if (nvlist_alloc(&missing, NV_UNIQUE_NAME,
                                    0) != 0)
                                        goto nomem;
                                if (nvlist_add_string(missing,
                                    ZPOOL_CONFIG_TYPE,
                                    VDEV_TYPE_MISSING) != 0 ||
                                    nvlist_add_uint64(missing,
                                    ZPOOL_CONFIG_ID, c) != 0 ||
                                    nvlist_add_uint64(missing,
                                    ZPOOL_CONFIG_GUID, 0ULL) != 0) {
                                        nvlist_free(missing);
                                        goto nomem;
                                }
                                child[c] = missing;
                        }
                }

                /*
                 * Put all of this pool's top-level vdevs into a root vdev.
                 */
                if (nvlist_alloc(&nvroot, NV_UNIQUE_NAME, 0) != 0)
                        goto nomem;
                if (nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE,
                    VDEV_TYPE_ROOT) != 0 ||
                    nvlist_add_uint64(nvroot, ZPOOL_CONFIG_ID, 0ULL) != 0 ||
                    nvlist_add_uint64(nvroot, ZPOOL_CONFIG_GUID, guid) != 0 ||
                    nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
                    child, children) != 0) {
                        nvlist_free(nvroot);
                        goto nomem;
                }

                for (c = 0; c < children; c++)
                        nvlist_free(child[c]);
                free(child);
                children = 0;
                child = NULL;

                /*
                 * Go through and fix up any paths and/or devids based on our
                 * known list of vdev GUID -> path mappings.
                 */
                if (fix_paths(nvroot, pl->names) != 0) {
                        nvlist_free(nvroot);
                        goto nomem;
                }

                /*
                 * Add the root vdev to this pool's configuration.
                 */
                if (nvlist_add_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
                    nvroot) != 0) {
                        nvlist_free(nvroot);
                        goto nomem;
                }
                nvlist_free(nvroot);

                /*
                 * zdb uses this path to report on active pools that were
                 * imported or created using -R.
                 */
                if (active_ok)
                        goto add_pool;

                /*
                 * Determine if this pool is currently active, in which case we
                 * can't actually import it.
                 */
                verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
                    &name) == 0);
                verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
                    &guid) == 0);

                if (pool_active(hdl, name, guid, &isactive) != 0)
                        goto error;

                if (isactive) {
                        nvlist_free(config);
                        config = NULL;
                        continue;
                }

                if ((nvl = refresh_config(hdl, config)) == NULL) {
                        nvlist_free(config);
                        config = NULL;
                        continue;
                }

                nvlist_free(config);
                config = nvl;

                /*
                 * Go through and update the paths for spares, now that we have
                 * them.
                 */
                verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
                    &nvroot) == 0);
                if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
                    &spares, &nspares) == 0) {
                        for (i = 0; i < nspares; i++) {
                                if (fix_paths(spares[i], pl->names) != 0)
                                        goto nomem;
                        }
                }

                /*
                 * Update the paths for l2cache devices.
                 */
                if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
                    &l2cache, &nl2cache) == 0) {
                        for (i = 0; i < nl2cache; i++) {
                                if (fix_paths(l2cache[i], pl->names) != 0)
                                        goto nomem;
                        }
                }

                /*
                 * Restore the original information read from the actual label.
                 */
                (void) nvlist_remove(config, ZPOOL_CONFIG_HOSTID,
                    DATA_TYPE_UINT64);
                (void) nvlist_remove(config, ZPOOL_CONFIG_HOSTNAME,
                    DATA_TYPE_STRING);
                if (hostid != 0) {
                        verify(nvlist_add_uint64(config, ZPOOL_CONFIG_HOSTID,
                            hostid) == 0);
                        verify(nvlist_add_string(config, ZPOOL_CONFIG_HOSTNAME,
                            hostname) == 0);
                }

add_pool:
                /*
                 * Add this pool to the list of configs.
                 */
                verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
                    &name) == 0);
                if (nvlist_add_nvlist(ret, name, config) != 0)
                        goto nomem;

                found_one = B_TRUE;
                nvlist_free(config);
                config = NULL;
        }

        if (!found_one) {
                nvlist_free(ret);
                ret = NULL;
        }

        return (ret);

nomem:
        (void) no_memory(hdl);
error:
        nvlist_free(config);
        nvlist_free(ret);
        for (c = 0; c < children; c++)
                nvlist_free(child[c]);
        free(child);

        return (NULL);
}

/*
 * Return the offset of the given label.
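 *
 * ZFS keeps VDEV_LABELS copies of the label on each device: the first
 * half at the front of the device and the second half at the end, which
 * is what the (l < VDEV_LABELS / 2) test below selects between.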
 */
static uint64_t
label_offset(uint64_t size, int l)
{
        ASSERT(P2PHASE_TYPED(size, sizeof (vdev_label_t), uint64_t) == 0);
        return (l * sizeof (vdev_label_t) + (l < VDEV_LABELS / 2 ?
            0 : size - VDEV_LABELS * sizeof (vdev_label_t)));
}

/*
 * Given a file descriptor, read the label information and return an nvlist
 * describing the configuration, if there is one.
 */
int
zpool_read_label(int fd, nvlist_t **config)
{
        struct stat64 statbuf;
        int l;
        vdev_label_t *label;
        uint64_t state, txg, size;

        *config = NULL;

        if (fstat64(fd, &statbuf) == -1)
                return (0);
        size = P2ALIGN_TYPED(statbuf.st_size, sizeof (vdev_label_t), uint64_t);

        if ((label = malloc(sizeof (vdev_label_t))) == NULL)
                return (-1);

        for (l = 0; l < VDEV_LABELS; l++) {
                if (pread64(fd, label, sizeof (vdev_label_t),
                    label_offset(size, l)) != sizeof (vdev_label_t))
                        continue;

                if (nvlist_unpack(label->vl_vdev_phys.vp_nvlist,
                    sizeof (label->vl_vdev_phys.vp_nvlist), config, 0) != 0)
                        continue;

                if (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_POOL_STATE,
                    &state) != 0 || state > POOL_STATE_L2CACHE) {
                        nvlist_free(*config);
                        continue;
                }

                if (state != POOL_STATE_SPARE && state != POOL_STATE_L2CACHE &&
                    (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_POOL_TXG,
                    &txg) != 0 || txg == 0)) {
                        nvlist_free(*config);
                        continue;
                }

                free(label);
                return (0);
        }

        free(label);
        *config = NULL;
        return (0);
}

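/*
 * A minimal usage sketch for zpool_read_label() (illustrative only; not
 * called from within the library): open a device read-only, fetch
 * whatever label configuration it holds, and return the nvlist, which
 * the caller must eventually nvlist_free().
 */
static nvlist_t *
example_read_label(const char *dev)
{
        nvlist_t *config = NULL;
        int fd;

        if ((fd = open(dev, O_RDONLY)) < 0)
                return (NULL);

        /* a nonzero return means the read ran out of memory */
        if (zpool_read_label(fd, &config) != 0)
                config = NULL;

        (void) close(fd);
        return (config);
}
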
typedef struct rdsk_node {
        char *rn_name;
        int rn_dfd;
        libzfs_handle_t *rn_hdl;
        nvlist_t *rn_config;
        avl_tree_t *rn_avl;
        avl_node_t rn_node;
        boolean_t rn_nozpool;
} rdsk_node_t;

static int
slice_cache_compare(const void *arg1, const void *arg2)
{
        const char  *nm1 = ((rdsk_node_t *)arg1)->rn_name;
        const char  *nm2 = ((rdsk_node_t *)arg2)->rn_name;
        char *nm1slice, *nm2slice;
        int rv;

        /*
         * slices zero and two are the most likely to provide results,
         * so put those first
         */
        nm1slice = strstr(nm1, "s0");
        nm2slice = strstr(nm2, "s0");
        if (nm1slice && !nm2slice) {
                return (-1);
        }
        if (!nm1slice && nm2slice) {
                return (1);
        }
        nm1slice = strstr(nm1, "s2");
        nm2slice = strstr(nm2, "s2");
        if (nm1slice && !nm2slice) {
                return (-1);
        }
        if (!nm1slice && nm2slice) {
                return (1);
        }

        rv = strcmp(nm1, nm2);
        if (rv == 0)
                return (0);
        return (rv > 0 ? 1 : -1);
}

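/*
 * Mark the given slice in the cache as unable to contain a zpool if its
 * size (expressed in blksz-sized blocks) falls below SPA_MINDEVSIZE.
 */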
static void
check_one_slice(avl_tree_t *r, char *diskname, uint_t partno,
    diskaddr_t size, uint_t blksz)
{
        rdsk_node_t tmpnode;
        rdsk_node_t *node;
        char sname[MAXNAMELEN];

        tmpnode.rn_name = &sname[0];
        (void) snprintf(tmpnode.rn_name, MAXNAMELEN, "%s%u",
            diskname, partno);
        /*
         * protect against division by zero for disk labels that
         * contain a bogus sector size
         */
        if (blksz == 0)
                blksz = DEV_BSIZE;
        /* too small to contain a zpool? */
        if ((size < (SPA_MINDEVSIZE / blksz)) &&
            (node = avl_find(r, &tmpnode, NULL)))
                node->rn_nozpool = B_TRUE;
}

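/*
 * The named device has disappeared; mark every slice ('s' names) and
 * fdisk partition ('p' names) derived from it as unable to contain a
 * zpool.
 */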
static void
nozpool_all_slices(avl_tree_t *r, const char *sname)
{
        char diskname[MAXNAMELEN];
        char *ptr;
        int i;

        (void) strncpy(diskname, sname, MAXNAMELEN);
        if (((ptr = strrchr(diskname, 's')) == NULL) &&
            ((ptr = strrchr(diskname, 'p')) == NULL))
                return;
        ptr[0] = 's';
        ptr[1] = '\0';
        for (i = 0; i < NDKMAP; i++)
                check_one_slice(r, diskname, i, 0, 1);
        ptr[0] = 'p';
        for (i = 0; i <= FD_NUMPART; i++)
                check_one_slice(r, diskname, i, 0, 1);
}

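/*
 * Read the disk's VTOC or EFI label once and flag any slices too small
 * to hold a zpool, so the worker threads need not open them at all.
 */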
static void
check_slices(avl_tree_t *r, int fd, const char *sname)
{
        struct extvtoc vtoc;
        struct dk_gpt *gpt;
        char diskname[MAXNAMELEN];
        char *ptr;
        int i;

        (void) strncpy(diskname, sname, MAXNAMELEN);
        if ((ptr = strrchr(diskname, 's')) == NULL || !isdigit(ptr[1]))
                return;
        ptr[1] = '\0';

        if (read_extvtoc(fd, &vtoc) >= 0) {
                for (i = 0; i < NDKMAP; i++)
                        check_one_slice(r, diskname, i,
                            vtoc.v_part[i].p_size, vtoc.v_sectorsz);
        } else if (efi_alloc_and_read(fd, &gpt) >= 0) {
                /*
                 * on x86 we'll still have leftover links that point
                 * to slices s[9-15], so use NDKMAP instead
                 */
                for (i = 0; i < NDKMAP; i++)
                        check_one_slice(r, diskname, i,
                            gpt->efi_parts[i].p_size, gpt->efi_lbasize);
                /* nodes p[1-4] are never used with EFI labels */
                ptr[0] = 'p';
                for (i = 1; i <= FD_NUMPART; i++)
                        check_one_slice(r, diskname, i, 0, 1);
                efi_free(gpt);
        }
}

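/*
 * Thread pool worker: open a single directory entry from the slice
 * cache, screen out anything that clearly cannot contain a pool
 * (missing devices, wrong file types, undersized files or slices), and
 * read its label into rn_config.
 */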
static void
zpool_open_func(void *arg)
{
        rdsk_node_t *rn = arg;
        struct stat64 statbuf;
        nvlist_t *config;
        int fd;

        if (rn->rn_nozpool)
                return;
        if ((fd = openat64(rn->rn_dfd, rn->rn_name, O_RDONLY)) < 0) {
                /* symlink to a device that's no longer there */
                if (errno == ENOENT)
                        nozpool_all_slices(rn->rn_avl, rn->rn_name);
                return;
        }
        /*
         * Ignore failed stats.  We only want regular
         * files, character devs and block devs.
         */
        if (fstat64(fd, &statbuf) != 0 ||
            (!S_ISREG(statbuf.st_mode) &&
            !S_ISCHR(statbuf.st_mode) &&
            !S_ISBLK(statbuf.st_mode))) {
                (void) close(fd);
                return;
        }
        /* this file is too small to hold a zpool */
        if (S_ISREG(statbuf.st_mode) &&
            statbuf.st_size < SPA_MINDEVSIZE) {
                (void) close(fd);
                return;
        } else if (!S_ISREG(statbuf.st_mode)) {
                /*
                 * Try to read the disk label first so we don't have to
                 * open a bunch of minor nodes that can't have a zpool.
                 */
                check_slices(rn->rn_avl, fd, rn->rn_name);
        }

        if (zpool_read_label(fd, &config) != 0) {
                (void) close(fd);
                (void) no_memory(rn->rn_hdl);
                return;
        }
        (void) close(fd);

        rn->rn_config = config;
        if (config != NULL) {
                assert(rn->rn_nozpool == B_FALSE);
        }
}

/*
 * Given a file descriptor, clear (zero) the label information.  This function
 * is currently only used in the appliance stack as part of the ZFS sysevent
 * module.
 */
int
zpool_clear_label(int fd)
{
        struct stat64 statbuf;
        int l;
        vdev_label_t *label;
        uint64_t size;

        if (fstat64(fd, &statbuf) == -1)
                return (0);
        size = P2ALIGN_TYPED(statbuf.st_size, sizeof (vdev_label_t), uint64_t);

        if ((label = calloc(1, sizeof (vdev_label_t))) == NULL)
                return (-1);

        for (l = 0; l < VDEV_LABELS; l++) {
                if (pwrite64(fd, label, sizeof (vdev_label_t),
                    label_offset(size, l)) != sizeof (vdev_label_t)) {
                        free(label);
                        return (-1);
                }
        }

        free(label);
        return (0);
}

/*
 * Given a list of directories to search, find all pools stored on disk.  This
 * includes partial pools which are not available to import.  If no
 * directories are given (iarg->paths is 0), then the default directory
 * (/dev/dsk) is searched.
 * poolname or guid (but not both) are provided by the caller when trying
 * to import a specific pool.
 */
static nvlist_t *
zpool_find_import_impl(libzfs_handle_t *hdl, importargs_t *iarg)
{
        int i, dirs = iarg->paths;
        DIR *dirp = NULL;
        struct dirent64 *dp;
        char path[MAXPATHLEN];
        char *end, **dir = iarg->path;
        size_t pathleft;
        nvlist_t *ret = NULL;
        static char *default_dir = "/dev/dsk";
        pool_list_t pools = { 0 };
        pool_entry_t *pe, *penext;
        vdev_entry_t *ve, *venext;
        config_entry_t *ce, *cenext;
        name_entry_t *ne, *nenext;
        avl_tree_t slice_cache;
        rdsk_node_t *slice;
        void *cookie;

        if (dirs == 0) {
                dirs = 1;
                dir = &default_dir;
        }

        /*
         * Go through and read the label configuration information from every
         * possible device, organizing the information according to pool GUID
         * and toplevel GUID.
         */
        for (i = 0; i < dirs; i++) {
                tpool_t *t;
                char *rdsk;
                int dfd;

                /* use realpath to normalize the path */
                if (realpath(dir[i], path) == 0) {
                        (void) zfs_error_fmt(hdl, EZFS_BADPATH,
                            dgettext(TEXT_DOMAIN, "cannot open '%s'"), dir[i]);
                        goto error;
                }
                end = &path[strlen(path)];
                *end++ = '/';
                *end = 0;
                pathleft = &path[sizeof (path)] - end;

                /*
                 * Using raw devices instead of block devices when we're
                 * reading the labels skips a bunch of slow operations during
                 * close(2) processing, so we replace /dev/dsk with /dev/rdsk.
                 */
                if (strcmp(path, "/dev/dsk/") == 0)
                        rdsk = "/dev/rdsk/";
                else
                        rdsk = path;

                if ((dfd = open64(rdsk, O_RDONLY)) < 0 ||
                    (dirp = fdopendir(dfd)) == NULL) {
                        zfs_error_aux(hdl, strerror(errno));
                        (void) zfs_error_fmt(hdl, EZFS_BADPATH,
                            dgettext(TEXT_DOMAIN, "cannot open '%s'"),
                            rdsk);
                        goto error;
                }

                avl_create(&slice_cache, slice_cache_compare,
                    sizeof (rdsk_node_t), offsetof(rdsk_node_t, rn_node));
                /*
                 * This is not MT-safe, but we have no MT consumers of libzfs
                 */
                while ((dp = readdir64(dirp)) != NULL) {
                        const char *name = dp->d_name;
                        if (name[0] == '.' &&
                            (name[1] == 0 || (name[1] == '.' && name[2] == 0)))
                                continue;

                        slice = zfs_alloc(hdl, sizeof (rdsk_node_t));
                        slice->rn_name = zfs_strdup(hdl, name);
                        slice->rn_avl = &slice_cache;
                        slice->rn_dfd = dfd;
                        slice->rn_hdl = hdl;
                        slice->rn_nozpool = B_FALSE;
                        avl_add(&slice_cache, slice);
                }
                /*
                 * create a thread pool to do all of this in parallel;
                 * rn_nozpool is not protected, so this is racy in that
                 * multiple tasks could decide that the same slice can
                 * not hold a zpool, which is benign.  Also choose
                 * double the number of processors; we hold a lot of
                 * locks in the kernel, so going beyond this doesn't
                 * buy us much.
                 */
                t = tpool_create(1, 2 * sysconf(_SC_NPROCESSORS_ONLN),
                    0, NULL);
                for (slice = avl_first(&slice_cache); slice;
                    (slice = avl_walk(&slice_cache, slice,
                    AVL_AFTER)))
                        (void) tpool_dispatch(t, zpool_open_func, slice);
                tpool_wait(t);
                tpool_destroy(t);

                cookie = NULL;
                while ((slice = avl_destroy_nodes(&slice_cache,
                    &cookie)) != NULL) {
                        if (slice->rn_config != NULL) {
                                nvlist_t *config = slice->rn_config;
                                boolean_t matched = B_TRUE;

                                if (iarg->poolname != NULL) {
                                        char *pname;

                                        matched = nvlist_lookup_string(config,
                                            ZPOOL_CONFIG_POOL_NAME,
                                            &pname) == 0 &&
                                            strcmp(iarg->poolname, pname) == 0;
                                } else if (iarg->guid != 0) {
                                        uint64_t this_guid;

                                        matched = nvlist_lookup_uint64(config,
                                            ZPOOL_CONFIG_POOL_GUID,
                                            &this_guid) == 0 &&
                                            iarg->guid == this_guid;
                                }
                                if (!matched) {
                                        nvlist_free(config);
                                        config = NULL;
                                        continue;
                                }
                                /* use the non-raw path for the config */
                                (void) strlcpy(end, slice->rn_name, pathleft);
                                if (add_config(hdl, &pools, path, config) != 0)
                                        goto error;
                        }
                        free(slice->rn_name);
                        free(slice);
                }
                avl_destroy(&slice_cache);

                (void) closedir(dirp);
                dirp = NULL;
        }

        ret = get_configs(hdl, &pools, iarg->can_be_active);

error:
        for (pe = pools.pools; pe != NULL; pe = penext) {
                penext = pe->pe_next;
                for (ve = pe->pe_vdevs; ve != NULL; ve = venext) {
                        venext = ve->ve_next;
                        for (ce = ve->ve_configs; ce != NULL; ce = cenext) {
                                cenext = ce->ce_next;
                                if (ce->ce_config)
                                        nvlist_free(ce->ce_config);
                                free(ce);
                        }
                        free(ve);
                }
                free(pe);
        }

        for (ne = pools.names; ne != NULL; ne = nenext) {
                nenext = ne->ne_next;
                if (ne->ne_name)
                        free(ne->ne_name);
                free(ne);
        }

        if (dirp)
                (void) closedir(dirp);

        return (ret);
}

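/*
 * Convenience wrapper around zpool_find_import_impl() for callers that
 * only have a list of directories to search.
 */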
nvlist_t *
zpool_find_import(libzfs_handle_t *hdl, int argc, char **argv)
{
        importargs_t iarg = { 0 };

        iarg.paths = argc;
        iarg.path = argv;

        return (zpool_find_import_impl(hdl, &iarg));
}

/*
 * Given a cache file, return the contents as a list of importable pools.
 * poolname or guid (but not both) are provided by the caller when trying
 * to import a specific pool.
 */
nvlist_t *
zpool_find_import_cached(libzfs_handle_t *hdl, const char *cachefile,
    char *poolname, uint64_t guid)
{
        char *buf;
        int fd;
        struct stat64 statbuf;
        nvlist_t *raw, *src, *dst;
        nvlist_t *pools;
        nvpair_t *elem;
        char *name;
        uint64_t this_guid;
        boolean_t active;

        verify(poolname == NULL || guid == 0);

        if ((fd = open(cachefile, O_RDONLY)) < 0) {
                zfs_error_aux(hdl, "%s", strerror(errno));
                (void) zfs_error(hdl, EZFS_BADCACHE,
                    dgettext(TEXT_DOMAIN, "failed to open cache file"));
                return (NULL);
        }

        if (fstat64(fd, &statbuf) != 0) {
                zfs_error_aux(hdl, "%s", strerror(errno));
                (void) close(fd);
                (void) zfs_error(hdl, EZFS_BADCACHE,
                    dgettext(TEXT_DOMAIN, "failed to get size of cache file"));
                return (NULL);
        }

        if ((buf = zfs_alloc(hdl, statbuf.st_size)) == NULL) {
                (void) close(fd);
                return (NULL);
        }

        if (read(fd, buf, statbuf.st_size) != statbuf.st_size) {
                (void) close(fd);
                free(buf);
                (void) zfs_error(hdl, EZFS_BADCACHE,
                    dgettext(TEXT_DOMAIN,
                    "failed to read cache file contents"));
                return (NULL);
        }

        (void) close(fd);

        if (nvlist_unpack(buf, statbuf.st_size, &raw, 0) != 0) {
                free(buf);
                (void) zfs_error(hdl, EZFS_BADCACHE,
                    dgettext(TEXT_DOMAIN,
                    "invalid or corrupt cache file contents"));
                return (NULL);
        }

        free(buf);

        /*
         * Go through the cached configs and refresh the current state of
         * each pool.
         */
        if (nvlist_alloc(&pools, 0, 0) != 0) {
                (void) no_memory(hdl);
                nvlist_free(raw);
                return (NULL);
        }

        elem = NULL;
        while ((elem = nvlist_next_nvpair(raw, elem)) != NULL) {
                verify(nvpair_value_nvlist(elem, &src) == 0);

                verify(nvlist_lookup_string(src, ZPOOL_CONFIG_POOL_NAME,
                    &name) == 0);
                if (poolname != NULL && strcmp(poolname, name) != 0)
                        continue;

		verify(nvlist_lookup_uint64(src, ZPOOL_CONFIG_POOL_GUID,
		    &this_guid) == 0);
		if (guid != 0 && guid != this_guid)
			continue;
1408 
1409                 if (pool_active(hdl, name, this_guid, &active) != 0) {
1410                         nvlist_free(raw);
1411                         nvlist_free(pools);
1412                         return (NULL);
1413                 }
1414 
1415                 if (active)
1416                         continue;
1417 
1418                 if ((dst = refresh_config(hdl, src)) == NULL) {
1419                         nvlist_free(raw);
1420                         nvlist_free(pools);
1421                         return (NULL);
1422                 }
1423 
1424                 if (nvlist_add_nvlist(pools, nvpair_name(elem), dst) != 0) {
1425                         (void) no_memory(hdl);
1426                         nvlist_free(dst);
1427                         nvlist_free(raw);
1428                         nvlist_free(pools);
1429                         return (NULL);
1430                 }
1431                 nvlist_free(dst);
1432         }
1433 
1434         nvlist_free(raw);
1435         return (pools);
1436 }
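
/*
 * A minimal usage sketch for the function above; the cache file path shown
 * is the conventional default location, and error handling is elided:
 *
 *	nvlist_t *pools = zpool_find_import_cached(hdl,
 *	    "/etc/zfs/zpool.cache", NULL, 0);
 *	nvpair_t *elem = NULL;
 *
 *	while ((elem = nvlist_next_nvpair(pools, elem)) != NULL)
 *		(void) printf("%s\n", nvpair_name(elem));
 *	nvlist_free(pools);
 */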
1437 
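/*
 * zpool_iter() callback used by zpool_search_import(): returns nonzero
 * (halting the iteration) when an already-imported pool matches the
 * requested name or guid.  Note that the callback closes each handle it
 * is passed.
 */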
1438 static int
1439 name_or_guid_exists(zpool_handle_t *zhp, void *data)
1440 {
1441         importargs_t *import = data;
1442         int found = 0;
1443 
1444         if (import->poolname != NULL) {
1445                 char *pool_name;
1446 
1447                 verify(nvlist_lookup_string(zhp->zpool_config,
1448                     ZPOOL_CONFIG_POOL_NAME, &pool_name) == 0);
1449                 if (strcmp(pool_name, import->poolname) == 0)
1450                         found = 1;
1451         } else {
1452                 uint64_t pool_guid;
1453 
1454                 verify(nvlist_lookup_uint64(zhp->zpool_config,
1455                     ZPOOL_CONFIG_POOL_GUID, &pool_guid) == 0);
1456                 if (pool_guid == import->guid)
1457                         found = 1;
1458         }
1459 
1460         zpool_close(zhp);
1461         return (found);
1462 }
1463 
1464 nvlist_t *
1465 zpool_search_import(libzfs_handle_t *hdl, importargs_t *import)
1466 {
1467         verify(import->poolname == NULL || import->guid == 0);
1468 
1469         if (import->unique)
1470                 import->exists = zpool_iter(hdl, name_or_guid_exists, import);
1471 
1472         if (import->cachefile != NULL)
1473                 return (zpool_find_import_cached(hdl, import->cachefile,
1474                     import->poolname, import->guid));
1475 
1476         return (zpool_find_import_impl(hdl, import));
1477 }
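
/*
 * A sketch of how a caller might search for a single pool by name (the
 * pool name and field settings are illustrative):
 *
 *	importargs_t args = { 0 };
 *
 *	args.poolname = "tank";
 *	args.unique = B_TRUE;
 *	nvlist_t *pools = zpool_search_import(hdl, &args);
 *	if (args.exists)
 *		... a pool by that name is already imported ...
 */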
1478 
1479 boolean_t
1480 find_guid(nvlist_t *nv, uint64_t guid)
1481 {
1482         uint64_t tmp;
1483         nvlist_t **child;
1484         uint_t c, children;
1485 
1486         verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &tmp) == 0);
1487         if (tmp == guid)
1488                 return (B_TRUE);
1489 
1490         if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
1491             &child, &children) == 0) {
1492                 for (c = 0; c < children; c++)
1493                         if (find_guid(child[c], guid))
1494                                 return (B_TRUE);
1495         }
1496 
1497         return (B_FALSE);
1498 }
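
/*
 * For example, to decide whether a particular vdev guid belongs to a pool
 * config (this is essentially what the POOL_STATE_ACTIVE case of
 * zpool_in_use() below does):
 *
 *	nvlist_t *nvroot;
 *
 *	verify(nvlist_lookup_nvlist(pool_config, ZPOOL_CONFIG_VDEV_TREE,
 *	    &nvroot) == 0);
 *	if (find_guid(nvroot, vdev_guid))
 *		... the device with that guid is part of this pool ...
 */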
1499 
1500 typedef struct aux_cbdata {
1501         const char      *cb_type;
1502         uint64_t        cb_guid;
1503         zpool_handle_t  *cb_zhp;
1504 } aux_cbdata_t;
1505 
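/*
 * zpool_iter() callback: stops the iteration (returns 1) when the pool's
 * vdev tree contains an aux device (spares or l2cache, selected by
 * cb_type) whose guid matches cb_guid.  On a match the pool handle is
 * saved in cb_zhp for the caller; otherwise the handle is closed here.
 */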
1506 static int
1507 find_aux(zpool_handle_t *zhp, void *data)
1508 {
1509         aux_cbdata_t *cbp = data;
1510         nvlist_t **list;
1511         uint_t i, count;
1512         uint64_t guid;
1513         nvlist_t *nvroot;
1514 
1515         verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
1516             &nvroot) == 0);
1517 
1518         if (nvlist_lookup_nvlist_array(nvroot, cbp->cb_type,
1519             &list, &count) == 0) {
1520                 for (i = 0; i < count; i++) {
1521                         verify(nvlist_lookup_uint64(list[i],
1522                             ZPOOL_CONFIG_GUID, &guid) == 0);
1523                         if (guid == cbp->cb_guid) {
1524                                 cbp->cb_zhp = zhp;
1525                                 return (1);
1526                         }
1527                 }
1528         }
1529 
1530         zpool_close(zhp);
1531         return (0);
1532 }
1533 
/*
 * Determines if the device is in use by a pool.  If so, *inuse is set to
 * B_TRUE, and the pool's state and name are returned through 'state' and
 * 'namestr'.  The name string is allocated and must be freed by the caller.
 */
1539 int
1540 zpool_in_use(libzfs_handle_t *hdl, int fd, pool_state_t *state, char **namestr,
1541     boolean_t *inuse)
1542 {
1543         nvlist_t *config;
1544         char *name;
1545         boolean_t ret;
1546         uint64_t guid, vdev_guid;
1547         zpool_handle_t *zhp;
1548         nvlist_t *pool_config;
1549         uint64_t stateval, isspare;
1550         aux_cbdata_t cb = { 0 };
1551         boolean_t isactive;
1552 
1553         *inuse = B_FALSE;
1554 
1555         if (zpool_read_label(fd, &config) != 0) {
1556                 (void) no_memory(hdl);
1557                 return (-1);
1558         }
1559 
1560         if (config == NULL)
1561                 return (0);
1562 
1563         verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
1564             &stateval) == 0);
1565         verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID,
1566             &vdev_guid) == 0);
1567 
1568         if (stateval != POOL_STATE_SPARE && stateval != POOL_STATE_L2CACHE) {
1569                 verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
1570                     &name) == 0);
1571                 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
1572                     &guid) == 0);
1573         }
1574 
1575         switch (stateval) {
1576         case POOL_STATE_EXPORTED:
1577                 /*
1578                  * A pool with an exported state may in fact be imported
1579                  * read-only, so check the in-core state to see if it's
1580                  * active and imported read-only.  If it is, set
1581                  * its state to active.
1582                  */
1583                 if (pool_active(hdl, name, guid, &isactive) == 0 && isactive &&
1584                     (zhp = zpool_open_canfail(hdl, name)) != NULL &&
1585                     zpool_get_prop_int(zhp, ZPOOL_PROP_READONLY, NULL))
1586                         stateval = POOL_STATE_ACTIVE;
1587 
1588                 ret = B_TRUE;
1589                 break;
1590 
1591         case POOL_STATE_ACTIVE:
1592                 /*
1593                  * For an active pool, we have to determine if it's really part
1594                  * of a currently active pool (in which case the pool will exist
1595                  * and the guid will be the same), or whether it's part of an
1596                  * active pool that was disconnected without being explicitly
1597                  * exported.
1598                  */
1599                 if (pool_active(hdl, name, guid, &isactive) != 0) {
1600                         nvlist_free(config);
1601                         return (-1);
1602                 }
1603 
1604                 if (isactive) {
1605                         /*
1606                          * Because the device may have been removed while
1607                          * offlined, we only report it as active if the vdev is
1608                          * still present in the config.  Otherwise, pretend like
1609                          * it's not in use.
1610                          */
1611                         if ((zhp = zpool_open_canfail(hdl, name)) != NULL &&
1612                             (pool_config = zpool_get_config(zhp, NULL))
1613                             != NULL) {
1614                                 nvlist_t *nvroot;
1615 
1616                                 verify(nvlist_lookup_nvlist(pool_config,
1617                                     ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
1618                                 ret = find_guid(nvroot, vdev_guid);
1619                         } else {
1620                                 ret = B_FALSE;
1621                         }
1622 
1623                         /*
1624                          * If this is an active spare within another pool, we
1625                          * treat it like an unused hot spare.  This allows the
			 * user to create a pool with a hot spare that is currently
			 * in use within another pool.  Since we return B_TRUE,
1628                          * libdiskmgt will continue to prevent generic consumers
1629                          * from using the device.
1630                          */
1631                         if (ret && nvlist_lookup_uint64(config,
1632                             ZPOOL_CONFIG_IS_SPARE, &isspare) == 0 && isspare)
1633                                 stateval = POOL_STATE_SPARE;
1634 
1635                         if (zhp != NULL)
1636                                 zpool_close(zhp);
1637                 } else {
1638                         stateval = POOL_STATE_POTENTIALLY_ACTIVE;
1639                         ret = B_TRUE;
1640                 }
1641                 break;
1642 
1643         case POOL_STATE_SPARE:
1644                 /*
1645                  * For a hot spare, it can be either definitively in use, or
1646                  * potentially active.  To determine if it's in use, we iterate
1647                  * over all pools in the system and search for one with a spare
1648                  * with a matching guid.
1649                  *
1650                  * Due to the shared nature of spares, we don't actually report
1651                  * the potentially active case as in use.  This means the user
1652                  * can freely create pools on the hot spares of exported pools,
1653                  * but to do otherwise makes the resulting code complicated, and
1654                  * we end up having to deal with this case anyway.
1655                  */
1656                 cb.cb_zhp = NULL;
1657                 cb.cb_guid = vdev_guid;
1658                 cb.cb_type = ZPOOL_CONFIG_SPARES;
1659                 if (zpool_iter(hdl, find_aux, &cb) == 1) {
1660                         name = (char *)zpool_get_name(cb.cb_zhp);
			ret = B_TRUE;
		} else {
			ret = B_FALSE;
1664                 }
1665                 break;
1666 
1667         case POOL_STATE_L2CACHE:
1668 
1669                 /*
1670                  * Check if any pool is currently using this l2cache device.
1671                  */
1672                 cb.cb_zhp = NULL;
1673                 cb.cb_guid = vdev_guid;
1674                 cb.cb_type = ZPOOL_CONFIG_L2CACHE;
1675                 if (zpool_iter(hdl, find_aux, &cb) == 1) {
1676                         name = (char *)zpool_get_name(cb.cb_zhp);
			ret = B_TRUE;
		} else {
			ret = B_FALSE;
1680                 }
1681                 break;
1682 
1683         default:
1684                 ret = B_FALSE;
1685         }
1686 
1688         if (ret) {
1689                 if ((*namestr = zfs_strdup(hdl, name)) == NULL) {
1690                         if (cb.cb_zhp)
1691                                 zpool_close(cb.cb_zhp);
1692                         nvlist_free(config);
1693                         return (-1);
1694                 }
1695                 *state = (pool_state_t)stateval;
1696         }
1697 
1698         if (cb.cb_zhp)
1699                 zpool_close(cb.cb_zhp);
1700 
1701         nvlist_free(config);
1702         *inuse = ret;
1703         return (0);
1704 }
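
/*
 * A minimal sketch of a hypothetical zpool_in_use() caller (the device
 * path is illustrative):
 *
 *	pool_state_t state;
 *	char *name = NULL;
 *	boolean_t inuse;
 *	int fd;
 *
 *	if ((fd = open("/dev/dsk/c0t0d0s0", O_RDONLY)) >= 0 &&
 *	    zpool_in_use(hdl, fd, &state, &name, &inuse) == 0 && inuse) {
 *		(void) printf("in use by pool %s\n", name);
 *		free(name);
 *	}
 */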