/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2016 by Delphix. All rights reserved.
 * Copyright 2015 RackTop Systems.
 * Copyright 2017 Nexenta Systems, Inc.
 */

/*
 * Pool import support functions.
 *
 * To import a pool, we rely on reading the configuration information from the
 * ZFS label of each device.  If we successfully read the label, then we
 * organize the configuration information in the following hierarchy:
 *
 *      pool guid -> toplevel vdev guid -> label txg
 *
 * Duplicate entries matching this same tuple will be discarded.  Once we have
 * examined every device, we pick the best label txg config for each toplevel
 * vdev.  We then arrange these toplevel vdevs into a complete pool config, and
 * update any paths that have changed.  Finally, we attempt to import the pool
 * using our derived config, and record the results.
 */

#include <ctype.h>
#include <devid.h>
#include <dirent.h>
#include <errno.h>
#include <libintl.h>
#include <stddef.h>
#include <stdlib.h>
#include <string.h>
#include <sys/stat.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/vtoc.h>
#include <sys/dktp/fdisk.h>
#include <sys/efi_partition.h>
#include <thread_pool.h>

#include <sys/vdev_impl.h>

#include "libzfs.h"
#include "libzfs_impl.h"

/*
 * Intermediate structures used to gather configuration information.
 */
typedef struct config_entry {
        uint64_t                ce_txg;
        nvlist_t                *ce_config;
        struct config_entry     *ce_next;
} config_entry_t;

typedef struct vdev_entry {
        uint64_t                ve_guid;
        config_entry_t          *ve_configs;
        struct vdev_entry       *ve_next;
} vdev_entry_t;

typedef struct pool_entry {
        uint64_t                pe_guid;
        vdev_entry_t            *pe_vdevs;
        struct pool_entry       *pe_next;
} pool_entry_t;

typedef struct name_entry {
        char                    *ne_name;
        uint64_t                ne_guid;
        struct name_entry       *ne_next;
} name_entry_t;

typedef struct pool_list {
        pool_entry_t            *pools;
        name_entry_t            *names;
} pool_list_t;
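
/*
 * Together, these structures form a two-part index of everything seen during
 * the scan: a three-level tree of pool guid -> toplevel vdev guid -> label
 * txg (pool_entry_t -> vdev_entry_t -> config_entry_t), plus a flat list of
 * vdev guid -> device name mappings (name_entry_t) that is later used to fix
 * up stale paths.
 */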
  97 
/*
 * Go through and fix up any path and/or devid information for the given vdev
 * configuration.
 */
static int
fix_paths(nvlist_t *nv, name_entry_t *names)
{
        nvlist_t **child;
        uint_t c, children;
        uint64_t guid;
        name_entry_t *ne, *best;
        char *path, *devid;
        int matched;

        if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
            &child, &children) == 0) {
                for (c = 0; c < children; c++)
                        if (fix_paths(child[c], names) != 0)
                                return (-1);
                return (0);
        }

        /*
         * This is a leaf (file or disk) vdev.  In either case, go through
         * the name list and see if we find a matching guid.  If so, replace
         * the path and see if we can calculate a new devid.
         *
         * There may be multiple names associated with a particular guid, in
         * which case we have overlapping slices or multiple paths to the same
         * disk.  If this is the case, then we want to pick the path that is
         * the most similar to the original, where "most similar" is the number
         * of matching characters starting from the end of the path.  This will
         * preserve slice numbers even if the disks have been reorganized, and
         * will also catch preferred disk names if multiple paths exist.
         */
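        /*
         * For example (hypothetical device names): if the original path was
         * "/dev/dsk/c1t2d0s3" and the names recorded for this guid are
         * "/dev/dsk/c4t2d0s3" and "/dev/dsk/c1t2d0s4", the first candidate
         * wins because it shares the longer suffix ("t2d0s3") with the
         * original, preserving the slice number.
         */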
        verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) == 0);
        if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) != 0)
                path = NULL;

        matched = 0;
        best = NULL;
        for (ne = names; ne != NULL; ne = ne->ne_next) {
                if (ne->ne_guid == guid) {
                        const char *src, *dst;
                        int count;

                        if (path == NULL) {
                                best = ne;
                                break;
                        }

                        src = ne->ne_name + strlen(ne->ne_name) - 1;
                        dst = path + strlen(path) - 1;
                        for (count = 0; src >= ne->ne_name && dst >= path;
                            src--, dst--, count++)
                                if (*src != *dst)
                                        break;

                        /*
                         * At this point, 'count' is the number of characters
                         * matched from the end.
                         */
                        if (count > matched || best == NULL) {
                                best = ne;
                                matched = count;
                        }
                }
        }

        if (best == NULL)
                return (0);

        if (nvlist_add_string(nv, ZPOOL_CONFIG_PATH, best->ne_name) != 0)
                return (-1);

        if ((devid = devid_str_from_path(best->ne_name)) == NULL) {
                (void) nvlist_remove_all(nv, ZPOOL_CONFIG_DEVID);
        } else {
                if (nvlist_add_string(nv, ZPOOL_CONFIG_DEVID, devid) != 0) {
                        devid_str_free(devid);
                        return (-1);
                }
                devid_str_free(devid);
        }

        return (0);
}

/*
 * Add the given configuration to the list of known devices.
 */
static int
add_config(libzfs_handle_t *hdl, pool_list_t *pl, const char *path,
    nvlist_t *config)
{
        uint64_t pool_guid, vdev_guid, top_guid, txg, state;
        pool_entry_t *pe;
        vdev_entry_t *ve;
        config_entry_t *ce;
        name_entry_t *ne;
 198 
        /*
         * If this is a hot spare not currently in use, or a level 2 cache
         * device, add it to the list of names to translate, but don't do
         * anything else.
         */
        if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
            &state) == 0 &&
            (state == POOL_STATE_SPARE || state == POOL_STATE_L2CACHE) &&
            nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID, &vdev_guid) == 0) {
                if ((ne = zfs_alloc(hdl, sizeof (name_entry_t))) == NULL)
                        return (-1);

                if ((ne->ne_name = zfs_strdup(hdl, path)) == NULL) {
                        free(ne);
                        return (-1);
                }

                ne->ne_guid = vdev_guid;
                ne->ne_next = pl->names;
                pl->names = ne;

                nvlist_free(config);
                return (0);
        }

        /*
         * If we have a valid config but cannot read any of these fields, then
         * it means we have a half-initialized label.  In vdev_label_init()
         * we write a label with txg == 0 so that we can identify the device
         * in case the user refers to the same disk later on.  If we fail to
         * create the pool, we'll be left with a label in this state
         * which should not be considered part of a valid pool.
         */
        if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
            &pool_guid) != 0 ||
            nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID,
            &vdev_guid) != 0 ||
            nvlist_lookup_uint64(config, ZPOOL_CONFIG_TOP_GUID,
            &top_guid) != 0 ||
            nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG,
            &txg) != 0 || txg == 0) {
                nvlist_free(config);
                return (0);
        }

        /*
         * First, see if we know about this pool.  If not, then add it to the
         * list of known pools.
         */
        for (pe = pl->pools; pe != NULL; pe = pe->pe_next) {
                if (pe->pe_guid == pool_guid)
                        break;
        }

        if (pe == NULL) {
                if ((pe = zfs_alloc(hdl, sizeof (pool_entry_t))) == NULL) {
                        nvlist_free(config);
                        return (-1);
                }
                pe->pe_guid = pool_guid;
                pe->pe_next = pl->pools;
                pl->pools = pe;
        }
 262 
        /*
         * Second, see if we know about this toplevel vdev.  Add it if it's
         * missing.
         */
        for (ve = pe->pe_vdevs; ve != NULL; ve = ve->ve_next) {
                if (ve->ve_guid == top_guid)
                        break;
        }

        if (ve == NULL) {
                if ((ve = zfs_alloc(hdl, sizeof (vdev_entry_t))) == NULL) {
                        nvlist_free(config);
                        return (-1);
                }
                ve->ve_guid = top_guid;
                ve->ve_next = pe->pe_vdevs;
                pe->pe_vdevs = ve;
        }

        /*
         * Third, see if we have a config with a matching transaction group.  If
         * so, then we do nothing.  Otherwise, add it to the list of known
         * configs.
         */
        for (ce = ve->ve_configs; ce != NULL; ce = ce->ce_next) {
                if (ce->ce_txg == txg)
                        break;
        }

        if (ce == NULL) {
                if ((ce = zfs_alloc(hdl, sizeof (config_entry_t))) == NULL) {
                        nvlist_free(config);
                        return (-1);
                }
                ce->ce_txg = txg;
                ce->ce_config = config;
                ce->ce_next = ve->ve_configs;
                ve->ve_configs = ce;
        } else {
                nvlist_free(config);
        }

        /*
         * At this point we've successfully added our config to the list of
         * known configs.  The last thing to do is add the vdev guid -> path
         * mappings so that we can fix up the configuration as necessary before
         * doing the import.
         */
        if ((ne = zfs_alloc(hdl, sizeof (name_entry_t))) == NULL)
                return (-1);

        if ((ne->ne_name = zfs_strdup(hdl, path)) == NULL) {
                free(ne);
                return (-1);
        }

        ne->ne_guid = vdev_guid;
        ne->ne_next = pl->names;
        pl->names = ne;

        return (0);
}
 325 
/*
 * Determine whether the named pool is active and matches the given GUID;
 * the answer is returned in *isactive.  Returns 0 on success and -1 on
 * error.
 */
static int
pool_active(libzfs_handle_t *hdl, const char *name, uint64_t guid,
    boolean_t *isactive)
{
        zpool_handle_t *zhp;
        uint64_t theguid;

        if (zpool_open_silent(hdl, name, &zhp) != 0)
                return (-1);

        if (zhp == NULL) {
                *isactive = B_FALSE;
                return (0);
        }

        verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_POOL_GUID,
            &theguid) == 0);

        zpool_close(zhp);

        *isactive = (theguid == guid);
        return (0);
}
 352 
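/*
 * Ask the kernel to try a non-destructive import of the given config via
 * ZFS_IOC_POOL_TRYIMPORT and return the refreshed configuration, growing
 * the destination buffer and retrying while the ioctl fails with ENOMEM.
 */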
static nvlist_t *
refresh_config(libzfs_handle_t *hdl, nvlist_t *config)
{
        nvlist_t *nvl;
        zfs_cmd_t zc = { 0 };
        int err, dstbuf_size;

        if (zcmd_write_conf_nvlist(hdl, &zc, config) != 0)
                return (NULL);

        dstbuf_size = MAX(CONFIG_BUF_MINSIZE, zc.zc_nvlist_conf_size * 4);

        if (zcmd_alloc_dst_nvlist(hdl, &zc, dstbuf_size) != 0) {
                zcmd_free_nvlists(&zc);
                return (NULL);
        }

        while ((err = ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_TRYIMPORT,
            &zc)) != 0 && errno == ENOMEM) {
                if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
                        zcmd_free_nvlists(&zc);
                        return (NULL);
                }
        }

        if (err) {
                zcmd_free_nvlists(&zc);
                return (NULL);
        }

        if (zcmd_read_dst_nvlist(hdl, &zc, &nvl) != 0) {
                zcmd_free_nvlists(&zc);
                return (NULL);
        }

        zcmd_free_nvlists(&zc);
        return (nvl);
}

/*
 * Determine if the vdev id is a hole in the namespace.
 */
boolean_t
vdev_is_hole(uint64_t *hole_array, uint_t holes, uint_t id)
{
        for (int c = 0; c < holes; c++) {

                /* Top-level is a hole */
                if (hole_array[c] == id)
                        return (B_TRUE);
        }
        return (B_FALSE);
}
 406 
/*
 * Convert our list of pools into the definitive set of configurations.  We
 * start by picking the best config for each toplevel vdev.  Once that's done,
 * we assemble the toplevel vdevs into a full config for the pool.  We make a
 * pass to fix up any incorrect paths, and then add it to the main list to
 * return to the user.
 */
static nvlist_t *
get_configs(libzfs_handle_t *hdl, pool_list_t *pl, boolean_t active_ok,
    nvlist_t *policy)
{
        pool_entry_t *pe;
        vdev_entry_t *ve;
        config_entry_t *ce;
        nvlist_t *ret = NULL, *config = NULL, *tmp = NULL, *nvtop, *nvroot;
        nvlist_t **spares, **l2cache;
        uint_t i, nspares, nl2cache;
        boolean_t config_seen;
        uint64_t best_txg;
        char *name, *hostname = NULL;
        uint64_t guid;
        uint_t children = 0;
        nvlist_t **child = NULL;
        uint_t holes;
        uint64_t *hole_array, max_id;
        uint_t c;
        boolean_t isactive;
        uint64_t hostid;
        nvlist_t *nvl;
        boolean_t found_one = B_FALSE;
        boolean_t valid_top_config = B_FALSE;

        if (nvlist_alloc(&ret, 0, 0) != 0)
                goto nomem;

        for (pe = pl->pools; pe != NULL; pe = pe->pe_next) {
                uint64_t id, max_txg = 0;

                if (nvlist_alloc(&config, NV_UNIQUE_NAME, 0) != 0)
                        goto nomem;
                config_seen = B_FALSE;

                /*
                 * Iterate over all toplevel vdevs.  Grab the pool configuration
                 * from the first one we find, and then go through the rest and
                 * add them as necessary to the 'vdevs' member of the config.
                 */
                for (ve = pe->pe_vdevs; ve != NULL; ve = ve->ve_next) {

                        /*
                         * Determine the best configuration for this vdev by
                         * selecting the config with the latest transaction
                         * group.
                         */
                        best_txg = 0;
                        for (ce = ve->ve_configs; ce != NULL;
                            ce = ce->ce_next) {

                                if (ce->ce_txg > best_txg) {
                                        tmp = ce->ce_config;
                                        best_txg = ce->ce_txg;
                                }
                        }

                        /*
                         * We rely on the fact that the max txg for the
                         * pool will contain the most up-to-date information
                         * about the valid top-levels in the vdev namespace.
                         */
                        if (best_txg > max_txg) {
                                (void) nvlist_remove(config,
                                    ZPOOL_CONFIG_VDEV_CHILDREN,
                                    DATA_TYPE_UINT64);
                                (void) nvlist_remove(config,
                                    ZPOOL_CONFIG_HOLE_ARRAY,
                                    DATA_TYPE_UINT64_ARRAY);

                                max_txg = best_txg;
                                hole_array = NULL;
                                holes = 0;
                                max_id = 0;
                                valid_top_config = B_FALSE;

                                if (nvlist_lookup_uint64(tmp,
                                    ZPOOL_CONFIG_VDEV_CHILDREN, &max_id) == 0) {
                                        verify(nvlist_add_uint64(config,
                                            ZPOOL_CONFIG_VDEV_CHILDREN,
                                            max_id) == 0);
                                        valid_top_config = B_TRUE;
                                }

                                if (nvlist_lookup_uint64_array(tmp,
                                    ZPOOL_CONFIG_HOLE_ARRAY, &hole_array,
                                    &holes) == 0) {
                                        verify(nvlist_add_uint64_array(config,
                                            ZPOOL_CONFIG_HOLE_ARRAY,
                                            hole_array, holes) == 0);
                                }
                        }

                        if (!config_seen) {
                                /*
                                 * Copy the relevant pieces of data to the pool
                                 * configuration:
                                 *
                                 *      version
                                 *      pool guid
                                 *      name
                                 *      comment (if available)
                                 *      pool state
                                 *      hostid (if available)
                                 *      hostname (if available)
                                 */
                                uint64_t state, version;
                                char *comment = NULL;

                                version = fnvlist_lookup_uint64(tmp,
                                    ZPOOL_CONFIG_VERSION);
                                fnvlist_add_uint64(config,
                                    ZPOOL_CONFIG_VERSION, version);
                                guid = fnvlist_lookup_uint64(tmp,
                                    ZPOOL_CONFIG_POOL_GUID);
                                fnvlist_add_uint64(config,
                                    ZPOOL_CONFIG_POOL_GUID, guid);
                                name = fnvlist_lookup_string(tmp,
                                    ZPOOL_CONFIG_POOL_NAME);
                                fnvlist_add_string(config,
                                    ZPOOL_CONFIG_POOL_NAME, name);

                                if (nvlist_lookup_string(tmp,
                                    ZPOOL_CONFIG_COMMENT, &comment) == 0)
                                        fnvlist_add_string(config,
                                            ZPOOL_CONFIG_COMMENT, comment);

                                state = fnvlist_lookup_uint64(tmp,
                                    ZPOOL_CONFIG_POOL_STATE);
                                fnvlist_add_uint64(config,
                                    ZPOOL_CONFIG_POOL_STATE, state);

                                hostid = 0;
                                if (nvlist_lookup_uint64(tmp,
                                    ZPOOL_CONFIG_HOSTID, &hostid) == 0) {
                                        fnvlist_add_uint64(config,
                                            ZPOOL_CONFIG_HOSTID, hostid);
                                        hostname = fnvlist_lookup_string(tmp,
                                            ZPOOL_CONFIG_HOSTNAME);
                                        fnvlist_add_string(config,
                                            ZPOOL_CONFIG_HOSTNAME, hostname);
                                }

                                config_seen = B_TRUE;
                        }

                        /*
                         * Add this top-level vdev to the child array.
                         */
                        verify(nvlist_lookup_nvlist(tmp,
                            ZPOOL_CONFIG_VDEV_TREE, &nvtop) == 0);
                        verify(nvlist_lookup_uint64(nvtop, ZPOOL_CONFIG_ID,
                            &id) == 0);

                        if (id >= children) {
                                nvlist_t **newchild;

                                newchild = zfs_alloc(hdl, (id + 1) *
                                    sizeof (nvlist_t *));
                                if (newchild == NULL)
                                        goto nomem;

                                for (c = 0; c < children; c++)
                                        newchild[c] = child[c];

                                free(child);
                                child = newchild;
                                children = id + 1;
                        }
                        if (nvlist_dup(nvtop, &child[id], 0) != 0)
                                goto nomem;

                }

                /*
                 * If we have information about all the top-levels then
                 * clean up the nvlist which we've constructed. This
                 * means removing any extraneous devices that are
                 * beyond the valid range or adding devices to the end
                 * of our array which appear to be missing.
                 */
                if (valid_top_config) {
                        if (max_id < children) {
                                for (c = max_id; c < children; c++)
                                        nvlist_free(child[c]);
                                children = max_id;
                        } else if (max_id > children) {
                                nvlist_t **newchild;

                                newchild = zfs_alloc(hdl, (max_id) *
                                    sizeof (nvlist_t *));
                                if (newchild == NULL)
                                        goto nomem;

                                for (c = 0; c < children; c++)
                                        newchild[c] = child[c];

                                free(child);
                                child = newchild;
                                children = max_id;
                        }
                }

                verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
                    &guid) == 0);

                /*
                 * The vdev namespace may contain holes as a result of
                 * device removal. We must add them back into the vdev
                 * tree before we process any missing devices.
                 */
                if (holes > 0) {
                        ASSERT(valid_top_config);

                        for (c = 0; c < children; c++) {
                                nvlist_t *holey;

                                if (child[c] != NULL ||
                                    !vdev_is_hole(hole_array, holes, c))
                                        continue;

                                if (nvlist_alloc(&holey, NV_UNIQUE_NAME,
                                    0) != 0)
                                        goto nomem;

                                /*
                                 * Holes in the namespace are treated as
                                 * "hole" top-level vdevs and have a
                                 * special flag set on them.
                                 */
                                if (nvlist_add_string(holey,
                                    ZPOOL_CONFIG_TYPE,
                                    VDEV_TYPE_HOLE) != 0 ||
                                    nvlist_add_uint64(holey,
                                    ZPOOL_CONFIG_ID, c) != 0 ||
                                    nvlist_add_uint64(holey,
                                    ZPOOL_CONFIG_GUID, 0ULL) != 0) {
                                        nvlist_free(holey);
                                        goto nomem;
                                }
                                child[c] = holey;
                        }
                }

                /*
                 * Look for any missing top-level vdevs.  If this is the case,
                 * create a faked up 'missing' vdev as a placeholder.  We cannot
                 * simply compress the child array, because the kernel performs
                 * certain checks to make sure the vdev IDs match their location
                 * in the configuration.
                 */
                for (c = 0; c < children; c++) {
                        if (child[c] == NULL) {
                                nvlist_t *missing;
                                if (nvlist_alloc(&missing, NV_UNIQUE_NAME,
                                    0) != 0)
                                        goto nomem;
                                if (nvlist_add_string(missing,
                                    ZPOOL_CONFIG_TYPE,
                                    VDEV_TYPE_MISSING) != 0 ||
                                    nvlist_add_uint64(missing,
                                    ZPOOL_CONFIG_ID, c) != 0 ||
                                    nvlist_add_uint64(missing,
                                    ZPOOL_CONFIG_GUID, 0ULL) != 0) {
                                        nvlist_free(missing);
                                        goto nomem;
                                }
                                child[c] = missing;
                        }
                }

                /*
                 * Put all of this pool's top-level vdevs into a root vdev.
                 */
                if (nvlist_alloc(&nvroot, NV_UNIQUE_NAME, 0) != 0)
                        goto nomem;
                if (nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE,
                    VDEV_TYPE_ROOT) != 0 ||
                    nvlist_add_uint64(nvroot, ZPOOL_CONFIG_ID, 0ULL) != 0 ||
                    nvlist_add_uint64(nvroot, ZPOOL_CONFIG_GUID, guid) != 0 ||
                    nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
                    child, children) != 0) {
                        nvlist_free(nvroot);
                        goto nomem;
                }

                for (c = 0; c < children; c++)
                        nvlist_free(child[c]);
                free(child);
                children = 0;
                child = NULL;

                /*
                 * Go through and fix up any paths and/or devids based on our
                 * known list of vdev GUID -> path mappings.
                 */
                if (fix_paths(nvroot, pl->names) != 0) {
                        nvlist_free(nvroot);
                        goto nomem;
                }

                /*
                 * Add the root vdev to this pool's configuration.
                 */
                if (nvlist_add_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
                    nvroot) != 0) {
                        nvlist_free(nvroot);
                        goto nomem;
                }
                nvlist_free(nvroot);

                /*
                 * zdb uses this path to report on active pools that were
                 * imported or created using -R.
                 */
                if (active_ok)
                        goto add_pool;

                /*
                 * Determine if this pool is currently active, in which case we
                 * can't actually import it.
                 */
                verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
                    &name) == 0);
                verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
                    &guid) == 0);

                if (pool_active(hdl, name, guid, &isactive) != 0)
                        goto error;

                if (isactive) {
                        nvlist_free(config);
                        config = NULL;
                        continue;
                }

                if (policy != NULL) {
                        if (nvlist_add_nvlist(config, ZPOOL_REWIND_POLICY,
                            policy) != 0)
                                goto nomem;
                }

                if ((nvl = refresh_config(hdl, config)) == NULL) {
                        nvlist_free(config);
                        config = NULL;
                        continue;
                }

                nvlist_free(config);
                config = nvl;

                /*
                 * Go through and update the paths for spares, now that we have
                 * them.
                 */
                verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
                    &nvroot) == 0);
                if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
                    &spares, &nspares) == 0) {
                        for (i = 0; i < nspares; i++) {
                                if (fix_paths(spares[i], pl->names) != 0)
                                        goto nomem;
                        }
                }

                /*
                 * Update the paths for l2cache devices.
                 */
                if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
                    &l2cache, &nl2cache) == 0) {
                        for (i = 0; i < nl2cache; i++) {
                                if (fix_paths(l2cache[i], pl->names) != 0)
                                        goto nomem;
                        }
                }

                /*
                 * Restore the original information read from the actual label.
                 */
                (void) nvlist_remove(config, ZPOOL_CONFIG_HOSTID,
                    DATA_TYPE_UINT64);
                (void) nvlist_remove(config, ZPOOL_CONFIG_HOSTNAME,
                    DATA_TYPE_STRING);
                if (hostid != 0) {
                        verify(nvlist_add_uint64(config, ZPOOL_CONFIG_HOSTID,
                            hostid) == 0);
                        verify(nvlist_add_string(config, ZPOOL_CONFIG_HOSTNAME,
                            hostname) == 0);
                }

add_pool:
                /*
                 * Add this pool to the list of configs.
                 */
                verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
                    &name) == 0);
                if (nvlist_add_nvlist(ret, name, config) != 0)
                        goto nomem;

                found_one = B_TRUE;
                nvlist_free(config);
                config = NULL;
        }

        if (!found_one) {
                nvlist_free(ret);
                ret = NULL;
        }

        return (ret);

nomem:
        (void) no_memory(hdl);
error:
        nvlist_free(config);
        nvlist_free(ret);
        for (c = 0; c < children; c++)
                nvlist_free(child[c]);
        free(child);

        return (NULL);
}

/*
 * Return the offset of the given label.
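 *
 * With VDEV_LABELS == 4, labels 0 and 1 occupy the first two label-sized
 * regions at the front of the device, and labels 2 and 3 occupy the last
 * two label-sized regions ending at the aligned device size.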
 */
static uint64_t
label_offset(uint64_t size, int l)
{
        ASSERT(P2PHASE_TYPED(size, sizeof (vdev_label_t), uint64_t) == 0);
        return (l * sizeof (vdev_label_t) + (l < VDEV_LABELS / 2 ?
            0 : size - VDEV_LABELS * sizeof (vdev_label_t)));
}
 847 
/*
 * Given a file descriptor, read the label information and return an nvlist
 * describing the configuration, if there is one.
 * Returns 0 on success, or -1 on failure.
 */
int
zpool_read_label(int fd, nvlist_t **config)
{
        struct stat64 statbuf;
        int l;
        vdev_label_t *label;
        uint64_t state, txg, size;

        *config = NULL;

        if (fstat64(fd, &statbuf) == -1)
                return (-1);
        size = P2ALIGN_TYPED(statbuf.st_size, sizeof (vdev_label_t), uint64_t);

        if ((label = malloc(sizeof (vdev_label_t))) == NULL)
                return (-1);

        for (l = 0; l < VDEV_LABELS; l++) {
                if (pread64(fd, label, sizeof (vdev_label_t),
                    label_offset(size, l)) != sizeof (vdev_label_t))
                        continue;

                if (nvlist_unpack(label->vl_vdev_phys.vp_nvlist,
                    sizeof (label->vl_vdev_phys.vp_nvlist), config, 0) != 0)
                        continue;

                if (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_POOL_STATE,
                    &state) != 0 || state > POOL_STATE_L2CACHE) {
                        nvlist_free(*config);
                        continue;
                }

                if (state != POOL_STATE_SPARE && state != POOL_STATE_L2CACHE &&
                    (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_POOL_TXG,
                    &txg) != 0 || txg == 0)) {
                        nvlist_free(*config);
                        continue;
                }

                free(label);
                return (0);
        }

        free(label);
        *config = NULL;
        return (-1);
}
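
/*
 * Example usage (an illustrative sketch; the device path is hypothetical):
 *
 *      nvlist_t *config;
 *      int fd = open("/dev/rdsk/c0t0d0s0", O_RDONLY);
 *
 *      if (fd >= 0 && zpool_read_label(fd, &config) == 0) {
 *              ... inspect config, e.g. ZPOOL_CONFIG_POOL_NAME ...
 *              nvlist_free(config);
 *      }
 */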
 900 
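/*
 * State for a single directory entry examined by the parallel device scan;
 * nodes are kept in an AVL tree ordered by slice_cache_compare().
 */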
typedef struct rdsk_node {
        char *rn_name;
        int rn_dfd;
        libzfs_handle_t *rn_hdl;
        nvlist_t *rn_config;
        avl_tree_t *rn_avl;
        avl_node_t rn_node;
        boolean_t rn_nozpool;
} rdsk_node_t;

static int
slice_cache_compare(const void *arg1, const void *arg2)
{
        const char  *nm1 = ((rdsk_node_t *)arg1)->rn_name;
        const char  *nm2 = ((rdsk_node_t *)arg2)->rn_name;
        char *nm1slice, *nm2slice;
        int rv;

        /*
         * slices zero and two are the most likely to provide results,
         * so put those first
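         *
         * (for example, a hypothetical "c0t0d0s0" sorts before "c0t0d0s2",
         * which in turn sorts before every other name; remaining ties fall
         * back to strcmp() order)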
         */
        nm1slice = strstr(nm1, "s0");
        nm2slice = strstr(nm2, "s0");
        if (nm1slice && !nm2slice) {
                return (-1);
        }
        if (!nm1slice && nm2slice) {
                return (1);
        }
        nm1slice = strstr(nm1, "s2");
        nm2slice = strstr(nm2, "s2");
        if (nm1slice && !nm2slice) {
                return (-1);
        }
        if (!nm1slice && nm2slice) {
                return (1);
        }

        rv = strcmp(nm1, nm2);
        if (rv == 0)
                return (0);
        return (rv > 0 ? 1 : -1);
}
 945 
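/*
 * Mark the named slice as unable to contain a pool if its size (given in
 * units of blksz-byte blocks) is too small to hold one.
 */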
static void
check_one_slice(avl_tree_t *r, char *diskname, uint_t partno,
    diskaddr_t size, uint_t blksz)
{
        rdsk_node_t tmpnode;
        rdsk_node_t *node;
        char sname[MAXNAMELEN];

        tmpnode.rn_name = &sname[0];
        (void) snprintf(tmpnode.rn_name, MAXNAMELEN, "%s%u",
            diskname, partno);
        /*
         * protect against division by zero for disk labels that
         * contain a bogus sector size
         */
        if (blksz == 0)
                blksz = DEV_BSIZE;
        /* too small to contain a zpool? */
        if ((size < (SPA_MINDEVSIZE / blksz)) &&
            (node = avl_find(r, &tmpnode, NULL)))
                node->rn_nozpool = B_TRUE;
}
 968 
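/*
 * The device named by sname is gone; mark every slice ("s*") and fdisk
 * partition ("p*") node derived from the same disk as unable to contain
 * a pool.
 */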
static void
nozpool_all_slices(avl_tree_t *r, const char *sname)
{
        char diskname[MAXNAMELEN];
        char *ptr;
        int i;

        (void) strncpy(diskname, sname, MAXNAMELEN);
        if (((ptr = strrchr(diskname, 's')) == NULL) &&
            ((ptr = strrchr(diskname, 'p')) == NULL))
                return;
        ptr[0] = 's';
        ptr[1] = '\0';
        for (i = 0; i < NDKMAP; i++)
                check_one_slice(r, diskname, i, 0, 1);
        ptr[0] = 'p';
        for (i = 0; i <= FD_NUMPART; i++)
                check_one_slice(r, diskname, i, 0, 1);
}
 988 
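/*
 * Read the VTOC or EFI label of the disk underlying sname and flag any
 * slices that are too small to hold a pool.
 */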
static void
check_slices(avl_tree_t *r, int fd, const char *sname)
{
        struct extvtoc vtoc;
        struct dk_gpt *gpt;
        char diskname[MAXNAMELEN];
        char *ptr;
        int i;

        (void) strncpy(diskname, sname, MAXNAMELEN);
        if ((ptr = strrchr(diskname, 's')) == NULL || !isdigit(ptr[1]))
                return;
        ptr[1] = '\0';

        if (read_extvtoc(fd, &vtoc) >= 0) {
                for (i = 0; i < NDKMAP; i++)
                        check_one_slice(r, diskname, i,
                            vtoc.v_part[i].p_size, vtoc.v_sectorsz);
        } else if (efi_alloc_and_read(fd, &gpt) >= 0) {
                /*
                 * on x86 we'll still have leftover links that point
                 * to slices s[9-15], so use NDKMAP instead
                 */
                for (i = 0; i < NDKMAP; i++)
                        check_one_slice(r, diskname, i,
                            gpt->efi_parts[i].p_size, gpt->efi_lbasize);
                /* nodes p[1-4] are never used with EFI labels */
                ptr[0] = 'p';
                for (i = 1; i <= FD_NUMPART; i++)
                        check_one_slice(r, diskname, i, 0, 1);
                efi_free(gpt);
        }
}
1022 
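/*
 * Thread pool callback: open a single directory entry, screen out nodes
 * that cannot contain a pool, and record any label configuration read
 * from the device in rn_config.
 */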
static void
zpool_open_func(void *arg)
{
        rdsk_node_t *rn = arg;
        struct stat64 statbuf;
        nvlist_t *config;
        int fd;

        if (rn->rn_nozpool)
                return;
        if ((fd = openat64(rn->rn_dfd, rn->rn_name, O_RDONLY)) < 0) {
                /* symlink to a device that's no longer there */
                if (errno == ENOENT)
                        nozpool_all_slices(rn->rn_avl, rn->rn_name);
                return;
        }
        /*
         * Ignore failed stats.  We only want regular
         * files, character devs and block devs.
         */
        if (fstat64(fd, &statbuf) != 0 ||
            (!S_ISREG(statbuf.st_mode) &&
            !S_ISCHR(statbuf.st_mode) &&
            !S_ISBLK(statbuf.st_mode))) {
                (void) close(fd);
                return;
        }
        /* this file is too small to hold a zpool */
        if (S_ISREG(statbuf.st_mode) &&
            statbuf.st_size < SPA_MINDEVSIZE) {
                (void) close(fd);
                return;
        } else if (!S_ISREG(statbuf.st_mode)) {
                /*
                 * Try to read the disk label first so we don't have to
                 * open a bunch of minor nodes that can't have a zpool.
                 */
                check_slices(rn->rn_avl, fd, rn->rn_name);
        }

        if (zpool_read_label(fd, &config) != 0 && errno == ENOMEM) {
                (void) close(fd);
                (void) no_memory(rn->rn_hdl);
                return;
        }
        (void) close(fd);

        rn->rn_config = config;
}

/*
 * Given a file descriptor, clear (zero) the label information.
 */
int
zpool_clear_label(int fd)
{
        struct stat64 statbuf;
        int l;
        vdev_label_t *label;
        uint64_t size;

        if (fstat64(fd, &statbuf) == -1)
                return (0);
        size = P2ALIGN_TYPED(statbuf.st_size, sizeof (vdev_label_t), uint64_t);

        if ((label = calloc(sizeof (vdev_label_t), 1)) == NULL)
                return (-1);

        for (l = 0; l < VDEV_LABELS; l++) {
                if (pwrite64(fd, label, sizeof (vdev_label_t),
                    label_offset(size, l)) != sizeof (vdev_label_t)) {
                        free(label);
                        return (-1);
                }
        }

        free(label);
        return (0);
}
1102 
/*
 * Given a list of directories to search, find all pools stored on disk.  This
 * includes partial pools which are not available to import.  If no args are
 * given (argc is 0), then the default directory (/dev/dsk) is searched.
 * poolname or guid (but not both) are provided by the caller when trying
 * to import a specific pool.
 */
static nvlist_t *
zpool_find_import_impl(libzfs_handle_t *hdl, importargs_t *iarg)
{
        int i, dirs = iarg->paths;
        struct dirent64 *dp;
        char path[MAXPATHLEN];
        char *end, **dir = iarg->path;
        size_t pathleft;
        nvlist_t *ret = NULL;
        static char *default_dir = ZFS_DISK_ROOT;
        pool_list_t pools = { 0 };
        pool_entry_t *pe, *penext;
        vdev_entry_t *ve, *venext;
        config_entry_t *ce, *cenext;
        name_entry_t *ne, *nenext;
        avl_tree_t slice_cache;
        rdsk_node_t *slice;
        void *cookie;

        if (dirs == 0) {
                dirs = 1;
                dir = &default_dir;
        }

        /*
         * Go through and read the label configuration information from every
         * possible device, organizing the information according to pool GUID
         * and toplevel GUID.
         */
        for (i = 0; i < dirs; i++) {
                tpool_t *t;
                char rdsk[MAXPATHLEN];
                int dfd;
                boolean_t config_failed = B_FALSE;
                DIR *dirp;

                /* use realpath to normalize the path */
                if (realpath(dir[i], path) == 0) {
                        (void) zfs_error_fmt(hdl, EZFS_BADPATH,
                            dgettext(TEXT_DOMAIN, "cannot open '%s'"), dir[i]);
                        goto error;
                }
                end = &path[strlen(path)];
                *end++ = '/';
                *end = 0;
                pathleft = &path[sizeof (path)] - end;

                /*
                 * Using raw devices instead of block devices when we're
                 * reading the labels skips a bunch of slow operations during
                 * close(2) processing, so we replace /dev/dsk with /dev/rdsk.
                 */
                if (strcmp(path, ZFS_DISK_ROOTD) == 0)
                        (void) strlcpy(rdsk, ZFS_RDISK_ROOTD, sizeof (rdsk));
                else
                        (void) strlcpy(rdsk, path, sizeof (rdsk));

                if ((dfd = open64(rdsk, O_RDONLY)) < 0 ||
                    (dirp = fdopendir(dfd)) == NULL) {
                        if (dfd >= 0)
                                (void) close(dfd);
                        zfs_error_aux(hdl, strerror(errno));
                        (void) zfs_error_fmt(hdl, EZFS_BADPATH,
                            dgettext(TEXT_DOMAIN, "cannot open '%s'"),
                            rdsk);
                        goto error;
                }

                avl_create(&slice_cache, slice_cache_compare,
                    sizeof (rdsk_node_t), offsetof(rdsk_node_t, rn_node));
                /*
                 * This is not MT-safe, but we have no MT consumers of libzfs
                 */
                while ((dp = readdir64(dirp)) != NULL) {
                        const char *name = dp->d_name;
                        if (name[0] == '.' &&
                            (name[1] == 0 || (name[1] == '.' && name[2] == 0)))
                                continue;

                        slice = zfs_alloc(hdl, sizeof (rdsk_node_t));
                        slice->rn_name = zfs_strdup(hdl, name);
                        slice->rn_avl = &slice_cache;
                        slice->rn_dfd = dfd;
                        slice->rn_hdl = hdl;
                        slice->rn_nozpool = B_FALSE;
                        avl_add(&slice_cache, slice);
                }
                /*
                 * Create a thread pool to do all of this in parallel;
                 * rn_nozpool is not protected, so this is racy in that
                 * multiple tasks could decide that the same slice cannot
                 * hold a zpool, which is benign.  Also choose double the
                 * number of processors; we hold a lot of locks in the
                 * kernel, so going beyond this doesn't buy us much.
                 */
                t = tpool_create(1, 2 * sysconf(_SC_NPROCESSORS_ONLN),
                    0, NULL);
                for (slice = avl_first(&slice_cache); slice;
                    (slice = avl_walk(&slice_cache, slice,
                    AVL_AFTER)))
                        (void) tpool_dispatch(t, zpool_open_func, slice);
                tpool_wait(t);
                tpool_destroy(t);

                cookie = NULL;
                while ((slice = avl_destroy_nodes(&slice_cache,
                    &cookie)) != NULL) {
                        if (slice->rn_config != NULL && !config_failed) {
                                nvlist_t *config = slice->rn_config;
                                boolean_t matched = B_TRUE;

                                if (iarg->poolname != NULL) {
                                        char *pname;

                                        matched = nvlist_lookup_string(config,
                                            ZPOOL_CONFIG_POOL_NAME,
                                            &pname) == 0 &&
                                            strcmp(iarg->poolname, pname) == 0;
                                } else if (iarg->guid != 0) {
                                        uint64_t this_guid;

                                        matched = nvlist_lookup_uint64(config,
                                            ZPOOL_CONFIG_POOL_GUID,
                                            &this_guid) == 0 &&
                                            iarg->guid == this_guid;
                                }
                                if (!matched) {
                                        nvlist_free(config);
                                } else {
                                        /*
                                         * use the non-raw path for the config
                                         */
                                        (void) strlcpy(end, slice->rn_name,
                                            pathleft);
                                        if (add_config(hdl, &pools, path,
                                            config) != 0)
                                                config_failed = B_TRUE;
                                }
                        }
                        free(slice->rn_name);
                        free(slice);
                }
                avl_destroy(&slice_cache);

                (void) closedir(dirp);

                if (config_failed)
                        goto error;
        }

        ret = get_configs(hdl, &pools, iarg->can_be_active, iarg->policy);

error:
        for (pe = pools.pools; pe != NULL; pe = penext) {
                penext = pe->pe_next;
                for (ve = pe->pe_vdevs; ve != NULL; ve = venext) {
                        venext = ve->ve_next;
                        for (ce = ve->ve_configs; ce != NULL; ce = cenext) {
                                cenext = ce->ce_next;
                                nvlist_free(ce->ce_config);
                                free(ce);
                        }
                        free(ve);
                }
                free(pe);
        }

        for (ne = pools.names; ne != NULL; ne = nenext) {
                nenext = ne->ne_next;
                free(ne->ne_name);
                free(ne);
        }

        return (ret);
}
1286 
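/*
 * Convenience wrapper around zpool_find_import_impl() that searches the
 * given directories (or /dev/dsk when argc is 0) with no name or guid
 * filtering.
 */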
nvlist_t *
zpool_find_import(libzfs_handle_t *hdl, int argc, char **argv)
{
        importargs_t iarg = { 0 };

        iarg.paths = argc;
        iarg.path = argv;

        return (zpool_find_import_impl(hdl, &iarg));
}

/*
 * Given a cache file, return the contents as a list of importable pools.
 * poolname or guid (but not both) are provided by the caller when trying
 * to import a specific pool.
 */
nvlist_t *
zpool_find_import_cached(libzfs_handle_t *hdl, const char *cachefile,
    char *poolname, uint64_t guid)
{
        char *buf;
        int fd;
        struct stat64 statbuf;
        nvlist_t *raw, *src, *dst;
        nvlist_t *pools;
        nvpair_t *elem;
        char *name;
        uint64_t this_guid;
        boolean_t active;

        verify(poolname == NULL || guid == 0);

        if ((fd = open(cachefile, O_RDONLY)) < 0) {
                zfs_error_aux(hdl, "%s", strerror(errno));
                (void) zfs_error(hdl, EZFS_BADCACHE,
                    dgettext(TEXT_DOMAIN, "failed to open cache file"));
                return (NULL);
        }

        if (fstat64(fd, &statbuf) != 0) {
                zfs_error_aux(hdl, "%s", strerror(errno));
                (void) close(fd);
                (void) zfs_error(hdl, EZFS_BADCACHE,
                    dgettext(TEXT_DOMAIN, "failed to get size of cache file"));
                return (NULL);
        }

        if ((buf = zfs_alloc(hdl, statbuf.st_size)) == NULL) {
                (void) close(fd);
                return (NULL);
        }

        if (read(fd, buf, statbuf.st_size) != statbuf.st_size) {
                (void) close(fd);
                free(buf);
                (void) zfs_error(hdl, EZFS_BADCACHE,
                    dgettext(TEXT_DOMAIN,
                    "failed to read cache file contents"));
                return (NULL);
        }

        (void) close(fd);

        if (nvlist_unpack(buf, statbuf.st_size, &raw, 0) != 0) {
                free(buf);
                (void) zfs_error(hdl, EZFS_BADCACHE,
                    dgettext(TEXT_DOMAIN,
                    "invalid or corrupt cache file contents"));
                return (NULL);
        }

        free(buf);
1359 
        /*
         * Go through the cached configs, refresh the state of each pool, and
         * build up the list to return.
         */
1364         if (nvlist_alloc(&pools, 0, 0) != 0) {
1365                 (void) no_memory(hdl);
1366                 nvlist_free(raw);
1367                 return (NULL);
1368         }
1369 
1370         elem = NULL;
1371         while ((elem = nvlist_next_nvpair(raw, elem)) != NULL) {
1372                 src = fnvpair_value_nvlist(elem);
1373 
1374                 name = fnvlist_lookup_string(src, ZPOOL_CONFIG_POOL_NAME);
1375                 if (poolname != NULL && strcmp(poolname, name) != 0)
1376                         continue;
1377 
1378                 this_guid = fnvlist_lookup_uint64(src, ZPOOL_CONFIG_POOL_GUID);
1379                 if (guid != 0 && guid != this_guid)
1380                         continue;
1381 
1382                 if (pool_active(hdl, name, this_guid, &active) != 0) {
1383                         nvlist_free(raw);
1384                         nvlist_free(pools);
1385                         return (NULL);
1386                 }
1387 
1388                 if (active)
1389                         continue;
1390 
1391                 if (nvlist_add_string(src, ZPOOL_CONFIG_CACHEFILE,
1392                     cachefile) != 0) {
1393                         (void) no_memory(hdl);
1394                         nvlist_free(raw);
1395                         nvlist_free(pools);
1396                         return (NULL);
1397                 }
1398 
1399                 if ((dst = refresh_config(hdl, src)) == NULL) {
1400                         nvlist_free(raw);
1401                         nvlist_free(pools);
1402                         return (NULL);
1403                 }
1404 
1405                 if (nvlist_add_nvlist(pools, nvpair_name(elem), dst) != 0) {
1406                         (void) no_memory(hdl);
1407                         nvlist_free(dst);
1408                         nvlist_free(raw);
1409                         nvlist_free(pools);
1410                         return (NULL);
1411                 }
1412                 nvlist_free(dst);
1413         }
1414 
1415         nvlist_free(raw);
1416         return (pools);
1417 }
1418 
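/*
 * zpool_iter() callback: returns nonzero if the pool currently imported
 * on the system matches the name (or, failing that, the guid) in the
 * importargs_t passed as the callback data, and zero otherwise.  Used
 * by zpool_search_import() to detect collisions with active pools.
 */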
1419 static int
1420 name_or_guid_exists(zpool_handle_t *zhp, void *data)
1421 {
1422         importargs_t *import = data;
1423         int found = 0;
1424 
1425         if (import->poolname != NULL) {
1426                 char *pool_name;
1427 
1428                 verify(nvlist_lookup_string(zhp->zpool_config,
1429                     ZPOOL_CONFIG_POOL_NAME, &pool_name) == 0);
1430                 if (strcmp(pool_name, import->poolname) == 0)
1431                         found = 1;
1432         } else {
1433                 uint64_t pool_guid;
1434 
1435                 verify(nvlist_lookup_uint64(zhp->zpool_config,
1436                     ZPOOL_CONFIG_POOL_GUID, &pool_guid) == 0);
1437                 if (pool_guid == import->guid)
1438                         found = 1;
1439         }
1440 
1441         zpool_close(zhp);
1442         return (found);
1443 }
1444 
1445 nvlist_t *
1446 zpool_search_import(libzfs_handle_t *hdl, importargs_t *import)
1447 {
1448         verify(import->poolname == NULL || import->guid == 0);
1449 
1450         if (import->unique)
1451                 import->exists = zpool_iter(hdl, name_or_guid_exists, import);
1452 
1453         if (import->cachefile != NULL)
1454                 return (zpool_find_import_cached(hdl, import->cachefile,
1455                     import->poolname, import->guid));
1456 
1457         return (zpool_find_import_impl(hdl, import));
1458 }
1459 
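/*
 * Recursively search the vdev tree rooted at 'nv' for a vdev whose
 * ZPOOL_CONFIG_GUID matches 'guid'.  Returns B_TRUE if a match is found.
 */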
1460 boolean_t
1461 find_guid(nvlist_t *nv, uint64_t guid)
1462 {
1463         uint64_t tmp;
1464         nvlist_t **child;
1465         uint_t c, children;
1466 
1467         verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &tmp) == 0);
1468         if (tmp == guid)
1469                 return (B_TRUE);
1470 
1471         if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
1472             &child, &children) == 0) {
1473                 for (c = 0; c < children; c++)
1474                         if (find_guid(child[c], guid))
1475                                 return (B_TRUE);
1476         }
1477 
1478         return (B_FALSE);
1479 }
1480 
1481 typedef struct aux_cbdata {
1482         const char      *cb_type;
1483         uint64_t        cb_guid;
1484         zpool_handle_t  *cb_zhp;
1485 } aux_cbdata_t;
1486 
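/*
 * zpool_iter() callback used to find the pool, if any, that contains an
 * auxiliary device (spare or l2cache, selected by cb_type) with guid
 * cb_guid.  On a match the handle is kept in cb_zhp for the caller to
 * close; otherwise it is closed here before moving to the next pool.
 */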
1487 static int
1488 find_aux(zpool_handle_t *zhp, void *data)
1489 {
1490         aux_cbdata_t *cbp = data;
1491         nvlist_t **list;
1492         uint_t i, count;
1493         uint64_t guid;
1494         nvlist_t *nvroot;
1495 
1496         verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
1497             &nvroot) == 0);
1498 
1499         if (nvlist_lookup_nvlist_array(nvroot, cbp->cb_type,
1500             &list, &count) == 0) {
1501                 for (i = 0; i < count; i++) {
1502                         verify(nvlist_lookup_uint64(list[i],
1503                             ZPOOL_CONFIG_GUID, &guid) == 0);
1504                         if (guid == cbp->cb_guid) {
1505                                 cbp->cb_zhp = zhp;
1506                                 return (1);
1507                         }
1508                 }
1509         }
1510 
1511         zpool_close(zhp);
1512         return (0);
1513 }
1514 
1515 /*
1516  * Determines if the pool is in use.  If so, it returns true and the state of
1517  * the pool as well as the name of the pool.  Both strings are allocated and
1518  * must be freed by the caller.
1519  */
1520 int
1521 zpool_in_use(libzfs_handle_t *hdl, int fd, pool_state_t *state, char **namestr,
1522     boolean_t *inuse)
1523 {
1524         nvlist_t *config;
1525         char *name;
1526         boolean_t ret;
1527         uint64_t guid, vdev_guid;
1528         zpool_handle_t *zhp;
1529         nvlist_t *pool_config;
1530         uint64_t stateval, isspare;
1531         aux_cbdata_t cb = { 0 };
1532         boolean_t isactive;
1533 
1534         *inuse = B_FALSE;
1535 
1536         if (zpool_read_label(fd, &config) != 0 && errno == ENOMEM) {
1537                 (void) no_memory(hdl);
1538                 return (-1);
1539         }
1540 
1541         if (config == NULL)
1542                 return (0);
1543 
1544         verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
1545             &stateval) == 0);
1546         verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID,
1547             &vdev_guid) == 0);
1548 
1549         if (stateval != POOL_STATE_SPARE && stateval != POOL_STATE_L2CACHE) {
1550                 verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
1551                     &name) == 0);
1552                 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
1553                     &guid) == 0);
1554         }
1555 
1556         switch (stateval) {
1557         case POOL_STATE_EXPORTED:
1558                 /*
1559                  * A pool with an exported state may in fact be imported
1560                  * read-only, so check the in-core state to see if it's
1561                  * active and imported read-only.  If it is, set
1562                  * its state to active.
1563                  */
1564                 if (pool_active(hdl, name, guid, &isactive) == 0 && isactive &&
1565                     (zhp = zpool_open_canfail(hdl, name)) != NULL) {
1566                         if (zpool_get_prop_int(zhp, ZPOOL_PROP_READONLY, NULL))
1567                                 stateval = POOL_STATE_ACTIVE;
1568 
1569                         /*
1570                          * All we needed the zpool handle for is the
1571                          * readonly prop check.
1572                          */
1573                         zpool_close(zhp);
1574                 }
1575 
1576                 ret = B_TRUE;
1577                 break;
1578 
1579         case POOL_STATE_ACTIVE:
1580                 /*
1581                  * For an active pool, we have to determine if it's really part
1582                  * of a currently active pool (in which case the pool will exist
1583                  * and the guid will be the same), or whether it's part of an
1584                  * active pool that was disconnected without being explicitly
1585                  * exported.
1586                  */
1587                 if (pool_active(hdl, name, guid, &isactive) != 0) {
1588                         nvlist_free(config);
1589                         return (-1);
1590                 }
1591 
1592                 if (isactive) {
1593                         /*
1594                          * Because the device may have been removed while
1595                          * offlined, we only report it as active if the vdev is
1596                          * still present in the config.  Otherwise, pretend like
1597                          * it's not in use.
1598                          */
1599                         if ((zhp = zpool_open_canfail(hdl, name)) != NULL &&
1600                             (pool_config = zpool_get_config(zhp, NULL))
1601                             != NULL) {
1602                                 nvlist_t *nvroot;
1603 
1604                                 verify(nvlist_lookup_nvlist(pool_config,
1605                                     ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
1606                                 ret = find_guid(nvroot, vdev_guid);
1607                         } else {
1608                                 ret = B_FALSE;
1609                         }
1610 
1611                         /*
1612                          * If this is an active spare within another pool, we
1613                          * treat it like an unused hot spare.  This allows the
                         * user to create a pool with a hot spare that is
                         * currently in use within another pool.  Since we
                         * return B_TRUE, libdiskmgt will continue to prevent
                         * generic consumers from using the device.
1618                          */
1619                         if (ret && nvlist_lookup_uint64(config,
1620                             ZPOOL_CONFIG_IS_SPARE, &isspare) == 0 && isspare)
1621                                 stateval = POOL_STATE_SPARE;
1622 
1623                         if (zhp != NULL)
1624                                 zpool_close(zhp);
1625                 } else {
1626                         stateval = POOL_STATE_POTENTIALLY_ACTIVE;
1627                         ret = B_TRUE;
1628                 }
1629                 break;
1630 
1631         case POOL_STATE_SPARE:
1632                 /*
1633                  * For a hot spare, it can be either definitively in use, or
1634                  * potentially active.  To determine if it's in use, we iterate
1635                  * over all pools in the system and search for one with a spare
1636                  * with a matching guid.
1637                  *
1638                  * Due to the shared nature of spares, we don't actually report
1639                  * the potentially active case as in use.  This means the user
1640                  * can freely create pools on the hot spares of exported pools,
                 * but reporting it as in use would complicate the code,
                 * and we would end up having to handle this case anyway.
1643                  */
1644                 cb.cb_zhp = NULL;
1645                 cb.cb_guid = vdev_guid;
1646                 cb.cb_type = ZPOOL_CONFIG_SPARES;
1647                 if (zpool_iter(hdl, find_aux, &cb) == 1) {
1648                         name = (char *)zpool_get_name(cb.cb_zhp);
1649                         ret = B_TRUE;
1650                 } else {
1651                         ret = B_FALSE;
1652                 }
1653                 break;
1654 
        case POOL_STATE_L2CACHE:
1657                 /*
1658                  * Check if any pool is currently using this l2cache device.
1659                  */
1660                 cb.cb_zhp = NULL;
1661                 cb.cb_guid = vdev_guid;
1662                 cb.cb_type = ZPOOL_CONFIG_L2CACHE;
1663                 if (zpool_iter(hdl, find_aux, &cb) == 1) {
1664                         name = (char *)zpool_get_name(cb.cb_zhp);
1665                         ret = B_TRUE;
1666                 } else {
1667                         ret = B_FALSE;
1668                 }
1669                 break;
1670 
1671         default:
1672                 ret = B_FALSE;
1673         }
1674 
1676         if (ret) {
1677                 if ((*namestr = zfs_strdup(hdl, name)) == NULL) {
1678                         if (cb.cb_zhp)
1679                                 zpool_close(cb.cb_zhp);
1680                         nvlist_free(config);
1681                         return (-1);
1682                 }
1683                 *state = (pool_state_t)stateval;
1684         }
1685 
1686         if (cb.cb_zhp)
1687                 zpool_close(cb.cb_zhp);
1688 
1689         nvlist_free(config);
1690         *inuse = ret;
1691         return (0);
1692 }