1693 persistent 'comment' field for a zpool

          --- old/usr/src/lib/libzfs/common/libzfs_import.c
          +++ new/usr/src/lib/libzfs/common/libzfs_import.c
   1    1  /*
   2    2   * CDDL HEADER START
   3    3   *
   4    4   * The contents of this file are subject to the terms of the
   5    5   * Common Development and Distribution License (the "License").
   6    6   * You may not use this file except in compliance with the License.
   7    7   *
   8    8   * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
   9    9   * or http://www.opensolaris.org/os/licensing.
  10   10   * See the License for the specific language governing permissions
  11   11   * and limitations under the License.
  12   12   *
  13   13   * When distributing Covered Code, include this CDDL HEADER in each
  14   14   * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
  15   15   * If applicable, add the following below this CDDL HEADER, with the
  16   16   * fields enclosed by brackets "[]" replaced with your own identifying
  17   17   * information: Portions Copyright [yyyy] [name of copyright owner]
  18   18   *
  19   19   * CDDL HEADER END
  20   20   */
  21   21  /*
  22   22   * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
       23 + * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
       24 + * Copyright (c) 2011 by Delphix. All rights reserved.
  23   25   */
  24   26  
  25   27  /*
  26   28   * Pool import support functions.
  27   29   *
  28   30   * To import a pool, we rely on reading the configuration information from the
  29   31   * ZFS label of each device.  If we successfully read the label, then we
  30   32   * organize the configuration information in the following hierarchy:
  31   33   *
  32   34   *      pool guid -> toplevel vdev guid -> label txg
  33   35   *
  34   36   * Duplicate entries matching this same tuple will be discarded.  Once we have
  35   37   * examined every device, we pick the best label txg config for each toplevel
  36   38   * vdev.  We then arrange these toplevel vdevs into a complete pool config, and
  37   39   * update any paths that have changed.  Finally, we attempt to import the pool
  38   40   * using our derived config, and record the results.
  39   41   */
  40   42  
  41   43  #include <ctype.h>
  42   44  #include <devid.h>
  43   45  #include <dirent.h>
  44   46  #include <errno.h>
  45   47  #include <libintl.h>
  46   48  #include <stddef.h>
  47   49  #include <stdlib.h>
  48   50  #include <string.h>
  49   51  #include <sys/stat.h>
  50   52  #include <unistd.h>
  51   53  #include <fcntl.h>
  52   54  #include <sys/vtoc.h>
  53   55  #include <sys/dktp/fdisk.h>
  54   56  #include <sys/efi_partition.h>
  55   57  #include <thread_pool.h>
  56   58  
  57   59  #include <sys/vdev_impl.h>
  58   60  
  59   61  #include "libzfs.h"
  60   62  #include "libzfs_impl.h"
  61   63  
  62   64  /*
  63   65   * Intermediate structures used to gather configuration information.
  64   66   */
  65   67  typedef struct config_entry {
  66   68          uint64_t                ce_txg;
  67   69          nvlist_t                *ce_config;
  68   70          struct config_entry     *ce_next;
  69   71  } config_entry_t;
  70   72  
  71   73  typedef struct vdev_entry {
  72   74          uint64_t                ve_guid;
  73   75          config_entry_t          *ve_configs;
  74   76          struct vdev_entry       *ve_next;
  75   77  } vdev_entry_t;
  76   78  
  77   79  typedef struct pool_entry {
  78   80          uint64_t                pe_guid;
  79   81          vdev_entry_t            *pe_vdevs;
  80   82          struct pool_entry       *pe_next;
  81   83  } pool_entry_t;
  82   84  
  83   85  typedef struct name_entry {
  84   86          char                    *ne_name;
  85   87          uint64_t                ne_guid;
  86   88          struct name_entry       *ne_next;
  87   89  } name_entry_t;
  88   90  
  89   91  typedef struct pool_list {
  90   92          pool_entry_t            *pools;
  91   93          name_entry_t            *names;
  92   94  } pool_list_t;
  93   95  
  94   96  static char *
  95   97  get_devid(const char *path)
  96   98  {
  97   99          int fd;
  98  100          ddi_devid_t devid;
  99  101          char *minor, *ret;
 100  102  
 101  103          if ((fd = open(path, O_RDONLY)) < 0)
 102  104                  return (NULL);
 103  105  
 104  106          minor = NULL;
 105  107          ret = NULL;
 106  108          if (devid_get(fd, &devid) == 0) {
 107  109                  if (devid_get_minor_name(fd, &minor) == 0)
 108  110                          ret = devid_str_encode(devid, minor);
 109  111                  if (minor != NULL)
 110  112                          devid_str_free(minor);
 111  113                  devid_free(devid);
 112  114          }
 113  115          (void) close(fd);
 114  116  
 115  117          return (ret);
 116  118  }
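
get_devid() is a thin wrapper over libdevid. A hedged stand-alone sketch of the same call sequence, handy for checking what devid string a given device node encodes (the main() harness is illustrative only; link against -ldevid, as libzfs itself does):

    #include <devid.h>
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int
    main(int argc, char **argv)
    {
            int fd;
            ddi_devid_t devid;
            char *minor = NULL, *str;

            if (argc != 2 || (fd = open(argv[1], O_RDONLY)) < 0)
                    return (1);
            if (devid_get(fd, &devid) == 0) {
                    if (devid_get_minor_name(fd, &minor) == 0 &&
                        (str = devid_str_encode(devid, minor)) != NULL) {
                            (void) printf("%s\n", str);
                            devid_str_free(str);
                    }
                    if (minor != NULL)
                            devid_str_free(minor);
                    devid_free(devid);
            }
            (void) close(fd);
            return (0);
    }
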
 117  119  
 118  120  
 119  121  /*
 120  122   * Go through and fix up any path and/or devid information for the given vdev
 121  123   * configuration.
 122  124   */
 123  125  static int
 124  126  fix_paths(nvlist_t *nv, name_entry_t *names)
 125  127  {
 126  128          nvlist_t **child;
 127  129          uint_t c, children;
 128  130          uint64_t guid;
 129  131          name_entry_t *ne, *best;
 130  132          char *path, *devid;
 131  133          int matched;
 132  134  
 133  135          if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
 134  136              &child, &children) == 0) {
 135  137                  for (c = 0; c < children; c++)
 136  138                          if (fix_paths(child[c], names) != 0)
 137  139                                  return (-1);
 138  140                  return (0);
 139  141          }
 140  142  
 141  143          /*
 142  144           * This is a leaf (file or disk) vdev.  In either case, go through
 143  145           * the name list and see if we find a matching guid.  If so, replace
 144  146           * the path and see if we can calculate a new devid.
 145  147           *
 146  148           * There may be multiple names associated with a particular guid, in
 147  149           * which case we have overlapping slices or multiple paths to the same
 148  150           * disk.  If this is the case, then we want to pick the path that is
 149  151           * the most similar to the original, where "most similar" is the number
 150  152           * of matching characters starting from the end of the path.  This will
 151  153           * preserve slice numbers even if the disks have been reorganized, and
 152  154           * will also catch preferred disk names if multiple paths exist.
 153  155           */
 154  156          verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) == 0);
 155  157          if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) != 0)
 156  158                  path = NULL;
 157  159  
 158  160          matched = 0;
 159  161          best = NULL;
 160  162          for (ne = names; ne != NULL; ne = ne->ne_next) {
 161  163                  if (ne->ne_guid == guid) {
 162  164                          const char *src, *dst;
 163  165                          int count;
 164  166  
 165  167                          if (path == NULL) {
 166  168                                  best = ne;
 167  169                                  break;
 168  170                          }
 169  171  
 170  172                          src = ne->ne_name + strlen(ne->ne_name) - 1;
 171  173                          dst = path + strlen(path) - 1;
 172  174                          for (count = 0; src >= ne->ne_name && dst >= path;
 173  175                              src--, dst--, count++)
 174  176                                  if (*src != *dst)
 175  177                                          break;
 176  178  
 177  179                          /*
 178  180                           * At this point, 'count' is the number of characters
 179  181                           * matched from the end.
 180  182                           */
 181  183                          if (count > matched || best == NULL) {
 182  184                                  best = ne;
 183  185                                  matched = count;
 184  186                          }
 185  187                  }
 186  188          }
 187  189  
 188  190          if (best == NULL)
 189  191                  return (0);
 190  192  
 191  193          if (nvlist_add_string(nv, ZPOOL_CONFIG_PATH, best->ne_name) != 0)
 192  194                  return (-1);
 193  195  
 194  196          if ((devid = get_devid(best->ne_name)) == NULL) {
 195  197                  (void) nvlist_remove_all(nv, ZPOOL_CONFIG_DEVID);
 196  198          } else {
 197  199                  if (nvlist_add_string(nv, ZPOOL_CONFIG_DEVID, devid) != 0)
 198  200                          return (-1);
 199  201                  devid_str_free(devid);
 200  202          }
 201  203  
 202  204          return (0);
 203  205  }
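
The "most similar" heuristic above boils down to counting matching characters from the end of each string. The scoring step in isolation (suffix_match() is a hypothetical name used only in this sketch):

    /* Count matching trailing characters; a higher count is "more similar". */
    static int
    suffix_match(const char *name, const char *path)
    {
            const char *src = name + strlen(name) - 1;
            const char *dst = path + strlen(path) - 1;
            int count = 0;

            while (src >= name && dst >= path && *src == *dst) {
                    src--;
                    dst--;
                    count++;
            }
            return (count);
    }

For example, suffix_match("/dev/dsk/c1t0d0s0", "/dev/dsk/c0t0d0s0") is 6 ("t0d0s0"), so a disk that moved controllers still keeps its slice number.
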
 204  206  
 205  207  /*
 206  208   * Add the given configuration to the list of known devices.
 207  209   */
 208  210  static int
 209  211  add_config(libzfs_handle_t *hdl, pool_list_t *pl, const char *path,
 210  212      nvlist_t *config)
 211  213  {
 212  214          uint64_t pool_guid, vdev_guid, top_guid, txg, state;
 213  215          pool_entry_t *pe;
 214  216          vdev_entry_t *ve;
 215  217          config_entry_t *ce;
 216  218          name_entry_t *ne;
 217  219  
 218  220          /*
 219  221           * If this is a hot spare not currently in use or level 2 cache
 220  222           * device, add it to the list of names to translate, but don't do
 221  223           * anything else.
 222  224           */
 223  225          if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
 224  226              &state) == 0 &&
 225  227              (state == POOL_STATE_SPARE || state == POOL_STATE_L2CACHE) &&
 226  228              nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID, &vdev_guid) == 0) {
 227  229                  if ((ne = zfs_alloc(hdl, sizeof (name_entry_t))) == NULL)
 228  230                          return (-1);
 229  231  
 230  232                  if ((ne->ne_name = zfs_strdup(hdl, path)) == NULL) {
 231  233                          free(ne);
 232  234                          return (-1);
 233  235                  }
 234  236                  ne->ne_guid = vdev_guid;
 235  237                  ne->ne_next = pl->names;
 236  238                  pl->names = ne;
 237  239                  return (0);
 238  240          }
 239  241  
 240  242          /*
 241  243           * If we have a valid config but cannot read any of these fields, then
 242  244           * it means we have a half-initialized label.  In vdev_label_init()
 243  245           * we write a label with txg == 0 so that we can identify the device
 244  246           * in case the user refers to the same disk later on.  If we fail to
 245  247           * create the pool, we'll be left with a label in this state
 246  248           * which should not be considered part of a valid pool.
 247  249           */
 248  250          if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
 249  251              &pool_guid) != 0 ||
 250  252              nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID,
 251  253              &vdev_guid) != 0 ||
 252  254              nvlist_lookup_uint64(config, ZPOOL_CONFIG_TOP_GUID,
 253  255              &top_guid) != 0 ||
 254  256              nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG,
 255  257              &txg) != 0 || txg == 0) {
 256  258                  nvlist_free(config);
 257  259                  return (0);
 258  260          }
 259  261  
 260  262          /*
 261  263           * First, see if we know about this pool.  If not, then add it to the
 262  264           * list of known pools.
 263  265           */
 264  266          for (pe = pl->pools; pe != NULL; pe = pe->pe_next) {
 265  267                  if (pe->pe_guid == pool_guid)
 266  268                          break;
 267  269          }
 268  270  
 269  271          if (pe == NULL) {
 270  272                  if ((pe = zfs_alloc(hdl, sizeof (pool_entry_t))) == NULL) {
 271  273                          nvlist_free(config);
 272  274                          return (-1);
 273  275                  }
 274  276                  pe->pe_guid = pool_guid;
 275  277                  pe->pe_next = pl->pools;
 276  278                  pl->pools = pe;
 277  279          }
 278  280  
 279  281          /*
  280  282           * Second, see if we know about this toplevel vdev.  Add it if it's
  281  283           * missing.
 282  284           */
 283  285          for (ve = pe->pe_vdevs; ve != NULL; ve = ve->ve_next) {
 284  286                  if (ve->ve_guid == top_guid)
 285  287                          break;
 286  288          }
 287  289  
 288  290          if (ve == NULL) {
 289  291                  if ((ve = zfs_alloc(hdl, sizeof (vdev_entry_t))) == NULL) {
 290  292                          nvlist_free(config);
 291  293                          return (-1);
 292  294                  }
 293  295                  ve->ve_guid = top_guid;
 294  296                  ve->ve_next = pe->pe_vdevs;
 295  297                  pe->pe_vdevs = ve;
 296  298          }
 297  299  
 298  300          /*
 299  301           * Third, see if we have a config with a matching transaction group.  If
 300  302           * so, then we do nothing.  Otherwise, add it to the list of known
 301  303           * configs.
 302  304           */
 303  305          for (ce = ve->ve_configs; ce != NULL; ce = ce->ce_next) {
 304  306                  if (ce->ce_txg == txg)
 305  307                          break;
 306  308          }
 307  309  
 308  310          if (ce == NULL) {
 309  311                  if ((ce = zfs_alloc(hdl, sizeof (config_entry_t))) == NULL) {
 310  312                          nvlist_free(config);
 311  313                          return (-1);
 312  314                  }
 313  315                  ce->ce_txg = txg;
 314  316                  ce->ce_config = config;
 315  317                  ce->ce_next = ve->ve_configs;
 316  318                  ve->ve_configs = ce;
 317  319          } else {
 318  320                  nvlist_free(config);
 319  321          }
 320  322  
 321  323          /*
 322  324           * At this point we've successfully added our config to the list of
 323  325           * known configs.  The last thing to do is add the vdev guid -> path
 324  326           * mappings so that we can fix up the configuration as necessary before
 325  327           * doing the import.
 326  328           */
 327  329          if ((ne = zfs_alloc(hdl, sizeof (name_entry_t))) == NULL)
 328  330                  return (-1);
 329  331  
 330  332          if ((ne->ne_name = zfs_strdup(hdl, path)) == NULL) {
 331  333                  free(ne);
 332  334                  return (-1);
 333  335          }
 334  336  
 335  337          ne->ne_guid = vdev_guid;
 336  338          ne->ne_next = pl->names;
 337  339          pl->names = ne;
 338  340  
 339  341          return (0);
 340  342  }
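
Scanning code later in this file reads one label per device and hands it to add_config(), which owns the nvlist from then on (it is freed here on duplicates and errors). A hedged sketch of that read-then-add step, with error handling elided, hdl as obtained from libzfs_init(), and the device path purely illustrative; zpool_read_label() is defined below:

    nvlist_t *config;
    pool_list_t pools = { 0 };
    int fd;

    if ((fd = open("/dev/rdsk/c0t0d0s0", O_RDONLY)) >= 0) {
            if (zpool_read_label(fd, &config) == 0 && config != NULL)
                    (void) add_config(hdl, &pools, "/dev/dsk/c0t0d0s0",
                        config);
            (void) close(fd);
    }
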
 341  343  
 342  344  /*
 343  345   * Returns true if the named pool matches the given GUID.
 344  346   */
 345  347  static int
 346  348  pool_active(libzfs_handle_t *hdl, const char *name, uint64_t guid,
 347  349      boolean_t *isactive)
 348  350  {
 349  351          zpool_handle_t *zhp;
 350  352          uint64_t theguid;
 351  353  
 352  354          if (zpool_open_silent(hdl, name, &zhp) != 0)
 353  355                  return (-1);
 354  356  
 355  357          if (zhp == NULL) {
 356  358                  *isactive = B_FALSE;
 357  359                  return (0);
 358  360          }
 359  361  
 360  362          verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_POOL_GUID,
 361  363              &theguid) == 0);
 362  364  
 363  365          zpool_close(zhp);
 364  366  
 365  367          *isactive = (theguid == guid);
 366  368          return (0);
 367  369  }
 368  370  
 369  371  static nvlist_t *
 370  372  refresh_config(libzfs_handle_t *hdl, nvlist_t *config)
 371  373  {
 372  374          nvlist_t *nvl;
 373  375          zfs_cmd_t zc = { 0 };
 374  376          int err;
 375  377  
 376  378          if (zcmd_write_conf_nvlist(hdl, &zc, config) != 0)
 377  379                  return (NULL);
 378  380  
 379  381          if (zcmd_alloc_dst_nvlist(hdl, &zc,
 380  382              zc.zc_nvlist_conf_size * 2) != 0) {
 381  383                  zcmd_free_nvlists(&zc);
 382  384                  return (NULL);
 383  385          }
 384  386  
 385  387          while ((err = ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_TRYIMPORT,
 386  388              &zc)) != 0 && errno == ENOMEM) {
 387  389                  if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
 388  390                          zcmd_free_nvlists(&zc);
 389  391                          return (NULL);
 390  392                  }
 391  393          }
 392  394  
 393  395          if (err) {
 394  396                  zcmd_free_nvlists(&zc);
 395  397                  return (NULL);
 396  398          }
 397  399  
 398  400          if (zcmd_read_dst_nvlist(hdl, &zc, &nvl) != 0) {
 399  401                  zcmd_free_nvlists(&zc);
 400  402                  return (NULL);
 401  403          }
 402  404  
 403  405          zcmd_free_nvlists(&zc);
 404  406          return (nvl);
 405  407  }
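
refresh_config() follows the standard libzfs pattern for ioctls whose output size is unknown up front: seed the destination buffer (twice the packed config here) and keep growing it while the kernel reports ENOMEM. Reduced to its skeleton (names as above; purely illustrative):

    while ((err = ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_TRYIMPORT,
        &zc)) != 0 && errno == ENOMEM) {
            /* grow zc.zc_nvlist_dst and retry the ioctl */
            if (zcmd_expand_dst_nvlist(hdl, &zc) != 0)
                    break;  /* allocation failed; caller cleans up */
    }
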
 406  408  
 407  409  /*
 408  410   * Determine if the vdev id is a hole in the namespace.
 409  411   */
 410  412  boolean_t
 411  413  vdev_is_hole(uint64_t *hole_array, uint_t holes, uint_t id)
 412  414  {
 413  415          for (int c = 0; c < holes; c++) {
 414  416  
 415  417                  /* Top-level is a hole */
 416  418                  if (hole_array[c] == id)
 417  419                          return (B_TRUE);
 418  420          }
 419  421          return (B_FALSE);
 420  422  }
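
A quick illustration of the check: with two removed top-levels recorded in the hole array, only their ids report as holes (the values here are made up; assumes <assert.h>):

    uint64_t hole_array[] = { 1, 3 };

    assert(vdev_is_hole(hole_array, 2, 1) == B_TRUE);
    assert(vdev_is_hole(hole_array, 2, 2) == B_FALSE);
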
 421  423  
 422  424  /*
 423  425   * Convert our list of pools into the definitive set of configurations.  We
 424  426   * start by picking the best config for each toplevel vdev.  Once that's done,
 425  427   * we assemble the toplevel vdevs into a full config for the pool.  We make a
 426  428   * pass to fix up any incorrect paths, and then add it to the main list to
 427  429   * return to the user.
 428  430   */
 429  431  static nvlist_t *
 430  432  get_configs(libzfs_handle_t *hdl, pool_list_t *pl, boolean_t active_ok)
 431  433  {
 432  434          pool_entry_t *pe;
 433  435          vdev_entry_t *ve;
 434  436          config_entry_t *ce;
 435  437          nvlist_t *ret = NULL, *config = NULL, *tmp, *nvtop, *nvroot;
 436  438          nvlist_t **spares, **l2cache;
 437  439          uint_t i, nspares, nl2cache;
 438  440          boolean_t config_seen;
 439  441          uint64_t best_txg;
 440      -        char *name, *hostname;
      442 +        char *name, *hostname, *comment;
 441  443          uint64_t version, guid;
 442  444          uint_t children = 0;
 443  445          nvlist_t **child = NULL;
 444  446          uint_t holes;
 445  447          uint64_t *hole_array, max_id;
 446  448          uint_t c;
 447  449          boolean_t isactive;
 448  450          uint64_t hostid;
 449  451          nvlist_t *nvl;
 450  452          boolean_t found_one = B_FALSE;
 451  453          boolean_t valid_top_config = B_FALSE;
 452  454  
 453  455          if (nvlist_alloc(&ret, 0, 0) != 0)
 454  456                  goto nomem;
 455  457  
 456  458          for (pe = pl->pools; pe != NULL; pe = pe->pe_next) {
 457  459                  uint64_t id, max_txg = 0;
 458  460  
 459  461                  if (nvlist_alloc(&config, NV_UNIQUE_NAME, 0) != 0)
 460  462                          goto nomem;
 461  463                  config_seen = B_FALSE;
 462  464  
 463  465                  /*
 464  466                   * Iterate over all toplevel vdevs.  Grab the pool configuration
 465  467                   * from the first one we find, and then go through the rest and
 466  468                   * add them as necessary to the 'vdevs' member of the config.
 467  469                   */
 468  470                  for (ve = pe->pe_vdevs; ve != NULL; ve = ve->ve_next) {
 469  471  
 470  472                          /*
 471  473                           * Determine the best configuration for this vdev by
 472  474                           * selecting the config with the latest transaction
 473  475                           * group.
 474  476                           */
 475  477                          best_txg = 0;
 476  478                          for (ce = ve->ve_configs; ce != NULL;
 477  479                              ce = ce->ce_next) {
 478  480  
 479  481                                  if (ce->ce_txg > best_txg) {
 480  482                                          tmp = ce->ce_config;
 481  483                                          best_txg = ce->ce_txg;
 482  484                                  }
 483  485                          }
 484  486  
 485  487                          /*
 486  488                           * We rely on the fact that the max txg for the
 487  489                           * pool will contain the most up-to-date information
 488  490                           * about the valid top-levels in the vdev namespace.
 489  491                           */
 490  492                          if (best_txg > max_txg) {
 491  493                                  (void) nvlist_remove(config,
 492  494                                      ZPOOL_CONFIG_VDEV_CHILDREN,
 493  495                                      DATA_TYPE_UINT64);
 494  496                                  (void) nvlist_remove(config,
 495  497                                      ZPOOL_CONFIG_HOLE_ARRAY,
 496  498                                      DATA_TYPE_UINT64_ARRAY);
 497  499  
 498  500                                  max_txg = best_txg;
 499  501                                  hole_array = NULL;
 500  502                                  holes = 0;
 501  503                                  max_id = 0;
 502  504                                  valid_top_config = B_FALSE;
 503  505  
 504  506                                  if (nvlist_lookup_uint64(tmp,
 505  507                                      ZPOOL_CONFIG_VDEV_CHILDREN, &max_id) == 0) {
 506  508                                          verify(nvlist_add_uint64(config,
 507  509                                              ZPOOL_CONFIG_VDEV_CHILDREN,
 508  510                                              max_id) == 0);
 509  511                                          valid_top_config = B_TRUE;
 510  512                                  }
 511  513  
 512  514                                  if (nvlist_lookup_uint64_array(tmp,
 513  515                                      ZPOOL_CONFIG_HOLE_ARRAY, &hole_array,
 514  516                                      &holes) == 0) {
 515  517                                          verify(nvlist_add_uint64_array(config,
 516  518                                              ZPOOL_CONFIG_HOLE_ARRAY,
 517  519                                              hole_array, holes) == 0);
 518  520                                  }
 519  521                          }
 520  522  
 521  523                          if (!config_seen) {
 522  524                                  /*
 523  525                                   * Copy the relevant pieces of data to the pool
 524  526                                   * configuration:
 525  527                                   *
 526  528                                   *      version
 527  529                                   *      pool guid
 528  530                                   *      name
      531 +                                 *      comment (if available)
 529  532                                   *      pool state
 530  533                                   *      hostid (if available)
 531  534                                   *      hostname (if available)
 532  535                                   */
 533  536                                  uint64_t state;
 534  537  
 535  538                                  verify(nvlist_lookup_uint64(tmp,
 536  539                                      ZPOOL_CONFIG_VERSION, &version) == 0);
 537  540                                  if (nvlist_add_uint64(config,
 538  541                                      ZPOOL_CONFIG_VERSION, version) != 0)
 539  542                                          goto nomem;
 540  543                                  verify(nvlist_lookup_uint64(tmp,
 541  544                                      ZPOOL_CONFIG_POOL_GUID, &guid) == 0);
 542  545                                  if (nvlist_add_uint64(config,
 543  546                                      ZPOOL_CONFIG_POOL_GUID, guid) != 0)
 544  547                                          goto nomem;
 545  548                                  verify(nvlist_lookup_string(tmp,
 546  549                                      ZPOOL_CONFIG_POOL_NAME, &name) == 0);
 547  550                                  if (nvlist_add_string(config,
 548  551                                      ZPOOL_CONFIG_POOL_NAME, name) != 0)
 549  552                                          goto nomem;
      553 +
      554 +                                /*
       555 +                                 * COMMENT is optional; don't bail if it's not
       556 +                                 * there. Instead, set it to NULL.
      557 +                                 */
      558 +                                if (nvlist_lookup_string(tmp,
      559 +                                    ZPOOL_CONFIG_COMMENT, &comment) != 0)
      560 +                                        comment = NULL;
      561 +                                else if (nvlist_add_string(config,
      562 +                                    ZPOOL_CONFIG_COMMENT, comment) != 0)
      563 +                                        goto nomem;
      564 +
 550  565                                  verify(nvlist_lookup_uint64(tmp,
 551  566                                      ZPOOL_CONFIG_POOL_STATE, &state) == 0);
 552  567                                  if (nvlist_add_uint64(config,
 553  568                                      ZPOOL_CONFIG_POOL_STATE, state) != 0)
 554  569                                          goto nomem;
      570 +
 555  571                                  hostid = 0;
 556  572                                  if (nvlist_lookup_uint64(tmp,
 557  573                                      ZPOOL_CONFIG_HOSTID, &hostid) == 0) {
 558  574                                          if (nvlist_add_uint64(config,
 559  575                                              ZPOOL_CONFIG_HOSTID, hostid) != 0)
 560  576                                                  goto nomem;
 561  577                                          verify(nvlist_lookup_string(tmp,
 562  578                                              ZPOOL_CONFIG_HOSTNAME,
 563  579                                              &hostname) == 0);
 564  580                                          if (nvlist_add_string(config,
 565  581                                              ZPOOL_CONFIG_HOSTNAME,
 566  582                                              hostname) != 0)
 567  583                                                  goto nomem;
 568  584                                  }
 569  585  
 570  586                                  config_seen = B_TRUE;
 571  587                          }
 572  588  
 573  589                          /*
 574  590                           * Add this top-level vdev to the child array.
 575  591                           */
 576  592                          verify(nvlist_lookup_nvlist(tmp,
 577  593                              ZPOOL_CONFIG_VDEV_TREE, &nvtop) == 0);
 578  594                          verify(nvlist_lookup_uint64(nvtop, ZPOOL_CONFIG_ID,
 579  595                              &id) == 0);
 580  596  
 581  597                          if (id >= children) {
 582  598                                  nvlist_t **newchild;
 583  599  
 584  600                                  newchild = zfs_alloc(hdl, (id + 1) *
 585  601                                      sizeof (nvlist_t *));
 586  602                                  if (newchild == NULL)
 587  603                                          goto nomem;
 588  604  
 589  605                                  for (c = 0; c < children; c++)
 590  606                                          newchild[c] = child[c];
 591  607  
 592  608                                  free(child);
 593  609                                  child = newchild;
 594  610                                  children = id + 1;
 595  611                          }
 596  612                          if (nvlist_dup(nvtop, &child[id], 0) != 0)
 597  613                                  goto nomem;
 598  614  
 599  615                  }
 600  616  
 601  617                  /*
 602  618                   * If we have information about all the top-levels then
 603  619                   * clean up the nvlist which we've constructed. This
 604  620                   * means removing any extraneous devices that are
 605  621                   * beyond the valid range or adding devices to the end
 606  622                   * of our array which appear to be missing.
 607  623                   */
 608  624                  if (valid_top_config) {
 609  625                          if (max_id < children) {
 610  626                                  for (c = max_id; c < children; c++)
 611  627                                          nvlist_free(child[c]);
 612  628                                  children = max_id;
 613  629                          } else if (max_id > children) {
 614  630                                  nvlist_t **newchild;
 615  631  
 616  632                                  newchild = zfs_alloc(hdl, (max_id) *
 617  633                                      sizeof (nvlist_t *));
 618  634                                  if (newchild == NULL)
 619  635                                          goto nomem;
 620  636  
 621  637                                  for (c = 0; c < children; c++)
 622  638                                          newchild[c] = child[c];
 623  639  
 624  640                                  free(child);
 625  641                                  child = newchild;
 626  642                                  children = max_id;
 627  643                          }
 628  644                  }
 629  645  
 630  646                  verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
 631  647                      &guid) == 0);
 632  648  
 633  649                  /*
 634  650                   * The vdev namespace may contain holes as a result of
 635  651                   * device removal. We must add them back into the vdev
 636  652                   * tree before we process any missing devices.
 637  653                   */
 638  654                  if (holes > 0) {
 639  655                          ASSERT(valid_top_config);
 640  656  
 641  657                          for (c = 0; c < children; c++) {
 642  658                                  nvlist_t *holey;
 643  659  
 644  660                                  if (child[c] != NULL ||
 645  661                                      !vdev_is_hole(hole_array, holes, c))
 646  662                                          continue;
 647  663  
 648  664                                  if (nvlist_alloc(&holey, NV_UNIQUE_NAME,
 649  665                                      0) != 0)
 650  666                                          goto nomem;
 651  667  
 652  668                                  /*
 653  669                                   * Holes in the namespace are treated as
 654  670                                   * "hole" top-level vdevs and have a
 655  671                                   * special flag set on them.
 656  672                                   */
 657  673                                  if (nvlist_add_string(holey,
 658  674                                      ZPOOL_CONFIG_TYPE,
 659  675                                      VDEV_TYPE_HOLE) != 0 ||
 660  676                                      nvlist_add_uint64(holey,
 661  677                                      ZPOOL_CONFIG_ID, c) != 0 ||
 662  678                                      nvlist_add_uint64(holey,
 663  679                                      ZPOOL_CONFIG_GUID, 0ULL) != 0)
 664  680                                          goto nomem;
 665  681                                  child[c] = holey;
 666  682                          }
 667  683                  }
 668  684  
 669  685                  /*
 670  686                   * Look for any missing top-level vdevs.  If this is the case,
 671  687                   * create a faked up 'missing' vdev as a placeholder.  We cannot
 672  688                   * simply compress the child array, because the kernel performs
 673  689                   * certain checks to make sure the vdev IDs match their location
 674  690                   * in the configuration.
 675  691                   */
 676  692                  for (c = 0; c < children; c++) {
 677  693                          if (child[c] == NULL) {
 678  694                                  nvlist_t *missing;
 679  695                                  if (nvlist_alloc(&missing, NV_UNIQUE_NAME,
 680  696                                      0) != 0)
 681  697                                          goto nomem;
 682  698                                  if (nvlist_add_string(missing,
 683  699                                      ZPOOL_CONFIG_TYPE,
 684  700                                      VDEV_TYPE_MISSING) != 0 ||
 685  701                                      nvlist_add_uint64(missing,
 686  702                                      ZPOOL_CONFIG_ID, c) != 0 ||
 687  703                                      nvlist_add_uint64(missing,
 688  704                                      ZPOOL_CONFIG_GUID, 0ULL) != 0) {
 689  705                                          nvlist_free(missing);
 690  706                                          goto nomem;
 691  707                                  }
 692  708                                  child[c] = missing;
 693  709                          }
 694  710                  }
 695  711  
 696  712                  /*
 697  713                   * Put all of this pool's top-level vdevs into a root vdev.
 698  714                   */
 699  715                  if (nvlist_alloc(&nvroot, NV_UNIQUE_NAME, 0) != 0)
 700  716                          goto nomem;
 701  717                  if (nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE,
 702  718                      VDEV_TYPE_ROOT) != 0 ||
 703  719                      nvlist_add_uint64(nvroot, ZPOOL_CONFIG_ID, 0ULL) != 0 ||
 704  720                      nvlist_add_uint64(nvroot, ZPOOL_CONFIG_GUID, guid) != 0 ||
 705  721                      nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
 706  722                      child, children) != 0) {
 707  723                          nvlist_free(nvroot);
 708  724                          goto nomem;
 709  725                  }
 710  726  
 711  727                  for (c = 0; c < children; c++)
 712  728                          nvlist_free(child[c]);
 713  729                  free(child);
 714  730                  children = 0;
 715  731                  child = NULL;
 716  732  
 717  733                  /*
 718  734                   * Go through and fix up any paths and/or devids based on our
 719  735                   * known list of vdev GUID -> path mappings.
 720  736                   */
 721  737                  if (fix_paths(nvroot, pl->names) != 0) {
 722  738                          nvlist_free(nvroot);
 723  739                          goto nomem;
 724  740                  }
 725  741  
 726  742                  /*
 727  743                   * Add the root vdev to this pool's configuration.
 728  744                   */
 729  745                  if (nvlist_add_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
 730  746                      nvroot) != 0) {
 731  747                          nvlist_free(nvroot);
 732  748                          goto nomem;
 733  749                  }
 734  750                  nvlist_free(nvroot);
 735  751  
 736  752                  /*
 737  753                   * zdb uses this path to report on active pools that were
 738  754                   * imported or created using -R.
 739  755                   */
 740  756                  if (active_ok)
 741  757                          goto add_pool;
 742  758  
 743  759                  /*
 744  760                   * Determine if this pool is currently active, in which case we
 745  761                   * can't actually import it.
 746  762                   */
 747  763                  verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
 748  764                      &name) == 0);
 749  765                  verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
 750  766                      &guid) == 0);
 751  767  
 752  768                  if (pool_active(hdl, name, guid, &isactive) != 0)
 753  769                          goto error;
 754  770  
 755  771                  if (isactive) {
 756  772                          nvlist_free(config);
 757  773                          config = NULL;
 758  774                          continue;
 759  775                  }
 760  776  
 761  777                  if ((nvl = refresh_config(hdl, config)) == NULL) {
 762  778                          nvlist_free(config);
 763  779                          config = NULL;
 764  780                          continue;
 765  781                  }
 766  782  
 767  783                  nvlist_free(config);
 768  784                  config = nvl;
 769  785  
 770  786                  /*
 771  787                   * Go through and update the paths for spares, now that we have
 772  788                   * them.
 773  789                   */
 774  790                  verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
 775  791                      &nvroot) == 0);
 776  792                  if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
 777  793                      &spares, &nspares) == 0) {
 778  794                          for (i = 0; i < nspares; i++) {
 779  795                                  if (fix_paths(spares[i], pl->names) != 0)
 780  796                                          goto nomem;
 781  797                          }
 782  798                  }
 783  799  
 784  800                  /*
 785  801                   * Update the paths for l2cache devices.
 786  802                   */
 787  803                  if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
 788  804                      &l2cache, &nl2cache) == 0) {
 789  805                          for (i = 0; i < nl2cache; i++) {
 790  806                                  if (fix_paths(l2cache[i], pl->names) != 0)
 791  807                                          goto nomem;
 792  808                          }
 793  809                  }
 794  810  
 795  811                  /*
 796  812                   * Restore the original information read from the actual label.
 797  813                   */
 798  814                  (void) nvlist_remove(config, ZPOOL_CONFIG_HOSTID,
 799  815                      DATA_TYPE_UINT64);
 800  816                  (void) nvlist_remove(config, ZPOOL_CONFIG_HOSTNAME,
 801  817                      DATA_TYPE_STRING);
 802  818                  if (hostid != 0) {
 803  819                          verify(nvlist_add_uint64(config, ZPOOL_CONFIG_HOSTID,
 804  820                              hostid) == 0);
 805  821                          verify(nvlist_add_string(config, ZPOOL_CONFIG_HOSTNAME,
 806  822                              hostname) == 0);
 807  823                  }
 808  824  
 809  825  add_pool:
 810  826                  /*
 811  827                   * Add this pool to the list of configs.
 812  828                   */
 813  829                  verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
 814  830                      &name) == 0);
 815  831                  if (nvlist_add_nvlist(ret, name, config) != 0)
 816  832                          goto nomem;
 817  833  
 818  834                  found_one = B_TRUE;
 819  835                  nvlist_free(config);
 820  836                  config = NULL;
 821  837          }
 822  838  
 823  839          if (!found_one) {
 824  840                  nvlist_free(ret);
 825  841                  ret = NULL;
 826  842          }
 827  843  
 828  844          return (ret);
 829  845  
 830  846  nomem:
 831  847          (void) no_memory(hdl);
 832  848  error:
 833  849          nvlist_free(config);
 834  850          nvlist_free(ret);
 835  851          for (c = 0; c < children; c++)
 836  852                  nvlist_free(child[c]);
 837  853          free(child);
 838  854  
 839  855          return (NULL);
 840  856  }
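
With the ZPOOL_CONFIG_COMMENT handling added above, a pool's comment now rides along in the configs returned by get_configs(), so import callers can surface it without touching the pool. A hedged sketch, where pool_config stands for one entry of the returned nvlist:

    char *comment;

    /* ZPOOL_CONFIG_COMMENT is optional; tolerate its absence. */
    if (nvlist_lookup_string(pool_config, ZPOOL_CONFIG_COMMENT,
        &comment) == 0)
            (void) printf("comment: %s\n", comment);
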
 841  857  
 842  858  /*
 843  859   * Return the offset of the given label.
 844  860   */
 845  861  static uint64_t
 846  862  label_offset(uint64_t size, int l)
 847  863  {
 848  864          ASSERT(P2PHASE_TYPED(size, sizeof (vdev_label_t), uint64_t) == 0);
 849  865          return (l * sizeof (vdev_label_t) + (l < VDEV_LABELS / 2 ?
 850  866              0 : size - VDEV_LABELS * sizeof (vdev_label_t)));
 851  867  }
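
ZFS keeps VDEV_LABELS (four) label copies, two at the front of the device and two at the end; label_offset() encodes that split. Spelled out for a size that is already a multiple of sizeof (vdev_label_t):

    assert(label_offset(size, 0) == 0);
    assert(label_offset(size, 1) == sizeof (vdev_label_t));
    assert(label_offset(size, 2) == size - 2 * sizeof (vdev_label_t));
    assert(label_offset(size, 3) == size - 1 * sizeof (vdev_label_t));
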
 852  868  
 853  869  /*
 854  870   * Given a file descriptor, read the label information and return an nvlist
 855  871   * describing the configuration, if there is one.
 856  872   */
 857  873  int
 858  874  zpool_read_label(int fd, nvlist_t **config)
 859  875  {
 860  876          struct stat64 statbuf;
 861  877          int l;
 862  878          vdev_label_t *label;
 863  879          uint64_t state, txg, size;
 864  880  
 865  881          *config = NULL;
 866  882  
 867  883          if (fstat64(fd, &statbuf) == -1)
 868  884                  return (0);
 869  885          size = P2ALIGN_TYPED(statbuf.st_size, sizeof (vdev_label_t), uint64_t);
 870  886  
 871  887          if ((label = malloc(sizeof (vdev_label_t))) == NULL)
 872  888                  return (-1);
 873  889  
 874  890          for (l = 0; l < VDEV_LABELS; l++) {
 875  891                  if (pread64(fd, label, sizeof (vdev_label_t),
 876  892                      label_offset(size, l)) != sizeof (vdev_label_t))
 877  893                          continue;
 878  894  
 879  895                  if (nvlist_unpack(label->vl_vdev_phys.vp_nvlist,
 880  896                      sizeof (label->vl_vdev_phys.vp_nvlist), config, 0) != 0)
 881  897                          continue;
 882  898  
 883  899                  if (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_POOL_STATE,
 884  900                      &state) != 0 || state > POOL_STATE_L2CACHE) {
 885  901                          nvlist_free(*config);
 886  902                          continue;
 887  903                  }
 888  904  
 889  905                  if (state != POOL_STATE_SPARE && state != POOL_STATE_L2CACHE &&
 890  906                      (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_POOL_TXG,
 891  907                      &txg) != 0 || txg == 0)) {
 892  908                          nvlist_free(*config);
 893  909                          continue;
 894  910                  }
 895  911  
 896  912                  free(label);
 897  913                  return (0);
 898  914          }
 899  915  
 900  916          free(label);
 901  917          *config = NULL;
 902  918          return (0);
 903  919  }
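
Note the calling convention: zpool_read_label() returns 0 with *config left NULL when no intact label is found, so callers must test both. A minimal hedged usage sketch (the device path is illustrative):

    nvlist_t *config;
    char *name;
    int fd;

    if ((fd = open("/dev/rdsk/c0t0d0s0", O_RDONLY)) >= 0) {
            if (zpool_read_label(fd, &config) == 0 && config != NULL) {
                    if (nvlist_lookup_string(config,
                        ZPOOL_CONFIG_POOL_NAME, &name) == 0)
                            (void) printf("pool: %s\n", name);
                    nvlist_free(config);
            }
            (void) close(fd);
    }
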
 904  920  
 905  921  typedef struct rdsk_node {
 906  922          char *rn_name;
 907  923          int rn_dfd;
 908  924          libzfs_handle_t *rn_hdl;
 909  925          nvlist_t *rn_config;
 910  926          avl_tree_t *rn_avl;
 911  927          avl_node_t rn_node;
 912  928          boolean_t rn_nozpool;
 913  929  } rdsk_node_t;
 914  930  
 915  931  static int
 916  932  slice_cache_compare(const void *arg1, const void *arg2)
 917  933  {
 918  934          const char  *nm1 = ((rdsk_node_t *)arg1)->rn_name;
 919  935          const char  *nm2 = ((rdsk_node_t *)arg2)->rn_name;
 920  936          char *nm1slice, *nm2slice;
 921  937          int rv;
 922  938  
 923  939          /*
 924  940           * slices zero and two are the most likely to provide results,
 925  941           * so put those first
 926  942           */
 927  943          nm1slice = strstr(nm1, "s0");
 928  944          nm2slice = strstr(nm2, "s0");
 929  945          if (nm1slice && !nm2slice) {
 930  946                  return (-1);
 931  947          }
 932  948          if (!nm1slice && nm2slice) {
 933  949                  return (1);
 934  950          }
 935  951          nm1slice = strstr(nm1, "s2");
 936  952          nm2slice = strstr(nm2, "s2");
 937  953          if (nm1slice && !nm2slice) {
 938  954                  return (-1);
 939  955          }
 940  956          if (!nm1slice && nm2slice) {
 941  957                  return (1);
 942  958          }
 943  959  
 944  960          rv = strcmp(nm1, nm2);
 945  961          if (rv == 0)
 946  962                  return (0);
 947  963          return (rv > 0 ? 1 : -1);
 948  964  }
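
In an AVL tree built with this comparator, s0 names sort ahead of s2 names, which sort ahead of everything else, so the slices most likely to carry a label are probed first. For instance (designated initializers used only for brevity in this sketch):

    rdsk_node_t a = { .rn_name = "c0t0d0s0" };
    rdsk_node_t b = { .rn_name = "c0t0d0s2" };
    rdsk_node_t c = { .rn_name = "c0t0d0s3" };

    assert(slice_cache_compare(&a, &b) == -1);  /* s0 before s2 */
    assert(slice_cache_compare(&b, &c) == -1);  /* s2 before the rest */
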
 949  965  
 950  966  static void
 951  967  check_one_slice(avl_tree_t *r, char *diskname, uint_t partno,
 952  968      diskaddr_t size, uint_t blksz)
 953  969  {
 954  970          rdsk_node_t tmpnode;
 955  971          rdsk_node_t *node;
 956  972          char sname[MAXNAMELEN];
 957  973  
 958  974          tmpnode.rn_name = &sname[0];
 959  975          (void) snprintf(tmpnode.rn_name, MAXNAMELEN, "%s%u",
 960  976              diskname, partno);
 961  977          /*
 962  978           * protect against division by zero for disk labels that
 963  979           * contain a bogus sector size
 964  980           */
 965  981          if (blksz == 0)
 966  982                  blksz = DEV_BSIZE;
 967  983          /* too small to contain a zpool? */
 968  984          if ((size < (SPA_MINDEVSIZE / blksz)) &&
 969  985              (node = avl_find(r, &tmpnode, NULL)))
 970  986                  node->rn_nozpool = B_TRUE;
 971  987  }
 972  988  
 973  989  static void
 974  990  nozpool_all_slices(avl_tree_t *r, const char *sname)
 975  991  {
 976  992          char diskname[MAXNAMELEN];
 977  993          char *ptr;
 978  994          int i;
 979  995  
 980  996          (void) strncpy(diskname, sname, MAXNAMELEN);
 981  997          if (((ptr = strrchr(diskname, 's')) == NULL) &&
 982  998              ((ptr = strrchr(diskname, 'p')) == NULL))
 983  999                  return;
 984 1000          ptr[0] = 's';
 985 1001          ptr[1] = '\0';
 986 1002          for (i = 0; i < NDKMAP; i++)
 987 1003                  check_one_slice(r, diskname, i, 0, 1);
 988 1004          ptr[0] = 'p';
 989 1005          for (i = 0; i <= FD_NUMPART; i++)
 990 1006                  check_one_slice(r, diskname, i, 0, 1);
 991 1007  }
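
nozpool_all_slices() works by surgery on the name: truncate at the final 's' (or 'p'), then let check_one_slice() append each partition number, covering s0 .. s(NDKMAP-1) and p0 .. p4 for a name like "c0t0d0s0". The name manipulation in isolation (illustrative; the real code also normalizes a 'p' suffix to 's' first):

    char diskname[MAXNAMELEN];
    char *ptr;

    (void) strncpy(diskname, "c0t0d0s0", MAXNAMELEN);
    ptr = strrchr(diskname, 's');   /* points at the "s0" suffix */
    ptr[1] = '\0';                  /* diskname is now "c0t0d0s" */
    /* check_one_slice(r, diskname, i, 0, 1) then probes "c0t0d0s<i>" */
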
 992 1008  
 993 1009  static void
 994 1010  check_slices(avl_tree_t *r, int fd, const char *sname)
 995 1011  {
 996 1012          struct extvtoc vtoc;
 997 1013          struct dk_gpt *gpt;
 998 1014          char diskname[MAXNAMELEN];
 999 1015          char *ptr;
1000 1016          int i;
1001 1017  
1002 1018          (void) strncpy(diskname, sname, MAXNAMELEN);
1003 1019          if ((ptr = strrchr(diskname, 's')) == NULL || !isdigit(ptr[1]))
1004 1020                  return;
1005 1021          ptr[1] = '\0';
1006 1022  
1007 1023          if (read_extvtoc(fd, &vtoc) >= 0) {
1008 1024                  for (i = 0; i < NDKMAP; i++)
1009 1025                          check_one_slice(r, diskname, i,
1010 1026                              vtoc.v_part[i].p_size, vtoc.v_sectorsz);
1011 1027          } else if (efi_alloc_and_read(fd, &gpt) >= 0) {
1012 1028                  /*
1013 1029                   * on x86 we'll still have leftover links that point
1014 1030                   * to slices s[9-15], so use NDKMAP instead
1015 1031                   */
1016 1032                  for (i = 0; i < NDKMAP; i++)
1017 1033                          check_one_slice(r, diskname, i,
1018 1034                              gpt->efi_parts[i].p_size, gpt->efi_lbasize);
1019 1035                  /* nodes p[1-4] are never used with EFI labels */
1020 1036                  ptr[0] = 'p';
1021 1037                  for (i = 1; i <= FD_NUMPART; i++)
1022 1038                          check_one_slice(r, diskname, i, 0, 1);
1023 1039                  efi_free(gpt);
1024 1040          }
1025 1041  }
1026 1042  
1027 1043  static void
1028 1044  zpool_open_func(void *arg)
1029 1045  {
1030 1046          rdsk_node_t *rn = arg;
1031 1047          struct stat64 statbuf;
1032 1048          nvlist_t *config;
1033 1049          int fd;
1034 1050  
1035 1051          if (rn->rn_nozpool)
1036 1052                  return;
1037 1053          if ((fd = openat64(rn->rn_dfd, rn->rn_name, O_RDONLY)) < 0) {
1038 1054                  /* symlink to a device that's no longer there */
1039 1055                  if (errno == ENOENT)
1040 1056                          nozpool_all_slices(rn->rn_avl, rn->rn_name);
1041 1057                  return;
1042 1058          }
1043 1059          /*
1044 1060           * Ignore failed stats.  We only want regular
1045 1061           * files, character devs and block devs.
1046 1062           */
1047 1063          if (fstat64(fd, &statbuf) != 0 ||
1048 1064              (!S_ISREG(statbuf.st_mode) &&
1049 1065              !S_ISCHR(statbuf.st_mode) &&
1050 1066              !S_ISBLK(statbuf.st_mode))) {
1051 1067                  (void) close(fd);
1052 1068                  return;
1053 1069          }
1054 1070          /* this file is too small to hold a zpool */
1055 1071          if (S_ISREG(statbuf.st_mode) &&
1056 1072              statbuf.st_size < SPA_MINDEVSIZE) {
1057 1073                  (void) close(fd);
1058 1074                  return;
1059 1075          } else if (!S_ISREG(statbuf.st_mode)) {
1060 1076                  /*
1061 1077                   * Try to read the disk label first so we don't have to
1062 1078                   * open a bunch of minor nodes that can't have a zpool.
1063 1079                   */
1064 1080                  check_slices(rn->rn_avl, fd, rn->rn_name);
1065 1081          }
1066 1082  
1067 1083          if ((zpool_read_label(fd, &config)) != 0) {
1068 1084                  (void) close(fd);
1069 1085                  (void) no_memory(rn->rn_hdl);
1070 1086                  return;
1071 1087          }
1072 1088          (void) close(fd);
1073 1089  
1074 1090  
1075 1091          rn->rn_config = config;
1076 1092          if (config != NULL) {
1077 1093                  assert(rn->rn_nozpool == B_FALSE);
1078 1094          }
1079 1095  }
1080 1096  
1081 1097  /*
1082 1098   * Given a file descriptor, clear (zero) the label information.  This function
1083 1099   * is currently only used in the appliance stack as part of the ZFS sysevent
1084 1100   * module.
1085 1101   */
1086 1102  int
1087 1103  zpool_clear_label(int fd)
1088 1104  {
1089 1105          struct stat64 statbuf;
1090 1106          int l;
1091 1107          vdev_label_t *label;
1092 1108          uint64_t size;
1093 1109  
1094 1110          if (fstat64(fd, &statbuf) == -1)
1095 1111                  return (0);
1096 1112          size = P2ALIGN_TYPED(statbuf.st_size, sizeof (vdev_label_t), uint64_t);
1097 1113  
1098 1114          if ((label = calloc(sizeof (vdev_label_t), 1)) == NULL)
1099 1115                  return (-1);
1100 1116  
1101 1117          for (l = 0; l < VDEV_LABELS; l++) {
1102 1118                  if (pwrite64(fd, label, sizeof (vdev_label_t),
1103 1119                      label_offset(size, l)) != sizeof (vdev_label_t))
1104 1120                          return (-1);
1105 1121          }
1106 1122  
1107 1123          free(label);
1108 1124          return (0);
1109 1125  }
1110 1126  
1111 1127  /*
1112 1128   * Given a list of directories to search, find all pools stored on disk.  This
1113 1129   * includes partial pools which are not available to import.  If no args are
1114 1130   * given (argc is 0), then the default directory (/dev/dsk) is searched.
1115 1131   * poolname or guid (but not both) are provided by the caller when trying
1116 1132   * to import a specific pool.
1117 1133   */
1118 1134  static nvlist_t *
1119 1135  zpool_find_import_impl(libzfs_handle_t *hdl, importargs_t *iarg)
1120 1136  {
1121 1137          int i, dirs = iarg->paths;
1122 1138          DIR *dirp = NULL;
1123 1139          struct dirent64 *dp;
1124 1140          char path[MAXPATHLEN];
1125 1141          char *end, **dir = iarg->path;
1126 1142          size_t pathleft;
1127 1143          nvlist_t *ret = NULL;
1128 1144          static char *default_dir = "/dev/dsk";
1129 1145          pool_list_t pools = { 0 };
1130 1146          pool_entry_t *pe, *penext;
1131 1147          vdev_entry_t *ve, *venext;
1132 1148          config_entry_t *ce, *cenext;
1133 1149          name_entry_t *ne, *nenext;
1134 1150          avl_tree_t slice_cache;
1135 1151          rdsk_node_t *slice;
1136 1152          void *cookie;
1137 1153  
1138 1154          if (dirs == 0) {
1139 1155                  dirs = 1;
1140 1156                  dir = &default_dir;
1141 1157          }
1142 1158  
1143 1159          /*
1144 1160           * Go through and read the label configuration information from every
1145 1161           * possible device, organizing the information according to pool GUID
1146 1162           * and toplevel GUID.
1147 1163           */
1148 1164          for (i = 0; i < dirs; i++) {
1149 1165                  tpool_t *t;
1150 1166                  char *rdsk;
1151 1167                  int dfd;
1152 1168  
1153 1169                  /* use realpath to normalize the path */
1154 1170                  if (realpath(dir[i], path) == 0) {
1155 1171                          (void) zfs_error_fmt(hdl, EZFS_BADPATH,
1156 1172                              dgettext(TEXT_DOMAIN, "cannot open '%s'"), dir[i]);
1157 1173                          goto error;
1158 1174                  }
1159 1175                  end = &path[strlen(path)];
1160 1176                  *end++ = '/';
1161 1177                  *end = 0;
1162 1178                  pathleft = &path[sizeof (path)] - end;
1163 1179  
1164 1180                  /*
1165 1181                   * Using raw devices instead of block devices when we're
1166 1182                   * reading the labels skips a bunch of slow operations during
1167 1183                   * close(2) processing, so we replace /dev/dsk with /dev/rdsk.
1168 1184                   */
1169 1185                  if (strcmp(path, "/dev/dsk/") == 0)
1170 1186                          rdsk = "/dev/rdsk/";
1171 1187                  else
1172 1188                          rdsk = path;
1173 1189  
1174 1190                  if ((dfd = open64(rdsk, O_RDONLY)) < 0 ||
1175 1191                      (dirp = fdopendir(dfd)) == NULL) {
1176 1192                          zfs_error_aux(hdl, strerror(errno));
1177 1193                          (void) zfs_error_fmt(hdl, EZFS_BADPATH,
1178 1194                              dgettext(TEXT_DOMAIN, "cannot open '%s'"),
1179 1195                              rdsk);
1180 1196                          goto error;
1181 1197                  }
1182 1198  
1183 1199                  avl_create(&slice_cache, slice_cache_compare,
1184 1200                      sizeof (rdsk_node_t), offsetof(rdsk_node_t, rn_node));
1185 1201                  /*
1186 1202                   * This is not MT-safe, but we have no MT consumers of libzfs
1187 1203                   */
1188 1204                  while ((dp = readdir64(dirp)) != NULL) {
1189 1205                          const char *name = dp->d_name;
1190 1206                          if (name[0] == '.' &&
1191 1207                              (name[1] == 0 || (name[1] == '.' && name[2] == 0)))
1192 1208                                  continue;
1193 1209  
1194 1210                          slice = zfs_alloc(hdl, sizeof (rdsk_node_t));
1195 1211                          slice->rn_name = zfs_strdup(hdl, name);
1196 1212                          slice->rn_avl = &slice_cache;
1197 1213                          slice->rn_dfd = dfd;
1198 1214                          slice->rn_hdl = hdl;
1199 1215                          slice->rn_nozpool = B_FALSE;
1200 1216                          avl_add(&slice_cache, slice);
1201 1217                  }
1202 1218                  /*
1203 1219                   * Create a thread pool to do all of this in parallel;
1204 1220                   * rn_nozpool is not protected, so this is racy in that
1205 1221                   * multiple tasks could decide that the same slice
1206 1222                   * cannot hold a zpool, which is benign.  Also choose
1207 1223                   * double the number of processors; we hold a lot of
1208 1224                   * locks in the kernel, so going beyond this doesn't
1209 1225                   * buy us much.
1210 1226                   */
1211 1227                  t = tpool_create(1, 2 * sysconf(_SC_NPROCESSORS_ONLN),
1212 1228                      0, NULL);
1213 1229                  for (slice = avl_first(&slice_cache); slice;
1214 1230                      (slice = avl_walk(&slice_cache, slice,
1215 1231                      AVL_AFTER)))
1216 1232                          (void) tpool_dispatch(t, zpool_open_func, slice);
1217 1233                  tpool_wait(t);
1218 1234                  tpool_destroy(t);
1219 1235  
1220 1236                  cookie = NULL;
1221 1237                  while ((slice = avl_destroy_nodes(&slice_cache,
1222 1238                      &cookie)) != NULL) {
1223 1239                          if (slice->rn_config != NULL) {
1224 1240                                  nvlist_t *config = slice->rn_config;
1225 1241                                  boolean_t matched = B_TRUE;
1226 1242  
1227 1243                                  if (iarg->poolname != NULL) {
1228 1244                                          char *pname;
1229 1245  
1230 1246                                          matched = nvlist_lookup_string(config,
1231 1247                                              ZPOOL_CONFIG_POOL_NAME,
1232 1248                                              &pname) == 0 &&
1233 1249                                              strcmp(iarg->poolname, pname) == 0;
1234 1250                                  } else if (iarg->guid != 0) {
1235 1251                                          uint64_t this_guid;
1236 1252  
1237 1253                                          matched = nvlist_lookup_uint64(config,
1238 1254                                              ZPOOL_CONFIG_POOL_GUID,
1239 1255                                              &this_guid) == 0 &&
1240 1256                                              iarg->guid == this_guid;
1241 1257                                  }
1242 1258                                  if (!matched) {
1243 1259                                          nvlist_free(config);
1244 1260                                          config = NULL;
1245 1261                                          continue;
1246 1262                                  }
1247 1263                                  /* use the non-raw path for the config */
1248 1264                                  (void) strlcpy(end, slice->rn_name, pathleft);
1249 1265                                  if (add_config(hdl, &pools, path, config) != 0)
1250 1266                                          goto error;
1251 1267                          }
1252 1268                          free(slice->rn_name);
1253 1269                          free(slice);
1254 1270                  }
1255 1271                  avl_destroy(&slice_cache);
1256 1272  
1257 1273                  (void) closedir(dirp);
1258 1274                  dirp = NULL;
1259 1275          }
1260 1276  
1261 1277          ret = get_configs(hdl, &pools, iarg->can_be_active);
1262 1278  
1263 1279  error:
1264 1280          for (pe = pools.pools; pe != NULL; pe = penext) {
1265 1281                  penext = pe->pe_next;
1266 1282                  for (ve = pe->pe_vdevs; ve != NULL; ve = venext) {
1267 1283                          venext = ve->ve_next;
1268 1284                          for (ce = ve->ve_configs; ce != NULL; ce = cenext) {
1269 1285                                  cenext = ce->ce_next;
1270 1286                                  if (ce->ce_config)
1271 1287                                          nvlist_free(ce->ce_config);
1272 1288                                  free(ce);
1273 1289                          }
1274 1290                          free(ve);
1275 1291                  }
1276 1292                  free(pe);
1277 1293          }
1278 1294  
1279 1295          for (ne = pools.names; ne != NULL; ne = nenext) {
1280 1296                  nenext = ne->ne_next;
1281 1297                  if (ne->ne_name)
1282 1298                          free(ne->ne_name);
1283 1299                  free(ne);
1284 1300          }
1285 1301  
1286 1302          if (dirp)
1287 1303                  (void) closedir(dirp);
1288 1304  
1289 1305          return (ret);
1290 1306  }
1291 1307  
1292 1308  nvlist_t *
1293 1309  zpool_find_import(libzfs_handle_t *hdl, int argc, char **argv)
1294 1310  {
1295 1311          importargs_t iarg = { 0 };
1296 1312  
1297 1313          iarg.paths = argc;
1298 1314          iarg.path = argv;
1299 1315  
1300 1316          return (zpool_find_import_impl(hdl, &iarg));
1301 1317  }
1302 1318  
1303 1319  /*
1304 1320   * Given a cache file, return the contents as a list of importable pools.
1305 1321   * The poolname or guid (but not both) is provided by the caller when trying
1306 1322   * to import a specific pool.
1307 1323   */
1308 1324  nvlist_t *
1309 1325  zpool_find_import_cached(libzfs_handle_t *hdl, const char *cachefile,
1310 1326      char *poolname, uint64_t guid)
1311 1327  {
1312 1328          char *buf;
1313 1329          int fd;
1314 1330          struct stat64 statbuf;
1315 1331          nvlist_t *raw, *src, *dst;
1316 1332          nvlist_t *pools;
1317 1333          nvpair_t *elem;
1318 1334          char *name;
1319 1335          uint64_t this_guid;
1320 1336          boolean_t active;
1321 1337  
1322 1338          verify(poolname == NULL || guid == 0);
1323 1339  
1324 1340          if ((fd = open(cachefile, O_RDONLY)) < 0) {
1325 1341                  zfs_error_aux(hdl, "%s", strerror(errno));
1326 1342                  (void) zfs_error(hdl, EZFS_BADCACHE,
1327 1343                      dgettext(TEXT_DOMAIN, "failed to open cache file"));
1328 1344                  return (NULL);
1329 1345          }
1330 1346  
1331 1347          if (fstat64(fd, &statbuf) != 0) {
1332 1348                  zfs_error_aux(hdl, "%s", strerror(errno));
1333 1349                  (void) close(fd);
1334 1350                  (void) zfs_error(hdl, EZFS_BADCACHE,
1335 1351                      dgettext(TEXT_DOMAIN, "failed to get size of cache file"));
1336 1352                  return (NULL);
1337 1353          }
1338 1354  
1339 1355          if ((buf = zfs_alloc(hdl, statbuf.st_size)) == NULL) {
1340 1356                  (void) close(fd);
1341 1357                  return (NULL);
1342 1358          }
1343 1359  
1344 1360          if (read(fd, buf, statbuf.st_size) != statbuf.st_size) {
1345 1361                  (void) close(fd);
1346 1362                  free(buf);
1347 1363                  (void) zfs_error(hdl, EZFS_BADCACHE,
1348 1364                      dgettext(TEXT_DOMAIN,
1349 1365                      "failed to read cache file contents"));
1350 1366                  return (NULL);
1351 1367          }
1352 1368  
1353 1369          (void) close(fd);
1354 1370  
1355 1371          if (nvlist_unpack(buf, statbuf.st_size, &raw, 0) != 0) {
1356 1372                  free(buf);
1357 1373                  (void) zfs_error(hdl, EZFS_BADCACHE,
1358 1374                      dgettext(TEXT_DOMAIN,
1359 1375                      "invalid or corrupt cache file contents"));
1360 1376                  return (NULL);
1361 1377          }
1362 1378  
1363 1379          free(buf);
1364 1380  
1365 1381          /*
1366 1382           * Walk the pools in the cache file and refresh each one's
1367 1383           * state from the current on-disk configuration.
1368 1384           */
1369 1385          if (nvlist_alloc(&pools, 0, 0) != 0) {
1370 1386                  (void) no_memory(hdl);
1371 1387                  nvlist_free(raw);
1372 1388                  return (NULL);
1373 1389          }
1374 1390  
1375 1391          elem = NULL;
1376 1392          while ((elem = nvlist_next_nvpair(raw, elem)) != NULL) {
1377 1393                  verify(nvpair_value_nvlist(elem, &src) == 0);
1378 1394  
1379 1395                  verify(nvlist_lookup_string(src, ZPOOL_CONFIG_POOL_NAME,
1380 1396                      &name) == 0);
1381 1397                  if (poolname != NULL && strcmp(poolname, name) != 0)
1382 1398                          continue;
1383 1399  
1384 1400                  verify(nvlist_lookup_uint64(src, ZPOOL_CONFIG_POOL_GUID,
1385 1401                      &this_guid) == 0);
1386 1402                  if (guid != 0 && guid != this_guid)
1387 1403                          continue;
1392 1408  
1393 1409                  if (pool_active(hdl, name, this_guid, &active) != 0) {
1394 1410                          nvlist_free(raw);
1395 1411                          nvlist_free(pools);
1396 1412                          return (NULL);
1397 1413                  }
1398 1414  
1399 1415                  if (active)
1400 1416                          continue;
1401 1417  
1402 1418                  if ((dst = refresh_config(hdl, src)) == NULL) {
1403 1419                          nvlist_free(raw);
1404 1420                          nvlist_free(pools);
1405 1421                          return (NULL);
1406 1422                  }
1407 1423  
1408 1424                  if (nvlist_add_nvlist(pools, nvpair_name(elem), dst) != 0) {
1409 1425                          (void) no_memory(hdl);
1410 1426                          nvlist_free(dst);
1411 1427                          nvlist_free(raw);
1412 1428                          nvlist_free(pools);
1413 1429                          return (NULL);
1414 1430                  }
1415 1431                  nvlist_free(dst);
1416 1432          }
1417 1433  
1418 1434          nvlist_free(raw);
1419 1435          return (pools);
1420 1436  }
1421 1437  
1422 1438  static int
1423 1439  name_or_guid_exists(zpool_handle_t *zhp, void *data)
1424 1440  {
1425 1441          importargs_t *import = data;
1426 1442          int found = 0;
1427 1443  
1428 1444          if (import->poolname != NULL) {
1429 1445                  char *pool_name;
1430 1446  
1431 1447                  verify(nvlist_lookup_string(zhp->zpool_config,
1432 1448                      ZPOOL_CONFIG_POOL_NAME, &pool_name) == 0);
1433 1449                  if (strcmp(pool_name, import->poolname) == 0)
1434 1450                          found = 1;
1435 1451          } else {
1436 1452                  uint64_t pool_guid;
1437 1453  
1438 1454                  verify(nvlist_lookup_uint64(zhp->zpool_config,
1439 1455                      ZPOOL_CONFIG_POOL_GUID, &pool_guid) == 0);
1440 1456                  if (pool_guid == import->guid)
1441 1457                          found = 1;
1442 1458          }
1443 1459  
1444 1460          zpool_close(zhp);
1445 1461          return (found);
1446 1462  }
1447 1463  
1448 1464  nvlist_t *
1449 1465  zpool_search_import(libzfs_handle_t *hdl, importargs_t *import)
1450 1466  {
1451 1467          verify(import->poolname == NULL || import->guid == 0);
1452 1468  
1453 1469          if (import->unique)
1454 1470                  import->exists = zpool_iter(hdl, name_or_guid_exists, import);
1455 1471  
1456 1472          if (import->cachefile != NULL)
1457 1473                  return (zpool_find_import_cached(hdl, import->cachefile,
1458 1474                      import->poolname, import->guid));
1459 1475  
1460 1476          return (zpool_find_import_impl(hdl, import));
1461 1477  }
1462 1478  
1463 1479  boolean_t
1464 1480  find_guid(nvlist_t *nv, uint64_t guid)
1465 1481  {
1466 1482          uint64_t tmp;
1467 1483          nvlist_t **child;
1468 1484          uint_t c, children;
1469 1485  
1470 1486          verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &tmp) == 0);
1471 1487          if (tmp == guid)
1472 1488                  return (B_TRUE);
1473 1489  
1474 1490          if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
1475 1491              &child, &children) == 0) {
1476 1492                  for (c = 0; c < children; c++)
1477 1493                          if (find_guid(child[c], guid))
1478 1494                                  return (B_TRUE);
1479 1495          }
1480 1496  
1481 1497          return (B_FALSE);
1482 1498  }
1483 1499  
1484 1500  typedef struct aux_cbdata {
1485 1501          const char      *cb_type;
1486 1502          uint64_t        cb_guid;
1487 1503          zpool_handle_t  *cb_zhp;
1488 1504  } aux_cbdata_t;
1489 1505  
1490 1506  static int
1491 1507  find_aux(zpool_handle_t *zhp, void *data)
1492 1508  {
1493 1509          aux_cbdata_t *cbp = data;
1494 1510          nvlist_t **list;
1495 1511          uint_t i, count;
1496 1512          uint64_t guid;
1497 1513          nvlist_t *nvroot;
1498 1514  
1499 1515          verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
1500 1516              &nvroot) == 0);
1501 1517  
1502 1518          if (nvlist_lookup_nvlist_array(nvroot, cbp->cb_type,
1503 1519              &list, &count) == 0) {
1504 1520                  for (i = 0; i < count; i++) {
1505 1521                          verify(nvlist_lookup_uint64(list[i],
1506 1522                              ZPOOL_CONFIG_GUID, &guid) == 0);
1507 1523                          if (guid == cbp->cb_guid) {
1508 1524                                  cbp->cb_zhp = zhp;
1509 1525                                  return (1);
1510 1526                          }
1511 1527                  }
1512 1528          }
1513 1529  
1514 1530          zpool_close(zhp);
1515 1531          return (0);
1516 1532  }
1517 1533  
1518 1534  /*
1519 1535   * Determines if the pool is in use.  If so, it returns true and the state of
1520 1536   * the pool as well as the name of the pool.  The name string is allocated
1521 1537   * and must be freed by the caller.
1522 1538   */
1523 1539  int
1524 1540  zpool_in_use(libzfs_handle_t *hdl, int fd, pool_state_t *state, char **namestr,
1525 1541      boolean_t *inuse)
1526 1542  {
1527 1543          nvlist_t *config;
1528 1544          char *name;
1529 1545          boolean_t ret;
1530 1546          uint64_t guid, vdev_guid;
1531 1547          zpool_handle_t *zhp;
1532 1548          nvlist_t *pool_config;
1533 1549          uint64_t stateval, isspare;
1534 1550          aux_cbdata_t cb = { 0 };
1535 1551          boolean_t isactive;
1536 1552  
1537 1553          *inuse = B_FALSE;
1538 1554  
1539 1555          if (zpool_read_label(fd, &config) != 0) {
1540 1556                  (void) no_memory(hdl);
1541 1557                  return (-1);
1542 1558          }
1543 1559  
1544 1560          if (config == NULL)
1545 1561                  return (0);
1546 1562  
1547 1563          verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
1548 1564              &stateval) == 0);
1549 1565          verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID,
1550 1566              &vdev_guid) == 0);
1551 1567  
1552 1568          if (stateval != POOL_STATE_SPARE && stateval != POOL_STATE_L2CACHE) {
1553 1569                  verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
1554 1570                      &name) == 0);
1555 1571                  verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
1556 1572                      &guid) == 0);
1557 1573          }
1558 1574  
1559 1575          switch (stateval) {
1560 1576          case POOL_STATE_EXPORTED:
1561 1577                  /*
1562 1578                   * A pool with an exported state may in fact be imported
1563 1579                   * read-only, so check the in-core state to see if it's
1564 1580                   * active and imported read-only.  If it is, set
1565 1581                   * its state to active.
1566 1582                   */
1567 1583                  if (pool_active(hdl, name, guid, &isactive) == 0 && isactive &&
1568 1584                      (zhp = zpool_open_canfail(hdl, name)) != NULL &&
1569 1585                      zpool_get_prop_int(zhp, ZPOOL_PROP_READONLY, NULL))
1570 1586                          stateval = POOL_STATE_ACTIVE;
1571 1587  
1572 1588                  ret = B_TRUE;
1573 1589                  break;
1574 1590  
1575 1591          case POOL_STATE_ACTIVE:
1576 1592                  /*
1577 1593                   * For an active pool, we have to determine if it's really part
1578 1594                   * of a currently active pool (in which case the pool will exist
1579 1595                   * and the guid will be the same), or whether it's part of an
1580 1596                   * active pool that was disconnected without being explicitly
1581 1597                   * exported.
1582 1598                   */
1583 1599                  if (pool_active(hdl, name, guid, &isactive) != 0) {
1584 1600                          nvlist_free(config);
1585 1601                          return (-1);
1586 1602                  }
1587 1603  
1588 1604                  if (isactive) {
1589 1605                          /*
1590 1606                           * Because the device may have been removed while
1591 1607                           * offlined, we only report it as active if the vdev is
1592 1608                           * still present in the config.  Otherwise, pretend like
1593 1609                           * it's not in use.
1594 1610                           */
1595 1611                          if ((zhp = zpool_open_canfail(hdl, name)) != NULL &&
1596 1612                              (pool_config = zpool_get_config(zhp, NULL))
1597 1613                              != NULL) {
1598 1614                                  nvlist_t *nvroot;
1599 1615  
1600 1616                                  verify(nvlist_lookup_nvlist(pool_config,
1601 1617                                      ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
1602 1618                                  ret = find_guid(nvroot, vdev_guid);
1603 1619                          } else {
1604 1620                                  ret = B_FALSE;
1605 1621                          }
1606 1622  
1607 1623                          /*
1608 1624                           * If this is an active spare within another pool, we
1609 1625                           * treat it like an unused hot spare.  This allows the
1610 1626                           * user to create a pool with a hot spare that is currently
1611 1627                           * in use within another pool.  Since we return B_TRUE,
1612 1628                           * libdiskmgt will continue to prevent generic consumers
1613 1629                           * from using the device.
1614 1630                           */
1615 1631                          if (ret && nvlist_lookup_uint64(config,
1616 1632                              ZPOOL_CONFIG_IS_SPARE, &isspare) == 0 && isspare)
1617 1633                                  stateval = POOL_STATE_SPARE;
1618 1634  
1619 1635                          if (zhp != NULL)
1620 1636                                  zpool_close(zhp);
1621 1637                  } else {
1622 1638                          stateval = POOL_STATE_POTENTIALLY_ACTIVE;
1623 1639                          ret = B_TRUE;
1624 1640                  }
1625 1641                  break;
1626 1642  
1627 1643          case POOL_STATE_SPARE:
1628 1644                  /*
1629 1645                   * For a hot spare, it can be either definitively in use, or
1630 1646                   * potentially active.  To determine if it's in use, we iterate
1631 1647                   * over all pools in the system and search for one with a spare
1632 1648                   * with a matching guid.
1633 1649                   *
1634 1650                   * Due to the shared nature of spares, we don't actually report
1635 1651                   * the potentially active case as in use.  This means the user
1636 1652                   * can freely create pools on the hot spares of exported pools,
1637 1653                   * but to do otherwise makes the resulting code complicated, and
1638 1654                   * we end up having to deal with this case anyway.
1639 1655                   */
1640 1656                  cb.cb_zhp = NULL;
1641 1657                  cb.cb_guid = vdev_guid;
1642 1658                  cb.cb_type = ZPOOL_CONFIG_SPARES;
1643 1659                  if (zpool_iter(hdl, find_aux, &cb) == 1) {
1644 1660                          name = (char *)zpool_get_name(cb.cb_zhp);
1645 1661                          ret = B_TRUE;
1646 1662                  } else {
1647 1663                          ret = B_FALSE;
1648 1664                  }
1649 1665                  break;
1650 1666  
1651 1667          case POOL_STATE_L2CACHE:
1652 1668  
1653 1669                  /*
1654 1670                   * Check if any pool is currently using this l2cache device.
1655 1671                   */
1656 1672                  cb.cb_zhp = NULL;
1657 1673                  cb.cb_guid = vdev_guid;
1658 1674                  cb.cb_type = ZPOOL_CONFIG_L2CACHE;
1659 1675                  if (zpool_iter(hdl, find_aux, &cb) == 1) {
1660 1676                          name = (char *)zpool_get_name(cb.cb_zhp);
1661 1677                          ret = B_TRUE;
1662 1678                  } else {
1663 1679                          ret = B_FALSE;
1664 1680                  }
1665 1681                  break;
1666 1682  
1667 1683          default:
1668 1684                  ret = B_FALSE;
1669 1685          }
1670 1686  
1672 1688          if (ret) {
1673 1689                  if ((*namestr = zfs_strdup(hdl, name)) == NULL) {
1674 1690                          if (cb.cb_zhp)
1675 1691                                  zpool_close(cb.cb_zhp);
1676 1692                          nvlist_free(config);
1677 1693                          return (-1);
1678 1694                  }
1679 1695                  *state = (pool_state_t)stateval;
1680 1696          }
1681 1697  
1682 1698          if (cb.cb_zhp)
1683 1699                  zpool_close(cb.cb_zhp);
1684 1700  
1685 1701          nvlist_free(config);
1686 1702          *inuse = ret;
1687 1703          return (0);
1688 1704  }
  
(1124 lines elided)