4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Copyright (c) 2011, 2017 by Delphix. All rights reserved.
25 * Copyright (c) 2013, Joyent, Inc. All rights reserved.
26 * Copyright 2016 Nexenta Systems, Inc.
27 * Copyright 2016 Igor Kozhukhov <ikozhukhov@gmail.com>
28 * Copyright (c) 2017 Datto Inc.
29 */
30
31 #include <ctype.h>
32 #include <errno.h>
33 #include <devid.h>
34 #include <fcntl.h>
35 #include <libintl.h>
36 #include <stdio.h>
37 #include <stdlib.h>
38 #include <strings.h>
39 #include <unistd.h>
40 #include <libgen.h>
41 #include <sys/efi_partition.h>
42 #include <sys/vtoc.h>
43 #include <sys/zfs_ioctl.h>
44 #include <dlfcn.h>
45
46 #include "zfs_namecheck.h"
47 #include "zfs_prop.h"
48 #include "libzfs_impl.h"
49 #include "zfs_comutil.h"
50 #include "zfeature_common.h"
51
52 static int read_efi_label(nvlist_t *, diskaddr_t *, boolean_t *);
53 static boolean_t zpool_vdev_is_interior(const char *name);
54
55 #define BACKUP_SLICE "s2"
56
/*
 * Context flags for pool-property validation; consumed by
 * zpool_valid_proplist() to decide which checks apply.
 */
typedef struct prop_flags {
	int create:1;	/* Validate property on creation */
	int import:1;	/* Validate property on import */
} prop_flags_t;
61
62 /*
63 * ====================================================================
64 * zpool property functions
65 * ====================================================================
66 */
67
68 static int
69 zpool_get_all_props(zpool_handle_t *zhp)
70 {
71 zfs_cmd_t zc = { 0 };
72 libzfs_handle_t *hdl = zhp->zpool_hdl;
73
407 }
408
409 boolean_t
410 zpool_is_bootable(zpool_handle_t *zhp)
411 {
412 char bootfs[ZFS_MAX_DATASET_NAME_LEN];
413
414 return (zpool_get_prop(zhp, ZPOOL_PROP_BOOTFS, bootfs,
415 sizeof (bootfs), NULL, B_FALSE) == 0 && strncmp(bootfs, "-",
416 sizeof (bootfs)) != 0);
417 }
418
419
420 /*
421 * Given an nvlist of zpool properties to be set, validate that they are
422 * correct, and parse any numeric properties (index, boolean, etc) if they are
423 * specified as strings.
424 */
425 static nvlist_t *
426 zpool_valid_proplist(libzfs_handle_t *hdl, const char *poolname,
427 nvlist_t *props, uint64_t version, prop_flags_t flags, char *errbuf)
428 {
429 nvpair_t *elem;
430 nvlist_t *retprops;
431 zpool_prop_t prop;
432 char *strval;
433 uint64_t intval;
434 char *slash, *check;
435 struct stat64 statbuf;
436 zpool_handle_t *zhp;
437
438 if (nvlist_alloc(&retprops, NV_UNIQUE_NAME, 0) != 0) {
439 (void) no_memory(hdl);
440 return (NULL);
441 }
442
443 elem = NULL;
444 while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
445 const char *propname = nvpair_name(elem);
446
447 prop = zpool_name_to_prop(propname);
448 if (prop == ZPOOL_PROP_INVAL && zpool_prop_feature(propname)) {
449 int err;
450 char *fname = strchr(propname, '@') + 1;
451
452 err = zfeature_lookup_name(fname, NULL);
453 if (err != 0) {
454 ASSERT3U(err, ==, ENOENT);
455 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
456 "invalid feature '%s'"), fname);
457 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
458 goto error;
459 }
460
461 if (nvpair_type(elem) != DATA_TYPE_STRING) {
462 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
463 "'%s' must be a string"), propname);
464 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
465 goto error;
466 }
467
468 (void) nvpair_value_string(elem, &strval);
469 if (strcmp(strval, ZFS_FEATURE_ENABLED) != 0) {
470 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
471 "property '%s' can only be set to "
472 "'enabled'"), propname);
473 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
474 goto error;
475 }
476
477 if (nvlist_add_uint64(retprops, propname, 0) != 0) {
478 (void) no_memory(hdl);
479 goto error;
480 }
481 continue;
482 }
483
484 /*
485 * Make sure this property is valid and applies to this type.
486 */
487 if (prop == ZPOOL_PROP_INVAL) {
488 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
489 "invalid property '%s'"), propname);
490 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
491 goto error;
492 }
493
494 if (zpool_prop_readonly(prop)) {
495 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
496 "is readonly"), propname);
497 (void) zfs_error(hdl, EZFS_PROPREADONLY, errbuf);
498 goto error;
499 }
500
501 if (zprop_parse_value(hdl, elem, prop, ZFS_TYPE_POOL, retprops,
502 &strval, &intval, errbuf) != 0)
503 goto error;
504
505 /*
506 * Perform additional checking for specific properties.
507 */
707 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
708
709 if (zcmd_write_src_nvlist(zhp->zpool_hdl, &zc, nvl) != 0) {
710 nvlist_free(nvl);
711 return (-1);
712 }
713
714 ret = zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_SET_PROPS, &zc);
715
716 zcmd_free_nvlists(&zc);
717 nvlist_free(nvl);
718
719 if (ret)
720 (void) zpool_standard_error(zhp->zpool_hdl, errno, errbuf);
721 else
722 (void) zpool_props_refresh(zhp);
723
724 return (ret);
725 }
726
727 int
728 zpool_expand_proplist(zpool_handle_t *zhp, zprop_list_t **plp)
729 {
730 libzfs_handle_t *hdl = zhp->zpool_hdl;
731 zprop_list_t *entry;
732 char buf[ZFS_MAXPROPLEN];
733 nvlist_t *features = NULL;
734 zprop_list_t **last;
735 boolean_t firstexpand = (NULL == *plp);
736
737 if (zprop_expand_list(hdl, plp, ZFS_TYPE_POOL) != 0)
738 return (-1);
739
740 last = plp;
741 while (*last != NULL)
742 last = &(*last)->pl_next;
743
744 if ((*plp)->pl_all)
745 features = zpool_get_features(zhp);
746
747 if ((*plp)->pl_all && firstexpand) {
/*
 * Return the pool state recorded in the handle.  This is the cached
 * value only; no kernel round trip is made.
 */
int
zpool_get_state(zpool_handle_t *zhp)
{
	return (zhp->zpool_state);
}
1105
1106 /*
1107 * Create the named pool, using the provided vdev list. It is assumed
1108 * that the consumer has already validated the contents of the nvlist, so we
1109 * don't have to worry about error semantics.
1110 */
1111 int
1112 zpool_create(libzfs_handle_t *hdl, const char *pool, nvlist_t *nvroot,
1113 nvlist_t *props, nvlist_t *fsprops)
1114 {
1115 zfs_cmd_t zc = { 0 };
1116 nvlist_t *zc_fsprops = NULL;
1117 nvlist_t *zc_props = NULL;
1118 char msg[1024];
1119 int ret = -1;
1120
1121 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
1122 "cannot create '%s'"), pool);
1123
1124 if (!zpool_name_valid(hdl, B_FALSE, pool))
1125 return (zfs_error(hdl, EZFS_INVALIDNAME, msg));
1126
1127 if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
1128 return (-1);
1129
1130 if (props) {
1131 prop_flags_t flags = { .create = B_TRUE, .import = B_FALSE };
1132
1133 if ((zc_props = zpool_valid_proplist(hdl, pool, props,
1134 SPA_VERSION_1, flags, msg)) == NULL) {
1135 goto create_failed;
1136 }
1137 }
1138
1139 if (fsprops) {
1140 uint64_t zoned;
1141 char *zonestr;
1142
1143 zoned = ((nvlist_lookup_string(fsprops,
1144 zfs_prop_to_name(ZFS_PROP_ZONED), &zonestr) == 0) &&
1145 strcmp(zonestr, "on") == 0);
1146
1147 if ((zc_fsprops = zfs_valid_proplist(hdl, ZFS_TYPE_FILESYSTEM,
1148 fsprops, zoned, NULL, NULL, msg)) == NULL) {
1149 goto create_failed;
1150 }
1151 if (!zc_props &&
1152 (nvlist_alloc(&zc_props, NV_UNIQUE_NAME, 0) != 0)) {
1153 goto create_failed;
1154 }
1155 if (nvlist_add_nvlist(zc_props,
1156 ZPOOL_ROOTFS_PROPS, zc_fsprops) != 0) {
1157 goto create_failed;
1158 }
1159 }
1160
1161 if (zc_props && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0)
1162 goto create_failed;
1163
1164 (void) strlcpy(zc.zc_name, pool, sizeof (zc.zc_name));
1165
1166 if ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_CREATE, &zc)) != 0) {
1167
1168 zcmd_free_nvlists(&zc);
1169 nvlist_free(zc_props);
1170 nvlist_free(zc_fsprops);
1207 char buf[64];
1208
1209 zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));
1210
1211 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1212 "one or more devices is less than the "
1213 "minimum size (%s)"), buf);
1214 }
1215 return (zfs_error(hdl, EZFS_BADDEV, msg));
1216
1217 case ENOSPC:
1218 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1219 "one or more devices is out of space"));
1220 return (zfs_error(hdl, EZFS_BADDEV, msg));
1221
1222 case ENOTBLK:
1223 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1224 "cache device must be a disk or disk slice"));
1225 return (zfs_error(hdl, EZFS_BADDEV, msg));
1226
1227 default:
1228 return (zpool_standard_error(hdl, errno, msg));
1229 }
1230 }
1231
1232 create_failed:
1233 zcmd_free_nvlists(&zc);
1234 nvlist_free(zc_props);
1235 nvlist_free(zc_fsprops);
1236 return (ret);
1237 }
1238
1239 /*
1240 * Destroy the given pool. It is up to the caller to ensure that there are no
1241 * datasets left in the pool.
1242 */
1243 int
1244 zpool_destroy(zpool_handle_t *zhp, const char *log_str)
1245 {
1246 zfs_cmd_t zc = { 0 };
1316 }
1317
1318 if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
1319 return (-1);
1320 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1321
1322 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_ADD, &zc) != 0) {
1323 switch (errno) {
1324 case EBUSY:
1325 /*
1326 * This can happen if the user has specified the same
1327 * device multiple times. We can't reliably detect this
1328 * until we try to add it and see we already have a
1329 * label.
1330 */
1331 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1332 "one or more vdevs refer to the same device"));
1333 (void) zfs_error(hdl, EZFS_BADDEV, msg);
1334 break;
1335
1336 case EINVAL:
1337 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1338 "invalid config; a pool with removing/removed "
1339 "vdevs does not support adding raidz vdevs"));
1340 (void) zfs_error(hdl, EZFS_BADDEV, msg);
1341 break;
1342
1343 case EOVERFLOW:
1344 /*
1345 * This occurrs when one of the devices is below
1346 * SPA_MINDEVSIZE. Unfortunately, we can't detect which
1347 * device was the problem device since there's no
1348 * reliable way to determine device size from userland.
1349 */
1350 {
1351 char buf[64];
1352
1353 zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));
1354
1355 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1356 "device is less than the minimum "
1357 "size (%s)"), buf);
1358 }
1359 (void) zfs_error(hdl, EZFS_BADDEV, msg);
1360 break;
1361
1362 case ENOTSUP:
1381 default:
1382 (void) zpool_standard_error(hdl, errno, msg);
1383 }
1384
1385 ret = -1;
1386 } else {
1387 ret = 0;
1388 }
1389
1390 zcmd_free_nvlists(&zc);
1391
1392 return (ret);
1393 }
1394
1395 /*
1396 * Exports the pool from the system. The caller must ensure that there are no
1397 * mounted datasets in the pool.
1398 */
1399 static int
1400 zpool_export_common(zpool_handle_t *zhp, boolean_t force, boolean_t hardforce,
1401 const char *log_str)
1402 {
1403 zfs_cmd_t zc = { 0 };
1404 char msg[1024];
1405
1406 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
1407 "cannot export '%s'"), zhp->zpool_name);
1408
1409 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1410 zc.zc_cookie = force;
1411 zc.zc_guid = hardforce;
1412 zc.zc_history = (uint64_t)(uintptr_t)log_str;
1413
1414 if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_EXPORT, &zc) != 0) {
1415 switch (errno) {
1416 case EXDEV:
1417 zfs_error_aux(zhp->zpool_hdl, dgettext(TEXT_DOMAIN,
1418 "use '-f' to override the following errors:\n"
1419 "'%s' has an active shared spare which could be"
1420 " used by other pools once '%s' is exported."),
1421 zhp->zpool_name, zhp->zpool_name);
1422 return (zfs_error(zhp->zpool_hdl, EZFS_ACTIVE_SPARE,
1423 msg));
1424 default:
1425 return (zpool_standard_error_fmt(zhp->zpool_hdl, errno,
1426 msg));
1427 }
1428 }
1429
1430 return (0);
1431 }
1432
/*
 * Export the pool, logging 'log_str' to pool history.  Thin wrapper
 * around zpool_export_common() with hardforce disabled.
 */
int
zpool_export(zpool_handle_t *zhp, boolean_t force, const char *log_str)
{
	return (zpool_export_common(zhp, force, B_FALSE, log_str));
}
1438
/*
 * Forcibly export the pool: wrapper around zpool_export_common() with
 * both force and hardforce enabled.
 */
int
zpool_export_force(zpool_handle_t *zhp, const char *log_str)
{
	return (zpool_export_common(zhp, B_TRUE, B_TRUE, log_str));
}
1444
1445 static void
1446 zpool_rewind_exclaim(libzfs_handle_t *hdl, const char *name, boolean_t dryrun,
1447 nvlist_t *config)
1448 {
1449 nvlist_t *nv = NULL;
1450 uint64_t rewindto;
1451 int64_t loss = -1;
1452 struct tm t;
1453 char timestr[128];
1454
1455 if (!hdl->libzfs_printerr || config == NULL)
1456 return;
1457
1458 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 ||
1459 nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_REWIND_INFO, &nv) != 0) {
1460 return;
1461 }
1462
1791 */
1792 (void) zfs_error(hdl, EZFS_BADVERSION, desc);
1793 break;
1794
1795 case EINVAL:
1796 (void) zfs_error(hdl, EZFS_INVALCONFIG, desc);
1797 break;
1798
1799 case EROFS:
1800 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1801 "one or more devices is read only"));
1802 (void) zfs_error(hdl, EZFS_BADDEV, desc);
1803 break;
1804
1805 case ENXIO:
1806 if (nv && nvlist_lookup_nvlist(nv,
1807 ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 &&
1808 nvlist_lookup_nvlist(nvinfo,
1809 ZPOOL_CONFIG_MISSING_DEVICES, &missing) == 0) {
1810 (void) printf(dgettext(TEXT_DOMAIN,
1811 "The devices below are missing or "
1812 "corrupted, use '-m' to import the pool "
1813 "anyway:\n"));
1814 print_vdev_tree(hdl, NULL, missing, 2);
1815 (void) printf("\n");
1816 }
1817 (void) zpool_standard_error(hdl, error, desc);
1818 break;
1819
1820 case EEXIST:
1821 (void) zpool_standard_error(hdl, error, desc);
1822 break;
1823 case ENAMETOOLONG:
1824 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1825 "new name of at least one dataset is longer than "
1826 "the maximum allowable length"));
1827 (void) zfs_error(hdl, EZFS_NAMETOOLONG, desc);
1828 break;
1829 default:
1830 (void) zpool_standard_error(hdl, error, desc);
1831 zpool_explain_recover(hdl,
1832 newname ? origname : thename, -error, nv);
1833 break;
1910 verify(nvlist_lookup_nvlist(zhp->zpool_config,
1911 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
1912 (void) nvlist_lookup_uint64_array(nvroot,
1913 ZPOOL_CONFIG_SCAN_STATS, (uint64_t **)&ps, &psc);
1914 if (ps && ps->pss_func == POOL_SCAN_SCRUB) {
1915 if (cmd == POOL_SCRUB_PAUSE)
1916 return (zfs_error(hdl, EZFS_SCRUB_PAUSED, msg));
1917 else
1918 return (zfs_error(hdl, EZFS_SCRUBBING, msg));
1919 } else {
1920 return (zfs_error(hdl, EZFS_RESILVERING, msg));
1921 }
1922 } else if (err == ENOENT) {
1923 return (zfs_error(hdl, EZFS_NO_SCRUB, msg));
1924 } else {
1925 return (zpool_standard_error(hdl, err, msg));
1926 }
1927 }
1928
1929 /*
1930 * This provides a very minimal check whether a given string is likely a
1931 * c#t#d# style string. Users of this are expected to do their own
1932 * verification of the s# part.
1933 */
/*
 * The argument is fully parenthesized so expressions expand safely, and
 * the character is cast to unsigned char before isdigit(): passing a
 * plain (possibly negative) char to <ctype.h> functions is undefined
 * behavior.
 */
#define	CTD_CHECK(str)	\
	((str) && (str)[0] == 'c' && isdigit((unsigned char)(str)[1]))
1935
1936 /*
1937 * More elaborate version for ones which may start with "/dev/dsk/"
1938 * and the like.
1939 */
static int
ctd_check_path(char *str)
{
	/*
	 * For absolute paths, reduce the string to its final path
	 * component before applying the c#t#d# check.
	 */
	if (str != NULL && *str == '/') {
		char *comp = strrchr(str, '/');

		/*
		 * A trailing "/old" component is skipped so the device
		 * name in the second-to-last component is examined.
		 */
		if (comp != str && strcmp(comp, "/old") == 0) {
			do {
				comp--;
			} while (*comp != '/');
		}
		str = comp + 1;
	}
	return (CTD_CHECK(str));
}
1961
1962 /*
1963 * Find a vdev that matches the search criteria specified. We use the
1964 * the nvpair name to determine how we should look for the device.
1965 * 'avail_spare' is set to TRUE if the provided guid refers to an AVAIL
1966 * spare; but FALSE if its an INUSE spare.
1967 */
1968 static nvlist_t *
1969 vdev_to_nvlist_iter(nvlist_t *nv, nvlist_t *search, boolean_t *avail_spare,
1970 boolean_t *l2cache, boolean_t *log)
1971 {
1972 uint_t c, children;
1973 nvlist_t **child;
1974 nvlist_t *ret;
1975 uint64_t is_log;
1976 char *srchkey;
1977 nvpair_t *pair = nvlist_next_nvpair(search, NULL);
1978
1979 /* Nothing to look for */
1980 if (search == NULL || pair == NULL)
1981 return (NULL);
1982
1983 /* Obtain the key we will use to search */
1984 srchkey = nvpair_name(pair);
1985
1986 switch (nvpair_type(pair)) {
1987 case DATA_TYPE_UINT64:
1988 if (strcmp(srchkey, ZPOOL_CONFIG_GUID) == 0) {
1989 uint64_t srchval, theguid;
1990
1991 verify(nvpair_value_uint64(pair, &srchval) == 0);
1992 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
1993 &theguid) == 0);
1994 if (theguid == srchval)
1995 return (nv);
2064 * vdev id pair (i.e. mirror-4).
2065 */
2066 if ((type = strdup(srchval)) == NULL)
2067 return (NULL);
2068
2069 if ((p = strrchr(type, '-')) == NULL) {
2070 free(type);
2071 break;
2072 }
2073 idx = p + 1;
2074 *p = '\0';
2075
2076 /*
2077 * If the types don't match then keep looking.
2078 */
2079 if (strncmp(val, type, strlen(val)) != 0) {
2080 free(type);
2081 break;
2082 }
2083
2084 verify(zpool_vdev_is_interior(type));
2085 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID,
2086 &id) == 0);
2087
2088 errno = 0;
2089 vdev_id = strtoull(idx, &end, 10);
2090
2091 free(type);
2092 if (errno != 0)
2093 return (NULL);
2094
2095 /*
2096 * Now verify that we have the correct vdev id.
2097 */
2098 if (vdev_id == id)
2099 return (nv);
2100 }
2101
2102 /*
2103 * Common case
2104 */
2105 if (strcmp(srchval, val) == 0)
2106 return (nv);
2107 break;
2108 }
2109
2110 default:
2111 break;
2112 }
2113
2114 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
2115 &child, &children) != 0)
2116 return (NULL);
2117
2118 for (c = 0; c < children; c++) {
2119 if ((ret = vdev_to_nvlist_iter(child[c], search,
2120 avail_spare, l2cache, NULL)) != NULL) {
2121 /*
2122 * The 'is_log' value is only set for the toplevel
2123 * vdev, not the leaf vdevs. So we always lookup the
2124 * log device from the root of the vdev tree (where
2125 * 'log' is non-NULL).
2126 */
2127 if (log != NULL &&
2128 nvlist_lookup_uint64(child[c],
2129 ZPOOL_CONFIG_IS_LOG, &is_log) == 0 &&
2130 is_log) {
2131 *log = B_TRUE;
2132 }
2133 return (ret);
2134 }
2135 }
2136
2137 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
2138 &child, &children) == 0) {
2139 for (c = 0; c < children; c++) {
2140 if ((ret = vdev_to_nvlist_iter(child[c], search,
2141 avail_spare, l2cache, NULL)) != NULL) {
2142 *avail_spare = B_TRUE;
2143 return (ret);
2144 }
2145 }
2146 }
2147
2148 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
2149 &child, &children) == 0) {
2150 for (c = 0; c < children; c++) {
2151 if ((ret = vdev_to_nvlist_iter(child[c], search,
2152 avail_spare, l2cache, NULL)) != NULL) {
2153 *l2cache = B_TRUE;
2154 return (ret);
2155 }
2156 }
2157 }
2158
2159 return (NULL);
2160 }
2161
2162 /*
2163 * Given a physical path (minus the "/devices" prefix), find the
2164 * associated vdev.
2165 */
2166 nvlist_t *
2167 zpool_find_vdev_by_physpath(zpool_handle_t *zhp, const char *ppath,
2168 boolean_t *avail_spare, boolean_t *l2cache, boolean_t *log)
2169 {
2170 nvlist_t *search, *nvroot, *ret;
2171
2172 verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0);
2173 verify(nvlist_add_string(search, ZPOOL_CONFIG_PHYS_PATH, ppath) == 0);
2174
2175 verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
2176 &nvroot) == 0);
2177
2178 *avail_spare = B_FALSE;
2179 *l2cache = B_FALSE;
2180 if (log != NULL)
2181 *log = B_FALSE;
2182 ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log);
2183 nvlist_free(search);
2184
2185 return (ret);
2186 }
2187
2188 /*
2189 * Determine if we have an "interior" top-level vdev (i.e mirror/raidz).
2190 */
2191 static boolean_t
2192 zpool_vdev_is_interior(const char *name)
2193 {
2194 if (strncmp(name, VDEV_TYPE_RAIDZ, strlen(VDEV_TYPE_RAIDZ)) == 0 ||
2195 strncmp(name, VDEV_TYPE_SPARE, strlen(VDEV_TYPE_SPARE)) == 0 ||
2196 strncmp(name,
2197 VDEV_TYPE_REPLACING, strlen(VDEV_TYPE_REPLACING)) == 0 ||
2198 strncmp(name, VDEV_TYPE_MIRROR, strlen(VDEV_TYPE_MIRROR)) == 0)
2199 return (B_TRUE);
2200 return (B_FALSE);
2201 }
2202
2203 nvlist_t *
2204 zpool_find_vdev(zpool_handle_t *zhp, const char *path, boolean_t *avail_spare,
2205 boolean_t *l2cache, boolean_t *log)
2206 {
2207 char buf[MAXPATHLEN];
2208 char *end;
2209 nvlist_t *nvroot, *search, *ret;
2210 uint64_t guid;
2211
2212 verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0);
2213
2214 guid = strtoull(path, &end, 10);
2215 if (guid != 0 && *end == '\0') {
2216 verify(nvlist_add_uint64(search, ZPOOL_CONFIG_GUID, guid) == 0);
2217 } else if (zpool_vdev_is_interior(path)) {
2218 verify(nvlist_add_string(search, ZPOOL_CONFIG_TYPE, path) == 0);
2219 } else if (path[0] != '/') {
2220 (void) snprintf(buf, sizeof (buf), "%s/%s", ZFS_DISK_ROOT,
2221 path);
2222 verify(nvlist_add_string(search, ZPOOL_CONFIG_PATH, buf) == 0);
2223 } else {
2224 verify(nvlist_add_string(search, ZPOOL_CONFIG_PATH, path) == 0);
2225 }
2226
2227 verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
2228 &nvroot) == 0);
2229
2230 *avail_spare = B_FALSE;
2231 *l2cache = B_FALSE;
2232 if (log != NULL)
2233 *log = B_FALSE;
2234 ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log);
2235 nvlist_free(search);
2236
2237 return (ret);
2238 }
2239
2240 static int
2241 vdev_online(nvlist_t *nv)
2242 {
2243 uint64_t ival;
2244
2245 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE, &ival) == 0 ||
2246 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED, &ival) == 0 ||
2247 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED, &ival) == 0)
2248 return (0);
2249
2250 return (1);
2251 }
2252
2253 /*
2254 * Helper function for zpool_get_physpaths().
2432 zpool_vdev_online(zpool_handle_t *zhp, const char *path, int flags,
2433 vdev_state_t *newstate)
2434 {
2435 zfs_cmd_t zc = { 0 };
2436 char msg[1024];
2437 char *pathname;
2438 nvlist_t *tgt;
2439 boolean_t avail_spare, l2cache, islog;
2440 libzfs_handle_t *hdl = zhp->zpool_hdl;
2441
2442 if (flags & ZFS_ONLINE_EXPAND) {
2443 (void) snprintf(msg, sizeof (msg),
2444 dgettext(TEXT_DOMAIN, "cannot expand %s"), path);
2445 } else {
2446 (void) snprintf(msg, sizeof (msg),
2447 dgettext(TEXT_DOMAIN, "cannot online %s"), path);
2448 }
2449
2450 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2451 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
2452 &islog)) == NULL)
2453 return (zfs_error(hdl, EZFS_NODEVICE, msg));
2454
2455 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2456
2457 if (avail_spare)
2458 return (zfs_error(hdl, EZFS_ISSPARE, msg));
2459
2460 if ((flags & ZFS_ONLINE_EXPAND ||
2461 zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOEXPAND, NULL)) &&
2462 nvlist_lookup_string(tgt, ZPOOL_CONFIG_PATH, &pathname) == 0) {
2463 uint64_t wholedisk = 0;
2464
2465 (void) nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_WHOLE_DISK,
2466 &wholedisk);
2467
2468 /*
2469 * XXX - L2ARC 1.0 devices can't support expansion.
2470 */
2471 if (l2cache) {
2472 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2473 "cannot expand cache devices"));
2474 return (zfs_error(hdl, EZFS_VDEVNOTSUP, msg));
2475 }
2476
2477 if (wholedisk) {
2478 pathname += strlen(ZFS_DISK_ROOT) + 1;
2479 (void) zpool_relabel_disk(hdl, pathname);
2497 return (0);
2498 }
2499
2500 /*
2501 * Take the specified vdev offline
2502 */
2503 int
2504 zpool_vdev_offline(zpool_handle_t *zhp, const char *path, boolean_t istmp)
2505 {
2506 zfs_cmd_t zc = { 0 };
2507 char msg[1024];
2508 nvlist_t *tgt;
2509 boolean_t avail_spare, l2cache;
2510 libzfs_handle_t *hdl = zhp->zpool_hdl;
2511
2512 (void) snprintf(msg, sizeof (msg),
2513 dgettext(TEXT_DOMAIN, "cannot offline %s"), path);
2514
2515 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2516 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
2517 NULL)) == NULL)
2518 return (zfs_error(hdl, EZFS_NODEVICE, msg));
2519
2520 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2521
2522 if (avail_spare)
2523 return (zfs_error(hdl, EZFS_ISSPARE, msg));
2524
2525 zc.zc_cookie = VDEV_STATE_OFFLINE;
2526 zc.zc_obj = istmp ? ZFS_OFFLINE_TEMPORARY : 0;
2527
2528 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
2529 return (0);
2530
2531 switch (errno) {
2532 case EBUSY:
2533
2534 /*
2535 * There are no other replicas of this device.
2536 */
2537 return (zfs_error(hdl, EZFS_NOREPLICAS, msg));
2647 int ret;
2648 nvlist_t *tgt;
2649 boolean_t avail_spare, l2cache, islog;
2650 uint64_t val;
2651 char *newname;
2652 nvlist_t **child;
2653 uint_t children;
2654 nvlist_t *config_root;
2655 libzfs_handle_t *hdl = zhp->zpool_hdl;
2656 boolean_t rootpool = zpool_is_bootable(zhp);
2657
2658 if (replacing)
2659 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
2660 "cannot replace %s with %s"), old_disk, new_disk);
2661 else
2662 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
2663 "cannot attach %s to %s"), new_disk, old_disk);
2664
2665 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2666 if ((tgt = zpool_find_vdev(zhp, old_disk, &avail_spare, &l2cache,
2667 &islog)) == NULL)
2668 return (zfs_error(hdl, EZFS_NODEVICE, msg));
2669
2670 if (avail_spare)
2671 return (zfs_error(hdl, EZFS_ISSPARE, msg));
2672
2673 if (l2cache)
2674 return (zfs_error(hdl, EZFS_ISL2CACHE, msg));
2675
2676 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2677 zc.zc_cookie = replacing;
2678
2679 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
2680 &child, &children) != 0 || children != 1) {
2681 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2682 "new device must be a single disk"));
2683 return (zfs_error(hdl, EZFS_INVALCONFIG, msg));
2684 }
2685
2686 verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
2687 ZPOOL_CONFIG_VDEV_TREE, &config_root) == 0);
2688
2689 if ((newname = zpool_vdev_name(NULL, NULL, child[0], B_FALSE)) == NULL)
2690 return (-1);
2691
2692 /*
2693 * If the target is a hot spare that has been swapped in, we can only
2694 * replace it with another hot spare.
2695 */
2696 if (replacing &&
2697 nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_IS_SPARE, &val) == 0 &&
2698 (zpool_find_vdev(zhp, newname, &avail_spare, &l2cache,
2699 NULL) == NULL || !avail_spare) &&
2700 is_replacing_spare(config_root, tgt, 1)) {
2701 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2702 "can only be replaced by another hot spare"));
2703 free(newname);
2704 return (zfs_error(hdl, EZFS_BADTARGET, msg));
2705 }
2706
2707 free(newname);
2708
2709 if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
2710 return (-1);
2711
2712 ret = zfs_ioctl(hdl, ZFS_IOC_VDEV_ATTACH, &zc);
2713
2714 zcmd_free_nvlists(&zc);
2715
2716 if (ret == 0) {
2717 if (rootpool) {
2718 /*
2719 * XXX need a better way to prevent user from
2746 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2747 "cannot replace a replacing device"));
2748 } else {
2749 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2750 "can only attach to mirrors and top-level "
2751 "disks"));
2752 }
2753 (void) zfs_error(hdl, EZFS_BADTARGET, msg);
2754 break;
2755
2756 case EINVAL:
2757 /*
2758 * The new device must be a single disk.
2759 */
2760 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2761 "new device must be a single disk"));
2762 (void) zfs_error(hdl, EZFS_INVALCONFIG, msg);
2763 break;
2764
2765 case EBUSY:
2766 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "%s is busy, "
2767 "or pool has removing/removed vdevs"),
2768 new_disk);
2769 (void) zfs_error(hdl, EZFS_BADDEV, msg);
2770 break;
2771
2772 case EOVERFLOW:
2773 /*
2774 * The new device is too small.
2775 */
2776 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2777 "device is too small"));
2778 (void) zfs_error(hdl, EZFS_BADDEV, msg);
2779 break;
2780
2781 case EDOM:
2782 /*
2783 * The new device has a different alignment requirement.
2784 */
2785 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2786 "devices have different sector alignment"));
2787 (void) zfs_error(hdl, EZFS_BADDEV, msg);
2801 return (-1);
2802 }
2803
2804 /*
2805 * Detach the specified device.
2806 */
2807 int
2808 zpool_vdev_detach(zpool_handle_t *zhp, const char *path)
2809 {
2810 zfs_cmd_t zc = { 0 };
2811 char msg[1024];
2812 nvlist_t *tgt;
2813 boolean_t avail_spare, l2cache;
2814 libzfs_handle_t *hdl = zhp->zpool_hdl;
2815
2816 (void) snprintf(msg, sizeof (msg),
2817 dgettext(TEXT_DOMAIN, "cannot detach %s"), path);
2818
2819 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2820 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
2821 NULL)) == NULL)
2822 return (zfs_error(hdl, EZFS_NODEVICE, msg));
2823
2824 if (avail_spare)
2825 return (zfs_error(hdl, EZFS_ISSPARE, msg));
2826
2827 if (l2cache)
2828 return (zfs_error(hdl, EZFS_ISL2CACHE, msg));
2829
2830 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2831
2832 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_DETACH, &zc) == 0)
2833 return (0);
2834
2835 switch (errno) {
2836
2837 case ENOTSUP:
2838 /*
2839 * Can't detach from this type of vdev.
2840 */
2841 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "only "
3090 free(varray);
3091 }
3092 zcmd_free_nvlists(&zc);
3093 nvlist_free(zc_props);
3094 nvlist_free(newconfig);
3095 if (freelist) {
3096 nvlist_free(*newroot);
3097 *newroot = NULL;
3098 }
3099
3100 if (retval != 0)
3101 return (retval);
3102
3103 if (memory_err)
3104 return (no_memory(hdl));
3105
3106 return (0);
3107 }
3108
3109 /*
3110 * Remove the given device.
3111 */
3112 int
3113 zpool_vdev_remove(zpool_handle_t *zhp, const char *path)
3114 {
3115 zfs_cmd_t zc = { 0 };
3116 char msg[1024];
3117 nvlist_t *tgt;
3118 boolean_t avail_spare, l2cache, islog;
3119 libzfs_handle_t *hdl = zhp->zpool_hdl;
3120 uint64_t version;
3121
3122 (void) snprintf(msg, sizeof (msg),
3123 dgettext(TEXT_DOMAIN, "cannot remove %s"), path);
3124
3125 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3126 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
3127 &islog)) == NULL)
3128 return (zfs_error(hdl, EZFS_NODEVICE, msg));
3129
3130 version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
3131 if (islog && version < SPA_VERSION_HOLES) {
3132 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3133 "pool must be upgraded to support log removal"));
3134 return (zfs_error(hdl, EZFS_BADVERSION, msg));
3135 }
3136
3137 if (!islog && !avail_spare && !l2cache && zpool_is_bootable(zhp)) {
3138 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3139 "root pool can not have removed devices, "
3140 "because GRUB does not understand them"));
3141 return (zfs_error(hdl, EINVAL, msg));
3142 }
3143
3144 zc.zc_guid = fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID);
3145
3146 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_REMOVE, &zc) == 0)
3147 return (0);
3148
3149 switch (errno) {
3150
3151 case EINVAL:
3152 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3153 "invalid config; all top-level vdevs must "
3154 "have the same sector size and not be raidz."));
3155 (void) zfs_error(hdl, EZFS_INVALCONFIG, msg);
3156 break;
3157
3158 case EBUSY:
3159 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3160 "Pool busy; removal may already be in progress"));
3161 (void) zfs_error(hdl, EZFS_BUSY, msg);
3162 break;
3163
3164 default:
3165 (void) zpool_standard_error(hdl, errno, msg);
3166 }
3167 return (-1);
3168 }
3169
3170 int
3171 zpool_vdev_remove_cancel(zpool_handle_t *zhp)
3172 {
3173 zfs_cmd_t zc = { 0 };
3174 char msg[1024];
3175 libzfs_handle_t *hdl = zhp->zpool_hdl;
3176
3177 (void) snprintf(msg, sizeof (msg),
3178 dgettext(TEXT_DOMAIN, "cannot cancel removal"));
3179
3180 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3181 zc.zc_cookie = 1;
3182
3183 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_REMOVE, &zc) == 0)
3184 return (0);
3185
3186 return (zpool_standard_error(hdl, errno, msg));
3187 }
3188
3189 int
3190 zpool_vdev_indirect_size(zpool_handle_t *zhp, const char *path,
3191 uint64_t *sizep)
3192 {
3193 char msg[1024];
3194 nvlist_t *tgt;
3195 boolean_t avail_spare, l2cache, islog;
3196 libzfs_handle_t *hdl = zhp->zpool_hdl;
3197
3198 (void) snprintf(msg, sizeof (msg),
3199 dgettext(TEXT_DOMAIN, "cannot determine indirect size of %s"),
3200 path);
3201
3202 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
3203 &islog)) == NULL)
3204 return (zfs_error(hdl, EZFS_NODEVICE, msg));
3205
3206 if (avail_spare || l2cache || islog) {
3207 *sizep = 0;
3208 return (0);
3209 }
3210
3211 if (nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_INDIRECT_SIZE, sizep) != 0) {
3212 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3213 "indirect size not available"));
3214 return (zfs_error(hdl, EINVAL, msg));
3215 }
3216 return (0);
3217 }
3218
3219 /*
3220 * Clear the errors for the pool, or the particular device if specified.
3221 */
3222 int
3223 zpool_clear(zpool_handle_t *zhp, const char *path, nvlist_t *rewindnvl)
3224 {
3225 zfs_cmd_t zc = { 0 };
3226 char msg[1024];
3227 nvlist_t *tgt;
3228 zpool_rewind_policy_t policy;
3229 boolean_t avail_spare, l2cache;
3230 libzfs_handle_t *hdl = zhp->zpool_hdl;
3231 nvlist_t *nvi = NULL;
3232 int error;
3233
3234 if (path)
3235 (void) snprintf(msg, sizeof (msg),
3236 dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
3237 path);
3238 else
3239 (void) snprintf(msg, sizeof (msg),
3240 dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
3241 zhp->zpool_name);
3242
3243 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3244 if (path) {
3245 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare,
3246 &l2cache, NULL)) == NULL)
3247 return (zfs_error(hdl, EZFS_NODEVICE, msg));
3248
3249 /*
3250 * Don't allow error clearing for hot spares. Do allow
3251 * error clearing for l2cache devices.
3252 */
3253 if (avail_spare)
3254 return (zfs_error(hdl, EZFS_ISSPARE, msg));
3255
3256 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID,
3257 &zc.zc_guid) == 0);
3258 }
3259
3260 zpool_get_rewind_policy(rewindnvl, &policy);
3261 zc.zc_cookie = policy.zrp_request;
3262
3263 if (zcmd_alloc_dst_nvlist(hdl, &zc, zhp->zpool_config_size * 2) != 0)
3264 return (-1);
3265
3266 if (zcmd_write_src_nvlist(hdl, &zc, rewindnvl) != 0)
3382 /*
3383 * In a case the strdup() fails, we will just return NULL below.
3384 */
3385 path = strdup(list[0].devname);
3386
3387 devid_free_nmlist(list);
3388
3389 return (path);
3390 }
3391
3392 /*
3393 * Convert from a path to a devid string.
3394 */
3395 static char *
3396 path_to_devid(const char *path)
3397 {
3398 int fd;
3399 ddi_devid_t devid;
3400 char *minor, *ret;
3401
3402 if ((fd = open(path, O_RDONLY)) < 0)
3403 return (NULL);
3404
3405 minor = NULL;
3406 ret = NULL;
3407 if (devid_get(fd, &devid) == 0) {
3408 if (devid_get_minor_name(fd, &minor) == 0)
3409 ret = devid_str_encode(devid, minor);
3410 if (minor != NULL)
3411 devid_str_free(minor);
3412 devid_free(devid);
3413 }
3414 (void) close(fd);
3415
3416 return (ret);
3417 }
3418
3419 /*
3420 * Issue the necessary ioctl() to update the stored path value for the vdev. We
3421 * ignore any failure here, since a common case is for an unprivileged user to
3422 * type 'zpool status', and we'll display the correct information anyway.
3699 }
3700 }
3701
3702 int
3703 zpool_log_history(libzfs_handle_t *hdl, const char *message)
3704 {
3705 zfs_cmd_t zc = { 0 };
3706 nvlist_t *args;
3707 int err;
3708
3709 args = fnvlist_alloc();
3710 fnvlist_add_string(args, "message", message);
3711 err = zcmd_write_src_nvlist(hdl, &zc, args);
3712 if (err == 0)
3713 err = ioctl(hdl->libzfs_fd, ZFS_IOC_LOG_HISTORY, &zc);
3714 nvlist_free(args);
3715 zcmd_free_nvlists(&zc);
3716 return (err);
3717 }
3718
3719 /*
3720 * Perform ioctl to get some command history of a pool.
3721 *
3722 * 'buf' is the buffer to fill up to 'len' bytes. 'off' is the
3723 * logical offset of the history buffer to start reading from.
3724 *
3725 * Upon return, 'off' is the next logical offset to read from and
3726 * 'len' is the actual amount of bytes read into 'buf'.
3727 */
3728 static int
3729 get_history(zpool_handle_t *zhp, char *buf, uint64_t *off, uint64_t *len)
3730 {
3731 zfs_cmd_t zc = { 0 };
3732 libzfs_handle_t *hdl = zhp->zpool_hdl;
3733
3734 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3735
3736 zc.zc_history = (uint64_t)(uintptr_t)buf;
3737 zc.zc_history_len = *len;
3738 zc.zc_history_offset = *off;
4269 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
4270 "could not obtain vdev configuration for '%s'"), poolname);
4271 (void) zfs_error(hdl, EZFS_INVALCONFIG, errbuf);
4272 goto out;
4273 }
4274
4275 verify(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
4276 &top, &toplevels) == 0);
4277
4278 if (!supported_dump_vdev_type(hdl, top[0], errbuf)) {
4279 goto out;
4280 }
4281 ret = 0;
4282
4283 out:
4284 if (zhp)
4285 zpool_close(zhp);
4286 libzfs_fini(hdl);
4287 return (ret);
4288 }
|
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Copyright (c) 2011, 2015 by Delphix. All rights reserved.
25 * Copyright (c) 2013, Joyent, Inc. All rights reserved.
26 * Copyright 2016 Igor Kozhukhov <ikozhukhov@gmail.com>
27 * Copyright 2017 Nexenta Systems, Inc.
28 * Copyright (c) 2017 Datto Inc.
29 */
30
31 #include <ctype.h>
32 #include <errno.h>
33 #include <devid.h>
34 #include <fcntl.h>
35 #include <libintl.h>
36 #include <stdio.h>
37 #include <stdlib.h>
38 #include <strings.h>
39 #include <unistd.h>
40 #include <libgen.h>
41 #include <sys/efi_partition.h>
42 #include <sys/vtoc.h>
43 #include <sys/zfs_ioctl.h>
44 #include <dlfcn.h>
45
46 #include "zfs_namecheck.h"
47 #include "zfs_prop.h"
48 #include "libzfs_impl.h"
49 #include "zfs_comutil.h"
50 #include "zfeature_common.h"
51
52 static int read_efi_label(nvlist_t *, diskaddr_t *, boolean_t *);
53
54 #define BACKUP_SLICE "s2"
55
56 typedef struct prop_flags {
57 int create:1; /* Validate property on creation */
58 int import:1; /* Validate property on import */
59 } prop_flags_t;
60
61 /*
62 * ====================================================================
63 * zpool property functions
64 * ====================================================================
65 */
66
67 static int
68 zpool_get_all_props(zpool_handle_t *zhp)
69 {
70 zfs_cmd_t zc = { 0 };
71 libzfs_handle_t *hdl = zhp->zpool_hdl;
72
406 }
407
408 boolean_t
409 zpool_is_bootable(zpool_handle_t *zhp)
410 {
411 char bootfs[ZFS_MAX_DATASET_NAME_LEN];
412
413 return (zpool_get_prop(zhp, ZPOOL_PROP_BOOTFS, bootfs,
414 sizeof (bootfs), NULL, B_FALSE) == 0 && strncmp(bootfs, "-",
415 sizeof (bootfs)) != 0);
416 }
417
418
419 /*
420 * Given an nvlist of zpool properties to be set, validate that they are
421 * correct, and parse any numeric properties (index, boolean, etc) if they are
422 * specified as strings.
423 */
424 static nvlist_t *
425 zpool_valid_proplist(libzfs_handle_t *hdl, const char *poolname,
426 nvlist_t *props, uint64_t version, prop_flags_t flags, const char *errbuf)
427 {
428 nvpair_t *elem;
429 nvlist_t *retprops;
430 zpool_prop_t prop;
431 char *strval;
432 uint64_t intval;
433 char *slash, *check;
434 struct stat64 statbuf;
435 zpool_handle_t *zhp;
436
437 if (nvlist_alloc(&retprops, NV_UNIQUE_NAME, 0) != 0) {
438 (void) no_memory(hdl);
439 return (NULL);
440 }
441
442 elem = NULL;
443 while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
444 const char *propname = nvpair_name(elem);
445
446 prop = zpool_name_to_prop(propname);
447 if (prop == ZPROP_INVAL && zpool_prop_feature(propname)) {
448 int err;
449 char *fname = strchr(propname, '@') + 1;
450
451 err = zfeature_lookup_name(fname, NULL);
452 if (err != 0) {
453 ASSERT3U(err, ==, ENOENT);
454 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
455 "invalid feature '%s'"), fname);
456 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
457 goto error;
458 }
459
460 if (nvpair_type(elem) != DATA_TYPE_STRING) {
461 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
462 "'%s' must be a string"), propname);
463 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
464 goto error;
465 }
466
467 (void) nvpair_value_string(elem, &strval);
468 if (strcmp(strval, ZFS_FEATURE_ENABLED) != 0) {
469 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
470 "property '%s' can only be set to "
471 "'enabled'"), propname);
472 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
473 goto error;
474 }
475
476 if (nvlist_add_uint64(retprops, propname, 0) != 0) {
477 (void) no_memory(hdl);
478 goto error;
479 }
480 continue;
481 }
482
483 /*
484 * Make sure this property is valid and applies to this type.
485 */
486 if (prop == ZPROP_INVAL) {
487 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
488 "invalid property '%s'"), propname);
489 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
490 goto error;
491 }
492
493 if (zpool_prop_readonly(prop)) {
494 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
495 "is readonly"), propname);
496 (void) zfs_error(hdl, EZFS_PROPREADONLY, errbuf);
497 goto error;
498 }
499
500 if (zprop_parse_value(hdl, elem, prop, ZFS_TYPE_POOL, retprops,
501 &strval, &intval, errbuf) != 0)
502 goto error;
503
504 /*
505 * Perform additional checking for specific properties.
506 */
706 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
707
708 if (zcmd_write_src_nvlist(zhp->zpool_hdl, &zc, nvl) != 0) {
709 nvlist_free(nvl);
710 return (-1);
711 }
712
713 ret = zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_SET_PROPS, &zc);
714
715 zcmd_free_nvlists(&zc);
716 nvlist_free(nvl);
717
718 if (ret)
719 (void) zpool_standard_error(zhp->zpool_hdl, errno, errbuf);
720 else
721 (void) zpool_props_refresh(zhp);
722
723 return (ret);
724 }
725
726 /*
727 * Set zpool properties nvlist
728 */
729 int
730 zpool_set_proplist(zpool_handle_t *zhp, nvlist_t *nvl)
731 {
732 zfs_cmd_t zc = { 0 };
733 int ret = -1;
734 char errbuf[1024];
735 nvlist_t *realprops;
736 uint64_t version;
737 prop_flags_t flags = { 0 };
738
739 assert(nvl != NULL);
740
741 (void) snprintf(errbuf, sizeof (errbuf),
742 dgettext(TEXT_DOMAIN, "cannot set property for '%s'"),
743 zhp->zpool_name);
744
745 version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
746 if ((realprops = zpool_valid_proplist(zhp->zpool_hdl,
747 zhp->zpool_name, nvl, version, flags, errbuf)) == NULL) {
748 nvlist_free(nvl);
749 return (-1);
750 }
751
752 nvlist_free(nvl);
753 nvl = realprops;
754
755 /*
756 * Execute the corresponding ioctl() to set this property.
757 */
758 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
759
760 if (zcmd_write_src_nvlist(zhp->zpool_hdl, &zc, nvl) != 0) {
761 nvlist_free(nvl);
762 return (-1);
763 }
764
765 ret = zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_SET_PROPS, &zc);
766
767 zcmd_free_nvlists(&zc);
768 nvlist_free(nvl);
769
770 if (ret)
771 (void) zpool_standard_error(zhp->zpool_hdl, errno, errbuf);
772 else
773 (void) zpool_props_refresh(zhp);
774
775 return (ret);
776 }
777
778 int
779 zpool_expand_proplist(zpool_handle_t *zhp, zprop_list_t **plp)
780 {
781 libzfs_handle_t *hdl = zhp->zpool_hdl;
782 zprop_list_t *entry;
783 char buf[ZFS_MAXPROPLEN];
784 nvlist_t *features = NULL;
785 zprop_list_t **last;
786 boolean_t firstexpand = (NULL == *plp);
787
788 if (zprop_expand_list(hdl, plp, ZFS_TYPE_POOL) != 0)
789 return (-1);
790
791 last = plp;
792 while (*last != NULL)
793 last = &(*last)->pl_next;
794
795 if ((*plp)->pl_all)
796 features = zpool_get_features(zhp);
797
798 if ((*plp)->pl_all && firstexpand) {
1151 int
1152 zpool_get_state(zpool_handle_t *zhp)
1153 {
1154 return (zhp->zpool_state);
1155 }
1156
1157 /*
1158 * Create the named pool, using the provided vdev list. It is assumed
1159 * that the consumer has already validated the contents of the nvlist, so we
1160 * don't have to worry about error semantics.
1161 */
1162 int
1163 zpool_create(libzfs_handle_t *hdl, const char *pool, nvlist_t *nvroot,
1164 nvlist_t *props, nvlist_t *fsprops)
1165 {
1166 zfs_cmd_t zc = { 0 };
1167 nvlist_t *zc_fsprops = NULL;
1168 nvlist_t *zc_props = NULL;
1169 char msg[1024];
1170 int ret = -1;
1171 boolean_t wbc_mode_prop_exists = B_FALSE;
1172
1173 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
1174 "cannot create '%s'"), pool);
1175
1176 if (!zpool_name_valid(hdl, B_FALSE, pool))
1177 return (zfs_error(hdl, EZFS_INVALIDNAME, msg));
1178
1179 if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
1180 return (-1);
1181
1182 if (props) {
1183 prop_flags_t flags = { .create = B_TRUE, .import = B_FALSE };
1184
1185 if ((zc_props = zpool_valid_proplist(hdl, pool, props,
1186 SPA_VERSION_1, flags, msg)) == NULL) {
1187 goto create_failed;
1188 }
1189 }
1190
1191 if (fsprops) {
1192 uint64_t zoned;
1193 char *zonestr;
1194
1195 zoned = ((nvlist_lookup_string(fsprops,
1196 zfs_prop_to_name(ZFS_PROP_ZONED), &zonestr) == 0) &&
1197 strcmp(zonestr, "on") == 0);
1198
1199 if ((zc_fsprops = zfs_valid_proplist(hdl, ZFS_TYPE_FILESYSTEM,
1200 fsprops, zoned, NULL, NULL, msg)) == NULL) {
1201 goto create_failed;
1202 }
1203
1204 if (nvlist_exists(zc_fsprops,
1205 zfs_prop_to_name(ZFS_PROP_WBC_MODE))) {
1206 wbc_mode_prop_exists = B_TRUE;
1207 }
1208
1209 if (!zc_props &&
1210 (nvlist_alloc(&zc_props, NV_UNIQUE_NAME, 0) != 0)) {
1211 goto create_failed;
1212 }
1213 if (nvlist_add_nvlist(zc_props,
1214 ZPOOL_ROOTFS_PROPS, zc_fsprops) != 0) {
1215 goto create_failed;
1216 }
1217 }
1218
1219 if (zc_props && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0)
1220 goto create_failed;
1221
1222 (void) strlcpy(zc.zc_name, pool, sizeof (zc.zc_name));
1223
1224 if ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_CREATE, &zc)) != 0) {
1225
1226 zcmd_free_nvlists(&zc);
1227 nvlist_free(zc_props);
1228 nvlist_free(zc_fsprops);
1265 char buf[64];
1266
1267 zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));
1268
1269 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1270 "one or more devices is less than the "
1271 "minimum size (%s)"), buf);
1272 }
1273 return (zfs_error(hdl, EZFS_BADDEV, msg));
1274
1275 case ENOSPC:
1276 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1277 "one or more devices is out of space"));
1278 return (zfs_error(hdl, EZFS_BADDEV, msg));
1279
1280 case ENOTBLK:
1281 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1282 "cache device must be a disk or disk slice"));
1283 return (zfs_error(hdl, EZFS_BADDEV, msg));
1284
1285 case EALREADY:
1286 if (wbc_mode_prop_exists) {
1287 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1288 "WBC is already in the OFF state"));
1289 return (zfs_error(hdl, EZFS_WBCALREADY, msg));
1290 }
1291
1292 default:
1293 return (zpool_standard_error(hdl, errno, msg));
1294 }
1295 }
1296
1297 create_failed:
1298 zcmd_free_nvlists(&zc);
1299 nvlist_free(zc_props);
1300 nvlist_free(zc_fsprops);
1301 return (ret);
1302 }
1303
1304 /*
1305 * Destroy the given pool. It is up to the caller to ensure that there are no
1306 * datasets left in the pool.
1307 */
1308 int
1309 zpool_destroy(zpool_handle_t *zhp, const char *log_str)
1310 {
1311 zfs_cmd_t zc = { 0 };
1381 }
1382
1383 if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
1384 return (-1);
1385 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1386
1387 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_ADD, &zc) != 0) {
1388 switch (errno) {
1389 case EBUSY:
1390 /*
1391 * This can happen if the user has specified the same
1392 * device multiple times. We can't reliably detect this
1393 * until we try to add it and see we already have a
1394 * label.
1395 */
1396 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1397 "one or more vdevs refer to the same device"));
1398 (void) zfs_error(hdl, EZFS_BADDEV, msg);
1399 break;
1400
1401 case EOVERFLOW:
1402 /*
1403 * This occurrs when one of the devices is below
1404 * SPA_MINDEVSIZE. Unfortunately, we can't detect which
1405 * device was the problem device since there's no
1406 * reliable way to determine device size from userland.
1407 */
1408 {
1409 char buf[64];
1410
1411 zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));
1412
1413 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1414 "device is less than the minimum "
1415 "size (%s)"), buf);
1416 }
1417 (void) zfs_error(hdl, EZFS_BADDEV, msg);
1418 break;
1419
1420 case ENOTSUP:
1439 default:
1440 (void) zpool_standard_error(hdl, errno, msg);
1441 }
1442
1443 ret = -1;
1444 } else {
1445 ret = 0;
1446 }
1447
1448 zcmd_free_nvlists(&zc);
1449
1450 return (ret);
1451 }
1452
1453 /*
1454 * Exports the pool from the system. The caller must ensure that there are no
1455 * mounted datasets in the pool.
1456 */
1457 static int
1458 zpool_export_common(zpool_handle_t *zhp, boolean_t force, boolean_t hardforce,
1459 boolean_t saveconfig, const char *log_str)
1460 {
1461 zfs_cmd_t zc = { 0 };
1462 char msg[1024];
1463
1464 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
1465 "cannot export '%s'"), zhp->zpool_name);
1466
1467 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1468 zc.zc_cookie = force;
1469 zc.zc_guid = hardforce;
1470 zc.zc_obj = saveconfig;
1471 zc.zc_history = (uint64_t)(uintptr_t)log_str;
1472
1473 if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_EXPORT, &zc) != 0) {
1474 switch (errno) {
1475 case EXDEV:
1476 zfs_error_aux(zhp->zpool_hdl, dgettext(TEXT_DOMAIN,
1477 "use '-f' to override the following errors:\n"
1478 "'%s' has an active shared spare which could be"
1479 " used by other pools once '%s' is exported."),
1480 zhp->zpool_name, zhp->zpool_name);
1481 return (zfs_error(zhp->zpool_hdl, EZFS_ACTIVE_SPARE,
1482 msg));
1483 default:
1484 return (zpool_standard_error_fmt(zhp->zpool_hdl, errno,
1485 msg));
1486 }
1487 }
1488
1489 return (0);
1490 }
1491
1492 int
1493 zpool_export(zpool_handle_t *zhp, boolean_t force, boolean_t saveconfig,
1494 const char *log_str)
1495 {
1496 return (zpool_export_common(zhp, force, B_FALSE, saveconfig, log_str));
1497 }
1498
1499 int
1500 zpool_export_force(zpool_handle_t *zhp, boolean_t saveconfig,
1501 const char *log_str)
1502 {
1503 return (zpool_export_common(zhp, B_TRUE, B_TRUE, saveconfig, log_str));
1504 }
1505
1506 static void
1507 zpool_rewind_exclaim(libzfs_handle_t *hdl, const char *name, boolean_t dryrun,
1508 nvlist_t *config)
1509 {
1510 nvlist_t *nv = NULL;
1511 uint64_t rewindto;
1512 int64_t loss = -1;
1513 struct tm t;
1514 char timestr[128];
1515
1516 if (!hdl->libzfs_printerr || config == NULL)
1517 return;
1518
1519 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 ||
1520 nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_REWIND_INFO, &nv) != 0) {
1521 return;
1522 }
1523
1852 */
1853 (void) zfs_error(hdl, EZFS_BADVERSION, desc);
1854 break;
1855
1856 case EINVAL:
1857 (void) zfs_error(hdl, EZFS_INVALCONFIG, desc);
1858 break;
1859
1860 case EROFS:
1861 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1862 "one or more devices is read only"));
1863 (void) zfs_error(hdl, EZFS_BADDEV, desc);
1864 break;
1865
1866 case ENXIO:
1867 if (nv && nvlist_lookup_nvlist(nv,
1868 ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 &&
1869 nvlist_lookup_nvlist(nvinfo,
1870 ZPOOL_CONFIG_MISSING_DEVICES, &missing) == 0) {
1871 (void) printf(dgettext(TEXT_DOMAIN,
1872 "The devices below are missing, use "
1873 "'-m' to import the pool anyway:\n"));
1874 print_vdev_tree(hdl, NULL, missing, 2);
1875 (void) printf("\n");
1876 }
1877 (void) zpool_standard_error(hdl, error, desc);
1878 break;
1879
1880 case EEXIST:
1881 (void) zpool_standard_error(hdl, error, desc);
1882 break;
1883 case ENAMETOOLONG:
1884 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1885 "new name of at least one dataset is longer than "
1886 "the maximum allowable length"));
1887 (void) zfs_error(hdl, EZFS_NAMETOOLONG, desc);
1888 break;
1889 default:
1890 (void) zpool_standard_error(hdl, error, desc);
1891 zpool_explain_recover(hdl,
1892 newname ? origname : thename, -error, nv);
1893 break;
1970 verify(nvlist_lookup_nvlist(zhp->zpool_config,
1971 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
1972 (void) nvlist_lookup_uint64_array(nvroot,
1973 ZPOOL_CONFIG_SCAN_STATS, (uint64_t **)&ps, &psc);
1974 if (ps && ps->pss_func == POOL_SCAN_SCRUB) {
1975 if (cmd == POOL_SCRUB_PAUSE)
1976 return (zfs_error(hdl, EZFS_SCRUB_PAUSED, msg));
1977 else
1978 return (zfs_error(hdl, EZFS_SCRUBBING, msg));
1979 } else {
1980 return (zfs_error(hdl, EZFS_RESILVERING, msg));
1981 }
1982 } else if (err == ENOENT) {
1983 return (zfs_error(hdl, EZFS_NO_SCRUB, msg));
1984 } else {
1985 return (zpool_standard_error(hdl, err, msg));
1986 }
1987 }
1988
1989 /*
1990 * Trim the pool.
1991 */
1992 int
1993 zpool_trim(zpool_handle_t *zhp, boolean_t start, uint64_t rate)
1994 {
1995 zfs_cmd_t zc = { 0 };
1996 char msg[1024];
1997 libzfs_handle_t *hdl = zhp->zpool_hdl;
1998 trim_cmd_info_t tci = { .tci_start = start, .tci_rate = rate };
1999
2000 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2001 zc.zc_cookie = (uintptr_t)&tci;
2002
2003 if (zfs_ioctl(hdl, ZFS_IOC_POOL_TRIM, &zc) == 0)
2004 return (0);
2005
2006 return (zpool_standard_error(hdl, errno, msg));
2007 }
2008
/*
 * This provides a very minimal check whether a given string is likely a
 * c#t#d# style string. Users of this are expected to do their own
 * verification of the s# part.
 */
#define	CTD_CHECK(str)  (str && str[0] == 'c' && isdigit(str[1]))

/*
 * More elaborate version for ones which may start with "/dev/dsk/"
 * and the like.
 */
static int
ctd_check_path(char *str)
{
	char *last;

	/* For absolute paths, examine only the final path component. */
	if (str != NULL && str[0] == '/') {
		last = strrchr(str, '/');

		/*
		 * A trailing "/old" means the device name is the
		 * component just before it, so back up one level.
		 */
		if (last != str && strcmp(last, "/old") == 0) {
			do {
				last--;
			} while (*last != '/');
		}
		str = last + 1;
	}
	return (CTD_CHECK(str));
}
2041
2042 /*
2043 * Find a vdev that matches the search criteria specified. We use the
2044 * the nvpair name to determine how we should look for the device.
2045 * 'avail_spare' is set to TRUE if the provided guid refers to an AVAIL
2046 * spare; but FALSE if its an INUSE spare.
2047 */
2048 static nvlist_t *
2049 vdev_to_nvlist_iter(nvlist_t *nv, nvlist_t *search, boolean_t *avail_spare,
2050 boolean_t *l2cache, boolean_t *log, boolean_t *special)
2051 {
2052 uint_t c, children;
2053 nvlist_t **child;
2054 nvlist_t *ret;
2055 uint64_t is_log, is_special;
2056 char *srchkey;
2057 nvpair_t *pair = nvlist_next_nvpair(search, NULL);
2058
2059 /* Nothing to look for */
2060 if (search == NULL || pair == NULL)
2061 return (NULL);
2062
2063 /* Obtain the key we will use to search */
2064 srchkey = nvpair_name(pair);
2065
2066 switch (nvpair_type(pair)) {
2067 case DATA_TYPE_UINT64:
2068 if (strcmp(srchkey, ZPOOL_CONFIG_GUID) == 0) {
2069 uint64_t srchval, theguid;
2070
2071 verify(nvpair_value_uint64(pair, &srchval) == 0);
2072 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
2073 &theguid) == 0);
2074 if (theguid == srchval)
2075 return (nv);
2144 * vdev id pair (i.e. mirror-4).
2145 */
2146 if ((type = strdup(srchval)) == NULL)
2147 return (NULL);
2148
2149 if ((p = strrchr(type, '-')) == NULL) {
2150 free(type);
2151 break;
2152 }
2153 idx = p + 1;
2154 *p = '\0';
2155
2156 /*
2157 * If the types don't match then keep looking.
2158 */
2159 if (strncmp(val, type, strlen(val)) != 0) {
2160 free(type);
2161 break;
2162 }
2163
2164 verify(strncmp(type, VDEV_TYPE_RAIDZ,
2165 strlen(VDEV_TYPE_RAIDZ)) == 0 ||
2166 strncmp(type, VDEV_TYPE_MIRROR,
2167 strlen(VDEV_TYPE_MIRROR)) == 0);
2168 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID,
2169 &id) == 0);
2170
2171 errno = 0;
2172 vdev_id = strtoull(idx, &end, 10);
2173
2174 free(type);
2175 if (errno != 0)
2176 return (NULL);
2177
2178 /*
2179 * Now verify that we have the correct vdev id.
2180 */
2181 if (vdev_id == id)
2182 return (nv);
2183 }
2184
2185 /*
2186 * Common case
2187 */
2188 if (strcmp(srchval, val) == 0)
2189 return (nv);
2190 break;
2191 }
2192
2193 default:
2194 break;
2195 }
2196
2197 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
2198 &child, &children) != 0)
2199 return (NULL);
2200
2201 for (c = 0; c < children; c++) {
2202 if ((ret = vdev_to_nvlist_iter(child[c], search,
2203 avail_spare, l2cache, NULL, NULL)) != NULL) {
2204 /*
2205 * The 'is_log' value is only set for the toplevel
2206 * vdev, not the leaf vdevs. So we always lookup the
2207 * log device from the root of the vdev tree (where
2208 * 'log' is non-NULL).
2209 */
2210 if (log != NULL &&
2211 nvlist_lookup_uint64(child[c],
2212 ZPOOL_CONFIG_IS_LOG, &is_log) == 0 &&
2213 is_log) {
2214 *log = B_TRUE;
2215 }
2216
2217 if (special != NULL &&
2218 nvlist_lookup_uint64(child[c],
2219 ZPOOL_CONFIG_IS_SPECIAL, &is_special) == 0 &&
2220 is_special) {
2221 *special = B_TRUE;
2222 }
2223
2224 return (ret);
2225 }
2226 }
2227
2228 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
2229 &child, &children) == 0) {
2230 for (c = 0; c < children; c++) {
2231 if ((ret = vdev_to_nvlist_iter(child[c], search,
2232 avail_spare, l2cache, NULL, NULL)) != NULL) {
2233 *avail_spare = B_TRUE;
2234 return (ret);
2235 }
2236 }
2237 }
2238
2239 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
2240 &child, &children) == 0) {
2241 for (c = 0; c < children; c++) {
2242 if ((ret = vdev_to_nvlist_iter(child[c], search,
2243 avail_spare, l2cache, NULL, NULL)) != NULL) {
2244 *l2cache = B_TRUE;
2245 return (ret);
2246 }
2247 }
2248 }
2249
2250 return (NULL);
2251 }
2252
2253 /*
2254 * Given a physical path (minus the "/devices" prefix), find the
2255 * associated vdev.
2256 */
2257 nvlist_t *
2258 zpool_find_vdev_by_physpath(zpool_handle_t *zhp, const char *ppath,
2259 boolean_t *avail_spare, boolean_t *l2cache, boolean_t *log)
2260 {
2261 nvlist_t *search, *nvroot, *ret;
2262
2263 verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0);
2264 verify(nvlist_add_string(search, ZPOOL_CONFIG_PHYS_PATH, ppath) == 0);
2265
2266 verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
2267 &nvroot) == 0);
2268
2269 *avail_spare = B_FALSE;
2270 *l2cache = B_FALSE;
2271 if (log != NULL)
2272 *log = B_FALSE;
2273 ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log,
2274 NULL);
2275 nvlist_free(search);
2276
2277 return (ret);
2278 }
2279
2280 /*
2281 * Determine if we have an "interior" top-level vdev (i.e mirror/raidz).
2282 */
2283 boolean_t
2284 zpool_vdev_is_interior(const char *name)
2285 {
2286 if (strncmp(name, VDEV_TYPE_RAIDZ, strlen(VDEV_TYPE_RAIDZ)) == 0 ||
2287 strncmp(name, VDEV_TYPE_MIRROR, strlen(VDEV_TYPE_MIRROR)) == 0)
2288 return (B_TRUE);
2289 return (B_FALSE);
2290 }
2291
2292 nvlist_t *
2293 zpool_find_vdev(zpool_handle_t *zhp, const char *path, boolean_t *avail_spare,
2294 boolean_t *l2cache, boolean_t *log, boolean_t *special)
2295 {
2296 char buf[MAXPATHLEN];
2297 char *end;
2298 nvlist_t *nvroot, *search, *ret;
2299 uint64_t guid;
2300
2301 verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0);
2302
2303 guid = strtoull(path, &end, 10);
2304 if (guid != 0 && *end == '\0') {
2305 verify(nvlist_add_uint64(search, ZPOOL_CONFIG_GUID, guid) == 0);
2306 } else if (zpool_vdev_is_interior(path)) {
2307 verify(nvlist_add_string(search, ZPOOL_CONFIG_TYPE, path) == 0);
2308 } else if (path[0] != '/') {
2309 (void) snprintf(buf, sizeof (buf), "%s/%s", ZFS_DISK_ROOT,
2310 path);
2311 verify(nvlist_add_string(search, ZPOOL_CONFIG_PATH, buf) == 0);
2312 } else {
2313 verify(nvlist_add_string(search, ZPOOL_CONFIG_PATH, path) == 0);
2314 }
2315
2316 verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
2317 &nvroot) == 0);
2318
2319 *avail_spare = B_FALSE;
2320 *l2cache = B_FALSE;
2321 if (log != NULL)
2322 *log = B_FALSE;
2323
2324 if (special != NULL)
2325 *special = B_FALSE;
2326
2327 ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log,
2328 special);
2329 nvlist_free(search);
2330
2331 return (ret);
2332 }
2333
2334 static int
2335 vdev_online(nvlist_t *nv)
2336 {
2337 uint64_t ival;
2338
2339 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE, &ival) == 0 ||
2340 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED, &ival) == 0 ||
2341 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED, &ival) == 0)
2342 return (0);
2343
2344 return (1);
2345 }
2346
2347 /*
2348 * Helper function for zpool_get_physpaths().
2526 zpool_vdev_online(zpool_handle_t *zhp, const char *path, int flags,
2527 vdev_state_t *newstate)
2528 {
2529 zfs_cmd_t zc = { 0 };
2530 char msg[1024];
2531 char *pathname;
2532 nvlist_t *tgt;
2533 boolean_t avail_spare, l2cache, islog;
2534 libzfs_handle_t *hdl = zhp->zpool_hdl;
2535
2536 if (flags & ZFS_ONLINE_EXPAND) {
2537 (void) snprintf(msg, sizeof (msg),
2538 dgettext(TEXT_DOMAIN, "cannot expand %s"), path);
2539 } else {
2540 (void) snprintf(msg, sizeof (msg),
2541 dgettext(TEXT_DOMAIN, "cannot online %s"), path);
2542 }
2543
2544 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2545 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
2546 &islog, NULL)) == NULL)
2547 return (zfs_error(hdl, EZFS_NODEVICE, msg));
2548
2549 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2550
2551 if ((flags & ZFS_ONLINE_EXPAND ||
2552 zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOEXPAND, NULL)) &&
2553 nvlist_lookup_string(tgt, ZPOOL_CONFIG_PATH, &pathname) == 0) {
2554 uint64_t wholedisk = 0;
2555
2556 (void) nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_WHOLE_DISK,
2557 &wholedisk);
2558
2559 /*
2560 * XXX - L2ARC 1.0 devices can't support expansion.
2561 */
2562 if (l2cache) {
2563 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2564 "cannot expand cache devices"));
2565 return (zfs_error(hdl, EZFS_VDEVNOTSUP, msg));
2566 }
2567
2568 if (wholedisk) {
2569 pathname += strlen(ZFS_DISK_ROOT) + 1;
2570 (void) zpool_relabel_disk(hdl, pathname);
2588 return (0);
2589 }
2590
2591 /*
2592 * Take the specified vdev offline
2593 */
2594 int
2595 zpool_vdev_offline(zpool_handle_t *zhp, const char *path, boolean_t istmp)
2596 {
2597 zfs_cmd_t zc = { 0 };
2598 char msg[1024];
2599 nvlist_t *tgt;
2600 boolean_t avail_spare, l2cache;
2601 libzfs_handle_t *hdl = zhp->zpool_hdl;
2602
2603 (void) snprintf(msg, sizeof (msg),
2604 dgettext(TEXT_DOMAIN, "cannot offline %s"), path);
2605
2606 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2607 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
2608 NULL, NULL)) == NULL)
2609 return (zfs_error(hdl, EZFS_NODEVICE, msg));
2610
2611 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2612
2613 if (avail_spare)
2614 return (zfs_error(hdl, EZFS_ISSPARE, msg));
2615
2616 zc.zc_cookie = VDEV_STATE_OFFLINE;
2617 zc.zc_obj = istmp ? ZFS_OFFLINE_TEMPORARY : 0;
2618
2619 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
2620 return (0);
2621
2622 switch (errno) {
2623 case EBUSY:
2624
2625 /*
2626 * There are no other replicas of this device.
2627 */
2628 return (zfs_error(hdl, EZFS_NOREPLICAS, msg));
2738 int ret;
2739 nvlist_t *tgt;
2740 boolean_t avail_spare, l2cache, islog;
2741 uint64_t val;
2742 char *newname;
2743 nvlist_t **child;
2744 uint_t children;
2745 nvlist_t *config_root;
2746 libzfs_handle_t *hdl = zhp->zpool_hdl;
2747 boolean_t rootpool = zpool_is_bootable(zhp);
2748
2749 if (replacing)
2750 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
2751 "cannot replace %s with %s"), old_disk, new_disk);
2752 else
2753 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
2754 "cannot attach %s to %s"), new_disk, old_disk);
2755
2756 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2757 if ((tgt = zpool_find_vdev(zhp, old_disk, &avail_spare, &l2cache,
2758 &islog, NULL)) == 0)
2759 return (zfs_error(hdl, EZFS_NODEVICE, msg));
2760
2761 if (avail_spare)
2762 return (zfs_error(hdl, EZFS_ISSPARE, msg));
2763
2764 if (l2cache)
2765 return (zfs_error(hdl, EZFS_ISL2CACHE, msg));
2766
2767 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2768 zc.zc_cookie = replacing;
2769
2770 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
2771 &child, &children) != 0 || children != 1) {
2772 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2773 "new device must be a single disk"));
2774 return (zfs_error(hdl, EZFS_INVALCONFIG, msg));
2775 }
2776
2777 verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
2778 ZPOOL_CONFIG_VDEV_TREE, &config_root) == 0);
2779
2780 if ((newname = zpool_vdev_name(NULL, NULL, child[0], B_FALSE)) == NULL)
2781 return (-1);
2782
2783 /*
2784 * If the target is a hot spare that has been swapped in, we can only
2785 * replace it with another hot spare.
2786 */
2787 if (replacing &&
2788 nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_IS_SPARE, &val) == 0 &&
2789 (zpool_find_vdev(zhp, newname, &avail_spare, &l2cache,
2790 NULL, NULL) == NULL || !avail_spare) &&
2791 is_replacing_spare(config_root, tgt, 1)) {
2792 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2793 "can only be replaced by another hot spare"));
2794 free(newname);
2795 return (zfs_error(hdl, EZFS_BADTARGET, msg));
2796 }
2797
2798 free(newname);
2799
2800 if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
2801 return (-1);
2802
2803 ret = zfs_ioctl(hdl, ZFS_IOC_VDEV_ATTACH, &zc);
2804
2805 zcmd_free_nvlists(&zc);
2806
2807 if (ret == 0) {
2808 if (rootpool) {
2809 /*
2810 * XXX need a better way to prevent user from
2837 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2838 "cannot replace a replacing device"));
2839 } else {
2840 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2841 "can only attach to mirrors and top-level "
2842 "disks"));
2843 }
2844 (void) zfs_error(hdl, EZFS_BADTARGET, msg);
2845 break;
2846
2847 case EINVAL:
2848 /*
2849 * The new device must be a single disk.
2850 */
2851 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2852 "new device must be a single disk"));
2853 (void) zfs_error(hdl, EZFS_INVALCONFIG, msg);
2854 break;
2855
2856 case EBUSY:
2857 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "%s is busy"),
2858 new_disk);
2859 (void) zfs_error(hdl, EZFS_BADDEV, msg);
2860 break;
2861
2862 case EOVERFLOW:
2863 /*
2864 * The new device is too small.
2865 */
2866 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2867 "device is too small"));
2868 (void) zfs_error(hdl, EZFS_BADDEV, msg);
2869 break;
2870
2871 case EDOM:
2872 /*
2873 * The new device has a different alignment requirement.
2874 */
2875 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2876 "devices have different sector alignment"));
2877 (void) zfs_error(hdl, EZFS_BADDEV, msg);
2891 return (-1);
2892 }
2893
2894 /*
2895 * Detach the specified device.
2896 */
2897 int
2898 zpool_vdev_detach(zpool_handle_t *zhp, const char *path)
2899 {
2900 zfs_cmd_t zc = { 0 };
2901 char msg[1024];
2902 nvlist_t *tgt;
2903 boolean_t avail_spare, l2cache;
2904 libzfs_handle_t *hdl = zhp->zpool_hdl;
2905
2906 (void) snprintf(msg, sizeof (msg),
2907 dgettext(TEXT_DOMAIN, "cannot detach %s"), path);
2908
2909 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2910 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
2911 NULL, NULL)) == 0)
2912 return (zfs_error(hdl, EZFS_NODEVICE, msg));
2913
2914 if (avail_spare)
2915 return (zfs_error(hdl, EZFS_ISSPARE, msg));
2916
2917 if (l2cache)
2918 return (zfs_error(hdl, EZFS_ISL2CACHE, msg));
2919
2920 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2921
2922 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_DETACH, &zc) == 0)
2923 return (0);
2924
2925 switch (errno) {
2926
2927 case ENOTSUP:
2928 /*
2929 * Can't detach from this type of vdev.
2930 */
2931 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "only "
3180 free(varray);
3181 }
3182 zcmd_free_nvlists(&zc);
3183 nvlist_free(zc_props);
3184 nvlist_free(newconfig);
3185 if (freelist) {
3186 nvlist_free(*newroot);
3187 *newroot = NULL;
3188 }
3189
3190 if (retval != 0)
3191 return (retval);
3192
3193 if (memory_err)
3194 return (no_memory(hdl));
3195
3196 return (0);
3197 }
3198
3199 /*
3200 * Remove the given device. Currently, this is supported only for hot spares
3201 * and level 2 cache devices.
3202 */
3203 int
3204 zpool_vdev_remove(zpool_handle_t *zhp, const char *path)
3205 {
3206 zfs_cmd_t zc = { 0 };
3207 char msg[1024];
3208 nvlist_t *tgt;
3209 boolean_t avail_spare, l2cache, islog, isspecial;
3210 libzfs_handle_t *hdl = zhp->zpool_hdl;
3211 uint64_t version;
3212
3213 (void) snprintf(msg, sizeof (msg),
3214 dgettext(TEXT_DOMAIN, "cannot remove %s"), path);
3215
3216 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3217 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
3218 &islog, &isspecial)) == 0)
3219 return (zfs_error(hdl, EZFS_NODEVICE, msg));
3220 /*
3221 * XXX - this should just go away.
3222 */
3223 if (!avail_spare && !l2cache && !islog && !isspecial) {
3224 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3225 "only inactive hot spares, cache, top-level, "
3226 "log, or special devices can be removed"));
3227 return (zfs_error(hdl, EZFS_NODEVICE, msg));
3228 }
3229
3230 version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
3231 if (islog && version < SPA_VERSION_HOLES) {
3232 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3233 "pool must be upgrade to support log removal"));
3234 return (zfs_error(hdl, EZFS_BADVERSION, msg));
3235 }
3236
3237 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
3238
3239 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_REMOVE, &zc) == 0)
3240 return (0);
3241
3242 if (isspecial && errno == EEXIST) {
3243 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3244 "special device contains metadata"));
3245 return (zfs_error(hdl, EZFS_POOL_NOTSUP, msg));
3246 } else if (isspecial && errno == EBUSY) {
3247 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3248 "wbc feature flag is active"));
3249 return (zfs_error(hdl, EZFS_WBCCHILD, msg));
3250 }
3251
3252 return (zpool_standard_error(hdl, errno, msg));
3253 }
3254
3255 /*
3256 * Clear the errors for the pool, or the particular device if specified.
3257 */
3258 int
3259 zpool_clear(zpool_handle_t *zhp, const char *path, nvlist_t *rewindnvl)
3260 {
3261 zfs_cmd_t zc = { 0 };
3262 char msg[1024];
3263 nvlist_t *tgt;
3264 zpool_rewind_policy_t policy;
3265 boolean_t avail_spare, l2cache;
3266 libzfs_handle_t *hdl = zhp->zpool_hdl;
3267 nvlist_t *nvi = NULL;
3268 int error;
3269
3270 if (path)
3271 (void) snprintf(msg, sizeof (msg),
3272 dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
3273 path);
3274 else
3275 (void) snprintf(msg, sizeof (msg),
3276 dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
3277 zhp->zpool_name);
3278
3279 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3280 if (path) {
3281 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare,
3282 &l2cache, NULL, NULL)) == 0)
3283 return (zfs_error(hdl, EZFS_NODEVICE, msg));
3284
3285 /*
3286 * Don't allow error clearing for hot spares. Do allow
3287 * error clearing for l2cache devices.
3288 */
3289 if (avail_spare)
3290 return (zfs_error(hdl, EZFS_ISSPARE, msg));
3291
3292 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID,
3293 &zc.zc_guid) == 0);
3294 }
3295
3296 zpool_get_rewind_policy(rewindnvl, &policy);
3297 zc.zc_cookie = policy.zrp_request;
3298
3299 if (zcmd_alloc_dst_nvlist(hdl, &zc, zhp->zpool_config_size * 2) != 0)
3300 return (-1);
3301
3302 if (zcmd_write_src_nvlist(hdl, &zc, rewindnvl) != 0)
3418 /*
3419 * In a case the strdup() fails, we will just return NULL below.
3420 */
3421 path = strdup(list[0].devname);
3422
3423 devid_free_nmlist(list);
3424
3425 return (path);
3426 }
3427
3428 /*
3429 * Convert from a path to a devid string.
3430 */
3431 static char *
3432 path_to_devid(const char *path)
3433 {
3434 int fd;
3435 ddi_devid_t devid;
3436 char *minor, *ret;
3437
3438 if ((fd = open(path, O_RDONLY | O_NDELAY)) < 0)
3439 return (NULL);
3440
3441 minor = NULL;
3442 ret = NULL;
3443 if (devid_get(fd, &devid) == 0) {
3444 if (devid_get_minor_name(fd, &minor) == 0)
3445 ret = devid_str_encode(devid, minor);
3446 if (minor != NULL)
3447 devid_str_free(minor);
3448 devid_free(devid);
3449 }
3450 (void) close(fd);
3451
3452 return (ret);
3453 }
3454
3455 /*
3456 * Issue the necessary ioctl() to update the stored path value for the vdev. We
3457 * ignore any failure here, since a common case is for an unprivileged user to
3458 * type 'zpool status', and we'll display the correct information anyway.
3735 }
3736 }
3737
3738 int
3739 zpool_log_history(libzfs_handle_t *hdl, const char *message)
3740 {
3741 zfs_cmd_t zc = { 0 };
3742 nvlist_t *args;
3743 int err;
3744
3745 args = fnvlist_alloc();
3746 fnvlist_add_string(args, "message", message);
3747 err = zcmd_write_src_nvlist(hdl, &zc, args);
3748 if (err == 0)
3749 err = ioctl(hdl->libzfs_fd, ZFS_IOC_LOG_HISTORY, &zc);
3750 nvlist_free(args);
3751 zcmd_free_nvlists(&zc);
3752 return (err);
3753 }
3754
3755 int
3756 zpool_stage_history(libzfs_handle_t *hdl, const char *history_str)
3757 {
3758 if (history_str == NULL)
3759 return (EINVAL);
3760
3761 if (hdl->libzfs_log_str != NULL)
3762 free(hdl->libzfs_log_str);
3763
3764 if ((hdl->libzfs_log_str = strdup(history_str)) == NULL)
3765 return (no_memory(hdl));
3766
3767 return (0);
3768 }
3769
3770 /*
3771 * Perform ioctl to get some command history of a pool.
3772 *
3773 * 'buf' is the buffer to fill up to 'len' bytes. 'off' is the
3774 * logical offset of the history buffer to start reading from.
3775 *
3776 * Upon return, 'off' is the next logical offset to read from and
3777 * 'len' is the actual amount of bytes read into 'buf'.
3778 */
3779 static int
3780 get_history(zpool_handle_t *zhp, char *buf, uint64_t *off, uint64_t *len)
3781 {
3782 zfs_cmd_t zc = { 0 };
3783 libzfs_handle_t *hdl = zhp->zpool_hdl;
3784
3785 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3786
3787 zc.zc_history = (uint64_t)(uintptr_t)buf;
3788 zc.zc_history_len = *len;
3789 zc.zc_history_offset = *off;
4320 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
4321 "could not obtain vdev configuration for '%s'"), poolname);
4322 (void) zfs_error(hdl, EZFS_INVALCONFIG, errbuf);
4323 goto out;
4324 }
4325
4326 verify(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
4327 &top, &toplevels) == 0);
4328
4329 if (!supported_dump_vdev_type(hdl, top[0], errbuf)) {
4330 goto out;
4331 }
4332 ret = 0;
4333
4334 out:
4335 if (zhp)
4336 zpool_close(zhp);
4337 libzfs_fini(hdl);
4338 return (ret);
4339 }
4340
4341 /*
4342 * Vdev props
4343 */
4344 static int
4345 vdev_get_guid(zpool_handle_t *zhp, const char *path, uint64_t *guid)
4346 {
4347 nvlist_t *nvl;
4348 boolean_t avail_spare, l2cache, islog;
4349
4350 if ((nvl = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
4351 &islog, NULL)) == NULL)
4352 return (1);
4353 verify(nvlist_lookup_uint64(nvl, ZPOOL_CONFIG_GUID, guid) == 0);
4354 return (0);
4355 }
4356
4357 /*
4358 * Given an nvlist of vdev properties to be set, validate that they are
4359 * correct, and parse any numeric properties (index, boolean, etc) if they are
4360 * specified as strings.
4361 */
/*ARGSUSED*/
static nvlist_t *
vdev_valid_proplist(libzfs_handle_t *hdl, nvlist_t *props,
    uint64_t version, prop_flags_t flags, const char *errbuf)
{
	nvpair_t *elem;
	nvlist_t *retprops;	/* validated/parsed result, returned to caller */
	vdev_prop_t prop;
	char *strval;
	uint64_t intval;

	if (nvlist_alloc(&retprops, NV_UNIQUE_NAME, 0) != 0) {
		(void) no_memory(hdl);
		return (NULL);
	}

	/* Walk every pair in the caller-supplied property list. */
	elem = NULL;
	while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
		const char *propname = nvpair_name(elem);

		/*
		 * Make sure this property is valid and applies to this type.
		 */
		if ((prop = vdev_name_to_prop(propname)) == ZPROP_INVAL) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "invalid vdev property '%s'"), propname);
			(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
			goto error;
		}

		if (vdev_prop_readonly(prop)) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
			    "is readonly"), propname);
			(void) zfs_error(hdl, EZFS_PROPREADONLY, errbuf);
			goto error;
		}

		/*
		 * Decode the value into strval/intval; the parsed pair is
		 * recorded in 'retprops'.
		 */
		if (zprop_parse_value(hdl, elem, prop, ZFS_TYPE_VDEV, retprops,
		    &strval, &intval, errbuf) != 0)
			goto error;

		/*
		 * Perform additional checking for specific properties.
		 */
		switch (prop) {
		case VDEV_PROP_PREFERRED_READ:
			/* Expressed as a percentage, 0-100. */
			if (intval > 100) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "'%s' must be from 0 to 100"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP,
				    errbuf);
				goto error;
			}
			break;
		case VDEV_PROP_READ_MINACTIVE:
		case VDEV_PROP_READ_MAXACTIVE:
		case VDEV_PROP_AREAD_MINACTIVE:
		case VDEV_PROP_AREAD_MAXACTIVE:
		case VDEV_PROP_WRITE_MINACTIVE:
		case VDEV_PROP_WRITE_MAXACTIVE:
		case VDEV_PROP_AWRITE_MINACTIVE:
		case VDEV_PROP_AWRITE_MAXACTIVE:
		case VDEV_PROP_SCRUB_MINACTIVE:
		case VDEV_PROP_SCRUB_MAXACTIVE:
			/* Queue-depth knobs share a common 0-1000 range. */
			if (intval > 1000) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "'%s' must be from 0 to 1000"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP,
				    errbuf);
				goto error;
			}
			break;
		default:
			break;
		}
	}

	return (retprops);
error:
	nvlist_free(retprops);
	return (NULL);
}
4444
4445 static int
4446 vdev_get_all_props(zpool_handle_t *zhp, uint64_t vdev_guid, nvlist_t **nvp)
4447 {
4448 zfs_cmd_t zc = { 0 };
4449 libzfs_handle_t *hdl = zhp->zpool_hdl;
4450
4451 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
4452
4453 if (zcmd_alloc_dst_nvlist(hdl, &zc, 0) != 0)
4454 return (-1);
4455
4456 zc.zc_guid = vdev_guid;
4457 while (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_GET_PROPS, &zc) != 0) {
4458 if (errno == ENOMEM) {
4459 if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
4460 zcmd_free_nvlists(&zc);
4461 return (-1);
4462 }
4463 } else {
4464 zcmd_free_nvlists(&zc);
4465 (void) zfs_error(hdl, EZFS_BADDEV,
4466 "failed to get vdev properties");
4467 return (-1);
4468 }
4469 }
4470
4471 if (zcmd_read_dst_nvlist(hdl, &zc, nvp) != 0) {
4472 zcmd_free_nvlists(&zc);
4473 return (-1);
4474 }
4475
4476 zcmd_free_nvlists(&zc);
4477
4478 return (0);
4479 }
4480
4481 int
4482 vdev_get_prop(zpool_handle_t *zhp, const char *vdev, vdev_prop_t prop,
4483 char *buf, size_t len)
4484 {
4485 uint64_t vdev_guid;
4486 uint64_t intval;
4487 const char *strval;
4488 nvlist_t *nvl;
4489 char errbuf[1024];
4490
4491 (void) snprintf(errbuf, sizeof (errbuf),
4492 dgettext(TEXT_DOMAIN, "cannot get property for '%s'"),
4493 zhp->zpool_name);
4494
4495 if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL)
4496 return (-1);
4497
4498 if (vdev_get_guid(zhp, vdev, &vdev_guid) != 0) {
4499 (void) zfs_error(zhp->zpool_hdl, EZFS_BADDEV, errbuf);
4500 return (-1);
4501 }
4502
4503 if (vdev_get_all_props(zhp, vdev_guid, &nvl) != 0)
4504 return (-1);
4505
4506 switch (vdev_prop_get_type(prop)) {
4507 case PROP_TYPE_STRING:
4508 if (nvlist_lookup_string(nvl, vdev_prop_to_name(prop),
4509 (char **)&strval) != 0)
4510 if ((strval = (char *)vdev_prop_default_string(prop))
4511 == NULL)
4512 strval = "-";
4513
4514 (void) strlcpy(buf, strval, len);
4515 break;
4516
4517 case PROP_TYPE_NUMBER:
4518 if (nvlist_lookup_uint64(nvl, vdev_prop_to_name(prop),
4519 &intval) != 0)
4520 intval = vdev_prop_default_numeric(prop);
4521 (void) snprintf(buf, len, "%llu", (u_longlong_t)intval);
4522 break;
4523
4524 case PROP_TYPE_INDEX:
4525 if (nvlist_lookup_uint64(nvl, vdev_prop_to_name(prop),
4526 &intval) != 0)
4527 intval = vdev_prop_default_numeric(prop);
4528 if (vdev_prop_index_to_string(prop, intval, &strval) != 0) {
4529 (void) zfs_error(zhp->zpool_hdl, EZFS_BADPROP, errbuf);
4530 nvlist_free(nvl);
4531 return (-1);
4532 }
4533 (void) strlcpy(buf, strval, len);
4534 break;
4535
4536 default:
4537 abort();
4538 }
4539
4540 nvlist_free(nvl);
4541
4542 return (0);
4543 }
4544
4545 /*
4546 * Set vdev property : propname=propval.
4547 */
4548 int
4549 vdev_set_prop(zpool_handle_t *zhp, const char *vdev,
4550 const char *propname, const char *propval)
4551 {
4552 zfs_cmd_t zc = { 0 };
4553 int ret = -1;
4554 char errbuf[1024];
4555 nvlist_t *nvl = NULL;
4556 nvlist_t *realprops;
4557 uint64_t version;
4558 uint64_t guid;
4559 prop_flags_t flags = { 0 };
4560
4561 (void) snprintf(errbuf, sizeof (errbuf),
4562 dgettext(TEXT_DOMAIN, "cannot set property for '%s'"),
4563 zhp->zpool_name);
4564
4565 if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0)
4566 return (no_memory(zhp->zpool_hdl));
4567
4568 if (nvlist_add_string(nvl, propname, propval) != 0) {
4569 nvlist_free(nvl);
4570 return (no_memory(zhp->zpool_hdl));
4571 }
4572
4573 if (vdev_get_guid(zhp, vdev, &guid) != 0) {
4574 (void) zfs_error(zhp->zpool_hdl, EZFS_BADDEV, errbuf);
4575 return (-1);
4576 }
4577
4578 version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
4579 if ((realprops = vdev_valid_proplist(zhp->zpool_hdl, nvl,
4580 version, flags, errbuf)) == NULL) {
4581 nvlist_free(nvl);
4582 return (-1);
4583 }
4584
4585 nvlist_free(nvl);
4586 nvl = realprops;
4587
4588 /*
4589 * Execute the corresponding ioctl() to set this property.
4590 */
4591 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
4592 zc.zc_guid = guid;
4593
4594 if (zcmd_write_src_nvlist(zhp->zpool_hdl, &zc, nvl) != 0) {
4595 nvlist_free(nvl);
4596 return (-1);
4597 }
4598
4599 ret = zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_VDEV_SET_PROPS, &zc);
4600
4601 zcmd_free_nvlists(&zc);
4602 nvlist_free(nvl);
4603
4604 if (ret)
4605 (void) zpool_vprop_standard_error(zhp->zpool_hdl,
4606 errno, errbuf);
4607
4608 return (ret);
4609 }
4610
4611 /*
4612 * Set vdev properties nvlist
4613 */
4614 int
4615 vdev_set_proplist(zpool_handle_t *zhp, const char *vdev, nvlist_t *nvl)
4616 {
4617 zfs_cmd_t zc = { 0 };
4618 int ret = -1;
4619 char errbuf[1024];
4620 nvlist_t *realprops;
4621 uint64_t version;
4622 uint64_t guid;
4623 prop_flags_t flags = { 0 };
4624 libzfs_handle_t *hdl = zhp->zpool_hdl;
4625
4626 assert(nvl != NULL);
4627
4628 (void) snprintf(errbuf, sizeof (errbuf),
4629 dgettext(TEXT_DOMAIN, "cannot set property for '%s'"),
4630 zhp->zpool_name);
4631
4632 if (vdev_get_guid(zhp, vdev, &guid) != 0) {
4633 (void) zfs_error(hdl, EZFS_BADDEV, errbuf);
4634 return (-1);
4635 }
4636
4637 version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
4638 if ((realprops = vdev_valid_proplist(zhp->zpool_hdl, nvl,
4639 version, flags, errbuf)) == NULL) {
4640 nvlist_free(nvl);
4641 return (-1);
4642 }
4643
4644 nvlist_free(nvl);
4645 nvl = realprops;
4646
4647 /*
4648 * Execute the corresponding ioctl() to set this property.
4649 */
4650 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
4651 zc.zc_guid = guid;
4652
4653 if (zcmd_write_src_nvlist(hdl, &zc, nvl) != 0) {
4654 nvlist_free(nvl);
4655 return (-1);
4656 }
4657
4658 ret = zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_PROPS, &zc);
4659
4660 zcmd_free_nvlists(&zc);
4661 nvlist_free(nvl);
4662
4663 if (ret)
4664 (void) zpool_vprop_standard_error(hdl, errno, errbuf);
4665
4666 return (ret);
4667 }
4668
/*
 * Callback state shared between for_each_vdev() and its callbacks.
 */
typedef struct vdev_cb {
	zpool_handle_t *vcb_zhp;	/* pool being walked */
	char *vcb_name;			/* name of the current vdev */
	uint64_t vcb_guid;		/* guid of the current vdev */
	boolean_t vcb_is_leaf;		/* current vdev has no children */
	boolean_t vcb_success;		/* set by callbacks to report a match */
	void *vcb_data;			/* callback-private data */
} vdev_cb_t;

typedef int (*vdev_callback_t)(vdev_cb_t *);
4679
/*
 * Invoke vdev_callback for every vdev in the pool, recursing into
 * interior vdevs.
 */
int
for_each_vdev(zpool_handle_t *zhp, nvlist_t *root,
    vdev_callback_t vdev_callback, vdev_cb_t *cb)
{
	int ret = 0;
	nvlist_t *config, *nvroot, **child, **child_list;
	uint32_t children, child_children, c;
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	/* A NULL root means: start from the pool's own vdev tree. */
	if (!root) {
		config = zpool_get_config(zhp, NULL);
		verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
		    &nvroot) == 0);
	} else {
		nvroot = root;
	}

	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0) {
		/* NOTE(review): writes to stderr instead of zfs_error_aux. */
		(void) fprintf(stderr, gettext("Failed to get the vdev "
		    "children details from the root nvlist\n"));
		return (-1);
	}

	cb->vcb_zhp = zhp;
	for (c = 0; c < children; c++) {
		/*
		 * Descend first: if this child has children of its own,
		 * the recursion visits them before the interior vdev
		 * itself, and vcb_is_leaf is cleared for the interior one.
		 */
		cb->vcb_is_leaf = B_TRUE;
		if (nvlist_lookup_nvlist_array(child[c], ZPOOL_CONFIG_CHILDREN,
		    &child_list, &child_children) == 0 &&
		    child_children > 0) {
			ret = for_each_vdev(zhp, child[c], vdev_callback, cb);
			if (ret)
				return (ret);
			cb->vcb_is_leaf = B_FALSE;
		}

		/* Populate per-vdev fields before invoking the callback. */
		cb->vcb_name = zpool_vdev_name(hdl, NULL, child[c], B_TRUE);
		verify(nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_GUID,
		    &cb->vcb_guid) == 0);

		ret |= vdev_callback(cb);
		free(cb->vcb_name);	/* name was allocated per iteration */
		if (ret)
			return (ret);
	}

	return (0);
}
4731
4732 int
4733 get_vdev_guid_callback(vdev_cb_t *cb)
4734 {
4735 if (!cb->vcb_is_leaf)
4736 return (0);
4737
4738 if (strncmp(cb->vcb_name, (char *)cb->vcb_data,
4739 strlen(cb->vcb_name)) == 0) {
4740 cb->vcb_success = B_TRUE;
4741 }
4742
4743 return (0);
4744 }
4745
4746 /*
4747 * Class of Storage (COS)
4748 */
/*ARGSUSED*/
static nvlist_t *
cos_valid_proplist(libzfs_handle_t *hdl, nvlist_t *props,
    uint64_t version, prop_flags_t flags, const char *errbuf)
{
	nvpair_t *elem;
	nvlist_t *retprops;	/* validated/parsed result, returned to caller */
	cos_prop_t prop;
	char *strval;
	uint64_t intval;

	if (nvlist_alloc(&retprops, NV_UNIQUE_NAME, 0) != 0) {
		(void) no_memory(hdl);
		return (NULL);
	}

	/* Walk every pair in the caller-supplied property list. */
	elem = NULL;
	while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
		const char *propname = nvpair_name(elem);

		/*
		 * Make sure this property is valid and applies to this type.
		 */
		if ((prop = cos_name_to_prop(propname)) == ZPROP_INVAL) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "invalid cos property '%s'"), propname);
			(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
			goto error;
		}

		if (cos_prop_readonly(prop)) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
			    "is readonly"), propname);
			(void) zfs_error(hdl, EZFS_PROPREADONLY, errbuf);
			goto error;
		}

		/*
		 * Decode the value into strval/intval; the parsed pair is
		 * recorded in 'retprops'.
		 */
		if (zprop_parse_value(hdl, elem, prop, ZFS_TYPE_COS, retprops,
		    &strval, &intval, errbuf) != 0)
			goto error;

		/*
		 * Perform additional checking for specific properties.
		 */
		switch (prop) {
		case COS_PROP_PREFERRED_READ:
			/* Expressed as a percentage, 0-100. */
			if (intval > 100) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "'%s' must be from 0 to 100"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP,
				    errbuf);
				goto error;
			}
			break;
		case COS_PROP_READ_MINACTIVE:
		case COS_PROP_AREAD_MINACTIVE:
		case COS_PROP_WRITE_MINACTIVE:
		case COS_PROP_AWRITE_MINACTIVE:
		case COS_PROP_SCRUB_MINACTIVE:
		case COS_PROP_READ_MAXACTIVE:
		case COS_PROP_AREAD_MAXACTIVE:
		case COS_PROP_WRITE_MAXACTIVE:
		case COS_PROP_AWRITE_MAXACTIVE:
		case COS_PROP_SCRUB_MAXACTIVE:
			/* Queue-depth knobs share a common 0-1000 range. */
			if (intval > 1000) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "'%s' must be from 0 to 1000"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP,
				    errbuf);
				goto error;
			}
			break;
		default:
			break;
		}
	}

	return (retprops);
error:
	nvlist_free(retprops);
	return (NULL);
}
4831
4832 int
4833 cos_alloc(zpool_handle_t *zhp, char *cosname, nvlist_t *nvl)
4834 {
4835 zfs_cmd_t zc = { 0 };
4836 libzfs_handle_t *hdl = zhp->zpool_hdl;
4837 char errbuf[1024];
4838 int error = 0;
4839
4840 (void) snprintf(errbuf, sizeof (errbuf),
4841 dgettext(TEXT_DOMAIN, "cannot allocate CoS descriptor for '%s'"),
4842 zhp->zpool_name);
4843
4844 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
4845 (void) strlcpy(zc.zc_string, cosname, sizeof (zc.zc_string));
4846
4847 if (zcmd_write_src_nvlist(zhp->zpool_hdl, &zc, nvl) != 0) {
4848 nvlist_free(nvl);
4849 return (-1);
4850 }
4851
4852 error = ioctl(hdl->libzfs_fd, ZFS_IOC_COS_ALLOC, &zc);
4853
4854 if (error)
4855 (void) zpool_vprop_standard_error(hdl, errno, errbuf);
4856
4857 return (error);
4858 }
4859
4860 int
4861 cos_free(zpool_handle_t *zhp, char *cosname, uint64_t guid, boolean_t force)
4862 {
4863 zfs_cmd_t zc = { 0 };
4864 libzfs_handle_t *hdl = zhp->zpool_hdl;
4865 char errbuf[1024];
4866 int error = 0;
4867
4868 (void) snprintf(errbuf, sizeof (errbuf),
4869 dgettext(TEXT_DOMAIN, "cannot free CoS descriptor for '%s'"),
4870 zhp->zpool_name);
4871
4872 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
4873 (void) strlcpy(zc.zc_string, cosname, sizeof (zc.zc_string));
4874 zc.zc_guid = guid;
4875
4876 zc.zc_cookie = (uint64_t)force;
4877
4878 error = ioctl(hdl->libzfs_fd, ZFS_IOC_COS_FREE, &zc);
4879
4880 if (error)
4881 (void) zpool_vprop_standard_error(hdl, errno, errbuf);
4882
4883 return (error);
4884 }
4885
4886 int
4887 cos_list(zpool_handle_t *zhp, nvlist_t **nvp)
4888 {
4889 zfs_cmd_t zc = { 0 };
4890 libzfs_handle_t *hdl = zhp->zpool_hdl;
4891
4892 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
4893
4894 if (zcmd_alloc_dst_nvlist(hdl, &zc, 0) != 0)
4895 return (-1);
4896
4897 while (ioctl(hdl->libzfs_fd, ZFS_IOC_COS_LIST, &zc) != 0) {
4898 if (errno == ENOMEM) {
4899 if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
4900 zcmd_free_nvlists(&zc);
4901 return (-1);
4902 }
4903 } else {
4904 zcmd_free_nvlists(&zc);
4905 return (-1);
4906 }
4907 }
4908
4909 if (zcmd_read_dst_nvlist(hdl, &zc, nvp) != 0) {
4910 zcmd_free_nvlists(&zc);
4911 return (-1);
4912 }
4913
4914 zcmd_free_nvlists(&zc);
4915
4916 return (0);
4917 }
4918
4919 int
4920 cos_get_all_props(zpool_handle_t *zhp, const char *cos, nvlist_t **nvp)
4921 {
4922 uint64_t cos_id;
4923 zfs_cmd_t zc = { 0 };
4924 libzfs_handle_t *hdl = zhp->zpool_hdl;
4925 char *endp;
4926 char errbuf[1024];
4927
4928 (void) snprintf(errbuf, sizeof (errbuf),
4929 dgettext(TEXT_DOMAIN, "cannot set property for '%s'"),
4930 zhp->zpool_name);
4931
4932 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
4933
4934 if (zcmd_alloc_dst_nvlist(hdl, &zc, 0) != 0)
4935 return (-1);
4936
4937 cos_id = strtoll(cos, &endp, 10);
4938 zc.zc_guid = cos_id;
4939 if (cos_id == 0 && cos == endp)
4940 (void) strlcpy(zc.zc_string, cos, sizeof (zc.zc_string));
4941 else
4942 zc.zc_string[0] = '\0';
4943
4944 while (ioctl(hdl->libzfs_fd, ZFS_IOC_COS_GET_PROPS, &zc) != 0) {
4945 if (errno == ENOMEM) {
4946 if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
4947 zcmd_free_nvlists(&zc);
4948 return (-1);
4949 }
4950 } else {
4951 zcmd_free_nvlists(&zc);
4952 (void) zpool_standard_error(hdl, errno, errbuf);
4953 return (-1);
4954 }
4955 }
4956
4957 if (zcmd_read_dst_nvlist(hdl, &zc, nvp) != 0) {
4958 zcmd_free_nvlists(&zc);
4959 return (-1);
4960 }
4961
4962 zcmd_free_nvlists(&zc);
4963
4964 return (0);
4965 }
4966
4967 int
4968 cos_get_prop(zpool_handle_t *zhp, const char *cos, cos_prop_t prop,
4969 char *buf, size_t len, nvlist_t **nvp)
4970 {
4971 uint64_t intval;
4972 const char *strval;
4973 nvlist_t *nvl;
4974 char errbuf[1024];
4975
4976 (void) snprintf(errbuf, sizeof (errbuf),
4977 dgettext(TEXT_DOMAIN, "cannot set property for '%s'"),
4978 zhp->zpool_name);
4979
4980 assert(nvp != NULL);
4981
4982 if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL)
4983 return (-1);
4984
4985 if (*nvp == NULL && cos_get_all_props(zhp, cos, nvp) != 0)
4986 return (-1);
4987 nvl = *nvp;
4988
4989 switch (cos_prop_get_type(prop)) {
4990 case PROP_TYPE_STRING:
4991 if (nvlist_lookup_string(nvl, cos_prop_to_name(prop),
4992 (char **)&strval) != 0)
4993 if ((strval = (char *)cos_prop_default_string(prop))
4994 == NULL)
4995 strval = "-";
4996
4997 (void) strlcpy(buf, strval, len);
4998 break;
4999
5000 case PROP_TYPE_NUMBER:
5001 if (nvlist_lookup_uint64(nvl, cos_prop_to_name(prop),
5002 &intval) != 0)
5003 intval = cos_prop_default_numeric(prop);
5004 (void) snprintf(buf, len, "%llu", (u_longlong_t)intval);
5005 break;
5006
5007 case PROP_TYPE_INDEX:
5008 if (nvlist_lookup_uint64(nvl, cos_prop_to_name(prop),
5009 &intval) != 0)
5010 intval = cos_prop_default_numeric(prop);
5011 if (cos_prop_index_to_string(prop, intval, &strval) != 0) {
5012 (void) zfs_error(zhp->zpool_hdl, EZFS_BADPROP, errbuf);
5013 return (-1);
5014 }
5015 (void) strlcpy(buf, strval, len);
5016 break;
5017
5018 default:
5019 abort();
5020 }
5021
5022 return (0);
5023 }
5024
5025 /*
5026 * Set cos properties nvlist
5027 */
5028 int
5029 cos_set_proplist(zpool_handle_t *zhp, const char *cos, nvlist_t *nvl)
5030 {
5031 zfs_cmd_t zc = { 0 };
5032 int ret = -1;
5033 char errbuf[1024];
5034 char *endp;
5035 nvlist_t *realprops;
5036 uint64_t version;
5037 uint64_t cos_id;
5038 prop_flags_t flags = { 0 };
5039
5040 assert(nvl != NULL);
5041
5042 (void) snprintf(errbuf, sizeof (errbuf),
5043 dgettext(TEXT_DOMAIN, "cannot set property for '%s'"),
5044 zhp->zpool_name);
5045
5046 version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
5047 if ((realprops = cos_valid_proplist(zhp->zpool_hdl, nvl,
5048 version, flags, errbuf)) == NULL) {
5049 nvlist_free(nvl);
5050 return (-1);
5051 }
5052
5053 nvlist_free(nvl);
5054 nvl = realprops;
5055
5056 /*
5057 * Execute the corresponding ioctl() to set this property.
5058 */
5059 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
5060
5061 cos_id = strtoll(cos, &endp, 10);
5062 zc.zc_guid = cos_id;
5063 if (cos_id == 0 && cos == endp)
5064 (void) strlcpy(zc.zc_string, cos, sizeof (zc.zc_string));
5065 else
5066 zc.zc_string[0] = '\0';
5067
5068 if (zcmd_write_src_nvlist(zhp->zpool_hdl, &zc, nvl) != 0) {
5069 nvlist_free(nvl);
5070 return (-1);
5071 }
5072
5073 ret = zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_COS_SET_PROPS, &zc);
5074
5075 zcmd_free_nvlists(&zc);
5076 nvlist_free(nvl);
5077
5078 if (ret)
5079 (void) zpool_vprop_standard_error_fmt(zhp->zpool_hdl,
5080 errno, errbuf);
5081
5082 return (ret);
5083 }
|