Print this page
2619 asynchronous destruction of ZFS file systems
2747 SPA versioning with zfs feature flags
Reviewed by: Matt Ahrens <mahrens@delphix.com>
Reviewed by: George Wilson <gwilson@delphix.com>
Reviewed by: Richard Lowe <richlowe@richlowe.net>
Reviewed by: Dan Kruchinin <dan.kruchinin@gmail.com>
Approved by: Dan McDonald <danmcd@nexenta.com>
| Split |
Close |
| Expand all |
| Collapse all |
--- old/usr/src/lib/libzfs/common/libzfs_config.c
+++ new/usr/src/lib/libzfs/common/libzfs_config.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
|
↓ open down ↓ |
10 lines elided |
↑ open up ↑ |
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 +
21 22 /*
22 23 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
23 24 * Use is subject to license terms.
24 25 */
25 26
26 27 /*
28 + * Copyright (c) 2012 by Delphix. All rights reserved.
29 + */
30 +
31 +/*
27 32 * The pool configuration repository is stored in /etc/zfs/zpool.cache as a
28 33 * single packed nvlist. While it would be nice to just read in this
29 34 * file from userland, this wouldn't work from a local zone. So we have to have
30 35 * a zpool ioctl to return the complete configuration for all pools. In the
31 36 * global zone, this will be identical to reading the file and unpacking it in
32 37 * userland.
33 38 */
34 39
35 40 #include <errno.h>
36 41 #include <sys/stat.h>
37 42 #include <fcntl.h>
38 43 #include <stddef.h>
39 44 #include <string.h>
40 45 #include <unistd.h>
41 46 #include <libintl.h>
42 47 #include <libuutil.h>
43 48
44 49 #include "libzfs_impl.h"
45 50
/*
 * One entry in the per-handle cached pool namespace: maps a pool name to
 * its most recently loaded configuration nvlist.  Nodes live in the
 * handle's AVL tree (libzfs_ns_avl), ordered by cn_name (see
 * config_node_compare()).
 */
typedef struct config_node {
	char *cn_name;		/* pool name; zfs_strdup()'d, freed with node */
	nvlist_t *cn_config;	/* duplicated config nvlist; freed with node */
	uu_avl_node_t cn_avl;	/* linkage into libzfs_ns_avl */
} config_node_t;
51 56
52 57 /* ARGSUSED */
53 58 static int
54 59 config_node_compare(const void *a, const void *b, void *unused)
55 60 {
56 61 int ret;
57 62
58 63 const config_node_t *ca = (config_node_t *)a;
59 64 const config_node_t *cb = (config_node_t *)b;
60 65
61 66 ret = strcmp(ca->cn_name, cb->cn_name);
62 67
63 68 if (ret < 0)
64 69 return (-1);
65 70 else if (ret > 0)
66 71 return (1);
67 72 else
68 73 return (0);
69 74 }
70 75
71 76 void
72 77 namespace_clear(libzfs_handle_t *hdl)
73 78 {
74 79 if (hdl->libzfs_ns_avl) {
75 80 config_node_t *cn;
76 81 void *cookie = NULL;
77 82
78 83 while ((cn = uu_avl_teardown(hdl->libzfs_ns_avl,
79 84 &cookie)) != NULL) {
80 85 nvlist_free(cn->cn_config);
81 86 free(cn->cn_name);
82 87 free(cn);
83 88 }
84 89
85 90 uu_avl_destroy(hdl->libzfs_ns_avl);
86 91 hdl->libzfs_ns_avl = NULL;
87 92 }
88 93
89 94 if (hdl->libzfs_ns_avlpool) {
90 95 uu_avl_pool_destroy(hdl->libzfs_ns_avlpool);
91 96 hdl->libzfs_ns_avlpool = NULL;
92 97 }
93 98 }
94 99
/*
 * Loads the pool namespace, or re-loads it if the cache has changed.
 *
 * Asks the kernel (ZFS_IOC_POOL_CONFIGS) for the full set of pool
 * configurations, passing the generation number from the last load;
 * EEXIST from the ioctl means nothing has changed and the cached AVL
 * tree is left untouched.  Otherwise the tree is rebuilt from scratch.
 * Returns 0 on success (including "no change"), -1 on error.
 */
static int
namespace_reload(libzfs_handle_t *hdl)
{
	nvlist_t *config;
	config_node_t *cn;
	nvpair_t *elem;
	zfs_cmd_t zc = { 0 };
	void *cookie;

	if (hdl->libzfs_ns_gen == 0) {
		/*
		 * This is the first time we've accessed the configuration
		 * cache.  Initialize the AVL tree and then fall through to the
		 * common code.
		 */
		if ((hdl->libzfs_ns_avlpool = uu_avl_pool_create("config_pool",
		    sizeof (config_node_t),
		    offsetof(config_node_t, cn_avl),
		    config_node_compare, UU_DEFAULT)) == NULL)
			return (no_memory(hdl));

		if ((hdl->libzfs_ns_avl = uu_avl_create(hdl->libzfs_ns_avlpool,
		    NULL, UU_DEFAULT)) == NULL)
			return (no_memory(hdl));
	}

	if (zcmd_alloc_dst_nvlist(hdl, &zc, 0) != 0)
		return (-1);

	for (;;) {
		zc.zc_cookie = hdl->libzfs_ns_gen;
		if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_CONFIGS, &zc) != 0) {
			switch (errno) {
			case EEXIST:
				/*
				 * The namespace hasn't changed.
				 */
				zcmd_free_nvlists(&zc);
				return (0);

			case ENOMEM:
				/*
				 * Destination buffer was too small for the
				 * packed nvlist; grow it and retry.
				 */
				if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
					zcmd_free_nvlists(&zc);
					return (-1);
				}
				break;

			default:
				zcmd_free_nvlists(&zc);
				return (zfs_standard_error(hdl, errno,
				    dgettext(TEXT_DOMAIN, "failed to read "
				    "pool configuration")));
			}
		} else {
			/* Success: remember the new generation number. */
			hdl->libzfs_ns_gen = zc.zc_cookie;
			break;
		}
	}

	if (zcmd_read_dst_nvlist(hdl, &zc, &config) != 0) {
		zcmd_free_nvlists(&zc);
		return (-1);
	}

	zcmd_free_nvlists(&zc);

	/*
	 * Clear out any existing configuration information.
	 */
	cookie = NULL;
	while ((cn = uu_avl_teardown(hdl->libzfs_ns_avl, &cookie)) != NULL) {
		nvlist_free(cn->cn_config);
		free(cn->cn_name);
		free(cn);
	}

	/*
	 * Rebuild the tree: each nvpair in the returned nvlist is one pool,
	 * name -> config nvlist.  The child nvlist is duplicated so the
	 * cache owns its copy independent of 'config'.
	 */
	elem = NULL;
	while ((elem = nvlist_next_nvpair(config, elem)) != NULL) {
		nvlist_t *child;
		uu_avl_index_t where;

		if ((cn = zfs_alloc(hdl, sizeof (config_node_t))) == NULL) {
			nvlist_free(config);
			return (-1);
		}

		if ((cn->cn_name = zfs_strdup(hdl,
		    nvpair_name(elem))) == NULL) {
			free(cn);
			nvlist_free(config);
			return (-1);
		}

		verify(nvpair_value_nvlist(elem, &child) == 0);
		if (nvlist_dup(child, &cn->cn_config, 0) != 0) {
			free(cn->cn_name);
			free(cn);
			nvlist_free(config);
			return (no_memory(hdl));
		}
		/* The tree was just emptied, so the name cannot collide. */
		verify(uu_avl_find(hdl->libzfs_ns_avl, cn, NULL, &where)
		    == NULL);

		uu_avl_insert(hdl->libzfs_ns_avl, cn, where);
	}

	nvlist_free(config);
	return (0);
}
207 212
208 213 /*
209 214 * Retrieve the configuration for the given pool. The configuration is a nvlist
|
↓ open down ↓ |
173 lines elided |
↑ open up ↑ |
210 215 * describing the vdevs, as well as the statistics associated with each one.
211 216 */
212 217 nvlist_t *
213 218 zpool_get_config(zpool_handle_t *zhp, nvlist_t **oldconfig)
214 219 {
215 220 if (oldconfig)
216 221 *oldconfig = zhp->zpool_old_config;
217 222 return (zhp->zpool_config);
218 223 }
219 224
225 +/*
226 + * Retrieves a list of enabled features and their refcounts and caches it in
227 + * the pool handle.
228 + */
229 +nvlist_t *
230 +zpool_get_features(zpool_handle_t *zhp)
231 +{
232 + nvlist_t *config, *features;
233 +
234 + config = zpool_get_config(zhp, NULL);
235 +
236 + if (config == NULL || !nvlist_exists(config,
237 + ZPOOL_CONFIG_FEATURE_STATS)) {
238 + int error;
239 + boolean_t missing = B_FALSE;
240 +
241 + error = zpool_refresh_stats(zhp, &missing);
242 +
243 + if (error != 0 || missing)
244 + return (NULL);
245 +
246 + config = zpool_get_config(zhp, NULL);
247 + }
248 +
249 + verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_FEATURE_STATS,
250 + &features) == 0);
251 +
252 + return (features);
253 +}
254 +
/*
 * Refresh the vdev statistics associated with the given pool. This is used in
 * iostat to show configuration changes and determine the delta from the last
 * time the function was called. This function can fail, in case the pool has
 * been destroyed.
 *
 * On return, *missing is B_TRUE if the kernel reports the pool no longer
 * exists (ENOENT/EINVAL).  Returns 0 on success or pool-missing, -1 on
 * allocation/unpack failure.
 */
int
zpool_refresh_stats(zpool_handle_t *zhp, boolean_t *missing)
{
	zfs_cmd_t zc = { 0 };
	int error;
	nvlist_t *config;
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	*missing = B_FALSE;
	(void) strcpy(zc.zc_name, zhp->zpool_name);

	/*
	 * Start with the destination buffer size that worked last time
	 * (or a 64K default on first use) to minimize ENOMEM retries.
	 */
	if (zhp->zpool_config_size == 0)
		zhp->zpool_config_size = 1 << 16;

	if (zcmd_alloc_dst_nvlist(hdl, &zc, zhp->zpool_config_size) != 0)
		return (-1);

	for (;;) {
		if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_POOL_STATS,
		    &zc) == 0) {
			/*
			 * The real error is returned in the zc_cookie field.
			 */
			error = zc.zc_cookie;
			break;
		}

		if (errno == ENOMEM) {
			/* Destination buffer too small; grow it and retry. */
			if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
				zcmd_free_nvlists(&zc);
				return (-1);
			}
		} else {
			zcmd_free_nvlists(&zc);
			/*
			 * ENOENT/EINVAL mean the pool is gone; report that
			 * through *missing rather than as a hard error.
			 */
			if (errno == ENOENT || errno == EINVAL)
				*missing = B_TRUE;
			zhp->zpool_state = POOL_STATE_UNAVAIL;
			return (0);
		}
	}

	if (zcmd_read_dst_nvlist(hdl, &zc, &config) != 0) {
		zcmd_free_nvlists(&zc);
		return (-1);
	}

	zcmd_free_nvlists(&zc);

	/* Remember the size that worked for the next refresh. */
	zhp->zpool_config_size = zc.zc_nvlist_dst_size;

	if (zhp->zpool_config != NULL) {
		uint64_t oldtxg, newtxg;

		verify(nvlist_lookup_uint64(zhp->zpool_config,
		    ZPOOL_CONFIG_POOL_TXG, &oldtxg) == 0);
		verify(nvlist_lookup_uint64(config,
		    ZPOOL_CONFIG_POOL_TXG, &newtxg) == 0);

		if (zhp->zpool_old_config != NULL)
			nvlist_free(zhp->zpool_old_config);

		/*
		 * Only keep the previous config (for delta computation)
		 * when it belongs to the same txg; a txg change means the
		 * pool was exported/re-imported or otherwise reset, so the
		 * old config is not comparable and is discarded.
		 */
		if (oldtxg != newtxg) {
			nvlist_free(zhp->zpool_config);
			zhp->zpool_old_config = NULL;
		} else {
			zhp->zpool_old_config = zhp->zpool_config;
		}
	}

	zhp->zpool_config = config;
	/* zc_cookie carried the pool's real status from the kernel. */
	if (error)
		zhp->zpool_state = POOL_STATE_UNAVAIL;
	else
		zhp->zpool_state = POOL_STATE_ACTIVE;

	return (0);
}
303 338
304 339 /*
305 340 * Iterate over all pools in the system.
306 341 */
307 342 int
308 343 zpool_iter(libzfs_handle_t *hdl, zpool_iter_f func, void *data)
309 344 {
310 345 config_node_t *cn;
311 346 zpool_handle_t *zhp;
312 347 int ret;
313 348
314 349 /*
315 350 * If someone makes a recursive call to zpool_iter(), we want to avoid
316 351 * refreshing the namespace because that will invalidate the parent
317 352 * context. We allow recursive calls, but simply re-use the same
318 353 * namespace AVL tree.
319 354 */
320 355 if (!hdl->libzfs_pool_iter && namespace_reload(hdl) != 0)
321 356 return (-1);
322 357
323 358 hdl->libzfs_pool_iter++;
324 359 for (cn = uu_avl_first(hdl->libzfs_ns_avl); cn != NULL;
325 360 cn = uu_avl_next(hdl->libzfs_ns_avl, cn)) {
326 361
327 362 if (zpool_open_silent(hdl, cn->cn_name, &zhp) != 0) {
328 363 hdl->libzfs_pool_iter--;
329 364 return (-1);
330 365 }
331 366
332 367 if (zhp == NULL)
333 368 continue;
334 369
335 370 if ((ret = func(zhp, data)) != 0) {
336 371 hdl->libzfs_pool_iter--;
337 372 return (ret);
338 373 }
339 374 }
340 375 hdl->libzfs_pool_iter--;
341 376
342 377 return (0);
343 378 }
344 379
345 380 /*
346 381 * Iterate over root datasets, calling the given function for each. The zfs
347 382 * handle passed each time must be explicitly closed by the callback.
348 383 */
349 384 int
350 385 zfs_iter_root(libzfs_handle_t *hdl, zfs_iter_f func, void *data)
351 386 {
352 387 config_node_t *cn;
353 388 zfs_handle_t *zhp;
354 389 int ret;
355 390
356 391 if (namespace_reload(hdl) != 0)
357 392 return (-1);
358 393
359 394 for (cn = uu_avl_first(hdl->libzfs_ns_avl); cn != NULL;
360 395 cn = uu_avl_next(hdl->libzfs_ns_avl, cn)) {
361 396
362 397 if ((zhp = make_dataset_handle(hdl, cn->cn_name)) == NULL)
363 398 continue;
364 399
365 400 if ((ret = func(zhp, data)) != 0)
366 401 return (ret);
367 402 }
368 403
369 404 return (0);
370 405 }
|
↓ open down ↓ |
141 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX