11945 pool import performance regression due to repeated libshare initialization
Reviewed by: Sanjay Nadkarni <sanjay.nadkarni@nexenta.com>
Reviewed by: Evan Layton <evan.layton@nexenta.com>
Reviewed by: Dan McDonald <danmcd@joyent.com>
Reviewed by: Jason King <jason.brian.king@gmail.com>
--- old/usr/src/lib/libzfs/common/libzfs_mount.c
+++ new/usr/src/lib/libzfs/common/libzfs_mount.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright 2015 Nexenta Systems, Inc. All rights reserved.
24 24 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
25 25 * Copyright (c) 2014, 2017 by Delphix. All rights reserved.
26 26 * Copyright 2016 Igor Kozhukhov <ikozhukhov@gmail.com>
27 27 * Copyright 2017 Joyent, Inc.
28 28 * Copyright 2017 RackTop Systems.
29 29 * Copyright 2018 OmniOS Community Edition (OmniOSce) Association.
30 30 */
31 31
32 32 /*
33 33 * Routines to manage ZFS mounts. We separate all the nasty routines that have
34 34 * to deal with the OS. The following functions are the main entry points --
35 35 * they are used by mount and unmount and when changing a filesystem's
36 36 * mountpoint.
37 37 *
38 38 * zfs_is_mounted()
39 39 * zfs_mount()
40 40 * zfs_unmount()
41 41 * zfs_unmountall()
42 42 *
43 43 * This file also contains the functions used to manage sharing filesystems via
44 44  * NFS and SMB:
45 45 *
46 46 * zfs_is_shared()
47 47 * zfs_share()
48 48 * zfs_unshare()
49 49 *
50 50 * zfs_is_shared_nfs()
51 51 * zfs_is_shared_smb()
52 52 * zfs_share_proto()
53 53 * zfs_shareall();
54 54 * zfs_unshare_nfs()
55 55 * zfs_unshare_smb()
56 56 * zfs_unshareall_nfs()
57 57 * zfs_unshareall_smb()
58 58 * zfs_unshareall()
59 59 * zfs_unshareall_bypath()
60 60 *
61 61 * The following functions are available for pool consumers, and will
62 62  * mount/unmount and share/unshare all datasets within the pool:
63 63 *
64 64 * zpool_enable_datasets()
65 65 * zpool_disable_datasets()
66 66 */
67 67
68 68 #include <dirent.h>
69 69 #include <dlfcn.h>
70 70 #include <errno.h>
71 71 #include <fcntl.h>
72 72 #include <libgen.h>
73 73 #include <libintl.h>
74 74 #include <stdio.h>
75 75 #include <stdlib.h>
76 76 #include <strings.h>
77 77 #include <unistd.h>
78 78 #include <zone.h>
79 79 #include <sys/mntent.h>
80 80 #include <sys/mount.h>
81 81 #include <sys/stat.h>
82 82 #include <sys/statvfs.h>
83 83 #include <sys/dsl_crypt.h>
84 84
85 85 #include <libzfs.h>
86 86
87 87 #include "libzfs_impl.h"
88 88 #include "libzfs_taskq.h"
89 89
90 90 #include <libshare.h>
91 91 #include <sys/systeminfo.h>
92 92 #define MAXISALEN 257 /* based on sysinfo(2) man page */
93 93
94 94 static int mount_tq_nthr = 512; /* taskq threads for multi-threaded mounting */
95 95
96 96 static void zfs_mount_task(void *);
97 97 static int zfs_share_proto(zfs_handle_t *, zfs_share_proto_t *);
98 98 zfs_share_type_t zfs_is_shared_proto(zfs_handle_t *, char **,
99 99 zfs_share_proto_t);
100 100
101 101 /*
102 102 * The share protocols table must be in the same order as the zfs_share_proto_t
103 103 * enum in libzfs_impl.h
104 104 */
105 105 typedef struct {
106 106 zfs_prop_t p_prop;
107 107 char *p_name;
108 108 int p_share_err;
109 109 int p_unshare_err;
110 110 } proto_table_t;
111 111
112 112 proto_table_t proto_table[PROTO_END] = {
113 113 {ZFS_PROP_SHARENFS, "nfs", EZFS_SHARENFSFAILED, EZFS_UNSHARENFSFAILED},
114 114 {ZFS_PROP_SHARESMB, "smb", EZFS_SHARESMBFAILED, EZFS_UNSHARESMBFAILED},
115 115 };
116 116
117 117 zfs_share_proto_t nfs_only[] = {
118 118 PROTO_NFS,
119 119 PROTO_END
120 120 };
121 121
122 122 zfs_share_proto_t smb_only[] = {
123 123 PROTO_SMB,
124 124 PROTO_END
125 125 };
126 126 zfs_share_proto_t share_all_proto[] = {
127 127 PROTO_NFS,
128 128 PROTO_SMB,
129 129 PROTO_END
130 130 };
131 131
132 132 /*
133 133 * Search the sharetab for the given mountpoint and protocol, returning
134 134 * a zfs_share_type_t value.
135 135 */
136 136 static zfs_share_type_t
137 137 is_shared(libzfs_handle_t *hdl, const char *mountpoint, zfs_share_proto_t proto)
138 138 {
139 139 char buf[MAXPATHLEN], *tab;
140 140 char *ptr;
141 141
142 142 if (hdl->libzfs_sharetab == NULL)
143 143 return (SHARED_NOT_SHARED);
144 144
145 145 (void) fseek(hdl->libzfs_sharetab, 0, SEEK_SET);
146 146
147 147 while (fgets(buf, sizeof (buf), hdl->libzfs_sharetab) != NULL) {
148 148
149 149 /* the mountpoint is the first entry on each line */
150 150 if ((tab = strchr(buf, '\t')) == NULL)
151 151 continue;
152 152
153 153 *tab = '\0';
154 154 if (strcmp(buf, mountpoint) == 0) {
155 155 /*
156 156 * the protocol field is the third field
157 157 * skip over second field
158 158 */
159 159 ptr = ++tab;
160 160 if ((tab = strchr(ptr, '\t')) == NULL)
161 161 continue;
162 162 ptr = ++tab;
163 163 if ((tab = strchr(ptr, '\t')) == NULL)
164 164 continue;
165 165 *tab = '\0';
166 166 if (strcmp(ptr,
167 167 proto_table[proto].p_name) == 0) {
168 168 switch (proto) {
169 169 case PROTO_NFS:
170 170 return (SHARED_NFS);
171 171 case PROTO_SMB:
172 172 return (SHARED_SMB);
173 173 default:
174 174 return (0);
175 175 }
176 176 }
177 177 }
178 178 }
179 179
180 180 return (SHARED_NOT_SHARED);
181 181 }
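/*
 * For illustration (not part of this change): a sharetab entry is a
 * tab-separated line whose first field is the mountpoint and whose third
 * field is the protocol name, so a hypothetical line such as
 *
 *	/tank/home	-	nfs	rw
 *
 * makes is_shared(hdl, "/tank/home", PROTO_NFS) return SHARED_NFS.
 */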
182 182
183 183 static boolean_t
184 184 dir_is_empty_stat(const char *dirname)
185 185 {
186 186 struct stat st;
187 187
188 188 /*
189 189 * We only want to return false if the given path is a non empty
190 190 * directory, all other errors are handled elsewhere.
191 191 */
192 192 if (stat(dirname, &st) < 0 || !S_ISDIR(st.st_mode)) {
193 193 return (B_TRUE);
194 194 }
195 195
196 196 /*
197 197 * An empty directory will still have two entries in it, one
198 198 * entry for each of "." and "..".
199 199 */
200 200 if (st.st_size > 2) {
201 201 return (B_FALSE);
202 202 }
203 203
204 204 return (B_TRUE);
205 205 }
206 206
207 207 static boolean_t
208 208 dir_is_empty_readdir(const char *dirname)
209 209 {
210 210 DIR *dirp;
211 211 struct dirent64 *dp;
212 212 int dirfd;
213 213
214 214 if ((dirfd = openat(AT_FDCWD, dirname,
215 215 O_RDONLY | O_NDELAY | O_LARGEFILE | O_CLOEXEC, 0)) < 0) {
216 216 return (B_TRUE);
217 217 }
218 218
219 219 if ((dirp = fdopendir(dirfd)) == NULL) {
220 220 (void) close(dirfd);
221 221 return (B_TRUE);
222 222 }
223 223
224 224 while ((dp = readdir64(dirp)) != NULL) {
225 225
226 226 if (strcmp(dp->d_name, ".") == 0 ||
227 227 strcmp(dp->d_name, "..") == 0)
228 228 continue;
229 229
230 230 (void) closedir(dirp);
231 231 return (B_FALSE);
232 232 }
233 233
234 234 (void) closedir(dirp);
235 235 return (B_TRUE);
236 236 }
237 237
238 238 /*
239 239 * Returns true if the specified directory is empty. If we can't open the
240 240 * directory at all, return true so that the mount can fail with a more
241 241 * informative error message.
242 242 */
243 243 static boolean_t
244 244 dir_is_empty(const char *dirname)
245 245 {
246 246 struct statvfs64 st;
247 247
248 248 /*
249 249 * If the statvfs call fails or the filesystem is not a ZFS
250 250 * filesystem, fall back to the slow path which uses readdir.
251 251 */
252 252 if ((statvfs64(dirname, &st) != 0) ||
253 253 (strcmp(st.f_basetype, "zfs") != 0)) {
254 254 return (dir_is_empty_readdir(dirname));
255 255 }
256 256
257 257 /*
258 258 * At this point, we know the provided path is on a ZFS
259 259 * filesystem, so we can use stat instead of readdir to
260 260 * determine if the directory is empty or not. We try to avoid
261 261 * using readdir because that requires opening "dirname"; this
262 262 * open file descriptor can potentially end up in a child
263 263 * process if there's a concurrent fork, thus preventing the
264 264 * zfs_mount() from otherwise succeeding (the open file
265 265 * descriptor inherited by the child process will cause the
266 266 * parent's mount to fail with EBUSY). The performance
267 267 * implications of replacing the open, read, and close with a
268 268 * single stat is nice; but is not the main motivation for the
269 269 * added complexity.
270 270 */
271 271 return (dir_is_empty_stat(dirname));
272 272 }
273 273
274 274 /*
275 275 * Checks to see if the mount is active. If the filesystem is mounted, we fill
276 276 * in 'where' with the current mountpoint, and return 1. Otherwise, we return
277 277 * 0.
278 278 */
279 279 boolean_t
280 280 is_mounted(libzfs_handle_t *zfs_hdl, const char *special, char **where)
281 281 {
282 282 struct mnttab entry;
283 283
284 284 if (libzfs_mnttab_find(zfs_hdl, special, &entry) != 0)
285 285 return (B_FALSE);
286 286
287 287 if (where != NULL)
288 288 *where = zfs_strdup(zfs_hdl, entry.mnt_mountp);
289 289
290 290 return (B_TRUE);
291 291 }
292 292
293 293 boolean_t
294 294 zfs_is_mounted(zfs_handle_t *zhp, char **where)
295 295 {
296 296 return (is_mounted(zhp->zfs_hdl, zfs_get_name(zhp), where));
297 297 }
298 298
299 299 /*
300 300 * Returns true if the given dataset is mountable, false otherwise. Returns the
301 301 * mountpoint in 'buf'.
302 302 */
303 303 static boolean_t
304 304 zfs_is_mountable(zfs_handle_t *zhp, char *buf, size_t buflen,
305 305 zprop_source_t *source)
306 306 {
307 307 char sourceloc[MAXNAMELEN];
308 308 zprop_source_t sourcetype;
309 309
310 310 if (!zfs_prop_valid_for_type(ZFS_PROP_MOUNTPOINT, zhp->zfs_type))
311 311 return (B_FALSE);
312 312
313 313 verify(zfs_prop_get(zhp, ZFS_PROP_MOUNTPOINT, buf, buflen,
314 314 &sourcetype, sourceloc, sizeof (sourceloc), B_FALSE) == 0);
315 315
316 316 if (strcmp(buf, ZFS_MOUNTPOINT_NONE) == 0 ||
317 317 strcmp(buf, ZFS_MOUNTPOINT_LEGACY) == 0)
318 318 return (B_FALSE);
319 319
320 320 if (zfs_prop_get_int(zhp, ZFS_PROP_CANMOUNT) == ZFS_CANMOUNT_OFF)
321 321 return (B_FALSE);
322 322
323 323 if (zfs_prop_get_int(zhp, ZFS_PROP_ZONED) &&
324 324 getzoneid() == GLOBAL_ZONEID)
325 325 return (B_FALSE);
326 326
327 327 if (source)
328 328 *source = sourcetype;
329 329
330 330 return (B_TRUE);
331 331 }
332 332
333 333 /*
334 334 * Mount the given filesystem.
335 335 */
336 336 int
337 337 zfs_mount(zfs_handle_t *zhp, const char *options, int flags)
338 338 {
339 339 struct stat buf;
340 340 char mountpoint[ZFS_MAXPROPLEN];
341 341 char mntopts[MNT_LINE_MAX];
342 342 libzfs_handle_t *hdl = zhp->zfs_hdl;
343 343 uint64_t keystatus;
344 344 int rc;
345 345
346 346 if (options == NULL)
347 347 mntopts[0] = '\0';
348 348 else
349 349 (void) strlcpy(mntopts, options, sizeof (mntopts));
350 350
351 351 /*
352 352 * If the pool is imported read-only then all mounts must be read-only
353 353 */
354 354 if (zpool_get_prop_int(zhp->zpool_hdl, ZPOOL_PROP_READONLY, NULL))
355 355 flags |= MS_RDONLY;
356 356
357 357 if (!zfs_is_mountable(zhp, mountpoint, sizeof (mountpoint), NULL))
358 358 return (0);
359 359
360 360 /*
361 361 * If the filesystem is encrypted the key must be loaded in order to
362 362 * mount. If the key isn't loaded, the MS_CRYPT flag decides whether
363 363 * or not we attempt to load the keys. Note: we must call
364 364 * zfs_refresh_properties() here since some callers of this function
365 365 * (most notably zpool_enable_datasets()) may implicitly load our key
366 366 * by loading the parent's key first.
367 367 */
368 368 if (zfs_prop_get_int(zhp, ZFS_PROP_ENCRYPTION) != ZIO_CRYPT_OFF) {
369 369 zfs_refresh_properties(zhp);
370 370 keystatus = zfs_prop_get_int(zhp, ZFS_PROP_KEYSTATUS);
371 371
372 372 /*
373 373 * If the key is unavailable and MS_CRYPT is set give the
374 374 * user a chance to enter the key. Otherwise just fail
375 375 * immediately.
376 376 */
377 377 if (keystatus == ZFS_KEYSTATUS_UNAVAILABLE) {
378 378 if (flags & MS_CRYPT) {
379 379 rc = zfs_crypto_load_key(zhp, B_FALSE, NULL);
380 380 if (rc != 0)
381 381 return (rc);
382 382 } else {
383 383 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
384 384 "encryption key not loaded"));
385 385 return (zfs_error_fmt(hdl, EZFS_MOUNTFAILED,
386 386 dgettext(TEXT_DOMAIN, "cannot mount '%s'"),
387 387 mountpoint));
388 388 }
389 389 }
390 390
391 391 }
392 392
393 393 /* Create the directory if it doesn't already exist */
394 394 if (lstat(mountpoint, &buf) != 0) {
395 395 if (mkdirp(mountpoint, 0755) != 0) {
396 396 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
397 397 "failed to create mountpoint"));
398 398 return (zfs_error_fmt(hdl, EZFS_MOUNTFAILED,
399 399 dgettext(TEXT_DOMAIN, "cannot mount '%s'"),
400 400 mountpoint));
401 401 }
402 402 }
403 403
404 404 /*
405 405 * Determine if the mountpoint is empty. If so, refuse to perform the
406 406 * mount. We don't perform this check if MS_OVERLAY is specified, which
407 407 * would defeat the point. We also avoid this check if 'remount' is
408 408 * specified.
409 409 */
410 410 if ((flags & MS_OVERLAY) == 0 &&
411 411 strstr(mntopts, MNTOPT_REMOUNT) == NULL &&
412 412 !dir_is_empty(mountpoint)) {
413 413 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
414 414 "directory is not empty"));
415 415 return (zfs_error_fmt(hdl, EZFS_MOUNTFAILED,
416 416 dgettext(TEXT_DOMAIN, "cannot mount '%s'"), mountpoint));
417 417 }
418 418
419 419 /* perform the mount */
420 420 if (mount(zfs_get_name(zhp), mountpoint, MS_OPTIONSTR | flags,
421 421 MNTTYPE_ZFS, NULL, 0, mntopts, sizeof (mntopts)) != 0) {
422 422 /*
423 423 * Generic errors are nasty, but there are just way too many
424 424 * from mount(), and they're well-understood. We pick a few
425 425 * common ones to improve upon.
426 426 */
427 427 if (errno == EBUSY) {
428 428 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
429 429 "mountpoint or dataset is busy"));
430 430 } else if (errno == EPERM) {
431 431 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
432 432 "Insufficient privileges"));
433 433 } else if (errno == ENOTSUP) {
434 434 char buf[256];
435 435 int spa_version;
436 436
437 437 VERIFY(zfs_spa_version(zhp, &spa_version) == 0);
438 438 (void) snprintf(buf, sizeof (buf),
439 439 dgettext(TEXT_DOMAIN, "Can't mount a version %lld "
440 440 "file system on a version %d pool. Pool must be"
441 441 " upgraded to mount this file system."),
442 442 (u_longlong_t)zfs_prop_get_int(zhp,
443 443 ZFS_PROP_VERSION), spa_version);
444 444 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, buf));
445 445 } else {
446 446 zfs_error_aux(hdl, strerror(errno));
447 447 }
448 448 return (zfs_error_fmt(hdl, EZFS_MOUNTFAILED,
449 449 dgettext(TEXT_DOMAIN, "cannot mount '%s'"),
450 450 zhp->zfs_name));
451 451 }
452 452
453 453 /* add the mounted entry into our cache */
454 454 libzfs_mnttab_add(hdl, zfs_get_name(zhp), mountpoint,
455 455 mntopts);
456 456 return (0);
457 457 }
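/*
 * For illustration (not part of this change), a minimal caller-side
 * sketch of the MS_CRYPT behavior described above: with the flag set,
 * zfs_mount() attempts to load a missing encryption key itself; without
 * it, the mount fails with "encryption key not loaded". The function and
 * dataset names are hypothetical.
 */
static int
example_mount_with_key(libzfs_handle_t *hdl, const char *dsname)
{
	zfs_handle_t *zhp;
	int err;

	if ((zhp = zfs_open(hdl, dsname, ZFS_TYPE_FILESYSTEM)) == NULL)
		return (-1);
	/* MS_CRYPT lets zfs_mount() load the key if it is not available */
	err = zfs_mount(zhp, NULL, MS_CRYPT);
	zfs_close(zhp);
	return (err);
}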
458 458
459 459 /*
460 460 * Unmount a single filesystem.
461 461 */
462 462 static int
463 463 unmount_one(libzfs_handle_t *hdl, const char *mountpoint, int flags)
464 464 {
465 465 if (umount2(mountpoint, flags) != 0) {
466 466 zfs_error_aux(hdl, strerror(errno));
467 467 return (zfs_error_fmt(hdl, EZFS_UMOUNTFAILED,
468 468 dgettext(TEXT_DOMAIN, "cannot unmount '%s'"),
469 469 mountpoint));
470 470 }
471 471
472 472 return (0);
473 473 }
474 474
475 475 /*
476 476 * Unmount the given filesystem.
477 477 */
478 478 int
479 479 zfs_unmount(zfs_handle_t *zhp, const char *mountpoint, int flags)
480 480 {
481 481 libzfs_handle_t *hdl = zhp->zfs_hdl;
482 482 struct mnttab entry;
483 483 char *mntpt = NULL;
484 484
485 485 /* check to see if we need to unmount the filesystem */
486 486 if (mountpoint != NULL || ((zfs_get_type(zhp) == ZFS_TYPE_FILESYSTEM) &&
487 487 libzfs_mnttab_find(hdl, zhp->zfs_name, &entry) == 0)) {
488 488 /*
489 489 * mountpoint may have come from a call to
490 490 * getmnt/getmntany if it isn't NULL. If it is NULL,
491 491 * we know it comes from libzfs_mnttab_find which can
492 492 * then get freed later. We strdup it to play it safe.
493 493 */
494 494 if (mountpoint == NULL)
495 495 mntpt = zfs_strdup(hdl, entry.mnt_mountp);
496 496 else
497 497 mntpt = zfs_strdup(hdl, mountpoint);
498 498
499 499 /*
500 500 * Unshare and unmount the filesystem
501 501 */
502 502 if (zfs_unshare_proto(zhp, mntpt, share_all_proto) != 0)
503 503 return (-1);
504 504
505 505 if (unmount_one(hdl, mntpt, flags) != 0) {
506 506 free(mntpt);
507 507 (void) zfs_shareall(zhp);
508 508 return (-1);
509 509 }
510 510 libzfs_mnttab_remove(hdl, zhp->zfs_name);
511 511 free(mntpt);
512 512 }
513 513
514 514 return (0);
515 515 }
516 516
517 517 /*
518 518 * Unmount this filesystem and any children inheriting the mountpoint property.
519 519 * To do this, just act like we're changing the mountpoint property, but don't
520 520 * remount the filesystems afterwards.
521 521 */
522 522 int
523 523 zfs_unmountall(zfs_handle_t *zhp, int flags)
524 524 {
525 525 prop_changelist_t *clp;
526 526 int ret;
527 527
528 528 clp = changelist_gather(zhp, ZFS_PROP_MOUNTPOINT, 0, flags);
529 529 if (clp == NULL)
530 530 return (-1);
531 531
532 532 ret = changelist_prefix(clp);
533 533 changelist_free(clp);
534 534
535 535 return (ret);
536 536 }
537 537
538 538 boolean_t
539 539 zfs_is_shared(zfs_handle_t *zhp)
540 540 {
541 541 zfs_share_type_t rc = 0;
542 542 zfs_share_proto_t *curr_proto;
543 543
544 544 if (ZFS_IS_VOLUME(zhp))
545 545 return (B_FALSE);
546 546
547 547 for (curr_proto = share_all_proto; *curr_proto != PROTO_END;
548 548 curr_proto++)
549 549 rc |= zfs_is_shared_proto(zhp, NULL, *curr_proto);
550 550
551 551 return (rc ? B_TRUE : B_FALSE);
552 552 }
553 553
554 554 int
555 555 zfs_share(zfs_handle_t *zhp)
556 556 {
557 557 assert(!ZFS_IS_VOLUME(zhp));
558 558 return (zfs_share_proto(zhp, share_all_proto));
559 559 }
560 560
561 561 int
562 562 zfs_unshare(zfs_handle_t *zhp)
563 563 {
564 564 assert(!ZFS_IS_VOLUME(zhp));
565 565 return (zfs_unshareall(zhp));
566 566 }
567 567
568 568 /*
569 569 * Check to see if the filesystem is currently shared.
570 570 */
571 571 zfs_share_type_t
572 572 zfs_is_shared_proto(zfs_handle_t *zhp, char **where, zfs_share_proto_t proto)
573 573 {
574 574 char *mountpoint;
575 575 zfs_share_type_t rc;
576 576
577 577 if (!zfs_is_mounted(zhp, &mountpoint))
578 578 return (SHARED_NOT_SHARED);
579 579
580 580 if ((rc = is_shared(zhp->zfs_hdl, mountpoint, proto))
581 581 != SHARED_NOT_SHARED) {
582 582 if (where != NULL)
583 583 *where = mountpoint;
584 584 else
585 585 free(mountpoint);
586 586 return (rc);
587 587 } else {
588 588 free(mountpoint);
589 589 return (SHARED_NOT_SHARED);
590 590 }
591 591 }
592 592
593 593 boolean_t
594 594 zfs_is_shared_nfs(zfs_handle_t *zhp, char **where)
595 595 {
596 596 return (zfs_is_shared_proto(zhp, where,
597 597 PROTO_NFS) != SHARED_NOT_SHARED);
598 598 }
599 599
600 600 boolean_t
601 601 zfs_is_shared_smb(zfs_handle_t *zhp, char **where)
602 602 {
603 603 return (zfs_is_shared_proto(zhp, where,
604 604 PROTO_SMB) != SHARED_NOT_SHARED);
605 605 }
606 606
607 607 /*
608 608 * Make sure things will work if libshare isn't installed by using
609 609 * wrapper functions that check to see that the pointers to functions
610 610 * initialized in _zfs_init_libshare() are actually present.
611 611 */
612 612
613 613 static sa_handle_t (*_sa_init)(int);
614 614 static sa_handle_t (*_sa_init_arg)(int, void *);
615 +static int (*_sa_service)(sa_handle_t);
615 616 static void (*_sa_fini)(sa_handle_t);
616 617 static sa_share_t (*_sa_find_share)(sa_handle_t, char *);
617 618 static int (*_sa_enable_share)(sa_share_t, char *);
618 619 static int (*_sa_disable_share)(sa_share_t, char *);
619 620 static char *(*_sa_errorstr)(int);
620 621 static int (*_sa_parse_legacy_options)(sa_group_t, char *, char *);
621 622 static boolean_t (*_sa_needs_refresh)(sa_handle_t *);
622 623 static libzfs_handle_t *(*_sa_get_zfs_handle)(sa_handle_t);
623 624 static int (*_sa_zfs_process_share)(sa_handle_t, sa_group_t, sa_share_t,
624 625 char *, char *, zprop_source_t, char *, char *, char *);
625 626 static void (*_sa_update_sharetab_ts)(sa_handle_t);
626 627
627 628 /*
628 629 * _zfs_init_libshare()
629 630 *
630 631 * Find the libshare.so.1 entry points that we use here and save the
631 632 * values to be used later. This is triggered by the runtime loader.
632 633 * Make sure the correct ISA version is loaded.
633 634 */
634 635
635 636 #pragma init(_zfs_init_libshare)
636 637 static void
637 638 _zfs_init_libshare(void)
638 639 {
639 640 void *libshare;
640 641 char path[MAXPATHLEN];
641 642 char isa[MAXISALEN];
642 643
643 644 #if defined(_LP64)
644 645 if (sysinfo(SI_ARCHITECTURE_64, isa, MAXISALEN) == -1)
645 646 isa[0] = '\0';
646 647 #else
647 648 isa[0] = '\0';
648 649 #endif
649 650 (void) snprintf(path, MAXPATHLEN,
650 651 "/usr/lib/%s/libshare.so.1", isa);
651 652
652 653 if ((libshare = dlopen(path, RTLD_LAZY | RTLD_GLOBAL)) != NULL) {
653 654 _sa_init = (sa_handle_t (*)(int))dlsym(libshare, "sa_init");
654 655 _sa_init_arg = (sa_handle_t (*)(int, void *))dlsym(libshare,
655 656 "sa_init_arg");
656 657 _sa_fini = (void (*)(sa_handle_t))dlsym(libshare, "sa_fini");
658 + _sa_service = (int (*)(sa_handle_t))dlsym(libshare,
659 + "sa_service");
657 660 _sa_find_share = (sa_share_t (*)(sa_handle_t, char *))
658 661 dlsym(libshare, "sa_find_share");
659 662 _sa_enable_share = (int (*)(sa_share_t, char *))dlsym(libshare,
660 663 "sa_enable_share");
661 664 _sa_disable_share = (int (*)(sa_share_t, char *))dlsym(libshare,
662 665 "sa_disable_share");
663 666 _sa_errorstr = (char *(*)(int))dlsym(libshare, "sa_errorstr");
664 667 _sa_parse_legacy_options = (int (*)(sa_group_t, char *, char *))
665 668 dlsym(libshare, "sa_parse_legacy_options");
666 669 _sa_needs_refresh = (boolean_t (*)(sa_handle_t *))
667 670 dlsym(libshare, "sa_needs_refresh");
668 671 _sa_get_zfs_handle = (libzfs_handle_t *(*)(sa_handle_t))
669 672 dlsym(libshare, "sa_get_zfs_handle");
670 673 _sa_zfs_process_share = (int (*)(sa_handle_t, sa_group_t,
671 674 sa_share_t, char *, char *, zprop_source_t, char *,
672 675 char *, char *))dlsym(libshare, "sa_zfs_process_share");
673 676 _sa_update_sharetab_ts = (void (*)(sa_handle_t))
674 677 dlsym(libshare, "sa_update_sharetab_ts");
675 678 if (_sa_init == NULL || _sa_init_arg == NULL ||
676 679 _sa_fini == NULL || _sa_find_share == NULL ||
677 680 _sa_enable_share == NULL || _sa_disable_share == NULL ||
678 681 _sa_errorstr == NULL || _sa_parse_legacy_options == NULL ||
679 682 _sa_needs_refresh == NULL || _sa_get_zfs_handle == NULL ||
680 - _sa_zfs_process_share == NULL ||
683 + _sa_zfs_process_share == NULL || _sa_service == NULL ||
681 684 _sa_update_sharetab_ts == NULL) {
682 685 _sa_init = NULL;
683 686 _sa_init_arg = NULL;
687 + _sa_service = NULL;
684 688 _sa_fini = NULL;
685 689 _sa_disable_share = NULL;
686 690 _sa_enable_share = NULL;
687 691 _sa_errorstr = NULL;
688 692 _sa_parse_legacy_options = NULL;
689 693 (void) dlclose(libshare);
690 694 _sa_needs_refresh = NULL;
691 695 _sa_get_zfs_handle = NULL;
692 696 _sa_zfs_process_share = NULL;
693 697 _sa_update_sharetab_ts = NULL;
694 698 }
695 699 }
696 700 }
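/*
 * A minimal sketch (hypothetical helper, not part of this change) of how
 * the new _sa_service pointer can be consumed with the same NULL guard
 * used by the wrappers further below: report the handle's current
 * service mode, or -1 when libshare (or the symbol) is unavailable.
 */
static int
zfs_sa_service(sa_handle_t handle)
{
	if (_sa_service != NULL)
		return (_sa_service(handle));
	return (-1);
}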
697 701
698 702 /*
699 703 * zfs_init_libshare(zhandle, service)
700 704 *
701 705 * Initialize the libshare API if it hasn't already been initialized.
702 706 * In all cases it returns 0 if it succeeded and an error if not. The
703 707 * service value is which part(s) of the API to initialize and is a
704 708 * direct map to the libshare sa_init(service) interface.
705 709 */
706 710 static int
707 711 zfs_init_libshare_impl(libzfs_handle_t *zhandle, int service, void *arg)
708 712 {
709 713 /*
710 714 * libshare is either not installed or we're in a branded zone. The
711 715 * rest of the wrapper functions around the libshare calls already
712 716 * handle NULL function pointers, but we don't want the callers of
713 717 * zfs_init_libshare() to fail prematurely if libshare is not available.
714 718 */
715 719 if (_sa_init == NULL)
716 720 return (SA_OK);
717 721
718 722 /*
719 723 * Attempt to refresh libshare. This is necessary if there was a cache
720 724 * miss for a new ZFS dataset that was just created, or if state of the
721 725 * sharetab file has changed since libshare was last initialized. We
722 726 * want to make sure so check timestamps to see if a different process
723 727 * has updated any of the configuration. If there was some non-ZFS
724 728 * change, we need to re-initialize the internal cache.
725 729 */
726 730 if (_sa_needs_refresh != NULL &&
727 731 _sa_needs_refresh(zhandle->libzfs_sharehdl)) {
728 732 zfs_uninit_libshare(zhandle);
729 733 zhandle->libzfs_sharehdl = _sa_init_arg(service, arg);
730 734 }
731 735
732 736 if (zhandle && zhandle->libzfs_sharehdl == NULL)
733 737 zhandle->libzfs_sharehdl = _sa_init_arg(service, arg);
734 738
735 739 if (zhandle->libzfs_sharehdl == NULL)
736 740 return (SA_NO_MEMORY);
737 741
738 742 return (SA_OK);
739 743 }
740 744 int
741 745 zfs_init_libshare(libzfs_handle_t *zhandle, int service)
742 746 {
743 747 return (zfs_init_libshare_impl(zhandle, service, NULL));
744 748 }
745 749
746 750 int
747 751 zfs_init_libshare_arg(libzfs_handle_t *zhandle, int service, void *arg)
748 752 {
749 753 return (zfs_init_libshare_impl(zhandle, service, arg));
750 754 }
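/*
 * For illustration (not part of this change), the two initialization
 * styles used in this file. A single dataset is shared with a per-share
 * handle:
 *
 *	(void) zfs_init_libshare_arg(hdl, SA_INIT_ONE_SHARE_FROM_HANDLE, zhp);
 *
 * while a pool-wide operation initializes libshare once for the whole
 * set of datasets, which is what this change leans on to avoid repeated
 * initialization during import:
 *
 *	sa_init_selective_arg_t sharearg;
 *
 *	sharearg.zhandle_arr = cb.cb_handles;
 *	sharearg.zhandle_len = cb.cb_used;
 *	(void) zfs_init_libshare_arg(hdl, SA_INIT_SHARE_API_SELECTIVE,
 *	    &sharearg);
 */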
751 755
752 756
753 757 /*
754 758 * zfs_uninit_libshare(zhandle)
755 759 *
756 760 * Uninitialize the libshare API if it hasn't already been
757 761 * uninitialized. It is OK to call multiple times.
758 762 */
759 763 void
760 764 zfs_uninit_libshare(libzfs_handle_t *zhandle)
761 765 {
762 766 if (zhandle != NULL && zhandle->libzfs_sharehdl != NULL) {
763 767 if (_sa_fini != NULL)
764 768 _sa_fini(zhandle->libzfs_sharehdl);
765 769 zhandle->libzfs_sharehdl = NULL;
766 770 }
767 771 }
768 772
769 773 /*
770 774 * zfs_parse_options(options, proto)
771 775 *
772 776 * Call the legacy parse interface to get the protocol specific
773 777 * options using the NULL arg to indicate that this is a "parse" only.
774 778 */
775 779 int
776 780 zfs_parse_options(char *options, zfs_share_proto_t proto)
777 781 {
778 782 if (_sa_parse_legacy_options != NULL) {
779 783 return (_sa_parse_legacy_options(NULL, options,
780 784 proto_table[proto].p_name));
781 785 }
782 786 return (SA_CONFIG_ERR);
783 787 }
784 788
785 789 /*
786 790 * zfs_sa_find_share(handle, path)
787 791 *
788 792 * wrapper around sa_find_share to find a share path in the
789 793 * configuration.
790 794 */
791 795 static sa_share_t
792 796 zfs_sa_find_share(sa_handle_t handle, char *path)
793 797 {
794 798 if (_sa_find_share != NULL)
795 799 return (_sa_find_share(handle, path));
796 800 return (NULL);
797 801 }
798 802
799 803 /*
800 804 * zfs_sa_enable_share(share, proto)
801 805 *
802 806 * Wrapper for sa_enable_share which enables a share for a specified
803 807 * protocol.
804 808 */
805 809 static int
806 810 zfs_sa_enable_share(sa_share_t share, char *proto)
807 811 {
808 812 if (_sa_enable_share != NULL)
809 813 return (_sa_enable_share(share, proto));
810 814 return (SA_CONFIG_ERR);
811 815 }
812 816
813 817 /*
814 818 * zfs_sa_disable_share(share, proto)
815 819 *
816 820  * Wrapper for sa_disable_share which disables a share for a specified
817 821 * protocol.
818 822 */
819 823 static int
820 824 zfs_sa_disable_share(sa_share_t share, char *proto)
821 825 {
822 826 if (_sa_disable_share != NULL)
823 827 return (_sa_disable_share(share, proto));
824 828 return (SA_CONFIG_ERR);
825 829 }
826 830
827 831 /*
828 832 * Share the given filesystem according to the options in the specified
829 833 * protocol specific properties (sharenfs, sharesmb). We rely
830 834 * on "libshare" to the dirty work for us.
831 835 */
|
↓ open down ↓ |
138 lines elided |
↑ open up ↑ |
832 836 static int
833 837 zfs_share_proto(zfs_handle_t *zhp, zfs_share_proto_t *proto)
834 838 {
835 839 char mountpoint[ZFS_MAXPROPLEN];
836 840 char shareopts[ZFS_MAXPROPLEN];
837 841 char sourcestr[ZFS_MAXPROPLEN];
838 842 libzfs_handle_t *hdl = zhp->zfs_hdl;
839 843 sa_share_t share;
840 844 zfs_share_proto_t *curr_proto;
841 845 zprop_source_t sourcetype;
846 + int service = SA_INIT_ONE_SHARE_FROM_HANDLE;
842 847 int ret;
843 848
844 849 if (!zfs_is_mountable(zhp, mountpoint, sizeof (mountpoint), NULL))
845 850 return (0);
846 851
852 + /*
853 + * Function may be called in a loop from higher up stack, with libshare
854 + * initialized for multiple shares (SA_INIT_SHARE_API_SELECTIVE).
855 + * zfs_init_libshare_arg will refresh the handle's cache if necessary.
856 + * In this case we do not want to switch to per share initialization.
857 + * Specify SA_INIT_SHARE_API to do full refresh, if refresh required.
858 + */
859 + if ((hdl->libzfs_sharehdl != NULL) && (_sa_service != NULL) &&
860 + (_sa_service(hdl->libzfs_sharehdl) ==
861 + SA_INIT_SHARE_API_SELECTIVE)) {
862 + service = SA_INIT_SHARE_API;
863 + }
864 +
847 865 for (curr_proto = proto; *curr_proto != PROTO_END; curr_proto++) {
848 866 /*
849 867 * Return success if there are no share options.
850 868 */
851 869 if (zfs_prop_get(zhp, proto_table[*curr_proto].p_prop,
852 870 shareopts, sizeof (shareopts), &sourcetype, sourcestr,
853 871 ZFS_MAXPROPLEN, B_FALSE) != 0 ||
854 872 strcmp(shareopts, "off") == 0)
855 873 continue;
856 - ret = zfs_init_libshare_arg(hdl, SA_INIT_ONE_SHARE_FROM_HANDLE,
857 - zhp);
874 + ret = zfs_init_libshare_arg(hdl, service, zhp);
858 875 if (ret != SA_OK) {
859 876 (void) zfs_error_fmt(hdl, EZFS_SHARENFSFAILED,
860 877 dgettext(TEXT_DOMAIN, "cannot share '%s': %s"),
861 878 zfs_get_name(zhp), _sa_errorstr != NULL ?
862 879 _sa_errorstr(ret) : "");
863 880 return (-1);
864 881 }
865 882
866 883 /*
867 884 * If the 'zoned' property is set, then zfs_is_mountable()
868 885 * will have already bailed out if we are in the global zone.
869 886 * But local zones cannot be NFS servers, so we ignore it for
870 887 * local zones as well.
871 888 */
872 889 if (zfs_prop_get_int(zhp, ZFS_PROP_ZONED))
873 890 continue;
874 891
875 892 share = zfs_sa_find_share(hdl->libzfs_sharehdl, mountpoint);
876 893 if (share == NULL) {
877 894 /*
878 895 * This may be a new file system that was just
879 896 * created so isn't in the internal cache
880 897 * (second time through). Rather than
881 898 * reloading the entire configuration, we can
882 899 * assume ZFS has done the checking and it is
883 900 * safe to add this to the internal
884 901 * configuration.
885 902 */
886 903 if (_sa_zfs_process_share(hdl->libzfs_sharehdl,
887 904 NULL, NULL, mountpoint,
888 905 proto_table[*curr_proto].p_name, sourcetype,
889 906 shareopts, sourcestr, zhp->zfs_name) != SA_OK) {
890 907 (void) zfs_error_fmt(hdl,
891 908 proto_table[*curr_proto].p_share_err,
892 909 dgettext(TEXT_DOMAIN, "cannot share '%s'"),
893 910 zfs_get_name(zhp));
894 911 return (-1);
895 912 }
896 913 share = zfs_sa_find_share(hdl->libzfs_sharehdl,
897 914 mountpoint);
898 915 }
899 916 if (share != NULL) {
900 917 int err;
901 918 err = zfs_sa_enable_share(share,
902 919 proto_table[*curr_proto].p_name);
903 920 if (err != SA_OK) {
904 921 (void) zfs_error_fmt(hdl,
905 922 proto_table[*curr_proto].p_share_err,
906 923 dgettext(TEXT_DOMAIN, "cannot share '%s'"),
907 924 zfs_get_name(zhp));
908 925 return (-1);
909 926 }
910 927 } else {
911 928 (void) zfs_error_fmt(hdl,
912 929 proto_table[*curr_proto].p_share_err,
913 930 dgettext(TEXT_DOMAIN, "cannot share '%s'"),
914 931 zfs_get_name(zhp));
915 932 return (-1);
916 933 }
917 934
918 935 }
919 936 return (0);
920 937 }
921 938
922 939
923 940 int
924 941 zfs_share_nfs(zfs_handle_t *zhp)
925 942 {
926 943 return (zfs_share_proto(zhp, nfs_only));
927 944 }
928 945
929 946 int
930 947 zfs_share_smb(zfs_handle_t *zhp)
931 948 {
932 949 return (zfs_share_proto(zhp, smb_only));
933 950 }
934 951
935 952 int
936 953 zfs_shareall(zfs_handle_t *zhp)
937 954 {
938 955 return (zfs_share_proto(zhp, share_all_proto));
939 956 }
940 957
941 958 /*
942 959 * Unshare a filesystem by mountpoint.
943 960 */
944 961 static int
945 962 unshare_one(libzfs_handle_t *hdl, const char *name, const char *mountpoint,
946 963 zfs_share_proto_t proto)
947 964 {
948 965 sa_share_t share;
949 966 int err;
950 967 char *mntpt;
968 + int service = SA_INIT_ONE_SHARE_FROM_NAME;
951 969
952 970 /*
953 971 * Mountpoint could get trashed if libshare calls getmntany
954 972 * which it does during API initialization, so strdup the
955 973 * value.
956 974 */
957 975 mntpt = zfs_strdup(hdl, mountpoint);
958 976
959 977 /*
960 - * make sure libshare initialized, initialize everything because we
961 - * don't know what other unsharing may happen later. Functions up the
962 - * stack are allowed to initialize instead a subset of shares at the
963 - * time the set is known.
978 + * Function may be called in a loop from higher up stack, with libshare
979 + * initialized for multiple shares (SA_INIT_SHARE_API_SELECTIVE).
980 + * zfs_init_libshare_arg will refresh the handle's cache if necessary.
981 + * In this case we do not want to switch to per share initialization.
982 + * Specify SA_INIT_SHARE_API to do full refresh, if refresh required.
964 983 */
965 - if ((err = zfs_init_libshare_arg(hdl, SA_INIT_ONE_SHARE_FROM_NAME,
966 - (void *)name)) != SA_OK) {
984 + if ((hdl->libzfs_sharehdl != NULL) && (_sa_service != NULL) &&
985 + (_sa_service(hdl->libzfs_sharehdl) ==
986 + SA_INIT_SHARE_API_SELECTIVE)) {
987 + service = SA_INIT_SHARE_API;
988 + }
989 +
990 + err = zfs_init_libshare_arg(hdl, service, (void *)name);
991 + if (err != SA_OK) {
967 992 free(mntpt); /* don't need the copy anymore */
968 993 return (zfs_error_fmt(hdl, proto_table[proto].p_unshare_err,
969 994 dgettext(TEXT_DOMAIN, "cannot unshare '%s': %s"),
970 995 name, _sa_errorstr(err)));
971 996 }
972 997
973 998 share = zfs_sa_find_share(hdl->libzfs_sharehdl, mntpt);
974 999 free(mntpt); /* don't need the copy anymore */
975 1000
976 1001 if (share != NULL) {
977 1002 err = zfs_sa_disable_share(share, proto_table[proto].p_name);
978 1003 if (err != SA_OK) {
979 1004 return (zfs_error_fmt(hdl,
980 1005 proto_table[proto].p_unshare_err,
981 1006 dgettext(TEXT_DOMAIN, "cannot unshare '%s': %s"),
982 1007 name, _sa_errorstr(err)));
983 1008 }
984 1009 } else {
985 1010 return (zfs_error_fmt(hdl, proto_table[proto].p_unshare_err,
986 1011 dgettext(TEXT_DOMAIN, "cannot unshare '%s': not found"),
987 1012 name));
988 1013 }
989 1014 return (0);
990 1015 }
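/*
 * zfs_share_proto() and unshare_one() above now repeat the same
 * service-mode check. A minimal consolidation sketch (hypothetical
 * helper, not part of this change): keep whole-API initialization when
 * the existing handle was initialized selectively for a set of shares,
 * and otherwise fall back to the caller's per-share default.
 */
static int
example_choose_share_service(libzfs_handle_t *hdl, int per_share_service)
{
	if (hdl->libzfs_sharehdl != NULL && _sa_service != NULL &&
	    _sa_service(hdl->libzfs_sharehdl) == SA_INIT_SHARE_API_SELECTIVE)
		return (SA_INIT_SHARE_API);
	return (per_share_service);
}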
991 1016
992 1017 /*
993 1018 * Unshare the given filesystem.
994 1019 */
995 1020 int
996 1021 zfs_unshare_proto(zfs_handle_t *zhp, const char *mountpoint,
997 1022 zfs_share_proto_t *proto)
998 1023 {
999 1024 libzfs_handle_t *hdl = zhp->zfs_hdl;
1000 1025 struct mnttab entry;
1001 1026 char *mntpt = NULL;
1002 1027
1003 1028 	/* check to see if we need to unshare the filesystem */
1004 1029 rewind(zhp->zfs_hdl->libzfs_mnttab);
1005 1030 if (mountpoint != NULL)
1006 1031 mountpoint = mntpt = zfs_strdup(hdl, mountpoint);
1007 1032
1008 1033 if (mountpoint != NULL || ((zfs_get_type(zhp) == ZFS_TYPE_FILESYSTEM) &&
1009 1034 libzfs_mnttab_find(hdl, zfs_get_name(zhp), &entry) == 0)) {
1010 1035 zfs_share_proto_t *curr_proto;
1011 1036
1012 1037 if (mountpoint == NULL)
1013 1038 mntpt = zfs_strdup(zhp->zfs_hdl, entry.mnt_mountp);
1014 1039
1015 1040 for (curr_proto = proto; *curr_proto != PROTO_END;
1016 1041 curr_proto++) {
1017 1042
1018 1043 if (is_shared(hdl, mntpt, *curr_proto) &&
1019 1044 unshare_one(hdl, zhp->zfs_name,
1020 1045 mntpt, *curr_proto) != 0) {
1021 1046 if (mntpt != NULL)
1022 1047 free(mntpt);
1023 1048 return (-1);
1024 1049 }
1025 1050 }
1026 1051 }
1027 1052 if (mntpt != NULL)
1028 1053 free(mntpt);
1029 1054
1030 1055 return (0);
1031 1056 }
1032 1057
1033 1058 int
1034 1059 zfs_unshare_nfs(zfs_handle_t *zhp, const char *mountpoint)
1035 1060 {
1036 1061 return (zfs_unshare_proto(zhp, mountpoint, nfs_only));
1037 1062 }
1038 1063
1039 1064 int
1040 1065 zfs_unshare_smb(zfs_handle_t *zhp, const char *mountpoint)
1041 1066 {
1042 1067 return (zfs_unshare_proto(zhp, mountpoint, smb_only));
1043 1068 }
1044 1069
1045 1070 /*
1046 1071 * Same as zfs_unmountall(), but for NFS and SMB unshares.
1047 1072 */
1048 1073 int
1049 1074 zfs_unshareall_proto(zfs_handle_t *zhp, zfs_share_proto_t *proto)
1050 1075 {
1051 1076 prop_changelist_t *clp;
1052 1077 int ret;
1053 1078
1054 1079 clp = changelist_gather(zhp, ZFS_PROP_SHARENFS, 0, 0);
1055 1080 if (clp == NULL)
1056 1081 return (-1);
1057 1082
1058 1083 ret = changelist_unshare(clp, proto);
1059 1084 changelist_free(clp);
1060 1085
1061 1086 return (ret);
1062 1087 }
1063 1088
1064 1089 int
1065 1090 zfs_unshareall_nfs(zfs_handle_t *zhp)
1066 1091 {
1067 1092 return (zfs_unshareall_proto(zhp, nfs_only));
1068 1093 }
1069 1094
1070 1095 int
1071 1096 zfs_unshareall_smb(zfs_handle_t *zhp)
1072 1097 {
1073 1098 return (zfs_unshareall_proto(zhp, smb_only));
1074 1099 }
1075 1100
1076 1101 int
1077 1102 zfs_unshareall(zfs_handle_t *zhp)
1078 1103 {
1079 1104 return (zfs_unshareall_proto(zhp, share_all_proto));
1080 1105 }
1081 1106
1082 1107 int
1083 1108 zfs_unshareall_bypath(zfs_handle_t *zhp, const char *mountpoint)
1084 1109 {
1085 1110 return (zfs_unshare_proto(zhp, mountpoint, share_all_proto));
1086 1111 }
1087 1112
1088 1113 /*
1089 1114 * Remove the mountpoint associated with the current dataset, if necessary.
1090 1115 * We only remove the underlying directory if:
1091 1116 *
1092 1117 * - The mountpoint is not 'none' or 'legacy'
1093 1118 * - The mountpoint is non-empty
1094 1119 * - The mountpoint is the default or inherited
1095 1120 * - The 'zoned' property is set, or we're in a local zone
1096 1121 *
1097 1122 * Any other directories we leave alone.
1098 1123 */
1099 1124 void
1100 1125 remove_mountpoint(zfs_handle_t *zhp)
1101 1126 {
1102 1127 char mountpoint[ZFS_MAXPROPLEN];
1103 1128 zprop_source_t source;
1104 1129
1105 1130 if (!zfs_is_mountable(zhp, mountpoint, sizeof (mountpoint),
1106 1131 &source))
1107 1132 return;
1108 1133
1109 1134 if (source == ZPROP_SRC_DEFAULT ||
1110 1135 source == ZPROP_SRC_INHERITED) {
1111 1136 /*
1112 1137 * Try to remove the directory, silently ignoring any errors.
1113 1138 * The filesystem may have since been removed or moved around,
1114 1139 * and this error isn't really useful to the administrator in
1115 1140 * any way.
1116 1141 */
1117 1142 (void) rmdir(mountpoint);
1118 1143 }
1119 1144 }
1120 1145
1121 1146 /*
1122 1147 * Add the given zfs handle to the cb_handles array, dynamically reallocating
1123 1148 * the array if it is out of space.
1124 1149 */
1125 1150 void
1126 1151 libzfs_add_handle(get_all_cb_t *cbp, zfs_handle_t *zhp)
1127 1152 {
1128 1153 if (cbp->cb_alloc == cbp->cb_used) {
1129 1154 size_t newsz;
1130 1155 zfs_handle_t **newhandles;
1131 1156
1132 1157 newsz = cbp->cb_alloc != 0 ? cbp->cb_alloc * 2 : 64;
1133 1158 newhandles = zfs_realloc(zhp->zfs_hdl,
1134 1159 cbp->cb_handles, cbp->cb_alloc * sizeof (zfs_handle_t *),
1135 1160 newsz * sizeof (zfs_handle_t *));
1136 1161 cbp->cb_handles = newhandles;
1137 1162 cbp->cb_alloc = newsz;
1138 1163 }
1139 1164 cbp->cb_handles[cbp->cb_used++] = zhp;
1140 1165 }
1141 1166
1142 1167 /*
1143 1168 * Recursive helper function used during file system enumeration
1144 1169 */
1145 1170 static int
1146 1171 zfs_iter_cb(zfs_handle_t *zhp, void *data)
1147 1172 {
1148 1173 get_all_cb_t *cbp = data;
1149 1174
1150 1175 if (!(zfs_get_type(zhp) & ZFS_TYPE_FILESYSTEM)) {
1151 1176 zfs_close(zhp);
1152 1177 return (0);
1153 1178 }
1154 1179
1155 1180 if (zfs_prop_get_int(zhp, ZFS_PROP_CANMOUNT) == ZFS_CANMOUNT_NOAUTO) {
1156 1181 zfs_close(zhp);
1157 1182 return (0);
1158 1183 }
1159 1184
1160 1185 if (zfs_prop_get_int(zhp, ZFS_PROP_KEYSTATUS) ==
1161 1186 ZFS_KEYSTATUS_UNAVAILABLE) {
1162 1187 zfs_close(zhp);
1163 1188 return (0);
1164 1189 }
1165 1190
1166 1191 /*
1167 1192 * If this filesystem is inconsistent and has a receive resume
1168 1193 * token, we can not mount it.
1169 1194 */
1170 1195 if (zfs_prop_get_int(zhp, ZFS_PROP_INCONSISTENT) &&
1171 1196 zfs_prop_get(zhp, ZFS_PROP_RECEIVE_RESUME_TOKEN,
1172 1197 NULL, 0, NULL, NULL, 0, B_TRUE) == 0) {
1173 1198 zfs_close(zhp);
1174 1199 return (0);
1175 1200 }
1176 1201
1177 1202 libzfs_add_handle(cbp, zhp);
1178 1203 if (zfs_iter_filesystems(zhp, zfs_iter_cb, cbp) != 0) {
1179 1204 zfs_close(zhp);
1180 1205 return (-1);
1181 1206 }
1182 1207 return (0);
1183 1208 }
1184 1209
1185 1210 /*
1186 1211 * Sort comparator that compares two mountpoint paths. We sort these paths so
1187 1212 * that subdirectories immediately follow their parents. This means that we
1188 1213 * effectively treat the '/' character as the lowest value non-nul char.
1189 1214 * Since filesystems from non-global zones can have the same mountpoint
1190 1215 * as other filesystems, the comparator sorts global zone filesystems to
1191 1216 * the top of the list. This means that the global zone will traverse the
1192 1217 * filesystem list in the correct order and can stop when it sees the
1193 1218 * first zoned filesystem. In a non-global zone, only the delegated
1194 1219 * filesystems are seen.
1195 1220 *
1196 1221 * An example sorted list using this comparator would look like:
1197 1222 *
1198 1223 * /foo
1199 1224 * /foo/bar
1200 1225 * /foo/bar/baz
1201 1226 * /foo/baz
1202 1227 * /foo.bar
1203 1228 * /foo (NGZ1)
1204 1229 * /foo (NGZ2)
1205 1230 *
1206 1231 * The mounting code depends on this ordering to deterministically iterate
1207 1232 * over filesystems in order to spawn parallel mount tasks.
1208 1233 */
1209 1234 static int
1210 1235 mountpoint_cmp(const void *arga, const void *argb)
1211 1236 {
1212 1237 zfs_handle_t *const *zap = arga;
1213 1238 zfs_handle_t *za = *zap;
1214 1239 zfs_handle_t *const *zbp = argb;
1215 1240 zfs_handle_t *zb = *zbp;
1216 1241 char mounta[MAXPATHLEN];
1217 1242 char mountb[MAXPATHLEN];
1218 1243 const char *a = mounta;
1219 1244 const char *b = mountb;
1220 1245 boolean_t gota, gotb;
1221 1246 uint64_t zoneda, zonedb;
1222 1247
1223 1248 zoneda = zfs_prop_get_int(za, ZFS_PROP_ZONED);
1224 1249 zonedb = zfs_prop_get_int(zb, ZFS_PROP_ZONED);
1225 1250 if (zoneda && !zonedb)
1226 1251 return (1);
1227 1252 if (!zoneda && zonedb)
1228 1253 return (-1);
1229 1254
1230 1255 gota = (zfs_get_type(za) == ZFS_TYPE_FILESYSTEM);
1231 1256 if (gota) {
1232 1257 verify(zfs_prop_get(za, ZFS_PROP_MOUNTPOINT, mounta,
1233 1258 sizeof (mounta), NULL, NULL, 0, B_FALSE) == 0);
1234 1259 }
1235 1260 gotb = (zfs_get_type(zb) == ZFS_TYPE_FILESYSTEM);
1236 1261 if (gotb) {
1237 1262 verify(zfs_prop_get(zb, ZFS_PROP_MOUNTPOINT, mountb,
1238 1263 sizeof (mountb), NULL, NULL, 0, B_FALSE) == 0);
1239 1264 }
1240 1265
1241 1266 if (gota && gotb) {
1242 1267 while (*a != '\0' && (*a == *b)) {
1243 1268 a++;
1244 1269 b++;
1245 1270 }
1246 1271 if (*a == *b)
1247 1272 return (0);
1248 1273 if (*a == '\0')
1249 1274 return (-1);
1250 1275 if (*b == '\0')
1251 1276 return (1);
1252 1277 if (*a == '/')
1253 1278 return (-1);
1254 1279 if (*b == '/')
1255 1280 return (1);
1256 1281 return (*a < *b ? -1 : *a > *b);
1257 1282 }
1258 1283
1259 1284 if (gota)
1260 1285 return (-1);
1261 1286 if (gotb)
1262 1287 return (1);
1263 1288
1264 1289 /*
1265 1290 * If neither filesystem has a mountpoint, revert to sorting by
1266 1291 * dataset name.
1267 1292 */
1268 1293 return (strcmp(zfs_get_name(za), zfs_get_name(zb)));
1269 1294 }
1270 1295
1271 1296 /*
1272 1297 * Return true if path2 is a child of path1.
1273 1298 */
1274 1299 static boolean_t
1275 1300 libzfs_path_contains(const char *path1, const char *path2)
1276 1301 {
1277 1302 return (strstr(path2, path1) == path2 && path2[strlen(path1)] == '/');
1278 1303 }
1279 1304
1280 1305 /*
1281 1306 * Given a mountpoint specified by idx in the handles array, find the first
1282 1307 * non-descendent of that mountpoint and return its index. Descendant paths
1283 1308 * start with the parent's path. This function relies on the ordering
1284 1309 * enforced by mountpoint_cmp().
1285 1310 */
1286 1311 static int
1287 1312 non_descendant_idx(zfs_handle_t **handles, size_t num_handles, int idx)
1288 1313 {
1289 1314 char parent[ZFS_MAXPROPLEN];
1290 1315 char child[ZFS_MAXPROPLEN];
1291 1316 int i;
1292 1317
1293 1318 verify(zfs_prop_get(handles[idx], ZFS_PROP_MOUNTPOINT, parent,
1294 1319 sizeof (parent), NULL, NULL, 0, B_FALSE) == 0);
1295 1320
1296 1321 for (i = idx + 1; i < num_handles; i++) {
1297 1322 verify(zfs_prop_get(handles[i], ZFS_PROP_MOUNTPOINT, child,
1298 1323 sizeof (child), NULL, NULL, 0, B_FALSE) == 0);
1299 1324 if (!libzfs_path_contains(parent, child))
1300 1325 break;
1301 1326 }
1302 1327 return (i);
1303 1328 }
1304 1329
1305 1330 typedef struct mnt_param {
1306 1331 libzfs_handle_t *mnt_hdl;
1307 1332 zfs_taskq_t *mnt_tq;
1308 1333 zfs_handle_t **mnt_zhps; /* filesystems to mount */
1309 1334 size_t mnt_num_handles;
1310 1335 int mnt_idx; /* Index of selected entry to mount */
1311 1336 zfs_iter_f mnt_func;
1312 1337 void *mnt_data;
1313 1338 } mnt_param_t;
1314 1339
1315 1340 /*
1316 1341 * Allocate and populate the parameter struct for mount function, and
1317 1342 * schedule mounting of the entry selected by idx.
1318 1343 */
1319 1344 static void
1320 1345 zfs_dispatch_mount(libzfs_handle_t *hdl, zfs_handle_t **handles,
1321 1346 size_t num_handles, int idx, zfs_iter_f func, void *data, zfs_taskq_t *tq)
1322 1347 {
1323 1348 mnt_param_t *mnt_param = zfs_alloc(hdl, sizeof (mnt_param_t));
1324 1349
1325 1350 mnt_param->mnt_hdl = hdl;
1326 1351 mnt_param->mnt_tq = tq;
1327 1352 mnt_param->mnt_zhps = handles;
1328 1353 mnt_param->mnt_num_handles = num_handles;
1329 1354 mnt_param->mnt_idx = idx;
1330 1355 mnt_param->mnt_func = func;
1331 1356 mnt_param->mnt_data = data;
1332 1357
1333 1358 (void) zfs_taskq_dispatch(tq, zfs_mount_task, (void*)mnt_param,
1334 1359 ZFS_TQ_SLEEP);
1335 1360 }
1336 1361
1337 1362 /*
1338 1363 * This is the structure used to keep state of mounting or sharing operations
1339 1364 * during a call to zpool_enable_datasets().
1340 1365 */
1341 1366 typedef struct mount_state {
1342 1367 /*
1343 1368 * ms_mntstatus is set to -1 if any mount fails. While multiple threads
1344 1369 * could update this variable concurrently, no synchronization is
1345 1370 * needed as it's only ever set to -1.
1346 1371 */
1347 1372 int ms_mntstatus;
1348 1373 int ms_mntflags;
1349 1374 const char *ms_mntopts;
1350 1375 } mount_state_t;
1351 1376
1352 1377 static int
1353 1378 zfs_mount_one(zfs_handle_t *zhp, void *arg)
1354 1379 {
1355 1380 mount_state_t *ms = arg;
1356 1381 int ret = 0;
1357 1382
1358 1383 if (zfs_prop_get_int(zhp, ZFS_PROP_KEYSTATUS) ==
1359 1384 ZFS_KEYSTATUS_UNAVAILABLE)
1360 1385 return (0);
1361 1386
1362 1387 if (zfs_mount(zhp, ms->ms_mntopts, ms->ms_mntflags) != 0)
1363 1388 ret = ms->ms_mntstatus = -1;
1364 1389 return (ret);
1365 1390 }
1366 1391
1367 1392 static int
1368 1393 zfs_share_one(zfs_handle_t *zhp, void *arg)
1369 1394 {
1370 1395 mount_state_t *ms = arg;
1371 1396 int ret = 0;
1372 1397
1373 1398 if (zfs_share(zhp) != 0)
1374 1399 ret = ms->ms_mntstatus = -1;
1375 1400 return (ret);
1376 1401 }
1377 1402
1378 1403 /*
1379 1404 * Task queue function to mount one file system. On completion, it finds and
1380 1405 * schedules its children to be mounted. This depends on the sorting done in
1381 1406 * zfs_foreach_mountpoint(). Note that the degenerate case (chain of entries
1382 1407 * each descending from the previous) will have no parallelism since we always
1383 1408 * have to wait for the parent to finish mounting before we can schedule
1384 1409 * its children.
1385 1410 */
1386 1411 static void
1387 1412 zfs_mount_task(void *arg)
1388 1413 {
1389 1414 mnt_param_t *mp = arg;
1390 1415 int idx = mp->mnt_idx;
1391 1416 zfs_handle_t **handles = mp->mnt_zhps;
1392 1417 size_t num_handles = mp->mnt_num_handles;
1393 1418 char mountpoint[ZFS_MAXPROPLEN];
1394 1419
1395 1420 verify(zfs_prop_get(handles[idx], ZFS_PROP_MOUNTPOINT, mountpoint,
1396 1421 sizeof (mountpoint), NULL, NULL, 0, B_FALSE) == 0);
1397 1422
1398 1423 if (mp->mnt_func(handles[idx], mp->mnt_data) != 0)
1399 1424 return;
1400 1425
1401 1426 /*
1402 1427 * We dispatch tasks to mount filesystems with mountpoints underneath
1403 1428 * this one. We do this by dispatching the next filesystem with a
1404 1429 * descendant mountpoint of the one we just mounted, then skip all of
1405 1430 * its descendants, dispatch the next descendant mountpoint, and so on.
1406 1431 * The non_descendant_idx() function skips over filesystems that are
1407 1432 * descendants of the filesystem we just dispatched.
1408 1433 */
1409 1434 for (int i = idx + 1; i < num_handles;
1410 1435 i = non_descendant_idx(handles, num_handles, i)) {
1411 1436 char child[ZFS_MAXPROPLEN];
1412 1437 verify(zfs_prop_get(handles[i], ZFS_PROP_MOUNTPOINT,
1413 1438 child, sizeof (child), NULL, NULL, 0, B_FALSE) == 0);
1414 1439
1415 1440 if (!libzfs_path_contains(mountpoint, child))
1416 1441 break; /* not a descendant, return */
1417 1442 zfs_dispatch_mount(mp->mnt_hdl, handles, num_handles, i,
1418 1443 mp->mnt_func, mp->mnt_data, mp->mnt_tq);
1419 1444 }
1420 1445 free(mp);
1421 1446 }
1422 1447
1423 1448 /*
1424 1449 * Issue the func callback for each ZFS handle contained in the handles
1425 1450 * array. This function is used to mount all datasets, and so this function
1426 1451 * guarantees that filesystems for parent mountpoints are called before their
1427 1452 * children. As such, before issuing any callbacks, we first sort the array
1428 1453 * of handles by mountpoint.
1429 1454 *
1430 1455 * Callbacks are issued in one of two ways:
1431 1456 *
1432 1457 * 1. Sequentially: If the parallel argument is B_FALSE or the ZFS_SERIAL_MOUNT
1433 1458 * environment variable is set, then we issue callbacks sequentially.
1434 1459 *
1435 1460 * 2. In parallel: If the parallel argument is B_TRUE and the ZFS_SERIAL_MOUNT
1436 1461 * environment variable is not set, then we use a taskq to dispatch threads
1437 1462  * to mount filesystems in parallel. This function dispatches tasks to mount
1438 1463 * the filesystems at the top-level mountpoints, and these tasks in turn
1439 1464 * are responsible for recursively mounting filesystems in their children
1440 1465 * mountpoints.
1441 1466 */
1442 1467 void
1443 1468 zfs_foreach_mountpoint(libzfs_handle_t *hdl, zfs_handle_t **handles,
1444 1469 size_t num_handles, zfs_iter_f func, void *data, boolean_t parallel)
1445 1470 {
1446 1471 zoneid_t zoneid = getzoneid();
1447 1472
1448 1473 /*
1449 1474 * The ZFS_SERIAL_MOUNT environment variable is an undocumented
1450 1475 * variable that can be used as a convenience to do a/b comparison
1451 1476 * of serial vs. parallel mounting.
1452 1477 */
1453 1478 boolean_t serial_mount = !parallel ||
1454 1479 (getenv("ZFS_SERIAL_MOUNT") != NULL);
1455 1480
1456 1481 /*
1457 1482 * Sort the datasets by mountpoint. See mountpoint_cmp for details
1458 1483 * of how these are sorted.
1459 1484 */
1460 1485 qsort(handles, num_handles, sizeof (zfs_handle_t *), mountpoint_cmp);
1461 1486
1462 1487 if (serial_mount) {
1463 1488 for (int i = 0; i < num_handles; i++) {
1464 1489 func(handles[i], data);
1465 1490 }
1466 1491 return;
1467 1492 }
1468 1493
1469 1494 /*
1470 1495 * Issue the callback function for each dataset using a parallel
1471 1496 * algorithm that uses a taskq to manage threads.
1472 1497 */
1473 1498 zfs_taskq_t *tq = zfs_taskq_create("mount_taskq", mount_tq_nthr, 0,
1474 1499 mount_tq_nthr, mount_tq_nthr, ZFS_TASKQ_PREPOPULATE);
1475 1500
1476 1501 /*
1477 1502 * There may be multiple "top level" mountpoints outside of the pool's
1478 1503 * root mountpoint, e.g.: /foo /bar. Dispatch a mount task for each of
1479 1504 * these.
1480 1505 */
1481 1506 for (int i = 0; i < num_handles;
1482 1507 i = non_descendant_idx(handles, num_handles, i)) {
1483 1508 /*
1484 1509 * Since the mountpoints have been sorted so that the zoned
1485 1510 * filesystems are at the end, a zoned filesystem seen from
1486 1511 * the global zone means that we're done.
1487 1512 */
1488 1513 if (zoneid == GLOBAL_ZONEID &&
1489 1514 zfs_prop_get_int(handles[i], ZFS_PROP_ZONED))
1490 1515 break;
1491 1516 zfs_dispatch_mount(hdl, handles, num_handles, i, func, data,
1492 1517 tq);
1493 1518 }
1494 1519
1495 1520 zfs_taskq_wait(tq); /* wait for all scheduled mounts to complete */
1496 1521 zfs_taskq_destroy(tq);
1497 1522 }
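/*
 * A minimal sketch (not part of this change): the undocumented
 * ZFS_SERIAL_MOUNT variable described above only needs to be present in
 * the environment, so a test harness can force the sequential path for
 * an a/b comparison before triggering any mounts. The function name is
 * hypothetical.
 */
static void
example_force_serial_mounts(void)
{
	/* the taskq-based parallel path is skipped whenever this is set */
	(void) setenv("ZFS_SERIAL_MOUNT", "1", 1);
}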
1498 1523
1499 1524 /*
1500 1525 * Mount and share all datasets within the given pool. This assumes that no
1501 1526 * datasets within the pool are currently mounted.
1502 1527 */
1503 1528 #pragma weak zpool_mount_datasets = zpool_enable_datasets
1504 1529 int
1505 1530 zpool_enable_datasets(zpool_handle_t *zhp, const char *mntopts, int flags)
1506 1531 {
1507 1532 get_all_cb_t cb = { 0 };
1508 1533 mount_state_t ms = { 0 };
1509 1534 zfs_handle_t *zfsp;
1510 1535 sa_init_selective_arg_t sharearg;
1511 1536 int ret = 0;
1512 1537
1513 1538 if ((zfsp = zfs_open(zhp->zpool_hdl, zhp->zpool_name,
1514 1539 ZFS_TYPE_DATASET)) == NULL)
1515 1540 goto out;
1516 1541
1517 1542
1518 1543 /*
1519 1544 * Gather all non-snapshot datasets within the pool. Start by adding
1520 1545 * the root filesystem for this pool to the list, and then iterate
1521 1546 * over all child filesystems.
1522 1547 */
1523 1548 libzfs_add_handle(&cb, zfsp);
1524 1549 if (zfs_iter_filesystems(zfsp, zfs_iter_cb, &cb) != 0)
1525 1550 goto out;
1526 1551
1527 1552 ms.ms_mntopts = mntopts;
1528 1553 ms.ms_mntflags = flags;
1529 1554 zfs_foreach_mountpoint(zhp->zpool_hdl, cb.cb_handles, cb.cb_used,
1530 1555 zfs_mount_one, &ms, B_TRUE);
1531 1556 if (ms.ms_mntstatus != 0)
1532 1557 ret = ms.ms_mntstatus;
1533 1558
1534 1559 /*
1535 - * Share all filesystems that need to be shared. This needs to be
1536 - * a separate pass because libshare is not mt-safe, and so we need
1537 - * to share serially.
1560 + * Initialize libshare SA_INIT_SHARE_API_SELECTIVE here
1561 + * to avoid unnecessary load/unload of the libshare API
1562 + * per shared dataset downstream.
1538 1563 */
1539 1564 sharearg.zhandle_arr = cb.cb_handles;
1540 1565 sharearg.zhandle_len = cb.cb_used;
1541 1566 if ((ret = zfs_init_libshare_arg(zhp->zpool_hdl,
1542 1567 SA_INIT_SHARE_API_SELECTIVE, &sharearg)) != 0)
1543 1568 goto out;
1544 1569
1545 1570 ms.ms_mntstatus = 0;
1546 1571 zfs_foreach_mountpoint(zhp->zpool_hdl, cb.cb_handles, cb.cb_used,
1547 1572 zfs_share_one, &ms, B_FALSE);
1548 1573 if (ms.ms_mntstatus != 0)
1549 1574 ret = ms.ms_mntstatus;
1550 1575
1551 1576 out:
1552 1577 for (int i = 0; i < cb.cb_used; i++)
1553 1578 zfs_close(cb.cb_handles[i]);
1554 1579 free(cb.cb_handles);
1555 1580
1556 1581 return (ret);
1557 1582 }
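/*
 * For illustration (not part of this change), a minimal consumer-side
 * sketch of the import path this fix targets: mounting and sharing every
 * dataset in a pool with a single call, so that libshare is initialized
 * once via SA_INIT_SHARE_API_SELECTIVE rather than once per shared
 * dataset. The function name and error handling are hypothetical.
 */
static int
example_enable_pool(libzfs_handle_t *hdl, const char *poolname)
{
	zpool_handle_t *zhp;
	int err = -1;

	if ((zhp = zpool_open(hdl, poolname)) != NULL) {
		/* mount (in parallel) and then share all datasets */
		err = zpool_enable_datasets(zhp, NULL, 0);
		zpool_close(zhp);
	}
	return (err);
}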
1558 1583
1559 1584 static int
1560 1585 mountpoint_compare(const void *a, const void *b)
1561 1586 {
1562 1587 const char *mounta = *((char **)a);
1563 1588 const char *mountb = *((char **)b);
1564 1589
1565 1590 return (strcmp(mountb, mounta));
1566 1591 }
1567 1592
1568 1593 /* alias for 2002/240 */
1569 1594 #pragma weak zpool_unmount_datasets = zpool_disable_datasets
1570 1595 /*
1571 1596 * Unshare and unmount all datasets within the given pool. We don't want to
1572 1597 * rely on traversing the DSL to discover the filesystems within the pool,
1573 1598 * because this may be expensive (if not all of them are mounted), and can fail
1574 1599 * arbitrarily (on I/O error, for example). Instead, we walk /etc/mnttab and
1575 1600 * gather all the filesystems that are currently mounted.
1576 1601 */
1577 1602 int
1578 1603 zpool_disable_datasets(zpool_handle_t *zhp, boolean_t force)
1579 1604 {
1580 1605 int used, alloc;
1581 1606 struct mnttab entry;
1582 1607 size_t namelen;
1583 1608 char **mountpoints = NULL;
1584 1609 zfs_handle_t **datasets = NULL;
1585 1610 libzfs_handle_t *hdl = zhp->zpool_hdl;
1586 1611 int i;
1587 1612 int ret = -1;
1588 1613 int flags = (force ? MS_FORCE : 0);
1589 1614 sa_init_selective_arg_t sharearg;
1590 1615
1591 1616 namelen = strlen(zhp->zpool_name);
1592 1617
1593 1618 rewind(hdl->libzfs_mnttab);
1594 1619 used = alloc = 0;
1595 1620 while (getmntent(hdl->libzfs_mnttab, &entry) == 0) {
1596 1621 /*
1597 1622 * Ignore non-ZFS entries.
1598 1623 */
1599 1624 if (entry.mnt_fstype == NULL ||
1600 1625 strcmp(entry.mnt_fstype, MNTTYPE_ZFS) != 0)
1601 1626 continue;
1602 1627
1603 1628 /*
1604 1629 * Ignore filesystems not within this pool.
1605 1630 */
1606 1631 if (entry.mnt_mountp == NULL ||
1607 1632 strncmp(entry.mnt_special, zhp->zpool_name, namelen) != 0 ||
1608 1633 (entry.mnt_special[namelen] != '/' &&
1609 1634 entry.mnt_special[namelen] != '\0'))
1610 1635 continue;
1611 1636
1612 1637 /*
1613 1638 * At this point we've found a filesystem within our pool. Add
1614 1639 * it to our growing list.
1615 1640 */
1616 1641 if (used == alloc) {
1617 1642 if (alloc == 0) {
1618 1643 if ((mountpoints = zfs_alloc(hdl,
1619 1644 8 * sizeof (void *))) == NULL)
1620 1645 goto out;
1621 1646
1622 1647 if ((datasets = zfs_alloc(hdl,
1623 1648 8 * sizeof (void *))) == NULL)
1624 1649 goto out;
1625 1650
1626 1651 alloc = 8;
1627 1652 } else {
1628 1653 void *ptr;
1629 1654
1630 1655 if ((ptr = zfs_realloc(hdl, mountpoints,
1631 1656 alloc * sizeof (void *),
1632 1657 alloc * 2 * sizeof (void *))) == NULL)
1633 1658 goto out;
1634 1659 mountpoints = ptr;
1635 1660
1636 1661 if ((ptr = zfs_realloc(hdl, datasets,
1637 1662 alloc * sizeof (void *),
1638 1663 alloc * 2 * sizeof (void *))) == NULL)
1639 1664 goto out;
1640 1665 datasets = ptr;
1641 1666
1642 1667 alloc *= 2;
1643 1668 }
1644 1669 }
1645 1670
1646 1671 if ((mountpoints[used] = zfs_strdup(hdl,
1647 1672 entry.mnt_mountp)) == NULL)
1648 1673 goto out;
1649 1674
1650 1675 /*
1651 1676 * This is allowed to fail, in case there is some I/O error. It
1652 1677 * is only used to determine if we need to remove the underlying
1653 1678 * mountpoint, so failure is not fatal.
1654 1679 */
1655 1680 datasets[used] = make_dataset_handle(hdl, entry.mnt_special);
1656 1681
1657 1682 used++;
1658 1683 }
1659 1684
1660 1685 /*
1661 1686 * At this point, we have the entire list of filesystems, so sort it by
1662 1687 * mountpoint.
1663 1688 */
1664 1689 sharearg.zhandle_arr = datasets;
1665 1690 sharearg.zhandle_len = used;
1666 1691 ret = zfs_init_libshare_arg(hdl, SA_INIT_SHARE_API_SELECTIVE,
1667 1692 &sharearg);
1668 1693 if (ret != 0)
1669 1694 goto out;
1670 1695 qsort(mountpoints, used, sizeof (char *), mountpoint_compare);
1671 1696
1672 1697 /*
1673 1698 * Walk through and first unshare everything.
1674 1699 */
1675 1700 for (i = 0; i < used; i++) {
1676 1701 zfs_share_proto_t *curr_proto;
1677 1702 for (curr_proto = share_all_proto; *curr_proto != PROTO_END;
1678 1703 curr_proto++) {
1679 1704 if (is_shared(hdl, mountpoints[i], *curr_proto) &&
1680 1705 unshare_one(hdl, mountpoints[i],
1681 1706 mountpoints[i], *curr_proto) != 0)
1682 1707 goto out;
1683 1708 }
1684 1709 }
1685 1710
1686 1711 /*
1687 1712 * Now unmount everything, removing the underlying directories as
1688 1713 * appropriate.
1689 1714 */
1690 1715 for (i = 0; i < used; i++) {
1691 1716 if (unmount_one(hdl, mountpoints[i], flags) != 0)
1692 1717 goto out;
1693 1718 }
1694 1719
1695 1720 for (i = 0; i < used; i++) {
1696 1721 if (datasets[i])
1697 1722 remove_mountpoint(datasets[i]);
1698 1723 }
1699 1724
1700 1725 ret = 0;
1701 1726 out:
1702 1727 for (i = 0; i < used; i++) {
1703 1728 if (datasets[i])
1704 1729 zfs_close(datasets[i]);
1705 1730 free(mountpoints[i]);
1706 1731 }
1707 1732 free(datasets);
1708 1733 free(mountpoints);
1709 1734
1710 1735 return (ret);
1711 1736 }