1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
24 */
25
26 /*
27 * Copyright 2019 Nexenta Systems, Inc.
28 * Copyright (c) 2014, 2016 by Delphix. All rights reserved.
29 * Copyright 2016 Igor Kozhukhov <ikozhukhov@gmail.com>
30 * Copyright 2017 Joyent, Inc.
31 * Copyright 2017 RackTop Systems.
32 * Copyright 2018 OmniOS Community Edition (OmniOSce) Association.
33 */
34
35 /*
36 * Routines to manage ZFS mounts. All of the nasty routines that have to deal
37 * with the OS are separated out here. The following functions are the main
38 * entry points -- they are used by mount and unmount and when changing a
39 * filesystem's mountpoint.
40 *
41 * zfs_is_mounted()
42 * zfs_mount()
43 * zfs_unmount()
44 * zfs_unmountall()
45 *
46 * This file also contains the functions used to manage sharing filesystems via
47 * NFS and SMB:
48 *
49 * zfs_is_shared()
50 * zfs_share()
51 * zfs_unshare()
52 *
53 * zfs_is_shared_nfs()
54 * zfs_is_shared_smb()
55 * zfs_share_proto()
56 * zfs_shareall()
57 * zfs_unshare_nfs()
58 * zfs_unshare_smb()
59 * zfs_unshareall_nfs()
60 * zfs_unshareall_smb()
61 * zfs_unshareall()
62 * zfs_unshareall_bypath()
63 *
64 * The following functions are available for pool consumers, and will
65 * mount/unmount and share/unshare all datasets within the pool:
66 *
67 * zpool_enable_datasets()
68 * zpool_disable_datasets()
69 */
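
/*
 * Illustrative sketch (not part of the library): a typical consumer of the
 * entry points above mounts and shares a single dataset roughly as follows.
 * The handle names ('g_zfs') and error handling are hypothetical and are
 * shown only to indicate the intended calling order.
 *
 *	zfs_handle_t *zhp = zfs_open(g_zfs, "tank/home", ZFS_TYPE_FILESYSTEM);
 *	if (zhp != NULL && !zfs_is_mounted(zhp, NULL)) {
 *		if (zfs_mount(zhp, NULL, 0) == 0)
 *			(void) zfs_share(zhp);
 *	}
 *	if (zhp != NULL)
 *		zfs_close(zhp);
 *
 * Pool-level consumers would instead call zpool_enable_datasets() and
 * zpool_disable_datasets(), which handle the whole dataset tree.
 */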
70
71 #include <dirent.h>
72 #include <dlfcn.h>
73 #include <errno.h>
74 #include <fcntl.h>
75 #include <libgen.h>
76 #include <libintl.h>
77 #include <stdio.h>
78 #include <stdlib.h>
79 #include <strings.h>
80 #include <unistd.h>
81 #include <zone.h>
82 #include <sys/mntent.h>
83 #include <sys/mount.h>
84 #include <sys/stat.h>
85 #include <sys/statvfs.h>
86 #include <sys/dsl_crypt.h>
87
88 #include <libzfs.h>
89
90 #include "libzfs_impl.h"
91 #include "libzfs_taskq.h"
92
93 #include <libshare.h>
94 #include <sys/systeminfo.h>
95 #define MAXISALEN 257 /* based on sysinfo(2) man page */
96
97 static int mount_tq_nthr = 512; /* taskq threads for multi-threaded mounting */
98
99 static void zfs_mount_task(void *);
100 static int zfs_share_proto(zfs_handle_t *, zfs_share_proto_t *);
101 zfs_share_type_t zfs_is_shared_proto(zfs_handle_t *, char **,
102 zfs_share_proto_t);
103
104 /*
105 * The share protocols table must be in the same order as the zfs_share_proto_t
106 * enum in libzfs_impl.h
107 */
108 typedef struct {
109 zfs_prop_t p_prop;
110 char *p_name;
111 int p_share_err;
112 int p_unshare_err;
113 } proto_table_t;
114
115 proto_table_t proto_table[PROTO_END] = {
116 {ZFS_PROP_SHARENFS, "nfs", EZFS_SHARENFSFAILED, EZFS_UNSHARENFSFAILED},
117 {ZFS_PROP_SHARESMB, "smb", EZFS_SHARESMBFAILED, EZFS_UNSHARESMBFAILED},
118 };
119
120 zfs_share_proto_t nfs_only[] = {
121 PROTO_NFS,
122 PROTO_END
123 };
124
125 zfs_share_proto_t smb_only[] = {
126 PROTO_SMB,
127 PROTO_END
128 };
129 zfs_share_proto_t share_all_proto[] = {
130 PROTO_NFS,
131 PROTO_SMB,
132 PROTO_END
133 };
134
135 /*
136 * Search the sharetab for the given mountpoint and protocol, returning
137 * a zfs_share_type_t value.
138 */
139 static zfs_share_type_t
140 is_shared(libzfs_handle_t *hdl, const char *mountpoint, zfs_share_proto_t proto)
141 {
142 char buf[MAXPATHLEN], *tab;
143 char *ptr;
144
145 if (hdl->libzfs_sharetab == NULL)
146 return (SHARED_NOT_SHARED);
147
148 (void) fseek(hdl->libzfs_sharetab, 0, SEEK_SET);
149
150 while (fgets(buf, sizeof (buf), hdl->libzfs_sharetab) != NULL) {
151
152 /* the mountpoint is the first entry on each line */
153 if ((tab = strchr(buf, '\t')) == NULL)
154 continue;
155
156 *tab = '\0';
157 if (strcmp(buf, mountpoint) == 0) {
158 /*
159 * the protocol field is the third field,
160 * so skip over the second field
161 */
162 ptr = ++tab;
163 if ((tab = strchr(ptr, '\t')) == NULL)
164 continue;
165 ptr = ++tab;
166 if ((tab = strchr(ptr, '\t')) == NULL)
167 continue;
168 *tab = '\0';
169 if (strcmp(ptr,
170 proto_table[proto].p_name) == 0) {
171 switch (proto) {
172 case PROTO_NFS:
173 return (SHARED_NFS);
174 case PROTO_SMB:
175 return (SHARED_SMB);
176 default:
177 return (0);
178 }
179 }
180 }
181 }
182
183 return (SHARED_NOT_SHARED);
184 }
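
/*
 * For illustration, a sharetab line as consumed above looks roughly like the
 * following (tab-separated). Only the first field (the mountpoint) and the
 * third field (the protocol name, "nfs" or "smb") are examined by
 * is_shared(); the exact set of trailing fields is an assumption here and is
 * not relied upon:
 *
 *	/tank/home	-	nfs	rw	""
 */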
185
186 static boolean_t
187 dir_is_empty_stat(const char *dirname)
188 {
189 struct stat st;
190
191 /*
192 * We only want to return false if the given path is a non-empty
193 * directory; all other errors are handled elsewhere.
194 */
195 if (stat(dirname, &st) < 0 || !S_ISDIR(st.st_mode)) {
196 return (B_TRUE);
197 }
198
199 /*
200 * An empty directory will still have two entries in it, one
201 * entry for each of "." and "..".
202 */
203 if (st.st_size > 2) {
204 return (B_FALSE);
205 }
206
207 return (B_TRUE);
208 }
209
210 static boolean_t
211 dir_is_empty_readdir(const char *dirname)
212 {
213 DIR *dirp;
214 struct dirent64 *dp;
215 int dirfd;
216
217 if ((dirfd = openat(AT_FDCWD, dirname,
218 O_RDONLY | O_NDELAY | O_LARGEFILE | O_CLOEXEC, 0)) < 0) {
219 return (B_TRUE);
220 }
221
222 if ((dirp = fdopendir(dirfd)) == NULL) {
223 (void) close(dirfd);
224 return (B_TRUE);
225 }
226
227 while ((dp = readdir64(dirp)) != NULL) {
228
229 if (strcmp(dp->d_name, ".") == 0 ||
230 strcmp(dp->d_name, "..") == 0)
231 continue;
232
233 (void) closedir(dirp);
234 return (B_FALSE);
235 }
236
237 (void) closedir(dirp);
238 return (B_TRUE);
239 }
240
241 /*
242 * Returns true if the specified directory is empty. If we can't open the
243 * directory at all, return true so that the mount can fail with a more
244 * informative error message.
245 */
246 static boolean_t
247 dir_is_empty(const char *dirname)
248 {
249 struct statvfs64 st;
250
251 /*
252 * If the statvfs call fails or the filesystem is not a ZFS
253 * filesystem, fall back to the slow path which uses readdir.
254 */
255 if ((statvfs64(dirname, &st) != 0) ||
256 (strcmp(st.f_basetype, "zfs") != 0)) {
257 return (dir_is_empty_readdir(dirname));
258 }
259
260 /*
261 * At this point, we know the provided path is on a ZFS
262 * filesystem, so we can use stat instead of readdir to
263 * determine if the directory is empty or not. We try to avoid
264 * using readdir because that requires opening "dirname"; this
265 * open file descriptor can potentially end up in a child
266 * process if there's a concurrent fork, thus preventing the
267 * zfs_mount() from otherwise succeeding (the open file
268 * descriptor inherited by the child process will cause the
269 * parent's mount to fail with EBUSY). The performance
270 * implications of replacing the open, read, and close with a
271 * single stat is nice; but is not the main motivation for the
272 * added complexity.
273 */
274 return (dir_is_empty_stat(dirname));
275 }
276
277 /*
278 * Checks to see if the mount is active. If the filesystem is mounted, we fill
279 * in 'where' with the current mountpoint, and return B_TRUE. Otherwise, we
280 * return B_FALSE.
281 */
282 boolean_t
283 is_mounted(libzfs_handle_t *zfs_hdl, const char *special, char **where)
284 {
285 struct mnttab entry;
286
287 if (libzfs_mnttab_find(zfs_hdl, special, &entry) != 0)
288 return (B_FALSE);
289
290 if (where != NULL)
291 *where = zfs_strdup(zfs_hdl, entry.mnt_mountp);
292
293 return (B_TRUE);
294 }
295
296 boolean_t
297 zfs_is_mounted(zfs_handle_t *zhp, char **where)
298 {
299 return (is_mounted(zhp->zfs_hdl, zfs_get_name(zhp), where));
300 }
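
/*
 * Illustrative sketch (hypothetical caller): when the 'where' argument is
 * non-NULL and the dataset is mounted, the returned string is allocated with
 * zfs_strdup() and must be freed by the caller.
 *
 *	char *mntpt;
 *	if (zfs_is_mounted(zhp, &mntpt)) {
 *		(void) printf("mounted at %s\n", mntpt);
 *		free(mntpt);
 *	}
 */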
301
302 /*
303 * Returns true if the given dataset is mountable, false otherwise. Returns the
304 * mountpoint in 'buf'.
305 */
306 static boolean_t
307 zfs_is_mountable(zfs_handle_t *zhp, char *buf, size_t buflen,
308 zprop_source_t *source)
309 {
310 char sourceloc[MAXNAMELEN];
311 zprop_source_t sourcetype;
312
313 if (!zfs_prop_valid_for_type(ZFS_PROP_MOUNTPOINT, zhp->zfs_type))
314 return (B_FALSE);
315
316 verify(zfs_prop_get(zhp, ZFS_PROP_MOUNTPOINT, buf, buflen,
317 &sourcetype, sourceloc, sizeof (sourceloc), B_FALSE) == 0);
318
319 if (strcmp(buf, ZFS_MOUNTPOINT_NONE) == 0 ||
320 strcmp(buf, ZFS_MOUNTPOINT_LEGACY) == 0)
321 return (B_FALSE);
322
323 if (zfs_prop_get_int(zhp, ZFS_PROP_CANMOUNT) == ZFS_CANMOUNT_OFF)
324 return (B_FALSE);
325
326 if (zfs_prop_get_int(zhp, ZFS_PROP_ZONED) &&
327 getzoneid() == GLOBAL_ZONEID)
328 return (B_FALSE);
329
330 if (source)
331 *source = sourcetype;
332
333 return (B_TRUE);
334 }
335
336 /*
337 * Mount the given filesystem.
338 */
339 int
340 zfs_mount(zfs_handle_t *zhp, const char *options, int flags)
341 {
342 struct stat buf;
343 char mountpoint[ZFS_MAXPROPLEN];
344 char mntopts[MNT_LINE_MAX];
345 libzfs_handle_t *hdl = zhp->zfs_hdl;
346 uint64_t keystatus;
347 int rc;
348
349 if (options == NULL)
350 mntopts[0] = '\0';
351 else
352 (void) strlcpy(mntopts, options, sizeof (mntopts));
353
354 /*
355 * If the pool is imported read-only, then all mounts must be read-only.
356 */
357 if (zpool_get_prop_int(zhp->zpool_hdl, ZPOOL_PROP_READONLY, NULL))
358 flags |= MS_RDONLY;
359
360 if (!zfs_is_mountable(zhp, mountpoint, sizeof (mountpoint), NULL))
361 return (0);
362
363 /*
364 * If the filesystem is encrypted the key must be loaded in order to
365 * mount. If the key isn't loaded, the MS_CRYPT flag decides whether
366 * or not we attempt to load the keys. Note: we must call
367 * zfs_refresh_properties() here since some callers of this function
368 * (most notably zpool_enable_datasets()) may implicitly load our key
369 * by loading the parent's key first.
370 */
371 if (zfs_prop_get_int(zhp, ZFS_PROP_ENCRYPTION) != ZIO_CRYPT_OFF) {
372 zfs_refresh_properties(zhp);
373 keystatus = zfs_prop_get_int(zhp, ZFS_PROP_KEYSTATUS);
374
375 /*
376 * If the key is unavailable and MS_CRYPT is set give the
377 * user a chance to enter the key. Otherwise just fail
378 * immediately.
379 */
380 if (keystatus == ZFS_KEYSTATUS_UNAVAILABLE) {
381 if (flags & MS_CRYPT) {
382 rc = zfs_crypto_load_key(zhp, B_FALSE, NULL);
383 if (rc != 0)
384 return (rc);
385 } else {
386 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
387 "encryption key not loaded"));
388 return (zfs_error_fmt(hdl, EZFS_MOUNTFAILED,
389 dgettext(TEXT_DOMAIN, "cannot mount '%s'"),
390 mountpoint));
391 }
392 }
393
394 }
395
396 /* Create the directory if it doesn't already exist */
397 if (lstat(mountpoint, &buf) != 0) {
398 if (mkdirp(mountpoint, 0755) != 0) {
399 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
400 "failed to create mountpoint"));
401 return (zfs_error_fmt(hdl, EZFS_MOUNTFAILED,
402 dgettext(TEXT_DOMAIN, "cannot mount '%s'"),
403 mountpoint));
404 }
405 }
406
407 /*
408 * Determine if the mountpoint is empty. If it is not, refuse to perform
409 * the mount. We don't perform this check if MS_OVERLAY is specified, which
410 * would defeat the point. We also avoid this check if 'remount' is
411 * specified.
412 */
413 if ((flags & MS_OVERLAY) == 0 &&
414 strstr(mntopts, MNTOPT_REMOUNT) == NULL &&
415 !dir_is_empty(mountpoint)) {
416 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
417 "directory is not empty"));
418 return (zfs_error_fmt(hdl, EZFS_MOUNTFAILED,
419 dgettext(TEXT_DOMAIN, "cannot mount '%s'"), mountpoint));
420 }
421
422 /* perform the mount */
423 if (mount(zfs_get_name(zhp), mountpoint, MS_OPTIONSTR | flags,
424 MNTTYPE_ZFS, NULL, 0, mntopts, sizeof (mntopts)) != 0) {
425 /*
426 * Generic errors are nasty, but there are just way too many
427 * from mount(), and they're well-understood. We pick a few
428 * common ones to improve upon.
429 */
430 if (errno == EBUSY) {
431 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
432 "mountpoint or dataset is busy"));
433 } else if (errno == EPERM) {
434 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
435 "Insufficient privileges"));
436 } else if (errno == ENOTSUP) {
437 char buf[256];
438 int spa_version;
439
440 VERIFY(zfs_spa_version(zhp, &spa_version) == 0);
441 (void) snprintf(buf, sizeof (buf),
442 dgettext(TEXT_DOMAIN, "Can't mount a version %lld "
443 "file system on a version %d pool. Pool must be"
444 " upgraded to mount this file system."),
445 (u_longlong_t)zfs_prop_get_int(zhp,
446 ZFS_PROP_VERSION), spa_version);
447 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, buf));
448 } else {
449 zfs_error_aux(hdl, strerror(errno));
450 }
451 return (zfs_error_fmt(hdl, EZFS_MOUNTFAILED,
452 dgettext(TEXT_DOMAIN, "cannot mount '%s'"),
453 zhp->zfs_name));
454 }
455
456 /* add the mounted entry into our cache */
457 libzfs_mnttab_add(hdl, zfs_get_name(zhp), mountpoint,
458 mntopts);
459 return (0);
460 }
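
/*
 * Illustrative sketch (hypothetical caller): mounting read-only with an
 * option string. The option string and handle names here are assumptions for
 * the example; the MS_* flags are the same ones handled above (MS_OVERLAY,
 * MS_CRYPT, MS_RDONLY), and the option string is passed through to mount(2)
 * via mntopts.
 *
 *	if (zfs_mount(zhp, "noatime", MS_RDONLY) != 0) {
 *		(void) fprintf(stderr, "%s\n",
 *		    libzfs_error_description(hdl));
 *	}
 */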
461
462 /*
463 * Unmount a single filesystem.
464 */
465 static int
466 unmount_one(libzfs_handle_t *hdl, const char *mountpoint, int flags)
467 {
468 if (umount2(mountpoint, flags) != 0) {
469 zfs_error_aux(hdl, strerror(errno));
470 return (zfs_error_fmt(hdl, EZFS_UMOUNTFAILED,
471 dgettext(TEXT_DOMAIN, "cannot unmount '%s'"),
472 mountpoint));
473 }
474
475 return (0);
476 }
477
478 /*
479 * Unmount the given filesystem.
480 */
481 int
482 zfs_unmount(zfs_handle_t *zhp, const char *mountpoint, int flags)
483 {
484 libzfs_handle_t *hdl = zhp->zfs_hdl;
485 struct mnttab entry;
486 char *mntpt = NULL;
487
488 /* check to see if we need to unmount the filesystem */
489 if (mountpoint != NULL || ((zfs_get_type(zhp) == ZFS_TYPE_FILESYSTEM) &&
490 libzfs_mnttab_find(hdl, zhp->zfs_name, &entry) == 0)) {
491 /*
492 * mountpoint may have come from a call to
493 * getmnt/getmntany if it isn't NULL. If it is NULL,
494 * we know it comes from libzfs_mnttab_find, whose
495 * storage can be freed later, so we strdup it to be safe.
496 */
497 if (mountpoint == NULL)
498 mntpt = zfs_strdup(hdl, entry.mnt_mountp);
499 else
500 mntpt = zfs_strdup(hdl, mountpoint);
501
502 /*
503 * Unshare and unmount the filesystem
504 */
505 if (zfs_unshare_proto(zhp, mntpt, share_all_proto) != 0)
506 return (-1);
507
508 if (unmount_one(hdl, mntpt, flags) != 0) {
509 free(mntpt);
510 (void) zfs_shareall(zhp);
511 return (-1);
512 }
513 libzfs_mnttab_remove(hdl, zhp->zfs_name);
514 free(mntpt);
515 }
516
517 return (0);
518 }
519
520 /*
521 * Unmount this filesystem and any children inheriting the mountpoint property.
522 * To do this, just act like we're changing the mountpoint property, but don't
523 * remount the filesystems afterwards.
524 */
525 int
526 zfs_unmountall(zfs_handle_t *zhp, int flags)
527 {
528 prop_changelist_t *clp;
529 int ret;
530
531 clp = changelist_gather(zhp, ZFS_PROP_MOUNTPOINT, 0, flags);
532 if (clp == NULL)
533 return (-1);
534
535 ret = changelist_prefix(clp);
536 changelist_free(clp);
537
538 return (ret);
539 }
540
541 boolean_t
542 zfs_is_shared(zfs_handle_t *zhp)
543 {
544 zfs_share_type_t rc = 0;
545 zfs_share_proto_t *curr_proto;
546
547 if (ZFS_IS_VOLUME(zhp))
548 return (B_FALSE);
549
550 for (curr_proto = share_all_proto; *curr_proto != PROTO_END;
551 curr_proto++)
552 rc |= zfs_is_shared_proto(zhp, NULL, *curr_proto);
553
554 return (rc ? B_TRUE : B_FALSE);
555 }
556
557 int
558 zfs_share(zfs_handle_t *zhp)
559 {
560 assert(!ZFS_IS_VOLUME(zhp));
561 return (zfs_share_proto(zhp, share_all_proto));
562 }
563
564 int
565 zfs_unshare(zfs_handle_t *zhp)
566 {
567 assert(!ZFS_IS_VOLUME(zhp));
568 return (zfs_unshareall(zhp));
569 }
570
571 /*
572 * Check to see if the filesystem is currently shared.
573 */
574 zfs_share_type_t
575 zfs_is_shared_proto(zfs_handle_t *zhp, char **where, zfs_share_proto_t proto)
576 {
577 char *mountpoint;
578 zfs_share_type_t rc;
579
580 if (!zfs_is_mounted(zhp, &mountpoint))
581 return (SHARED_NOT_SHARED);
582
583 if ((rc = is_shared(zhp->zfs_hdl, mountpoint, proto))
584 != SHARED_NOT_SHARED) {
585 if (where != NULL)
586 *where = mountpoint;
587 else
588 free(mountpoint);
589 return (rc);
590 } else {
591 free(mountpoint);
592 return (SHARED_NOT_SHARED);
593 }
594 }
595
596 boolean_t
597 zfs_is_shared_nfs(zfs_handle_t *zhp, char **where)
598 {
599 return (zfs_is_shared_proto(zhp, where,
600 PROTO_NFS) != SHARED_NOT_SHARED);
601 }
602
603 boolean_t
604 zfs_is_shared_smb(zfs_handle_t *zhp, char **where)
605 {
606 return (zfs_is_shared_proto(zhp, where,
607 PROTO_SMB) != SHARED_NOT_SHARED);
608 }
609
610 /*
611 * Make sure things will work if libshare isn't installed by using
612 * wrapper functions that check that the function pointers
613 * initialized in _zfs_init_libshare() are actually present.
614 */
615
616 static sa_handle_t (*_sa_init)(int);
617 static sa_handle_t (*_sa_init_arg)(int, void *);
618 static int (*_sa_service)(sa_handle_t);
619 static void (*_sa_fini)(sa_handle_t);
620 static sa_share_t (*_sa_find_share)(sa_handle_t, char *);
621 static int (*_sa_enable_share)(sa_share_t, char *);
622 static int (*_sa_disable_share)(sa_share_t, char *);
623 static char *(*_sa_errorstr)(int);
624 static int (*_sa_parse_legacy_options)(sa_group_t, char *, char *);
625 static boolean_t (*_sa_needs_refresh)(sa_handle_t *);
626 static libzfs_handle_t *(*_sa_get_zfs_handle)(sa_handle_t);
627 static int (* _sa_get_zfs_share)(sa_handle_t, char *, zfs_handle_t *);
628 static void (*_sa_update_sharetab_ts)(sa_handle_t);
629
630 /*
631 * _zfs_init_libshare()
632 *
633 * Find the libshare.so.1 entry points that we use here and save the
634 * values to be used later. This is triggered by the runtime loader.
635 * Make sure the correct ISA version is loaded.
636 */
637
638 #pragma init(_zfs_init_libshare)
639 static void
640 _zfs_init_libshare(void)
641 {
642 void *libshare;
643 char path[MAXPATHLEN];
644 char isa[MAXISALEN];
645
646 #if defined(_LP64)
647 if (sysinfo(SI_ARCHITECTURE_64, isa, MAXISALEN) == -1)
648 isa[0] = '\0';
649 #else
650 isa[0] = '\0';
651 #endif
652 (void) snprintf(path, MAXPATHLEN,
653 "/usr/lib/%s/libshare.so.1", isa);
654
655 if ((libshare = dlopen(path, RTLD_LAZY | RTLD_GLOBAL)) != NULL) {
656 _sa_init = (sa_handle_t (*)(int))dlsym(libshare, "sa_init");
657 _sa_init_arg = (sa_handle_t (*)(int, void *))dlsym(libshare,
658 "sa_init_arg");
659 _sa_fini = (void (*)(sa_handle_t))dlsym(libshare, "sa_fini");
660 _sa_service = (int (*)(sa_handle_t))dlsym(libshare,
661 "sa_service");
662 _sa_find_share = (sa_share_t (*)(sa_handle_t, char *))
663 dlsym(libshare, "sa_find_share");
664 _sa_enable_share = (int (*)(sa_share_t, char *))dlsym(libshare,
665 "sa_enable_share");
666 _sa_disable_share = (int (*)(sa_share_t, char *))dlsym(libshare,
667 "sa_disable_share");
668 _sa_errorstr = (char *(*)(int))dlsym(libshare, "sa_errorstr");
669 _sa_parse_legacy_options = (int (*)(sa_group_t, char *, char *))
670 dlsym(libshare, "sa_parse_legacy_options");
671 _sa_needs_refresh = (boolean_t (*)(sa_handle_t *))
672 dlsym(libshare, "sa_needs_refresh");
673 _sa_get_zfs_handle = (libzfs_handle_t *(*)(sa_handle_t))
674 dlsym(libshare, "sa_get_zfs_handle");
675 _sa_get_zfs_share = (int (*)(sa_handle_t, char *,
676 zfs_handle_t *)) dlsym(libshare, "sa_get_zfs_share");
677 _sa_update_sharetab_ts = (void (*)(sa_handle_t))
678 dlsym(libshare, "sa_update_sharetab_ts");
679 if (_sa_init == NULL || _sa_init_arg == NULL ||
680 _sa_fini == NULL || _sa_find_share == NULL ||
681 _sa_enable_share == NULL || _sa_disable_share == NULL ||
682 _sa_errorstr == NULL || _sa_parse_legacy_options == NULL ||
683 _sa_needs_refresh == NULL || _sa_get_zfs_handle == NULL ||
684 _sa_get_zfs_share == NULL || _sa_service == NULL ||
685 _sa_update_sharetab_ts == NULL) {
686 _sa_init = NULL;
687 _sa_init_arg = NULL;
688 _sa_service = NULL;
689 _sa_fini = NULL;
690 _sa_disable_share = NULL;
691 _sa_enable_share = NULL;
692 _sa_errorstr = NULL;
693 _sa_parse_legacy_options = NULL;
694 (void) dlclose(libshare);
695 _sa_needs_refresh = NULL;
696 _sa_get_zfs_handle = NULL;
697 _sa_get_zfs_share = NULL;
698 _sa_update_sharetab_ts = NULL;
699 }
700 }
701 }
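
/*
 * For example (an assumption, since the path depends on the ISA reported by
 * sysinfo(2)): on a 64-bit x86 system the path constructed above would
 * typically be "/usr/lib/amd64/libshare.so.1", while a 32-bit process ends
 * up with "/usr/lib//libshare.so.1" (the empty ISA component leaves a
 * harmless double slash).
 */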
702
703 /*
704 * zfs_init_libshare(zhandle, service)
705 *
706 * Initialize the libshare API if it hasn't already been initialized.
707 * In all cases it returns 0 if it succeeded and an error if not. The
708 * service value is which part(s) of the API to initialize and is a
709 * direct map to the libshare sa_init(service) interface.
710 */
711 static int
712 zfs_init_libshare_impl(libzfs_handle_t *zhandle, int service, void *arg)
713 {
714 /*
715 * libshare is either not installed or we're in a branded zone. The
716 * rest of the wrapper functions around the libshare calls already
717 * handle NULL function pointers, but we don't want the callers of
718 * zfs_init_libshare() to fail prematurely if libshare is not available.
719 */
720 if (_sa_init == NULL)
721 return (SA_OK);
722
723 /*
724 * Attempt to refresh libshare. This is necessary if there was a cache
725 * miss for a new ZFS dataset that was just created, or if the state of
726 * the sharetab file has changed since libshare was last initialized. We
727 * check timestamps to see whether a different process has updated any of
728 * the configuration. If there was some non-ZFS change, we need to
729 * re-initialize the internal cache.
730 */
731 if (_sa_needs_refresh != NULL &&
732 _sa_needs_refresh(zhandle->libzfs_sharehdl)) {
733 zfs_uninit_libshare(zhandle);
734 zhandle->libzfs_sharehdl = _sa_init_arg(service, arg);
735 }
736
737 if (zhandle && zhandle->libzfs_sharehdl == NULL)
738 zhandle->libzfs_sharehdl = _sa_init_arg(service, arg);
739
740 if (zhandle->libzfs_sharehdl == NULL)
741 return (SA_NO_MEMORY);
742
743 return (SA_OK);
744 }

745 int
746 zfs_init_libshare(libzfs_handle_t *zhandle, int service)
747 {
748 return (zfs_init_libshare_impl(zhandle, service, NULL));
749 }
750
751 int
752 zfs_init_libshare_arg(libzfs_handle_t *zhandle, int service, void *arg)
753 {
754 return (zfs_init_libshare_impl(zhandle, service, arg));
755 }
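
/*
 * Illustrative sketch: callers that are about to share or unshare many
 * datasets initialize libshare once with a selective argument, as
 * zpool_enable_datasets() does later in this file. The struct members shown
 * are the ones used there.
 *
 *	sa_init_selective_arg_t sharearg;
 *	sharearg.zhandle_arr = cb.cb_handles;
 *	sharearg.zhandle_len = cb.cb_used;
 *	(void) zfs_init_libshare_arg(hdl, SA_INIT_SHARE_API_SELECTIVE,
 *	    &sharearg);
 */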
756
757
758 /*
759 * zfs_uninit_libshare(zhandle)
760 *
761 * Uninitialize the libshare API if it hasn't already been
762 * uninitialized. It is OK to call multiple times.
763 */
764 void
765 zfs_uninit_libshare(libzfs_handle_t *zhandle)
766 {
767 if (zhandle != NULL && zhandle->libzfs_sharehdl != NULL) {
768 if (_sa_fini != NULL)
769 _sa_fini(zhandle->libzfs_sharehdl);
770 zhandle->libzfs_sharehdl = NULL;
771 }
772 }
773
774 /*
775 * zfs_parse_options(options, proto)
776 *
777 * Call the legacy parse interface to get the protocol-specific
778 * options, using the NULL arg to indicate that this is a "parse" only.
779 */
780 int
781 zfs_parse_options(char *options, zfs_share_proto_t proto)
782 {
783 if (_sa_parse_legacy_options != NULL) {
784 return (_sa_parse_legacy_options(NULL, options,
785 proto_table[proto].p_name));
786 }
787 return (SA_CONFIG_ERR);
788 }
789
790 /*
791 * zfs_sa_find_share(handle, path)
792 *
793 * Wrapper around sa_find_share to find a share path in the
794 * configuration.
795 */
796 static sa_share_t
797 zfs_sa_find_share(sa_handle_t handle, char *path)
798 {
799 if (_sa_find_share != NULL)
800 return (_sa_find_share(handle, path));
801 return (NULL);
802 }
803
804 /*
805 * zfs_sa_enable_share(share, proto)
806 *
807 * Wrapper for sa_enable_share which enables a share for a specified
808 * protocol.
809 */
810 static int
811 zfs_sa_enable_share(sa_share_t share, char *proto)
812 {
813 if (_sa_enable_share != NULL)
814 return (_sa_enable_share(share, proto));
815 return (SA_CONFIG_ERR);
816 }
817
818 /*
819 * zfs_sa_disable_share(share, proto)
820 *
821 * Wrapper for sa_disable_share which disables a share for a specified
822 * protocol.
823 */
824 static int
825 zfs_sa_disable_share(sa_share_t share, char *proto)
826 {
827 if (_sa_disable_share != NULL)
828 return (_sa_disable_share(share, proto));
829 return (SA_CONFIG_ERR);
830 }
831
832 /*
833 * Share the given filesystem according to the options in the specified
834 * protocol-specific properties (sharenfs, sharesmb). We rely
835 * on "libshare" to do the dirty work for us.
836 */
837 static int
838 zfs_share_proto(zfs_handle_t *zhp, zfs_share_proto_t *proto)
839 {
840 char mountpoint[ZFS_MAXPROPLEN];
841 char shareopts[ZFS_MAXPROPLEN];
842 char sourcestr[ZFS_MAXPROPLEN];
843 libzfs_handle_t *hdl = zhp->zfs_hdl;
844 sa_share_t share;
845 zfs_share_proto_t *curr_proto;
846 zprop_source_t sourcetype;
847 int service = SA_INIT_ONE_SHARE_FROM_HANDLE;
848 int ret;
849
850 if (!zfs_is_mountable(zhp, mountpoint, sizeof (mountpoint), NULL))
851 return (0);
852
853 /*
854 * This function may be called in a loop from higher up the stack, with
855 * libshare initialized for multiple shares (SA_INIT_SHARE_API_SELECTIVE).
856 * zfs_init_libshare_arg will refresh the handle's cache if necessary.
857 * In this case we do not want to switch to per-share initialization;
858 * specify SA_INIT_SHARE_API to do a full refresh, if a refresh is required.
859 */
860 if ((hdl->libzfs_sharehdl != NULL) && (_sa_service != NULL) &&
861 (_sa_service(hdl->libzfs_sharehdl) ==
862 SA_INIT_SHARE_API_SELECTIVE)) {
863 service = SA_INIT_SHARE_API;
864 }
865
866 for (curr_proto = proto; *curr_proto != PROTO_END; curr_proto++) {
867 /*
868 * Skip this protocol if there are no share options.
869 */
870 if (zfs_prop_get(zhp, proto_table[*curr_proto].p_prop,
871 shareopts, sizeof (shareopts), &sourcetype, sourcestr,
872 ZFS_MAXPROPLEN, B_FALSE) != 0 ||
873 strcmp(shareopts, "off") == 0)
874 continue;
875 ret = zfs_init_libshare_arg(hdl, service, zhp);
876 if (ret != SA_OK) {
877 (void) zfs_error_fmt(hdl, EZFS_SHARENFSFAILED,
878 dgettext(TEXT_DOMAIN, "cannot share '%s': %s"),
879 zfs_get_name(zhp), _sa_errorstr != NULL ?
880 _sa_errorstr(ret) : "");
881 return (-1);
882 }
883
884 share = zfs_sa_find_share(hdl->libzfs_sharehdl, mountpoint);
885 if (share == NULL) {
886 /*
887 * This may be a new file system that was just
888 * created, so it isn't in the internal cache.
889 * Rather than reloading the entire configuration,
890 * we can add just this one share to the cache.
891 */
892 if ((_sa_get_zfs_share == NULL) ||
893 (_sa_get_zfs_share(hdl->libzfs_sharehdl, "zfs", zhp)
894 != SA_OK)) {
895 (void) zfs_error_fmt(hdl,
896 proto_table[*curr_proto].p_share_err,
897 dgettext(TEXT_DOMAIN, "cannot share '%s'"),
898 zfs_get_name(zhp));
899 return (-1);
900 }
901 share = zfs_sa_find_share(hdl->libzfs_sharehdl,
902 mountpoint);
903 }
904 if (share != NULL) {
905 int err;
906 err = zfs_sa_enable_share(share,
907 proto_table[*curr_proto].p_name);
908 if (err != SA_OK) {
909 (void) zfs_error_fmt(hdl,
910 proto_table[*curr_proto].p_share_err,
911 dgettext(TEXT_DOMAIN, "cannot share '%s'"),
912 zfs_get_name(zhp));
913 return (-1);
914 }
915 } else {
916 (void) zfs_error_fmt(hdl,
917 proto_table[*curr_proto].p_share_err,
918 dgettext(TEXT_DOMAIN, "cannot share '%s'"),
919 zfs_get_name(zhp));
920 return (-1);
921 }
922
923 }
924 return (0);
925 }
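
/*
 * Illustrative sketch (hypothetical caller): sharing is driven entirely by
 * the sharenfs/sharesmb properties looked up above, so a consumer only has
 * to set the property (here programmatically; it could also be set via
 * zfs(1M)) and then ask for the protocols it cares about:
 *
 *	(void) zfs_prop_set(zhp, "sharenfs", "on");
 *	if (zfs_share_nfs(zhp) != 0)
 *		(void) fprintf(stderr, "%s\n",
 *		    libzfs_error_description(hdl));
 */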
926
927
928 int
929 zfs_share_nfs(zfs_handle_t *zhp)
930 {
931 return (zfs_share_proto(zhp, nfs_only));
932 }
933
934 int
935 zfs_share_smb(zfs_handle_t *zhp)
936 {
937 return (zfs_share_proto(zhp, smb_only));
938 }
939
940 int
941 zfs_shareall(zfs_handle_t *zhp)
942 {
943 return (zfs_share_proto(zhp, share_all_proto));
944 }
945
946 /*
947 * Unshare a filesystem by mountpoint.
948 */
949 static int
950 unshare_one(libzfs_handle_t *hdl, const char *name, const char *mountpoint,
951 zfs_share_proto_t proto)
952 {
953 sa_share_t share;
954 int err;
955 char *mntpt;
956 int service = SA_INIT_ONE_SHARE_FROM_NAME;
957
958 /*
959 * The mountpoint could get trashed if libshare calls getmntany,
960 * which it does during API initialization, so strdup the
961 * value.
962 */
963 mntpt = zfs_strdup(hdl, mountpoint);
964
965 /*
966 * This function may be called in a loop from higher up the stack, with
967 * libshare initialized for multiple shares (SA_INIT_SHARE_API_SELECTIVE).
968 * zfs_init_libshare_arg will refresh the handle's cache if necessary.
969 * In this case we do not want to switch to per-share initialization;
970 * specify SA_INIT_SHARE_API to do a full refresh, if a refresh is required.
971 */
972 if ((hdl->libzfs_sharehdl != NULL) && (_sa_service != NULL) &&
973 (_sa_service(hdl->libzfs_sharehdl) ==
974 SA_INIT_SHARE_API_SELECTIVE)) {
975 service = SA_INIT_SHARE_API;
976 }
977
978 err = zfs_init_libshare_arg(hdl, service, (void *)name);
979 if (err != SA_OK) {
980 free(mntpt); /* don't need the copy anymore */
981 return (zfs_error_fmt(hdl, proto_table[proto].p_unshare_err,
982 dgettext(TEXT_DOMAIN, "cannot unshare '%s': %s"),
983 name, _sa_errorstr(err)));
984 }
985
986 share = zfs_sa_find_share(hdl->libzfs_sharehdl, mntpt);
987 free(mntpt); /* don't need the copy anymore */
988
989 if (share != NULL) {
990 err = zfs_sa_disable_share(share, proto_table[proto].p_name);
991 if (err != SA_OK) {
992 return (zfs_error_fmt(hdl,
993 proto_table[proto].p_unshare_err,
994 dgettext(TEXT_DOMAIN, "cannot unshare '%s': %s"),
995 name, _sa_errorstr(err)));
996 }
997 } else {
998 return (zfs_error_fmt(hdl, proto_table[proto].p_unshare_err,
999 dgettext(TEXT_DOMAIN, "cannot unshare '%s': not found"),
1000 name));
1001 }
1002 return (0);
1003 }
1004
1005 /*
1006 * Unshare the given filesystem.
1007 */
1008 int
1009 zfs_unshare_proto(zfs_handle_t *zhp, const char *mountpoint,
1010 zfs_share_proto_t *proto)
1011 {
1012 libzfs_handle_t *hdl = zhp->zfs_hdl;
1013 struct mnttab entry;
1014 char *mntpt = NULL;
1015
1016 /* check to see if we need to unshare the filesystem */
1017 rewind(zhp->zfs_hdl->libzfs_mnttab);
1018 if (mountpoint != NULL)
1019 mountpoint = mntpt = zfs_strdup(hdl, mountpoint);
1020
1021 if (mountpoint != NULL || ((zfs_get_type(zhp) == ZFS_TYPE_FILESYSTEM) &&
1022 libzfs_mnttab_find(hdl, zfs_get_name(zhp), &entry) == 0)) {
1023 zfs_share_proto_t *curr_proto;
1024
1025 if (mountpoint == NULL)
1026 mntpt = zfs_strdup(zhp->zfs_hdl, entry.mnt_mountp);
1027
1028 for (curr_proto = proto; *curr_proto != PROTO_END;
1029 curr_proto++) {
1030
1031 if (is_shared(hdl, mntpt, *curr_proto) &&
1032 unshare_one(hdl, zhp->zfs_name,
1033 mntpt, *curr_proto) != 0) {
1034 if (mntpt != NULL)
1035 free(mntpt);
1036 return (-1);
1037 }
1038 }
1039 }
1040 if (mntpt != NULL)
1041 free(mntpt);
1042
1043 return (0);
1044 }
1045
1046 int
1047 zfs_unshare_nfs(zfs_handle_t *zhp, const char *mountpoint)
1048 {
1049 return (zfs_unshare_proto(zhp, mountpoint, nfs_only));
1050 }
1051
1052 int
1053 zfs_unshare_smb(zfs_handle_t *zhp, const char *mountpoint)
1054 {
1055 return (zfs_unshare_proto(zhp, mountpoint, smb_only));
1056 }
1057
1058 /*
1059 * Same as zfs_unmountall(), but for NFS and SMB unshares.
1060 */
1061 int
1062 zfs_unshareall_proto(zfs_handle_t *zhp, zfs_share_proto_t *proto)
1063 {
1064 prop_changelist_t *clp;
1065 int ret;
1066
1067 clp = changelist_gather(zhp, ZFS_PROP_SHARENFS, 0, 0);
1068 if (clp == NULL)
1069 return (-1);
1070
1071 ret = changelist_unshare(clp, proto);
1072 changelist_free(clp);
1073
1074 return (ret);
1075 }
1076
1077 int
1078 zfs_unshareall_nfs(zfs_handle_t *zhp)
1079 {
1080 return (zfs_unshareall_proto(zhp, nfs_only));
1081 }
1082
1083 int
1084 zfs_unshareall_smb(zfs_handle_t *zhp)
1085 {
1086 return (zfs_unshareall_proto(zhp, smb_only));
1087 }
1088
1089 int
1090 zfs_unshareall(zfs_handle_t *zhp)
1091 {
1092 return (zfs_unshareall_proto(zhp, share_all_proto));
1093 }
1094
1095 int
1096 zfs_unshareall_bypath(zfs_handle_t *zhp, const char *mountpoint)
1097 {
1098 return (zfs_unshare_proto(zhp, mountpoint, share_all_proto));
1099 }
1100
1101 /*
1102 * Remove the mountpoint associated with the current dataset, if necessary.
1103 * We only remove the underlying directory if:
1104 *
1105 * - The mountpoint is not 'none' or 'legacy'
1106 * - The mountpoint directory is empty
1107 * - The mountpoint is the default or inherited
1108 * - The 'zoned' property is set, or we're in a local zone
1109 *
1110 * Any other directories we leave alone.
1111 */
1112 void
1113 remove_mountpoint(zfs_handle_t *zhp)
1114 {
1115 char mountpoint[ZFS_MAXPROPLEN];
1116 zprop_source_t source;
1117
1118 if (!zfs_is_mountable(zhp, mountpoint, sizeof (mountpoint),
1119 &source))
1120 return;
1121
1122 if (source == ZPROP_SRC_DEFAULT ||
1123 source == ZPROP_SRC_INHERITED) {
1124 /*
1125 * Try to remove the directory, silently ignoring any errors.
1126 * The filesystem may have since been removed or moved around,
1127 * and this error isn't really useful to the administrator in
1128 * any way.
1129 */
1130 (void) rmdir(mountpoint);
1131 }
1132 }
1133
1134 /*
1135 * Add the given zfs handle to the cb_handles array, dynamically reallocating
1136 * the array if it is out of space.
1137 */
1138 void
1139 libzfs_add_handle(get_all_cb_t *cbp, zfs_handle_t *zhp)
1140 {
1141 if (cbp->cb_alloc == cbp->cb_used) {
1142 size_t newsz;
1143 zfs_handle_t **newhandles;
1144
1145 newsz = cbp->cb_alloc != 0 ? cbp->cb_alloc * 2 : 64;
1146 newhandles = zfs_realloc(zhp->zfs_hdl,
1147 cbp->cb_handles, cbp->cb_alloc * sizeof (zfs_handle_t *),
1148 newsz * sizeof (zfs_handle_t *));
1149 cbp->cb_handles = newhandles;
1150 cbp->cb_alloc = newsz;
1151 }
1152 cbp->cb_handles[cbp->cb_used++] = zhp;
1153 }
1154
1155 /*
1156 * Recursive helper function used during file system enumeration.
1157 */
1158 static int
1159 zfs_iter_cb(zfs_handle_t *zhp, void *data)
1160 {
1161 get_all_cb_t *cbp = data;
1162
1163 if (!(zfs_get_type(zhp) & ZFS_TYPE_FILESYSTEM)) {
1164 zfs_close(zhp);
1165 return (0);
1166 }
1167
1168 if (zfs_prop_get_int(zhp, ZFS_PROP_CANMOUNT) == ZFS_CANMOUNT_NOAUTO) {
1169 zfs_close(zhp);
1170 return (0);
1171 }
1172
1173 if (zfs_prop_get_int(zhp, ZFS_PROP_KEYSTATUS) ==
1174 ZFS_KEYSTATUS_UNAVAILABLE) {
1175 zfs_close(zhp);
1176 return (0);
1177 }
1178
1179 /*
1180 * If this filesystem is inconsistent and has a receive resume
1181 * token, we cannot mount it.
1182 */
1183 if (zfs_prop_get_int(zhp, ZFS_PROP_INCONSISTENT) &&
1184 zfs_prop_get(zhp, ZFS_PROP_RECEIVE_RESUME_TOKEN,
1185 NULL, 0, NULL, NULL, 0, B_TRUE) == 0) {
1186 zfs_close(zhp);
1187 return (0);
1188 }
1189
1190 libzfs_add_handle(cbp, zhp);
1191 if (zfs_iter_filesystems(zhp, zfs_iter_cb, cbp) != 0) {
1192 zfs_close(zhp);
1193 return (-1);
1194 }
1195 return (0);
1196 }
1197
1198 /*
1199 * Sort comparator that compares two mountpoint paths. We sort these paths so
1200 * that subdirectories immediately follow their parents. This means that we
1201 * effectively treat the '/' character as the lowest value non-nul char.
1202 * Since filesystems from non-global zones can have the same mountpoint
1203 * as other filesystems, the comparator sorts global zone filesystems to
1204 * the top of the list. This means that the global zone will traverse the
1205 * filesystem list in the correct order and can stop when it sees the
1206 * first zoned filesystem. In a non-global zone, only the delegated
1207 * filesystems are seen.
1208 *
1209 * An example sorted list using this comparator would look like:
1210 *
1211 * /foo
1212 * /foo/bar
1213 * /foo/bar/baz
1214 * /foo/baz
1215 * /foo.bar
1216 * /foo (NGZ1)
1217 * /foo (NGZ2)
1218 *
1219 * The mounting code depends on this ordering to deterministically iterate
1220 * over filesystems in order to spawn parallel mount tasks.
1221 */
1222 static int
1223 mountpoint_cmp(const void *arga, const void *argb)
1224 {
1225 zfs_handle_t *const *zap = arga;
1226 zfs_handle_t *za = *zap;
1227 zfs_handle_t *const *zbp = argb;
1228 zfs_handle_t *zb = *zbp;
1229 char mounta[MAXPATHLEN];
1230 char mountb[MAXPATHLEN];
1231 const char *a = mounta;
1232 const char *b = mountb;
1233 boolean_t gota, gotb;
1234 uint64_t zoneda, zonedb;
1235
1236 zoneda = zfs_prop_get_int(za, ZFS_PROP_ZONED);
1237 zonedb = zfs_prop_get_int(zb, ZFS_PROP_ZONED);
1238 if (zoneda && !zonedb)
1239 return (1);
1240 if (!zoneda && zonedb)
1241 return (-1);
1242
1243 gota = (zfs_get_type(za) == ZFS_TYPE_FILESYSTEM);
1244 if (gota) {
1245 verify(zfs_prop_get(za, ZFS_PROP_MOUNTPOINT, mounta,
1246 sizeof (mounta), NULL, NULL, 0, B_FALSE) == 0);
1247 }
1248 gotb = (zfs_get_type(zb) == ZFS_TYPE_FILESYSTEM);
1249 if (gotb) {
1250 verify(zfs_prop_get(zb, ZFS_PROP_MOUNTPOINT, mountb,
1251 sizeof (mountb), NULL, NULL, 0, B_FALSE) == 0);
1252 }
1253
1254 if (gota && gotb) {
1255 while (*a != '\0' && (*a == *b)) {
1256 a++;
1257 b++;
1258 }
1259 if (*a == *b)
1260 return (0);
1261 if (*a == '\0')
1262 return (-1);
1263 if (*b == '\0')
1264 return (1);
1265 if (*a == '/')
1266 return (-1);
1267 if (*b == '/')
1268 return (1);
1269 return (*a < *b ? -1 : *a > *b);
1270 }
1271
1272 if (gota)
1273 return (-1);
1274 if (gotb)
1275 return (1);
1276
1277 /*
1278 * If neither filesystem has a mountpoint, revert to sorting by
1279 * dataset name.
1280 */
1281 return (strcmp(zfs_get_name(za), zfs_get_name(zb)));
1282 }
1283
1284 /*
1285 * Return true if path2 is a descendant of path1.
1286 */
1287 static boolean_t
1288 libzfs_path_contains(const char *path1, const char *path2)
1289 {
1290 return (strstr(path2, path1) == path2 && path2[strlen(path1)] == '/');
1291 }
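
/*
 * For example: ("/tank", "/tank/home") is a match, while ("/tank", "/tankx")
 * and ("/tank", "/tank") are not, since the character following the prefix
 * must be '/'.
 */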
1292
1293 /*
1294 * Given a mountpoint specified by idx in the handles array, find the first
1295 * non-descendant of that mountpoint and return its index. Descendant paths
1296 * start with the parent's path. This function relies on the ordering
1297 * enforced by mountpoint_cmp().
1298 */
1299 static int
1300 non_descendant_idx(zfs_handle_t **handles, size_t num_handles, int idx)
1301 {
1302 char parent[ZFS_MAXPROPLEN];
1303 char child[ZFS_MAXPROPLEN];
1304 int i;
1305
1306 verify(zfs_prop_get(handles[idx], ZFS_PROP_MOUNTPOINT, parent,
1307 sizeof (parent), NULL, NULL, 0, B_FALSE) == 0);
1308
1309 for (i = idx + 1; i < num_handles; i++) {
1310 verify(zfs_prop_get(handles[i], ZFS_PROP_MOUNTPOINT, child,
1311 sizeof (child), NULL, NULL, 0, B_FALSE) == 0);
1312 if (!libzfs_path_contains(parent, child))
1313 break;
1314 }
1315 return (i);
1316 }
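
/*
 * For example, given handles sorted by mountpoint as
 * { /a, /a/b, /a/b/c, /d }, non_descendant_idx(handles, 4, 0) returns 3
 * (the index of /d), and non_descendant_idx(handles, 4, 1) also returns 3,
 * since /a/b/c descends from /a/b.
 */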
1317
1318 typedef struct mnt_param {
1319 libzfs_handle_t *mnt_hdl;
1320 zfs_taskq_t *mnt_tq;
1321 zfs_handle_t **mnt_zhps; /* filesystems to mount */
1322 size_t mnt_num_handles;
1323 int mnt_idx; /* Index of selected entry to mount */
1324 zfs_iter_f mnt_func;
1325 void *mnt_data;
1326 } mnt_param_t;
1327
1328 /*
1329 * Allocate and populate the parameter struct for the mount function, and
1330 * schedule mounting of the entry selected by idx.
1331 */
1332 static void
1333 zfs_dispatch_mount(libzfs_handle_t *hdl, zfs_handle_t **handles,
1334 size_t num_handles, int idx, zfs_iter_f func, void *data, zfs_taskq_t *tq)
1335 {
1336 mnt_param_t *mnt_param = zfs_alloc(hdl, sizeof (mnt_param_t));
1337
1338 mnt_param->mnt_hdl = hdl;
1339 mnt_param->mnt_tq = tq;
1340 mnt_param->mnt_zhps = handles;
1341 mnt_param->mnt_num_handles = num_handles;
1342 mnt_param->mnt_idx = idx;
1343 mnt_param->mnt_func = func;
1344 mnt_param->mnt_data = data;
1345
1346 (void) zfs_taskq_dispatch(tq, zfs_mount_task, (void*)mnt_param,
1347 ZFS_TQ_SLEEP);
1348 }
1349
1350 /*
1351 * This is the structure used to keep state of mounting or sharing operations
1352 * during a call to zpool_enable_datasets().
1353 */
1354 typedef struct mount_state {
1355 /*
1356 * ms_mntstatus is set to -1 if any mount fails. While multiple threads
1357 * could update this variable concurrently, no synchronization is
1358 * needed as it's only ever set to -1.
1359 */
1360 int ms_mntstatus;
1361 int ms_mntflags;
1362 const char *ms_mntopts;
1363 } mount_state_t;
1364
1365 static int
1366 zfs_mount_one(zfs_handle_t *zhp, void *arg)
1367 {
1368 mount_state_t *ms = arg;
1369 int ret = 0;
1370
1371 if (zfs_prop_get_int(zhp, ZFS_PROP_KEYSTATUS) ==
1372 ZFS_KEYSTATUS_UNAVAILABLE)
1373 return (0);
1374
1375 if (zfs_mount(zhp, ms->ms_mntopts, ms->ms_mntflags) != 0)
1376 ret = ms->ms_mntstatus = -1;
1377 return (ret);
1378 }
1379
1380 static int
1381 zfs_share_one(zfs_handle_t *zhp, void *arg)
1382 {
1383 mount_state_t *ms = arg;
1384 int ret = 0;
1385
1386 if (zfs_share(zhp) != 0)
1387 ret = ms->ms_mntstatus = -1;
1388 return (ret);
1389 }
1390
1391 /*
1392 * Task queue function to mount one file system. On completion, it finds and
1393 * schedules its children to be mounted. This depends on the sorting done in
1394 * zfs_foreach_mountpoint(). Note that the degenerate case (chain of entries
1395 * each descending from the previous) will have no parallelism since we always
1396 * have to wait for the parent to finish mounting before we can schedule
1397 * its children.
1398 */
1399 static void
1400 zfs_mount_task(void *arg)
1401 {
1402 mnt_param_t *mp = arg;
1403 int idx = mp->mnt_idx;
1404 zfs_handle_t **handles = mp->mnt_zhps;
1405 size_t num_handles = mp->mnt_num_handles;
1406 char mountpoint[ZFS_MAXPROPLEN];
1407
1408 verify(zfs_prop_get(handles[idx], ZFS_PROP_MOUNTPOINT, mountpoint,
1409 sizeof (mountpoint), NULL, NULL, 0, B_FALSE) == 0);
1410
1411 if (mp->mnt_func(handles[idx], mp->mnt_data) != 0)
1412 return;
1413
1414 /*
1415 * We dispatch tasks to mount filesystems with mountpoints underneath
1416 * this one. We do this by dispatching the next filesystem with a
1417 * descendant mountpoint of the one we just mounted, then skip all of
1418 * its descendants, dispatch the next descendant mountpoint, and so on.
1419 * The non_descendant_idx() function skips over filesystems that are
1420 * descendants of the filesystem we just dispatched.
1421 */
1422 for (int i = idx + 1; i < num_handles;
1423 i = non_descendant_idx(handles, num_handles, i)) {
1424 char child[ZFS_MAXPROPLEN];
1425 verify(zfs_prop_get(handles[i], ZFS_PROP_MOUNTPOINT,
1426 child, sizeof (child), NULL, NULL, 0, B_FALSE) == 0);
1427
1428 if (!libzfs_path_contains(mountpoint, child))
1429 break; /* not a descendant, stop */
1430 zfs_dispatch_mount(mp->mnt_hdl, handles, num_handles, i,
1431 mp->mnt_func, mp->mnt_data, mp->mnt_tq);
1432 }
1433 free(mp);
1434 }
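
/*
 * For example, with mountpoints sorted as { /a, /a/b, /a/b/c, /a/d }, the
 * task for /a dispatches tasks for /a/b and /a/d (skipping /a/b/c, which
 * descends from /a/b), and the /a/b task in turn dispatches /a/b/c once
 * /a/b itself is mounted.
 */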
1435
1436 /*
1437 * Issue the func callback for each ZFS handle contained in the handles
1438 * array. This function is used to mount all datasets, so it guarantees that
1439 * callbacks for parent mountpoints are issued before those for their
1440 * children. As such, before issuing any callbacks, we first sort the array
1441 * of handles by mountpoint.
1442 *
1443 * Callbacks are issued in one of two ways:
1444 *
1445 * 1. Sequentially: If the parallel argument is B_FALSE or the ZFS_SERIAL_MOUNT
1446 * environment variable is set, then we issue callbacks sequentially.
1447 *
1448 * 2. In parallel: If the parallel argument is B_TRUE and the ZFS_SERIAL_MOUNT
1449 * environment variable is not set, then we use a taskq to dispatch threads
1450 * to mount filesystems in parallel. This function dispatches tasks to mount
1451 * the filesystems at the top-level mountpoints, and these tasks in turn
1452 * are responsible for recursively mounting filesystems in their children
1453 * mountpoints.
1454 */
1455 void
1456 zfs_foreach_mountpoint(libzfs_handle_t *hdl, zfs_handle_t **handles,
1457 size_t num_handles, zfs_iter_f func, void *data, boolean_t parallel)
1458 {
1459 zoneid_t zoneid = getzoneid();
1460
1461 /*
1462 * The ZFS_SERIAL_MOUNT environment variable is an undocumented
1463 * variable that can be used as a convenience to do a/b comparison
1464 * of serial vs. parallel mounting.
1465 */
1466 boolean_t serial_mount = !parallel ||
1467 (getenv("ZFS_SERIAL_MOUNT") != NULL);
1468
1469 /*
1470 * Sort the datasets by mountpoint. See mountpoint_cmp for details
1471 * of how these are sorted.
1472 */
1473 qsort(handles, num_handles, sizeof (zfs_handle_t *), mountpoint_cmp);
1474
1475 if (serial_mount) {
1476 for (int i = 0; i < num_handles; i++) {
1477 func(handles[i], data);
1478 }
1479 return;
1480 }
1481
1482 /*
1483 * Issue the callback function for each dataset using a parallel
1484 * algorithm that uses a taskq to manage threads.
1485 */
1486 zfs_taskq_t *tq = zfs_taskq_create("mount_taskq", mount_tq_nthr, 0,
1487 mount_tq_nthr, mount_tq_nthr, ZFS_TASKQ_PREPOPULATE);
1488
1489 /*
1490 * There may be multiple "top level" mountpoints outside of the pool's
1491 * root mountpoint, e.g.: /foo /bar. Dispatch a mount task for each of
1492 * these.
1493 */
1494 for (int i = 0; i < num_handles;
1495 i = non_descendant_idx(handles, num_handles, i)) {
1496 /*
1497 * Since the mountpoints have been sorted so that the zoned
1498 * filesystems are at the end, a zoned filesystem seen from
1499 * the global zone means that we're done.
1500 */
1501 if (zoneid == GLOBAL_ZONEID &&
1502 zfs_prop_get_int(handles[i], ZFS_PROP_ZONED))
1503 break;
1504 zfs_dispatch_mount(hdl, handles, num_handles, i, func, data,
1505 tq);
1506 }
1507
1508 zfs_taskq_wait(tq); /* wait for all scheduled mounts to complete */
1509 zfs_taskq_destroy(tq);
1510 }
1511
1512 /*
1513 * Mount and share all datasets within the given pool. This assumes that no
1514 * datasets within the pool are currently mounted.
1515 */
1516 #pragma weak zpool_mount_datasets = zpool_enable_datasets
1517 int
1518 zpool_enable_datasets(zpool_handle_t *zhp, const char *mntopts, int flags)
1519 {
1520 get_all_cb_t cb = { 0 };
1521 mount_state_t ms = { 0 };
1522 zfs_handle_t *zfsp;
1523 sa_init_selective_arg_t sharearg;
1524 int ret = 0;
1525
1526 if ((zfsp = zfs_open(zhp->zpool_hdl, zhp->zpool_name,
1527 ZFS_TYPE_DATASET)) == NULL)
1528 goto out;
1529
1530
1531 /*
1532 * Gather all non-snapshot datasets within the pool. Start by adding
1533 * the root filesystem for this pool to the list, and then iterate
1534 * over all child filesystems.
1535 */
1536 libzfs_add_handle(&cb, zfsp);
1537 if (zfs_iter_filesystems(zfsp, zfs_iter_cb, &cb) != 0)
1538 goto out;
1539
1540 ms.ms_mntopts = mntopts;
1541 ms.ms_mntflags = flags;
1542 zfs_foreach_mountpoint(zhp->zpool_hdl, cb.cb_handles, cb.cb_used,
1543 zfs_mount_one, &ms, B_TRUE);
1544 if (ms.ms_mntstatus != 0)
1545 ret = ms.ms_mntstatus;
1546
1547 /*
1548 * Initialize libshare with SA_INIT_SHARE_API_SELECTIVE here to avoid
1549 * an unnecessary load/unload of the libshare API for each shared
1550 * dataset downstream.
1551 */
1552 sharearg.zhandle_arr = cb.cb_handles;
1553 sharearg.zhandle_len = cb.cb_used;
1554 if ((ret = zfs_init_libshare_arg(zhp->zpool_hdl,
1555 SA_INIT_SHARE_API_SELECTIVE, &sharearg)) != 0)
1556 goto out;
1557
1558 ms.ms_mntstatus = 0;
1559 zfs_foreach_mountpoint(zhp->zpool_hdl, cb.cb_handles, cb.cb_used,
1560 zfs_share_one, &ms, B_FALSE);
1561 if (ms.ms_mntstatus != 0)
1562 ret = ms.ms_mntstatus;
1563
1564 out:
1565 for (int i = 0; i < cb.cb_used; i++)
1566 zfs_close(cb.cb_handles[i]);
1567 free(cb.cb_handles);
1568
1569 return (ret);
1570 }
1571
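/*
 * Comparator used below when tearing down a pool's mounts: sorting the
 * mountpoint strings in reverse lexicographic order ensures that child
 * mountpoints (e.g. /tank/home/user) come before their parents
 * (/tank/home) and are therefore unmounted first.
 */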
1572 static int
1573 mountpoint_compare(const void *a, const void *b)
1574 {
1575 const char *mounta = *((char **)a);
1576 const char *mountb = *((char **)b);
1577
1578 return (strcmp(mountb, mounta));
1579 }
1580
1581 /* alias for 2002/240 */
1582 #pragma weak zpool_unmount_datasets = zpool_disable_datasets
1583 /*
1584 * Unshare and unmount all datasets within the given pool. We don't want to
1585 * rely on traversing the DSL to discover the filesystems within the pool,
1586 * because this may be expensive (if not all of them are mounted), and can fail
1587 * arbitrarily (on I/O error, for example). Instead, we walk /etc/mnttab and
1588 * gather all the filesystems that are currently mounted.
1589 */
1590 int
1591 zpool_disable_datasets(zpool_handle_t *zhp, boolean_t force)
1592 {
1593 int used, alloc;
1594 struct mnttab entry;
1595 size_t namelen;
1596 char **mountpoints = NULL;
1597 zfs_handle_t **datasets = NULL;
1598 libzfs_handle_t *hdl = zhp->zpool_hdl;
1599 int i;
1600 int ret = -1;
1601 int flags = (force ? MS_FORCE : 0);
1602 sa_init_selective_arg_t sharearg;
1603
1604 namelen = strlen(zhp->zpool_name);
1605
1606 rewind(hdl->libzfs_mnttab);
1607 used = alloc = 0;
1608 while (getmntent(hdl->libzfs_mnttab, &entry) == 0) {
1609 /*
1610 * Ignore non-ZFS entries.
1611 */
1612 if (entry.mnt_fstype == NULL ||
1613 strcmp(entry.mnt_fstype, MNTTYPE_ZFS) != 0)
1614 continue;
1615
1616 /*
1617 * Ignore filesystems not within this pool.
1618 */
1619 if (entry.mnt_mountp == NULL ||
1620 strncmp(entry.mnt_special, zhp->zpool_name, namelen) != 0 ||
1621 (entry.mnt_special[namelen] != '/' &&
1622 entry.mnt_special[namelen] != '\0'))
1623 continue;
1624
1625 /*
1626 * At this point we've found a filesystem within our pool. Add
1627 * it to our growing list.
1628 */
1629 if (used == alloc) {
1630 if (alloc == 0) {
1631 if ((mountpoints = zfs_alloc(hdl,
1632 8 * sizeof (void *))) == NULL)
1633 goto out;
1634
1635 if ((datasets = zfs_alloc(hdl,
1636 8 * sizeof (void *))) == NULL)
1637 goto out;
1638
1639 alloc = 8;
1640 } else {
1641 void *ptr;
1642
1643 if ((ptr = zfs_realloc(hdl, mountpoints,
1644 alloc * sizeof (void *),
1645 alloc * 2 * sizeof (void *))) == NULL)
1646 goto out;
1647 mountpoints = ptr;
1648
1649 if ((ptr = zfs_realloc(hdl, datasets,
1650 alloc * sizeof (void *),
1651 alloc * 2 * sizeof (void *))) == NULL)
1652 goto out;
1653 datasets = ptr;
1654
1655 alloc *= 2;
1656 }
1657 }
1658
1659 if ((mountpoints[used] = zfs_strdup(hdl,
1660 entry.mnt_mountp)) == NULL)
1661 goto out;
1662
1663 /*
1664 * This is allowed to fail, in case there is some I/O error. It
1665 * is only used to determine if we need to remove the underlying
1666 * mountpoint, so failure is not fatal.
1667 */
1668 datasets[used] = make_dataset_handle(hdl, entry.mnt_special);
1669
1670 used++;
1671 }
1672
1673 /*
1674 * At this point, we have the entire list of filesystems, so sort it by
1675 * mountpoint.
1676 */
1677 sharearg.zhandle_arr = datasets;
1678 sharearg.zhandle_len = used;
1679 ret = zfs_init_libshare_arg(hdl, SA_INIT_SHARE_API_SELECTIVE,
1680 &sharearg);
1681 if (ret != 0)
1682 goto out;
1683 qsort(mountpoints, used, sizeof (char *), mountpoint_compare);
1684
1685 /*
1686 * Walk through and first unshare everything.
1687 */
1688 for (i = 0; i < used; i++) {
1689 zfs_share_proto_t *curr_proto;
1690 for (curr_proto = share_all_proto; *curr_proto != PROTO_END;
1691 curr_proto++) {
1692 if (is_shared(hdl, mountpoints[i], *curr_proto) &&
1693 unshare_one(hdl, mountpoints[i],
1694 mountpoints[i], *curr_proto) != 0)
1695 goto out;
1696 }
1697 }
1698
1699 /*
1700 * Now unmount everything, removing the underlying directories as
1701 * appropriate.
1702 */
1703 for (i = 0; i < used; i++) {
1704 if (unmount_one(hdl, mountpoints[i], flags) != 0)
1705 goto out;
1706 }
1707
1708 for (i = 0; i < used; i++) {
1709 if (datasets[i])
1710 remove_mountpoint(datasets[i]);
1711 }
1712
1713 ret = 0;
1714 out:
1715 for (i = 0; i < used; i++) {
1716 if (datasets[i])
1717 zfs_close(datasets[i]);
1718 free(mountpoints[i]);
1719 }
1720 free(datasets);
1721 free(mountpoints);
1722
1723 return (ret);
1724 }