11083 support NFS server in zone
Portions contributed by: Dan Kruchinin <dan.kruchinin@nexenta.com>
Portions contributed by: Stepan Zastupov <stepan.zastupov@gmail.com>
Portions contributed by: Joyce McIntosh <joyce.mcintosh@nexenta.com>
Portions contributed by: Mike Zeller <mike@mikezeller.net>
Portions contributed by: Dan McDonald <danmcd@joyent.com>
Portions contributed by: Gordon Ross <gordon.w.ross@gmail.com>
Portions contributed by: Vitaliy Gusev <gusev.vitaliy@gmail.com>
Reviewed by: Rick McNeal <rick.mcneal@nexenta.com>
Reviewed by: Rob Gittins <rob.gittins@nexenta.com>
Reviewed by: Sanjay Nadkarni <sanjay.nadkarni@nexenta.com>
Reviewed by: Jason King <jbk@joyent.com>
Reviewed by: C Fraire <cfraire@me.com>
Change-Id: I22f289d357503f9b48a0bc2482cc4328a6d43d16
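
For reviewers who want to exercise the code paths touched below, here is a minimal, hypothetical libzfs consumer (not part of this change) that mounts and then shares a single filesystem through the same public zfs_mount()/zfs_share() entry points this diff modifies. Error handling is abbreviated; only documented libzfs calls (libzfs_init(), zfs_open(), zfs_close(), libzfs_fini()) are assumed.

/*
 * Hypothetical test driver, not part of this change: mount and share
 * one dataset via the libzfs entry points touched in this diff.
 */
#include <stdio.h>
#include <libzfs.h>

int
main(int argc, char **argv)
{
	libzfs_handle_t *hdl;
	zfs_handle_t *zhp;
	int ret = 1;

	if (argc != 2) {
		(void) fprintf(stderr, "usage: %s <dataset>\n", argv[0]);
		return (1);
	}

	if ((hdl = libzfs_init()) == NULL)
		return (1);

	if ((zhp = zfs_open(hdl, argv[1], ZFS_TYPE_FILESYSTEM)) != NULL) {
		/* zfs_mount() returns 0 if the dataset is not mountable */
		if (zfs_mount(zhp, NULL, 0) == 0 && zfs_share(zhp) == 0)
			ret = 0;
		zfs_close(zhp);
	}

	libzfs_fini(hdl);
	return (ret);
}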
--- old/usr/src/lib/libzfs/common/libzfs_mount.c
+++ new/usr/src/lib/libzfs/common/libzfs_mount.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 - * Copyright 2015 Nexenta Systems, Inc. All rights reserved.
24 23 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
25 - * Copyright (c) 2014, 2017 by Delphix. All rights reserved.
24 + */
25 +
26 +/*
27 + * Copyright 2019 Nexenta Systems, Inc.
28 + * Copyright (c) 2014, 2016 by Delphix. All rights reserved.
26 29 * Copyright 2016 Igor Kozhukhov <ikozhukhov@gmail.com>
27 30 * Copyright 2017 Joyent, Inc.
28 31 * Copyright 2017 RackTop Systems.
29 32 * Copyright 2018 OmniOS Community Edition (OmniOSce) Association.
30 33 */
31 34
32 35 /*
33 36 * Routines to manage ZFS mounts. We separate all the nasty routines that have
34 37 * to deal with the OS. The following functions are the main entry points --
35 38 * they are used by mount and unmount and when changing a filesystem's
36 39 * mountpoint.
37 40 *
38 41 * zfs_is_mounted()
39 42 * zfs_mount()
40 43 * zfs_unmount()
41 44 * zfs_unmountall()
42 45 *
43 46 * This file also contains the functions used to manage sharing filesystems via
44 47 * NFS and iSCSI:
45 48 *
46 49 * zfs_is_shared()
47 50 * zfs_share()
48 51 * zfs_unshare()
49 52 *
50 53 * zfs_is_shared_nfs()
51 54 * zfs_is_shared_smb()
52 55 * zfs_share_proto()
53 56 * zfs_shareall()
54 57 * zfs_unshare_nfs()
55 58 * zfs_unshare_smb()
56 59 * zfs_unshareall_nfs()
57 60 * zfs_unshareall_smb()
58 61 * zfs_unshareall()
59 62 * zfs_unshareall_bypath()
60 63 *
61 64 * The following functions are available for pool consumers, and will
62 65 * mount/unmount and share/unshare all datasets within pool:
63 66 *
64 67 * zpool_enable_datasets()
65 68 * zpool_disable_datasets()
66 69 */
67 70
68 71 #include <dirent.h>
69 72 #include <dlfcn.h>
70 73 #include <errno.h>
71 74 #include <fcntl.h>
72 75 #include <libgen.h>
73 76 #include <libintl.h>
74 77 #include <stdio.h>
75 78 #include <stdlib.h>
76 79 #include <strings.h>
77 80 #include <unistd.h>
78 81 #include <zone.h>
79 82 #include <sys/mntent.h>
80 83 #include <sys/mount.h>
81 84 #include <sys/stat.h>
82 85 #include <sys/statvfs.h>
83 86 #include <sys/dsl_crypt.h>
84 87
85 88 #include <libzfs.h>
86 89
87 90 #include "libzfs_impl.h"
88 91 #include "libzfs_taskq.h"
89 92
90 93 #include <libshare.h>
91 94 #include <sys/systeminfo.h>
92 95 #define MAXISALEN 257 /* based on sysinfo(2) man page */
93 96
94 97 static int mount_tq_nthr = 512; /* taskq threads for multi-threaded mounting */
95 98
96 99 static void zfs_mount_task(void *);
97 100 static int zfs_share_proto(zfs_handle_t *, zfs_share_proto_t *);
98 101 zfs_share_type_t zfs_is_shared_proto(zfs_handle_t *, char **,
99 102 zfs_share_proto_t);
100 103
101 104 /*
102 105 * The share protocols table must be in the same order as the zfs_share_proto_t
103 106 * enum in libzfs_impl.h
104 107 */
105 108 typedef struct {
106 109 zfs_prop_t p_prop;
107 110 char *p_name;
108 111 int p_share_err;
109 112 int p_unshare_err;
110 113 } proto_table_t;
111 114
112 115 proto_table_t proto_table[PROTO_END] = {
113 116 {ZFS_PROP_SHARENFS, "nfs", EZFS_SHARENFSFAILED, EZFS_UNSHARENFSFAILED},
114 117 {ZFS_PROP_SHARESMB, "smb", EZFS_SHARESMBFAILED, EZFS_UNSHARESMBFAILED},
115 118 };
116 119
117 120 zfs_share_proto_t nfs_only[] = {
118 121 PROTO_NFS,
119 122 PROTO_END
120 123 };
121 124
122 125 zfs_share_proto_t smb_only[] = {
123 126 PROTO_SMB,
124 127 PROTO_END
125 128 };
126 129 zfs_share_proto_t share_all_proto[] = {
127 130 PROTO_NFS,
128 131 PROTO_SMB,
129 132 PROTO_END
130 133 };
131 134
132 135 /*
133 136 * Search the sharetab for the given mountpoint and protocol, returning
134 137 * a zfs_share_type_t value.
135 138 */
136 139 static zfs_share_type_t
137 140 is_shared(libzfs_handle_t *hdl, const char *mountpoint, zfs_share_proto_t proto)
138 141 {
139 142 char buf[MAXPATHLEN], *tab;
140 143 char *ptr;
141 144
142 145 if (hdl->libzfs_sharetab == NULL)
143 146 return (SHARED_NOT_SHARED);
144 147
145 148 (void) fseek(hdl->libzfs_sharetab, 0, SEEK_SET);
146 149
147 150 while (fgets(buf, sizeof (buf), hdl->libzfs_sharetab) != NULL) {
148 151
149 152 /* the mountpoint is the first entry on each line */
150 153 if ((tab = strchr(buf, '\t')) == NULL)
151 154 continue;
152 155
153 156 *tab = '\0';
154 157 if (strcmp(buf, mountpoint) == 0) {
155 158 /*
156 159 * the protocol field is the third field
157 160 * skip over second field
158 161 */
159 162 ptr = ++tab;
160 163 if ((tab = strchr(ptr, '\t')) == NULL)
161 164 continue;
162 165 ptr = ++tab;
163 166 if ((tab = strchr(ptr, '\t')) == NULL)
164 167 continue;
165 168 *tab = '\0';
166 169 if (strcmp(ptr,
167 170 proto_table[proto].p_name) == 0) {
168 171 switch (proto) {
169 172 case PROTO_NFS:
170 173 return (SHARED_NFS);
171 174 case PROTO_SMB:
172 175 return (SHARED_SMB);
173 176 default:
174 177 return (0);
175 178 }
176 179 }
177 180 }
178 181 }
179 182
180 183 return (SHARED_NOT_SHARED);
181 184 }
182 185
183 186 static boolean_t
184 187 dir_is_empty_stat(const char *dirname)
185 188 {
186 189 struct stat st;
187 190
188 191 /*
189 192 * We only want to return false if the given path is a non empty
190 193 * directory, all other errors are handled elsewhere.
191 194 */
192 195 if (stat(dirname, &st) < 0 || !S_ISDIR(st.st_mode)) {
193 196 return (B_TRUE);
194 197 }
195 198
196 199 /*
197 200 * An empty directory will still have two entries in it, one
198 201 * entry for each of "." and "..".
199 202 */
200 203 if (st.st_size > 2) {
201 204 return (B_FALSE);
202 205 }
203 206
204 207 return (B_TRUE);
205 208 }
206 209
207 210 static boolean_t
208 211 dir_is_empty_readdir(const char *dirname)
209 212 {
210 213 DIR *dirp;
211 214 struct dirent64 *dp;
212 215 int dirfd;
213 216
214 217 if ((dirfd = openat(AT_FDCWD, dirname,
215 218 O_RDONLY | O_NDELAY | O_LARGEFILE | O_CLOEXEC, 0)) < 0) {
216 219 return (B_TRUE);
217 220 }
218 221
219 222 if ((dirp = fdopendir(dirfd)) == NULL) {
220 223 (void) close(dirfd);
221 224 return (B_TRUE);
222 225 }
223 226
224 227 while ((dp = readdir64(dirp)) != NULL) {
225 228
226 229 if (strcmp(dp->d_name, ".") == 0 ||
227 230 strcmp(dp->d_name, "..") == 0)
228 231 continue;
229 232
230 233 (void) closedir(dirp);
231 234 return (B_FALSE);
232 235 }
233 236
234 237 (void) closedir(dirp);
235 238 return (B_TRUE);
236 239 }
237 240
238 241 /*
239 242 * Returns true if the specified directory is empty. If we can't open the
240 243 * directory at all, return true so that the mount can fail with a more
241 244 * informative error message.
242 245 */
243 246 static boolean_t
244 247 dir_is_empty(const char *dirname)
245 248 {
246 249 struct statvfs64 st;
247 250
248 251 /*
249 252 * If the statvfs call fails or the filesystem is not a ZFS
250 253 * filesystem, fall back to the slow path which uses readdir.
251 254 */
252 255 if ((statvfs64(dirname, &st) != 0) ||
253 256 (strcmp(st.f_basetype, "zfs") != 0)) {
254 257 return (dir_is_empty_readdir(dirname));
255 258 }
256 259
257 260 /*
258 261 * At this point, we know the provided path is on a ZFS
259 262 * filesystem, so we can use stat instead of readdir to
260 263 * determine if the directory is empty or not. We try to avoid
261 264 * using readdir because that requires opening "dirname"; this
262 265 * open file descriptor can potentially end up in a child
263 266 * process if there's a concurrent fork, thus preventing the
264 267 * zfs_mount() from otherwise succeeding (the open file
265 268 * descriptor inherited by the child process will cause the
266 269 * parent's mount to fail with EBUSY). The performance
267 270 * implications of replacing the open, read, and close with a
268 271 * single stat is nice; but is not the main motivation for the
269 272 * added complexity.
270 273 */
271 274 return (dir_is_empty_stat(dirname));
272 275 }
273 276
274 277 /*
275 278 * Checks to see if the mount is active. If the filesystem is mounted, we fill
276 279 * in 'where' with the current mountpoint, and return B_TRUE. Otherwise, we
277 280 * return B_FALSE.
278 281 */
279 282 boolean_t
280 283 is_mounted(libzfs_handle_t *zfs_hdl, const char *special, char **where)
281 284 {
282 285 struct mnttab entry;
283 286
284 287 if (libzfs_mnttab_find(zfs_hdl, special, &entry) != 0)
285 288 return (B_FALSE);
286 289
287 290 if (where != NULL)
288 291 *where = zfs_strdup(zfs_hdl, entry.mnt_mountp);
289 292
290 293 return (B_TRUE);
291 294 }
292 295
293 296 boolean_t
294 297 zfs_is_mounted(zfs_handle_t *zhp, char **where)
295 298 {
296 299 return (is_mounted(zhp->zfs_hdl, zfs_get_name(zhp), where));
297 300 }
298 301
299 302 /*
300 303 * Returns true if the given dataset is mountable, false otherwise. Returns the
301 304 * mountpoint in 'buf'.
302 305 */
303 306 static boolean_t
304 307 zfs_is_mountable(zfs_handle_t *zhp, char *buf, size_t buflen,
305 308 zprop_source_t *source)
306 309 {
307 310 char sourceloc[MAXNAMELEN];
308 311 zprop_source_t sourcetype;
309 312
310 313 if (!zfs_prop_valid_for_type(ZFS_PROP_MOUNTPOINT, zhp->zfs_type))
311 314 return (B_FALSE);
312 315
313 316 verify(zfs_prop_get(zhp, ZFS_PROP_MOUNTPOINT, buf, buflen,
314 317 &sourcetype, sourceloc, sizeof (sourceloc), B_FALSE) == 0);
315 318
316 319 if (strcmp(buf, ZFS_MOUNTPOINT_NONE) == 0 ||
317 320 strcmp(buf, ZFS_MOUNTPOINT_LEGACY) == 0)
318 321 return (B_FALSE);
319 322
320 323 if (zfs_prop_get_int(zhp, ZFS_PROP_CANMOUNT) == ZFS_CANMOUNT_OFF)
321 324 return (B_FALSE);
322 325
323 326 if (zfs_prop_get_int(zhp, ZFS_PROP_ZONED) &&
324 327 getzoneid() == GLOBAL_ZONEID)
325 328 return (B_FALSE);
326 329
327 330 if (source)
328 331 *source = sourcetype;
329 332
330 333 return (B_TRUE);
331 334 }
332 335
333 336 /*
334 337 * Mount the given filesystem.
335 338 */
336 339 int
337 340 zfs_mount(zfs_handle_t *zhp, const char *options, int flags)
338 341 {
339 342 struct stat buf;
340 343 char mountpoint[ZFS_MAXPROPLEN];
341 344 char mntopts[MNT_LINE_MAX];
342 345 libzfs_handle_t *hdl = zhp->zfs_hdl;
343 346 uint64_t keystatus;
344 347 int rc;
345 348
346 349 if (options == NULL)
347 350 mntopts[0] = '\0';
348 351 else
349 352 (void) strlcpy(mntopts, options, sizeof (mntopts));
350 353
351 354 /*
352 355 * If the pool is imported read-only then all mounts must be read-only
353 356 */
354 357 if (zpool_get_prop_int(zhp->zpool_hdl, ZPOOL_PROP_READONLY, NULL))
355 358 flags |= MS_RDONLY;
356 359
357 360 if (!zfs_is_mountable(zhp, mountpoint, sizeof (mountpoint), NULL))
358 361 return (0);
359 362
360 363 /*
361 364 * If the filesystem is encrypted the key must be loaded in order to
362 365 * mount. If the key isn't loaded, the MS_CRYPT flag decides whether
363 366 * or not we attempt to load the keys. Note: we must call
364 367 * zfs_refresh_properties() here since some callers of this function
365 368 * (most notably zpool_enable_datasets()) may implicitly load our key
366 369 * by loading the parent's key first.
367 370 */
368 371 if (zfs_prop_get_int(zhp, ZFS_PROP_ENCRYPTION) != ZIO_CRYPT_OFF) {
369 372 zfs_refresh_properties(zhp);
370 373 keystatus = zfs_prop_get_int(zhp, ZFS_PROP_KEYSTATUS);
371 374
372 375 /*
373 376 * If the key is unavailable and MS_CRYPT is set give the
374 377 * user a chance to enter the key. Otherwise just fail
375 378 * immediately.
376 379 */
377 380 if (keystatus == ZFS_KEYSTATUS_UNAVAILABLE) {
378 381 if (flags & MS_CRYPT) {
379 382 rc = zfs_crypto_load_key(zhp, B_FALSE, NULL);
380 383 if (rc != 0)
381 384 return (rc);
382 385 } else {
383 386 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
384 387 "encryption key not loaded"));
385 388 return (zfs_error_fmt(hdl, EZFS_MOUNTFAILED,
386 389 dgettext(TEXT_DOMAIN, "cannot mount '%s'"),
387 390 mountpoint));
388 391 }
389 392 }
390 393
391 394 }
392 395
393 396 /* Create the directory if it doesn't already exist */
394 397 if (lstat(mountpoint, &buf) != 0) {
395 398 if (mkdirp(mountpoint, 0755) != 0) {
396 399 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
397 400 "failed to create mountpoint"));
398 401 return (zfs_error_fmt(hdl, EZFS_MOUNTFAILED,
399 402 dgettext(TEXT_DOMAIN, "cannot mount '%s'"),
400 403 mountpoint));
401 404 }
402 405 }
403 406
404 407 /*
405 408 * Determine if the mountpoint is empty. If so, refuse to perform the
406 409 * mount. We don't perform this check if MS_OVERLAY is specified, which
407 410 * would defeat the point. We also avoid this check if 'remount' is
408 411 * specified.
409 412 */
410 413 if ((flags & MS_OVERLAY) == 0 &&
411 414 strstr(mntopts, MNTOPT_REMOUNT) == NULL &&
412 415 !dir_is_empty(mountpoint)) {
413 416 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
414 417 "directory is not empty"));
415 418 return (zfs_error_fmt(hdl, EZFS_MOUNTFAILED,
416 419 dgettext(TEXT_DOMAIN, "cannot mount '%s'"), mountpoint));
417 420 }
418 421
419 422 /* perform the mount */
420 423 if (mount(zfs_get_name(zhp), mountpoint, MS_OPTIONSTR | flags,
421 424 MNTTYPE_ZFS, NULL, 0, mntopts, sizeof (mntopts)) != 0) {
422 425 /*
423 426 * Generic errors are nasty, but there are just way too many
424 427 * from mount(), and they're well-understood. We pick a few
425 428 * common ones to improve upon.
426 429 */
427 430 if (errno == EBUSY) {
428 431 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
429 432 "mountpoint or dataset is busy"));
430 433 } else if (errno == EPERM) {
431 434 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
432 435 "Insufficient privileges"));
433 436 } else if (errno == ENOTSUP) {
434 437 char buf[256];
435 438 int spa_version;
436 439
437 440 VERIFY(zfs_spa_version(zhp, &spa_version) == 0);
438 441 (void) snprintf(buf, sizeof (buf),
439 442 dgettext(TEXT_DOMAIN, "Can't mount a version %lld "
440 443 "file system on a version %d pool. Pool must be"
441 444 " upgraded to mount this file system."),
442 445 (u_longlong_t)zfs_prop_get_int(zhp,
443 446 ZFS_PROP_VERSION), spa_version);
444 447 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, buf));
445 448 } else {
446 449 zfs_error_aux(hdl, strerror(errno));
447 450 }
448 451 return (zfs_error_fmt(hdl, EZFS_MOUNTFAILED,
449 452 dgettext(TEXT_DOMAIN, "cannot mount '%s'"),
450 453 zhp->zfs_name));
451 454 }
452 455
453 456 /* add the mounted entry into our cache */
454 457 libzfs_mnttab_add(hdl, zfs_get_name(zhp), mountpoint,
455 458 mntopts);
456 459 return (0);
457 460 }
458 461
459 462 /*
460 463 * Unmount a single filesystem.
461 464 */
462 465 static int
463 466 unmount_one(libzfs_handle_t *hdl, const char *mountpoint, int flags)
464 467 {
465 468 if (umount2(mountpoint, flags) != 0) {
466 469 zfs_error_aux(hdl, strerror(errno));
467 470 return (zfs_error_fmt(hdl, EZFS_UMOUNTFAILED,
468 471 dgettext(TEXT_DOMAIN, "cannot unmount '%s'"),
469 472 mountpoint));
470 473 }
471 474
472 475 return (0);
473 476 }
474 477
475 478 /*
476 479 * Unmount the given filesystem.
477 480 */
478 481 int
479 482 zfs_unmount(zfs_handle_t *zhp, const char *mountpoint, int flags)
480 483 {
481 484 libzfs_handle_t *hdl = zhp->zfs_hdl;
482 485 struct mnttab entry;
483 486 char *mntpt = NULL;
484 487
485 488 /* check to see if we need to unmount the filesystem */
486 489 if (mountpoint != NULL || ((zfs_get_type(zhp) == ZFS_TYPE_FILESYSTEM) &&
487 490 libzfs_mnttab_find(hdl, zhp->zfs_name, &entry) == 0)) {
488 491 /*
489 492 * mountpoint may have come from a call to
490 493 * getmnt/getmntany if it isn't NULL. If it is NULL,
491 494 * we know it comes from libzfs_mnttab_find which can
492 495 * then get freed later. We strdup it to play it safe.
493 496 */
494 497 if (mountpoint == NULL)
495 498 mntpt = zfs_strdup(hdl, entry.mnt_mountp);
496 499 else
497 500 mntpt = zfs_strdup(hdl, mountpoint);
498 501
499 502 /*
500 503 * Unshare and unmount the filesystem
501 504 */
502 505 if (zfs_unshare_proto(zhp, mntpt, share_all_proto) != 0)
503 506 return (-1);
504 507
505 508 if (unmount_one(hdl, mntpt, flags) != 0) {
506 509 free(mntpt);
507 510 (void) zfs_shareall(zhp);
508 511 return (-1);
509 512 }
510 513 libzfs_mnttab_remove(hdl, zhp->zfs_name);
511 514 free(mntpt);
512 515 }
513 516
514 517 return (0);
515 518 }
516 519
517 520 /*
518 521 * Unmount this filesystem and any children inheriting the mountpoint property.
519 522 * To do this, just act like we're changing the mountpoint property, but don't
520 523 * remount the filesystems afterwards.
521 524 */
522 525 int
523 526 zfs_unmountall(zfs_handle_t *zhp, int flags)
524 527 {
525 528 prop_changelist_t *clp;
526 529 int ret;
527 530
528 531 clp = changelist_gather(zhp, ZFS_PROP_MOUNTPOINT, 0, flags);
529 532 if (clp == NULL)
530 533 return (-1);
531 534
532 535 ret = changelist_prefix(clp);
533 536 changelist_free(clp);
534 537
535 538 return (ret);
536 539 }
537 540
538 541 boolean_t
539 542 zfs_is_shared(zfs_handle_t *zhp)
540 543 {
541 544 zfs_share_type_t rc = 0;
542 545 zfs_share_proto_t *curr_proto;
543 546
544 547 if (ZFS_IS_VOLUME(zhp))
545 548 return (B_FALSE);
546 549
547 550 for (curr_proto = share_all_proto; *curr_proto != PROTO_END;
548 551 curr_proto++)
549 552 rc |= zfs_is_shared_proto(zhp, NULL, *curr_proto);
550 553
551 554 return (rc ? B_TRUE : B_FALSE);
552 555 }
553 556
554 557 int
555 558 zfs_share(zfs_handle_t *zhp)
556 559 {
557 560 assert(!ZFS_IS_VOLUME(zhp));
558 561 return (zfs_share_proto(zhp, share_all_proto));
559 562 }
560 563
561 564 int
562 565 zfs_unshare(zfs_handle_t *zhp)
563 566 {
564 567 assert(!ZFS_IS_VOLUME(zhp));
565 568 return (zfs_unshareall(zhp));
566 569 }
567 570
568 571 /*
569 572 * Check to see if the filesystem is currently shared.
570 573 */
571 574 zfs_share_type_t
572 575 zfs_is_shared_proto(zfs_handle_t *zhp, char **where, zfs_share_proto_t proto)
573 576 {
574 577 char *mountpoint;
575 578 zfs_share_type_t rc;
576 579
577 580 if (!zfs_is_mounted(zhp, &mountpoint))
578 581 return (SHARED_NOT_SHARED);
579 582
580 583 if ((rc = is_shared(zhp->zfs_hdl, mountpoint, proto))
581 584 != SHARED_NOT_SHARED) {
582 585 if (where != NULL)
583 586 *where = mountpoint;
584 587 else
585 588 free(mountpoint);
586 589 return (rc);
587 590 } else {
588 591 free(mountpoint);
589 592 return (SHARED_NOT_SHARED);
590 593 }
591 594 }
592 595
593 596 boolean_t
594 597 zfs_is_shared_nfs(zfs_handle_t *zhp, char **where)
595 598 {
596 599 return (zfs_is_shared_proto(zhp, where,
597 600 PROTO_NFS) != SHARED_NOT_SHARED);
598 601 }
599 602
600 603 boolean_t
601 604 zfs_is_shared_smb(zfs_handle_t *zhp, char **where)
602 605 {
603 606 return (zfs_is_shared_proto(zhp, where,
604 607 PROTO_SMB) != SHARED_NOT_SHARED);
605 608 }
606 609
607 610 /*
608 611 * Make sure things will work if libshare isn't installed by using
609 612 * wrapper functions that check to see that the pointers to functions
610 613 * initialized in _zfs_init_libshare() are actually present.
611 614 */
612 615
613 616 static sa_handle_t (*_sa_init)(int);
614 617 static sa_handle_t (*_sa_init_arg)(int, void *);
615 618 static int (*_sa_service)(sa_handle_t);
616 619 static void (*_sa_fini)(sa_handle_t);
617 620 static sa_share_t (*_sa_find_share)(sa_handle_t, char *);
618 621 static int (*_sa_enable_share)(sa_share_t, char *);
619 622 static int (*_sa_disable_share)(sa_share_t, char *);
620 623 static char *(*_sa_errorstr)(int);
621 624 static int (*_sa_parse_legacy_options)(sa_group_t, char *, char *);
622 625 static boolean_t (*_sa_needs_refresh)(sa_handle_t *);
623 626 static libzfs_handle_t *(*_sa_get_zfs_handle)(sa_handle_t);
624 -static int (*_sa_zfs_process_share)(sa_handle_t, sa_group_t, sa_share_t,
625 - char *, char *, zprop_source_t, char *, char *, char *);
627 +static int (* _sa_get_zfs_share)(sa_handle_t, char *, zfs_handle_t *);
626 628 static void (*_sa_update_sharetab_ts)(sa_handle_t);
627 629
628 630 /*
629 631 * _zfs_init_libshare()
630 632 *
631 633 * Find the libshare.so.1 entry points that we use here and save the
632 634 * values to be used later. This is triggered by the runtime loader.
633 635 * Make sure the correct ISA version is loaded.
634 636 */
635 637
636 638 #pragma init(_zfs_init_libshare)
637 639 static void
638 640 _zfs_init_libshare(void)
639 641 {
640 642 void *libshare;
641 643 char path[MAXPATHLEN];
642 644 char isa[MAXISALEN];
643 645
644 646 #if defined(_LP64)
645 647 if (sysinfo(SI_ARCHITECTURE_64, isa, MAXISALEN) == -1)
646 648 isa[0] = '\0';
647 649 #else
648 650 isa[0] = '\0';
649 651 #endif
650 652 (void) snprintf(path, MAXPATHLEN,
651 653 "/usr/lib/%s/libshare.so.1", isa);
652 654
653 655 if ((libshare = dlopen(path, RTLD_LAZY | RTLD_GLOBAL)) != NULL) {
654 656 _sa_init = (sa_handle_t (*)(int))dlsym(libshare, "sa_init");
655 657 _sa_init_arg = (sa_handle_t (*)(int, void *))dlsym(libshare,
656 658 "sa_init_arg");
657 659 _sa_fini = (void (*)(sa_handle_t))dlsym(libshare, "sa_fini");
658 660 _sa_service = (int (*)(sa_handle_t))dlsym(libshare,
659 661 "sa_service");
660 662 _sa_find_share = (sa_share_t (*)(sa_handle_t, char *))
661 663 dlsym(libshare, "sa_find_share");
662 664 _sa_enable_share = (int (*)(sa_share_t, char *))dlsym(libshare,
663 665 "sa_enable_share");
664 666 _sa_disable_share = (int (*)(sa_share_t, char *))dlsym(libshare,
665 667 "sa_disable_share");
666 668 _sa_errorstr = (char *(*)(int))dlsym(libshare, "sa_errorstr");
667 669 _sa_parse_legacy_options = (int (*)(sa_group_t, char *, char *))
668 670 dlsym(libshare, "sa_parse_legacy_options");
669 671 _sa_needs_refresh = (boolean_t (*)(sa_handle_t *))
670 672 dlsym(libshare, "sa_needs_refresh");
671 673 _sa_get_zfs_handle = (libzfs_handle_t *(*)(sa_handle_t))
672 674 dlsym(libshare, "sa_get_zfs_handle");
673 - _sa_zfs_process_share = (int (*)(sa_handle_t, sa_group_t,
674 - sa_share_t, char *, char *, zprop_source_t, char *,
675 - char *, char *))dlsym(libshare, "sa_zfs_process_share");
675 + _sa_get_zfs_share = (int (*)(sa_handle_t, char *,
676 + zfs_handle_t *)) dlsym(libshare, "sa_get_zfs_share");
676 677 _sa_update_sharetab_ts = (void (*)(sa_handle_t))
677 678 dlsym(libshare, "sa_update_sharetab_ts");
678 679 if (_sa_init == NULL || _sa_init_arg == NULL ||
679 680 _sa_fini == NULL || _sa_find_share == NULL ||
680 681 _sa_enable_share == NULL || _sa_disable_share == NULL ||
681 682 _sa_errorstr == NULL || _sa_parse_legacy_options == NULL ||
682 683 _sa_needs_refresh == NULL || _sa_get_zfs_handle == NULL ||
683 - _sa_zfs_process_share == NULL || _sa_service == NULL ||
684 + _sa_get_zfs_share == NULL || _sa_service == NULL ||
684 685 _sa_update_sharetab_ts == NULL) {
685 686 _sa_init = NULL;
686 687 _sa_init_arg = NULL;
687 688 _sa_service = NULL;
688 689 _sa_fini = NULL;
689 690 _sa_disable_share = NULL;
690 691 _sa_enable_share = NULL;
691 692 _sa_errorstr = NULL;
692 693 _sa_parse_legacy_options = NULL;
693 694 (void) dlclose(libshare);
694 695 _sa_needs_refresh = NULL;
695 696 _sa_get_zfs_handle = NULL;
696 - _sa_zfs_process_share = NULL;
697 + _sa_get_zfs_share = NULL;
697 698 _sa_update_sharetab_ts = NULL;
698 699 }
699 700 }
700 701 }
701 702
702 703 /*
703 704 * zfs_init_libshare(zhandle, service)
704 705 *
705 706 * Initialize the libshare API if it hasn't already been initialized.
706 707 * In all cases it returns 0 if it succeeded and an error if not. The
707 708 * service value is which part(s) of the API to initialize and is a
708 709 * direct map to the libshare sa_init(service) interface.
709 710 */
710 711 static int
711 712 zfs_init_libshare_impl(libzfs_handle_t *zhandle, int service, void *arg)
712 713 {
713 714 /*
714 715 * libshare is either not installed or we're in a branded zone. The
715 716 * rest of the wrapper functions around the libshare calls already
716 717 * handle NULL function pointers, but we don't want the callers of
717 718 * zfs_init_libshare() to fail prematurely if libshare is not available.
718 719 */
719 720 if (_sa_init == NULL)
720 721 return (SA_OK);
721 722
722 723 /*
723 724 * Attempt to refresh libshare. This is necessary if there was a cache
724 725 * miss for a new ZFS dataset that was just created, or if state of the
725 726 * sharetab file has changed since libshare was last initialized. We
726 727 * want to make sure so check timestamps to see if a different process
727 728 * has updated any of the configuration. If there was some non-ZFS
728 729 * change, we need to re-initialize the internal cache.
729 730 */
730 731 if (_sa_needs_refresh != NULL &&
731 732 _sa_needs_refresh(zhandle->libzfs_sharehdl)) {
732 733 zfs_uninit_libshare(zhandle);
733 734 zhandle->libzfs_sharehdl = _sa_init_arg(service, arg);
734 735 }
735 736
736 737 if (zhandle && zhandle->libzfs_sharehdl == NULL)
737 738 zhandle->libzfs_sharehdl = _sa_init_arg(service, arg);
738 739
739 740 if (zhandle->libzfs_sharehdl == NULL)
740 741 return (SA_NO_MEMORY);
741 742
742 743 return (SA_OK);
743 744 }
744 745 int
745 746 zfs_init_libshare(libzfs_handle_t *zhandle, int service)
746 747 {
747 748 return (zfs_init_libshare_impl(zhandle, service, NULL));
748 749 }
749 750
750 751 int
751 752 zfs_init_libshare_arg(libzfs_handle_t *zhandle, int service, void *arg)
752 753 {
753 754 return (zfs_init_libshare_impl(zhandle, service, arg));
754 755 }
755 756
756 757
757 758 /*
758 759 * zfs_uninit_libshare(zhandle)
759 760 *
760 761 * Uninitialize the libshare API if it hasn't already been
761 762 * uninitialized. It is OK to call multiple times.
762 763 */
763 764 void
764 765 zfs_uninit_libshare(libzfs_handle_t *zhandle)
765 766 {
766 767 if (zhandle != NULL && zhandle->libzfs_sharehdl != NULL) {
767 768 if (_sa_fini != NULL)
768 769 _sa_fini(zhandle->libzfs_sharehdl);
769 770 zhandle->libzfs_sharehdl = NULL;
770 771 }
771 772 }
772 773
773 774 /*
774 775 * zfs_parse_options(options, proto)
775 776 *
776 777 * Call the legacy parse interface to get the protocol specific
777 778 * options using the NULL arg to indicate that this is a "parse" only.
778 779 */
779 780 int
780 781 zfs_parse_options(char *options, zfs_share_proto_t proto)
781 782 {
782 783 if (_sa_parse_legacy_options != NULL) {
783 784 return (_sa_parse_legacy_options(NULL, options,
784 785 proto_table[proto].p_name));
785 786 }
786 787 return (SA_CONFIG_ERR);
787 788 }
788 789
789 790 /*
790 791 * zfs_sa_find_share(handle, path)
791 792 *
792 793 * wrapper around sa_find_share to find a share path in the
793 794 * configuration.
794 795 */
795 796 static sa_share_t
796 797 zfs_sa_find_share(sa_handle_t handle, char *path)
797 798 {
798 799 if (_sa_find_share != NULL)
799 800 return (_sa_find_share(handle, path));
800 801 return (NULL);
801 802 }
802 803
803 804 /*
804 805 * zfs_sa_enable_share(share, proto)
805 806 *
806 807 * Wrapper for sa_enable_share which enables a share for a specified
807 808 * protocol.
808 809 */
809 810 static int
810 811 zfs_sa_enable_share(sa_share_t share, char *proto)
811 812 {
812 813 if (_sa_enable_share != NULL)
813 814 return (_sa_enable_share(share, proto));
814 815 return (SA_CONFIG_ERR);
815 816 }
816 817
817 818 /*
818 819 * zfs_sa_disable_share(share, proto)
819 820 *
820 821 * Wrapper for sa_disable_share which disables a share for a specified
821 822 * protocol.
822 823 */
823 824 static int
824 825 zfs_sa_disable_share(sa_share_t share, char *proto)
825 826 {
826 827 if (_sa_disable_share != NULL)
827 828 return (_sa_disable_share(share, proto));
828 829 return (SA_CONFIG_ERR);
829 830 }
830 831
831 832 /*
832 833 * Share the given filesystem according to the options in the specified
833 834 * protocol specific properties (sharenfs, sharesmb). We rely
834 835 * on "libshare" to the dirty work for us.
835 836 */
836 837 static int
837 838 zfs_share_proto(zfs_handle_t *zhp, zfs_share_proto_t *proto)
838 839 {
839 840 char mountpoint[ZFS_MAXPROPLEN];
840 841 char shareopts[ZFS_MAXPROPLEN];
841 842 char sourcestr[ZFS_MAXPROPLEN];
842 843 libzfs_handle_t *hdl = zhp->zfs_hdl;
843 844 sa_share_t share;
844 845 zfs_share_proto_t *curr_proto;
845 846 zprop_source_t sourcetype;
846 847 int service = SA_INIT_ONE_SHARE_FROM_HANDLE;
847 848 int ret;
848 849
849 850 if (!zfs_is_mountable(zhp, mountpoint, sizeof (mountpoint), NULL))
850 851 return (0);
851 852
852 853 /*
853 854 * Function may be called in a loop from higher up stack, with libshare
854 855 * initialized for multiple shares (SA_INIT_SHARE_API_SELECTIVE).
855 856 * zfs_init_libshare_arg will refresh the handle's cache if necessary.
856 857 * In this case we do not want to switch to per share initialization.
857 858 * Specify SA_INIT_SHARE_API to do full refresh, if refresh required.
858 859 */
859 860 if ((hdl->libzfs_sharehdl != NULL) && (_sa_service != NULL) &&
860 861 (_sa_service(hdl->libzfs_sharehdl) ==
861 862 SA_INIT_SHARE_API_SELECTIVE)) {
862 863 service = SA_INIT_SHARE_API;
863 864 }
864 865
865 866 for (curr_proto = proto; *curr_proto != PROTO_END; curr_proto++) {
866 867 /*
867 868 * Return success if there are no share options.
868 869 */
869 870 if (zfs_prop_get(zhp, proto_table[*curr_proto].p_prop,
870 871 shareopts, sizeof (shareopts), &sourcetype, sourcestr,
871 872 ZFS_MAXPROPLEN, B_FALSE) != 0 ||
872 873 strcmp(shareopts, "off") == 0)
873 874 continue;
874 875 ret = zfs_init_libshare_arg(hdl, service, zhp);
875 876 if (ret != SA_OK) {
876 877 (void) zfs_error_fmt(hdl, EZFS_SHARENFSFAILED,
877 878 dgettext(TEXT_DOMAIN, "cannot share '%s': %s"),
878 879 zfs_get_name(zhp), _sa_errorstr != NULL ?
879 880 _sa_errorstr(ret) : "");
880 881 return (-1);
881 882 }
882 883
883 - /*
884 - * If the 'zoned' property is set, then zfs_is_mountable()
885 - * will have already bailed out if we are in the global zone.
886 - * But local zones cannot be NFS servers, so we ignore it for
887 - * local zones as well.
888 - */
889 - if (zfs_prop_get_int(zhp, ZFS_PROP_ZONED))
890 - continue;
891 -
892 884 share = zfs_sa_find_share(hdl->libzfs_sharehdl, mountpoint);
893 885 if (share == NULL) {
894 886 /*
895 887 * This may be a new file system that was just
896 - * created so isn't in the internal cache
897 - * (second time through). Rather than
898 - * reloading the entire configuration, we can
899 - * assume ZFS has done the checking and it is
900 - * safe to add this to the internal
901 - * configuration.
888 + * created so isn't in the internal cache.
889 + * Rather than reloading the entire configuration,
890 + * we can add just this one share to the cache.
902 891 */
903 - if (_sa_zfs_process_share(hdl->libzfs_sharehdl,
904 - NULL, NULL, mountpoint,
905 - proto_table[*curr_proto].p_name, sourcetype,
906 - shareopts, sourcestr, zhp->zfs_name) != SA_OK) {
892 + if ((_sa_get_zfs_share == NULL) ||
893 + (_sa_get_zfs_share(hdl->libzfs_sharehdl, "zfs", zhp)
894 + != SA_OK)) {
907 895 (void) zfs_error_fmt(hdl,
908 896 proto_table[*curr_proto].p_share_err,
909 897 dgettext(TEXT_DOMAIN, "cannot share '%s'"),
910 898 zfs_get_name(zhp));
911 899 return (-1);
912 900 }
913 901 share = zfs_sa_find_share(hdl->libzfs_sharehdl,
914 902 mountpoint);
915 903 }
916 904 if (share != NULL) {
917 905 int err;
918 906 err = zfs_sa_enable_share(share,
919 907 proto_table[*curr_proto].p_name);
920 908 if (err != SA_OK) {
921 909 (void) zfs_error_fmt(hdl,
922 910 proto_table[*curr_proto].p_share_err,
923 911 dgettext(TEXT_DOMAIN, "cannot share '%s'"),
924 912 zfs_get_name(zhp));
925 913 return (-1);
926 914 }
927 915 } else {
928 916 (void) zfs_error_fmt(hdl,
929 917 proto_table[*curr_proto].p_share_err,
930 918 dgettext(TEXT_DOMAIN, "cannot share '%s'"),
931 919 zfs_get_name(zhp));
932 920 return (-1);
933 921 }
934 922
935 923 }
936 924 return (0);
937 925 }
938 926
939 927
940 928 int
941 929 zfs_share_nfs(zfs_handle_t *zhp)
942 930 {
943 931 return (zfs_share_proto(zhp, nfs_only));
944 932 }
945 933
946 934 int
947 935 zfs_share_smb(zfs_handle_t *zhp)
948 936 {
949 937 return (zfs_share_proto(zhp, smb_only));
950 938 }
951 939
952 940 int
953 941 zfs_shareall(zfs_handle_t *zhp)
954 942 {
955 943 return (zfs_share_proto(zhp, share_all_proto));
956 944 }
957 945
958 946 /*
959 947 * Unshare a filesystem by mountpoint.
960 948 */
961 949 static int
962 950 unshare_one(libzfs_handle_t *hdl, const char *name, const char *mountpoint,
963 951 zfs_share_proto_t proto)
964 952 {
965 953 sa_share_t share;
966 954 int err;
967 955 char *mntpt;
968 956 int service = SA_INIT_ONE_SHARE_FROM_NAME;
969 957
970 958 /*
971 959 * Mountpoint could get trashed if libshare calls getmntany
972 960 * which it does during API initialization, so strdup the
973 961 * value.
974 962 */
975 963 mntpt = zfs_strdup(hdl, mountpoint);
976 964
977 965 /*
978 966 * Function may be called in a loop from higher up stack, with libshare
979 967 * initialized for multiple shares (SA_INIT_SHARE_API_SELECTIVE).
980 968 * zfs_init_libshare_arg will refresh the handle's cache if necessary.
981 969 * In this case we do not want to switch to per share initialization.
982 970 * Specify SA_INIT_SHARE_API to do full refresh, if refresh required.
983 971 */
984 972 if ((hdl->libzfs_sharehdl != NULL) && (_sa_service != NULL) &&
985 973 (_sa_service(hdl->libzfs_sharehdl) ==
986 974 SA_INIT_SHARE_API_SELECTIVE)) {
987 975 service = SA_INIT_SHARE_API;
988 976 }
989 977
990 978 err = zfs_init_libshare_arg(hdl, service, (void *)name);
991 979 if (err != SA_OK) {
992 980 free(mntpt); /* don't need the copy anymore */
993 981 return (zfs_error_fmt(hdl, proto_table[proto].p_unshare_err,
994 982 dgettext(TEXT_DOMAIN, "cannot unshare '%s': %s"),
995 983 name, _sa_errorstr(err)));
996 984 }
997 985
998 986 share = zfs_sa_find_share(hdl->libzfs_sharehdl, mntpt);
999 987 free(mntpt); /* don't need the copy anymore */
1000 988
1001 989 if (share != NULL) {
1002 990 err = zfs_sa_disable_share(share, proto_table[proto].p_name);
1003 991 if (err != SA_OK) {
1004 992 return (zfs_error_fmt(hdl,
1005 993 proto_table[proto].p_unshare_err,
1006 994 dgettext(TEXT_DOMAIN, "cannot unshare '%s': %s"),
1007 995 name, _sa_errorstr(err)));
1008 996 }
1009 997 } else {
1010 998 return (zfs_error_fmt(hdl, proto_table[proto].p_unshare_err,
1011 999 dgettext(TEXT_DOMAIN, "cannot unshare '%s': not found"),
1012 1000 name));
1013 1001 }
1014 1002 return (0);
1015 1003 }
1016 1004
1017 1005 /*
1018 1006 * Unshare the given filesystem.
1019 1007 */
1020 1008 int
1021 1009 zfs_unshare_proto(zfs_handle_t *zhp, const char *mountpoint,
1022 1010 zfs_share_proto_t *proto)
1023 1011 {
1024 1012 libzfs_handle_t *hdl = zhp->zfs_hdl;
1025 1013 struct mnttab entry;
1026 1014 char *mntpt = NULL;
1027 1015
1028 1016 /* check to see if need to unmount the filesystem */
1029 1017 rewind(zhp->zfs_hdl->libzfs_mnttab);
1030 1018 if (mountpoint != NULL)
1031 1019 mountpoint = mntpt = zfs_strdup(hdl, mountpoint);
1032 1020
1033 1021 if (mountpoint != NULL || ((zfs_get_type(zhp) == ZFS_TYPE_FILESYSTEM) &&
1034 1022 libzfs_mnttab_find(hdl, zfs_get_name(zhp), &entry) == 0)) {
1035 1023 zfs_share_proto_t *curr_proto;
1036 1024
1037 1025 if (mountpoint == NULL)
1038 1026 mntpt = zfs_strdup(zhp->zfs_hdl, entry.mnt_mountp);
1039 1027
1040 1028 for (curr_proto = proto; *curr_proto != PROTO_END;
1041 1029 curr_proto++) {
1042 1030
1043 1031 if (is_shared(hdl, mntpt, *curr_proto) &&
1044 1032 unshare_one(hdl, zhp->zfs_name,
1045 1033 mntpt, *curr_proto) != 0) {
1046 1034 if (mntpt != NULL)
1047 1035 free(mntpt);
1048 1036 return (-1);
1049 1037 }
1050 1038 }
1051 1039 }
1052 1040 if (mntpt != NULL)
1053 1041 free(mntpt);
1054 1042
1055 1043 return (0);
1056 1044 }
1057 1045
1058 1046 int
1059 1047 zfs_unshare_nfs(zfs_handle_t *zhp, const char *mountpoint)
1060 1048 {
1061 1049 return (zfs_unshare_proto(zhp, mountpoint, nfs_only));
1062 1050 }
1063 1051
1064 1052 int
1065 1053 zfs_unshare_smb(zfs_handle_t *zhp, const char *mountpoint)
1066 1054 {
1067 1055 return (zfs_unshare_proto(zhp, mountpoint, smb_only));
1068 1056 }
1069 1057
1070 1058 /*
1071 1059 * Same as zfs_unmountall(), but for NFS and SMB unshares.
1072 1060 */
1073 1061 int
1074 1062 zfs_unshareall_proto(zfs_handle_t *zhp, zfs_share_proto_t *proto)
1075 1063 {
1076 1064 prop_changelist_t *clp;
1077 1065 int ret;
1078 1066
1079 1067 clp = changelist_gather(zhp, ZFS_PROP_SHARENFS, 0, 0);
1080 1068 if (clp == NULL)
1081 1069 return (-1);
1082 1070
1083 1071 ret = changelist_unshare(clp, proto);
1084 1072 changelist_free(clp);
1085 1073
1086 1074 return (ret);
1087 1075 }
1088 1076
1089 1077 int
1090 1078 zfs_unshareall_nfs(zfs_handle_t *zhp)
1091 1079 {
1092 1080 return (zfs_unshareall_proto(zhp, nfs_only));
1093 1081 }
1094 1082
1095 1083 int
1096 1084 zfs_unshareall_smb(zfs_handle_t *zhp)
1097 1085 {
1098 1086 return (zfs_unshareall_proto(zhp, smb_only));
1099 1087 }
1100 1088
1101 1089 int
1102 1090 zfs_unshareall(zfs_handle_t *zhp)
1103 1091 {
1104 1092 return (zfs_unshareall_proto(zhp, share_all_proto));
1105 1093 }
1106 1094
1107 1095 int
1108 1096 zfs_unshareall_bypath(zfs_handle_t *zhp, const char *mountpoint)
1109 1097 {
1110 1098 return (zfs_unshare_proto(zhp, mountpoint, share_all_proto));
1111 1099 }
1112 1100
1113 1101 /*
1114 1102 * Remove the mountpoint associated with the current dataset, if necessary.
1115 1103 * We only remove the underlying directory if:
1116 1104 *
1117 1105 * - The mountpoint is not 'none' or 'legacy'
1118 1106 * - The mountpoint is non-empty
1119 1107 * - The mountpoint is the default or inherited
1120 1108 * - The 'zoned' property is set, or we're in a local zone
1121 1109 *
1122 1110 * Any other directories we leave alone.
1123 1111 */
1124 1112 void
1125 1113 remove_mountpoint(zfs_handle_t *zhp)
1126 1114 {
1127 1115 char mountpoint[ZFS_MAXPROPLEN];
1128 1116 zprop_source_t source;
1129 1117
1130 1118 if (!zfs_is_mountable(zhp, mountpoint, sizeof (mountpoint),
1131 1119 &source))
1132 1120 return;
1133 1121
1134 1122 if (source == ZPROP_SRC_DEFAULT ||
1135 1123 source == ZPROP_SRC_INHERITED) {
1136 1124 /*
1137 1125 * Try to remove the directory, silently ignoring any errors.
1138 1126 * The filesystem may have since been removed or moved around,
1139 1127 * and this error isn't really useful to the administrator in
1140 1128 * any way.
1141 1129 */
1142 1130 (void) rmdir(mountpoint);
1143 1131 }
1144 1132 }
1145 1133
1146 1134 /*
1147 1135 * Add the given zfs handle to the cb_handles array, dynamically reallocating
1148 1136 * the array if it is out of space.
1149 1137 */
1150 1138 void
1151 1139 libzfs_add_handle(get_all_cb_t *cbp, zfs_handle_t *zhp)
1152 1140 {
1153 1141 if (cbp->cb_alloc == cbp->cb_used) {
1154 1142 size_t newsz;
1155 1143 zfs_handle_t **newhandles;
1156 1144
1157 1145 newsz = cbp->cb_alloc != 0 ? cbp->cb_alloc * 2 : 64;
1158 1146 newhandles = zfs_realloc(zhp->zfs_hdl,
1159 1147 cbp->cb_handles, cbp->cb_alloc * sizeof (zfs_handle_t *),
1160 1148 newsz * sizeof (zfs_handle_t *));
1161 1149 cbp->cb_handles = newhandles;
1162 1150 cbp->cb_alloc = newsz;
1163 1151 }
1164 1152 cbp->cb_handles[cbp->cb_used++] = zhp;
1165 1153 }
1166 1154
1167 1155 /*
1168 1156 * Recursive helper function used during file system enumeration
1169 1157 */
1170 1158 static int
1171 1159 zfs_iter_cb(zfs_handle_t *zhp, void *data)
1172 1160 {
1173 1161 get_all_cb_t *cbp = data;
1174 1162
1175 1163 if (!(zfs_get_type(zhp) & ZFS_TYPE_FILESYSTEM)) {
1176 1164 zfs_close(zhp);
1177 1165 return (0);
1178 1166 }
1179 1167
1180 1168 if (zfs_prop_get_int(zhp, ZFS_PROP_CANMOUNT) == ZFS_CANMOUNT_NOAUTO) {
1181 1169 zfs_close(zhp);
1182 1170 return (0);
1183 1171 }
1184 1172
1185 1173 if (zfs_prop_get_int(zhp, ZFS_PROP_KEYSTATUS) ==
1186 1174 ZFS_KEYSTATUS_UNAVAILABLE) {
1187 1175 zfs_close(zhp);
1188 1176 return (0);
1189 1177 }
1190 1178
1191 1179 /*
1192 1180 * If this filesystem is inconsistent and has a receive resume
1193 1181 * token, we can not mount it.
1194 1182 */
1195 1183 if (zfs_prop_get_int(zhp, ZFS_PROP_INCONSISTENT) &&
1196 1184 zfs_prop_get(zhp, ZFS_PROP_RECEIVE_RESUME_TOKEN,
1197 1185 NULL, 0, NULL, NULL, 0, B_TRUE) == 0) {
1198 1186 zfs_close(zhp);
1199 1187 return (0);
1200 1188 }
1201 1189
1202 1190 libzfs_add_handle(cbp, zhp);
1203 1191 if (zfs_iter_filesystems(zhp, zfs_iter_cb, cbp) != 0) {
1204 1192 zfs_close(zhp);
1205 1193 return (-1);
1206 1194 }
1207 1195 return (0);
1208 1196 }
1209 1197
1210 1198 /*
1211 1199 * Sort comparator that compares two mountpoint paths. We sort these paths so
1212 1200 * that subdirectories immediately follow their parents. This means that we
1213 1201 * effectively treat the '/' character as the lowest value non-nul char.
1214 1202 * Since filesystems from non-global zones can have the same mountpoint
1215 1203 * as other filesystems, the comparator sorts global zone filesystems to
1216 1204 * the top of the list. This means that the global zone will traverse the
1217 1205 * filesystem list in the correct order and can stop when it sees the
1218 1206 * first zoned filesystem. In a non-global zone, only the delegated
1219 1207 * filesystems are seen.
1220 1208 *
1221 1209 * An example sorted list using this comparator would look like:
1222 1210 *
1223 1211 * /foo
1224 1212 * /foo/bar
1225 1213 * /foo/bar/baz
1226 1214 * /foo/baz
1227 1215 * /foo.bar
1228 1216 * /foo (NGZ1)
1229 1217 * /foo (NGZ2)
1230 1218 *
1231 1219 * The mounting code depends on this ordering to deterministically iterate
1232 1220 * over filesystems in order to spawn parallel mount tasks.
1233 1221 */
1234 1222 static int
1235 1223 mountpoint_cmp(const void *arga, const void *argb)
1236 1224 {
1237 1225 zfs_handle_t *const *zap = arga;
1238 1226 zfs_handle_t *za = *zap;
1239 1227 zfs_handle_t *const *zbp = argb;
1240 1228 zfs_handle_t *zb = *zbp;
1241 1229 char mounta[MAXPATHLEN];
1242 1230 char mountb[MAXPATHLEN];
1243 1231 const char *a = mounta;
1244 1232 const char *b = mountb;
1245 1233 boolean_t gota, gotb;
1246 1234 uint64_t zoneda, zonedb;
1247 1235
1248 1236 zoneda = zfs_prop_get_int(za, ZFS_PROP_ZONED);
1249 1237 zonedb = zfs_prop_get_int(zb, ZFS_PROP_ZONED);
1250 1238 if (zoneda && !zonedb)
1251 1239 return (1);
1252 1240 if (!zoneda && zonedb)
1253 1241 return (-1);
1254 1242
1255 1243 gota = (zfs_get_type(za) == ZFS_TYPE_FILESYSTEM);
1256 1244 if (gota) {
1257 1245 verify(zfs_prop_get(za, ZFS_PROP_MOUNTPOINT, mounta,
1258 1246 sizeof (mounta), NULL, NULL, 0, B_FALSE) == 0);
1259 1247 }
1260 1248 gotb = (zfs_get_type(zb) == ZFS_TYPE_FILESYSTEM);
1261 1249 if (gotb) {
1262 1250 verify(zfs_prop_get(zb, ZFS_PROP_MOUNTPOINT, mountb,
1263 1251 sizeof (mountb), NULL, NULL, 0, B_FALSE) == 0);
1264 1252 }
1265 1253
1266 1254 if (gota && gotb) {
1267 1255 while (*a != '\0' && (*a == *b)) {
1268 1256 a++;
1269 1257 b++;
1270 1258 }
1271 1259 if (*a == *b)
1272 1260 return (0);
1273 1261 if (*a == '\0')
1274 1262 return (-1);
1275 1263 if (*b == '\0')
1276 1264 return (1);
1277 1265 if (*a == '/')
1278 1266 return (-1);
1279 1267 if (*b == '/')
1280 1268 return (1);
1281 1269 return (*a < *b ? -1 : *a > *b);
1282 1270 }
1283 1271
1284 1272 if (gota)
1285 1273 return (-1);
1286 1274 if (gotb)
1287 1275 return (1);
1288 1276
1289 1277 /*
1290 1278 * If neither filesystem has a mountpoint, revert to sorting by
1291 1279 * dataset name.
1292 1280 */
1293 1281 return (strcmp(zfs_get_name(za), zfs_get_name(zb)));
1294 1282 }
1295 1283
1296 1284 /*
1297 1285 * Return true if path2 is a child of path1.
1298 1286 */
1299 1287 static boolean_t
1300 1288 libzfs_path_contains(const char *path1, const char *path2)
1301 1289 {
1302 1290 return (strstr(path2, path1) == path2 && path2[strlen(path1)] == '/');
1303 1291 }
1304 1292
1305 1293 /*
1306 1294 * Given a mountpoint specified by idx in the handles array, find the first
1307 1295 * non-descendant of that mountpoint and return its index. Descendant paths
1308 1296 * start with the parent's path. This function relies on the ordering
1309 1297 * enforced by mountpoint_cmp().
1310 1298 */
1311 1299 static int
1312 1300 non_descendant_idx(zfs_handle_t **handles, size_t num_handles, int idx)
1313 1301 {
1314 1302 char parent[ZFS_MAXPROPLEN];
1315 1303 char child[ZFS_MAXPROPLEN];
1316 1304 int i;
1317 1305
1318 1306 verify(zfs_prop_get(handles[idx], ZFS_PROP_MOUNTPOINT, parent,
1319 1307 sizeof (parent), NULL, NULL, 0, B_FALSE) == 0);
1320 1308
1321 1309 for (i = idx + 1; i < num_handles; i++) {
1322 1310 verify(zfs_prop_get(handles[i], ZFS_PROP_MOUNTPOINT, child,
1323 1311 sizeof (child), NULL, NULL, 0, B_FALSE) == 0);
1324 1312 if (!libzfs_path_contains(parent, child))
1325 1313 break;
1326 1314 }
1327 1315 return (i);
1328 1316 }
1329 1317
1330 1318 typedef struct mnt_param {
1331 1319 libzfs_handle_t *mnt_hdl;
1332 1320 zfs_taskq_t *mnt_tq;
1333 1321 zfs_handle_t **mnt_zhps; /* filesystems to mount */
1334 1322 size_t mnt_num_handles;
1335 1323 int mnt_idx; /* Index of selected entry to mount */
1336 1324 zfs_iter_f mnt_func;
1337 1325 void *mnt_data;
1338 1326 } mnt_param_t;
1339 1327
1340 1328 /*
1341 1329 * Allocate and populate the parameter struct for mount function, and
1342 1330 * schedule mounting of the entry selected by idx.
1343 1331 */
1344 1332 static void
1345 1333 zfs_dispatch_mount(libzfs_handle_t *hdl, zfs_handle_t **handles,
1346 1334 size_t num_handles, int idx, zfs_iter_f func, void *data, zfs_taskq_t *tq)
1347 1335 {
1348 1336 mnt_param_t *mnt_param = zfs_alloc(hdl, sizeof (mnt_param_t));
1349 1337
1350 1338 mnt_param->mnt_hdl = hdl;
1351 1339 mnt_param->mnt_tq = tq;
1352 1340 mnt_param->mnt_zhps = handles;
1353 1341 mnt_param->mnt_num_handles = num_handles;
1354 1342 mnt_param->mnt_idx = idx;
1355 1343 mnt_param->mnt_func = func;
1356 1344 mnt_param->mnt_data = data;
1357 1345
1358 1346 (void) zfs_taskq_dispatch(tq, zfs_mount_task, (void*)mnt_param,
1359 1347 ZFS_TQ_SLEEP);
1360 1348 }
1361 1349
1362 1350 /*
1363 1351 * This is the structure used to keep state of mounting or sharing operations
1364 1352 * during a call to zpool_enable_datasets().
1365 1353 */
1366 1354 typedef struct mount_state {
1367 1355 /*
1368 1356 * ms_mntstatus is set to -1 if any mount fails. While multiple threads
1369 1357 * could update this variable concurrently, no synchronization is
1370 1358 * needed as it's only ever set to -1.
1371 1359 */
1372 1360 int ms_mntstatus;
1373 1361 int ms_mntflags;
1374 1362 const char *ms_mntopts;
1375 1363 } mount_state_t;
1376 1364
1377 1365 static int
1378 1366 zfs_mount_one(zfs_handle_t *zhp, void *arg)
1379 1367 {
1380 1368 mount_state_t *ms = arg;
1381 1369 int ret = 0;
1382 1370
1383 1371 if (zfs_prop_get_int(zhp, ZFS_PROP_KEYSTATUS) ==
1384 1372 ZFS_KEYSTATUS_UNAVAILABLE)
1385 1373 return (0);
1386 1374
1387 1375 if (zfs_mount(zhp, ms->ms_mntopts, ms->ms_mntflags) != 0)
1388 1376 ret = ms->ms_mntstatus = -1;
1389 1377 return (ret);
1390 1378 }
1391 1379
1392 1380 static int
1393 1381 zfs_share_one(zfs_handle_t *zhp, void *arg)
1394 1382 {
1395 1383 mount_state_t *ms = arg;
1396 1384 int ret = 0;
1397 1385
1398 1386 if (zfs_share(zhp) != 0)
1399 1387 ret = ms->ms_mntstatus = -1;
1400 1388 return (ret);
1401 1389 }
1402 1390
1403 1391 /*
1404 1392 * Task queue function to mount one file system. On completion, it finds and
1405 1393 * schedules its children to be mounted. This depends on the sorting done in
1406 1394 * zfs_foreach_mountpoint(). Note that the degenerate case (chain of entries
1407 1395 * each descending from the previous) will have no parallelism since we always
1408 1396 * have to wait for the parent to finish mounting before we can schedule
1409 1397 * its children.
1410 1398 */
1411 1399 static void
1412 1400 zfs_mount_task(void *arg)
1413 1401 {
1414 1402 mnt_param_t *mp = arg;
1415 1403 int idx = mp->mnt_idx;
1416 1404 zfs_handle_t **handles = mp->mnt_zhps;
1417 1405 size_t num_handles = mp->mnt_num_handles;
1418 1406 char mountpoint[ZFS_MAXPROPLEN];
1419 1407
1420 1408 verify(zfs_prop_get(handles[idx], ZFS_PROP_MOUNTPOINT, mountpoint,
1421 1409 sizeof (mountpoint), NULL, NULL, 0, B_FALSE) == 0);
1422 1410
1423 1411 if (mp->mnt_func(handles[idx], mp->mnt_data) != 0)
1424 1412 return;
1425 1413
1426 1414 /*
1427 1415 * We dispatch tasks to mount filesystems with mountpoints underneath
1428 1416 * this one. We do this by dispatching the next filesystem with a
1429 1417 * descendant mountpoint of the one we just mounted, then skip all of
1430 1418 * its descendants, dispatch the next descendant mountpoint, and so on.
1431 1419 * The non_descendant_idx() function skips over filesystems that are
1432 1420 * descendants of the filesystem we just dispatched.
1433 1421 */
1434 1422 for (int i = idx + 1; i < num_handles;
1435 1423 i = non_descendant_idx(handles, num_handles, i)) {
1436 1424 char child[ZFS_MAXPROPLEN];
1437 1425 verify(zfs_prop_get(handles[i], ZFS_PROP_MOUNTPOINT,
1438 1426 child, sizeof (child), NULL, NULL, 0, B_FALSE) == 0);
1439 1427
1440 1428 if (!libzfs_path_contains(mountpoint, child))
1441 1429 break; /* not a descendant, return */
1442 1430 zfs_dispatch_mount(mp->mnt_hdl, handles, num_handles, i,
1443 1431 mp->mnt_func, mp->mnt_data, mp->mnt_tq);
1444 1432 }
1445 1433 free(mp);
1446 1434 }
1447 1435
1448 1436 /*
1449 1437 * Issue the func callback for each ZFS handle contained in the handles
1450 1438 * array. This function is used to mount all datasets, and so this function
1451 1439 * guarantees that filesystems for parent mountpoints are called before their
1452 1440 * children. As such, before issuing any callbacks, we first sort the array
1453 1441 * of handles by mountpoint.
1454 1442 *
1455 1443 * Callbacks are issued in one of two ways:
1456 1444 *
1457 1445 * 1. Sequentially: If the parallel argument is B_FALSE or the ZFS_SERIAL_MOUNT
1458 1446 * environment variable is set, then we issue callbacks sequentially.
1459 1447 *
1460 1448 * 2. In parallel: If the parallel argument is B_TRUE and the ZFS_SERIAL_MOUNT
1461 1449 * environment variable is not set, then we use a taskq to dispatch threads
1462 1450 * to mount filesystems in parallel. This function dispatches tasks to mount
1463 1451 * the filesystems at the top-level mountpoints, and these tasks in turn
1464 1452 * are responsible for recursively mounting filesystems in their children
1465 1453 * mountpoints.
1466 1454 */
1467 1455 void
1468 1456 zfs_foreach_mountpoint(libzfs_handle_t *hdl, zfs_handle_t **handles,
1469 1457 size_t num_handles, zfs_iter_f func, void *data, boolean_t parallel)
1470 1458 {
1471 1459 zoneid_t zoneid = getzoneid();
1472 1460
1473 1461 /*
1474 1462 * The ZFS_SERIAL_MOUNT environment variable is an undocumented
1475 1463 * variable that can be used as a convenience to do a/b comparison
1476 1464 * of serial vs. parallel mounting.
1477 1465 */
1478 1466 boolean_t serial_mount = !parallel ||
1479 1467 (getenv("ZFS_SERIAL_MOUNT") != NULL);
1480 1468
1481 1469 /*
1482 1470 * Sort the datasets by mountpoint. See mountpoint_cmp for details
1483 1471 * of how these are sorted.
1484 1472 */
1485 1473 qsort(handles, num_handles, sizeof (zfs_handle_t *), mountpoint_cmp);
1486 1474
1487 1475 if (serial_mount) {
1488 1476 for (int i = 0; i < num_handles; i++) {
1489 1477 func(handles[i], data);
1490 1478 }
1491 1479 return;
1492 1480 }
1493 1481
1494 1482 /*
1495 1483 * Issue the callback function for each dataset using a parallel
1496 1484 * algorithm that uses a taskq to manage threads.
1497 1485 */
1498 1486 zfs_taskq_t *tq = zfs_taskq_create("mount_taskq", mount_tq_nthr, 0,
1499 1487 mount_tq_nthr, mount_tq_nthr, ZFS_TASKQ_PREPOPULATE);
1500 1488
1501 1489 /*
1502 1490 * There may be multiple "top level" mountpoints outside of the pool's
1503 1491 * root mountpoint, e.g.: /foo /bar. Dispatch a mount task for each of
1504 1492 * these.
1505 1493 */
1506 1494 for (int i = 0; i < num_handles;
1507 1495 i = non_descendant_idx(handles, num_handles, i)) {
1508 1496 /*
1509 1497 * Since the mountpoints have been sorted so that the zoned
1510 1498 * filesystems are at the end, a zoned filesystem seen from
1511 1499 * the global zone means that we're done.
1512 1500 */
1513 1501 if (zoneid == GLOBAL_ZONEID &&
1514 1502 zfs_prop_get_int(handles[i], ZFS_PROP_ZONED))
1515 1503 break;
1516 1504 zfs_dispatch_mount(hdl, handles, num_handles, i, func, data,
1517 1505 tq);
1518 1506 }
1519 1507
1520 1508 zfs_taskq_wait(tq); /* wait for all scheduled mounts to complete */
1521 1509 zfs_taskq_destroy(tq);
1522 1510 }
1523 1511
1524 1512 /*
1525 1513 * Mount and share all datasets within the given pool. This assumes that no
1526 1514 * datasets within the pool are currently mounted.
1527 1515 */
1528 1516 #pragma weak zpool_mount_datasets = zpool_enable_datasets
1529 1517 int
1530 1518 zpool_enable_datasets(zpool_handle_t *zhp, const char *mntopts, int flags)
1531 1519 {
1532 1520 get_all_cb_t cb = { 0 };
1533 1521 mount_state_t ms = { 0 };
1534 1522 zfs_handle_t *zfsp;
1535 1523 sa_init_selective_arg_t sharearg;
1536 1524 int ret = 0;
1537 1525
1538 1526 if ((zfsp = zfs_open(zhp->zpool_hdl, zhp->zpool_name,
1539 1527 ZFS_TYPE_DATASET)) == NULL)
1540 1528 goto out;
1541 1529
1542 1530
1543 1531 /*
1544 1532 * Gather all non-snapshot datasets within the pool. Start by adding
1545 1533 * the root filesystem for this pool to the list, and then iterate
1546 1534 * over all child filesystems.
1547 1535 */
1548 1536 libzfs_add_handle(&cb, zfsp);
1549 1537 if (zfs_iter_filesystems(zfsp, zfs_iter_cb, &cb) != 0)
1550 1538 goto out;
1551 1539
1552 1540 ms.ms_mntopts = mntopts;
1553 1541 ms.ms_mntflags = flags;
1554 1542 zfs_foreach_mountpoint(zhp->zpool_hdl, cb.cb_handles, cb.cb_used,
1555 1543 zfs_mount_one, &ms, B_TRUE);
1556 1544 if (ms.ms_mntstatus != 0)
1557 1545 ret = ms.ms_mntstatus;
1558 1546
1559 1547 /*
1560 1548 * Initialize libshare SA_INIT_SHARE_API_SELECTIVE here
1561 1549 * to avoid unnecessary load/unload of the libshare API
1562 1550 * per shared dataset downstream.
1563 1551 */
1564 1552 sharearg.zhandle_arr = cb.cb_handles;
1565 1553 sharearg.zhandle_len = cb.cb_used;
1566 1554 if ((ret = zfs_init_libshare_arg(zhp->zpool_hdl,
1567 1555 SA_INIT_SHARE_API_SELECTIVE, &sharearg)) != 0)
1568 1556 goto out;
1569 1557
1570 1558 ms.ms_mntstatus = 0;
1571 1559 zfs_foreach_mountpoint(zhp->zpool_hdl, cb.cb_handles, cb.cb_used,
1572 1560 zfs_share_one, &ms, B_FALSE);
1573 1561 if (ms.ms_mntstatus != 0)
1574 1562 ret = ms.ms_mntstatus;
1575 1563
1576 1564 out:
1577 1565 for (int i = 0; i < cb.cb_used; i++)
1578 1566 zfs_close(cb.cb_handles[i]);
1579 1567 free(cb.cb_handles);
1580 1568
1581 1569 return (ret);
1582 1570 }
1583 1571
1584 1572 static int
1585 1573 mountpoint_compare(const void *a, const void *b)
1586 1574 {
1587 1575 const char *mounta = *((char **)a);
1588 1576 const char *mountb = *((char **)b);
1589 1577
1590 1578 return (strcmp(mountb, mounta));
1591 1579 }
1592 1580
1593 1581 /* alias for 2002/240 */
1594 1582 #pragma weak zpool_unmount_datasets = zpool_disable_datasets
1595 1583 /*
1596 1584 * Unshare and unmount all datasets within the given pool. We don't want to
1597 1585 * rely on traversing the DSL to discover the filesystems within the pool,
1598 1586 * because this may be expensive (if not all of them are mounted), and can fail
1599 1587 * arbitrarily (on I/O error, for example). Instead, we walk /etc/mnttab and
1600 1588 * gather all the filesystems that are currently mounted.
1601 1589 */
1602 1590 int
1603 1591 zpool_disable_datasets(zpool_handle_t *zhp, boolean_t force)
1604 1592 {
1605 1593 int used, alloc;
1606 1594 struct mnttab entry;
1607 1595 size_t namelen;
1608 1596 char **mountpoints = NULL;
1609 1597 zfs_handle_t **datasets = NULL;
1610 1598 libzfs_handle_t *hdl = zhp->zpool_hdl;
1611 1599 int i;
1612 1600 int ret = -1;
1613 1601 int flags = (force ? MS_FORCE : 0);
1614 1602 sa_init_selective_arg_t sharearg;
1615 1603
1616 1604 namelen = strlen(zhp->zpool_name);
1617 1605
1618 1606 rewind(hdl->libzfs_mnttab);
1619 1607 used = alloc = 0;
1620 1608 while (getmntent(hdl->libzfs_mnttab, &entry) == 0) {
1621 1609 /*
1622 1610 * Ignore non-ZFS entries.
1623 1611 */
1624 1612 if (entry.mnt_fstype == NULL ||
1625 1613 strcmp(entry.mnt_fstype, MNTTYPE_ZFS) != 0)
1626 1614 continue;
1627 1615
1628 1616 /*
1629 1617 * Ignore filesystems not within this pool.
1630 1618 */
1631 1619 if (entry.mnt_mountp == NULL ||
1632 1620 strncmp(entry.mnt_special, zhp->zpool_name, namelen) != 0 ||
1633 1621 (entry.mnt_special[namelen] != '/' &&
1634 1622 entry.mnt_special[namelen] != '\0'))
1635 1623 continue;
1636 1624
1637 1625 /*
1638 1626 * At this point we've found a filesystem within our pool. Add
1639 1627 * it to our growing list.
1640 1628 */
1641 1629 if (used == alloc) {
1642 1630 if (alloc == 0) {
1643 1631 if ((mountpoints = zfs_alloc(hdl,
1644 1632 8 * sizeof (void *))) == NULL)
1645 1633 goto out;
1646 1634
1647 1635 if ((datasets = zfs_alloc(hdl,
1648 1636 8 * sizeof (void *))) == NULL)
1649 1637 goto out;
1650 1638
1651 1639 alloc = 8;
1652 1640 } else {
1653 1641 void *ptr;
1654 1642
1655 1643 if ((ptr = zfs_realloc(hdl, mountpoints,
1656 1644 alloc * sizeof (void *),
1657 1645 alloc * 2 * sizeof (void *))) == NULL)
1658 1646 goto out;
1659 1647 mountpoints = ptr;
1660 1648
1661 1649 if ((ptr = zfs_realloc(hdl, datasets,
1662 1650 alloc * sizeof (void *),
1663 1651 alloc * 2 * sizeof (void *))) == NULL)
1664 1652 goto out;
1665 1653 datasets = ptr;
1666 1654
1667 1655 alloc *= 2;
1668 1656 }
1669 1657 }
1670 1658
1671 1659 if ((mountpoints[used] = zfs_strdup(hdl,
1672 1660 entry.mnt_mountp)) == NULL)
1673 1661 goto out;
1674 1662
1675 1663 /*
1676 1664 * This is allowed to fail, in case there is some I/O error. It
1677 1665 * is only used to determine if we need to remove the underlying
1678 1666 * mountpoint, so failure is not fatal.
1679 1667 */
1680 1668 datasets[used] = make_dataset_handle(hdl, entry.mnt_special);
1681 1669
1682 1670 used++;
1683 1671 }
1684 1672
1685 1673 /*
1686 1674 * At this point, we have the entire list of filesystems, so sort it by
1687 1675 * mountpoint.
1688 1676 */
1689 1677 sharearg.zhandle_arr = datasets;
1690 1678 sharearg.zhandle_len = used;
1691 1679 ret = zfs_init_libshare_arg(hdl, SA_INIT_SHARE_API_SELECTIVE,
1692 1680 &sharearg);
1693 1681 if (ret != 0)
1694 1682 goto out;
1695 1683 qsort(mountpoints, used, sizeof (char *), mountpoint_compare);
1696 1684
1697 1685 /*
1698 1686 * Walk through and first unshare everything.
1699 1687 */
1700 1688 for (i = 0; i < used; i++) {
1701 1689 zfs_share_proto_t *curr_proto;
1702 1690 for (curr_proto = share_all_proto; *curr_proto != PROTO_END;
1703 1691 curr_proto++) {
1704 1692 if (is_shared(hdl, mountpoints[i], *curr_proto) &&
1705 1693 unshare_one(hdl, mountpoints[i],
1706 1694 mountpoints[i], *curr_proto) != 0)
1707 1695 goto out;
1708 1696 }
1709 1697 }
1710 1698
1711 1699 /*
1712 1700 * Now unmount everything, removing the underlying directories as
1713 1701 * appropriate.
1714 1702 */
1715 1703 for (i = 0; i < used; i++) {
1716 1704 if (unmount_one(hdl, mountpoints[i], flags) != 0)
1717 1705 goto out;
1718 1706 }
1719 1707
1720 1708 for (i = 0; i < used; i++) {
1721 1709 if (datasets[i])
1722 1710 remove_mountpoint(datasets[i]);
1723 1711 }
1724 1712
1725 1713 ret = 0;
1726 1714 out:
1727 1715 for (i = 0; i < used; i++) {
1728 1716 if (datasets[i])
1729 1717 zfs_close(datasets[i]);
1730 1718 free(mountpoints[i]);
1731 1719 }
1732 1720 free(datasets);
1733 1721 free(mountpoints);
1734 1722
1735 1723 return (ret);
1736 1724 }
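
For completeness, a similarly hypothetical sketch (not part of this change) of a pool-level consumer driving the zpool_enable_datasets()/zpool_disable_datasets() entry points above; zpool_open() and zpool_close() are the standard libzfs pool-handle calls.

/*
 * Hypothetical example, not part of this change: mount and share every
 * dataset in a pool, then unshare and unmount them all again.
 */
#include <libzfs.h>

int
cycle_pool_datasets(libzfs_handle_t *hdl, const char *poolname)
{
	zpool_handle_t *zhp;
	int ret;

	if ((zhp = zpool_open(hdl, poolname)) == NULL)
		return (-1);

	/* mount (in parallel) and share all datasets in the pool */
	ret = zpool_enable_datasets(zhp, NULL, 0);

	if (ret == 0) {
		/* unshare and unmount everything, without forcing */
		ret = zpool_disable_datasets(zhp, B_FALSE);
	}

	zpool_close(zhp);
	return (ret);
}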