Print this page
NEX-17003 Autosnapshots should not be managed via .zfs/snapshot
Reviewed by: Evan Layton <evan.layton@nexenta.com>
Reviewed by: Sanjay Nadkarni <sanjay.nadkarni@nexenta.com>
Reviewed by: Saso Kiselkov <saso.kiselkov@nexenta.com>
6093 zfsctl_shares_lookup should only VN_RELE() on zfs_zget() success
Reviewed by: Gordon Ross <gwr@nexenta.com>
Reviewed by: Matthew Ahrens <mahrens@delphix.com>
Reviewed by: George Wilson <george.wilson@delphix.com>
Approved by: Robert Mustacchi <rm@joyent.com>
5766 Avoid 128K kmem allocations in mzap_upgrade()
Reviewed by: Matthew Ahrens <mahrens@delphix.com>
Reviewed by: Prakash Surya <prakash.surya@delphix.com>
Reviewed by: George Wilson <george@delphix.com>
Reviewed by: Steven Hartland <killing@multiplay.co.uk>
Approved by: Rich Lowe <richlowe@richlowe.net>
5768 zfsctl_snapshot_inactive() can leak a vnode hold
Reviewed by: George Wilson <george@delphix.com>
Reviewed by: Prakash Surya <prakash.surya@delphix.com>
Reviewed by: Adam Leventhal <adam.leventhal@delphix.com>
Reviewed by: Bayard Bell <buffer.g.overflow@gmail.com>
Approved by: Rich Lowe <richlowe@richlowe.net>
| Split |
Close |
| Expand all |
| Collapse all |
--- old/usr/src/uts/common/fs/zfs/zfs_ctldir.c
+++ new/usr/src/uts/common/fs/zfs/zfs_ctldir.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
|
↓ open down ↓ |
14 lines elided |
↑ open up ↑ |
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23 23 * Copyright (c) 2012, 2017 by Delphix. All rights reserved.
24 24 * Copyright 2015, OmniTI Computer Consulting, Inc. All rights reserved.
25 + * Copyright 2018 Nexenta Systems, Inc. All rights reserved.
25 26 */
26 27
27 28 /*
28 29 * ZFS control directory (a.k.a. ".zfs")
29 30 *
30 31 * This directory provides a common location for all ZFS meta-objects.
31 32 * Currently, this is only the 'snapshot' directory, but this may expand in the
32 33 * future. The elements are built using the GFS primitives, as the hierarchy
33 34 * does not actually exist on disk.
34 35 *
35 36 * For 'snapshot', we don't want to have all snapshots always mounted, because
36 37 * this would take up a huge amount of space in /etc/mnttab. We have three
37 38 * types of objects:
38 39 *
39 40 * ctldir ------> snapshotdir -------> snapshot
40 41 * |
41 42 * |
42 43 * V
43 44 * mounted fs
44 45 *
45 46 * The 'snapshot' node contains just enough information to lookup '..' and act
46 47 * as a mountpoint for the snapshot. Whenever we lookup a specific snapshot, we
47 48 * perform an automount of the underlying filesystem and return the
48 49 * corresponding vnode.
49 50 *
50 51 * All mounts are handled automatically by the kernel, but unmounts are
51 52 * (currently) handled from user land. The main reason is that there is no
52 53 * reliable way to auto-unmount the filesystem when it's "no longer in use".
53 54 * When the user unmounts a filesystem, we call zfsctl_unmount(), which
54 55 * unmounts any snapshots within the snapshot directory.
55 56 *
56 57 * The '.zfs', '.zfs/snapshot', and all directories created under
57 58 * '.zfs/snapshot' (ie: '.zfs/snapshot/<snapname>') are all GFS nodes and
58 59 * share the same vfs_t as the head filesystem (what '.zfs' lives under).
59 60 *
60 61 * File systems mounted on top of the GFS nodes '.zfs/snapshot/<snapname>'
61 62 * (ie: snapshots) are ZFS nodes and have their own unique vfs_t.
62 63 * However, vnodes within these mounted on file systems have their v_vfsp
63 64 * fields set to the head filesystem to make NFS happy (see
64 65 * zfsctl_snapdir_lookup()). We VFS_HOLD the head filesystem's vfs_t
65 66 * so that it cannot be freed until all snapshots have been unmounted.
66 67 */
67 68
68 69 #include <fs/fs_subr.h>
69 70 #include <sys/zfs_ctldir.h>
|
↓ open down ↓ |
35 lines elided |
↑ open up ↑ |
70 71 #include <sys/zfs_ioctl.h>
71 72 #include <sys/zfs_vfsops.h>
72 73 #include <sys/vfs_opreg.h>
73 74 #include <sys/gfs.h>
74 75 #include <sys/stat.h>
75 76 #include <sys/dmu.h>
76 77 #include <sys/dsl_destroy.h>
77 78 #include <sys/dsl_deleg.h>
78 79 #include <sys/mount.h>
79 80 #include <sys/sunddi.h>
81 +#include <sys/autosnap.h>
80 82
81 83 #include "zfs_namecheck.h"
82 84
/* Common per-node state for entries in the '.zfs' hierarchy. */
typedef struct zfsctl_node {
	gfs_dir_t zc_gfs_private;	/* GFS directory state */
	uint64_t zc_id;			/* inode number for this node */
	timestruc_t zc_cmtime;		/* ctime and mtime, always the same */
} zfsctl_node_t;

/* State for the '.zfs/snapshot' directory. */
typedef struct zfsctl_snapdir {
	zfsctl_node_t sd_node;		/* common node state */
	kmutex_t sd_lock;		/* protects sd_snaps */
	avl_tree_t sd_snaps;		/* zfs_snapentry_t tree, keyed by name */
} zfsctl_snapdir_t;

/* One known snapshot under '.zfs/snapshot' (mounted or mountable). */
typedef struct {
	char *se_name;		/* snapshot component name (kmem-allocated) */
	vnode_t *se_root;	/* GFS vnode the snapshot is mounted on */
	avl_node_t se_node;	/* linkage in sd_snaps */
} zfs_snapentry_t;
100 102
101 103 static int
102 104 snapentry_compare(const void *a, const void *b)
103 105 {
104 106 const zfs_snapentry_t *sa = a;
105 107 const zfs_snapentry_t *sb = b;
106 108 int ret = strcmp(sa->se_name, sb->se_name);
107 109
108 110 if (ret < 0)
109 111 return (-1);
110 112 else if (ret > 0)
111 113 return (1);
112 114 else
113 115 return (0);
114 116 }
115 117
/* Vnode op vectors, filled in by gfs_make_opsvec() at zfsctl_init() time. */
vnodeops_t *zfsctl_ops_root;
vnodeops_t *zfsctl_ops_snapdir;
vnodeops_t *zfsctl_ops_snapshot;
vnodeops_t *zfsctl_ops_shares;

static const fs_operation_def_t zfsctl_tops_root[];
static const fs_operation_def_t zfsctl_tops_snapdir[];
static const fs_operation_def_t zfsctl_tops_snapshot[];
static const fs_operation_def_t zfsctl_tops_shares[];

static vnode_t *zfsctl_mknode_snapdir(vnode_t *);
static vnode_t *zfsctl_mknode_shares(vnode_t *);
static vnode_t *zfsctl_snapshot_mknode(vnode_t *, uint64_t objset);
static int zfsctl_unmount_snap(zfs_snapentry_t *, int, cred_t *);

/* Mapping of '.zfs' paths to op tables, consumed by gfs_make_opsvec(). */
static gfs_opsvec_t zfsctl_opsvec[] = {
	{ ".zfs", zfsctl_tops_root, &zfsctl_ops_root },
	{ ".zfs/snapshot", zfsctl_tops_snapdir, &zfsctl_ops_snapdir },
	{ ".zfs/snapshot/vnode", zfsctl_tops_snapshot, &zfsctl_ops_snapshot },
	{ ".zfs/shares", zfsctl_tops_shares, &zfsctl_ops_shares },
	{ NULL }
};
138 140
/*
 * Root directory elements.  We only have two entries:
 * snapshot and shares.
 */
static gfs_dirent_t zfsctl_root_entries[] = {
	{ "snapshot", zfsctl_mknode_snapdir, GFS_CACHE_VNODE },
	{ "shares", zfsctl_mknode_shares, GFS_CACHE_VNODE },
	{ NULL }
};

/*
 * include . and .. in the calculation
 * (the array's NULL terminator plus the +1 account for the two).
 */
#define	NROOT_ENTRIES	((sizeof (zfsctl_root_entries) / \
    sizeof (gfs_dirent_t)) + 1)
152 154
153 155
154 156 /*
155 157 * Initialize the various GFS pieces we'll need to create and manipulate .zfs
156 158 * directories. This is called from the ZFS init routine, and initializes the
157 159 * vnode ops vectors that we'll be using.
158 160 */
159 161 void
160 162 zfsctl_init(void)
161 163 {
162 164 VERIFY(gfs_make_opsvec(zfsctl_opsvec) == 0);
163 165 }
164 166
165 167 void
166 168 zfsctl_fini(void)
167 169 {
168 170 /*
169 171 * Remove vfsctl vnode ops
170 172 */
171 173 if (zfsctl_ops_root)
172 174 vn_freevnodeops(zfsctl_ops_root);
173 175 if (zfsctl_ops_snapdir)
174 176 vn_freevnodeops(zfsctl_ops_snapdir);
175 177 if (zfsctl_ops_snapshot)
176 178 vn_freevnodeops(zfsctl_ops_snapshot);
177 179 if (zfsctl_ops_shares)
178 180 vn_freevnodeops(zfsctl_ops_shares);
179 181
180 182 zfsctl_ops_root = NULL;
181 183 zfsctl_ops_snapdir = NULL;
182 184 zfsctl_ops_snapshot = NULL;
183 185 zfsctl_ops_shares = NULL;
184 186 }
185 187
186 188 boolean_t
187 189 zfsctl_is_node(vnode_t *vp)
188 190 {
189 191 return (vn_matchops(vp, zfsctl_ops_root) ||
190 192 vn_matchops(vp, zfsctl_ops_snapdir) ||
191 193 vn_matchops(vp, zfsctl_ops_snapshot) ||
192 194 vn_matchops(vp, zfsctl_ops_shares));
193 195
194 196 }
195 197
196 198 /*
197 199 * Return the inode number associated with the 'snapshot' or
198 200 * 'shares' directory.
199 201 */
200 202 /* ARGSUSED */
201 203 static ino64_t
202 204 zfsctl_root_inode_cb(vnode_t *vp, int index)
203 205 {
204 206 zfsvfs_t *zfsvfs = vp->v_vfsp->vfs_data;
205 207
206 208 ASSERT(index < 2);
207 209
208 210 if (index == 0)
209 211 return (ZFSCTL_INO_SNAPDIR);
210 212
211 213 return (zfsvfs->z_shares_dir);
212 214 }
213 215
/*
 * Create the '.zfs' directory.  This directory is cached as part of the VFS
 * structure.  This results in a hold on the vfs_t.  The code in zfs_umount()
 * therefore checks against a vfs_count of 2 instead of 1.  This reference
 * is removed when the ctldir is destroyed in the unmount.
 */
void
zfsctl_create(zfsvfs_t *zfsvfs)
{
	vnode_t *vp, *rvp;
	zfsctl_node_t *zcp;
	uint64_t crtime[2];

	ASSERT(zfsvfs->z_ctldir == NULL);

	/* Build the GFS root node for '.zfs' with its two static entries. */
	vp = gfs_root_create(sizeof (zfsctl_node_t), zfsvfs->z_vfs,
	    zfsctl_ops_root, ZFSCTL_INO_ROOT, zfsctl_root_entries,
	    zfsctl_root_inode_cb, MAXNAMELEN, NULL, NULL);
	zcp = vp->v_data;
	zcp->zc_id = ZFSCTL_INO_ROOT;

	/* '.zfs' reports the filesystem root's creation time as its c/mtime */
	VERIFY(VFS_ROOT(zfsvfs->z_vfs, &rvp) == 0);
	VERIFY(0 == sa_lookup(VTOZ(rvp)->z_sa_hdl, SA_ZPL_CRTIME(zfsvfs),
	    &crtime, sizeof (crtime)));
	ZFS_TIME_DECODE(&zcp->zc_cmtime, crtime);
	VN_RELE(rvp);

	/*
	 * We're only faking the fact that we have a root of a filesystem for
	 * the sake of the GFS interfaces.  Undo the flag manipulation it did
	 * for us.
	 */
	vp->v_flag &= ~(VROOT | VNOCACHE | VNOMAP | VNOSWAP | VNOMOUNT);

	zfsvfs->z_ctldir = vp;
}
250 252
/*
 * Destroy the '.zfs' directory.  Only called when the filesystem is
 * unmounted.  There might still be more references if we were force
 * unmounted, but only new zfs_inactive() calls can occur and they don't
 * reference .zfs.  Drops the hold taken in zfsctl_create().
 */
void
zfsctl_destroy(zfsvfs_t *zfsvfs)
{
	VN_RELE(zfsvfs->z_ctldir);
	zfsvfs->z_ctldir = NULL;
}
262 264
/*
 * Given a root znode, retrieve the associated '.zfs' directory vnode.
 * Adds a hold to the vnode before returning it; the caller must VN_RELE().
 */
vnode_t *
zfsctl_root(znode_t *zp)
{
	ASSERT(zfs_has_ctldir(zp));
	VN_HOLD(zp->z_zfsvfs->z_ctldir);
	return (zp->z_zfsvfs->z_ctldir);
}
274 276
275 277 /*
276 278 * Common open routine. Disallow any write access.
277 279 */
278 280 /* ARGSUSED */
279 281 static int
280 282 zfsctl_common_open(vnode_t **vpp, int flags, cred_t *cr, caller_context_t *ct)
281 283 {
282 284 if (flags & FWRITE)
283 285 return (SET_ERROR(EACCES));
284 286
285 287 return (0);
286 288 }
287 289
/*
 * Common close routine.  Nothing to do here: open took no resources,
 * so close always succeeds.
 */
/* ARGSUSED */
static int
zfsctl_common_close(vnode_t *vpp, int flags, int count, offset_t off,
    cred_t *cr, caller_context_t *ct)
{
	return (0);
}
298 300
299 301 /*
300 302 * Common access routine. Disallow writes.
301 303 */
302 304 /* ARGSUSED */
303 305 static int
304 306 zfsctl_common_access(vnode_t *vp, int mode, int flags, cred_t *cr,
305 307 caller_context_t *ct)
306 308 {
307 309 if (flags & V_ACE_MASK) {
308 310 if (mode & ACE_ALL_WRITE_PERMS)
309 311 return (SET_ERROR(EACCES));
310 312 } else {
311 313 if (mode & VWRITE)
312 314 return (SET_ERROR(EACCES));
313 315 }
314 316
315 317 return (0);
316 318 }
317 319
318 320 /*
319 321 * Common getattr function. Fill in basic information.
320 322 */
321 323 static void
322 324 zfsctl_common_getattr(vnode_t *vp, vattr_t *vap)
323 325 {
324 326 timestruc_t now;
325 327
326 328 vap->va_uid = 0;
327 329 vap->va_gid = 0;
328 330 vap->va_rdev = 0;
329 331 /*
330 332 * We are a purely virtual object, so we have no
331 333 * blocksize or allocated blocks.
332 334 */
333 335 vap->va_blksize = 0;
334 336 vap->va_nblocks = 0;
335 337 vap->va_seq = 0;
336 338 vap->va_fsid = vp->v_vfsp->vfs_dev;
337 339 vap->va_mode = S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP |
338 340 S_IROTH | S_IXOTH;
339 341 vap->va_type = VDIR;
340 342 /*
341 343 * We live in the now (for atime).
342 344 */
343 345 gethrestime(&now);
344 346 vap->va_atime = now;
345 347 }
346 348
/*
 * Common VOP_FID for '.zfs' nodes: encode the node's object id into a
 * short-form zfid.  '.zfs' znodes always have a generation number of 0.
 */
/*ARGSUSED*/
static int
zfsctl_common_fid(vnode_t *vp, fid_t *fidp, caller_context_t *ct)
{
	zfsvfs_t *zfsvfs = vp->v_vfsp->vfs_data;
	zfsctl_node_t *zcp = vp->v_data;
	uint64_t object = zcp->zc_id;
	zfid_short_t *zfid;
	int i;

	ZFS_ENTER(zfsvfs);

	/* Tell the caller how much room is needed if the fid is too small. */
	if (fidp->fid_len < SHORT_FID_LEN) {
		fidp->fid_len = SHORT_FID_LEN;
		ZFS_EXIT(zfsvfs);
		return (SET_ERROR(ENOSPC));
	}

	zfid = (zfid_short_t *)fidp;

	zfid->zf_len = SHORT_FID_LEN;

	/* Store the object id little-endian, one byte at a time. */
	for (i = 0; i < sizeof (zfid->zf_object); i++)
		zfid->zf_object[i] = (uint8_t)(object >> (8 * i));

	/* .zfs znodes always have a generation number of 0 */
	for (i = 0; i < sizeof (zfid->zf_gen); i++)
		zfid->zf_gen[i] = 0;

	ZFS_EXIT(zfsvfs);
	return (0);
}
379 381
/*
 * VOP_FID for '.zfs/shares': delegate to the real shares directory znode,
 * if the dataset has one.  Only VN_RELE() on zfs_zget() success.
 */
/*ARGSUSED*/
static int
zfsctl_shares_fid(vnode_t *vp, fid_t *fidp, caller_context_t *ct)
{
	zfsvfs_t *zfsvfs = vp->v_vfsp->vfs_data;
	znode_t *dzp;
	int error;

	ZFS_ENTER(zfsvfs);

	/* No shares directory object on this dataset */
	if (zfsvfs->z_shares_dir == 0) {
		ZFS_EXIT(zfsvfs);
		return (SET_ERROR(ENOTSUP));
	}

	if ((error = zfs_zget(zfsvfs, zfsvfs->z_shares_dir, &dzp)) == 0) {
		error = VOP_FID(ZTOV(dzp), fidp, ct);
		VN_RELE(ZTOV(dzp));
	}

	ZFS_EXIT(zfsvfs);
	return (error);
}
/*
 * .zfs inode namespace
 *
 * We need to generate unique inode numbers for all files and directories
 * within the .zfs pseudo-filesystem.  We use the following scheme:
 *
 *	ENTRY			ZFSCTL_INODE
 *	.zfs			1
 *	.zfs/snapshot		2
 *	.zfs/snapshot/<snap>	objectid(snap)
 */

/* Snapshot nodes reuse the snapshot's objset id as their inode number. */
#define	ZFSCTL_INO_SNAP(id)	(id)
417 419
/*
 * Get '.zfs' root directory attributes: fixed inode number, link/size from
 * the static entry count, and the c/mtime captured at zfsctl_create() time.
 */
/* ARGSUSED */
static int
zfsctl_root_getattr(vnode_t *vp, vattr_t *vap, int flags, cred_t *cr,
    caller_context_t *ct)
{
	zfsvfs_t *zfsvfs = vp->v_vfsp->vfs_data;
	zfsctl_node_t *zcp = vp->v_data;

	ZFS_ENTER(zfsvfs);
	vap->va_nodeid = ZFSCTL_INO_ROOT;
	vap->va_nlink = vap->va_size = NROOT_ENTRIES;
	vap->va_mtime = vap->va_ctime = zcp->zc_cmtime;

	zfsctl_common_getattr(vp, vap);
	ZFS_EXIT(zfsvfs);

	return (0);
}
439 441
/*
 * Lookup in the '.zfs' root directory.  Special-cases ".." (which resolves
 * to the filesystem root) and otherwise defers to the generic GFS lookup.
 */
/* ARGSUSED */
int
zfsctl_root_lookup(vnode_t *dvp, char *nm, vnode_t **vpp, pathname_t *pnp,
    int flags, vnode_t *rdir, cred_t *cr, caller_context_t *ct,
    int *direntflags, pathname_t *realpnp)
{
	zfsvfs_t *zfsvfs = dvp->v_vfsp->vfs_data;
	int err;

	/*
	 * No extended attributes allowed under .zfs
	 */
	if (flags & LOOKUP_XATTR)
		return (SET_ERROR(EINVAL));

	ZFS_ENTER(zfsvfs);

	if (strcmp(nm, "..") == 0) {
		/* '..' of '.zfs' is the root of the head filesystem */
		err = VFS_ROOT(dvp->v_vfsp, vpp);
	} else {
		err = gfs_vop_lookup(dvp, nm, vpp, pnp, flags, rdir,
		    cr, ct, direntflags, realpnp);
	}

	ZFS_EXIT(zfsvfs);

	return (err);
}
471 473
472 474 static int
473 475 zfsctl_pathconf(vnode_t *vp, int cmd, ulong_t *valp, cred_t *cr,
474 476 caller_context_t *ct)
475 477 {
476 478 /*
477 479 * We only care about ACL_ENABLED so that libsec can
478 480 * display ACL correctly and not default to POSIX draft.
479 481 */
480 482 if (cmd == _PC_ACL_ENABLED) {
481 483 *valp = _ACL_ACE_ENABLED;
482 484 return (0);
483 485 }
484 486
485 487 return (fs_pathconf(vp, cmd, valp, cr, ct));
486 488 }
487 489
/* Vnode operations for the '.zfs' directory itself. */
static const fs_operation_def_t zfsctl_tops_root[] = {
	{ VOPNAME_OPEN,		{ .vop_open = zfsctl_common_open } },
	{ VOPNAME_CLOSE,	{ .vop_close = zfsctl_common_close } },
	{ VOPNAME_IOCTL,	{ .error = fs_inval } },
	{ VOPNAME_GETATTR,	{ .vop_getattr = zfsctl_root_getattr } },
	{ VOPNAME_ACCESS,	{ .vop_access = zfsctl_common_access } },
	{ VOPNAME_READDIR,	{ .vop_readdir = gfs_vop_readdir } },
	{ VOPNAME_LOOKUP,	{ .vop_lookup = zfsctl_root_lookup } },
	{ VOPNAME_SEEK,		{ .vop_seek = fs_seek } },
	{ VOPNAME_INACTIVE,	{ .vop_inactive = gfs_vop_inactive } },
	{ VOPNAME_PATHCONF,	{ .vop_pathconf = zfsctl_pathconf } },
	{ VOPNAME_FID,		{ .vop_fid = zfsctl_common_fid } },
	{ NULL }
};
502 504
/*
 * Gets the full dataset name that corresponds to the given snapshot name.
 * Example:
 *	zfsctl_snapshot_zname("snap1") -> "mypool/myfs@snap1"
 *
 * 'zname' must have room for at least 'len' bytes; returns EILSEQ for an
 * invalid component name and ENAMETOOLONG if the result would not fit.
 */
static int
zfsctl_snapshot_zname(vnode_t *vp, const char *name, int len, char *zname)
{
	objset_t *os = ((zfsvfs_t *)((vp)->v_vfsp->vfs_data))->z_os;

	if (zfs_component_namecheck(name, NULL, NULL) != 0)
		return (SET_ERROR(EILSEQ));
	dmu_objset_name(os, zname);
	/* dataset + '@' + snapshot + NUL must fit within len bytes */
	if (strlen(zname) + 1 + strlen(name) >= len)
		return (SET_ERROR(ENAMETOOLONG));
	(void) strcat(zname, "@");
	(void) strcat(zname, name);
	return (0);
}
522 524
/*
 * Unmount the snapshot mounted on sep->se_root and, on success, free the
 * snap entry itself.  The caller holds sd_lock (see comment below) and has
 * already removed the entry from sd_snaps.
 */
static int
zfsctl_unmount_snap(zfs_snapentry_t *sep, int fflags, cred_t *cr)
{
	vnode_t *svp = sep->se_root;
	int error;

	ASSERT(vn_ismntpt(svp));

	/* this will be dropped by dounmount() */
	if ((error = vn_vfswlock(svp)) != 0)
		return (error);

	VN_HOLD(svp);
	error = dounmount(vn_mountedvfs(svp), fflags, cr);
	if (error) {
		VN_RELE(svp);
		return (error);
	}

	/*
	 * We can't use VN_RELE(), as that will try to invoke
	 * zfsctl_snapdir_inactive(), which would cause us to destroy
	 * the sd_lock mutex held by our caller.
	 */
	ASSERT(svp->v_count == 1);
	gfs_vop_inactive(svp, cr, NULL);

	kmem_free(sep->se_name, strlen(sep->se_name) + 1);
	kmem_free(sep, sizeof (zfs_snapentry_t));

	return (0);
}
555 557
/*
 * Update in-core state after a snapshot rename: re-key the entry in the
 * sd_snaps AVL tree and rewrite the tails of the mounted snapshot's
 * mountpoint and resource paths to the new name.  Caller holds sd_lock.
 */
static void
zfsctl_rename_snap(zfsctl_snapdir_t *sdp, zfs_snapentry_t *sep, const char *nm)
{
	avl_index_t where;
	vfs_t *vfsp;
	refstr_t *pathref;
	char newpath[MAXNAMELEN];
	char *tail;

	ASSERT(MUTEX_HELD(&sdp->sd_lock));
	ASSERT(sep != NULL);

	vfsp = vn_mountedvfs(sep->se_root);
	ASSERT(vfsp != NULL);

	vfs_lock_wait(vfsp);

	/*
	 * Change the name in the AVL tree.
	 */
	avl_remove(&sdp->sd_snaps, sep);
	kmem_free(sep->se_name, strlen(sep->se_name) + 1);
	sep->se_name = kmem_alloc(strlen(nm) + 1, KM_SLEEP);
	(void) strcpy(sep->se_name, nm);
	VERIFY(avl_find(&sdp->sd_snaps, sep, &where) == NULL);
	avl_insert(&sdp->sd_snaps, sep, where);

	/*
	 * Change the current mountpoint info:
	 *	- update the tail of the mntpoint path
	 *	- update the tail of the resource path
	 */
	pathref = vfs_getmntpoint(vfsp);
	(void) strncpy(newpath, refstr_value(pathref), sizeof (newpath));
	VERIFY((tail = strrchr(newpath, '/')) != NULL);
	*(tail+1) = '\0';
	ASSERT3U(strlen(newpath) + strlen(nm), <, sizeof (newpath));
	(void) strcat(newpath, nm);
	refstr_rele(pathref);
	vfs_setmntpoint(vfsp, newpath, 0);

	pathref = vfs_getresource(vfsp);
	(void) strncpy(newpath, refstr_value(pathref), sizeof (newpath));
	VERIFY((tail = strrchr(newpath, '@')) != NULL);
	*(tail+1) = '\0';
	ASSERT3U(strlen(newpath) + strlen(nm), <, sizeof (newpath));
	(void) strcat(newpath, nm);
	refstr_rele(pathref);
	vfs_setresource(vfsp, newpath, 0);

	vfs_unlock(vfsp);
}
608 610
/*
 * VOP_RENAME on '.zfs/snapshot': rename a snapshot via mv(1) semantics.
 * Snapshots may only be renamed within the snapdir, and the entry must
 * currently be known (mounted) in sd_snaps.
 */
/*ARGSUSED*/
static int
zfsctl_snapdir_rename(vnode_t *sdvp, char *snm, vnode_t *tdvp, char *tnm,
    cred_t *cr, caller_context_t *ct, int flags)
{
	zfsctl_snapdir_t *sdp = sdvp->v_data;
	zfs_snapentry_t search, *sep;
	zfsvfs_t *zfsvfs;
	avl_index_t where;
	char from[ZFS_MAX_DATASET_NAME_LEN], to[ZFS_MAX_DATASET_NAME_LEN];
	char real[ZFS_MAX_DATASET_NAME_LEN], fsname[ZFS_MAX_DATASET_NAME_LEN];
	int err;

	zfsvfs = sdvp->v_vfsp->vfs_data;
	ZFS_ENTER(zfsvfs);

	/* Resolve the on-disk spelling of snm for case-insensitive lookups */
	if ((flags & FIGNORECASE) || zfsvfs->z_case == ZFS_CASE_INSENSITIVE) {
		err = dmu_snapshot_realname(zfsvfs->z_os, snm, real,
		    sizeof (real), NULL);
		if (err == 0) {
			snm = real;
		} else if (err != ENOTSUP) {
			ZFS_EXIT(zfsvfs);
			return (err);
		}
	}

	ZFS_EXIT(zfsvfs);

	/*
	 * NOTE(review): z_os is dereferenced here after ZFS_EXIT(); this
	 * presumably relies on the held snapdir vnode keeping the objset
	 * alive -- confirm.
	 */
	dmu_objset_name(zfsvfs->z_os, fsname);

	err = zfsctl_snapshot_zname(sdvp, snm, sizeof (from), from);
	if (err == 0)
		err = zfsctl_snapshot_zname(tdvp, tnm, sizeof (to), to);
	if (err == 0)
		err = zfs_secpolicy_rename_perms(from, to, cr);
	if (err != 0)
		return (err);

	/*
	 * Cannot move snapshots out of the snapdir.
	 */
	if (sdvp != tdvp)
		return (SET_ERROR(EINVAL));

	/* Renaming to the same name is a no-op */
	if (strcmp(snm, tnm) == 0)
		return (0);

	mutex_enter(&sdp->sd_lock);

	search.se_name = (char *)snm;
	if ((sep = avl_find(&sdp->sd_snaps, &search, &where)) == NULL) {
		mutex_exit(&sdp->sd_lock);
		return (SET_ERROR(ENOENT));
	}

	err = dsl_dataset_rename_snapshot(fsname, snm, tnm, B_FALSE);
	if (err == 0)
		zfsctl_rename_snap(sdp, sep, tnm);

	mutex_exit(&sdp->sd_lock);

	return (err);
}
673 675
/*
 * VOP_RMDIR on '.zfs/snapshot': destroy a snapshot via rmdir(1) semantics.
 * The snapshot must first be unmounted; only then is it destroyed.  If the
 * unmount fails the entry is put back in sd_snaps.
 */
/* ARGSUSED */
static int
zfsctl_snapdir_remove(vnode_t *dvp, char *name, vnode_t *cwd, cred_t *cr,
    caller_context_t *ct, int flags)
{
	zfsctl_snapdir_t *sdp = dvp->v_data;
	zfs_snapentry_t *sep;
	zfs_snapentry_t search;
	zfsvfs_t *zfsvfs;
	char snapname[ZFS_MAX_DATASET_NAME_LEN];
	char real[ZFS_MAX_DATASET_NAME_LEN];
	int err;

	zfsvfs = dvp->v_vfsp->vfs_data;
	ZFS_ENTER(zfsvfs);

	/* Resolve the on-disk spelling of name for case-insensitive lookups */
	if ((flags & FIGNORECASE) || zfsvfs->z_case == ZFS_CASE_INSENSITIVE) {

		err = dmu_snapshot_realname(zfsvfs->z_os, name, real,
		    sizeof (real), NULL);
		if (err == 0) {
			name = real;
		} else if (err != ENOTSUP) {
			ZFS_EXIT(zfsvfs);
			return (err);
		}
	}

	ZFS_EXIT(zfsvfs);

	err = zfsctl_snapshot_zname(dvp, name, sizeof (snapname), snapname);
	if (err == 0)
		err = zfs_secpolicy_destroy_perms(snapname, cr);
	if (err != 0)
		return (err);

	mutex_enter(&sdp->sd_lock);

	search.se_name = name;
	sep = avl_find(&sdp->sd_snaps, &search, NULL);
	if (sep) {
		avl_remove(&sdp->sd_snaps, sep);
		err = zfsctl_unmount_snap(sep, MS_FORCE, cr);
		if (err != 0)
			avl_add(&sdp->sd_snaps, sep);
		else
			err = dsl_destroy_snapshot(snapname, B_FALSE);
	} else {
		err = SET_ERROR(ENOENT);
	}

	mutex_exit(&sdp->sd_lock);

	return (err);
}
729 731
730 732 /*
731 733 * This creates a snapshot under '.zfs/snapshot'.
732 734 */
733 735 /* ARGSUSED */
|
↓ open down ↓ |
644 lines elided |
↑ open up ↑ |
734 736 static int
735 737 zfsctl_snapdir_mkdir(vnode_t *dvp, char *dirname, vattr_t *vap, vnode_t **vpp,
736 738 cred_t *cr, caller_context_t *cc, int flags, vsecattr_t *vsecp)
737 739 {
738 740 zfsvfs_t *zfsvfs = dvp->v_vfsp->vfs_data;
739 741 char name[ZFS_MAX_DATASET_NAME_LEN];
740 742 int err;
741 743 static enum symfollow follow = NO_FOLLOW;
742 744 static enum uio_seg seg = UIO_SYSSPACE;
743 745
744 - if (zfs_component_namecheck(dirname, NULL, NULL) != 0)
746 + if (zfs_component_namecheck(dirname, NULL, NULL) != 0 ||
747 + autosnap_check_name(dirname))
745 748 return (SET_ERROR(EILSEQ));
746 749
747 750 dmu_objset_name(zfsvfs->z_os, name);
748 751
749 752 *vpp = NULL;
750 753
751 754 err = zfs_secpolicy_snapshot_perms(name, cr);
752 755 if (err != 0)
753 756 return (err);
754 757
755 758 if (err == 0) {
756 759 err = dmu_objset_snapshot_one(name, dirname);
757 760 if (err != 0)
758 761 return (err);
759 762 err = lookupnameat(dirname, seg, follow, NULL, vpp, dvp);
760 763 }
761 764
762 765 return (err);
763 766 }
764 767
/*
 * Lookup entry point for the 'snapshot' directory.  Try to open the
 * snapshot if it exists, creating the pseudo filesystem vnode as necessary.
 * Perform a mount of the associated dataset on top of the vnode.
 * Autosnap-managed snapshots are hidden (ENOENT).
 */
/* ARGSUSED */
static int
zfsctl_snapdir_lookup(vnode_t *dvp, char *nm, vnode_t **vpp, pathname_t *pnp,
    int flags, vnode_t *rdir, cred_t *cr, caller_context_t *ct,
    int *direntflags, pathname_t *realpnp)
{
	zfsctl_snapdir_t *sdp = dvp->v_data;
	objset_t *snap;
	char snapname[ZFS_MAX_DATASET_NAME_LEN];
	char real[ZFS_MAX_DATASET_NAME_LEN];
	char *mountpoint;
	zfs_snapentry_t *sep, search;
	struct mounta margs;
	vfs_t *vfsp;
	size_t mountpoint_len;
	avl_index_t where;
	zfsvfs_t *zfsvfs = dvp->v_vfsp->vfs_data;
	int err;

	/*
	 * No extended attributes allowed under .zfs
	 */
	if (flags & LOOKUP_XATTR)
		return (SET_ERROR(EINVAL));

	ASSERT(dvp->v_type == VDIR);

	/*
	 * If we get a recursive call, that means we got called
	 * from the domount() code while it was trying to look up the
	 * spec (which looks like a local path for zfs).  We need to
	 * add some flag to domount() to tell it not to do this lookup.
	 */
	if (MUTEX_HELD(&sdp->sd_lock))
		return (SET_ERROR(ENOENT));

	ZFS_ENTER(zfsvfs);

	/* '.' and '..' are handled generically */
	if (gfs_lookup_dot(vpp, dvp, zfsvfs->z_ctldir, nm) == 0) {
		ZFS_EXIT(zfsvfs);
		return (0);
	}

	if (flags & FIGNORECASE) {
		boolean_t conflict = B_FALSE;

		err = dmu_snapshot_realname(zfsvfs->z_os, nm, real,
		    sizeof (real), &conflict);
		if (err == 0) {
			nm = real;
		} else if (err != ENOTSUP) {
			ZFS_EXIT(zfsvfs);
			return (err);
		}
		if (realpnp)
			(void) strlcpy(realpnp->pn_buf, nm,
			    realpnp->pn_bufsize);
		if (conflict && direntflags)
			*direntflags = ED_CASE_CONFLICT;
	}

	mutex_enter(&sdp->sd_lock);
	search.se_name = (char *)nm;
	if ((sep = avl_find(&sdp->sd_snaps, &search, &where)) != NULL) {
		/* Known snapshot: hand back the root of its mounted vfs */
		*vpp = sep->se_root;
		VN_HOLD(*vpp);
		err = traverse(vpp);
		if (err != 0) {
			VN_RELE(*vpp);
			*vpp = NULL;
		} else if (*vpp == sep->se_root) {
			/*
			 * The snapshot was unmounted behind our backs,
			 * try to remount it.
			 */
			goto domount;
		} else {
			/*
			 * VROOT was set during the traverse call.  We need
			 * to clear it since we're pretending to be part
			 * of our parent's vfs.
			 */
			(*vpp)->v_flag &= ~VROOT;
		}
		mutex_exit(&sdp->sd_lock);
		ZFS_EXIT(zfsvfs);
		return (err);
	}

	/*
	 * The requested snapshot is not currently mounted, look it up.
	 */
	err = zfsctl_snapshot_zname(dvp, nm, sizeof (snapname), snapname);
	if (err != 0) {
		mutex_exit(&sdp->sd_lock);
		ZFS_EXIT(zfsvfs);
		/*
		 * handle "ls *" or "?" in a graceful manner,
		 * forcing EILSEQ to ENOENT.
		 * Since shell ultimately passes "*" or "?" as name to lookup
		 */
		return (err == EILSEQ ? ENOENT : err);
	}

	/* Autosnapshots are not visible or mountable via .zfs/snapshot */
	if (autosnap_check_name(strchr(snapname, '@'))) {
		mutex_exit(&sdp->sd_lock);
		ZFS_EXIT(zfsvfs);
		return (SET_ERROR(ENOENT));
	}

	if (dmu_objset_hold(snapname, FTAG, &snap) != 0) {
		mutex_exit(&sdp->sd_lock);
		ZFS_EXIT(zfsvfs);
		return (SET_ERROR(ENOENT));
	}

	/* Record the new entry and create the GFS mountpoint vnode */
	sep = kmem_alloc(sizeof (zfs_snapentry_t), KM_SLEEP);
	sep->se_name = kmem_alloc(strlen(nm) + 1, KM_SLEEP);
	(void) strcpy(sep->se_name, nm);
	*vpp = sep->se_root = zfsctl_snapshot_mknode(dvp, dmu_objset_id(snap));
	avl_insert(&sdp->sd_snaps, sep, where);

	dmu_objset_rele(snap, FTAG);
domount:
	mountpoint_len = strlen(refstr_value(dvp->v_vfsp->vfs_mntpt)) +
	    strlen("/.zfs/snapshot/") + strlen(nm) + 1;
	mountpoint = kmem_alloc(mountpoint_len, KM_SLEEP);
	(void) snprintf(mountpoint, mountpoint_len, "%s/.zfs/snapshot/%s",
	    refstr_value(dvp->v_vfsp->vfs_mntpt), nm);

	margs.spec = snapname;
	margs.dir = mountpoint;
	margs.flags = MS_SYSSPACE | MS_NOMNTTAB;
	margs.fstype = "zfs";
	margs.dataptr = NULL;
	margs.datalen = 0;
	margs.optptr = NULL;
	margs.optlen = 0;

	err = domount("zfs", &margs, *vpp, kcred, &vfsp);
	kmem_free(mountpoint, mountpoint_len);

	if (err == 0) {
		/*
		 * Return the mounted root rather than the covered mount point.
		 * Takes the GFS vnode at .zfs/snapshot/<snapname> and returns
		 * the ZFS vnode mounted on top of the GFS node.  This ZFS
		 * vnode is the root of the newly created vfsp.
		 */
		VFS_RELE(vfsp);
		err = traverse(vpp);
	}

	if (err == 0) {
		/*
		 * Fix up the root vnode mounted on .zfs/snapshot/<snapname>.
		 *
		 * This is where we lie about our v_vfsp in order to
		 * make .zfs/snapshot/<snapname> accessible over NFS
		 * without requiring manual mounts of <snapname>.
		 */
		ASSERT(VTOZ(*vpp)->z_zfsvfs != zfsvfs);
		VTOZ(*vpp)->z_zfsvfs->z_parent = zfsvfs;
		(*vpp)->v_vfsp = zfsvfs->z_vfs;
		(*vpp)->v_flag &= ~VROOT;
	}
	mutex_exit(&sdp->sd_lock);
	ZFS_EXIT(zfsvfs);

	/*
	 * If we had an error, drop our hold on the vnode and
	 * zfsctl_snapshot_inactive() will clean up.
	 */
	if (err != 0) {
		VN_RELE(*vpp);
		*vpp = NULL;
	}
	return (err);
}
942 952
943 953 /* ARGSUSED */
/*
 * VOP_LOOKUP for the '.zfs/shares' control directory.
 *
 * "." and ".." are resolved via gfs_lookup_dot(); any other name is
 * forwarded to VOP_LOOKUP() on the filesystem's real shares znode
 * (zfsvfs->z_shares_dir).  Returns 0 with a held vnode in *vpp on
 * success, ENOTSUP when the filesystem has no shares directory, or the
 * error from zfs_zget()/VOP_LOOKUP().
 */
944 954 static int
945 955 zfsctl_shares_lookup(vnode_t *dvp, char *nm, vnode_t **vpp, pathname_t *pnp,
946 956 int flags, vnode_t *rdir, cred_t *cr, caller_context_t *ct,
947 957 int *direntflags, pathname_t *realpnp)
948 958 {
949 959 zfsvfs_t *zfsvfs = dvp->v_vfsp->vfs_data;
950 960 znode_t *dzp;
951 961 int error;
952 962
953 963 ZFS_ENTER(zfsvfs);
954 964
955 965 if (gfs_lookup_dot(vpp, dvp, zfsvfs->z_ctldir, nm) == 0) {
956 966 ZFS_EXIT(zfsvfs);
957 967 return (0);
958 968 }
959 969
960 970 if (zfsvfs->z_shares_dir == 0) {
961 971 ZFS_EXIT(zfsvfs);
962 972 return (SET_ERROR(ENOTSUP));
963 973 }
	/*
	 * illumos 6093: VN_RELE() the shares znode only when zfs_zget()
	 * succeeded and actually returned a hold.
	 */
964 974 if ((error = zfs_zget(zfsvfs, zfsvfs->z_shares_dir, &dzp)) == 0) {
965 975 error = VOP_LOOKUP(ZTOV(dzp), nm, vpp, pnp,
966 976 flags, rdir, cr, ct, direntflags, realpnp);
967 977 VN_RELE(ZTOV(dzp));
968 978 }
969 979
970 980 ZFS_EXIT(zfsvfs);
971 981
972 982 return (error);
973 983 }
974 984
975 985 /* ARGSUSED */
/*
 * GFS readdir callback for '.zfs/snapshot'.
 *
 * Emits one snapshot directory entry per invocation, using the cookie in
 * *offp to walk the snapshot list with dmu_snapshot_list_next() (under the
 * DSL pool config lock).  ENOENT from the DMU means the listing is
 * exhausted and is reported as EOF (*eofp = 1, return 0), not as an error.
 * Entries are emitted either as edirent_t (when V_RDDIR_ENTFLAGS is set,
 * carrying ED_CASE_CONFLICT) or as plain dirent64.
 */
976 986 static int
977 987 zfsctl_snapdir_readdir_cb(vnode_t *vp, void *dp, int *eofp,
978 988 offset_t *offp, offset_t *nextp, void *data, int flags)
979 989 {
|
↓ open down ↓ |
97 lines elided |
↑ open up ↑ |
980 990 zfsvfs_t *zfsvfs = vp->v_vfsp->vfs_data;
981 991 char snapname[ZFS_MAX_DATASET_NAME_LEN];
982 992 uint64_t id, cookie;
983 993 boolean_t case_conflict;
984 994 int error;
985 995
986 996 ZFS_ENTER(zfsvfs);
987 997
988 998 cookie = *offp;
989 999 dsl_pool_config_enter(dmu_objset_pool(zfsvfs->z_os), FTAG);
	/*
	 * NEX-17003: skip autosnapshots so they are never exposed (and
	 * therefore never managed) via .zfs/snapshot.
	 */
990 - error = dmu_snapshot_list_next(zfsvfs->z_os,
991 - sizeof (snapname), snapname, &id, &cookie, &case_conflict);
1000 + do {
1001 + error = dmu_snapshot_list_next(zfsvfs->z_os,
1002 + sizeof (snapname), snapname, &id, &cookie, &case_conflict);
1003 + } while (error == 0 && autosnap_check_name(snapname));
992 1004 dsl_pool_config_exit(dmu_objset_pool(zfsvfs->z_os), FTAG);
993 1005 if (error) {
994 1006 ZFS_EXIT(zfsvfs);
995 1007 if (error == ENOENT) {
996 1008 *eofp = 1;
997 1009 return (0);
998 1010 }
999 1011 return (error);
1000 1012 }
1001 1013
1002 1014 if (flags & V_RDDIR_ENTFLAGS) {
1003 1015 edirent_t *eodp = dp;
1004 1016
1005 1017 (void) strcpy(eodp->ed_name, snapname);
1006 1018 eodp->ed_ino = ZFSCTL_INO_SNAP(id);
1007 1019 eodp->ed_eflags = case_conflict ? ED_CASE_CONFLICT : 0;
1008 1020 } else {
1009 1021 struct dirent64 *odp = dp;
1010 1022
1011 1023 (void) strcpy(odp->d_name, snapname);
1012 1024 odp->d_ino = ZFSCTL_INO_SNAP(id);
1013 1025 }
	/* Hand the updated cookie back so the next call resumes after us. */
1014 1026 *nextp = cookie;
1015 1027
1016 1028 ZFS_EXIT(zfsvfs);
1017 1029
1018 1030 return (0);
1019 1031 }
1020 1032
1021 1033 /* ARGSUSED */
/*
 * VOP_READDIR for '.zfs/shares': forwards the request to VOP_READDIR() on
 * the real shares znode.  Returns ENOTSUP when the filesystem has no
 * shares directory; if the shares znode cannot be fetched, reports EOF
 * and ENOENT.
 */
1022 1034 static int
1023 1035 zfsctl_shares_readdir(vnode_t *vp, uio_t *uiop, cred_t *cr, int *eofp,
1024 1036 caller_context_t *ct, int flags)
1025 1037 {
1026 1038 zfsvfs_t *zfsvfs = vp->v_vfsp->vfs_data;
1027 1039 znode_t *dzp;
1028 1040 int error;
1029 1041
1030 1042 ZFS_ENTER(zfsvfs);
1031 1043
1032 1044 if (zfsvfs->z_shares_dir == 0) {
1033 1045 ZFS_EXIT(zfsvfs);
1034 1046 return (SET_ERROR(ENOTSUP));
1035 1047 }
1036 1048 if ((error = zfs_zget(zfsvfs, zfsvfs->z_shares_dir, &dzp)) == 0) {
1037 1049 error = VOP_READDIR(ZTOV(dzp), uiop, cr, eofp, ct, flags);
1038 1050 VN_RELE(ZTOV(dzp));
1039 1051 } else {
1040 1052 *eofp = 1;
1041 1053 error = SET_ERROR(ENOENT);
1042 1054 }
1043 1055
1044 1056 ZFS_EXIT(zfsvfs);
1045 1057 return (error);
1046 1058 }
1047 1059
1048 1060 /*
1049 1061 * pvp is the '.zfs' directory (zfsctl_node_t).
1050 1062 *
1051 1063 * Creates vp, which is '.zfs/snapshot' (zfsctl_snapdir_t).
1052 1064 *
1053 1065 * This function is the callback to create a GFS vnode for '.zfs/snapshot'
1054 1066 * when a lookup is performed on .zfs for "snapshot".
1055 1067 */
1056 1068 vnode_t *
1057 1069 zfsctl_mknode_snapdir(vnode_t *pvp)
1058 1070 {
1059 1071 vnode_t *vp;
1060 1072 zfsctl_snapdir_t *sdp;
1061 1073
1062 1074 vp = gfs_dir_create(sizeof (zfsctl_snapdir_t), pvp,
1063 1075 zfsctl_ops_snapdir, NULL, NULL, MAXNAMELEN,
1064 1076 zfsctl_snapdir_readdir_cb, NULL);
1065 1077 sdp = vp->v_data;
1066 1078 sdp->sd_node.zc_id = ZFSCTL_INO_SNAPDIR;
	/* Inherit creation/modification time from the parent '.zfs' node. */
1067 1079 sdp->sd_node.zc_cmtime = ((zfsctl_node_t *)pvp->v_data)->zc_cmtime;
	/* sd_lock protects sd_snaps, the AVL tree of mounted snapshots. */
1068 1080 mutex_init(&sdp->sd_lock, NULL, MUTEX_DEFAULT, NULL);
1069 1081 avl_create(&sdp->sd_snaps, snapentry_compare,
1070 1082 sizeof (zfs_snapentry_t), offsetof(zfs_snapentry_t, se_node));
1071 1083 return (vp);
1072 1084 }
1073 1085
/*
 * Creates the GFS vnode for '.zfs/shares' under pvp (the '.zfs'
 * directory), inheriting its creation/modification time from the parent
 * control node.
 */
1074 1086 vnode_t *
1075 1087 zfsctl_mknode_shares(vnode_t *pvp)
1076 1088 {
1077 1089 vnode_t *vp;
1078 1090 zfsctl_node_t *sdp;
1079 1091
1080 1092 vp = gfs_dir_create(sizeof (zfsctl_node_t), pvp,
1081 1093 zfsctl_ops_shares, NULL, NULL, MAXNAMELEN,
1082 1094 NULL, NULL);
1083 1095 sdp = vp->v_data;
1084 1096 sdp->zc_cmtime = ((zfsctl_node_t *)pvp->v_data)->zc_cmtime;
1085 1097 return (vp);
1086 1098
1087 1099 }
1088 1100
1089 1101 /* ARGSUSED */
/*
 * VOP_GETATTR for '.zfs/shares': attributes are those of the real shares
 * znode, obtained by forwarding to VOP_GETATTR().  Returns ENOTSUP when
 * the filesystem has no shares directory, otherwise the error from
 * zfs_zget()/VOP_GETATTR().
 */
1090 1102 static int
1091 1103 zfsctl_shares_getattr(vnode_t *vp, vattr_t *vap, int flags, cred_t *cr,
1092 1104 caller_context_t *ct)
1093 1105 {
1094 1106 zfsvfs_t *zfsvfs = vp->v_vfsp->vfs_data;
1095 1107 znode_t *dzp;
1096 1108 int error;
1097 1109
1098 1110 ZFS_ENTER(zfsvfs);
1099 1111 if (zfsvfs->z_shares_dir == 0) {
1100 1112 ZFS_EXIT(zfsvfs);
1101 1113 return (SET_ERROR(ENOTSUP));
1102 1114 }
1103 1115 if ((error = zfs_zget(zfsvfs, zfsvfs->z_shares_dir, &dzp)) == 0) {
1104 1116 error = VOP_GETATTR(ZTOV(dzp), vap, flags, cr, ct);
1105 1117 VN_RELE(ZTOV(dzp));
1106 1118 }
1107 1119 ZFS_EXIT(zfsvfs);
1108 1120 return (error);
1109 1121
1110 1122
1111 1123 }
1112 1124
1113 1125 /* ARGSUSED */
/*
 * VOP_GETATTR for '.zfs/snapshot': synthesizes attributes from the GFS
 * node and the current snapshot count.
 */
1114 1126 static int
1115 1127 zfsctl_snapdir_getattr(vnode_t *vp, vattr_t *vap, int flags, cred_t *cr,
1116 1128 caller_context_t *ct)
1117 1129 {
1118 1130 zfsvfs_t *zfsvfs = vp->v_vfsp->vfs_data;
1119 1131 zfsctl_snapdir_t *sdp = vp->v_data;
1120 1132
1121 1133 ZFS_ENTER(zfsvfs);
1122 1134 zfsctl_common_getattr(vp, vap);
1123 1135 vap->va_nodeid = gfs_file_inode(vp);
	/* +2 presumably accounts for '.' and '..' — standard for directories. */
1124 1136 vap->va_nlink = vap->va_size = avl_numnodes(&sdp->sd_snaps) + 2;
	/* Times track the objset's snapshot creation/modification time. */
1125 1137 vap->va_ctime = vap->va_mtime = dmu_objset_snap_cmtime(zfsvfs->z_os);
1126 1138 ZFS_EXIT(zfsvfs);
1127 1139
1128 1140 return (0);
1129 1141 }
1130 1142
1131 1143 /* ARGSUSED */
/*
 * VOP_INACTIVE for '.zfs/snapshot'.  gfs_dir_inactive() returns non-NULL
 * private data only when the node is truly going away; in that case the
 * snapshot AVL tree must already be empty, and the lock, tree, and
 * snapdir structure are torn down here.
 */
1132 1144 static void
1133 1145 zfsctl_snapdir_inactive(vnode_t *vp, cred_t *cr, caller_context_t *ct)
1134 1146 {
1135 1147 zfsctl_snapdir_t *sdp = vp->v_data;
1136 1148 void *private;
1137 1149
1138 1150 private = gfs_dir_inactive(vp);
1139 1151 if (private != NULL) {
1140 1152 ASSERT(avl_numnodes(&sdp->sd_snaps) == 0);
1141 1153 mutex_destroy(&sdp->sd_lock);
1142 1154 avl_destroy(&sdp->sd_snaps);
1143 1155 kmem_free(private, sizeof (zfsctl_snapdir_t));
1144 1156 }
1145 1157 }
1146 1158
/* Vnode operations vector for the '.zfs/snapshot' control directory. */
1147 1159 static const fs_operation_def_t zfsctl_tops_snapdir[] = {
1148 1160 { VOPNAME_OPEN, { .vop_open = zfsctl_common_open } },
1149 1161 { VOPNAME_CLOSE, { .vop_close = zfsctl_common_close } },
1150 1162 { VOPNAME_IOCTL, { .error = fs_inval } },
1151 1163 { VOPNAME_GETATTR, { .vop_getattr = zfsctl_snapdir_getattr } },
1152 1164 { VOPNAME_ACCESS, { .vop_access = zfsctl_common_access } },
1153 1165 { VOPNAME_RENAME, { .vop_rename = zfsctl_snapdir_rename } },
1154 1166 { VOPNAME_RMDIR, { .vop_rmdir = zfsctl_snapdir_remove } },
1155 1167 { VOPNAME_MKDIR, { .vop_mkdir = zfsctl_snapdir_mkdir } },
1156 1168 { VOPNAME_READDIR, { .vop_readdir = gfs_vop_readdir } },
1157 1169 { VOPNAME_LOOKUP, { .vop_lookup = zfsctl_snapdir_lookup } },
1158 1170 { VOPNAME_SEEK, { .vop_seek = fs_seek } },
1159 1171 { VOPNAME_INACTIVE, { .vop_inactive = zfsctl_snapdir_inactive } },
1160 1172 { VOPNAME_FID, { .vop_fid = zfsctl_common_fid } },
1161 1173 { NULL }
1162 1174 };
1163 1175
/* Vnode operations vector for the '.zfs/shares' control directory. */
1164 1176 static const fs_operation_def_t zfsctl_tops_shares[] = {
1165 1177 { VOPNAME_OPEN, { .vop_open = zfsctl_common_open } },
1166 1178 { VOPNAME_CLOSE, { .vop_close = zfsctl_common_close } },
1167 1179 { VOPNAME_IOCTL, { .error = fs_inval } },
1168 1180 { VOPNAME_GETATTR, { .vop_getattr = zfsctl_shares_getattr } },
1169 1181 { VOPNAME_ACCESS, { .vop_access = zfsctl_common_access } },
1170 1182 { VOPNAME_READDIR, { .vop_readdir = zfsctl_shares_readdir } },
1171 1183 { VOPNAME_LOOKUP, { .vop_lookup = zfsctl_shares_lookup } },
1172 1184 { VOPNAME_SEEK, { .vop_seek = fs_seek } },
1173 1185 { VOPNAME_INACTIVE, { .vop_inactive = gfs_vop_inactive } },
1174 1186 { VOPNAME_FID, { .vop_fid = zfsctl_shares_fid } },
1175 1187 { NULL }
1176 1188 };
1177 1189
1178 1190 /*
1179 1191 * pvp is the GFS vnode '.zfs/snapshot'.
1180 1192 *
1181 1193 * This creates a GFS node under '.zfs/snapshot' representing each
1182 1194 * snapshot. This newly created GFS node is what we mount snapshot
1183 1195 * vfs_t's ontop of.
1184 1196 */
1185 1197 static vnode_t *
1186 1198 zfsctl_snapshot_mknode(vnode_t *pvp, uint64_t objset)
1187 1199 {
1188 1200 vnode_t *vp;
1189 1201 zfsctl_node_t *zcp;
1190 1202
1191 1203 vp = gfs_dir_create(sizeof (zfsctl_node_t), pvp,
1192 1204 zfsctl_ops_snapshot, NULL, NULL, MAXNAMELEN, NULL, NULL);
1193 1205 zcp = vp->v_data;
	/* Record the snapshot's objset id so it can be found by objsetid. */
1194 1206 zcp->zc_id = objset;
1195 1207
1196 1208 return (vp);
1197 1209 }
1198 1210
/*
 * VOP_INACTIVE for a '.zfs/snapshot/<name>' GFS node.
 *
 * Looks up the parent snapdir (which takes a hold on dvp), removes this
 * node's zfs_snapentry_t from the snapdir's AVL tree, and disposes of the
 * vnode.  If other references appeared while we raced here (v_count > 1),
 * just drop one hold and bail.  illumos 5768: VN_RELE(dvp) is issued on
 * every return path so the parent hold taken by gfs_dir_lookup() never
 * leaks.
 */
1199 1211 static void
1200 1212 zfsctl_snapshot_inactive(vnode_t *vp, cred_t *cr, caller_context_t *ct)
1201 1213 {
1202 1214 zfsctl_snapdir_t *sdp;
1203 1215 zfs_snapentry_t *sep, *next;
1204 1216 vnode_t *dvp;
1205 1217
1206 1218 VERIFY(gfs_dir_lookup(vp, "..", &dvp, cr, 0, NULL, NULL) == 0);
1207 1219 sdp = dvp->v_data;
1208 1220
1209 1221 mutex_enter(&sdp->sd_lock);
1210 1222
1211 1223 mutex_enter(&vp->v_lock);
1212 1224 if (vp->v_count > 1) {
	/* Still referenced elsewhere: drop our hold and keep the entry. */
1213 1225 VN_RELE_LOCKED(vp);
1214 1226 mutex_exit(&vp->v_lock);
1215 1227 mutex_exit(&sdp->sd_lock);
1216 1228 VN_RELE(dvp);
1217 1229 return;
1218 1230 }
1219 1231 mutex_exit(&vp->v_lock);
1220 1232 ASSERT(!vn_ismntpt(vp));
1221 1233
	/* Find and remove this vnode's entry from the snapdir's AVL tree. */
1222 1234 sep = avl_first(&sdp->sd_snaps);
1223 1235 while (sep != NULL) {
1224 1236 next = AVL_NEXT(&sdp->sd_snaps, sep);
1225 1237
1226 1238 if (sep->se_root == vp) {
1227 1239 avl_remove(&sdp->sd_snaps, sep);
1228 1240 kmem_free(sep->se_name, strlen(sep->se_name) + 1);
1229 1241 kmem_free(sep, sizeof (zfs_snapentry_t));
1230 1242 break;
1231 1243 }
1232 1244 sep = next;
1233 1245 }
1234 1246 ASSERT(sep != NULL);
1235 1247
1236 1248 mutex_exit(&sdp->sd_lock);
1237 1249 VN_RELE(dvp);
1238 1250
1239 1251 /*
1240 1252 * Dispose of the vnode for the snapshot mount point.
1241 1253 * This is safe to do because once this entry has been removed
1242 1254 * from the AVL tree, it can't be found again, so cannot become
1243 1255 * "active". If we lookup the same name again we will end up
1244 1256 * creating a new vnode.
1245 1257 */
1246 1258 gfs_vop_inactive(vp, cr, ct);
1247 1259 }
1248 1260
1249 1261
1250 1262 /*
1251 1263 * These VP's should never see the light of day. They should always
1252 1264 * be covered.
1253 1265 */
/* Only INACTIVE is provided; all other operations hit the covering mount. */
1254 1266 static const fs_operation_def_t zfsctl_tops_snapshot[] = {
1255 1267 VOPNAME_INACTIVE, { .vop_inactive = zfsctl_snapshot_inactive },
1256 1268 NULL, NULL
1257 1269 };
1258 1270
/*
 * Map a snapshot objsetid to the zfsvfs_t of its mounted snapshot.
 *
 * Walks the '.zfs/snapshot' snapdir's AVL tree looking for a GFS node
 * whose zc_id matches objsetid, then traverse()s into the vfs mounted on
 * top of it.  If traverse() returns the same vnode, nothing is mounted
 * there, and EINVAL is returned.  Returns 0 with *zfsvfsp set on success.
 */
1259 1271 int
1260 1272 zfsctl_lookup_objset(vfs_t *vfsp, uint64_t objsetid, zfsvfs_t **zfsvfsp)
1261 1273 {
1262 1274 zfsvfs_t *zfsvfs = vfsp->vfs_data;
1263 1275 vnode_t *dvp, *vp;
1264 1276 zfsctl_snapdir_t *sdp;
1265 1277 zfsctl_node_t *zcp;
1266 1278 zfs_snapentry_t *sep;
1267 1279 int error;
1268 1280
1269 1281 ASSERT(zfsvfs->z_ctldir != NULL);
1270 1282 error = zfsctl_root_lookup(zfsvfs->z_ctldir, "snapshot", &dvp,
1271 1283 NULL, 0, NULL, kcred, NULL, NULL, NULL);
1272 1284 if (error != 0)
1273 1285 return (error);
1274 1286 sdp = dvp->v_data;
1275 1287
1276 1288 mutex_enter(&sdp->sd_lock);
1277 1289 sep = avl_first(&sdp->sd_snaps);
1278 1290 while (sep != NULL) {
1279 1291 vp = sep->se_root;
1280 1292 zcp = vp->v_data;
1281 1293 if (zcp->zc_id == objsetid)
1282 1294 break;
1283 1295
1284 1296 sep = AVL_NEXT(&sdp->sd_snaps, sep);
1285 1297 }
1286 1298
1287 1299 if (sep != NULL) {
1288 1300 VN_HOLD(vp);
1289 1301 /*
1290 1302 * Return the mounted root rather than the covered mount point.
1291 1303 * Takes the GFS vnode at .zfs/snapshot/<snapshot objsetid>
1292 1304 * and returns the ZFS vnode mounted on top of the GFS node.
1293 1305 * This ZFS vnode is the root of the vfs for objset 'objsetid'.
1294 1306 */
1295 1307 error = traverse(&vp);
1296 1308 if (error == 0) {
1297 1309 if (vp == sep->se_root)
1298 1310 error = SET_ERROR(EINVAL);
1299 1311 else
1300 1312 *zfsvfsp = VTOZ(vp)->z_zfsvfs;
1301 1313 }
1302 1314 mutex_exit(&sdp->sd_lock);
	/* traverse() swapped our hold onto the mounted root; release it. */
1303 1315 VN_RELE(vp);
1304 1316 } else {
1305 1317 error = SET_ERROR(EINVAL);
1306 1318 mutex_exit(&sdp->sd_lock);
1307 1319 }
1308 1320
1309 1321 VN_RELE(dvp);
1310 1322
1311 1323 return (error);
1312 1324 }
1313 1325
1314 1326 /*
1315 1327 * Unmount any snapshots for the given filesystem. This is called from
1316 1328 * zfs_umount() - if we have a ctldir, then go through and unmount all the
1317 1329 * snapshots.
1318 1330 */
1319 1331 int
1320 1332 zfsctl_umount_snapshots(vfs_t *vfsp, int fflags, cred_t *cr)
1321 1333 {
1322 1334 zfsvfs_t *zfsvfs = vfsp->vfs_data;
1323 1335 vnode_t *dvp;
1324 1336 zfsctl_snapdir_t *sdp;
1325 1337 zfs_snapentry_t *sep, *next;
1326 1338 int error;
1327 1339
1328 1340 ASSERT(zfsvfs->z_ctldir != NULL);
1329 1341 error = zfsctl_root_lookup(zfsvfs->z_ctldir, "snapshot", &dvp,
1330 1342 NULL, 0, NULL, cr, NULL, NULL, NULL);
1331 1343 if (error != 0)
1332 1344 return (error);
1333 1345 sdp = dvp->v_data;
1334 1346
1335 1347 mutex_enter(&sdp->sd_lock);
1336 1348
	/* Walk every snapshot entry, unmounting those that are mounted. */
1337 1349 sep = avl_first(&sdp->sd_snaps);
1338 1350 while (sep != NULL) {
1339 1351 next = AVL_NEXT(&sdp->sd_snaps, sep);
1340 1352
1341 1353 /*
1342 1354 * If this snapshot is not mounted, then it must
1343 1355 * have just been unmounted by somebody else, and
1344 1356 * will be cleaned up by zfsctl_snapdir_inactive().
1345 1357 */
1346 1358 if (vn_ismntpt(sep->se_root)) {
1347 1359 avl_remove(&sdp->sd_snaps, sep);
1348 1360 error = zfsctl_unmount_snap(sep, fflags, cr);
1349 1361 if (error) {
	/* Unmount failed: put the entry back and report the error. */
1350 1362 avl_add(&sdp->sd_snaps, sep);
1351 1363 break;
1352 1364 }
1353 1365 }
1354 1366 sep = next;
1355 1367 }
1356 1368
1357 1369 mutex_exit(&sdp->sd_lock);
1358 1370 VN_RELE(dvp);
1359 1371
1360 1372 return (error);
1361 1373 }
|
↓ open down ↓ |
360 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX