Print this page
NEX-14666 Need to provide SMB 2.1 Client
NEX-17187 panic in smbfs_acl_store
NEX-17231 smbfs create xattr files finds wrong file
NEX-17224 smbfs lookup EINVAL should be ENOENT
NEX-17260 SMB1 client fails to list directory after NEX-14666
Reviewed by: Evan Layton <evan.layton@nexenta.com>
Reviewed by: Matt Barden <matt.barden@nexenta.com>
Reviewed by: Rick McNeal <rick.mcneal@nexenta.com>
Reviewed by: Saso Kiselkov <saso.kiselkov@nexenta.com>
Reviewed by: Joyce McIntosh <joyce.mcintosh@nexenta.com>
and: (cleanup)
NEX-16818 Add fksmbcl development tool
NEX-17264 SMB client test tp_smbutil_013 fails after NEX-14666
Reviewed by: Evan Layton <evan.layton@nexenta.com>
Reviewed by: Matt Barden <matt.barden@nexenta.com>
and: (fix ref leaks)
NEX-16783 Panic in smbfs_delmap_callback (fix leak)
Reviewed by: Evan Layton <evan.layton@nexenta.com>
Reviewed by: Dan Fields <dan.fields@nexenta.com>
NEX-16783 Panic in smbfs_delmap_callback
Reviewed by: Jean McCormack <jean.mccormack@nexenta.com>
Reviewed by: Dan Fields <dan.fields@nexenta.com>
5404 smbfs needs mmap support
Portions contributed by: Gordon Ross <gordon.w.ross@gmail.com>
Reviewed by: C Fraire <cfraire@me.com>
Reviewed by: Toomas Soome <tsoome@me.com>
Reviewed by: Jason King <jason.brian.king@gmail.com>
Reviewed by: Andrew Stormont <andyjstormont@gmail.com>
Approved by: Richard Lowe <richlowe@richlowe.net>
2552 smbfs: add support for NFS-like remove
Portions contributed by: Gordon Ross <gordon.w.ross@gmail.com>
Reviewed by: Yuri Pankov <yuripv@yuripv.net>
Reviewed by: Jason King <jason.king@joyent.com>
Reviewed by: C Fraire <cfraire@me.com>
Approved by: Richard Lowe <richlowe@richlowe.net>
| Split |
Close |
| Expand all |
| Collapse all |
--- old/usr/src/uts/common/fs/smbclnt/smbfs/smbfs_vnops.c
+++ new/usr/src/uts/common/fs/smbclnt/smbfs/smbfs_vnops.c
1 1 /*
2 2 * Copyright (c) 2000-2001 Boris Popov
3 3 * All rights reserved.
4 4 *
5 5 * Redistribution and use in source and binary forms, with or without
6 6 * modification, are permitted provided that the following conditions
7 7 * are met:
8 8 * 1. Redistributions of source code must retain the above copyright
9 9 * notice, this list of conditions and the following disclaimer.
10 10 * 2. Redistributions in binary form must reproduce the above copyright
11 11 * notice, this list of conditions and the following disclaimer in the
12 12 * documentation and/or other materials provided with the distribution.
13 13 * 3. All advertising materials mentioning features or use of this software
14 14 * must display the following acknowledgement:
15 15 * This product includes software developed by Boris Popov.
16 16 * 4. Neither the name of the author nor the names of any co-contributors
17 17 * may be used to endorse or promote products derived from this software
18 18 * without specific prior written permission.
19 19 *
20 20 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
21 21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 23 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
24 24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
↓ open down ↓ |
26 lines elided |
↑ open up ↑ |
27 27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 30 * SUCH DAMAGE.
31 31 *
32 32 * $Id: smbfs_vnops.c,v 1.128.36.1 2005/05/27 02:35:28 lindak Exp $
33 33 */
34 34
35 35 /*
36 36 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
37 + * Copyright 2018 Nexenta Systems, Inc. All rights reserved.
37 38 */
38 39
40 +/*
41 + * Vnode operations
42 + *
43 + * This file is similar to nfs3_vnops.c
44 + */
45 +
46 +#include <sys/param.h>
39 47 #include <sys/systm.h>
40 48 #include <sys/cred.h>
41 49 #include <sys/vnode.h>
42 50 #include <sys/vfs.h>
43 51 #include <sys/filio.h>
44 52 #include <sys/uio.h>
45 53 #include <sys/dirent.h>
46 54 #include <sys/errno.h>
47 55 #include <sys/sunddi.h>
48 56 #include <sys/sysmacros.h>
49 57 #include <sys/kmem.h>
50 58 #include <sys/cmn_err.h>
51 59 #include <sys/vfs_opreg.h>
52 60 #include <sys/policy.h>
61 +#include <sys/sdt.h>
62 +#include <sys/taskq_impl.h>
63 +#include <sys/zone.h>
53 64
65 +#ifdef _KERNEL
66 +#include <sys/vmsystm.h> // for desfree
67 +#include <vm/hat.h>
68 +#include <vm/as.h>
69 +#include <vm/page.h>
70 +#include <vm/pvn.h>
71 +#include <vm/seg.h>
72 +#include <vm/seg_map.h>
73 +#include <vm/seg_kpm.h>
74 +#include <vm/seg_vn.h>
75 +#endif // _KERNEL
76 +
54 77 #include <netsmb/smb_osdep.h>
55 78 #include <netsmb/smb.h>
56 79 #include <netsmb/smb_conn.h>
57 80 #include <netsmb/smb_subr.h>
58 81
59 82 #include <smbfs/smbfs.h>
60 83 #include <smbfs/smbfs_node.h>
61 84 #include <smbfs/smbfs_subr.h>
62 85
63 86 #include <sys/fs/smbfs_ioctl.h>
64 87 #include <fs/fs_subr.h>
65 88
89 +#ifndef MAXOFF32_T
90 +#define MAXOFF32_T 0x7fffffff
91 +#endif
92 +
66 93 /*
67 94 * We assign directory offsets like the NFS client, where the
68 95 * offset increments by _one_ after each directory entry.
69 96 * Further, the entries "." and ".." are always at offsets
70 97 * zero and one (respectively) and the "real" entries from
71 98 * the server appear at offsets starting with two. This
72 99 * macro is used to initialize the n_dirofs field after
73 100 * setting n_dirseq with a _findopen call.
74 101 */
75 102 #define FIRST_DIROFS 2
76 103
77 104 /*
78 105 * These characters are illegal in NTFS file names.
79 106 * ref: http://support.microsoft.com/kb/147438
80 107 *
81 108 * Careful! The check in the XATTR case skips the
82 109 * first character to allow colon in XATTR names.
83 110 */
84 111 static const char illegal_chars[] = {
85 112 ':', /* colon - keep this first! */
86 113 '\\', /* back slash */
87 114 '/', /* slash */
88 115 '*', /* asterisk */
89 116 '?', /* question mark */
90 117 '"', /* double quote */
91 118 '<', /* less than sign */
92 119 '>', /* greater than sign */
93 120 '|', /* vertical bar */
|
↓ open down ↓ |
18 lines elided |
↑ open up ↑ |
94 121 0
95 122 };
96 123
97 124 /*
98 125 * Turning this on causes nodes to be created in the cache
99 126 * during directory listings, normally avoiding a second
100 127 * OtW attribute fetch just after a readdir.
101 128 */
102 129 int smbfs_fastlookup = 1;
103 130
131 +struct vnodeops *smbfs_vnodeops = NULL;
132 +
104 133 /* local static function defines */
105 134
106 135 static int smbfslookup_cache(vnode_t *, char *, int, vnode_t **,
107 136 cred_t *);
108 137 static int smbfslookup(vnode_t *dvp, char *nm, vnode_t **vpp, cred_t *cr,
109 138 int cache_ok, caller_context_t *);
110 -static int smbfsrename(vnode_t *odvp, char *onm, vnode_t *ndvp, char *nnm,
111 - cred_t *cr, caller_context_t *);
139 +static int smbfsremove(vnode_t *dvp, vnode_t *vp, struct smb_cred *scred,
140 + int flags);
141 +static int smbfsrename(vnode_t *odvp, vnode_t *ovp, vnode_t *ndvp,
142 + char *nnm, struct smb_cred *scred, int flags);
112 143 static int smbfssetattr(vnode_t *, struct vattr *, int, cred_t *);
113 144 static int smbfs_accessx(void *, int, cred_t *);
114 145 static int smbfs_readvdir(vnode_t *vp, uio_t *uio, cred_t *cr, int *eofp,
115 146 caller_context_t *);
147 +static int smbfsflush(smbnode_t *, struct smb_cred *);
116 148 static void smbfs_rele_fid(smbnode_t *, struct smb_cred *);
117 149 static uint32_t xvattr_to_dosattr(smbnode_t *, struct vattr *);
118 150
151 +static int smbfs_fsync(vnode_t *, int, cred_t *, caller_context_t *);
152 +
153 +static int smbfs_putpage(vnode_t *, offset_t, size_t, int, cred_t *,
154 + caller_context_t *);
155 +#ifdef _KERNEL
156 +static int smbfs_getapage(vnode_t *, u_offset_t, size_t, uint_t *,
157 + page_t *[], size_t, struct seg *, caddr_t,
158 + enum seg_rw, cred_t *);
159 +static int smbfs_putapage(vnode_t *, page_t *, u_offset_t *, size_t *,
160 + int, cred_t *);
161 +static void smbfs_delmap_async(void *);
162 +
163 +static int smbfs_rdwrlbn(vnode_t *, page_t *, u_offset_t, size_t, int,
164 + cred_t *);
165 +static int smbfs_bio(struct buf *, int, cred_t *);
166 +static int smbfs_writenp(smbnode_t *np, caddr_t base, int tcount,
167 + struct uio *uiop, int pgcreated);
168 +#endif // _KERNEL
169 +
119 170 /*
171 + * Error flags used to pass information about certain special errors
172 + * which need to be handled specially.
173 + */
174 +#define SMBFS_EOF -98
175 +
176 +/* When implementing OtW locks, make this a real function. */
177 +#define smbfs_lm_has_sleep(vp) 0
178 +
179 +/*
120 180 * These are the vnode ops routines which implement the vnode interface to
121 181 * the networked file system. These routines just take their parameters,
122 182 * make them look networkish by putting the right info into interface structs,
123 183 * and then calling the appropriate remote routine(s) to do the work.
124 184 *
125 185 * Note on directory name lookup caching: If we detect a stale fhandle,
126 186 * we purge the directory cache relative to that vnode. This way, the
127 187 * user won't get burned by the cache repeatedly. See <smbfs/smbnode.h> for
128 188 * more details on smbnode locking.
129 189 */
130 190
131 -static int smbfs_open(vnode_t **, int, cred_t *, caller_context_t *);
132 -static int smbfs_close(vnode_t *, int, int, offset_t, cred_t *,
133 - caller_context_t *);
134 -static int smbfs_read(vnode_t *, struct uio *, int, cred_t *,
135 - caller_context_t *);
136 -static int smbfs_write(vnode_t *, struct uio *, int, cred_t *,
137 - caller_context_t *);
138 -static int smbfs_ioctl(vnode_t *, int, intptr_t, int, cred_t *, int *,
139 - caller_context_t *);
140 -static int smbfs_getattr(vnode_t *, struct vattr *, int, cred_t *,
141 - caller_context_t *);
142 -static int smbfs_setattr(vnode_t *, struct vattr *, int, cred_t *,
143 - caller_context_t *);
144 -static int smbfs_access(vnode_t *, int, int, cred_t *, caller_context_t *);
145 -static int smbfs_fsync(vnode_t *, int, cred_t *, caller_context_t *);
146 -static void smbfs_inactive(vnode_t *, cred_t *, caller_context_t *);
147 -static int smbfs_lookup(vnode_t *, char *, vnode_t **, struct pathname *,
148 - int, vnode_t *, cred_t *, caller_context_t *,
149 - int *, pathname_t *);
150 -static int smbfs_create(vnode_t *, char *, struct vattr *, enum vcexcl,
151 - int, vnode_t **, cred_t *, int, caller_context_t *,
152 - vsecattr_t *);
153 -static int smbfs_remove(vnode_t *, char *, cred_t *, caller_context_t *,
154 - int);
155 -static int smbfs_rename(vnode_t *, char *, vnode_t *, char *, cred_t *,
156 - caller_context_t *, int);
157 -static int smbfs_mkdir(vnode_t *, char *, struct vattr *, vnode_t **,
158 - cred_t *, caller_context_t *, int, vsecattr_t *);
159 -static int smbfs_rmdir(vnode_t *, char *, vnode_t *, cred_t *,
160 - caller_context_t *, int);
161 -static int smbfs_readdir(vnode_t *, struct uio *, cred_t *, int *,
162 - caller_context_t *, int);
163 -static int smbfs_rwlock(vnode_t *, int, caller_context_t *);
164 -static void smbfs_rwunlock(vnode_t *, int, caller_context_t *);
165 -static int smbfs_seek(vnode_t *, offset_t, offset_t *, caller_context_t *);
166 -static int smbfs_frlock(vnode_t *, int, struct flock64 *, int, offset_t,
167 - struct flk_callback *, cred_t *, caller_context_t *);
168 -static int smbfs_space(vnode_t *, int, struct flock64 *, int, offset_t,
169 - cred_t *, caller_context_t *);
170 -static int smbfs_pathconf(vnode_t *, int, ulong_t *, cred_t *,
171 - caller_context_t *);
172 -static int smbfs_setsecattr(vnode_t *, vsecattr_t *, int, cred_t *,
173 - caller_context_t *);
174 -static int smbfs_getsecattr(vnode_t *, vsecattr_t *, int, cred_t *,
175 - caller_context_t *);
176 -static int smbfs_shrlock(vnode_t *, int, struct shrlock *, int, cred_t *,
177 - caller_context_t *);
178 191
179 -/* Dummy function to use until correct function is ported in */
180 -int noop_vnodeop() {
181 - return (0);
182 -}
183 -
184 -struct vnodeops *smbfs_vnodeops = NULL;
185 -
186 192 /*
187 - * Most unimplemented ops will return ENOSYS because of fs_nosys().
188 - * The only ops where that won't work are ACCESS (due to open(2)
189 - * failures) and ... (anything else left?)
190 - */
191 -const fs_operation_def_t smbfs_vnodeops_template[] = {
192 - { VOPNAME_OPEN, { .vop_open = smbfs_open } },
193 - { VOPNAME_CLOSE, { .vop_close = smbfs_close } },
194 - { VOPNAME_READ, { .vop_read = smbfs_read } },
195 - { VOPNAME_WRITE, { .vop_write = smbfs_write } },
196 - { VOPNAME_IOCTL, { .vop_ioctl = smbfs_ioctl } },
197 - { VOPNAME_GETATTR, { .vop_getattr = smbfs_getattr } },
198 - { VOPNAME_SETATTR, { .vop_setattr = smbfs_setattr } },
199 - { VOPNAME_ACCESS, { .vop_access = smbfs_access } },
200 - { VOPNAME_LOOKUP, { .vop_lookup = smbfs_lookup } },
201 - { VOPNAME_CREATE, { .vop_create = smbfs_create } },
202 - { VOPNAME_REMOVE, { .vop_remove = smbfs_remove } },
203 - { VOPNAME_LINK, { .error = fs_nosys } }, /* smbfs_link, */
204 - { VOPNAME_RENAME, { .vop_rename = smbfs_rename } },
205 - { VOPNAME_MKDIR, { .vop_mkdir = smbfs_mkdir } },
206 - { VOPNAME_RMDIR, { .vop_rmdir = smbfs_rmdir } },
207 - { VOPNAME_READDIR, { .vop_readdir = smbfs_readdir } },
208 - { VOPNAME_SYMLINK, { .error = fs_nosys } }, /* smbfs_symlink, */
209 - { VOPNAME_READLINK, { .error = fs_nosys } }, /* smbfs_readlink, */
210 - { VOPNAME_FSYNC, { .vop_fsync = smbfs_fsync } },
211 - { VOPNAME_INACTIVE, { .vop_inactive = smbfs_inactive } },
212 - { VOPNAME_FID, { .error = fs_nosys } }, /* smbfs_fid, */
213 - { VOPNAME_RWLOCK, { .vop_rwlock = smbfs_rwlock } },
214 - { VOPNAME_RWUNLOCK, { .vop_rwunlock = smbfs_rwunlock } },
215 - { VOPNAME_SEEK, { .vop_seek = smbfs_seek } },
216 - { VOPNAME_FRLOCK, { .vop_frlock = smbfs_frlock } },
217 - { VOPNAME_SPACE, { .vop_space = smbfs_space } },
218 - { VOPNAME_REALVP, { .error = fs_nosys } }, /* smbfs_realvp, */
219 - { VOPNAME_GETPAGE, { .error = fs_nosys } }, /* smbfs_getpage, */
220 - { VOPNAME_PUTPAGE, { .error = fs_nosys } }, /* smbfs_putpage, */
221 - { VOPNAME_MAP, { .error = fs_nosys } }, /* smbfs_map, */
222 - { VOPNAME_ADDMAP, { .error = fs_nosys } }, /* smbfs_addmap, */
223 - { VOPNAME_DELMAP, { .error = fs_nosys } }, /* smbfs_delmap, */
224 - { VOPNAME_DUMP, { .error = fs_nosys } }, /* smbfs_dump, */
225 - { VOPNAME_PATHCONF, { .vop_pathconf = smbfs_pathconf } },
226 - { VOPNAME_PAGEIO, { .error = fs_nosys } }, /* smbfs_pageio, */
227 - { VOPNAME_SETSECATTR, { .vop_setsecattr = smbfs_setsecattr } },
228 - { VOPNAME_GETSECATTR, { .vop_getsecattr = smbfs_getsecattr } },
229 - { VOPNAME_SHRLOCK, { .vop_shrlock = smbfs_shrlock } },
230 - { NULL, NULL }
231 -};
232 -
233 -/*
234 193 * XXX
235 194 * When new and relevant functionality is enabled, we should be
236 195 * calling vfs_set_feature() to inform callers that pieces of
237 196 * functionality are available, per PSARC 2007/227.
238 197 */
239 198 /* ARGSUSED */
240 199 static int
241 200 smbfs_open(vnode_t **vpp, int flag, cred_t *cr, caller_context_t *ct)
242 201 {
243 202 smbnode_t *np;
244 203 vnode_t *vp;
245 204 smbfattr_t fa;
246 - u_int32_t rights, rightsrcvd;
247 - u_int16_t fid, oldfid;
248 - int oldgenid;
205 + smb_fh_t *fid = NULL;
206 + smb_fh_t *oldfid;
207 + uint32_t rights;
249 208 struct smb_cred scred;
250 209 smbmntinfo_t *smi;
251 210 smb_share_t *ssp;
252 211 cred_t *oldcr;
253 - int tmperror;
254 212 int error = 0;
255 213
256 214 vp = *vpp;
257 215 np = VTOSMB(vp);
258 216 smi = VTOSMI(vp);
259 217 ssp = smi->smi_share;
260 218
261 219 if (curproc->p_zone != smi->smi_zone_ref.zref_zone)
262 220 return (EIO);
263 221
264 222 if (smi->smi_flags & SMI_DEAD || vp->v_vfsp->vfs_flag & VFS_UNMOUNTED)
265 223 return (EIO);
266 224
267 225 if (vp->v_type != VREG && vp->v_type != VDIR) { /* XXX VLNK? */
268 226 SMBVDEBUG("open eacces vtype=%d\n", vp->v_type);
269 227 return (EACCES);
270 228 }
271 229
272 230 /*
273 231 * Get exclusive access to n_fid and related stuff.
274 232 * No returns after this until out.
|
↓ open down ↓ |
11 lines elided |
↑ open up ↑ |
275 233 */
276 234 if (smbfs_rw_enter_sig(&np->r_lkserlock, RW_WRITER, SMBINTR(vp)))
277 235 return (EINTR);
278 236 smb_credinit(&scred, cr);
279 237
280 238 /*
281 239 * Keep track of the vnode type at first open.
282 240 * It may change later, and we need close to do
283 241 * cleanup for the type we opened. Also deny
284 242 * open of new types until old type is closed.
285 - * XXX: Per-open instance nodes would help.
286 243 */
287 244 if (np->n_ovtype == VNON) {
288 245 ASSERT(np->n_dirrefs == 0);
289 246 ASSERT(np->n_fidrefs == 0);
290 247 } else if (np->n_ovtype != vp->v_type) {
291 248 SMBVDEBUG("open n_ovtype=%d v_type=%d\n",
292 249 np->n_ovtype, vp->v_type);
293 250 error = EACCES;
294 251 goto out;
295 252 }
296 253
297 254 /*
298 255 * Directory open. See smbfs_readvdir()
299 256 */
300 257 if (vp->v_type == VDIR) {
301 258 if (np->n_dirseq == NULL) {
302 259 /* first open */
303 260 error = smbfs_smb_findopen(np, "*", 1,
304 261 SMB_FA_SYSTEM | SMB_FA_HIDDEN | SMB_FA_DIR,
305 262 &scred, &np->n_dirseq);
306 263 if (error != 0)
307 264 goto out;
308 265 }
309 266 np->n_dirofs = FIRST_DIROFS;
310 267 np->n_dirrefs++;
311 268 goto have_fid;
312 269 }
313 270
314 271 /*
315 272 * If caller specified O_TRUNC/FTRUNC, then be sure to set
|
↓ open down ↓ |
20 lines elided |
↑ open up ↑ |
316 273 * FWRITE (to drive successful setattr(size=0) after open)
317 274 */
318 275 if (flag & FTRUNC)
319 276 flag |= FWRITE;
320 277
321 278 /*
322 279 * If we already have it open, and the FID is still valid,
323 280 * check whether the rights are sufficient for FID reuse.
324 281 */
325 282 if (np->n_fidrefs > 0 &&
326 - np->n_vcgenid == ssp->ss_vcgenid) {
283 + (fid = np->n_fid) != NULL &&
284 + fid->fh_vcgenid == ssp->ss_vcgenid) {
327 285 int upgrade = 0;
328 286
329 287 if ((flag & FWRITE) &&
330 - !(np->n_rights & SA_RIGHT_FILE_WRITE_DATA))
288 + !(fid->fh_rights & SA_RIGHT_FILE_WRITE_DATA))
331 289 upgrade = 1;
332 290 if ((flag & FREAD) &&
333 - !(np->n_rights & SA_RIGHT_FILE_READ_DATA))
291 + !(fid->fh_rights & SA_RIGHT_FILE_READ_DATA))
334 292 upgrade = 1;
335 293 if (!upgrade) {
336 294 /*
337 295 * the existing open is good enough
338 296 */
339 297 np->n_fidrefs++;
340 298 goto have_fid;
341 299 }
300 + fid = NULL;
342 301 }
343 - rights = np->n_fidrefs ? np->n_rights : 0;
302 + rights = (fid != NULL) ? fid->fh_rights : 0;
344 303
345 304 /*
346 305 * we always ask for READ_CONTROL so we can always get the
347 306 * owner/group IDs to satisfy a stat. Ditto attributes.
348 307 */
349 308 rights |= (STD_RIGHT_READ_CONTROL_ACCESS |
350 309 SA_RIGHT_FILE_READ_ATTRIBUTES);
351 310 if ((flag & FREAD))
352 311 rights |= SA_RIGHT_FILE_READ_DATA;
353 312 if ((flag & FWRITE))
354 313 rights |= SA_RIGHT_FILE_WRITE_DATA |
355 314 SA_RIGHT_FILE_APPEND_DATA |
356 315 SA_RIGHT_FILE_WRITE_ATTRIBUTES;
357 316
358 317 bzero(&fa, sizeof (fa));
359 318 error = smbfs_smb_open(np,
360 319 NULL, 0, 0, /* name nmlen xattr */
361 320 rights, &scred,
362 - &fid, &rightsrcvd, &fa);
321 + &fid, &fa);
363 322 if (error)
364 323 goto out;
365 324 smbfs_attrcache_fa(vp, &fa);
366 325
367 326 /*
368 327 * We have a new FID and access rights.
369 328 */
370 329 oldfid = np->n_fid;
371 - oldgenid = np->n_vcgenid;
372 330 np->n_fid = fid;
373 - np->n_vcgenid = ssp->ss_vcgenid;
374 - np->n_rights = rightsrcvd;
375 331 np->n_fidrefs++;
376 - if (np->n_fidrefs > 1 &&
377 - oldgenid == ssp->ss_vcgenid) {
378 - /*
379 - * We already had it open (presumably because
380 - * it was open with insufficient rights.)
381 - * Close old wire-open.
382 - */
383 - tmperror = smbfs_smb_close(ssp,
384 - oldfid, NULL, &scred);
385 - if (tmperror)
386 - SMBVDEBUG("error %d closing %s\n",
387 - tmperror, np->n_rpath);
388 - }
332 + if (oldfid != NULL)
333 + smb_fh_rele(oldfid);
389 334
390 335 /*
391 336 * This thread did the open.
392 337 * Save our credentials too.
393 338 */
394 339 mutex_enter(&np->r_statelock);
395 340 oldcr = np->r_cred;
396 341 np->r_cred = cr;
397 342 crhold(cr);
398 343 if (oldcr)
399 344 crfree(oldcr);
400 345 mutex_exit(&np->r_statelock);
401 346
402 347 have_fid:
403 348 /*
404 349 * Keep track of the vnode type at first open.
405 350 * (see comments above)
406 351 */
407 352 if (np->n_ovtype == VNON)
408 353 np->n_ovtype = vp->v_type;
409 354
410 355 out:
411 356 smb_credrele(&scred);
412 357 smbfs_rw_exit(&np->r_lkserlock);
413 358 return (error);
|
↓ open down ↓ |
15 lines elided |
↑ open up ↑ |
414 359 }
415 360
416 361 /*ARGSUSED*/
417 362 static int
418 363 smbfs_close(vnode_t *vp, int flag, int count, offset_t offset, cred_t *cr,
419 364 caller_context_t *ct)
420 365 {
421 366 smbnode_t *np;
422 367 smbmntinfo_t *smi;
423 368 struct smb_cred scred;
369 + int error = 0;
424 370
425 371 np = VTOSMB(vp);
426 372 smi = VTOSMI(vp);
427 373
428 374 /*
429 375 * Don't "bail out" for VFS_UNMOUNTED here,
430 376 * as we want to do cleanup, etc.
431 377 */
432 378
433 379 /*
434 380 * zone_enter(2) prevents processes from changing zones with SMBFS files
435 381 * open; if we happen to get here from the wrong zone we can't do
436 382 * anything over the wire.
437 383 */
438 384 if (smi->smi_zone_ref.zref_zone != curproc->p_zone) {
439 385 /*
440 386 * We could attempt to clean up locks, except we're sure
441 387 * that the current process didn't acquire any locks on
442 388 * the file: any attempt to lock a file belong to another zone
443 389 * will fail, and one can't lock an SMBFS file and then change
444 390 * zones, as that fails too.
445 391 *
446 392 * Returning an error here is the sane thing to do. A
447 393 * subsequent call to VN_RELE() which translates to a
448 394 * smbfs_inactive() will clean up state: if the zone of the
449 395 * vnode's origin is still alive and kicking, an async worker
450 396 * thread will handle the request (from the correct zone), and
451 397 * everything (minus the final smbfs_getattr_otw() call) should
452 398 * be OK. If the zone is going away smbfs_async_inactive() will
453 399 * throw away cached pages inline.
454 400 */
455 401 return (EIO);
456 402 }
457 403
458 404 /*
459 405 * If we are using local locking for this filesystem, then
460 406 * release all of the SYSV style record locks. Otherwise,
|
↓ open down ↓ |
27 lines elided |
↑ open up ↑ |
461 407 * we are doing network locking and we need to release all
462 408 * of the network locks. All of the locks held by this
463 409 * process on this file are released no matter what the
464 410 * incoming reference count is.
465 411 */
466 412 if (smi->smi_flags & SMI_LLOCK) {
467 413 pid_t pid = ddi_get_pid();
468 414 cleanlocks(vp, pid, 0);
469 415 cleanshares(vp, pid);
470 416 }
417 + /*
418 + * else doing OtW locking. SMB servers drop all locks
419 + * on the file ID we close here, so no _lockrelease()
420 + */
471 421
472 422 /*
473 423 * This (passed in) count is the ref. count from the
474 424 * user's file_t before the closef call (fio.c).
475 - * We only care when the reference goes away.
425 + * The rest happens only on last close.
476 426 */
477 427 if (count > 1)
478 428 return (0);
479 429
430 + /* NFS has DNLC purge here. */
431 +
480 432 /*
433 + * If the file was open for write and there are pages,
434 + * then make sure dirty pages written back.
435 + *
436 + * NFS does this async when "close-to-open" is off
437 + * (MI_NOCTO flag is set) to avoid blocking the caller.
438 + * For now, always do this synchronously (no B_ASYNC).
439 + */
440 + if ((flag & FWRITE) && vn_has_cached_data(vp)) {
441 + error = smbfs_putpage(vp, (offset_t)0, 0, 0, cr, ct);
442 + if (error == EAGAIN)
443 + error = 0;
444 + }
445 + if (error == 0) {
446 + mutex_enter(&np->r_statelock);
447 + np->r_flags &= ~RSTALE;
448 + np->r_error = 0;
449 + mutex_exit(&np->r_statelock);
450 + }
451 +
452 + /*
481 453 * Decrement the reference count for the FID
482 454 * and possibly do the OtW close.
483 455 *
484 456 * Exclusive lock for modifying n_fid stuff.
485 457 * Don't want this one ever interruptible.
486 458 */
487 459 (void) smbfs_rw_enter_sig(&np->r_lkserlock, RW_WRITER, 0);
488 460 smb_credinit(&scred, cr);
489 461
490 462 smbfs_rele_fid(np, &scred);
491 463
492 464 smb_credrele(&scred);
493 465 smbfs_rw_exit(&np->r_lkserlock);
494 466
495 467 return (0);
496 468 }
|
↓ open down ↓ |
6 lines elided |
↑ open up ↑ |
497 469
498 470 /*
499 471 * Helper for smbfs_close. Decrement the reference count
500 472 * for an SMB-level file or directory ID, and when the last
501 473 * reference for the fid goes away, do the OtW close.
502 474 * Also called in smbfs_inactive (defensive cleanup).
503 475 */
504 476 static void
505 477 smbfs_rele_fid(smbnode_t *np, struct smb_cred *scred)
506 478 {
507 - smb_share_t *ssp;
508 479 cred_t *oldcr;
509 480 struct smbfs_fctx *fctx;
510 481 int error;
511 - uint16_t ofid;
482 + smb_fh_t *ofid;
512 483
513 - ssp = np->n_mount->smi_share;
514 484 error = 0;
515 485
516 486 /* Make sure we serialize for n_dirseq use. */
517 487 ASSERT(smbfs_rw_lock_held(&np->r_lkserlock, RW_WRITER));
518 488
519 489 /*
520 490 * Note that vp->v_type may change if a remote node
521 491 * is deleted and recreated as a different type, and
522 492 * our getattr may change v_type accordingly.
523 493 * Now use n_ovtype to keep track of the v_type
524 494 * we had during open (see comments above).
525 495 */
526 496 switch (np->n_ovtype) {
527 497 case VDIR:
528 498 ASSERT(np->n_dirrefs > 0);
529 499 if (--np->n_dirrefs)
530 500 return;
531 501 if ((fctx = np->n_dirseq) != NULL) {
|
↓ open down ↓ |
8 lines elided |
↑ open up ↑ |
532 502 np->n_dirseq = NULL;
533 503 np->n_dirofs = 0;
534 504 error = smbfs_smb_findclose(fctx, scred);
535 505 }
536 506 break;
537 507
538 508 case VREG:
539 509 ASSERT(np->n_fidrefs > 0);
540 510 if (--np->n_fidrefs)
541 511 return;
542 - if ((ofid = np->n_fid) != SMB_FID_UNUSED) {
543 - np->n_fid = SMB_FID_UNUSED;
544 - /* After reconnect, n_fid is invalid */
545 - if (np->n_vcgenid == ssp->ss_vcgenid) {
546 - error = smbfs_smb_close(
547 - ssp, ofid, NULL, scred);
548 - }
512 + if ((ofid = np->n_fid) != NULL) {
513 + np->n_fid = NULL;
514 + smb_fh_rele(ofid);
549 515 }
550 516 break;
551 517
552 518 default:
553 519 SMBVDEBUG("bad n_ovtype %d\n", np->n_ovtype);
554 520 break;
555 521 }
556 522 if (error) {
557 523 SMBVDEBUG("error %d closing %s\n",
558 524 error, np->n_rpath);
559 525 }
560 526
561 527 /* Allow next open to use any v_type. */
562 528 np->n_ovtype = VNON;
563 529
564 530 /*
565 531 * Other "last close" stuff.
566 532 */
567 533 mutex_enter(&np->r_statelock);
568 534 if (np->n_flag & NATTRCHANGED)
569 535 smbfs_attrcache_rm_locked(np);
570 536 oldcr = np->r_cred;
571 537 np->r_cred = NULL;
572 538 mutex_exit(&np->r_statelock);
573 539 if (oldcr != NULL)
574 540 crfree(oldcr);
575 541 }
|
↓ open down ↓ |
17 lines elided |
↑ open up ↑ |
576 542
577 543 /* ARGSUSED */
578 544 static int
579 545 smbfs_read(vnode_t *vp, struct uio *uiop, int ioflag, cred_t *cr,
580 546 caller_context_t *ct)
581 547 {
582 548 struct smb_cred scred;
583 549 struct vattr va;
584 550 smbnode_t *np;
585 551 smbmntinfo_t *smi;
586 - smb_share_t *ssp;
587 552 offset_t endoff;
588 553 ssize_t past_eof;
589 554 int error;
590 555
591 556 np = VTOSMB(vp);
592 557 smi = VTOSMI(vp);
593 - ssp = smi->smi_share;
594 558
595 559 if (curproc->p_zone != smi->smi_zone_ref.zref_zone)
596 560 return (EIO);
597 561
598 562 if (smi->smi_flags & SMI_DEAD || vp->v_vfsp->vfs_flag & VFS_UNMOUNTED)
599 563 return (EIO);
600 564
601 565 ASSERT(smbfs_rw_lock_held(&np->r_rwlock, RW_READER));
602 566
603 567 if (vp->v_type != VREG)
604 568 return (EISDIR);
605 569
606 570 if (uiop->uio_resid == 0)
607 571 return (0);
608 572
609 573 /*
610 574 * Like NFS3, just check for 63-bit overflow.
611 575 * Our SMB layer takes care to return EFBIG
612 576 * when it has to fallback to a 32-bit call.
613 577 */
614 578 endoff = uiop->uio_loffset + uiop->uio_resid;
615 579 if (uiop->uio_loffset < 0 || endoff < 0)
616 580 return (EINVAL);
617 581
618 582 /* get vnode attributes from server */
619 583 va.va_mask = AT_SIZE | AT_MTIME;
620 584 if (error = smbfsgetattr(vp, &va, cr))
621 585 return (error);
622 586
623 587 /* Update mtime with mtime from server here? */
624 588
625 589 /* if offset is beyond EOF, read nothing */
626 590 if (uiop->uio_loffset >= va.va_size)
627 591 return (0);
628 592
629 593 /*
|
↓ open down ↓ |
26 lines elided |
↑ open up ↑ |
630 594 * Limit the read to the remaining file size.
631 595 * Do this by temporarily reducing uio_resid
632 596 * by the amount that lies beyond the EOF.
633 597 */
634 598 if (endoff > va.va_size) {
635 599 past_eof = (ssize_t)(endoff - va.va_size);
636 600 uiop->uio_resid -= past_eof;
637 601 } else
638 602 past_eof = 0;
639 603
640 - /* Shared lock for n_fid use in smb_rwuio */
641 - if (smbfs_rw_enter_sig(&np->r_lkserlock, RW_READER, SMBINTR(vp)))
642 - return (EINTR);
643 - smb_credinit(&scred, cr);
604 + /*
605 + * Bypass VM if caching has been disabled (e.g., locking) or if
606 + * using client-side direct I/O and the file is not mmap'd and
607 + * there are no cached pages.
608 + */
609 + if ((vp->v_flag & VNOCACHE) ||
610 + (((np->r_flags & RDIRECTIO) || (smi->smi_flags & SMI_DIRECTIO)) &&
611 + np->r_mapcnt == 0 && np->r_inmap == 0 &&
612 + !vn_has_cached_data(vp))) {
644 613
645 - /* After reconnect, n_fid is invalid */
646 - if (np->n_vcgenid != ssp->ss_vcgenid)
647 - error = ESTALE;
648 - else
649 - error = smb_rwuio(ssp, np->n_fid, UIO_READ,
614 + /* Shared lock for n_fid use in smb_rwuio */
615 + if (smbfs_rw_enter_sig(&np->r_lkserlock, RW_READER, SMBINTR(vp)))
616 + return (EINTR);
617 + smb_credinit(&scred, cr);
618 +
619 + error = smb_rwuio(np->n_fid, UIO_READ,
650 620 uiop, &scred, smb_timo_read);
651 621
652 - smb_credrele(&scred);
653 - smbfs_rw_exit(&np->r_lkserlock);
622 + smb_credrele(&scred);
623 + smbfs_rw_exit(&np->r_lkserlock);
654 624
625 + /* undo adjustment of resid */
626 + uiop->uio_resid += past_eof;
627 +
628 + return (error);
629 + }
630 +
631 +#ifdef _KERNEL
632 + /* (else) Do I/O through segmap. */
633 + do {
634 + caddr_t base;
635 + u_offset_t off;
636 + size_t n;
637 + int on;
638 + uint_t flags;
639 +
640 + off = uiop->uio_loffset & MAXBMASK; /* mapping offset */
641 + on = uiop->uio_loffset & MAXBOFFSET; /* Relative offset */
642 + n = MIN(MAXBSIZE - on, uiop->uio_resid);
643 +
644 + error = smbfs_validate_caches(vp, cr);
645 + if (error)
646 + break;
647 +
648 + /* NFS waits for RINCACHEPURGE here. */
649 +
650 + if (vpm_enable) {
651 + /*
652 + * Copy data.
653 + */
654 + error = vpm_data_copy(vp, off + on, n, uiop,
655 + 1, NULL, 0, S_READ);
656 + } else {
657 + base = segmap_getmapflt(segkmap, vp, off + on, n, 1,
658 + S_READ);
659 +
660 + error = uiomove(base + on, n, UIO_READ, uiop);
661 + }
662 +
663 + if (!error) {
664 + /*
665 + * If read a whole block or read to eof,
666 + * won't need this buffer again soon.
667 + */
668 + mutex_enter(&np->r_statelock);
669 + if (n + on == MAXBSIZE ||
670 + uiop->uio_loffset == np->r_size)
671 + flags = SM_DONTNEED;
672 + else
673 + flags = 0;
674 + mutex_exit(&np->r_statelock);
675 + if (vpm_enable) {
676 + error = vpm_sync_pages(vp, off, n, flags);
677 + } else {
678 + error = segmap_release(segkmap, base, flags);
679 + }
680 + } else {
681 + if (vpm_enable) {
682 + (void) vpm_sync_pages(vp, off, n, 0);
683 + } else {
684 + (void) segmap_release(segkmap, base, 0);
685 + }
686 + }
687 + } while (!error && uiop->uio_resid > 0);
688 +#else // _KERNEL
689 + error = ENOSYS;
690 +#endif // _KERNEL
691 +
655 692 /* undo adjustment of resid */
656 693 uiop->uio_resid += past_eof;
657 694
658 695 return (error);
659 696 }
660 697
661 698
/*
 * smbfs_write: VOP_WRITE for smbfs.
 *
 * Two paths, as in NFS:
 *  - Direct path: when caching is disabled (VNOCACHE, or directio with
 *    no mmap and no cached pages), write over the wire via smb_rwuio().
 *  - Cached path (_KERNEL only): copy through segmap/vpm pages via
 *    smbfs_writenp(), letting the VM system write the pages back.
 *
 * Caller holds r_rwlock as WRITER (asserted below).  uio_resid is
 * temporarily trimmed to the process file-size limit and restored
 * before return; on error, uio state is rolled back to the last
 * consistent point (last_off/last_resid).
 */
/* ARGSUSED */
static int
smbfs_write(vnode_t *vp, struct uio *uiop, int ioflag, cred_t *cr,
	caller_context_t *ct)
{
	struct smb_cred scred;
	struct vattr	va;
	smbnode_t	*np;
	smbmntinfo_t	*smi;
	offset_t	endoff, limit;
	ssize_t		past_limit;
	int		error, timo;
	u_offset_t	last_off;
	size_t		last_resid;
#ifdef	_KERNEL
	uint_t		bsize;
#endif

	np = VTOSMB(vp);
	smi = VTOSMI(vp);

	if (curproc->p_zone != smi->smi_zone_ref.zref_zone)
		return (EIO);

	if (smi->smi_flags & SMI_DEAD || vp->v_vfsp->vfs_flag & VFS_UNMOUNTED)
		return (EIO);

	ASSERT(smbfs_rw_lock_held(&np->r_rwlock, RW_WRITER));

	if (vp->v_type != VREG)
		return (EISDIR);

	if (uiop->uio_resid == 0)
		return (0);

	/*
	 * Handle ioflag bits: (FAPPEND|FSYNC|FDSYNC)
	 * For append or sync writes, drop cached attributes so the
	 * size/times we act on are re-fetched from the server.
	 */
	if (ioflag & (FAPPEND | FSYNC)) {
		if (np->n_flag & NMODIFIED) {
			smbfs_attrcache_remove(np);
		}
	}
	if (ioflag & FAPPEND) {
		/*
		 * File size can be changed by another client
		 *
		 * Todo: Consider redesigning this to use a
		 * handle opened for append instead.
		 */
		va.va_mask = AT_SIZE;
		if (error = smbfsgetattr(vp, &va, cr))
			return (error);
		uiop->uio_loffset = va.va_size;
	}

	/*
	 * Like NFS3, just check for 63-bit overflow.
	 */
	endoff = uiop->uio_loffset + uiop->uio_resid;
	if (uiop->uio_loffset < 0 || endoff < 0)
		return (EINVAL);

	/*
	 * Check to make sure that the process will not exceed
	 * its limit on file size.  It is okay to write up to
	 * the limit, but not beyond.  Thus, the write which
	 * reaches the limit will be short and the next write
	 * will return an error.
	 *
	 * So if we're starting at or beyond the limit, EFBIG.
	 * Otherwise, temporarily reduce resid to the amount
	 * that is after the limit.
	 */
	limit = uiop->uio_llimit;
	if (limit == RLIM64_INFINITY || limit > MAXOFFSET_T)
		limit = MAXOFFSET_T;
	if (uiop->uio_loffset >= limit) {
#ifdef	_KERNEL
		/* Deliver SIGXFSZ (or the rctl action) like other FS. */
		proc_t *p = ttoproc(curthread);

		mutex_enter(&p->p_lock);
		(void) rctl_action(rctlproc_legacy[RLIMIT_FSIZE],
		    p->p_rctls, p, RCA_UNSAFE_SIGINFO);
		mutex_exit(&p->p_lock);
#endif	// _KERNEL
		return (EFBIG);
	}
	if (endoff > limit) {
		past_limit = (ssize_t)(endoff - limit);
		uiop->uio_resid -= past_limit;
	} else
		past_limit = 0;

	/*
	 * Bypass VM if caching has been disabled (e.g., locking) or if
	 * using client-side direct I/O and the file is not mmap'd and
	 * there are no cached pages.
	 */
	if ((vp->v_flag & VNOCACHE) ||
	    (((np->r_flags & RDIRECTIO) || (smi->smi_flags & SMI_DIRECTIO)) &&
	    np->r_mapcnt == 0 && np->r_inmap == 0 &&
	    !vn_has_cached_data(vp))) {

#ifdef	_KERNEL
smbfs_fwrite:
#endif	// _KERNEL
		if (np->r_flags & RSTALE) {
			last_resid = uiop->uio_resid;
			last_off = uiop->uio_loffset;
			error = np->r_error;
			/*
			 * A close may have cleared r_error, if so,
			 * propagate ESTALE error return properly
			 */
			if (error == 0)
				error = ESTALE;
			goto bottom;
		}

		/* Timeout: longer for append. */
		timo = smb_timo_write;
		if (endoff > np->r_size)
			timo = smb_timo_append;

		/* Shared lock for n_fid use in smb_rwuio */
		if (smbfs_rw_enter_sig(&np->r_lkserlock, RW_READER, SMBINTR(vp)))
			return (EINTR);
		/*
		 * NOTE(review): this EINTR return does not restore the
		 * past_limit adjustment to uio_resid — confirm whether
		 * callers care about resid after an interrupted write.
		 */
		smb_credinit(&scred, cr);

		error = smb_rwuio(np->n_fid, UIO_WRITE,
		    uiop, &scred, timo);

		if (error == 0) {
			/* Grow the cached size; mark attrs stale. */
			mutex_enter(&np->r_statelock);
			np->n_flag |= (NFLUSHWIRE | NATTRCHANGED);
			if (uiop->uio_loffset > (offset_t)np->r_size)
				np->r_size = (len_t)uiop->uio_loffset;
			mutex_exit(&np->r_statelock);
			if (ioflag & (FSYNC | FDSYNC)) {
				/* Don't error the I/O if this fails. */
				(void) smbfsflush(np, &scred);
			}
		}

		smb_credrele(&scred);
		smbfs_rw_exit(&np->r_lkserlock);

		/* undo adjustment of resid */
		uiop->uio_resid += past_limit;

		return (error);
	}

#ifdef	_KERNEL
	/* (else) Do I/O through segmap. */
	bsize = vp->v_vfsp->vfs_bsize;

	do {
		caddr_t		base;
		u_offset_t	off;
		size_t		n;
		int		on;
		uint_t		flags;

		off = uiop->uio_loffset & MAXBMASK; /* mapping offset */
		on = uiop->uio_loffset & MAXBOFFSET; /* Relative offset */
		n = MIN(MAXBSIZE - on, uiop->uio_resid);

		/* Remember uio state so errors can roll back cleanly. */
		last_resid = uiop->uio_resid;
		last_off = uiop->uio_loffset;

		if (np->r_flags & RSTALE) {
			error = np->r_error;
			/*
			 * A close may have cleared r_error, if so,
			 * propagate ESTALE error return properly
			 */
			if (error == 0)
				error = ESTALE;
			break;
		}

		/*
		 * From NFS: Don't create dirty pages faster than they
		 * can be cleaned.
		 *
		 * Here NFS also checks for async writes (np->r_awcount)
		 */
		mutex_enter(&np->r_statelock);
		while (np->r_gcount > 0) {
			if (SMBINTR(vp)) {
				klwp_t *lwp = ttolwp(curthread);

				if (lwp != NULL)
					lwp->lwp_nostop++;
				if (!cv_wait_sig(&np->r_cv, &np->r_statelock)) {
					mutex_exit(&np->r_statelock);
					if (lwp != NULL)
						lwp->lwp_nostop--;
					error = EINTR;
					goto bottom;
				}
				if (lwp != NULL)
					lwp->lwp_nostop--;
			} else
				cv_wait(&np->r_cv, &np->r_statelock);
		}
		mutex_exit(&np->r_statelock);

		/*
		 * Touch the page and fault it in if it is not in core
		 * before segmap_getmapflt or vpm_data_copy can lock it.
		 * This is to avoid the deadlock if the buffer is mapped
		 * to the same file through mmap which we want to write.
		 */
		uio_prefaultpages((long)n, uiop);

		if (vpm_enable) {
			/*
			 * It will use kpm mappings, so no need to
			 * pass an address.
			 */
			error = smbfs_writenp(np, NULL, n, uiop, 0);
		} else {
			if (segmap_kpm) {
				int pon = uiop->uio_loffset & PAGEOFFSET;
				size_t pn = MIN(PAGESIZE - pon,
				    uiop->uio_resid);
				int pagecreate;

				mutex_enter(&np->r_statelock);
				pagecreate = (pon == 0) && (pn == PAGESIZE ||
				    uiop->uio_loffset + pn >= np->r_size);
				mutex_exit(&np->r_statelock);

				base = segmap_getmapflt(segkmap, vp, off + on,
				    pn, !pagecreate, S_WRITE);

				error = smbfs_writenp(np, base + pon, n, uiop,
				    pagecreate);

			} else {
				base = segmap_getmapflt(segkmap, vp, off + on,
				    n, 0, S_READ);
				error = smbfs_writenp(np, base + on, n, uiop, 0);
			}
		}

		if (!error) {
			if (smi->smi_flags & SMI_NOAC)
				flags = SM_WRITE;
			else if ((uiop->uio_loffset % bsize) == 0 ||
			    IS_SWAPVP(vp)) {
				/*
				 * Have written a whole block.
				 * Start an asynchronous write
				 * and mark the buffer to
				 * indicate that it won't be
				 * needed again soon.
				 */
				flags = SM_WRITE | SM_ASYNC | SM_DONTNEED;
			} else
				flags = 0;
			if ((ioflag & (FSYNC|FDSYNC)) ||
			    (np->r_flags & ROUTOFSPACE)) {
				flags &= ~SM_ASYNC;
				flags |= SM_WRITE;
			}
			if (vpm_enable) {
				error = vpm_sync_pages(vp, off, n, flags);
			} else {
				error = segmap_release(segkmap, base, flags);
			}
		} else {
			if (vpm_enable) {
				(void) vpm_sync_pages(vp, off, n, 0);
			} else {
				(void) segmap_release(segkmap, base, 0);
			}
			/*
			 * In the event that we got an access error while
			 * faulting in a page for a write-only file just
			 * force a write.
			 */
			if (error == EACCES)
				goto smbfs_fwrite;
		}
	} while (!error && uiop->uio_resid > 0);
#else	// _KERNEL
	last_resid = uiop->uio_resid;
	last_off = uiop->uio_loffset;
	error = ENOSYS;
#endif	// _KERNEL

bottom:
	/* undo adjustment of resid */
	if (error) {
		uiop->uio_resid = last_resid + past_limit;
		uiop->uio_loffset = last_off;
	} else {
		uiop->uio_resid += past_limit;
	}

	return (error);
}
1005 +
#ifdef	_KERNEL

/*
 * Like nfs_client.c: writerp()
 *
 * Write by creating pages and uiomove data onto them.
 *
 * np       - node being written
 * base     - segmap address to copy into (NULL when vpm_enable)
 * tcount   - total bytes to move (<= MAXBSIZE and <= uio_resid)
 * uio      - source of the data; loffset/resid advance as we copy
 * pgcreated- nonzero if the caller already created/mapped the first
 *            page (segmap_getmapflt with forcefault 0 and S_WRITE)
 *
 * Returns 0 or an errno.  Updates r_size and sets RDIRTY under
 * r_statelock; r_rwlock must be held as WRITER (asserted).
 */
int
smbfs_writenp(smbnode_t *np, caddr_t base, int tcount, struct uio *uio,
	int pgcreated)
{
	int		pagecreate;
	int		n;
	int		saved_n;
	caddr_t		saved_base;
	u_offset_t	offset;
	int		error;
	int		sm_error;
	vnode_t		*vp = SMBTOV(np);

	ASSERT(tcount <= MAXBSIZE && tcount <= uio->uio_resid);
	ASSERT(smbfs_rw_lock_held(&np->r_rwlock, RW_WRITER));
	if (!vpm_enable) {
		ASSERT(((uintptr_t)base & MAXBOFFSET) + tcount <= MAXBSIZE);
	}

	/*
	 * Move bytes in at most PAGESIZE chunks. We must avoid
	 * spanning pages in uiomove() because page faults may cause
	 * the cache to be invalidated out from under us. The r_size is not
	 * updated until after the uiomove. If we push the last page of a
	 * file before r_size is correct, we will lose the data written past
	 * the current (and invalid) r_size.
	 */
	do {
		offset = uio->uio_loffset;
		pagecreate = 0;

		/*
		 * n is the number of bytes required to satisfy the request
		 * or the number of bytes to fill out the page.
		 */
		n = (int)MIN((PAGESIZE - (offset & PAGEOFFSET)), tcount);

		/*
		 * Check to see if we can skip reading in the page
		 * and just allocate the memory. We can do this
		 * if we are going to rewrite the entire mapping
		 * or if we are going to write to or beyond the current
		 * end of file from the beginning of the mapping.
		 *
		 * The read of r_size is now protected by r_statelock.
		 */
		mutex_enter(&np->r_statelock);
		/*
		 * When pgcreated is nonzero the caller has already done
		 * a segmap_getmapflt with forcefault 0 and S_WRITE. With
		 * segkpm this means we already have at least one page
		 * created and mapped at base.
		 */
		pagecreate = pgcreated ||
		    ((offset & PAGEOFFSET) == 0 &&
		    (n == PAGESIZE || ((offset + n) >= np->r_size)));

		mutex_exit(&np->r_statelock);
		if (!vpm_enable && pagecreate) {
			/*
			 * The last argument tells segmap_pagecreate() to
			 * always lock the page, as opposed to sometimes
			 * returning with the page locked. This way we avoid a
			 * fault on the ensuing uiomove(), but also
			 * more importantly (to fix bug 1094402) we can
			 * call segmap_fault() to unlock the page in all
			 * cases. An alternative would be to modify
			 * segmap_pagecreate() to tell us when it is
			 * locking a page, but that's a fairly major
			 * interface change.
			 */
			if (pgcreated == 0)
				(void) segmap_pagecreate(segkmap, base,
				    (uint_t)n, 1);
			saved_base = base;
			saved_n = n;
		}

		/*
		 * The number of bytes of data in the last page can not
		 * be accurately be determined while page is being
		 * uiomove'd to and the size of the file being updated.
		 * Thus, inform threads which need to know accurately
		 * how much data is in the last page of the file. They
		 * will not do the i/o immediately, but will arrange for
		 * the i/o to happen later when this modify operation
		 * will have finished.
		 */
		ASSERT(!(np->r_flags & RMODINPROGRESS));
		mutex_enter(&np->r_statelock);
		np->r_flags |= RMODINPROGRESS;
		np->r_modaddr = (offset & MAXBMASK);
		mutex_exit(&np->r_statelock);

		if (vpm_enable) {
			/*
			 * Copy data. If new pages are created, part of
			 * the page that is not written will be initialized
			 * with zeros.
			 */
			error = vpm_data_copy(vp, offset, n, uio,
			    !pagecreate, NULL, 0, S_WRITE);
		} else {
			error = uiomove(base, n, UIO_WRITE, uio);
		}

		/*
		 * r_size is the maximum number of
		 * bytes known to be in the file.
		 * Make sure it is at least as high as the
		 * first unwritten byte pointed to by uio_loffset.
		 */
		mutex_enter(&np->r_statelock);
		if (np->r_size < uio->uio_loffset)
			np->r_size = uio->uio_loffset;
		np->r_flags &= ~RMODINPROGRESS;
		np->r_flags |= RDIRTY;
		mutex_exit(&np->r_statelock);

		/* n = # of bytes written */
		n = (int)(uio->uio_loffset - offset);

		if (!vpm_enable) {
			base += n;
		}
		tcount -= n;
		/*
		 * If we created pages w/o initializing them completely,
		 * we need to zero the part that wasn't set up.
		 * This happens on most EOF write cases and if
		 * we had some sort of error during the uiomove.
		 */
		if (!vpm_enable && pagecreate) {
			if ((uio->uio_loffset & PAGEOFFSET) || n == 0)
				(void) kzero(base, PAGESIZE - n);

			if (pgcreated) {
				/*
				 * Caller is responsible for this page,
				 * it was not created in this loop.
				 */
				pgcreated = 0;
			} else {
				/*
				 * For bug 1094402: segmap_pagecreate locks
				 * page. Unlock it. This also unlocks the
				 * pages allocated by page_create_va() in
				 * segmap_pagecreate().
				 */
				sm_error = segmap_fault(kas.a_hat, segkmap,
				    saved_base, saved_n,
				    F_SOFTUNLOCK, S_WRITE);
				if (error == 0)
					error = sm_error;
			}
		}
	} while (tcount > 0 && error == 0);

	return (error);
}
1174 +
1175 +/*
1176 + * Flags are composed of {B_ASYNC, B_INVAL, B_FREE, B_DONTNEED}
1177 + * Like nfs3_rdwrlbn()
1178 + */
1179 +static int
1180 +smbfs_rdwrlbn(vnode_t *vp, page_t *pp, u_offset_t off, size_t len,
1181 + int flags, cred_t *cr)
1182 +{
1183 + smbmntinfo_t *smi = VTOSMI(vp);
1184 + struct buf *bp;
1185 + int error;
1186 + int sync;
1187 +
1188 + if (curproc->p_zone != smi->smi_zone_ref.zref_zone)
1189 + return (EIO);
1190 +
1191 + if (smi->smi_flags & SMI_DEAD || vp->v_vfsp->vfs_flag & VFS_UNMOUNTED)
1192 + return (EIO);
1193 +
1194 + bp = pageio_setup(pp, len, vp, flags);
1195 + ASSERT(bp != NULL);
1196 +
1197 + /*
1198 + * pageio_setup should have set b_addr to 0. This
1199 + * is correct since we want to do I/O on a page
1200 + * boundary. bp_mapin will use this addr to calculate
1201 + * an offset, and then set b_addr to the kernel virtual
1202 + * address it allocated for us.
1203 + */
1204 + ASSERT(bp->b_un.b_addr == 0);
1205 +
1206 + bp->b_edev = 0;
1207 + bp->b_dev = 0;
1208 + bp->b_lblkno = lbtodb(off);
1209 + bp->b_file = vp;
1210 + bp->b_offset = (offset_t)off;
1211 + bp_mapin(bp);
1212 +
1213 + /*
1214 + * Calculate the desired level of stability to write data.
1215 + */
1216 + if ((flags & (B_WRITE|B_ASYNC)) == (B_WRITE|B_ASYNC) &&
1217 + freemem > desfree) {
1218 + sync = 0;
1219 + } else {
1220 + sync = 1;
1221 + }
1222 +
1223 + error = smbfs_bio(bp, sync, cr);
1224 +
1225 + bp_mapout(bp);
1226 + pageio_done(bp);
1227 +
1228 + return (error);
1229 +}
1230 +
1231 +
/*
 * Corresponds to nfs3_vnops.c : nfs3_bio(), though the NFS code
 * uses nfs3read()/nfs3write() where we use smb_rwuio(). Also,
 * NFS has this later in the file. Move it up here closer to
 * the one call site just above.
 *
 * Performs the transfer described by bp (direction from b_flags)
 * via smb_rwuio().  The transfer is clamped to the cached file
 * size; on a read, memory past EOF is zero-filled.  Errors are
 * recorded in b_error/B_ERROR and in r_error (ESTALE also sets
 * RSTALE and drops cached attributes).  `sync` nonzero asks for a
 * server-side flush after a successful write.
 */
static int
smbfs_bio(struct buf *bp, int sync, cred_t *cr)
{
	struct iovec	aiov[1];
	struct uio	auio;
	struct smb_cred	scred;
	smbnode_t	*np = VTOSMB(bp->b_vp);
	smbmntinfo_t	*smi = np->n_mount;
	offset_t	offset;
	offset_t	endoff;
	size_t		count;
	size_t		past_eof;
	int		error;

	ASSERT(curproc->p_zone == smi->smi_zone_ref.zref_zone);

	offset = ldbtob(bp->b_lblkno);
	count = bp->b_bcount;
	endoff = offset + count;
	if (offset < 0 || endoff < 0)
		return (EINVAL);

	/*
	 * Limit file I/O to the remaining file size, but see
	 * the notes in smbfs_getpage about SMBFS_EOF.
	 */
	mutex_enter(&np->r_statelock);
	if (offset >= np->r_size) {
		mutex_exit(&np->r_statelock);
		if (bp->b_flags & B_READ) {
			return (SMBFS_EOF);
		} else {
			return (EINVAL);
		}
	}
	if (endoff > np->r_size) {
		past_eof = (size_t)(endoff - np->r_size);
		count -= past_eof;
	} else
		past_eof = 0;
	mutex_exit(&np->r_statelock);
	ASSERT(count > 0);

	/* Caller did bp_mapin().  Build a uio over the mapped address. */
	aiov[0].iov_base = bp->b_un.b_addr;
	aiov[0].iov_len = count;
	auio.uio_iov = aiov;
	auio.uio_iovcnt = 1;
	auio.uio_loffset = offset;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_fmode = 0;
	auio.uio_resid = count;

	/* Shared lock for n_fid use in smb_rwuio */
	if (smbfs_rw_enter_sig(&np->r_lkserlock, RW_READER,
	    smi->smi_flags & SMI_INT))
		return (EINTR);
	smb_credinit(&scred, cr);

	DTRACE_IO1(start, struct buf *, bp);

	if (bp->b_flags & B_READ) {

		error = smb_rwuio(np->n_fid, UIO_READ,
		    &auio, &scred, smb_timo_read);

		/* Like NFS, only set b_error here. */
		bp->b_error = error;
		bp->b_resid = auio.uio_resid;

		/* A short read with no error is still a failure here. */
		if (!error && auio.uio_resid != 0)
			error = EIO;
		if (!error && past_eof != 0) {
			/* Zero the memory beyond EOF. */
			bzero(bp->b_un.b_addr + count, past_eof);
		}
	} else {

		error = smb_rwuio(np->n_fid, UIO_WRITE,
		    &auio, &scred, smb_timo_write);

		/* Like NFS, only set b_error here. */
		bp->b_error = error;
		bp->b_resid = auio.uio_resid;

		if (!error && auio.uio_resid != 0)
			error = EIO;
		if (!error && sync) {
			/* Best-effort flush; don't fail the write. */
			(void) smbfsflush(np, &scred);
		}
	}

	/*
	 * This comes from nfs3_commit()
	 */
	if (error != 0) {
		mutex_enter(&np->r_statelock);
		if (error == ESTALE)
			np->r_flags |= RSTALE;
		if (!np->r_error)
			np->r_error = error;
		mutex_exit(&np->r_statelock);
		bp->b_flags |= B_ERROR;
	}

	DTRACE_IO1(done, struct buf *, bp);

	smb_credrele(&scred);
	smbfs_rw_exit(&np->r_lkserlock);

	if (error == ESTALE)
		smbfs_attrcache_remove(np);

	return (error);
}
#endif	// _KERNEL
779 1355
1356 +/*
1357 + * Here NFS has: nfs3write, nfs3read
1358 + * We use smb_rwuio instead.
1359 + */
780 1360
781 1361 /* ARGSUSED */
782 1362 static int
783 1363 smbfs_ioctl(vnode_t *vp, int cmd, intptr_t arg, int flag,
784 1364 cred_t *cr, int *rvalp, caller_context_t *ct)
785 1365 {
786 1366 int error;
787 - smbmntinfo_t *smi;
1367 + smbmntinfo_t *smi;
788 1368
789 1369 smi = VTOSMI(vp);
790 1370
791 1371 if (curproc->p_zone != smi->smi_zone_ref.zref_zone)
792 1372 return (EIO);
793 1373
794 1374 if (smi->smi_flags & SMI_DEAD || vp->v_vfsp->vfs_flag & VFS_UNMOUNTED)
795 1375 return (EIO);
796 1376
797 1377 switch (cmd) {
798 - /* First three from ZFS. XXX - need these? */
799 1378
800 1379 case _FIOFFS:
801 1380 error = smbfs_fsync(vp, 0, cr, ct);
802 1381 break;
803 1382
804 1383 /*
805 1384 * The following two ioctls are used by bfu.
806 1385 * Silently ignore to avoid bfu errors.
807 1386 */
808 1387 case _FIOGDIO:
809 1388 case _FIOSDIO:
810 1389 error = 0;
811 1390 break;
812 1391
813 -#ifdef NOT_YET /* XXX - from the NFS code. */
1392 +#if 0 /* Todo - SMB ioctl query regions */
1393 + case _FIO_SEEK_DATA:
1394 + case _FIO_SEEK_HOLE:
1395 +#endif
1396 +
814 1397 case _FIODIRECTIO:
815 1398 error = smbfs_directio(vp, (int)arg, cr);
816 -#endif
1399 + break;
817 1400
818 1401 /*
819 1402 * Allow get/set with "raw" security descriptor (SD) data.
820 1403 * Useful for testing, diagnosing idmap problems, etc.
821 1404 */
822 1405 case SMBFSIO_GETSD:
823 1406 error = smbfs_acl_iocget(vp, arg, flag, cr);
824 1407 break;
825 1408
826 1409 case SMBFSIO_SETSD:
827 1410 error = smbfs_acl_iocset(vp, arg, flag, cr);
828 1411 break;
829 1412
830 1413 default:
831 1414 error = ENOTTY;
832 1415 break;
833 1416 }
834 1417
835 1418 return (error);
836 1419 }
837 1420
838 1421
839 1422 /*
|
↓ open down ↓ |
13 lines elided |
↑ open up ↑ |
840 1423 * Return either cached or remote attributes. If get remote attr
841 1424 * use them to check and invalidate caches, then cache the new attributes.
842 1425 */
843 1426 /* ARGSUSED */
844 1427 static int
845 1428 smbfs_getattr(vnode_t *vp, struct vattr *vap, int flags, cred_t *cr,
846 1429 caller_context_t *ct)
847 1430 {
848 1431 smbnode_t *np;
849 1432 smbmntinfo_t *smi;
1433 + int error;
850 1434
851 1435 smi = VTOSMI(vp);
852 1436
853 1437 if (curproc->p_zone != smi->smi_zone_ref.zref_zone)
854 1438 return (EIO);
855 1439
856 1440 if (smi->smi_flags & SMI_DEAD || vp->v_vfsp->vfs_flag & VFS_UNMOUNTED)
857 1441 return (EIO);
858 1442
859 1443 /*
860 1444 * If it has been specified that the return value will
861 1445 * just be used as a hint, and we are only being asked
862 1446 * for size, fsid or rdevid, then return the client's
863 1447 * notion of these values without checking to make sure
864 1448 * that the attribute cache is up to date.
865 1449 * The whole point is to avoid an over the wire GETATTR
866 1450 * call.
867 1451 */
868 1452 np = VTOSMB(vp);
869 1453 if (flags & ATTR_HINT) {
870 1454 if (vap->va_mask ==
871 1455 (vap->va_mask & (AT_SIZE | AT_FSID | AT_RDEV))) {
872 1456 mutex_enter(&np->r_statelock);
873 1457 if (vap->va_mask | AT_SIZE)
|
↓ open down ↓ |
14 lines elided |
↑ open up ↑ |
874 1458 vap->va_size = np->r_size;
875 1459 if (vap->va_mask | AT_FSID)
876 1460 vap->va_fsid = vp->v_vfsp->vfs_dev;
877 1461 if (vap->va_mask | AT_RDEV)
878 1462 vap->va_rdev = vp->v_rdev;
879 1463 mutex_exit(&np->r_statelock);
880 1464 return (0);
881 1465 }
882 1466 }
883 1467
1468 + /*
1469 + * Only need to flush pages if asking for the mtime
1470 + * and if there any dirty pages.
1471 + *
1472 + * Here NFS also checks for async writes (np->r_awcount)
1473 + */
1474 + if (vap->va_mask & AT_MTIME) {
1475 + if (vn_has_cached_data(vp) &&
1476 + ((np->r_flags & RDIRTY) != 0)) {
1477 + mutex_enter(&np->r_statelock);
1478 + np->r_gcount++;
1479 + mutex_exit(&np->r_statelock);
1480 + error = smbfs_putpage(vp, (offset_t)0, 0, 0, cr, ct);
1481 + mutex_enter(&np->r_statelock);
1482 + if (error && (error == ENOSPC || error == EDQUOT)) {
1483 + if (!np->r_error)
1484 + np->r_error = error;
1485 + }
1486 + if (--np->r_gcount == 0)
1487 + cv_broadcast(&np->r_cv);
1488 + mutex_exit(&np->r_statelock);
1489 + }
1490 + }
1491 +
884 1492 return (smbfsgetattr(vp, vap, cr));
885 1493 }
886 1494
887 1495 /* smbfsgetattr() in smbfs_client.c */
888 1496
889 1497 /*ARGSUSED4*/
890 1498 static int
891 1499 smbfs_setattr(vnode_t *vp, struct vattr *vap, int flags, cred_t *cr,
892 1500 caller_context_t *ct)
893 1501 {
894 1502 vfs_t *vfsp;
895 1503 smbmntinfo_t *smi;
896 1504 int error;
897 1505 uint_t mask;
898 1506 struct vattr oldva;
899 1507
900 1508 vfsp = vp->v_vfsp;
901 1509 smi = VFTOSMI(vfsp);
902 1510
903 1511 if (curproc->p_zone != smi->smi_zone_ref.zref_zone)
904 1512 return (EIO);
905 1513
906 1514 if (smi->smi_flags & SMI_DEAD || vfsp->vfs_flag & VFS_UNMOUNTED)
907 1515 return (EIO);
908 1516
909 1517 mask = vap->va_mask;
910 1518 if (mask & AT_NOSET)
911 1519 return (EINVAL);
912 1520
913 1521 if (vfsp->vfs_flag & VFS_RDONLY)
914 1522 return (EROFS);
915 1523
916 1524 /*
917 1525 * This is a _local_ access check so that only the owner of
918 1526 * this mount can set attributes. With ACLs enabled, the
919 1527 * file owner can be different from the mount owner, and we
920 1528 * need to check the _mount_ owner here. See _access_rwx
921 1529 */
922 1530 bzero(&oldva, sizeof (oldva));
923 1531 oldva.va_mask = AT_TYPE | AT_MODE;
924 1532 error = smbfsgetattr(vp, &oldva, cr);
925 1533 if (error)
926 1534 return (error);
927 1535 oldva.va_mask |= AT_UID | AT_GID;
928 1536 oldva.va_uid = smi->smi_uid;
929 1537 oldva.va_gid = smi->smi_gid;
930 1538
931 1539 error = secpolicy_vnode_setattr(cr, vp, vap, &oldva, flags,
932 1540 smbfs_accessx, vp);
933 1541 if (error)
934 1542 return (error);
935 1543
936 1544 if (mask & (AT_UID | AT_GID)) {
937 1545 if (smi->smi_flags & SMI_ACL)
938 1546 error = smbfs_acl_setids(vp, vap, cr);
939 1547 else
940 1548 error = ENOSYS;
941 1549 if (error != 0) {
942 1550 SMBVDEBUG("error %d seting UID/GID on %s",
943 1551 error, VTOSMB(vp)->n_rpath);
|
↓ open down ↓ |
50 lines elided |
↑ open up ↑ |
944 1552 /*
945 1553 * It might be more correct to return the
946 1554 * error here, but that causes complaints
947 1555 * when root extracts a cpio archive, etc.
948 1556 * So ignore this error, and go ahead with
949 1557 * the rest of the setattr work.
950 1558 */
951 1559 }
952 1560 }
953 1561
954 - return (smbfssetattr(vp, vap, flags, cr));
1562 + error = smbfssetattr(vp, vap, flags, cr);
1563 +
1564 +#ifdef SMBFS_VNEVENT
1565 + if (error == 0 && (vap->va_mask & AT_SIZE) && vap->va_size == 0)
1566 + vnevent_truncate(vp, ct);
1567 +#endif
1568 +
1569 + return (error);
955 1570 }
956 1571
957 1572 /*
958 1573 * Mostly from Darwin smbfs_setattr()
959 1574 * but then modified a lot.
960 1575 */
961 1576 /* ARGSUSED */
962 1577 static int
963 1578 smbfssetattr(vnode_t *vp, struct vattr *vap, int flags, cred_t *cr)
964 1579 {
965 1580 int error = 0;
966 1581 smbnode_t *np = VTOSMB(vp);
1582 + smbmntinfo_t *smi = np->n_mount;
967 1583 uint_t mask = vap->va_mask;
968 1584 struct timespec *mtime, *atime;
969 1585 struct smb_cred scred;
970 - int cerror, modified = 0;
971 - unsigned short fid;
972 - int have_fid = 0;
1586 + int modified = 0;
1587 + smb_fh_t *fid = NULL;
973 1588 uint32_t rights = 0;
974 1589 uint32_t dosattr = 0;
975 1590
976 1591 ASSERT(curproc->p_zone == VTOSMI(vp)->smi_zone_ref.zref_zone);
977 1592
978 1593 /*
979 1594 * There are no settable attributes on the XATTR dir,
980 1595 * so just silently ignore these. On XATTR files,
981 1596 * you can set the size but nothing else.
982 1597 */
983 1598 if (vp->v_flag & V_XATTRDIR)
984 1599 return (0);
985 1600 if (np->n_flag & N_XATTR) {
986 1601 if (mask & AT_TIMES)
987 1602 SMBVDEBUG("ignore set time on xattr\n");
988 1603 mask &= AT_SIZE;
989 1604 }
990 1605
991 1606 /*
1607 + * Only need to flush pages if there are any pages and
1608 + * if the file is marked as dirty in some fashion. The
1609 + * file must be flushed so that we can accurately
1610 + * determine the size of the file and the cached data
1611 + * after the SETATTR returns. A file is considered to
1612 + * be dirty if it is either marked with RDIRTY, has
1613 + * outstanding i/o's active, or is mmap'd. In this
1614 + * last case, we can't tell whether there are dirty
1615 + * pages, so we flush just to be sure.
1616 + */
1617 + if (vn_has_cached_data(vp) &&
1618 + ((np->r_flags & RDIRTY) ||
1619 + np->r_count > 0 ||
1620 + np->r_mapcnt > 0)) {
1621 + ASSERT(vp->v_type != VCHR);
1622 + error = smbfs_putpage(vp, (offset_t)0, 0, 0, cr, NULL);
1623 + if (error && (error == ENOSPC || error == EDQUOT)) {
1624 + mutex_enter(&np->r_statelock);
1625 + if (!np->r_error)
1626 + np->r_error = error;
1627 + mutex_exit(&np->r_statelock);
1628 + }
1629 + }
1630 +
1631 + /*
992 1632 * If our caller is trying to set multiple attributes, they
993 1633 * can make no assumption about what order they are done in.
994 1634 * Here we try to do them in order of decreasing likelihood
995 1635 * of failure, just to minimize the chance we'll wind up
996 1636 * with a partially complete request.
997 1637 */
998 1638
999 - /* Shared lock for (possible) n_fid use. */
1000 - if (smbfs_rw_enter_sig(&np->r_lkserlock, RW_READER, SMBINTR(vp)))
1001 - return (EINTR);
1002 1639 smb_credinit(&scred, cr);
1003 1640
1004 1641 /*
1005 1642 * If the caller has provided extensible attributes,
1006 1643 * map those into DOS attributes supported by SMB.
1007 1644 * Note: zero means "no change".
1008 1645 */
1009 1646 if (mask & AT_XVATTR)
1010 1647 dosattr = xvattr_to_dosattr(np, vap);
1011 1648
1012 1649 /*
1013 1650 * Will we need an open handle for this setattr?
1014 1651 * If so, what rights will we need?
1015 1652 */
1016 1653 if (dosattr || (mask & (AT_ATIME | AT_MTIME))) {
1017 1654 rights |=
1018 1655 SA_RIGHT_FILE_WRITE_ATTRIBUTES;
1019 1656 }
1020 1657 if (mask & AT_SIZE) {
1021 1658 rights |=
1022 1659 SA_RIGHT_FILE_WRITE_DATA |
1023 1660 SA_RIGHT_FILE_APPEND_DATA;
1024 1661 }
1025 1662
1026 1663 /*
1027 1664 * Only SIZE really requires a handle, but it's
1028 1665 * simpler and more reliable to set via a handle.
|
↓ open down ↓ |
17 lines elided |
↑ open up ↑ |
1029 1666 * Some servers like NT4 won't set times by path.
1030 1667 * Also, we're usually setting everything anyway.
1031 1668 */
1032 1669 if (rights != 0) {
1033 1670 error = smbfs_smb_tmpopen(np, rights, &scred, &fid);
1034 1671 if (error) {
1035 1672 SMBVDEBUG("error %d opening %s\n",
1036 1673 error, np->n_rpath);
1037 1674 goto out;
1038 1675 }
1039 - have_fid = 1;
1676 + ASSERT(fid != NULL);
1040 1677 }
1041 1678
1042 1679 /*
1043 1680 * If the server supports the UNIX extensions, right here is where
1044 1681 * we'd support changes to uid, gid, mode, and possibly va_flags.
1045 1682 * For now we claim to have made any such changes.
1046 1683 */
1047 1684
1048 1685 if (mask & AT_SIZE) {
1049 1686 /*
1050 1687 * If the new file size is less than what the client sees as
1051 1688 * the file size, then just change the size and invalidate
1052 1689 * the pages.
1053 - * I am commenting this code at present because the function
1054 - * smbfs_putapage() is not yet implemented.
1055 1690 */
1056 1691
1057 1692 /*
1058 1693 * Set the file size to vap->va_size.
1059 1694 */
1060 - ASSERT(have_fid);
1061 - error = smbfs_smb_setfsize(np, fid, vap->va_size, &scred);
1695 + ASSERT(fid != NULL);
1696 + error = smbfs_smb_setfsize(smi->smi_share, fid,
1697 + vap->va_size, &scred);
1062 1698 if (error) {
1063 1699 SMBVDEBUG("setsize error %d file %s\n",
1064 1700 error, np->n_rpath);
1065 1701 } else {
1066 1702 /*
1067 1703 * Darwin had code here to zero-extend.
1068 1704 * Tests indicate the server will zero-fill,
1069 - * so looks like we don't need to do this.
1070 - * Good thing, as this could take forever.
1071 - *
1072 - * XXX: Reportedly, writing one byte of zero
1073 - * at the end offset avoids problems here.
1705 + * so looks like we don't need to do that.
1074 1706 */
1075 1707 mutex_enter(&np->r_statelock);
1076 1708 np->r_size = vap->va_size;
1709 + np->n_flag |= (NFLUSHWIRE | NATTRCHANGED);
1077 1710 mutex_exit(&np->r_statelock);
1078 1711 modified = 1;
1079 1712 }
1080 1713 }
1081 1714
1082 1715 /*
1083 - * XXX: When Solaris has create_time, set that too.
1084 - * Note: create_time is different from ctime.
1716 + * Todo: Implement setting create_time (which is
1717 + * different from ctime).
1085 1718 */
1086 1719 mtime = ((mask & AT_MTIME) ? &vap->va_mtime : 0);
1087 1720 atime = ((mask & AT_ATIME) ? &vap->va_atime : 0);
1088 1721
1089 1722 if (dosattr || mtime || atime) {
1090 1723 /*
1091 1724 * Always use the handle-based set attr call now.
1092 1725 */
1093 - ASSERT(have_fid);
1094 - error = smbfs_smb_setfattr(np, fid,
1726 + ASSERT(fid != NULL);
1727 + error = smbfs_smb_setfattr(smi->smi_share, fid,
1095 1728 dosattr, mtime, atime, &scred);
1096 1729 if (error) {
1097 1730 SMBVDEBUG("set times error %d file %s\n",
1098 1731 error, np->n_rpath);
1099 1732 } else {
1100 1733 modified = 1;
1101 1734 }
1102 1735 }
1103 1736
1104 1737 out:
1738 + if (fid != NULL)
1739 + smbfs_smb_tmpclose(np, fid);
1740 +
1741 + smb_credrele(&scred);
1742 +
1105 1743 if (modified) {
1106 1744 /*
1107 1745 * Invalidate attribute cache in case the server
1108 1746 * doesn't set exactly the attributes we asked.
1109 1747 */
1110 1748 smbfs_attrcache_remove(np);
1111 - }
1112 1749
1113 - if (have_fid) {
1114 - cerror = smbfs_smb_tmpclose(np, fid, &scred);
1115 - if (cerror)
1116 - SMBVDEBUG("error %d closing %s\n",
1117 - cerror, np->n_rpath);
1750 + /*
1751 + * If changing the size of the file, invalidate
1752 + * any local cached data which is no longer part
1753 + * of the file. We also possibly invalidate the
1754 + * last page in the file. We could use
1755 + * pvn_vpzero(), but this would mark the page as
1756 + * modified and require it to be written back to
1757 + * the server for no particularly good reason.
1758 + * This way, if we access it, then we bring it
1759 + * back in. A read should be cheaper than a
1760 + * write.
1761 + */
1762 + if (mask & AT_SIZE) {
1763 + smbfs_invalidate_pages(vp,
1764 + (vap->va_size & PAGEMASK), cr);
1765 + }
1118 1766 }
1119 1767
1120 - smb_credrele(&scred);
1121 - smbfs_rw_exit(&np->r_lkserlock);
1122 -
1123 1768 return (error);
1124 1769 }
1125 1770
1126 1771 /*
1127 1772 * Helper function for extensible system attributes (PSARC 2007/315)
1128 1773 * Compute the DOS attribute word to pass to _setfattr (see above).
1129 1774 * This returns zero IFF no change is being made to attributes.
1130 1775 * Otherwise return the new attributes or SMB_EFA_NORMAL.
1131 1776 */
1132 1777 static uint32_t
1133 1778 xvattr_to_dosattr(smbnode_t *np, struct vattr *vap)
1134 1779 {
1135 1780 xvattr_t *xvap = (xvattr_t *)vap;
1136 1781 xoptattr_t *xoap = NULL;
1137 1782 uint32_t attr = np->r_attr.fa_attr;
1138 1783 boolean_t anyset = B_FALSE;
1139 1784
1140 1785 if ((xoap = xva_getxoptattr(xvap)) == NULL)
1141 1786 return (0);
1142 1787
1143 1788 if (XVA_ISSET_REQ(xvap, XAT_ARCHIVE)) {
1144 1789 if (xoap->xoa_archive)
1145 1790 attr |= SMB_FA_ARCHIVE;
1146 1791 else
1147 1792 attr &= ~SMB_FA_ARCHIVE;
1148 1793 XVA_SET_RTN(xvap, XAT_ARCHIVE);
1149 1794 anyset = B_TRUE;
1150 1795 }
1151 1796 if (XVA_ISSET_REQ(xvap, XAT_SYSTEM)) {
1152 1797 if (xoap->xoa_system)
1153 1798 attr |= SMB_FA_SYSTEM;
1154 1799 else
1155 1800 attr &= ~SMB_FA_SYSTEM;
1156 1801 XVA_SET_RTN(xvap, XAT_SYSTEM);
1157 1802 anyset = B_TRUE;
1158 1803 }
1159 1804 if (XVA_ISSET_REQ(xvap, XAT_READONLY)) {
1160 1805 if (xoap->xoa_readonly)
1161 1806 attr |= SMB_FA_RDONLY;
1162 1807 else
1163 1808 attr &= ~SMB_FA_RDONLY;
1164 1809 XVA_SET_RTN(xvap, XAT_READONLY);
1165 1810 anyset = B_TRUE;
1166 1811 }
1167 1812 if (XVA_ISSET_REQ(xvap, XAT_HIDDEN)) {
1168 1813 if (xoap->xoa_hidden)
1169 1814 attr |= SMB_FA_HIDDEN;
1170 1815 else
1171 1816 attr &= ~SMB_FA_HIDDEN;
1172 1817 XVA_SET_RTN(xvap, XAT_HIDDEN);
1173 1818 anyset = B_TRUE;
1174 1819 }
1175 1820
1176 1821 if (anyset == B_FALSE)
1177 1822 return (0); /* no change */
1178 1823 if (attr == 0)
1179 1824 attr = SMB_EFA_NORMAL;
1180 1825
1181 1826 return (attr);
1182 1827 }
1183 1828
1184 1829 /*
1185 1830 * smbfs_access_rwx()
1186 1831 * Common function for smbfs_access, etc.
1187 1832 *
1188 1833 * The security model implemented by the FS is unusual
1189 1834 * due to the current "single user mounts" restriction:
1190 1835 * All access under a given mount point uses the CIFS
1191 1836 * credentials established by the owner of the mount.
1192 1837 *
1193 1838 * Most access checking is handled by the CIFS server,
1194 1839 * but we need sufficient Unix access checks here to
1195 1840 * prevent other local Unix users from having access
1196 1841 * to objects under this mount that the uid/gid/mode
1197 1842 * settings in the mount would not allow.
1198 1843 *
|
↓ open down ↓ |
66 lines elided |
↑ open up ↑ |
1199 1844 * With this model, there is a case where we need the
1200 1845 * ability to do an access check before we have the
1201 1846 * vnode for an object. This function takes advantage
1202 1847 * of the fact that the uid/gid/mode is per mount, and
1203 1848 * avoids the need for a vnode.
1204 1849 *
1205 1850 * We still (sort of) need a vnode when we call
1206 1851 * secpolicy_vnode_access, but that only uses
1207 1852 * the vtype field, so we can use a pair of fake
1208 1853 * vnodes that have only v_type filled in.
1209 - *
1210 - * XXX: Later, add a new secpolicy_vtype_access()
1211 - * that takes the vtype instead of a vnode, and
1212 - * get rid of the tmpl_vxxx fake vnodes below.
1213 1854 */
1214 1855 static int
1215 1856 smbfs_access_rwx(vfs_t *vfsp, int vtype, int mode, cred_t *cr)
1216 1857 {
1217 1858 /* See the secpolicy call below. */
1218 1859 static const vnode_t tmpl_vdir = { .v_type = VDIR };
1219 1860 static const vnode_t tmpl_vreg = { .v_type = VREG };
1220 1861 vattr_t va;
1221 1862 vnode_t *tvp;
1222 1863 struct smbmntinfo *smi = VFTOSMI(vfsp);
1223 1864 int shift = 0;
1224 1865
1225 1866 /*
1226 1867 * Build our (fabricated) vnode attributes.
1227 - * XXX: Could make these templates in the
1228 - * per-mount struct and use them here.
1229 1868 */
1230 1869 bzero(&va, sizeof (va));
1231 1870 va.va_mask = AT_TYPE | AT_MODE | AT_UID | AT_GID;
1232 1871 va.va_type = vtype;
1233 1872 va.va_mode = (vtype == VDIR) ?
1234 1873 smi->smi_dmode : smi->smi_fmode;
1235 1874 va.va_uid = smi->smi_uid;
1236 1875 va.va_gid = smi->smi_gid;
1237 1876
1238 1877 /*
1239 1878 * Disallow write attempts on read-only file systems,
1240 1879 * unless the file is a device or fifo node. Note:
1241 1880 * Inline vn_is_readonly and IS_DEVVP here because
1242 1881 * we may not have a vnode ptr. Original expr. was:
|
↓ open down ↓ |
4 lines elided |
↑ open up ↑ |
1243 1882 * (mode & VWRITE) && vn_is_readonly(vp) && !IS_DEVVP(vp))
1244 1883 */
1245 1884 if ((mode & VWRITE) &&
1246 1885 (vfsp->vfs_flag & VFS_RDONLY) &&
1247 1886 !(vtype == VCHR || vtype == VBLK || vtype == VFIFO))
1248 1887 return (EROFS);
1249 1888
1250 1889 /*
1251 1890 * Disallow attempts to access mandatory lock files.
1252 1891 * Similarly, expand MANDLOCK here.
1253 - * XXX: not sure we need this.
1254 1892 */
1255 1893 if ((mode & (VWRITE | VREAD | VEXEC)) &&
1256 1894 va.va_type == VREG && MANDMODE(va.va_mode))
1257 1895 return (EACCES);
1258 1896
1259 1897 /*
1260 1898 * Access check is based on only
1261 1899 * one of owner, group, public.
1262 1900 * If not owner, then check group.
1263 1901 * If not a member of the group,
1264 1902 * then check public access.
1265 1903 */
1266 1904 if (crgetuid(cr) != va.va_uid) {
1267 1905 shift += 3;
1268 1906 if (!groupmember(va.va_gid, cr))
1269 1907 shift += 3;
1270 1908 }
1271 1909
1272 1910 /*
1273 1911 * We need a vnode for secpolicy_vnode_access,
1274 1912 * but the only thing it looks at is v_type,
1275 1913 * so pass one of the templates above.
1276 1914 */
1277 1915 tvp = (va.va_type == VDIR) ?
1278 1916 (vnode_t *)&tmpl_vdir :
1279 1917 (vnode_t *)&tmpl_vreg;
1280 1918
1281 1919 return (secpolicy_vnode_access2(cr, tvp, va.va_uid,
1282 1920 va.va_mode << shift, mode));
1283 1921 }
1284 1922
1285 1923 /*
1286 1924 * See smbfs_setattr
1287 1925 */
1288 1926 static int
1289 1927 smbfs_accessx(void *arg, int mode, cred_t *cr)
1290 1928 {
1291 1929 vnode_t *vp = arg;
1292 1930 /*
1293 1931 * Note: The caller has checked the current zone,
1294 1932 * the SMI_DEAD and VFS_UNMOUNTED flags, etc.
1295 1933 */
1296 1934 return (smbfs_access_rwx(vp->v_vfsp, vp->v_type, mode, cr));
1297 1935 }
1298 1936
1299 1937 /*
1300 1938 * XXX
1301 1939 * This op should support PSARC 2007/403, Modified Access Checks for CIFS
1302 1940 */
1303 1941 /* ARGSUSED */
1304 1942 static int
1305 1943 smbfs_access(vnode_t *vp, int mode, int flags, cred_t *cr, caller_context_t *ct)
1306 1944 {
1307 1945 vfs_t *vfsp;
1308 1946 smbmntinfo_t *smi;
1309 1947
1310 1948 vfsp = vp->v_vfsp;
1311 1949 smi = VFTOSMI(vfsp);
1312 1950
|
↓ open down ↓ |
49 lines elided |
↑ open up ↑ |
1313 1951 if (curproc->p_zone != smi->smi_zone_ref.zref_zone)
1314 1952 return (EIO);
1315 1953
1316 1954 if (smi->smi_flags & SMI_DEAD || vfsp->vfs_flag & VFS_UNMOUNTED)
1317 1955 return (EIO);
1318 1956
1319 1957 return (smbfs_access_rwx(vfsp, vp->v_type, mode, cr));
1320 1958 }
1321 1959
1322 1960
1961 +/* ARGSUSED */
1962 +static int
1963 +smbfs_readlink(vnode_t *vp, struct uio *uiop, cred_t *cr, caller_context_t *ct)
1964 +{
1965 + /* Not yet... */
1966 + return (ENOSYS);
1967 +}
1968 +
1969 +
1323 1970 /*
1324 1971 * Flush local dirty pages to stable storage on the server.
1325 1972 *
1326 1973 * If FNODSYNC is specified, then there is nothing to do because
1327 1974 * metadata changes are not cached on the client before being
1328 1975 * sent to the server.
1329 1976 */
1330 1977 /* ARGSUSED */
1331 1978 static int
1332 1979 smbfs_fsync(vnode_t *vp, int syncflag, cred_t *cr, caller_context_t *ct)
1333 1980 {
1334 1981 int error = 0;
1335 1982 smbmntinfo_t *smi;
1336 - smbnode_t *np;
1983 + smbnode_t *np;
1337 1984 struct smb_cred scred;
1338 1985
1339 1986 np = VTOSMB(vp);
1340 1987 smi = VTOSMI(vp);
1341 1988
1342 1989 if (curproc->p_zone != smi->smi_zone_ref.zref_zone)
1343 1990 return (EIO);
1344 1991
1345 1992 if (smi->smi_flags & SMI_DEAD || vp->v_vfsp->vfs_flag & VFS_UNMOUNTED)
1346 1993 return (EIO);
1347 1994
1348 1995 if ((syncflag & FNODSYNC) || IS_SWAPVP(vp))
1349 1996 return (0);
1350 1997
1351 1998 if ((syncflag & (FSYNC|FDSYNC)) == 0)
1352 1999 return (0);
1353 2000
2001 + error = smbfs_putpage(vp, (offset_t)0, 0, 0, cr, ct);
2002 + if (error)
2003 + return (error);
2004 +
1354 2005 /* Shared lock for n_fid use in _flush */
1355 2006 if (smbfs_rw_enter_sig(&np->r_lkserlock, RW_READER, SMBINTR(vp)))
1356 2007 return (EINTR);
1357 2008 smb_credinit(&scred, cr);
1358 2009
1359 - error = smbfs_smb_flush(np, &scred);
2010 + error = smbfsflush(np, &scred);
1360 2011
1361 2012 smb_credrele(&scred);
1362 2013 smbfs_rw_exit(&np->r_lkserlock);
1363 2014
1364 2015 return (error);
1365 2016 }
1366 2017
2018 +static int
2019 +smbfsflush(smbnode_t *np, struct smb_cred *scrp)
2020 +{
2021 + struct smb_share *ssp = np->n_mount->smi_share;
2022 + smb_fh_t *fhp;
2023 + int error;
2024 +
2025 + /* Shared lock for n_fid use below. */
2026 + ASSERT(smbfs_rw_lock_held(&np->r_lkserlock, RW_READER));
2027 +
2028 + if (!(np->n_flag & NFLUSHWIRE))
2029 + return (0);
2030 + if (np->n_fidrefs == 0)
2031 + return (0); /* not open */
2032 + if ((fhp = np->n_fid) == NULL)
2033 + return (0);
2034 +
2035 + /* After reconnect, n_fid is invalid */
2036 + if (fhp->fh_vcgenid != ssp->ss_vcgenid)
2037 + return (ESTALE);
2038 +
2039 + error = smbfs_smb_flush(ssp, fhp, scrp);
2040 +
2041 + if (!error) {
2042 + mutex_enter(&np->r_statelock);
2043 + np->n_flag &= ~NFLUSHWIRE;
2044 + mutex_exit(&np->r_statelock);
2045 + }
2046 + return (error);
2047 +}
2048 +
1367 2049 /*
1368 2050 * Last reference to vnode went away.
1369 2051 */
1370 2052 /* ARGSUSED */
1371 2053 static void
1372 2054 smbfs_inactive(vnode_t *vp, cred_t *cr, caller_context_t *ct)
1373 2055 {
1374 - smbnode_t *np;
1375 2056 struct smb_cred scred;
2057 + smbnode_t *np = VTOSMB(vp);
2058 + int error;
1376 2059
1377 2060 /*
1378 2061 * Don't "bail out" for VFS_UNMOUNTED here,
1379 2062 * as we want to do cleanup, etc.
1380 2063 * See also pcfs_inactive
1381 2064 */
1382 2065
1383 - np = VTOSMB(vp);
1384 -
1385 2066 /*
1386 2067 * If this is coming from the wrong zone, we let someone in the right
1387 2068 * zone take care of it asynchronously. We can get here due to
1388 2069 * VN_RELE() being called from pageout() or fsflush(). This call may
1389 2070 * potentially turn into an expensive no-op if, for instance, v_count
1390 2071 * gets incremented in the meantime, but it's still correct.
1391 2072 */
1392 2073
1393 2074 /*
2075 + * From NFS:rinactive()
2076 + *
2077 + * Before freeing anything, wait until all asynchronous
2078 + * activity is done on this rnode. This will allow all
2079 + * asynchronous read ahead and write behind i/o's to
2080 + * finish.
2081 + */
2082 + mutex_enter(&np->r_statelock);
2083 + while (np->r_count > 0)
2084 + cv_wait(&np->r_cv, &np->r_statelock);
2085 + mutex_exit(&np->r_statelock);
2086 +
2087 + /*
2088 + * Flush and invalidate all pages associated with the vnode.
2089 + */
2090 + if (vn_has_cached_data(vp)) {
2091 + if ((np->r_flags & RDIRTY) && !np->r_error) {
2092 + error = smbfs_putpage(vp, (u_offset_t)0, 0, 0, cr, ct);
2093 + if (error && (error == ENOSPC || error == EDQUOT)) {
2094 + mutex_enter(&np->r_statelock);
2095 + if (!np->r_error)
2096 + np->r_error = error;
2097 + mutex_exit(&np->r_statelock);
2098 + }
2099 + }
2100 + smbfs_invalidate_pages(vp, (u_offset_t)0, cr);
2101 + }
2102 + /*
2103 + * This vnode should have lost all cached data.
2104 + */
2105 + ASSERT(vn_has_cached_data(vp) == 0);
2106 +
2107 + /*
1394 2108 * Defend against the possibility that higher-level callers
1395 2109 * might not correctly balance open and close calls. If we
1396 2110 * get here with open references remaining, it means there
1397 2111 * was a missing VOP_CLOSE somewhere. If that happens, do
1398 2112 * the close here so we don't "leak" FIDs on the server.
1399 2113 *
1400 2114 * Exclusive lock for modifying n_fid stuff.
1401 2115 * Don't want this one ever interruptible.
1402 2116 */
1403 2117 (void) smbfs_rw_enter_sig(&np->r_lkserlock, RW_WRITER, 0);
1404 2118 smb_credinit(&scred, cr);
1405 2119
1406 2120 switch (np->n_ovtype) {
1407 2121 case VNON:
1408 2122 /* not open (OK) */
1409 2123 break;
1410 2124
1411 2125 case VDIR:
1412 2126 if (np->n_dirrefs == 0)
1413 2127 break;
|
↓ open down ↓ |
10 lines elided |
↑ open up ↑ |
1414 2128 SMBVDEBUG("open dir: refs %d path %s\n",
1415 2129 np->n_dirrefs, np->n_rpath);
1416 2130 /* Force last close. */
1417 2131 np->n_dirrefs = 1;
1418 2132 smbfs_rele_fid(np, &scred);
1419 2133 break;
1420 2134
1421 2135 case VREG:
1422 2136 if (np->n_fidrefs == 0)
1423 2137 break;
1424 - SMBVDEBUG("open file: refs %d id 0x%x path %s\n",
1425 - np->n_fidrefs, np->n_fid, np->n_rpath);
2138 + SMBVDEBUG("open file: refs %d path %s\n",
2139 + np->n_fidrefs, np->n_rpath);
1426 2140 /* Force last close. */
1427 2141 np->n_fidrefs = 1;
1428 2142 smbfs_rele_fid(np, &scred);
1429 2143 break;
1430 2144
1431 2145 default:
1432 2146 SMBVDEBUG("bad n_ovtype %d\n", np->n_ovtype);
1433 2147 np->n_ovtype = VNON;
1434 2148 break;
1435 2149 }
1436 2150
1437 2151 smb_credrele(&scred);
1438 2152 smbfs_rw_exit(&np->r_lkserlock);
1439 2153
2154 + /*
2155 + * XATTR directories (and the files under them) have
2156 + * little value for reclaim, so just remove them from
2157 + * the "hash" (AVL) as soon as they go inactive.
2158 + * Note that the node may already have been removed
2159 + * from the hash by smbfsremove.
2160 + */
2161 + if ((np->n_flag & N_XATTR) != 0 &&
2162 + (np->r_flags & RHASHED) != 0)
2163 + smbfs_rmhash(np);
2164 +
1440 2165 smbfs_addfree(np);
1441 2166 }
1442 2167
1443 2168 /*
1444 2169 * Remote file system operations having to do with directory manipulation.
1445 2170 */
1446 2171 /* ARGSUSED */
1447 2172 static int
1448 2173 smbfs_lookup(vnode_t *dvp, char *nm, vnode_t **vpp, struct pathname *pnp,
1449 2174 int flags, vnode_t *rdir, cred_t *cr, caller_context_t *ct,
1450 2175 int *direntflags, pathname_t *realpnp)
1451 2176 {
1452 2177 vfs_t *vfs;
1453 2178 smbmntinfo_t *smi;
1454 2179 smbnode_t *dnp;
1455 2180 int error;
1456 2181
1457 2182 vfs = dvp->v_vfsp;
1458 2183 smi = VFTOSMI(vfs);
1459 2184
1460 2185 if (curproc->p_zone != smi->smi_zone_ref.zref_zone)
1461 2186 return (EPERM);
1462 2187
1463 2188 if (smi->smi_flags & SMI_DEAD || vfs->vfs_flag & VFS_UNMOUNTED)
1464 2189 return (EIO);
1465 2190
1466 2191 dnp = VTOSMB(dvp);
1467 2192
1468 2193 /*
1469 2194 * Are we looking up extended attributes? If so, "dvp" is
1470 2195 * the file or directory for which we want attributes, and
1471 2196 * we need a lookup of the (faked up) attribute directory
1472 2197 * before we lookup the rest of the path.
1473 2198 */
1474 2199 if (flags & LOOKUP_XATTR) {
1475 2200 /*
1476 2201 * Require the xattr mount option.
1477 2202 */
1478 2203 if ((vfs->vfs_flag & VFS_XATTR) == 0)
1479 2204 return (EINVAL);
1480 2205
1481 2206 error = smbfs_get_xattrdir(dvp, vpp, cr, flags);
|
↓ open down ↓ |
32 lines elided |
↑ open up ↑ |
1482 2207 return (error);
1483 2208 }
1484 2209
1485 2210 if (smbfs_rw_enter_sig(&dnp->r_rwlock, RW_READER, SMBINTR(dvp)))
1486 2211 return (EINTR);
1487 2212
1488 2213 error = smbfslookup(dvp, nm, vpp, cr, 1, ct);
1489 2214
1490 2215 smbfs_rw_exit(&dnp->r_rwlock);
1491 2216
2217 + /*
2218 + * If the caller passes an invalid name here, we'll have
2219 + * error == EINVAL but want to return ENOENT. This is
2220 + * common with things like "ls foo*" with no matches.
2221 + */
2222 + if (error == EINVAL)
2223 + error = ENOENT;
2224 +
1492 2225 return (error);
1493 2226 }
1494 2227
1495 2228 /* ARGSUSED */
1496 2229 static int
1497 2230 smbfslookup(vnode_t *dvp, char *nm, vnode_t **vpp, cred_t *cr,
1498 2231 int cache_ok, caller_context_t *ct)
1499 2232 {
1500 2233 int error;
1501 2234 int supplen; /* supported length */
1502 2235 vnode_t *vp;
1503 2236 smbnode_t *np;
1504 2237 smbnode_t *dnp;
1505 2238 smbmntinfo_t *smi;
1506 2239 /* struct smb_vc *vcp; */
1507 2240 const char *ill;
1508 2241 const char *name = (const char *)nm;
1509 - int nmlen = strlen(nm);
1510 - int rplen;
2242 + int nmlen = strlen(nm);
2243 + int rplen;
1511 2244 struct smb_cred scred;
1512 2245 struct smbfattr fa;
1513 2246
1514 2247 smi = VTOSMI(dvp);
1515 2248 dnp = VTOSMB(dvp);
1516 2249
1517 2250 ASSERT(curproc->p_zone == smi->smi_zone_ref.zref_zone);
1518 2251
1519 -#ifdef NOT_YET
1520 - vcp = SSTOVC(smi->smi_share);
1521 -
1522 - /* XXX: Should compute this once and store it in smbmntinfo_t */
1523 - supplen = (SMB_DIALECT(vcp) >= SMB_DIALECT_LANMAN2_0) ? 255 : 12;
1524 -#else
1525 2252 supplen = 255;
1526 -#endif
1527 2253
1528 2254 /*
1529 2255 * RWlock must be held, either reader or writer.
1530 - * XXX: Can we check without looking directly
1531 - * inside the struct smbfs_rwlock_t?
1532 2256 */
1533 2257 ASSERT(dnp->r_rwlock.count != 0);
1534 2258
1535 2259 /*
1536 2260 * If lookup is for "", just return dvp.
1537 2261 * No need to perform any access checks.
1538 2262 */
1539 2263 if (nmlen == 0) {
1540 2264 VN_HOLD(dvp);
1541 2265 *vpp = dvp;
1542 2266 return (0);
1543 2267 }
1544 2268
1545 2269 /*
1546 2270 * Can't do lookups in non-directories.
1547 2271 */
1548 2272 if (dvp->v_type != VDIR)
1549 2273 return (ENOTDIR);
1550 2274
1551 2275 /*
1552 2276 * Need search permission in the directory.
1553 2277 */
1554 2278 error = smbfs_access(dvp, VEXEC, 0, cr, ct);
1555 2279 if (error)
1556 2280 return (error);
1557 2281
1558 2282 /*
1559 2283 * If lookup is for ".", just return dvp.
1560 2284 * Access check was done above.
1561 2285 */
1562 2286 if (nmlen == 1 && name[0] == '.') {
1563 2287 VN_HOLD(dvp);
1564 2288 *vpp = dvp;
1565 2289 return (0);
1566 2290 }
1567 2291
|
↓ open down ↓ |
26 lines elided |
↑ open up ↑ |
1568 2292 /*
1569 2293 * Now some sanity checks on the name.
1570 2294 * First check the length.
1571 2295 */
1572 2296 if (nmlen > supplen)
1573 2297 return (ENAMETOOLONG);
1574 2298
1575 2299 /*
1576 2300 * Avoid surprises with characters that are
1577 2301 * illegal in Windows file names.
1578 - * Todo: CATIA mappings XXX
2302 + * Todo: CATIA mappings?
1579 2303 */
1580 2304 ill = illegal_chars;
1581 2305 if (dnp->n_flag & N_XATTR)
1582 2306 ill++; /* allow colon */
1583 2307 if (strpbrk(nm, ill))
1584 2308 return (EINVAL);
1585 2309
1586 2310 /*
1587 2311 * Special handling for lookup of ".."
1588 2312 *
1589 2313 * We keep full pathnames (as seen on the server)
1590 2314 * so we can just trim off the last component to
1591 2315 * get the full pathname of the parent. Note:
1592 2316 * We don't actually copy and modify, but just
1593 2317 * compute the trimmed length and pass that with
1594 2318 * the current dir path (not null terminated).
1595 2319 *
1596 2320 * We don't go over-the-wire to get attributes
1597 2321 * for ".." because we know it's a directory,
1598 2322 * and we can just leave the rest "stale"
1599 2323 * until someone does a getattr.
1600 2324 */
1601 2325 if (nmlen == 2 && name[0] == '.' && name[1] == '.') {
1602 2326 if (dvp->v_flag & VROOT) {
1603 2327 /*
1604 2328 * Already at the root. This can happen
1605 2329 * with directory listings at the root,
1606 2330 * which lookup "." and ".." to get the
1607 2331 * inode numbers. Let ".." be the same
1608 2332 * as "." in the FS root.
1609 2333 */
1610 2334 VN_HOLD(dvp);
1611 2335 *vpp = dvp;
1612 2336 return (0);
1613 2337 }
1614 2338
1615 2339 /*
1616 2340 * Special case for XATTR directory
1617 2341 */
1618 2342 if (dvp->v_flag & V_XATTRDIR) {
1619 2343 error = smbfs_xa_parent(dvp, vpp);
1620 2344 return (error);
1621 2345 }
1622 2346
1623 2347 /*
1624 2348 * Find the parent path length.
1625 2349 */
1626 2350 rplen = dnp->n_rplen;
1627 2351 ASSERT(rplen > 0);
1628 2352 while (--rplen >= 0) {
1629 2353 if (dnp->n_rpath[rplen] == '\\')
1630 2354 break;
1631 2355 }
1632 2356 if (rplen <= 0) {
1633 2357 /* Found our way to the root. */
1634 2358 vp = SMBTOV(smi->smi_root);
1635 2359 VN_HOLD(vp);
1636 2360 *vpp = vp;
1637 2361 return (0);
1638 2362 }
1639 2363 np = smbfs_node_findcreate(smi,
1640 2364 dnp->n_rpath, rplen, NULL, 0, 0,
1641 2365 &smbfs_fattr0); /* force create */
1642 2366 ASSERT(np != NULL);
1643 2367 vp = SMBTOV(np);
1644 2368 vp->v_type = VDIR;
1645 2369
1646 2370 /* Success! */
1647 2371 *vpp = vp;
1648 2372 return (0);
1649 2373 }
1650 2374
1651 2375 /*
1652 2376 * Normal lookup of a name under this directory.
1653 2377 * Note we handled "", ".", ".." above.
1654 2378 */
1655 2379 if (cache_ok) {
1656 2380 /*
1657 2381 * The caller indicated that it's OK to use a
1658 2382 * cached result for this lookup, so try to
1659 2383 * reclaim a node from the smbfs node cache.
1660 2384 */
1661 2385 error = smbfslookup_cache(dvp, nm, nmlen, &vp, cr);
1662 2386 if (error)
1663 2387 return (error);
1664 2388 if (vp != NULL) {
1665 2389 /* hold taken in lookup_cache */
1666 2390 *vpp = vp;
1667 2391 return (0);
1668 2392 }
1669 2393 }
1670 2394
1671 2395 /*
1672 2396 * OK, go over-the-wire to get the attributes,
1673 2397 * then create the node.
1674 2398 */
1675 2399 smb_credinit(&scred, cr);
1676 2400 /* Note: this can allocate a new "name" */
1677 2401 error = smbfs_smb_lookup(dnp, &name, &nmlen, &fa, &scred);
1678 2402 smb_credrele(&scred);
1679 2403 if (error == ENOTDIR) {
1680 2404 /*
1681 2405 * Lookup failed because this directory was
1682 2406 * removed or renamed by another client.
1683 2407 * Remove any cached attributes under it.
1684 2408 */
1685 2409 smbfs_attrcache_remove(dnp);
1686 2410 smbfs_attrcache_prune(dnp);
1687 2411 }
1688 2412 if (error)
1689 2413 goto out;
1690 2414
1691 2415 error = smbfs_nget(dvp, name, nmlen, &fa, &vp);
1692 2416 if (error)
1693 2417 goto out;
1694 2418
1695 2419 /* Success! */
1696 2420 *vpp = vp;
1697 2421
1698 2422 out:
1699 2423 /* smbfs_smb_lookup may have allocated name. */
1700 2424 if (name != nm)
1701 2425 smbfs_name_free(name, nmlen);
1702 2426
1703 2427 return (error);
1704 2428 }
1705 2429
1706 2430 /*
1707 2431 * smbfslookup_cache
1708 2432 *
1709 2433 * Try to reclaim a node from the smbfs node cache.
1710 2434 * Some statistics for DEBUG.
1711 2435 *
1712 2436 * This mechanism lets us avoid many of the five (or more)
1713 2437 * OtW lookup calls per file seen with "ls -l" if we search
1714 2438 * the smbfs node cache for recently inactive(ated) nodes.
1715 2439 */
1716 2440 #ifdef DEBUG
1717 2441 int smbfs_lookup_cache_calls = 0;
1718 2442 int smbfs_lookup_cache_error = 0;
1719 2443 int smbfs_lookup_cache_miss = 0;
1720 2444 int smbfs_lookup_cache_stale = 0;
1721 2445 int smbfs_lookup_cache_hits = 0;
1722 2446 #endif /* DEBUG */
1723 2447
1724 2448 /* ARGSUSED */
1725 2449 static int
1726 2450 smbfslookup_cache(vnode_t *dvp, char *nm, int nmlen,
1727 2451 vnode_t **vpp, cred_t *cr)
1728 2452 {
1729 2453 struct vattr va;
1730 2454 smbnode_t *dnp;
1731 2455 smbnode_t *np;
1732 2456 vnode_t *vp;
1733 2457 int error;
1734 2458 char sep;
1735 2459
1736 2460 dnp = VTOSMB(dvp);
1737 2461 *vpp = NULL;
1738 2462
1739 2463 #ifdef DEBUG
1740 2464 smbfs_lookup_cache_calls++;
1741 2465 #endif
1742 2466
1743 2467 /*
1744 2468 * First make sure we can get attributes for the
1745 2469 * directory. Cached attributes are OK here.
1746 2470 * If we removed or renamed the directory, this
1747 2471 * will return ENOENT. If someone else removed
1748 2472 * this directory or file, we'll find out when we
1749 2473 * try to open or get attributes.
1750 2474 */
1751 2475 va.va_mask = AT_TYPE | AT_MODE;
1752 2476 error = smbfsgetattr(dvp, &va, cr);
1753 2477 if (error) {
1754 2478 #ifdef DEBUG
1755 2479 smbfs_lookup_cache_error++;
1756 2480 #endif
1757 2481 return (error);
1758 2482 }
1759 2483
1760 2484 /*
1761 2485 * Passing NULL smbfattr here so we will
1762 2486 * just look, not create.
1763 2487 */
1764 2488 sep = SMBFS_DNP_SEP(dnp);
1765 2489 np = smbfs_node_findcreate(dnp->n_mount,
1766 2490 dnp->n_rpath, dnp->n_rplen,
1767 2491 nm, nmlen, sep, NULL);
1768 2492 if (np == NULL) {
1769 2493 #ifdef DEBUG
1770 2494 smbfs_lookup_cache_miss++;
1771 2495 #endif
1772 2496 return (0);
1773 2497 }
1774 2498
1775 2499 /*
1776 2500 * Found it. Attributes still valid?
1777 2501 */
1778 2502 vp = SMBTOV(np);
1779 2503 if (np->r_attrtime <= gethrtime()) {
1780 2504 /* stale */
1781 2505 #ifdef DEBUG
1782 2506 smbfs_lookup_cache_stale++;
1783 2507 #endif
1784 2508 VN_RELE(vp);
1785 2509 return (0);
1786 2510 }
1787 2511
1788 2512 /*
|
↓ open down ↓ |
200 lines elided |
↑ open up ↑ |
1789 2513 * Success!
1790 2514 * Caller gets hold from smbfs_node_findcreate
1791 2515 */
1792 2516 #ifdef DEBUG
1793 2517 smbfs_lookup_cache_hits++;
1794 2518 #endif
1795 2519 *vpp = vp;
1796 2520 return (0);
1797 2521 }
1798 2522
2523 +
1799 2524 /*
1800 2525 * XXX
1801 2526 * vsecattr_t is new to build 77, and we need to eventually support
1802 2527 * it in order to create an ACL when an object is created.
1803 2528 *
1804 2529 * This op should support the new FIGNORECASE flag for case-insensitive
1805 2530 * lookups, per PSARC 2007/244.
1806 2531 */
1807 2532 /* ARGSUSED */
1808 2533 static int
1809 2534 smbfs_create(vnode_t *dvp, char *nm, struct vattr *va, enum vcexcl exclusive,
1810 2535 int mode, vnode_t **vpp, cred_t *cr, int lfaware, caller_context_t *ct,
1811 2536 vsecattr_t *vsecp)
1812 2537 {
1813 2538 int error;
1814 - int cerror;
1815 2539 vfs_t *vfsp;
1816 2540 vnode_t *vp;
1817 -#ifdef NOT_YET
1818 2541 smbnode_t *np;
1819 -#endif
1820 2542 smbnode_t *dnp;
1821 2543 smbmntinfo_t *smi;
1822 2544 struct vattr vattr;
1823 2545 struct smbfattr fattr;
1824 2546 struct smb_cred scred;
1825 2547 const char *name = (const char *)nm;
1826 2548 int nmlen = strlen(nm);
1827 2549 uint32_t disp;
1828 - uint16_t fid;
2550 + smb_fh_t *fid = NULL;
1829 2551 int xattr;
1830 2552
1831 2553 vfsp = dvp->v_vfsp;
1832 2554 smi = VFTOSMI(vfsp);
1833 2555 dnp = VTOSMB(dvp);
1834 2556 vp = NULL;
1835 2557
1836 2558 if (curproc->p_zone != smi->smi_zone_ref.zref_zone)
1837 2559 return (EPERM);
1838 2560
1839 2561 if (smi->smi_flags & SMI_DEAD || vfsp->vfs_flag & VFS_UNMOUNTED)
1840 2562 return (EIO);
1841 2563
1842 2564 /*
1843 2565 * Note: this may break mknod(2) calls to create a directory,
1844 2566 * but that's obscure use. Some other filesystems do this.
1845 - * XXX: Later, redirect VDIR type here to _mkdir.
2567 + * Todo: redirect VDIR type here to _mkdir.
1846 2568 */
1847 2569 if (va->va_type != VREG)
1848 2570 return (EINVAL);
1849 2571
1850 2572 /*
1851 2573 * If the pathname is "", just use dvp, no checks.
1852 2574 * Do this outside of the rwlock (like zfs).
1853 2575 */
1854 2576 if (nmlen == 0) {
1855 2577 VN_HOLD(dvp);
1856 2578 *vpp = dvp;
1857 2579 return (0);
1858 2580 }
1859 2581
1860 2582 /* Don't allow "." or ".." through here. */
1861 2583 if ((nmlen == 1 && name[0] == '.') ||
1862 2584 (nmlen == 2 && name[0] == '.' && name[1] == '.'))
1863 2585 return (EISDIR);
1864 2586
1865 2587 /*
|
↓ open down ↓ |
10 lines elided |
↑ open up ↑ |
1866 2588 * We make a copy of the attributes because the caller does not
1867 2589 * expect us to change what va points to.
1868 2590 */
1869 2591 vattr = *va;
1870 2592
1871 2593 if (smbfs_rw_enter_sig(&dnp->r_rwlock, RW_WRITER, SMBINTR(dvp)))
1872 2594 return (EINTR);
1873 2595 smb_credinit(&scred, cr);
1874 2596
1875 2597 /*
1876 - * XXX: Do we need r_lkserlock too?
1877 - * No use of any shared fid or fctx...
1878 - */
1879 -
1880 - /*
1881 2598 * NFS needs to go over the wire, just to be sure whether the
1882 2599 * file exists or not. Using a cached result is dangerous in
1883 2600 * this case when making a decision regarding existence.
1884 2601 *
1885 2602 * The SMB protocol does NOT really need to go OTW here
1886 2603 * thanks to the expressive NTCREATE disposition values.
1887 2604 * Unfortunately, to do Unix access checks correctly,
1888 2605 * we need to know if the object already exists.
1889 2606 * When the object does not exist, we need VWRITE on
1890 2607 * the directory. Note: smbfslookup() checks VEXEC.
1891 2608 */
1892 2609 error = smbfslookup(dvp, nm, &vp, cr, 0, ct);
1893 2610 if (error == 0) {
1894 2611 /*
1895 2612 * The file already exists. Error?
1896 2613 * NB: have a hold from smbfslookup
1897 2614 */
1898 2615 if (exclusive == EXCL) {
1899 2616 error = EEXIST;
1900 2617 VN_RELE(vp);
1901 2618 goto out;
1902 2619 }
1903 2620 /*
1904 2621 * Verify requested access.
|
↓ open down ↓ |
14 lines elided |
↑ open up ↑ |
1905 2622 */
1906 2623 error = smbfs_access(vp, mode, 0, cr, ct);
1907 2624 if (error) {
1908 2625 VN_RELE(vp);
1909 2626 goto out;
1910 2627 }
1911 2628
1912 2629 /*
1913 2630 * Truncate (if requested).
1914 2631 */
1915 - if ((vattr.va_mask & AT_SIZE) && vattr.va_size == 0) {
2632 + if ((vattr.va_mask & AT_SIZE) && vp->v_type == VREG) {
2633 + np = VTOSMB(vp);
2634 + /*
2635 + * Check here for large file truncation by
2636 + * LF-unaware process, like ufs_create().
2637 + */
2638 + if (!(lfaware & FOFFMAX)) {
2639 + mutex_enter(&np->r_statelock);
2640 + if (np->r_size > MAXOFF32_T)
2641 + error = EOVERFLOW;
2642 + mutex_exit(&np->r_statelock);
2643 + }
2644 + if (error) {
2645 + VN_RELE(vp);
2646 + goto out;
2647 + }
1916 2648 vattr.va_mask = AT_SIZE;
1917 2649 error = smbfssetattr(vp, &vattr, 0, cr);
1918 2650 if (error) {
1919 2651 VN_RELE(vp);
1920 2652 goto out;
1921 2653 }
2654 +#ifdef SMBFS_VNEVENT
2655 + /* Existing file was truncated */
2656 + vnevent_create(vp, ct);
2657 +#endif
2658 + /* invalidate pages done in smbfssetattr() */
1922 2659 }
1923 2660 /* Success! */
1924 -#ifdef NOT_YET
1925 - vnevent_create(vp, ct);
1926 -#endif
1927 2661 *vpp = vp;
1928 2662 goto out;
1929 2663 }
1930 2664
1931 2665 /*
1932 2666 * The file did not exist. Need VWRITE in the directory.
1933 2667 */
1934 2668 error = smbfs_access(dvp, VWRITE, 0, cr, ct);
1935 2669 if (error)
1936 2670 goto out;
1937 2671
1938 2672 /*
1939 2673 * Now things get tricky. We also need to check the
1940 2674 * requested open mode against the file we may create.
1941 2675 * See comments at smbfs_access_rwx
1942 2676 */
1943 2677 error = smbfs_access_rwx(vfsp, VREG, mode, cr);
1944 2678 if (error)
1945 2679 goto out;
1946 2680
1947 2681 /*
1948 2682 * Now the code derived from Darwin,
1949 2683 * but with greater use of NT_CREATE
1950 2684 * disposition options. Much changed.
1951 2685 *
1952 2686 * Create (or open) a new child node.
1953 2687 * Note we handled "." and ".." above.
1954 2688 */
1955 2689
1956 2690 if (exclusive == EXCL)
1957 2691 disp = NTCREATEX_DISP_CREATE;
1958 2692 else {
1959 2693 /* Truncate regular files if requested. */
1960 2694 if ((va->va_type == VREG) &&
1961 2695 (va->va_mask & AT_SIZE) &&
1962 2696 (va->va_size == 0))
1963 2697 disp = NTCREATEX_DISP_OVERWRITE_IF;
1964 2698 else
|
↓ open down ↓ |
28 lines elided |
↑ open up ↑ |
1965 2699 disp = NTCREATEX_DISP_OPEN_IF;
1966 2700 }
1967 2701 xattr = (dnp->n_flag & N_XATTR) ? 1 : 0;
1968 2702 error = smbfs_smb_create(dnp,
1969 2703 name, nmlen, xattr,
1970 2704 disp, &scred, &fid);
1971 2705 if (error)
1972 2706 goto out;
1973 2707
1974 2708 /*
1975 - * XXX: Missing some code here to deal with
1976 - * the case where we opened an existing file,
1977 - * it's size is larger than 32-bits, and we're
1978 - * setting the size from a process that's not
1979 - * aware of large file offsets. i.e.
1980 - * from the NFS3 code:
1981 - */
1982 -#if NOT_YET /* XXX */
1983 - if ((vattr.va_mask & AT_SIZE) &&
1984 - vp->v_type == VREG) {
1985 - np = VTOSMB(vp);
1986 - /*
1987 - * Check here for large file handled
1988 - * by LF-unaware process (as
1989 - * ufs_create() does)
1990 - */
1991 - if (!(lfaware & FOFFMAX)) {
1992 - mutex_enter(&np->r_statelock);
1993 - if (np->r_size > MAXOFF32_T)
1994 - error = EOVERFLOW;
1995 - mutex_exit(&np->r_statelock);
1996 - }
1997 - if (!error) {
1998 - vattr.va_mask = AT_SIZE;
1999 - error = smbfssetattr(vp,
2000 - &vattr, 0, cr);
2001 - }
2002 - }
2003 -#endif /* XXX */
2004 - /*
2005 2709 * Should use the fid to get/set the size
2006 2710 * while we have it opened here. See above.
2007 2711 */
2712 + smbfs_smb_close(fid);
2008 2713
2009 - cerror = smbfs_smb_close(smi->smi_share, fid, NULL, &scred);
2010 - if (cerror)
2011 - SMBVDEBUG("error %d closing %s\\%s\n",
2012 - cerror, dnp->n_rpath, name);
2013 -
2014 2714 /*
2015 2715 * In the open case, the name may differ a little
2016 2716 * from what we passed to create (case, etc.)
2017 2717 * so call lookup to get the (opened) name.
2018 2718 *
2019 2719 * XXX: Could avoid this extra lookup if the
2020 2720 * "createact" result from NT_CREATE says we
2021 2721 * created the object.
2022 2722 */
2023 2723 error = smbfs_smb_lookup(dnp, &name, &nmlen, &fattr, &scred);
2024 2724 if (error)
2025 2725 goto out;
2026 2726
2027 2727 /* update attr and directory cache */
2028 2728 smbfs_attr_touchdir(dnp);
2029 2729
2030 2730 error = smbfs_nget(dvp, name, nmlen, &fattr, &vp);
2031 2731 if (error)
2032 2732 goto out;
2033 2733
2034 - /* XXX invalidate pages if we truncated? */
2035 -
2036 2734 /* Success! */
2037 2735 *vpp = vp;
2038 2736 error = 0;
2039 2737
2040 2738 out:
2041 2739 smb_credrele(&scred);
2042 2740 smbfs_rw_exit(&dnp->r_rwlock);
2043 2741 if (name != nm)
2044 2742 smbfs_name_free(name, nmlen);
2045 2743 return (error);
2046 2744 }
2047 2745
|
↓ open down ↓ |
2 lines elided |
↑ open up ↑ |
2048 2746 /*
2049 2747 * XXX
2050 2748 * This op should support the new FIGNORECASE flag for case-insensitive
2051 2749 * lookups, per PSARC 2007/244.
2052 2750 */
2053 2751 /* ARGSUSED */
2054 2752 static int
2055 2753 smbfs_remove(vnode_t *dvp, char *nm, cred_t *cr, caller_context_t *ct,
2056 2754 int flags)
2057 2755 {
2058 - int error;
2059 - vnode_t *vp;
2060 - smbnode_t *np;
2061 - smbnode_t *dnp;
2062 2756 struct smb_cred scred;
2063 - /* enum smbfsstat status; */
2064 - smbmntinfo_t *smi;
2757 + vnode_t *vp = NULL;
2758 + smbnode_t *dnp = VTOSMB(dvp);
2759 + smbmntinfo_t *smi = VTOSMI(dvp);
2760 + int error;
2065 2761
2066 - smi = VTOSMI(dvp);
2067 -
2068 2762 if (curproc->p_zone != smi->smi_zone_ref.zref_zone)
2069 2763 return (EPERM);
2070 2764
2071 2765 if (smi->smi_flags & SMI_DEAD || dvp->v_vfsp->vfs_flag & VFS_UNMOUNTED)
2072 2766 return (EIO);
2073 2767
2074 - dnp = VTOSMB(dvp);
2075 - if (smbfs_rw_enter_sig(&dnp->r_rwlock, RW_WRITER, SMBINTR(dvp)))
2076 - return (EINTR);
2077 - smb_credinit(&scred, cr);
2078 -
2079 2768 /*
2080 2769 	 * Verify access to the directory.
2081 2770 */
2082 2771 error = smbfs_access(dvp, VWRITE|VEXEC, 0, cr, ct);
2083 2772 if (error)
2084 - goto out;
2773 + return (error);
2085 2774
2086 - /*
2087 - * NOTE: the darwin code gets the "vp" passed in so it looks
2088 - * like the "vp" has probably been "lookup"ed by the VFS layer.
2089 - * It looks like we will need to lookup the vp to check the
2090 - * caches and check if the object being deleted is a directory.
2091 - */
2775 + if (smbfs_rw_enter_sig(&dnp->r_rwlock, RW_WRITER, SMBINTR(dvp)))
2776 + return (EINTR);
2777 + smb_credinit(&scred, cr);
2778 +
2779 + /* Lookup the file to remove. */
2092 2780 error = smbfslookup(dvp, nm, &vp, cr, 0, ct);
2093 - if (error)
2781 + if (error != 0)
2094 2782 goto out;
2095 2783
2096 - /* Never allow link/unlink directories on CIFS. */
2784 + /* Don't allow unlink of a directory. */
2097 2785 if (vp->v_type == VDIR) {
2098 - VN_RELE(vp);
2099 2786 error = EPERM;
2100 2787 goto out;
2101 2788 }
2102 2789
2103 2790 /*
2104 - * Now we have the real reference count on the vnode
2105 - * Do we have the file open?
2791 + * Do the real remove work
2106 2792 */
2107 - np = VTOSMB(vp);
2108 - mutex_enter(&np->r_statelock);
2109 - if ((vp->v_count > 1) && (np->n_fidrefs > 0)) {
2110 - /*
2111 - * NFS does a rename on remove here.
2112 - * Probably not applicable for SMB.
2113 - * Like Darwin, just return EBUSY.
2114 - *
2115 - * XXX: Todo - Use Trans2rename, and
2116 - * if that fails, ask the server to
2117 - * set the delete-on-close flag.
2118 - */
2119 - mutex_exit(&np->r_statelock);
2120 - error = EBUSY;
2121 - } else {
2122 - smbfs_attrcache_rm_locked(np);
2123 - mutex_exit(&np->r_statelock);
2793 + error = smbfsremove(dvp, vp, &scred, flags);
2794 + if (error != 0)
2795 + goto out;
2124 2796
2125 - error = smbfs_smb_delete(np, &scred, NULL, 0, 0);
2797 +#ifdef SMBFS_VNEVENT
2798 + vnevent_remove(vp, dvp, nm, ct);
2799 +#endif
2126 2800
2801 +out:
2802 + if (vp != NULL)
2803 + VN_RELE(vp);
2804 +
2805 + smb_credrele(&scred);
2806 + smbfs_rw_exit(&dnp->r_rwlock);
2807 +
2808 + return (error);
2809 +}
2810 +
2811 +/*
2812 + * smbfsremove does the real work of removing in SMBFS
2813 + * Caller has done dir access checks etc.
2814 + *
2815 + * The normal way to delete a file over SMB is open it (with DELETE access),
2816 + * set the "delete-on-close" flag, and close the file. The problem for Unix
2817 + * applications is that they expect the file name to be gone once the unlink
2818 + * completes, and the SMB server does not actually delete the file until ALL
2819 + * opens of that file are closed. We can't assume our open handles are the
2820 + * only open handles on a file we're deleting, so to be safe we'll try to
2821 + * rename the file to a temporary name and then set delete-on-close. If we
2822 + * fail to set delete-on-close (i.e. because other opens prevent it) then
2823 + * undo the changes we made and give up with EBUSY. Note that we might have
2824 + * permission to delete a file but lack permission to rename, so we want to
2825 + * continue in cases where rename fails. As an optimization, only do the
2826 + * rename when we have the file open.
2827 + *
2828 + * This is similar to what NFS does when deleting a file that has local opens,
2829 + * but thanks to SMB delete-on-close, we don't need to keep track of when the
2830 + * last local open goes away and send a delete. The server does that for us.
2831 + */
2832 +/* ARGSUSED */
2833 +static int
2834 +smbfsremove(vnode_t *dvp, vnode_t *vp, struct smb_cred *scred,
2835 + int flags)
2836 +{
2837 + smbnode_t *dnp = VTOSMB(dvp);
2838 + smbnode_t *np = VTOSMB(vp);
2839 + smbmntinfo_t *smi = np->n_mount;
2840 + char *tmpname = NULL;
2841 + int tnlen;
2842 + int error;
2843 + smb_fh_t *fid = NULL;
2844 + boolean_t renamed = B_FALSE;
2845 +
2846 + /*
2847 + * The dvp RWlock must be held as writer.
2848 + */
2849 + ASSERT(dnp->r_rwlock.owner == curthread);
2850 +
2851 + /*
2852 + * We need to flush any dirty pages which happen to
2853 + * be hanging around before removing the file. This
2854 + * shouldn't happen very often and mostly on file
2855 + * systems mounted "nocto".
2856 + */
2857 + if (vn_has_cached_data(vp) &&
2858 + ((np->r_flags & RDIRTY) || np->r_count > 0)) {
2859 + error = smbfs_putpage(vp, (offset_t)0, 0, 0,
2860 + scred->scr_cred, NULL);
2861 + if (error && (error == ENOSPC || error == EDQUOT)) {
2862 + mutex_enter(&np->r_statelock);
2863 + if (!np->r_error)
2864 + np->r_error = error;
2865 + mutex_exit(&np->r_statelock);
2866 + }
2867 + }
2868 +
2869 + /*
2870 + * Get a file handle with delete access.
2871 + * Close this FID before return.
2872 + */
2873 + error = smbfs_smb_tmpopen(np, STD_RIGHT_DELETE_ACCESS,
2874 + scred, &fid);
2875 + if (error) {
2876 + SMBVDEBUG("error %d opening %s\n",
2877 + error, np->n_rpath);
2878 + goto out;
2879 + }
2880 + ASSERT(fid != NULL);
2881 +
2882 + /*
2883 + * If we have the file open, try to rename it to a temporary name.
2884 + * If we can't rename, continue on and try setting DoC anyway.
2885 + * Unnecessary for directories.
2886 + */
2887 + if (vp->v_type != VDIR && vp->v_count > 1 && np->n_fidrefs > 0) {
2888 + tmpname = kmem_alloc(MAXNAMELEN, KM_SLEEP);
2889 + tnlen = smbfs_newname(tmpname, MAXNAMELEN);
2890 + error = smbfs_smb_rename(dnp, np, dnp, tmpname, tnlen,
2891 + fid, scred);
2892 + if (error != 0) {
2893 + SMBVDEBUG("error %d renaming %s -> %s\n",
2894 + error, np->n_rpath, tmpname);
2895 + /* Keep going without the rename. */
2896 + } else {
2897 + renamed = B_TRUE;
2898 + }
2899 + }
2900 +
2901 + /*
2902 + * Mark the file as delete-on-close. If we can't,
2903 + * undo what we did and err out.
2904 + */
2905 + error = smbfs_smb_setdisp(smi->smi_share, fid, 1, scred);
2906 + if (error != 0) {
2907 + SMBVDEBUG("error %d setting DoC on %s\n",
2908 + error, np->n_rpath);
2127 2909 /*
2128 - * If the file should no longer exist, discard
2129 - * any cached attributes under this node.
2910 + * Failed to set DoC. If we renamed, undo that.
2911 + * Need np->n_rpath relative to parent (dnp).
2912 + * Use parent path name length plus one for
2913 + * the separator ('/' or ':')
2130 2914 */
2131 - switch (error) {
2132 - case 0:
2133 - case ENOENT:
2134 - case ENOTDIR:
2135 - smbfs_attrcache_prune(np);
2136 - break;
2915 + if (renamed) {
2916 + char *oldname;
2917 + int oldnlen;
2918 + int err2;
2919 +
2920 + oldname = np->n_rpath + (dnp->n_rplen + 1);
2921 + oldnlen = np->n_rplen - (dnp->n_rplen + 1);
2922 + err2 = smbfs_smb_rename(dnp, np, dnp, oldname, oldnlen,
2923 + fid, scred);
2924 + SMBVDEBUG("error %d un-renaming %s -> %s\n",
2925 + err2, tmpname, np->n_rpath);
2137 2926 }
2927 + error = EBUSY;
2928 + goto out;
2138 2929 }
2930 + /* Done! */
2931 + smbfs_attrcache_remove(np);
2932 + smbfs_attrcache_prune(np);
2139 2933
2140 - VN_RELE(vp);
2141 -
2142 2934 out:
2143 - smb_credrele(&scred);
2144 - smbfs_rw_exit(&dnp->r_rwlock);
2935 + if (tmpname != NULL)
2936 + kmem_free(tmpname, MAXNAMELEN);
2937 + if (fid != NULL)
2938 + smbfs_smb_tmpclose(np, fid);
2145 2939
2940 + if (error == 0) {
2941 + /* Keep lookup from finding this node anymore. */
2942 + smbfs_rmhash(np);
2943 + }
2944 +
2146 2945 return (error);
2147 2946 }
2148 2947
2149 2948
2949 +/* ARGSUSED */
2950 +static int
2951 +smbfs_link(vnode_t *tdvp, vnode_t *svp, char *tnm, cred_t *cr,
2952 + caller_context_t *ct, int flags)
2953 +{
2954 + /* Not yet... */
2955 + return (ENOSYS);
2956 +}
2957 +
2958 +
2150 2959 /*
2151 2960 * XXX
2152 2961 * This op should support the new FIGNORECASE flag for case-insensitive
2153 2962 * lookups, per PSARC 2007/244.
2154 2963 */
2155 2964 /* ARGSUSED */
2156 2965 static int
2157 2966 smbfs_rename(vnode_t *odvp, char *onm, vnode_t *ndvp, char *nnm, cred_t *cr,
2158 2967 caller_context_t *ct, int flags)
2159 2968 {
2160 - /* vnode_t *realvp; */
2969 + struct smb_cred scred;
2970 + smbnode_t *odnp = VTOSMB(odvp);
2971 + smbnode_t *ndnp = VTOSMB(ndvp);
2972 + vnode_t *ovp;
2973 + int error;
2161 2974
2162 2975 if (curproc->p_zone != VTOSMI(odvp)->smi_zone_ref.zref_zone ||
2163 2976 curproc->p_zone != VTOSMI(ndvp)->smi_zone_ref.zref_zone)
2164 2977 return (EPERM);
2165 2978
2166 2979 if (VTOSMI(odvp)->smi_flags & SMI_DEAD ||
2167 2980 VTOSMI(ndvp)->smi_flags & SMI_DEAD ||
2168 2981 odvp->v_vfsp->vfs_flag & VFS_UNMOUNTED ||
2169 2982 ndvp->v_vfsp->vfs_flag & VFS_UNMOUNTED)
2170 2983 return (EIO);
2171 2984
2172 - return (smbfsrename(odvp, onm, ndvp, nnm, cr, ct));
2173 -}
2174 -
2175 -/*
2176 - * smbfsrename does the real work of renaming in SMBFS
2177 - */
2178 -/* ARGSUSED */
2179 -static int
2180 -smbfsrename(vnode_t *odvp, char *onm, vnode_t *ndvp, char *nnm, cred_t *cr,
2181 - caller_context_t *ct)
2182 -{
2183 - int error;
2184 - int nvp_locked = 0;
2185 - vnode_t *nvp = NULL;
2186 - vnode_t *ovp = NULL;
2187 - smbnode_t *onp;
2188 - smbnode_t *nnp;
2189 - smbnode_t *odnp;
2190 - smbnode_t *ndnp;
2191 - struct smb_cred scred;
2192 - /* enum smbfsstat status; */
2193 -
2194 - ASSERT(curproc->p_zone == VTOSMI(odvp)->smi_zone_ref.zref_zone);
2195 -
2196 2985 if (strcmp(onm, ".") == 0 || strcmp(onm, "..") == 0 ||
2197 2986 strcmp(nnm, ".") == 0 || strcmp(nnm, "..") == 0)
2198 2987 return (EINVAL);
2199 2988
2200 2989 /*
2201 2990 * Check that everything is on the same filesystem.
2202 2991 * vn_rename checks the fsid's, but in case we don't
2203 2992 * fill those in correctly, check here too.
2204 2993 */
2205 2994 if (odvp->v_vfsp != ndvp->v_vfsp)
2206 2995 return (EXDEV);
2207 2996
2208 - odnp = VTOSMB(odvp);
2209 - ndnp = VTOSMB(ndvp);
2997 + /*
2998 + * Need write access on source and target.
2999 + * Server takes care of most checks.
3000 + */
3001 + error = smbfs_access(odvp, VWRITE|VEXEC, 0, cr, ct);
3002 + if (error)
3003 + return (error);
3004 + if (odvp != ndvp) {
3005 + error = smbfs_access(ndvp, VWRITE, 0, cr, ct);
3006 + if (error)
3007 + return (error);
3008 + }
2210 3009
2211 3010 /*
3011 + * Need to lock both old/new dirs as writer.
3012 + *
2212 3013 * Avoid deadlock here on old vs new directory nodes
2213 3014 * by always taking the locks in order of address.
2214 3015 * The order is arbitrary, but must be consistent.
2215 3016 */
2216 3017 if (odnp < ndnp) {
2217 3018 if (smbfs_rw_enter_sig(&odnp->r_rwlock, RW_WRITER,
2218 3019 SMBINTR(odvp)))
2219 3020 return (EINTR);
2220 3021 if (smbfs_rw_enter_sig(&ndnp->r_rwlock, RW_WRITER,
2221 3022 SMBINTR(ndvp))) {
2222 3023 smbfs_rw_exit(&odnp->r_rwlock);
2223 3024 return (EINTR);
2224 3025 }
2225 3026 } else {
|
↓ open down ↓ |
4 lines elided |
↑ open up ↑ |
2226 3027 if (smbfs_rw_enter_sig(&ndnp->r_rwlock, RW_WRITER,
2227 3028 SMBINTR(ndvp)))
2228 3029 return (EINTR);
2229 3030 if (smbfs_rw_enter_sig(&odnp->r_rwlock, RW_WRITER,
2230 3031 SMBINTR(odvp))) {
2231 3032 smbfs_rw_exit(&ndnp->r_rwlock);
2232 3033 return (EINTR);
2233 3034 }
2234 3035 }
2235 3036 smb_credinit(&scred, cr);
2236 - /*
2237 - * No returns after this point (goto out)
2238 - */
2239 3037
2240 - /*
2241 - * Need write access on source and target.
2242 - * Server takes care of most checks.
2243 - */
2244 - error = smbfs_access(odvp, VWRITE|VEXEC, 0, cr, ct);
2245 - if (error)
2246 - goto out;
2247 - if (odvp != ndvp) {
2248 - error = smbfs_access(ndvp, VWRITE, 0, cr, ct);
2249 - if (error)
2250 - goto out;
3038 + /* Lookup the "old" name */
3039 + error = smbfslookup(odvp, onm, &ovp, cr, 0, ct);
3040 + if (error == 0) {
3041 + /*
3042 + * Do the real rename work
3043 + */
3044 + error = smbfsrename(odvp, ovp, ndvp, nnm, &scred, flags);
3045 + VN_RELE(ovp);
2251 3046 }
2252 3047
2253 - /*
2254 - * Lookup the source name. Must already exist.
2255 - */
2256 - error = smbfslookup(odvp, onm, &ovp, cr, 0, ct);
2257 - if (error)
2258 - goto out;
3048 + smb_credrele(&scred);
3049 + smbfs_rw_exit(&odnp->r_rwlock);
3050 + smbfs_rw_exit(&ndnp->r_rwlock);
2259 3051
3052 + return (error);
3053 +}
3054 +
3055 +/*
3056 + * smbfsrename does the real work of renaming in SMBFS
3057 + * Caller has done dir access checks etc.
3058 + */
3059 +/* ARGSUSED */
3060 +static int
3061 +smbfsrename(vnode_t *odvp, vnode_t *ovp, vnode_t *ndvp, char *nnm,
3062 + struct smb_cred *scred, int flags)
3063 +{
3064 + smbnode_t *odnp = VTOSMB(odvp);
3065 + smbnode_t *onp = VTOSMB(ovp);
3066 + smbnode_t *ndnp = VTOSMB(ndvp);
3067 + vnode_t *nvp = NULL;
3068 + int error;
3069 + int nvp_locked = 0;
3070 + smb_fh_t *fid = NULL;
3071 +
3072 + /* Things our caller should have checked. */
3073 + ASSERT(curproc->p_zone == VTOSMI(odvp)->smi_zone_ref.zref_zone);
3074 + ASSERT(odvp->v_vfsp == ndvp->v_vfsp);
3075 + ASSERT(odnp->r_rwlock.owner == curthread);
3076 + ASSERT(ndnp->r_rwlock.owner == curthread);
3077 +
2260 3078 /*
2261 3079 * Lookup the target file. If it exists, it needs to be
2262 3080 * checked to see whether it is a mount point and whether
2263 3081 * it is active (open).
2264 3082 */
2265 - error = smbfslookup(ndvp, nnm, &nvp, cr, 0, ct);
3083 + error = smbfslookup(ndvp, nnm, &nvp, scred->scr_cred, 0, NULL);
2266 3084 if (!error) {
2267 3085 /*
2268 3086 * Target (nvp) already exists. Check that it
2269 3087 * has the same type as the source. The server
2270 3088 * will check this also, (and more reliably) but
2271 3089 * this lets us return the correct error codes.
2272 3090 */
2273 3091 if (ovp->v_type == VDIR) {
2274 3092 if (nvp->v_type != VDIR) {
2275 3093 error = ENOTDIR;
2276 3094 goto out;
2277 3095 }
2278 3096 } else {
2279 3097 if (nvp->v_type == VDIR) {
2280 3098 error = EISDIR;
2281 3099 goto out;
2282 3100 }
2283 3101 }
2284 3102
2285 3103 /*
2286 3104 * POSIX dictates that when the source and target
2287 3105 * entries refer to the same file object, rename
2288 3106 * must do nothing and exit without error.
2289 3107 */
2290 3108 if (ovp == nvp) {
2291 3109 error = 0;
2292 3110 goto out;
2293 3111 }
2294 3112
2295 3113 /*
2296 3114 * Also must ensure the target is not a mount point,
2297 3115 * and keep mount/umount away until we're done.
2298 3116 */
2299 3117 if (vn_vfsrlock(nvp)) {
|
↓ open down ↓ |
24 lines elided |
↑ open up ↑ |
2300 3118 error = EBUSY;
2301 3119 goto out;
2302 3120 }
2303 3121 nvp_locked = 1;
2304 3122 if (vn_mountedvfs(nvp) != NULL) {
2305 3123 error = EBUSY;
2306 3124 goto out;
2307 3125 }
2308 3126
2309 3127 /*
2310 - * CIFS gives a SHARING_VIOLATION error when
3128 + * CIFS may give a SHARING_VIOLATION error when
2311 3130 		 * trying to rename onto an existing object,
2312 3130 * so try to remove the target first.
2313 3131 * (Only for files, not directories.)
2314 3132 */
2315 3133 if (nvp->v_type == VDIR) {
2316 3134 error = EEXIST;
2317 3135 goto out;
2318 3136 }
2319 -
2320 - /*
2321 - * Nodes that are "not active" here have v_count=2
2322 - * because vn_renameat (our caller) did a lookup on
2323 - * both the source and target before this call.
2324 - * Otherwise this similar to smbfs_remove.
2325 - */
2326 - nnp = VTOSMB(nvp);
2327 - mutex_enter(&nnp->r_statelock);
2328 - if ((nvp->v_count > 2) && (nnp->n_fidrefs > 0)) {
2329 - /*
2330 - * The target file exists, is not the same as
2331 - * the source file, and is active. Other FS
2332 - * implementations unlink the target here.
2333 - * For SMB, we don't assume we can remove an
2334 - * open file. Return an error instead.
2335 - */
2336 - mutex_exit(&nnp->r_statelock);
2337 - error = EBUSY;
3137 + error = smbfsremove(ndvp, nvp, scred, flags);
3138 + if (error != 0)
2338 3139 goto out;
2339 - }
2340 3140
2341 3141 /*
2342 - * Target file is not active. Try to remove it.
2343 - */
2344 - smbfs_attrcache_rm_locked(nnp);
2345 - mutex_exit(&nnp->r_statelock);
2346 -
2347 - error = smbfs_smb_delete(nnp, &scred, NULL, 0, 0);
2348 -
2349 - /*
2350 - * Similar to smbfs_remove
2351 - */
2352 - switch (error) {
2353 - case 0:
2354 - case ENOENT:
2355 - case ENOTDIR:
2356 - smbfs_attrcache_prune(nnp);
2357 - break;
2358 - }
2359 -
2360 - if (error)
2361 - goto out;
2362 - /*
2363 3142 * OK, removed the target file. Continue as if
2364 3143 * lookup target had failed (nvp == NULL).
2365 3144 */
2366 3145 vn_vfsunlock(nvp);
2367 3146 nvp_locked = 0;
2368 3147 VN_RELE(nvp);
2369 3148 nvp = NULL;
2370 3149 } /* nvp */
2371 3150
2372 - onp = VTOSMB(ovp);
3151 + /*
3152 + * Get a file handle with delete access.
3153 + * Close this FID before return.
3154 + */
3155 + error = smbfs_smb_tmpopen(onp, STD_RIGHT_DELETE_ACCESS,
3156 + scred, &fid);
3157 + if (error) {
3158 + SMBVDEBUG("error %d opening %s\n",
3159 + error, onp->n_rpath);
3160 + goto out;
3161 + }
3162 +
2373 3163 smbfs_attrcache_remove(onp);
3164 + error = smbfs_smb_rename(odnp, onp, ndnp, nnm, strlen(nnm),
3165 + fid, scred);
2374 3166
2375 - error = smbfs_smb_rename(onp, ndnp, nnm, strlen(nnm), &scred);
3167 + smbfs_smb_tmpclose(onp, fid);
2376 3168
2377 3169 /*
2378 3170 * If the old name should no longer exist,
2379 3171 * discard any cached attributes under it.
2380 3172 */
2381 - if (error == 0)
3173 + if (error == 0) {
2382 3174 smbfs_attrcache_prune(onp);
3175 + /* SMBFS_VNEVENT... */
3176 + }
2383 3177
2384 3178 out:
2385 3179 if (nvp) {
2386 3180 if (nvp_locked)
2387 3181 vn_vfsunlock(nvp);
2388 3182 VN_RELE(nvp);
2389 3183 }
2390 - if (ovp)
2391 - VN_RELE(ovp);
2392 3184
2393 - smb_credrele(&scred);
2394 - smbfs_rw_exit(&odnp->r_rwlock);
2395 - smbfs_rw_exit(&ndnp->r_rwlock);
2396 -
2397 3185 return (error);
2398 3186 }
2399 3187
2400 3188 /*
2401 3189 * XXX
2402 3190 * vsecattr_t is new to build 77, and we need to eventually support
2403 3191 * it in order to create an ACL when an object is created.
2404 3192 *
2405 3193 * This op should support the new FIGNORECASE flag for case-insensitive
2406 3194 * lookups, per PSARC 2007/244.
2407 3195 */
2408 3196 /* ARGSUSED */
2409 3197 static int
|
↓ open down ↓ |
3 lines elided |
↑ open up ↑ |
2410 3198 smbfs_mkdir(vnode_t *dvp, char *nm, struct vattr *va, vnode_t **vpp,
2411 3199 cred_t *cr, caller_context_t *ct, int flags, vsecattr_t *vsecp)
2412 3200 {
2413 3201 vnode_t *vp;
2414 3202 struct smbnode *dnp = VTOSMB(dvp);
2415 3203 struct smbmntinfo *smi = VTOSMI(dvp);
2416 3204 struct smb_cred scred;
2417 3205 struct smbfattr fattr;
2418 3206 const char *name = (const char *) nm;
2419 3207 int nmlen = strlen(name);
2420 - int error, hiderr;
3208 + int error;
2421 3209
2422 3210 if (curproc->p_zone != smi->smi_zone_ref.zref_zone)
2423 3211 return (EPERM);
2424 3212
2425 3213 if (smi->smi_flags & SMI_DEAD || dvp->v_vfsp->vfs_flag & VFS_UNMOUNTED)
2426 3214 return (EIO);
2427 3215
2428 3216 if ((nmlen == 1 && name[0] == '.') ||
2429 3217 (nmlen == 2 && name[0] == '.' && name[1] == '.'))
2430 3218 return (EEXIST);
2431 3219
2432 3220 /* Only plain files are allowed in V_XATTRDIR. */
2433 3221 if (dvp->v_flag & V_XATTRDIR)
2434 3222 return (EINVAL);
2435 3223
2436 3224 if (smbfs_rw_enter_sig(&dnp->r_rwlock, RW_WRITER, SMBINTR(dvp)))
2437 3225 return (EINTR);
2438 3226 smb_credinit(&scred, cr);
2439 3227
2440 3228 /*
2441 - * XXX: Do we need r_lkserlock too?
2442 - * No use of any shared fid or fctx...
2443 - */
2444 -
2445 - /*
2446 3229 * Require write access in the containing directory.
2447 3230 */
2448 3231 error = smbfs_access(dvp, VWRITE, 0, cr, ct);
2449 3232 if (error)
2450 3233 goto out;
2451 3234
2452 3235 error = smbfs_smb_mkdir(dnp, name, nmlen, &scred);
2453 3236 if (error)
2454 3237 goto out;
2455 3238
2456 3239 error = smbfs_smb_lookup(dnp, &name, &nmlen, &fattr, &scred);
2457 3240 if (error)
2458 3241 goto out;
2459 3242
2460 3243 smbfs_attr_touchdir(dnp);
2461 3244
2462 3245 error = smbfs_nget(dvp, name, nmlen, &fattr, &vp);
2463 3246 if (error)
2464 3247 goto out;
2465 3248
2466 - if (name[0] == '.')
2467 - if ((hiderr = smbfs_smb_hideit(VTOSMB(vp), NULL, 0, &scred)))
2468 - SMBVDEBUG("hide failure %d\n", hiderr);
2469 -
2470 3249 /* Success! */
2471 3250 *vpp = vp;
2472 3251 error = 0;
2473 3252 out:
2474 3253 smb_credrele(&scred);
2475 3254 smbfs_rw_exit(&dnp->r_rwlock);
2476 3255
2477 3256 if (name != nm)
2478 3257 smbfs_name_free(name, nmlen);
2479 3258
2480 3259 return (error);
2481 3260 }
2482 3261
|
↓ open down ↓ |
3 lines elided |
↑ open up ↑ |
2483 3262 /*
2484 3263 * XXX
2485 3264 * This op should support the new FIGNORECASE flag for case-insensitive
2486 3265 * lookups, per PSARC 2007/244.
2487 3266 */
2488 3267 /* ARGSUSED */
2489 3268 static int
2490 3269 smbfs_rmdir(vnode_t *dvp, char *nm, vnode_t *cdir, cred_t *cr,
2491 3270 caller_context_t *ct, int flags)
2492 3271 {
3272 + struct smb_cred scred;
2493 3273 vnode_t *vp = NULL;
2494 3274 int vp_locked = 0;
2495 3275 struct smbmntinfo *smi = VTOSMI(dvp);
2496 3276 struct smbnode *dnp = VTOSMB(dvp);
2497 3277 struct smbnode *np;
2498 - struct smb_cred scred;
2499 3278 int error;
2500 3279
2501 3280 if (curproc->p_zone != smi->smi_zone_ref.zref_zone)
2502 3281 return (EPERM);
2503 3282
2504 3283 if (smi->smi_flags & SMI_DEAD || dvp->v_vfsp->vfs_flag & VFS_UNMOUNTED)
2505 3284 return (EIO);
2506 3285
3286 + /*
3287 +	 * Verify access to the directory.
3288 + */
3289 + error = smbfs_access(dvp, VWRITE|VEXEC, 0, cr, ct);
3290 + if (error)
3291 + return (error);
3292 +
2507 3293 if (smbfs_rw_enter_sig(&dnp->r_rwlock, RW_WRITER, SMBINTR(dvp)))
2508 3294 return (EINTR);
2509 3295 smb_credinit(&scred, cr);
2510 3296
2511 3297 /*
2512 - * Require w/x access in the containing directory.
2513 - * Server handles all other access checks.
2514 - */
2515 - error = smbfs_access(dvp, VEXEC|VWRITE, 0, cr, ct);
2516 - if (error)
2517 - goto out;
2518 -
2519 - /*
2520 3298 * First lookup the entry to be removed.
2521 3299 */
2522 3300 error = smbfslookup(dvp, nm, &vp, cr, 0, ct);
2523 3301 if (error)
2524 3302 goto out;
2525 3303 np = VTOSMB(vp);
2526 3304
2527 3305 /*
2528 3306 * Disallow rmdir of "." or current dir, or the FS root.
2529 3307 * Also make sure it's a directory, not a mount point,
2530 3308 * and lock to keep mount/umount away until we're done.
2531 3309 */
2532 3310 if ((vp == dvp) || (vp == cdir) || (vp->v_flag & VROOT)) {
2533 3311 error = EINVAL;
2534 3312 goto out;
2535 3313 }
2536 3314 if (vp->v_type != VDIR) {
2537 3315 error = ENOTDIR;
2538 3316 goto out;
2539 3317 }
|
↓ open down ↓ |
10 lines elided |
↑ open up ↑ |
2540 3318 if (vn_vfsrlock(vp)) {
2541 3319 error = EBUSY;
2542 3320 goto out;
2543 3321 }
2544 3322 vp_locked = 1;
2545 3323 if (vn_mountedvfs(vp) != NULL) {
2546 3324 error = EBUSY;
2547 3325 goto out;
2548 3326 }
2549 3327
2550 - smbfs_attrcache_remove(np);
2551 - error = smbfs_smb_rmdir(np, &scred);
2552 -
2553 3328 /*
2554 - * Similar to smbfs_remove
3329 + * Do the real rmdir work
2555 3330 */
2556 - switch (error) {
2557 - case 0:
2558 - case ENOENT:
2559 - case ENOTDIR:
2560 - smbfs_attrcache_prune(np);
2561 - break;
2562 - }
2563 -
3331 + error = smbfsremove(dvp, vp, &scred, flags);
2564 3332 if (error)
2565 3333 goto out;
2566 3334
3335 +#ifdef SMBFS_VNEVENT
3336 + vnevent_rmdir(vp, dvp, nm, ct);
3337 +#endif
3338 +
2567 3339 mutex_enter(&np->r_statelock);
2568 3340 dnp->n_flag |= NMODIFIED;
2569 3341 mutex_exit(&np->r_statelock);
2570 3342 smbfs_attr_touchdir(dnp);
2571 3343 smbfs_rmhash(np);
2572 3344
2573 3345 out:
2574 3346 if (vp) {
2575 3347 if (vp_locked)
2576 3348 vn_vfsunlock(vp);
2577 3349 VN_RELE(vp);
|
↓ open down ↓ |
1 lines elided |
↑ open up ↑ |
2578 3350 }
2579 3351 smb_credrele(&scred);
2580 3352 smbfs_rw_exit(&dnp->r_rwlock);
2581 3353
2582 3354 return (error);
2583 3355 }
2584 3356
2585 3357
2586 3358 /* ARGSUSED */
2587 3359 static int
3360 +smbfs_symlink(vnode_t *dvp, char *lnm, struct vattr *tva, char *tnm, cred_t *cr,
3361 + caller_context_t *ct, int flags)
3362 +{
3363 + /* Not yet... */
3364 + return (ENOSYS);
3365 +}
3366 +
3367 +
3368 +/* ARGSUSED */
3369 +static int
2588 3370 smbfs_readdir(vnode_t *vp, struct uio *uiop, cred_t *cr, int *eofp,
2589 3371 caller_context_t *ct, int flags)
2590 3372 {
2591 3373 struct smbnode *np = VTOSMB(vp);
2592 3374 int error = 0;
2593 3375 smbmntinfo_t *smi;
2594 3376
2595 3377 smi = VTOSMI(vp);
2596 3378
2597 3379 if (curproc->p_zone != smi->smi_zone_ref.zref_zone)
2598 3380 return (EIO);
2599 3381
2600 3382 if (smi->smi_flags & SMI_DEAD || vp->v_vfsp->vfs_flag & VFS_UNMOUNTED)
2601 3383 return (EIO);
2602 3384
|
↓ open down ↓ |
5 lines elided |
↑ open up ↑ |
2603 3385 /*
2604 3386 * Require read access in the directory.
2605 3387 */
2606 3388 error = smbfs_access(vp, VREAD, 0, cr, ct);
2607 3389 if (error)
2608 3390 return (error);
2609 3391
2610 3392 ASSERT(smbfs_rw_lock_held(&np->r_rwlock, RW_READER));
2611 3393
2612 3394 /*
2613 - * XXX: Todo readdir cache here
2614 - * Note: NFS code is just below this.
3395 + * Todo readdir cache here
2615 3396 *
2616 3397 	 * I am serializing the entire readdir operation
2617 3398 * now since we have not yet implemented readdir
2618 3399 * cache. This fix needs to be revisited once
2619 3400 * we implement readdir cache.
2620 3401 */
2621 3402 if (smbfs_rw_enter_sig(&np->r_lkserlock, RW_WRITER, SMBINTR(vp)))
2622 3403 return (EINTR);
2623 3404
2624 3405 error = smbfs_readvdir(vp, uiop, cr, eofp, ct);
2625 3406
2626 3407 smbfs_rw_exit(&np->r_lkserlock);
2627 3408
2628 3409 return (error);
2629 3410 }
2630 3411
/*
 * Do the real work of readdir: pull directory entries from the
 * server and convert them into dirent64 records in the caller's
 * uio.  Caller (smbfs_readdir) holds r_lkserlock as WRITER to
 * serialize use of the per-node find context (n_dirseq).
 *
 * Directory "offsets" here are cookies that simply count entries:
 * cookie 0 is ".", cookie 1 is "..", and real entries start at
 * FIRST_DIROFS (== 2, see the comment near the "out:" label).
 * uio_offset is overwritten after each uiomove so it always holds
 * the cookie of the next entry, not a byte offset.
 *
 * Returns 0 on success (setting *eofp at end of directory),
 * or an errno.  EBADF if smbfs_open never set up n_dirseq;
 * EINVAL on cookie overflow or a buffer too small for one entry.
 */
/* ARGSUSED */
static int
smbfs_readvdir(vnode_t *vp, uio_t *uio, cred_t *cr, int *eofp,
	caller_context_t *ct)
{
	/*
	 * Note: "limit" tells the SMB-level FindFirst/FindNext
	 * functions how many directory entries to request in
	 * each OtW call.  It needs to be large enough so that
	 * we don't make lots of tiny OtW requests, but there's
	 * no point making it larger than the maximum number of
	 * OtW entries that would fit in a maximum sized trans2
	 * response (64k / 48).  Beyond that, it's just tuning.
	 * WinNT used 512, Win2k used 1366.  We use 1000.
	 */
	static const int limit = 1000;
	/* Largest possible dirent size. */
	static const size_t dbufsiz = DIRENT64_RECLEN(SMB_MAXFNAMELEN);
	struct smb_cred scred;
	vnode_t *newvp;
	struct smbnode *np = VTOSMB(vp);
	struct smbfs_fctx *ctx;
	struct dirent64 *dp;
	ssize_t save_resid;
	offset_t save_offset; /* 64 bits */
	int offset; /* yes, 32 bits */
	int nmlen, error;
	ushort_t reclen;

	ASSERT(curproc->p_zone == VTOSMI(vp)->smi_zone_ref.zref_zone);

	/* Make sure we serialize for n_dirseq use. */
	ASSERT(smbfs_rw_lock_held(&np->r_lkserlock, RW_WRITER));

	/*
	 * Make sure smbfs_open filled in n_dirseq
	 */
	if (np->n_dirseq == NULL)
		return (EBADF);

	/* Check for overflow of (32-bit) directory offset. */
	if (uio->uio_loffset < 0 || uio->uio_loffset > INT32_MAX ||
	    (uio->uio_loffset + uio->uio_resid) > INT32_MAX)
		return (EINVAL);

	/* Require space for at least one dirent. */
	if (uio->uio_resid < dbufsiz)
		return (EINVAL);

	SMBVDEBUG("dirname='%s'\n", np->n_rpath);
	smb_credinit(&scred, cr);
	/* Staging buffer for one (maximum sized) dirent at a time. */
	dp = kmem_alloc(dbufsiz, KM_SLEEP);

	/* Saved so we can undo partial progress on early failure. */
	save_resid = uio->uio_resid;
	save_offset = uio->uio_loffset;
	offset = uio->uio_offset;
	SMBVDEBUG("in: offset=%d, resid=%d\n",
	    (int)uio->uio_offset, (int)uio->uio_resid);
	error = 0;

	/*
	 * Generate the "." and ".." entries here so we can
	 * (1) make sure they appear (but only once), and
	 * (2) deal with getting their I numbers which the
	 * findnext below does only for normal names.
	 */
	while (offset < FIRST_DIROFS) {
		/*
		 * Tricky bit filling in the first two:
		 * offset 0 is ".", offset 1 is ".."
		 * so strlen of these is offset+1.
		 */
		reclen = DIRENT64_RECLEN(offset + 1);
		if (uio->uio_resid < reclen)
			goto out;
		bzero(dp, reclen);
		dp->d_reclen = reclen;
		dp->d_name[0] = '.';
		dp->d_name[1] = '.';
		dp->d_name[offset + 1] = '\0';
		/*
		 * Want the real I-numbers for the "." and ".."
		 * entries.  For these two names, we know that
		 * smbfslookup can get the nodes efficiently.
		 */
		error = smbfslookup(vp, dp->d_name, &newvp, cr, 1, ct);
		if (error) {
			dp->d_ino = np->n_ino + offset; /* fiction */
		} else {
			dp->d_ino = VTOSMB(newvp)->n_ino;
			VN_RELE(newvp);
		}
		/*
		 * Note: d_off is the offset that a user-level program
		 * should seek to for reading the NEXT directory entry.
		 * See libc: readdir, telldir, seekdir
		 */
		dp->d_off = offset + 1;
		error = uiomove(dp, reclen, UIO_READ, uio);
		if (error)
			goto out;
		/*
		 * Note: uiomove updates uio->uio_offset,
		 * but we want it to be our "cookie" value,
		 * which just counts dirents ignoring size.
		 */
		uio->uio_offset = ++offset;
	}

	/*
	 * If there was a backward seek, we have to reopen.
	 */
	if (offset < np->n_dirofs) {
		SMBVDEBUG("Reopening search %d:%d\n",
		    offset, np->n_dirofs);
		error = smbfs_smb_findopen(np, "*", 1,
		    SMB_FA_SYSTEM | SMB_FA_HIDDEN | SMB_FA_DIR,
		    &scred, &ctx);
		if (error) {
			SMBVDEBUG("can not open search, error = %d", error);
			goto out;
		}
		/* free the old one */
		(void) smbfs_smb_findclose(np->n_dirseq, &scred);
		/* save the new one */
		np->n_dirseq = ctx;
		np->n_dirofs = FIRST_DIROFS;
	} else {
		ctx = np->n_dirseq;
	}

	/*
	 * Skip entries before the requested offset.
	 */
	while (np->n_dirofs < offset) {
		error = smbfs_smb_findnext(ctx, limit, &scred);
		if (error != 0)
			goto out;
		np->n_dirofs++;
	}

	/*
	 * While there's room in the caller's buffer:
	 * get a directory entry from SMB,
	 * convert to a dirent, copyout.
	 * We stop when there is no longer room for a
	 * maximum sized dirent because we must decide
	 * before we know anything about the next entry.
	 */
	while (uio->uio_resid >= dbufsiz) {
		error = smbfs_smb_findnext(ctx, limit, &scred);
		if (error != 0)
			goto out;
		np->n_dirofs++;

		/* Sanity check the name length. */
		nmlen = ctx->f_nmlen;
		if (nmlen > SMB_MAXFNAMELEN) {
			nmlen = SMB_MAXFNAMELEN;
			SMBVDEBUG("Truncating name: %s\n", ctx->f_name);
		}
		if (smbfs_fastlookup) {
			/* See comment at smbfs_fastlookup above. */
			if (smbfs_nget(vp, ctx->f_name, nmlen,
			    &ctx->f_attr, &newvp) == 0)
				VN_RELE(newvp);
		}

		reclen = DIRENT64_RECLEN(nmlen);
		bzero(dp, reclen);
		dp->d_reclen = reclen;
		bcopy(ctx->f_name, dp->d_name, nmlen);
		dp->d_name[nmlen] = '\0';
		dp->d_ino = ctx->f_inum;
		dp->d_off = offset + 1;	/* See d_off comment above */
		error = uiomove(dp, reclen, UIO_READ, uio);
		if (error)
			goto out;
		/* See comment re. uio_offset above. */
		uio->uio_offset = ++offset;
	}

out:
	/*
	 * When we come to the end of a directory, the
	 * SMB-level functions return ENOENT, but the
	 * caller is not expecting an error return.
	 *
	 * Also note that we must delay the call to
	 * smbfs_smb_findclose(np->n_dirseq, ...)
	 * until smbfs_close so that all reads at the
	 * end of the directory will return no data.
	 */
	if (error == ENOENT) {
		error = 0;
		if (eofp)
			*eofp = 1;
	}
	/*
	 * If we encountered an error (i.e. "access denied")
	 * from the FindFirst call, we will have copied out
	 * the "." and ".." entries leaving offset == 2.
	 * In that case, restore the original offset/resid
	 * so the caller gets no data with the error.
	 */
	if (error != 0 && offset == FIRST_DIROFS) {
		uio->uio_loffset = save_offset;
		uio->uio_resid = save_resid;
	}
	SMBVDEBUG("out: offset=%d, resid=%d\n",
	    (int)uio->uio_offset, (int)uio->uio_resid);

	kmem_free(dp, dbufsiz);
	smb_credrele(&scred);
	return (error);
}
2847 3628
3629 +/*
3630 + * Here NFS has: nfs3_bio
3631 + * See smbfs_bio above.
3632 + */
2848 3633
/*
 * VOP_FID: produce a persistent file identifier for this vnode.
 * Not implemented for smbfs; always fails with ENOSYS.
 */
/* ARGSUSED */
static int
smbfs_fid(vnode_t *vp, fid_t *fidp, caller_context_t *ct)
{
	return (ENOSYS);
}
3640 +
3641 +
2849 3642 /*
2850 3643 * The pair of functions VOP_RWLOCK, VOP_RWUNLOCK
2851 3644 * are optional functions that are called by:
2852 3645 * getdents, before/after VOP_READDIR
2853 3646 * pread, before/after ... VOP_READ
2854 3647 * pwrite, before/after ... VOP_WRITE
2855 3648 * (other places)
2856 3649 *
2857 3650 * Careful here: None of the above check for any
2858 3651 * error returns from VOP_RWLOCK / VOP_RWUNLOCK!
2859 3652 * In fact, the return value from _rwlock is NOT
2860 3653 * an error code, but V_WRITELOCK_TRUE / _FALSE.
2861 3654 *
2862 3655 * Therefore, it's up to _this_ code to make sure
2863 3656 * the lock state remains balanced, which means
2864 3657 * we can't "bail out" on interrupts, etc.
2865 3658 */
2866 3659
2867 3660 /* ARGSUSED2 */
2868 3661 static int
2869 3662 smbfs_rwlock(vnode_t *vp, int write_lock, caller_context_t *ctp)
2870 3663 {
2871 3664 smbnode_t *np = VTOSMB(vp);
2872 3665
2873 3666 if (!write_lock) {
2874 3667 (void) smbfs_rw_enter_sig(&np->r_rwlock, RW_READER, FALSE);
2875 3668 return (V_WRITELOCK_FALSE);
2876 3669 }
2877 3670
2878 3671
2879 3672 (void) smbfs_rw_enter_sig(&np->r_rwlock, RW_WRITER, FALSE);
2880 3673 return (V_WRITELOCK_TRUE);
2881 3674 }
2882 3675
2883 3676 /* ARGSUSED */
2884 3677 static void
2885 3678 smbfs_rwunlock(vnode_t *vp, int write_lock, caller_context_t *ctp)
2886 3679 {
2887 3680 smbnode_t *np = VTOSMB(vp);
2888 3681
2889 3682 smbfs_rw_exit(&np->r_rwlock);
2890 3683 }
2891 3684
2892 3685
2893 3686 /* ARGSUSED */
2894 3687 static int
2895 3688 smbfs_seek(vnode_t *vp, offset_t ooff, offset_t *noffp, caller_context_t *ct)
2896 3689 {
2897 3690 smbmntinfo_t *smi;
2898 3691
2899 3692 smi = VTOSMI(vp);
2900 3693
2901 3694 if (curproc->p_zone != smi->smi_zone_ref.zref_zone)
2902 3695 return (EPERM);
2903 3696
2904 3697 if (smi->smi_flags & SMI_DEAD || vp->v_vfsp->vfs_flag & VFS_UNMOUNTED)
2905 3698 return (EIO);
2906 3699
2907 3700 /*
2908 3701 * Because we stuff the readdir cookie into the offset field
2909 3702 * someone may attempt to do an lseek with the cookie which
2910 3703 * we want to succeed.
2911 3704 */
|
↓ open down ↓ |
53 lines elided |
↑ open up ↑ |
2912 3705 if (vp->v_type == VDIR)
2913 3706 return (0);
2914 3707
2915 3708 /* Like NFS3, just check for 63-bit overflow. */
2916 3709 if (*noffp < 0)
2917 3710 return (EINVAL);
2918 3711
2919 3712 return (0);
2920 3713 }
2921 3714
3715 +/* mmap support ******************************************************** */
2922 3716
3717 +#ifdef _KERNEL
3718 +
3719 +#ifdef DEBUG
3720 +static int smbfs_lostpage = 0; /* number of times we lost original page */
3721 +#endif
3722 +
/*
 * VOP_GETPAGE: return all the pages from [off..off+len) in file.
 * Like nfs3_getpage.  The real work is done page-at-a-time by
 * smbfs_getapage() via pvn_getpages(); this wrapper validates the
 * request, refreshes caches, and handles the SMBFS_EOF-retry and
 * ESTALE cases.
 */
/* ARGSUSED */
static int
smbfs_getpage(vnode_t *vp, offset_t off, size_t len, uint_t *protp,
	page_t *pl[], size_t plsz, struct seg *seg, caddr_t addr,
	enum seg_rw rw, cred_t *cr, caller_context_t *ct)
{
	smbnode_t *np;
	smbmntinfo_t *smi;
	int error;

	np = VTOSMB(vp);
	smi = VTOSMI(vp);

	if (curproc->p_zone != smi->smi_zone_ref.zref_zone)
		return (EIO);

	if (smi->smi_flags & SMI_DEAD || vp->v_vfsp->vfs_flag & VFS_UNMOUNTED)
		return (EIO);

	if (vp->v_flag & VNOMAP)
		return (ENOSYS);

	if (protp != NULL)
		*protp = PROT_ALL;

	/*
	 * Now validate that the caches are up to date.
	 */
	error = smbfs_validate_caches(vp, cr);
	if (error)
		return (error);

retry:
	mutex_enter(&np->r_statelock);

	/*
	 * Don't create dirty pages faster than they
	 * can be cleaned ... (etc. see nfs)
	 *
	 * Here NFS also tests:
	 *  (mi->mi_max_threads != 0 &&
	 *  rp->r_awcount > 2 * mi->mi_max_threads)
	 */
	if (rw == S_CREATE) {
		while (np->r_gcount > 0)
			cv_wait(&np->r_cv, &np->r_statelock);
	}

	/*
	 * If we are getting called as a side effect of a write
	 * operation the local file size might not be extended yet.
	 * In this case we want to be able to return pages of zeroes.
	 */
	if (off + len > np->r_size + PAGEOFFSET && seg != segkmap) {
		mutex_exit(&np->r_statelock);
		return (EFAULT);	/* beyond EOF */
	}

	mutex_exit(&np->r_statelock);

	error = pvn_getpages(smbfs_getapage, vp, off, len, protp,
	    pl, plsz, seg, addr, rw, cr);

	switch (error) {
	case SMBFS_EOF:
		/* File may have shrunk on the server; refresh and retry. */
		smbfs_purge_caches(vp, cr);
		goto retry;
	case ESTALE:
		/*
		 * Here NFS has: PURGE_STALE_FH(error, vp, cr);
		 * In-line here as we only use it once.
		 */
		mutex_enter(&np->r_statelock);
		np->r_flags |= RSTALE;
		if (!np->r_error)
			np->r_error = (error);
		mutex_exit(&np->r_statelock);
		if (vn_has_cached_data(vp))
			smbfs_invalidate_pages(vp, (u_offset_t)0, cr);
		smbfs_purge_caches(vp, cr);
		break;
	default:
		break;
	}

	return (error);
}
3814 +
/*
 * Called from pvn_getpages to get a particular page.
 * Like nfs3_getapage.
 *
 * With pl == NULL this would be an async readahead request (not
 * implemented yet; see the Todo below).  Otherwise, find or create
 * the page at "off", read it (and kluster-adjacent pages) from the
 * server as needed, and return the page list in pl[].
 */
/* ARGSUSED */
static int
smbfs_getapage(vnode_t *vp, u_offset_t off, size_t len, uint_t *protp,
	page_t *pl[], size_t plsz, struct seg *seg, caddr_t addr,
	enum seg_rw rw, cred_t *cr)
{
	smbnode_t *np;
	smbmntinfo_t *smi;

	uint_t bsize;
	struct buf *bp;
	page_t *pp;
	u_offset_t lbn;
	u_offset_t io_off;
	u_offset_t blkoff;
	size_t io_len;
	uint_t blksize;
	int error;
	/* int readahead; */
	int readahead_issued = 0;
	/* int ra_window; * readahead window */
	page_t *pagefound;

	np = VTOSMB(vp);
	smi = VTOSMI(vp);

	if (curproc->p_zone != smi->smi_zone_ref.zref_zone)
		return (EIO);

	if (smi->smi_flags & SMI_DEAD || vp->v_vfsp->vfs_flag & VFS_UNMOUNTED)
		return (EIO);

	bsize = MAX(vp->v_vfsp->vfs_bsize, PAGESIZE);

reread:
	bp = NULL;
	pp = NULL;
	pagefound = NULL;

	if (pl != NULL)
		pl[0] = NULL;

	error = 0;
	lbn = off / bsize;
	blkoff = lbn * bsize;

	/*
	 * NFS queues up readahead work here.
	 */

again:
	if ((pagefound = page_exists(vp, off)) == NULL) {
		if (pl == NULL) {
			(void) 0; /* Todo: smbfs_async_readahead(); */
		} else if (rw == S_CREATE) {
			/*
			 * Block for this page is not allocated, or the offset
			 * is beyond the current allocation size, or we're
			 * allocating a swap slot and the page was not found,
			 * so allocate it and return a zero page.
			 */
			if ((pp = page_create_va(vp, off,
			    PAGESIZE, PG_WAIT, seg, addr)) == NULL)
				cmn_err(CE_PANIC, "smbfs_getapage: page_create");
			io_len = PAGESIZE;
			mutex_enter(&np->r_statelock);
			np->r_nextr = off + PAGESIZE;
			mutex_exit(&np->r_statelock);
		} else {
			/*
			 * Need to go to server to get a BLOCK, exception to
			 * that being while reading at offset = 0 or doing
			 * random i/o, in that case read only a PAGE.
			 */
			mutex_enter(&np->r_statelock);
			if (blkoff < np->r_size &&
			    blkoff + bsize >= np->r_size) {
				/*
				 * If only a block or less is left in
				 * the file, read all that is remaining.
				 */
				if (np->r_size <= off) {
					/*
					 * Trying to access beyond EOF,
					 * set up to get at least one page.
					 */
					blksize = off + PAGESIZE - blkoff;
				} else
					blksize = np->r_size - blkoff;
			} else if ((off == 0) ||
			    (off != np->r_nextr && !readahead_issued)) {
				blksize = PAGESIZE;
				blkoff = off; /* block = page here */
			} else
				blksize = bsize;
			mutex_exit(&np->r_statelock);

			pp = pvn_read_kluster(vp, off, seg, addr, &io_off,
			    &io_len, blkoff, blksize, 0);

			/*
			 * Some other thread has entered the page,
			 * so just use it.
			 */
			if (pp == NULL)
				goto again;

			/*
			 * Now round the request size up to page boundaries.
			 * This ensures that the entire page will be
			 * initialized to zeroes if EOF is encountered.
			 */
			io_len = ptob(btopr(io_len));

			bp = pageio_setup(pp, io_len, vp, B_READ);
			ASSERT(bp != NULL);

			/*
			 * pageio_setup should have set b_addr to 0.  This
			 * is correct since we want to do I/O on a page
			 * boundary.  bp_mapin will use this addr to calculate
			 * an offset, and then set b_addr to the kernel virtual
			 * address it allocated for us.
			 */
			ASSERT(bp->b_un.b_addr == 0);

			bp->b_edev = 0;
			bp->b_dev = 0;
			bp->b_lblkno = lbtodb(io_off);
			bp->b_file = vp;
			bp->b_offset = (offset_t)off;
			bp_mapin(bp);

			/*
			 * If doing a write beyond what we believe is EOF,
			 * don't bother trying to read the pages from the
			 * server, we'll just zero the pages here.  We
			 * don't check that the rw flag is S_WRITE here
			 * because some implementations may attempt a
			 * read access to the buffer before copying data.
			 */
			mutex_enter(&np->r_statelock);
			if (io_off >= np->r_size && seg == segkmap) {
				mutex_exit(&np->r_statelock);
				bzero(bp->b_un.b_addr, io_len);
			} else {
				mutex_exit(&np->r_statelock);
				error = smbfs_bio(bp, 0, cr);
			}

			/*
			 * Unmap the buffer before freeing it.
			 */
			bp_mapout(bp);
			pageio_done(bp);

			/* Here NFS3 updates all pp->p_fsdata */

			if (error == SMBFS_EOF) {
				/*
				 * If doing a write system call just return
				 * zeroed pages, else user tried to get pages
				 * beyond EOF, return error.  We don't check
				 * that the rw flag is S_WRITE here because
				 * some implementations may attempt a read
				 * access to the buffer before copying data.
				 */
				if (seg == segkmap)
					error = 0;
				else
					error = EFAULT;
			}

			if (!readahead_issued && !error) {
				mutex_enter(&np->r_statelock);
				np->r_nextr = io_off + io_len;
				mutex_exit(&np->r_statelock);
			}
		}
	}

	/* Readahead request (pl == NULL): no page list to fill in. */
	if (pl == NULL)
		return (error);

	if (error) {
		if (pp != NULL)
			pvn_read_done(pp, B_ERROR);
		return (error);
	}

	if (pagefound) {
		se_t se = (rw == S_CREATE ? SE_EXCL : SE_SHARED);

		/*
		 * Page exists in the cache, acquire the appropriate lock.
		 * If this fails, start all over again.
		 */
		if ((pp = page_lookup(vp, off, se)) == NULL) {
#ifdef	DEBUG
			smbfs_lostpage++;
#endif
			goto reread;
		}
		pl[0] = pp;
		pl[1] = NULL;
		return (0);
	}

	if (pp != NULL)
		pvn_plist_init(pp, pl, plsz, off, io_len, rw);

	return (error);
}
4032 +
4033 +/*
4034 + * Here NFS has: nfs3_readahead
4035 + * No read-ahead in smbfs yet.
4036 + */
4037 +
4038 +#endif // _KERNEL
4039 +
/*
 * VOP_PUTPAGE: flush dirty pages to the server.
 *
 * Flags are composed of {B_INVAL, B_FREE, B_DONTNEED, B_FORCE}
 * If len == 0, do from off to EOF.
 *
 * The normal cases should be len == 0 && off == 0 (entire vp list),
 * len == MAXBSIZE (from segmap_release actions), and len == PAGESIZE
 * (from pageout).
 *
 * Like nfs3_putpage + nfs_putpages
 */
/* ARGSUSED */
static int
smbfs_putpage(vnode_t *vp, offset_t off, size_t len, int flags, cred_t *cr,
	caller_context_t *ct)
{
#ifdef	_KERNEL
	smbnode_t *np;
	smbmntinfo_t *smi;
	page_t *pp;
	u_offset_t eoff;
	u_offset_t io_off;
	size_t io_len;
	int error;
	int rdirty;
	int err;

	np = VTOSMB(vp);
	smi = VTOSMI(vp);

	if (curproc->p_zone != smi->smi_zone_ref.zref_zone)
		return (EIO);

	if (smi->smi_flags & SMI_DEAD || vp->v_vfsp->vfs_flag & VFS_UNMOUNTED)
		return (EIO);

	if (vp->v_flag & VNOMAP)
		return (ENOSYS);

	/* Here NFS does rp->r_count (++/--) stuff. */

	/* Beginning of code from nfs_putpages. */

	if (!vn_has_cached_data(vp))
		return (0);

	/*
	 * If ROUTOFSPACE is set, then all writes turn into B_INVAL
	 * writes.  B_FORCE is set to force the VM system to actually
	 * invalidate the pages, even if the i/o failed.  The pages
	 * need to get invalidated because they can't be written out
	 * because there isn't any space left on either the server's
	 * file system or in the user's disk quota.  The B_FREE bit
	 * is cleared to avoid confusion as to whether this is a
	 * request to place the page on the freelist or to destroy
	 * it.
	 */
	if ((np->r_flags & ROUTOFSPACE) ||
	    (vp->v_vfsp->vfs_flag & VFS_UNMOUNTED))
		flags = (flags & ~B_FREE) | B_INVAL | B_FORCE;

	if (len == 0) {
		/*
		 * If doing a full file synchronous operation, then clear
		 * the RDIRTY bit.  If a page gets dirtied while the flush
		 * is happening, then RDIRTY will get set again.  The
		 * RDIRTY bit must get cleared before the flush so that
		 * we don't lose this information.
		 *
		 * NFS has B_ASYNC vs sync stuff here.
		 */
		if (off == (u_offset_t)0 &&
		    (np->r_flags & RDIRTY)) {
			mutex_enter(&np->r_statelock);
			rdirty = (np->r_flags & RDIRTY);
			np->r_flags &= ~RDIRTY;
			mutex_exit(&np->r_statelock);
		} else
			rdirty = 0;

		/*
		 * Search the entire vp list for pages >= off, and flush
		 * the dirty pages.
		 */
		error = pvn_vplist_dirty(vp, off, smbfs_putapage,
		    flags, cr);

		/*
		 * If an error occurred and the file was marked as dirty
		 * before and we aren't forcibly invalidating pages, then
		 * reset the RDIRTY flag.
		 */
		if (error && rdirty &&
		    (flags & (B_INVAL | B_FORCE)) != (B_INVAL | B_FORCE)) {
			mutex_enter(&np->r_statelock);
			np->r_flags |= RDIRTY;
			mutex_exit(&np->r_statelock);
		}
	} else {
		/*
		 * Do a range from [off...off + len) looking for pages
		 * to deal with.
		 */
		error = 0;
		io_len = 1; /* quiet warnings */
		eoff = off + len;

		for (io_off = off; io_off < eoff; io_off += io_len) {
			mutex_enter(&np->r_statelock);
			if (io_off >= np->r_size) {
				mutex_exit(&np->r_statelock);
				break;
			}
			mutex_exit(&np->r_statelock);
			/*
			 * If we are not invalidating, synchronously
			 * freeing or writing pages use the routine
			 * page_lookup_nowait() to prevent reclaiming
			 * them from the free list.
			 */
			if ((flags & B_INVAL) || !(flags & B_ASYNC)) {
				pp = page_lookup(vp, io_off,
				    (flags & (B_INVAL | B_FREE)) ?
				    SE_EXCL : SE_SHARED);
			} else {
				pp = page_lookup_nowait(vp, io_off,
				    (flags & B_FREE) ? SE_EXCL : SE_SHARED);
			}

			if (pp == NULL || !pvn_getdirty(pp, flags))
				io_len = PAGESIZE;
			else {
				err = smbfs_putapage(vp, pp, &io_off,
				    &io_len, flags, cr);
				if (!error)
					error = err;
				/*
				 * "io_off" and "io_len" are returned as
				 * the range of pages we actually wrote.
				 * This allows us to skip ahead more quickly
				 * since several pages may've been dealt
				 * with by this iteration of the loop.
				 */
			}
		}
	}

	return (error);

#else	// _KERNEL
	return (ENOSYS);
#endif	// _KERNEL
}
4192 +
4193 +#ifdef _KERNEL
4194 +
/*
 * Write out a single page, possibly klustering adjacent dirty pages.
 *
 * On success, *offp/*lenp report the range actually written so the
 * caller (smbfs_putpage) can skip ahead past klustered pages.
 *
 * Like nfs3_putapage / nfs3_sync_putapage
 */
static int
smbfs_putapage(vnode_t *vp, page_t *pp, u_offset_t *offp, size_t *lenp,
	int flags, cred_t *cr)
{
	smbnode_t *np;
	u_offset_t io_off;
	u_offset_t lbn_off;
	u_offset_t lbn;
	size_t io_len;
	uint_t bsize;
	int error;

	np = VTOSMB(vp);

	ASSERT(!vn_is_readonly(vp));

	bsize = MAX(vp->v_vfsp->vfs_bsize, PAGESIZE);
	lbn = pp->p_offset / bsize;
	lbn_off = lbn * bsize;

	/*
	 * Find a kluster that fits in one block, or in
	 * one page if pages are bigger than blocks.  If
	 * there is less file space allocated than a whole
	 * page, we'll shorten the i/o request below.
	 */
	pp = pvn_write_kluster(vp, pp, &io_off, &io_len, lbn_off,
	    roundup(bsize, PAGESIZE), flags);

	/*
	 * pvn_write_kluster shouldn't have returned a page with offset
	 * behind the original page we were given.  Verify that.
	 */
	ASSERT((pp->p_offset / bsize) >= lbn);

	/*
	 * Now pp will have the list of kept dirty pages marked for
	 * write back.  It will also handle invalidation and freeing
	 * of pages that are not dirty.  Check for page length rounding
	 * problems.
	 */
	if (io_off + io_len > lbn_off + bsize) {
		ASSERT((io_off + io_len) - (lbn_off + bsize) < PAGESIZE);
		io_len = lbn_off + bsize - io_off;
	}
	/*
	 * The RMODINPROGRESS flag makes sure that smbfs_bio() sees a
	 * consistent value of r_size.  RMODINPROGRESS is set in writerp().
	 * When RMODINPROGRESS is set it indicates that a uiomove() is in
	 * progress and the r_size has not been made consistent with the
	 * new size of the file.  When the uiomove() completes the r_size is
	 * updated and the RMODINPROGRESS flag is cleared.
	 *
	 * The RMODINPROGRESS flag makes sure that smbfs_bio() sees a
	 * consistent value of r_size.  Without this handshaking, it is
	 * possible that smbfs_bio() picks up the old value of r_size
	 * before the uiomove() in writerp() completes.  This will result
	 * in the write through smbfs_bio() being dropped.
	 *
	 * More precisely, there is a window between the time the uiomove()
	 * completes and the time the r_size is updated.  If a VOP_PUTPAGE()
	 * operation intervenes in this window, the page will be picked up,
	 * because it is dirty (it will be unlocked, unless it was
	 * pagecreate'd).  When the page is picked up as dirty, the dirty
	 * bit is reset (pvn_getdirty()).  In smbfs_write(), r_size is
	 * checked.  This will still be the old size.  Therefore the page will
	 * not be written out.  When segmap_release() calls VOP_PUTPAGE(),
	 * the page will be found to be clean and the write will be dropped.
	 */
	if (np->r_flags & RMODINPROGRESS) {
		mutex_enter(&np->r_statelock);
		if ((np->r_flags & RMODINPROGRESS) &&
		    np->r_modaddr + MAXBSIZE > io_off &&
		    np->r_modaddr < io_off + io_len) {
			page_t *plist;
			/*
			 * A write is in progress for this region of the file.
			 * If we did not detect RMODINPROGRESS here then this
			 * path through smbfs_putapage() would eventually go to
			 * smbfs_bio() and may not write out all of the data
			 * in the pages.  We end up losing data.  So we decide
			 * to set the modified bit on each page in the page
			 * list and mark the rnode with RDIRTY.  This write
			 * will be restarted at some later time.
			 */
			plist = pp;
			while (plist != NULL) {
				pp = plist;
				page_sub(&plist, pp);
				hat_setmod(pp);
				page_io_unlock(pp);
				page_unlock(pp);
			}
			np->r_flags |= RDIRTY;
			mutex_exit(&np->r_statelock);
			if (offp)
				*offp = io_off;
			if (lenp)
				*lenp = io_len;
			return (0);
		}
		mutex_exit(&np->r_statelock);
	}

	/*
	 * NFS handles (flags & B_ASYNC) here...
	 * (See nfs_async_putapage())
	 *
	 * This code section from: nfs3_sync_putapage()
	 */

	flags |= B_WRITE;

	error = smbfs_rdwrlbn(vp, pp, io_off, io_len, flags, cr);

	if ((error == ENOSPC || error == EDQUOT || error == EFBIG ||
	    error == EACCES) &&
	    (flags & (B_INVAL|B_FORCE)) != (B_INVAL|B_FORCE)) {
		if (!(np->r_flags & ROUTOFSPACE)) {
			mutex_enter(&np->r_statelock);
			np->r_flags |= ROUTOFSPACE;
			mutex_exit(&np->r_statelock);
		}
		flags |= B_ERROR;
		pvn_write_done(pp, flags);
		/*
		 * If this was not an async thread, then try again to
		 * write out the pages, but this time, also destroy
		 * them whether or not the write is successful.  This
		 * will prevent memory from filling up with these
		 * pages and destroying them is the only alternative
		 * if they can't be written out.
		 *
		 * Don't do this if this is an async thread because
		 * when the pages are unlocked in pvn_write_done,
		 * some other thread could have come along, locked
		 * them, and queued for an async thread.  It would be
		 * possible for all of the async threads to be tied
		 * up waiting to lock the pages again and they would
		 * all already be locked and waiting for an async
		 * thread to handle them.  Deadlock.
		 */
		if (!(flags & B_ASYNC)) {
			error = smbfs_putpage(vp, io_off, io_len,
			    B_INVAL | B_FORCE, cr, NULL);
		}
	} else {
		if (error)
			flags |= B_ERROR;
		else if (np->r_flags & ROUTOFSPACE) {
			mutex_enter(&np->r_statelock);
			np->r_flags &= ~ROUTOFSPACE;
			mutex_exit(&np->r_statelock);
		}
		pvn_write_done(pp, flags);
	}

	/* Now more code from: nfs3_putapage */

	if (offp)
		*offp = io_off;
	if (lenp)
		*lenp = io_len;

	return (error);
}
4366 +
4367 +#endif // _KERNEL
4368 +
4369 +
/*
 * Invalidate (and discard) cached pages at/after "off".
 *
 * NFS has this in nfs_client.c (shared by v2,v3,...)
 * We have it here so smbfs_putapage can be file scope.
 *
 * Serializes with other truncators via the RTRUNCATE flag and
 * r_cv; at most one invalidation runs at a time per node.
 */
void
smbfs_invalidate_pages(vnode_t *vp, u_offset_t off, cred_t *cr)
{
	smbnode_t *np;

	np = VTOSMB(vp);

	/* Wait for any in-progress truncate, then claim the flag. */
	mutex_enter(&np->r_statelock);
	while (np->r_flags & RTRUNCATE)
		cv_wait(&np->r_cv, &np->r_statelock);
	np->r_flags |= RTRUNCATE;

	if (off == (u_offset_t)0) {
		/* Whole-file invalidation clears dirty state too. */
		np->r_flags &= ~RDIRTY;
		if (!(np->r_flags & RSTALE))
			np->r_error = 0;
	}
	/* Here NFSv3 has np->r_truncaddr = off; */
	mutex_exit(&np->r_statelock);

#ifdef	_KERNEL
	(void) pvn_vplist_dirty(vp, off, smbfs_putapage,
	    B_INVAL | B_TRUNC, cr);
#endif	// _KERNEL

	/* Release the flag and wake any waiting truncators. */
	mutex_enter(&np->r_statelock);
	np->r_flags &= ~RTRUNCATE;
	cv_broadcast(&np->r_cv);
	mutex_exit(&np->r_statelock);
}
4404 +
4405 +#ifdef _KERNEL
4406 +
/*
 * VOP_MAP: set up a memory mapping of this file.
 * Like nfs3_map.
 */
/* ARGSUSED */
static int
smbfs_map(vnode_t *vp, offset_t off, struct as *as, caddr_t *addrp,
	size_t len, uchar_t prot, uchar_t maxprot, uint_t flags,
	cred_t *cr, caller_context_t *ct)
{
	segvn_crargs_t vn_a;
	struct vattr va;
	smbnode_t *np;
	smbmntinfo_t *smi;
	int error;

	np = VTOSMB(vp);
	smi = VTOSMI(vp);

	if (curproc->p_zone != smi->smi_zone_ref.zref_zone)
		return (EIO);

	if (smi->smi_flags & SMI_DEAD || vp->v_vfsp->vfs_flag & VFS_UNMOUNTED)
		return (EIO);

	if (vp->v_flag & VNOMAP)
		return (ENOSYS);

	/* Reject negative or overflowing offset ranges. */
	if (off < 0 || off + (ssize_t)len < 0)
		return (ENXIO);

	/* Only regular files can be mapped. */
	if (vp->v_type != VREG)
		return (ENODEV);

	/*
	 * NFS does close-to-open consistency stuff here.
	 * Just get (possibly cached) attributes.
	 */
	va.va_mask = AT_ALL;
	if ((error = smbfsgetattr(vp, &va, cr)) != 0)
		return (error);

	/*
	 * Check to see if the vnode is currently marked as not cachable.
	 * This means portions of the file are locked (through VOP_FRLOCK).
	 * In this case the map request must be refused.  We use
	 * rp->r_lkserlock to avoid a race with concurrent lock requests.
	 */
	/*
	 * Atomically increment r_inmap after acquiring r_rwlock.  The
	 * idea here is to acquire r_rwlock to block read/write and
	 * not to protect r_inmap.  r_inmap will inform smbfs_read/write()
	 * that we are in smbfs_map().  Now, r_rwlock is acquired in order
	 * and we can prevent the deadlock that would have occurred
	 * when smbfs_addmap() would have acquired it out of order.
	 *
	 * Since we are not protecting r_inmap by any lock, we do not
	 * hold any lock when we decrement it.  We atomically decrement
	 * r_inmap after we release r_lkserlock.  Note that rwlock is
	 * re-entered as writer in smbfs_addmap (called via as_map).
	 */

	if (smbfs_rw_enter_sig(&np->r_rwlock, RW_WRITER, SMBINTR(vp)))
		return (EINTR);
	atomic_inc_uint(&np->r_inmap);
	smbfs_rw_exit(&np->r_rwlock);

	if (smbfs_rw_enter_sig(&np->r_lkserlock, RW_WRITER, SMBINTR(vp))) {
		atomic_dec_uint(&np->r_inmap);
		return (EINTR);
	}

	if (vp->v_flag & VNOCACHE) {
		error = EAGAIN;
		goto done;
	}

	/*
	 * Don't allow concurrent locks and mapping if mandatory locking is
	 * enabled.
	 */
	if ((flk_has_remote_locks(vp) || smbfs_lm_has_sleep(vp)) &&
	    MANDLOCK(vp, va.va_mode)) {
		error = EAGAIN;
		goto done;
	}

	as_rangelock(as);
	error = choose_addr(as, addrp, len, off, ADDR_VACALIGN, flags);
	if (error != 0) {
		as_rangeunlock(as);
		goto done;
	}

	/* Create the (segvn) mapping over the chosen address range. */
	vn_a.vp = vp;
	vn_a.offset = off;
	vn_a.type = (flags & MAP_TYPE);
	vn_a.prot = (uchar_t)prot;
	vn_a.maxprot = (uchar_t)maxprot;
	vn_a.flags = (flags & ~MAP_TYPE);
	vn_a.cred = cr;
	vn_a.amp = NULL;
	vn_a.szc = 0;
	vn_a.lgrp_mem_policy_flags = 0;

	error = as_map(as, *addrp, len, segvn_create, &vn_a);
	as_rangeunlock(as);

done:
	smbfs_rw_exit(&np->r_lkserlock);
	atomic_dec_uint(&np->r_inmap);
	return (error);
}
4518 +
/*
 * This uses addmap/delmap functions to hold the SMB FID open as long as
 * there are pages mapped in this as/seg.  Increment the FID refs. when
 * the mapping count goes from zero to non-zero, and release the FID ref
 * when the mapping count goes from non-zero to zero.
 */

/* ARGSUSED */
static int
smbfs_addmap(vnode_t *vp, offset_t off, struct as *as, caddr_t addr,
	size_t len, uchar_t prot, uchar_t maxprot, uint_t flags,
	cred_t *cr, caller_context_t *ct)
{
	smbnode_t *np = VTOSMB(vp);
	boolean_t inc_fidrefs = B_FALSE;

	/*
	 * When r_mapcnt goes from zero to non-zero,
	 * increment n_fidrefs
	 */
	mutex_enter(&np->r_statelock);
	if (np->r_mapcnt == 0)
		inc_fidrefs = B_TRUE;
	np->r_mapcnt += btopr(len);	/* pages (rounded up), not bytes */
	mutex_exit(&np->r_statelock);

	if (inc_fidrefs) {
		/* n_fidrefs is protected by r_lkserlock, not r_statelock. */
		(void) smbfs_rw_enter_sig(&np->r_lkserlock, RW_WRITER, 0);
		np->n_fidrefs++;
		smbfs_rw_exit(&np->r_lkserlock);
	}

	return (0);
}
4553 +
/*
 * Args passed to smbfs_delmap_async.  Carries the VOP_DELMAP arguments
 * (plus holds on the vnode and cred) from smbfs_delmap to the taskq job.
 */
typedef struct smbfs_delmap_args {
	taskq_ent_t	dm_tqent;	/* embedded taskq entry (no alloc in dispatch) */
	cred_t		*dm_cr;		/* held cred; released in the callback */
	vnode_t		*dm_vp;		/* held vnode; released in the callback */
	offset_t	dm_off;		/* VOP_DELMAP off */
	caddr_t		dm_addr;	/* VOP_DELMAP addr */
	size_t		dm_len;		/* VOP_DELMAP len */
	uint_t		dm_prot;	/* VOP_DELMAP prot */
	uint_t		dm_maxprot;	/* VOP_DELMAP maxprot */
	uint_t		dm_flags;	/* VOP_DELMAP flags */
	boolean_t	dm_rele_fid;	/* r_mapcnt hit zero; drop a FID ref */
} smbfs_delmap_args_t;
4569 +
/*
 * Using delmap not only to release the SMB FID (as described above)
 * but to flush dirty pages as needed.  Both of those do the actual
 * work in an async taskq job to avoid interfering with locks held
 * in the VM layer when this is called.
 */

/* ARGSUSED */
static int
smbfs_delmap(vnode_t *vp, offset_t off, struct as *as, caddr_t addr,
	size_t len, uint_t prot, uint_t maxprot, uint_t flags,
	cred_t *cr, caller_context_t *ct)
{
	smbnode_t *np = VTOSMB(vp);
	smbmntinfo_t *smi = VTOSMI(vp);
	smbfs_delmap_args_t *dmapp;

	/* KM_SLEEP: cannot fail (may block). */
	dmapp = kmem_zalloc(sizeof (*dmapp), KM_SLEEP);

	/*
	 * The VM layer may segvn_free the seg holding this vnode
	 * before our callback has a chance to run, so take a hold on
	 * the vnode here and release it in the callback.
	 * (same for the cred)
	 */
	crhold(cr);
	VN_HOLD(vp);

	dmapp->dm_vp = vp;
	dmapp->dm_cr = cr;
	dmapp->dm_off = off;
	dmapp->dm_addr = addr;
	dmapp->dm_len = len;
	dmapp->dm_prot = prot;
	dmapp->dm_maxprot = maxprot;
	dmapp->dm_flags = flags;
	dmapp->dm_rele_fid = B_FALSE;

	/*
	 * Go ahead and decrement r_mapcount now, which is
	 * the primary purpose of this function.
	 *
	 * When r_mapcnt goes to zero, we need to call
	 * smbfs_rele_fid, but can't do that here, so
	 * set a flag telling the async task to do it.
	 */
	mutex_enter(&np->r_statelock);
	np->r_mapcnt -= btopr(len);
	ASSERT(np->r_mapcnt >= 0);
	if (np->r_mapcnt == 0)
		dmapp->dm_rele_fid = B_TRUE;
	mutex_exit(&np->r_statelock);

	/* dmapp (and the vnode/cred holds) now belong to the async job. */
	taskq_dispatch_ent(smi->smi_taskq, smbfs_delmap_async, dmapp, 0,
	    &dmapp->dm_tqent);

	return (0);
}
4628 +
/*
 * Remove some pages from an mmap'd vnode.  Flush any
 * dirty pages in the unmapped range.  Runs in taskq context;
 * consumes (and frees) the smbfs_delmap_args_t.
 */
/* ARGSUSED */
static void
smbfs_delmap_async(void *varg)
{
	smbfs_delmap_args_t *dmapp = varg;
	cred_t *cr;
	vnode_t *vp;
	smbnode_t *np;
	smbmntinfo_t *smi;

	cr = dmapp->dm_cr;
	vp = dmapp->dm_vp;
	np = VTOSMB(vp);
	smi = VTOSMI(vp);

	/* Decremented r_mapcnt in smbfs_delmap */

	/*
	 * Initiate a page flush and potential commit if there are
	 * pages, the file system was not mounted readonly, the segment
	 * was mapped shared, and the pages themselves were writeable.
	 *
	 * mark RDIRTY here, will be used to check if a file is dirty when
	 * unmount smbfs
	 */
	if (vn_has_cached_data(vp) && !vn_is_readonly(vp) &&
	    dmapp->dm_flags == MAP_SHARED &&
	    (dmapp->dm_maxprot & PROT_WRITE) != 0) {
		mutex_enter(&np->r_statelock);
		np->r_flags |= RDIRTY;
		mutex_exit(&np->r_statelock);

		/*
		 * Need to finish the putpage before we
		 * close the OtW FID needed for I/O.
		 */
		(void) smbfs_putpage(vp, dmapp->dm_off, dmapp->dm_len, 0,
		    dmapp->dm_cr, NULL);
	}

	/* Direct I/O mounts keep no cached pages; invalidate the range. */
	if ((np->r_flags & RDIRECTIO) || (smi->smi_flags & SMI_DIRECTIO))
		(void) smbfs_putpage(vp, dmapp->dm_off, dmapp->dm_len,
		    B_INVAL, dmapp->dm_cr, NULL);

	/*
	 * If r_mapcnt went to zero, drop our FID ref now.
	 * On the last fidref, this does an OtW close.
	 */
	if (dmapp->dm_rele_fid) {
		struct smb_cred scred;

		(void) smbfs_rw_enter_sig(&np->r_lkserlock, RW_WRITER, 0);
		smb_credinit(&scred, dmapp->dm_cr);

		smbfs_rele_fid(np, &scred);

		smb_credrele(&scred);
		smbfs_rw_exit(&np->r_lkserlock);
	}

	/* Release holds taken in smbfs_delmap */
	VN_RELE(vp);
	crfree(cr);

	kmem_free(dmapp, sizeof (*dmapp));
}
4699 +
4700 +/* No smbfs_pageio() or smbfs_dispose() ops. */
4701 +
4702 +#endif // _KERNEL
4703 +
4704 +/* misc. ******************************************************** */
4705 +
4706 +
4707 +/*
2924 4708 * XXX
2925 4709 * This op may need to support PSARC 2007/440, nbmand changes for CIFS Service.
2926 4710 */
2927 4711 static int
2928 4712 smbfs_frlock(vnode_t *vp, int cmd, struct flock64 *bfp, int flag,
2929 4713 offset_t offset, struct flk_callback *flk_cbp, cred_t *cr,
2930 4714 caller_context_t *ct)
2931 4715 {
2932 4716 if (curproc->p_zone != VTOSMI(vp)->smi_zone_ref.zref_zone)
2933 4717 return (EIO);
2934 4718
2935 4719 if (VTOSMI(vp)->smi_flags & SMI_LLOCK)
2936 4720 return (fs_frlock(vp, cmd, bfp, flag, offset, flk_cbp, cr, ct));
2937 4721 else
2938 4722 return (ENOSYS);
2939 4723 }
2940 4724
/*
 * Free storage space associated with the specified vnode.  The portion
 * to be freed is specified by bfp->l_start and bfp->l_len (already
 * normalized to a "whence" of 0).
 *
 * Called by fcntl(fd, F_FREESP, lkp) for libc:ftruncate, etc.
 */
/* ARGSUSED */
static int
smbfs_space(vnode_t *vp, int cmd, struct flock64 *bfp, int flag,
	offset_t offset, cred_t *cr, caller_context_t *ct)
{
	int error;
	smbmntinfo_t *smi;

	smi = VTOSMI(vp);

	/* Refuse cross-zone access. */
	if (curproc->p_zone != smi->smi_zone_ref.zref_zone)
		return (EIO);

	/* Refuse access after the mount is dead or unmounted. */
	if (smi->smi_flags & SMI_DEAD || vp->v_vfsp->vfs_flag & VFS_UNMOUNTED)
		return (EIO);

	/* Caller (fcntl) has checked v_type */
	ASSERT(vp->v_type == VREG);
	if (cmd != F_FREESP)
		return (EINVAL);

	/*
	 * Like NFS3, no 32-bit offset checks here.
	 * Our SMB layer takes care to return EFBIG
	 * when it has to fallback to a 32-bit call.
	 */

	error = convoff(vp, bfp, 0, offset);
	if (!error) {
		ASSERT(bfp->l_start >= 0);
		/* Only full truncation to l_start (l_len == 0) is supported. */
		if (bfp->l_len == 0) {
			struct vattr va;

			/*
			 * ftruncate should not change the ctime and
			 * mtime if we truncate the file to its
			 * previous size.
			 */
			va.va_mask = AT_SIZE;
			error = smbfsgetattr(vp, &va, cr);
			if (error || va.va_size == bfp->l_start)
				return (error);
			va.va_mask = AT_SIZE;
			va.va_size = bfp->l_start;
			error = smbfssetattr(vp, &va, 0, cr);
			/* SMBFS_VNEVENT... */
		} else
			error = EINVAL;
	}

	return (error);
}
2999 4784
4785 +
3000 4786 /* ARGSUSED */
3001 4787 static int
4788 +smbfs_realvp(vnode_t *vp, vnode_t **vpp, caller_context_t *ct)
4789 +{
4790 +
4791 + return (ENOSYS);
4792 +}
4793 +
4794 +
4795 +/* ARGSUSED */
4796 +static int
3002 4797 smbfs_pathconf(vnode_t *vp, int cmd, ulong_t *valp, cred_t *cr,
3003 4798 caller_context_t *ct)
3004 4799 {
3005 4800 vfs_t *vfs;
3006 4801 smbmntinfo_t *smi;
3007 4802 struct smb_share *ssp;
3008 4803
3009 4804 vfs = vp->v_vfsp;
3010 4805 smi = VFTOSMI(vfs);
3011 4806
3012 4807 if (curproc->p_zone != smi->smi_zone_ref.zref_zone)
3013 4808 return (EIO);
3014 4809
3015 4810 if (smi->smi_flags & SMI_DEAD || vp->v_vfsp->vfs_flag & VFS_UNMOUNTED)
3016 4811 return (EIO);
3017 4812
3018 4813 switch (cmd) {
3019 4814 case _PC_FILESIZEBITS:
3020 4815 ssp = smi->smi_share;
3021 4816 if (SSTOVC(ssp)->vc_sopt.sv_caps & SMB_CAP_LARGE_FILES)
3022 4817 *valp = 64;
3023 4818 else
3024 4819 *valp = 32;
3025 4820 break;
3026 4821
3027 4822 case _PC_LINK_MAX:
3028 4823 /* We only ever report one link to an object */
3029 4824 *valp = 1;
3030 4825 break;
3031 4826
3032 4827 case _PC_ACL_ENABLED:
3033 4828 /*
3034 4829 * Always indicate that ACLs are enabled and
3035 4830 * that we support ACE_T format, otherwise
3036 4831 * libsec will ask for ACLENT_T format data
3037 4832 * which we don't support.
3038 4833 */
3039 4834 *valp = _ACL_ACE_ENABLED;
3040 4835 break;
3041 4836
3042 4837 case _PC_SYMLINK_MAX: /* No symlinks until we do Unix extensions */
3043 4838 *valp = 0;
3044 4839 break;
3045 4840
3046 4841 case _PC_XATTR_EXISTS:
3047 4842 if (vfs->vfs_flag & VFS_XATTR) {
3048 4843 *valp = smbfs_xa_exists(vp, cr);
3049 4844 break;
3050 4845 }
3051 4846 return (EINVAL);
3052 4847
3053 4848 case _PC_SATTR_ENABLED:
3054 4849 case _PC_SATTR_EXISTS:
3055 4850 *valp = 1;
3056 4851 break;
3057 4852
3058 4853 case _PC_TIMESTAMP_RESOLUTION:
3059 4854 /*
3060 4855 * Windows times are tenths of microseconds
3061 4856 * (multiples of 100 nanoseconds).
3062 4857 */
3063 4858 *valp = 100L;
3064 4859 break;
3065 4860
3066 4861 default:
3067 4862 return (fs_pathconf(vp, cmd, valp, cr, ct));
3068 4863 }
3069 4864 return (0);
3070 4865 }
3071 4866
3072 4867 /* ARGSUSED */
3073 4868 static int
3074 4869 smbfs_getsecattr(vnode_t *vp, vsecattr_t *vsa, int flag, cred_t *cr,
3075 4870 caller_context_t *ct)
3076 4871 {
3077 4872 vfs_t *vfsp;
3078 4873 smbmntinfo_t *smi;
3079 4874 int error;
3080 4875 uint_t mask;
3081 4876
3082 4877 vfsp = vp->v_vfsp;
3083 4878 smi = VFTOSMI(vfsp);
3084 4879
3085 4880 if (curproc->p_zone != smi->smi_zone_ref.zref_zone)
3086 4881 return (EIO);
3087 4882
3088 4883 if (smi->smi_flags & SMI_DEAD || vfsp->vfs_flag & VFS_UNMOUNTED)
3089 4884 return (EIO);
3090 4885
3091 4886 /*
3092 4887 * Our _pathconf indicates _ACL_ACE_ENABLED,
3093 4888 * so we should only see VSA_ACE, etc here.
3094 4889 * Note: vn_create asks for VSA_DFACLCNT,
3095 4890 * and it expects ENOSYS and empty data.
3096 4891 */
3097 4892 mask = vsa->vsa_mask & (VSA_ACE | VSA_ACECNT |
3098 4893 VSA_ACE_ACLFLAGS | VSA_ACE_ALLTYPES);
3099 4894 if (mask == 0)
3100 4895 return (ENOSYS);
3101 4896
3102 4897 if (smi->smi_flags & SMI_ACL)
3103 4898 error = smbfs_acl_getvsa(vp, vsa, flag, cr);
3104 4899 else
3105 4900 error = ENOSYS;
3106 4901
3107 4902 if (error == ENOSYS)
3108 4903 error = fs_fab_acl(vp, vsa, flag, cr, ct);
3109 4904
3110 4905 return (error);
3111 4906 }
3112 4907
3113 4908 /* ARGSUSED */
3114 4909 static int
3115 4910 smbfs_setsecattr(vnode_t *vp, vsecattr_t *vsa, int flag, cred_t *cr,
3116 4911 caller_context_t *ct)
3117 4912 {
3118 4913 vfs_t *vfsp;
3119 4914 smbmntinfo_t *smi;
3120 4915 int error;
3121 4916 uint_t mask;
3122 4917
3123 4918 vfsp = vp->v_vfsp;
3124 4919 smi = VFTOSMI(vfsp);
3125 4920
3126 4921 if (curproc->p_zone != smi->smi_zone_ref.zref_zone)
3127 4922 return (EIO);
3128 4923
3129 4924 if (smi->smi_flags & SMI_DEAD || vfsp->vfs_flag & VFS_UNMOUNTED)
3130 4925 return (EIO);
3131 4926
3132 4927 /*
3133 4928 * Our _pathconf indicates _ACL_ACE_ENABLED,
3134 4929 * so we should only see VSA_ACE, etc here.
3135 4930 */
3136 4931 mask = vsa->vsa_mask & (VSA_ACE | VSA_ACECNT);
3137 4932 if (mask == 0)
3138 4933 return (ENOSYS);
3139 4934
3140 4935 if (vfsp->vfs_flag & VFS_RDONLY)
3141 4936 return (EROFS);
3142 4937
3143 4938 /*
3144 4939 * Allow only the mount owner to do this.
3145 4940 * See comments at smbfs_access_rwx.
3146 4941 */
3147 4942 error = secpolicy_vnode_setdac(cr, smi->smi_uid);
3148 4943 if (error != 0)
3149 4944 return (error);
3150 4945
3151 4946 if (smi->smi_flags & SMI_ACL)
3152 4947 error = smbfs_acl_setvsa(vp, vsa, flag, cr);
3153 4948 else
3154 4949 error = ENOSYS;
3155 4950
3156 4951 return (error);
3157 4952 }
3158 4953
3159 4954
3160 4955 /*
3161 4956 * XXX
3162 4957 * This op should eventually support PSARC 2007/268.
3163 4958 */
3164 4959 static int
3165 4960 smbfs_shrlock(vnode_t *vp, int cmd, struct shrlock *shr, int flag, cred_t *cr,
|
↓ open down ↓ |
154 lines elided |
↑ open up ↑ |
3166 4961 caller_context_t *ct)
3167 4962 {
3168 4963 if (curproc->p_zone != VTOSMI(vp)->smi_zone_ref.zref_zone)
3169 4964 return (EIO);
3170 4965
3171 4966 if (VTOSMI(vp)->smi_flags & SMI_LLOCK)
3172 4967 return (fs_shrlock(vp, cmd, shr, flag, cr, ct));
3173 4968 else
3174 4969 return (ENOSYS);
3175 4970 }
4971 +
4972 +
/*
 * Most unimplemented ops will return ENOSYS because of fs_nosys().
 * The only ops where that won't work are ACCESS (due to open(2)
 * failures) and ... (anything else left?)
 */
const fs_operation_def_t smbfs_vnodeops_template[] = {
	VOPNAME_OPEN,		{ .vop_open = smbfs_open },
	VOPNAME_CLOSE,		{ .vop_close = smbfs_close },
	VOPNAME_READ,		{ .vop_read = smbfs_read },
	VOPNAME_WRITE,		{ .vop_write = smbfs_write },
	VOPNAME_IOCTL,		{ .vop_ioctl = smbfs_ioctl },
	VOPNAME_GETATTR,	{ .vop_getattr = smbfs_getattr },
	VOPNAME_SETATTR,	{ .vop_setattr = smbfs_setattr },
	VOPNAME_ACCESS,		{ .vop_access = smbfs_access },
	VOPNAME_LOOKUP,		{ .vop_lookup = smbfs_lookup },
	VOPNAME_CREATE,		{ .vop_create = smbfs_create },
	VOPNAME_REMOVE,		{ .vop_remove = smbfs_remove },
	VOPNAME_LINK,		{ .vop_link = smbfs_link },
	VOPNAME_RENAME,		{ .vop_rename = smbfs_rename },
	VOPNAME_MKDIR,		{ .vop_mkdir = smbfs_mkdir },
	VOPNAME_RMDIR,		{ .vop_rmdir = smbfs_rmdir },
	VOPNAME_READDIR,	{ .vop_readdir = smbfs_readdir },
	VOPNAME_SYMLINK,	{ .vop_symlink = smbfs_symlink },
	VOPNAME_READLINK,	{ .vop_readlink = smbfs_readlink },
	VOPNAME_FSYNC,		{ .vop_fsync = smbfs_fsync },
	VOPNAME_INACTIVE,	{ .vop_inactive = smbfs_inactive },
	VOPNAME_FID,		{ .vop_fid = smbfs_fid },
	VOPNAME_RWLOCK,		{ .vop_rwlock = smbfs_rwlock },
	VOPNAME_RWUNLOCK,	{ .vop_rwunlock = smbfs_rwunlock },
	VOPNAME_SEEK,		{ .vop_seek = smbfs_seek },
	VOPNAME_FRLOCK,		{ .vop_frlock = smbfs_frlock },
	VOPNAME_SPACE,		{ .vop_space = smbfs_space },
	VOPNAME_REALVP,		{ .vop_realvp = smbfs_realvp },
	/* Paging and mmap support is kernel-only (not in fksmbcl). */
#ifdef	_KERNEL
	VOPNAME_GETPAGE,	{ .vop_getpage = smbfs_getpage },
	VOPNAME_PUTPAGE,	{ .vop_putpage = smbfs_putpage },
	VOPNAME_MAP,		{ .vop_map = smbfs_map },
	VOPNAME_ADDMAP,		{ .vop_addmap = smbfs_addmap },
	VOPNAME_DELMAP,		{ .vop_delmap = smbfs_delmap },
#endif	// _KERNEL
	VOPNAME_PATHCONF,	{ .vop_pathconf = smbfs_pathconf },
	VOPNAME_SETSECATTR,	{ .vop_setsecattr = smbfs_setsecattr },
	VOPNAME_GETSECATTR,	{ .vop_getsecattr = smbfs_getsecattr },
	VOPNAME_SHRLOCK,	{ .vop_shrlock = smbfs_shrlock },
#ifdef	SMBFS_VNEVENT
	VOPNAME_VNEVENT,	{ .vop_vnevent = fs_vnevent_support },
#endif
	{ NULL, NULL }		/* table terminator */
};
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX