Go ahead and destroy the NFSv4 database tables
--- old/usr/src/uts/common/fs/nfs/nfs4_state.c
+++ new/usr/src/uts/common/fs/nfs/nfs4_state.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
24 24 */
25 25
26 26 /*
27 27 * Copyright 2018 Nexenta Systems, Inc.
28 28 * Copyright 2019 Nexenta by DDN, Inc.
29 29 */
30 30
31 31 #include <sys/systm.h>
32 32 #include <sys/kmem.h>
33 33 #include <sys/cmn_err.h>
34 34 #include <sys/atomic.h>
35 35 #include <sys/clconf.h>
36 36 #include <sys/cladm.h>
37 37 #include <sys/flock.h>
38 38 #include <nfs/export.h>
39 39 #include <nfs/nfs.h>
40 40 #include <nfs/nfs4.h>
41 41 #include <nfs/nfssys.h>
42 42 #include <nfs/lm.h>
43 43 #include <sys/pathname.h>
44 44 #include <sys/sdt.h>
45 45 #include <sys/nvpair.h>
46 46
47 47 extern u_longlong_t nfs4_srv_caller_id;
48 48
49 49 extern uint_t nfs4_srv_vkey;
50 50
51 51 stateid4 special0 = {
52 52 0,
53 53 { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
54 54 };
55 55
56 56 stateid4 special1 = {
57 57 0xffffffff,
58 58 {
59 59 (char)0xff, (char)0xff, (char)0xff, (char)0xff,
60 60 (char)0xff, (char)0xff, (char)0xff, (char)0xff,
61 61 (char)0xff, (char)0xff, (char)0xff, (char)0xff
62 62 }
63 63 };
64 64
65 65
66 66 #define ISSPECIAL(id) (stateid4_cmp(id, &special0) || \
67 67 stateid4_cmp(id, &special1))
68 68
69 69 /* For embedding the cluster nodeid into our clientid */
70 70 #define CLUSTER_NODEID_SHIFT 24
71 71 #define CLUSTER_MAX_NODEID 255
72 72
73 73 #ifdef DEBUG
74 74 int rfs4_debug;
75 75 #endif
76 76
77 77 static uint32_t rfs4_database_debug = 0x00;
78 78
79 79 /* CSTYLED */
80 80 static void rfs4_ss_clid_write(nfs4_srv_t *nsrv4, rfs4_client_t *cp, char *leaf);
81 81 static void rfs4_ss_clid_write_one(rfs4_client_t *cp, char *dir, char *leaf);
82 82 static void rfs4_dss_clear_oldstate(rfs4_servinst_t *sip);
83 83 static void rfs4_ss_chkclid_sip(rfs4_client_t *cp, rfs4_servinst_t *sip);
84 84
85 85 /*
86 86 * Couple of simple init/destroy functions for a general waiter
87 87 */
88 88 void
89 89 rfs4_sw_init(rfs4_state_wait_t *swp)
90 90 {
91 91 mutex_init(swp->sw_cv_lock, NULL, MUTEX_DEFAULT, NULL);
92 92 cv_init(swp->sw_cv, NULL, CV_DEFAULT, NULL);
93 93 swp->sw_active = FALSE;
94 94 swp->sw_wait_count = 0;
95 95 }
96 96
97 97 void
98 98 rfs4_sw_destroy(rfs4_state_wait_t *swp)
99 99 {
100 100 mutex_destroy(swp->sw_cv_lock);
101 101 cv_destroy(swp->sw_cv);
102 102 }
103 103
104 104 void
105 105 rfs4_sw_enter(rfs4_state_wait_t *swp)
106 106 {
107 107 mutex_enter(swp->sw_cv_lock);
108 108 while (swp->sw_active) {
109 109 swp->sw_wait_count++;
110 110 cv_wait(swp->sw_cv, swp->sw_cv_lock);
111 111 swp->sw_wait_count--;
112 112 }
113 113 ASSERT(swp->sw_active == FALSE);
114 114 swp->sw_active = TRUE;
115 115 mutex_exit(swp->sw_cv_lock);
116 116 }
117 117
118 118 void
119 119 rfs4_sw_exit(rfs4_state_wait_t *swp)
120 120 {
121 121 mutex_enter(swp->sw_cv_lock);
122 122 ASSERT(swp->sw_active == TRUE);
123 123 swp->sw_active = FALSE;
124 124 if (swp->sw_wait_count != 0)
125 125 cv_broadcast(swp->sw_cv);
126 126 mutex_exit(swp->sw_cv_lock);
127 127 }
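A minimal usage sketch of this waiter, assuming a caller that must serialize some per-state work (the enclosing function is hypothetical; only the rfs4_sw_* calls above are real):

    /* Hypothetical caller serializing a critical section. */
    static void
    example_serialized_update(rfs4_state_wait_t *swp)
    {
    	rfs4_sw_enter(swp);	/* blocks while another thread is active */
    	/* ... perform the work that must not run concurrently ... */
    	rfs4_sw_exit(swp);	/* wakes any blocked waiters */
    }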
128 128
129 129 static void
130 130 deep_lock_copy(LOCK4res *dres, LOCK4res *sres)
131 131 {
132 132 lock_owner4 *slo = &sres->LOCK4res_u.denied.owner;
133 133 lock_owner4 *dlo = &dres->LOCK4res_u.denied.owner;
134 134
135 135 if (sres->status == NFS4ERR_DENIED) {
136 136 dlo->owner_val = kmem_alloc(slo->owner_len, KM_SLEEP);
137 137 bcopy(slo->owner_val, dlo->owner_val, slo->owner_len);
138 138 }
139 139 }
140 140
141 141 /*
142 142 * CPR callback id -- not related to v4 callbacks
143 143 */
144 144 static callb_id_t cpr_id = 0;
145 145
146 146 static void
147 147 deep_lock_free(LOCK4res *res)
148 148 {
149 149 lock_owner4 *lo = &res->LOCK4res_u.denied.owner;
150 150
151 151 if (res->status == NFS4ERR_DENIED)
152 152 kmem_free(lo->owner_val, lo->owner_len);
153 153 }
154 154
155 155 static void
156 156 deep_open_copy(OPEN4res *dres, OPEN4res *sres)
157 157 {
158 158 nfsace4 *sacep, *dacep;
159 159
160 160 if (sres->status != NFS4_OK) {
161 161 return;
162 162 }
163 163
164 164 dres->attrset = sres->attrset;
165 165
166 166 switch (sres->delegation.delegation_type) {
167 167 case OPEN_DELEGATE_NONE:
168 168 return;
169 169 case OPEN_DELEGATE_READ:
170 170 sacep = &sres->delegation.open_delegation4_u.read.permissions;
171 171 dacep = &dres->delegation.open_delegation4_u.read.permissions;
172 172 break;
173 173 case OPEN_DELEGATE_WRITE:
174 174 sacep = &sres->delegation.open_delegation4_u.write.permissions;
175 175 dacep = &dres->delegation.open_delegation4_u.write.permissions;
176 176 break;
177 177 }
178 178 dacep->who.utf8string_val =
179 179 kmem_alloc(sacep->who.utf8string_len, KM_SLEEP);
180 180 bcopy(sacep->who.utf8string_val, dacep->who.utf8string_val,
181 181 sacep->who.utf8string_len);
182 182 }
183 183
184 184 static void
185 185 deep_open_free(OPEN4res *res)
186 186 {
187 187 nfsace4 *acep;
188 188 if (res->status != NFS4_OK)
189 189 return;
190 190
191 191 switch (res->delegation.delegation_type) {
192 192 case OPEN_DELEGATE_NONE:
193 193 return;
194 194 case OPEN_DELEGATE_READ:
195 195 acep = &res->delegation.open_delegation4_u.read.permissions;
196 196 break;
197 197 case OPEN_DELEGATE_WRITE:
198 198 acep = &res->delegation.open_delegation4_u.write.permissions;
199 199 break;
200 200 }
201 201
202 202 if (acep->who.utf8string_val) {
203 203 kmem_free(acep->who.utf8string_val, acep->who.utf8string_len);
204 204 acep->who.utf8string_val = NULL;
205 205 }
206 206 }
207 207
208 208 void
209 209 rfs4_free_reply(nfs_resop4 *rp)
210 210 {
211 211 switch (rp->resop) {
212 212 case OP_LOCK:
213 213 deep_lock_free(&rp->nfs_resop4_u.oplock);
214 214 break;
215 215 case OP_OPEN:
216 216 deep_open_free(&rp->nfs_resop4_u.opopen);
217 217 default:
218 218 break;
219 219 }
220 220 }
221 221
222 222 void
223 223 rfs4_copy_reply(nfs_resop4 *dst, nfs_resop4 *src)
224 224 {
225 225 *dst = *src;
226 226
227 227 /* Handle responses that need deep copy */
228 228 switch (src->resop) {
229 229 case OP_LOCK:
230 230 deep_lock_copy(&dst->nfs_resop4_u.oplock,
231 231 &src->nfs_resop4_u.oplock);
232 232 break;
233 233 case OP_OPEN:
234 234 deep_open_copy(&dst->nfs_resop4_u.opopen,
235 235 &src->nfs_resop4_u.opopen);
236 236 break;
237 237 default:
238 238 break;
239 239 };
240 240 }
241 241
242 242 /*
243 243 * This is the implementation of the underlying state engine. The
244 244 * public interface to this engine is described by
245 245 * nfs4_state.h. Callers to the engine should hold no state engine
246 246 * locks when they call in to it. If the protocol needs to lock data
247 247 * structures it should do so after acquiring all references to them
248 248 * first and then follow the following lock order:
249 249 *
250 250 * client > openowner > state > lo_state > lockowner > file.
251 251 *
252 252 * Internally we only allow a thread to hold one hash bucket lock at a
253 253 * time and the lock is higher in the lock order (must be acquired
254 254 * first) than the data structure that is on that hash list.
255 255 *
256 256 * If a new reference was acquired by the caller, that reference needs
257 257 * to be released after releasing all acquired locks with the
258 258 * corresponding rfs4_*_rele routine.
259 259 */
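To illustrate that ordering, a hedged sketch of a caller that needs both a client and one of its open-owners; rfs4_dbe_lock()/rfs4_dbe_unlock() and the rc_dbe field appear elsewhere in this code, while the enclosing function and the ro_dbe field name are assumptions:

    /* Hypothetical caller: references already held on both entries. */
    static void
    example_lock_order(rfs4_client_t *cp, rfs4_openowner_t *oop)
    {
    	rfs4_dbe_lock(cp->rc_dbe);	/* client first */
    	rfs4_dbe_lock(oop->ro_dbe);	/* then openowner (field name assumed) */

    	/* ... examine or update both entries ... */

    	rfs4_dbe_unlock(oop->ro_dbe);
    	rfs4_dbe_unlock(cp->rc_dbe);
    }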
260 260
261 261 /*
262 262 * This code is somewhat prototypical for now. Its purpose currently is to
263 263 * implement the interfaces sufficiently to finish the higher protocol
264 264 * elements. It will be replaced by dynamically resizable tables
265 265 * backed by the kmem_cache allocator. However, synchronization is handled
266 266 * correctly (I hope) and will not change by much. The mutexes for
267 267 * the hash buckets that can be used to create new instances of data
268 268 * structures might be good candidates to evolve into reader writer
269 269 * locks. If it has to do a creation, it would be holding the
270 270 * mutex across a kmem_alloc with KM_SLEEP specified.
271 271 */
272 272
273 273 #ifdef DEBUG
274 274 #define TABSIZE 17
275 275 #else
276 276 #define TABSIZE 2047
277 277 #endif
278 278
279 279 #define ADDRHASH(key) ((unsigned long)(key) >> 3)
280 280
281 281 #define MAXTABSZ 1024*1024
282 282
283 283 /* The values below are rfs4_lease_time units */
284 284
285 285 #ifdef DEBUG
286 286 #define CLIENT_CACHE_TIME 1
287 287 #define OPENOWNER_CACHE_TIME 1
288 288 #define STATE_CACHE_TIME 1
289 289 #define LO_STATE_CACHE_TIME 1
290 290 #define LOCKOWNER_CACHE_TIME 1
291 291 #define FILE_CACHE_TIME 3
292 292 #define DELEG_STATE_CACHE_TIME 1
293 293 #else
294 294 #define CLIENT_CACHE_TIME 10
295 295 #define OPENOWNER_CACHE_TIME 5
296 296 #define STATE_CACHE_TIME 1
297 297 #define LO_STATE_CACHE_TIME 1
298 298 #define LOCKOWNER_CACHE_TIME 3
299 299 #define FILE_CACHE_TIME 40
300 300 #define DELEG_STATE_CACHE_TIME 1
301 301 #endif
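Since these values are multiplied by rfs4_lease_time when the per-zone tables are created (see rfs4_state_zone_init() below), the non-DEBUG settings translate into cache lifetimes roughly as follows, assuming a lease time of 90 seconds:

    /*
     * Illustrative arithmetic only (assumes rfs4_lease_time == 90):
     *   CLIENT_CACHE_TIME  (10) * 90 =  900 seconds
     *   FILE_CACHE_TIME    (40) * 90 = 3600 seconds
     *   STATE_CACHE_TIME    (1) * 90 =   90 seconds
     */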
302 302
303 303 /*
304 304 * NFSv4 server state databases
305 305 *
306 306 * Initialized when the module is loaded and used by NFSv4 state tables.
307 307 * These kmem_cache pools are global; the tables that make use of them
308 308 * are per zone.
309 309 */
310 310 kmem_cache_t *rfs4_client_mem_cache;
311 311 kmem_cache_t *rfs4_clntIP_mem_cache;
312 312 kmem_cache_t *rfs4_openown_mem_cache;
313 313 kmem_cache_t *rfs4_openstID_mem_cache;
314 314 kmem_cache_t *rfs4_lockstID_mem_cache;
315 315 kmem_cache_t *rfs4_lockown_mem_cache;
316 316 kmem_cache_t *rfs4_file_mem_cache;
317 317 kmem_cache_t *rfs4_delegstID_mem_cache;
318 318
319 319 /*
320 320 * NFSv4 state table functions
321 321 */
322 322 static bool_t rfs4_client_create(rfs4_entry_t, void *);
323 323 static void rfs4_dss_remove_cpleaf(rfs4_client_t *);
324 324 static void rfs4_dss_remove_leaf(rfs4_servinst_t *, char *, char *);
325 325 static void rfs4_client_destroy(rfs4_entry_t);
326 326 static bool_t rfs4_client_expiry(rfs4_entry_t);
327 327 static uint32_t clientid_hash(void *);
328 328 static bool_t clientid_compare(rfs4_entry_t, void *);
329 329 static void *clientid_mkkey(rfs4_entry_t);
330 330 static uint32_t nfsclnt_hash(void *);
331 331 static bool_t nfsclnt_compare(rfs4_entry_t, void *);
332 332 static void *nfsclnt_mkkey(rfs4_entry_t);
333 333 static bool_t rfs4_clntip_expiry(rfs4_entry_t);
334 334 static void rfs4_clntip_destroy(rfs4_entry_t);
335 335 static bool_t rfs4_clntip_create(rfs4_entry_t, void *);
336 336 static uint32_t clntip_hash(void *);
337 337 static bool_t clntip_compare(rfs4_entry_t, void *);
338 338 static void *clntip_mkkey(rfs4_entry_t);
339 339 static bool_t rfs4_openowner_create(rfs4_entry_t, void *);
340 340 static void rfs4_openowner_destroy(rfs4_entry_t);
341 341 static bool_t rfs4_openowner_expiry(rfs4_entry_t);
342 342 static uint32_t openowner_hash(void *);
343 343 static bool_t openowner_compare(rfs4_entry_t, void *);
344 344 static void *openowner_mkkey(rfs4_entry_t);
345 345 static bool_t rfs4_state_create(rfs4_entry_t, void *);
346 346 static void rfs4_state_destroy(rfs4_entry_t);
347 347 static bool_t rfs4_state_expiry(rfs4_entry_t);
348 348 static uint32_t state_hash(void *);
349 349 static bool_t state_compare(rfs4_entry_t, void *);
350 350 static void *state_mkkey(rfs4_entry_t);
351 351 static uint32_t state_owner_file_hash(void *);
352 352 static bool_t state_owner_file_compare(rfs4_entry_t, void *);
353 353 static void *state_owner_file_mkkey(rfs4_entry_t);
354 354 static uint32_t state_file_hash(void *);
355 355 static bool_t state_file_compare(rfs4_entry_t, void *);
356 356 static void *state_file_mkkey(rfs4_entry_t);
357 357 static bool_t rfs4_lo_state_create(rfs4_entry_t, void *);
358 358 static void rfs4_lo_state_destroy(rfs4_entry_t);
359 359 static bool_t rfs4_lo_state_expiry(rfs4_entry_t);
360 360 static uint32_t lo_state_hash(void *);
361 361 static bool_t lo_state_compare(rfs4_entry_t, void *);
362 362 static void *lo_state_mkkey(rfs4_entry_t);
363 363 static uint32_t lo_state_lo_hash(void *);
364 364 static bool_t lo_state_lo_compare(rfs4_entry_t, void *);
365 365 static void *lo_state_lo_mkkey(rfs4_entry_t);
366 366 static bool_t rfs4_lockowner_create(rfs4_entry_t, void *);
367 367 static void rfs4_lockowner_destroy(rfs4_entry_t);
368 368 static bool_t rfs4_lockowner_expiry(rfs4_entry_t);
369 369 static uint32_t lockowner_hash(void *);
370 370 static bool_t lockowner_compare(rfs4_entry_t, void *);
371 371 static void *lockowner_mkkey(rfs4_entry_t);
372 372 static uint32_t pid_hash(void *);
373 373 static bool_t pid_compare(rfs4_entry_t, void *);
374 374 static void *pid_mkkey(rfs4_entry_t);
375 375 static bool_t rfs4_file_create(rfs4_entry_t, void *);
376 376 static void rfs4_file_destroy(rfs4_entry_t);
377 377 static uint32_t file_hash(void *);
378 378 static bool_t file_compare(rfs4_entry_t, void *);
379 379 static void *file_mkkey(rfs4_entry_t);
380 380 static bool_t rfs4_deleg_state_create(rfs4_entry_t, void *);
381 381 static void rfs4_deleg_state_destroy(rfs4_entry_t);
382 382 static bool_t rfs4_deleg_state_expiry(rfs4_entry_t);
383 383 static uint32_t deleg_hash(void *);
384 384 static bool_t deleg_compare(rfs4_entry_t, void *);
385 385 static void *deleg_mkkey(rfs4_entry_t);
386 386 static uint32_t deleg_state_hash(void *);
387 387 static bool_t deleg_state_compare(rfs4_entry_t, void *);
388 388 static void *deleg_state_mkkey(rfs4_entry_t);
389 389
390 390 static void rfs4_state_rele_nounlock(rfs4_state_t *);
391 391
392 392 static int rfs4_ss_enabled = 0;
393 393
394 394 extern void (*rfs4_client_clrst)(struct nfs4clrst_args *);
395 395
396 396 void
397 397 rfs4_ss_pnfree(rfs4_ss_pn_t *ss_pn)
398 398 {
399 399 kmem_free(ss_pn, sizeof (rfs4_ss_pn_t));
400 400 }
401 401
402 402 static rfs4_ss_pn_t *
403 403 rfs4_ss_pnalloc(char *dir, char *leaf)
404 404 {
405 405 rfs4_ss_pn_t *ss_pn;
406 406 int dir_len, leaf_len;
407 407
408 408 /*
409 409 * validate we have a reasonable path
410 410 * (account for the '/' and trailing null)
411 411 */
412 412 if ((dir_len = strlen(dir)) > MAXPATHLEN ||
413 413 (leaf_len = strlen(leaf)) > MAXNAMELEN ||
414 414 (dir_len + leaf_len + 2) > MAXPATHLEN) {
415 415 return (NULL);
416 416 }
417 417
418 418 ss_pn = kmem_alloc(sizeof (rfs4_ss_pn_t), KM_SLEEP);
419 419
420 420 (void) snprintf(ss_pn->pn, MAXPATHLEN, "%s/%s", dir, leaf);
421 421 /* Handy pointer to just the leaf name */
422 422 ss_pn->leaf = ss_pn->pn + dir_len + 1;
423 423 return (ss_pn);
424 424 }
425 425
426 426
427 427 /*
428 428 * Move the "leaf" filename from "sdir" directory
429 429 * to the "ddir" directory. Return the pathname of
430 430 * the destination unless the rename fails in which
431 431 * case we need to return the source pathname.
432 432 */
433 433 static rfs4_ss_pn_t *
434 434 rfs4_ss_movestate(char *sdir, char *ddir, char *leaf)
435 435 {
436 436 rfs4_ss_pn_t *src, *dst;
437 437
438 438 if ((src = rfs4_ss_pnalloc(sdir, leaf)) == NULL)
439 439 return (NULL);
440 440
441 441 if ((dst = rfs4_ss_pnalloc(ddir, leaf)) == NULL) {
442 442 rfs4_ss_pnfree(src);
443 443 return (NULL);
444 444 }
445 445
446 446 /*
447 447 * If the rename fails we shall return the src
448 448 * pathname and free the dst. Otherwise we need
449 449 * to free the src and return the dst pathname.
450 450 */
451 451 if (vn_rename(src->pn, dst->pn, UIO_SYSSPACE)) {
452 452 rfs4_ss_pnfree(dst);
453 453 return (src);
454 454 }
455 455 rfs4_ss_pnfree(src);
456 456 return (dst);
457 457 }
458 458
459 459
460 460 static rfs4_oldstate_t *
461 461 rfs4_ss_getstate(vnode_t *dvp, rfs4_ss_pn_t *ss_pn)
462 462 {
463 463 struct uio uio;
464 464 struct iovec iov[3];
465 465
466 466 rfs4_oldstate_t *cl_ss = NULL;
467 467 vnode_t *vp;
468 468 vattr_t va;
469 469 uint_t id_len;
470 470 int err, kill_file, file_vers;
471 471
472 472 if (ss_pn == NULL)
473 473 return (NULL);
474 474
475 475 /*
476 476 * open the state file.
477 477 */
478 478 if (vn_open(ss_pn->pn, UIO_SYSSPACE, FREAD, 0, &vp, 0, 0) != 0) {
479 479 return (NULL);
480 480 }
481 481
482 482 if (vp->v_type != VREG) {
483 483 (void) VOP_CLOSE(vp, FREAD, 1, (offset_t)0, CRED(), NULL);
484 484 VN_RELE(vp);
485 485 return (NULL);
486 486 }
487 487
488 488 err = VOP_ACCESS(vp, VREAD, 0, CRED(), NULL);
489 489 if (err) {
490 490 /*
491 491 * We don't have read access? better get the heck out.
492 492 */
493 493 (void) VOP_CLOSE(vp, FREAD, 1, (offset_t)0, CRED(), NULL);
494 494 VN_RELE(vp);
495 495 return (NULL);
496 496 }
497 497
498 498 (void) VOP_RWLOCK(vp, V_WRITELOCK_FALSE, NULL);
499 499 /*
500 500 * get the file size to do some basic validation
501 501 */
502 502 va.va_mask = AT_SIZE;
503 503 err = VOP_GETATTR(vp, &va, 0, CRED(), NULL);
504 504
505 505 kill_file = (va.va_size == 0 || va.va_size <
506 506 (NFS4_VERIFIER_SIZE + sizeof (uint_t)+1));
507 507
508 508 if (err || kill_file) {
509 509 VOP_RWUNLOCK(vp, V_WRITELOCK_FALSE, NULL);
510 510 (void) VOP_CLOSE(vp, FREAD, 1, (offset_t)0, CRED(), NULL);
511 511 VN_RELE(vp);
512 512 if (kill_file) {
513 513 (void) VOP_REMOVE(dvp, ss_pn->leaf, CRED(), NULL, 0);
514 514 }
515 515 return (NULL);
516 516 }
517 517
518 518 cl_ss = kmem_alloc(sizeof (rfs4_oldstate_t), KM_SLEEP);
519 519
520 520 /*
521 521 * build iovecs to read in the file_version, verifier and id_len
522 522 */
523 523 iov[0].iov_base = (caddr_t)&file_vers;
524 524 iov[0].iov_len = sizeof (int);
525 525 iov[1].iov_base = (caddr_t)&cl_ss->cl_id4.verifier;
526 526 iov[1].iov_len = NFS4_VERIFIER_SIZE;
527 527 iov[2].iov_base = (caddr_t)&id_len;
528 528 iov[2].iov_len = sizeof (uint_t);
529 529
530 530 uio.uio_iov = iov;
531 531 uio.uio_iovcnt = 3;
532 532 uio.uio_segflg = UIO_SYSSPACE;
533 533 uio.uio_loffset = 0;
534 534 uio.uio_resid = sizeof (int) + NFS4_VERIFIER_SIZE + sizeof (uint_t);
535 535
536 536 if (err = VOP_READ(vp, &uio, FREAD, CRED(), NULL)) {
537 537 VOP_RWUNLOCK(vp, V_WRITELOCK_FALSE, NULL);
538 538 (void) VOP_CLOSE(vp, FREAD, 1, (offset_t)0, CRED(), NULL);
539 539 VN_RELE(vp);
540 540 kmem_free(cl_ss, sizeof (rfs4_oldstate_t));
541 541 return (NULL);
542 542 }
543 543
544 544 /*
545 545 * If the file_version doesn't match, or the id_len is zero,
546 546 * or the combination of the verifier, id_len and id_val is
547 547 * bigger than the file, then we have a problem.
548 548 * If so, ditch the file.
549 549 */
550 550 kill_file = (file_vers != NFS4_SS_VERSION || id_len == 0 ||
551 551 (id_len + NFS4_VERIFIER_SIZE + sizeof (uint_t)) > va.va_size);
552 552
553 553 if (err || kill_file) {
554 554 VOP_RWUNLOCK(vp, V_WRITELOCK_FALSE, NULL);
555 555 (void) VOP_CLOSE(vp, FREAD, 1, (offset_t)0, CRED(), NULL);
556 556 VN_RELE(vp);
557 557 kmem_free(cl_ss, sizeof (rfs4_oldstate_t));
558 558 if (kill_file) {
559 559 (void) VOP_REMOVE(dvp, ss_pn->leaf, CRED(), NULL, 0);
560 560 }
561 561 return (NULL);
562 562 }
563 563
564 564 /*
565 565 * now get the client id value
566 566 */
567 567 cl_ss->cl_id4.id_val = kmem_alloc(id_len, KM_SLEEP);
568 568 iov[0].iov_base = cl_ss->cl_id4.id_val;
569 569 iov[0].iov_len = id_len;
570 570
571 571 uio.uio_iov = iov;
572 572 uio.uio_iovcnt = 1;
573 573 uio.uio_segflg = UIO_SYSSPACE;
574 574 uio.uio_resid = cl_ss->cl_id4.id_len = id_len;
575 575
576 576 if (err = VOP_READ(vp, &uio, FREAD, CRED(), NULL)) {
577 577 VOP_RWUNLOCK(vp, V_WRITELOCK_FALSE, NULL);
578 578 (void) VOP_CLOSE(vp, FREAD, 1, (offset_t)0, CRED(), NULL);
579 579 VN_RELE(vp);
580 580 kmem_free(cl_ss->cl_id4.id_val, id_len);
581 581 kmem_free(cl_ss, sizeof (rfs4_oldstate_t));
582 582 return (NULL);
583 583 }
584 584
585 585 VOP_RWUNLOCK(vp, V_WRITELOCK_FALSE, NULL);
586 586 (void) VOP_CLOSE(vp, FREAD, 1, (offset_t)0, CRED(), NULL);
587 587 VN_RELE(vp);
588 588 return (cl_ss);
589 589 }
590 590
591 591 #ifdef nextdp
592 592 #undef nextdp
593 593 #endif
594 594 #define nextdp(dp) ((struct dirent64 *)((char *)(dp) + (dp)->d_reclen))
595 595
596 596 /*
597 597 * Add entries from statedir to supplied oldstate list.
598 598 * Optionally, move all entries from statedir -> destdir.
599 599 */
600 600 void
601 601 rfs4_ss_oldstate(rfs4_oldstate_t *oldstate, char *statedir, char *destdir)
602 602 {
603 603 rfs4_ss_pn_t *ss_pn;
604 604 rfs4_oldstate_t *cl_ss = NULL;
605 605 char *dirt = NULL;
606 606 int err, dir_eof = 0, size = 0;
607 607 vnode_t *dvp;
608 608 struct iovec iov;
609 609 struct uio uio;
610 610 struct dirent64 *dep;
611 611 offset_t dirchunk_offset = 0;
612 612
613 613 /*
614 614 * open the state directory
615 615 */
616 616 if (vn_open(statedir, UIO_SYSSPACE, FREAD, 0, &dvp, 0, 0))
617 617 return;
618 618
619 619 if (dvp->v_type != VDIR || VOP_ACCESS(dvp, VREAD, 0, CRED(), NULL))
620 620 goto out;
621 621
622 622 dirt = kmem_alloc(RFS4_SS_DIRSIZE, KM_SLEEP);
623 623
624 624 /*
625 625 * Get and process the directory entries
626 626 */
627 627 while (!dir_eof) {
628 628 (void) VOP_RWLOCK(dvp, V_WRITELOCK_FALSE, NULL);
629 629 iov.iov_base = dirt;
630 630 iov.iov_len = RFS4_SS_DIRSIZE;
631 631 uio.uio_iov = &iov;
632 632 uio.uio_iovcnt = 1;
633 633 uio.uio_segflg = UIO_SYSSPACE;
634 634 uio.uio_loffset = dirchunk_offset;
635 635 uio.uio_resid = RFS4_SS_DIRSIZE;
636 636
637 637 err = VOP_READDIR(dvp, &uio, CRED(), &dir_eof, NULL, 0);
638 638 VOP_RWUNLOCK(dvp, V_WRITELOCK_FALSE, NULL);
639 639 if (err)
640 640 goto out;
641 641
642 642 size = RFS4_SS_DIRSIZE - uio.uio_resid;
643 643
644 644 /*
645 645 * Process all the directory entries in this
646 646 * readdir chunk
647 647 */
648 648 for (dep = (struct dirent64 *)dirt; size > 0;
649 649 dep = nextdp(dep)) {
650 650
651 651 size -= dep->d_reclen;
652 652 dirchunk_offset = dep->d_off;
653 653
654 654 /*
655 655 * Skip '.' and '..'
656 656 */
657 657 if (NFS_IS_DOTNAME(dep->d_name))
658 658 continue;
659 659
660 660 ss_pn = rfs4_ss_pnalloc(statedir, dep->d_name);
661 661 if (ss_pn == NULL)
662 662 continue;
663 663
664 664 if (cl_ss = rfs4_ss_getstate(dvp, ss_pn)) {
665 665 if (destdir != NULL) {
666 666 rfs4_ss_pnfree(ss_pn);
667 667 cl_ss->ss_pn = rfs4_ss_movestate(
668 668 statedir, destdir, dep->d_name);
669 669 } else {
670 670 cl_ss->ss_pn = ss_pn;
671 671 }
672 672 insque(cl_ss, oldstate);
673 673 } else {
674 674 rfs4_ss_pnfree(ss_pn);
675 675 }
676 676 }
677 677 }
678 678
679 679 out:
680 680 (void) VOP_CLOSE(dvp, FREAD, 1, (offset_t)0, CRED(), NULL);
681 681 VN_RELE(dvp);
682 682 if (dirt)
683 683 kmem_free((caddr_t)dirt, RFS4_SS_DIRSIZE);
684 684 }
685 685
686 686 static void
687 687 rfs4_ss_init(nfs4_srv_t *nsrv4)
688 688 {
689 689 int npaths = 1;
690 690 char *default_dss_path = NFS4_DSS_VAR_DIR;
691 691
692 692 /* read the default stable storage state */
693 693 rfs4_dss_readstate(nsrv4, npaths, &default_dss_path);
694 694
695 695 rfs4_ss_enabled = 1;
696 696 }
697 697
698 698 static void
699 699 rfs4_ss_fini(nfs4_srv_t *nsrv4)
700 700 {
701 701 rfs4_servinst_t *sip;
702 702
703 703 mutex_enter(&nsrv4->servinst_lock);
704 704 sip = nsrv4->nfs4_cur_servinst;
705 705 while (sip != NULL) {
706 706 rfs4_dss_clear_oldstate(sip);
707 707 sip = sip->next;
708 708 }
709 709 mutex_exit(&nsrv4->servinst_lock);
710 710 }
711 711
712 712 /*
713 713 * Remove all oldstate files referenced by this servinst.
714 714 */
715 715 static void
716 716 rfs4_dss_clear_oldstate(rfs4_servinst_t *sip)
717 717 {
718 718 rfs4_oldstate_t *os_head, *osp;
719 719
720 720 rw_enter(&sip->oldstate_lock, RW_WRITER);
721 721 os_head = sip->oldstate;
722 722
723 723 if (os_head == NULL) {
724 724 rw_exit(&sip->oldstate_lock);
725 725 return;
726 726 }
727 727
728 728 /* skip dummy entry */
729 729 osp = os_head->next;
730 730 while (osp != os_head) {
731 731 char *leaf = osp->ss_pn->leaf;
732 732 rfs4_oldstate_t *os_next;
733 733
734 734 rfs4_dss_remove_leaf(sip, NFS4_DSS_OLDSTATE_LEAF, leaf);
735 735
736 736 if (osp->cl_id4.id_val)
737 737 kmem_free(osp->cl_id4.id_val, osp->cl_id4.id_len);
738 738 rfs4_ss_pnfree(osp->ss_pn);
739 739
740 740 os_next = osp->next;
741 741 remque(osp);
742 742 kmem_free(osp, sizeof (rfs4_oldstate_t));
743 743 osp = os_next;
744 744 }
745 745
746 746 rw_exit(&sip->oldstate_lock);
747 747 }
748 748
749 749 /*
750 750 * Form the state and oldstate paths, and read in the stable storage files.
751 751 */
752 752 void
753 753 rfs4_dss_readstate(nfs4_srv_t *nsrv4, int npaths, char **paths)
754 754 {
755 755 int i;
756 756 char *state, *oldstate;
757 757
758 758 state = kmem_alloc(MAXPATHLEN, KM_SLEEP);
759 759 oldstate = kmem_alloc(MAXPATHLEN, KM_SLEEP);
760 760
761 761 for (i = 0; i < npaths; i++) {
762 762 char *path = paths[i];
763 763
764 764 (void) sprintf(state, "%s/%s", path, NFS4_DSS_STATE_LEAF);
765 765 (void) sprintf(oldstate, "%s/%s", path, NFS4_DSS_OLDSTATE_LEAF);
766 766
767 767 /*
768 768 * Populate the current server instance's oldstate list.
769 769 *
770 770 * 1. Read stable storage data from old state directory,
771 771 * leaving its contents alone.
772 772 *
773 773 * 2. Read stable storage data from state directory,
774 774 * and move the latter's contents to old state
775 775 * directory.
776 776 */
777 777 /* CSTYLED */
778 778 rfs4_ss_oldstate(nsrv4->nfs4_cur_servinst->oldstate, oldstate, NULL);
779 779 /* CSTYLED */
780 780 rfs4_ss_oldstate(nsrv4->nfs4_cur_servinst->oldstate, state, oldstate);
781 781 }
782 782
783 783 kmem_free(state, MAXPATHLEN);
784 784 kmem_free(oldstate, MAXPATHLEN);
785 785 }
786 786
787 787
788 788 /*
789 789 * Check if we are still in grace and if the client can be
790 790 * granted permission to perform reclaims.
791 791 */
792 792 void
793 793 rfs4_ss_chkclid(nfs4_srv_t *nsrv4, rfs4_client_t *cp)
794 794 {
795 795 rfs4_servinst_t *sip;
796 796
797 797 /*
798 798 * It should be sufficient to check the oldstate data for just
799 799 * this client's instance. However, since our per-instance
800 800 * client grouping is solely temporal, HA-NFSv4 RG failover
801 801 * might result in clients of the same RG being partitioned into
802 802 * separate instances.
803 803 *
804 804 * Until the client grouping is improved, we must check the
805 805 * oldstate data for all instances with an active grace period.
806 806 *
807 807 * This also serves as the mechanism to remove stale oldstate data.
808 808 * The first time we check an instance after its grace period has
809 809 * expired, the oldstate data should be cleared.
810 810 *
811 811 * Start at the current instance, and walk the list backwards
812 812 * to the first.
813 813 */
814 814 mutex_enter(&nsrv4->servinst_lock);
815 815 for (sip = nsrv4->nfs4_cur_servinst; sip != NULL; sip = sip->prev) {
816 816 rfs4_ss_chkclid_sip(cp, sip);
817 817
818 818 /* if the above check found this client, we're done */
819 819 if (cp->rc_can_reclaim)
820 820 break;
821 821 }
822 822 mutex_exit(&nsrv4->servinst_lock);
823 823 }
824 824
825 825 static void
826 826 rfs4_ss_chkclid_sip(rfs4_client_t *cp, rfs4_servinst_t *sip)
827 827 {
828 828 rfs4_oldstate_t *osp, *os_head;
829 829
830 830 /* short circuit everything if this server instance has no oldstate */
831 831 rw_enter(&sip->oldstate_lock, RW_READER);
832 832 os_head = sip->oldstate;
833 833 rw_exit(&sip->oldstate_lock);
834 834 if (os_head == NULL)
835 835 return;
836 836
837 837 /*
838 838 * If this server instance is no longer in a grace period then
839 839 * the client won't be able to reclaim. No further need for this
840 840 * instance's oldstate data, so it can be cleared.
841 841 */
842 842 if (!rfs4_servinst_in_grace(sip))
843 843 return;
844 844
845 845 /* this instance is still in grace; search for the clientid */
846 846
847 847 rw_enter(&sip->oldstate_lock, RW_READER);
848 848
849 849 os_head = sip->oldstate;
850 850 /* skip dummy entry */
851 851 osp = os_head->next;
852 852 while (osp != os_head) {
853 853 if (osp->cl_id4.id_len == cp->rc_nfs_client.id_len) {
854 854 if (bcmp(osp->cl_id4.id_val, cp->rc_nfs_client.id_val,
855 855 osp->cl_id4.id_len) == 0) {
856 856 cp->rc_can_reclaim = 1;
857 857 break;
858 858 }
859 859 }
860 860 osp = osp->next;
861 861 }
862 862
863 863 rw_exit(&sip->oldstate_lock);
864 864 }
865 865
866 866 /*
867 867 * Place client information into stable storage: 1/3.
868 868 * First, generate the leaf filename, from the client's IP address and
869 869 * the server-generated short-hand clientid.
870 870 */
871 871 void
872 872 rfs4_ss_clid(nfs4_srv_t *nsrv4, rfs4_client_t *cp)
873 873 {
874 874 const char *kinet_ntop6(uchar_t *, char *, size_t);
875 875 char leaf[MAXNAMELEN], buf[INET6_ADDRSTRLEN];
876 876 struct sockaddr *ca;
877 877 uchar_t *b;
878 878
879 879 if (rfs4_ss_enabled == 0) {
880 880 return;
881 881 }
882 882
883 883 buf[0] = 0;
884 884
885 885 ca = (struct sockaddr *)&cp->rc_addr;
886 886
887 887 /*
888 888 * Convert the caller's IP address to a dotted string
889 889 */
890 890 if (ca->sa_family == AF_INET) {
891 891 b = (uchar_t *)&((struct sockaddr_in *)ca)->sin_addr;
892 892 (void) sprintf(buf, "%03d.%03d.%03d.%03d", b[0] & 0xFF,
893 893 b[1] & 0xFF, b[2] & 0xFF, b[3] & 0xFF);
894 894 } else if (ca->sa_family == AF_INET6) {
895 895 struct sockaddr_in6 *sin6;
896 896
897 897 sin6 = (struct sockaddr_in6 *)ca;
898 898 (void) kinet_ntop6((uchar_t *)&sin6->sin6_addr,
899 899 buf, INET6_ADDRSTRLEN);
900 900 }
901 901
902 902 (void) snprintf(leaf, MAXNAMELEN, "%s-%llx", buf,
903 903 (longlong_t)cp->rc_clientid);
904 904 rfs4_ss_clid_write(nsrv4, cp, leaf);
905 905 }
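For illustration only, with made-up values: a client at 192.168.1.10 that was handed the clientid 0x5d00000003 would be recorded under a leaf built from the formats above:

    /*
     * "%03d.%03d.%03d.%03d" plus "-%llx" (values hypothetical):
     *
     *	192.168.001.010-5d00000003
     */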
906 906
907 907 /*
908 908 * Place client information into stable storage: 2/3.
909 909 * DSS: distributed stable storage: the file may need to be written to
910 910 * multiple directories.
911 911 */
912 912 static void
913 913 rfs4_ss_clid_write(nfs4_srv_t *nsrv4, rfs4_client_t *cp, char *leaf)
914 914 {
915 915 rfs4_servinst_t *sip;
916 916
917 917 /*
918 918 * It should be sufficient to write the leaf file to (all) DSS paths
919 919 * associated with just this client's instance. However, since our
920 920 * per-instance client grouping is solely temporal, HA-NFSv4 RG
921 921 * failover might result in us losing DSS data.
922 922 *
923 923 * Until the client grouping is improved, we must write the DSS data
924 924 * to all instances' paths. Start at the current instance, and
925 925 * walk the list backwards to the first.
926 926 */
927 927 mutex_enter(&nsrv4->servinst_lock);
928 928 for (sip = nsrv4->nfs4_cur_servinst; sip != NULL; sip = sip->prev) {
929 929 int i, npaths = sip->dss_npaths;
930 930
931 931 /* write the leaf file to all DSS paths */
932 932 for (i = 0; i < npaths; i++) {
933 933 rfs4_dss_path_t *dss_path = sip->dss_paths[i];
934 934
935 935 /* HA-NFSv4 path might have been failed-away from us */
936 936 if (dss_path == NULL)
937 937 continue;
938 938
939 939 rfs4_ss_clid_write_one(cp, dss_path->path, leaf);
940 940 }
941 941 }
942 942 mutex_exit(&nsrv4->servinst_lock);
943 943 }
944 944
945 945 /*
946 946 * Place client information into stable storage: 3/3.
947 947 * Write the stable storage data to the requested file.
948 948 */
949 949 static void
950 950 rfs4_ss_clid_write_one(rfs4_client_t *cp, char *dss_path, char *leaf)
951 951 {
952 952 int ioflag;
953 953 int file_vers = NFS4_SS_VERSION;
954 954 size_t dirlen;
955 955 struct uio uio;
956 956 struct iovec iov[4];
957 957 char *dir;
958 958 rfs4_ss_pn_t *ss_pn;
959 959 vnode_t *vp;
960 960 nfs_client_id4 *cl_id4 = &(cp->rc_nfs_client);
961 961
962 962 /* allow 2 extra bytes for '/' & NUL */
963 963 dirlen = strlen(dss_path) + strlen(NFS4_DSS_STATE_LEAF) + 2;
964 964 dir = kmem_alloc(dirlen, KM_SLEEP);
965 965 (void) sprintf(dir, "%s/%s", dss_path, NFS4_DSS_STATE_LEAF);
966 966
967 967 ss_pn = rfs4_ss_pnalloc(dir, leaf);
968 968 /* rfs4_ss_pnalloc takes its own copy */
969 969 kmem_free(dir, dirlen);
970 970 if (ss_pn == NULL)
971 971 return;
972 972
973 973 if (vn_open(ss_pn->pn, UIO_SYSSPACE, FCREAT|FWRITE, 0600, &vp,
974 974 CRCREAT, 0)) {
975 975 rfs4_ss_pnfree(ss_pn);
976 976 return;
977 977 }
978 978
979 979 /*
980 980 * We need to record leaf - i.e. the filename - so that we know
981 981 * what to remove, in the future. However, the dir part of cp->ss_pn
982 982 * should never be referenced directly, since it's potentially only
983 983 * one of several paths with this leaf in it.
984 984 */
985 985 if (cp->rc_ss_pn != NULL) {
986 986 if (strcmp(cp->rc_ss_pn->leaf, leaf) == 0) {
987 987 /* we've already recorded *this* leaf */
988 988 rfs4_ss_pnfree(ss_pn);
989 989 } else {
990 990 /* replace with this leaf */
991 991 rfs4_ss_pnfree(cp->rc_ss_pn);
992 992 cp->rc_ss_pn = ss_pn;
993 993 }
994 994 } else {
995 995 cp->rc_ss_pn = ss_pn;
996 996 }
997 997
998 998 /*
999 999 * Build a scatter list that points to the nfs_client_id4
1000 1000 */
1001 1001 iov[0].iov_base = (caddr_t)&file_vers;
1002 1002 iov[0].iov_len = sizeof (int);
1003 1003 iov[1].iov_base = (caddr_t)&(cl_id4->verifier);
1004 1004 iov[1].iov_len = NFS4_VERIFIER_SIZE;
1005 1005 iov[2].iov_base = (caddr_t)&(cl_id4->id_len);
1006 1006 iov[2].iov_len = sizeof (uint_t);
1007 1007 iov[3].iov_base = (caddr_t)cl_id4->id_val;
1008 1008 iov[3].iov_len = cl_id4->id_len;
1009 1009
1010 1010 uio.uio_iov = iov;
1011 1011 uio.uio_iovcnt = 4;
1012 1012 uio.uio_loffset = 0;
1013 1013 uio.uio_segflg = UIO_SYSSPACE;
1014 1014 uio.uio_llimit = (rlim64_t)MAXOFFSET_T;
1015 1015 uio.uio_resid = cl_id4->id_len + sizeof (int) +
1016 1016 NFS4_VERIFIER_SIZE + sizeof (uint_t);
1017 1017
1018 1018 ioflag = uio.uio_fmode = (FWRITE|FSYNC);
1019 1019 uio.uio_extflg = UIO_COPY_DEFAULT;
1020 1020
1021 1021 (void) VOP_RWLOCK(vp, V_WRITELOCK_TRUE, NULL);
1022 1022 /* write the full client id to the file. */
1023 1023 (void) VOP_WRITE(vp, &uio, ioflag, CRED(), NULL);
1024 1024 VOP_RWUNLOCK(vp, V_WRITELOCK_TRUE, NULL);
1025 1025
1026 1026 (void) VOP_CLOSE(vp, FWRITE, 1, (offset_t)0, CRED(), NULL);
1027 1027 VN_RELE(vp);
1028 1028 }
1029 1029
1030 1030 /*
1031 1031 * DSS: distributed stable storage.
1032 1032 * Unpack the list of paths passed by nfsd.
1033 1033 * Use nvlist_alloc(9F) to manage the data.
1034 1034 * The caller is responsible for allocating and freeing the buffer.
1035 1035 */
1036 1036 int
1037 1037 rfs4_dss_setpaths(char *buf, size_t buflen)
1038 1038 {
1039 1039 int error;
1040 1040
1041 1041 /*
1042 1042 * If this is a "warm start", i.e. we previously had DSS paths,
1043 1043 * preserve the old paths.
1044 1044 */
1045 1045 if (rfs4_dss_paths != NULL) {
1046 1046 /*
1047 1047 * Before we lose the ptr, destroy the nvlist and pathnames
1048 1048 * array from the warm start before this one.
1049 1049 */
1050 1050 nvlist_free(rfs4_dss_oldpaths);
1051 1051 rfs4_dss_oldpaths = rfs4_dss_paths;
1052 1052 }
1053 1053
1054 1054 /* unpack the buffer into a searchable nvlist */
1055 1055 error = nvlist_unpack(buf, buflen, &rfs4_dss_paths, KM_SLEEP);
1056 1056 if (error)
1057 1057 return (error);
1058 1058
1059 1059 /*
1060 1060 * Search the nvlist for the pathnames nvpair (which is the only nvpair
1061 1061 * in the list), and record its location.
1062 1062 */
1063 1063 error = nvlist_lookup_string_array(rfs4_dss_paths, NFS4_DSS_NVPAIR_NAME,
1064 1064 &rfs4_dss_newpaths, &rfs4_dss_numnewpaths);
1065 1065 return (error);
1066 1066 }
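A hedged sketch of the producer side of this interface: the daemon would typically pack the path list with libnvpair before passing the buffer down, roughly as below. The helper name and error handling are hypothetical; only the nvlist(3NVPAIR)/nvlist(9F) calls and NFS4_DSS_NVPAIR_NAME come from the interfaces used here.

    #include <libnvpair.h>

    /* Hypothetical userland helper: pack DSS paths for the kernel. */
    static int
    pack_dss_paths(char **paths, uint_t npaths, char **bufp, size_t *buflenp)
    {
    	nvlist_t *nvl;
    	int error;

    	*bufp = NULL;		/* let nvlist_pack() allocate the buffer */
    	*buflenp = 0;

    	if ((error = nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0)) != 0)
    		return (error);

    	/* Same nvpair name that rfs4_dss_setpaths() looks up. */
    	error = nvlist_add_string_array(nvl, NFS4_DSS_NVPAIR_NAME,
    	    paths, npaths);
    	if (error == 0)
    		error = nvlist_pack(nvl, bufp, buflenp, NV_ENCODE_XDR, 0);

    	nvlist_free(nvl);
    	return (error);
    }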
1067 1067
1068 1068 /*
1069 1069 * Ultimately the nfssys() call NFS4_CLR_STATE ends up here
1070 1070 * to find and mark the client for forced expire.
1071 1071 */
1072 1072 static void
1073 1073 rfs4_client_scrub(rfs4_entry_t ent, void *arg)
1074 1074 {
1075 1075 rfs4_client_t *cp = (rfs4_client_t *)ent;
1076 1076 struct nfs4clrst_args *clr = arg;
1077 1077 struct sockaddr_in6 *ent_sin6;
1078 1078 struct in6_addr clr_in6;
1079 1079 struct sockaddr_in *ent_sin;
1080 1080 struct in_addr clr_in;
1081 1081
1082 1082 if (clr->addr_type != cp->rc_addr.ss_family) {
1083 1083 return;
1084 1084 }
1085 1085
1086 1086 switch (clr->addr_type) {
1087 1087
1088 1088 case AF_INET6:
1089 1089 /* copyin the address from user space */
1090 1090 if (copyin(clr->ap, &clr_in6, sizeof (clr_in6))) {
1091 1091 break;
1092 1092 }
1093 1093
1094 1094 ent_sin6 = (struct sockaddr_in6 *)&cp->rc_addr;
1095 1095
1096 1096 /*
1097 1097 * now compare, and if equivalent mark entry
1098 1098 * for forced expiration
1099 1099 */
1100 1100 if (IN6_ARE_ADDR_EQUAL(&ent_sin6->sin6_addr, &clr_in6)) {
1101 1101 cp->rc_forced_expire = 1;
1102 1102 }
1103 1103 break;
1104 1104
1105 1105 case AF_INET:
1106 1106 /* copyin the address from user space */
1107 1107 if (copyin(clr->ap, &clr_in, sizeof (clr_in))) {
1108 1108 break;
1109 1109 }
1110 1110
1111 1111 ent_sin = (struct sockaddr_in *)&cp->rc_addr;
1112 1112
1113 1113 /*
1114 1114 * now compare, and if equivalent mark entry
1115 1115 * for forced expiration
1116 1116 */
1117 1117 if (ent_sin->sin_addr.s_addr == clr_in.s_addr) {
1118 1118 cp->rc_forced_expire = 1;
1119 1119 }
1120 1120 break;
1121 1121
1122 1122 default:
1123 1123 /* force this assert to fail */
1124 1124 ASSERT(clr->addr_type != clr->addr_type);
1125 1125 }
1126 1126 }
1127 1127
1128 1128 /*
1129 1129 * This is called from nfssys() in order to clear server state
1130 1130 * for the specified client IP Address.
1131 1131 */
1132 1132 void
1133 1133 rfs4_clear_client_state(struct nfs4clrst_args *clr)
1134 1134 {
1135 1135 nfs4_srv_t *nsrv4;
1136 1136 nsrv4 = zone_getspecific(rfs4_zone_key, curzone);
1137 1137 (void) rfs4_dbe_walk(nsrv4->rfs4_client_tab, rfs4_client_scrub, clr);
1138 1138 }
1139 1139
1140 1140 /*
1141 1141 * Used to initialize the NFSv4 server's state or database. All of
1142 1142 * the tables are created and timers are set.
1143 1143 */
1144 1144 void
1145 1145 rfs4_state_g_init()
1146 1146 {
1147 1147 extern boolean_t rfs4_cpr_callb(void *, int);
1148 1148 /*
1149 1149 * Add a CPR callback so that we can update client
1150 1150 * access times to extend the lease after a suspend
1151 1151 * and resume (using the same class as rpcmod/connmgr)
1152 1152 */
1153 1153 cpr_id = callb_add(rfs4_cpr_callb, 0, CB_CL_CPR_RPC, "rfs4");
1154 1154
1155 1155 /*
1156 1156 * NFSv4 server state databases
1157 1157 *
1158 1158 * Initialized when the module is loaded and used by NFSv4 state tables.
1159 1159 * These kmem_cache free pools are used globally; the NFSv4 state
1160 1160 * tables which make use of these kmem_cache free pools are per zone.
1161 1161 *
1162 1162 * initialize the global kmem_cache free pools which will be used by
1163 1163 * the NFSv4 state tables.
1164 1164 */
1165 1165 /* CSTYLED */
1166 1166 rfs4_client_mem_cache = nfs4_init_mem_cache("Client_entry_cache", 2, sizeof (rfs4_client_t), 0);
1167 1167 /* CSTYLED */
1168 1168 rfs4_clntIP_mem_cache = nfs4_init_mem_cache("ClntIP_entry_cache", 1, sizeof (rfs4_clntip_t), 1);
1169 1169 /* CSTYLED */
1170 1170 rfs4_openown_mem_cache = nfs4_init_mem_cache("OpenOwner_entry_cache", 1, sizeof (rfs4_openowner_t), 2);
1171 1171 /* CSTYLED */
1172 1172 rfs4_openstID_mem_cache = nfs4_init_mem_cache("OpenStateID_entry_cache", 3, sizeof (rfs4_state_t), 3);
1173 1173 /* CSTYLED */
1174 1174 rfs4_lockstID_mem_cache = nfs4_init_mem_cache("LockStateID_entry_cache", 3, sizeof (rfs4_lo_state_t), 4);
1175 1175 /* CSTYLED */
1176 1176 rfs4_lockown_mem_cache = nfs4_init_mem_cache("Lockowner_entry_cache", 2, sizeof (rfs4_lockowner_t), 5);
1177 1177 /* CSTYLED */
1178 1178 rfs4_file_mem_cache = nfs4_init_mem_cache("File_entry_cache", 1, sizeof (rfs4_file_t), 6);
1179 1179 /* CSTYLED */
1180 1180 rfs4_delegstID_mem_cache = nfs4_init_mem_cache("DelegStateID_entry_cache", 2, sizeof (rfs4_deleg_state_t), 7);
1181 1181
1182 1182 rfs4_client_clrst = rfs4_clear_client_state;
1183 1183 }
1184 1184
1185 1185
1186 1186 /*
1187 1187 * Used at server shutdown to clean up all of the NFSv4 server's structures
1188 1188 * and other state.
1189 1189 */
1190 1190 void
1191 1191 rfs4_state_g_fini()
1192 1192 {
1193 1193 int i;
1194 1194 /*
1195 1195 * Cleanup the CPR callback.
1196 1196 */
1197 1197 if (cpr_id)
1198 1198 (void) callb_delete(cpr_id);
1199 1199
1200 1200 rfs4_client_clrst = NULL;
1201 1201
1202 1202 /* free the NFSv4 state databases */
1203 1203 for (i = 0; i < RFS4_DB_MEM_CACHE_NUM; i++) {
1204 1204 kmem_cache_destroy(rfs4_db_mem_cache_table[i].r_db_mem_cache);
1205 1205 rfs4_db_mem_cache_table[i].r_db_mem_cache = NULL;
1206 1206 }
1207 1207
1208 1208 rfs4_client_mem_cache = NULL;
1209 1209 rfs4_clntIP_mem_cache = NULL;
1210 1210 rfs4_openown_mem_cache = NULL;
1211 1211 rfs4_openstID_mem_cache = NULL;
1212 1212 rfs4_lockstID_mem_cache = NULL;
1213 1213 rfs4_lockown_mem_cache = NULL;
1214 1214 rfs4_file_mem_cache = NULL;
1215 1215 rfs4_delegstID_mem_cache = NULL;
1216 1216
1217 1217 /* DSS: distributed stable storage */
1218 1218 nvlist_free(rfs4_dss_oldpaths);
1219 1219 nvlist_free(rfs4_dss_paths);
1220 1220 rfs4_dss_paths = rfs4_dss_oldpaths = NULL;
1221 1221 }
1222 1222
1223 1223 /*
1224 1224 * Used to initialize the per zone NFSv4 server's state
1225 1225 */
1226 1226 void
1227 1227 rfs4_state_zone_init(nfs4_srv_t *nsrv4)
1228 1228 {
1229 1229 time_t start_time;
1230 1230 int start_grace;
1231 1231 char *dss_path = NFS4_DSS_VAR_DIR;
1232 1232
1233 1233 /* DSS: distributed stable storage: initialise served paths list */
1234 1234 nsrv4->dss_pathlist = NULL;
1235 1235
1236 1236 /*
1237 1237 * Set the boot time. If the server
1238 1238 * has been restarted quickly and has had the opportunity to
1239 1239 * service clients, then the start_time needs to be bumped
1240 1240 * regardless. A small window but it exists...
1241 1241 */
1242 1242 start_time = gethrestime_sec();
1243 1243 if (nsrv4->rfs4_start_time < start_time)
1244 1244 nsrv4->rfs4_start_time = start_time;
1245 1245 else
1246 1246 nsrv4->rfs4_start_time++;
1247 1247
1248 1248 /*
1249 1249 * Create the first server instance, or a new one if the server has
1250 1250 * been restarted; see above comments on rfs4_start_time. Don't
1251 1251 * start its grace period; that will be done later, to maximise the
1252 1252 * clients' recovery window.
1253 1253 */
1254 1254 start_grace = 0;
1255 1255 if (curzone == global_zone && rfs4_dss_numnewpaths > 0) {
1256 1256 int i;
1257 1257 char **dss_allpaths = NULL;
1258 1258 dss_allpaths = kmem_alloc(sizeof (char *) * (rfs4_dss_numnewpaths + 1), KM_SLEEP);
1259 1259 /*
1260 1260 * Add the default path into the list of paths for saving
1261 1261 * state information.
1262 1262 */
1263 1263 dss_allpaths[0] = dss_path;
1264 1264 for ( i = 0; i < rfs4_dss_numnewpaths; i++) {
1265 1265 dss_allpaths[i + 1] = rfs4_dss_newpaths[i];
1266 1266 }
1267 1267 rfs4_servinst_create(nsrv4, start_grace, (rfs4_dss_numnewpaths + 1), dss_allpaths);
1268 1268 kmem_free(dss_allpaths, (sizeof (char *) * (rfs4_dss_numnewpaths + 1)));
1269 1269 } else {
1270 1270 rfs4_servinst_create(nsrv4, start_grace, 1, &dss_path);
1271 1271 }
1272 1272
1273 1273 /* reset the "first NFSv4 request" status */
1274 1274 nsrv4->seen_first_compound = 0;
1275 1275
1276 1276 mutex_enter(&nsrv4->state_lock);
1277 1277
1278 1278 /*
1279 1279 * If the server state database has already been initialized,
1280 1280 * skip it
1281 1281 */
1282 1282 if (nsrv4->nfs4_server_state != NULL) {
1283 1283 mutex_exit(&nsrv4->state_lock);
1284 1284 return;
1285 1285 }
1286 1286
1287 1287 rw_init(&nsrv4->rfs4_findclient_lock, NULL, RW_DEFAULT, NULL);
1288 1288
1289 1289 /* set the various cache timers for table creation */
1290 1290 if (nsrv4->rfs4_client_cache_time == 0)
1291 1291 nsrv4->rfs4_client_cache_time = CLIENT_CACHE_TIME;
1292 1292 if (nsrv4->rfs4_openowner_cache_time == 0)
1293 1293 nsrv4->rfs4_openowner_cache_time = OPENOWNER_CACHE_TIME;
1294 1294 if (nsrv4->rfs4_state_cache_time == 0)
1295 1295 nsrv4->rfs4_state_cache_time = STATE_CACHE_TIME;
1296 1296 if (nsrv4->rfs4_lo_state_cache_time == 0)
1297 1297 nsrv4->rfs4_lo_state_cache_time = LO_STATE_CACHE_TIME;
1298 1298 if (nsrv4->rfs4_lockowner_cache_time == 0)
1299 1299 nsrv4->rfs4_lockowner_cache_time = LOCKOWNER_CACHE_TIME;
1300 1300 if (nsrv4->rfs4_file_cache_time == 0)
1301 1301 nsrv4->rfs4_file_cache_time = FILE_CACHE_TIME;
1302 1302 if (nsrv4->rfs4_deleg_state_cache_time == 0)
1303 1303 nsrv4->rfs4_deleg_state_cache_time = DELEG_STATE_CACHE_TIME;
1304 1304
1305 1305 /* Create the overall database to hold all server state */
1306 1306 nsrv4->nfs4_server_state = rfs4_database_create(rfs4_database_debug);
1307 1307
1308 1308 /* Now create the individual tables */
1309 1309 nsrv4->rfs4_client_cache_time *= rfs4_lease_time;
1310 1310 nsrv4->rfs4_client_tab = rfs4_table_create(nsrv4->nfs4_server_state,
1311 1311 "Client",
1312 1312 nsrv4->rfs4_client_cache_time,
1313 1313 2,
1314 1314 rfs4_client_create,
1315 1315 rfs4_client_destroy,
1316 1316 rfs4_client_expiry,
1317 1317 sizeof (rfs4_client_t),
1318 1318 TABSIZE,
1319 1319 MAXTABSZ/8, 100);
1320 1320 nsrv4->rfs4_nfsclnt_idx = rfs4_index_create(nsrv4->rfs4_client_tab,
1321 1321 "nfs_client_id4", nfsclnt_hash,
1322 1322 nfsclnt_compare, nfsclnt_mkkey,
1323 1323 TRUE);
1324 1324 nsrv4->rfs4_clientid_idx = rfs4_index_create(nsrv4->rfs4_client_tab,
1325 1325 "client_id", clientid_hash,
1326 1326 clientid_compare, clientid_mkkey,
1327 1327 FALSE);
1328 1328
1329 1329 nsrv4->rfs4_clntip_cache_time = 86400 * 365; /* about a year */
1330 1330 nsrv4->rfs4_clntip_tab = rfs4_table_create(nsrv4->nfs4_server_state,
1331 1331 "ClntIP",
1332 1332 nsrv4->rfs4_clntip_cache_time,
1333 1333 1,
1334 1334 rfs4_clntip_create,
1335 1335 rfs4_clntip_destroy,
1336 1336 rfs4_clntip_expiry,
1337 1337 sizeof (rfs4_clntip_t),
1338 1338 TABSIZE,
1339 1339 MAXTABSZ, 100);
1340 1340 nsrv4->rfs4_clntip_idx = rfs4_index_create(nsrv4->rfs4_clntip_tab,
1341 1341 "client_ip", clntip_hash,
1342 1342 clntip_compare, clntip_mkkey,
1343 1343 TRUE);
1344 1344
1345 1345 nsrv4->rfs4_openowner_cache_time *= rfs4_lease_time;
1346 1346 nsrv4->rfs4_openowner_tab = rfs4_table_create(nsrv4->nfs4_server_state,
1347 1347 "OpenOwner",
1348 1348 nsrv4->rfs4_openowner_cache_time,
1349 1349 1,
1350 1350 rfs4_openowner_create,
1351 1351 rfs4_openowner_destroy,
1352 1352 rfs4_openowner_expiry,
1353 1353 sizeof (rfs4_openowner_t),
1354 1354 TABSIZE,
1355 1355 MAXTABSZ, 100);
1356 1356 nsrv4->rfs4_openowner_idx = rfs4_index_create(nsrv4->rfs4_openowner_tab,
1357 1357 "open_owner4", openowner_hash,
1358 1358 openowner_compare,
1359 1359 openowner_mkkey, TRUE);
1360 1360
1361 1361 nsrv4->rfs4_state_cache_time *= rfs4_lease_time;
1362 1362 nsrv4->rfs4_state_tab = rfs4_table_create(nsrv4->nfs4_server_state,
1363 1363 "OpenStateID",
1364 1364 nsrv4->rfs4_state_cache_time,
1365 1365 3,
1366 1366 rfs4_state_create,
1367 1367 rfs4_state_destroy,
1368 1368 rfs4_state_expiry,
1369 1369 sizeof (rfs4_state_t),
1370 1370 TABSIZE,
1371 1371 MAXTABSZ, 100);
1372 1372
1373 1373 /* CSTYLED */
1374 1374 nsrv4->rfs4_state_owner_file_idx = rfs4_index_create(nsrv4->rfs4_state_tab,
1375 1375 "Openowner-File",
1376 1376 state_owner_file_hash,
1377 1377 state_owner_file_compare,
1378 1378 state_owner_file_mkkey, TRUE);
1379 1379
1380 1380 nsrv4->rfs4_state_idx = rfs4_index_create(nsrv4->rfs4_state_tab,
1381 1381 "State-id", state_hash,
1382 1382 state_compare, state_mkkey, FALSE);
1383 1383
1384 1384 nsrv4->rfs4_state_file_idx = rfs4_index_create(nsrv4->rfs4_state_tab,
1385 1385 "File", state_file_hash,
1386 1386 state_file_compare, state_file_mkkey,
1387 1387 FALSE);
1388 1388
1389 1389 nsrv4->rfs4_lo_state_cache_time *= rfs4_lease_time;
1390 1390 nsrv4->rfs4_lo_state_tab = rfs4_table_create(nsrv4->nfs4_server_state,
1391 1391 "LockStateID",
1392 1392 nsrv4->rfs4_lo_state_cache_time,
1393 1393 2,
1394 1394 rfs4_lo_state_create,
1395 1395 rfs4_lo_state_destroy,
1396 1396 rfs4_lo_state_expiry,
1397 1397 sizeof (rfs4_lo_state_t),
1398 1398 TABSIZE,
1399 1399 MAXTABSZ, 100);
1400 1400
1401 1401 /* CSTYLED */
1402 1402 nsrv4->rfs4_lo_state_owner_idx = rfs4_index_create(nsrv4->rfs4_lo_state_tab,
1403 1403 "lockownerxstate",
1404 1404 lo_state_lo_hash,
1405 1405 lo_state_lo_compare,
1406 1406 lo_state_lo_mkkey, TRUE);
1407 1407
1408 1408 nsrv4->rfs4_lo_state_idx = rfs4_index_create(nsrv4->rfs4_lo_state_tab,
1409 1409 "State-id",
1410 1410 lo_state_hash, lo_state_compare,
1411 1411 lo_state_mkkey, FALSE);
1412 1412
1413 1413 nsrv4->rfs4_lockowner_cache_time *= rfs4_lease_time;
1414 1414
1415 1415 nsrv4->rfs4_lockowner_tab = rfs4_table_create(nsrv4->nfs4_server_state,
1416 1416 "Lockowner",
1417 1417 nsrv4->rfs4_lockowner_cache_time,
1418 1418 2,
1419 1419 rfs4_lockowner_create,
1420 1420 rfs4_lockowner_destroy,
1421 1421 rfs4_lockowner_expiry,
1422 1422 sizeof (rfs4_lockowner_t),
1423 1423 TABSIZE,
1424 1424 MAXTABSZ, 100);
1425 1425
1426 1426 nsrv4->rfs4_lockowner_idx = rfs4_index_create(nsrv4->rfs4_lockowner_tab,
1427 1427 "lock_owner4", lockowner_hash,
1428 1428 lockowner_compare,
1429 1429 lockowner_mkkey, TRUE);
1430 1430
1431 1431 /* CSTYLED */
1432 1432 nsrv4->rfs4_lockowner_pid_idx = rfs4_index_create(nsrv4->rfs4_lockowner_tab,
1433 1433 "pid", pid_hash,
1434 1434 pid_compare, pid_mkkey,
1435 1435 FALSE);
1436 1436
1437 1437 nsrv4->rfs4_file_cache_time *= rfs4_lease_time;
1438 1438 nsrv4->rfs4_file_tab = rfs4_table_create(nsrv4->nfs4_server_state,
1439 1439 "File",
1440 1440 nsrv4->rfs4_file_cache_time,
1441 1441 1,
1442 1442 rfs4_file_create,
1443 1443 rfs4_file_destroy,
1444 1444 NULL,
1445 1445 sizeof (rfs4_file_t),
1446 1446 TABSIZE,
1447 1447 MAXTABSZ, -1);
1448 1448
1449 1449 nsrv4->rfs4_file_idx = rfs4_index_create(nsrv4->rfs4_file_tab,
1450 1450 "Filehandle", file_hash,
1451 1451 file_compare, file_mkkey, TRUE);
1452 1452
1453 1453 nsrv4->rfs4_deleg_state_cache_time *= rfs4_lease_time;
1454 1454 /* CSTYLED */
1455 1455 nsrv4->rfs4_deleg_state_tab = rfs4_table_create(nsrv4->nfs4_server_state,
1456 1456 "DelegStateID",
1457 1457 nsrv4->rfs4_deleg_state_cache_time,
1458 1458 2,
1459 1459 rfs4_deleg_state_create,
1460 1460 rfs4_deleg_state_destroy,
1461 1461 rfs4_deleg_state_expiry,
1462 1462 sizeof (rfs4_deleg_state_t),
1463 1463 TABSIZE,
1464 1464 MAXTABSZ, 100);
1465 1465 nsrv4->rfs4_deleg_idx = rfs4_index_create(nsrv4->rfs4_deleg_state_tab,
1466 1466 "DelegByFileClient",
1467 1467 deleg_hash,
1468 1468 deleg_compare,
1469 1469 deleg_mkkey, TRUE);
1470 1470
1471 1471 /* CSTYLED */
1472 1472 nsrv4->rfs4_deleg_state_idx = rfs4_index_create(nsrv4->rfs4_deleg_state_tab,
1473 1473 "DelegState",
1474 1474 deleg_state_hash,
1475 1475 deleg_state_compare,
1476 1476 deleg_state_mkkey, FALSE);
1477 1477
1478 1478 mutex_exit(&nsrv4->state_lock);
1479 1479
1480 1480 /*
1481 1481 * Init the stable storage.
1482 1482 */
1483 1483 rfs4_ss_init(nsrv4);
1484 1484 }
1485 1485
1486 1486 /*
1487 1487 * Used at server shutdown to clean up all of the NFSv4 server's zone structures
1488 1488 * and state.
1489 1489 */
1490 1490 void
1491 1491 rfs4_state_zone_fini()
1492 1492 {
1493 1493 rfs4_database_t *dbp;
1494 1494 nfs4_srv_t *nsrv4;
1495 1495 nsrv4 = zone_getspecific(rfs4_zone_key, curzone);
1496 1496
1497 1497 rfs4_set_deleg_policy(nsrv4, SRV_NEVER_DELEGATE);
1498 1498
1499 1499 mutex_enter(&nsrv4->state_lock);
1500 1500
1501 1501 if (nsrv4->nfs4_server_state == NULL) {
1502 1502 mutex_exit(&nsrv4->state_lock);
1503 1503 return;
1504 1504 }
1505 1505
1506 1506 /* destroy server instances and current instance ptr */
1507 1507 rfs4_servinst_destroy_all(nsrv4);
1508 1508
1509 1509 /* reset the "first NFSv4 request" status */
1510 1510 nsrv4->seen_first_compound = 0;
1511 1511
1512 1512 dbp = nsrv4->nfs4_server_state;
1513 1513 nsrv4->nfs4_server_state = NULL;
1514 1514
1515 1515 rw_destroy(&nsrv4->rfs4_findclient_lock);
1516 1516
1517 1517 /* First stop all of the reaper threads in the database */
1518 1518 rfs4_database_shutdown(dbp);
1519 1519 /*
1520 - * XXX workaround
1521 - * Skip destrying the state database yet just in case there
1522 - * are unfinished operations depending on it.
1520 + * WARNING: There may be consumers of the rfs4 database still
1521 + * active as we destroy these. IF that's the case, consider putting
1522 + * some of their _zone_fini()-like functions into the zsd key as
1523 + * ~~SHUTDOWN~~ functions instead of ~~DESTROY~~ functions. We can
1524 + * maintain some ordering guarantees better that way.
1523 1525 */
1524 1526 /* Now destroy/release the database tables */
1525 - /* rfs4_database_destroy(dbp); */
1527 + rfs4_database_destroy(dbp);
1526 1528
1527 1529 /* Reset the cache timers for next time */
1528 1530 nsrv4->rfs4_client_cache_time = 0;
1529 1531 nsrv4->rfs4_openowner_cache_time = 0;
1530 1532 nsrv4->rfs4_state_cache_time = 0;
1531 1533 nsrv4->rfs4_lo_state_cache_time = 0;
1532 1534 nsrv4->rfs4_lockowner_cache_time = 0;
1533 1535 nsrv4->rfs4_file_cache_time = 0;
1534 1536 nsrv4->rfs4_deleg_state_cache_time = 0;
1535 1537
1536 1538 mutex_exit(&nsrv4->state_lock);
1537 1539
1538 1540 /* clean up any dangling stable storage structures */
1539 1541 rfs4_ss_fini(nsrv4);
1540 1542 }
1541 1543
1542 1544 typedef union {
1543 1545 struct {
1544 1546 uint32_t start_time;
1545 1547 uint32_t c_id;
1546 1548 } impl_id;
1547 1549 clientid4 id4;
1548 1550 } cid;
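A minimal sketch of how the union above lets the server pick apart a wire clientid4; the function is hypothetical and only illustrates the layout:

    /* Hypothetical helper: recover the boot-time half of a clientid. */
    static uint32_t
    example_clientid_start_time(clientid4 id)
    {
    	cid c;

    	c.id4 = id;
    	return (c.impl_id.start_time);
    }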
1549 1551
1550 1552 static int foreign_stateid(stateid_t *id);
1551 1553 static int foreign_clientid(cid *cidp);
1552 1554 static void embed_nodeid(cid *cidp);
1553 1555
1554 1556 typedef union {
1555 1557 struct {
1556 1558 uint32_t c_id;
1557 1559 uint32_t gen_num;
1558 1560 } cv_impl;
1559 1561 verifier4 confirm_verf;
1560 1562 } scid_confirm_verf;
1561 1563
1562 1564 static uint32_t
1563 1565 clientid_hash(void *key)
1564 1566 {
1565 1567 cid *idp = key;
1566 1568
1567 1569 return (idp->impl_id.c_id);
1568 1570 }
1569 1571
1570 1572 static bool_t
1571 1573 clientid_compare(rfs4_entry_t entry, void *key)
1572 1574 {
1573 1575 rfs4_client_t *cp = (rfs4_client_t *)entry;
1574 1576 clientid4 *idp = key;
1575 1577
1576 1578 return (*idp == cp->rc_clientid);
1577 1579 }
1578 1580
1579 1581 static void *
1580 1582 clientid_mkkey(rfs4_entry_t entry)
1581 1583 {
1582 1584 rfs4_client_t *cp = (rfs4_client_t *)entry;
1583 1585
1584 1586 return (&cp->rc_clientid);
1585 1587 }
1586 1588
1587 1589 static uint32_t
1588 1590 nfsclnt_hash(void *key)
1589 1591 {
1590 1592 nfs_client_id4 *client = key;
1591 1593 int i;
1592 1594 uint32_t hash = 0;
1593 1595
1594 1596 for (i = 0; i < client->id_len; i++) {
1595 1597 hash <<= 1;
1596 1598 hash += (uint_t)client->id_val[i];
1597 1599 }
1598 1600 return (hash);
1599 1601 }
1600 1602
1601 1603
1602 1604 static bool_t
1603 1605 nfsclnt_compare(rfs4_entry_t entry, void *key)
1604 1606 {
1605 1607 rfs4_client_t *cp = (rfs4_client_t *)entry;
1606 1608 nfs_client_id4 *nfs_client = key;
1607 1609
1608 1610 if (cp->rc_nfs_client.id_len != nfs_client->id_len)
1609 1611 return (FALSE);
1610 1612
1611 1613 return (bcmp(cp->rc_nfs_client.id_val, nfs_client->id_val,
1612 1614 nfs_client->id_len) == 0);
1613 1615 }
1614 1616
1615 1617 static void *
1616 1618 nfsclnt_mkkey(rfs4_entry_t entry)
1617 1619 {
1618 1620 rfs4_client_t *cp = (rfs4_client_t *)entry;
1619 1621
1620 1622 return (&cp->rc_nfs_client);
1621 1623 }
1622 1624
1623 1625 static bool_t
1624 1626 rfs4_client_expiry(rfs4_entry_t u_entry)
1625 1627 {
1626 1628 rfs4_client_t *cp = (rfs4_client_t *)u_entry;
1627 1629 bool_t cp_expired;
1628 1630
1629 1631 if (rfs4_dbe_is_invalid(cp->rc_dbe)) {
1630 1632 cp->rc_ss_remove = 1;
1631 1633 return (TRUE);
1632 1634 }
1633 1635 /*
1634 1636 * If the sysadmin has used clear_locks for this
1635 1637 * entry then forced_expire will be set and we
1636 1638 * want this entry to be reaped. Or the entry
1637 1639 * has exceeded its lease period.
1638 1640 */
1639 1641 cp_expired = (cp->rc_forced_expire ||
1640 1642 (gethrestime_sec() - cp->rc_last_access
1641 1643 > rfs4_lease_time));
1642 1644
1643 1645 if (!cp->rc_ss_remove && cp_expired)
1644 1646 cp->rc_ss_remove = 1;
1645 1647 return (cp_expired);
1646 1648 }
1647 1649
1648 1650 /*
1649 1651 * Remove the leaf file from all distributed stable storage paths.
1650 1652 */
1651 1653 static void
1652 1654 rfs4_dss_remove_cpleaf(rfs4_client_t *cp)
1653 1655 {
1654 1656 nfs4_srv_t *nsrv4;
1655 1657 rfs4_servinst_t *sip;
1656 1658 char *leaf = cp->rc_ss_pn->leaf;
1657 1659
1658 1660 /*
1659 1661 * since the state files are written to all DSS
1660 1662 * paths we must remove this leaf file instance
1661 1663 * from all server instances.
1662 1664 */
1663 1665
1664 1666 nsrv4 = zone_getspecific(rfs4_zone_key, curzone);
1665 1667 mutex_enter(&nsrv4->servinst_lock);
1666 1668 for (sip = nsrv4->nfs4_cur_servinst; sip != NULL; sip = sip->prev) {
1667 1669 /* remove the leaf file associated with this server instance */
1668 1670 rfs4_dss_remove_leaf(sip, NFS4_DSS_STATE_LEAF, leaf);
1669 1671 }
1670 1672 mutex_exit(&nsrv4->servinst_lock);
1671 1673 }
1672 1674
1673 1675 static void
1674 1676 rfs4_dss_remove_leaf(rfs4_servinst_t *sip, char *dir_leaf, char *leaf)
1675 1677 {
1676 1678 int i, npaths = sip->dss_npaths;
1677 1679
1678 1680 for (i = 0; i < npaths; i++) {
1679 1681 rfs4_dss_path_t *dss_path = sip->dss_paths[i];
1680 1682 char *path, *dir;
1681 1683 size_t pathlen;
1682 1684
1683 1685 /* the HA-NFSv4 path might have been failed-over away from us */
1684 1686 if (dss_path == NULL)
1685 1687 continue;
1686 1688
1687 1689 dir = dss_path->path;
1688 1690
1689 1691 /* allow 3 extra bytes for two '/' & a NUL */
1690 1692 pathlen = strlen(dir) + strlen(dir_leaf) + strlen(leaf) + 3;
1691 1693 path = kmem_alloc(pathlen, KM_SLEEP);
1692 1694 (void) sprintf(path, "%s/%s/%s", dir, dir_leaf, leaf);
1693 1695
1694 1696 (void) vn_remove(path, UIO_SYSSPACE, RMFILE);
1695 1697
1696 1698 kmem_free(path, pathlen);
1697 1699 }
1698 1700 }
1699 1701
1700 1702 static void
1701 1703 rfs4_client_destroy(rfs4_entry_t u_entry)
1702 1704 {
1703 1705 rfs4_client_t *cp = (rfs4_client_t *)u_entry;
1704 1706
1705 1707 mutex_destroy(cp->rc_cbinfo.cb_lock);
1706 1708 cv_destroy(cp->rc_cbinfo.cb_cv);
1707 1709 cv_destroy(cp->rc_cbinfo.cb_cv_nullcaller);
1708 1710 list_destroy(&cp->rc_openownerlist);
1709 1711
1710 1712 /* free callback info */
1711 1713 rfs4_cbinfo_free(&cp->rc_cbinfo);
1712 1714
1713 1715 if (cp->rc_cp_confirmed)
1714 1716 rfs4_client_rele(cp->rc_cp_confirmed);
1715 1717
1716 1718 if (cp->rc_ss_pn) {
1717 1719 /* check if the stable storage files need to be removed */
1718 1720 if (cp->rc_ss_remove)
1719 1721 rfs4_dss_remove_cpleaf(cp);
1720 1722 rfs4_ss_pnfree(cp->rc_ss_pn);
1721 1723 }
1722 1724
1723 1725 /* Free the client supplied client id */
1724 1726 kmem_free(cp->rc_nfs_client.id_val, cp->rc_nfs_client.id_len);
1725 1727
1726 1728 if (cp->rc_sysidt != LM_NOSYSID)
1727 1729 lm_free_sysidt(cp->rc_sysidt);
1728 1730 }
1729 1731
1730 1732 static bool_t
1731 1733 rfs4_client_create(rfs4_entry_t u_entry, void *arg)
1732 1734 {
1733 1735 rfs4_client_t *cp = (rfs4_client_t *)u_entry;
1734 1736 nfs_client_id4 *client = (nfs_client_id4 *)arg;
1735 1737 struct sockaddr *ca;
1736 1738 cid *cidp;
1737 1739 scid_confirm_verf *scvp;
1738 1740 nfs4_srv_t *nsrv4;
1739 1741
1740 1742 nsrv4 = zone_getspecific(rfs4_zone_key, curzone);
1741 1743
1742 1744 /* Get a clientid to give to the client */
1743 1745 cidp = (cid *)&cp->rc_clientid;
1744 1746 cidp->impl_id.start_time = nsrv4->rfs4_start_time;
1745 1747 cidp->impl_id.c_id = (uint32_t)rfs4_dbe_getid(cp->rc_dbe);
1746 1748
1747 1749 /* If we are booted as a cluster node, embed our nodeid */
1748 1750 if (cluster_bootflags & CLUSTER_BOOTED)
1749 1751 embed_nodeid(cidp);
1750 1752
1751 1753 /* Allocate and copy client's client id value */
1752 1754 cp->rc_nfs_client.id_val = kmem_alloc(client->id_len, KM_SLEEP);
1753 1755 cp->rc_nfs_client.id_len = client->id_len;
1754 1756 bcopy(client->id_val, cp->rc_nfs_client.id_val, client->id_len);
1755 1757 cp->rc_nfs_client.verifier = client->verifier;
1756 1758
1757 1759 /* Copy client's IP address */
1758 1760 ca = client->cl_addr;
1759 1761 if (ca->sa_family == AF_INET)
1760 1762 bcopy(ca, &cp->rc_addr, sizeof (struct sockaddr_in));
1761 1763 else if (ca->sa_family == AF_INET6)
1762 1764 bcopy(ca, &cp->rc_addr, sizeof (struct sockaddr_in6));
1763 1765 cp->rc_nfs_client.cl_addr = (struct sockaddr *)&cp->rc_addr;
1764 1766
1765 1767 /* Init the value for the SETCLIENTID_CONFIRM verifier */
1766 1768 scvp = (scid_confirm_verf *)&cp->rc_confirm_verf;
1767 1769 scvp->cv_impl.c_id = cidp->impl_id.c_id;
1768 1770 scvp->cv_impl.gen_num = 0;
1769 1771
1770 1772 	/* Tracks whether an F_UNLKSYS has been done for this client */
1771 1773 cp->rc_unlksys_completed = FALSE;
1772 1774
1773 1775 /* We need the client to ack us */
1774 1776 cp->rc_need_confirm = TRUE;
1775 1777 cp->rc_cp_confirmed = NULL;
1776 1778
1777 1779 /* TRUE all the time until the callback path actually fails */
1778 1780 cp->rc_cbinfo.cb_notified_of_cb_path_down = TRUE;
1779 1781
1780 1782 /* Initialize the access time to now */
1781 1783 cp->rc_last_access = gethrestime_sec();
1782 1784
1783 1785 cp->rc_cr_set = NULL;
1784 1786
1785 1787 cp->rc_sysidt = LM_NOSYSID;
1786 1788
1787 1789 list_create(&cp->rc_openownerlist, sizeof (rfs4_openowner_t),
1788 1790 offsetof(rfs4_openowner_t, ro_node));
1789 1791
1790 1792 /* set up the callback control structure */
1791 1793 cp->rc_cbinfo.cb_state = CB_UNINIT;
1792 1794 mutex_init(cp->rc_cbinfo.cb_lock, NULL, MUTEX_DEFAULT, NULL);
1793 1795 cv_init(cp->rc_cbinfo.cb_cv, NULL, CV_DEFAULT, NULL);
1794 1796 cv_init(cp->rc_cbinfo.cb_cv_nullcaller, NULL, CV_DEFAULT, NULL);
1795 1797
1796 1798 /*
1797 1799 * Associate the client_t with the current server instance.
1798 1800 * The hold is solely to satisfy the calling requirement of
1799 1801 * rfs4_servinst_assign(). In this case it's not strictly necessary.
1800 1802 */
1801 1803 rfs4_dbe_hold(cp->rc_dbe);
1802 1804 rfs4_servinst_assign(nsrv4, cp, nsrv4->nfs4_cur_servinst);
1803 1805 rfs4_dbe_rele(cp->rc_dbe);
1804 1806
1805 1807 return (TRUE);
1806 1808 }
1807 1809
1808 1810 /*
1809 1811 * Caller wants to generate/update the setclientid_confirm verifier
1810 1812 * associated with a client. This is done during the SETCLIENTID
1811 1813 * processing.
1812 1814 */
1813 1815 void
1814 1816 rfs4_client_scv_next(rfs4_client_t *cp)
1815 1817 {
1816 1818 scid_confirm_verf *scvp;
1817 1819
1818 1820 /* Init the value for the SETCLIENTID_CONFIRM verifier */
1819 1821 scvp = (scid_confirm_verf *)&cp->rc_confirm_verf;
1820 1822 scvp->cv_impl.gen_num++;
1821 1823 }
1822 1824
1823 1825 void
1824 1826 rfs4_client_rele(rfs4_client_t *cp)
1825 1827 {
1826 1828 rfs4_dbe_rele(cp->rc_dbe);
1827 1829 }
1828 1830
1829 1831 rfs4_client_t *
1830 1832 rfs4_findclient(nfs_client_id4 *client, bool_t *create, rfs4_client_t *oldcp)
1831 1833 {
1832 1834 rfs4_client_t *cp;
1833 1835 nfs4_srv_t *nsrv4;
1834 1836 nsrv4 = zone_getspecific(rfs4_zone_key, curzone);
1835 1837
1836 1838
1837 1839 if (oldcp) {
1838 1840 rw_enter(&nsrv4->rfs4_findclient_lock, RW_WRITER);
1839 1841 rfs4_dbe_hide(oldcp->rc_dbe);
1840 1842 } else {
1841 1843 rw_enter(&nsrv4->rfs4_findclient_lock, RW_READER);
1842 1844 }
1843 1845
1844 1846 cp = (rfs4_client_t *)rfs4_dbsearch(nsrv4->rfs4_nfsclnt_idx, client,
1845 1847 create, (void *)client, RFS4_DBS_VALID);
1846 1848
1847 1849 if (oldcp)
1848 1850 rfs4_dbe_unhide(oldcp->rc_dbe);
1849 1851
1850 1852 rw_exit(&nsrv4->rfs4_findclient_lock);
1851 1853
1852 1854 return (cp);
1853 1855 }
1854 1856
1855 1857 rfs4_client_t *
1856 1858 rfs4_findclient_by_id(clientid4 clientid, bool_t find_unconfirmed)
1857 1859 {
1858 1860 rfs4_client_t *cp;
1859 1861 bool_t create = FALSE;
1860 1862 cid *cidp = (cid *)&clientid;
1861 1863 nfs4_srv_t *nsrv4 = zone_getspecific(rfs4_zone_key, curzone);
1862 1864
1863 1865 /* If we're a cluster and the nodeid isn't right, short-circuit */
1864 1866 if (cluster_bootflags & CLUSTER_BOOTED && foreign_clientid(cidp))
1865 1867 return (NULL);
1866 1868
1867 1869 rw_enter(&nsrv4->rfs4_findclient_lock, RW_READER);
1868 1870
1869 1871 cp = (rfs4_client_t *)rfs4_dbsearch(nsrv4->rfs4_clientid_idx, &clientid,
1870 1872 &create, NULL, RFS4_DBS_VALID);
1871 1873
1872 1874 rw_exit(&nsrv4->rfs4_findclient_lock);
1873 1875
1874 1876 if (cp && cp->rc_need_confirm && find_unconfirmed == FALSE) {
1875 1877 rfs4_client_rele(cp);
1876 1878 return (NULL);
1877 1879 } else {
1878 1880 return (cp);
1879 1881 }
1880 1882 }
1881 1883
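/*
 * The clntip table records client IP addresses, keyed by sockaddr
 * (IPv4 or IPv6).  Entries are created with ri_no_referrals set
 * (see rfs4_clntip_create()).
 */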
1882 1884 static uint32_t
1883 1885 clntip_hash(void *key)
1884 1886 {
1885 1887 struct sockaddr *addr = key;
1886 1888 int i, len = 0;
1887 1889 uint32_t hash = 0;
1888 1890 char *ptr;
1889 1891
1890 1892 if (addr->sa_family == AF_INET) {
1891 1893 struct sockaddr_in *a = (struct sockaddr_in *)addr;
1892 1894 len = sizeof (struct in_addr);
1893 1895 ptr = (char *)&a->sin_addr;
1894 1896 } else if (addr->sa_family == AF_INET6) {
1895 1897 struct sockaddr_in6 *a = (struct sockaddr_in6 *)addr;
1896 1898 len = sizeof (struct in6_addr);
1897 1899 ptr = (char *)&a->sin6_addr;
1898 1900 } else
1899 1901 return (0);
1900 1902
1901 1903 for (i = 0; i < len; i++) {
1902 1904 hash <<= 1;
1903 1905 hash += (uint_t)ptr[i];
1904 1906 }
1905 1907 return (hash);
1906 1908 }
1907 1909
1908 1910 static bool_t
1909 1911 clntip_compare(rfs4_entry_t entry, void *key)
1910 1912 {
1911 1913 rfs4_clntip_t *cp = (rfs4_clntip_t *)entry;
1912 1914 struct sockaddr *addr = key;
1913 1915 int len = 0;
1914 1916 char *p1, *p2;
1915 1917
1916 1918 if (addr->sa_family == AF_INET) {
1917 1919 struct sockaddr_in *a1 = (struct sockaddr_in *)&cp->ri_addr;
1918 1920 struct sockaddr_in *a2 = (struct sockaddr_in *)addr;
1919 1921 len = sizeof (struct in_addr);
1920 1922 p1 = (char *)&a1->sin_addr;
1921 1923 p2 = (char *)&a2->sin_addr;
1922 1924 } else if (addr->sa_family == AF_INET6) {
1923 1925 struct sockaddr_in6 *a1 = (struct sockaddr_in6 *)&cp->ri_addr;
1924 1926 struct sockaddr_in6 *a2 = (struct sockaddr_in6 *)addr;
1925 1927 len = sizeof (struct in6_addr);
1926 1928 p1 = (char *)&a1->sin6_addr;
1927 1929 p2 = (char *)&a2->sin6_addr;
1928 1930 } else
1929 1931 return (0);
1930 1932
1931 1933 return (bcmp(p1, p2, len) == 0);
1932 1934 }
1933 1935
1934 1936 static void *
1935 1937 clntip_mkkey(rfs4_entry_t entry)
1936 1938 {
1937 1939 rfs4_clntip_t *cp = (rfs4_clntip_t *)entry;
1938 1940
1939 1941 return (&cp->ri_addr);
1940 1942 }
1941 1943
1942 1944 static bool_t
1943 1945 rfs4_clntip_expiry(rfs4_entry_t u_entry)
1944 1946 {
1945 1947 rfs4_clntip_t *cp = (rfs4_clntip_t *)u_entry;
1946 1948
1947 1949 if (rfs4_dbe_is_invalid(cp->ri_dbe))
1948 1950 return (TRUE);
1949 1951 return (FALSE);
1950 1952 }
1951 1953
1952 1954 /* ARGSUSED */
1953 1955 static void
1954 1956 rfs4_clntip_destroy(rfs4_entry_t u_entry)
1955 1957 {
1956 1958 }
1957 1959
1958 1960 static bool_t
1959 1961 rfs4_clntip_create(rfs4_entry_t u_entry, void *arg)
1960 1962 {
1961 1963 rfs4_clntip_t *cp = (rfs4_clntip_t *)u_entry;
1962 1964 struct sockaddr *ca = (struct sockaddr *)arg;
1963 1965
1964 1966 /* Copy client's IP address */
1965 1967 if (ca->sa_family == AF_INET)
1966 1968 bcopy(ca, &cp->ri_addr, sizeof (struct sockaddr_in));
1967 1969 else if (ca->sa_family == AF_INET6)
1968 1970 bcopy(ca, &cp->ri_addr, sizeof (struct sockaddr_in6));
1969 1971 else
1970 1972 return (FALSE);
1971 1973 cp->ri_no_referrals = 1;
1972 1974
1973 1975 return (TRUE);
1974 1976 }
1975 1977
1976 1978 rfs4_clntip_t *
1977 1979 rfs4_find_clntip(struct sockaddr *addr, bool_t *create)
1978 1980 {
1979 1981 rfs4_clntip_t *cp;
1980 1982 nfs4_srv_t *nsrv4;
1981 1983
1982 1984 nsrv4 = zone_getspecific(rfs4_zone_key, curzone);
1983 1985
1984 1986 rw_enter(&nsrv4->rfs4_findclient_lock, RW_READER);
1985 1987
1986 1988 cp = (rfs4_clntip_t *)rfs4_dbsearch(nsrv4->rfs4_clntip_idx, addr,
1987 1989 create, addr, RFS4_DBS_VALID);
1988 1990
1989 1991 rw_exit(&nsrv4->rfs4_findclient_lock);
1990 1992
1991 1993 return (cp);
1992 1994 }
1993 1995
1994 1996 void
1995 1997 rfs4_invalidate_clntip(struct sockaddr *addr)
1996 1998 {
1997 1999 rfs4_clntip_t *cp;
1998 2000 bool_t create = FALSE;
1999 2001 nfs4_srv_t *nsrv4 = zone_getspecific(rfs4_zone_key, curzone);
2000 2002
2001 2003 rw_enter(&nsrv4->rfs4_findclient_lock, RW_READER);
2002 2004
2003 2005 cp = (rfs4_clntip_t *)rfs4_dbsearch(nsrv4->rfs4_clntip_idx, addr,
2004 2006 &create, NULL, RFS4_DBS_VALID);
2005 2007 if (cp == NULL) {
2006 2008 rw_exit(&nsrv4->rfs4_findclient_lock);
2007 2009 return;
2008 2010 }
2009 2011 rfs4_dbe_invalidate(cp->ri_dbe);
2010 2012 rfs4_dbe_rele(cp->ri_dbe);
2011 2013
2012 2014 rw_exit(&nsrv4->rfs4_findclient_lock);
2013 2015 }
2014 2016
2015 2017 bool_t
2016 2018 rfs4_lease_expired(rfs4_client_t *cp)
2017 2019 {
2018 2020 bool_t rc;
2019 2021
2020 2022 rfs4_dbe_lock(cp->rc_dbe);
2021 2023
2022 2024 /*
2023 2025 * If the admin has executed clear_locks for this
2024 2026 * client id, force expire will be set, so no need
2025 2027 * to calculate anything because it's "outa here".
2026 2028 */
2027 2029 if (cp->rc_forced_expire) {
2028 2030 rc = TRUE;
2029 2031 } else {
2030 2032 rc = (gethrestime_sec() - cp->rc_last_access > rfs4_lease_time);
2031 2033 }
2032 2034
2033 2035 /*
2034 2036 * If the lease has expired we will also want
2035 2037 * to remove any stable storage state data. So
2036 2038 * mark the client id accordingly.
2037 2039 */
2038 2040 if (!cp->rc_ss_remove)
2039 2041 cp->rc_ss_remove = (rc == TRUE);
2040 2042
2041 2043 rfs4_dbe_unlock(cp->rc_dbe);
2042 2044
2043 2045 return (rc);
2044 2046 }
2045 2047
2046 2048 void
2047 2049 rfs4_update_lease(rfs4_client_t *cp)
2048 2050 {
2049 2051 rfs4_dbe_lock(cp->rc_dbe);
2050 2052 if (!cp->rc_forced_expire)
2051 2053 cp->rc_last_access = gethrestime_sec();
2052 2054 rfs4_dbe_unlock(cp->rc_dbe);
2053 2055 }
2054 2056
2055 2057
2056 2058 static bool_t
2057 2059 EQOPENOWNER(open_owner4 *a, open_owner4 *b)
2058 2060 {
2059 2061 bool_t rc;
2060 2062
2061 2063 if (a->clientid != b->clientid)
2062 2064 return (FALSE);
2063 2065
2064 2066 if (a->owner_len != b->owner_len)
2065 2067 return (FALSE);
2066 2068
2067 2069 rc = (bcmp(a->owner_val, b->owner_val, a->owner_len) == 0);
2068 2070
2069 2071 return (rc);
2070 2072 }
2071 2073
2072 2074 static uint_t
2073 2075 openowner_hash(void *key)
2074 2076 {
2075 2077 int i;
2076 2078 open_owner4 *openowner = key;
2077 2079 uint_t hash = 0;
2078 2080
2079 2081 for (i = 0; i < openowner->owner_len; i++) {
2080 2082 hash <<= 4;
2081 2083 hash += (uint_t)openowner->owner_val[i];
2082 2084 }
2083 2085 hash += (uint_t)openowner->clientid;
2084 2086 hash |= (openowner->clientid >> 32);
2085 2087
2086 2088 return (hash);
2087 2089 }
2088 2090
2089 2091 static bool_t
2090 2092 openowner_compare(rfs4_entry_t u_entry, void *key)
2091 2093 {
2092 2094 rfs4_openowner_t *oo = (rfs4_openowner_t *)u_entry;
2093 2095 open_owner4 *arg = key;
2094 2096
2095 2097 return (EQOPENOWNER(&oo->ro_owner, arg));
2096 2098 }
2097 2099
2098 2100 void *
2099 2101 openowner_mkkey(rfs4_entry_t u_entry)
2100 2102 {
2101 2103 rfs4_openowner_t *oo = (rfs4_openowner_t *)u_entry;
2102 2104
2103 2105 return (&oo->ro_owner);
2104 2106 }
2105 2107
2106 2108 /* ARGSUSED */
2107 2109 static bool_t
2108 2110 rfs4_openowner_expiry(rfs4_entry_t u_entry)
2109 2111 {
2110 2112 	/* the open stateid held us and already provided any needed delay */
2111 2113 return (TRUE);
2112 2114 }
2113 2115
2114 2116 static void
2115 2117 rfs4_openowner_destroy(rfs4_entry_t u_entry)
2116 2118 {
2117 2119 rfs4_openowner_t *oo = (rfs4_openowner_t *)u_entry;
2118 2120
2119 2121 /* Remove open owner from client's lists of open owners */
2120 2122 rfs4_dbe_lock(oo->ro_client->rc_dbe);
2121 2123 list_remove(&oo->ro_client->rc_openownerlist, oo);
2122 2124 rfs4_dbe_unlock(oo->ro_client->rc_dbe);
2123 2125
2124 2126 /* One less reference to the client */
2125 2127 rfs4_client_rele(oo->ro_client);
2126 2128 oo->ro_client = NULL;
2127 2129
2128 2130 	/* Free the last reply for this open owner */
2129 2131 rfs4_free_reply(&oo->ro_reply);
2130 2132
2131 2133 if (oo->ro_reply_fh.nfs_fh4_val) {
2132 2134 kmem_free(oo->ro_reply_fh.nfs_fh4_val,
2133 2135 oo->ro_reply_fh.nfs_fh4_len);
2134 2136 oo->ro_reply_fh.nfs_fh4_val = NULL;
2135 2137 oo->ro_reply_fh.nfs_fh4_len = 0;
2136 2138 }
2137 2139
2138 2140 rfs4_sw_destroy(&oo->ro_sw);
2139 2141 list_destroy(&oo->ro_statelist);
2140 2142
2141 2143 	/* Free the open owner id */
2142 2144 kmem_free(oo->ro_owner.owner_val, oo->ro_owner.owner_len);
2143 2145 }
2144 2146
2145 2147 void
2146 2148 rfs4_openowner_rele(rfs4_openowner_t *oo)
2147 2149 {
2148 2150 rfs4_dbe_rele(oo->ro_dbe);
2149 2151 }
2150 2152
2151 2153 static bool_t
2152 2154 rfs4_openowner_create(rfs4_entry_t u_entry, void *arg)
2153 2155 {
2154 2156 rfs4_openowner_t *oo = (rfs4_openowner_t *)u_entry;
2155 2157 rfs4_openowner_t *argp = (rfs4_openowner_t *)arg;
2156 2158 open_owner4 *openowner = &argp->ro_owner;
2157 2159 seqid4 seqid = argp->ro_open_seqid;
2158 2160 rfs4_client_t *cp;
2159 2161 bool_t create = FALSE;
2160 2162 nfs4_srv_t *nsrv4 = zone_getspecific(rfs4_zone_key, curzone);
2161 2163
2162 2164 rw_enter(&nsrv4->rfs4_findclient_lock, RW_READER);
2163 2165
2164 2166 cp = (rfs4_client_t *)rfs4_dbsearch(nsrv4->rfs4_clientid_idx,
2165 2167 &openowner->clientid,
2166 2168 &create, NULL, RFS4_DBS_VALID);
2167 2169
2168 2170 rw_exit(&nsrv4->rfs4_findclient_lock);
2169 2171
2170 2172 if (cp == NULL)
2171 2173 return (FALSE);
2172 2174
2173 2175 oo->ro_reply_fh.nfs_fh4_len = 0;
2174 2176 oo->ro_reply_fh.nfs_fh4_val = NULL;
2175 2177
2176 2178 oo->ro_owner.clientid = openowner->clientid;
2177 2179 oo->ro_owner.owner_val =
2178 2180 kmem_alloc(openowner->owner_len, KM_SLEEP);
2179 2181
2180 2182 bcopy(openowner->owner_val,
2181 2183 oo->ro_owner.owner_val, openowner->owner_len);
2182 2184
2183 2185 oo->ro_owner.owner_len = openowner->owner_len;
2184 2186
2185 2187 oo->ro_need_confirm = TRUE;
2186 2188
2187 2189 rfs4_sw_init(&oo->ro_sw);
2188 2190
2189 2191 oo->ro_open_seqid = seqid;
2190 2192 bzero(&oo->ro_reply, sizeof (nfs_resop4));
2191 2193 oo->ro_client = cp;
2192 2194 oo->ro_cr_set = NULL;
2193 2195
2194 2196 list_create(&oo->ro_statelist, sizeof (rfs4_state_t),
2195 2197 offsetof(rfs4_state_t, rs_node));
2196 2198
2197 2199 /* Insert openowner into client's open owner list */
2198 2200 rfs4_dbe_lock(cp->rc_dbe);
2199 2201 list_insert_tail(&cp->rc_openownerlist, oo);
2200 2202 rfs4_dbe_unlock(cp->rc_dbe);
2201 2203
2202 2204 return (TRUE);
2203 2205 }
2204 2206
2205 2207 rfs4_openowner_t *
2206 2208 rfs4_findopenowner(open_owner4 *openowner, bool_t *create, seqid4 seqid)
2207 2209 {
2208 2210 rfs4_openowner_t *oo;
2209 2211 rfs4_openowner_t arg;
2210 2212 nfs4_srv_t *nsrv4 = zone_getspecific(rfs4_zone_key, curzone);
2211 2213
2212 2214 arg.ro_owner = *openowner;
2213 2215 arg.ro_open_seqid = seqid;
2214 2216 /* CSTYLED */
2215 2217 oo = (rfs4_openowner_t *)rfs4_dbsearch(nsrv4->rfs4_openowner_idx, openowner,
2216 2218 create, &arg, RFS4_DBS_VALID);
2217 2219
2218 2220 return (oo);
2219 2221 }
2220 2222
2221 2223 void
2222 2224 rfs4_update_open_sequence(rfs4_openowner_t *oo)
2223 2225 {
2224 2226
2225 2227 rfs4_dbe_lock(oo->ro_dbe);
2226 2228
2227 2229 oo->ro_open_seqid++;
2228 2230
2229 2231 rfs4_dbe_unlock(oo->ro_dbe);
2230 2232 }
2231 2233
2232 2234 void
2233 2235 rfs4_update_open_resp(rfs4_openowner_t *oo, nfs_resop4 *resp, nfs_fh4 *fh)
2234 2236 {
2235 2237
2236 2238 rfs4_dbe_lock(oo->ro_dbe);
2237 2239
2238 2240 rfs4_free_reply(&oo->ro_reply);
2239 2241
2240 2242 rfs4_copy_reply(&oo->ro_reply, resp);
2241 2243
2242 2244 /* Save the filehandle if provided and free if not used */
2243 2245 if (resp->nfs_resop4_u.opopen.status == NFS4_OK &&
2244 2246 fh && fh->nfs_fh4_len) {
2245 2247 if (oo->ro_reply_fh.nfs_fh4_val == NULL)
2246 2248 oo->ro_reply_fh.nfs_fh4_val =
2247 2249 kmem_alloc(fh->nfs_fh4_len, KM_SLEEP);
2248 2250 nfs_fh4_copy(fh, &oo->ro_reply_fh);
2249 2251 } else {
2250 2252 if (oo->ro_reply_fh.nfs_fh4_val) {
2251 2253 kmem_free(oo->ro_reply_fh.nfs_fh4_val,
2252 2254 oo->ro_reply_fh.nfs_fh4_len);
2253 2255 oo->ro_reply_fh.nfs_fh4_val = NULL;
2254 2256 oo->ro_reply_fh.nfs_fh4_len = 0;
2255 2257 }
2256 2258 }
2257 2259
2258 2260 rfs4_dbe_unlock(oo->ro_dbe);
2259 2261 }
2260 2262
2261 2263 static bool_t
2262 2264 lockowner_compare(rfs4_entry_t u_entry, void *key)
2263 2265 {
2264 2266 rfs4_lockowner_t *lo = (rfs4_lockowner_t *)u_entry;
2265 2267 lock_owner4 *b = (lock_owner4 *)key;
2266 2268
2267 2269 if (lo->rl_owner.clientid != b->clientid)
2268 2270 return (FALSE);
2269 2271
2270 2272 if (lo->rl_owner.owner_len != b->owner_len)
2271 2273 return (FALSE);
2272 2274
2273 2275 return (bcmp(lo->rl_owner.owner_val, b->owner_val,
2274 2276 lo->rl_owner.owner_len) == 0);
2275 2277 }
2276 2278
2277 2279 void *
2278 2280 lockowner_mkkey(rfs4_entry_t u_entry)
2279 2281 {
2280 2282 rfs4_lockowner_t *lo = (rfs4_lockowner_t *)u_entry;
2281 2283
2282 2284 return (&lo->rl_owner);
2283 2285 }
2284 2286
2285 2287 static uint32_t
2286 2288 lockowner_hash(void *key)
2287 2289 {
2288 2290 int i;
2289 2291 lock_owner4 *lockowner = key;
2290 2292 uint_t hash = 0;
2291 2293
2292 2294 for (i = 0; i < lockowner->owner_len; i++) {
2293 2295 hash <<= 4;
2294 2296 hash += (uint_t)lockowner->owner_val[i];
2295 2297 }
2296 2298 hash += (uint_t)lockowner->clientid;
2297 2299 hash |= (lockowner->clientid >> 32);
2298 2300
2299 2301 return (hash);
2300 2302 }
2301 2303
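/*
 * A second lock owner index, keyed by the owner's synthetic pid.  rl_pid is
 * assigned from the database entry id at create time (see
 * rfs4_lockowner_create()) and is used when file locks are cleaned up
 * (see rfs4_lo_state_destroy()).
 */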
2302 2304 static uint32_t
2303 2305 pid_hash(void *key)
2304 2306 {
2305 2307 return ((uint32_t)(uintptr_t)key);
2306 2308 }
2307 2309
2308 2310 static void *
2309 2311 pid_mkkey(rfs4_entry_t u_entry)
2310 2312 {
2311 2313 rfs4_lockowner_t *lo = (rfs4_lockowner_t *)u_entry;
2312 2314
2313 2315 return ((void *)(uintptr_t)lo->rl_pid);
2314 2316 }
2315 2317
2316 2318 static bool_t
2317 2319 pid_compare(rfs4_entry_t u_entry, void *key)
2318 2320 {
2319 2321 rfs4_lockowner_t *lo = (rfs4_lockowner_t *)u_entry;
2320 2322
2321 2323 return (lo->rl_pid == (pid_t)(uintptr_t)key);
2322 2324 }
2323 2325
2324 2326 static void
2325 2327 rfs4_lockowner_destroy(rfs4_entry_t u_entry)
2326 2328 {
2327 2329 rfs4_lockowner_t *lo = (rfs4_lockowner_t *)u_entry;
2328 2330
2329 2331 /* Free the lock owner id */
2330 2332 kmem_free(lo->rl_owner.owner_val, lo->rl_owner.owner_len);
2331 2333 rfs4_client_rele(lo->rl_client);
2332 2334 }
2333 2335
2334 2336 void
2335 2337 rfs4_lockowner_rele(rfs4_lockowner_t *lo)
2336 2338 {
2337 2339 rfs4_dbe_rele(lo->rl_dbe);
2338 2340 }
2339 2341
2340 2342 /* ARGSUSED */
2341 2343 static bool_t
2342 2344 rfs4_lockowner_expiry(rfs4_entry_t u_entry)
2343 2345 {
2344 2346 /*
2345 2347 * Since expiry is called with no other references on
2346 2348 * this struct, go ahead and have it removed.
2347 2349 */
2348 2350 return (TRUE);
2349 2351 }
2350 2352
2351 2353 static bool_t
2352 2354 rfs4_lockowner_create(rfs4_entry_t u_entry, void *arg)
2353 2355 {
2354 2356 rfs4_lockowner_t *lo = (rfs4_lockowner_t *)u_entry;
2355 2357 lock_owner4 *lockowner = (lock_owner4 *)arg;
2356 2358 rfs4_client_t *cp;
2357 2359 bool_t create = FALSE;
2358 2360 nfs4_srv_t *nsrv4 = zone_getspecific(rfs4_zone_key, curzone);
2359 2361
2360 2362 rw_enter(&nsrv4->rfs4_findclient_lock, RW_READER);
2361 2363
2362 2364 cp = (rfs4_client_t *)rfs4_dbsearch(nsrv4->rfs4_clientid_idx,
2363 2365 &lockowner->clientid,
2364 2366 &create, NULL, RFS4_DBS_VALID);
2365 2367
2366 2368 rw_exit(&nsrv4->rfs4_findclient_lock);
2367 2369
2368 2370 if (cp == NULL)
2369 2371 return (FALSE);
2370 2372
2371 2373 /* Reference client */
2372 2374 lo->rl_client = cp;
2373 2375 lo->rl_owner.clientid = lockowner->clientid;
2374 2376 lo->rl_owner.owner_val = kmem_alloc(lockowner->owner_len, KM_SLEEP);
2375 2377 bcopy(lockowner->owner_val, lo->rl_owner.owner_val,
2376 2378 lockowner->owner_len);
2377 2379 lo->rl_owner.owner_len = lockowner->owner_len;
2378 2380 lo->rl_pid = rfs4_dbe_getid(lo->rl_dbe);
2379 2381
2380 2382 return (TRUE);
2381 2383 }
2382 2384
2383 2385 rfs4_lockowner_t *
2384 2386 rfs4_findlockowner(lock_owner4 *lockowner, bool_t *create)
2385 2387 {
2386 2388 rfs4_lockowner_t *lo;
2387 2389 nfs4_srv_t *nsrv4 = zone_getspecific(rfs4_zone_key, curzone);
2388 2390
2389 2391 /* CSTYLED */
2390 2392 lo = (rfs4_lockowner_t *)rfs4_dbsearch(nsrv4->rfs4_lockowner_idx, lockowner,
2391 2393 create, lockowner, RFS4_DBS_VALID);
2392 2394
2393 2395 return (lo);
2394 2396 }
2395 2397
2396 2398 rfs4_lockowner_t *
2397 2399 rfs4_findlockowner_by_pid(pid_t pid)
2398 2400 {
2399 2401 rfs4_lockowner_t *lo;
2400 2402 bool_t create = FALSE;
2401 2403 nfs4_srv_t *nsrv4 = zone_getspecific(rfs4_zone_key, curzone);
2402 2404
2403 2405 lo = (rfs4_lockowner_t *)rfs4_dbsearch(nsrv4->rfs4_lockowner_pid_idx,
2404 2406 (void *)(uintptr_t)pid, &create, NULL, RFS4_DBS_VALID);
2405 2407
2406 2408 return (lo);
2407 2409 }
2408 2410
2409 2411
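/*
 * The file table is keyed by vnode pointer.  The rfs4_file_t is also cached
 * on the vnode itself via VSD (nfs4_srv_vkey), which rfs4_findfile() uses
 * for the non-create lookup path.
 */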
2410 2412 static uint32_t
2411 2413 file_hash(void *key)
2412 2414 {
2413 2415 return (ADDRHASH(key));
2414 2416 }
2415 2417
2416 2418 static void *
2417 2419 file_mkkey(rfs4_entry_t u_entry)
2418 2420 {
2419 2421 rfs4_file_t *fp = (rfs4_file_t *)u_entry;
2420 2422
2421 2423 return (fp->rf_vp);
2422 2424 }
2423 2425
2424 2426 static bool_t
2425 2427 file_compare(rfs4_entry_t u_entry, void *key)
2426 2428 {
2427 2429 rfs4_file_t *fp = (rfs4_file_t *)u_entry;
2428 2430
2429 2431 return (fp->rf_vp == (vnode_t *)key);
2430 2432 }
2431 2433
2432 2434 static void
2433 2435 rfs4_file_destroy(rfs4_entry_t u_entry)
2434 2436 {
2435 2437 rfs4_file_t *fp = (rfs4_file_t *)u_entry;
2436 2438
2437 2439 list_destroy(&fp->rf_delegstatelist);
2438 2440
2439 2441 if (fp->rf_filehandle.nfs_fh4_val)
2440 2442 kmem_free(fp->rf_filehandle.nfs_fh4_val,
2441 2443 fp->rf_filehandle.nfs_fh4_len);
2442 2444 cv_destroy(fp->rf_dinfo.rd_recall_cv);
2443 2445 if (fp->rf_vp) {
2444 2446 vnode_t *vp = fp->rf_vp;
2445 2447
2446 2448 mutex_enter(&vp->v_vsd_lock);
2447 2449 (void) vsd_set(vp, nfs4_srv_vkey, NULL);
2448 2450 mutex_exit(&vp->v_vsd_lock);
2449 2451 VN_RELE(vp);
2450 2452 fp->rf_vp = NULL;
2451 2453 }
2452 2454 rw_destroy(&fp->rf_file_rwlock);
2453 2455 }
2454 2456
2455 2457 /*
2456 2458  * Used to release the hold on the underlying dbe struct only
2457 2459 */
2458 2460 void
2459 2461 rfs4_file_rele(rfs4_file_t *fp)
2460 2462 {
2461 2463 rfs4_dbe_rele(fp->rf_dbe);
2462 2464 }
2463 2465
2464 2466 typedef struct {
2465 2467 vnode_t *vp;
2466 2468 nfs_fh4 *fh;
2467 2469 } rfs4_fcreate_arg;
2468 2470
2469 2471 static bool_t
2470 2472 rfs4_file_create(rfs4_entry_t u_entry, void *arg)
2471 2473 {
2472 2474 rfs4_file_t *fp = (rfs4_file_t *)u_entry;
2473 2475 rfs4_fcreate_arg *ap = (rfs4_fcreate_arg *)arg;
2474 2476 vnode_t *vp = ap->vp;
2475 2477 nfs_fh4 *fh = ap->fh;
2476 2478
2477 2479 VN_HOLD(vp);
2478 2480
2479 2481 fp->rf_filehandle.nfs_fh4_len = 0;
2480 2482 fp->rf_filehandle.nfs_fh4_val = NULL;
2481 2483 ASSERT(fh && fh->nfs_fh4_len);
2482 2484 if (fh && fh->nfs_fh4_len) {
2483 2485 fp->rf_filehandle.nfs_fh4_val =
2484 2486 kmem_alloc(fh->nfs_fh4_len, KM_SLEEP);
2485 2487 nfs_fh4_copy(fh, &fp->rf_filehandle);
2486 2488 }
2487 2489 fp->rf_vp = vp;
2488 2490
2489 2491 list_create(&fp->rf_delegstatelist, sizeof (rfs4_deleg_state_t),
2490 2492 offsetof(rfs4_deleg_state_t, rds_node));
2491 2493
2492 2494 fp->rf_share_deny = fp->rf_share_access = fp->rf_access_read = 0;
2493 2495 fp->rf_access_write = fp->rf_deny_read = fp->rf_deny_write = 0;
2494 2496
2495 2497 mutex_init(fp->rf_dinfo.rd_recall_lock, NULL, MUTEX_DEFAULT, NULL);
2496 2498 cv_init(fp->rf_dinfo.rd_recall_cv, NULL, CV_DEFAULT, NULL);
2497 2499
2498 2500 fp->rf_dinfo.rd_dtype = OPEN_DELEGATE_NONE;
2499 2501
2500 2502 rw_init(&fp->rf_file_rwlock, NULL, RW_DEFAULT, NULL);
2501 2503
2502 2504 mutex_enter(&vp->v_vsd_lock);
2503 2505 VERIFY(vsd_set(vp, nfs4_srv_vkey, (void *)fp) == 0);
2504 2506 mutex_exit(&vp->v_vsd_lock);
2505 2507
2506 2508 return (TRUE);
2507 2509 }
2508 2510
2509 2511 rfs4_file_t *
2510 2512 rfs4_findfile(vnode_t *vp, nfs_fh4 *fh, bool_t *create)
2511 2513 {
2512 2514 rfs4_file_t *fp;
2513 2515 rfs4_fcreate_arg arg;
2514 2516 nfs4_srv_t *nsrv4 = zone_getspecific(rfs4_zone_key, curzone);
2515 2517
2516 2518 arg.vp = vp;
2517 2519 arg.fh = fh;
2518 2520
2519 2521 if (*create == TRUE)
2520 2522 /* CSTYLED */
2521 2523 fp = (rfs4_file_t *)rfs4_dbsearch(nsrv4->rfs4_file_idx, vp, create,
2522 2524 &arg, RFS4_DBS_VALID);
2523 2525 else {
2524 2526 mutex_enter(&vp->v_vsd_lock);
2525 2527 fp = (rfs4_file_t *)vsd_get(vp, nfs4_srv_vkey);
2526 2528 if (fp) {
2527 2529 rfs4_dbe_lock(fp->rf_dbe);
2528 2530 if (rfs4_dbe_is_invalid(fp->rf_dbe) ||
2529 2531 (rfs4_dbe_refcnt(fp->rf_dbe) == 0)) {
2530 2532 rfs4_dbe_unlock(fp->rf_dbe);
2531 2533 fp = NULL;
2532 2534 } else {
2533 2535 rfs4_dbe_hold(fp->rf_dbe);
2534 2536 rfs4_dbe_unlock(fp->rf_dbe);
2535 2537 }
2536 2538 }
2537 2539 mutex_exit(&vp->v_vsd_lock);
2538 2540 }
2539 2541 return (fp);
2540 2542 }
2541 2543
2542 2544 /*
2543 2545  * Find a file in the db and, once it is located, take the rw lock.
2544 2546  * We must then check the vnode pointer: if it is NULL (the file was
2545 2547  * removed between the db lookup and this check), redo the find.  This
2546 2548  * assumes that a file struct that has a NULL vnode pointer is marked
2547 2549  * as 'invalid' and will not be found in the db the second time
2548 2550  * around.
2549 2551 */
2550 2552 rfs4_file_t *
2551 2553 rfs4_findfile_withlock(vnode_t *vp, nfs_fh4 *fh, bool_t *create)
2552 2554 {
2553 2555 rfs4_file_t *fp;
2554 2556 rfs4_fcreate_arg arg;
2555 2557 bool_t screate = *create;
2556 2558 nfs4_srv_t *nsrv4 = zone_getspecific(rfs4_zone_key, curzone);
2557 2559
2558 2560 if (screate == FALSE) {
2559 2561 mutex_enter(&vp->v_vsd_lock);
2560 2562 fp = (rfs4_file_t *)vsd_get(vp, nfs4_srv_vkey);
2561 2563 if (fp) {
2562 2564 rfs4_dbe_lock(fp->rf_dbe);
2563 2565 if (rfs4_dbe_is_invalid(fp->rf_dbe) ||
2564 2566 (rfs4_dbe_refcnt(fp->rf_dbe) == 0)) {
2565 2567 rfs4_dbe_unlock(fp->rf_dbe);
2566 2568 mutex_exit(&vp->v_vsd_lock);
2567 2569 fp = NULL;
2568 2570 } else {
2569 2571 rfs4_dbe_hold(fp->rf_dbe);
2570 2572 rfs4_dbe_unlock(fp->rf_dbe);
2571 2573 mutex_exit(&vp->v_vsd_lock);
2572 2574 rw_enter(&fp->rf_file_rwlock, RW_WRITER);
2573 2575 if (fp->rf_vp == NULL) {
2574 2576 rw_exit(&fp->rf_file_rwlock);
2575 2577 rfs4_file_rele(fp);
2576 2578 fp = NULL;
2577 2579 }
2578 2580 }
2579 2581 } else {
2580 2582 mutex_exit(&vp->v_vsd_lock);
2581 2583 }
2582 2584 } else {
2583 2585 retry:
2584 2586 arg.vp = vp;
2585 2587 arg.fh = fh;
2586 2588
2587 2589 fp = (rfs4_file_t *)rfs4_dbsearch(nsrv4->rfs4_file_idx, vp,
2588 2590 create, &arg, RFS4_DBS_VALID);
2589 2591 if (fp != NULL) {
2590 2592 rw_enter(&fp->rf_file_rwlock, RW_WRITER);
2591 2593 if (fp->rf_vp == NULL) {
2592 2594 rw_exit(&fp->rf_file_rwlock);
2593 2595 rfs4_file_rele(fp);
2594 2596 *create = screate;
2595 2597 goto retry;
2596 2598 }
2597 2599 }
2598 2600 }
2599 2601
2600 2602 return (fp);
2601 2603 }
2602 2604
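/*
 * Lock-owner state is indexed two ways: by lock stateid (lo_state_hash()
 * et al., below) and by the (lock owner, open state) pair
 * (lo_state_lo_hash() et al., further down).
 */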
2603 2605 static uint32_t
2604 2606 lo_state_hash(void *key)
2605 2607 {
2606 2608 stateid_t *id = key;
2607 2609
2608 2610 return (id->bits.ident+id->bits.pid);
2609 2611 }
2610 2612
2611 2613 static bool_t
2612 2614 lo_state_compare(rfs4_entry_t u_entry, void *key)
2613 2615 {
2614 2616 rfs4_lo_state_t *lsp = (rfs4_lo_state_t *)u_entry;
2615 2617 stateid_t *id = key;
2616 2618 bool_t rc;
2617 2619
2618 2620 rc = (lsp->rls_lockid.bits.boottime == id->bits.boottime &&
2619 2621 lsp->rls_lockid.bits.type == id->bits.type &&
2620 2622 lsp->rls_lockid.bits.ident == id->bits.ident &&
2621 2623 lsp->rls_lockid.bits.pid == id->bits.pid);
2622 2624
2623 2625 return (rc);
2624 2626 }
2625 2627
2626 2628 static void *
2627 2629 lo_state_mkkey(rfs4_entry_t u_entry)
2628 2630 {
2629 2631 rfs4_lo_state_t *lsp = (rfs4_lo_state_t *)u_entry;
2630 2632
2631 2633 return (&lsp->rls_lockid);
2632 2634 }
2633 2635
2634 2636 static bool_t
2635 2637 rfs4_lo_state_expiry(rfs4_entry_t u_entry)
2636 2638 {
2637 2639 rfs4_lo_state_t *lsp = (rfs4_lo_state_t *)u_entry;
2638 2640
2639 2641 if (rfs4_dbe_is_invalid(lsp->rls_dbe))
2640 2642 return (TRUE);
2641 2643 if (lsp->rls_state->rs_closed)
2642 2644 return (TRUE);
2643 2645 return ((gethrestime_sec() -
2644 2646 lsp->rls_state->rs_owner->ro_client->rc_last_access
2645 2647 > rfs4_lease_time));
2646 2648 }
2647 2649
2648 2650 static void
2649 2651 rfs4_lo_state_destroy(rfs4_entry_t u_entry)
2650 2652 {
2651 2653 rfs4_lo_state_t *lsp = (rfs4_lo_state_t *)u_entry;
2652 2654
2653 2655 rfs4_dbe_lock(lsp->rls_state->rs_dbe);
2654 2656 list_remove(&lsp->rls_state->rs_lostatelist, lsp);
2655 2657 rfs4_dbe_unlock(lsp->rls_state->rs_dbe);
2656 2658
2657 2659 rfs4_sw_destroy(&lsp->rls_sw);
2658 2660
2659 2661 /* Make sure to release the file locks */
2660 2662 if (lsp->rls_locks_cleaned == FALSE) {
2661 2663 lsp->rls_locks_cleaned = TRUE;
2662 2664 if (lsp->rls_locker->rl_client->rc_sysidt != LM_NOSYSID) {
2663 2665 /* Is the PxFS kernel module loaded? */
2664 2666 if (lm_remove_file_locks != NULL) {
2665 2667 int new_sysid;
2666 2668
2667 2669 /* Encode the cluster nodeid in new sysid */
2668 2670 new_sysid =
2669 2671 lsp->rls_locker->rl_client->rc_sysidt;
2670 2672 lm_set_nlmid_flk(&new_sysid);
2671 2673
2672 2674 /*
2673 2675 * This PxFS routine removes file locks for a
2674 2676 * client over all nodes of a cluster.
2675 2677 */
2676 2678 DTRACE_PROBE1(nfss_i_clust_rm_lck,
2677 2679 int, new_sysid);
2678 2680 (*lm_remove_file_locks)(new_sysid);
2679 2681 } else {
2680 2682 (void) cleanlocks(
2681 2683 lsp->rls_state->rs_finfo->rf_vp,
2682 2684 lsp->rls_locker->rl_pid,
2683 2685 lsp->rls_locker->rl_client->rc_sysidt);
2684 2686 }
2685 2687 }
2686 2688 }
2687 2689
2688 2690 /* Free the last reply for this state */
2689 2691 rfs4_free_reply(&lsp->rls_reply);
2690 2692
2691 2693 rfs4_lockowner_rele(lsp->rls_locker);
2692 2694 lsp->rls_locker = NULL;
2693 2695
2694 2696 rfs4_state_rele_nounlock(lsp->rls_state);
2695 2697 lsp->rls_state = NULL;
2696 2698 }
2697 2699
2698 2700 static bool_t
2699 2701 rfs4_lo_state_create(rfs4_entry_t u_entry, void *arg)
2700 2702 {
2701 2703 rfs4_lo_state_t *lsp = (rfs4_lo_state_t *)u_entry;
2702 2704 rfs4_lo_state_t *argp = (rfs4_lo_state_t *)arg;
2703 2705 rfs4_lockowner_t *lo = argp->rls_locker;
2704 2706 rfs4_state_t *sp = argp->rls_state;
2705 2707
2706 2708 lsp->rls_state = sp;
2707 2709
2708 2710 lsp->rls_lockid = sp->rs_stateid;
2709 2711 lsp->rls_lockid.bits.type = LOCKID;
2710 2712 lsp->rls_lockid.bits.chgseq = 0;
2711 2713 lsp->rls_lockid.bits.pid = lo->rl_pid;
2712 2714
2713 2715 lsp->rls_locks_cleaned = FALSE;
2714 2716 lsp->rls_lock_completed = FALSE;
2715 2717
2716 2718 rfs4_sw_init(&lsp->rls_sw);
2717 2719
2718 2720 	/* Attach the supplied lock owner */
2719 2721 rfs4_dbe_hold(lo->rl_dbe);
2720 2722 lsp->rls_locker = lo;
2721 2723
2722 2724 rfs4_dbe_lock(sp->rs_dbe);
2723 2725 list_insert_tail(&sp->rs_lostatelist, lsp);
2724 2726 rfs4_dbe_hold(sp->rs_dbe);
2725 2727 rfs4_dbe_unlock(sp->rs_dbe);
2726 2728
2727 2729 return (TRUE);
2728 2730 }
2729 2731
2730 2732 void
2731 2733 rfs4_lo_state_rele(rfs4_lo_state_t *lsp, bool_t unlock_fp)
2732 2734 {
2733 2735 if (unlock_fp == TRUE)
2734 2736 rw_exit(&lsp->rls_state->rs_finfo->rf_file_rwlock);
2735 2737 rfs4_dbe_rele(lsp->rls_dbe);
2736 2738 }
2737 2739
2738 2740 static rfs4_lo_state_t *
2739 2741 rfs4_findlo_state(stateid_t *id, bool_t lock_fp)
2740 2742 {
2741 2743 rfs4_lo_state_t *lsp;
2742 2744 bool_t create = FALSE;
2743 2745 nfs4_srv_t *nsrv4 = zone_getspecific(rfs4_zone_key, curzone);
2744 2746
2745 2747 lsp = (rfs4_lo_state_t *)rfs4_dbsearch(nsrv4->rfs4_lo_state_idx, id,
2746 2748 &create, NULL, RFS4_DBS_VALID);
2747 2749 if (lock_fp == TRUE && lsp != NULL)
2748 2750 rw_enter(&lsp->rls_state->rs_finfo->rf_file_rwlock, RW_READER);
2749 2751
2750 2752 return (lsp);
2751 2753 }
2752 2754
2753 2755
2754 2756 static uint32_t
2755 2757 lo_state_lo_hash(void *key)
2756 2758 {
2757 2759 rfs4_lo_state_t *lsp = key;
2758 2760
2759 2761 return (ADDRHASH(lsp->rls_locker) ^ ADDRHASH(lsp->rls_state));
2760 2762 }
2761 2763
2762 2764 static bool_t
2763 2765 lo_state_lo_compare(rfs4_entry_t u_entry, void *key)
2764 2766 {
2765 2767 rfs4_lo_state_t *lsp = (rfs4_lo_state_t *)u_entry;
2766 2768 rfs4_lo_state_t *keyp = key;
2767 2769
2768 2770 return (keyp->rls_locker == lsp->rls_locker &&
2769 2771 keyp->rls_state == lsp->rls_state);
2770 2772 }
2771 2773
2772 2774 static void *
2773 2775 lo_state_lo_mkkey(rfs4_entry_t u_entry)
2774 2776 {
2775 2777 return (u_entry);
2776 2778 }
2777 2779
2778 2780 rfs4_lo_state_t *
2779 2781 rfs4_findlo_state_by_owner(rfs4_lockowner_t *lo, rfs4_state_t *sp,
2780 2782 bool_t *create)
2781 2783 {
2782 2784 rfs4_lo_state_t *lsp;
2783 2785 rfs4_lo_state_t arg;
2784 2786 nfs4_srv_t *nsrv4 = zone_getspecific(rfs4_zone_key, curzone);
2785 2787
2786 2788 arg.rls_locker = lo;
2787 2789 arg.rls_state = sp;
2788 2790
2789 2791 lsp = (rfs4_lo_state_t *)rfs4_dbsearch(nsrv4->rfs4_lo_state_owner_idx,
2790 2792 &arg, create, &arg, RFS4_DBS_VALID);
2791 2793
2792 2794 return (lsp);
2793 2795 }
2794 2796
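/*
 * Construct a fresh stateid: boottime is the server's start time and ident
 * is the entry's database id; type, chgseq and pid are filled in by the
 * caller as needed.
 */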
2795 2797 static stateid_t
2796 2798 get_stateid(id_t eid)
2797 2799 {
2798 2800 stateid_t id;
2799 2801 nfs4_srv_t *nsrv4;
2800 2802
2801 2803 nsrv4 = zone_getspecific(rfs4_zone_key, curzone);
2802 2804
2803 2805 id.bits.boottime = nsrv4->rfs4_start_time;
2804 2806 id.bits.ident = eid;
2805 2807 id.bits.chgseq = 0;
2806 2808 id.bits.type = 0;
2807 2809 id.bits.pid = 0;
2808 2810
2809 2811 /*
2810 2812 * If we are booted as a cluster node, embed our nodeid.
2811 2813 * We've already done sanity checks in rfs4_client_create() so no
2812 2814 * need to repeat them here.
2813 2815 */
2814 2816 id.bits.clnodeid = (cluster_bootflags & CLUSTER_BOOTED) ?
2815 2817 clconf_get_nodeid() : 0;
2816 2818
2817 2819 return (id);
2818 2820 }
2819 2821
2820 2822 /*
2821 2823 * For use only when booted as a cluster node.
2822 2824 * Returns TRUE if the embedded nodeid indicates that this stateid was
2823 2825 * generated on another node.
2824 2826 */
2825 2827 static int
2826 2828 foreign_stateid(stateid_t *id)
2827 2829 {
2828 2830 ASSERT(cluster_bootflags & CLUSTER_BOOTED);
2829 2831 return (id->bits.clnodeid != (uint32_t)clconf_get_nodeid());
2830 2832 }
2831 2833
2832 2834 /*
2833 2835 * For use only when booted as a cluster node.
2834 2836 * Returns TRUE if the embedded nodeid indicates that this clientid was
2835 2837 * generated on another node.
2836 2838 */
2837 2839 static int
2838 2840 foreign_clientid(cid *cidp)
2839 2841 {
2840 2842 ASSERT(cluster_bootflags & CLUSTER_BOOTED);
2841 2843 return (cidp->impl_id.c_id >> CLUSTER_NODEID_SHIFT !=
2842 2844 (uint32_t)clconf_get_nodeid());
2843 2845 }
2844 2846
2845 2847 /*
2846 2848 * For use only when booted as a cluster node.
2847 2849 * Embed our cluster nodeid into the clientid.
2848 2850 */
2849 2851 static void
2850 2852 embed_nodeid(cid *cidp)
2851 2853 {
2852 2854 int clnodeid;
2853 2855 /*
2854 2856 * Currently, our state tables are small enough that their
2855 2857 * ids will leave enough bits free for the nodeid. If the
2856 2858 * tables become larger, we mustn't overwrite the id.
2857 2859 * Equally, we only have room for so many bits of nodeid, so
2858 2860 * must check that too.
2859 2861 */
2860 2862 ASSERT(cluster_bootflags & CLUSTER_BOOTED);
2861 2863 ASSERT(cidp->impl_id.c_id >> CLUSTER_NODEID_SHIFT == 0);
2862 2864 clnodeid = clconf_get_nodeid();
2863 2865 ASSERT(clnodeid <= CLUSTER_MAX_NODEID);
2864 2866 ASSERT(clnodeid != NODEID_UNKNOWN);
2865 2867 cidp->impl_id.c_id |= (clnodeid << CLUSTER_NODEID_SHIFT);
2866 2868 }
2867 2869
2868 2870 static uint32_t
2869 2871 state_hash(void *key)
2870 2872 {
2871 2873 stateid_t *ip = (stateid_t *)key;
2872 2874
2873 2875 return (ip->bits.ident);
2874 2876 }
2875 2877
2876 2878 static bool_t
2877 2879 state_compare(rfs4_entry_t u_entry, void *key)
2878 2880 {
2879 2881 rfs4_state_t *sp = (rfs4_state_t *)u_entry;
2880 2882 stateid_t *id = (stateid_t *)key;
2881 2883 bool_t rc;
2882 2884
2883 2885 rc = (sp->rs_stateid.bits.boottime == id->bits.boottime &&
2884 2886 sp->rs_stateid.bits.ident == id->bits.ident);
2885 2887
2886 2888 return (rc);
2887 2889 }
2888 2890
2889 2891 static void *
2890 2892 state_mkkey(rfs4_entry_t u_entry)
2891 2893 {
2892 2894 rfs4_state_t *sp = (rfs4_state_t *)u_entry;
2893 2895
2894 2896 return (&sp->rs_stateid);
2895 2897 }
2896 2898
2897 2899 static void
2898 2900 rfs4_state_destroy(rfs4_entry_t u_entry)
2899 2901 {
2900 2902 rfs4_state_t *sp = (rfs4_state_t *)u_entry;
2901 2903
2902 2904 /* remove from openowner list */
2903 2905 rfs4_dbe_lock(sp->rs_owner->ro_dbe);
2904 2906 list_remove(&sp->rs_owner->ro_statelist, sp);
2905 2907 rfs4_dbe_unlock(sp->rs_owner->ro_dbe);
2906 2908
2907 2909 list_destroy(&sp->rs_lostatelist);
2908 2910
2909 2911 /* release any share locks for this stateid if it's still open */
2910 2912 if (!sp->rs_closed) {
2911 2913 rfs4_dbe_lock(sp->rs_dbe);
2912 2914 (void) rfs4_unshare(sp);
2913 2915 rfs4_dbe_unlock(sp->rs_dbe);
2914 2916 }
2915 2917
2916 2918 	/* We're done with the file */
2917 2919 rfs4_file_rele(sp->rs_finfo);
2918 2920 sp->rs_finfo = NULL;
2919 2921
2920 2922 /* And now with the openowner */
2921 2923 rfs4_openowner_rele(sp->rs_owner);
2922 2924 sp->rs_owner = NULL;
2923 2925 }
2924 2926
2925 2927 static void
2926 2928 rfs4_state_rele_nounlock(rfs4_state_t *sp)
2927 2929 {
2928 2930 rfs4_dbe_rele(sp->rs_dbe);
2929 2931 }
2930 2932
2931 2933 void
2932 2934 rfs4_state_rele(rfs4_state_t *sp)
2933 2935 {
2934 2936 rw_exit(&sp->rs_finfo->rf_file_rwlock);
2935 2937 rfs4_dbe_rele(sp->rs_dbe);
2936 2938 }
2937 2939
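/*
 * Delegation state is indexed two ways: by the (client, file) pair and by
 * the delegation stateid.
 */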
2938 2940 static uint32_t
2939 2941 deleg_hash(void *key)
2940 2942 {
2941 2943 rfs4_deleg_state_t *dsp = (rfs4_deleg_state_t *)key;
2942 2944
2943 2945 return (ADDRHASH(dsp->rds_client) ^ ADDRHASH(dsp->rds_finfo));
2944 2946 }
2945 2947
2946 2948 static bool_t
2947 2949 deleg_compare(rfs4_entry_t u_entry, void *key)
2948 2950 {
2949 2951 rfs4_deleg_state_t *dsp = (rfs4_deleg_state_t *)u_entry;
2950 2952 rfs4_deleg_state_t *kdsp = (rfs4_deleg_state_t *)key;
2951 2953
2952 2954 return (dsp->rds_client == kdsp->rds_client &&
2953 2955 dsp->rds_finfo == kdsp->rds_finfo);
2954 2956 }
2955 2957
2956 2958 static void *
2957 2959 deleg_mkkey(rfs4_entry_t u_entry)
2958 2960 {
2959 2961 return (u_entry);
2960 2962 }
2961 2963
2962 2964 static uint32_t
2963 2965 deleg_state_hash(void *key)
2964 2966 {
2965 2967 stateid_t *ip = (stateid_t *)key;
2966 2968
2967 2969 return (ip->bits.ident);
2968 2970 }
2969 2971
2970 2972 static bool_t
2971 2973 deleg_state_compare(rfs4_entry_t u_entry, void *key)
2972 2974 {
2973 2975 rfs4_deleg_state_t *dsp = (rfs4_deleg_state_t *)u_entry;
2974 2976 stateid_t *id = (stateid_t *)key;
2975 2977 bool_t rc;
2976 2978
2977 2979 if (id->bits.type != DELEGID)
2978 2980 return (FALSE);
2979 2981
2980 2982 rc = (dsp->rds_delegid.bits.boottime == id->bits.boottime &&
2981 2983 dsp->rds_delegid.bits.ident == id->bits.ident);
2982 2984
2983 2985 return (rc);
2984 2986 }
2985 2987
2986 2988 static void *
2987 2989 deleg_state_mkkey(rfs4_entry_t u_entry)
2988 2990 {
2989 2991 rfs4_deleg_state_t *dsp = (rfs4_deleg_state_t *)u_entry;
2990 2992
2991 2993 return (&dsp->rds_delegid);
2992 2994 }
2993 2995
2994 2996 static bool_t
2995 2997 rfs4_deleg_state_expiry(rfs4_entry_t u_entry)
2996 2998 {
2997 2999 rfs4_deleg_state_t *dsp = (rfs4_deleg_state_t *)u_entry;
2998 3000
2999 3001 if (rfs4_dbe_is_invalid(dsp->rds_dbe))
3000 3002 return (TRUE);
3001 3003
3002 3004 if (dsp->rds_dtype == OPEN_DELEGATE_NONE)
3003 3005 return (TRUE);
3004 3006
3005 3007 if ((gethrestime_sec() - dsp->rds_client->rc_last_access
3006 3008 > rfs4_lease_time)) {
3007 3009 rfs4_dbe_invalidate(dsp->rds_dbe);
3008 3010 return (TRUE);
3009 3011 }
3010 3012
3011 3013 return (FALSE);
3012 3014 }
3013 3015
3014 3016 static bool_t
3015 3017 rfs4_deleg_state_create(rfs4_entry_t u_entry, void *argp)
3016 3018 {
3017 3019 rfs4_deleg_state_t *dsp = (rfs4_deleg_state_t *)u_entry;
3018 3020 rfs4_file_t *fp = ((rfs4_deleg_state_t *)argp)->rds_finfo;
3019 3021 rfs4_client_t *cp = ((rfs4_deleg_state_t *)argp)->rds_client;
3020 3022
3021 3023 rfs4_dbe_hold(fp->rf_dbe);
3022 3024 rfs4_dbe_hold(cp->rc_dbe);
3023 3025
3024 3026 dsp->rds_delegid = get_stateid(rfs4_dbe_getid(dsp->rds_dbe));
3025 3027 dsp->rds_delegid.bits.type = DELEGID;
3026 3028 dsp->rds_finfo = fp;
3027 3029 dsp->rds_client = cp;
3028 3030 dsp->rds_dtype = OPEN_DELEGATE_NONE;
3029 3031
3030 3032 dsp->rds_time_granted = gethrestime_sec(); /* observability */
3031 3033 dsp->rds_time_revoked = 0;
3032 3034
3033 3035 list_link_init(&dsp->rds_node);
3034 3036
3035 3037 return (TRUE);
3036 3038 }
3037 3039
3038 3040 static void
3039 3041 rfs4_deleg_state_destroy(rfs4_entry_t u_entry)
3040 3042 {
3041 3043 rfs4_deleg_state_t *dsp = (rfs4_deleg_state_t *)u_entry;
3042 3044
3043 3045 /* return delegation if necessary */
3044 3046 rfs4_return_deleg(dsp, FALSE);
3045 3047
3046 3048 	/* We're done with the file */
3047 3049 rfs4_file_rele(dsp->rds_finfo);
3048 3050 dsp->rds_finfo = NULL;
3049 3051
3050 3052 	/* And now with the client */
3051 3053 rfs4_client_rele(dsp->rds_client);
3052 3054 dsp->rds_client = NULL;
3053 3055 }
3054 3056
3055 3057 rfs4_deleg_state_t *
3056 3058 rfs4_finddeleg(rfs4_state_t *sp, bool_t *create)
3057 3059 {
3058 3060 rfs4_deleg_state_t ds, *dsp;
3059 3061 nfs4_srv_t *nsrv4 = zone_getspecific(rfs4_zone_key, curzone);
3060 3062
3061 3063 ds.rds_client = sp->rs_owner->ro_client;
3062 3064 ds.rds_finfo = sp->rs_finfo;
3063 3065
3064 3066 dsp = (rfs4_deleg_state_t *)rfs4_dbsearch(nsrv4->rfs4_deleg_idx, &ds,
3065 3067 create, &ds, RFS4_DBS_VALID);
3066 3068
3067 3069 return (dsp);
3068 3070 }
3069 3071
3070 3072 rfs4_deleg_state_t *
3071 3073 rfs4_finddelegstate(stateid_t *id)
3072 3074 {
3073 3075 rfs4_deleg_state_t *dsp;
3074 3076 bool_t create = FALSE;
3075 3077 nfs4_srv_t *nsrv4 = zone_getspecific(rfs4_zone_key, curzone);
3076 3078
3077 3079 dsp = (rfs4_deleg_state_t *)rfs4_dbsearch(nsrv4->rfs4_deleg_state_idx,
3078 3080 id, &create, NULL, RFS4_DBS_VALID);
3079 3081
3080 3082 return (dsp);
3081 3083 }
3082 3084
3083 3085 void
3084 3086 rfs4_deleg_state_rele(rfs4_deleg_state_t *dsp)
3085 3087 {
3086 3088 rfs4_dbe_rele(dsp->rds_dbe);
3087 3089 }
3088 3090
3089 3091 void
3090 3092 rfs4_update_lock_sequence(rfs4_lo_state_t *lsp)
3091 3093 {
3092 3094
3093 3095 rfs4_dbe_lock(lsp->rls_dbe);
3094 3096
3095 3097 /*
3096 3098 	 * If we are skipping sequence id checking, this is the first
3097 3099 	 * lock request and therefore the sequence id does not need to
3098 3100 	 * be updated.  This only happens on the first lock request
3099 3101 	 * for a lockowner.
3100 3102 */
3101 3103 if (!lsp->rls_skip_seqid_check)
3102 3104 lsp->rls_seqid++;
3103 3105
3104 3106 rfs4_dbe_unlock(lsp->rls_dbe);
3105 3107 }
3106 3108
3107 3109 void
3108 3110 rfs4_update_lock_resp(rfs4_lo_state_t *lsp, nfs_resop4 *resp)
3109 3111 {
3110 3112
3111 3113 rfs4_dbe_lock(lsp->rls_dbe);
3112 3114
3113 3115 rfs4_free_reply(&lsp->rls_reply);
3114 3116
3115 3117 rfs4_copy_reply(&lsp->rls_reply, resp);
3116 3118
3117 3119 rfs4_dbe_unlock(lsp->rls_dbe);
3118 3120 }
3119 3121
3120 3122 void
3121 3123 rfs4_free_opens(rfs4_openowner_t *oo, bool_t invalidate,
3122 3124 bool_t close_of_client)
3123 3125 {
3124 3126 rfs4_state_t *sp;
3125 3127
3126 3128 rfs4_dbe_lock(oo->ro_dbe);
3127 3129
3128 3130 for (sp = list_head(&oo->ro_statelist); sp != NULL;
3129 3131 sp = list_next(&oo->ro_statelist, sp)) {
3130 3132 rfs4_state_close(sp, FALSE, close_of_client, CRED());
3131 3133 if (invalidate == TRUE)
3132 3134 rfs4_dbe_invalidate(sp->rs_dbe);
3133 3135 }
3134 3136
3135 3137 rfs4_dbe_invalidate(oo->ro_dbe);
3136 3138 rfs4_dbe_unlock(oo->ro_dbe);
3137 3139 }
3138 3140
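/*
 * Open state is indexed by the (open owner, file) pair and by file alone;
 * both compare routines skip states that have already been closed.
 */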
3139 3141 static uint32_t
3140 3142 state_owner_file_hash(void *key)
3141 3143 {
3142 3144 rfs4_state_t *sp = key;
3143 3145
3144 3146 return (ADDRHASH(sp->rs_owner) ^ ADDRHASH(sp->rs_finfo));
3145 3147 }
3146 3148
3147 3149 static bool_t
3148 3150 state_owner_file_compare(rfs4_entry_t u_entry, void *key)
3149 3151 {
3150 3152 rfs4_state_t *sp = (rfs4_state_t *)u_entry;
3151 3153 rfs4_state_t *arg = key;
3152 3154
3153 3155 if (sp->rs_closed == TRUE)
3154 3156 return (FALSE);
3155 3157
3156 3158 return (arg->rs_owner == sp->rs_owner && arg->rs_finfo == sp->rs_finfo);
3157 3159 }
3158 3160
3159 3161 static void *
3160 3162 state_owner_file_mkkey(rfs4_entry_t u_entry)
3161 3163 {
3162 3164 return (u_entry);
3163 3165 }
3164 3166
3165 3167 static uint32_t
3166 3168 state_file_hash(void *key)
3167 3169 {
3168 3170 return (ADDRHASH(key));
3169 3171 }
3170 3172
3171 3173 static bool_t
3172 3174 state_file_compare(rfs4_entry_t u_entry, void *key)
3173 3175 {
3174 3176 rfs4_state_t *sp = (rfs4_state_t *)u_entry;
3175 3177 rfs4_file_t *fp = key;
3176 3178
3177 3179 if (sp->rs_closed == TRUE)
3178 3180 return (FALSE);
3179 3181
3180 3182 return (fp == sp->rs_finfo);
3181 3183 }
3182 3184
3183 3185 static void *
3184 3186 state_file_mkkey(rfs4_entry_t u_entry)
3185 3187 {
3186 3188 rfs4_state_t *sp = (rfs4_state_t *)u_entry;
3187 3189
3188 3190 return (sp->rs_finfo);
3189 3191 }
3190 3192
3191 3193 rfs4_state_t *
3192 3194 rfs4_findstate_by_owner_file(rfs4_openowner_t *oo, rfs4_file_t *fp,
3193 3195 bool_t *create)
3194 3196 {
3195 3197 rfs4_state_t *sp;
3196 3198 rfs4_state_t key;
3197 3199 nfs4_srv_t *nsrv4 = zone_getspecific(rfs4_zone_key, curzone);
3198 3200
3199 3201 key.rs_owner = oo;
3200 3202 key.rs_finfo = fp;
3201 3203
3202 3204 sp = (rfs4_state_t *)rfs4_dbsearch(nsrv4->rfs4_state_owner_file_idx,
3203 3205 &key, create, &key, RFS4_DBS_VALID);
3204 3206
3205 3207 return (sp);
3206 3208 }
3207 3209
3208 3210 /* This returns ANY state struct that refers to this file */
3209 3211 static rfs4_state_t *
3210 3212 rfs4_findstate_by_file(rfs4_file_t *fp)
3211 3213 {
3212 3214 bool_t create = FALSE;
3213 3215 nfs4_srv_t *nsrv4 = zone_getspecific(rfs4_zone_key, curzone);
3214 3216
3215 3217 return ((rfs4_state_t *)rfs4_dbsearch(nsrv4->rfs4_state_file_idx, fp,
3216 3218 &create, fp, RFS4_DBS_VALID));
3217 3219 }
3218 3220
3219 3221 static bool_t
3220 3222 rfs4_state_expiry(rfs4_entry_t u_entry)
3221 3223 {
3222 3224 rfs4_state_t *sp = (rfs4_state_t *)u_entry;
3223 3225
3224 3226 if (rfs4_dbe_is_invalid(sp->rs_dbe))
3225 3227 return (TRUE);
3226 3228
3227 3229 if (sp->rs_closed == TRUE &&
3228 3230 ((gethrestime_sec() - rfs4_dbe_get_timerele(sp->rs_dbe))
3229 3231 > rfs4_lease_time))
3230 3232 return (TRUE);
3231 3233
3232 3234 return ((gethrestime_sec() - sp->rs_owner->ro_client->rc_last_access
3233 3235 > rfs4_lease_time));
3234 3236 }
3235 3237
3236 3238 static bool_t
3237 3239 rfs4_state_create(rfs4_entry_t u_entry, void *argp)
3238 3240 {
3239 3241 rfs4_state_t *sp = (rfs4_state_t *)u_entry;
3240 3242 rfs4_file_t *fp = ((rfs4_state_t *)argp)->rs_finfo;
3241 3243 rfs4_openowner_t *oo = ((rfs4_state_t *)argp)->rs_owner;
3242 3244
3243 3245 rfs4_dbe_hold(fp->rf_dbe);
3244 3246 rfs4_dbe_hold(oo->ro_dbe);
3245 3247 sp->rs_stateid = get_stateid(rfs4_dbe_getid(sp->rs_dbe));
3246 3248 sp->rs_stateid.bits.type = OPENID;
3247 3249 sp->rs_owner = oo;
3248 3250 sp->rs_finfo = fp;
3249 3251
3250 3252 list_create(&sp->rs_lostatelist, sizeof (rfs4_lo_state_t),
3251 3253 offsetof(rfs4_lo_state_t, rls_node));
3252 3254
3253 3255 /* Insert state on per open owner's list */
3254 3256 rfs4_dbe_lock(oo->ro_dbe);
3255 3257 list_insert_tail(&oo->ro_statelist, sp);
3256 3258 rfs4_dbe_unlock(oo->ro_dbe);
3257 3259
3258 3260 return (TRUE);
3259 3261 }
3260 3262
3261 3263 static rfs4_state_t *
3262 3264 rfs4_findstate(stateid_t *id, rfs4_dbsearch_type_t find_invalid, bool_t lock_fp)
3263 3265 {
3264 3266 rfs4_state_t *sp;
3265 3267 bool_t create = FALSE;
3266 3268 nfs4_srv_t *nsrv4 = zone_getspecific(rfs4_zone_key, curzone);
3267 3269
3268 3270 sp = (rfs4_state_t *)rfs4_dbsearch(nsrv4->rfs4_state_idx, id,
3269 3271 &create, NULL, find_invalid);
3270 3272 if (lock_fp == TRUE && sp != NULL)
3271 3273 rw_enter(&sp->rs_finfo->rf_file_rwlock, RW_READER);
3272 3274
3273 3275 return (sp);
3274 3276 }
3275 3277
3276 3278 void
3277 3279 rfs4_state_close(rfs4_state_t *sp, bool_t lock_held, bool_t close_of_client,
3278 3280 cred_t *cr)
3279 3281 {
3280 3282 /* Remove the associated lo_state owners */
3281 3283 if (!lock_held)
3282 3284 rfs4_dbe_lock(sp->rs_dbe);
3283 3285
3284 3286 /*
3285 3287 * If refcnt == 0, the dbe is about to be destroyed.
3286 3288 	 * Lock state will be released by the reaper thread.
3287 3289 */
3288 3290
3289 3291 if (rfs4_dbe_refcnt(sp->rs_dbe) > 0) {
3290 3292 if (sp->rs_closed == FALSE) {
3291 3293 rfs4_release_share_lock_state(sp, cr, close_of_client);
3292 3294 sp->rs_closed = TRUE;
3293 3295 }
3294 3296 }
3295 3297
3296 3298 if (!lock_held)
3297 3299 rfs4_dbe_unlock(sp->rs_dbe);
3298 3300 }
3299 3301
3300 3302 /*
3301 3303 * Remove all state associated with the given client.
3302 3304 */
3303 3305 void
3304 3306 rfs4_client_state_remove(rfs4_client_t *cp)
3305 3307 {
3306 3308 rfs4_openowner_t *oo;
3307 3309
3308 3310 rfs4_dbe_lock(cp->rc_dbe);
3309 3311
3310 3312 for (oo = list_head(&cp->rc_openownerlist); oo != NULL;
3311 3313 oo = list_next(&cp->rc_openownerlist, oo)) {
3312 3314 rfs4_free_opens(oo, TRUE, TRUE);
3313 3315 }
3314 3316
3315 3317 rfs4_dbe_unlock(cp->rc_dbe);
3316 3318 }
3317 3319
3318 3320 void
3319 3321 rfs4_client_close(rfs4_client_t *cp)
3320 3322 {
3321 3323 /* Mark client as going away. */
3322 3324 rfs4_dbe_lock(cp->rc_dbe);
3323 3325 rfs4_dbe_invalidate(cp->rc_dbe);
3324 3326 rfs4_dbe_unlock(cp->rc_dbe);
3325 3327
3326 3328 rfs4_client_state_remove(cp);
3327 3329
3328 3330 /* Release the client */
3329 3331 rfs4_client_rele(cp);
3330 3332 }
3331 3333
3332 3334 nfsstat4
3333 3335 rfs4_check_clientid(clientid4 *cp, int setclid_confirm)
3334 3336 {
3335 3337 cid *cidp = (cid *) cp;
3336 3338 nfs4_srv_t *nsrv4;
3337 3339
3338 3340 nsrv4 = zone_getspecific(rfs4_zone_key, curzone);
3339 3341
3340 3342 /*
3341 3343 * If we are booted as a cluster node, check the embedded nodeid.
3342 3344 * If it indicates that this clientid was generated on another node,
3343 3345 * inform the client accordingly.
3344 3346 */
3345 3347 if (cluster_bootflags & CLUSTER_BOOTED && foreign_clientid(cidp))
3346 3348 return (NFS4ERR_STALE_CLIENTID);
3347 3349
3348 3350 /*
3349 3351 * If the server start time matches the time provided
3350 3352 * by the client (via the clientid) and this is NOT a
3351 3353 * setclientid_confirm then return EXPIRED.
3352 3354 */
3353 3355 if (!setclid_confirm &&
3354 3356 cidp->impl_id.start_time == nsrv4->rfs4_start_time)
3355 3357 return (NFS4ERR_EXPIRED);
3356 3358
3357 3359 return (NFS4ERR_STALE_CLIENTID);
3358 3360 }
3359 3361
3360 3362 /*
3361 3363 * This is used when a stateid has not been found amongst the
3362 3364 * current server's state. Check the stateid to see if it
3363 3365 * was from this server instantiation or not.
3364 3366 */
3365 3367 static nfsstat4
3366 3368 what_stateid_error(stateid_t *id, stateid_type_t type)
3367 3369 {
3368 3370 nfs4_srv_t *nsrv4;
3369 3371
3370 3372 nsrv4 = zone_getspecific(rfs4_zone_key, curzone);
3371 3373
3372 3374 /* If we are booted as a cluster node, was stateid locally generated? */
3373 3375 if ((cluster_bootflags & CLUSTER_BOOTED) && foreign_stateid(id))
3374 3376 return (NFS4ERR_STALE_STATEID);
3375 3377
3376 3378 /* If types don't match then no use checking further */
3377 3379 if (type != id->bits.type)
3378 3380 return (NFS4ERR_BAD_STATEID);
3379 3381
3380 3382 /* From a different server instantiation, return STALE */
3381 3383 if (id->bits.boottime != nsrv4->rfs4_start_time)
3382 3384 return (NFS4ERR_STALE_STATEID);
3383 3385
3384 3386 /*
3385 3387 * From this server but the state is most likely beyond lease
3386 3388 * timeout: return NFS4ERR_EXPIRED. However, there is the
3387 3389 * case of a delegation stateid. For delegations, there is a
3388 3390 * case where the state can be removed without the client's
3389 3391 * knowledge/consent: revocation. In the case of delegation
3390 3392 * revocation, the delegation state will be removed and will
3391 3393 * not be found. If the client does something like a
3392 3394 	 * DELEGRETURN or even a READ/WRITE with a delegation stateid
3393 3395 * that has been revoked, the server should return BAD_STATEID
3394 3396 * instead of the more common EXPIRED error.
3395 3397 */
3396 3398 if (id->bits.boottime == nsrv4->rfs4_start_time) {
3397 3399 if (type == DELEGID)
3398 3400 return (NFS4ERR_BAD_STATEID);
3399 3401 else
3400 3402 return (NFS4ERR_EXPIRED);
3401 3403 }
3402 3404
3403 3405 return (NFS4ERR_BAD_STATEID);
3404 3406 }
3405 3407
3406 3408 /*
3407 3409  * Used later on to find the various state structs.  When called from
3408 3410  * rfs4_check_stateid()->rfs4_get_all_state(), no file struct lock is
3409 3411  * taken (it is not needed), which helps performance on the
3410 3412  * read/write path.
3411 3413 */
3412 3414 static nfsstat4
3413 3415 rfs4_get_state_lockit(stateid4 *stateid, rfs4_state_t **spp,
3414 3416 rfs4_dbsearch_type_t find_invalid, bool_t lock_fp)
3415 3417 {
3416 3418 stateid_t *id = (stateid_t *)stateid;
3417 3419 rfs4_state_t *sp;
3418 3420
3419 3421 *spp = NULL;
3420 3422
3421 3423 /* If we are booted as a cluster node, was stateid locally generated? */
3422 3424 if ((cluster_bootflags & CLUSTER_BOOTED) && foreign_stateid(id))
3423 3425 return (NFS4ERR_STALE_STATEID);
3424 3426
3425 3427 sp = rfs4_findstate(id, find_invalid, lock_fp);
3426 3428 if (sp == NULL) {
3427 3429 return (what_stateid_error(id, OPENID));
3428 3430 }
3429 3431
3430 3432 if (rfs4_lease_expired(sp->rs_owner->ro_client)) {
3431 3433 if (lock_fp == TRUE)
3432 3434 rfs4_state_rele(sp);
3433 3435 else
3434 3436 rfs4_state_rele_nounlock(sp);
3435 3437 return (NFS4ERR_EXPIRED);
3436 3438 }
3437 3439
3438 3440 *spp = sp;
3439 3441
3440 3442 return (NFS4_OK);
3441 3443 }
3442 3444
3443 3445 nfsstat4
3444 3446 rfs4_get_state(stateid4 *stateid, rfs4_state_t **spp,
3445 3447 rfs4_dbsearch_type_t find_invalid)
3446 3448 {
3447 3449 return (rfs4_get_state_lockit(stateid, spp, find_invalid, TRUE));
3448 3450 }
3449 3451
3450 3452 int
3451 3453 rfs4_check_stateid_seqid(rfs4_state_t *sp, stateid4 *stateid)
3452 3454 {
3453 3455 stateid_t *id = (stateid_t *)stateid;
3454 3456
3455 3457 if (rfs4_lease_expired(sp->rs_owner->ro_client))
3456 3458 return (NFS4_CHECK_STATEID_EXPIRED);
3457 3459
3458 3460 /* Stateid is some time in the future - that's bad */
3459 3461 if (sp->rs_stateid.bits.chgseq < id->bits.chgseq)
3460 3462 return (NFS4_CHECK_STATEID_BAD);
3461 3463
3462 3464 if (sp->rs_stateid.bits.chgseq == id->bits.chgseq + 1)
3463 3465 return (NFS4_CHECK_STATEID_REPLAY);
3464 3466
3465 3467 /* Stateid is some time in the past - that's old */
3466 3468 if (sp->rs_stateid.bits.chgseq > id->bits.chgseq)
3467 3469 return (NFS4_CHECK_STATEID_OLD);
3468 3470
3469 3471 /* Caller needs to know about confirmation before closure */
3470 3472 if (sp->rs_owner->ro_need_confirm)
3471 3473 return (NFS4_CHECK_STATEID_UNCONFIRMED);
3472 3474
3473 3475 if (sp->rs_closed == TRUE)
3474 3476 return (NFS4_CHECK_STATEID_CLOSED);
3475 3477
3476 3478 return (NFS4_CHECK_STATEID_OKAY);
3477 3479 }
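
The checks above order the client-supplied seqid against the server's current seqid for the open: newer than current is bad, exactly one behind is a replay of the previous request, and anything older is simply old. A minimal user-space sketch of just that ordering, leaving out the lease, confirmation, and close checks, could look like this:

#include <stdio.h>
#include <stdint.h>

enum seq_check { SEQ_OKAY, SEQ_BAD, SEQ_REPLAY, SEQ_OLD };

/*
 * Compare the seqid presented by the client (id_seq) against the
 * server's current seqid for the state (cur_seq), as in
 * rfs4_check_stateid_seqid().
 */
static enum seq_check
check_seqid(uint32_t cur_seq, uint32_t id_seq)
{
	if (cur_seq < id_seq)
		return (SEQ_BAD);	/* stateid from the future */
	if (cur_seq == id_seq + 1)
		return (SEQ_REPLAY);	/* retransmission of the last request */
	if (cur_seq > id_seq)
		return (SEQ_OLD);	/* stateid has since been superseded */
	return (SEQ_OKAY);		/* seqids match */
}

int
main(void)
{
	printf("%d\n", check_seqid(5, 6));	/* SEQ_BAD */
	printf("%d\n", check_seqid(5, 4));	/* SEQ_REPLAY */
	printf("%d\n", check_seqid(5, 2));	/* SEQ_OLD */
	printf("%d\n", check_seqid(5, 5));	/* SEQ_OKAY */
	return (0);
}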
3478 3480
3479 3481 int
3480 3482 rfs4_check_lo_stateid_seqid(rfs4_lo_state_t *lsp, stateid4 *stateid)
3481 3483 {
3482 3484 stateid_t *id = (stateid_t *)stateid;
3483 3485
3484 3486 if (rfs4_lease_expired(lsp->rls_state->rs_owner->ro_client))
3485 3487 return (NFS4_CHECK_STATEID_EXPIRED);
3486 3488
3487 3489 /* Stateid is some time in the future - that's bad */
3488 3490 if (lsp->rls_lockid.bits.chgseq < id->bits.chgseq)
3489 3491 return (NFS4_CHECK_STATEID_BAD);
3490 3492
3491 3493 if (lsp->rls_lockid.bits.chgseq == id->bits.chgseq + 1)
3492 3494 return (NFS4_CHECK_STATEID_REPLAY);
3493 3495
3494 3496 /* Stateid is some time in the past - that's old */
3495 3497 if (lsp->rls_lockid.bits.chgseq > id->bits.chgseq)
3496 3498 return (NFS4_CHECK_STATEID_OLD);
3497 3499
3498 3500 if (lsp->rls_state->rs_closed == TRUE)
3499 3501 return (NFS4_CHECK_STATEID_CLOSED);
3500 3502
3501 3503 return (NFS4_CHECK_STATEID_OKAY);
3502 3504 }
3503 3505
3504 3506 nfsstat4
3505 3507 rfs4_get_deleg_state(stateid4 *stateid, rfs4_deleg_state_t **dspp)
3506 3508 {
3507 3509 stateid_t *id = (stateid_t *)stateid;
3508 3510 rfs4_deleg_state_t *dsp;
3509 3511
3510 3512 *dspp = NULL;
3511 3513
3512 3514 /* If we are booted as a cluster node, was stateid locally generated? */
3513 3515 if ((cluster_bootflags & CLUSTER_BOOTED) && foreign_stateid(id))
3514 3516 return (NFS4ERR_STALE_STATEID);
3515 3517
3516 3518 dsp = rfs4_finddelegstate(id);
3517 3519 if (dsp == NULL) {
3518 3520 return (what_stateid_error(id, DELEGID));
3519 3521 }
3520 3522
3521 3523 if (rfs4_lease_expired(dsp->rds_client)) {
3522 3524 rfs4_deleg_state_rele(dsp);
3523 3525 return (NFS4ERR_EXPIRED);
3524 3526 }
3525 3527
3526 3528 *dspp = dsp;
3527 3529
3528 3530 return (NFS4_OK);
3529 3531 }
3530 3532
3531 3533 nfsstat4
3532 3534 rfs4_get_lo_state(stateid4 *stateid, rfs4_lo_state_t **lspp, bool_t lock_fp)
3533 3535 {
3534 3536 stateid_t *id = (stateid_t *)stateid;
3535 3537 rfs4_lo_state_t *lsp;
3536 3538
3537 3539 *lspp = NULL;
3538 3540
3539 3541 /* If we are booted as a cluster node, was stateid locally generated? */
3540 3542 if ((cluster_bootflags & CLUSTER_BOOTED) && foreign_stateid(id))
3541 3543 return (NFS4ERR_STALE_STATEID);
3542 3544
3543 3545 lsp = rfs4_findlo_state(id, lock_fp);
3544 3546 if (lsp == NULL) {
3545 3547 return (what_stateid_error(id, LOCKID));
3546 3548 }
3547 3549
3548 3550 if (rfs4_lease_expired(lsp->rls_state->rs_owner->ro_client)) {
3549 3551 rfs4_lo_state_rele(lsp, lock_fp);
3550 3552 return (NFS4ERR_EXPIRED);
3551 3553 }
3552 3554
3553 3555 *lspp = lsp;
3554 3556
3555 3557 return (NFS4_OK);
3556 3558 }
3557 3559
3558 3560 static nfsstat4
3559 3561 rfs4_get_all_state(stateid4 *sid, rfs4_state_t **spp,
3560 3562 rfs4_deleg_state_t **dspp, rfs4_lo_state_t **lspp)
3561 3563 {
3562 3564 rfs4_state_t *sp = NULL;
3563 3565 rfs4_deleg_state_t *dsp = NULL;
3564 3566 rfs4_lo_state_t *lsp = NULL;
3565 3567 stateid_t *id;
3566 3568 nfsstat4 status;
3567 3569
3568 3570 *spp = NULL; *dspp = NULL; *lspp = NULL;
3569 3571
3570 3572 id = (stateid_t *)sid;
3571 3573 switch (id->bits.type) {
3572 3574 case OPENID:
3573 3575 status = rfs4_get_state_lockit(sid, &sp, FALSE, FALSE);
3574 3576 break;
3575 3577 case DELEGID:
3576 3578 status = rfs4_get_deleg_state(sid, &dsp);
3577 3579 break;
3578 3580 case LOCKID:
3579 3581 status = rfs4_get_lo_state(sid, &lsp, FALSE);
3580 3582 if (status == NFS4_OK) {
3581 3583 sp = lsp->rls_state;
3582 3584 rfs4_dbe_hold(sp->rs_dbe);
3583 3585 }
3584 3586 break;
3585 3587 default:
3586 3588 status = NFS4ERR_BAD_STATEID;
3587 3589 }
3588 3590
3589 3591 if (status == NFS4_OK) {
3590 3592 *spp = sp;
3591 3593 *dspp = dsp;
3592 3594 *lspp = lsp;
3593 3595 }
3594 3596
3595 3597 return (status);
3596 3598 }
3597 3599
3598 3600 /*
3599 3601 * Given the I/O mode (FREAD or FWRITE), this checks whether the
3600 3602 * rfs4_state_t struct has access to do this operation and if so
3601 3603 * return NFS4_OK; otherwise the proper NFSv4 error is returned.
3602 3604 */
3603 3605 nfsstat4
3604 3606 rfs4_state_has_access(rfs4_state_t *sp, int mode, vnode_t *vp)
3605 3607 {
3606 3608 nfsstat4 stat = NFS4_OK;
3607 3609 rfs4_file_t *fp;
3608 3610 bool_t create = FALSE;
3609 3611
3610 3612 rfs4_dbe_lock(sp->rs_dbe);
3611 3613 if (mode == FWRITE) {
3612 3614 if (!(sp->rs_share_access & OPEN4_SHARE_ACCESS_WRITE)) {
3613 3615 stat = NFS4ERR_OPENMODE;
3614 3616 }
3615 3617 } else if (mode == FREAD) {
3616 3618 if (!(sp->rs_share_access & OPEN4_SHARE_ACCESS_READ)) {
3617 3619 /*
3618 3620 * If we have OPENed the file with DENYing access
3619 3621 * to both READ and WRITE then no one else could
3620 3622 * have OPENed the file, hence no conflicting READ
3621 3623 * deny. This check is merely an optimization.
3622 3624 */
3623 3625 if (sp->rs_share_deny == OPEN4_SHARE_DENY_BOTH)
3624 3626 goto out;
3625 3627
3626 3628 /* Check against file struct's DENY mode */
3627 3629 fp = rfs4_findfile(vp, NULL, &create);
3628 3630 if (fp != NULL) {
3629 3631 int deny_read = 0;
3630 3632 rfs4_dbe_lock(fp->rf_dbe);
3631 3633 /*
3632 3634 * Check if any other open owner has the file
3633 3635 * OPENed with deny READ.
3634 3636 */
3635 3637 if (sp->rs_share_deny & OPEN4_SHARE_DENY_READ)
3636 3638 deny_read = 1;
3637 3639 ASSERT(fp->rf_deny_read >= deny_read);
3638 3640 if (fp->rf_deny_read > deny_read)
3639 3641 stat = NFS4ERR_OPENMODE;
3640 3642 rfs4_dbe_unlock(fp->rf_dbe);
3641 3643 rfs4_file_rele(fp);
3642 3644 }
3643 3645 }
3644 3646 } else {
3645 3647 /* Illegal I/O mode */
3646 3648 stat = NFS4ERR_INVAL;
3647 3649 }
3648 3650 out:
3649 3651 rfs4_dbe_unlock(sp->rs_dbe);
3650 3652 return (stat);
3651 3653 }
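
rfs4_state_has_access() is the share-reservation check: a WRITE requires OPEN4_SHARE_ACCESS_WRITE, and a READ done under an open that lacks OPEN4_SHARE_ACCESS_READ is tolerated only when no other open owner holds the file open with deny-READ. A self-contained sketch of that policy follows, using the standard NFSv4 share-bit values; the other_deny_read flag is a stand-in for the per-file deny_read accounting, not a kernel interface.

#include <stdio.h>

/* Share access/deny bits as defined by the NFSv4 protocol. */
#define	SHARE_ACCESS_READ	0x01
#define	SHARE_ACCESS_WRITE	0x02
#define	SHARE_DENY_READ		0x01
#define	SHARE_DENY_WRITE	0x02
#define	SHARE_DENY_BOTH		0x03

enum io_mode { IO_READ, IO_WRITE };

/*
 * Model of rfs4_state_has_access(): returns 0 if the I/O is allowed,
 * nonzero if it must be rejected with NFS4ERR_OPENMODE.
 */
static int
openmode_denied(enum io_mode mode, int share_access, int share_deny,
    int other_deny_read)
{
	if (mode == IO_WRITE)
		return (!(share_access & SHARE_ACCESS_WRITE));
	if (share_access & SHARE_ACCESS_READ)
		return (0);
	/*
	 * Reading without ACCESS_READ: if this open denies both READ and
	 * WRITE nobody else can have the file open, so there can be no
	 * conflicting deny-READ; otherwise defer to the file's accounting.
	 */
	if (share_deny == SHARE_DENY_BOTH)
		return (0);
	return (other_deny_read);
}

int
main(void)
{
	/* Write-only open doing a READ while another opener denies READ. */
	printf("%d\n", openmode_denied(IO_READ, SHARE_ACCESS_WRITE, 0, 1));
	/* Same open, but no conflicting deny-READ anywhere. */
	printf("%d\n", openmode_denied(IO_READ, SHARE_ACCESS_WRITE, 0, 0));
	return (0);
}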
3652 3654
3653 3655 /*
3654 3656 * Given the I/O mode (FREAD or FWRITE), the vnode, the stateid and whether
3655 3657 * the file is being truncated, return NFS4_OK if allowed or appropriate
3656 3658 * V4 error if not. Note NFS4ERR_DELAY will be returned and a recall on
3657 3659 * the associated file will be done if the I/O is not consistent with any
3658 3660 * delegation in effect on the file. Should be holding VOP_RWLOCK, either
3659 3661 * as reader or writer as appropriate. rfs4_op_open will acquire the
3660 3662 * VOP_RWLOCK as writer when setting up delegation. If the stateid is bad
3661 3663 * this routine will return NFS4ERR_BAD_STATEID. In addition, through the
3662 3664 * deleg parameter, we will return whether a write delegation is held by
3663 3665 * the client associated with this stateid.
3664 3666 * If the server instance associated with the relevant client is in its
3665 3667 * grace period, return NFS4ERR_GRACE.
3666 3668 */
3667 3669
3668 3670 nfsstat4
3669 3671 rfs4_check_stateid(int mode, vnode_t *vp,
3670 3672 stateid4 *stateid, bool_t trunc, bool_t *deleg,
3671 3673 bool_t do_access, caller_context_t *ct)
3672 3674 {
3673 3675 rfs4_file_t *fp;
3674 3676 bool_t create = FALSE;
3675 3677 rfs4_state_t *sp;
3676 3678 rfs4_deleg_state_t *dsp;
3677 3679 rfs4_lo_state_t *lsp;
3678 3680 stateid_t *id = (stateid_t *)stateid;
3679 3681 nfsstat4 stat = NFS4_OK;
3680 3682
3681 3683 if (ct != NULL) {
3682 3684 ct->cc_sysid = 0;
3683 3685 ct->cc_pid = 0;
3684 3686 ct->cc_caller_id = nfs4_srv_caller_id;
3685 3687 ct->cc_flags = CC_DONTBLOCK;
3686 3688 }
3687 3689
3688 3690 if (ISSPECIAL(stateid)) {
3689 3691 fp = rfs4_findfile(vp, NULL, &create);
3690 3692 if (fp == NULL)
3691 3693 return (NFS4_OK);
3692 3694 if (fp->rf_dinfo.rd_dtype == OPEN_DELEGATE_NONE) {
3693 3695 rfs4_file_rele(fp);
3694 3696 return (NFS4_OK);
3695 3697 }
3696 3698 if (mode == FWRITE ||
3697 3699 fp->rf_dinfo.rd_dtype == OPEN_DELEGATE_WRITE) {
3698 3700 rfs4_recall_deleg(fp, trunc, NULL);
3699 3701 rfs4_file_rele(fp);
3700 3702 return (NFS4ERR_DELAY);
3701 3703 }
3702 3704 rfs4_file_rele(fp);
3703 3705 return (NFS4_OK);
3704 3706 } else {
3705 3707 stat = rfs4_get_all_state(stateid, &sp, &dsp, &lsp);
3706 3708 if (stat != NFS4_OK)
3707 3709 return (stat);
3708 3710 if (lsp != NULL) {
3709 3711 /* Is associated server instance in its grace period? */
3710 3712 if (rfs4_clnt_in_grace(lsp->rls_locker->rl_client)) {
3711 3713 rfs4_lo_state_rele(lsp, FALSE);
3712 3714 if (sp != NULL)
3713 3715 rfs4_state_rele_nounlock(sp);
3714 3716 return (NFS4ERR_GRACE);
3715 3717 }
3716 3718 if (id->bits.type == LOCKID) {
3717 3719 /* Seqid in the future? - that's bad */
3718 3720 if (lsp->rls_lockid.bits.chgseq <
3719 3721 id->bits.chgseq) {
3720 3722 rfs4_lo_state_rele(lsp, FALSE);
3721 3723 if (sp != NULL)
3722 3724 rfs4_state_rele_nounlock(sp);
3723 3725 return (NFS4ERR_BAD_STATEID);
3724 3726 }
3725 3727 /* Seqid in the past? - that's old */
3726 3728 if (lsp->rls_lockid.bits.chgseq >
3727 3729 id->bits.chgseq) {
3728 3730 rfs4_lo_state_rele(lsp, FALSE);
3729 3731 if (sp != NULL)
3730 3732 rfs4_state_rele_nounlock(sp);
3731 3733 return (NFS4ERR_OLD_STATEID);
3732 3734 }
3733 3735 /* Ensure specified filehandle matches */
3734 3736 if (lsp->rls_state->rs_finfo->rf_vp != vp) {
3735 3737 rfs4_lo_state_rele(lsp, FALSE);
3736 3738 if (sp != NULL)
3737 3739 rfs4_state_rele_nounlock(sp);
3738 3740 return (NFS4ERR_BAD_STATEID);
3739 3741 }
3740 3742 }
3741 3743 if (ct != NULL) {
3742 3744 ct->cc_sysid =
3743 3745 lsp->rls_locker->rl_client->rc_sysidt;
3744 3746 ct->cc_pid = lsp->rls_locker->rl_pid;
3745 3747 }
3746 3748 rfs4_lo_state_rele(lsp, FALSE);
3747 3749 }
3748 3750
3749 3751 /* Stateid provided was an "open" stateid */
3750 3752 if (sp != NULL) {
3751 3753 /* Is associated server instance in its grace period? */
3752 3754 if (rfs4_clnt_in_grace(sp->rs_owner->ro_client)) {
3753 3755 rfs4_state_rele_nounlock(sp);
3754 3756 return (NFS4ERR_GRACE);
3755 3757 }
3756 3758 if (id->bits.type == OPENID) {
3757 3759 /* Seqid in the future? - that's bad */
3758 3760 if (sp->rs_stateid.bits.chgseq <
3759 3761 id->bits.chgseq) {
3760 3762 rfs4_state_rele_nounlock(sp);
3761 3763 return (NFS4ERR_BAD_STATEID);
3762 3764 }
3763 3765 /* Seqid in the past - that's old */
3764 3766 if (sp->rs_stateid.bits.chgseq >
3765 3767 id->bits.chgseq) {
3766 3768 rfs4_state_rele_nounlock(sp);
3767 3769 return (NFS4ERR_OLD_STATEID);
3768 3770 }
3769 3771 }
3770 3772 /* Ensure specified filehandle matches */
3771 3773 if (sp->rs_finfo->rf_vp != vp) {
3772 3774 rfs4_state_rele_nounlock(sp);
3773 3775 return (NFS4ERR_BAD_STATEID);
3774 3776 }
3775 3777
3776 3778 if (sp->rs_owner->ro_need_confirm) {
3777 3779 rfs4_state_rele_nounlock(sp);
3778 3780 return (NFS4ERR_BAD_STATEID);
3779 3781 }
3780 3782
3781 3783 if (sp->rs_closed == TRUE) {
3782 3784 rfs4_state_rele_nounlock(sp);
3783 3785 return (NFS4ERR_OLD_STATEID);
3784 3786 }
3785 3787
3786 3788 if (do_access)
3787 3789 stat = rfs4_state_has_access(sp, mode, vp);
3788 3790 else
3789 3791 stat = NFS4_OK;
3790 3792
3791 3793 /*
3792 3794 * Return whether this state has write
3793 3795 * delegation if desired
3794 3796 */
3795 3797 if (deleg && (sp->rs_finfo->rf_dinfo.rd_dtype ==
3796 3798 OPEN_DELEGATE_WRITE))
3797 3799 *deleg = TRUE;
3798 3800
3799 3801 /*
3800 3802 * We got a valid stateid, so we update the
3801 3803 * lease on the client. Ideally we would like
3802 3804 * to do this after the calling op succeeds,
3803 3805 * but for now this will be good
3804 3806 * enough. Callers of this routine are
3805 3807 * currently insulated from the state stuff.
3806 3808 */
3807 3809 rfs4_update_lease(sp->rs_owner->ro_client);
3808 3810
3809 3811 /*
3810 3812 * If a delegation is present on this file and
3811 3813 * this is a WRITE, then update the lastwrite
3812 3814 * time to indicate that activity is present.
3813 3815 */
3814 3816 if (sp->rs_finfo->rf_dinfo.rd_dtype ==
3815 3817 OPEN_DELEGATE_WRITE &&
3816 3818 mode == FWRITE) {
3817 3819 sp->rs_finfo->rf_dinfo.rd_time_lastwrite =
3818 3820 gethrestime_sec();
3819 3821 }
3820 3822
3821 3823 rfs4_state_rele_nounlock(sp);
3822 3824
3823 3825 return (stat);
3824 3826 }
3825 3827
3826 3828 if (dsp != NULL) {
3827 3829 /* Is associated server instance in its grace period? */
3828 3830 if (rfs4_clnt_in_grace(dsp->rds_client)) {
3829 3831 rfs4_deleg_state_rele(dsp);
3830 3832 return (NFS4ERR_GRACE);
3831 3833 }
3832 3834 if (dsp->rds_delegid.bits.chgseq != id->bits.chgseq) {
3833 3835 rfs4_deleg_state_rele(dsp);
3834 3836 return (NFS4ERR_BAD_STATEID);
3835 3837 }
3836 3838
3837 3839 /* Ensure specified filehandle matches */
3838 3840 if (dsp->rds_finfo->rf_vp != vp) {
3839 3841 rfs4_deleg_state_rele(dsp);
3840 3842 return (NFS4ERR_BAD_STATEID);
3841 3843 }
3842 3844 /*
3843 3845 * Return whether this state has write
3844 3846 * delegation if desired
3845 3847 */
3846 3848 if (deleg && (dsp->rds_finfo->rf_dinfo.rd_dtype ==
3847 3849 OPEN_DELEGATE_WRITE))
3848 3850 *deleg = TRUE;
3849 3851
3850 3852 rfs4_update_lease(dsp->rds_client);
3851 3853
3852 3854 /*
3853 3855 * If a delegation is present on this file and
3854 3856 * this is a WRITE, then update the lastwrite
3855 3857 * time to indicate that activity is present.
3856 3858 */
3857 3859 if (dsp->rds_finfo->rf_dinfo.rd_dtype ==
3858 3860 OPEN_DELEGATE_WRITE && mode == FWRITE) {
3859 3861 dsp->rds_finfo->rf_dinfo.rd_time_lastwrite =
3860 3862 gethrestime_sec();
3861 3863 }
3862 3864
3863 3865 /*
3864 3866 * XXX - what happens if this is a WRITE and the
3865 3867 * delegation type is READ.
3866 3868 */
3867 3869 rfs4_deleg_state_rele(dsp);
3868 3870
3869 3871 return (stat);
3870 3872 }
3871 3873 /*
3872 3874 * If we got this far, something bad happened
3873 3875 */
3874 3876 return (NFS4ERR_BAD_STATEID);
3875 3877 }
3876 3878 }
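
At the top of rfs4_check_stateid(), I/O carried under one of the special stateids skips the state lookup but still has to respect delegations: any WRITE, and any I/O against a file with a write delegation, causes a recall and an NFS4ERR_DELAY answer. That conflict test, reduced to a stand-alone predicate for illustration, is sketched below.

#include <stdio.h>

enum deleg_type { DELEG_NONE, DELEG_READ, DELEG_WRITE };

/*
 * Returns 1 if a special-stateid I/O conflicts with the file's
 * delegation, so the server should recall it and answer NFS4ERR_DELAY;
 * 0 if the I/O can proceed immediately.
 */
static int
special_stateid_conflict(int is_write, enum deleg_type dt)
{
	if (dt == DELEG_NONE)
		return (0);
	/* Writes conflict with any delegation; reads only with a write one. */
	return (is_write || dt == DELEG_WRITE);
}

int
main(void)
{
	printf("%d\n", special_stateid_conflict(1, DELEG_READ));	/* 1 */
	printf("%d\n", special_stateid_conflict(0, DELEG_READ));	/* 0 */
	printf("%d\n", special_stateid_conflict(0, DELEG_WRITE));	/* 1 */
	return (0);
}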
3877 3879
3878 3880
3879 3881 /*
3880 3882 * This is a special function: for the file struct provided, the
3881 3883 * server removes/closes all current state associated with the
3882 3884 * file. The prime use of this would be with OP_REMOVE to force the
3883 3885 * release of state and particularly of file locks.
3884 3886 *
3885 3887 * There is an assumption that there are no delegations outstanding on
3886 3888 * this file at this point. The caller should have waited for those
3887 3889 * to be returned or revoked.
3888 3890 */
3889 3891 void
3890 3892 rfs4_close_all_state(rfs4_file_t *fp)
3891 3893 {
3892 3894 rfs4_state_t *sp;
3893 3895
3894 3896 rfs4_dbe_lock(fp->rf_dbe);
3895 3897
3896 3898 #ifdef DEBUG
3897 3899 /* only applies when server is handing out delegations */
3898 3900 if (nfs4_get_deleg_policy() != SRV_NEVER_DELEGATE)
3899 3901 ASSERT(fp->rf_dinfo.rd_hold_grant > 0);
3900 3902 #endif
3901 3903
3902 3904 /* No delegations for this file */
3903 3905 ASSERT(list_is_empty(&fp->rf_delegstatelist));
3904 3906
3905 3907 /* Make sure that it cannot be found */
3906 3908 rfs4_dbe_invalidate(fp->rf_dbe);
3907 3909
3908 3910 if (fp->rf_vp == NULL) {
3909 3911 rfs4_dbe_unlock(fp->rf_dbe);
3910 3912 return;
3911 3913 }
3912 3914 rfs4_dbe_unlock(fp->rf_dbe);
3913 3915
3914 3916 /*
3915 3917 * Hold as writer to prevent other server threads from
3916 3918 * processing requests related to the file while all state is
3917 3919 * being removed.
3918 3920 */
3919 3921 rw_enter(&fp->rf_file_rwlock, RW_WRITER);
3920 3922
3921 3923 /* Remove ALL state from the file */
3922 3924 while (sp = rfs4_findstate_by_file(fp)) {
3923 3925 rfs4_state_close(sp, FALSE, FALSE, CRED());
3924 3926 rfs4_state_rele_nounlock(sp);
3925 3927 }
3926 3928
3927 3929 /*
3928 3930 * This is only safe since there are no further references to
3929 3931 * the file.
3930 3932 */
3931 3933 rfs4_dbe_lock(fp->rf_dbe);
3932 3934 if (fp->rf_vp) {
3933 3935 vnode_t *vp = fp->rf_vp;
3934 3936
3935 3937 mutex_enter(&vp->v_vsd_lock);
3936 3938 (void) vsd_set(vp, nfs4_srv_vkey, NULL);
3937 3939 mutex_exit(&vp->v_vsd_lock);
3938 3940 VN_RELE(vp);
3939 3941 fp->rf_vp = NULL;
3940 3942 }
3941 3943 rfs4_dbe_unlock(fp->rf_dbe);
3942 3944
3943 3945 /* Finally, let other references proceed */
3944 3946 rw_exit(&fp->rf_file_rwlock);
3945 3947 }
3946 3948
3947 3949 /*
3948 3950 * This function is used as a target for the rfs4_dbe_walk() call
3949 3951 * below. The purpose of this function is to see if the
3950 3952 * lockowner_state refers to a file that resides within the exportinfo
3951 3953 * export. If so, then remove the lock_owner state (file locks and
3952 3954 * share "locks") for this object, since the server is unexporting
3953 3955 * the specified directory. Be sure to invalidate the object after
3954 3956 * the state has been released.
3955 3957 */
3956 3958 static void
3957 3959 rfs4_lo_state_walk_callout(rfs4_entry_t u_entry, void *e)
3958 3960 {
3959 3961 rfs4_lo_state_t *lsp = (rfs4_lo_state_t *)u_entry;
3960 3962 struct exportinfo *exi = (struct exportinfo *)e;
3961 3963 nfs_fh4_fmt_t fhfmt4, *exi_fhp, *finfo_fhp;
3962 3964 fhandle_t *efhp;
3963 3965
3964 3966 efhp = (fhandle_t *)&exi->exi_fh;
3965 3967 exi_fhp = (nfs_fh4_fmt_t *)&fhfmt4;
3966 3968
3967 3969 FH_TO_FMT4(efhp, exi_fhp);
3968 3970
3969 3971 finfo_fhp = (nfs_fh4_fmt_t *)lsp->rls_state->rs_finfo->
3970 3972 rf_filehandle.nfs_fh4_val;
3971 3973
3972 3974 if (EQFSID(&finfo_fhp->fh4_fsid, &exi_fhp->fh4_fsid) &&
3973 3975 bcmp(&finfo_fhp->fh4_xdata, &exi_fhp->fh4_xdata,
3974 3976 exi_fhp->fh4_xlen) == 0) {
3975 3977 rfs4_state_close(lsp->rls_state, FALSE, FALSE, CRED());
3976 3978 rfs4_dbe_invalidate(lsp->rls_dbe);
3977 3979 rfs4_dbe_invalidate(lsp->rls_state->rs_dbe);
3978 3980 }
3979 3981 }
3980 3982
3981 3983 /*
3982 3984 * This function is used as a target for the rfs4_dbe_walk() call
3983 3985 * below. The purpose of this function is to see if the state refers
3984 3986 * to a file that resides within the exportinfo export. If so, then
3985 3987 * remove the open state for this object, since the server is
3986 3988 * unexporting the specified directory. The main result for this
3987 3989 * type of entry is to invalidate it such that it will not be found
3988 3990 * in the future.
3989 3991 */
3990 3992 static void
3991 3993 rfs4_state_walk_callout(rfs4_entry_t u_entry, void *e)
3992 3994 {
3993 3995 rfs4_state_t *sp = (rfs4_state_t *)u_entry;
3994 3996 struct exportinfo *exi = (struct exportinfo *)e;
3995 3997 nfs_fh4_fmt_t fhfmt4, *exi_fhp, *finfo_fhp;
3996 3998 fhandle_t *efhp;
3997 3999
3998 4000 efhp = (fhandle_t *)&exi->exi_fh;
3999 4001 exi_fhp = (nfs_fh4_fmt_t *)&fhfmt4;
4000 4002
4001 4003 FH_TO_FMT4(efhp, exi_fhp);
4002 4004
4003 4005 finfo_fhp =
4004 4006 (nfs_fh4_fmt_t *)sp->rs_finfo->rf_filehandle.nfs_fh4_val;
4005 4007
4006 4008 if (EQFSID(&finfo_fhp->fh4_fsid, &exi_fhp->fh4_fsid) &&
4007 4009 bcmp(&finfo_fhp->fh4_xdata, &exi_fhp->fh4_xdata,
4008 4010 exi_fhp->fh4_xlen) == 0) {
4009 4011 rfs4_state_close(sp, TRUE, FALSE, CRED());
4010 4012 rfs4_dbe_invalidate(sp->rs_dbe);
4011 4013 }
4012 4014 }
4013 4015
4014 4016 /*
4015 4017 * This function is used as a target for the rfs4_dbe_walk() call
4016 4018 * below. The purpose of this function is to see if the state refers
4017 4019 * to a file that resides within the exportinfo export. If so, then
4018 4020 * remove the deleg state for this object, since the server is
4019 4021 * unexporting the specified directory. The main result for this
4020 4022 * type of entry is to invalidate it such that it will not be found
4021 4023 * in the future.
4022 4024 */
4023 4025 static void
4024 4026 rfs4_deleg_state_walk_callout(rfs4_entry_t u_entry, void *e)
4025 4027 {
4026 4028 rfs4_deleg_state_t *dsp = (rfs4_deleg_state_t *)u_entry;
4027 4029 struct exportinfo *exi = (struct exportinfo *)e;
4028 4030 nfs_fh4_fmt_t fhfmt4, *exi_fhp, *finfo_fhp;
4029 4031 fhandle_t *efhp;
4030 4032
4031 4033 efhp = (fhandle_t *)&exi->exi_fh;
4032 4034 exi_fhp = (nfs_fh4_fmt_t *)&fhfmt4;
4033 4035
4034 4036 FH_TO_FMT4(efhp, exi_fhp);
4035 4037
4036 4038 finfo_fhp =
4037 4039 (nfs_fh4_fmt_t *)dsp->rds_finfo->rf_filehandle.nfs_fh4_val;
4038 4040
4039 4041 if (EQFSID(&finfo_fhp->fh4_fsid, &exi_fhp->fh4_fsid) &&
4040 4042 bcmp(&finfo_fhp->fh4_xdata, &exi_fhp->fh4_xdata,
4041 4043 exi_fhp->fh4_xlen) == 0) {
4042 4044 rfs4_dbe_invalidate(dsp->rds_dbe);
4043 4045 }
4044 4046 }
4045 4047
4046 4048 /*
4047 4049 * This function is used as a target for the rfs4_dbe_walk() call
4048 4050 * below. The purpose of this function is to see if the state refers
4049 4051 * to a file that resides within the exportinfo export. If so, then
4050 4052 * release the vnode hold for this object, since the server is
4051 4053 * unexporting the specified directory. Invalidation will prevent
4052 4054 * this struct from being found in the future.
4053 4055 */
4054 4056 static void
4055 4057 rfs4_file_walk_callout(rfs4_entry_t u_entry, void *e)
4056 4058 {
4057 4059 rfs4_file_t *fp = (rfs4_file_t *)u_entry;
4058 4060 struct exportinfo *exi = (struct exportinfo *)e;
4059 4061 nfs_fh4_fmt_t fhfmt4, *exi_fhp, *finfo_fhp;
4060 4062 fhandle_t *efhp;
4061 4063
4062 4064 efhp = (fhandle_t *)&exi->exi_fh;
4063 4065 exi_fhp = (nfs_fh4_fmt_t *)&fhfmt4;
4064 4066
4065 4067 FH_TO_FMT4(efhp, exi_fhp);
4066 4068
4067 4069 finfo_fhp = (nfs_fh4_fmt_t *)fp->rf_filehandle.nfs_fh4_val;
4068 4070
4069 4071 if (EQFSID(&finfo_fhp->fh4_fsid, &exi_fhp->fh4_fsid) &&
4070 4072 bcmp(&finfo_fhp->fh4_xdata, &exi_fhp->fh4_xdata,
4071 4073 exi_fhp->fh4_xlen) == 0) {
4072 4074 if (fp->rf_vp) {
4073 4075 vnode_t *vp = fp->rf_vp;
4074 4076
4075 4077 /*
4076 4078 * Don't leak monitors, and remove the reference
4077 4079 * placed on the vnode when the delegation was granted.
4078 4080 */
4079 4081 if (fp->rf_dinfo.rd_dtype == OPEN_DELEGATE_READ) {
4080 4082 (void) fem_uninstall(vp, deleg_rdops,
4081 4083 (void *)fp);
4082 4084 vn_open_downgrade(vp, FREAD);
4083 4085 } else if (fp->rf_dinfo.rd_dtype ==
4084 4086 OPEN_DELEGATE_WRITE) {
4085 4087 (void) fem_uninstall(vp, deleg_wrops,
4086 4088 (void *)fp);
4087 4089 vn_open_downgrade(vp, FREAD|FWRITE);
4088 4090 }
4089 4091 mutex_enter(&vp->v_vsd_lock);
4090 4092 (void) vsd_set(vp, nfs4_srv_vkey, NULL);
4091 4093 mutex_exit(&vp->v_vsd_lock);
4092 4094 VN_RELE(vp);
4093 4095 fp->rf_vp = NULL;
4094 4096 }
4095 4097 rfs4_dbe_invalidate(fp->rf_dbe);
4096 4098 }
4097 4099 }
4098 4100
4099 4101 /*
4100 4102 * Given a directory that is being unexported, clean up/release all
4101 4103 * state in the server that refers to objects residing underneath this
4102 4104 * particular export. The ordering of the release is important.
4103 4105 * Lock_owner, then state and then file.
4104 4106 */
4105 4107 void
4106 4108 rfs4_clean_state_exi(struct exportinfo *exi)
4107 4109 {
4108 4110 nfs4_srv_t *nsrv4;
4109 4111
4110 4112 /* curzone mightn't be exi_zone, so use exi_zone instead. */
4111 4113 ASSERT(exi->exi_zone == curzone || curzone == global_zone);
4112 4114 nsrv4 = zone_getspecific(rfs4_zone_key, exi->exi_zone);
4113 4115 if (nsrv4 == NULL) /* NOTE: NFSv4 cleanup MAY have already happened. */
4114 4116 return;
4115 4117 mutex_enter(&nsrv4->state_lock);
4116 4118
4117 4119 if (nsrv4->nfs4_server_state == NULL) {
4118 4120 mutex_exit(&nsrv4->state_lock);
4119 4121 return;
4120 4122 }
4121 4123
4122 4124 /* CSTYLED */
4123 4125 rfs4_dbe_walk(nsrv4->rfs4_lo_state_tab, rfs4_lo_state_walk_callout, exi);
4124 4126 rfs4_dbe_walk(nsrv4->rfs4_state_tab, rfs4_state_walk_callout, exi);
4125 4127 /* CSTYLED */
4126 4128 rfs4_dbe_walk(nsrv4->rfs4_deleg_state_tab, rfs4_deleg_state_walk_callout, exi);
4127 4129 rfs4_dbe_walk(nsrv4->rfs4_file_tab, rfs4_file_walk_callout, exi);
4128 4130
4129 4131 mutex_exit(&nsrv4->state_lock);
4130 4132 }
2595 lines elided