Fix NFS design problems re. multiple zone keys
Make NFS server zone-specific data all have the same lifetime
Fix rfs4_clean_state_exi
Fix exi_cache_reclaim
Fix mistakes in zone keys work
More fixes re. exi_zoneid and exi_tree
(danmcd -> Keep some ASSERT()s around for readability.)
--- old/usr/src/uts/common/fs/nfs/nfs4_dispatch.c
+++ new/usr/src/uts/common/fs/nfs/nfs4_dispatch.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
24 24 * Use is subject to license terms.
25 25 */
26 26
27 27 /*
28 28 * Copyright 2018 Nexenta Systems, Inc.
29 29 */
30 30
31 31 #include <sys/systm.h>
32 32 #include <sys/sdt.h>
33 33 #include <rpc/types.h>
34 34 #include <rpc/auth.h>
35 35 #include <rpc/auth_unix.h>
36 36 #include <rpc/auth_des.h>
37 37 #include <rpc/svc.h>
38 38 #include <rpc/xdr.h>
39 39 #include <nfs/nfs4.h>
40 40 #include <nfs/nfs_dispatch.h>
41 41 #include <nfs/nfs4_drc.h>
42 42
43 43 #define NFS4_MAX_MINOR_VERSION 0
44 44
45 45 /*
46 46 * The default size of the duplicate request cache
47 47 */
48 48 uint32_t nfs4_drc_max = 8 * 1024;
49 49
50 50 /*
51 51 * The number of buckets we'd like to hash the
52 52 * replies into. Do not change this on the fly.
53 53 */
54 54 uint32_t nfs4_drc_hash = 541;
55 55
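A note on the bucket count: 541 is prime, and because rfs4_find_dr() below derives the bucket index as xid % nfs4_drc_hash, a prime modulus spreads sequential or stride-patterned XIDs evenly across the buckets. A worked example with a hypothetical XID:

	bktdex = 19007 % 541;	/* == 72, since 541 * 35 == 18935 */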
56 56 static void rfs4_resource_err(struct svc_req *req, COMPOUND4args *argsp);
57 57
58 -extern zone_key_t rfs4_zone_key;
59 -
60 58 /*
61 59 * Initialize a duplicate request cache.
62 60 */
63 61 rfs4_drc_t *
64 62 rfs4_init_drc(uint32_t drc_size, uint32_t drc_hash_size)
65 63 {
66 64 rfs4_drc_t *drc;
67 65 uint32_t bki;
68 66
69 67 ASSERT(drc_size);
70 68 ASSERT(drc_hash_size);
71 69
72 70 drc = kmem_alloc(sizeof (rfs4_drc_t), KM_SLEEP);
73 71
74 72 drc->max_size = drc_size;
75 73 drc->in_use = 0;
76 74
77 75 mutex_init(&drc->lock, NULL, MUTEX_DEFAULT, NULL);
78 76
79 77 drc->dr_hash = drc_hash_size;
80 78
81 79 drc->dr_buckets = kmem_alloc(sizeof (list_t)*drc_hash_size, KM_SLEEP);
82 80
83 81 for (bki = 0; bki < drc_hash_size; bki++) {
84 82 list_create(&drc->dr_buckets[bki], sizeof (rfs4_dupreq_t),
85 83 offsetof(rfs4_dupreq_t, dr_bkt_next));
86 84 }
87 85
88 86 list_create(&(drc->dr_cache), sizeof (rfs4_dupreq_t),
89 87 offsetof(rfs4_dupreq_t, dr_next));
90 88
91 89 return (drc);
92 90 }
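For orientation, rfs4_init_drc() runs once per server instance to populate the nfs4_drc field that rfs4_fini_drc() and rfs4_dispatch() read below. A minimal sketch of such a call site, assuming it lives in the per-zone server init path (the init function itself is not part of this diff):

	/* Sketch: wiring the DRC into the per-zone server state. */
	nsrv4->nfs4_drc = rfs4_init_drc(nfs4_drc_max, nfs4_drc_hash);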
93 91
94 92 /*
95 93 * Destroy a duplicate request cache.
96 94 */
97 95 void
98 96 rfs4_fini_drc(void)
99 97 {
100 - nfs4_srv_t *nsrv4 = zone_getspecific(rfs4_zone_key, curzone);
98 + nfs4_srv_t *nsrv4 = nfs4_get_srv();
101 99 rfs4_drc_t *drc = nsrv4->nfs4_drc;
102 100 rfs4_dupreq_t *drp, *drp_next;
103 101
104 102 /* iterate over the dr_cache and free the entries */
105 103 for (drp = list_head(&(drc->dr_cache)); drp != NULL; drp = drp_next) {
106 104
107 105 if (drp->dr_state == NFS4_DUP_REPLAY)
108 106 rfs4_compound_free(&(drp->dr_res));
109 107
110 108 if (drp->dr_addr.buf != NULL)
111 109 kmem_free(drp->dr_addr.buf, drp->dr_addr.maxlen);
112 110
113 111 drp_next = list_next(&(drc->dr_cache), drp);
114 112
115 113 kmem_free(drp, sizeof (rfs4_dupreq_t));
116 114 }
117 115
118 116 mutex_destroy(&drc->lock);
119 117 kmem_free(drc->dr_buckets,
120 118 sizeof (list_t)*drc->dr_hash);
121 119 kmem_free(drc, sizeof (rfs4_drc_t));
122 120 }
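The nfs4_get_srv() call introduced above replaces an open-coded zone_getspecific() lookup, keeping the zone key private to a single accessor. A minimal sketch of what such an accessor could look like, assuming one key (hypothetically named nfs4_srv_zone_key here) registered at module init:

	/* Hypothetical accessor; the real key name and checks may differ. */
	nfs4_srv_t *
	nfs4_get_srv(void)
	{
		nfs4_srv_t *srv = zone_getspecific(nfs4_srv_zone_key, curzone);

		ASSERT(srv != NULL);
		return (srv);
	}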
123 121
124 122 /*
125 123 * rfs4_dr_chstate:
126 124 *
127 125 * Change the state of a rfs4_dupreq. If it's not in transition
128 126 * to the FREE state, return. If we are moving to the FREE state
129 127 * then we need to clean up the compound results and move the entry
130 128 * to the end of the list.
131 129 */
132 130 void
133 131 rfs4_dr_chstate(rfs4_dupreq_t *drp, int new_state)
134 132 {
135 133 rfs4_drc_t *drc;
136 134
137 135 ASSERT(drp);
138 136 ASSERT(drp->drc);
139 137 ASSERT(drp->dr_bkt);
140 138 ASSERT(MUTEX_HELD(&drp->drc->lock));
141 139
142 140 drp->dr_state = new_state;
143 141
144 142 if (new_state != NFS4_DUP_FREE)
145 143 return;
146 144
147 145 drc = drp->drc;
148 146
149 147 /*
150 148 * Remove entry from the bucket and
151 149 * dr_cache list, free compound results.
152 150 */
153 151 list_remove(drp->dr_bkt, drp);
154 152 list_remove(&(drc->dr_cache), drp);
155 153 rfs4_compound_free(&(drp->dr_res));
156 154 }
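Taken together with rfs4_find_dr() and rfs4_dispatch() below, the dupreq states form a small life cycle. A compact summary as C comments (the state names come from this file; the transition notes are inferred from the code):

	/*
	 * NFS4_DUP_NEW    - freshly claimed by rfs4_find_dr(); compound runs
	 * NFS4_DUP_INUSE  - cached reply is being (re)sent; must not be reaped
	 * NFS4_DUP_REPLAY - reply cached; replayable, or reclaimable by
	 *                   rfs4_alloc_dr() when the cache is full
	 * NFS4_DUP_FREE   - results freed; entry parked at the cache tail
	 */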
157 155
158 156 /*
159 157 * rfs4_alloc_dr:
160 158 *
161 159 * Malloc a new one if we have not reached our maximum cache
162 160 * limit, otherwise pick an entry off the tail -- Use if it
163 161 * is marked as NFS4_DUP_FREE, or is an entry in the
164 162 * NFS4_DUP_REPLAY state.
165 163 */
166 164 rfs4_dupreq_t *
167 165 rfs4_alloc_dr(rfs4_drc_t *drc)
168 166 {
169 167 rfs4_dupreq_t *drp_tail, *drp = NULL;
170 168
171 169 ASSERT(drc);
172 170 ASSERT(MUTEX_HELD(&drc->lock));
173 171
174 172 /*
175 173 * Have we hit the cache limit yet ?
176 174 */
177 175 if (drc->in_use < drc->max_size) {
178 176 /*
179 177 * nope, so let's malloc a new one
180 178 */
181 179 drp = kmem_zalloc(sizeof (rfs4_dupreq_t), KM_SLEEP);
182 180 drp->drc = drc;
183 181 drc->in_use++;
184 182 DTRACE_PROBE1(nfss__i__drc_new, rfs4_dupreq_t *, drp);
185 183 return (drp);
186 184 }
187 185
188 186 /*
189 187 * Cache is fully allocated; traverse the list
190 188 * backwards to find an entry we can reuse.
191 189 */
192 190 for (drp_tail = list_tail(&drc->dr_cache); drp_tail != NULL;
193 191 drp_tail = list_prev(&drc->dr_cache, drp_tail)) {
194 192
195 193 switch (drp_tail->dr_state) {
196 194
197 195 case NFS4_DUP_FREE:
198 196 list_remove(&(drc->dr_cache), drp_tail);
199 197 DTRACE_PROBE1(nfss__i__drc_freeclaim,
200 198 rfs4_dupreq_t *, drp_tail);
201 199 return (drp_tail);
202 200 /* NOTREACHED */
203 201
204 202 case NFS4_DUP_REPLAY:
205 203 /* grab it. */
206 204 rfs4_dr_chstate(drp_tail, NFS4_DUP_FREE);
207 205 DTRACE_PROBE1(nfss__i__drc_replayclaim,
208 206 rfs4_dupreq_t *, drp_tail);
209 207 return (drp_tail);
210 208 /* NOTREACHED */
211 209 }
212 210 }
213 211 DTRACE_PROBE1(nfss__i__drc_full, rfs4_drc_t *, drc);
214 212 return (NULL);
215 213 }
216 214
217 215 /*
218 216 * rfs4_find_dr:
219 217 *
220 218 * Search for an entry in the duplicate request cache by
221 219 * calculating the hash index based on the XID, and examining
222 220 * the entries in the hash bucket. If we find a match, return.
223 221 * Once we have searched the bucket we call rfs4_alloc_dr() to
224 222 * allocate a new entry, or reuse one that is available.
225 223 */
226 224 int
227 225 rfs4_find_dr(struct svc_req *req, rfs4_drc_t *drc, rfs4_dupreq_t **dup)
228 226 {
229 227
230 228 uint32_t the_xid;
231 229 list_t *dr_bkt;
232 230 rfs4_dupreq_t *drp;
233 231 int bktdex;
234 232
235 233 /*
236 234 * Get the XID, calculate the bucket and search to
237 235 * see if we need to replay from the cache.
238 236 */
239 237 the_xid = req->rq_xprt->xp_xid;
240 238 bktdex = the_xid % drc->dr_hash;
241 239
242 240 dr_bkt = (list_t *)
243 241 &(drc->dr_buckets[(the_xid % drc->dr_hash)]);
244 242
245 243 DTRACE_PROBE3(nfss__i__drc_bktdex,
246 244 int, bktdex,
247 245 uint32_t, the_xid,
248 246 list_t *, dr_bkt);
249 247
250 248 *dup = NULL;
251 249
252 250 mutex_enter(&drc->lock);
253 251 /*
254 252 * Search the bucket for a matching xid and address.
255 253 */
256 254 for (drp = list_head(dr_bkt); drp != NULL;
257 255 drp = list_next(dr_bkt, drp)) {
258 256
259 257 if (drp->dr_xid == the_xid &&
260 258 drp->dr_addr.len == req->rq_xprt->xp_rtaddr.len &&
261 259 bcmp((caddr_t)drp->dr_addr.buf,
262 260 (caddr_t)req->rq_xprt->xp_rtaddr.buf,
263 261 drp->dr_addr.len) == 0) {
264 262
265 263 /*
266 264 * Found a match so REPLAY the Reply
267 265 */
268 266 if (drp->dr_state == NFS4_DUP_REPLAY) {
269 267 rfs4_dr_chstate(drp, NFS4_DUP_INUSE);
270 268 mutex_exit(&drc->lock);
271 269 *dup = drp;
272 270 DTRACE_PROBE1(nfss__i__drc_replay,
273 271 rfs4_dupreq_t *, drp);
274 272 return (NFS4_DUP_REPLAY);
275 273 }
276 274
277 275 /*
278 276 * This entry must be in transition, so return
279 277 * the 'pending' status.
280 278 */
281 279 mutex_exit(&drc->lock);
282 280 return (NFS4_DUP_PENDING);
283 281 }
284 282 }
285 283
286 284 drp = rfs4_alloc_dr(drc);
287 285 mutex_exit(&drc->lock);
288 286
289 287 /*
290 288 * The DRC is full and all entries are in use. The caller
291 289 * should error out this request and force the client to
292 290 * retransmit -- effectively this is a resource issue: NFSD
293 291 * threads may be tied up in the underlying file system, or the
294 292 * cache size is too small for the server load.
295 293 */
296 294 if (drp == NULL)
297 295 return (NFS4_DUP_ERROR);
298 296
299 297 /*
300 298 * Init the state to NEW.
301 299 */
302 300 drp->dr_state = NFS4_DUP_NEW;
303 301
304 302 /*
305 303 * If needed, resize the address buffer
306 304 */
307 305 if (drp->dr_addr.maxlen < req->rq_xprt->xp_rtaddr.len) {
308 306 if (drp->dr_addr.buf != NULL)
309 307 kmem_free(drp->dr_addr.buf, drp->dr_addr.maxlen);
310 308 drp->dr_addr.maxlen = req->rq_xprt->xp_rtaddr.len;
311 309 drp->dr_addr.buf = kmem_alloc(drp->dr_addr.maxlen, KM_NOSLEEP);
312 310 if (drp->dr_addr.buf == NULL) {
313 311 /*
314 312 * If the malloc fails, mark the entry
315 313 * as free and put it on the tail.
316 314 */
317 315 drp->dr_addr.maxlen = 0;
318 316 drp->dr_state = NFS4_DUP_FREE;
319 317 mutex_enter(&drc->lock);
320 318 list_insert_tail(&(drc->dr_cache), drp);
321 319 mutex_exit(&drc->lock);
322 320 return (NFS4_DUP_ERROR);
323 321 }
324 322 }
325 323
326 324
327 325 /*
328 326 * Copy the address.
329 327 */
330 328 drp->dr_addr.len = req->rq_xprt->xp_rtaddr.len;
331 329
332 330 bcopy((caddr_t)req->rq_xprt->xp_rtaddr.buf,
333 331 (caddr_t)drp->dr_addr.buf,
334 332 drp->dr_addr.len);
335 333
336 334 drp->dr_xid = the_xid;
337 335 drp->dr_bkt = dr_bkt;
338 336
339 337 /*
340 338 * Insert at the head of the bucket and
341 339 * the drc lists.
342 340 */
343 341 mutex_enter(&drc->lock);
344 342 list_insert_head(&drc->dr_cache, drp);
345 343 list_insert_head(dr_bkt, drp);
346 344 mutex_exit(&drc->lock);
347 345
348 346 *dup = drp;
349 347
350 348 return (NFS4_DUP_NEW);
351 349 }
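The four return values give the caller a small state machine. A condensed sketch of the expected handling, mirroring what rfs4_dispatch() does below (names as in this file):

	rfs4_dupreq_t *drp;

	switch (rfs4_find_dr(req, drc, &drp)) {
	case NFS4_DUP_NEW:	/* run the compound, then cache the reply */
		break;
	case NFS4_DUP_REPLAY:	/* resend drp->dr_res instead of re-running */
		break;
	case NFS4_DUP_PENDING:	/* another thread owns the reply; drop it */
	case NFS4_DUP_ERROR:	/* cache exhausted; make the client retry */
		return (1);
	}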
352 350
353 351 /*
354 352 *
355 353 * This function handles the duplicate request cache,
356 354 * NULL_PROC and COMPOUND procedure calls for NFSv4.
357 355 *
358 356 * Passed into this function are:
359 357 *
360 358 * disp A pointer to our dispatch table entry
361 359 * req The request to process
362 360 * xprt The server transport handle
363 361 * ap A pointer to the arguments
364 362 *
365 363 *
366 364 * When appropriate this function is responsible for inserting
367 365 * the reply into the duplicate cache or replaying an existing
368 366 * cached reply.
369 367 *
370 368 * dr_stat reflects the state of the duplicate request that
371 369 * has been inserted into or retrieved from the cache
372 370 *
373 371 * drp is the duplicate request entry
374 372 *
375 373 */
376 374 int
377 375 rfs4_dispatch(struct rpcdisp *disp, struct svc_req *req,
378 376 SVCXPRT *xprt, char *ap)
379 377 {
380 378
381 379 COMPOUND4res res_buf;
382 380 COMPOUND4res *rbp;
383 381 COMPOUND4args *cap;
384 382 cred_t *cr = NULL;
385 383 int error = 0;
386 384 int dis_flags = 0;
387 385 int dr_stat = NFS4_NOT_DUP;
388 386 rfs4_dupreq_t *drp = NULL;
389 387 int rv;
390 - nfs4_srv_t *nsrv4 = zone_getspecific(rfs4_zone_key, curzone);
388 + nfs4_srv_t *nsrv4 = nfs4_get_srv();
391 389 rfs4_drc_t *nfs4_drc = nsrv4->nfs4_drc;
392 390
393 391 ASSERT(disp);
394 392
395 393 /*
396 394 * Short circuit the RPC_NULL proc.
397 395 */
398 396 if (disp->dis_proc == rpc_null) {
399 397 DTRACE_NFSV4_1(null__start, struct svc_req *, req);
400 398 if (!svc_sendreply(xprt, xdr_void, NULL)) {
401 399 DTRACE_NFSV4_1(null__done, struct svc_req *, req);
402 400 svcerr_systemerr(xprt);
403 401 return (1);
404 402 }
405 403 DTRACE_NFSV4_1(null__done, struct svc_req *, req);
406 404 return (0);
407 405 }
408 406
409 407 /* Only NFSv4 Compounds from this point onward */
410 408
411 409 rbp = &res_buf;
412 410 cap = (COMPOUND4args *)ap;
413 411
414 412 /*
415 413 * Figure out the disposition of the whole COMPOUND
416 414 * and record its IDEMPOTENCY.
417 415 */
418 416 rfs4_compound_flagproc(cap, &dis_flags);
419 417
420 418 /*
421 419 * If NON-IDEMPOTENT then we need to figure out if this
422 420 * request can be replayed from the duplicate cache.
423 421 *
424 422 * If this is a new request then we need to insert the
425 423 * reply into the duplicate cache.
426 424 */
427 425 if (!(dis_flags & RPC_IDEMPOTENT)) {
428 426 /* look for a replay from the cache or allocate */
429 427 dr_stat = rfs4_find_dr(req, nfs4_drc, &drp);
430 428
431 429 switch (dr_stat) {
432 430
433 431 case NFS4_DUP_ERROR:
434 432 rfs4_resource_err(req, cap);
435 433 return (1);
436 434 /* NOTREACHED */
437 435
438 436 case NFS4_DUP_PENDING:
439 437 /*
440 438 * reply has previously been inserted into the
441 439 * duplicate cache, however the reply has
442 440 * not yet been sent via svc_sendreply()
443 441 */
444 442 return (1);
445 443 /* NOTREACHED */
446 444
447 445 case NFS4_DUP_NEW:
448 446 curthread->t_flag |= T_DONTPEND;
449 447 /* NON-IDEMPOTENT proc call */
450 448 rfs4_compound(cap, rbp, NULL, req, cr, &rv);
451 449 curthread->t_flag &= ~T_DONTPEND;
452 450
453 451 if (rv) /* short ckt sendreply on error */
454 452 return (rv);
455 453
456 454 /*
457 455 * dr_res must be initialized before calling
458 456 * rfs4_dr_chstate (it frees the reply).
459 457 */
460 458 drp->dr_res = res_buf;
461 459 if (curthread->t_flag & T_WOULDBLOCK) {
462 460 curthread->t_flag &= ~T_WOULDBLOCK;
463 461 /*
464 462 * mark this entry as FREE and plop
465 463 * on the end of the cache list
466 464 */
467 465 mutex_enter(&drp->drc->lock);
468 466 rfs4_dr_chstate(drp, NFS4_DUP_FREE);
469 467 list_insert_tail(&(drp->drc->dr_cache), drp);
470 468 mutex_exit(&drp->drc->lock);
471 469 return (1);
472 470 }
473 471 break;
474 472
475 473 case NFS4_DUP_REPLAY:
476 474 /* replay from the cache */
477 475 rbp = &(drp->dr_res);
478 476 break;
479 477 }
480 478 } else {
481 479 curthread->t_flag |= T_DONTPEND;
482 480 /* IDEMPOTENT proc call */
483 481 rfs4_compound(cap, rbp, NULL, req, cr, &rv);
484 482 curthread->t_flag &= ~T_DONTPEND;
485 483
486 484 if (rv) /* short ckt sendreply on error */
487 485 return (rv);
488 486
489 487 if (curthread->t_flag & T_WOULDBLOCK) {
490 488 curthread->t_flag &= ~T_WOULDBLOCK;
491 489 return (1);
492 490 }
493 491 }
494 492
495 493 /*
496 494 * Send out the replayed reply or the 'real' one.
497 495 */
498 496 if (!svc_sendreply(xprt, xdr_COMPOUND4res_srv, (char *)rbp)) {
499 497 DTRACE_PROBE2(nfss__e__dispatch_sendfail,
500 498 struct svc_req *, xprt,
501 499 char *, rbp);
502 500 svcerr_systemerr(xprt);
503 501 error++;
504 502 }
505 503
506 504 /*
507 505 * If this reply was just inserted into the duplicate cache
508 506 * or it was replayed from the dup cache; (re)mark it as
509 507 * available for replay
510 508 *
511 509 * At first glance, this 'if' statement seems a little strange;
512 510 * testing for NFS4_DUP_REPLAY, and then calling...
513 511 *
514 512 * rfs4_dr_chstate(NFS4_DUP_REPLAY)
515 513 *
516 514 * ... but notice that we are checking dr_stat, and not the
517 515 * state of the entry itself; the entry will be NFS4_DUP_INUSE.
518 516 * We do that so that we know not to prematurely reap it whilst
519 517 * we resend it to the client.
520 518 *
521 519 */
522 520 if (dr_stat == NFS4_DUP_NEW || dr_stat == NFS4_DUP_REPLAY) {
523 521 mutex_enter(&drp->drc->lock);
524 522 rfs4_dr_chstate(drp, NFS4_DUP_REPLAY);
525 523 mutex_exit(&drp->drc->lock);
526 524 } else if (dr_stat == NFS4_NOT_DUP) {
527 525 rfs4_compound_free(rbp);
528 526 }
529 527
530 528 return (error);
531 529 }
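One subtlety worth keeping next to this function: the final 'if' keys off dr_stat (rfs4_find_dr()'s return value) rather than the live drp->dr_state. Restated as a C comment, inferred directly from the block comment above:

	/*
	 * While svc_sendreply() runs, the entry sits in NFS4_DUP_INUSE so
	 * rfs4_alloc_dr() cannot reclaim it mid-resend.  Only after the
	 * send completes is it moved (back) to NFS4_DUP_REPLAY.
	 */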
532 530
533 531 bool_t
534 532 rfs4_minorvers_mismatch(struct svc_req *req, SVCXPRT *xprt, void *args)
535 533 {
536 534 COMPOUND4args *argsp;
537 535 COMPOUND4res res_buf, *resp;
538 536
539 537 if (req->rq_vers != 4)
540 538 return (FALSE);
541 539
542 540 argsp = (COMPOUND4args *)args;
543 541
544 542 if (argsp->minorversion <= NFS4_MAX_MINOR_VERSION)
545 543 return (FALSE);
546 544
547 545 resp = &res_buf;
548 546
549 547 /*
550 548 * Form a reply tag by copying over the request tag.
551 549 */
552 550 resp->tag.utf8string_val =
553 551 kmem_alloc(argsp->tag.utf8string_len, KM_SLEEP);
554 552 resp->tag.utf8string_len = argsp->tag.utf8string_len;
555 553 bcopy(argsp->tag.utf8string_val, resp->tag.utf8string_val,
556 554 resp->tag.utf8string_len);
557 555 resp->array_len = 0;
558 556 resp->array = NULL;
559 557 resp->status = NFS4ERR_MINOR_VERS_MISMATCH;
560 558 if (!svc_sendreply(xprt, xdr_COMPOUND4res_srv, (char *)resp)) {
561 559 DTRACE_PROBE2(nfss__e__minorvers_mismatch,
562 560 SVCXPRT *, xprt, char *, resp);
563 561 svcerr_systemerr(xprt);
564 562 }
565 563 rfs4_compound_free(resp);
566 564 return (TRUE);
567 565 }
568 566
569 567 void
570 568 rfs4_resource_err(struct svc_req *req, COMPOUND4args *argsp)
571 569 {
572 570 COMPOUND4res res_buf, *rbp;
573 571 nfs_resop4 *resop;
574 572 PUTFH4res *resp;
575 573
576 574 rbp = &res_buf;
577 575
578 576 /*
579 577 * Form a reply tag by copying over the request tag.
580 578 */
581 579 rbp->tag.utf8string_val =
582 580 kmem_alloc(argsp->tag.utf8string_len, KM_SLEEP);
583 581 rbp->tag.utf8string_len = argsp->tag.utf8string_len;
584 582 bcopy(argsp->tag.utf8string_val, rbp->tag.utf8string_val,
585 583 rbp->tag.utf8string_len);
586 584
587 585 rbp->array_len = 1;
588 586 rbp->array = kmem_zalloc(rbp->array_len * sizeof (nfs_resop4),
589 587 KM_SLEEP);
590 588 resop = &rbp->array[0];
591 589 resop->resop = argsp->array[0].argop; /* copy first op over */
592 590
593 591 /* Any op will do, just need to access status field */
594 592 resp = &resop->nfs_resop4_u.opputfh;
595 593
596 594 /*
597 595 * NFS4ERR_RESOURCE is allowed for all ops, except OP_ILLEGAL.
598 596 * Note that all op numbers in the compound array were already
599 597 * validated by the XDR decoder (xdr_COMPOUND4args_srv()).
600 598 */
601 599 resp->status = (resop->resop == OP_ILLEGAL ?
602 600 NFS4ERR_OP_ILLEGAL : NFS4ERR_RESOURCE);
603 601
604 602 /* compound status is same as first op status */
605 603 rbp->status = resp->status;
606 604
607 605 if (!svc_sendreply(req->rq_xprt, xdr_COMPOUND4res_srv, (char *)rbp)) {
608 606 DTRACE_PROBE2(nfss__rsrc_err__sendfail,
609 607 struct svc_req *, req->rq_xprt, char *, rbp);
610 608 svcerr_systemerr(req->rq_xprt);
611 609 }
612 610
613 611 UTF8STRING_FREE(rbp->tag);
614 612 kmem_free(rbp->array, rbp->array_len * sizeof (nfs_resop4));
615 613 }