#include <rpc/xdr.h>
#include <nfs/nfs4.h>
#include <nfs/nfs_dispatch.h>
#include <nfs/nfs4_drc.h>

#define	NFS4_MAX_MINOR_VERSION	0

/*
 * The default size of the duplicate request cache
 */
uint32_t nfs4_drc_max = 8 * 1024;

/*
 * The number of buckets we'd like to hash the
 * replies into. Do not change this on the fly.
 */
uint32_t nfs4_drc_hash = 541;

static void rfs4_resource_err(struct svc_req *req, COMPOUND4args *argsp);

extern zone_key_t rfs4_zone_key;

/*
 * Initialize a duplicate request cache.
 */
rfs4_drc_t *
rfs4_init_drc(uint32_t drc_size, uint32_t drc_hash_size)
{
	rfs4_drc_t *drc;
	uint32_t bki;

	ASSERT(drc_size);
	ASSERT(drc_hash_size);

	drc = kmem_alloc(sizeof (rfs4_drc_t), KM_SLEEP);

	drc->max_size = drc_size;
	drc->in_use = 0;

	mutex_init(&drc->lock, NULL, MUTEX_DEFAULT, NULL);

	drc->dr_hash = drc_hash_size;

	drc->dr_buckets = kmem_alloc(sizeof (list_t)*drc_hash_size, KM_SLEEP);

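	/*
	 * Each hash bucket is a list of cached requests chained
	 * through their dr_bkt_next links.
	 */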
	for (bki = 0; bki < drc_hash_size; bki++) {
		list_create(&drc->dr_buckets[bki], sizeof (rfs4_dupreq_t),
		    offsetof(rfs4_dupreq_t, dr_bkt_next));
	}

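	/*
	 * The dr_cache list links every entry in the cache (via
	 * dr_next) so the whole cache can be walked independently
	 * of the hash buckets (see rfs4_fini_drc below).
	 */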
	list_create(&(drc->dr_cache), sizeof (rfs4_dupreq_t),
	    offsetof(rfs4_dupreq_t, dr_next));

	return (drc);
}

/*
 * Destroy a duplicate request cache.
 */
void
rfs4_fini_drc(void)
{
	nfs4_srv_t *nsrv4 = zone_getspecific(rfs4_zone_key, curzone);
	rfs4_drc_t *drc = nsrv4->nfs4_drc;
	rfs4_dupreq_t *drp, *drp_next;

	/* iterate over the dr_cache and free the entries */
	for (drp = list_head(&(drc->dr_cache)); drp != NULL; drp = drp_next) {

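		/*
		 * Only entries in NFS4_DUP_REPLAY state hold a saved
		 * compound reply (dr_res) that needs to be freed.
		 */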
		if (drp->dr_state == NFS4_DUP_REPLAY)
			rfs4_compound_free(&(drp->dr_res));

		if (drp->dr_addr.buf != NULL)
			kmem_free(drp->dr_addr.buf, drp->dr_addr.maxlen);

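		/* remember the next entry before this one is freed */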
		drp_next = list_next(&(drc->dr_cache), drp);

		kmem_free(drp, sizeof (rfs4_dupreq_t));
	}

	mutex_destroy(&drc->lock);
	kmem_free(drc->dr_buckets,
	    sizeof (list_t)*drc->dr_hash);
 * dr_stat reflects the state of the duplicate request that
 * has been inserted into or retrieved from the cache
 *
 * drp is the duplicate request entry
 *
 */
int
rfs4_dispatch(struct rpcdisp *disp, struct svc_req *req,
    SVCXPRT *xprt, char *ap)
{

	COMPOUND4res res_buf;
	COMPOUND4res *rbp;
	COMPOUND4args *cap;
	cred_t *cr = NULL;
	int error = 0;
	int dis_flags = 0;
	int dr_stat = NFS4_NOT_DUP;
	rfs4_dupreq_t *drp = NULL;
	int rv;
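	/* the DRC is per zone; fetch it from this zone's NFS server state */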
	nfs4_srv_t *nsrv4 = zone_getspecific(rfs4_zone_key, curzone);
	rfs4_drc_t *nfs4_drc = nsrv4->nfs4_drc;

	ASSERT(disp);

	/*
	 * Short circuit the RPC_NULL proc.
	 */
	if (disp->dis_proc == rpc_null) {
		DTRACE_NFSV4_1(null__start, struct svc_req *, req);
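		/*
		 * NULL carries no arguments or results, so an empty
		 * (xdr_void) reply is all that is sent back.
		 */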
		if (!svc_sendreply(xprt, xdr_void, NULL)) {
			DTRACE_NFSV4_1(null__done, struct svc_req *, req);
			svcerr_systemerr(xprt);
			return (1);
		}
		DTRACE_NFSV4_1(null__done, struct svc_req *, req);
		return (0);
	}

	/* Only NFSv4 Compounds from this point onward */
