4463 NLM fails wrongly on clnt_control().
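
The one-line change in this webrev is in refresh_nlm_rpc(): clnt_control() returns TRUE when it succeeds, so the old test if (clnt_control(...)) took the error path precisely when the option had been set, logging "Unable to set CLSET_NODELAYONERR" for every freshly created handle. The fix inverts the test by comparing against FALSE. A minimal sketch of the corrected pattern (the handle clp is a hypothetical stand-in, not a name from this file):

	/*
	 * Sketch only: assumes clp is a CLIENT * obtained from
	 * clnt_tli_kcreate(), as in refresh_nlm_rpc() below.
	 */
	bool_t clset = TRUE;

	/*
	 * clnt_control() returns TRUE when the request is honored,
	 * so the error path must trigger on FALSE.
	 */
	if (clnt_control(clp, CLSET_NODELAYONERR,
	    (char *)&clset) == FALSE) {
		/* Only a genuine failure to set the option lands here. */
		NLM_ERR("Unable to set CLSET_NODELAYONERR\n");
	}
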
--- old/usr/src/uts/common/klm/nlm_rpc_handle.c
+++ new/usr/src/uts/common/klm/nlm_rpc_handle.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
24 24 * Copyright (c) 2012 by Delphix. All rights reserved.
25 25 */
26 26
27 27 #include <sys/param.h>
28 28 #include <sys/systm.h>
29 29 #include <sys/socket.h>
30 30 #include <sys/syslog.h>
31 31 #include <sys/systm.h>
32 32 #include <sys/unistd.h>
33 33 #include <sys/queue.h>
34 34 #include <sys/sdt.h>
35 35 #include <netinet/in.h>
36 36
37 37 #include <rpc/rpc.h>
38 38 #include <rpc/xdr.h>
39 39 #include <rpc/pmap_prot.h>
40 40 #include <rpc/pmap_clnt.h>
41 41 #include <rpc/rpcb_prot.h>
42 42
43 43 #include <rpcsvc/nlm_prot.h>
44 44 #include <rpcsvc/sm_inter.h>
45 45
46 46 #include "nlm_impl.h"
47 47
48 48 /*
49 49 * The following error codes from nlm_null_rpc indicate that the port we have
50 50 * cached for the client's NLM service is stale and that we need to establish
51 51 * a new RPC client.
52 52 */
53 53 #define NLM_STALE_CLNT(_status) \
54 54 ((_status) == RPC_PROGUNAVAIL || \
55 55 (_status) == RPC_PROGVERSMISMATCH || \
56 56 (_status) == RPC_PROCUNAVAIL || \
57 57 (_status) == RPC_CANTCONNECT || \
58 58 (_status) == RPC_XPRTFAILED)
59 59
60 60 static struct kmem_cache *nlm_rpch_cache = NULL;
61 61
62 62 static int nlm_rpch_ctor(void *, void *, int);
63 63 static void nlm_rpch_dtor(void *, void *);
64 64 static void destroy_rpch(nlm_rpc_t *);
65 65 static nlm_rpc_t *get_nlm_rpc_fromcache(struct nlm_host *, int);
66 66 static void update_host_rpcbinding(struct nlm_host *, int);
67 67 static int refresh_nlm_rpc(struct nlm_host *, nlm_rpc_t *);
68 68 static void nlm_host_rele_rpc_locked(struct nlm_host *, nlm_rpc_t *);
69 69
70 70 static nlm_rpc_t *
71 71 get_nlm_rpc_fromcache(struct nlm_host *hostp, int vers)
72 72 {
73 73 nlm_rpc_t *rpcp;
74 74 bool_t found = FALSE;
75 75
76 76 ASSERT(MUTEX_HELD(&hostp->nh_lock));
77 77 if (TAILQ_EMPTY(&hostp->nh_rpchc))
78 78 return (NULL);
79 79
80 80 TAILQ_FOREACH(rpcp, &hostp->nh_rpchc, nr_link) {
81 81 if (rpcp->nr_vers == vers) {
82 82 found = TRUE;
83 83 break;
84 84 }
85 85 }
86 86
87 87 if (!found)
88 88 return (NULL);
89 89
90 90 TAILQ_REMOVE(&hostp->nh_rpchc, rpcp, nr_link);
91 91 return (rpcp);
92 92 }
93 93
94 94 /*
95 95 * Update host's RPC binding (host->nh_addr).
96 96 * The function is executed by only one thread at a time.
97 97 */
98 98 static void
99 99 update_host_rpcbinding(struct nlm_host *hostp, int vers)
100 100 {
101 101 enum clnt_stat stat;
102 102
103 103 ASSERT(MUTEX_HELD(&hostp->nh_lock));
104 104
105 105 /*
106 106 * Mark the RPC binding state as "update in progress" to
107 107 * tell other threads that they must wait until the binding
108 108 * is fully updated.
109 109 */
110 110 hostp->nh_rpcb_state = NRPCB_UPDATE_INPROGRESS;
111 111 hostp->nh_rpcb_ustat = RPC_SUCCESS;
112 112 mutex_exit(&hostp->nh_lock);
113 113
114 114 stat = rpcbind_getaddr(&hostp->nh_knc, NLM_PROG, vers, &hostp->nh_addr);
115 115 mutex_enter(&hostp->nh_lock);
116 116
117 117 hostp->nh_rpcb_state = ((stat == RPC_SUCCESS) ?
118 118 NRPCB_UPDATED : NRPCB_NEED_UPDATE);
119 119
120 120 hostp->nh_rpcb_ustat = stat;
121 121 cv_broadcast(&hostp->nh_rpcb_cv);
122 122 }
123 123
124 124 /*
125 125 * Refresh an RPC handle taken from the host's handle cache.
126 126 * This function is called when an RPC handle is either
127 127 * uninitialized or was initialized using a binding that's
128 128 * no longer current.
129 129 */
130 130 static int
131 131 refresh_nlm_rpc(struct nlm_host *hostp, nlm_rpc_t *rpcp)
132 132 {
133 133 int ret;
134 134
135 135 if (rpcp->nr_handle == NULL) {
136 136 bool_t clset = TRUE;
137 137
138 138 ret = clnt_tli_kcreate(&hostp->nh_knc, &hostp->nh_addr,
139 139 NLM_PROG, rpcp->nr_vers, 0, NLM_RPC_RETRIES,
140 140 CRED(), &rpcp->nr_handle);
141 141
142 142 /*
143 143 * Set the client's CLSET_NODELAYONERR option to true. The
144 144 * RPC clnt_call interface creates an artificial delay for
145 145 * certain call errors in order to prevent RPC consumers
146 146 * from getting into tight retry loops. Since this function is
147 147 * called by the NLM service routines, we would like to avoid
148 148 * this artificial delay when possible. We do not retry if the
149 149 * NULL request fails, so it is safe for us to turn this option
150 150 * on.
151 151 */
152 152 if (clnt_control(rpcp->nr_handle, CLSET_NODELAYONERR,
153 - (char *)&clset)) {
153 + (char *)&clset) == FALSE) {
154 154 NLM_ERR("Unable to set CLSET_NODELAYONERR\n");
155 155 }
156 156 } else {
157 157 ret = clnt_tli_kinit(rpcp->nr_handle, &hostp->nh_knc,
158 158 &hostp->nh_addr, 0, NLM_RPC_RETRIES, CRED());
159 159 if (ret == 0) {
160 160 enum clnt_stat stat;
161 161
162 162 /*
163 163 * Check whether the host's RPC binding is still
164 164 * fresh, i.e. whether the remote program still sits
165 165 * on the port we assume. Call the NULL procedure
166 166 * to check.
167 167 *
168 168 * Note: Even though we set no delay on error on the
169 169 * client handle, the call to nlm_null_rpc can still
170 170 * delay for 10 seconds before returning an error. For
171 171 * example the no delay on error option is not honored
172 172 * for RPC_XPRTFAILED errors (see clnt_cots_kcallit).
173 173 */
174 174 stat = nlm_null_rpc(rpcp->nr_handle, rpcp->nr_vers);
175 175 if (NLM_STALE_CLNT(stat)) {
176 176 ret = ESTALE;
177 177 }
178 178 }
179 179 }
180 180
181 181 return (ret);
182 182 }
183 183
184 184 /*
185 185 * Get an RPC handle that can be used to talk to the NLM
186 186 * of the given version running on the given host.
187 187 * Saves the obtained RPC handle to the rpcpp argument.
188 188 *
189 189 * If an error occurs, returns a nonzero error code.
190 190 */
191 191 int
192 192 nlm_host_get_rpc(struct nlm_host *hostp, int vers, nlm_rpc_t **rpcpp)
193 193 {
194 194 nlm_rpc_t *rpcp = NULL;
195 195 int rc;
196 196
197 197 mutex_enter(&hostp->nh_lock);
198 198
199 199 /*
200 200 * If this handle is either uninitialized, or was
201 201 * initialized using a binding that's now stale,
202 202 * do the init or re-init.
203 203 * See the comments on enum nlm_rpcb_state for
204 204 * more details.
205 205 */
206 206 again:
207 207 while (hostp->nh_rpcb_state != NRPCB_UPDATED) {
208 208 if (hostp->nh_rpcb_state == NRPCB_UPDATE_INPROGRESS) {
209 209 rc = cv_wait_sig(&hostp->nh_rpcb_cv, &hostp->nh_lock);
210 210 if (rc == 0) {
211 211 mutex_exit(&hostp->nh_lock);
212 212 return (EINTR);
213 213 }
214 214 }
215 215
216 216 /*
217 217 * Check if the RPC binding was marked for update.
218 218 * If so, start the RPC binding update operation.
219 219 * NOTE: the operation can be executed by only
220 220 * one thread at a time.
221 221 */
222 222 if (hostp->nh_rpcb_state == NRPCB_NEED_UPDATE)
223 223 update_host_rpcbinding(hostp, vers);
224 224
225 225 /*
226 226 * Check if an RPC error occurred during the RPC
227 227 * binding update operation. If so, report a
228 228 * corresponding error.
229 229 */
230 230 if (hostp->nh_rpcb_ustat != RPC_SUCCESS) {
231 231 mutex_exit(&hostp->nh_lock);
232 232 return (ENOENT);
233 233 }
234 234 }
235 235
236 236 rpcp = get_nlm_rpc_fromcache(hostp, vers);
237 237 mutex_exit(&hostp->nh_lock);
238 238 if (rpcp == NULL) {
239 239 /*
240 240 * There weren't any RPC handles in the host's
241 241 * cache. No luck, just create a new one.
242 242 */
243 243 rpcp = kmem_cache_alloc(nlm_rpch_cache, KM_SLEEP);
244 244 rpcp->nr_vers = vers;
245 245 }
246 246
247 247 /*
248 248 * Refresh RPC binding
249 249 */
250 250 rc = refresh_nlm_rpc(hostp, rpcp);
251 251 if (rc != 0) {
252 252 if (rc == ESTALE) {
253 253 /*
254 254 * The host's RPC binding is stale; we have
255 255 * to update it. Put the RPC handle back
256 256 * into the cache and mark the host as
257 257 * "need update".
258 258 */
259 259 mutex_enter(&hostp->nh_lock);
260 260 hostp->nh_rpcb_state = NRPCB_NEED_UPDATE;
261 261 nlm_host_rele_rpc_locked(hostp, rpcp);
262 262 goto again;
263 263 }
264 264
265 265 destroy_rpch(rpcp);
266 266 return (rc);
267 267 }
268 268
269 269 DTRACE_PROBE2(end, struct nlm_host *, hostp,
270 270 nlm_rpc_t *, rpcp);
271 271
272 272 *rpcpp = rpcp;
273 273 return (0);
274 274 }
275 275
276 276 void
277 277 nlm_host_rele_rpc(struct nlm_host *hostp, nlm_rpc_t *rpcp)
278 278 {
279 279 mutex_enter(&hostp->nh_lock);
280 280 nlm_host_rele_rpc_locked(hostp, rpcp);
281 281 mutex_exit(&hostp->nh_lock);
282 282 }
283 283
284 284 static void
285 285 nlm_host_rele_rpc_locked(struct nlm_host *hostp, nlm_rpc_t *rpcp)
286 286 {
287 287 ASSERT(mutex_owned(&hostp->nh_lock));
288 288 TAILQ_INSERT_HEAD(&hostp->nh_rpchc, rpcp, nr_link);
289 289 }
290 290
291 291 /*
292 292 * The function invalidates the host's RPC binding by marking
293 293 * it stale. The next time a thread tries to get an RPC handle
294 294 * from the host's handle cache, the host's RPC binding will
295 295 * be updated.
296 296 *
297 297 * The function should be called when an RPC call invoked via
298 298 * a handle taken from the RPC cache returns RPC_PROCUNAVAIL.
299 299 */
300 300 void
301 301 nlm_host_invalidate_binding(struct nlm_host *hostp)
302 302 {
303 303 mutex_enter(&hostp->nh_lock);
304 304 hostp->nh_rpcb_state = NRPCB_NEED_UPDATE;
305 305 mutex_exit(&hostp->nh_lock);
306 306 }
307 307
308 308 void
309 309 nlm_rpc_init(void)
310 310 {
311 311 nlm_rpch_cache = kmem_cache_create("nlm_rpch_cache",
312 312 sizeof (nlm_rpc_t), 0, nlm_rpch_ctor, nlm_rpch_dtor,
313 313 NULL, NULL, NULL, 0);
314 314 }
315 315
316 316 void
317 317 nlm_rpc_cache_destroy(struct nlm_host *hostp)
318 318 {
319 319 nlm_rpc_t *rpcp;
320 320
321 321 /*
322 322 * There's no need to take the host's mutex here;
323 323 * nlm_rpc_cache_destroy() should be called from
324 324 * only one place: nlm_host_destroy, when all
325 325 * resources the host owns are already cleaned up.
326 326 * So there shouldn't be any races.
327 327 */
328 328 while ((rpcp = TAILQ_FIRST(&hostp->nh_rpchc)) != NULL) {
329 329 TAILQ_REMOVE(&hostp->nh_rpchc, rpcp, nr_link);
330 330 destroy_rpch(rpcp);
331 331 }
332 332 }
333 333
334 334 /* ARGSUSED */
335 335 static int
336 336 nlm_rpch_ctor(void *datap, void *cdrarg, int kmflags)
337 337 {
338 338 nlm_rpc_t *rpcp = (nlm_rpc_t *)datap;
339 339
340 340 bzero(rpcp, sizeof (*rpcp));
341 341 return (0);
342 342 }
343 343
344 344 /* ARGSUSED */
345 345 static void
346 346 nlm_rpch_dtor(void *datap, void *cdrarg)
347 347 {
348 348 nlm_rpc_t *rpcp = (nlm_rpc_t *)datap;
349 349 ASSERT(rpcp->nr_handle == NULL);
350 350 }
351 351
352 352 static void
353 353 destroy_rpch(nlm_rpc_t *rpcp)
354 354 {
355 355 if (rpcp->nr_handle != NULL) {
356 356 AUTH_DESTROY(rpcp->nr_handle->cl_auth);
357 357 CLNT_DESTROY(rpcp->nr_handle);
358 358 rpcp->nr_handle = NULL;
359 359 }
360 360
361 361 kmem_cache_free(nlm_rpch_cache, rpcp);
362 362 }
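
For context, callers elsewhere in the NLM code are expected to pair nlm_host_get_rpc() with nlm_host_rele_rpc() so the handle returns to the per-host cache. A sketch of that pattern (hostp, the version constant, and the error handling are illustrative placeholders, not code from this file):

	nlm_rpc_t *rpcp;
	int error;

	/* Obtain a cached, or freshly created, handle for this host. */
	error = nlm_host_get_rpc(hostp, NLM4_VERS, &rpcp);
	if (error != 0)
		return (error);	/* EINTR, ENOENT, or a create error */

	/* ... issue NLM calls via rpcp->nr_handle ... */

	/* Put the handle back into the host's cache for reuse. */
	nlm_host_rele_rpc(hostp, rpcp);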