re #13388 rb4382 fmd_api.h uses bool which is a C99/C++ keyword
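Background for this change: the fmd_stat_t value union in fmd_api.h names one of its members "bool". That identifier is a keyword in C++ and, once <stdbool.h> is included, a macro for _Bool in C99, so consumers of the header cannot compile cleanly in those modes. A minimal sketch of the conflict, using hypothetical names rather than the real header:

    #include <stdbool.h>            /* C99: "bool" becomes a macro for _Bool */

    union stat_value {
            int bool;               /* fails to compile: expands to "int _Bool;" */
            int i32;                /* other members are unaffected */
    };

The fix renames the member (to "b", as the hunk near the end of this file shows), and this RPC code is updated to match.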
--- old/usr/src/cmd/fm/fmd/common/fmd_rpc_adm.c
+++ new/usr/src/cmd/fm/fmd/common/fmd_rpc_adm.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
24 24 */
25 25
26 26 #include <strings.h>
27 27 #include <limits.h>
28 28 #include <unistd.h>
29 29 #include <stdlib.h>
30 30 #include <alloca.h>
31 31
32 32 #include <fmd_rpc_adm.h>
33 33 #include <fmd_rpc.h>
34 34 #include <fmd_module.h>
35 35 #include <fmd_ustat.h>
36 36 #include <fmd_error.h>
37 37 #include <fmd_asru.h>
38 38 #include <fmd_ckpt.h>
39 39 #include <fmd_case.h>
40 40 #include <fmd_fmri.h>
41 41 #include <fmd_idspace.h>
42 42 #include <fmd_xprt.h>
43 43
44 44 #include <fmd.h>
45 45
46 46 bool_t
47 47 fmd_adm_modinfo_1_svc(struct fmd_rpc_modlist *rvp, struct svc_req *req)
48 48 {
49 49 struct fmd_rpc_modinfo *rmi;
50 50 fmd_module_t *mp;
51 51
52 52 rvp->rml_list = NULL;
53 53 rvp->rml_err = 0;
54 54 rvp->rml_len = 0;
55 55
56 56 if (fmd_rpc_deny(req)) {
57 57 rvp->rml_err = FMD_ADM_ERR_PERM;
58 58 return (TRUE);
59 59 }
60 60
61 61 (void) pthread_mutex_lock(&fmd.d_mod_lock);
62 62
63 63 for (mp = fmd_list_next(&fmd.d_mod_list);
64 64 mp != NULL; mp = fmd_list_next(mp)) {
65 65
66 66 if ((rmi = malloc(sizeof (struct fmd_rpc_modinfo))) == NULL) {
67 67 rvp->rml_err = FMD_ADM_ERR_NOMEM;
68 68 break;
69 69 }
70 70
71 71 fmd_module_lock(mp);
72 72
73 73 /*
74 74 * If mod_info is NULL, the module is in the middle of loading:
75 75 * do not report its presence to observability tools yet.
76 76 */
77 77 if (mp->mod_info == NULL) {
78 78 fmd_module_unlock(mp);
79 79 free(rmi);
80 80 continue;
81 81 }
82 82
83 83 rmi->rmi_name = strdup(mp->mod_name);
84 84 rmi->rmi_desc = strdup(mp->mod_info->fmdi_desc);
85 85 rmi->rmi_vers = strdup(mp->mod_info->fmdi_vers);
86 86 rmi->rmi_faulty = mp->mod_error != 0;
87 87 rmi->rmi_next = rvp->rml_list;
88 88
89 89 fmd_module_unlock(mp);
90 90 rvp->rml_list = rmi;
91 91 rvp->rml_len++;
92 92
93 93 if (rmi->rmi_desc == NULL || rmi->rmi_vers == NULL) {
94 94 rvp->rml_err = FMD_ADM_ERR_NOMEM;
95 95 break;
96 96 }
97 97 }
98 98
99 99 (void) pthread_mutex_unlock(&fmd.d_mod_lock);
100 100 return (TRUE);
101 101 }
102 102
103 103 bool_t
104 104 fmd_adm_modcstat_1_svc(char *name,
105 105 struct fmd_rpc_modstat *rms, struct svc_req *req)
106 106 {
107 107 fmd_ustat_snap_t snap;
108 108 fmd_module_t *mp;
109 109
110 110 rms->rms_buf.rms_buf_val = NULL;
111 111 rms->rms_buf.rms_buf_len = 0;
112 112 rms->rms_err = 0;
113 113
114 114 if (fmd_rpc_deny(req)) {
115 115 rms->rms_err = FMD_ADM_ERR_PERM;
116 116 return (TRUE);
117 117 }
118 118
119 119 if ((mp = fmd_modhash_lookup(fmd.d_mod_hash, name)) == NULL) {
120 120 rms->rms_err = FMD_ADM_ERR_MODSRCH;
121 121 return (TRUE);
122 122 }
123 123
124 124 if (fmd_modstat_snapshot(mp, &snap) == 0) {
125 125 rms->rms_buf.rms_buf_val = snap.uss_buf;
126 126 rms->rms_buf.rms_buf_len = snap.uss_len;
127 127 } else if (errno == EFMD_HDL_ABORT) {
128 128 rms->rms_err = FMD_ADM_ERR_MODFAIL;
129 129 } else
130 130 rms->rms_err = FMD_ADM_ERR_NOMEM;
131 131
132 132 fmd_module_rele(mp);
133 133 return (TRUE);
134 134 }
135 135
136 136 bool_t
137 137 fmd_adm_moddstat_1_svc(char *name,
138 138 struct fmd_rpc_modstat *rms, struct svc_req *req)
139 139 {
140 140 fmd_module_t *mp;
141 141
142 142 rms->rms_buf.rms_buf_val = NULL;
143 143 rms->rms_buf.rms_buf_len = 0;
144 144 rms->rms_err = 0;
145 145
146 146 if (fmd_rpc_deny(req)) {
147 147 rms->rms_err = FMD_ADM_ERR_PERM;
148 148 return (TRUE);
149 149 }
150 150
151 151 if ((mp = fmd_modhash_lookup(fmd.d_mod_hash, name)) == NULL) {
152 152 rms->rms_err = FMD_ADM_ERR_MODSRCH;
153 153 return (TRUE);
154 154 }
155 155
156 156 rms->rms_buf.rms_buf_val = malloc(sizeof (fmd_modstat_t));
157 157 rms->rms_buf.rms_buf_len = sizeof (fmd_modstat_t) / sizeof (fmd_stat_t);
158 158
159 159 if (rms->rms_buf.rms_buf_val == NULL) {
160 160 rms->rms_err = FMD_ADM_ERR_NOMEM;
161 161 rms->rms_buf.rms_buf_len = 0;
162 162 fmd_module_rele(mp);
163 163 return (TRUE);
164 164 }
165 165
166 166 /*
167 167 * Note: the bcopy() here is valid only if no FMD_TYPE_STRING stats
168 168 * are present in mp->mod_stats. We don't use any for the daemon-
169 169 * maintained stats and provide this function in order to reduce the
170 170 * overhead of the fmstat(1M) default view, where these minimal stats
171 171 * must be retrieved for all of the active modules.
172 172 */
173 173 (void) pthread_mutex_lock(&mp->mod_stats_lock);
174 174
175 175 if (mp->mod_stats != NULL) {
176 176 mp->mod_stats->ms_snaptime.fmds_value.ui64 = gethrtime();
177 177 bcopy(mp->mod_stats, rms->rms_buf.rms_buf_val,
178 178 sizeof (fmd_modstat_t));
179 179 } else {
180 180 free(rms->rms_buf.rms_buf_val);
181 181 rms->rms_buf.rms_buf_val = NULL;
182 182 rms->rms_buf.rms_buf_len = 0;
183 183 rms->rms_err = FMD_ADM_ERR_MODFAIL;
184 184 }
185 185
186 186 (void) pthread_mutex_unlock(&mp->mod_stats_lock);
187 187 fmd_module_rele(mp);
188 188 return (TRUE);
189 189 }
190 190
191 191 bool_t
192 192 fmd_adm_modgstat_1_svc(struct fmd_rpc_modstat *rms, struct svc_req *req)
193 193 {
194 194 const size_t size = sizeof (fmd_statistics_t);
195 195
196 196 if (fmd_rpc_deny(req)) {
197 197 rms->rms_buf.rms_buf_val = NULL;
198 198 rms->rms_buf.rms_buf_len = 0;
199 199 rms->rms_err = FMD_ADM_ERR_PERM;
200 200 } else if ((rms->rms_buf.rms_buf_val = malloc(size)) != NULL) {
201 201 /*
202 202 * Note: the bcopy() here is valid only if no FMD_TYPE_STRING
203 203 * stats are present in fmd.d_stats (see definition in fmd.c).
204 204 */
205 205 (void) pthread_mutex_lock(&fmd.d_stats_lock);
206 206 bcopy(fmd.d_stats, rms->rms_buf.rms_buf_val, size);
207 207 (void) pthread_mutex_unlock(&fmd.d_stats_lock);
208 208 rms->rms_buf.rms_buf_len = size / sizeof (fmd_stat_t);
209 209 rms->rms_err = 0;
210 210 } else {
211 211 rms->rms_buf.rms_buf_len = 0;
212 212 rms->rms_err = FMD_ADM_ERR_NOMEM;
213 213 }
214 214
215 215 return (TRUE);
216 216 }
217 217
218 218 bool_t
219 219 fmd_adm_modload_1_svc(char *path, int *rvp, struct svc_req *req)
220 220 {
221 221 fmd_module_t *mp;
222 222 const char *p;
223 223 int err = 0;
224 224
225 225 if (fmd_rpc_deny(req)) {
226 226 *rvp = FMD_ADM_ERR_PERM;
227 227 return (TRUE);
228 228 }
229 229
230 230 /*
231 231 * Before we endure the expense of constructing a module and attempting
232 232 * to load it, do a quick check to see if the pathname is valid.
233 233 */
234 234 if (access(path, F_OK) != 0) {
235 235 *rvp = FMD_ADM_ERR_MODNOENT;
236 236 return (TRUE);
237 237 }
238 238
239 239 if ((p = strrchr(path, '.')) != NULL && strcmp(p, ".so") == 0)
240 240 mp = fmd_modhash_load(fmd.d_mod_hash, path, &fmd_rtld_ops);
241 241 else
242 242 mp = fmd_modhash_load(fmd.d_mod_hash, path, &fmd_proc_ops);
243 243
244 244 if (mp == NULL) {
245 245 switch (errno) {
246 246 case EFMD_MOD_LOADED:
247 247 err = FMD_ADM_ERR_MODEXIST;
248 248 break;
249 249 case EFMD_MOD_INIT:
250 250 err = FMD_ADM_ERR_MODINIT;
251 251 break;
252 252 default:
253 253 err = FMD_ADM_ERR_MODLOAD;
254 254 break;
255 255 }
256 256 }
257 257
258 258 *rvp = err;
259 259 return (TRUE);
260 260 }
261 261
262 262 bool_t
263 263 fmd_adm_modunload_1_svc(char *name, int *rvp, struct svc_req *req)
264 264 {
265 265 fmd_module_t *mp = NULL;
266 266 int err = 0;
267 267
268 268 if (fmd_rpc_deny(req))
269 269 err = FMD_ADM_ERR_PERM;
270 270 else if ((mp = fmd_modhash_lookup(fmd.d_mod_hash, name)) == NULL)
271 271 err = FMD_ADM_ERR_MODSRCH;
272 272 else if (mp == fmd.d_self)
273 273 err = FMD_ADM_ERR_MODBUSY;
274 274 else if (fmd_modhash_unload(fmd.d_mod_hash, name) != 0)
275 275 err = FMD_ADM_ERR_MODSRCH;
276 276
277 277 if (mp != NULL)
278 278 fmd_module_rele(mp);
279 279
280 280 *rvp = err;
281 281 return (TRUE);
282 282 }
283 283
284 284 bool_t
285 285 fmd_adm_modreset_1_svc(char *name, int *rvp, struct svc_req *req)
286 286 {
287 287 fmd_module_t *mp = NULL;
288 288 int err = 0;
289 289
290 290 if (fmd_rpc_deny(req))
291 291 err = FMD_ADM_ERR_PERM;
292 292 else if ((mp = fmd_modhash_lookup(fmd.d_mod_hash, name)) == NULL)
293 293 err = FMD_ADM_ERR_MODSRCH;
294 294 else if (mp == fmd.d_self)
295 295 err = FMD_ADM_ERR_MODBUSY;
296 296 else if (fmd_modhash_unload(fmd.d_mod_hash, name) != 0)
297 297 err = FMD_ADM_ERR_MODSRCH;
298 298
299 299 if (err == 0)
300 300 fmd_ckpt_delete(mp); /* erase any saved checkpoints */
301 301
302 302 if (err == 0 && fmd_modhash_load(fmd.d_mod_hash,
303 303 mp->mod_path, mp->mod_ops) == NULL) {
304 304 if (errno == EFMD_MOD_INIT)
305 305 err = FMD_ADM_ERR_MODINIT;
306 306 else
307 307 err = FMD_ADM_ERR_MODLOAD;
308 308 }
309 309
310 310 if (mp != NULL)
311 311 fmd_module_rele(mp);
312 312
313 313 *rvp = err;
314 314 return (TRUE);
315 315 }
316 316
317 317 bool_t
318 318 fmd_adm_modgc_1_svc(char *name, int *rvp, struct svc_req *req)
319 319 {
320 320 fmd_module_t *mp;
321 321 int err = 0;
322 322
323 323 if (fmd_rpc_deny(req))
324 324 err = FMD_ADM_ERR_PERM;
325 325 else if ((mp = fmd_modhash_lookup(fmd.d_mod_hash, name)) == NULL)
326 326 err = FMD_ADM_ERR_MODSRCH;
327 327 else {
328 328 fmd_module_gc(mp);
329 329 fmd_module_rele(mp);
330 330 }
331 331
332 332 *rvp = err;
333 333 return (TRUE);
334 334 }
335 335
336 336 /*
337 337 * Unlike our other RPC callbacks, fmd_adm_rsrclist_1 can return large amounts
338 338 * of data that may exceed the underlying RPC transport buffer size if the
339 339 * resource cache is heavily populated and/or all resources are requested.
340 340 * To minimize the likelihood of running out of RPC buffer space and having to
341 341 * fail the client request, fmd_adm_rsrclist_1 returns a snapshot of the
342 342 * relevant FMRI strings only: the client can use fmd_adm_rsrcinfo_1 on an
343 343 * individual FMRI if more information is needed. To further reduce the XDR
344 344 * overhead, the string list is represented as XDR-opaque data where the
345 345 * entire list is returned as a string table (e.g. "fmriA\0fmriB\0...").
346 346 */
347 347 static void
348 348 fmd_adm_rsrclist_asru(fmd_asru_t *ap, void *arg)
349 349 {
350 350 struct fmd_rpc_rsrclist *rrl = arg;
351 351 size_t name_len, buf_len;
352 352 void *p;
353 353
354 354 /*
355 355 * Skip the ASRU if this fault is marked as invisible.
356 356 * If rrl_all is false, we take a quick look at asru_flags with no lock
357 357 * held to see if the ASRU is not faulty. If so,
358 358 * we don't want to report it by default and can just skip this ASRU.
359 359 * This helps keep overhead low in the common case, as the call to
360 360 * fmd_asru_getstate() can be expensive depending on the scheme.
361 361 */
362 362
363 363 if (ap->asru_flags & FMD_ASRU_INVISIBLE)
364 364 return;
365 365 if (rrl->rrl_all == B_FALSE && !(ap->asru_flags & FMD_ASRU_FAULTY))
366 366 return;
367 367
368 368 if (rrl->rrl_err != 0 || fmd_asru_getstate(ap) == 0)
369 369 return; /* error has occurred or resource is in 'ok' state */
370 370
371 371 /*
372 372 * Lock the ASRU and reallocate rrl_buf[] to be large enough to hold
373 373 * another string, doubling it as needed. Then copy the new string
374 374 * on to the end, and increment rrl_len to indicate the used space.
375 375 */
376 376 (void) pthread_mutex_lock(&ap->asru_lock);
377 377 name_len = strlen(ap->asru_name) + 1;
378 378
379 379 while (rrl->rrl_len + name_len > rrl->rrl_buf.rrl_buf_len) {
380 380 if (rrl->rrl_buf.rrl_buf_len != 0)
381 381 buf_len = rrl->rrl_buf.rrl_buf_len * 2;
382 382 else
383 383 buf_len = 1024; /* default buffer size */
384 384
385 385 if ((p = realloc(rrl->rrl_buf.rrl_buf_val, buf_len)) != NULL) {
386 386 bzero((char *)p + rrl->rrl_buf.rrl_buf_len,
387 387 buf_len - rrl->rrl_buf.rrl_buf_len);
388 388 rrl->rrl_buf.rrl_buf_val = p;
389 389 rrl->rrl_buf.rrl_buf_len = buf_len;
390 390 } else {
391 391 rrl->rrl_err = FMD_ADM_ERR_NOMEM;
392 392 break;
393 393 }
394 394 }
395 395
396 396 if (rrl->rrl_err == 0) {
397 397 bcopy(ap->asru_name, (char *)rrl->rrl_buf.rrl_buf_val +
398 398 rrl->rrl_len, name_len);
399 399 rrl->rrl_len += name_len;
400 400 rrl->rrl_cnt++;
401 401 }
402 402
403 403 (void) pthread_mutex_unlock(&ap->asru_lock);
404 404 }
405 405
406 406 bool_t
407 407 fmd_adm_rsrclist_1_svc(bool_t all,
408 408 struct fmd_rpc_rsrclist *rvp, struct svc_req *req)
409 409 {
410 410 rvp->rrl_buf.rrl_buf_len = 0;
411 411 rvp->rrl_buf.rrl_buf_val = NULL;
412 412 rvp->rrl_len = 0;
413 413 rvp->rrl_cnt = 0;
414 414 rvp->rrl_err = 0;
415 415 rvp->rrl_all = all;
416 416
417 417 if (fmd_rpc_deny(req))
418 418 rvp->rrl_err = FMD_ADM_ERR_PERM;
419 419 else
420 420 fmd_asru_hash_apply(fmd.d_asrus, fmd_adm_rsrclist_asru, rvp);
421 421
422 422 return (TRUE);
423 423 }
424 424
425 425 bool_t
426 426 fmd_adm_rsrcinfo_1_svc(char *fmri,
427 427 struct fmd_rpc_rsrcinfo *rvp, struct svc_req *req)
428 428 {
429 429 fmd_asru_t *ap;
430 430 fmd_case_impl_t *cip;
431 431 int state;
432 432
433 433 bzero(rvp, sizeof (struct fmd_rpc_rsrcinfo));
434 434
435 435 if (fmd_rpc_deny(req)) {
436 436 rvp->rri_err = FMD_ADM_ERR_PERM;
437 437 return (TRUE);
438 438 }
439 439
440 440 if ((ap = fmd_asru_hash_lookup_name(fmd.d_asrus, fmri)) == NULL) {
441 441 rvp->rri_err = FMD_ADM_ERR_RSRCSRCH;
442 442 return (TRUE);
443 443 }
444 444
445 445 state = fmd_asru_getstate(ap);
446 446 (void) pthread_mutex_lock(&ap->asru_lock);
447 447 cip = (fmd_case_impl_t *)ap->asru_case;
448 448
449 449 rvp->rri_fmri = strdup(ap->asru_name);
450 450 rvp->rri_uuid = strdup(ap->asru_uuid);
451 451 rvp->rri_case = cip ? strdup(cip->ci_uuid) : NULL;
452 452 rvp->rri_faulty = (state & FMD_ASRU_FAULTY) != 0;
453 453 rvp->rri_unusable = (state & FMD_ASRU_UNUSABLE) != 0;
454 454 rvp->rri_invisible = (ap->asru_flags & FMD_ASRU_INVISIBLE) != 0;
455 455
456 456 (void) pthread_mutex_unlock(&ap->asru_lock);
457 457 fmd_asru_hash_release(fmd.d_asrus, ap);
458 458
459 459 if (rvp->rri_fmri == NULL || rvp->rri_uuid == NULL)
460 460 rvp->rri_err = FMD_ADM_ERR_NOMEM;
461 461
462 462 return (TRUE);
463 463 }
464 464
465 465 static void
466 466 fmd_adm_do_repair(char *name, struct svc_req *req, int *errp, uint8_t reason,
467 467 char *uuid)
468 468 {
469 469 if (fmd_rpc_deny(req))
470 470 *errp = FMD_ADM_ERR_PERM;
471 471 else {
472 472 fmd_asru_rep_arg_t fara;
473 473 int err = FARA_ERR_RSRCNOTF;
474 474
475 475 fara.fara_reason = reason;
476 476 fara.fara_rval = &err;
477 477 fara.fara_uuid = uuid;
478 478 fara.fara_bywhat = FARA_BY_ASRU;
479 479 fmd_asru_hash_apply_by_asru(fmd.d_asrus, name,
480 480 fmd_asru_repaired, &fara);
481 481 fara.fara_bywhat = FARA_BY_LABEL;
482 482 fmd_asru_hash_apply_by_label(fmd.d_asrus, name,
483 483 fmd_asru_repaired, &fara);
484 484 fara.fara_bywhat = FARA_BY_FRU;
485 485 fmd_asru_hash_apply_by_fru(fmd.d_asrus, name,
486 486 fmd_asru_repaired, &fara);
487 487 fara.fara_bywhat = FARA_BY_RSRC;
488 488 fmd_asru_hash_apply_by_rsrc(fmd.d_asrus, name,
489 489 fmd_asru_repaired, &fara);
490 490 if (err == FARA_ERR_RSRCNOTR)
491 491 *errp = FMD_ADM_ERR_RSRCNOTR;
492 492 else if (err == FARA_OK)
493 493 *errp = 0;
494 494 }
495 495 }
496 496
497 497 bool_t
498 498 fmd_adm_rsrcflush_1_svc(char *name, int *rvp, struct svc_req *req)
499 499 {
500 500 int err = FMD_ADM_ERR_RSRCNOTF;
501 501
502 502 /*
503 503 * If anyone does an fmadm flush command, discard any resolved
504 504 * cases that were being retained for historic diagnosis.
505 505 */
506 506 if (fmd_rpc_deny(req))
507 507 err = FMD_ADM_ERR_PERM;
508 508 else {
509 509 fmd_asru_hash_apply_by_asru(fmd.d_asrus, name,
510 510 fmd_asru_flush, &err);
511 511 fmd_asru_hash_apply_by_label(fmd.d_asrus, name,
512 512 fmd_asru_flush, &err);
513 513 fmd_asru_hash_apply_by_fru(fmd.d_asrus, name,
514 514 fmd_asru_flush, &err);
515 515 fmd_asru_hash_apply_by_rsrc(fmd.d_asrus, name,
516 516 fmd_asru_flush, &err);
517 517 }
518 518 *rvp = err;
519 519 return (TRUE);
520 520 }
521 521
522 522 bool_t
523 523 fmd_adm_rsrcrepaired_1_svc(char *name, int *rvp, struct svc_req *req)
524 524 {
525 525 int err = FMD_ADM_ERR_RSRCNOTF;
526 526
527 527 fmd_adm_do_repair(name, req, &err, FMD_ASRU_REPAIRED, NULL);
528 528 *rvp = err;
529 529 return (TRUE);
530 530 }
531 531
532 532 bool_t
533 533 fmd_adm_rsrcreplaced_1_svc(char *name, int *rvp, struct svc_req *req)
534 534 {
535 535 int err = FMD_ADM_ERR_RSRCNOTF;
536 536
537 537 fmd_adm_do_repair(name, req, &err, FMD_ASRU_REPLACED, NULL);
538 538 *rvp = err;
539 539 return (TRUE);
540 540 }
541 541
542 542 bool_t
543 543 fmd_adm_rsrcacquit_1_svc(char *name, char *uuid, int *rvp, struct svc_req *req)
544 544 {
545 545 int err = FMD_ADM_ERR_RSRCNOTF;
546 546
547 547 fmd_adm_do_repair(name, req, &err, FMD_ASRU_ACQUITTED, uuid);
548 548 *rvp = err;
549 549 return (TRUE);
550 550 }
551 551
552 552 static void
553 553 fmd_adm_serdlist_measure(fmd_serd_eng_t *sgp, void *arg)
554 554 {
555 555 struct fmd_rpc_serdlist *rsl = arg;
556 556
557 557 rsl->rsl_len += strlen(sgp->sg_name) + 1;
558 558 rsl->rsl_cnt++;
559 559 }
560 560
561 561 static void
562 562 fmd_adm_serdlist_record(fmd_serd_eng_t *sgp, void *arg)
563 563 {
564 564 struct fmd_rpc_serdlist *rsl = arg;
565 565
566 566 bcopy(sgp->sg_name, rsl->rsl_buf.rsl_buf_val + rsl->rsl_len,
567 567 strlen(sgp->sg_name));
568 568 rsl->rsl_len += strlen(sgp->sg_name) + 1;
569 569 }
570 570
571 571 bool_t
572 572 fmd_adm_serdlist_1_svc(char *name, struct fmd_rpc_serdlist *rvp,
573 573 struct svc_req *req)
574 574 {
575 575 fmd_module_t *mp;
576 576 void *p;
577 577
578 578 rvp->rsl_buf.rsl_buf_len = 0;
579 579 rvp->rsl_buf.rsl_buf_val = NULL;
580 580 rvp->rsl_len = 0;
581 581 rvp->rsl_cnt = 0;
582 582 rvp->rsl_err = 0;
583 583
584 584 if (fmd_rpc_deny(req)) {
585 585 rvp->rsl_err = FMD_ADM_ERR_PERM;
586 586 return (TRUE);
587 587 }
588 588
589 589 if ((mp = fmd_modhash_lookup(fmd.d_mod_hash, name)) == NULL) {
590 590 rvp->rsl_err = FMD_ADM_ERR_MODSRCH;
591 591 return (TRUE);
592 592 }
593 593
594 594 fmd_module_lock(mp);
595 595 /* In the first pass, collect the overall length of the buffer. */
596 596 fmd_serd_hash_apply(&mp->mod_serds, fmd_adm_serdlist_measure, rvp);
597 597 if (rvp->rsl_len == 0) {
598 598 fmd_module_unlock(mp);
599 599 fmd_module_rele(mp);
600 600 return (TRUE);
601 601 }
602 602 p = malloc(rvp->rsl_len);
603 603 if (p) {
604 604 rvp->rsl_buf.rsl_buf_val = p;
605 605 rvp->rsl_buf.rsl_buf_len = rvp->rsl_len;
606 606 bzero(rvp->rsl_buf.rsl_buf_val, rvp->rsl_buf.rsl_buf_len);
607 607 rvp->rsl_len = 0;
608 608 /* In the second pass, populate the buffer with data. */
609 609 fmd_serd_hash_apply(&mp->mod_serds, fmd_adm_serdlist_record,
610 610 rvp);
611 611 } else {
612 612 rvp->rsl_err = FMD_ADM_ERR_NOMEM;
613 613 }
614 614 fmd_module_unlock(mp);
615 615
616 616 fmd_module_rele(mp);
617 617 return (TRUE);
618 618 }
619 619
620 620 static void
621 621 fmd_adm_serdinfo_record(fmd_serd_eng_t *sgp, struct fmd_rpc_serdinfo *rsi)
622 622 {
623 623 uint64_t old, now = fmd_time_gethrtime();
624 624 const fmd_serd_elem_t *oep;
625 625
626 626 if ((rsi->rsi_name = strdup(sgp->sg_name)) == NULL) {
627 627 rsi->rsi_err = FMD_ADM_ERR_NOMEM;
628 628 return;
629 629 }
630 630
631 631 if ((oep = fmd_list_next(&sgp->sg_list)) != NULL)
632 632 old = fmd_event_hrtime(oep->se_event);
633 633 else
634 634 old = now;
635 635
636 636 rsi->rsi_delta = now >= old ? now - old : (UINT64_MAX - old) + now + 1;
637 637 rsi->rsi_count = sgp->sg_count;
638 638 rsi->rsi_fired = fmd_serd_eng_fired(sgp) != 0;
639 639 rsi->rsi_n = sgp->sg_n;
640 640 rsi->rsi_t = sgp->sg_t;
641 641 }
642 642
643 643 bool_t
644 644 fmd_adm_serdinfo_1_svc(char *mname, char *sname, struct fmd_rpc_serdinfo *rvp,
645 645 struct svc_req *req)
646 646 {
647 647 fmd_module_t *mp;
648 648 fmd_serd_eng_t *sgp;
649 649
650 650 bzero(rvp, sizeof (struct fmd_rpc_serdinfo));
651 651
652 652 if (fmd_rpc_deny(req)) {
653 653 rvp->rsi_err = FMD_ADM_ERR_PERM;
654 654 return (TRUE);
655 655 }
656 656
657 657 if ((mp = fmd_modhash_lookup(fmd.d_mod_hash, mname)) == NULL) {
658 658 rvp->rsi_err = FMD_ADM_ERR_MODSRCH;
659 659 return (TRUE);
660 660 }
661 661
662 662 fmd_module_lock(mp);
663 663
664 664 if ((sgp = fmd_serd_eng_lookup(&mp->mod_serds, sname)) != NULL) {
665 665 fmd_adm_serdinfo_record(sgp, rvp);
666 666 } else
667 667 rvp->rsi_err = FMD_ADM_ERR_SERDSRCH;
668 668
669 669 fmd_module_unlock(mp);
670 670 fmd_module_rele(mp);
671 671
672 672 return (TRUE);
673 673 }
674 674
675 675 /*ARGSUSED*/
676 676 bool_t
677 677 fmd_adm_serdinfo_old_1_svc(char *name, struct fmd_rpc_serdlist *rvp,
678 678 struct svc_req *req)
679 679 {
680 680 return (FALSE);
681 681 }
682 682
683 683 bool_t
684 684 fmd_adm_serdreset_1_svc(char *mname, char *sname, int *rvp, struct svc_req *req)
685 685 {
686 686 fmd_module_t *mp;
687 687 fmd_serd_eng_t *sgp;
688 688 int err = 0;
689 689
690 690 if (fmd_rpc_deny(req)) {
691 691 *rvp = FMD_ADM_ERR_PERM;
692 692 return (TRUE);
693 693 }
694 694
695 695 if ((mp = fmd_modhash_lookup(fmd.d_mod_hash, mname)) == NULL) {
696 696 *rvp = FMD_ADM_ERR_MODSRCH;
697 697 return (TRUE);
698 698 }
699 699
700 700 fmd_module_lock(mp);
701 701
702 702 if ((sgp = fmd_serd_eng_lookup(&mp->mod_serds, sname)) != NULL) {
703 703 if (fmd_serd_eng_fired(sgp)) {
704 704 err = FMD_ADM_ERR_SERDFIRED;
705 705 } else {
706 706 fmd_serd_eng_reset(sgp);
707 707 fmd_module_setdirty(mp);
708 708 }
709 709 } else
710 710 err = FMD_ADM_ERR_SERDSRCH;
711 711
712 712 fmd_module_unlock(mp);
713 713 fmd_module_rele(mp);
714 714
715 715 *rvp = err;
716 716 return (TRUE);
717 717 }
718 718
719 719 bool_t
720 720 fmd_adm_logrotate_1_svc(char *name, int *rvp, struct svc_req *req)
721 721 {
722 722 fmd_log_t **lpp, *old, *new;
723 723 int try = 1, trylimit = 1;
724 724 pthread_rwlock_t *lockp;
725 725
726 726 hrtime_t nsec = 0;
727 727 timespec_t tv;
728 728
729 729 if (fmd_rpc_deny(req)) {
730 730 *rvp = FMD_ADM_ERR_PERM;
731 731 return (TRUE);
732 732 }
733 733
734 734 if (strcmp(name, "errlog") == 0) {
735 735 lpp = &fmd.d_errlog;
736 736 lockp = &fmd.d_log_lock;
737 737 } else if (strcmp(name, "fltlog") == 0) {
738 738 lpp = &fmd.d_fltlog;
739 739 lockp = &fmd.d_log_lock;
740 740 } else if (strcmp(name, "infolog") == 0) {
741 741 lpp = &fmd.d_ilog;
742 742 lockp = &fmd.d_ilog_lock;
743 743 } else if (strcmp(name, "infolog_hival") == 0) {
744 744 lpp = &fmd.d_hvilog;
745 745 lockp = &fmd.d_hvilog_lock;
746 746 } else {
747 747 *rvp = FMD_ADM_ERR_ROTSRCH;
748 748 return (TRUE);
749 749 }
750 750
751 751 (void) fmd_conf_getprop(fmd.d_conf, "log.tryrotate", &trylimit);
752 752 (void) fmd_conf_getprop(fmd.d_conf, "log.waitrotate", &nsec);
753 753
754 754 tv.tv_sec = nsec / NANOSEC;
755 755 tv.tv_nsec = nsec % NANOSEC;
756 756
757 757 /*
758 758 * To rotate a log file, grab d_log_lock as writer to make sure no
759 759 * one else can discover the current log pointer. Then try to rotate
760 760 * the log. If we're successful, release the old log pointer.
761 761 */
762 762 do {
763 763 if (try > 1)
764 764 (void) nanosleep(&tv, NULL); /* wait for checkpoints */
765 765
766 766 (void) pthread_rwlock_wrlock(lockp);
767 767 old = *lpp;
768 768
769 769 if ((new = fmd_log_rotate(old)) != NULL) {
770 770 fmd_log_rele(old);
771 771 *lpp = new;
772 772 }
773 773
774 774 (void) pthread_rwlock_unlock(lockp);
775 775
776 776 } while (new == NULL && errno == EFMD_LOG_ROTBUSY && try++ < trylimit);
777 777
778 778 if (new != NULL)
779 779 *rvp = 0;
780 780 else if (errno == EFMD_LOG_ROTBUSY)
781 781 *rvp = FMD_ADM_ERR_ROTBUSY;
782 782 else
783 783 *rvp = FMD_ADM_ERR_ROTFAIL;
784 784
785 785 return (TRUE);
786 786 }
787 787
788 788 bool_t
789 789 fmd_adm_caserepair_1_svc(char *uuid, int *rvp, struct svc_req *req)
790 790 {
791 791 fmd_case_t *cp = NULL;
792 792 int err = 0;
793 793
794 794 if (fmd_rpc_deny(req))
795 795 err = FMD_ADM_ERR_PERM;
796 796 else if ((cp = fmd_case_hash_lookup(fmd.d_cases, uuid)) == NULL)
797 797 err = FMD_ADM_ERR_CASESRCH;
798 798 else if (fmd_case_repair(cp) != 0) {
799 799 err = errno == EFMD_CASE_OWNER ?
800 800 FMD_ADM_ERR_CASEXPRT : FMD_ADM_ERR_CASEOPEN;
801 801 }
802 802
803 803 if (cp != NULL)
804 804 fmd_case_rele(cp);
805 805
806 806 *rvp = err;
807 807 return (TRUE);
808 808 }
809 809
810 810 bool_t
811 811 fmd_adm_caseacquit_1_svc(char *uuid, int *rvp, struct svc_req *req)
812 812 {
813 813 fmd_case_t *cp = NULL;
814 814 int err = 0;
815 815
816 816 if (fmd_rpc_deny(req))
817 817 err = FMD_ADM_ERR_PERM;
818 818 else if ((cp = fmd_case_hash_lookup(fmd.d_cases, uuid)) == NULL)
819 819 err = FMD_ADM_ERR_CASESRCH;
820 820 else if (fmd_case_acquit(cp) != 0) {
821 821 err = errno == EFMD_CASE_OWNER ?
822 822 FMD_ADM_ERR_CASEXPRT : FMD_ADM_ERR_CASEOPEN;
823 823 }
824 824
825 825 if (cp != NULL)
826 826 fmd_case_rele(cp);
827 827
828 828 *rvp = err;
829 829 return (TRUE);
830 830 }
831 831
832 832 void
833 833 fmd_adm_caselist_case(fmd_case_t *cp, void *arg)
834 834 {
835 835 fmd_case_impl_t *cip = (fmd_case_impl_t *)cp;
836 836 struct fmd_rpc_caselist *rcl = arg;
837 837 size_t uuid_len, buf_len;
838 838 void *p;
839 839
840 840 if (rcl->rcl_err != 0)
841 841 return;
842 842
843 843 /*
844 844 * skip invisible cases
845 845 */
846 846 if (cip->ci_flags & FMD_CF_INVISIBLE)
847 847 return;
848 848
849 849 /*
850 850 * Lock the case and reallocate rcl_buf[] to be large enough to hold
851 851 * another string, doubling it as needed. Then copy the new string
852 852 * on to the end, and increment rcl_len to indicate the used space.
853 853 */
854 854 if (!(cip->ci_flags & FMD_CF_SOLVED))
855 855 return;
856 856
857 857 (void) pthread_mutex_lock(&cip->ci_lock);
858 858
859 859 uuid_len = cip->ci_uuidlen + 1;
860 860
861 861 while (rcl->rcl_len + uuid_len > rcl->rcl_buf.rcl_buf_len) {
862 862 if (rcl->rcl_buf.rcl_buf_len != 0)
863 863 buf_len = rcl->rcl_buf.rcl_buf_len * 2;
864 864 else
865 865 buf_len = 1024; /* default buffer size */
866 866
867 867 if ((p = realloc(rcl->rcl_buf.rcl_buf_val, buf_len)) != NULL) {
868 868 bzero((char *)p + rcl->rcl_buf.rcl_buf_len,
869 869 buf_len - rcl->rcl_buf.rcl_buf_len);
870 870 rcl->rcl_buf.rcl_buf_val = p;
871 871 rcl->rcl_buf.rcl_buf_len = buf_len;
872 872 } else {
873 873 rcl->rcl_err = FMD_ADM_ERR_NOMEM;
874 874 break;
875 875 }
876 876 }
877 877
878 878 if (rcl->rcl_err == 0) {
879 879 bcopy(cip->ci_uuid, (char *)rcl->rcl_buf.rcl_buf_val +
880 880 rcl->rcl_len, uuid_len);
881 881 rcl->rcl_len += uuid_len;
882 882 rcl->rcl_cnt++;
883 883 }
884 884
885 885 (void) pthread_mutex_unlock(&cip->ci_lock);
886 886 }
887 887
888 888 bool_t
889 889 fmd_adm_caselist_1_svc(struct fmd_rpc_caselist *rvp, struct svc_req *req)
890 890 {
891 891 rvp->rcl_buf.rcl_buf_len = 0;
892 892 rvp->rcl_buf.rcl_buf_val = NULL;
893 893 rvp->rcl_len = 0;
894 894 rvp->rcl_cnt = 0;
895 895 rvp->rcl_err = 0;
896 896
897 897 if (fmd_rpc_deny(req))
898 898 rvp->rcl_err = FMD_ADM_ERR_PERM;
899 899 else
900 900 fmd_case_hash_apply(fmd.d_cases, fmd_adm_caselist_case, rvp);
901 901
902 902 return (TRUE);
903 903 }
904 904
905 905 bool_t
906 906 fmd_adm_caseinfo_1_svc(char *uuid, struct fmd_rpc_caseinfo *rvp,
907 907 struct svc_req *req)
908 908 {
909 909 fmd_case_t *cp;
910 910 nvlist_t *nvl;
911 911 int err = 0;
912 912
913 913 bzero(rvp, sizeof (struct fmd_rpc_caseinfo));
914 914
915 915 if (fmd_rpc_deny(req)) {
916 916 rvp->rci_err = FMD_ADM_ERR_PERM;
917 917 return (TRUE);
918 918 }
919 919
920 920 if ((cp = fmd_case_hash_lookup(fmd.d_cases, uuid)) == NULL) {
921 921 rvp->rci_err = FMD_ADM_ERR_CASESRCH;
922 922 return (TRUE);
923 923 }
924 924
925 925 if (!(((fmd_case_impl_t *)cp)->ci_flags & FMD_CF_SOLVED)) {
926 926 fmd_case_rele(cp);
927 927 rvp->rci_err = FMD_ADM_ERR_CASESRCH;
928 928 return (TRUE);
929 929 }
930 930
931 931 nvl = fmd_case_mkevent(cp, FM_LIST_SUSPECT_CLASS);
932 932
933 933 err = nvlist_pack(nvl, &rvp->rci_evbuf.rci_evbuf_val,
934 934 &rvp->rci_evbuf.rci_evbuf_len, NV_ENCODE_XDR, 0);
935 935
936 936 nvlist_free(nvl);
937 937
938 938 if (err != 0)
939 939 rvp->rci_err = FMD_ADM_ERR_NOMEM;
940 940
941 941 fmd_case_rele(cp);
942 942
943 943 return (TRUE);
944 944 }
945 945
946 946 /*ARGSUSED*/
947 947 static void
948 948 fmd_adm_xprtlist_one(fmd_idspace_t *ids, id_t id, void *arg)
949 949 {
950 950 struct fmd_rpc_xprtlist *rvp = arg;
951 951
952 952 if (rvp->rxl_len < rvp->rxl_buf.rxl_buf_len)
953 953 rvp->rxl_buf.rxl_buf_val[rvp->rxl_len++] = id;
954 954 }
955 955
956 956 bool_t
957 957 fmd_adm_xprtlist_1_svc(struct fmd_rpc_xprtlist *rvp, struct svc_req *req)
958 958 {
959 959 if (fmd_rpc_deny(req)) {
960 960 rvp->rxl_buf.rxl_buf_len = 0;
961 961 rvp->rxl_buf.rxl_buf_val = NULL;
962 962 rvp->rxl_len = 0;
963 963 rvp->rxl_err = FMD_ADM_ERR_PERM;
964 964 return (TRUE);
965 965 }
966 966
967 967 /*
968 968 * Since we're taking a snapshot of the transports, and these could
969 969 * change after we return our result, there's no need to hold any kind
970 970 * of lock between retrieving ids_count and taking the snapshot. We'll
971 971 * just capture up to a maximum of whatever ids_count value we sampled.
972 972 */
973 973 rvp->rxl_buf.rxl_buf_len = fmd.d_xprt_ids->ids_count;
974 974 rvp->rxl_buf.rxl_buf_val = malloc(sizeof (int32_t) *
975 975 rvp->rxl_buf.rxl_buf_len);
976 976 rvp->rxl_len = 0;
977 977 rvp->rxl_err = 0;
978 978
979 979 if (rvp->rxl_buf.rxl_buf_val == NULL) {
980 980 rvp->rxl_err = FMD_ADM_ERR_NOMEM;
981 981 return (TRUE);
982 982 }
983 983
984 984 fmd_idspace_apply(fmd.d_xprt_ids, fmd_adm_xprtlist_one, rvp);
985 985 return (TRUE);
986 986 }
987 987
988 988 bool_t
989 989 fmd_adm_xprtstat_1_svc(int32_t id,
990 990 struct fmd_rpc_modstat *rms, struct svc_req *req)
991 991 {
992 992 fmd_xprt_impl_t *xip;
993 993 fmd_stat_t *sp, *ep, *cp;
994 994
995 995 if (fmd_rpc_deny(req)) {
996 996 rms->rms_buf.rms_buf_val = NULL;
997 997 rms->rms_buf.rms_buf_len = 0;
998 998 rms->rms_err = FMD_ADM_ERR_PERM;
999 999 return (TRUE);
1000 1000 }
1001 1001
1002 1002 rms->rms_buf.rms_buf_val = malloc(sizeof (fmd_xprt_stat_t));
1003 1003 rms->rms_buf.rms_buf_len = sizeof (fmd_xprt_stat_t) /
1004 1004 sizeof (fmd_stat_t);
1005 1005 rms->rms_err = 0;
1006 1006
1007 1007 if (rms->rms_buf.rms_buf_val == NULL) {
1008 1008 rms->rms_err = FMD_ADM_ERR_NOMEM;
1009 1009 rms->rms_buf.rms_buf_len = 0;
1010 1010 return (TRUE);
1011 1011 }
1012 1012
1013 1013 if ((xip = fmd_idspace_hold(fmd.d_xprt_ids, id)) == NULL) {
1014 1014 rms->rms_err = FMD_ADM_ERR_XPRTSRCH;
1015 1015 return (TRUE);
1016 1016 }
1017 1017
1018 1018 /*
1019 1019 * Grab the stats lock and bcopy the entire transport stats array in
1020 1020 * one shot. Then go back through and duplicate any string values.
1021 1021 */
1022 1022 (void) pthread_mutex_lock(&xip->xi_stats_lock);
1023 1023
1024 1024 sp = (fmd_stat_t *)xip->xi_stats;
1025 1025 ep = sp + rms->rms_buf.rms_buf_len;
1026 1026 cp = rms->rms_buf.rms_buf_val;
1027 1027
1028 1028 bcopy(sp, cp, sizeof (fmd_xprt_stat_t));
1029 1029
1030 1030 for (; sp < ep; sp++, cp++) {
1031 1031 if (sp->fmds_type == FMD_TYPE_STRING &&
1032 1032 sp->fmds_value.str != NULL)
1033 1033 cp->fmds_value.str = strdup(sp->fmds_value.str);
1034 1034 }
1035 1035
1036 1036 (void) pthread_mutex_unlock(&xip->xi_stats_lock);
1037 1037 fmd_idspace_rele(fmd.d_xprt_ids, id);
1038 1038
1039 1039 return (TRUE);
1040 1040 }
1041 1041
1042 1042 int
1043 1043 fmd_adm_1_freeresult(SVCXPRT *xprt, xdrproc_t proc, caddr_t data)
1044 1044 {
1045 1045 xdr_free(proc, data);
1046 1046 svc_done(xprt);
1047 1047 return (TRUE);
1048 1048 }
1049 1049
1050 1050 /*
1051 1051 * Custom XDR routine for our API structure fmd_stat_t. This function must
1052 1052 * match the definition of fmd_stat_t in <fmd_api.h> and must also match
1053 1053 * the corresponding routine in usr/src/lib/fm/libfmd_adm/common/fmd_adm.c.
1054 1054 */
1055 1055 bool_t
1056 1056 xdr_fmd_stat(XDR *xp, fmd_stat_t *sp)
1057 1057 {
1058 1058 bool_t rv = TRUE;
1059 1059
1060 1060 rv &= xdr_opaque(xp, sp->fmds_name, sizeof (sp->fmds_name));
1061 1061 rv &= xdr_u_int(xp, &sp->fmds_type);
1062 1062 rv &= xdr_opaque(xp, sp->fmds_desc, sizeof (sp->fmds_desc));
1063 1063
1064 1064 switch (sp->fmds_type) {
1065 1065 case FMD_TYPE_BOOL:
1066 - rv &= xdr_int(xp, &sp->fmds_value.bool);
1066 + rv &= xdr_int(xp, &sp->fmds_value.b);
1067 1067 break;
1068 1068 case FMD_TYPE_INT32:
1069 1069 rv &= xdr_int32_t(xp, &sp->fmds_value.i32);
1070 1070 break;
1071 1071 case FMD_TYPE_UINT32:
1072 1072 rv &= xdr_uint32_t(xp, &sp->fmds_value.ui32);
1073 1073 break;
1074 1074 case FMD_TYPE_INT64:
1075 1075 rv &= xdr_int64_t(xp, &sp->fmds_value.i64);
1076 1076 break;
1077 1077 case FMD_TYPE_UINT64:
1078 1078 case FMD_TYPE_TIME:
1079 1079 case FMD_TYPE_SIZE:
1080 1080 rv &= xdr_uint64_t(xp, &sp->fmds_value.ui64);
1081 1081 break;
1082 1082 case FMD_TYPE_STRING:
1083 1083 rv &= xdr_string(xp, &sp->fmds_value.str, ~0);
1084 1084 break;
1085 1085 }
1086 1086
1087 1087 return (rv);
1088 1088 }
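For reference, the consumer-visible side of the rename lives in the fmd_stat_t declaration in fmd_api.h; this file only follows it. A hedged sketch of that declaration, reconstructed from the serialization order in xdr_fmd_stat() above (xdr_int() implies an int member; the array sizes and comments are assumptions, not the verbatim header):

    typedef struct fmd_stat {
            char fmds_name[32];             /* statistic name (size assumed) */
            uint_t fmds_type;               /* statistic type, e.g. FMD_TYPE_BOOL */
            char fmds_desc[64];             /* statistic description (size assumed) */
            union {
                    int b;                  /* FMD_TYPE_BOOL (formerly "bool") */
                    int32_t i32;            /* FMD_TYPE_INT32 */
                    uint32_t ui32;          /* FMD_TYPE_UINT32 */
                    int64_t i64;            /* FMD_TYPE_INT64 */
                    uint64_t ui64;          /* FMD_TYPE_UINT64 / TIME / SIZE */
                    char *str;              /* FMD_TYPE_STRING */
            } fmds_value;
    } fmd_stat_t;

Any code that touched fmds_value.bool directly needs the same rename; within this file, the XDR routine above is the only such use.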