--- old/usr/src/lib/libc_db/common/thread_db.c
+++ new/usr/src/lib/libc_db/common/thread_db.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
24 24 * Use is subject to license terms.
25 25 */
26 26
27 27 /*
28 28 * Copyright (c) 2014, Joyent, Inc. All rights reserved.
29 29 * Copyright 2023 Oxide Computer Company
30 30 */
31 31
32 32 #include <stdio.h>
33 33 #include <stdlib.h>
34 34 #include <stddef.h>
35 35 #include <unistd.h>
36 36 #include <thr_uberdata.h>
37 37 #include <thread_db.h>
38 38 #include <libc_int.h>
39 39
40 40 /*
41 41 * Private structures.
42 42 */
43 43
44 44 typedef union {
45 45 mutex_t lock;
46 46 rwlock_t rwlock;
47 47 sema_t semaphore;
48 48 cond_t condition;
49 49 } td_so_un_t;
50 50
51 51 struct td_thragent {
52 52 rwlock_t rwlock;
53 53 struct ps_prochandle *ph_p;
54 54 int initialized;
55 55 int sync_tracking;
56 56 int model;
57 57 int primary_map;
58 58 psaddr_t bootstrap_addr;
59 59 psaddr_t uberdata_addr;
60 60 psaddr_t tdb_eventmask_addr;
61 61 psaddr_t tdb_register_sync_addr;
62 62 psaddr_t tdb_events[TD_MAX_EVENT_NUM - TD_MIN_EVENT_NUM + 1];
63 63 psaddr_t hash_table_addr;
64 64 int hash_size;
65 65 lwpid_t single_lwpid;
66 66 psaddr_t single_ulwp_addr;
67 67 };
68 68
69 69 /*
70 70 * This is the name of the variable in libc that contains
71 71 * the uberdata address that we will need.
72 72 */
73 73 #define TD_BOOTSTRAP_NAME "_tdb_bootstrap"
74 74 /*
75 75 * This is the actual name of uberdata, used in the event
76 76 * that tdb_bootstrap has not yet been initialized.
77 77 */
78 78 #define TD_UBERDATA_NAME "_uberdata"
79 79 /*
80 80 * The library name should end with ".so.1", but older versions of
81 81 * dbx expect the unadorned name and malfunction if ".1" is specified.
82 82 * Unfortunately, if ".1" is not specified, mdb malfunctions when it
83 83 * is applied to another instance of itself (due to the presence of
84 84 * /usr/lib/mdb/proc/libc.so). So we try it both ways.
85 85 */
86 86 #define TD_LIBRARY_NAME "libc.so"
87 87 #define TD_LIBRARY_NAME_1 "libc.so.1"
88 88
89 89 td_err_e __td_thr_get_info(td_thrhandle_t *th_p, td_thrinfo_t *ti_p);
90 90
91 91 td_err_e __td_ta_thr_iter(td_thragent_t *ta_p, td_thr_iter_f *cb,
92 92 void *cbdata_p, td_thr_state_e state, int ti_pri,
93 93 sigset_t *ti_sigmask_p, unsigned ti_user_flags);
94 94
95 95 /*
96 96 * Initialize threads debugging interface.
97 97 */
98 98 #pragma weak td_init = __td_init
99 99 td_err_e
100 100 __td_init()
101 101 {
102 102 return (TD_OK);
103 103 }
104 104
105 105 /*
106 106 * This function does nothing, and never did.
107 107 * But the symbol is in the ABI, so we can't delete it.
108 108 */
109 109 #pragma weak td_log = __td_log
110 110 void
111 111 __td_log()
112 112 {
113 113 }
114 114
115 115 /*
116 116 * Short-cut to read just the hash table size from the process,
117 117 * to avoid repeatedly reading the full uberdata structure when
118 118 * dealing with a single-threaded process.
119 119 */
120 120 static uint_t
121 121 td_read_hash_size(td_thragent_t *ta_p)
122 122 {
123 123 psaddr_t addr;
124 124 uint_t hash_size;
125 125
126 126 switch (ta_p->initialized) {
127 127 default: /* uninitialized */
128 128 return (0);
129 129 case 1: /* partially initialized */
130 130 break;
131 131 case 2: /* fully initialized */
132 132 return (ta_p->hash_size);
133 133 }
134 134
135 135 if (ta_p->model == PR_MODEL_NATIVE) {
136 136 addr = ta_p->uberdata_addr + offsetof(uberdata_t, hash_size);
137 137 } else {
138 138 #if defined(_LP64) && defined(_SYSCALL32)
139 139 addr = ta_p->uberdata_addr + offsetof(uberdata32_t, hash_size);
140 140 #else
141 141 addr = 0;
142 142 #endif
143 143 }
144 144 if (ps_pdread(ta_p->ph_p, addr, &hash_size, sizeof (hash_size))
145 145 != PS_OK)
146 146 return (0);
147 147 return (hash_size);
148 148 }
149 149
150 150 static td_err_e
151 151 td_read_uberdata(td_thragent_t *ta_p)
152 152 {
153 153 struct ps_prochandle *ph_p = ta_p->ph_p;
154 154 int i;
155 155
156 156 if (ta_p->model == PR_MODEL_NATIVE) {
157 157 uberdata_t uberdata;
158 158
159 159 if (ps_pdread(ph_p, ta_p->uberdata_addr,
160 160 &uberdata, sizeof (uberdata)) != PS_OK)
161 161 return (TD_DBERR);
162 162 ta_p->primary_map = uberdata.primary_map;
163 163 ta_p->tdb_eventmask_addr = ta_p->uberdata_addr +
164 164 offsetof(uberdata_t, tdb.tdb_ev_global_mask);
165 165 ta_p->tdb_register_sync_addr = ta_p->uberdata_addr +
166 166 offsetof(uberdata_t, uberflags.uf_tdb_register_sync);
167 167 ta_p->hash_table_addr = (psaddr_t)uberdata.thr_hash_table;
168 168 ta_p->hash_size = uberdata.hash_size;
169 169 if (ps_pdread(ph_p, (psaddr_t)uberdata.tdb.tdb_events,
170 170 ta_p->tdb_events, sizeof (ta_p->tdb_events)) != PS_OK)
171 171 return (TD_DBERR);
172 172 } else {
173 173 #if defined(_LP64) && defined(_SYSCALL32)
174 174 uberdata32_t uberdata;
175 175 caddr32_t tdb_events[TD_MAX_EVENT_NUM - TD_MIN_EVENT_NUM + 1];
176 176
177 177 if (ps_pdread(ph_p, ta_p->uberdata_addr,
178 178 &uberdata, sizeof (uberdata)) != PS_OK)
179 179 return (TD_DBERR);
180 180 ta_p->primary_map = uberdata.primary_map;
181 181 ta_p->tdb_eventmask_addr = ta_p->uberdata_addr +
182 182 offsetof(uberdata32_t, tdb.tdb_ev_global_mask);
183 183 ta_p->tdb_register_sync_addr = ta_p->uberdata_addr +
184 184 offsetof(uberdata32_t, uberflags.uf_tdb_register_sync);
185 185 ta_p->hash_table_addr = (psaddr_t)uberdata.thr_hash_table;
186 186 ta_p->hash_size = uberdata.hash_size;
187 187 if (ps_pdread(ph_p, (psaddr_t)uberdata.tdb.tdb_events,
188 188 tdb_events, sizeof (tdb_events)) != PS_OK)
189 189 return (TD_DBERR);
190 190 for (i = 0; i < TD_MAX_EVENT_NUM - TD_MIN_EVENT_NUM + 1; i++)
191 191 ta_p->tdb_events[i] = tdb_events[i];
192 192 #else
193 193 return (TD_DBERR);
194 194 #endif
195 195 }
196 196
197 197 /*
198 198 * Unfortunately, we are (implicitly) assuming that our uberdata
199 199 * definition precisely matches that of our target. If this is not
200 200 * true (that is, if we're examining a core file from a foreign
201 201 * system that has a different definition of uberdata), the failure
202 202 * modes can be frustratingly non-explicit. In an effort to catch
203 203 * this upon initialization (when the debugger may still be able to
204 204 * opt for another thread model or may be able to fail explicitly), we
205 205 * check that each of our tdb_events points to valid memory (these are
206 206 * putatively text upon which a breakpoint can be issued), with the
207 207 * hope that this is enough of a self-consistency check to lead to
208 208 * explicit failure on a mismatch.
209 209 */
210 210 for (i = 0; i < TD_MAX_EVENT_NUM - TD_MIN_EVENT_NUM + 1; i++) {
211 211 uint8_t check;
212 212
213 213 if (ps_pdread(ph_p, (psaddr_t)ta_p->tdb_events[i],
214 214 &check, sizeof (check)) != PS_OK) {
215 215 return (TD_DBERR);
216 216 }
217 217 }
218 218
219 219 if (ta_p->hash_size != 1) { /* multi-threaded */
220 220 ta_p->initialized = 2;
221 221 ta_p->single_lwpid = 0;
222 222 ta_p->single_ulwp_addr = 0;
223 223 } else { /* single-threaded */
224 224 ta_p->initialized = 1;
225 225 /*
226 226 * Get the address and lwpid of the single thread/LWP.
227 227 * It may not be ulwp_one if this is a child of fork1().
228 228 */
229 229 if (ta_p->model == PR_MODEL_NATIVE) {
230 230 thr_hash_table_t head;
231 231 lwpid_t lwpid = 0;
232 232
233 233 if (ps_pdread(ph_p, ta_p->hash_table_addr,
234 234 &head, sizeof (head)) != PS_OK)
235 235 return (TD_DBERR);
236 236 if ((psaddr_t)head.hash_bucket == 0)
237 237 ta_p->initialized = 0;
238 238 else if (ps_pdread(ph_p, (psaddr_t)head.hash_bucket +
239 239 offsetof(ulwp_t, ul_lwpid),
240 240 &lwpid, sizeof (lwpid)) != PS_OK)
241 241 return (TD_DBERR);
242 242 ta_p->single_lwpid = lwpid;
243 243 ta_p->single_ulwp_addr = (psaddr_t)head.hash_bucket;
244 244 } else {
245 245 #if defined(_LP64) && defined(_SYSCALL32)
246 246 thr_hash_table32_t head;
247 247 lwpid_t lwpid = 0;
248 248
249 249 if (ps_pdread(ph_p, ta_p->hash_table_addr,
250 250 &head, sizeof (head)) != PS_OK)
251 251 return (TD_DBERR);
252 252 if ((psaddr_t)head.hash_bucket == 0)
253 253 ta_p->initialized = 0;
254 254 else if (ps_pdread(ph_p, (psaddr_t)head.hash_bucket +
255 255 offsetof(ulwp32_t, ul_lwpid),
256 256 &lwpid, sizeof (lwpid)) != PS_OK)
257 257 return (TD_DBERR);
258 258 ta_p->single_lwpid = lwpid;
259 259 ta_p->single_ulwp_addr = (psaddr_t)head.hash_bucket;
260 260 #else
261 261 return (TD_DBERR);
262 262 #endif
263 263 }
264 264 }
265 265 if (!ta_p->primary_map)
266 266 ta_p->initialized = 0;
267 267 return (TD_OK);
268 268 }
269 269
270 270 static td_err_e
271 271 td_read_bootstrap_data(td_thragent_t *ta_p)
272 272 {
273 273 struct ps_prochandle *ph_p = ta_p->ph_p;
274 274 psaddr_t bootstrap_addr;
275 275 psaddr_t uberdata_addr;
276 276 ps_err_e db_return;
277 277 td_err_e return_val;
278 278 int do_1;
279 279
280 280 switch (ta_p->initialized) {
281 281 case 2: /* fully initialized */
282 282 return (TD_OK);
283 283 case 1: /* partially initialized */
284 284 if (td_read_hash_size(ta_p) == 1)
285 285 return (TD_OK);
286 286 return (td_read_uberdata(ta_p));
287 287 }
288 288
289 289 /*
290 290 * Uninitialized -- do the startup work.
291 291 * We set ta_p->initialized to -1 to cut off recursive calls
292 292 * into libc_db by code in the provider of ps_pglobal_lookup().
293 293 */
294 294 do_1 = 0;
295 295 ta_p->initialized = -1;
296 296 db_return = ps_pglobal_lookup(ph_p, TD_LIBRARY_NAME,
297 297 TD_BOOTSTRAP_NAME, &bootstrap_addr);
298 298 if (db_return == PS_NOSYM) {
299 299 do_1 = 1;
300 300 db_return = ps_pglobal_lookup(ph_p, TD_LIBRARY_NAME_1,
301 301 TD_BOOTSTRAP_NAME, &bootstrap_addr);
302 302 }
303 303 if (db_return == PS_NOSYM) /* libc is not linked yet */
304 304 return (TD_NOLIBTHREAD);
305 305 if (db_return != PS_OK)
306 306 return (TD_ERR);
307 307 db_return = ps_pglobal_lookup(ph_p,
308 308 do_1? TD_LIBRARY_NAME_1 : TD_LIBRARY_NAME,
309 309 TD_UBERDATA_NAME, &uberdata_addr);
310 310 if (db_return == PS_NOSYM) /* libc is not linked yet */
311 311 return (TD_NOLIBTHREAD);
312 312 if (db_return != PS_OK)
313 313 return (TD_ERR);
314 314
315 315 /*
316 316 * Read the uberdata address into the thread agent structure.
317 317 */
318 318 if (ta_p->model == PR_MODEL_NATIVE) {
319 319 psaddr_t psaddr;
320 320 if (ps_pdread(ph_p, bootstrap_addr,
321 321 &psaddr, sizeof (psaddr)) != PS_OK)
322 322 return (TD_DBERR);
323 323 if ((ta_p->bootstrap_addr = psaddr) == 0)
324 324 psaddr = uberdata_addr;
325 325 else if (ps_pdread(ph_p, psaddr,
326 326 &psaddr, sizeof (psaddr)) != PS_OK)
327 327 return (TD_DBERR);
328 328 if (psaddr == 0) {
329 329 /* primary linkmap in the tgt is not initialized */
330 330 ta_p->bootstrap_addr = 0;
331 331 psaddr = uberdata_addr;
332 332 }
333 333 ta_p->uberdata_addr = psaddr;
334 334 } else {
335 335 #if defined(_LP64) && defined(_SYSCALL32)
336 336 caddr32_t psaddr;
337 337 if (ps_pdread(ph_p, bootstrap_addr,
338 338 &psaddr, sizeof (psaddr)) != PS_OK)
339 339 return (TD_DBERR);
340 340 if ((ta_p->bootstrap_addr = (psaddr_t)psaddr) == 0)
341 341 psaddr = (caddr32_t)uberdata_addr;
342 342 else if (ps_pdread(ph_p, (psaddr_t)psaddr,
343 343 &psaddr, sizeof (psaddr)) != PS_OK)
344 344 return (TD_DBERR);
345 345 if (psaddr == 0) {
346 346 /* primary linkmap in the tgt is not initialized */
347 347 ta_p->bootstrap_addr = 0;
348 348 psaddr = (caddr32_t)uberdata_addr;
349 349 }
350 350 ta_p->uberdata_addr = (psaddr_t)psaddr;
351 351 #else
352 352 return (TD_DBERR);
353 353 #endif /* _SYSCALL32 */
354 354 }
355 355
356 356 if ((return_val = td_read_uberdata(ta_p)) != TD_OK)
357 357 return (return_val);
358 358 if (ta_p->bootstrap_addr == 0)
359 359 ta_p->initialized = 0;
360 360 return (TD_OK);
361 361 }
362 362
363 363 #pragma weak ps_kill
364 364 #pragma weak ps_lrolltoaddr
365 365
366 366 /*
367 367 * Allocate a new agent process handle ("thread agent").
368 368 */
369 369 #pragma weak td_ta_new = __td_ta_new
370 370 td_err_e
371 371 __td_ta_new(struct ps_prochandle *ph_p, td_thragent_t **ta_pp)
372 372 {
373 373 td_thragent_t *ta_p;
374 374 int model;
375 375 td_err_e return_val = TD_OK;
376 376
377 377 if (ph_p == NULL)
378 378 return (TD_BADPH);
379 379 if (ta_pp == NULL)
380 380 return (TD_ERR);
381 381 *ta_pp = NULL;
382 382 if (ps_pstop(ph_p) != PS_OK)
383 383 return (TD_DBERR);
384 384 /*
385 385 * ps_pdmodel might not be defined if this is an older client.
386 386 * Make it a weak symbol and test if it exists before calling.
387 387 */
388 388 #pragma weak ps_pdmodel
389 389 if (ps_pdmodel == NULL) {
390 390 model = PR_MODEL_NATIVE;
391 391 } else if (ps_pdmodel(ph_p, &model) != PS_OK) {
392 392 (void) ps_pcontinue(ph_p);
393 393 return (TD_ERR);
394 394 }
395 395 if ((ta_p = malloc(sizeof (*ta_p))) == NULL) {
396 396 (void) ps_pcontinue(ph_p);
397 397 return (TD_MALLOC);
398 398 }
399 399
400 400 /*
401 401 * Initialize the agent process handle.
402 402 * Pick up the symbol value we need from the target process.
403 403 */
404 404 (void) memset(ta_p, 0, sizeof (*ta_p));
405 405 ta_p->ph_p = ph_p;
406 406 (void) rwlock_init(&ta_p->rwlock, USYNC_THREAD, NULL);
407 407 ta_p->model = model;
408 408 return_val = td_read_bootstrap_data(ta_p);
409 409
410 410 /*
411 411 * Because the old libthread_db enabled lock tracking by default,
412 412 * we must also do it. However, we do it only if the application
413 413 * provides the ps_kill() and ps_lrolltoaddr() interfaces.
414 414 * (dbx provides the ps_kill() and ps_lrolltoaddr() interfaces.)
415 415 */
416 416 if (return_val == TD_OK && ps_kill != NULL && ps_lrolltoaddr != NULL) {
417 417 register_sync_t oldenable;
418 418 register_sync_t enable = REGISTER_SYNC_ENABLE;
419 419 psaddr_t psaddr = ta_p->tdb_register_sync_addr;
420 420
421 421 if (ps_pdread(ph_p, psaddr,
422 422 &oldenable, sizeof (oldenable)) != PS_OK)
423 423 return_val = TD_DBERR;
424 424 else if (oldenable != REGISTER_SYNC_OFF ||
425 425 ps_pdwrite(ph_p, psaddr,
426 426 &enable, sizeof (enable)) != PS_OK) {
427 427 /*
428 428 * Lock tracking was already enabled or we
429 429 * failed to enable it, probably because we
430 430 * are examining a core file. In either case
431 431 * set the sync_tracking flag non-zero to
432 432 * indicate that we should not attempt to
433 433 * disable lock tracking when we delete the
434 434 * agent process handle in td_ta_delete().
435 435 */
436 436 ta_p->sync_tracking = 1;
437 437 }
438 438 }
439 439
440 440 if (return_val == TD_OK)
441 441 *ta_pp = ta_p;
442 442 else
443 443 free(ta_p);
444 444
445 445 (void) ps_pcontinue(ph_p);
446 446 return (return_val);
447 447 }
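/*
 * A minimal usage sketch, not part of thread_db.c itself: a debugger
 * client on illumos typically lets libproc supply the ps_* callbacks
 * (ps_pglobal_lookup, ps_pdread, ps_pstop, ...) that libc_db requires,
 * grabs the target with Pgrab(), and only then calls td_ta_new().
 * The function name attach_agent is illustrative only.
 */
#include <sys/types.h>
#include <libproc.h>
#include <thread_db.h>

static td_thragent_t *
attach_agent(pid_t pid)
{
	struct ps_prochandle *P;
	td_thragent_t *ta;
	int perr;

	if ((P = Pgrab(pid, 0, &perr)) == NULL)
		return (NULL);
	if (td_init() != TD_OK || td_ta_new(P, &ta) != TD_OK) {
		Prelease(P, 0);
		return (NULL);
	}
	return (ta);	/* caller later calls td_ta_delete() and Prelease() */
}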
448 448
449 449 /*
450 450 * Utility function to grab the readers lock and return the prochandle,
451 451 * given an agent process handle. Performs standard error checking.
452 452 * Returns non-NULL with the lock held, or NULL with the lock not held.
453 453 */
454 454 static struct ps_prochandle *
455 455 ph_lock_ta(td_thragent_t *ta_p, td_err_e *err)
456 456 {
457 457 struct ps_prochandle *ph_p = NULL;
458 458 td_err_e error;
459 459
460 460 if (ta_p == NULL || ta_p->initialized == -1) {
461 461 *err = TD_BADTA;
462 462 } else if (rw_rdlock(&ta_p->rwlock) != 0) { /* can't happen? */
463 463 *err = TD_BADTA;
464 464 } else if ((ph_p = ta_p->ph_p) == NULL) {
465 465 (void) rw_unlock(&ta_p->rwlock);
466 466 *err = TD_BADPH;
467 467 } else if (ta_p->initialized != 2 &&
468 468 (error = td_read_bootstrap_data(ta_p)) != TD_OK) {
469 469 (void) rw_unlock(&ta_p->rwlock);
470 470 ph_p = NULL;
471 471 *err = error;
472 472 } else {
473 473 *err = TD_OK;
474 474 }
475 475
476 476 return (ph_p);
477 477 }
478 478
479 479 /*
480 480 * Utility function to grab the readers lock and return the prochandle,
481 481 * given an agent thread handle. Performs standard error checking.
482 482 * Returns non-NULL with the lock held, or NULL with the lock not held.
483 483 */
484 484 static struct ps_prochandle *
485 485 ph_lock_th(const td_thrhandle_t *th_p, td_err_e *err)
486 486 {
487 487 if (th_p == NULL || th_p->th_unique == 0) {
488 488 *err = TD_BADTH;
489 489 return (NULL);
490 490 }
491 491 return (ph_lock_ta(th_p->th_ta_p, err));
492 492 }
493 493
494 494 /*
495 495 * Utility function to grab the readers lock and return the prochandle,
496 496 * given a synchronization object handle. Performs standard error checking.
497 497 * Returns non-NULL with the lock held, or NULL with the lock not held.
498 498 */
499 499 static struct ps_prochandle *
500 500 ph_lock_sh(const td_synchandle_t *sh_p, td_err_e *err)
501 501 {
502 502 if (sh_p == NULL || sh_p->sh_unique == 0) {
503 503 *err = TD_BADSH;
504 504 return (NULL);
505 505 }
506 506 return (ph_lock_ta(sh_p->sh_ta_p, err));
507 507 }
508 508
509 509 /*
510 510 * Unlock the agent process handle obtained from ph_lock_*().
511 511 */
512 512 static void
513 513 ph_unlock(td_thragent_t *ta_p)
514 514 {
515 515 (void) rw_unlock(&ta_p->rwlock);
516 516 }
517 517
518 518 /*
519 519 * De-allocate an agent process handle,
520 520 * releasing all related resources.
521 521 *
522 522 * XXX -- This is hopelessly broken ---
523 523 * Storage for thread agent is not deallocated. The prochandle
524 524 * in the thread agent is set to NULL so that future uses of
525 525 * the thread agent can be detected and an error value returned.
526 526 * All functions in the external user interface that make
527 527 * use of the thread agent are expected
528 528 * to check for a NULL prochandle in the thread agent.
529 529 * All such functions are also expected to obtain a
530 530 * reader lock on the thread agent while it is using it.
531 531 */
532 532 #pragma weak td_ta_delete = __td_ta_delete
533 533 td_err_e
534 534 __td_ta_delete(td_thragent_t *ta_p)
535 535 {
536 536 struct ps_prochandle *ph_p;
537 537
538 538 /*
539 539 * This is the only place we grab the writer lock.
540 540 * We are going to NULL out the prochandle.
541 541 */
542 542 if (ta_p == NULL || rw_wrlock(&ta_p->rwlock) != 0)
543 543 return (TD_BADTA);
544 544 if ((ph_p = ta_p->ph_p) == NULL) {
545 545 (void) rw_unlock(&ta_p->rwlock);
546 546 return (TD_BADPH);
547 547 }
548 548 /*
549 549 * If synch. tracking was disabled when td_ta_new() was called and
550 550 * if td_ta_sync_tracking_enable() was never called, then disable
551 551 * synch. tracking (it was enabled by default in td_ta_new()).
552 552 */
553 553 if (ta_p->sync_tracking == 0 &&
554 554 ps_kill != NULL && ps_lrolltoaddr != NULL) {
555 555 register_sync_t enable = REGISTER_SYNC_DISABLE;
556 556
557 557 (void) ps_pdwrite(ph_p, ta_p->tdb_register_sync_addr,
558 558 &enable, sizeof (enable));
559 559 }
560 560 ta_p->ph_p = NULL;
561 561 (void) rw_unlock(&ta_p->rwlock);
562 562 return (TD_OK);
563 563 }
564 564
565 565 /*
566 566 * Map an agent process handle to a client prochandle.
567 567 * Currently unused by dbx.
568 568 */
569 569 #pragma weak td_ta_get_ph = __td_ta_get_ph
570 570 td_err_e
571 571 __td_ta_get_ph(td_thragent_t *ta_p, struct ps_prochandle **ph_pp)
572 572 {
573 573 td_err_e return_val;
574 574
575 575 if (ph_pp != NULL) /* protect stupid callers */
576 576 *ph_pp = NULL;
577 577 if (ph_pp == NULL)
578 578 return (TD_ERR);
579 579 if ((*ph_pp = ph_lock_ta(ta_p, &return_val)) == NULL)
580 580 return (return_val);
581 581 ph_unlock(ta_p);
582 582 return (TD_OK);
583 583 }
584 584
585 585 /*
586 586 * Set the process's suggested concurrency level.
587 587 * This is a no-op in a one-level model.
588 588 * Currently unused by dbx.
589 589 */
590 590 #pragma weak td_ta_setconcurrency = __td_ta_setconcurrency
591 591 /* ARGSUSED1 */
592 592 td_err_e
593 593 __td_ta_setconcurrency(const td_thragent_t *ta_p, int level)
594 594 {
595 595 if (ta_p == NULL)
596 596 return (TD_BADTA);
597 597 if (ta_p->ph_p == NULL)
598 598 return (TD_BADPH);
599 599 return (TD_OK);
600 600 }
601 601
602 602 /*
603 603 * Get the number of threads in the process.
604 604 */
605 605 #pragma weak td_ta_get_nthreads = __td_ta_get_nthreads
606 606 td_err_e
607 607 __td_ta_get_nthreads(td_thragent_t *ta_p, int *nthread_p)
608 608 {
609 609 struct ps_prochandle *ph_p;
610 610 td_err_e return_val;
611 611 int nthreads;
612 612 int nzombies;
613 613 psaddr_t nthreads_addr;
614 614 psaddr_t nzombies_addr;
615 615
616 616 if (ta_p->model == PR_MODEL_NATIVE) {
617 617 nthreads_addr = ta_p->uberdata_addr +
618 618 offsetof(uberdata_t, nthreads);
619 619 nzombies_addr = ta_p->uberdata_addr +
620 620 offsetof(uberdata_t, nzombies);
621 621 } else {
622 622 #if defined(_LP64) && defined(_SYSCALL32)
623 623 nthreads_addr = ta_p->uberdata_addr +
624 624 offsetof(uberdata32_t, nthreads);
625 625 nzombies_addr = ta_p->uberdata_addr +
626 626 offsetof(uberdata32_t, nzombies);
627 627 #else
628 628 nthreads_addr = 0;
629 629 nzombies_addr = 0;
630 630 #endif /* _SYSCALL32 */
631 631 }
632 632
633 633 if (nthread_p == NULL)
634 634 return (TD_ERR);
635 635 if ((ph_p = ph_lock_ta(ta_p, &return_val)) == NULL)
636 636 return (return_val);
637 637 if (ps_pdread(ph_p, nthreads_addr, &nthreads, sizeof (int)) != PS_OK)
638 638 return_val = TD_DBERR;
639 639 if (ps_pdread(ph_p, nzombies_addr, &nzombies, sizeof (int)) != PS_OK)
640 640 return_val = TD_DBERR;
641 641 ph_unlock(ta_p);
642 642 if (return_val == TD_OK)
643 643 *nthread_p = nthreads + nzombies;
644 644 return (return_val);
645 645 }
646 646
647 647 typedef struct {
648 648 thread_t tid;
649 649 int found;
650 650 td_thrhandle_t th;
651 651 } td_mapper_param_t;
652 652
653 653 /*
654 654 * Check the value in data against the thread id.
655 655 * If it matches, return 1 to terminate iterations.
656 656 * This function is used by td_ta_map_id2thr() to map a tid to a thread handle.
657 657 */
658 658 static int
659 659 td_mapper_id2thr(td_thrhandle_t *th_p, td_mapper_param_t *data)
660 660 {
661 661 td_thrinfo_t ti;
662 662
663 663 if (__td_thr_get_info(th_p, &ti) == TD_OK &&
664 664 data->tid == ti.ti_tid) {
665 665 data->found = 1;
666 666 data->th = *th_p;
667 667 return (1);
668 668 }
669 669 return (0);
670 670 }
671 671
672 672 /*
673 673 * Given a thread identifier, return the corresponding thread handle.
674 674 */
675 675 #pragma weak td_ta_map_id2thr = __td_ta_map_id2thr
676 676 td_err_e
677 677 __td_ta_map_id2thr(td_thragent_t *ta_p, thread_t tid,
678 678 td_thrhandle_t *th_p)
679 679 {
680 680 td_err_e return_val;
681 681 td_mapper_param_t data;
682 682
683 683 if (th_p != NULL && /* optimize for a single thread */
684 684 ta_p != NULL &&
685 685 ta_p->initialized == 1 &&
686 686 (td_read_hash_size(ta_p) == 1 ||
687 687 td_read_uberdata(ta_p) == TD_OK) &&
688 688 ta_p->initialized == 1 &&
689 689 ta_p->single_lwpid == tid) {
690 690 th_p->th_ta_p = ta_p;
691 691 if ((th_p->th_unique = ta_p->single_ulwp_addr) == 0)
692 692 return (TD_NOTHR);
693 693 return (TD_OK);
694 694 }
695 695
696 696 /*
697 697 * LOCKING EXCEPTION - Locking is not required here because
698 698 * the locking and checking will be done in __td_ta_thr_iter.
699 699 */
700 700
701 701 if (ta_p == NULL)
702 702 return (TD_BADTA);
703 703 if (th_p == NULL)
704 704 return (TD_BADTH);
705 705 if (tid == 0)
706 706 return (TD_NOTHR);
707 707
708 708 data.tid = tid;
709 709 data.found = 0;
710 710 return_val = __td_ta_thr_iter(ta_p,
711 711 (td_thr_iter_f *)td_mapper_id2thr, (void *)&data,
712 712 TD_THR_ANY_STATE, TD_THR_LOWEST_PRIORITY,
713 713 TD_SIGNO_MASK, TD_THR_ANY_USER_FLAGS);
714 714 if (return_val == TD_OK) {
715 715 if (data.found == 0)
716 716 return_val = TD_NOTHR;
717 717 else
718 718 *th_p = data.th;
719 719 }
720 720
721 721 return (return_val);
722 722 }
723 723
724 724 /*
725 725 * Map the address of a synchronization object to a sync. object handle.
726 726 */
727 727 #pragma weak td_ta_map_addr2sync = __td_ta_map_addr2sync
728 728 td_err_e
729 729 __td_ta_map_addr2sync(td_thragent_t *ta_p, psaddr_t addr, td_synchandle_t *sh_p)
730 730 {
731 731 struct ps_prochandle *ph_p;
732 732 td_err_e return_val;
733 733 uint16_t sync_magic;
734 734
735 735 if (sh_p == NULL)
736 736 return (TD_BADSH);
737 737 if (addr == 0)
738 738 return (TD_ERR);
739 739 if ((ph_p = ph_lock_ta(ta_p, &return_val)) == NULL)
740 740 return (return_val);
741 741 /*
742 742 * Check the magic number of the sync. object to make sure it's valid.
743 743 * The magic number is at the same offset for all sync. objects.
744 744 */
745 745 if (ps_pdread(ph_p, (psaddr_t)&((mutex_t *)addr)->mutex_magic,
746 746 &sync_magic, sizeof (sync_magic)) != PS_OK) {
747 747 ph_unlock(ta_p);
748 748 return (TD_BADSH);
749 749 }
750 750 ph_unlock(ta_p);
751 751 if (sync_magic != MUTEX_MAGIC && sync_magic != COND_MAGIC &&
752 752 sync_magic != SEMA_MAGIC && sync_magic != RWL_MAGIC)
753 753 return (TD_BADSH);
754 754 /*
755 755 * Just fill in the appropriate fields of the sync. handle.
756 756 */
757 757 sh_p->sh_ta_p = (td_thragent_t *)ta_p;
758 758 sh_p->sh_unique = addr;
759 759 return (TD_OK);
760 760 }
761 761
762 762 /*
763 763 * Iterate over the set of global TSD keys.
764 764 * The call back function is called with three arguments,
765 765 * a key, a pointer to the destructor function, and the cbdata pointer.
766 766 * Currently unused by dbx.
767 767 */
768 768 #pragma weak td_ta_tsd_iter = __td_ta_tsd_iter
769 769 td_err_e
770 770 __td_ta_tsd_iter(td_thragent_t *ta_p, td_key_iter_f *cb, void *cbdata_p)
771 771 {
772 772 struct ps_prochandle *ph_p;
773 773 td_err_e return_val;
774 774 int key;
775 775 int numkeys;
776 776 psaddr_t dest_addr;
777 777 psaddr_t *destructors = NULL;
778 778 PFrV destructor;
779 779
780 780 if (cb == NULL)
781 781 return (TD_ERR);
782 782 if ((ph_p = ph_lock_ta(ta_p, &return_val)) == NULL)
783 783 return (return_val);
784 784 if (ps_pstop(ph_p) != PS_OK) {
785 785 ph_unlock(ta_p);
786 786 return (TD_DBERR);
787 787 }
788 788
789 789 if (ta_p->model == PR_MODEL_NATIVE) {
790 790 tsd_metadata_t tsdm;
791 791
792 792 if (ps_pdread(ph_p,
793 793 ta_p->uberdata_addr + offsetof(uberdata_t, tsd_metadata),
794 794 &tsdm, sizeof (tsdm)) != PS_OK)
795 795 return_val = TD_DBERR;
796 796 else {
797 797 numkeys = tsdm.tsdm_nused;
798 798 dest_addr = (psaddr_t)tsdm.tsdm_destro;
799 799 if (numkeys > 0)
800 800 destructors =
801 801 malloc(numkeys * sizeof (psaddr_t));
802 802 }
803 803 } else {
804 804 #if defined(_LP64) && defined(_SYSCALL32)
805 805 tsd_metadata32_t tsdm;
806 806
807 807 if (ps_pdread(ph_p,
808 808 ta_p->uberdata_addr + offsetof(uberdata32_t, tsd_metadata),
809 809 &tsdm, sizeof (tsdm)) != PS_OK)
810 810 return_val = TD_DBERR;
811 811 else {
812 812 numkeys = tsdm.tsdm_nused;
813 813 dest_addr = (psaddr_t)tsdm.tsdm_destro;
814 814 if (numkeys > 0)
815 815 destructors =
816 816 malloc(numkeys * sizeof (caddr32_t));
817 817 }
818 818 #else
819 819 return_val = TD_DBERR;
820 820 #endif /* _SYSCALL32 */
821 821 }
822 822
823 823 if (return_val != TD_OK || numkeys <= 0) {
824 824 (void) ps_pcontinue(ph_p);
825 825 ph_unlock(ta_p);
826 826 return (return_val);
827 827 }
828 828
829 829 if (destructors == NULL)
830 830 return_val = TD_MALLOC;
831 831 else if (ta_p->model == PR_MODEL_NATIVE) {
832 832 if (ps_pdread(ph_p, dest_addr,
833 833 destructors, numkeys * sizeof (psaddr_t)) != PS_OK)
834 834 return_val = TD_DBERR;
835 835 else {
836 836 for (key = 1; key < numkeys; key++) {
837 837 destructor = (PFrV)destructors[key];
838 838 if (destructor != TSD_UNALLOCATED &&
839 839 (*cb)(key, destructor, cbdata_p))
840 840 break;
841 841 }
842 842 }
843 843 #if defined(_LP64) && defined(_SYSCALL32)
844 844 } else {
845 845 caddr32_t *destructors32 = (caddr32_t *)destructors;
846 846 caddr32_t destruct32;
847 847
848 848 if (ps_pdread(ph_p, dest_addr,
849 849 destructors32, numkeys * sizeof (caddr32_t)) != PS_OK)
850 850 return_val = TD_DBERR;
851 851 else {
852 852 for (key = 1; key < numkeys; key++) {
853 853 destruct32 = destructors32[key];
854 854 if ((destruct32 !=
855 855 (caddr32_t)(uintptr_t)TSD_UNALLOCATED) &&
856 856 (*cb)(key, (PFrV)(uintptr_t)destruct32,
857 857 cbdata_p))
858 858 break;
859 859 }
860 860 }
861 861 #endif /* _SYSCALL32 */
862 862 }
863 863
864 864 if (destructors)
865 865 free(destructors);
866 866 (void) ps_pcontinue(ph_p);
867 867 ph_unlock(ta_p);
868 868 return (return_val);
869 869 }
870 870
871 871 int
872 872 sigequalset(const sigset_t *s1, const sigset_t *s2)
873 873 {
874 874 return (
875 875 s1->__sigbits[0] == s2->__sigbits[0] &&
876 876 s1->__sigbits[1] == s2->__sigbits[1] &&
877 877 s1->__sigbits[2] == s2->__sigbits[2] &&
878 878 s1->__sigbits[3] == s2->__sigbits[3]);
879 879 }
880 880
881 881 /*
882 882 * Description:
883 883 * Iterate over all threads. For each thread call
884 884 * the function pointed to by "cb" with a pointer
885 885 * to a thread handle, and a pointer to data which
886 886 * can be NULL. Only call td_thr_iter_f() on threads
887 887 * which match the properties of state, ti_pri,
888 888 * ti_sigmask_p, and ti_user_flags. If cb returns
889 889 * a non-zero value, terminate iterations.
890 890 *
891 891 * Input:
892 892 * *ta_p - thread agent
893 893 * *cb - call back function defined by user.
894 894 * td_thr_iter_f() takes a thread handle and
895 895 * cbdata_p as a parameter.
896 896 * cbdata_p - parameter for td_thr_iter_f().
897 897 *
898 898 * state - state of threads of interest. A value of
899 899 * TD_THR_ANY_STATE from enum td_thr_state_e
900 900 * does not restrict iterations by state.
901 901 * ti_pri - lower bound of priorities of threads of
902 902 * interest. A value of TD_THR_LOWEST_PRIORITY
903 903 * defined in thread_db.h does not restrict
904 904 * iterations by priority. A thread with priority
905 905 * less than ti_pri will NOT be passed to the callback
906 906 * function.
907 907 * ti_sigmask_p - signal mask of threads of interest.
908 908 * A value of TD_SIGNO_MASK defined in thread_db.h
909 909 * does not restrict iterations by signal mask.
910 910 * ti_user_flags - user flags of threads of interest. A
911 911 * value of TD_THR_ANY_USER_FLAGS defined in thread_db.h
912 912 * does not restrict iterations by user flags.
913 913 */
914 914 #pragma weak td_ta_thr_iter = __td_ta_thr_iter
915 915 td_err_e
916 916 __td_ta_thr_iter(td_thragent_t *ta_p, td_thr_iter_f *cb,
917 917 void *cbdata_p, td_thr_state_e state, int ti_pri,
918 918 sigset_t *ti_sigmask_p, unsigned ti_user_flags)
919 919 {
920 920 struct ps_prochandle *ph_p;
921 921 psaddr_t first_lwp_addr;
922 922 psaddr_t first_zombie_addr;
923 923 psaddr_t curr_lwp_addr;
924 924 psaddr_t next_lwp_addr;
925 925 td_thrhandle_t th;
926 926 ps_err_e db_return;
927 927 ps_err_e db_return2;
928 928 td_err_e return_val;
929 929
930 930 if (cb == NULL)
931 931 return (TD_ERR);
932 932 /*
933 933 * If state is not within bound, short circuit.
934 934 */
935 935 if (state < TD_THR_ANY_STATE || state > TD_THR_STOPPED_ASLEEP)
936 936 return (TD_OK);
937 937
938 938 if ((ph_p = ph_lock_ta(ta_p, &return_val)) == NULL)
939 939 return (return_val);
940 940 if (ps_pstop(ph_p) != PS_OK) {
941 941 ph_unlock(ta_p);
942 942 return (TD_DBERR);
943 943 }
944 944
945 945 /*
946 946 * For each ulwp_t in the circular linked lists pointed
947 947 * to by "all_lwps" and "all_zombies":
948 948 * (1) Filter each thread.
949 949 * (2) Create the thread_object for each thread that passes.
950 950 * (3) Call the call back function on each thread.
951 951 */
952 952
953 953 if (ta_p->model == PR_MODEL_NATIVE) {
954 954 db_return = ps_pdread(ph_p,
955 955 ta_p->uberdata_addr + offsetof(uberdata_t, all_lwps),
956 956 &first_lwp_addr, sizeof (first_lwp_addr));
957 957 db_return2 = ps_pdread(ph_p,
958 958 ta_p->uberdata_addr + offsetof(uberdata_t, all_zombies),
959 959 &first_zombie_addr, sizeof (first_zombie_addr));
960 960 } else {
961 961 #if defined(_LP64) && defined(_SYSCALL32)
962 962 caddr32_t addr32;
963 963
964 964 db_return = ps_pdread(ph_p,
965 965 ta_p->uberdata_addr + offsetof(uberdata32_t, all_lwps),
966 966 &addr32, sizeof (addr32));
967 967 first_lwp_addr = addr32;
968 968 db_return2 = ps_pdread(ph_p,
969 969 ta_p->uberdata_addr + offsetof(uberdata32_t, all_zombies),
970 970 &addr32, sizeof (addr32));
971 971 first_zombie_addr = addr32;
972 972 #else /* _SYSCALL32 */
973 973 db_return = PS_ERR;
974 974 db_return2 = PS_ERR;
975 975 #endif /* _SYSCALL32 */
976 976 }
977 977 if (db_return == PS_OK)
978 978 db_return = db_return2;
979 979
980 980 /*
981 981 * If first_lwp_addr and first_zombie_addr are both NULL,
982 982 * libc must not yet be initialized or all threads have
983 983 * exited. Return TD_NOTHR and all will be well.
984 984 */
985 985 if (db_return == PS_OK &&
986 986 first_lwp_addr == 0 && first_zombie_addr == 0) {
987 987 (void) ps_pcontinue(ph_p);
988 988 ph_unlock(ta_p);
989 989 return (TD_NOTHR);
990 990 }
991 991 if (db_return != PS_OK) {
992 992 (void) ps_pcontinue(ph_p);
993 993 ph_unlock(ta_p);
994 994 return (TD_DBERR);
995 995 }
996 996
997 997 /*
998 998 * Run down the lists of all living and dead lwps.
999 999 */
1000 1000 if (first_lwp_addr == 0)
1001 1001 first_lwp_addr = first_zombie_addr;
1002 1002 curr_lwp_addr = first_lwp_addr;
1003 1003 for (;;) {
1004 1004 td_thr_state_e ts_state;
1005 1005 int userpri;
1006 1006 unsigned userflags;
1007 1007 sigset_t mask;
1008 1008
1009 1009 /*
1010 1010 * Read the ulwp struct.
1011 1011 */
1012 1012 if (ta_p->model == PR_MODEL_NATIVE) {
1013 1013 ulwp_t ulwp;
1014 1014
1015 1015 if (ps_pdread(ph_p, curr_lwp_addr,
1016 1016 &ulwp, sizeof (ulwp)) != PS_OK &&
1017 1017 ((void) memset(&ulwp, 0, sizeof (ulwp)),
1018 1018 ps_pdread(ph_p, curr_lwp_addr,
1019 1019 &ulwp, REPLACEMENT_SIZE)) != PS_OK) {
1020 1020 return_val = TD_DBERR;
1021 1021 break;
1022 1022 }
1023 1023 next_lwp_addr = (psaddr_t)ulwp.ul_forw;
1024 1024
1025 1025 ts_state = ulwp.ul_dead? TD_THR_ZOMBIE :
1026 1026 ulwp.ul_stop? TD_THR_STOPPED :
1027 1027 ulwp.ul_wchan? TD_THR_SLEEP :
1028 1028 TD_THR_ACTIVE;
1029 1029 userpri = ulwp.ul_pri;
1030 1030 userflags = ulwp.ul_usropts;
1031 1031 if (ulwp.ul_dead)
1032 1032 (void) sigemptyset(&mask);
1033 1033 else
1034 1034 mask = *(sigset_t *)&ulwp.ul_sigmask;
1035 1035 } else {
1036 1036 #if defined(_LP64) && defined(_SYSCALL32)
1037 1037 ulwp32_t ulwp;
1038 1038
1039 1039 if (ps_pdread(ph_p, curr_lwp_addr,
1040 1040 &ulwp, sizeof (ulwp)) != PS_OK &&
1041 1041 ((void) memset(&ulwp, 0, sizeof (ulwp)),
1042 1042 ps_pdread(ph_p, curr_lwp_addr,
1043 1043 &ulwp, REPLACEMENT_SIZE32)) != PS_OK) {
1044 1044 return_val = TD_DBERR;
1045 1045 break;
1046 1046 }
1047 1047 next_lwp_addr = (psaddr_t)ulwp.ul_forw;
1048 1048
1049 1049 ts_state = ulwp.ul_dead? TD_THR_ZOMBIE :
1050 1050 ulwp.ul_stop? TD_THR_STOPPED :
1051 1051 ulwp.ul_wchan? TD_THR_SLEEP :
1052 1052 TD_THR_ACTIVE;
1053 1053 userpri = ulwp.ul_pri;
1054 1054 userflags = ulwp.ul_usropts;
1055 1055 if (ulwp.ul_dead)
1056 1056 (void) sigemptyset(&mask);
1057 1057 else
1058 1058 mask = *(sigset_t *)&ulwp.ul_sigmask;
1059 1059 #else /* _SYSCALL32 */
1060 1060 return_val = TD_ERR;
1061 1061 break;
1062 1062 #endif /* _SYSCALL32 */
1063 1063 }
1064 1064
1065 1065 /*
1066 1066 * Filter on state, priority, sigmask, and user flags.
1067 1067 */
1068 1068
1069 1069 if ((state != ts_state) &&
1070 1070 (state != TD_THR_ANY_STATE))
1071 1071 goto advance;
1072 1072
1073 1073 if (ti_pri > userpri)
1074 1074 goto advance;
1075 1075
1076 1076 if (ti_sigmask_p != TD_SIGNO_MASK &&
1077 1077 !sigequalset(ti_sigmask_p, &mask))
1078 1078 goto advance;
1079 1079
1080 1080 if (ti_user_flags != userflags &&
1081 1081 ti_user_flags != (unsigned)TD_THR_ANY_USER_FLAGS)
1082 1082 goto advance;
1083 1083
1084 1084 /*
1085 1085 * Call back - break if the return
1086 1086 * from the call back is non-zero.
1087 1087 */
1088 1088 th.th_ta_p = (td_thragent_t *)ta_p;
1089 1089 th.th_unique = curr_lwp_addr;
1090 1090 if ((*cb)(&th, cbdata_p))
1091 1091 break;
1092 1092
1093 1093 advance:
1094 1094 if ((curr_lwp_addr = next_lwp_addr) == first_lwp_addr) {
1095 1095 /*
1096 1096 * Switch to the zombie list, unless it is NULL
1097 1097 * or we have already been doing the zombie list,
1098 1098 * in which case terminate the loop.
1099 1099 */
1100 1100 if (first_zombie_addr == 0 ||
1101 1101 first_lwp_addr == first_zombie_addr)
1102 1102 break;
1103 1103 curr_lwp_addr = first_lwp_addr = first_zombie_addr;
1104 1104 }
1105 1105 }
1106 1106
1107 1107 (void) ps_pcontinue(ph_p);
1108 1108 ph_unlock(ta_p);
1109 1109 return (return_val);
1110 1110 }
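/*
 * A minimal usage sketch, not part of thread_db.c: enumerate every
 * thread in the target via td_ta_thr_iter() and print its id and
 * state, assuming a thread agent "ta" was already obtained from
 * td_ta_new().  The cast in the callback mirrors the non-const
 * __td_thr_get_info() prototype above; print_thread and list_threads
 * are illustrative names only.
 */
static int
print_thread(const td_thrhandle_t *th_p, void *ignored)
{
	td_thrinfo_t ti;

	if (td_thr_get_info((td_thrhandle_t *)th_p, &ti) == TD_OK)
		(void) printf("thread %d state %d\n",
		    (int)ti.ti_tid, (int)ti.ti_state);
	return (0);		/* zero means continue iterating */
}

static td_err_e
list_threads(td_thragent_t *ta)
{
	return (td_ta_thr_iter(ta, print_thread, NULL, TD_THR_ANY_STATE,
	    TD_THR_LOWEST_PRIORITY, TD_SIGNO_MASK, TD_THR_ANY_USER_FLAGS));
}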
1111 1111
1112 1112 /*
1113 1113 * Enable or disable process synchronization object tracking.
1114 1114 * Currently unused by dbx.
1115 1115 */
1116 1116 #pragma weak td_ta_sync_tracking_enable = __td_ta_sync_tracking_enable
1117 1117 td_err_e
1118 1118 __td_ta_sync_tracking_enable(td_thragent_t *ta_p, int onoff)
1119 1119 {
1120 1120 struct ps_prochandle *ph_p;
1121 1121 td_err_e return_val;
1122 1122 register_sync_t enable;
1123 1123
1124 1124 if ((ph_p = ph_lock_ta(ta_p, &return_val)) == NULL)
1125 1125 return (return_val);
1126 1126 /*
1127 1127 * Values of tdb_register_sync in the victim process:
1128 1128 * REGISTER_SYNC_ENABLE enables registration of synch objects
1129 1129 * REGISTER_SYNC_DISABLE disables registration of synch objects
1130 1130 * These cause the table to be cleared and tdb_register_sync set to:
1131 1131 * REGISTER_SYNC_ON registration in effect
1132 1132 * REGISTER_SYNC_OFF registration not in effect
1133 1133 */
1134 1134 enable = onoff? REGISTER_SYNC_ENABLE : REGISTER_SYNC_DISABLE;
1135 1135 if (ps_pdwrite(ph_p, ta_p->tdb_register_sync_addr,
1136 1136 &enable, sizeof (enable)) != PS_OK)
1137 1137 return_val = TD_DBERR;
1138 1138 /*
1139 1139 * Remember that this interface was called (see td_ta_delete()).
1140 1140 */
1141 1141 ta_p->sync_tracking = 1;
1142 1142 ph_unlock(ta_p);
1143 1143 return (return_val);
1144 1144 }
1145 1145
1146 1146 /*
1147 1147 * Iterate over all known synchronization variables.
1148 1148 * It is very possible that the list generated is incomplete,
1149 1149 * because the iterator can only find synchronization variables
1150 1150 * that have been registered by the process since synchronization
1151 1151 * object registration was enabled.
1152 1152 * The call back function cb is called for each synchronization
1153 1153 * variable with two arguments: a pointer to the synchronization
1154 1154 * handle and the passed-in argument cbdata.
1155 1155 * If cb returns a non-zero value, iterations are terminated.
1156 1156 */
1157 1157 #pragma weak td_ta_sync_iter = __td_ta_sync_iter
1158 1158 td_err_e
1159 1159 __td_ta_sync_iter(td_thragent_t *ta_p, td_sync_iter_f *cb, void *cbdata)
1160 1160 {
1161 1161 struct ps_prochandle *ph_p;
1162 1162 td_err_e return_val;
1163 1163 int i;
1164 1164 register_sync_t enable;
1165 1165 psaddr_t next_desc;
1166 1166 tdb_sync_stats_t sync_stats;
1167 1167 td_synchandle_t synchandle;
1168 1168 psaddr_t psaddr;
1169 1169 void *vaddr;
1170 1170 uint64_t *sync_addr_hash = NULL;
1171 1171
1172 1172 if (cb == NULL)
1173 1173 return (TD_ERR);
1174 1174 if ((ph_p = ph_lock_ta(ta_p, &return_val)) == NULL)
1175 1175 return (return_val);
1176 1176 if (ps_pstop(ph_p) != PS_OK) {
1177 1177 ph_unlock(ta_p);
1178 1178 return (TD_DBERR);
1179 1179 }
1180 1180 if (ps_pdread(ph_p, ta_p->tdb_register_sync_addr,
1181 1181 &enable, sizeof (enable)) != PS_OK) {
1182 1182 return_val = TD_DBERR;
1183 1183 goto out;
1184 1184 }
1185 1185 if (enable != REGISTER_SYNC_ON)
1186 1186 goto out;
1187 1187
1188 1188 /*
1189 1189 * First read the hash table.
1190 1190 * The hash table is large; allocate with mmap().
1191 1191 */
1192 1192 if ((vaddr = mmap(NULL, TDB_HASH_SIZE * sizeof (uint64_t),
1193 1193 PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANON, -1, (off_t)0))
1194 1194 == MAP_FAILED) {
1195 1195 return_val = TD_MALLOC;
1196 1196 goto out;
1197 1197 }
1198 1198 sync_addr_hash = vaddr;
1199 1199
1200 1200 if (ta_p->model == PR_MODEL_NATIVE) {
1201 1201 if (ps_pdread(ph_p, ta_p->uberdata_addr +
1202 1202 offsetof(uberdata_t, tdb.tdb_sync_addr_hash),
1203 1203 &psaddr, sizeof (&psaddr)) != PS_OK) {
1204 1204 return_val = TD_DBERR;
1205 1205 goto out;
1206 1206 }
1207 1207 } else {
1208 1208 #ifdef _SYSCALL32
1209 1209 caddr32_t addr;
1210 1210
1211 1211 if (ps_pdread(ph_p, ta_p->uberdata_addr +
1212 1212 offsetof(uberdata32_t, tdb.tdb_sync_addr_hash),
1213 1213 &addr, sizeof (addr)) != PS_OK) {
1214 1214 return_val = TD_DBERR;
1215 1215 goto out;
1216 1216 }
1217 1217 psaddr = addr;
1218 1218 #else
1219 1219 return_val = TD_ERR;
1220 1220 goto out;
1221 1221 #endif /* _SYSCALL32 */
1222 1222 }
1223 1223
1224 1224 if (psaddr == 0)
1225 1225 goto out;
1226 1226 if (ps_pdread(ph_p, psaddr, sync_addr_hash,
1227 1227 TDB_HASH_SIZE * sizeof (uint64_t)) != PS_OK) {
1228 1228 return_val = TD_DBERR;
1229 1229 goto out;
1230 1230 }
1231 1231
1232 1232 /*
1233 1233 * Now scan the hash table.
1234 1234 */
1235 1235 for (i = 0; i < TDB_HASH_SIZE; i++) {
1236 1236 for (next_desc = (psaddr_t)sync_addr_hash[i];
1237 1237 next_desc != 0;
1238 1238 next_desc = (psaddr_t)sync_stats.next) {
1239 1239 if (ps_pdread(ph_p, next_desc,
1240 1240 &sync_stats, sizeof (sync_stats)) != PS_OK) {
1241 1241 return_val = TD_DBERR;
1242 1242 goto out;
1243 1243 }
1244 1244 if (sync_stats.un.type == TDB_NONE) {
1245 1245 /* not registered since registration enabled */
1246 1246 continue;
1247 1247 }
1248 1248 synchandle.sh_ta_p = ta_p;
1249 1249 synchandle.sh_unique = (psaddr_t)sync_stats.sync_addr;
1250 1250 if ((*cb)(&synchandle, cbdata) != 0)
1251 1251 goto out;
1252 1252 }
1253 1253 }
1254 1254
1255 1255 out:
1256 1256 if (sync_addr_hash != NULL)
1257 1257 (void) munmap((void *)sync_addr_hash,
1258 1258 TDB_HASH_SIZE * sizeof (uint64_t));
1259 1259 (void) ps_pcontinue(ph_p);
1260 1260 ph_unlock(ta_p);
1261 1261 return (return_val);
1262 1262 }
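/*
 * A minimal usage sketch, not part of thread_db.c: count the
 * synchronization objects the target has registered so far.  This
 * assumes the td_sync_iter_f callback signature
 * (const td_synchandle_t *, void *); count_sync and count_syncs are
 * illustrative names only.
 */
static int
count_sync(const td_synchandle_t *sh_p, void *arg)
{
	if (sh_p->sh_unique != 0)
		(*(int *)arg)++;
	return (0);		/* zero means continue iterating */
}

static int
count_syncs(td_thragent_t *ta)
{
	int count = 0;

	(void) td_ta_sync_iter(ta, count_sync, &count);
	return (count);
}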
1263 1263
1264 1264 /*
1265 1265 * Enable process statistics collection.
1266 1266 */
1267 1267 #pragma weak td_ta_enable_stats = __td_ta_enable_stats
1268 1268 /* ARGSUSED */
1269 1269 td_err_e
1270 1270 __td_ta_enable_stats(const td_thragent_t *ta_p, int onoff)
1271 1271 {
1272 1272 return (TD_NOCAPAB);
1273 1273 }
1274 1274
1275 1275 /*
1276 1276 * Reset process statistics.
1277 1277 */
1278 1278 #pragma weak td_ta_reset_stats = __td_ta_reset_stats
1279 1279 /* ARGSUSED */
1280 1280 td_err_e
1281 1281 __td_ta_reset_stats(const td_thragent_t *ta_p)
1282 1282 {
1283 1283 return (TD_NOCAPAB);
1284 1284 }
1285 1285
1286 1286 /*
1287 1287 * Read process statistics.
1288 1288 */
1289 1289 #pragma weak td_ta_get_stats = __td_ta_get_stats
1290 1290 /* ARGSUSED */
1291 1291 td_err_e
1292 1292 __td_ta_get_stats(const td_thragent_t *ta_p, td_ta_stats_t *tstats)
1293 1293 {
1294 1294 return (TD_NOCAPAB);
1295 1295 }
1296 1296
1297 1297 /*
1298 1298 * Transfer information from lwp struct to thread information struct.
1299 1299 * XXX -- lots of this needs cleaning up.
1300 1300 */
1301 1301 static void
1302 1302 td_thr2to(td_thragent_t *ta_p, psaddr_t ts_addr,
1303 1303 ulwp_t *ulwp, td_thrinfo_t *ti_p)
1304 1304 {
1305 1305 lwpid_t lwpid;
1306 1306
1307 1307 if ((lwpid = ulwp->ul_lwpid) == 0)
1308 1308 lwpid = 1;
1309 1309 (void) memset(ti_p, 0, sizeof (*ti_p));
1310 1310 ti_p->ti_ta_p = ta_p;
1311 1311 ti_p->ti_user_flags = ulwp->ul_usropts;
1312 1312 ti_p->ti_tid = lwpid;
1313 1313 ti_p->ti_exitval = ulwp->ul_rval;
1314 1314 ti_p->ti_startfunc = (psaddr_t)ulwp->ul_startpc;
1315 1315 if (!ulwp->ul_dead) {
1316 1316 /*
1317 1317 * The bloody fools got this backwards!
1318 1318 */
1319 1319 ti_p->ti_stkbase = (psaddr_t)ulwp->ul_stktop;
1320 1320 ti_p->ti_stksize = ulwp->ul_stksiz;
1321 1321 }
1322 1322 ti_p->ti_ro_area = ts_addr;
1323 1323 ti_p->ti_ro_size = ulwp->ul_replace?
1324 1324 REPLACEMENT_SIZE : sizeof (ulwp_t);
1325 1325 ti_p->ti_state = ulwp->ul_dead? TD_THR_ZOMBIE :
1326 1326 ulwp->ul_stop? TD_THR_STOPPED :
1327 1327 ulwp->ul_wchan? TD_THR_SLEEP :
1328 1328 TD_THR_ACTIVE;
1329 1329 ti_p->ti_db_suspended = 0;
1330 1330 ti_p->ti_type = TD_THR_USER;
1331 1331 ti_p->ti_sp = ulwp->ul_sp;
1332 1332 ti_p->ti_flags = 0;
1333 1333 ti_p->ti_pri = ulwp->ul_pri;
1334 1334 ti_p->ti_lid = lwpid;
1335 1335 if (!ulwp->ul_dead)
1336 1336 ti_p->ti_sigmask = ulwp->ul_sigmask;
1337 1337 ti_p->ti_traceme = 0;
1338 1338 ti_p->ti_preemptflag = 0;
1339 1339 ti_p->ti_pirecflag = 0;
1340 1340 (void) sigemptyset(&ti_p->ti_pending);
1341 1341 ti_p->ti_events = ulwp->ul_td_evbuf.eventmask;
1342 1342 }
1343 1343
1344 1344 #if defined(_LP64) && defined(_SYSCALL32)
1345 1345 static void
1346 1346 td_thr2to32(td_thragent_t *ta_p, psaddr_t ts_addr,
1347 1347 ulwp32_t *ulwp, td_thrinfo_t *ti_p)
1348 1348 {
1349 1349 lwpid_t lwpid;
1350 1350
1351 1351 if ((lwpid = ulwp->ul_lwpid) == 0)
1352 1352 lwpid = 1;
1353 1353 (void) memset(ti_p, 0, sizeof (*ti_p));
1354 1354 ti_p->ti_ta_p = ta_p;
1355 1355 ti_p->ti_user_flags = ulwp->ul_usropts;
1356 1356 ti_p->ti_tid = lwpid;
1357 1357 ti_p->ti_exitval = (void *)(uintptr_t)ulwp->ul_rval;
1358 1358 ti_p->ti_startfunc = (psaddr_t)ulwp->ul_startpc;
1359 1359 if (!ulwp->ul_dead) {
1360 1360 /*
1361 1361 * The bloody fools got this backwards!
1362 1362 */
1363 1363 ti_p->ti_stkbase = (psaddr_t)ulwp->ul_stktop;
1364 1364 ti_p->ti_stksize = ulwp->ul_stksiz;
1365 1365 }
1366 1366 ti_p->ti_ro_area = ts_addr;
1367 1367 ti_p->ti_ro_size = ulwp->ul_replace?
1368 1368 REPLACEMENT_SIZE32 : sizeof (ulwp32_t);
1369 1369 ti_p->ti_state = ulwp->ul_dead? TD_THR_ZOMBIE :
1370 1370 ulwp->ul_stop? TD_THR_STOPPED :
1371 1371 ulwp->ul_wchan? TD_THR_SLEEP :
1372 1372 TD_THR_ACTIVE;
1373 1373 ti_p->ti_db_suspended = 0;
1374 1374 ti_p->ti_type = TD_THR_USER;
1375 1375 ti_p->ti_sp = (uint32_t)ulwp->ul_sp;
1376 1376 ti_p->ti_flags = 0;
1377 1377 ti_p->ti_pri = ulwp->ul_pri;
1378 1378 ti_p->ti_lid = lwpid;
1379 1379 if (!ulwp->ul_dead)
1380 1380 ti_p->ti_sigmask = *(sigset_t *)&ulwp->ul_sigmask;
1381 1381 ti_p->ti_traceme = 0;
1382 1382 ti_p->ti_preemptflag = 0;
1383 1383 ti_p->ti_pirecflag = 0;
1384 1384 (void) sigemptyset(&ti_p->ti_pending);
1385 1385 ti_p->ti_events = ulwp->ul_td_evbuf.eventmask;
1386 1386 }
1387 1387 #endif /* _SYSCALL32 */
1388 1388
1389 1389 /*
1390 1390 * Get thread information.
1391 1391 */
1392 1392 #pragma weak td_thr_get_info = __td_thr_get_info
1393 1393 td_err_e
1394 1394 __td_thr_get_info(td_thrhandle_t *th_p, td_thrinfo_t *ti_p)
1395 1395 {
1396 1396 struct ps_prochandle *ph_p;
1397 1397 td_thragent_t *ta_p;
1398 1398 td_err_e return_val;
1399 1399 psaddr_t psaddr;
1400 1400
1401 1401 if (ti_p == NULL)
1402 1402 return (TD_ERR);
1403 1403 (void) memset(ti_p, 0, sizeof (*ti_p));
1404 1404
1405 1405 if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
1406 1406 return (return_val);
1407 1407 ta_p = th_p->th_ta_p;
1408 1408 if (ps_pstop(ph_p) != PS_OK) {
1409 1409 ph_unlock(ta_p);
1410 1410 return (TD_DBERR);
1411 1411 }
1412 1412
1413 1413 /*
1414 1414 * Read the ulwp struct from the process.
1415 1415 * Transfer the ulwp struct to the thread information struct.
1416 1416 */
1417 1417 psaddr = th_p->th_unique;
1418 1418 if (ta_p->model == PR_MODEL_NATIVE) {
1419 1419 ulwp_t ulwp;
1420 1420
1421 1421 if (ps_pdread(ph_p, psaddr, &ulwp, sizeof (ulwp)) != PS_OK &&
1422 1422 ((void) memset(&ulwp, 0, sizeof (ulwp)),
1423 1423 ps_pdread(ph_p, psaddr, &ulwp, REPLACEMENT_SIZE)) != PS_OK)
1424 1424 return_val = TD_DBERR;
1425 1425 else
1426 1426 td_thr2to(ta_p, psaddr, &ulwp, ti_p);
1427 1427 } else {
1428 1428 #if defined(_LP64) && defined(_SYSCALL32)
1429 1429 ulwp32_t ulwp;
1430 1430
1431 1431 if (ps_pdread(ph_p, psaddr, &ulwp, sizeof (ulwp)) != PS_OK &&
1432 1432 ((void) memset(&ulwp, 0, sizeof (ulwp)),
1433 1433 ps_pdread(ph_p, psaddr, &ulwp, REPLACEMENT_SIZE32)) !=
1434 1434 PS_OK)
1435 1435 return_val = TD_DBERR;
1436 1436 else
1437 1437 td_thr2to32(ta_p, psaddr, &ulwp, ti_p);
1438 1438 #else
1439 1439 return_val = TD_ERR;
1440 1440 #endif /* _SYSCALL32 */
1441 1441 }
1442 1442
1443 1443 (void) ps_pcontinue(ph_p);
1444 1444 ph_unlock(ta_p);
1445 1445 return (return_val);
1446 1446 }
1447 1447
1448 1448 /*
1449 1449 * Given a process and an event number, return information about
1450 1450 * an address in the process at which a breakpoint can be set
1451 1451 * to monitor the event.
1452 1452 */
1453 1453 #pragma weak td_ta_event_addr = __td_ta_event_addr
1454 1454 td_err_e
1455 1455 __td_ta_event_addr(td_thragent_t *ta_p, td_event_e event, td_notify_t *notify_p)
1456 1456 {
1457 1457 if (ta_p == NULL)
1458 1458 return (TD_BADTA);
1459 1459 if (event < TD_MIN_EVENT_NUM || event > TD_MAX_EVENT_NUM)
1460 1460 return (TD_NOEVENT);
1461 1461 if (notify_p == NULL)
1462 1462 return (TD_ERR);
1463 1463
1464 1464 notify_p->type = NOTIFY_BPT;
1465 1465 notify_p->u.bptaddr = ta_p->tdb_events[event - TD_MIN_EVENT_NUM];
1466 1466
1467 1467 return (TD_OK);
1468 1468 }
1469 1469
1470 1470 /*
1471 1471 * Add the events in eventset 2 to eventset 1.
1472 1472 */
1473 1473 static void
1474 1474 eventsetaddset(td_thr_events_t *event1_p, td_thr_events_t *event2_p)
1475 1475 {
1476 1476 int i;
1477 1477
1478 1478 for (i = 0; i < TD_EVENTSIZE; i++)
1479 1479 event1_p->event_bits[i] |= event2_p->event_bits[i];
1480 1480 }
1481 1481
1482 1482 /*
1483 1483 * Delete the events in eventset 2 from eventset 1.
1484 1484 */
1485 1485 static void
1486 1486 eventsetdelset(td_thr_events_t *event1_p, td_thr_events_t *event2_p)
1487 1487 {
1488 1488 int i;
1489 1489
1490 1490 for (i = 0; i < TD_EVENTSIZE; i++)
1491 1491 event1_p->event_bits[i] &= ~event2_p->event_bits[i];
1492 1492 }
1493 1493
1494 1494 /*
1495 1495 * Either add or delete the given event set from a thread's event mask.
1496 1496 */
1497 1497 static td_err_e
1498 1498 mod_eventset(td_thrhandle_t *th_p, td_thr_events_t *events, int onoff)
1499 1499 {
1500 1500 struct ps_prochandle *ph_p;
1501 1501 td_err_e return_val = TD_OK;
1502 1502 char enable;
1503 1503 td_thr_events_t evset;
1504 1504 psaddr_t psaddr_evset;
1505 1505 psaddr_t psaddr_enab;
1506 1506
1507 1507 if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
1508 1508 return (return_val);
1509 1509 if (th_p->th_ta_p->model == PR_MODEL_NATIVE) {
1510 1510 ulwp_t *ulwp = (ulwp_t *)th_p->th_unique;
1511 1511 psaddr_evset = (psaddr_t)&ulwp->ul_td_evbuf.eventmask;
1512 1512 psaddr_enab = (psaddr_t)&ulwp->ul_td_events_enable;
1513 1513 } else {
1514 1514 #if defined(_LP64) && defined(_SYSCALL32)
1515 1515 ulwp32_t *ulwp = (ulwp32_t *)th_p->th_unique;
1516 1516 psaddr_evset = (psaddr_t)&ulwp->ul_td_evbuf.eventmask;
1517 1517 psaddr_enab = (psaddr_t)&ulwp->ul_td_events_enable;
1518 1518 #else
1519 1519 ph_unlock(th_p->th_ta_p);
1520 1520 return (TD_ERR);
1521 1521 #endif /* _SYSCALL32 */
1522 1522 }
1523 1523 if (ps_pstop(ph_p) != PS_OK) {
1524 1524 ph_unlock(th_p->th_ta_p);
1525 1525 return (TD_DBERR);
1526 1526 }
1527 1527
1528 1528 if (ps_pdread(ph_p, psaddr_evset, &evset, sizeof (evset)) != PS_OK)
1529 1529 return_val = TD_DBERR;
1530 1530 else {
1531 1531 if (onoff)
1532 1532 eventsetaddset(&evset, events);
1533 1533 else
1534 1534 eventsetdelset(&evset, events);
1535 1535 if (ps_pdwrite(ph_p, psaddr_evset, &evset, sizeof (evset))
1536 1536 != PS_OK)
1537 1537 return_val = TD_DBERR;
1538 1538 else {
1539 1539 enable = 0;
1540 1540 if (td_eventismember(&evset, TD_EVENTS_ENABLE))
1541 1541 enable = 1;
1542 1542 if (ps_pdwrite(ph_p, psaddr_enab,
1543 1543 &enable, sizeof (enable)) != PS_OK)
1544 1544 return_val = TD_DBERR;
1545 1545 }
1546 1546 }
1547 1547
1548 1548 (void) ps_pcontinue(ph_p);
1549 1549 ph_unlock(th_p->th_ta_p);
1550 1550 return (return_val);
1551 1551 }
1552 1552
1553 1553 /*
1554 1554 * Enable or disable tracing for a given thread. Tracing
1555 1555 * is filtered based on the event mask of each thread. Tracing
1556 1556 * can be turned on/off for the thread without changing the thread's
1557 1557 * event mask.
1558 1558 * Currently unused by dbx.
1559 1559 */
1560 1560 #pragma weak td_thr_event_enable = __td_thr_event_enable
1561 1561 td_err_e
1562 1562 __td_thr_event_enable(td_thrhandle_t *th_p, int onoff)
1563 1563 {
1564 1564 td_thr_events_t evset;
1565 1565
1566 1566 td_event_emptyset(&evset);
1567 1567 td_event_addset(&evset, TD_EVENTS_ENABLE);
1568 1568 return (mod_eventset(th_p, &evset, onoff));
1569 1569 }
1570 1570
1571 1571 /*
1572 1572 * Set event mask to enable event. event is turned on in
1573 1573 * event mask for thread. If a thread encounters an event
1574 1574 * for which its event mask is on, notification will be sent
1575 1575 * to the debugger.
1576 1576 * Addresses for each event are provided to the
1577 1577 * debugger. It is assumed that a breakpoint of some type will
1578 1578 * be placed at that address. If the event mask for the thread
1579 1579 * is on, the instruction at the address will be executed.
1580 1580 * Otherwise, the instruction will be skipped.
1581 1581 */
1582 1582 #pragma weak td_thr_set_event = __td_thr_set_event
1583 1583 td_err_e
1584 1584 __td_thr_set_event(td_thrhandle_t *th_p, td_thr_events_t *events)
1585 1585 {
1586 1586 return (mod_eventset(th_p, events, 1));
1587 1587 }
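/*
 * A minimal usage sketch, not part of thread_db.c: request TD_CREATE
 * notifications for one thread and recover the address at which the
 * debugger should plant its breakpoint.  "ta" and "th" are assumed to
 * come from td_ta_new() and td_ta_map_id2thr(); watch_thread_creation
 * is an illustrative name only.
 */
static td_err_e
watch_thread_creation(td_thragent_t *ta, td_thrhandle_t *th)
{
	td_thr_events_t evset;
	td_notify_t notify;
	td_err_e err;

	td_event_emptyset(&evset);
	td_event_addset(&evset, TD_CREATE);
	if ((err = td_thr_set_event(th, &evset)) != TD_OK ||
	    (err = td_thr_event_enable(th, 1)) != TD_OK ||
	    (err = td_ta_event_addr(ta, TD_CREATE, &notify)) != TD_OK)
		return (err);
	if (notify.type == NOTIFY_BPT) {
		/* the debugger plants its breakpoint at notify.u.bptaddr */
		(void) printf("TD_CREATE breakpoint at %p\n",
		    (void *)notify.u.bptaddr);
	}
	return (TD_OK);
}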
1588 1588
1589 1589 /*
1590 1590 * Enable or disable a set of events in the process-global event mask,
1591 1591 * depending on the value of onoff.
1592 1592 */
1593 1593 static td_err_e
1594 1594 td_ta_mod_event(td_thragent_t *ta_p, td_thr_events_t *events, int onoff)
1595 1595 {
1596 1596 struct ps_prochandle *ph_p;
1597 1597 td_thr_events_t targ_eventset;
1598 1598 td_err_e return_val;
1599 1599
1600 1600 if ((ph_p = ph_lock_ta(ta_p, &return_val)) == NULL)
1601 1601 return (return_val);
1602 1602 if (ps_pstop(ph_p) != PS_OK) {
1603 1603 ph_unlock(ta_p);
1604 1604 return (TD_DBERR);
1605 1605 }
1606 1606 if (ps_pdread(ph_p, ta_p->tdb_eventmask_addr,
1607 1607 &targ_eventset, sizeof (targ_eventset)) != PS_OK)
1608 1608 return_val = TD_DBERR;
1609 1609 else {
1610 1610 if (onoff)
1611 1611 eventsetaddset(&targ_eventset, events);
1612 1612 else
1613 1613 eventsetdelset(&targ_eventset, events);
1614 1614 if (ps_pdwrite(ph_p, ta_p->tdb_eventmask_addr,
1615 1615 &targ_eventset, sizeof (targ_eventset)) != PS_OK)
1616 1616 return_val = TD_DBERR;
1617 1617 }
1618 1618 (void) ps_pcontinue(ph_p);
1619 1619 ph_unlock(ta_p);
1620 1620 return (return_val);
1621 1621 }
1622 1622
1623 1623 /*
1624 1624 * Enable a set of events in the process-global event mask.
1625 1625 */
1626 1626 #pragma weak td_ta_set_event = __td_ta_set_event
1627 1627 td_err_e
1628 1628 __td_ta_set_event(td_thragent_t *ta_p, td_thr_events_t *events)
1629 1629 {
1630 1630 return (td_ta_mod_event(ta_p, events, 1));
1631 1631 }
1632 1632
1633 1633 /*
1634 1634 * Set event mask to disable the given event set; these events are cleared
1635 1635 * from the event mask of the thread. Events that occur for a thread
1636 1636 * with the event masked off will not cause notification to be
1637 1637 * sent to the debugger (see td_thr_set_event for fuller description).
1638 1638 */
1639 1639 #pragma weak td_thr_clear_event = __td_thr_clear_event
1640 1640 td_err_e
1641 1641 __td_thr_clear_event(td_thrhandle_t *th_p, td_thr_events_t *events)
1642 1642 {
1643 1643 return (mod_eventset(th_p, events, 0));
1644 1644 }
1645 1645
1646 1646 /*
1647 1647 * Disable a set of events in the process-global event mask.
1648 1648 */
1649 1649 #pragma weak td_ta_clear_event = __td_ta_clear_event
1650 1650 td_err_e
1651 1651 __td_ta_clear_event(td_thragent_t *ta_p, td_thr_events_t *events)
1652 1652 {
1653 1653 return (td_ta_mod_event(ta_p, events, 0));
1654 1654 }
1655 1655
1656 1656 /*
1657 1657 * This function returns the most recent event message, if any,
1658 1658 * associated with a thread. Given a thread handle, return the message
1659 1659 * corresponding to the event encountered by the thread. Only one
1660 1660 * message per thread is saved. Messages from earlier events are lost
1661 1661 * when later events occur.
1662 1662 */
1663 1663 #pragma weak td_thr_event_getmsg = __td_thr_event_getmsg
1664 1664 td_err_e
1665 1665 __td_thr_event_getmsg(td_thrhandle_t *th_p, td_event_msg_t *msg)
1666 1666 {
1667 1667 struct ps_prochandle *ph_p;
1668 1668 td_err_e return_val = TD_OK;
1669 1669 psaddr_t psaddr;
1670 1670
1671 1671 if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
1672 1672 return (return_val);
1673 1673 if (ps_pstop(ph_p) != PS_OK) {
1674 1674 ph_unlock(th_p->th_ta_p);
1675 1675 return (TD_BADTA);
1676 1676 }
1677 1677 if (th_p->th_ta_p->model == PR_MODEL_NATIVE) {
1678 1678 ulwp_t *ulwp = (ulwp_t *)th_p->th_unique;
1679 1679 td_evbuf_t evbuf;
1680 1680
1681 1681 psaddr = (psaddr_t)&ulwp->ul_td_evbuf;
1682 1682 if (ps_pdread(ph_p, psaddr, &evbuf, sizeof (evbuf)) != PS_OK) {
1683 1683 return_val = TD_DBERR;
1684 1684 } else if (evbuf.eventnum == TD_EVENT_NONE) {
1685 1685 return_val = TD_NOEVENT;
1686 1686 } else {
1687 1687 msg->event = evbuf.eventnum;
1688 1688 msg->th_p = (td_thrhandle_t *)th_p;
1689 1689 msg->msg.data = (uintptr_t)evbuf.eventdata;
1690 1690 /* "Consume" the message */
1691 1691 evbuf.eventnum = TD_EVENT_NONE;
1692 1692 evbuf.eventdata = NULL;
1693 1693 if (ps_pdwrite(ph_p, psaddr, &evbuf, sizeof (evbuf))
1694 1694 != PS_OK)
1695 1695 return_val = TD_DBERR;
1696 1696 }
1697 1697 } else {
1698 1698 #if defined(_LP64) && defined(_SYSCALL32)
1699 1699 ulwp32_t *ulwp = (ulwp32_t *)th_p->th_unique;
1700 1700 td_evbuf32_t evbuf;
1701 1701
1702 1702 psaddr = (psaddr_t)&ulwp->ul_td_evbuf;
1703 1703 if (ps_pdread(ph_p, psaddr, &evbuf, sizeof (evbuf)) != PS_OK) {
1704 1704 return_val = TD_DBERR;
1705 1705 } else if (evbuf.eventnum == TD_EVENT_NONE) {
1706 1706 return_val = TD_NOEVENT;
1707 1707 } else {
1708 1708 msg->event = evbuf.eventnum;
1709 1709 msg->th_p = (td_thrhandle_t *)th_p;
1710 1710 msg->msg.data = (uintptr_t)evbuf.eventdata;
1711 1711 /* "Consume" the message */
1712 1712 evbuf.eventnum = TD_EVENT_NONE;
1713 1713 evbuf.eventdata = 0;
1714 1714 if (ps_pdwrite(ph_p, psaddr, &evbuf, sizeof (evbuf))
1715 1715 != PS_OK)
1716 1716 return_val = TD_DBERR;
1717 1717 }
1718 1718 #else
1719 1719 return_val = TD_ERR;
1720 1720 #endif /* _SYSCALL32 */
1721 1721 }
1722 1722
1723 1723 (void) ps_pcontinue(ph_p);
1724 1724 ph_unlock(th_p->th_ta_p);
1725 1725 return (return_val);
1726 1726 }
1727 1727
1728 1728 /*
1729 1729 * The callback function td_ta_event_getmsg uses when looking for
1730 1730 * a thread with an event. A thin wrapper around td_thr_event_getmsg.
1731 1731 */
1732 1732 static int
1733 1733 event_msg_cb(const td_thrhandle_t *th_p, void *arg)
1734 1734 {
1735 1735 static td_thrhandle_t th;
1736 1736 td_event_msg_t *msg = arg;
1737 1737
1738 1738 if (__td_thr_event_getmsg((td_thrhandle_t *)th_p, msg) == TD_OK) {
1739 1739 /*
1740 1740 * Got an event, stop iterating.
1741 1741 *
1742 1742 * Because of past mistakes in interface definition,
1743 1743 * we are forced to pass back a static local variable
1744 1744 * for the thread handle because th_p is a pointer
1745 1745 * to a local variable in __td_ta_thr_iter().
1746 1746 * Grr...
1747 1747 */
1748 1748 th = *th_p;
1749 1749 msg->th_p = &th;
1750 1750 return (1);
1751 1751 }
1752 1752 return (0);
1753 1753 }
1754 1754
1755 1755 /*
1756 1756 * This function is just like td_thr_event_getmsg, except that it is
1757 1757 * passed a process handle rather than a thread handle, and returns
1758 1758 * an event message for some thread in the process that has an event
1759 1759 * message pending. If no thread has an event message pending, this
1760 1760 * routine returns TD_NOEVENT. Thus, all pending event messages may
1761 1761 * be collected from a process by repeatedly calling this routine
1762 1762 * until it returns TD_NOEVENT.
1763 1763 */
1764 1764 #pragma weak td_ta_event_getmsg = __td_ta_event_getmsg
1765 1765 td_err_e
1766 1766 __td_ta_event_getmsg(td_thragent_t *ta_p, td_event_msg_t *msg)
1767 1767 {
1768 1768 td_err_e return_val;
1769 1769
1770 1770 if (ta_p == NULL)
1771 1771 return (TD_BADTA);
1772 1772 if (ta_p->ph_p == NULL)
1773 1773 return (TD_BADPH);
1774 1774 if (msg == NULL)
1775 1775 return (TD_ERR);
1776 1776 msg->event = TD_EVENT_NONE;
1777 1777 if ((return_val = __td_ta_thr_iter(ta_p, event_msg_cb, msg,
1778 1778 TD_THR_ANY_STATE, TD_THR_LOWEST_PRIORITY, TD_SIGNO_MASK,
1779 1779 TD_THR_ANY_USER_FLAGS)) != TD_OK)
1780 1780 return (return_val);
1781 1781 if (msg->event == TD_EVENT_NONE)
1782 1782 return (TD_NOEVENT);
1783 1783 return (TD_OK);
1784 1784 }
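
/*
 * Example (illustrative sketch, not part of this file): a debugger-side
 * loop that drains every pending event message from the process by
 * calling td_ta_event_getmsg() until it returns TD_NOEVENT, as described
 * above.  The agent handle "ta" is assumed to have been created earlier
 * (e.g. with td_ta_new()).
 */
static void
drain_pending_events(td_thragent_t *ta)
{
	td_event_msg_t msg;

	while (td_ta_event_getmsg(ta, &msg) == TD_OK) {
		(void) printf("event %d on thread 0x%lx, data 0x%lx\n",
		    (int)msg.event, (ulong_t)msg.th_p->th_unique,
		    (ulong_t)msg.msg.data);
	}
}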
1785 1785
1786 1786 static lwpid_t
1787 1787 thr_to_lwpid(const td_thrhandle_t *th_p)
1788 1788 {
1789 1789 struct ps_prochandle *ph_p = th_p->th_ta_p->ph_p;
1790 1790 lwpid_t lwpid;
1791 1791
1792 1792 /*
1793 1793 * The caller holds the prochandle lock
1794 1794 * and has already verfied everything.
1795 1795 */
1796 1796 if (th_p->th_ta_p->model == PR_MODEL_NATIVE) {
1797 1797 ulwp_t *ulwp = (ulwp_t *)th_p->th_unique;
1798 1798
1799 1799 if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_lwpid,
1800 1800 &lwpid, sizeof (lwpid)) != PS_OK)
1801 1801 lwpid = 0;
1802 1802 else if (lwpid == 0)
1803 1803 lwpid = 1;
1804 1804 } else {
1805 1805 #if defined(_LP64) && defined(_SYSCALL32)
1806 1806 ulwp32_t *ulwp = (ulwp32_t *)th_p->th_unique;
1807 1807
1808 1808 if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_lwpid,
1809 1809 &lwpid, sizeof (lwpid)) != PS_OK)
1810 1810 lwpid = 0;
1811 1811 else if (lwpid == 0)
1812 1812 lwpid = 1;
1813 1813 #else
1814 1814 lwpid = 0;
1815 1815 #endif /* _SYSCALL32 */
1816 1816 }
1817 1817
1818 1818 return (lwpid);
1819 1819 }
1820 1820
1821 1821 /*
1822 1822 * Suspend a thread.
1823 1823 * XXX: What does this mean in a one-level model?
1824 1824 */
1825 1825 #pragma weak td_thr_dbsuspend = __td_thr_dbsuspend
1826 1826 td_err_e
1827 1827 __td_thr_dbsuspend(const td_thrhandle_t *th_p)
1828 1828 {
1829 1829 struct ps_prochandle *ph_p;
1830 1830 td_err_e return_val;
1831 1831
1832 1832 if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
1833 1833 return (return_val);
1834 1834 if (ps_lstop(ph_p, thr_to_lwpid(th_p)) != PS_OK)
1835 1835 return_val = TD_DBERR;
1836 1836 ph_unlock(th_p->th_ta_p);
1837 1837 return (return_val);
1838 1838 }
1839 1839
1840 1840 /*
1841 1841 * Resume a suspended thread.
1842 1842 * XXX: What does this mean in a one-level model?
1843 1843 */
1844 1844 #pragma weak td_thr_dbresume = __td_thr_dbresume
1845 1845 td_err_e
1846 1846 __td_thr_dbresume(const td_thrhandle_t *th_p)
1847 1847 {
1848 1848 struct ps_prochandle *ph_p;
1849 1849 td_err_e return_val;
1850 1850
1851 1851 if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
1852 1852 return (return_val);
1853 1853 if (ps_lcontinue(ph_p, thr_to_lwpid(th_p)) != PS_OK)
1854 1854 return_val = TD_DBERR;
1855 1855 ph_unlock(th_p->th_ta_p);
1856 1856 return (return_val);
1857 1857 }
1858 1858
1859 1859 /*
1860 1860 * Set a thread's signal mask.
1861 1861 * Currently unused by dbx.
1862 1862 */
1863 1863 #pragma weak td_thr_sigsetmask = __td_thr_sigsetmask
1864 1864 /* ARGSUSED */
1865 1865 td_err_e
1866 1866 __td_thr_sigsetmask(const td_thrhandle_t *th_p, const sigset_t ti_sigmask)
1867 1867 {
1868 1868 return (TD_NOCAPAB);
1869 1869 }
1870 1870
1871 1871 /*
1872 1872 * Set a thread's "signals-pending" set.
1873 1873 * Currently unused by dbx.
1874 1874 */
1875 1875 #pragma weak td_thr_setsigpending = __td_thr_setsigpending
1876 1876 /* ARGSUSED */
1877 1877 td_err_e
1878 1878 __td_thr_setsigpending(const td_thrhandle_t *th_p,
1879 1879 uchar_t ti_pending_flag, const sigset_t ti_pending)
1880 1880 {
1881 1881 return (TD_NOCAPAB);
1882 1882 }
1883 1883
1884 1884 /*
1885 1885 * Get a thread's general register set.
1886 1886 */
1887 1887 #pragma weak td_thr_getgregs = __td_thr_getgregs
1888 1888 td_err_e
1889 1889 __td_thr_getgregs(td_thrhandle_t *th_p, prgregset_t regset)
1890 1890 {
1891 1891 struct ps_prochandle *ph_p;
1892 1892 td_err_e return_val;
1893 1893
1894 1894 if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
1895 1895 return (return_val);
1896 1896 if (ps_pstop(ph_p) != PS_OK) {
1897 1897 ph_unlock(th_p->th_ta_p);
1898 1898 return (TD_DBERR);
1899 1899 }
1900 1900
1901 1901 if (ps_lgetregs(ph_p, thr_to_lwpid(th_p), regset) != PS_OK)
1902 1902 return_val = TD_DBERR;
1903 1903
1904 1904 (void) ps_pcontinue(ph_p);
1905 1905 ph_unlock(th_p->th_ta_p);
1906 1906 return (return_val);
1907 1907 }
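
/*
 * Example (illustrative sketch, not part of this file): mapping an lwpid
 * to a thread handle and reading that thread's general registers.  The
 * prgregset_t type is the same one ps_lgetregs() uses above, and
 * td_ta_map_lwp2thr() is defined later in this file.
 */
static td_err_e
read_thread_gregs(td_thragent_t *ta, lwpid_t lwpid, prgregset_t regs)
{
	td_thrhandle_t th;
	td_err_e err;

	if ((err = td_ta_map_lwp2thr(ta, lwpid, &th)) != TD_OK)
		return (err);
	return (td_thr_getgregs(&th, regs));
}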
1908 1908
1909 1909 /*
1910 1910 * Set a thread's general register set.
1911 1911 */
1912 1912 #pragma weak td_thr_setgregs = __td_thr_setgregs
1913 1913 td_err_e
1914 1914 __td_thr_setgregs(td_thrhandle_t *th_p, const prgregset_t regset)
1915 1915 {
1916 1916 struct ps_prochandle *ph_p;
1917 1917 td_err_e return_val;
1918 1918
1919 1919 if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
1920 1920 return (return_val);
1921 1921 if (ps_pstop(ph_p) != PS_OK) {
1922 1922 ph_unlock(th_p->th_ta_p);
1923 1923 return (TD_DBERR);
1924 1924 }
1925 1925
1926 1926 if (ps_lsetregs(ph_p, thr_to_lwpid(th_p), regset) != PS_OK)
1927 1927 return_val = TD_DBERR;
1928 1928
1929 1929 (void) ps_pcontinue(ph_p);
1930 1930 ph_unlock(th_p->th_ta_p);
1931 1931 return (return_val);
1932 1932 }
1933 1933
1934 1934 /*
1935 1935 * Get a thread's floating-point register set.
1936 1936 */
1937 1937 #pragma weak td_thr_getfpregs = __td_thr_getfpregs
1938 1938 td_err_e
1939 1939 __td_thr_getfpregs(td_thrhandle_t *th_p, prfpregset_t *fpregset)
1940 1940 {
1941 1941 struct ps_prochandle *ph_p;
1942 1942 td_err_e return_val;
1943 1943
1944 1944 if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
1945 1945 return (return_val);
1946 1946 if (ps_pstop(ph_p) != PS_OK) {
1947 1947 ph_unlock(th_p->th_ta_p);
1948 1948 return (TD_DBERR);
1949 1949 }
1950 1950
1951 1951 if (ps_lgetfpregs(ph_p, thr_to_lwpid(th_p), fpregset) != PS_OK)
1952 1952 return_val = TD_DBERR;
1953 1953
1954 1954 (void) ps_pcontinue(ph_p);
1955 1955 ph_unlock(th_p->th_ta_p);
1956 1956 return (return_val);
1957 1957 }
1958 1958
1959 1959 /*
1960 1960 * Set a thread's floating-point register set.
1961 1961 */
1962 1962 #pragma weak td_thr_setfpregs = __td_thr_setfpregs
1963 1963 td_err_e
1964 1964 __td_thr_setfpregs(td_thrhandle_t *th_p, const prfpregset_t *fpregset)
1965 1965 {
1966 1966 struct ps_prochandle *ph_p;
1967 1967 td_err_e return_val;
1968 1968
1969 1969 if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
1970 1970 return (return_val);
1971 1971 if (ps_pstop(ph_p) != PS_OK) {
1972 1972 ph_unlock(th_p->th_ta_p);
1973 1973 return (TD_DBERR);
1974 1974 }
1975 1975
1976 1976 if (ps_lsetfpregs(ph_p, thr_to_lwpid(th_p), fpregset) != PS_OK)
1977 1977 return_val = TD_DBERR;
1978 1978
1979 1979 (void) ps_pcontinue(ph_p);
1980 1980 ph_unlock(th_p->th_ta_p);
1981 1981 return (return_val);
1982 1982 }
1983 1983
1984 1984 /*
1985 1985 * Get the size of the extra state register set for this architecture.
1986 1986 * Currently unused by dbx.
1987 1987 */
1988 1988 #pragma weak td_thr_getxregsize = __td_thr_getxregsize
1989 1989 /* ARGSUSED */
1990 1990 td_err_e
1991 1991 __td_thr_getxregsize(td_thrhandle_t *th_p, int *xregsize)
1992 1992 {
1993 1993 struct ps_prochandle *ph_p;
1994 1994 td_err_e return_val;
1995 1995
1996 1996 if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
1997 1997 return (return_val);
1998 1998 if (ps_pstop(ph_p) != PS_OK) {
1999 1999 ph_unlock(th_p->th_ta_p);
2000 2000 return (TD_DBERR);
2001 2001 }
2002 2002
2003 2003 if (ps_lgetxregsize(ph_p, thr_to_lwpid(th_p), xregsize) != PS_OK)
2004 2004 return_val = TD_DBERR;
2005 2005
2006 2006 if (*xregsize == 0)
2007 2007 return_val = TD_NOXREGS;
2008 2008
2009 2009 (void) ps_pcontinue(ph_p);
2010 2010 ph_unlock(th_p->th_ta_p);
2011 2011 return (return_val);
2012 2012 }
2013 2013
2014 2014 /*
2015 2015 * Get a thread's extra state register set.
2016 2016 */
2017 2017 #pragma weak td_thr_getxregs = __td_thr_getxregs
2018 2018 td_err_e
2019 2019 __td_thr_getxregs(td_thrhandle_t *th_p, void *xregset)
2020 2020 {
2021 2021 struct ps_prochandle *ph_p;
2022 2022 td_err_e return_val;
2023 2023 ps_err_e ps_err;
2024 2024
2025 2025 if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
2026 2026 return (return_val);
2027 2027 if (ps_pstop(ph_p) != PS_OK) {
2028 2028 ph_unlock(th_p->th_ta_p);
2029 2029 return (TD_DBERR);
2030 2030 }
2031 2031
2032 2032 ps_err = ps_lgetxregs(ph_p, thr_to_lwpid(th_p), (caddr_t)xregset);
2033 2033 if (ps_err == PS_NOXREGS)
2034 2034 return_val = TD_NOXREGS;
2035 2035 else if (ps_err != PS_OK)
2036 2036 return_val = TD_DBERR;
2037 2037
2038 2038 (void) ps_pcontinue(ph_p);
2039 2039 ph_unlock(th_p->th_ta_p);
2040 2040 return (return_val);
2041 2041 }
2042 2042
2043 2043 /*
2044 2044 * Set a thread's extra state register set.
2045 2045 */
2046 2046 #pragma weak td_thr_setxregs = __td_thr_setxregs
2047 2047 /* ARGSUSED */
2048 2048 td_err_e
2049 2049 __td_thr_setxregs(td_thrhandle_t *th_p, const void *xregset)
2050 2050 {
2051 2051 struct ps_prochandle *ph_p;
2052 2052 td_err_e return_val;
2053 2053
2054 2054 if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
2055 2055 return (return_val);
2056 2056 if (ps_pstop(ph_p) != PS_OK) {
2057 2057 ph_unlock(th_p->th_ta_p);
2058 2058 return (TD_DBERR);
2059 2059 }
2060 2060
2061 2061 if (ps_lsetxregs(ph_p, thr_to_lwpid(th_p), (caddr_t)xregset) != PS_OK)
2062 2062 return_val = TD_DBERR;
2063 2063
2064 2064 (void) ps_pcontinue(ph_p);
2065 2065 ph_unlock(th_p->th_ta_p);
2066 2066 return (return_val);
2067 2067 }
2068 2068
2069 2069 struct searcher {
2070 2070 psaddr_t addr;
2071 2071 int status;
2072 2072 };
2073 2073
2074 2074 /*
2075 2075  * Check the struct thread address in *th_p against the first
2076 2076  * value in "data".  If it matches, set the second value in "data"
2077 2077  * to 1 and return 1 to terminate iterations.
2078 2078 * This function is used by td_thr_validate() to verify that
2079 2079 * a thread handle is valid.
2080 2080 */
2081 2081 static int
2082 2082 td_searcher(const td_thrhandle_t *th_p, void *data)
2083 2083 {
2084 2084 struct searcher *searcher_data = (struct searcher *)data;
2085 2085
2086 2086 if (searcher_data->addr == th_p->th_unique) {
2087 2087 searcher_data->status = 1;
2088 2088 return (1);
2089 2089 }
2090 2090 return (0);
2091 2091 }
2092 2092
2093 2093 /*
2094 2094 * Validate the thread handle. Check that
2095 2095 * a thread exists in the thread agent/process that
2096 2096 * corresponds to thread with handle *th_p.
2097 2097 * Currently unused by dbx.
2098 2098 */
2099 2099 #pragma weak td_thr_validate = __td_thr_validate
2100 2100 td_err_e
2101 2101 __td_thr_validate(const td_thrhandle_t *th_p)
2102 2102 {
2103 2103 td_err_e return_val;
2104 2104 struct searcher searcher_data = {0, 0};
2105 2105
2106 2106 if (th_p == NULL)
2107 2107 return (TD_BADTH);
2108 2108 if (th_p->th_unique == 0 || th_p->th_ta_p == NULL)
2109 2109 return (TD_BADTH);
2110 2110
2111 2111 /*
2112 2112 * LOCKING EXCEPTION - Locking is not required
2113 2113 * here because no use of the thread agent is made (other
2114 2114 * than the sanity check) and checking of the thread
2115 2115 * agent will be done in __td_ta_thr_iter.
2116 2116 */
2117 2117
2118 2118 searcher_data.addr = th_p->th_unique;
2119 2119 return_val = __td_ta_thr_iter(th_p->th_ta_p,
2120 2120 td_searcher, &searcher_data,
2121 2121 TD_THR_ANY_STATE, TD_THR_LOWEST_PRIORITY,
2122 2122 TD_SIGNO_MASK, TD_THR_ANY_USER_FLAGS);
2123 2123
2124 2124 if (return_val == TD_OK && searcher_data.status == 0)
2125 2125 return_val = TD_NOTHR;
2126 2126
2127 2127 return (return_val);
2128 2128 }
2129 2129
2130 2130 /*
2131 2131 * Get a thread's private binding to a given thread specific
2132 2132  * data (TSD) key (see thr_getspecific(3C)).  If the thread doesn't
2133 2133 * have a binding for a particular key, then NULL is returned.
2134 2134 */
2135 2135 #pragma weak td_thr_tsd = __td_thr_tsd
2136 2136 td_err_e
2137 2137 __td_thr_tsd(td_thrhandle_t *th_p, thread_key_t key, void **data_pp)
2138 2138 {
2139 2139 struct ps_prochandle *ph_p;
2140 2140 td_thragent_t *ta_p;
2141 2141 td_err_e return_val;
2142 2142 int maxkey;
2143 2143 int nkey;
2144 2144 psaddr_t tsd_paddr;
2145 2145
2146 2146 if (data_pp == NULL)
2147 2147 return (TD_ERR);
2148 2148 *data_pp = NULL;
2149 2149 if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
2150 2150 return (return_val);
2151 2151 ta_p = th_p->th_ta_p;
2152 2152 if (ps_pstop(ph_p) != PS_OK) {
2153 2153 ph_unlock(ta_p);
2154 2154 return (TD_DBERR);
2155 2155 }
2156 2156
2157 2157 if (ta_p->model == PR_MODEL_NATIVE) {
2158 2158 ulwp_t *ulwp = (ulwp_t *)th_p->th_unique;
2159 2159 tsd_metadata_t tsdm;
2160 2160 tsd_t stsd;
2161 2161
2162 2162 if (ps_pdread(ph_p,
2163 2163 ta_p->uberdata_addr + offsetof(uberdata_t, tsd_metadata),
2164 2164 &tsdm, sizeof (tsdm)) != PS_OK)
2165 2165 return_val = TD_DBERR;
2166 2166 else if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_stsd,
2167 2167 &tsd_paddr, sizeof (tsd_paddr)) != PS_OK)
2168 2168 return_val = TD_DBERR;
2169 2169 else if (tsd_paddr != 0 &&
2170 2170 ps_pdread(ph_p, tsd_paddr, &stsd, sizeof (stsd)) != PS_OK)
2171 2171 return_val = TD_DBERR;
2172 2172 else {
2173 2173 maxkey = tsdm.tsdm_nused;
2174 2174 nkey = tsd_paddr == 0 ? TSD_NFAST : stsd.tsd_nalloc;
2175 2175
2176 2176 if (key < TSD_NFAST)
2177 2177 tsd_paddr = (psaddr_t)&ulwp->ul_ftsd[0];
2178 2178 }
2179 2179 } else {
2180 2180 #if defined(_LP64) && defined(_SYSCALL32)
2181 2181 ulwp32_t *ulwp = (ulwp32_t *)th_p->th_unique;
2182 2182 tsd_metadata32_t tsdm;
2183 2183 tsd32_t stsd;
2184 2184 caddr32_t addr;
2185 2185
2186 2186 if (ps_pdread(ph_p,
2187 2187 ta_p->uberdata_addr + offsetof(uberdata32_t, tsd_metadata),
2188 2188 &tsdm, sizeof (tsdm)) != PS_OK)
2189 2189 return_val = TD_DBERR;
2190 2190 else if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_stsd,
2191 2191 &addr, sizeof (addr)) != PS_OK)
2192 2192 return_val = TD_DBERR;
2193 2193 else if (addr != 0 &&
2194 2194 ps_pdread(ph_p, addr, &stsd, sizeof (stsd)) != PS_OK)
2195 2195 return_val = TD_DBERR;
2196 2196 else {
2197 2197 maxkey = tsdm.tsdm_nused;
2198 2198 nkey = addr == 0 ? TSD_NFAST : stsd.tsd_nalloc;
2199 2199
2200 2200 if (key < TSD_NFAST) {
2201 2201 tsd_paddr = (psaddr_t)&ulwp->ul_ftsd[0];
2202 2202 } else {
2203 2203 tsd_paddr = addr;
2204 2204 }
2205 2205 }
2206 2206 #else
2207 2207 return_val = TD_ERR;
2208 2208 #endif /* _SYSCALL32 */
2209 2209 }
2210 2210
2211 2211 if (return_val == TD_OK && (key < 1 || key >= maxkey))
2212 2212 return_val = TD_NOTSD;
2213 2213 if (return_val != TD_OK || key >= nkey) {
2214 2214 /* NULL has already been stored in data_pp */
2215 2215 (void) ps_pcontinue(ph_p);
2216 2216 ph_unlock(ta_p);
2217 2217 return (return_val);
2218 2218 }
2219 2219
2220 2220 /*
2221 2221 * Read the value from the thread's tsd array.
2222 2222 */
2223 2223 if (ta_p->model == PR_MODEL_NATIVE) {
2224 2224 void *value;
2225 2225
2226 2226 if (ps_pdread(ph_p, tsd_paddr + key * sizeof (void *),
2227 2227 &value, sizeof (value)) != PS_OK)
2228 2228 return_val = TD_DBERR;
2229 2229 else
2230 2230 *data_pp = value;
2231 2231 #if defined(_LP64) && defined(_SYSCALL32)
2232 2232 } else {
2233 2233 caddr32_t value32;
2234 2234
2235 2235 if (ps_pdread(ph_p, tsd_paddr + key * sizeof (caddr32_t),
2236 2236 &value32, sizeof (value32)) != PS_OK)
2237 2237 return_val = TD_DBERR;
2238 2238 else
2239 2239 *data_pp = (void *)(uintptr_t)value32;
2240 2240 #endif /* _SYSCALL32 */
2241 2241 }
2242 2242
2243 2243 (void) ps_pcontinue(ph_p);
2244 2244 ph_unlock(ta_p);
2245 2245 return (return_val);
2246 2246 }
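
/*
 * Example (illustrative sketch, not part of this file): reporting one
 * thread's binding for a TSD key that the target process created with
 * thr_keycreate(3C).  TD_NOTSD means the key is not an allocated key.
 */
static void
show_tsd_binding(td_thrhandle_t *th, thread_key_t key)
{
	void *value;
	td_err_e err;

	if ((err = td_thr_tsd(th, key, &value)) == TD_OK)
		(void) printf("key %u -> %p\n", (uint_t)key, value);
	else if (err == TD_NOTSD)
		(void) printf("key %u is not an allocated TSD key\n",
		    (uint_t)key);
}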
2247 2247
2248 2248 /*
2249 2249 * Get the base address of a thread's thread local storage (TLS) block
2250 2250 * for the module (executable or shared object) identified by 'moduleid'.
2251 2251 */
2252 2252 #pragma weak td_thr_tlsbase = __td_thr_tlsbase
2253 2253 td_err_e
2254 2254 __td_thr_tlsbase(td_thrhandle_t *th_p, ulong_t moduleid, psaddr_t *base)
2255 2255 {
2256 2256 struct ps_prochandle *ph_p;
2257 2257 td_thragent_t *ta_p;
2258 2258 td_err_e return_val;
2259 2259
2260 2260 if (base == NULL)
2261 2261 return (TD_ERR);
2262 2262 *base = 0;
2263 2263 if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
2264 2264 return (return_val);
2265 2265 ta_p = th_p->th_ta_p;
2266 2266 if (ps_pstop(ph_p) != PS_OK) {
2267 2267 ph_unlock(ta_p);
2268 2268 return (TD_DBERR);
2269 2269 }
2270 2270
2271 2271 if (ta_p->model == PR_MODEL_NATIVE) {
2272 2272 ulwp_t *ulwp = (ulwp_t *)th_p->th_unique;
2273 2273 tls_metadata_t tls_metadata;
2274 2274 TLS_modinfo tlsmod;
2275 2275 tls_t tls;
2276 2276
2277 2277 if (ps_pdread(ph_p,
2278 2278 ta_p->uberdata_addr + offsetof(uberdata_t, tls_metadata),
2279 2279 &tls_metadata, sizeof (tls_metadata)) != PS_OK)
2280 2280 return_val = TD_DBERR;
2281 2281 else if (moduleid >= tls_metadata.tls_modinfo.tls_size)
2282 2282 return_val = TD_NOTLS;
2283 2283 else if (ps_pdread(ph_p,
2284 2284 (psaddr_t)((TLS_modinfo *)
2285 2285 tls_metadata.tls_modinfo.tls_data + moduleid),
2286 2286 &tlsmod, sizeof (tlsmod)) != PS_OK)
2287 2287 return_val = TD_DBERR;
2288 2288 else if (tlsmod.tm_memsz == 0)
2289 2289 return_val = TD_NOTLS;
2290 2290 else if (tlsmod.tm_flags & TM_FLG_STATICTLS)
2291 2291 *base = (psaddr_t)ulwp - tlsmod.tm_stattlsoffset;
2292 2292 else if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_tls,
2293 2293 &tls, sizeof (tls)) != PS_OK)
2294 2294 return_val = TD_DBERR;
2295 2295 else if (moduleid >= tls.tls_size)
2296 2296 return_val = TD_TLSDEFER;
2297 2297 else if (ps_pdread(ph_p,
2298 2298 (psaddr_t)((tls_t *)tls.tls_data + moduleid),
2299 2299 &tls, sizeof (tls)) != PS_OK)
2300 2300 return_val = TD_DBERR;
2301 2301 else if (tls.tls_size == 0)
2302 2302 return_val = TD_TLSDEFER;
2303 2303 else
2304 2304 *base = (psaddr_t)tls.tls_data;
2305 2305 } else {
2306 2306 #if defined(_LP64) && defined(_SYSCALL32)
2307 2307 ulwp32_t *ulwp = (ulwp32_t *)th_p->th_unique;
2308 2308 tls_metadata32_t tls_metadata;
2309 2309 TLS_modinfo32 tlsmod;
2310 2310 tls32_t tls;
2311 2311
2312 2312 if (ps_pdread(ph_p,
2313 2313 ta_p->uberdata_addr + offsetof(uberdata32_t, tls_metadata),
2314 2314 &tls_metadata, sizeof (tls_metadata)) != PS_OK)
2315 2315 return_val = TD_DBERR;
2316 2316 else if (moduleid >= tls_metadata.tls_modinfo.tls_size)
2317 2317 return_val = TD_NOTLS;
2318 2318 else if (ps_pdread(ph_p,
2319 2319 (psaddr_t)((TLS_modinfo32 *)
2320 2320 (uintptr_t)tls_metadata.tls_modinfo.tls_data + moduleid),
2321 2321 &tlsmod, sizeof (tlsmod)) != PS_OK)
2322 2322 return_val = TD_DBERR;
2323 2323 else if (tlsmod.tm_memsz == 0)
2324 2324 return_val = TD_NOTLS;
2325 2325 else if (tlsmod.tm_flags & TM_FLG_STATICTLS)
2326 2326 *base = (psaddr_t)ulwp - tlsmod.tm_stattlsoffset;
2327 2327 else if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_tls,
2328 2328 &tls, sizeof (tls)) != PS_OK)
2329 2329 return_val = TD_DBERR;
2330 2330 else if (moduleid >= tls.tls_size)
2331 2331 return_val = TD_TLSDEFER;
2332 2332 else if (ps_pdread(ph_p,
2333 2333 (psaddr_t)((tls32_t *)(uintptr_t)tls.tls_data + moduleid),
2334 2334 &tls, sizeof (tls)) != PS_OK)
2335 2335 return_val = TD_DBERR;
2336 2336 else if (tls.tls_size == 0)
2337 2337 return_val = TD_TLSDEFER;
2338 2338 else
2339 2339 *base = (psaddr_t)tls.tls_data;
2340 2340 #else
2341 2341 return_val = TD_ERR;
2342 2342 #endif /* _SYSCALL32 */
2343 2343 }
2344 2344
2345 2345 (void) ps_pcontinue(ph_p);
2346 2346 ph_unlock(ta_p);
2347 2347 return (return_val);
2348 2348 }
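
/*
 * Example (illustrative sketch, not part of this file): computing the
 * address of a thread's copy of a TLS variable from the module id of the
 * object that defines it and the variable's offset within that module's
 * TLS block (both obtained elsewhere, e.g. from the run-time linker).
 * TD_TLSDEFER means the thread has not allocated that block yet.
 */
static td_err_e
resolve_tls_var(td_thrhandle_t *th, ulong_t moduleid, psaddr_t offset,
    psaddr_t *addrp)
{
	psaddr_t base;
	td_err_e err;

	if ((err = td_thr_tlsbase(th, moduleid, &base)) != TD_OK)
		return (err);
	*addrp = base + offset;
	return (TD_OK);
}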
2349 2349
2350 2350 /*
2351 2351 * Change a thread's priority to the value specified by ti_pri.
2352 2352 * Currently unused by dbx.
2353 2353 */
2354 2354 #pragma weak td_thr_setprio = __td_thr_setprio
2355 2355 /* ARGSUSED */
2356 2356 td_err_e
2357 2357 __td_thr_setprio(td_thrhandle_t *th_p, int ti_pri)
2358 2358 {
2359 2359 return (TD_NOCAPAB);
2360 2360 }
2361 2361
2362 2362 /*
2363 2363 * This structure links td_thr_lockowner and the lowner_cb callback function.
2364 2364 */
2365 2365 typedef struct {
2366 2366 td_sync_iter_f *owner_cb;
2367 2367 void *owner_cb_arg;
2368 2368 td_thrhandle_t *th_p;
2369 2369 } lowner_cb_ctl_t;
2370 2370
2371 2371 static int
2372 2372 lowner_cb(const td_synchandle_t *sh_p, void *arg)
2373 2373 {
2374 2374 lowner_cb_ctl_t *ocb = arg;
2375 2375 int trunc = 0;
2376 2376 union {
2377 2377 rwlock_t rwl;
2378 2378 mutex_t mx;
2379 2379 } rw_m;
2380 2380
2381 2381 if (ps_pdread(sh_p->sh_ta_p->ph_p, sh_p->sh_unique,
2382 2382 &rw_m, sizeof (rw_m)) != PS_OK) {
2383 2383 trunc = 1;
2384 2384 if (ps_pdread(sh_p->sh_ta_p->ph_p, sh_p->sh_unique,
2385 2385 &rw_m.mx, sizeof (rw_m.mx)) != PS_OK)
2386 2386 return (0);
2387 2387 }
2388 2388 if (rw_m.mx.mutex_magic == MUTEX_MAGIC &&
2389 2389 rw_m.mx.mutex_owner == ocb->th_p->th_unique)
2390 2390 return ((ocb->owner_cb)(sh_p, ocb->owner_cb_arg));
2391 2391 if (!trunc && rw_m.rwl.magic == RWL_MAGIC) {
2392 2392 mutex_t *rwlock = &rw_m.rwl.mutex;
2393 2393 if (rwlock->mutex_owner == ocb->th_p->th_unique)
2394 2394 return ((ocb->owner_cb)(sh_p, ocb->owner_cb_arg));
2395 2395 }
2396 2396 return (0);
2397 2397 }
2398 2398
2399 2399 /*
2400 2400 * Iterate over the set of locks owned by a specified thread.
2401 2401 * If cb returns a non-zero value, terminate iterations.
2402 2402 */
2403 2403 #pragma weak td_thr_lockowner = __td_thr_lockowner
2404 2404 td_err_e
2405 2405 __td_thr_lockowner(const td_thrhandle_t *th_p, td_sync_iter_f *cb,
2406 2406 void *cb_data)
2407 2407 {
2408 2408 td_thragent_t *ta_p;
2409 2409 td_err_e return_val;
2410 2410 lowner_cb_ctl_t lcb;
2411 2411
2412 2412 /*
2413 2413 * Just sanity checks.
2414 2414 */
2415 2415 if (ph_lock_th((td_thrhandle_t *)th_p, &return_val) == NULL)
2416 2416 return (return_val);
2417 2417 ta_p = th_p->th_ta_p;
2418 2418 ph_unlock(ta_p);
2419 2419
2420 2420 lcb.owner_cb = cb;
2421 2421 lcb.owner_cb_arg = cb_data;
2422 2422 lcb.th_p = (td_thrhandle_t *)th_p;
2423 2423 return (__td_ta_sync_iter(ta_p, lowner_cb, &lcb));
2424 2424 }
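
/*
 * Example (illustrative sketch, not part of this file): a td_sync_iter_f
 * callback that lists the locks a thread currently owns; returning 0
 * keeps the iteration going.
 */
/* ARGSUSED */
static int
print_owned_lock(const td_synchandle_t *sh, void *arg)
{
	(void) printf("owns sync object at 0x%lx\n", (ulong_t)sh->sh_unique);
	return (0);
}

static td_err_e
list_owned_locks(const td_thrhandle_t *th)
{
	return (td_thr_lockowner(th, print_owned_lock, NULL));
}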
2425 2425
2426 2426 /*
2427 2427 * If a thread is asleep on a synchronization variable,
2428 2428 * then get the synchronization handle.
2429 2429 */
2430 2430 #pragma weak td_thr_sleepinfo = __td_thr_sleepinfo
2431 2431 td_err_e
2432 2432 __td_thr_sleepinfo(const td_thrhandle_t *th_p, td_synchandle_t *sh_p)
2433 2433 {
2434 2434 struct ps_prochandle *ph_p;
2435 2435 td_err_e return_val = TD_OK;
2436 2436 uintptr_t wchan;
2437 2437
2438 2438 if (sh_p == NULL)
2439 2439 return (TD_ERR);
2440 2440 if ((ph_p = ph_lock_th((td_thrhandle_t *)th_p, &return_val)) == NULL)
2441 2441 return (return_val);
2442 2442
2443 2443 /*
2444 2444 * No need to stop the process for a simple read.
2445 2445 */
2446 2446 if (th_p->th_ta_p->model == PR_MODEL_NATIVE) {
2447 2447 ulwp_t *ulwp = (ulwp_t *)th_p->th_unique;
2448 2448
2449 2449 if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_wchan,
2450 2450 &wchan, sizeof (wchan)) != PS_OK)
2451 2451 return_val = TD_DBERR;
2452 2452 } else {
2453 2453 #if defined(_LP64) && defined(_SYSCALL32)
2454 2454 ulwp32_t *ulwp = (ulwp32_t *)th_p->th_unique;
2455 2455 caddr32_t wchan32;
2456 2456
2457 2457 if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_wchan,
2458 2458 &wchan32, sizeof (wchan32)) != PS_OK)
2459 2459 return_val = TD_DBERR;
2460 2460 wchan = wchan32;
2461 2461 #else
2462 2462 return_val = TD_ERR;
2463 2463 #endif /* _SYSCALL32 */
2464 2464 }
2465 2465
2466 2466 if (return_val != TD_OK || wchan == 0) {
2467 2467 sh_p->sh_ta_p = NULL;
2468 2468 sh_p->sh_unique = 0;
2469 2469 if (return_val == TD_OK)
2470 2470 return_val = TD_ERR;
2471 2471 } else {
2472 2472 sh_p->sh_ta_p = th_p->th_ta_p;
2473 2473 sh_p->sh_unique = (psaddr_t)wchan;
2474 2474 }
2475 2475
2476 2476 ph_unlock(th_p->th_ta_p);
2477 2477 return (return_val);
2478 2478 }
2479 2479
2480 2480 /*
2481 2481 * Which thread is running on an lwp?
2482 2482 */
2483 2483 #pragma weak td_ta_map_lwp2thr = __td_ta_map_lwp2thr
2484 2484 td_err_e
2485 2485 __td_ta_map_lwp2thr(td_thragent_t *ta_p, lwpid_t lwpid,
2486 2486 td_thrhandle_t *th_p)
2487 2487 {
2488 2488 return (__td_ta_map_id2thr(ta_p, lwpid, th_p));
2489 2489 }
2490 2490
2491 2491 /*
2492 2492 * Common code for td_sync_get_info() and td_sync_get_stats()
2493 2493 */
2494 2494 static td_err_e
2495 2495 sync_get_info_common(const td_synchandle_t *sh_p, struct ps_prochandle *ph_p,
2496 2496 td_syncinfo_t *si_p)
2497 2497 {
2498 2498 int trunc = 0;
2499 2499 td_so_un_t generic_so;
2500 2500
2501 2501 /*
2502 2502 * Determine the sync. object type; a little type fudgery here.
2503 2503 * First attempt to read the whole union. If that fails, attempt
2504 2504 * to read just the condvar. A condvar is the smallest sync. object.
2505 2505 */
2506 2506 if (ps_pdread(ph_p, sh_p->sh_unique,
2507 2507 &generic_so, sizeof (generic_so)) != PS_OK) {
2508 2508 trunc = 1;
2509 2509 if (ps_pdread(ph_p, sh_p->sh_unique, &generic_so.condition,
2510 2510 sizeof (generic_so.condition)) != PS_OK)
2511 2511 return (TD_DBERR);
2512 2512 }
2513 2513
2514 2514 switch (generic_so.condition.cond_magic) {
2515 2515 case MUTEX_MAGIC:
2516 2516 if (trunc && ps_pdread(ph_p, sh_p->sh_unique,
2517 2517 &generic_so.lock, sizeof (generic_so.lock)) != PS_OK)
2518 2518 return (TD_DBERR);
2519 2519 si_p->si_type = TD_SYNC_MUTEX;
2520 2520 si_p->si_shared_type =
2521 2521 (generic_so.lock.mutex_type & USYNC_PROCESS);
2522 2522 (void) memcpy(si_p->si_flags, &generic_so.lock.mutex_flag,
2523 2523 sizeof (generic_so.lock.mutex_flag));
2524 2524 si_p->si_state.mutex_locked =
2525 2525 (generic_so.lock.mutex_lockw != 0);
2526 2526 si_p->si_size = sizeof (generic_so.lock);
2527 2527 si_p->si_has_waiters = generic_so.lock.mutex_waiters;
2528 2528 si_p->si_rcount = generic_so.lock.mutex_rcount;
2529 2529 si_p->si_prioceiling = generic_so.lock.mutex_ceiling;
2530 2530 if (si_p->si_state.mutex_locked) {
2531 2531 if (si_p->si_shared_type & USYNC_PROCESS)
2532 2532 si_p->si_ownerpid =
2533 2533 generic_so.lock.mutex_ownerpid;
2534 2534 si_p->si_owner.th_ta_p = sh_p->sh_ta_p;
2535 2535 si_p->si_owner.th_unique = generic_so.lock.mutex_owner;
2536 2536 }
2537 2537 break;
2538 2538 case COND_MAGIC:
2539 2539 si_p->si_type = TD_SYNC_COND;
2540 2540 si_p->si_shared_type =
2541 2541 (generic_so.condition.cond_type & USYNC_PROCESS);
2542 2542 (void) memcpy(si_p->si_flags, generic_so.condition.flags.flag,
2543 2543 sizeof (generic_so.condition.flags.flag));
2544 2544 si_p->si_size = sizeof (generic_so.condition);
2545 2545 si_p->si_has_waiters =
2546 2546 (generic_so.condition.cond_waiters_user |
2547 2547 generic_so.condition.cond_waiters_kernel)? 1 : 0;
2548 2548 break;
2549 2549 case SEMA_MAGIC:
2550 2550 if (trunc && ps_pdread(ph_p, sh_p->sh_unique,
2551 2551 &generic_so.semaphore, sizeof (generic_so.semaphore))
2552 2552 != PS_OK)
2553 2553 return (TD_DBERR);
2554 2554 si_p->si_type = TD_SYNC_SEMA;
2555 2555 si_p->si_shared_type =
2556 2556 (generic_so.semaphore.type & USYNC_PROCESS);
2557 2557 si_p->si_state.sem_count = generic_so.semaphore.count;
2558 2558 si_p->si_size = sizeof (generic_so.semaphore);
2559 2559 si_p->si_has_waiters =
2560 2560 ((lwp_sema_t *)&generic_so.semaphore)->flags[7];
2561 2561 /* this is useless but the old interface provided it */
2562 2562 si_p->si_data = (psaddr_t)generic_so.semaphore.count;
2563 2563 break;
2564 2564 case RWL_MAGIC:
2565 2565 {
2566 2566 uint32_t rwstate;
2567 2567
2568 2568 if (trunc && ps_pdread(ph_p, sh_p->sh_unique,
2569 2569 &generic_so.rwlock, sizeof (generic_so.rwlock)) != PS_OK)
2570 2570 return (TD_DBERR);
2571 2571 si_p->si_type = TD_SYNC_RWLOCK;
2572 2572 si_p->si_shared_type =
2573 2573 (generic_so.rwlock.rwlock_type & USYNC_PROCESS);
2574 2574 si_p->si_size = sizeof (generic_so.rwlock);
2575 2575
2576 2576 rwstate = (uint32_t)generic_so.rwlock.rwlock_readers;
2577 2577 if (rwstate & URW_WRITE_LOCKED) {
2578 2578 si_p->si_state.nreaders = -1;
2579 2579 si_p->si_is_wlock = 1;
2580 2580 si_p->si_owner.th_ta_p = sh_p->sh_ta_p;
2581 2581 si_p->si_owner.th_unique =
2582 2582 generic_so.rwlock.rwlock_owner;
2583 2583 if (si_p->si_shared_type & USYNC_PROCESS)
2584 2584 si_p->si_ownerpid =
2585 2585 generic_so.rwlock.rwlock_ownerpid;
2586 2586 } else {
2587 2587 si_p->si_state.nreaders = (rwstate & URW_READERS_MASK);
2588 2588 }
2589 2589 si_p->si_has_waiters = ((rwstate & URW_HAS_WAITERS) != 0);
2590 2590
2591 2591 /* this is useless but the old interface provided it */
2592 2592 si_p->si_data = (psaddr_t)generic_so.rwlock.readers;
2593 2593 break;
2594 2594 }
2595 2595 default:
2596 2596 return (TD_BADSH);
2597 2597 }
2598 2598
2599 2599 si_p->si_ta_p = sh_p->sh_ta_p;
2600 2600 si_p->si_sv_addr = sh_p->sh_unique;
2601 2601 return (TD_OK);
2602 2602 }
2603 2603
2604 2604 /*
2605 2605 * Given a synchronization handle, fill in the
2606 2606 * information for the synchronization variable into *si_p.
2607 2607 */
2608 2608 #pragma weak td_sync_get_info = __td_sync_get_info
2609 2609 td_err_e
2610 2610 __td_sync_get_info(const td_synchandle_t *sh_p, td_syncinfo_t *si_p)
2611 2611 {
2612 2612 struct ps_prochandle *ph_p;
2613 2613 td_err_e return_val;
2614 2614
2615 2615 if (si_p == NULL)
2616 2616 return (TD_ERR);
2617 2617 (void) memset(si_p, 0, sizeof (*si_p));
2618 2618 if ((ph_p = ph_lock_sh(sh_p, &return_val)) == NULL)
2619 2619 return (return_val);
2620 2620 if (ps_pstop(ph_p) != PS_OK) {
2621 2621 ph_unlock(sh_p->sh_ta_p);
2622 2622 return (TD_DBERR);
2623 2623 }
2624 2624
2625 2625 return_val = sync_get_info_common(sh_p, ph_p, si_p);
2626 2626
2627 2627 (void) ps_pcontinue(ph_p);
2628 2628 ph_unlock(sh_p->sh_ta_p);
2629 2629 return (return_val);
2630 2630 }
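
/*
 * Example (illustrative sketch, not part of this file): inspecting a mutex
 * in the target process through its synchronization handle and reporting
 * whether it is held, by whom, and whether anyone is waiting on it.
 */
static void
show_mutex_state(const td_synchandle_t *sh)
{
	td_syncinfo_t si;

	if (td_sync_get_info(sh, &si) != TD_OK || si.si_type != TD_SYNC_MUTEX)
		return;
	if (si.si_state.mutex_locked)
		(void) printf("mutex 0x%lx held by thread 0x%lx%s\n",
		    (ulong_t)si.si_sv_addr, (ulong_t)si.si_owner.th_unique,
		    si.si_has_waiters ? ", has waiters" : "");
	else
		(void) printf("mutex 0x%lx is unlocked\n",
		    (ulong_t)si.si_sv_addr);
}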
2631 2631
2632 2632 static uint_t
2633 2633 tdb_addr_hash64(uint64_t addr)
2634 2634 {
2635 2635 uint64_t value60 = (addr >> 4);
2636 2636 uint32_t value30 = (value60 >> 30) ^ (value60 & 0x3fffffff);
2637 2637 return ((value30 >> 15) ^ (value30 & 0x7fff));
2638 2638 }
2639 2639
2640 2640 static uint_t
2641 2641 tdb_addr_hash32(uint64_t addr)
2642 2642 {
2643 2643 uint32_t value30 = (addr >> 2); /* 30 bits */
2644 2644 return ((value30 >> 15) ^ (value30 & 0x7fff));
2645 2645 }
2646 2646
2647 2647 static td_err_e
2648 2648 read_sync_stats(td_thragent_t *ta_p, psaddr_t hash_table,
2649 2649 psaddr_t sync_obj_addr, tdb_sync_stats_t *sync_stats)
2650 2650 {
2651 2651 psaddr_t next_desc;
2652 2652 uint64_t first;
2653 2653 uint_t ix;
2654 2654
2655 2655 /*
2656 2656 * Compute the hash table index from the synch object's address.
2657 2657 */
2658 2658 if (ta_p->model == PR_MODEL_LP64)
2659 2659 ix = tdb_addr_hash64(sync_obj_addr);
2660 2660 else
2661 2661 ix = tdb_addr_hash32(sync_obj_addr);
2662 2662
2663 2663 /*
2664 2664 * Get the address of the first element in the linked list.
2665 2665 */
2666 2666 if (ps_pdread(ta_p->ph_p, hash_table + ix * sizeof (uint64_t),
2667 2667 &first, sizeof (first)) != PS_OK)
2668 2668 return (TD_DBERR);
2669 2669
2670 2670 /*
2671 2671	 * Search the linked list for an entry for the synch object.
2672 2672 */
2673 2673 for (next_desc = (psaddr_t)first; next_desc != 0;
2674 2674 next_desc = (psaddr_t)sync_stats->next) {
2675 2675 if (ps_pdread(ta_p->ph_p, next_desc,
2676 2676 sync_stats, sizeof (*sync_stats)) != PS_OK)
2677 2677 return (TD_DBERR);
2678 2678 if (sync_stats->sync_addr == sync_obj_addr)
2679 2679 return (TD_OK);
2680 2680 }
2681 2681
2682 2682 (void) memset(sync_stats, 0, sizeof (*sync_stats));
2683 2683 return (TD_OK);
2684 2684 }
2685 2685
2686 2686 /*
2687 2687 * Given a synchronization handle, fill in the
2688 2688 * statistics for the synchronization variable into *ss_p.
2689 2689 */
2690 2690 #pragma weak td_sync_get_stats = __td_sync_get_stats
2691 2691 td_err_e
2692 2692 __td_sync_get_stats(const td_synchandle_t *sh_p, td_syncstats_t *ss_p)
2693 2693 {
2694 2694 struct ps_prochandle *ph_p;
2695 2695 td_thragent_t *ta_p;
2696 2696 td_err_e return_val;
2697 2697 register_sync_t enable;
2698 2698 psaddr_t hashaddr;
2699 2699 tdb_sync_stats_t sync_stats;
2700 2700 size_t ix;
2701 2701
2702 2702 if (ss_p == NULL)
2703 2703 return (TD_ERR);
2704 2704 (void) memset(ss_p, 0, sizeof (*ss_p));
2705 2705 if ((ph_p = ph_lock_sh(sh_p, &return_val)) == NULL)
2706 2706 return (return_val);
2707 2707 ta_p = sh_p->sh_ta_p;
2708 2708 if (ps_pstop(ph_p) != PS_OK) {
2709 2709 ph_unlock(ta_p);
2710 2710 return (TD_DBERR);
2711 2711 }
2712 2712
2713 2713 if ((return_val = sync_get_info_common(sh_p, ph_p, &ss_p->ss_info))
2714 2714 != TD_OK) {
2715 2715 if (return_val != TD_BADSH)
2716 2716 goto out;
2717 2717 /* we can correct TD_BADSH */
2718 2718 (void) memset(&ss_p->ss_info, 0, sizeof (ss_p->ss_info));
2719 2719 ss_p->ss_info.si_ta_p = sh_p->sh_ta_p;
2720 2720 ss_p->ss_info.si_sv_addr = sh_p->sh_unique;
2721 2721 /* we correct si_type and si_size below */
2722 2722 return_val = TD_OK;
2723 2723 }
2724 2724 if (ps_pdread(ph_p, ta_p->tdb_register_sync_addr,
2725 2725 &enable, sizeof (enable)) != PS_OK) {
2726 2726 return_val = TD_DBERR;
2727 2727 goto out;
2728 2728 }
2729 2729 if (enable != REGISTER_SYNC_ON)
2730 2730 goto out;
2731 2731
2732 2732 /*
2733 2733 * Get the address of the hash table in the target process.
2734 2734 */
2735 2735 if (ta_p->model == PR_MODEL_NATIVE) {
2736 2736 if (ps_pdread(ph_p, ta_p->uberdata_addr +
2737 2737 offsetof(uberdata_t, tdb.tdb_sync_addr_hash),
2738 2738 &hashaddr, sizeof (&hashaddr)) != PS_OK) {
2739 2739 return_val = TD_DBERR;
2740 2740 goto out;
2741 2741 }
2742 2742 } else {
2743 2743 #if defined(_LP64) && defined(_SYSCALL32)
2744 2744 caddr32_t addr;
2745 2745
2746 2746 if (ps_pdread(ph_p, ta_p->uberdata_addr +
2747 2747 offsetof(uberdata32_t, tdb.tdb_sync_addr_hash),
2748 2748 &addr, sizeof (addr)) != PS_OK) {
2749 2749 return_val = TD_DBERR;
2750 2750 goto out;
2751 2751 }
2752 2752 hashaddr = addr;
2753 2753 #else
2754 2754 return_val = TD_ERR;
2755 2755 goto out;
2756 2756 #endif /* _SYSCALL32 */
2757 2757 }
2758 2758
2759 2759 if (hashaddr == 0)
2760 2760 return_val = TD_BADSH;
2761 2761 else
2762 2762 return_val = read_sync_stats(ta_p, hashaddr,
2763 2763 sh_p->sh_unique, &sync_stats);
2764 2764 if (return_val != TD_OK)
2765 2765 goto out;
2766 2766
2767 2767 /*
2768 2768 * We have the hash table entry. Transfer the data to
2769 2769 * the td_syncstats_t structure provided by the caller.
2770 2770 */
2771 2771 switch (sync_stats.un.type) {
2772 2772 case TDB_MUTEX:
2773 2773 {
2774 2774 td_mutex_stats_t *msp = &ss_p->ss_un.mutex;
2775 2775
2776 2776 ss_p->ss_info.si_type = TD_SYNC_MUTEX;
2777 2777 ss_p->ss_info.si_size = sizeof (mutex_t);
2778 2778 msp->mutex_lock =
2779 2779 sync_stats.un.mutex.mutex_lock;
2780 2780 msp->mutex_sleep =
2781 2781 sync_stats.un.mutex.mutex_sleep;
2782 2782 msp->mutex_sleep_time =
2783 2783 sync_stats.un.mutex.mutex_sleep_time;
2784 2784 msp->mutex_hold_time =
2785 2785 sync_stats.un.mutex.mutex_hold_time;
2786 2786 msp->mutex_try =
2787 2787 sync_stats.un.mutex.mutex_try;
2788 2788 msp->mutex_try_fail =
2789 2789 sync_stats.un.mutex.mutex_try_fail;
2790 2790 if (sync_stats.sync_addr >= ta_p->hash_table_addr &&
2791 2791 (ix = sync_stats.sync_addr - ta_p->hash_table_addr)
2792 2792 < ta_p->hash_size * sizeof (thr_hash_table_t))
2793 2793 msp->mutex_internal =
2794 2794 ix / sizeof (thr_hash_table_t) + 1;
2795 2795 break;
2796 2796 }
2797 2797 case TDB_COND:
2798 2798 {
2799 2799 td_cond_stats_t *csp = &ss_p->ss_un.cond;
2800 2800
2801 2801 ss_p->ss_info.si_type = TD_SYNC_COND;
2802 2802 ss_p->ss_info.si_size = sizeof (cond_t);
2803 2803 csp->cond_wait =
2804 2804 sync_stats.un.cond.cond_wait;
2805 2805 csp->cond_timedwait =
2806 2806 sync_stats.un.cond.cond_timedwait;
2807 2807 csp->cond_wait_sleep_time =
2808 2808 sync_stats.un.cond.cond_wait_sleep_time;
2809 2809 csp->cond_timedwait_sleep_time =
2810 2810 sync_stats.un.cond.cond_timedwait_sleep_time;
2811 2811 csp->cond_timedwait_timeout =
2812 2812 sync_stats.un.cond.cond_timedwait_timeout;
2813 2813 csp->cond_signal =
2814 2814 sync_stats.un.cond.cond_signal;
2815 2815 csp->cond_broadcast =
2816 2816 sync_stats.un.cond.cond_broadcast;
2817 2817 if (sync_stats.sync_addr >= ta_p->hash_table_addr &&
2818 2818 (ix = sync_stats.sync_addr - ta_p->hash_table_addr)
2819 2819 < ta_p->hash_size * sizeof (thr_hash_table_t))
2820 2820 csp->cond_internal =
2821 2821 ix / sizeof (thr_hash_table_t) + 1;
2822 2822 break;
2823 2823 }
2824 2824 case TDB_RWLOCK:
2825 2825 {
2826 2826 td_rwlock_stats_t *rwsp = &ss_p->ss_un.rwlock;
2827 2827
2828 2828 ss_p->ss_info.si_type = TD_SYNC_RWLOCK;
2829 2829 ss_p->ss_info.si_size = sizeof (rwlock_t);
2830 2830 rwsp->rw_rdlock =
2831 2831 sync_stats.un.rwlock.rw_rdlock;
2832 2832 rwsp->rw_rdlock_try =
2833 2833 sync_stats.un.rwlock.rw_rdlock_try;
2834 2834 rwsp->rw_rdlock_try_fail =
2835 2835 sync_stats.un.rwlock.rw_rdlock_try_fail;
2836 2836 rwsp->rw_wrlock =
2837 2837 sync_stats.un.rwlock.rw_wrlock;
2838 2838 rwsp->rw_wrlock_hold_time =
2839 2839 sync_stats.un.rwlock.rw_wrlock_hold_time;
2840 2840 rwsp->rw_wrlock_try =
2841 2841 sync_stats.un.rwlock.rw_wrlock_try;
2842 2842 rwsp->rw_wrlock_try_fail =
2843 2843 sync_stats.un.rwlock.rw_wrlock_try_fail;
2844 2844 break;
2845 2845 }
2846 2846 case TDB_SEMA:
2847 2847 {
2848 2848 td_sema_stats_t *ssp = &ss_p->ss_un.sema;
2849 2849
2850 2850 ss_p->ss_info.si_type = TD_SYNC_SEMA;
2851 2851 ss_p->ss_info.si_size = sizeof (sema_t);
2852 2852 ssp->sema_wait =
2853 2853 sync_stats.un.sema.sema_wait;
2854 2854 ssp->sema_wait_sleep =
2855 2855 sync_stats.un.sema.sema_wait_sleep;
2856 2856 ssp->sema_wait_sleep_time =
2857 2857 sync_stats.un.sema.sema_wait_sleep_time;
2858 2858 ssp->sema_trywait =
2859 2859 sync_stats.un.sema.sema_trywait;
2860 2860 ssp->sema_trywait_fail =
2861 2861 sync_stats.un.sema.sema_trywait_fail;
2862 2862 ssp->sema_post =
2863 2863 sync_stats.un.sema.sema_post;
2864 2864 ssp->sema_max_count =
2865 2865 sync_stats.un.sema.sema_max_count;
2866 2866 ssp->sema_min_count =
2867 2867 sync_stats.un.sema.sema_min_count;
2868 2868 break;
2869 2869 }
2870 2870 default:
2871 2871 return_val = TD_BADSH;
2872 2872 break;
2873 2873 }
2874 2874
2875 2875 out:
2876 2876 (void) ps_pcontinue(ph_p);
2877 2877 ph_unlock(ta_p);
2878 2878 return (return_val);
2879 2879 }
2880 2880
2881 2881 /*
2882 2882 * Change the state of a synchronization variable.
2883 2883 * 1) mutex lock state set to value
2884 2884 * 2) semaphore's count set to value
2885 2885 * 3) writer's lock set by value < 0
2886 2886 * 4) reader's lock number of readers set to value >= 0
2887 2887 * Currently unused by dbx.
2888 2888 */
2889 2889 #pragma weak td_sync_setstate = __td_sync_setstate
2890 2890 td_err_e
2891 2891 __td_sync_setstate(const td_synchandle_t *sh_p, int value)
2892 2892 {
2893 2893 struct ps_prochandle *ph_p;
2894 2894 int trunc = 0;
2895 2895 td_err_e return_val;
2896 2896 td_so_un_t generic_so;
2897 2897 uint32_t *rwstate;
2898 2898
2899 2899 if ((ph_p = ph_lock_sh(sh_p, &return_val)) == NULL)
2900 2900 return (return_val);
2901 2901 if (ps_pstop(ph_p) != PS_OK) {
2902 2902 ph_unlock(sh_p->sh_ta_p);
2903 2903 return (TD_DBERR);
2904 2904 }
2905 2905
2906 2906 /*
2907 2907 * Read the synch. variable information.
2908 2908 * First attempt to read the whole union and if that fails
2909 2909 * fall back to reading only the smallest member, the condvar.
2910 2910 */
2911 2911 if (ps_pdread(ph_p, sh_p->sh_unique, &generic_so,
2912 2912 sizeof (generic_so)) != PS_OK) {
2913 2913 trunc = 1;
2914 2914 if (ps_pdread(ph_p, sh_p->sh_unique, &generic_so.condition,
2915 2915 sizeof (generic_so.condition)) != PS_OK) {
2916 2916 (void) ps_pcontinue(ph_p);
2917 2917 ph_unlock(sh_p->sh_ta_p);
2918 2918 return (TD_DBERR);
2919 2919 }
2920 2920 }
2921 2921
2922 2922 /*
2923 2923	 * Set the new value in the sync. variable: read the synch. variable
2924 2924	 * information from the process, reset its value and write it back.
2925 2925 */
2926 2926 switch (generic_so.condition.mutex_magic) {
2927 2927 case MUTEX_MAGIC:
2928 2928 if (trunc && ps_pdread(ph_p, sh_p->sh_unique,
2929 2929 &generic_so.lock, sizeof (generic_so.lock)) != PS_OK) {
2930 2930 return_val = TD_DBERR;
2931 2931 break;
2932 2932 }
2933 2933 generic_so.lock.mutex_lockw = (uint8_t)value;
2934 2934 if (ps_pdwrite(ph_p, sh_p->sh_unique, &generic_so.lock,
2935 2935 sizeof (generic_so.lock)) != PS_OK)
2936 2936 return_val = TD_DBERR;
2937 2937 break;
2938 2938 case SEMA_MAGIC:
2939 2939 if (trunc && ps_pdread(ph_p, sh_p->sh_unique,
2940 2940 &generic_so.semaphore, sizeof (generic_so.semaphore))
2941 2941 != PS_OK) {
2942 2942 return_val = TD_DBERR;
2943 2943 break;
2944 2944 }
2945 2945 generic_so.semaphore.count = value;
2946 2946 if (ps_pdwrite(ph_p, sh_p->sh_unique, &generic_so.semaphore,
2947 2947 sizeof (generic_so.semaphore)) != PS_OK)
2948 2948 return_val = TD_DBERR;
2949 2949 break;
2950 2950 case COND_MAGIC:
2951 2951 /* Operation not supported on a condition variable */
2952 2952 return_val = TD_ERR;
2953 2953 break;
2954 2954 case RWL_MAGIC:
2955 2955 if (trunc && ps_pdread(ph_p, sh_p->sh_unique,
2956 2956 &generic_so.rwlock, sizeof (generic_so.rwlock)) != PS_OK) {
2957 2957 return_val = TD_DBERR;
2958 2958 break;
2959 2959 }
2960 2960 rwstate = (uint32_t *)&generic_so.rwlock.readers;
2961 2961 *rwstate &= URW_HAS_WAITERS;
2962 2962 if (value < 0)
2963 2963 *rwstate |= URW_WRITE_LOCKED;
2964 2964 else
2965 2965 *rwstate |= (value & URW_READERS_MASK);
2966 2966 if (ps_pdwrite(ph_p, sh_p->sh_unique, &generic_so.rwlock,
2967 2967 sizeof (generic_so.rwlock)) != PS_OK)
2968 2968 return_val = TD_DBERR;
2969 2969 break;
2970 2970 default:
2971 2971 /* Bad sync. object type */
2972 2972 return_val = TD_BADSH;
2973 2973 break;
2974 2974 }
2975 2975
2976 2976 (void) ps_pcontinue(ph_p);
2977 2977 ph_unlock(sh_p->sh_ta_p);
2978 2978 return (return_val);
2979 2979 }
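
/*
 * Example (illustrative sketch, not part of this file): forcibly marking a
 * mutex in the stopped target as unlocked by setting its state to 0, per
 * the value conventions listed above.  This bypasses the normal wakeup
 * protocol, so it is only a last-resort debugger action.
 */
static td_err_e
force_unlock_mutex(const td_synchandle_t *sh)
{
	return (td_sync_setstate(sh, 0));
}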
2980 2980
2981 2981 typedef struct {
2982 2982 td_thr_iter_f *waiter_cb;
2983 2983 psaddr_t sync_obj_addr;
2984 2984 uint16_t sync_magic;
2985 2985 void *waiter_cb_arg;
2986 2986 td_err_e errcode;
2987 2987 } waiter_cb_ctl_t;
2988 2988
2989 2989 static int
2990 2990 waiters_cb(const td_thrhandle_t *th_p, void *arg)
2991 2991 {
2992 2992 td_thragent_t *ta_p = th_p->th_ta_p;
2993 2993 struct ps_prochandle *ph_p = ta_p->ph_p;
2994 2994 waiter_cb_ctl_t *wcb = arg;
2995 2995 caddr_t wchan;
2996 2996
2997 2997 if (ta_p->model == PR_MODEL_NATIVE) {
2998 2998 ulwp_t *ulwp = (ulwp_t *)th_p->th_unique;
2999 2999
3000 3000 if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_wchan,
3001 3001 &wchan, sizeof (wchan)) != PS_OK) {
3002 3002 wcb->errcode = TD_DBERR;
3003 3003 return (1);
3004 3004 }
3005 3005 } else {
3006 3006 #if defined(_LP64) && defined(_SYSCALL32)
3007 3007 ulwp32_t *ulwp = (ulwp32_t *)th_p->th_unique;
3008 3008 caddr32_t wchan32;
3009 3009
3010 3010 if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_wchan,
3011 3011 &wchan32, sizeof (wchan32)) != PS_OK) {
3012 3012 wcb->errcode = TD_DBERR;
3013 3013 return (1);
3014 3014 }
3015 3015 wchan = (caddr_t)(uintptr_t)wchan32;
3016 3016 #else
3017 3017 wcb->errcode = TD_ERR;
3018 3018 return (1);
3019 3019 #endif /* _SYSCALL32 */
3020 3020 }
3021 3021
3022 3022 if (wchan == NULL)
3023 3023 return (0);
3024 3024
3025 3025 if (wchan == (caddr_t)wcb->sync_obj_addr)
3026 3026 return ((*wcb->waiter_cb)(th_p, wcb->waiter_cb_arg));
3027 3027
3028 3028 return (0);
3029 3029 }
3030 3030
3031 3031 /*
3032 3032 * For a given synchronization variable, iterate over the
3033 3033 * set of waiting threads. The call back function is passed
3034 3034 * two parameters, a pointer to a thread handle and a pointer
3035 3035 * to extra call back data.
3036 3036 */
3037 3037 #pragma weak td_sync_waiters = __td_sync_waiters
3038 3038 td_err_e
3039 3039 __td_sync_waiters(const td_synchandle_t *sh_p, td_thr_iter_f *cb, void *cb_data)
3040 3040 {
3041 3041 struct ps_prochandle *ph_p;
3042 3042 waiter_cb_ctl_t wcb;
3043 3043 td_err_e return_val;
3044 3044
3045 3045 if ((ph_p = ph_lock_sh(sh_p, &return_val)) == NULL)
3046 3046 return (return_val);
3047 3047 if (ps_pdread(ph_p,
3048 3048 (psaddr_t)&((mutex_t *)sh_p->sh_unique)->mutex_magic,
3049 3049 (caddr_t)&wcb.sync_magic, sizeof (wcb.sync_magic)) != PS_OK) {
3050 3050 ph_unlock(sh_p->sh_ta_p);
3051 3051 return (TD_DBERR);
3052 3052 }
3053 3053 ph_unlock(sh_p->sh_ta_p);
3054 3054
3055 3055 switch (wcb.sync_magic) {
3056 3056 case MUTEX_MAGIC:
3057 3057 case COND_MAGIC:
3058 3058 case SEMA_MAGIC:
3059 3059 case RWL_MAGIC:
3060 3060 break;
3061 3061 default:
3062 3062 return (TD_BADSH);
3063 3063 }
3064 3064
3065 3065 wcb.waiter_cb = cb;
3066 3066 wcb.sync_obj_addr = sh_p->sh_unique;
3067 3067 wcb.waiter_cb_arg = cb_data;
3068 3068 wcb.errcode = TD_OK;
3069 3069 return_val = __td_ta_thr_iter(sh_p->sh_ta_p, waiters_cb, &wcb,
3070 3070 TD_THR_SLEEP, TD_THR_LOWEST_PRIORITY,
3071 3071 TD_SIGNO_MASK, TD_THR_ANY_USER_FLAGS);
3072 3072
3073 3073 if (return_val != TD_OK)
3074 3074 return (return_val);
3075 3075
3076 3076 return (wcb.errcode);
3077 3077 }
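
/*
 * Example (illustrative sketch, not part of this file): a td_thr_iter_f
 * callback used with td_sync_waiters() to list the lwpids of the threads
 * sleeping on a synchronization object; td_thr_get_info() is declared at
 * the top of this file.
 */
/* ARGSUSED */
static int
print_waiter(const td_thrhandle_t *th, void *arg)
{
	td_thrinfo_t ti;

	if (td_thr_get_info((td_thrhandle_t *)th, &ti) == TD_OK)
		(void) printf("lwp %d is waiting\n", (int)ti.ti_lid);
	return (0);
}

static td_err_e
list_waiters(const td_synchandle_t *sh)
{
	return (td_sync_waiters(sh, print_waiter, NULL));
}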