1 /*
   2  * CDDL HEADER START
   3  *
   4  * The contents of this file are subject to the terms of the
   5  * Common Development and Distribution License (the "License").
   6  * You may not use this file except in compliance with the License.
   7  *
   8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
   9  * or http://www.opensolaris.org/os/licensing.
  10  * See the License for the specific language governing permissions
  11  * and limitations under the License.
  12  *
  13  * When distributing Covered Code, include this CDDL HEADER in each
  14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
  15  * If applicable, add the following below this CDDL HEADER, with the
  16  * fields enclosed by brackets "[]" replaced with your own identifying
  17  * information: Portions Copyright [yyyy] [name of copyright owner]
  18  *
  19  * CDDL HEADER END
  20  */
  21 
  22 /*
  23  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
  24  * Use is subject to license terms.
  25  */
  26 
  27 /*
  28  * Copyright (c) 2014, Joyent, Inc. All rights reserved.
  29  * Copyright 2023 Oxide Computer Company
  30  */
  31 
  32 #include <stdio.h>
  33 #include <stdlib.h>
  34 #include <stddef.h>
  35 #include <unistd.h>
  36 #include <thr_uberdata.h>
  37 #include <thread_db.h>
  38 #include <libc_int.h>
  39 
  40 /*
  41  * Private structures.
  42  */
  43 
/*
 * Overlay of every synchronization object type this library examines.
 * Lets us read a sync. object from the target without knowing its type
 * in advance; the magic number, at a common offset in all four types,
 * identifies it (see __td_ta_map_addr2sync()).
 */
typedef union {
	mutex_t		lock;
	rwlock_t	rwlock;
	sema_t		semaphore;
	cond_t		condition;
} td_so_un_t;
  50 
/*
 * Agent process handle: per-target state cached by libc_db.
 */
struct td_thragent {
	rwlock_t	rwlock;		/* protects this struct; see ph_lock_ta() */
	struct ps_prochandle *ph_p;	/* client prochandle; NULLed by td_ta_delete() */
	int		initialized;	/* -1 bootstrap in progress, 0 no, */
					/* 1 partial (single-threaded), 2 full */
	int		sync_tracking;	/* non-zero: leave sync tracking alone on delete */
	int		model;		/* target data model (PR_MODEL_NATIVE or 32-bit) */
	int		primary_map;	/* copy of target's uberdata.primary_map */
	psaddr_t	bootstrap_addr;	/* _tdb_bootstrap in target; 0 if unset */
	psaddr_t	uberdata_addr;	/* address of the target's uberdata */
	psaddr_t	tdb_eventmask_addr;	/* &uberdata.tdb.tdb_ev_global_mask */
	psaddr_t	tdb_register_sync_addr;	/* &uberflags.uf_tdb_register_sync */
	psaddr_t	tdb_events[TD_MAX_EVENT_NUM - TD_MIN_EVENT_NUM + 1];
	psaddr_t	hash_table_addr;	/* target's thr_hash_table */
	int		hash_size;	/* hash bucket count; 1 => single-threaded */
	lwpid_t		single_lwpid;	/* lone lwpid when single-threaded */
	psaddr_t	single_ulwp_addr;	/* lone ulwp_t address when single-threaded */
};
  68 
  69 /*
  70  * This is the name of the variable in libc that contains
  71  * the uberdata address that we will need.
  72  */
  73 #define TD_BOOTSTRAP_NAME       "_tdb_bootstrap"
  74 /*
  75  * This is the actual name of uberdata, used in the event
  76  * that tdb_bootstrap has not yet been initialized.
  77  */
  78 #define TD_UBERDATA_NAME        "_uberdata"
  79 /*
  80  * The library name should end with ".so.1", but older versions of
  81  * dbx expect the unadorned name and malfunction if ".1" is specified.
  82  * Unfortunately, if ".1" is not specified, mdb malfunctions when it
  83  * is applied to another instance of itself (due to the presence of
  84  * /usr/lib/mdb/proc/libc.so).  So we try it both ways.
  85  */
  86 #define TD_LIBRARY_NAME         "libc.so"
  87 #define TD_LIBRARY_NAME_1       "libc.so.1"
  88 
  89 td_err_e __td_thr_get_info(td_thrhandle_t *th_p, td_thrinfo_t *ti_p);
  90 
  91 td_err_e __td_ta_thr_iter(td_thragent_t *ta_p, td_thr_iter_f *cb,
  92         void *cbdata_p, td_thr_state_e state, int ti_pri,
  93         sigset_t *ti_sigmask_p, unsigned ti_user_flags);
  94 
  95 /*
  96  * Initialize threads debugging interface.
  97  */
  98 #pragma weak td_init = __td_init
  99 td_err_e
 100 __td_init()
 101 {
 102         return (TD_OK);
 103 }
 104 
 105 /*
 106  * This function does nothing, and never did.
 107  * But the symbol is in the ABI, so we can't delete it.
 108  */
 109 #pragma weak td_log = __td_log
 110 void
 111 __td_log()
 112 {
 113 }
 114 
/*
 * Short-cut to read just the hash table size from the process,
 * to avoid repeatedly reading the full uberdata structure when
 * dealing with a single-threaded process.
 * Returns 0 if the agent is uninitialized or the read fails.
 */
static uint_t
td_read_hash_size(td_thragent_t *ta_p)
{
	psaddr_t addr;
	uint_t hash_size;

	switch (ta_p->initialized) {
	default:	/* uninitialized */
		return (0);
	case 1:		/* partially initialized */
		break;
	case 2:		/* fully initialized */
		return (ta_p->hash_size);
	}

	/*
	 * Partially initialized: read hash_size straight out of the
	 * target's uberdata, using the layout matching its data model.
	 */
	if (ta_p->model == PR_MODEL_NATIVE) {
		addr = ta_p->uberdata_addr + offsetof(uberdata_t, hash_size);
	} else {
#if defined(_LP64) && defined(_SYSCALL32)
		addr = ta_p->uberdata_addr + offsetof(uberdata32_t, hash_size);
#else
		addr = 0;	/* can't happen; the read below will fail */
#endif
	}
	if (ps_pdread(ta_p->ph_p, addr, &hash_size, sizeof (hash_size))
	    != PS_OK)
		return (0);
	return (hash_size);
}
 149 
/*
 * Read the target's uberdata and cache what the library needs:
 * primary_map, the event-mask and register-sync addresses, the thread
 * hash table address/size and the table of event stub addresses.
 * Sets ta_p->initialized to 2 (multi-threaded), 1 (single-threaded)
 * or 0 (not yet usable).  Returns TD_OK or TD_DBERR.
 */
static td_err_e
td_read_uberdata(td_thragent_t *ta_p)
{
	struct ps_prochandle *ph_p = ta_p->ph_p;
	int i;

	if (ta_p->model == PR_MODEL_NATIVE) {
		uberdata_t uberdata;

		if (ps_pdread(ph_p, ta_p->uberdata_addr,
		    &uberdata, sizeof (uberdata)) != PS_OK)
			return (TD_DBERR);
		ta_p->primary_map = uberdata.primary_map;
		ta_p->tdb_eventmask_addr = ta_p->uberdata_addr +
		    offsetof(uberdata_t, tdb.tdb_ev_global_mask);
		ta_p->tdb_register_sync_addr = ta_p->uberdata_addr +
		    offsetof(uberdata_t, uberflags.uf_tdb_register_sync);
		ta_p->hash_table_addr = (psaddr_t)uberdata.thr_hash_table;
		ta_p->hash_size = uberdata.hash_size;
		if (ps_pdread(ph_p, (psaddr_t)uberdata.tdb.tdb_events,
		    ta_p->tdb_events, sizeof (ta_p->tdb_events)) != PS_OK)
			return (TD_DBERR);
	} else {
#if defined(_LP64) && defined(_SYSCALL32)
		/* 32-bit target under a 64-bit debugger */
		uberdata32_t uberdata;
		caddr32_t tdb_events[TD_MAX_EVENT_NUM - TD_MIN_EVENT_NUM + 1];

		if (ps_pdread(ph_p, ta_p->uberdata_addr,
		    &uberdata, sizeof (uberdata)) != PS_OK)
			return (TD_DBERR);
		ta_p->primary_map = uberdata.primary_map;
		ta_p->tdb_eventmask_addr = ta_p->uberdata_addr +
		    offsetof(uberdata32_t, tdb.tdb_ev_global_mask);
		ta_p->tdb_register_sync_addr = ta_p->uberdata_addr +
		    offsetof(uberdata32_t, uberflags.uf_tdb_register_sync);
		ta_p->hash_table_addr = (psaddr_t)uberdata.thr_hash_table;
		ta_p->hash_size = uberdata.hash_size;
		if (ps_pdread(ph_p, (psaddr_t)uberdata.tdb.tdb_events,
		    tdb_events, sizeof (tdb_events)) != PS_OK)
			return (TD_DBERR);
		/* widen the 32-bit event stub addresses to psaddr_t */
		for (i = 0; i < TD_MAX_EVENT_NUM - TD_MIN_EVENT_NUM + 1; i++)
			ta_p->tdb_events[i] = tdb_events[i];
#else
		return (TD_DBERR);
#endif
	}

	/*
	 * Unfortunately, we are (implicitly) assuming that our uberdata
	 * definition precisely matches that of our target.  If this is not
	 * true (that is, if we're examining a core file from a foreign
	 * system that has a different definition of uberdata), the failure
	 * modes can be frustratingly non-explicit.  In an effort to catch
	 * this upon initialization (when the debugger may still be able to
	 * opt for another thread model or may be able to fail explicitly), we
	 * check that each of our tdb_events points to valid memory (these are
	 * putatively text upon which a breakpoint can be issued), with the
	 * hope that this is enough of a self-consistency check to lead to
	 * explicit failure on a mismatch.
	 */
	for (i = 0; i < TD_MAX_EVENT_NUM - TD_MIN_EVENT_NUM + 1; i++) {
		uint8_t check;

		if (ps_pdread(ph_p, (psaddr_t)ta_p->tdb_events[i],
		    &check, sizeof (check)) != PS_OK) {
			return (TD_DBERR);
		}
	}

	if (ta_p->hash_size != 1) {  /* multi-threaded */
		ta_p->initialized = 2;
		ta_p->single_lwpid = 0;
		ta_p->single_ulwp_addr = 0;
	} else {			/* single-threaded */
		ta_p->initialized = 1;
		/*
		 * Get the address and lwpid of the single thread/LWP.
		 * It may not be ulwp_one if this is a child of fork1().
		 */
		if (ta_p->model == PR_MODEL_NATIVE) {
			thr_hash_table_t head;
			lwpid_t lwpid = 0;

			if (ps_pdread(ph_p, ta_p->hash_table_addr,
			    &head, sizeof (head)) != PS_OK)
				return (TD_DBERR);
			if ((psaddr_t)head.hash_bucket == 0)
				ta_p->initialized = 0;	/* no threads yet */
			else if (ps_pdread(ph_p, (psaddr_t)head.hash_bucket +
			    offsetof(ulwp_t, ul_lwpid),
			    &lwpid, sizeof (lwpid)) != PS_OK)
				return (TD_DBERR);
			ta_p->single_lwpid = lwpid;
			ta_p->single_ulwp_addr = (psaddr_t)head.hash_bucket;
		} else {
#if defined(_LP64) && defined(_SYSCALL32)
			thr_hash_table32_t head;
			lwpid_t lwpid = 0;

			if (ps_pdread(ph_p, ta_p->hash_table_addr,
			    &head, sizeof (head)) != PS_OK)
				return (TD_DBERR);
			if ((psaddr_t)head.hash_bucket == 0)
				ta_p->initialized = 0;	/* no threads yet */
			else if (ps_pdread(ph_p, (psaddr_t)head.hash_bucket +
			    offsetof(ulwp32_t, ul_lwpid),
			    &lwpid, sizeof (lwpid)) != PS_OK)
				return (TD_DBERR);
			ta_p->single_lwpid = lwpid;
			ta_p->single_ulwp_addr = (psaddr_t)head.hash_bucket;
#else
			return (TD_DBERR);
#endif
		}
	}
	/* a non-primary link map means libc is not fully set up yet */
	if (!ta_p->primary_map)
		ta_p->initialized = 0;
	return (TD_OK);
}
 269 
/*
 * Establish ta_p->uberdata_addr for the target and read its uberdata.
 * Looks up _tdb_bootstrap (falling back to the _uberdata symbol when
 * the bootstrap pointer is not yet set), first in "libc.so" and then
 * in "libc.so.1" (see the TD_LIBRARY_NAME comment above).
 * Idempotent: returns immediately once fully initialized.
 */
static td_err_e
td_read_bootstrap_data(td_thragent_t *ta_p)
{
	struct ps_prochandle *ph_p = ta_p->ph_p;
	psaddr_t bootstrap_addr;
	psaddr_t uberdata_addr;
	ps_err_e db_return;
	td_err_e return_val;
	int do_1;

	switch (ta_p->initialized) {
	case 2:			/* fully initialized */
		return (TD_OK);
	case 1:			/* partially initialized */
		if (td_read_hash_size(ta_p) == 1)
			return (TD_OK);
		return (td_read_uberdata(ta_p));
	}

	/*
	 * Uninitialized -- do the startup work.
	 * We set ta_p->initialized to -1 to cut off recursive calls
	 * into libc_db by code in the provider of ps_pglobal_lookup().
	 */
	do_1 = 0;
	ta_p->initialized = -1;
	db_return = ps_pglobal_lookup(ph_p, TD_LIBRARY_NAME,
	    TD_BOOTSTRAP_NAME, &bootstrap_addr);
	if (db_return == PS_NOSYM) {
		/* retry with the fully versioned library name */
		do_1 = 1;
		db_return = ps_pglobal_lookup(ph_p, TD_LIBRARY_NAME_1,
		    TD_BOOTSTRAP_NAME, &bootstrap_addr);
	}
	if (db_return == PS_NOSYM)	/* libc is not linked yet */
		return (TD_NOLIBTHREAD);
	if (db_return != PS_OK)
		return (TD_ERR);
	/* look up _uberdata in whichever library name worked above */
	db_return = ps_pglobal_lookup(ph_p,
	    do_1? TD_LIBRARY_NAME_1 : TD_LIBRARY_NAME,
	    TD_UBERDATA_NAME, &uberdata_addr);
	if (db_return == PS_NOSYM)	/* libc is not linked yet */
		return (TD_NOLIBTHREAD);
	if (db_return != PS_OK)
		return (TD_ERR);

	/*
	 * Read the uberdata address into the thread agent structure.
	 * _tdb_bootstrap is read twice (pointer to pointer); if either
	 * level is still NULL the target's primary link map is not yet
	 * initialized, so fall back to the _uberdata symbol address.
	 */
	if (ta_p->model == PR_MODEL_NATIVE) {
		psaddr_t psaddr;
		if (ps_pdread(ph_p, bootstrap_addr,
		    &psaddr, sizeof (psaddr)) != PS_OK)
			return (TD_DBERR);
		if ((ta_p->bootstrap_addr = psaddr) == 0)
			psaddr = uberdata_addr;
		else if (ps_pdread(ph_p, psaddr,
		    &psaddr, sizeof (psaddr)) != PS_OK)
			return (TD_DBERR);
		if (psaddr == 0) {
			/* primary linkmap in the tgt is not initialized */
			ta_p->bootstrap_addr = 0;
			psaddr = uberdata_addr;
		}
		ta_p->uberdata_addr = psaddr;
	} else {
#if defined(_LP64) && defined(_SYSCALL32)
		caddr32_t psaddr;
		if (ps_pdread(ph_p, bootstrap_addr,
		    &psaddr, sizeof (psaddr)) != PS_OK)
			return (TD_DBERR);
		if ((ta_p->bootstrap_addr = (psaddr_t)psaddr) == 0)
			psaddr = (caddr32_t)uberdata_addr;
		else if (ps_pdread(ph_p, (psaddr_t)psaddr,
		    &psaddr, sizeof (psaddr)) != PS_OK)
			return (TD_DBERR);
		if (psaddr == 0) {
			/* primary linkmap in the tgt is not initialized */
			ta_p->bootstrap_addr = 0;
			psaddr = (caddr32_t)uberdata_addr;
		}
		ta_p->uberdata_addr = (psaddr_t)psaddr;
#else
		return (TD_DBERR);
#endif	/* _SYSCALL32 */
	}

	if ((return_val = td_read_uberdata(ta_p)) != TD_OK)
		return (return_val);
	/* bootstrap pointer still unset: stay uninitialized, retry later */
	if (ta_p->bootstrap_addr == 0)
		ta_p->initialized = 0;
	return (TD_OK);
}
 362 
#pragma weak ps_kill
#pragma weak ps_lrolltoaddr

/*
 * Allocate a new agent process handle ("thread agent").
 * The target is stopped for the duration of the setup and resumed
 * before returning.  On success *ta_pp points at the new agent;
 * on failure it is NULL and the error code is returned.
 */
#pragma weak td_ta_new = __td_ta_new
td_err_e
__td_ta_new(struct ps_prochandle *ph_p, td_thragent_t **ta_pp)
{
	td_thragent_t *ta_p;
	int model;
	td_err_e return_val = TD_OK;

	if (ph_p == NULL)
		return (TD_BADPH);
	if (ta_pp == NULL)
		return (TD_ERR);
	*ta_pp = NULL;
	if (ps_pstop(ph_p) != PS_OK)
		return (TD_DBERR);
	/*
	 * ps_pdmodel might not be defined if this is an older client.
	 * Make it a weak symbol and test if it exists before calling.
	 */
#pragma weak ps_pdmodel
	if (ps_pdmodel == NULL) {
		model = PR_MODEL_NATIVE;
	} else if (ps_pdmodel(ph_p, &model) != PS_OK) {
		(void) ps_pcontinue(ph_p);
		return (TD_ERR);
	}
	if ((ta_p = malloc(sizeof (*ta_p))) == NULL) {
		(void) ps_pcontinue(ph_p);
		return (TD_MALLOC);
	}

	/*
	 * Initialize the agent process handle.
	 * Pick up the symbol value we need from the target process.
	 */
	(void) memset(ta_p, 0, sizeof (*ta_p));
	ta_p->ph_p = ph_p;
	(void) rwlock_init(&ta_p->rwlock, USYNC_THREAD, NULL);
	ta_p->model = model;
	return_val = td_read_bootstrap_data(ta_p);

	/*
	 * Because the old libthread_db enabled lock tracking by default,
	 * we must also do it.  However, we do it only if the application
	 * provides the ps_kill() and ps_lrolltoaddr() interfaces.
	 * (dbx provides the ps_kill() and ps_lrolltoaddr() interfaces.)
	 */
	if (return_val == TD_OK && ps_kill != NULL && ps_lrolltoaddr != NULL) {
		register_sync_t oldenable;
		register_sync_t enable = REGISTER_SYNC_ENABLE;
		psaddr_t psaddr = ta_p->tdb_register_sync_addr;

		if (ps_pdread(ph_p, psaddr,
		    &oldenable, sizeof (oldenable)) != PS_OK)
			return_val = TD_DBERR;
		else if (oldenable != REGISTER_SYNC_OFF ||
		    ps_pdwrite(ph_p, psaddr,
		    &enable, sizeof (enable)) != PS_OK) {
			/*
			 * Lock tracking was already enabled or we
			 * failed to enable it, probably because we
			 * are examining a core file.  In either case
			 * set the sync_tracking flag non-zero to
			 * indicate that we should not attempt to
			 * disable lock tracking when we delete the
			 * agent process handle in td_ta_delete().
			 */
			ta_p->sync_tracking = 1;
		}
	}

	if (return_val == TD_OK)
		*ta_pp = ta_p;
	else
		free(ta_p);

	/* resume the target in every case */
	(void) ps_pcontinue(ph_p);
	return (return_val);
}
 448 
/*
 * Utility function to grab the readers lock and return the prochandle,
 * given an agent process handle.  Performs standard error checking.
 * Returns non-NULL with the lock held, or NULL with the lock not held.
 */
static struct ps_prochandle *
ph_lock_ta(td_thragent_t *ta_p, td_err_e *err)
{
	struct ps_prochandle *ph_p = NULL;
	td_err_e error;

	/* initialized == -1: bootstrap in progress, reject re-entry */
	if (ta_p == NULL || ta_p->initialized == -1) {
		*err = TD_BADTA;
	} else if (rw_rdlock(&ta_p->rwlock) != 0) {	/* can't happen? */
		*err = TD_BADTA;
	} else if ((ph_p = ta_p->ph_p) == NULL) {
		/* the agent was deleted by td_ta_delete() */
		(void) rw_unlock(&ta_p->rwlock);
		*err = TD_BADPH;
	} else if (ta_p->initialized != 2 &&
	    (error = td_read_bootstrap_data(ta_p)) != TD_OK) {
		/* lazy initialization failed; drop the lock again */
		(void) rw_unlock(&ta_p->rwlock);
		ph_p = NULL;
		*err = error;
	} else {
		*err = TD_OK;
	}

	return (ph_p);
}
 478 
 479 /*
 480  * Utility function to grab the readers lock and return the prochandle,
 481  * given an agent thread handle.  Performs standard error checking.
 482  * Returns non-NULL with the lock held, or NULL with the lock not held.
 483  */
 484 static struct ps_prochandle *
 485 ph_lock_th(const td_thrhandle_t *th_p, td_err_e *err)
 486 {
 487         if (th_p == NULL || th_p->th_unique == 0) {
 488                 *err = TD_BADTH;
 489                 return (NULL);
 490         }
 491         return (ph_lock_ta(th_p->th_ta_p, err));
 492 }
 493 
 494 /*
 495  * Utility function to grab the readers lock and return the prochandle,
 496  * given a synchronization object handle.  Performs standard error checking.
 497  * Returns non-NULL with the lock held, or NULL with the lock not held.
 498  */
 499 static struct ps_prochandle *
 500 ph_lock_sh(const td_synchandle_t *sh_p, td_err_e *err)
 501 {
 502         if (sh_p == NULL || sh_p->sh_unique == 0) {
 503                 *err = TD_BADSH;
 504                 return (NULL);
 505         }
 506         return (ph_lock_ta(sh_p->sh_ta_p, err));
 507 }
 508 
 509 /*
 510  * Unlock the agent process handle obtained from ph_lock_*().
 511  */
 512 static void
 513 ph_unlock(td_thragent_t *ta_p)
 514 {
 515         (void) rw_unlock(&ta_p->rwlock);
 516 }
 517 
/*
 * De-allocate an agent process handle,
 * releasing all related resources.
 *
 * XXX -- This is hopelessly broken ---
 * Storage for thread agent is not deallocated.  The prochandle
 * in the thread agent is set to NULL so that future uses of
 * the thread agent can be detected and an error value returned.
 * All functions in the external user interface that make
 * use of the thread agent are expected
 * to check for a NULL prochandle in the thread agent.
 * All such functions are also expected to obtain a
 * reader lock on the thread agent while it is using it.
 */
#pragma weak td_ta_delete = __td_ta_delete
td_err_e
__td_ta_delete(td_thragent_t *ta_p)
{
	struct ps_prochandle *ph_p;

	/*
	 * This is the only place we grab the writer lock.
	 * We are going to NULL out the prochandle.
	 */
	if (ta_p == NULL || rw_wrlock(&ta_p->rwlock) != 0)
		return (TD_BADTA);
	if ((ph_p = ta_p->ph_p) == NULL) {
		/* already deleted */
		(void) rw_unlock(&ta_p->rwlock);
		return (TD_BADPH);
	}
	/*
	 * If synch. tracking was disabled when td_ta_new() was called and
	 * if td_ta_sync_tracking_enable() was never called, then disable
	 * synch. tracking (it was enabled by default in td_ta_new()).
	 */
	if (ta_p->sync_tracking == 0 &&
	    ps_kill != NULL && ps_lrolltoaddr != NULL) {
		register_sync_t enable = REGISTER_SYNC_DISABLE;

		(void) ps_pdwrite(ph_p, ta_p->tdb_register_sync_addr,
		    &enable, sizeof (enable));
	}
	/* mark the agent dead; its storage is deliberately not freed */
	ta_p->ph_p = NULL;
	(void) rw_unlock(&ta_p->rwlock);
	return (TD_OK);
}
 564 
 565 /*
 566  * Map an agent process handle to a client prochandle.
 567  * Currently unused by dbx.
 568  */
 569 #pragma weak td_ta_get_ph = __td_ta_get_ph
 570 td_err_e
 571 __td_ta_get_ph(td_thragent_t *ta_p, struct ps_prochandle **ph_pp)
 572 {
 573         td_err_e return_val;
 574 
 575         if (ph_pp != NULL)      /* protect stupid callers */
 576                 *ph_pp = NULL;
 577         if (ph_pp == NULL)
 578                 return (TD_ERR);
 579         if ((*ph_pp = ph_lock_ta(ta_p, &return_val)) == NULL)
 580                 return (return_val);
 581         ph_unlock(ta_p);
 582         return (TD_OK);
 583 }
 584 
 585 /*
 586  * Set the process's suggested concurrency level.
 587  * This is a no-op in a one-level model.
 588  * Currently unused by dbx.
 589  */
 590 #pragma weak td_ta_setconcurrency = __td_ta_setconcurrency
 591 /* ARGSUSED1 */
 592 td_err_e
 593 __td_ta_setconcurrency(const td_thragent_t *ta_p, int level)
 594 {
 595         if (ta_p == NULL)
 596                 return (TD_BADTA);
 597         if (ta_p->ph_p == NULL)
 598                 return (TD_BADPH);
 599         return (TD_OK);
 600 }
 601 
 602 /*
 603  * Get the number of threads in the process.
 604  */
 605 #pragma weak td_ta_get_nthreads = __td_ta_get_nthreads
 606 td_err_e
 607 __td_ta_get_nthreads(td_thragent_t *ta_p, int *nthread_p)
 608 {
 609         struct ps_prochandle *ph_p;
 610         td_err_e return_val;
 611         int nthreads;
 612         int nzombies;
 613         psaddr_t nthreads_addr;
 614         psaddr_t nzombies_addr;
 615 
 616         if (ta_p->model == PR_MODEL_NATIVE) {
 617                 nthreads_addr = ta_p->uberdata_addr +
 618                     offsetof(uberdata_t, nthreads);
 619                 nzombies_addr = ta_p->uberdata_addr +
 620                     offsetof(uberdata_t, nzombies);
 621         } else {
 622 #if defined(_LP64) && defined(_SYSCALL32)
 623                 nthreads_addr = ta_p->uberdata_addr +
 624                     offsetof(uberdata32_t, nthreads);
 625                 nzombies_addr = ta_p->uberdata_addr +
 626                     offsetof(uberdata32_t, nzombies);
 627 #else
 628                 nthreads_addr = 0;
 629                 nzombies_addr = 0;
 630 #endif  /* _SYSCALL32 */
 631         }
 632 
 633         if (nthread_p == NULL)
 634                 return (TD_ERR);
 635         if ((ph_p = ph_lock_ta(ta_p, &return_val)) == NULL)
 636                 return (return_val);
 637         if (ps_pdread(ph_p, nthreads_addr, &nthreads, sizeof (int)) != PS_OK)
 638                 return_val = TD_DBERR;
 639         if (ps_pdread(ph_p, nzombies_addr, &nzombies, sizeof (int)) != PS_OK)
 640                 return_val = TD_DBERR;
 641         ph_unlock(ta_p);
 642         if (return_val == TD_OK)
 643                 *nthread_p = nthreads + nzombies;
 644         return (return_val);
 645 }
 646 
/*
 * Callback data for td_mapper_id2thr(): the tid to search for,
 * a found flag, and the matching thread handle on success.
 */
typedef struct {
	thread_t	tid;		/* thread id being searched for */
	int		found;		/* set to 1 when a match is found */
	td_thrhandle_t	th;		/* handle of the matching thread */
} td_mapper_param_t;
 652 
 653 /*
 654  * Check the value in data against the thread id.
 655  * If it matches, return 1 to terminate iterations.
 656  * This function is used by td_ta_map_id2thr() to map a tid to a thread handle.
 657  */
 658 static int
 659 td_mapper_id2thr(td_thrhandle_t *th_p, td_mapper_param_t *data)
 660 {
 661         td_thrinfo_t ti;
 662 
 663         if (__td_thr_get_info(th_p, &ti) == TD_OK &&
 664             data->tid == ti.ti_tid) {
 665                 data->found = 1;
 666                 data->th = *th_p;
 667                 return (1);
 668         }
 669         return (0);
 670 }
 671 
/*
 * Given a thread identifier, return the corresponding thread handle.
 * Returns TD_NOTHR when no thread with that id exists.
 */
#pragma weak td_ta_map_id2thr = __td_ta_map_id2thr
td_err_e
__td_ta_map_id2thr(td_thragent_t *ta_p, thread_t tid,
    td_thrhandle_t *th_p)
{
	td_err_e		return_val;
	td_mapper_param_t	data;

	/*
	 * Fast path: if the target is known single-threaded and the lone
	 * lwpid matches, build the handle without iterating.  The hash
	 * size is re-read (and the uberdata re-fetched if it changed) in
	 * case the target has since created more threads; the second
	 * initialized == 1 test guards against exactly that.
	 */
	if (th_p != NULL &&	/* optimize for a single thread */
	    ta_p != NULL &&
	    ta_p->initialized == 1 &&
	    (td_read_hash_size(ta_p) == 1 ||
	    td_read_uberdata(ta_p) == TD_OK) &&
	    ta_p->initialized == 1 &&
	    ta_p->single_lwpid == tid) {
		th_p->th_ta_p = ta_p;
		if ((th_p->th_unique = ta_p->single_ulwp_addr) == 0)
			return (TD_NOTHR);
		return (TD_OK);
	}

	/*
	 * LOCKING EXCEPTION - Locking is not required here because
	 * the locking and checking will be done in __td_ta_thr_iter.
	 */

	if (ta_p == NULL)
		return (TD_BADTA);
	if (th_p == NULL)
		return (TD_BADTH);
	if (tid == 0)
		return (TD_NOTHR);

	/* slow path: walk every thread looking for a matching tid */
	data.tid = tid;
	data.found = 0;
	return_val = __td_ta_thr_iter(ta_p,
	    (td_thr_iter_f *)td_mapper_id2thr, (void *)&data,
	    TD_THR_ANY_STATE, TD_THR_LOWEST_PRIORITY,
	    TD_SIGNO_MASK, TD_THR_ANY_USER_FLAGS);
	if (return_val == TD_OK) {
		if (data.found == 0)
			return_val = TD_NOTHR;
		else
			*th_p = data.th;
	}

	return (return_val);
}
 723 
 724 /*
 725  * Map the address of a synchronization object to a sync. object handle.
 726  */
 727 #pragma weak td_ta_map_addr2sync = __td_ta_map_addr2sync
 728 td_err_e
 729 __td_ta_map_addr2sync(td_thragent_t *ta_p, psaddr_t addr, td_synchandle_t *sh_p)
 730 {
 731         struct ps_prochandle *ph_p;
 732         td_err_e return_val;
 733         uint16_t sync_magic;
 734 
 735         if (sh_p == NULL)
 736                 return (TD_BADSH);
 737         if (addr == 0)
 738                 return (TD_ERR);
 739         if ((ph_p = ph_lock_ta(ta_p, &return_val)) == NULL)
 740                 return (return_val);
 741         /*
 742          * Check the magic number of the sync. object to make sure it's valid.
 743          * The magic number is at the same offset for all sync. objects.
 744          */
 745         if (ps_pdread(ph_p, (psaddr_t)&((mutex_t *)addr)->mutex_magic,
 746             &sync_magic, sizeof (sync_magic)) != PS_OK) {
 747                 ph_unlock(ta_p);
 748                 return (TD_BADSH);
 749         }
 750         ph_unlock(ta_p);
 751         if (sync_magic != MUTEX_MAGIC && sync_magic != COND_MAGIC &&
 752             sync_magic != SEMA_MAGIC && sync_magic != RWL_MAGIC)
 753                 return (TD_BADSH);
 754         /*
 755          * Just fill in the appropriate fields of the sync. handle.
 756          */
 757         sh_p->sh_ta_p = (td_thragent_t *)ta_p;
 758         sh_p->sh_unique = addr;
 759         return (TD_OK);
 760 }
 761 
 762 /*
 763  * Iterate over the set of global TSD keys.
 764  * The call back function is called with three arguments,
 765  * a key, a pointer to the destructor function, and the cbdata pointer.
 766  * Currently unused by dbx.
 767  */
 768 #pragma weak td_ta_tsd_iter = __td_ta_tsd_iter
td_err_e
__td_ta_tsd_iter(td_thragent_t *ta_p, td_key_iter_f *cb, void *cbdata_p)
{
        struct ps_prochandle *ph_p;
        td_err_e        return_val;
        int             key;
        int             numkeys;
        psaddr_t        dest_addr;      /* victim address of destructor array */
        psaddr_t        *destructors = NULL;    /* local copy of that array */
        PFrV            destructor;

        if (cb == NULL)
                return (TD_ERR);
        if ((ph_p = ph_lock_ta(ta_p, &return_val)) == NULL)
                return (return_val);
        if (ps_pstop(ph_p) != PS_OK) {
                ph_unlock(ta_p);
                return (TD_DBERR);
        }

        /*
         * Read the tsd_metadata from the victim's uberdata to learn how
         * many TSD keys are in use and where its destructor array lives.
         * The local buffer is sized for the victim's data model:
         * psaddr_t entries for a native-model victim, caddr32_t entries
         * for a 32-bit victim inspected from a 64-bit process.
         */
        if (ta_p->model == PR_MODEL_NATIVE) {
                tsd_metadata_t tsdm;

                if (ps_pdread(ph_p,
                    ta_p->uberdata_addr + offsetof(uberdata_t, tsd_metadata),
                    &tsdm, sizeof (tsdm)) != PS_OK)
                        return_val = TD_DBERR;
                else {
                        numkeys = tsdm.tsdm_nused;
                        dest_addr = (psaddr_t)tsdm.tsdm_destro;
                        if (numkeys > 0)
                                destructors =
                                    malloc(numkeys * sizeof (psaddr_t));
                }
        } else {
#if defined(_LP64) && defined(_SYSCALL32)
                tsd_metadata32_t tsdm;

                if (ps_pdread(ph_p,
                    ta_p->uberdata_addr + offsetof(uberdata32_t, tsd_metadata),
                    &tsdm, sizeof (tsdm)) != PS_OK)
                        return_val = TD_DBERR;
                else {
                        numkeys = tsdm.tsdm_nused;
                        dest_addr = (psaddr_t)tsdm.tsdm_destro;
                        if (numkeys > 0)
                                destructors =
                                    malloc(numkeys * sizeof (caddr32_t));
                }
#else
                return_val = TD_DBERR;
#endif  /* _SYSCALL32 */
        }

        /*
         * Nothing more to do if the metadata read failed or there are
         * no allocated keys.  (numkeys is only examined when
         * return_val == TD_OK, so it is never read uninitialized.)
         */
        if (return_val != TD_OK || numkeys <= 0) {
                (void) ps_pcontinue(ph_p);
                ph_unlock(ta_p);
                return (return_val);
        }

        /*
         * Copy the victim's destructor array into the local buffer and
         * invoke the callback for each allocated key, stopping early if
         * the callback returns non-zero.  Iteration starts at 1; slot 0
         * of the array is never reported as a key.
         */
        if (destructors == NULL)
                return_val = TD_MALLOC;         /* malloc() above failed */
        else if (ta_p->model == PR_MODEL_NATIVE) {
                if (ps_pdread(ph_p, dest_addr,
                    destructors, numkeys * sizeof (psaddr_t)) != PS_OK)
                        return_val = TD_DBERR;
                else {
                        for (key = 1; key < numkeys; key++) {
                                destructor = (PFrV)destructors[key];
                                if (destructor != TSD_UNALLOCATED &&
                                    (*cb)(key, destructor, cbdata_p))
                                        break;
                        }
                }
#if defined(_LP64) && defined(_SYSCALL32)
        } else {
                caddr32_t *destructors32 = (caddr32_t *)destructors;
                caddr32_t destruct32;

                if (ps_pdread(ph_p, dest_addr,
                    destructors32, numkeys * sizeof (caddr32_t)) != PS_OK)
                        return_val = TD_DBERR;
                else {
                        for (key = 1; key < numkeys; key++) {
                                destruct32 = destructors32[key];
                                if ((destruct32 !=
                                    (caddr32_t)(uintptr_t)TSD_UNALLOCATED) &&
                                    (*cb)(key, (PFrV)(uintptr_t)destruct32,
                                    cbdata_p))
                                        break;
                        }
                }
#endif  /* _SYSCALL32 */
        }

        if (destructors)
                free(destructors);
        (void) ps_pcontinue(ph_p);
        ph_unlock(ta_p);
        return (return_val);
}
 870 
 871 int
 872 sigequalset(const sigset_t *s1, const sigset_t *s2)
 873 {
 874         return (
 875             s1->__sigbits[0] == s2->__sigbits[0] &&
 876             s1->__sigbits[1] == s2->__sigbits[1] &&
 877             s1->__sigbits[2] == s2->__sigbits[2] &&
 878             s1->__sigbits[3] == s2->__sigbits[3]);
 879 }
 880 
 881 /*
 882  * Description:
 883  *   Iterate over all threads. For each thread call
 884  * the function pointed to by "cb" with a pointer
 885  * to a thread handle, and a pointer to data which
 886  * can be NULL. Only call td_thr_iter_f() on threads
 887  * which match the properties of state, ti_pri,
 888  * ti_sigmask_p, and ti_user_flags.  If cb returns
 889  * a non-zero value, terminate iterations.
 890  *
 891  * Input:
 892  *   *ta_p - thread agent
 893  *   *cb - call back function defined by user.
 894  * td_thr_iter_f() takes a thread handle and
 895  * cbdata_p as a parameter.
 896  *   cbdata_p - parameter for td_thr_iter_f().
 897  *
 898  *   state - state of threads of interest.  A value of
 899  * TD_THR_ANY_STATE from enum td_thr_state_e
 900  * does not restrict iterations by state.
 901  *   ti_pri - lower bound of priorities of threads of
 902  * interest.  A value of TD_THR_LOWEST_PRIORITY
 903  * defined in thread_db.h does not restrict
 904  * iterations by priority.  A thread with priority
 905  * less than ti_pri will NOT be passed to the callback
 906  * function.
 907  *   ti_sigmask_p - signal mask of threads of interest.
 908  * A value of TD_SIGNO_MASK defined in thread_db.h
 909  * does not restrict iterations by signal mask.
 910  *   ti_user_flags - user flags of threads of interest.  A
 911  * value of TD_THR_ANY_USER_FLAGS defined in thread_db.h
 912  * does not restrict iterations by user flags.
 913  */
 914 #pragma weak td_ta_thr_iter = __td_ta_thr_iter
td_err_e
__td_ta_thr_iter(td_thragent_t *ta_p, td_thr_iter_f *cb,
    void *cbdata_p, td_thr_state_e state, int ti_pri,
    sigset_t *ti_sigmask_p, unsigned ti_user_flags)
{
        struct ps_prochandle *ph_p;
        psaddr_t        first_lwp_addr;
        psaddr_t        first_zombie_addr;
        psaddr_t        curr_lwp_addr;
        psaddr_t        next_lwp_addr;
        td_thrhandle_t  th;
        ps_err_e        db_return;
        ps_err_e        db_return2;
        td_err_e        return_val;

        if (cb == NULL)
                return (TD_ERR);
        /*
         * If state is not within bound, short circuit.
         */
        if (state < TD_THR_ANY_STATE || state > TD_THR_STOPPED_ASLEEP)
                return (TD_OK);

        if ((ph_p = ph_lock_ta(ta_p, &return_val)) == NULL)
                return (return_val);
        if (ps_pstop(ph_p) != PS_OK) {
                ph_unlock(ta_p);
                return (TD_DBERR);
        }

        /*
         * For each ulwp_t in the circular linked lists pointed
         * to by "all_lwps" and "all_zombies":
         * (1) Filter each thread.
         * (2) Create the thread_object for each thread that passes.
         * (3) Call the call back function on each thread.
         */

        /*
         * Read the heads of both lists from the victim's uberdata,
         * using the layout that matches the victim's data model.
         */
        if (ta_p->model == PR_MODEL_NATIVE) {
                db_return = ps_pdread(ph_p,
                    ta_p->uberdata_addr + offsetof(uberdata_t, all_lwps),
                    &first_lwp_addr, sizeof (first_lwp_addr));
                db_return2 = ps_pdread(ph_p,
                    ta_p->uberdata_addr + offsetof(uberdata_t, all_zombies),
                    &first_zombie_addr, sizeof (first_zombie_addr));
        } else {
#if defined(_LP64) && defined(_SYSCALL32)
                caddr32_t addr32;

                db_return = ps_pdread(ph_p,
                    ta_p->uberdata_addr + offsetof(uberdata32_t, all_lwps),
                    &addr32, sizeof (addr32));
                first_lwp_addr = addr32;
                db_return2 = ps_pdread(ph_p,
                    ta_p->uberdata_addr + offsetof(uberdata32_t, all_zombies),
                    &addr32, sizeof (addr32));
                first_zombie_addr = addr32;
#else   /* _SYSCALL32 */
                db_return = PS_ERR;
                db_return2 = PS_ERR;
#endif  /* _SYSCALL32 */
        }
        /* Fold the two read results: fail if either read failed. */
        if (db_return == PS_OK)
                db_return = db_return2;

        /*
         * If first_lwp_addr and first_zombie_addr are both NULL,
         * libc must not yet be initialized or all threads have
         * exited.  Return TD_NOTHR and all will be well.
         */
        if (db_return == PS_OK &&
            first_lwp_addr == 0 && first_zombie_addr == 0) {
                (void) ps_pcontinue(ph_p);
                ph_unlock(ta_p);
                return (TD_NOTHR);
        }
        if (db_return != PS_OK) {
                (void) ps_pcontinue(ph_p);
                ph_unlock(ta_p);
                return (TD_DBERR);
        }

        /*
         * Run down the lists of all living and dead lwps.
         */
        if (first_lwp_addr == 0)
                first_lwp_addr = first_zombie_addr;
        curr_lwp_addr = first_lwp_addr;
        for (;;) {
                td_thr_state_e ts_state;
                int userpri;
                unsigned userflags;
                sigset_t mask;

                /*
                 * Read the ulwp struct.  If the full-sized read fails,
                 * zero the buffer and retry with only REPLACEMENT_SIZE
                 * bytes; apparently a dead thread's ulwp may have been
                 * replaced by a smaller structure of which only the
                 * leading portion is readable (see ul_replace usage
                 * elsewhere in this file).
                 */
                if (ta_p->model == PR_MODEL_NATIVE) {
                        ulwp_t ulwp;

                        if (ps_pdread(ph_p, curr_lwp_addr,
                            &ulwp, sizeof (ulwp)) != PS_OK &&
                            ((void) memset(&ulwp, 0, sizeof (ulwp)),
                            ps_pdread(ph_p, curr_lwp_addr,
                            &ulwp, REPLACEMENT_SIZE)) != PS_OK) {
                                return_val = TD_DBERR;
                                break;
                        }
                        next_lwp_addr = (psaddr_t)ulwp.ul_forw;

                        /* Derive the td-level state from the libc flags. */
                        ts_state = ulwp.ul_dead? TD_THR_ZOMBIE :
                            ulwp.ul_stop? TD_THR_STOPPED :
                            ulwp.ul_wchan? TD_THR_SLEEP :
                            TD_THR_ACTIVE;
                        userpri = ulwp.ul_pri;
                        userflags = ulwp.ul_usropts;
                        /* A dead thread is reported with an empty mask. */
                        if (ulwp.ul_dead)
                                (void) sigemptyset(&mask);
                        else
                                mask = *(sigset_t *)&ulwp.ul_sigmask;
                } else {
#if defined(_LP64) && defined(_SYSCALL32)
                        ulwp32_t ulwp;

                        if (ps_pdread(ph_p, curr_lwp_addr,
                            &ulwp, sizeof (ulwp)) != PS_OK &&
                            ((void) memset(&ulwp, 0, sizeof (ulwp)),
                            ps_pdread(ph_p, curr_lwp_addr,
                            &ulwp, REPLACEMENT_SIZE32)) != PS_OK) {
                                return_val = TD_DBERR;
                                break;
                        }
                        next_lwp_addr = (psaddr_t)ulwp.ul_forw;

                        ts_state = ulwp.ul_dead? TD_THR_ZOMBIE :
                            ulwp.ul_stop? TD_THR_STOPPED :
                            ulwp.ul_wchan? TD_THR_SLEEP :
                            TD_THR_ACTIVE;
                        userpri = ulwp.ul_pri;
                        userflags = ulwp.ul_usropts;
                        if (ulwp.ul_dead)
                                (void) sigemptyset(&mask);
                        else
                                mask = *(sigset_t *)&ulwp.ul_sigmask;
#else   /* _SYSCALL32 */
                        return_val = TD_ERR;
                        break;
#endif  /* _SYSCALL32 */
                }

                /*
                 * Filter on state, priority, sigmask, and user flags.
                 */

                if ((state != ts_state) &&
                    (state != TD_THR_ANY_STATE))
                        goto advance;

                if (ti_pri > userpri)
                        goto advance;

                if (ti_sigmask_p != TD_SIGNO_MASK &&
                    !sigequalset(ti_sigmask_p, &mask))
                        goto advance;

                if (ti_user_flags != userflags &&
                    ti_user_flags != (unsigned)TD_THR_ANY_USER_FLAGS)
                        goto advance;

                /*
                 * Call back - break if the return
                 * from the call back is non-zero.
                 */
                th.th_ta_p = (td_thragent_t *)ta_p;
                th.th_unique = curr_lwp_addr;
                if ((*cb)(&th, cbdata_p))
                        break;

advance:
                if ((curr_lwp_addr = next_lwp_addr) == first_lwp_addr) {
                        /*
                         * Switch to the zombie list, unless it is NULL
                         * or we have already been doing the zombie list,
                         * in which case terminate the loop.
                         */
                        if (first_zombie_addr == 0 ||
                            first_lwp_addr == first_zombie_addr)
                                break;
                        curr_lwp_addr = first_lwp_addr = first_zombie_addr;
                }
        }

        (void) ps_pcontinue(ph_p);
        ph_unlock(ta_p);
        return (return_val);
}
1111 
1112 /*
1113  * Enable or disable process synchronization object tracking.
1114  * Currently unused by dbx.
1115  */
1116 #pragma weak td_ta_sync_tracking_enable = __td_ta_sync_tracking_enable
1117 td_err_e
1118 __td_ta_sync_tracking_enable(td_thragent_t *ta_p, int onoff)
1119 {
1120         struct ps_prochandle *ph_p;
1121         td_err_e return_val;
1122         register_sync_t enable;
1123 
1124         if ((ph_p = ph_lock_ta(ta_p, &return_val)) == NULL)
1125                 return (return_val);
1126         /*
1127          * Values of tdb_register_sync in the victim process:
1128          *      REGISTER_SYNC_ENABLE    enables registration of synch objects
1129          *      REGISTER_SYNC_DISABLE   disables registration of synch objects
1130          * These cause the table to be cleared and tdb_register_sync set to:
1131          *      REGISTER_SYNC_ON        registration in effect
1132          *      REGISTER_SYNC_OFF       registration not in effect
1133          */
1134         enable = onoff? REGISTER_SYNC_ENABLE : REGISTER_SYNC_DISABLE;
1135         if (ps_pdwrite(ph_p, ta_p->tdb_register_sync_addr,
1136             &enable, sizeof (enable)) != PS_OK)
1137                 return_val = TD_DBERR;
1138         /*
1139          * Remember that this interface was called (see td_ta_delete()).
1140          */
1141         ta_p->sync_tracking = 1;
1142         ph_unlock(ta_p);
1143         return (return_val);
1144 }
1145 
1146 /*
1147  * Iterate over all known synchronization variables.
1148  * It is very possible that the list generated is incomplete,
1149  * because the iterator can only find synchronization variables
1150  * that have been registered by the process since synchronization
1151  * object registration was enabled.
1152  * The call back function cb is called for each synchronization
1153  * variable with two arguments: a pointer to the synchronization
1154  * handle and the passed-in argument cbdata.
1155  * If cb returns a non-zero value, iterations are terminated.
1156  */
1157 #pragma weak td_ta_sync_iter = __td_ta_sync_iter
1158 td_err_e
1159 __td_ta_sync_iter(td_thragent_t *ta_p, td_sync_iter_f *cb, void *cbdata)
1160 {
1161         struct ps_prochandle *ph_p;
1162         td_err_e        return_val;
1163         int             i;
1164         register_sync_t enable;
1165         psaddr_t        next_desc;
1166         tdb_sync_stats_t sync_stats;
1167         td_synchandle_t synchandle;
1168         psaddr_t        psaddr;
1169         void            *vaddr;
1170         uint64_t        *sync_addr_hash = NULL;
1171 
1172         if (cb == NULL)
1173                 return (TD_ERR);
1174         if ((ph_p = ph_lock_ta(ta_p, &return_val)) == NULL)
1175                 return (return_val);
1176         if (ps_pstop(ph_p) != PS_OK) {
1177                 ph_unlock(ta_p);
1178                 return (TD_DBERR);
1179         }
1180         if (ps_pdread(ph_p, ta_p->tdb_register_sync_addr,
1181             &enable, sizeof (enable)) != PS_OK) {
1182                 return_val = TD_DBERR;
1183                 goto out;
1184         }
1185         if (enable != REGISTER_SYNC_ON)
1186                 goto out;
1187 
1188         /*
1189          * First read the hash table.
1190          * The hash table is large; allocate with mmap().
1191          */
1192         if ((vaddr = mmap(NULL, TDB_HASH_SIZE * sizeof (uint64_t),
1193             PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANON, -1, (off_t)0))
1194             == MAP_FAILED) {
1195                 return_val = TD_MALLOC;
1196                 goto out;
1197         }
1198         sync_addr_hash = vaddr;
1199 
1200         if (ta_p->model == PR_MODEL_NATIVE) {
1201                 if (ps_pdread(ph_p, ta_p->uberdata_addr +
1202                     offsetof(uberdata_t, tdb.tdb_sync_addr_hash),
1203                     &psaddr, sizeof (&psaddr)) != PS_OK) {
1204                         return_val = TD_DBERR;
1205                         goto out;
1206                 }
1207         } else {
1208 #ifdef  _SYSCALL32
1209                 caddr32_t addr;
1210 
1211                 if (ps_pdread(ph_p, ta_p->uberdata_addr +
1212                     offsetof(uberdata32_t, tdb.tdb_sync_addr_hash),
1213                     &addr, sizeof (addr)) != PS_OK) {
1214                         return_val = TD_DBERR;
1215                         goto out;
1216                 }
1217                 psaddr = addr;
1218 #else
1219                 return_val = TD_ERR;
1220                 goto out;
1221 #endif /* _SYSCALL32 */
1222         }
1223 
1224         if (psaddr == 0)
1225                 goto out;
1226         if (ps_pdread(ph_p, psaddr, sync_addr_hash,
1227             TDB_HASH_SIZE * sizeof (uint64_t)) != PS_OK) {
1228                 return_val = TD_DBERR;
1229                 goto out;
1230         }
1231 
1232         /*
1233          * Now scan the hash table.
1234          */
1235         for (i = 0; i < TDB_HASH_SIZE; i++) {
1236                 for (next_desc = (psaddr_t)sync_addr_hash[i];
1237                     next_desc != 0;
1238                     next_desc = (psaddr_t)sync_stats.next) {
1239                         if (ps_pdread(ph_p, next_desc,
1240                             &sync_stats, sizeof (sync_stats)) != PS_OK) {
1241                                 return_val = TD_DBERR;
1242                                 goto out;
1243                         }
1244                         if (sync_stats.un.type == TDB_NONE) {
1245                                 /* not registered since registration enabled */
1246                                 continue;
1247                         }
1248                         synchandle.sh_ta_p = ta_p;
1249                         synchandle.sh_unique = (psaddr_t)sync_stats.sync_addr;
1250                         if ((*cb)(&synchandle, cbdata) != 0)
1251                                 goto out;
1252                 }
1253         }
1254 
1255 out:
1256         if (sync_addr_hash != NULL)
1257                 (void) munmap((void *)sync_addr_hash,
1258                     TDB_HASH_SIZE * sizeof (uint64_t));
1259         (void) ps_pcontinue(ph_p);
1260         ph_unlock(ta_p);
1261         return (return_val);
1262 }
1263 
1264 /*
1265  * Enable process statistics collection.
1266  */
1267 #pragma weak td_ta_enable_stats = __td_ta_enable_stats
1268 /* ARGSUSED */
td_err_e
__td_ta_enable_stats(const td_thragent_t *ta_p, int onoff)
{
        /* Statistics collection is not implemented; always decline. */
        return (TD_NOCAPAB);
}
1274 
1275 /*
1276  * Reset process statistics.
1277  */
1278 #pragma weak td_ta_reset_stats = __td_ta_reset_stats
1279 /* ARGSUSED */
td_err_e
__td_ta_reset_stats(const td_thragent_t *ta_p)
{
        /* Statistics collection is not implemented; always decline. */
        return (TD_NOCAPAB);
}
1285 
1286 /*
1287  * Read process statistics.
1288  */
1289 #pragma weak td_ta_get_stats = __td_ta_get_stats
1290 /* ARGSUSED */
td_err_e
__td_ta_get_stats(const td_thragent_t *ta_p, td_ta_stats_t *tstats)
{
        /* Statistics collection is not implemented; always decline. */
        return (TD_NOCAPAB);
}
1296 
1297 /*
1298  * Transfer information from lwp struct to thread information struct.
1299  * XXX -- lots of this needs cleaning up.
1300  */
static void
td_thr2to(td_thragent_t *ta_p, psaddr_t ts_addr,
    ulwp_t *ulwp, td_thrinfo_t *ti_p)
{
        lwpid_t lwpid;

        /*
         * An lwpid of 0 is reported as 1; presumably this is the
         * primordial thread before its id has been assigned -- confirm.
         */
        if ((lwpid = ulwp->ul_lwpid) == 0)
                lwpid = 1;
        /* Start from a zeroed thrinfo and fill in the known fields. */
        (void) memset(ti_p, 0, sizeof (*ti_p));
        ti_p->ti_ta_p = ta_p;
        ti_p->ti_user_flags = ulwp->ul_usropts;
        ti_p->ti_tid = lwpid;
        ti_p->ti_exitval = ulwp->ul_rval;
        ti_p->ti_startfunc = (psaddr_t)ulwp->ul_startpc;
        /* Stack information is only reported for a live thread. */
        if (!ulwp->ul_dead) {
                /*
                 * The bloody fools got this backwards!
                 */
                ti_p->ti_stkbase = (psaddr_t)ulwp->ul_stktop;
                ti_p->ti_stksize = ulwp->ul_stksiz;
        }
        ti_p->ti_ro_area = ts_addr;
        /*
         * A replaced ulwp retains only REPLACEMENT_SIZE valid bytes;
         * report the readable size accordingly.
         */
        ti_p->ti_ro_size = ulwp->ul_replace?
            REPLACEMENT_SIZE : sizeof (ulwp_t);
        /* Map libc's status flags onto the td_thr_state_e values. */
        ti_p->ti_state = ulwp->ul_dead? TD_THR_ZOMBIE :
            ulwp->ul_stop? TD_THR_STOPPED :
            ulwp->ul_wchan? TD_THR_SLEEP :
            TD_THR_ACTIVE;
        ti_p->ti_db_suspended = 0;
        ti_p->ti_type = TD_THR_USER;
        ti_p->ti_sp = ulwp->ul_sp;
        ti_p->ti_flags = 0;
        ti_p->ti_pri = ulwp->ul_pri;
        ti_p->ti_lid = lwpid;
        /* A dead thread is reported with the (empty) default sigmask. */
        if (!ulwp->ul_dead)
                ti_p->ti_sigmask = ulwp->ul_sigmask;
        ti_p->ti_traceme = 0;
        ti_p->ti_preemptflag = 0;
        ti_p->ti_pirecflag = 0;
        (void) sigemptyset(&ti_p->ti_pending);
        ti_p->ti_events = ulwp->ul_td_evbuf.eventmask;
}
1343 
1344 #if defined(_LP64) && defined(_SYSCALL32)
/*
 * 32-bit-victim variant of td_thr2to(): same field mapping, with the
 * victim's 32-bit values widened to the native thrinfo representation.
 */
static void
td_thr2to32(td_thragent_t *ta_p, psaddr_t ts_addr,
    ulwp32_t *ulwp, td_thrinfo_t *ti_p)
{
        lwpid_t lwpid;

        /*
         * An lwpid of 0 is reported as 1; presumably this is the
         * primordial thread before its id has been assigned -- confirm.
         */
        if ((lwpid = ulwp->ul_lwpid) == 0)
                lwpid = 1;
        (void) memset(ti_p, 0, sizeof (*ti_p));
        ti_p->ti_ta_p = ta_p;
        ti_p->ti_user_flags = ulwp->ul_usropts;
        ti_p->ti_tid = lwpid;
        /* Widen the 32-bit exit value to a native pointer. */
        ti_p->ti_exitval = (void *)(uintptr_t)ulwp->ul_rval;
        ti_p->ti_startfunc = (psaddr_t)ulwp->ul_startpc;
        /* Stack information is only reported for a live thread. */
        if (!ulwp->ul_dead) {
                /*
                 * The bloody fools got this backwards!
                 */
                ti_p->ti_stkbase = (psaddr_t)ulwp->ul_stktop;
                ti_p->ti_stksize = ulwp->ul_stksiz;
        }
        ti_p->ti_ro_area = ts_addr;
        /*
         * A replaced ulwp retains only REPLACEMENT_SIZE32 valid bytes;
         * report the readable size accordingly.
         */
        ti_p->ti_ro_size = ulwp->ul_replace?
            REPLACEMENT_SIZE32 : sizeof (ulwp32_t);
        /* Map libc's status flags onto the td_thr_state_e values. */
        ti_p->ti_state = ulwp->ul_dead? TD_THR_ZOMBIE :
            ulwp->ul_stop? TD_THR_STOPPED :
            ulwp->ul_wchan? TD_THR_SLEEP :
            TD_THR_ACTIVE;
        ti_p->ti_db_suspended = 0;
        ti_p->ti_type = TD_THR_USER;
        ti_p->ti_sp = (uint32_t)ulwp->ul_sp;
        ti_p->ti_flags = 0;
        ti_p->ti_pri = ulwp->ul_pri;
        ti_p->ti_lid = lwpid;
        /* A dead thread is reported with the (empty) default sigmask. */
        if (!ulwp->ul_dead)
                ti_p->ti_sigmask = *(sigset_t *)&ulwp->ul_sigmask;
        ti_p->ti_traceme = 0;
        ti_p->ti_preemptflag = 0;
        ti_p->ti_pirecflag = 0;
        (void) sigemptyset(&ti_p->ti_pending);
        ti_p->ti_events = ulwp->ul_td_evbuf.eventmask;
}
1387 #endif  /* _SYSCALL32 */
1388 
1389 /*
1390  * Get thread information.
1391  */
1392 #pragma weak td_thr_get_info = __td_thr_get_info
td_err_e
__td_thr_get_info(td_thrhandle_t *th_p, td_thrinfo_t *ti_p)
{
        struct ps_prochandle *ph_p;
        td_thragent_t   *ta_p;
        td_err_e        return_val;
        psaddr_t        psaddr;

        if (ti_p == NULL)
                return (TD_ERR);
        /* The result struct is zeroed even on the error paths below. */
        (void) memset(ti_p, 0, sizeof (*ti_p));

        if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
                return (return_val);
        ta_p = th_p->th_ta_p;
        if (ps_pstop(ph_p) != PS_OK) {
                ph_unlock(ta_p);
                return (TD_DBERR);
        }

        /*
         * Read the ulwp struct from the process.
         * Transfer the ulwp struct to the thread information struct.
         * If the full-sized read fails, zero the buffer and retry with
         * only REPLACEMENT_SIZE(32) bytes; apparently a dead thread's
         * ulwp may have been replaced by a smaller structure of which
         * only the leading portion is readable (see ul_replace).
         */
        psaddr = th_p->th_unique;
        if (ta_p->model == PR_MODEL_NATIVE) {
                ulwp_t ulwp;

                if (ps_pdread(ph_p, psaddr, &ulwp, sizeof (ulwp)) != PS_OK &&
                    ((void) memset(&ulwp, 0, sizeof (ulwp)),
                    ps_pdread(ph_p, psaddr, &ulwp, REPLACEMENT_SIZE)) != PS_OK)
                        return_val = TD_DBERR;
                else
                        td_thr2to(ta_p, psaddr, &ulwp, ti_p);
        } else {
#if defined(_LP64) && defined(_SYSCALL32)
                ulwp32_t ulwp;

                if (ps_pdread(ph_p, psaddr, &ulwp, sizeof (ulwp)) != PS_OK &&
                    ((void) memset(&ulwp, 0, sizeof (ulwp)),
                    ps_pdread(ph_p, psaddr, &ulwp, REPLACEMENT_SIZE32)) !=
                    PS_OK)
                        return_val = TD_DBERR;
                else
                        td_thr2to32(ta_p, psaddr, &ulwp, ti_p);
#else
                return_val = TD_ERR;
#endif  /* _SYSCALL32 */
        }

        (void) ps_pcontinue(ph_p);
        ph_unlock(ta_p);
        return (return_val);
}
1447 
1448 /*
1449  * Given a process and an event number, return information about
1450  * an address in the process or at which a breakpoint can be set
1451  * to monitor the event.
1452  */
1453 #pragma weak td_ta_event_addr = __td_ta_event_addr
1454 td_err_e
1455 __td_ta_event_addr(td_thragent_t *ta_p, td_event_e event, td_notify_t *notify_p)
1456 {
1457         if (ta_p == NULL)
1458                 return (TD_BADTA);
1459         if (event < TD_MIN_EVENT_NUM || event > TD_MAX_EVENT_NUM)
1460                 return (TD_NOEVENT);
1461         if (notify_p == NULL)
1462                 return (TD_ERR);
1463 
1464         notify_p->type = NOTIFY_BPT;
1465         notify_p->u.bptaddr = ta_p->tdb_events[event - TD_MIN_EVENT_NUM];
1466 
1467         return (TD_OK);
1468 }
1469 
1470 /*
1471  * Add the events in eventset 2 to eventset 1.
1472  */
1473 static void
1474 eventsetaddset(td_thr_events_t *event1_p, td_thr_events_t *event2_p)
1475 {
1476         int     i;
1477 
1478         for (i = 0; i < TD_EVENTSIZE; i++)
1479                 event1_p->event_bits[i] |= event2_p->event_bits[i];
1480 }
1481 
1482 /*
1483  * Delete the events in eventset 2 from eventset 1.
1484  */
1485 static void
1486 eventsetdelset(td_thr_events_t *event1_p, td_thr_events_t *event2_p)
1487 {
1488         int     i;
1489 
1490         for (i = 0; i < TD_EVENTSIZE; i++)
1491                 event1_p->event_bits[i] &= ~event2_p->event_bits[i];
1492 }
1493 
1494 /*
1495  * Either add or delete the given event set from a thread's event mask.
1496  */
static td_err_e
mod_eventset(td_thrhandle_t *th_p, td_thr_events_t *events, int onoff)
{
        struct ps_prochandle *ph_p;
        td_err_e        return_val = TD_OK;
        char            enable;
        td_thr_events_t evset;
        psaddr_t        psaddr_evset;   /* victim addr of the event mask */
        psaddr_t        psaddr_enab;    /* victim addr of the enable flag */

        if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
                return (return_val);
        /*
         * Compute the victim-process addresses of the thread's event
         * mask and its event-enable flag.  th_unique is the address of
         * the ulwp_t in the *victim*; the casts below are pure pointer
         * arithmetic -- nothing is dereferenced locally.
         */
        if (th_p->th_ta_p->model == PR_MODEL_NATIVE) {
                ulwp_t *ulwp = (ulwp_t *)th_p->th_unique;
                psaddr_evset = (psaddr_t)&ulwp->ul_td_evbuf.eventmask;
                psaddr_enab = (psaddr_t)&ulwp->ul_td_events_enable;
        } else {
#if defined(_LP64) && defined(_SYSCALL32)
                ulwp32_t *ulwp = (ulwp32_t *)th_p->th_unique;
                psaddr_evset = (psaddr_t)&ulwp->ul_td_evbuf.eventmask;
                psaddr_enab = (psaddr_t)&ulwp->ul_td_events_enable;
#else
                ph_unlock(th_p->th_ta_p);
                return (TD_ERR);
#endif  /* _SYSCALL32 */
        }
        if (ps_pstop(ph_p) != PS_OK) {
                ph_unlock(th_p->th_ta_p);
                return (TD_DBERR);
        }

        /*
         * Read-modify-write the thread's event mask while the victim is
         * stopped, then keep the per-thread enable flag in sync: it is
         * set iff TD_EVENTS_ENABLE remains in the resulting mask.
         */
        if (ps_pdread(ph_p, psaddr_evset, &evset, sizeof (evset)) != PS_OK)
                return_val = TD_DBERR;
        else {
                if (onoff)
                        eventsetaddset(&evset, events);
                else
                        eventsetdelset(&evset, events);
                if (ps_pdwrite(ph_p, psaddr_evset, &evset, sizeof (evset))
                    != PS_OK)
                        return_val = TD_DBERR;
                else {
                        enable = 0;
                        if (td_eventismember(&evset, TD_EVENTS_ENABLE))
                                enable = 1;
                        if (ps_pdwrite(ph_p, psaddr_enab,
                            &enable, sizeof (enable)) != PS_OK)
                                return_val = TD_DBERR;
                }
        }

        (void) ps_pcontinue(ph_p);
        ph_unlock(th_p->th_ta_p);
        return (return_val);
}
1552 
1553 /*
1554  * Enable or disable tracing for a given thread.  Tracing
1555  * is filtered based on the event mask of each thread.  Tracing
1556  * can be turned on/off for the thread without changing thread
1557  * event mask.
1558  * Currently unused by dbx.
1559  */
1560 #pragma weak td_thr_event_enable = __td_thr_event_enable
1561 td_err_e
1562 __td_thr_event_enable(td_thrhandle_t *th_p, int onoff)
1563 {
1564         td_thr_events_t evset;
1565 
1566         td_event_emptyset(&evset);
1567         td_event_addset(&evset, TD_EVENTS_ENABLE);
1568         return (mod_eventset(th_p, &evset, onoff));
1569 }
1570 
1571 /*
1572  * Set event mask to enable event. event is turned on in
1573  * event mask for thread.  If a thread encounters an event
1574  * for which its event mask is on, notification will be sent
1575  * to the debugger.
1576  * Addresses for each event are provided to the
1577  * debugger.  It is assumed that a breakpoint of some type will
1578  * be placed at that address.  If the event mask for the thread
1579  * is on, the instruction at the address will be executed.
1580  * Otherwise, the instruction will be skipped.
1581  */
1582 #pragma weak td_thr_set_event = __td_thr_set_event
td_err_e
__td_thr_set_event(td_thrhandle_t *th_p, td_thr_events_t *events)
{
        /* Turn the given events on in the thread's event mask. */
        return (mod_eventset(th_p, events, 1));
}
1588 
1589 /*
1590  * Enable or disable a set of events in the process-global event mask,
1591  * depending on the value of onoff.
1592  */
1593 static td_err_e
1594 td_ta_mod_event(td_thragent_t *ta_p, td_thr_events_t *events, int onoff)
1595 {
1596         struct ps_prochandle *ph_p;
1597         td_thr_events_t targ_eventset;
1598         td_err_e        return_val;
1599 
1600         if ((ph_p = ph_lock_ta(ta_p, &return_val)) == NULL)
1601                 return (return_val);
1602         if (ps_pstop(ph_p) != PS_OK) {
1603                 ph_unlock(ta_p);
1604                 return (TD_DBERR);
1605         }
1606         if (ps_pdread(ph_p, ta_p->tdb_eventmask_addr,
1607             &targ_eventset, sizeof (targ_eventset)) != PS_OK)
1608                 return_val = TD_DBERR;
1609         else {
1610                 if (onoff)
1611                         eventsetaddset(&targ_eventset, events);
1612                 else
1613                         eventsetdelset(&targ_eventset, events);
1614                 if (ps_pdwrite(ph_p, ta_p->tdb_eventmask_addr,
1615                     &targ_eventset, sizeof (targ_eventset)) != PS_OK)
1616                         return_val = TD_DBERR;
1617         }
1618         (void) ps_pcontinue(ph_p);
1619         ph_unlock(ta_p);
1620         return (return_val);
1621 }
1622 
1623 /*
1624  * Enable a set of events in the process-global event mask.
1625  */
1626 #pragma weak td_ta_set_event = __td_ta_set_event
1627 td_err_e
1628 __td_ta_set_event(td_thragent_t *ta_p, td_thr_events_t *events)
1629 {
1630         return (td_ta_mod_event(ta_p, events, 1));
1631 }
1632 
1633 /*
1634  * Set event mask to disable the given event set; these events are cleared
1635  * from the event mask of the thread.  Events that occur for a thread
1636  * with the event masked off will not cause notification to be
1637  * sent to the debugger (see td_thr_set_event for fuller description).
1638  */
1639 #pragma weak td_thr_clear_event = __td_thr_clear_event
1640 td_err_e
1641 __td_thr_clear_event(td_thrhandle_t *th_p, td_thr_events_t *events)
1642 {
1643         return (mod_eventset(th_p, events, 0));
1644 }
1645 
1646 /*
1647  * Disable a set of events in the process-global event mask.
1648  */
1649 #pragma weak td_ta_clear_event = __td_ta_clear_event
1650 td_err_e
1651 __td_ta_clear_event(td_thragent_t *ta_p, td_thr_events_t *events)
1652 {
1653         return (td_ta_mod_event(ta_p, events, 0));
1654 }
1655 
1656 /*
1657  * This function returns the most recent event message, if any,
1658  * associated with a thread.  Given a thread handle, return the message
1659  * corresponding to the event encountered by the thread.  Only one
1660  * message per thread is saved.  Messages from earlier events are lost
1661  * when later events occur.
1662  */
1663 #pragma weak td_thr_event_getmsg = __td_thr_event_getmsg
1664 td_err_e
1665 __td_thr_event_getmsg(td_thrhandle_t *th_p, td_event_msg_t *msg)
1666 {
1667         struct ps_prochandle *ph_p;
1668         td_err_e        return_val = TD_OK;
1669         psaddr_t        psaddr;
1670 
1671         if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
1672                 return (return_val);
1673         if (ps_pstop(ph_p) != PS_OK) {
1674                 ph_unlock(th_p->th_ta_p);
1675                 return (TD_BADTA);
1676         }
1677         if (th_p->th_ta_p->model == PR_MODEL_NATIVE) {
1678                 ulwp_t *ulwp = (ulwp_t *)th_p->th_unique;
1679                 td_evbuf_t evbuf;
1680 
1681                 psaddr = (psaddr_t)&ulwp->ul_td_evbuf;
1682                 if (ps_pdread(ph_p, psaddr, &evbuf, sizeof (evbuf)) != PS_OK) {
1683                         return_val = TD_DBERR;
1684                 } else if (evbuf.eventnum == TD_EVENT_NONE) {
1685                         return_val = TD_NOEVENT;
1686                 } else {
1687                         msg->event = evbuf.eventnum;
1688                         msg->th_p = (td_thrhandle_t *)th_p;
1689                         msg->msg.data = (uintptr_t)evbuf.eventdata;
1690                         /* "Consume" the message */
1691                         evbuf.eventnum = TD_EVENT_NONE;
1692                         evbuf.eventdata = NULL;
1693                         if (ps_pdwrite(ph_p, psaddr, &evbuf, sizeof (evbuf))
1694                             != PS_OK)
1695                                 return_val = TD_DBERR;
1696                 }
1697         } else {
1698 #if defined(_LP64) && defined(_SYSCALL32)
1699                 ulwp32_t *ulwp = (ulwp32_t *)th_p->th_unique;
1700                 td_evbuf32_t evbuf;
1701 
1702                 psaddr = (psaddr_t)&ulwp->ul_td_evbuf;
1703                 if (ps_pdread(ph_p, psaddr, &evbuf, sizeof (evbuf)) != PS_OK) {
1704                         return_val = TD_DBERR;
1705                 } else if (evbuf.eventnum == TD_EVENT_NONE) {
1706                         return_val = TD_NOEVENT;
1707                 } else {
1708                         msg->event = evbuf.eventnum;
1709                         msg->th_p = (td_thrhandle_t *)th_p;
1710                         msg->msg.data = (uintptr_t)evbuf.eventdata;
1711                         /* "Consume" the message */
1712                         evbuf.eventnum = TD_EVENT_NONE;
1713                         evbuf.eventdata = 0;
1714                         if (ps_pdwrite(ph_p, psaddr, &evbuf, sizeof (evbuf))
1715                             != PS_OK)
1716                                 return_val = TD_DBERR;
1717                 }
1718 #else
1719                 return_val = TD_ERR;
1720 #endif  /* _SYSCALL32 */
1721         }
1722 
1723         (void) ps_pcontinue(ph_p);
1724         ph_unlock(th_p->th_ta_p);
1725         return (return_val);
1726 }
1727 
1728 /*
1729  * The callback function td_ta_event_getmsg uses when looking for
1730  * a thread with an event.  A thin wrapper around td_thr_event_getmsg.
1731  */
1732 static int
1733 event_msg_cb(const td_thrhandle_t *th_p, void *arg)
1734 {
1735         static td_thrhandle_t th;
1736         td_event_msg_t *msg = arg;
1737 
1738         if (__td_thr_event_getmsg((td_thrhandle_t *)th_p, msg) == TD_OK) {
1739                 /*
1740                  * Got an event, stop iterating.
1741                  *
1742                  * Because of past mistakes in interface definition,
1743                  * we are forced to pass back a static local variable
1744                  * for the thread handle because th_p is a pointer
1745                  * to a local variable in __td_ta_thr_iter().
1746                  * Grr...
1747                  */
1748                 th = *th_p;
1749                 msg->th_p = &th;
1750                 return (1);
1751         }
1752         return (0);
1753 }
1754 
1755 /*
1756  * This function is just like td_thr_event_getmsg, except that it is
1757  * passed a process handle rather than a thread handle, and returns
1758  * an event message for some thread in the process that has an event
1759  * message pending.  If no thread has an event message pending, this
1760  * routine returns TD_NOEVENT.  Thus, all pending event messages may
1761  * be collected from a process by repeatedly calling this routine
1762  * until it returns TD_NOEVENT.
1763  */
1764 #pragma weak td_ta_event_getmsg = __td_ta_event_getmsg
1765 td_err_e
1766 __td_ta_event_getmsg(td_thragent_t *ta_p, td_event_msg_t *msg)
1767 {
1768         td_err_e return_val;
1769 
1770         if (ta_p == NULL)
1771                 return (TD_BADTA);
1772         if (ta_p->ph_p == NULL)
1773                 return (TD_BADPH);
1774         if (msg == NULL)
1775                 return (TD_ERR);
1776         msg->event = TD_EVENT_NONE;
1777         if ((return_val = __td_ta_thr_iter(ta_p, event_msg_cb, msg,
1778             TD_THR_ANY_STATE, TD_THR_LOWEST_PRIORITY, TD_SIGNO_MASK,
1779             TD_THR_ANY_USER_FLAGS)) != TD_OK)
1780                 return (return_val);
1781         if (msg->event == TD_EVENT_NONE)
1782                 return (TD_NOEVENT);
1783         return (TD_OK);
1784 }
1785 
/*
 * Map a thread handle to the id of the lwp the thread runs on, by
 * reading ul_lwpid out of the target's ulwp structure.
 *
 * Returns 0 if the read fails (callers pass the 0 on to ps_l*()
 * routines, which then fail).  A value of 0 read from the target is
 * mapped to 1 -- apparently this identifies the initial lwp;
 * NOTE(review): confirm against the ulwp_t initialization code.
 */
static lwpid_t
thr_to_lwpid(const td_thrhandle_t *th_p)
{
	struct ps_prochandle *ph_p = th_p->th_ta_p->ph_p;
	lwpid_t lwpid;

	/*
	 * The caller holds the prochandle lock
	 * and has already verfied everything.
	 */
	if (th_p->th_ta_p->model == PR_MODEL_NATIVE) {
		/* Target has the same data model as the debugger. */
		ulwp_t *ulwp = (ulwp_t *)th_p->th_unique;

		if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_lwpid,
		    &lwpid, sizeof (lwpid)) != PS_OK)
			lwpid = 0;
		else if (lwpid == 0)
			lwpid = 1;
	} else {
#if defined(_LP64) && defined(_SYSCALL32)
		/* 64-bit debugger examining a 32-bit target. */
		ulwp32_t *ulwp = (ulwp32_t *)th_p->th_unique;

		if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_lwpid,
		    &lwpid, sizeof (lwpid)) != PS_OK)
			lwpid = 0;
		else if (lwpid == 0)
			lwpid = 1;
#else
		lwpid = 0;
#endif	/* _SYSCALL32 */
	}

	return (lwpid);
}
1820 
1821 /*
1822  * Suspend a thread.
1823  * XXX: What does this mean in a one-level model?
1824  */
1825 #pragma weak td_thr_dbsuspend = __td_thr_dbsuspend
1826 td_err_e
1827 __td_thr_dbsuspend(const td_thrhandle_t *th_p)
1828 {
1829         struct ps_prochandle *ph_p;
1830         td_err_e return_val;
1831 
1832         if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
1833                 return (return_val);
1834         if (ps_lstop(ph_p, thr_to_lwpid(th_p)) != PS_OK)
1835                 return_val = TD_DBERR;
1836         ph_unlock(th_p->th_ta_p);
1837         return (return_val);
1838 }
1839 
1840 /*
1841  * Resume a suspended thread.
1842  * XXX: What does this mean in a one-level model?
1843  */
1844 #pragma weak td_thr_dbresume = __td_thr_dbresume
1845 td_err_e
1846 __td_thr_dbresume(const td_thrhandle_t *th_p)
1847 {
1848         struct ps_prochandle *ph_p;
1849         td_err_e return_val;
1850 
1851         if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
1852                 return (return_val);
1853         if (ps_lcontinue(ph_p, thr_to_lwpid(th_p)) != PS_OK)
1854                 return_val = TD_DBERR;
1855         ph_unlock(th_p->th_ta_p);
1856         return (return_val);
1857 }
1858 
1859 /*
1860  * Set a thread's signal mask.
1861  * Currently unused by dbx.
1862  */
1863 #pragma weak td_thr_sigsetmask = __td_thr_sigsetmask
1864 /* ARGSUSED */
1865 td_err_e
1866 __td_thr_sigsetmask(const td_thrhandle_t *th_p, const sigset_t ti_sigmask)
1867 {
1868         return (TD_NOCAPAB);
1869 }
1870 
1871 /*
1872  * Set a thread's "signals-pending" set.
1873  * Currently unused by dbx.
1874  */
1875 #pragma weak td_thr_setsigpending = __td_thr_setsigpending
1876 /* ARGSUSED */
1877 td_err_e
1878 __td_thr_setsigpending(const td_thrhandle_t *th_p,
1879     uchar_t ti_pending_flag, const sigset_t ti_pending)
1880 {
1881         return (TD_NOCAPAB);
1882 }
1883 
1884 /*
1885  * Get a thread's general register set.
1886  */
1887 #pragma weak td_thr_getgregs = __td_thr_getgregs
1888 td_err_e
1889 __td_thr_getgregs(td_thrhandle_t *th_p, prgregset_t regset)
1890 {
1891         struct ps_prochandle *ph_p;
1892         td_err_e return_val;
1893 
1894         if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
1895                 return (return_val);
1896         if (ps_pstop(ph_p) != PS_OK) {
1897                 ph_unlock(th_p->th_ta_p);
1898                 return (TD_DBERR);
1899         }
1900 
1901         if (ps_lgetregs(ph_p, thr_to_lwpid(th_p), regset) != PS_OK)
1902                 return_val = TD_DBERR;
1903 
1904         (void) ps_pcontinue(ph_p);
1905         ph_unlock(th_p->th_ta_p);
1906         return (return_val);
1907 }
1908 
1909 /*
1910  * Set a thread's general register set.
1911  */
1912 #pragma weak td_thr_setgregs = __td_thr_setgregs
1913 td_err_e
1914 __td_thr_setgregs(td_thrhandle_t *th_p, const prgregset_t regset)
1915 {
1916         struct ps_prochandle *ph_p;
1917         td_err_e return_val;
1918 
1919         if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
1920                 return (return_val);
1921         if (ps_pstop(ph_p) != PS_OK) {
1922                 ph_unlock(th_p->th_ta_p);
1923                 return (TD_DBERR);
1924         }
1925 
1926         if (ps_lsetregs(ph_p, thr_to_lwpid(th_p), regset) != PS_OK)
1927                 return_val = TD_DBERR;
1928 
1929         (void) ps_pcontinue(ph_p);
1930         ph_unlock(th_p->th_ta_p);
1931         return (return_val);
1932 }
1933 
1934 /*
1935  * Get a thread's floating-point register set.
1936  */
1937 #pragma weak td_thr_getfpregs = __td_thr_getfpregs
1938 td_err_e
1939 __td_thr_getfpregs(td_thrhandle_t *th_p, prfpregset_t *fpregset)
1940 {
1941         struct ps_prochandle *ph_p;
1942         td_err_e return_val;
1943 
1944         if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
1945                 return (return_val);
1946         if (ps_pstop(ph_p) != PS_OK) {
1947                 ph_unlock(th_p->th_ta_p);
1948                 return (TD_DBERR);
1949         }
1950 
1951         if (ps_lgetfpregs(ph_p, thr_to_lwpid(th_p), fpregset) != PS_OK)
1952                 return_val = TD_DBERR;
1953 
1954         (void) ps_pcontinue(ph_p);
1955         ph_unlock(th_p->th_ta_p);
1956         return (return_val);
1957 }
1958 
1959 /*
1960  * Set a thread's floating-point register set.
1961  */
1962 #pragma weak td_thr_setfpregs = __td_thr_setfpregs
1963 td_err_e
1964 __td_thr_setfpregs(td_thrhandle_t *th_p, const prfpregset_t *fpregset)
1965 {
1966         struct ps_prochandle *ph_p;
1967         td_err_e return_val;
1968 
1969         if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
1970                 return (return_val);
1971         if (ps_pstop(ph_p) != PS_OK) {
1972                 ph_unlock(th_p->th_ta_p);
1973                 return (TD_DBERR);
1974         }
1975 
1976         if (ps_lsetfpregs(ph_p, thr_to_lwpid(th_p), fpregset) != PS_OK)
1977                 return_val = TD_DBERR;
1978 
1979         (void) ps_pcontinue(ph_p);
1980         ph_unlock(th_p->th_ta_p);
1981         return (return_val);
1982 }
1983 
1984 /*
1985  * Get the size of the extra state register set for this architecture.
1986  * Currently unused by dbx.
1987  */
1988 #pragma weak td_thr_getxregsize = __td_thr_getxregsize
1989 /* ARGSUSED */
1990 td_err_e
1991 __td_thr_getxregsize(td_thrhandle_t *th_p, int *xregsize)
1992 {
1993         struct ps_prochandle *ph_p;
1994         td_err_e return_val;
1995 
1996         if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
1997                 return (return_val);
1998         if (ps_pstop(ph_p) != PS_OK) {
1999                 ph_unlock(th_p->th_ta_p);
2000                 return (TD_DBERR);
2001         }
2002 
2003         if (ps_lgetxregsize(ph_p, thr_to_lwpid(th_p), xregsize) != PS_OK)
2004                 return_val = TD_DBERR;
2005 
2006         if (*xregsize == 0)
2007                 return_val = TD_NOXREGS;
2008 
2009         (void) ps_pcontinue(ph_p);
2010         ph_unlock(th_p->th_ta_p);
2011         return (return_val);
2012 }
2013 
2014 /*
2015  * Get a thread's extra state register set.
2016  */
2017 #pragma weak td_thr_getxregs = __td_thr_getxregs
2018 td_err_e
2019 __td_thr_getxregs(td_thrhandle_t *th_p, void *xregset)
2020 {
2021         struct ps_prochandle *ph_p;
2022         td_err_e return_val;
2023         ps_err_e ps_err;
2024 
2025         if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
2026                 return (return_val);
2027         if (ps_pstop(ph_p) != PS_OK) {
2028                 ph_unlock(th_p->th_ta_p);
2029                 return (TD_DBERR);
2030         }
2031 
2032         ps_err = ps_lgetxregs(ph_p, thr_to_lwpid(th_p), (caddr_t)xregset);
2033         if (ps_err == PS_NOXREGS)
2034                 return_val = TD_NOXREGS;
2035         else if (ps_err != PS_OK)
2036                 return_val = TD_DBERR;
2037 
2038         (void) ps_pcontinue(ph_p);
2039         ph_unlock(th_p->th_ta_p);
2040         return (return_val);
2041 }
2042 
2043 /*
2044  * Set a thread's extra state register set.
2045  */
2046 #pragma weak td_thr_setxregs = __td_thr_setxregs
2047 /* ARGSUSED */
2048 td_err_e
2049 __td_thr_setxregs(td_thrhandle_t *th_p, const void *xregset)
2050 {
2051         struct ps_prochandle *ph_p;
2052         td_err_e return_val;
2053 
2054         if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
2055                 return (return_val);
2056         if (ps_pstop(ph_p) != PS_OK) {
2057                 ph_unlock(th_p->th_ta_p);
2058                 return (TD_DBERR);
2059         }
2060 
2061         if (ps_lsetxregs(ph_p, thr_to_lwpid(th_p), (caddr_t)xregset) != PS_OK)
2062                 return_val = TD_DBERR;
2063 
2064         (void) ps_pcontinue(ph_p);
2065         ph_unlock(th_p->th_ta_p);
2066         return (return_val);
2067 }
2068 
/*
 * State shared between __td_thr_validate() and the td_searcher()
 * callback: 'addr' is the ulwp address being searched for and
 * 'status' is set to 1 when a matching thread is found.
 */
struct searcher {
	psaddr_t	addr;		/* th_unique value to match */
	int		status;		/* 1 if a match was found */
};
2073 
2074 /*
2075  * Check the struct thread address in *th_p again first
2076  * value in "data".  If value in data is found, set second value
2077  * in "data" to 1 and return 1 to terminate iterations.
2078  * This function is used by td_thr_validate() to verify that
2079  * a thread handle is valid.
2080  */
2081 static int
2082 td_searcher(const td_thrhandle_t *th_p, void *data)
2083 {
2084         struct searcher *searcher_data = (struct searcher *)data;
2085 
2086         if (searcher_data->addr == th_p->th_unique) {
2087                 searcher_data->status = 1;
2088                 return (1);
2089         }
2090         return (0);
2091 }
2092 
2093 /*
2094  * Validate the thread handle.  Check that
2095  * a thread exists in the thread agent/process that
2096  * corresponds to thread with handle *th_p.
2097  * Currently unused by dbx.
2098  */
2099 #pragma weak td_thr_validate = __td_thr_validate
2100 td_err_e
2101 __td_thr_validate(const td_thrhandle_t *th_p)
2102 {
2103         td_err_e return_val;
2104         struct searcher searcher_data = {0, 0};
2105 
2106         if (th_p == NULL)
2107                 return (TD_BADTH);
2108         if (th_p->th_unique == 0 || th_p->th_ta_p == NULL)
2109                 return (TD_BADTH);
2110 
2111         /*
2112          * LOCKING EXCEPTION - Locking is not required
2113          * here because no use of the thread agent is made (other
2114          * than the sanity check) and checking of the thread
2115          * agent will be done in __td_ta_thr_iter.
2116          */
2117 
2118         searcher_data.addr = th_p->th_unique;
2119         return_val = __td_ta_thr_iter(th_p->th_ta_p,
2120             td_searcher, &searcher_data,
2121             TD_THR_ANY_STATE, TD_THR_LOWEST_PRIORITY,
2122             TD_SIGNO_MASK, TD_THR_ANY_USER_FLAGS);
2123 
2124         if (return_val == TD_OK && searcher_data.status == 0)
2125                 return_val = TD_NOTHR;
2126 
2127         return (return_val);
2128 }
2129 
2130 /*
2131  * Get a thread's private binding to a given thread specific
2132  * data(TSD) key(see thr_getspecific(3C).  If the thread doesn't
2133  * have a binding for a particular key, then NULL is returned.
2134  */
2135 #pragma weak td_thr_tsd = __td_thr_tsd
2136 td_err_e
2137 __td_thr_tsd(td_thrhandle_t *th_p, thread_key_t key, void **data_pp)
2138 {
2139         struct ps_prochandle *ph_p;
2140         td_thragent_t   *ta_p;
2141         td_err_e        return_val;
2142         int             maxkey;
2143         int             nkey;
2144         psaddr_t        tsd_paddr;
2145 
2146         if (data_pp == NULL)
2147                 return (TD_ERR);
2148         *data_pp = NULL;
2149         if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
2150                 return (return_val);
2151         ta_p = th_p->th_ta_p;
2152         if (ps_pstop(ph_p) != PS_OK) {
2153                 ph_unlock(ta_p);
2154                 return (TD_DBERR);
2155         }
2156 
2157         if (ta_p->model == PR_MODEL_NATIVE) {
2158                 ulwp_t *ulwp = (ulwp_t *)th_p->th_unique;
2159                 tsd_metadata_t tsdm;
2160                 tsd_t stsd;
2161 
2162                 if (ps_pdread(ph_p,
2163                     ta_p->uberdata_addr + offsetof(uberdata_t, tsd_metadata),
2164                     &tsdm, sizeof (tsdm)) != PS_OK)
2165                         return_val = TD_DBERR;
2166                 else if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_stsd,
2167                     &tsd_paddr, sizeof (tsd_paddr)) != PS_OK)
2168                         return_val = TD_DBERR;
2169                 else if (tsd_paddr != 0 &&
2170                     ps_pdread(ph_p, tsd_paddr, &stsd, sizeof (stsd)) != PS_OK)
2171                         return_val = TD_DBERR;
2172                 else {
2173                         maxkey = tsdm.tsdm_nused;
2174                         nkey = tsd_paddr == 0 ? TSD_NFAST : stsd.tsd_nalloc;
2175 
2176                         if (key < TSD_NFAST)
2177                                 tsd_paddr = (psaddr_t)&ulwp->ul_ftsd[0];
2178                 }
2179         } else {
2180 #if defined(_LP64) && defined(_SYSCALL32)
2181                 ulwp32_t *ulwp = (ulwp32_t *)th_p->th_unique;
2182                 tsd_metadata32_t tsdm;
2183                 tsd32_t stsd;
2184                 caddr32_t addr;
2185 
2186                 if (ps_pdread(ph_p,
2187                     ta_p->uberdata_addr + offsetof(uberdata32_t, tsd_metadata),
2188                     &tsdm, sizeof (tsdm)) != PS_OK)
2189                         return_val = TD_DBERR;
2190                 else if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_stsd,
2191                     &addr, sizeof (addr)) != PS_OK)
2192                         return_val = TD_DBERR;
2193                 else if (addr != 0 &&
2194                     ps_pdread(ph_p, addr, &stsd, sizeof (stsd)) != PS_OK)
2195                         return_val = TD_DBERR;
2196                 else {
2197                         maxkey = tsdm.tsdm_nused;
2198                         nkey = addr == 0 ? TSD_NFAST : stsd.tsd_nalloc;
2199 
2200                         if (key < TSD_NFAST) {
2201                                 tsd_paddr = (psaddr_t)&ulwp->ul_ftsd[0];
2202                         } else {
2203                                 tsd_paddr = addr;
2204                         }
2205                 }
2206 #else
2207                 return_val = TD_ERR;
2208 #endif  /* _SYSCALL32 */
2209         }
2210 
2211         if (return_val == TD_OK && (key < 1 || key >= maxkey))
2212                 return_val = TD_NOTSD;
2213         if (return_val != TD_OK || key >= nkey) {
2214                 /* NULL has already been stored in data_pp */
2215                 (void) ps_pcontinue(ph_p);
2216                 ph_unlock(ta_p);
2217                 return (return_val);
2218         }
2219 
2220         /*
2221          * Read the value from the thread's tsd array.
2222          */
2223         if (ta_p->model == PR_MODEL_NATIVE) {
2224                 void *value;
2225 
2226                 if (ps_pdread(ph_p, tsd_paddr + key * sizeof (void *),
2227                     &value, sizeof (value)) != PS_OK)
2228                         return_val = TD_DBERR;
2229                 else
2230                         *data_pp = value;
2231 #if defined(_LP64) && defined(_SYSCALL32)
2232         } else {
2233                 caddr32_t value32;
2234 
2235                 if (ps_pdread(ph_p, tsd_paddr + key * sizeof (caddr32_t),
2236                     &value32, sizeof (value32)) != PS_OK)
2237                         return_val = TD_DBERR;
2238                 else
2239                         *data_pp = (void *)(uintptr_t)value32;
2240 #endif  /* _SYSCALL32 */
2241         }
2242 
2243         (void) ps_pcontinue(ph_p);
2244         ph_unlock(ta_p);
2245         return (return_val);
2246 }
2247 
2248 /*
2249  * Get the base address of a thread's thread local storage (TLS) block
2250  * for the module (executable or shared object) identified by 'moduleid'.
2251  */
2252 #pragma weak td_thr_tlsbase = __td_thr_tlsbase
2253 td_err_e
2254 __td_thr_tlsbase(td_thrhandle_t *th_p, ulong_t moduleid, psaddr_t *base)
2255 {
2256         struct ps_prochandle *ph_p;
2257         td_thragent_t   *ta_p;
2258         td_err_e        return_val;
2259 
2260         if (base == NULL)
2261                 return (TD_ERR);
2262         *base = 0;
2263         if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
2264                 return (return_val);
2265         ta_p = th_p->th_ta_p;
2266         if (ps_pstop(ph_p) != PS_OK) {
2267                 ph_unlock(ta_p);
2268                 return (TD_DBERR);
2269         }
2270 
2271         if (ta_p->model == PR_MODEL_NATIVE) {
2272                 ulwp_t *ulwp = (ulwp_t *)th_p->th_unique;
2273                 tls_metadata_t tls_metadata;
2274                 TLS_modinfo tlsmod;
2275                 tls_t tls;
2276 
2277                 if (ps_pdread(ph_p,
2278                     ta_p->uberdata_addr + offsetof(uberdata_t, tls_metadata),
2279                     &tls_metadata, sizeof (tls_metadata)) != PS_OK)
2280                         return_val = TD_DBERR;
2281                 else if (moduleid >= tls_metadata.tls_modinfo.tls_size)
2282                         return_val = TD_NOTLS;
2283                 else if (ps_pdread(ph_p,
2284                     (psaddr_t)((TLS_modinfo *)
2285                     tls_metadata.tls_modinfo.tls_data + moduleid),
2286                     &tlsmod, sizeof (tlsmod)) != PS_OK)
2287                         return_val = TD_DBERR;
2288                 else if (tlsmod.tm_memsz == 0)
2289                         return_val = TD_NOTLS;
2290                 else if (tlsmod.tm_flags & TM_FLG_STATICTLS)
2291                         *base = (psaddr_t)ulwp - tlsmod.tm_stattlsoffset;
2292                 else if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_tls,
2293                     &tls, sizeof (tls)) != PS_OK)
2294                         return_val = TD_DBERR;
2295                 else if (moduleid >= tls.tls_size)
2296                         return_val = TD_TLSDEFER;
2297                 else if (ps_pdread(ph_p,
2298                     (psaddr_t)((tls_t *)tls.tls_data + moduleid),
2299                     &tls, sizeof (tls)) != PS_OK)
2300                         return_val = TD_DBERR;
2301                 else if (tls.tls_size == 0)
2302                         return_val = TD_TLSDEFER;
2303                 else
2304                         *base = (psaddr_t)tls.tls_data;
2305         } else {
2306 #if defined(_LP64) && defined(_SYSCALL32)
2307                 ulwp32_t *ulwp = (ulwp32_t *)th_p->th_unique;
2308                 tls_metadata32_t tls_metadata;
2309                 TLS_modinfo32 tlsmod;
2310                 tls32_t tls;
2311 
2312                 if (ps_pdread(ph_p,
2313                     ta_p->uberdata_addr + offsetof(uberdata32_t, tls_metadata),
2314                     &tls_metadata, sizeof (tls_metadata)) != PS_OK)
2315                         return_val = TD_DBERR;
2316                 else if (moduleid >= tls_metadata.tls_modinfo.tls_size)
2317                         return_val = TD_NOTLS;
2318                 else if (ps_pdread(ph_p,
2319                     (psaddr_t)((TLS_modinfo32 *)
2320                     (uintptr_t)tls_metadata.tls_modinfo.tls_data + moduleid),
2321                     &tlsmod, sizeof (tlsmod)) != PS_OK)
2322                         return_val = TD_DBERR;
2323                 else if (tlsmod.tm_memsz == 0)
2324                         return_val = TD_NOTLS;
2325                 else if (tlsmod.tm_flags & TM_FLG_STATICTLS)
2326                         *base = (psaddr_t)ulwp - tlsmod.tm_stattlsoffset;
2327                 else if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_tls,
2328                     &tls, sizeof (tls)) != PS_OK)
2329                         return_val = TD_DBERR;
2330                 else if (moduleid >= tls.tls_size)
2331                         return_val = TD_TLSDEFER;
2332                 else if (ps_pdread(ph_p,
2333                     (psaddr_t)((tls32_t *)(uintptr_t)tls.tls_data + moduleid),
2334                     &tls, sizeof (tls)) != PS_OK)
2335                         return_val = TD_DBERR;
2336                 else if (tls.tls_size == 0)
2337                         return_val = TD_TLSDEFER;
2338                 else
2339                         *base = (psaddr_t)tls.tls_data;
2340 #else
2341                 return_val = TD_ERR;
2342 #endif  /* _SYSCALL32 */
2343         }
2344 
2345         (void) ps_pcontinue(ph_p);
2346         ph_unlock(ta_p);
2347         return (return_val);
2348 }
2349 
2350 /*
2351  * Change a thread's priority to the value specified by ti_pri.
2352  * Currently unused by dbx.
2353  */
2354 #pragma weak td_thr_setprio = __td_thr_setprio
2355 /* ARGSUSED */
2356 td_err_e
2357 __td_thr_setprio(td_thrhandle_t *th_p, int ti_pri)
2358 {
2359         return (TD_NOCAPAB);
2360 }
2361 
2362 /*
2363  * This structure links td_thr_lockowner and the lowner_cb callback function.
2364  */
2365 typedef struct {
2366         td_sync_iter_f  *owner_cb;
2367         void            *owner_cb_arg;
2368         td_thrhandle_t  *th_p;
2369 } lowner_cb_ctl_t;
2370 
/*
 * Callback passed to __td_ta_sync_iter() by td_thr_lockowner():
 * invoke the user's callback for each synchronization object --
 * a mutex, or the mutex embedded in a reader/writer lock -- whose
 * owner is the thread of interest (ocb->th_p).
 */
static int
lowner_cb(const td_synchandle_t *sh_p, void *arg)
{
	lowner_cb_ctl_t *ocb = arg;
	int trunc = 0;
	union {
		rwlock_t rwl;
		mutex_t mx;
	} rw_m;

	/*
	 * First try to read the full union, which is large enough for
	 * either object type.  If that fails (presumably because the
	 * object lies too close to the end of a mapping -- confirm),
	 * fall back to reading just the mutex, the smaller of the two,
	 * and remember via 'trunc' that the rwlock part is unread.
	 */
	if (ps_pdread(sh_p->sh_ta_p->ph_p, sh_p->sh_unique,
	    &rw_m, sizeof (rw_m)) != PS_OK) {
		trunc = 1;
		if (ps_pdread(sh_p->sh_ta_p->ph_p, sh_p->sh_unique,
		    &rw_m.mx, sizeof (rw_m.mx)) != PS_OK)
			return (0);
	}
	/* A mutex owned by the thread of interest? */
	if (rw_m.mx.mutex_magic == MUTEX_MAGIC &&
	    rw_m.mx.mutex_owner == ocb->th_p->th_unique)
		return ((ocb->owner_cb)(sh_p, ocb->owner_cb_arg));
	/* A rwlock whose embedded mutex is held by the thread? */
	if (!trunc && rw_m.rwl.magic == RWL_MAGIC) {
		mutex_t *rwlock = &rw_m.rwl.mutex;
		if (rwlock->mutex_owner == ocb->th_p->th_unique)
			return ((ocb->owner_cb)(sh_p, ocb->owner_cb_arg));
	}
	return (0);
}
2398 
2399 /*
2400  * Iterate over the set of locks owned by a specified thread.
2401  * If cb returns a non-zero value, terminate iterations.
2402  */
2403 #pragma weak td_thr_lockowner = __td_thr_lockowner
2404 td_err_e
2405 __td_thr_lockowner(const td_thrhandle_t *th_p, td_sync_iter_f *cb,
2406     void *cb_data)
2407 {
2408         td_thragent_t   *ta_p;
2409         td_err_e        return_val;
2410         lowner_cb_ctl_t lcb;
2411 
2412         /*
2413          * Just sanity checks.
2414          */
2415         if (ph_lock_th((td_thrhandle_t *)th_p, &return_val) == NULL)
2416                 return (return_val);
2417         ta_p = th_p->th_ta_p;
2418         ph_unlock(ta_p);
2419 
2420         lcb.owner_cb = cb;
2421         lcb.owner_cb_arg = cb_data;
2422         lcb.th_p = (td_thrhandle_t *)th_p;
2423         return (__td_ta_sync_iter(ta_p, lowner_cb, &lcb));
2424 }
2425 
2426 /*
2427  * If a thread is asleep on a synchronization variable,
2428  * then get the synchronization handle.
2429  */
2430 #pragma weak td_thr_sleepinfo = __td_thr_sleepinfo
2431 td_err_e
2432 __td_thr_sleepinfo(const td_thrhandle_t *th_p, td_synchandle_t *sh_p)
2433 {
2434         struct ps_prochandle *ph_p;
2435         td_err_e        return_val = TD_OK;
2436         uintptr_t       wchan;
2437 
2438         if (sh_p == NULL)
2439                 return (TD_ERR);
2440         if ((ph_p = ph_lock_th((td_thrhandle_t *)th_p, &return_val)) == NULL)
2441                 return (return_val);
2442 
2443         /*
2444          * No need to stop the process for a simple read.
2445          */
2446         if (th_p->th_ta_p->model == PR_MODEL_NATIVE) {
2447                 ulwp_t *ulwp = (ulwp_t *)th_p->th_unique;
2448 
2449                 if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_wchan,
2450                     &wchan, sizeof (wchan)) != PS_OK)
2451                         return_val = TD_DBERR;
2452         } else {
2453 #if defined(_LP64) && defined(_SYSCALL32)
2454                 ulwp32_t *ulwp = (ulwp32_t *)th_p->th_unique;
2455                 caddr32_t wchan32;
2456 
2457                 if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_wchan,
2458                     &wchan32, sizeof (wchan32)) != PS_OK)
2459                         return_val = TD_DBERR;
2460                 wchan = wchan32;
2461 #else
2462                 return_val = TD_ERR;
2463 #endif  /* _SYSCALL32 */
2464         }
2465 
2466         if (return_val != TD_OK || wchan == 0) {
2467                 sh_p->sh_ta_p = NULL;
2468                 sh_p->sh_unique = 0;
2469                 if (return_val == TD_OK)
2470                         return_val = TD_ERR;
2471         } else {
2472                 sh_p->sh_ta_p = th_p->th_ta_p;
2473                 sh_p->sh_unique = (psaddr_t)wchan;
2474         }
2475 
2476         ph_unlock(th_p->th_ta_p);
2477         return (return_val);
2478 }
2479 
2480 /*
2481  * Which thread is running on an lwp?
2482  */
2483 #pragma weak td_ta_map_lwp2thr = __td_ta_map_lwp2thr
2484 td_err_e
2485 __td_ta_map_lwp2thr(td_thragent_t *ta_p, lwpid_t lwpid,
2486     td_thrhandle_t *th_p)
2487 {
2488         return (__td_ta_map_id2thr(ta_p, lwpid, th_p));
2489 }
2490 
2491 /*
2492  * Common code for td_sync_get_info() and td_sync_get_stats()
2493  */
2494 static td_err_e
2495 sync_get_info_common(const td_synchandle_t *sh_p, struct ps_prochandle *ph_p,
2496     td_syncinfo_t *si_p)
2497 {
2498         int trunc = 0;
2499         td_so_un_t generic_so;
2500 
2501         /*
2502          * Determine the sync. object type; a little type fudgery here.
2503          * First attempt to read the whole union.  If that fails, attempt
2504          * to read just the condvar.  A condvar is the smallest sync. object.
2505          */
2506         if (ps_pdread(ph_p, sh_p->sh_unique,
2507             &generic_so, sizeof (generic_so)) != PS_OK) {
2508                 trunc = 1;
2509                 if (ps_pdread(ph_p, sh_p->sh_unique, &generic_so.condition,
2510                     sizeof (generic_so.condition)) != PS_OK)
2511                         return (TD_DBERR);
2512         }
2513 
2514         switch (generic_so.condition.cond_magic) {
2515         case MUTEX_MAGIC:
2516                 if (trunc && ps_pdread(ph_p, sh_p->sh_unique,
2517                     &generic_so.lock, sizeof (generic_so.lock)) != PS_OK)
2518                         return (TD_DBERR);
2519                 si_p->si_type = TD_SYNC_MUTEX;
2520                 si_p->si_shared_type =
2521                     (generic_so.lock.mutex_type & USYNC_PROCESS);
2522                 (void) memcpy(si_p->si_flags, &generic_so.lock.mutex_flag,
2523                     sizeof (generic_so.lock.mutex_flag));
2524                 si_p->si_state.mutex_locked =
2525                     (generic_so.lock.mutex_lockw != 0);
2526                 si_p->si_size = sizeof (generic_so.lock);
2527                 si_p->si_has_waiters = generic_so.lock.mutex_waiters;
2528                 si_p->si_rcount = generic_so.lock.mutex_rcount;
2529                 si_p->si_prioceiling = generic_so.lock.mutex_ceiling;
2530                 if (si_p->si_state.mutex_locked) {
2531                         if (si_p->si_shared_type & USYNC_PROCESS)
2532                                 si_p->si_ownerpid =
2533                                     generic_so.lock.mutex_ownerpid;
2534                         si_p->si_owner.th_ta_p = sh_p->sh_ta_p;
2535                         si_p->si_owner.th_unique = generic_so.lock.mutex_owner;
2536                 }
2537                 break;
2538         case COND_MAGIC:
2539                 si_p->si_type = TD_SYNC_COND;
2540                 si_p->si_shared_type =
2541                     (generic_so.condition.cond_type & USYNC_PROCESS);
2542                 (void) memcpy(si_p->si_flags, generic_so.condition.flags.flag,
2543                     sizeof (generic_so.condition.flags.flag));
2544                 si_p->si_size = sizeof (generic_so.condition);
2545                 si_p->si_has_waiters =
2546                     (generic_so.condition.cond_waiters_user |
2547                     generic_so.condition.cond_waiters_kernel)? 1 : 0;
2548                 break;
2549         case SEMA_MAGIC:
2550                 if (trunc && ps_pdread(ph_p, sh_p->sh_unique,
2551                     &generic_so.semaphore, sizeof (generic_so.semaphore))
2552                     != PS_OK)
2553                         return (TD_DBERR);
2554                 si_p->si_type = TD_SYNC_SEMA;
2555                 si_p->si_shared_type =
2556                     (generic_so.semaphore.type & USYNC_PROCESS);
2557                 si_p->si_state.sem_count = generic_so.semaphore.count;
2558                 si_p->si_size = sizeof (generic_so.semaphore);
2559                 si_p->si_has_waiters =
2560                     ((lwp_sema_t *)&generic_so.semaphore)->flags[7];
2561                 /* this is useless but the old interface provided it */
2562                 si_p->si_data = (psaddr_t)generic_so.semaphore.count;
2563                 break;
2564         case RWL_MAGIC:
2565         {
2566                 uint32_t rwstate;
2567 
2568                 if (trunc && ps_pdread(ph_p, sh_p->sh_unique,
2569                     &generic_so.rwlock, sizeof (generic_so.rwlock)) != PS_OK)
2570                         return (TD_DBERR);
2571                 si_p->si_type = TD_SYNC_RWLOCK;
2572                 si_p->si_shared_type =
2573                     (generic_so.rwlock.rwlock_type & USYNC_PROCESS);
2574                 si_p->si_size = sizeof (generic_so.rwlock);
2575 
2576                 rwstate = (uint32_t)generic_so.rwlock.rwlock_readers;
2577                 if (rwstate & URW_WRITE_LOCKED) {
2578                         si_p->si_state.nreaders = -1;
2579                         si_p->si_is_wlock = 1;
2580                         si_p->si_owner.th_ta_p = sh_p->sh_ta_p;
2581                         si_p->si_owner.th_unique =
2582                             generic_so.rwlock.rwlock_owner;
2583                         if (si_p->si_shared_type & USYNC_PROCESS)
2584                                 si_p->si_ownerpid =
2585                                     generic_so.rwlock.rwlock_ownerpid;
2586                 } else {
2587                         si_p->si_state.nreaders = (rwstate & URW_READERS_MASK);
2588                 }
2589                 si_p->si_has_waiters = ((rwstate & URW_HAS_WAITERS) != 0);
2590 
2591                 /* this is useless but the old interface provided it */
2592                 si_p->si_data = (psaddr_t)generic_so.rwlock.readers;
2593                 break;
2594         }
2595         default:
2596                 return (TD_BADSH);
2597         }
2598 
2599         si_p->si_ta_p = sh_p->sh_ta_p;
2600         si_p->si_sv_addr = sh_p->sh_unique;
2601         return (TD_OK);
2602 }
2603 
2604 /*
2605  * Given a synchronization handle, fill in the
2606  * information for the synchronization variable into *si_p.
2607  */
2608 #pragma weak td_sync_get_info = __td_sync_get_info
2609 td_err_e
2610 __td_sync_get_info(const td_synchandle_t *sh_p, td_syncinfo_t *si_p)
2611 {
2612         struct ps_prochandle *ph_p;
2613         td_err_e return_val;
2614 
2615         if (si_p == NULL)
2616                 return (TD_ERR);
2617         (void) memset(si_p, 0, sizeof (*si_p));
2618         if ((ph_p = ph_lock_sh(sh_p, &return_val)) == NULL)
2619                 return (return_val);
2620         if (ps_pstop(ph_p) != PS_OK) {
2621                 ph_unlock(sh_p->sh_ta_p);
2622                 return (TD_DBERR);
2623         }
2624 
2625         return_val = sync_get_info_common(sh_p, ph_p, si_p);
2626 
2627         (void) ps_pcontinue(ph_p);
2628         ph_unlock(sh_p->sh_ta_p);
2629         return (return_val);
2630 }
2631 
2632 static uint_t
2633 tdb_addr_hash64(uint64_t addr)
2634 {
2635         uint64_t value60 = (addr >> 4);
2636         uint32_t value30 = (value60 >> 30) ^ (value60 & 0x3fffffff);
2637         return ((value30 >> 15) ^ (value30 & 0x7fff));
2638 }
2639 
2640 static uint_t
2641 tdb_addr_hash32(uint64_t addr)
2642 {
2643         uint32_t value30 = (addr >> 2);           /* 30 bits */
2644         return ((value30 >> 15) ^ (value30 & 0x7fff));
2645 }
2646 
/*
 * Look up the statistics record for the synch object at sync_obj_addr
 * in the target process's address-hashed table at hash_table.  On a
 * hit *sync_stats holds the record; on a miss it is zeroed.  Either
 * way TD_OK is returned; TD_DBERR indicates a failed target read.
 */
static td_err_e
read_sync_stats(td_thragent_t *ta_p, psaddr_t hash_table,
    psaddr_t sync_obj_addr, tdb_sync_stats_t *sync_stats)
{
	psaddr_t next_desc;
	uint64_t first;
	uint_t ix;

	/*
	 * Compute the hash table index from the synch object's address.
	 */
	if (ta_p->model == PR_MODEL_LP64)
		ix = tdb_addr_hash64(sync_obj_addr);
	else
		ix = tdb_addr_hash32(sync_obj_addr);

	/*
	 * Get the address of the first element in the linked list.
	 * The table is an array of 64-bit target addresses.
	 */
	if (ps_pdread(ta_p->ph_p, hash_table + ix * sizeof (uint64_t),
	    &first, sizeof (first)) != PS_OK)
		return (TD_DBERR);

	/*
	 * Search the linked list for an entry for the synch object..
	 * Each iteration reads the whole record, then follows its
	 * 'next' field.  NOTE(review): a corrupt/circular chain in the
	 * target would loop forever here -- no cycle guard exists.
	 */
	for (next_desc = (psaddr_t)first; next_desc != 0;
	    next_desc = (psaddr_t)sync_stats->next) {
		if (ps_pdread(ta_p->ph_p, next_desc,
		    sync_stats, sizeof (*sync_stats)) != PS_OK)
			return (TD_DBERR);
		if (sync_stats->sync_addr == sync_obj_addr)
			return (TD_OK);
	}

	/* Not found: report zeroed statistics rather than an error. */
	(void) memset(sync_stats, 0, sizeof (*sync_stats));
	return (TD_OK);
}
2685 
2686 /*
2687  * Given a synchronization handle, fill in the
2688  * statistics for the synchronization variable into *ss_p.
2689  */
2690 #pragma weak td_sync_get_stats = __td_sync_get_stats
2691 td_err_e
2692 __td_sync_get_stats(const td_synchandle_t *sh_p, td_syncstats_t *ss_p)
2693 {
2694         struct ps_prochandle *ph_p;
2695         td_thragent_t *ta_p;
2696         td_err_e return_val;
2697         register_sync_t enable;
2698         psaddr_t hashaddr;
2699         tdb_sync_stats_t sync_stats;
2700         size_t ix;
2701 
2702         if (ss_p == NULL)
2703                 return (TD_ERR);
2704         (void) memset(ss_p, 0, sizeof (*ss_p));
2705         if ((ph_p = ph_lock_sh(sh_p, &return_val)) == NULL)
2706                 return (return_val);
2707         ta_p = sh_p->sh_ta_p;
2708         if (ps_pstop(ph_p) != PS_OK) {
2709                 ph_unlock(ta_p);
2710                 return (TD_DBERR);
2711         }
2712 
2713         if ((return_val = sync_get_info_common(sh_p, ph_p, &ss_p->ss_info))
2714             != TD_OK) {
2715                 if (return_val != TD_BADSH)
2716                         goto out;
2717                 /* we can correct TD_BADSH */
2718                 (void) memset(&ss_p->ss_info, 0, sizeof (ss_p->ss_info));
2719                 ss_p->ss_info.si_ta_p = sh_p->sh_ta_p;
2720                 ss_p->ss_info.si_sv_addr = sh_p->sh_unique;
2721                 /* we correct si_type and si_size below */
2722                 return_val = TD_OK;
2723         }
2724         if (ps_pdread(ph_p, ta_p->tdb_register_sync_addr,
2725             &enable, sizeof (enable)) != PS_OK) {
2726                 return_val = TD_DBERR;
2727                 goto out;
2728         }
2729         if (enable != REGISTER_SYNC_ON)
2730                 goto out;
2731 
2732         /*
2733          * Get the address of the hash table in the target process.
2734          */
2735         if (ta_p->model == PR_MODEL_NATIVE) {
2736                 if (ps_pdread(ph_p, ta_p->uberdata_addr +
2737                     offsetof(uberdata_t, tdb.tdb_sync_addr_hash),
2738                     &hashaddr, sizeof (&hashaddr)) != PS_OK) {
2739                         return_val = TD_DBERR;
2740                         goto out;
2741                 }
2742         } else {
2743 #if defined(_LP64) && defined(_SYSCALL32)
2744                 caddr32_t addr;
2745 
2746                 if (ps_pdread(ph_p, ta_p->uberdata_addr +
2747                     offsetof(uberdata32_t, tdb.tdb_sync_addr_hash),
2748                     &addr, sizeof (addr)) != PS_OK) {
2749                         return_val = TD_DBERR;
2750                         goto out;
2751                 }
2752                 hashaddr = addr;
2753 #else
2754                 return_val = TD_ERR;
2755                 goto out;
2756 #endif  /* _SYSCALL32 */
2757         }
2758 
2759         if (hashaddr == 0)
2760                 return_val = TD_BADSH;
2761         else
2762                 return_val = read_sync_stats(ta_p, hashaddr,
2763                     sh_p->sh_unique, &sync_stats);
2764         if (return_val != TD_OK)
2765                 goto out;
2766 
2767         /*
2768          * We have the hash table entry.  Transfer the data to
2769          * the td_syncstats_t structure provided by the caller.
2770          */
2771         switch (sync_stats.un.type) {
2772         case TDB_MUTEX:
2773         {
2774                 td_mutex_stats_t *msp = &ss_p->ss_un.mutex;
2775 
2776                 ss_p->ss_info.si_type = TD_SYNC_MUTEX;
2777                 ss_p->ss_info.si_size = sizeof (mutex_t);
2778                 msp->mutex_lock =
2779                     sync_stats.un.mutex.mutex_lock;
2780                 msp->mutex_sleep =
2781                     sync_stats.un.mutex.mutex_sleep;
2782                 msp->mutex_sleep_time =
2783                     sync_stats.un.mutex.mutex_sleep_time;
2784                 msp->mutex_hold_time =
2785                     sync_stats.un.mutex.mutex_hold_time;
2786                 msp->mutex_try =
2787                     sync_stats.un.mutex.mutex_try;
2788                 msp->mutex_try_fail =
2789                     sync_stats.un.mutex.mutex_try_fail;
2790                 if (sync_stats.sync_addr >= ta_p->hash_table_addr &&
2791                     (ix = sync_stats.sync_addr - ta_p->hash_table_addr)
2792                     < ta_p->hash_size * sizeof (thr_hash_table_t))
2793                         msp->mutex_internal =
2794                             ix / sizeof (thr_hash_table_t) + 1;
2795                 break;
2796         }
2797         case TDB_COND:
2798         {
2799                 td_cond_stats_t *csp = &ss_p->ss_un.cond;
2800 
2801                 ss_p->ss_info.si_type = TD_SYNC_COND;
2802                 ss_p->ss_info.si_size = sizeof (cond_t);
2803                 csp->cond_wait =
2804                     sync_stats.un.cond.cond_wait;
2805                 csp->cond_timedwait =
2806                     sync_stats.un.cond.cond_timedwait;
2807                 csp->cond_wait_sleep_time =
2808                     sync_stats.un.cond.cond_wait_sleep_time;
2809                 csp->cond_timedwait_sleep_time =
2810                     sync_stats.un.cond.cond_timedwait_sleep_time;
2811                 csp->cond_timedwait_timeout =
2812                     sync_stats.un.cond.cond_timedwait_timeout;
2813                 csp->cond_signal =
2814                     sync_stats.un.cond.cond_signal;
2815                 csp->cond_broadcast =
2816                     sync_stats.un.cond.cond_broadcast;
2817                 if (sync_stats.sync_addr >= ta_p->hash_table_addr &&
2818                     (ix = sync_stats.sync_addr - ta_p->hash_table_addr)
2819                     < ta_p->hash_size * sizeof (thr_hash_table_t))
2820                         csp->cond_internal =
2821                             ix / sizeof (thr_hash_table_t) + 1;
2822                 break;
2823         }
2824         case TDB_RWLOCK:
2825         {
2826                 td_rwlock_stats_t *rwsp = &ss_p->ss_un.rwlock;
2827 
2828                 ss_p->ss_info.si_type = TD_SYNC_RWLOCK;
2829                 ss_p->ss_info.si_size = sizeof (rwlock_t);
2830                 rwsp->rw_rdlock =
2831                     sync_stats.un.rwlock.rw_rdlock;
2832                 rwsp->rw_rdlock_try =
2833                     sync_stats.un.rwlock.rw_rdlock_try;
2834                 rwsp->rw_rdlock_try_fail =
2835                     sync_stats.un.rwlock.rw_rdlock_try_fail;
2836                 rwsp->rw_wrlock =
2837                     sync_stats.un.rwlock.rw_wrlock;
2838                 rwsp->rw_wrlock_hold_time =
2839                     sync_stats.un.rwlock.rw_wrlock_hold_time;
2840                 rwsp->rw_wrlock_try =
2841                     sync_stats.un.rwlock.rw_wrlock_try;
2842                 rwsp->rw_wrlock_try_fail =
2843                     sync_stats.un.rwlock.rw_wrlock_try_fail;
2844                 break;
2845         }
2846         case TDB_SEMA:
2847         {
2848                 td_sema_stats_t *ssp = &ss_p->ss_un.sema;
2849 
2850                 ss_p->ss_info.si_type = TD_SYNC_SEMA;
2851                 ss_p->ss_info.si_size = sizeof (sema_t);
2852                 ssp->sema_wait =
2853                     sync_stats.un.sema.sema_wait;
2854                 ssp->sema_wait_sleep =
2855                     sync_stats.un.sema.sema_wait_sleep;
2856                 ssp->sema_wait_sleep_time =
2857                     sync_stats.un.sema.sema_wait_sleep_time;
2858                 ssp->sema_trywait =
2859                     sync_stats.un.sema.sema_trywait;
2860                 ssp->sema_trywait_fail =
2861                     sync_stats.un.sema.sema_trywait_fail;
2862                 ssp->sema_post =
2863                     sync_stats.un.sema.sema_post;
2864                 ssp->sema_max_count =
2865                     sync_stats.un.sema.sema_max_count;
2866                 ssp->sema_min_count =
2867                     sync_stats.un.sema.sema_min_count;
2868                 break;
2869         }
2870         default:
2871                 return_val = TD_BADSH;
2872                 break;
2873         }
2874 
2875 out:
2876         (void) ps_pcontinue(ph_p);
2877         ph_unlock(ta_p);
2878         return (return_val);
2879 }
2880 
2881 /*
2882  * Change the state of a synchronization variable.
2883  *      1) mutex lock state set to value
2884  *      2) semaphore's count set to value
2885  *      3) writer's lock set by value < 0
2886  *      4) reader's lock number of readers set to value >= 0
2887  * Currently unused by dbx.
2888  */
2889 #pragma weak td_sync_setstate = __td_sync_setstate
2890 td_err_e
2891 __td_sync_setstate(const td_synchandle_t *sh_p, int value)
2892 {
2893         struct ps_prochandle *ph_p;
2894         int             trunc = 0;
2895         td_err_e        return_val;
2896         td_so_un_t      generic_so;
2897         uint32_t        *rwstate;
2898 
2899         if ((ph_p = ph_lock_sh(sh_p, &return_val)) == NULL)
2900                 return (return_val);
2901         if (ps_pstop(ph_p) != PS_OK) {
2902                 ph_unlock(sh_p->sh_ta_p);
2903                 return (TD_DBERR);
2904         }
2905 
2906         /*
2907          * Read the synch. variable information.
2908          * First attempt to read the whole union and if that fails
2909          * fall back to reading only the smallest member, the condvar.
2910          */
2911         if (ps_pdread(ph_p, sh_p->sh_unique, &generic_so,
2912             sizeof (generic_so)) != PS_OK) {
2913                 trunc = 1;
2914                 if (ps_pdread(ph_p, sh_p->sh_unique, &generic_so.condition,
2915                     sizeof (generic_so.condition)) != PS_OK) {
2916                         (void) ps_pcontinue(ph_p);
2917                         ph_unlock(sh_p->sh_ta_p);
2918                         return (TD_DBERR);
2919                 }
2920         }
2921 
2922         /*
2923          * Set the new value in the sync. variable, read the synch. variable
2924          * information. from the process, reset its value and write it back.
2925          */
2926         switch (generic_so.condition.mutex_magic) {
2927         case MUTEX_MAGIC:
2928                 if (trunc && ps_pdread(ph_p, sh_p->sh_unique,
2929                     &generic_so.lock, sizeof (generic_so.lock)) != PS_OK) {
2930                         return_val = TD_DBERR;
2931                         break;
2932                 }
2933                 generic_so.lock.mutex_lockw = (uint8_t)value;
2934                 if (ps_pdwrite(ph_p, sh_p->sh_unique, &generic_so.lock,
2935                     sizeof (generic_so.lock)) != PS_OK)
2936                         return_val = TD_DBERR;
2937                 break;
2938         case SEMA_MAGIC:
2939                 if (trunc && ps_pdread(ph_p, sh_p->sh_unique,
2940                     &generic_so.semaphore, sizeof (generic_so.semaphore))
2941                     != PS_OK) {
2942                         return_val = TD_DBERR;
2943                         break;
2944                 }
2945                 generic_so.semaphore.count = value;
2946                 if (ps_pdwrite(ph_p, sh_p->sh_unique, &generic_so.semaphore,
2947                     sizeof (generic_so.semaphore)) != PS_OK)
2948                         return_val = TD_DBERR;
2949                 break;
2950         case COND_MAGIC:
2951                 /* Operation not supported on a condition variable */
2952                 return_val = TD_ERR;
2953                 break;
2954         case RWL_MAGIC:
2955                 if (trunc && ps_pdread(ph_p, sh_p->sh_unique,
2956                     &generic_so.rwlock, sizeof (generic_so.rwlock)) != PS_OK) {
2957                         return_val = TD_DBERR;
2958                         break;
2959                 }
2960                 rwstate = (uint32_t *)&generic_so.rwlock.readers;
2961                 *rwstate &= URW_HAS_WAITERS;
2962                 if (value < 0)
2963                         *rwstate |= URW_WRITE_LOCKED;
2964                 else
2965                         *rwstate |= (value & URW_READERS_MASK);
2966                 if (ps_pdwrite(ph_p, sh_p->sh_unique, &generic_so.rwlock,
2967                     sizeof (generic_so.rwlock)) != PS_OK)
2968                         return_val = TD_DBERR;
2969                 break;
2970         default:
2971                 /* Bad sync. object type */
2972                 return_val = TD_BADSH;
2973                 break;
2974         }
2975 
2976         (void) ps_pcontinue(ph_p);
2977         ph_unlock(sh_p->sh_ta_p);
2978         return (return_val);
2979 }
2980 
/*
 * Control block threaded through __td_ta_thr_iter() to waiters_cb()
 * while enumerating the threads blocked on one synchronization object.
 */
typedef struct {
	td_thr_iter_f	*waiter_cb;	/* user callback, one call per waiter */
	psaddr_t	sync_obj_addr;	/* target address of the sync. object */
	uint16_t	sync_magic;	/* magic number read from the object */
	void		*waiter_cb_arg;	/* opaque argument for waiter_cb */
	td_err_e	errcode;	/* error latched by waiters_cb() */
} waiter_cb_ctl_t;
2988 
/*
 * Per-thread callback for __td_sync_waiters(): read the thread's wait
 * channel from the target and, if it matches the sync. object being
 * queried, forward the thread to the user's callback.  Returns non-zero
 * (stopping the iteration) on error or when the user callback says so.
 */
static int
waiters_cb(const td_thrhandle_t *th_p, void *arg)
{
	td_thragent_t	*ta_p = th_p->th_ta_p;
	struct ps_prochandle *ph_p = ta_p->ph_p;
	waiter_cb_ctl_t *wcb = arg;
	caddr_t		wchan;

	if (ta_p->model == PR_MODEL_NATIVE) {
		ulwp_t *ulwp = (ulwp_t *)th_p->th_unique;

		if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_wchan,
		    &wchan, sizeof (wchan)) != PS_OK) {
			/* latch the error; a zero return would hide it */
			wcb->errcode = TD_DBERR;
			return (1);
		}
	} else {
#if defined(_LP64) && defined(_SYSCALL32)
		/* 32-bit target under a 64-bit agent: widen the address. */
		ulwp32_t *ulwp = (ulwp32_t *)th_p->th_unique;
		caddr32_t wchan32;

		if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_wchan,
		    &wchan32, sizeof (wchan32)) != PS_OK) {
			wcb->errcode = TD_DBERR;
			return (1);
		}
		wchan = (caddr_t)(uintptr_t)wchan32;
#else
		wcb->errcode = TD_ERR;
		return (1);
#endif	/* _SYSCALL32 */
	}

	/* A NULL wait channel means the thread is not blocked here. */
	if (wchan == NULL)
		return (0);

	if (wchan == (caddr_t)wcb->sync_obj_addr)
		return ((*wcb->waiter_cb)(th_p, wcb->waiter_cb_arg));

	return (0);
}
3030 
3031 /*
3032  * For a given synchronization variable, iterate over the
3033  * set of waiting threads.  The call back function is passed
3034  * two parameters, a pointer to a thread handle and a pointer
3035  * to extra call back data.
3036  */
3037 #pragma weak td_sync_waiters = __td_sync_waiters
3038 td_err_e
3039 __td_sync_waiters(const td_synchandle_t *sh_p, td_thr_iter_f *cb, void *cb_data)
3040 {
3041         struct ps_prochandle *ph_p;
3042         waiter_cb_ctl_t wcb;
3043         td_err_e        return_val;
3044 
3045         if ((ph_p = ph_lock_sh(sh_p, &return_val)) == NULL)
3046                 return (return_val);
3047         if (ps_pdread(ph_p,
3048             (psaddr_t)&((mutex_t *)sh_p->sh_unique)->mutex_magic,
3049             (caddr_t)&wcb.sync_magic, sizeof (wcb.sync_magic)) != PS_OK) {
3050                 ph_unlock(sh_p->sh_ta_p);
3051                 return (TD_DBERR);
3052         }
3053         ph_unlock(sh_p->sh_ta_p);
3054 
3055         switch (wcb.sync_magic) {
3056         case MUTEX_MAGIC:
3057         case COND_MAGIC:
3058         case SEMA_MAGIC:
3059         case RWL_MAGIC:
3060                 break;
3061         default:
3062                 return (TD_BADSH);
3063         }
3064 
3065         wcb.waiter_cb = cb;
3066         wcb.sync_obj_addr = sh_p->sh_unique;
3067         wcb.waiter_cb_arg = cb_data;
3068         wcb.errcode = TD_OK;
3069         return_val = __td_ta_thr_iter(sh_p->sh_ta_p, waiters_cb, &wcb,
3070             TD_THR_SLEEP, TD_THR_LOWEST_PRIORITY,
3071             TD_SIGNO_MASK, TD_THR_ANY_USER_FLAGS);
3072 
3073         if (return_val != TD_OK)
3074                 return (return_val);
3075 
3076         return (wcb.errcode);
3077 }