15254 %ymm registers not restored after signal handler
15367 x86 getfpregs() summons corrupting %xmm ghosts
15333 want x86 /proc xregs support (libc_db, libproc, mdb, etc.)
15336 want libc functions for extended ucontext_t
15334 want ps_lwphandle-specific reg routines
15328 FPU_CW_INIT mistreats reserved bit
15335 i86pc fpu_subr.c isn't really platform-specific
15332 setcontext(2) isn't actually noreturn
15331 need <sys/stdalign.h>
Change-Id: I7060aa86042dfb989f77fc3323c065ea2eafa9ad
Conflicts:
    usr/src/uts/common/fs/proc/prcontrol.c
    usr/src/uts/intel/os/archdep.c
    usr/src/uts/intel/sys/ucontext.h
    usr/src/uts/intel/syscall/getcontext.c
    
--- old/usr/src/lib/libc_db/common/thread_db.c
+++ new/usr/src/lib/libc_db/common/thread_db.c
   1    1  /*
   2    2   * CDDL HEADER START
   3    3   *
   4    4   * The contents of this file are subject to the terms of the
   5    5   * Common Development and Distribution License (the "License").
   6    6   * You may not use this file except in compliance with the License.
   7    7   *
   8    8   * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
   9    9   * or http://www.opensolaris.org/os/licensing.
  10   10   * See the License for the specific language governing permissions
  11   11   * and limitations under the License.
  12   12   *
  13   13   * When distributing Covered Code, include this CDDL HEADER in each
  14   14   * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
  15   15   * If applicable, add the following below this CDDL HEADER, with the
  16   16   * fields enclosed by brackets "[]" replaced with your own identifying
  17   17   * information: Portions Copyright [yyyy] [name of copyright owner]
  18   18   *
  19   19   * CDDL HEADER END
  20   20   */
  21   21  
  22   22  /*
  23   23   * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
  24   24   * Use is subject to license terms.
  25   25   */
  26   26  
  27   27  /*
  28   28   * Copyright (c) 2014, Joyent, Inc. All rights reserved.
       29 + * Copyright 2023 Oxide Computer Company
  29   30   */
  30   31  
  31   32  #include <stdio.h>
  32   33  #include <stdlib.h>
  33   34  #include <stddef.h>
  34   35  #include <unistd.h>
  35   36  #include <thr_uberdata.h>
  36   37  #include <thread_db.h>
  37   38  #include <libc_int.h>
  38   39  
  39   40  /*
  40   41   * Private structures.
  41   42   */
  42   43  
  43   44  typedef union {
  44   45          mutex_t         lock;
  45   46          rwlock_t        rwlock;
  46   47          sema_t          semaphore;
  47   48          cond_t          condition;
  48   49  } td_so_un_t;
  49   50  
  50   51  struct td_thragent {
  51   52          rwlock_t        rwlock;
  52   53          struct ps_prochandle *ph_p;
  53   54          int             initialized;
  54   55          int             sync_tracking;
  55   56          int             model;
  56   57          int             primary_map;
  57   58          psaddr_t        bootstrap_addr;
  58   59          psaddr_t        uberdata_addr;
  59   60          psaddr_t        tdb_eventmask_addr;
  60   61          psaddr_t        tdb_register_sync_addr;
  61   62          psaddr_t        tdb_events[TD_MAX_EVENT_NUM - TD_MIN_EVENT_NUM + 1];
  62   63          psaddr_t        hash_table_addr;
  63   64          int             hash_size;
  64   65          lwpid_t         single_lwpid;
  65   66          psaddr_t        single_ulwp_addr;
  66   67  };
  67   68  
  68   69  /*
  69   70   * This is the name of the variable in libc that contains
  70   71   * the uberdata address that we will need.
  71   72   */
  72   73  #define TD_BOOTSTRAP_NAME       "_tdb_bootstrap"
  73   74  /*
  74   75   * This is the actual name of uberdata, used in the event
  75   76   * that tdb_bootstrap has not yet been initialized.
  76   77   */
  77   78  #define TD_UBERDATA_NAME        "_uberdata"
  78   79  /*
  79   80   * The library name should end with ".so.1", but older versions of
  80   81   * dbx expect the unadorned name and malfunction if ".1" is specified.
  81   82   * Unfortunately, if ".1" is not specified, mdb malfunctions when it
  82   83   * is applied to another instance of itself (due to the presence of
  83   84   * /usr/lib/mdb/proc/libc.so).  So we try it both ways.
  84   85   */
  85   86  #define TD_LIBRARY_NAME         "libc.so"
  86   87  #define TD_LIBRARY_NAME_1       "libc.so.1"
  87   88  
  88   89  td_err_e __td_thr_get_info(td_thrhandle_t *th_p, td_thrinfo_t *ti_p);
  89   90  
  90   91  td_err_e __td_ta_thr_iter(td_thragent_t *ta_p, td_thr_iter_f *cb,
  91   92          void *cbdata_p, td_thr_state_e state, int ti_pri,
  92   93          sigset_t *ti_sigmask_p, unsigned ti_user_flags);
  93   94  
  94   95  /*
  95   96   * Initialize threads debugging interface.
  96   97   */
  97   98  #pragma weak td_init = __td_init
  98   99  td_err_e
  99  100  __td_init()
 100  101  {
 101  102          return (TD_OK);
 102  103  }
 103  104  
 104  105  /*
 105  106   * This function does nothing, and never did.
 106  107   * But the symbol is in the ABI, so we can't delete it.
 107  108   */
 108  109  #pragma weak td_log = __td_log
 109  110  void
 110  111  __td_log()
 111  112  {
 112  113  }
 113  114  
 114  115  /*
 115  116   * Short-cut to read just the hash table size from the process,
 116  117   * to avoid repeatedly reading the full uberdata structure when
 117  118   * dealing with a single-threaded process.
 118  119   */
 119  120  static uint_t
 120  121  td_read_hash_size(td_thragent_t *ta_p)
 121  122  {
 122  123          psaddr_t addr;
 123  124          uint_t hash_size;
 124  125  
 125  126          switch (ta_p->initialized) {
 126  127          default:        /* uninitialized */
 127  128                  return (0);
 128  129          case 1:         /* partially initialized */
 129  130                  break;
 130  131          case 2:         /* fully initialized */
 131  132                  return (ta_p->hash_size);
 132  133          }
 133  134  
 134  135          if (ta_p->model == PR_MODEL_NATIVE) {
 135  136                  addr = ta_p->uberdata_addr + offsetof(uberdata_t, hash_size);
 136  137          } else {
 137  138  #if defined(_LP64) && defined(_SYSCALL32)
 138  139                  addr = ta_p->uberdata_addr + offsetof(uberdata32_t, hash_size);
 139  140  #else
 140  141                  addr = 0;
 141  142  #endif
 142  143          }
 143  144          if (ps_pdread(ta_p->ph_p, addr, &hash_size, sizeof (hash_size))
 144  145              != PS_OK)
 145  146                  return (0);
 146  147          return (hash_size);
 147  148  }
 148  149  
 149  150  static td_err_e
 150  151  td_read_uberdata(td_thragent_t *ta_p)
 151  152  {
 152  153          struct ps_prochandle *ph_p = ta_p->ph_p;
 153  154          int i;
 154  155  
 155  156          if (ta_p->model == PR_MODEL_NATIVE) {
 156  157                  uberdata_t uberdata;
 157  158  
 158  159                  if (ps_pdread(ph_p, ta_p->uberdata_addr,
 159  160                      &uberdata, sizeof (uberdata)) != PS_OK)
 160  161                          return (TD_DBERR);
 161  162                  ta_p->primary_map = uberdata.primary_map;
 162  163                  ta_p->tdb_eventmask_addr = ta_p->uberdata_addr +
 163  164                      offsetof(uberdata_t, tdb.tdb_ev_global_mask);
 164  165                  ta_p->tdb_register_sync_addr = ta_p->uberdata_addr +
 165  166                      offsetof(uberdata_t, uberflags.uf_tdb_register_sync);
 166  167                  ta_p->hash_table_addr = (psaddr_t)uberdata.thr_hash_table;
 167  168                  ta_p->hash_size = uberdata.hash_size;
 168  169                  if (ps_pdread(ph_p, (psaddr_t)uberdata.tdb.tdb_events,
 169  170                      ta_p->tdb_events, sizeof (ta_p->tdb_events)) != PS_OK)
 170  171                          return (TD_DBERR);
 171  172          } else {
 172  173  #if defined(_LP64) && defined(_SYSCALL32)
 173  174                  uberdata32_t uberdata;
 174  175                  caddr32_t tdb_events[TD_MAX_EVENT_NUM - TD_MIN_EVENT_NUM + 1];
 175  176  
 176  177                  if (ps_pdread(ph_p, ta_p->uberdata_addr,
 177  178                      &uberdata, sizeof (uberdata)) != PS_OK)
 178  179                          return (TD_DBERR);
 179  180                  ta_p->primary_map = uberdata.primary_map;
 180  181                  ta_p->tdb_eventmask_addr = ta_p->uberdata_addr +
 181  182                      offsetof(uberdata32_t, tdb.tdb_ev_global_mask);
 182  183                  ta_p->tdb_register_sync_addr = ta_p->uberdata_addr +
 183  184                      offsetof(uberdata32_t, uberflags.uf_tdb_register_sync);
 184  185                  ta_p->hash_table_addr = (psaddr_t)uberdata.thr_hash_table;
 185  186                  ta_p->hash_size = uberdata.hash_size;
 186  187                  if (ps_pdread(ph_p, (psaddr_t)uberdata.tdb.tdb_events,
 187  188                      tdb_events, sizeof (tdb_events)) != PS_OK)
 188  189                          return (TD_DBERR);
 189  190                  for (i = 0; i < TD_MAX_EVENT_NUM - TD_MIN_EVENT_NUM + 1; i++)
 190  191                          ta_p->tdb_events[i] = tdb_events[i];
 191  192  #else
 192  193                  return (TD_DBERR);
 193  194  #endif
 194  195          }
 195  196  
 196  197          /*
 197  198           * Unfortunately, we are (implicitly) assuming that our uberdata
 198  199           * definition precisely matches that of our target.  If this is not
 199  200           * true (that is, if we're examining a core file from a foreign
 200  201           * system that has a different definition of uberdata), the failure
 201  202           * modes can be frustratingly non-explicit.  In an effort to catch
 202  203           * this upon initialization (when the debugger may still be able to
 203  204           * opt for another thread model or may be able to fail explicitly), we
 204  205           * check that each of our tdb_events points to valid memory (these are
 205  206           * putatively text upon which a breakpoint can be issued), with the
 206  207           * hope that this is enough of a self-consistency check to lead to
 207  208           * explicit failure on a mismatch.
 208  209           */
 209  210          for (i = 0; i < TD_MAX_EVENT_NUM - TD_MIN_EVENT_NUM + 1; i++) {
 210  211                  uint8_t check;
 211  212  
 212  213                  if (ps_pdread(ph_p, (psaddr_t)ta_p->tdb_events[i],
 213  214                      &check, sizeof (check)) != PS_OK) {
 214  215                          return (TD_DBERR);
 215  216                  }
 216  217          }
 217  218  
 218  219          if (ta_p->hash_size != 1) {     /* multi-threaded */
 219  220                  ta_p->initialized = 2;
 220  221                  ta_p->single_lwpid = 0;
 221  222                  ta_p->single_ulwp_addr = 0;
 222  223          } else {                        /* single-threaded */
 223  224                  ta_p->initialized = 1;
 224  225                  /*
 225  226                   * Get the address and lwpid of the single thread/LWP.
 226  227                   * It may not be ulwp_one if this is a child of fork1().
 227  228                   */
 228  229                  if (ta_p->model == PR_MODEL_NATIVE) {
 229  230                          thr_hash_table_t head;
 230  231                          lwpid_t lwpid = 0;
 231  232  
 232  233                          if (ps_pdread(ph_p, ta_p->hash_table_addr,
 233  234                              &head, sizeof (head)) != PS_OK)
 234  235                                  return (TD_DBERR);
 235  236                          if ((psaddr_t)head.hash_bucket == 0)
 236  237                                  ta_p->initialized = 0;
 237  238                          else if (ps_pdread(ph_p, (psaddr_t)head.hash_bucket +
 238  239                              offsetof(ulwp_t, ul_lwpid),
 239  240                              &lwpid, sizeof (lwpid)) != PS_OK)
 240  241                                  return (TD_DBERR);
 241  242                          ta_p->single_lwpid = lwpid;
 242  243                          ta_p->single_ulwp_addr = (psaddr_t)head.hash_bucket;
 243  244                  } else {
 244  245  #if defined(_LP64) && defined(_SYSCALL32)
 245  246                          thr_hash_table32_t head;
 246  247                          lwpid_t lwpid = 0;
 247  248  
 248  249                          if (ps_pdread(ph_p, ta_p->hash_table_addr,
 249  250                              &head, sizeof (head)) != PS_OK)
 250  251                                  return (TD_DBERR);
 251  252                          if ((psaddr_t)head.hash_bucket == 0)
 252  253                                  ta_p->initialized = 0;
 253  254                          else if (ps_pdread(ph_p, (psaddr_t)head.hash_bucket +
 254  255                              offsetof(ulwp32_t, ul_lwpid),
 255  256                              &lwpid, sizeof (lwpid)) != PS_OK)
 256  257                                  return (TD_DBERR);
 257  258                          ta_p->single_lwpid = lwpid;
 258  259                          ta_p->single_ulwp_addr = (psaddr_t)head.hash_bucket;
 259  260  #else
 260  261                          return (TD_DBERR);
 261  262  #endif
 262  263                  }
 263  264          }
 264  265          if (!ta_p->primary_map)
 265  266                  ta_p->initialized = 0;
 266  267          return (TD_OK);
 267  268  }
 268  269  
 269  270  static td_err_e
 270  271  td_read_bootstrap_data(td_thragent_t *ta_p)
 271  272  {
 272  273          struct ps_prochandle *ph_p = ta_p->ph_p;
 273  274          psaddr_t bootstrap_addr;
 274  275          psaddr_t uberdata_addr;
 275  276          ps_err_e db_return;
 276  277          td_err_e return_val;
 277  278          int do_1;
 278  279  
 279  280          switch (ta_p->initialized) {
 280  281          case 2:                 /* fully initialized */
 281  282                  return (TD_OK);
 282  283          case 1:                 /* partially initialized */
 283  284                  if (td_read_hash_size(ta_p) == 1)
 284  285                          return (TD_OK);
 285  286                  return (td_read_uberdata(ta_p));
 286  287          }
 287  288  
 288  289          /*
 289  290           * Uninitialized -- do the startup work.
 290  291           * We set ta_p->initialized to -1 to cut off recursive calls
 291  292           * into libc_db by code in the provider of ps_pglobal_lookup().
 292  293           */
 293  294          do_1 = 0;
 294  295          ta_p->initialized = -1;
 295  296          db_return = ps_pglobal_lookup(ph_p, TD_LIBRARY_NAME,
 296  297              TD_BOOTSTRAP_NAME, &bootstrap_addr);
 297  298          if (db_return == PS_NOSYM) {
 298  299                  do_1 = 1;
 299  300                  db_return = ps_pglobal_lookup(ph_p, TD_LIBRARY_NAME_1,
 300  301                      TD_BOOTSTRAP_NAME, &bootstrap_addr);
 301  302          }
 302  303          if (db_return == PS_NOSYM)      /* libc is not linked yet */
 303  304                  return (TD_NOLIBTHREAD);
 304  305          if (db_return != PS_OK)
 305  306                  return (TD_ERR);
 306  307          db_return = ps_pglobal_lookup(ph_p,
 307  308              do_1? TD_LIBRARY_NAME_1 : TD_LIBRARY_NAME,
 308  309              TD_UBERDATA_NAME, &uberdata_addr);
 309  310          if (db_return == PS_NOSYM)      /* libc is not linked yet */
 310  311                  return (TD_NOLIBTHREAD);
 311  312          if (db_return != PS_OK)
 312  313                  return (TD_ERR);
 313  314  
 314  315          /*
 315  316           * Read the uberdata address into the thread agent structure.
 316  317           */
 317  318          if (ta_p->model == PR_MODEL_NATIVE) {
 318  319                  psaddr_t psaddr;
 319  320                  if (ps_pdread(ph_p, bootstrap_addr,
 320  321                      &psaddr, sizeof (psaddr)) != PS_OK)
 321  322                          return (TD_DBERR);
 322  323                  if ((ta_p->bootstrap_addr = psaddr) == 0)
 323  324                          psaddr = uberdata_addr;
 324  325                  else if (ps_pdread(ph_p, psaddr,
 325  326                      &psaddr, sizeof (psaddr)) != PS_OK)
 326  327                          return (TD_DBERR);
 327  328                  if (psaddr == 0) {
 328  329                          /* primary linkmap in the tgt is not initialized */
 329  330                          ta_p->bootstrap_addr = 0;
 330  331                          psaddr = uberdata_addr;
 331  332                  }
 332  333                  ta_p->uberdata_addr = psaddr;
 333  334          } else {
 334  335  #if defined(_LP64) && defined(_SYSCALL32)
 335  336                  caddr32_t psaddr;
 336  337                  if (ps_pdread(ph_p, bootstrap_addr,
 337  338                      &psaddr, sizeof (psaddr)) != PS_OK)
 338  339                          return (TD_DBERR);
 339  340                  if ((ta_p->bootstrap_addr = (psaddr_t)psaddr) == 0)
 340  341                          psaddr = (caddr32_t)uberdata_addr;
 341  342                  else if (ps_pdread(ph_p, (psaddr_t)psaddr,
 342  343                      &psaddr, sizeof (psaddr)) != PS_OK)
 343  344                          return (TD_DBERR);
 344  345                  if (psaddr == 0) {
 345  346                          /* primary linkmap in the tgt is not initialized */
 346  347                          ta_p->bootstrap_addr = 0;
 347  348                          psaddr = (caddr32_t)uberdata_addr;
 348  349                  }
 349  350                  ta_p->uberdata_addr = (psaddr_t)psaddr;
 350  351  #else
 351  352                  return (TD_DBERR);
 352  353  #endif  /* _SYSCALL32 */
 353  354          }
 354  355  
 355  356          if ((return_val = td_read_uberdata(ta_p)) != TD_OK)
 356  357                  return (return_val);
 357  358          if (ta_p->bootstrap_addr == 0)
 358  359                  ta_p->initialized = 0;
 359  360          return (TD_OK);
 360  361  }
 361  362  
 362  363  #pragma weak ps_kill
 363  364  #pragma weak ps_lrolltoaddr
 364  365  
 365  366  /*
 366  367   * Allocate a new agent process handle ("thread agent").
 367  368   */
 368  369  #pragma weak td_ta_new = __td_ta_new
 369  370  td_err_e
 370  371  __td_ta_new(struct ps_prochandle *ph_p, td_thragent_t **ta_pp)
 371  372  {
 372  373          td_thragent_t *ta_p;
 373  374          int model;
 374  375          td_err_e return_val = TD_OK;
 375  376  
 376  377          if (ph_p == NULL)
 377  378                  return (TD_BADPH);
 378  379          if (ta_pp == NULL)
 379  380                  return (TD_ERR);
 380  381          *ta_pp = NULL;
 381  382          if (ps_pstop(ph_p) != PS_OK)
 382  383                  return (TD_DBERR);
 383  384          /*
 384  385           * ps_pdmodel might not be defined if this is an older client.
 385  386           * Make it a weak symbol and test if it exists before calling.
 386  387           */
 387  388  #pragma weak ps_pdmodel
 388  389          if (ps_pdmodel == NULL) {
 389  390                  model = PR_MODEL_NATIVE;
 390  391          } else if (ps_pdmodel(ph_p, &model) != PS_OK) {
 391  392                  (void) ps_pcontinue(ph_p);
 392  393                  return (TD_ERR);
 393  394          }
 394  395          if ((ta_p = malloc(sizeof (*ta_p))) == NULL) {
 395  396                  (void) ps_pcontinue(ph_p);
 396  397                  return (TD_MALLOC);
 397  398          }
 398  399  
 399  400          /*
 400  401           * Initialize the agent process handle.
 401  402           * Pick up the symbol value we need from the target process.
 402  403           */
 403  404          (void) memset(ta_p, 0, sizeof (*ta_p));
 404  405          ta_p->ph_p = ph_p;
 405  406          (void) rwlock_init(&ta_p->rwlock, USYNC_THREAD, NULL);
 406  407          ta_p->model = model;
 407  408          return_val = td_read_bootstrap_data(ta_p);
 408  409  
 409  410          /*
 410  411           * Because the old libthread_db enabled lock tracking by default,
 411  412           * we must also do it.  However, we do it only if the application
 412  413           * provides the ps_kill() and ps_lrolltoaddr() interfaces.
 413  414           * (dbx provides the ps_kill() and ps_lrolltoaddr() interfaces.)
 414  415           */
 415  416          if (return_val == TD_OK && ps_kill != NULL && ps_lrolltoaddr != NULL) {
 416  417                  register_sync_t oldenable;
 417  418                  register_sync_t enable = REGISTER_SYNC_ENABLE;
 418  419                  psaddr_t psaddr = ta_p->tdb_register_sync_addr;
 419  420  
 420  421                  if (ps_pdread(ph_p, psaddr,
 421  422                      &oldenable, sizeof (oldenable)) != PS_OK)
 422  423                          return_val = TD_DBERR;
 423  424                  else if (oldenable != REGISTER_SYNC_OFF ||
 424  425                      ps_pdwrite(ph_p, psaddr,
 425  426                      &enable, sizeof (enable)) != PS_OK) {
 426  427                          /*
 427  428                           * Lock tracking was already enabled or we
 428  429                           * failed to enable it, probably because we
 429  430                           * are examining a core file.  In either case
 430  431                           * set the sync_tracking flag non-zero to
 431  432                           * indicate that we should not attempt to
 432  433                           * disable lock tracking when we delete the
 433  434                           * agent process handle in td_ta_delete().
 434  435                           */
 435  436                          ta_p->sync_tracking = 1;
 436  437                  }
 437  438          }
 438  439  
 439  440          if (return_val == TD_OK)
 440  441                  *ta_pp = ta_p;
 441  442          else
 442  443                  free(ta_p);
 443  444  
 444  445          (void) ps_pcontinue(ph_p);
 445  446          return (return_val);
 446  447  }
 447  448  
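As a consumer-side illustration of the lifecycle implemented above (a minimal sketch, not part of this change): the caller supplies a struct ps_prochandle from a libproc-style client that implements the ps_pstop(), ps_pcontinue(), ps_pglobal_lookup(), and ps_pdread() callbacks this file depends on, and error handling is pared down to the return codes seen above.

    #include <thread_db.h>

    /*
     * Hypothetical helper: bracket some debugger work with an agent.
     * td_ta_new() stops the target, locates _tdb_bootstrap/_uberdata,
     * and resumes it; TD_NOLIBTHREAD means libc is not linked yet.
     */
    static td_err_e
    with_thread_agent(struct ps_prochandle *ph)
    {
            td_thragent_t *ta;
            td_err_e err;

            if ((err = td_init()) != TD_OK)
                    return (err);
            if ((err = td_ta_new(ph, &ta)) != TD_OK)
                    return (err);

            /* ... td_ta_*() and td_thr_*() calls go here ... */

            /*
             * As the td_ta_delete() block comment below notes, deletion
             * only NULLs the embedded prochandle; the agent's storage
             * itself is never freed.
             */
            return (td_ta_delete(ta));
    }
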
 448  449  /*
 449  450   * Utility function to grab the readers lock and return the prochandle,
 450  451   * given an agent process handle.  Performs standard error checking.
 451  452   * Returns non-NULL with the lock held, or NULL with the lock not held.
 452  453   */
 453  454  static struct ps_prochandle *
 454  455  ph_lock_ta(td_thragent_t *ta_p, td_err_e *err)
 455  456  {
 456  457          struct ps_prochandle *ph_p = NULL;
 457  458          td_err_e error;
 458  459  
 459  460          if (ta_p == NULL || ta_p->initialized == -1) {
 460  461                  *err = TD_BADTA;
 461  462          } else if (rw_rdlock(&ta_p->rwlock) != 0) {     /* can't happen? */
 462  463                  *err = TD_BADTA;
 463  464          } else if ((ph_p = ta_p->ph_p) == NULL) {
 464  465                  (void) rw_unlock(&ta_p->rwlock);
 465  466                  *err = TD_BADPH;
 466  467          } else if (ta_p->initialized != 2 &&
 467  468              (error = td_read_bootstrap_data(ta_p)) != TD_OK) {
 468  469                  (void) rw_unlock(&ta_p->rwlock);
 469  470                  ph_p = NULL;
 470  471                  *err = error;
 471  472          } else {
 472  473                  *err = TD_OK;
 473  474          }
 474  475  
 475  476          return (ph_p);
 476  477  }
 477  478  
 478  479  /*
 479  480   * Utility function to grab the readers lock and return the prochandle,
 480  481   * given an agent thread handle.  Performs standard error checking.
 481  482   * Returns non-NULL with the lock held, or NULL with the lock not held.
 482  483   */
 483  484  static struct ps_prochandle *
 484  485  ph_lock_th(const td_thrhandle_t *th_p, td_err_e *err)
 485  486  {
 486  487          if (th_p == NULL || th_p->th_unique == 0) {
 487  488                  *err = TD_BADTH;
 488  489                  return (NULL);
 489  490          }
 490  491          return (ph_lock_ta(th_p->th_ta_p, err));
 491  492  }
 492  493  
 493  494  /*
 494  495   * Utility function to grab the readers lock and return the prochandle,
 495  496   * given a synchronization object handle.  Performs standard error checking.
 496  497   * Returns non-NULL with the lock held, or NULL with the lock not held.
 497  498   */
 498  499  static struct ps_prochandle *
 499  500  ph_lock_sh(const td_synchandle_t *sh_p, td_err_e *err)
 500  501  {
 501  502          if (sh_p == NULL || sh_p->sh_unique == 0) {
 502  503                  *err = TD_BADSH;
 503  504                  return (NULL);
 504  505          }
 505  506          return (ph_lock_ta(sh_p->sh_ta_p, err));
 506  507  }
 507  508  
 508  509  /*
 509  510   * Unlock the agent process handle obtained from ph_lock_*().
 510  511   */
 511  512  static void
 512  513  ph_unlock(td_thragent_t *ta_p)
 513  514  {
 514  515          (void) rw_unlock(&ta_p->rwlock);
 515  516  }
 516  517  
 517  518  /*
 518  519   * De-allocate an agent process handle,
 519  520   * releasing all related resources.
 520  521   *
 521  522   * XXX -- This is hopelessly broken ---
 522  523   * Storage for thread agent is not deallocated.  The prochandle
 523  524   * in the thread agent is set to NULL so that future uses of
 524  525   * the thread agent can be detected and an error value returned.
 525  526   * All functions in the external user interface that make
 526  527   * use of the thread agent are expected
 527  528   * to check for a NULL prochandle in the thread agent.
 528  529   * All such functions are also expected to obtain a
 529  530   * reader lock on the thread agent while it is using it.
 530  531   */
 531  532  #pragma weak td_ta_delete = __td_ta_delete
 532  533  td_err_e
 533  534  __td_ta_delete(td_thragent_t *ta_p)
 534  535  {
 535  536          struct ps_prochandle *ph_p;
 536  537  
 537  538          /*
 538  539           * This is the only place we grab the writer lock.
 539  540           * We are going to NULL out the prochandle.
 540  541           */
 541  542          if (ta_p == NULL || rw_wrlock(&ta_p->rwlock) != 0)
 542  543                  return (TD_BADTA);
 543  544          if ((ph_p = ta_p->ph_p) == NULL) {
 544  545                  (void) rw_unlock(&ta_p->rwlock);
 545  546                  return (TD_BADPH);
 546  547          }
 547  548          /*
 548  549           * If synch. tracking was disabled when td_ta_new() was called and
 549  550           * if td_ta_sync_tracking_enable() was never called, then disable
 550  551           * synch. tracking (it was enabled by default in td_ta_new()).
 551  552           */
 552  553          if (ta_p->sync_tracking == 0 &&
 553  554              ps_kill != NULL && ps_lrolltoaddr != NULL) {
 554  555                  register_sync_t enable = REGISTER_SYNC_DISABLE;
 555  556  
 556  557                  (void) ps_pdwrite(ph_p, ta_p->tdb_register_sync_addr,
 557  558                      &enable, sizeof (enable));
 558  559          }
 559  560          ta_p->ph_p = NULL;
 560  561          (void) rw_unlock(&ta_p->rwlock);
 561  562          return (TD_OK);
 562  563  }
 563  564  
 564  565  /*
 565  566   * Map an agent process handle to a client prochandle.
 566  567   * Currently unused by dbx.
 567  568   */
 568  569  #pragma weak td_ta_get_ph = __td_ta_get_ph
 569  570  td_err_e
 570  571  __td_ta_get_ph(td_thragent_t *ta_p, struct ps_prochandle **ph_pp)
 571  572  {
 572  573          td_err_e return_val;
 573  574  
 574  575          if (ph_pp != NULL)      /* protect stupid callers */
 575  576                  *ph_pp = NULL;
 576  577          if (ph_pp == NULL)
 577  578                  return (TD_ERR);
 578  579          if ((*ph_pp = ph_lock_ta(ta_p, &return_val)) == NULL)
 579  580                  return (return_val);
 580  581          ph_unlock(ta_p);
 581  582          return (TD_OK);
 582  583  }
 583  584  
 584  585  /*
 585  586   * Set the process's suggested concurrency level.
 586  587   * This is a no-op in a one-level model.
 587  588   * Currently unused by dbx.
 588  589   */
 589  590  #pragma weak td_ta_setconcurrency = __td_ta_setconcurrency
 590  591  /* ARGSUSED1 */
 591  592  td_err_e
 592  593  __td_ta_setconcurrency(const td_thragent_t *ta_p, int level)
 593  594  {
 594  595          if (ta_p == NULL)
 595  596                  return (TD_BADTA);
 596  597          if (ta_p->ph_p == NULL)
 597  598                  return (TD_BADPH);
 598  599          return (TD_OK);
 599  600  }
 600  601  
 601  602  /*
 602  603   * Get the number of threads in the process.
 603  604   */
 604  605  #pragma weak td_ta_get_nthreads = __td_ta_get_nthreads
 605  606  td_err_e
 606  607  __td_ta_get_nthreads(td_thragent_t *ta_p, int *nthread_p)
 607  608  {
 608  609          struct ps_prochandle *ph_p;
 609  610          td_err_e return_val;
 610  611          int nthreads;
 611  612          int nzombies;
 612  613          psaddr_t nthreads_addr;
 613  614          psaddr_t nzombies_addr;
 614  615  
 615  616          if (ta_p->model == PR_MODEL_NATIVE) {
 616  617                  nthreads_addr = ta_p->uberdata_addr +
 617  618                      offsetof(uberdata_t, nthreads);
 618  619                  nzombies_addr = ta_p->uberdata_addr +
 619  620                      offsetof(uberdata_t, nzombies);
 620  621          } else {
 621  622  #if defined(_LP64) && defined(_SYSCALL32)
 622  623                  nthreads_addr = ta_p->uberdata_addr +
 623  624                      offsetof(uberdata32_t, nthreads);
 624  625                  nzombies_addr = ta_p->uberdata_addr +
 625  626                      offsetof(uberdata32_t, nzombies);
 626  627  #else
 627  628                  nthreads_addr = 0;
 628  629                  nzombies_addr = 0;
 629  630  #endif  /* _SYSCALL32 */
 630  631          }
 631  632  
 632  633          if (nthread_p == NULL)
 633  634                  return (TD_ERR);
 634  635          if ((ph_p = ph_lock_ta(ta_p, &return_val)) == NULL)
 635  636                  return (return_val);
 636  637          if (ps_pdread(ph_p, nthreads_addr, &nthreads, sizeof (int)) != PS_OK)
 637  638                  return_val = TD_DBERR;
 638  639          if (ps_pdread(ph_p, nzombies_addr, &nzombies, sizeof (int)) != PS_OK)
 639  640                  return_val = TD_DBERR;
 640  641          ph_unlock(ta_p);
 641  642          if (return_val == TD_OK)
 642  643                  *nthread_p = nthreads + nzombies;
 643  644          return (return_val);
 644  645  }
 645  646  
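One usage note, as a sketch (assuming ta is an agent handle from td_ta_new()): because of the nthreads + nzombies sum above, the count reported here includes zombie threads.

    int n;

    if (td_ta_get_nthreads(ta, &n) == TD_OK)
            (void) printf("%d threads (live + zombie)\n", n);
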
 646  647  typedef struct {
 647  648          thread_t        tid;
 648  649          int             found;
 649  650          td_thrhandle_t  th;
 650  651  } td_mapper_param_t;
 651  652  
 652  653  /*
 653  654   * Check the value in data against the thread id.
 654  655   * If it matches, return 1 to terminate iterations.
 655  656   * This function is used by td_ta_map_id2thr() to map a tid to a thread handle.
 656  657   */
 657  658  static int
 658  659  td_mapper_id2thr(td_thrhandle_t *th_p, td_mapper_param_t *data)
 659  660  {
 660  661          td_thrinfo_t ti;
 661  662  
 662  663          if (__td_thr_get_info(th_p, &ti) == TD_OK &&
 663  664              data->tid == ti.ti_tid) {
 664  665                  data->found = 1;
 665  666                  data->th = *th_p;
 666  667                  return (1);
 667  668          }
 668  669          return (0);
 669  670  }
 670  671  
 671  672  /*
 672  673   * Given a thread identifier, return the corresponding thread handle.
 673  674   */
 674  675  #pragma weak td_ta_map_id2thr = __td_ta_map_id2thr
 675  676  td_err_e
 676  677  __td_ta_map_id2thr(td_thragent_t *ta_p, thread_t tid,
 677  678      td_thrhandle_t *th_p)
 678  679  {
 679  680          td_err_e                return_val;
 680  681          td_mapper_param_t       data;
 681  682  
 682  683          if (th_p != NULL &&     /* optimize for a single thread */
 683  684              ta_p != NULL &&
 684  685              ta_p->initialized == 1 &&
 685  686              (td_read_hash_size(ta_p) == 1 ||
 686  687              td_read_uberdata(ta_p) == TD_OK) &&
 687  688              ta_p->initialized == 1 &&
 688  689              ta_p->single_lwpid == tid) {
 689  690                  th_p->th_ta_p = ta_p;
 690  691                  if ((th_p->th_unique = ta_p->single_ulwp_addr) == 0)
 691  692                          return (TD_NOTHR);
 692  693                  return (TD_OK);
 693  694          }
 694  695  
 695  696          /*
 696  697           * LOCKING EXCEPTION - Locking is not required here because
 697  698           * the locking and checking will be done in __td_ta_thr_iter.
 698  699           */
 699  700  
 700  701          if (ta_p == NULL)
 701  702                  return (TD_BADTA);
 702  703          if (th_p == NULL)
 703  704                  return (TD_BADTH);
 704  705          if (tid == 0)
 705  706                  return (TD_NOTHR);
 706  707  
 707  708          data.tid = tid;
 708  709          data.found = 0;
 709  710          return_val = __td_ta_thr_iter(ta_p,
 710  711              (td_thr_iter_f *)td_mapper_id2thr, (void *)&data,
 711  712              TD_THR_ANY_STATE, TD_THR_LOWEST_PRIORITY,
 712  713              TD_SIGNO_MASK, TD_THR_ANY_USER_FLAGS);
 713  714          if (return_val == TD_OK) {
 714  715                  if (data.found == 0)
 715  716                          return_val = TD_NOTHR;
 716  717                  else
 717  718                          *th_p = data.th;
 718  719          }
 719  720  
 720  721          return (return_val);
 721  722  }
 722  723  
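A sketch of the usual pattern layered on the routine above: resolve a tid to a handle, then query it with td_thr_get_info() (declared near the top of this file), whose ti_tid is the same field td_mapper_id2thr() compares. The variables ta and tid are assumed to be in scope.

    td_thrhandle_t th;
    td_thrinfo_t ti;

    if (td_ta_map_id2thr(ta, tid, &th) == TD_OK &&
        td_thr_get_info(&th, &ti) == TD_OK) {
            /* ti.ti_state is a td_thr_state_e, as filtered in thr_iter */
            (void) printf("tid %u state %d\n", (uint_t)ti.ti_tid,
                ti.ti_state);
    }
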
 723  724  /*
 724  725   * Map the address of a synchronization object to a sync. object handle.
 725  726   */
 726  727  #pragma weak td_ta_map_addr2sync = __td_ta_map_addr2sync
 727  728  td_err_e
 728  729  __td_ta_map_addr2sync(td_thragent_t *ta_p, psaddr_t addr, td_synchandle_t *sh_p)
 729  730  {
 730  731          struct ps_prochandle *ph_p;
 731  732          td_err_e return_val;
 732  733          uint16_t sync_magic;
 733  734  
 734  735          if (sh_p == NULL)
 735  736                  return (TD_BADSH);
 736  737          if (addr == 0)
 737  738                  return (TD_ERR);
 738  739          if ((ph_p = ph_lock_ta(ta_p, &return_val)) == NULL)
 739  740                  return (return_val);
 740  741          /*
 741  742           * Check the magic number of the sync. object to make sure it's valid.
 742  743           * The magic number is at the same offset for all sync. objects.
 743  744           */
 744  745          if (ps_pdread(ph_p, (psaddr_t)&((mutex_t *)addr)->mutex_magic,
 745  746              &sync_magic, sizeof (sync_magic)) != PS_OK) {
 746  747                  ph_unlock(ta_p);
 747  748                  return (TD_BADSH);
 748  749          }
 749  750          ph_unlock(ta_p);
 750  751          if (sync_magic != MUTEX_MAGIC && sync_magic != COND_MAGIC &&
 751  752              sync_magic != SEMA_MAGIC && sync_magic != RWL_MAGIC)
 752  753                  return (TD_BADSH);
 753  754          /*
 754  755           * Just fill in the appropriate fields of the sync. handle.
 755  756           */
 756  757          sh_p->sh_ta_p = (td_thragent_t *)ta_p;
 757  758          sh_p->sh_unique = addr;
 758  759          return (TD_OK);
 759  760  }
 760  761  
 761  762  /*
 762  763   * Iterate over the set of global TSD keys.
 763  764   * The call back function is called with three arguments,
 764  765   * a key, a pointer to the destructor function, and the cbdata pointer.
 765  766   * Currently unused by dbx.
 766  767   */
 767  768  #pragma weak td_ta_tsd_iter = __td_ta_tsd_iter
 768  769  td_err_e
 769  770  __td_ta_tsd_iter(td_thragent_t *ta_p, td_key_iter_f *cb, void *cbdata_p)
 770  771  {
 771  772          struct ps_prochandle *ph_p;
 772  773          td_err_e        return_val;
 773  774          int             key;
 774  775          int             numkeys;
 775  776          psaddr_t        dest_addr;
 776  777          psaddr_t        *destructors = NULL;
 777  778          PFrV            destructor;
 778  779  
 779  780          if (cb == NULL)
 780  781                  return (TD_ERR);
 781  782          if ((ph_p = ph_lock_ta(ta_p, &return_val)) == NULL)
 782  783                  return (return_val);
 783  784          if (ps_pstop(ph_p) != PS_OK) {
 784  785                  ph_unlock(ta_p);
 785  786                  return (TD_DBERR);
 786  787          }
 787  788  
 788  789          if (ta_p->model == PR_MODEL_NATIVE) {
 789  790                  tsd_metadata_t tsdm;
 790  791  
 791  792                  if (ps_pdread(ph_p,
 792  793                      ta_p->uberdata_addr + offsetof(uberdata_t, tsd_metadata),
 793  794                      &tsdm, sizeof (tsdm)) != PS_OK)
 794  795                          return_val = TD_DBERR;
 795  796                  else {
 796  797                          numkeys = tsdm.tsdm_nused;
 797  798                          dest_addr = (psaddr_t)tsdm.tsdm_destro;
 798  799                          if (numkeys > 0)
 799  800                                  destructors =
 800  801                                      malloc(numkeys * sizeof (psaddr_t));
 801  802                  }
 802  803          } else {
 803  804  #if defined(_LP64) && defined(_SYSCALL32)
 804  805                  tsd_metadata32_t tsdm;
 805  806  
 806  807                  if (ps_pdread(ph_p,
 807  808                      ta_p->uberdata_addr + offsetof(uberdata32_t, tsd_metadata),
 808  809                      &tsdm, sizeof (tsdm)) != PS_OK)
 809  810                          return_val = TD_DBERR;
 810  811                  else {
 811  812                          numkeys = tsdm.tsdm_nused;
 812  813                          dest_addr = (psaddr_t)tsdm.tsdm_destro;
 813  814                          if (numkeys > 0)
 814  815                                  destructors =
 815  816                                      malloc(numkeys * sizeof (caddr32_t));
 816  817                  }
 817  818  #else
 818  819                  return_val = TD_DBERR;
 819  820  #endif  /* _SYSCALL32 */
 820  821          }
 821  822  
 822  823          if (return_val != TD_OK || numkeys <= 0) {
 823  824                  (void) ps_pcontinue(ph_p);
 824  825                  ph_unlock(ta_p);
 825  826                  return (return_val);
 826  827          }
 827  828  
 828  829          if (destructors == NULL)
 829  830                  return_val = TD_MALLOC;
 830  831          else if (ta_p->model == PR_MODEL_NATIVE) {
 831  832                  if (ps_pdread(ph_p, dest_addr,
 832  833                      destructors, numkeys * sizeof (psaddr_t)) != PS_OK)
 833  834                          return_val = TD_DBERR;
 834  835                  else {
 835  836                          for (key = 1; key < numkeys; key++) {
 836  837                                  destructor = (PFrV)destructors[key];
 837  838                                  if (destructor != TSD_UNALLOCATED &&
 838  839                                      (*cb)(key, destructor, cbdata_p))
 839  840                                          break;
 840  841                          }
 841  842                  }
 842  843  #if defined(_LP64) && defined(_SYSCALL32)
 843  844          } else {
 844  845                  caddr32_t *destructors32 = (caddr32_t *)destructors;
 845  846                  caddr32_t destruct32;
 846  847  
 847  848                  if (ps_pdread(ph_p, dest_addr,
 848  849                      destructors32, numkeys * sizeof (caddr32_t)) != PS_OK)
 849  850                          return_val = TD_DBERR;
 850  851                  else {
 851  852                          for (key = 1; key < numkeys; key++) {
 852  853                                  destruct32 = destructors32[key];
 853  854                                  if ((destruct32 !=
 854  855                                      (caddr32_t)(uintptr_t)TSD_UNALLOCATED) &&
 855  856                                      (*cb)(key, (PFrV)(uintptr_t)destruct32,
 856  857                                      cbdata_p))
 857  858                                          break;
 858  859                          }
 859  860                  }
 860  861  #endif  /* _SYSCALL32 */
 861  862          }
 862  863  
 863  864          if (destructors)
 864  865                  free(destructors);
 865  866          (void) ps_pcontinue(ph_p);
 866  867          ph_unlock(ta_p);
 867  868          return (return_val);
 868  869  }
 869  870  
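A sketch of a matching callback, assuming the td_key_iter_f signature from <thread_db.h> (key, destructor, callback data); the iterator above only invokes it for keys whose destructor slot is not TSD_UNALLOCATED.

    static int
    print_key(thread_key_t key, void (*destructor)(void *), void *cbdata)
    {
            (void) printf("TSD key %u destructor %p\n", key,
                (void *)destructor);
            return (0);     /* non-zero would stop the iteration */
    }

    /* ... */
    (void) td_ta_tsd_iter(ta, print_key, NULL);
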
 870  871  int
 871  872  sigequalset(const sigset_t *s1, const sigset_t *s2)
 872  873  {
 873  874          return (
 874  875              s1->__sigbits[0] == s2->__sigbits[0] &&
 875  876              s1->__sigbits[1] == s2->__sigbits[1] &&
 876  877              s1->__sigbits[2] == s2->__sigbits[2] &&
 877  878              s1->__sigbits[3] == s2->__sigbits[3]);
 878  879  }
 879  880  
 880  881  /*
 881  882   * Description:
 882  883   *   Iterate over all threads. For each thread call
 883  884   * the function pointed to by "cb" with a pointer
 884  885   * to a thread handle, and a pointer to data which
 885  886   * can be NULL. Only call td_thr_iter_f() on threads
 886  887   * which match the properties of state, ti_pri,
 887  888   * ti_sigmask_p, and ti_user_flags.  If cb returns
 888  889   * a non-zero value, terminate iterations.
 889  890   *
 890  891   * Input:
 891  892   *   *ta_p - thread agent
 892  893   *   *cb - call back function defined by user.
 893  894   * td_thr_iter_f() takes a thread handle and
 894  895   * cbdata_p as a parameter.
 895  896   *   cbdata_p - parameter for td_thr_iter_f().
 896  897   *
 897  898   *   state - state of threads of interest.  A value of
 898  899   * TD_THR_ANY_STATE from enum td_thr_state_e
 899  900   * does not restrict iterations by state.
 900  901   *   ti_pri - lower bound of priorities of threads of
 901  902   * interest.  A value of TD_THR_LOWEST_PRIORITY
 902  903   * defined in thread_db.h does not restrict
 903  904   * iterations by priority.  A thread with priority
 904  905   * less than ti_pri will NOT be passed to the callback
 905  906   * function.
 906  907   *   ti_sigmask_p - signal mask of threads of interest.
 907  908   * A value of TD_SIGNO_MASK defined in thread_db.h
 908  909   * does not restrict iterations by signal mask.
 909  910   *   ti_user_flags - user flags of threads of interest.  A
 910  911   * value of TD_THR_ANY_USER_FLAGS defined in thread_db.h
 911  912   * does not restrict iterations by user flags.
 912  913   */
 913  914  #pragma weak td_ta_thr_iter = __td_ta_thr_iter
 914  915  td_err_e
 915  916  __td_ta_thr_iter(td_thragent_t *ta_p, td_thr_iter_f *cb,
 916  917      void *cbdata_p, td_thr_state_e state, int ti_pri,
 917  918      sigset_t *ti_sigmask_p, unsigned ti_user_flags)
 918  919  {
 919  920          struct ps_prochandle *ph_p;
 920  921          psaddr_t        first_lwp_addr;
 921  922          psaddr_t        first_zombie_addr;
 922  923          psaddr_t        curr_lwp_addr;
 923  924          psaddr_t        next_lwp_addr;
 924  925          td_thrhandle_t  th;
 925  926          ps_err_e        db_return;
 926  927          ps_err_e        db_return2;
 927  928          td_err_e        return_val;
 928  929  
 929  930          if (cb == NULL)
 930  931                  return (TD_ERR);
 931  932          /*
 932  933           * If state is not within bound, short circuit.
 933  934           */
 934  935          if (state < TD_THR_ANY_STATE || state > TD_THR_STOPPED_ASLEEP)
 935  936                  return (TD_OK);
 936  937  
 937  938          if ((ph_p = ph_lock_ta(ta_p, &return_val)) == NULL)
 938  939                  return (return_val);
 939  940          if (ps_pstop(ph_p) != PS_OK) {
 940  941                  ph_unlock(ta_p);
 941  942                  return (TD_DBERR);
 942  943          }
 943  944  
 944  945          /*
 945  946           * For each ulwp_t in the circular linked lists pointed
 946  947           * to by "all_lwps" and "all_zombies":
 947  948           * (1) Filter each thread.
 948  949           * (2) Create the thread_object for each thread that passes.
 949  950           * (3) Call the call back function on each thread.
 950  951           */
 951  952  
 952  953          if (ta_p->model == PR_MODEL_NATIVE) {
 953  954                  db_return = ps_pdread(ph_p,
 954  955                      ta_p->uberdata_addr + offsetof(uberdata_t, all_lwps),
 955  956                      &first_lwp_addr, sizeof (first_lwp_addr));
 956  957                  db_return2 = ps_pdread(ph_p,
 957  958                      ta_p->uberdata_addr + offsetof(uberdata_t, all_zombies),
 958  959                      &first_zombie_addr, sizeof (first_zombie_addr));
 959  960          } else {
 960  961  #if defined(_LP64) && defined(_SYSCALL32)
 961  962                  caddr32_t addr32;
 962  963  
 963  964                  db_return = ps_pdread(ph_p,
 964  965                      ta_p->uberdata_addr + offsetof(uberdata32_t, all_lwps),
 965  966                      &addr32, sizeof (addr32));
 966  967                  first_lwp_addr = addr32;
 967  968                  db_return2 = ps_pdread(ph_p,
 968  969                      ta_p->uberdata_addr + offsetof(uberdata32_t, all_zombies),
 969  970                      &addr32, sizeof (addr32));
 970  971                  first_zombie_addr = addr32;
 971  972  #else   /* _SYSCALL32 */
 972  973                  db_return = PS_ERR;
 973  974                  db_return2 = PS_ERR;
 974  975  #endif  /* _SYSCALL32 */
 975  976          }
 976  977          if (db_return == PS_OK)
 977  978                  db_return = db_return2;
 978  979  
 979  980          /*
 980  981           * If first_lwp_addr and first_zombie_addr are both NULL,
 981  982           * libc must not yet be initialized or all threads have
 982  983           * exited.  Return TD_NOTHR and all will be well.
 983  984           */
 984  985          if (db_return == PS_OK &&
 985  986              first_lwp_addr == 0 && first_zombie_addr == 0) {
 986  987                  (void) ps_pcontinue(ph_p);
 987  988                  ph_unlock(ta_p);
 988  989                  return (TD_NOTHR);
 989  990          }
 990  991          if (db_return != PS_OK) {
 991  992                  (void) ps_pcontinue(ph_p);
 992  993                  ph_unlock(ta_p);
 993  994                  return (TD_DBERR);
 994  995          }
 995  996  
 996  997          /*
 997  998           * Run down the lists of all living and dead lwps.
 998  999           */
 999 1000          if (first_lwp_addr == 0)
1000 1001                  first_lwp_addr = first_zombie_addr;
1001 1002          curr_lwp_addr = first_lwp_addr;
1002 1003          for (;;) {
1003 1004                  td_thr_state_e ts_state;
1004 1005                  int userpri;
1005 1006                  unsigned userflags;
1006 1007                  sigset_t mask;
1007 1008  
1008 1009                  /*
1009 1010                   * Read the ulwp struct.
1010 1011                   */
1011 1012                  if (ta_p->model == PR_MODEL_NATIVE) {
1012 1013                          ulwp_t ulwp;
1013 1014  
1014 1015                          if (ps_pdread(ph_p, curr_lwp_addr,
1015 1016                              &ulwp, sizeof (ulwp)) != PS_OK &&
1016 1017                              ((void) memset(&ulwp, 0, sizeof (ulwp)),
1017 1018                              ps_pdread(ph_p, curr_lwp_addr,
1018 1019                              &ulwp, REPLACEMENT_SIZE)) != PS_OK) {
1019 1020                                  return_val = TD_DBERR;
1020 1021                                  break;
1021 1022                          }
1022 1023                          next_lwp_addr = (psaddr_t)ulwp.ul_forw;
1023 1024  
1024 1025                          ts_state = ulwp.ul_dead? TD_THR_ZOMBIE :
1025 1026                              ulwp.ul_stop? TD_THR_STOPPED :
1026 1027                              ulwp.ul_wchan? TD_THR_SLEEP :
1027 1028                              TD_THR_ACTIVE;
1028 1029                          userpri = ulwp.ul_pri;
1029 1030                          userflags = ulwp.ul_usropts;
1030 1031                          if (ulwp.ul_dead)
1031 1032                                  (void) sigemptyset(&mask);
1032 1033                          else
1033 1034                                  mask = *(sigset_t *)&ulwp.ul_sigmask;
1034 1035                  } else {
1035 1036  #if defined(_LP64) && defined(_SYSCALL32)
1036 1037                          ulwp32_t ulwp;
1037 1038  
1038 1039                          if (ps_pdread(ph_p, curr_lwp_addr,
1039 1040                              &ulwp, sizeof (ulwp)) != PS_OK &&
1040 1041                              ((void) memset(&ulwp, 0, sizeof (ulwp)),
1041 1042                              ps_pdread(ph_p, curr_lwp_addr,
1042 1043                              &ulwp, REPLACEMENT_SIZE32)) != PS_OK) {
1043 1044                                  return_val = TD_DBERR;
1044 1045                                  break;
1045 1046                          }
1046 1047                          next_lwp_addr = (psaddr_t)ulwp.ul_forw;
1047 1048  
1048 1049                          ts_state = ulwp.ul_dead? TD_THR_ZOMBIE :
1049 1050                              ulwp.ul_stop? TD_THR_STOPPED :
1050 1051                              ulwp.ul_wchan? TD_THR_SLEEP :
1051 1052                              TD_THR_ACTIVE;
1052 1053                          userpri = ulwp.ul_pri;
1053 1054                          userflags = ulwp.ul_usropts;
1054 1055                          if (ulwp.ul_dead)
1055 1056                                  (void) sigemptyset(&mask);
1056 1057                          else
1057 1058                                  mask = *(sigset_t *)&ulwp.ul_sigmask;
1058 1059  #else   /* _SYSCALL32 */
1059 1060                          return_val = TD_ERR;
1060 1061                          break;
1061 1062  #endif  /* _SYSCALL32 */
1062 1063                  }
1063 1064  
1064 1065                  /*
1065 1066                   * Filter on state, priority, sigmask, and user flags.
1066 1067                   */
1067 1068  
1068 1069                  if ((state != ts_state) &&
1069 1070                      (state != TD_THR_ANY_STATE))
1070 1071                          goto advance;
1071 1072  
1072 1073                  if (ti_pri > userpri)
1073 1074                          goto advance;
1074 1075  
1075 1076                  if (ti_sigmask_p != TD_SIGNO_MASK &&
1076 1077                      !sigequalset(ti_sigmask_p, &mask))
1077 1078                          goto advance;
1078 1079  
1079 1080                  if (ti_user_flags != userflags &&
1080 1081                      ti_user_flags != (unsigned)TD_THR_ANY_USER_FLAGS)
1081 1082                          goto advance;
1082 1083  
1083 1084                  /*
1084 1085                   * Call back - break if the return
1085 1086                   * from the call back is non-zero.
1086 1087                   */
1087 1088                  th.th_ta_p = (td_thragent_t *)ta_p;
1088 1089                  th.th_unique = curr_lwp_addr;
1089 1090                  if ((*cb)(&th, cbdata_p))
1090 1091                          break;
1091 1092  
1092 1093  advance:
1093 1094                  if ((curr_lwp_addr = next_lwp_addr) == first_lwp_addr) {
1094 1095                          /*
1095 1096                           * Switch to the zombie list, unless it is NULL
1096 1097                           * or we have already been doing the zombie list,
1097 1098                           * in which case terminate the loop.
1098 1099                           */
1099 1100                          if (first_zombie_addr == 0 ||
1100 1101                              first_lwp_addr == first_zombie_addr)
1101 1102                                  break;
1102 1103                          curr_lwp_addr = first_lwp_addr = first_zombie_addr;
1103 1104                  }
1104 1105          }
1105 1106  
1106 1107          (void) ps_pcontinue(ph_p);
1107 1108          ph_unlock(ta_p);
1108 1109          return (return_val);
1109 1110  }
1110 1111  
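Tying the Description block above together, a minimal sketch of an unfiltered walk; the wildcard arguments are exactly the ones this file itself passes in __td_ta_map_id2thr(), and ta is assumed to be a valid agent handle.

    static int
    count_thr(const td_thrhandle_t *th, void *cbdata)
    {
            (*(int *)cbdata)++;
            return (0);     /* zero: keep iterating */
    }

    int n = 0;
    (void) td_ta_thr_iter(ta, count_thr, &n, TD_THR_ANY_STATE,
        TD_THR_LOWEST_PRIORITY, TD_SIGNO_MASK, TD_THR_ANY_USER_FLAGS);
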
1111 1112  /*
1112 1113   * Enable or disable process synchronization object tracking.
1113 1114   * Currently unused by dbx.
1114 1115   */
1115 1116  #pragma weak td_ta_sync_tracking_enable = __td_ta_sync_tracking_enable
1116 1117  td_err_e
1117 1118  __td_ta_sync_tracking_enable(td_thragent_t *ta_p, int onoff)
1118 1119  {
1119 1120          struct ps_prochandle *ph_p;
1120 1121          td_err_e return_val;
1121 1122          register_sync_t enable;
1122 1123  
1123 1124          if ((ph_p = ph_lock_ta(ta_p, &return_val)) == NULL)
1124 1125                  return (return_val);
1125 1126          /*
1126 1127           * Values of tdb_register_sync in the victim process:
1127 1128           *      REGISTER_SYNC_ENABLE    enables registration of synch objects
1128 1129           *      REGISTER_SYNC_DISABLE   disables registration of synch objects
1129 1130           * These cause the table to be cleared and tdb_register_sync set to:
1130 1131           *      REGISTER_SYNC_ON        registration in effect
1131 1132           *      REGISTER_SYNC_OFF       registration not in effect
1132 1133           */
1133 1134          enable = onoff? REGISTER_SYNC_ENABLE : REGISTER_SYNC_DISABLE;
1134 1135          if (ps_pdwrite(ph_p, ta_p->tdb_register_sync_addr,
1135 1136              &enable, sizeof (enable)) != PS_OK)
1136 1137                  return_val = TD_DBERR;
1137 1138          /*
1138 1139           * Remember that this interface was called (see td_ta_delete()).
1139 1140           */
1140 1141          ta_p->sync_tracking = 1;
1141 1142          ph_unlock(ta_p);
1142 1143          return (return_val);
1143 1144  }
1144 1145  
1145 1146  /*
1146 1147   * Iterate over all known synchronization variables.
1147 1148   * It is very possible that the list generated is incomplete,
1148 1149   * because the iterator can only find synchronization variables
1149 1150   * that have been registered by the process since synchronization
1150 1151   * object registration was enabled.
1151 1152   * The call back function cb is called for each synchronization
1152 1153   * variable with two arguments: a pointer to the synchronization
1153 1154   * handle and the passed-in argument cbdata.
1154 1155   * If cb returns a non-zero value, iterations are terminated.
1155 1156   */
1156 1157  #pragma weak td_ta_sync_iter = __td_ta_sync_iter
1157 1158  td_err_e
1158 1159  __td_ta_sync_iter(td_thragent_t *ta_p, td_sync_iter_f *cb, void *cbdata)
1159 1160  {
1160 1161          struct ps_prochandle *ph_p;
1161 1162          td_err_e        return_val;
1162 1163          int             i;
1163 1164          register_sync_t enable;
1164 1165          psaddr_t        next_desc;
1165 1166          tdb_sync_stats_t sync_stats;
1166 1167          td_synchandle_t synchandle;
1167 1168          psaddr_t        psaddr;
1168 1169          void            *vaddr;
1169 1170          uint64_t        *sync_addr_hash = NULL;
1170 1171  
1171 1172          if (cb == NULL)
1172 1173                  return (TD_ERR);
1173 1174          if ((ph_p = ph_lock_ta(ta_p, &return_val)) == NULL)
1174 1175                  return (return_val);
1175 1176          if (ps_pstop(ph_p) != PS_OK) {
1176 1177                  ph_unlock(ta_p);
1177 1178                  return (TD_DBERR);
1178 1179          }
1179 1180          if (ps_pdread(ph_p, ta_p->tdb_register_sync_addr,
1180 1181              &enable, sizeof (enable)) != PS_OK) {
1181 1182                  return_val = TD_DBERR;
1182 1183                  goto out;
1183 1184          }
1184 1185          if (enable != REGISTER_SYNC_ON)
1185 1186                  goto out;
1186 1187  
1187 1188          /*
1188 1189           * First read the hash table.
1189 1190           * The hash table is large; allocate with mmap().
1190 1191           */
1191 1192          if ((vaddr = mmap(NULL, TDB_HASH_SIZE * sizeof (uint64_t),
1192 1193              PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANON, -1, (off_t)0))
1193 1194              == MAP_FAILED) {
1194 1195                  return_val = TD_MALLOC;
1195 1196                  goto out;
1196 1197          }
1197 1198          sync_addr_hash = vaddr;
1198 1199  
1199 1200          if (ta_p->model == PR_MODEL_NATIVE) {
1200 1201                  if (ps_pdread(ph_p, ta_p->uberdata_addr +
1201 1202                      offsetof(uberdata_t, tdb.tdb_sync_addr_hash),
1202 1203                      &psaddr, sizeof (psaddr)) != PS_OK) {
1203 1204                          return_val = TD_DBERR;
1204 1205                          goto out;
1205 1206                  }
1206 1207          } else {
1207 1208  #ifdef  _SYSCALL32
1208 1209                  caddr32_t addr;
1209 1210  
1210 1211                  if (ps_pdread(ph_p, ta_p->uberdata_addr +
1211 1212                      offsetof(uberdata32_t, tdb.tdb_sync_addr_hash),
1212 1213                      &addr, sizeof (addr)) != PS_OK) {
1213 1214                          return_val = TD_DBERR;
1214 1215                          goto out;
1215 1216                  }
1216 1217                  psaddr = addr;
1217 1218  #else
1218 1219                  return_val = TD_ERR;
1219 1220                  goto out;
1220 1221  #endif /* _SYSCALL32 */
1221 1222          }
1222 1223  
1223 1224          if (psaddr == 0)
1224 1225                  goto out;
1225 1226          if (ps_pdread(ph_p, psaddr, sync_addr_hash,
1226 1227              TDB_HASH_SIZE * sizeof (uint64_t)) != PS_OK) {
1227 1228                  return_val = TD_DBERR;
1228 1229                  goto out;
1229 1230          }
1230 1231  
1231 1232          /*
1232 1233           * Now scan the hash table.
1233 1234           */
1234 1235          for (i = 0; i < TDB_HASH_SIZE; i++) {
1235 1236                  for (next_desc = (psaddr_t)sync_addr_hash[i];
1236 1237                      next_desc != 0;
1237 1238                      next_desc = (psaddr_t)sync_stats.next) {
1238 1239                          if (ps_pdread(ph_p, next_desc,
1239 1240                              &sync_stats, sizeof (sync_stats)) != PS_OK) {
1240 1241                                  return_val = TD_DBERR;
1241 1242                                  goto out;
1242 1243                          }
1243 1244                          if (sync_stats.un.type == TDB_NONE) {
1244 1245                                  /* not registered since registration enabled */
1245 1246                                  continue;
1246 1247                          }
1247 1248                          synchandle.sh_ta_p = ta_p;
1248 1249                          synchandle.sh_unique = (psaddr_t)sync_stats.sync_addr;
1249 1250                          if ((*cb)(&synchandle, cbdata) != 0)
1250 1251                                  goto out;
1251 1252                  }
1252 1253          }
1253 1254  
1254 1255  out:
1255 1256          if (sync_addr_hash != NULL)
1256 1257                  (void) munmap((void *)sync_addr_hash,
1257 1258                      TDB_HASH_SIZE * sizeof (uint64_t));
1258 1259          (void) ps_pcontinue(ph_p);
1259 1260          ph_unlock(ta_p);
1260 1261          return (return_val);
1261 1262  }
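
For illustration only: a debugger built on libc_db might drive this
iterator with a callback like the sketch below.  The names print_sync
and ta_p are assumptions, and sync. object registration must already
have been enabled in the target (e.g. via td_ta_sync_tracking_enable()).

    #include <stdio.h>
    #include <thread_db.h>

    /* Hypothetical callback: report each registered sync. object. */
    static int
    print_sync(const td_synchandle_t *sh_p, void *cbdata)
    {
            td_syncinfo_t si;

            if (td_sync_get_info(sh_p, &si) == TD_OK)
                    (void) printf("sync object at 0x%lx, type %d\n",
                        (unsigned long)sh_p->sh_unique, (int)si.si_type);
            return (0);     /* returning non-zero would stop the walk */
    }

    /* ... given an initialized agent handle ta_p ... */
    (void) td_ta_sync_iter(ta_p, print_sync, NULL);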
1262 1263  
1263 1264  /*
1264 1265   * Enable process statistics collection.
1265 1266   */
1266 1267  #pragma weak td_ta_enable_stats = __td_ta_enable_stats
1267 1268  /* ARGSUSED */
1268 1269  td_err_e
1269 1270  __td_ta_enable_stats(const td_thragent_t *ta_p, int onoff)
1270 1271  {
1271 1272          return (TD_NOCAPAB);
1272 1273  }
1273 1274  
1274 1275  /*
1275 1276   * Reset process statistics.
1276 1277   */
1277 1278  #pragma weak td_ta_reset_stats = __td_ta_reset_stats
1278 1279  /* ARGSUSED */
1279 1280  td_err_e
1280 1281  __td_ta_reset_stats(const td_thragent_t *ta_p)
1281 1282  {
1282 1283          return (TD_NOCAPAB);
1283 1284  }
1284 1285  
1285 1286  /*
1286 1287   * Read process statistics.
1287 1288   */
1288 1289  #pragma weak td_ta_get_stats = __td_ta_get_stats
1289 1290  /* ARGSUSED */
1290 1291  td_err_e
1291 1292  __td_ta_get_stats(const td_thragent_t *ta_p, td_ta_stats_t *tstats)
1292 1293  {
1293 1294          return (TD_NOCAPAB);
1294 1295  }
1295 1296  
1296 1297  /*
1297 1298   * Transfer information from lwp struct to thread information struct.
1298 1299   * XXX -- lots of this needs cleaning up.
1299 1300   */
1300 1301  static void
1301 1302  td_thr2to(td_thragent_t *ta_p, psaddr_t ts_addr,
1302 1303      ulwp_t *ulwp, td_thrinfo_t *ti_p)
1303 1304  {
1304 1305          lwpid_t lwpid;
1305 1306  
1306 1307          if ((lwpid = ulwp->ul_lwpid) == 0)
1307 1308                  lwpid = 1;
1308 1309          (void) memset(ti_p, 0, sizeof (*ti_p));
1309 1310          ti_p->ti_ta_p = ta_p;
1310 1311          ti_p->ti_user_flags = ulwp->ul_usropts;
1311 1312          ti_p->ti_tid = lwpid;
1312 1313          ti_p->ti_exitval = ulwp->ul_rval;
1313 1314          ti_p->ti_startfunc = (psaddr_t)ulwp->ul_startpc;
1314 1315          if (!ulwp->ul_dead) {
1315 1316                  /*
1316 1317                   * The bloody fools got this backwards!
1317 1318                   */
1318 1319                  ti_p->ti_stkbase = (psaddr_t)ulwp->ul_stktop;
1319 1320                  ti_p->ti_stksize = ulwp->ul_stksiz;
1320 1321          }
1321 1322          ti_p->ti_ro_area = ts_addr;
1322 1323          ti_p->ti_ro_size = ulwp->ul_replace?
1323 1324              REPLACEMENT_SIZE : sizeof (ulwp_t);
1324 1325          ti_p->ti_state = ulwp->ul_dead? TD_THR_ZOMBIE :
1325 1326              ulwp->ul_stop? TD_THR_STOPPED :
1326 1327              ulwp->ul_wchan? TD_THR_SLEEP :
1327 1328              TD_THR_ACTIVE;
1328 1329          ti_p->ti_db_suspended = 0;
1329 1330          ti_p->ti_type = TD_THR_USER;
1330 1331          ti_p->ti_sp = ulwp->ul_sp;
1331 1332          ti_p->ti_flags = 0;
1332 1333          ti_p->ti_pri = ulwp->ul_pri;
1333 1334          ti_p->ti_lid = lwpid;
1334 1335          if (!ulwp->ul_dead)
1335 1336                  ti_p->ti_sigmask = ulwp->ul_sigmask;
1336 1337          ti_p->ti_traceme = 0;
1337 1338          ti_p->ti_preemptflag = 0;
1338 1339          ti_p->ti_pirecflag = 0;
1339 1340          (void) sigemptyset(&ti_p->ti_pending);
1340 1341          ti_p->ti_events = ulwp->ul_td_evbuf.eventmask;
1341 1342  }
1342 1343  
1343 1344  #if defined(_LP64) && defined(_SYSCALL32)
1344 1345  static void
1345 1346  td_thr2to32(td_thragent_t *ta_p, psaddr_t ts_addr,
1346 1347      ulwp32_t *ulwp, td_thrinfo_t *ti_p)
1347 1348  {
1348 1349          lwpid_t lwpid;
1349 1350  
1350 1351          if ((lwpid = ulwp->ul_lwpid) == 0)
1351 1352                  lwpid = 1;
1352 1353          (void) memset(ti_p, 0, sizeof (*ti_p));
1353 1354          ti_p->ti_ta_p = ta_p;
1354 1355          ti_p->ti_user_flags = ulwp->ul_usropts;
1355 1356          ti_p->ti_tid = lwpid;
1356 1357          ti_p->ti_exitval = (void *)(uintptr_t)ulwp->ul_rval;
1357 1358          ti_p->ti_startfunc = (psaddr_t)ulwp->ul_startpc;
1358 1359          if (!ulwp->ul_dead) {
1359 1360                  /*
1360 1361                   * The bloody fools got this backwards!
1361 1362                   */
1362 1363                  ti_p->ti_stkbase = (psaddr_t)ulwp->ul_stktop;
1363 1364                  ti_p->ti_stksize = ulwp->ul_stksiz;
1364 1365          }
1365 1366          ti_p->ti_ro_area = ts_addr;
1366 1367          ti_p->ti_ro_size = ulwp->ul_replace?
1367 1368              REPLACEMENT_SIZE32 : sizeof (ulwp32_t);
1368 1369          ti_p->ti_state = ulwp->ul_dead? TD_THR_ZOMBIE :
1369 1370              ulwp->ul_stop? TD_THR_STOPPED :
1370 1371              ulwp->ul_wchan? TD_THR_SLEEP :
1371 1372              TD_THR_ACTIVE;
1372 1373          ti_p->ti_db_suspended = 0;
1373 1374          ti_p->ti_type = TD_THR_USER;
1374 1375          ti_p->ti_sp = (uint32_t)ulwp->ul_sp;
1375 1376          ti_p->ti_flags = 0;
1376 1377          ti_p->ti_pri = ulwp->ul_pri;
1377 1378          ti_p->ti_lid = lwpid;
1378 1379          if (!ulwp->ul_dead)
1379 1380                  ti_p->ti_sigmask = *(sigset_t *)&ulwp->ul_sigmask;
1380 1381          ti_p->ti_traceme = 0;
1381 1382          ti_p->ti_preemptflag = 0;
1382 1383          ti_p->ti_pirecflag = 0;
1383 1384          (void) sigemptyset(&ti_p->ti_pending);
1384 1385          ti_p->ti_events = ulwp->ul_td_evbuf.eventmask;
1385 1386  }
1386 1387  #endif  /* _SYSCALL32 */
1387 1388  
1388 1389  /*
1389 1390   * Get thread information.
1390 1391   */
1391 1392  #pragma weak td_thr_get_info = __td_thr_get_info
1392 1393  td_err_e
1393 1394  __td_thr_get_info(td_thrhandle_t *th_p, td_thrinfo_t *ti_p)
1394 1395  {
1395 1396          struct ps_prochandle *ph_p;
1396 1397          td_thragent_t   *ta_p;
1397 1398          td_err_e        return_val;
1398 1399          psaddr_t        psaddr;
1399 1400  
1400 1401          if (ti_p == NULL)
1401 1402                  return (TD_ERR);
1402 1403          (void) memset(ti_p, 0, sizeof (*ti_p));
1403 1404  
1404 1405          if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
1405 1406                  return (return_val);
1406 1407          ta_p = th_p->th_ta_p;
1407 1408          if (ps_pstop(ph_p) != PS_OK) {
1408 1409                  ph_unlock(ta_p);
1409 1410                  return (TD_DBERR);
1410 1411          }
1411 1412  
1412 1413          /*
1413 1414           * Read the ulwp struct from the process.
1414 1415           * Transfer the ulwp struct to the thread information struct.
1415 1416           */
1416 1417          psaddr = th_p->th_unique;
1417 1418          if (ta_p->model == PR_MODEL_NATIVE) {
1418 1419                  ulwp_t ulwp;
1419 1420  
1420 1421                  if (ps_pdread(ph_p, psaddr, &ulwp, sizeof (ulwp)) != PS_OK &&
1421 1422                      ((void) memset(&ulwp, 0, sizeof (ulwp)),
1422 1423                      ps_pdread(ph_p, psaddr, &ulwp, REPLACEMENT_SIZE)) != PS_OK)
1423 1424                          return_val = TD_DBERR;
1424 1425                  else
1425 1426                          td_thr2to(ta_p, psaddr, &ulwp, ti_p);
1426 1427          } else {
1427 1428  #if defined(_LP64) && defined(_SYSCALL32)
1428 1429                  ulwp32_t ulwp;
1429 1430  
1430 1431                  if (ps_pdread(ph_p, psaddr, &ulwp, sizeof (ulwp)) != PS_OK &&
1431 1432                      ((void) memset(&ulwp, 0, sizeof (ulwp)),
1432 1433                      ps_pdread(ph_p, psaddr, &ulwp, REPLACEMENT_SIZE32)) !=
1433 1434                      PS_OK)
1434 1435                          return_val = TD_DBERR;
1435 1436                  else
1436 1437                          td_thr2to32(ta_p, psaddr, &ulwp, ti_p);
1437 1438  #else
1438 1439                  return_val = TD_ERR;
1439 1440  #endif  /* _SYSCALL32 */
1440 1441          }
1441 1442  
1442 1443          (void) ps_pcontinue(ph_p);
1443 1444          ph_unlock(ta_p);
1444 1445          return (return_val);
1445 1446  }
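
As a sketch of typical use (dump_thr and ta_p are names assumed for
the example): a debugger walks every thread with td_ta_thr_iter() and
calls td_thr_get_info() on each handle it is given.

    #include <stdio.h>
    #include <thread_db.h>

    /* Hypothetical callback: print the id and state of each thread. */
    static int
    dump_thr(const td_thrhandle_t *th_p, void *cbdata)
    {
            td_thrinfo_t ti;

            if (td_thr_get_info((td_thrhandle_t *)th_p, &ti) == TD_OK)
                    (void) printf("tid %u lwp %u state %d\n",
                        (uint_t)ti.ti_tid, (uint_t)ti.ti_lid,
                        (int)ti.ti_state);
            return (0);
    }

    (void) td_ta_thr_iter(ta_p, dump_thr, NULL, TD_THR_ANY_STATE,
        TD_THR_LOWEST_PRIORITY, TD_SIGNO_MASK, TD_THR_ANY_USER_FLAGS);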
1446 1447  
1447 1448  /*
1448 1449   * Given a process and an event number, return information about
1449 1450   * an address in the process or at which a breakpoint can be set
1450 1451   * to monitor the event.
1451 1452   */
1452 1453  #pragma weak td_ta_event_addr = __td_ta_event_addr
1453 1454  td_err_e
1454 1455  __td_ta_event_addr(td_thragent_t *ta_p, td_event_e event, td_notify_t *notify_p)
1455 1456  {
1456 1457          if (ta_p == NULL)
1457 1458                  return (TD_BADTA);
1458 1459          if (event < TD_MIN_EVENT_NUM || event > TD_MAX_EVENT_NUM)
1459 1460                  return (TD_NOEVENT);
1460 1461          if (notify_p == NULL)
1461 1462                  return (TD_ERR);
1462 1463  
1463 1464          notify_p->type = NOTIFY_BPT;
1464 1465          notify_p->u.bptaddr = ta_p->tdb_events[event - TD_MIN_EVENT_NUM];
1465 1466  
1466 1467          return (TD_OK);
1467 1468  }
1468 1469  
1469 1470  /*
1470 1471   * Add the events in eventset 2 to eventset 1.
1471 1472   */
1472 1473  static void
1473 1474  eventsetaddset(td_thr_events_t *event1_p, td_thr_events_t *event2_p)
1474 1475  {
1475 1476          int     i;
1476 1477  
1477 1478          for (i = 0; i < TD_EVENTSIZE; i++)
1478 1479                  event1_p->event_bits[i] |= event2_p->event_bits[i];
1479 1480  }
1480 1481  
1481 1482  /*
1482 1483   * Delete the events in eventset 2 from eventset 1.
1483 1484   */
1484 1485  static void
1485 1486  eventsetdelset(td_thr_events_t *event1_p, td_thr_events_t *event2_p)
1486 1487  {
1487 1488          int     i;
1488 1489  
1489 1490          for (i = 0; i < TD_EVENTSIZE; i++)
1490 1491                  event1_p->event_bits[i] &= ~event2_p->event_bits[i];
1491 1492  }
1492 1493  
1493 1494  /*
1494 1495   * Either add or delete the given event set from a thread's event mask.
1495 1496   */
1496 1497  static td_err_e
1497 1498  mod_eventset(td_thrhandle_t *th_p, td_thr_events_t *events, int onoff)
1498 1499  {
1499 1500          struct ps_prochandle *ph_p;
1500 1501          td_err_e        return_val = TD_OK;
1501 1502          char            enable;
1502 1503          td_thr_events_t evset;
1503 1504          psaddr_t        psaddr_evset;
1504 1505          psaddr_t        psaddr_enab;
1505 1506  
1506 1507          if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
1507 1508                  return (return_val);
1508 1509          if (th_p->th_ta_p->model == PR_MODEL_NATIVE) {
1509 1510                  ulwp_t *ulwp = (ulwp_t *)th_p->th_unique;
1510 1511                  psaddr_evset = (psaddr_t)&ulwp->ul_td_evbuf.eventmask;
1511 1512                  psaddr_enab = (psaddr_t)&ulwp->ul_td_events_enable;
1512 1513          } else {
1513 1514  #if defined(_LP64) && defined(_SYSCALL32)
1514 1515                  ulwp32_t *ulwp = (ulwp32_t *)th_p->th_unique;
1515 1516                  psaddr_evset = (psaddr_t)&ulwp->ul_td_evbuf.eventmask;
1516 1517                  psaddr_enab = (psaddr_t)&ulwp->ul_td_events_enable;
1517 1518  #else
1518 1519                  ph_unlock(th_p->th_ta_p);
1519 1520                  return (TD_ERR);
1520 1521  #endif  /* _SYSCALL32 */
1521 1522          }
1522 1523          if (ps_pstop(ph_p) != PS_OK) {
1523 1524                  ph_unlock(th_p->th_ta_p);
1524 1525                  return (TD_DBERR);
1525 1526          }
1526 1527  
1527 1528          if (ps_pdread(ph_p, psaddr_evset, &evset, sizeof (evset)) != PS_OK)
1528 1529                  return_val = TD_DBERR;
1529 1530          else {
1530 1531                  if (onoff)
1531 1532                          eventsetaddset(&evset, events);
1532 1533                  else
1533 1534                          eventsetdelset(&evset, events);
1534 1535                  if (ps_pdwrite(ph_p, psaddr_evset, &evset, sizeof (evset))
1535 1536                      != PS_OK)
1536 1537                          return_val = TD_DBERR;
1537 1538                  else {
1538 1539                          enable = 0;
1539 1540                          if (td_eventismember(&evset, TD_EVENTS_ENABLE))
1540 1541                                  enable = 1;
1541 1542                          if (ps_pdwrite(ph_p, psaddr_enab,
1542 1543                              &enable, sizeof (enable)) != PS_OK)
1543 1544                                  return_val = TD_DBERR;
1544 1545                  }
1545 1546          }
1546 1547  
1547 1548          (void) ps_pcontinue(ph_p);
1548 1549          ph_unlock(th_p->th_ta_p);
1549 1550          return (return_val);
1550 1551  }
1551 1552  
1552 1553  /*
1553 1554   * Enable or disable tracing for a given thread.  Tracing
1554 1555   * is filtered based on the event mask of each thread.  Tracing
1555 1556   * can be turned on/off for the thread without changing thread
1556 1557   * event mask.
1557 1558   * Currently unused by dbx.
1558 1559   */
1559 1560  #pragma weak td_thr_event_enable = __td_thr_event_enable
1560 1561  td_err_e
1561 1562  __td_thr_event_enable(td_thrhandle_t *th_p, int onoff)
1562 1563  {
1563 1564          td_thr_events_t evset;
1564 1565  
1565 1566          td_event_emptyset(&evset);
1566 1567          td_event_addset(&evset, TD_EVENTS_ENABLE);
1567 1568          return (mod_eventset(th_p, &evset, onoff));
1568 1569  }
1569 1570  
1570 1571  /*
1571 1572   * Set the event mask to enable the given events: each event is
1572 1573   * turned on in the thread's event mask.  If a thread encounters
1573 1574   * an event for which its event mask is on, notification will be
1574 1575   * sent to the debugger.
1575 1576   * Addresses for each event are provided to the
1576 1577   * debugger.  It is assumed that a breakpoint of some type will
1577 1578   * be placed at that address.  If the event mask for the thread
1578 1579   * is on, the instruction at the address will be executed.
1579 1580   * Otherwise, the instruction will be skipped.
1580 1581   */
1581 1582  #pragma weak td_thr_set_event = __td_thr_set_event
1582 1583  td_err_e
1583 1584  __td_thr_set_event(td_thrhandle_t *th_p, td_thr_events_t *events)
1584 1585  {
1585 1586          return (mod_eventset(th_p, events, 1));
1586 1587  }
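
Putting the event routines together: a debugger might arm
thread-creation reporting roughly as in the sketch below, where
set_breakpoint() is a hypothetical debugger-side helper.

    td_thr_events_t evset;
    td_notify_t     notify;

    td_event_emptyset(&evset);
    td_event_addset(&evset, TD_CREATE);
    (void) td_thr_set_event(th_p, &evset);  /* mask the event on */
    (void) td_thr_event_enable(th_p, 1);    /* enable reporting */

    /* Ask where the event is reported and plant a breakpoint there. */
    if (td_ta_event_addr(ta_p, TD_CREATE, &notify) == TD_OK &&
        notify.type == NOTIFY_BPT)
            set_breakpoint(notify.u.bptaddr);       /* hypothetical */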
1587 1588  
1588 1589  /*
1589 1590   * Enable or disable a set of events in the process-global event mask,
1590 1591   * depending on the value of onoff.
1591 1592   */
1592 1593  static td_err_e
1593 1594  td_ta_mod_event(td_thragent_t *ta_p, td_thr_events_t *events, int onoff)
1594 1595  {
1595 1596          struct ps_prochandle *ph_p;
1596 1597          td_thr_events_t targ_eventset;
1597 1598          td_err_e        return_val;
1598 1599  
1599 1600          if ((ph_p = ph_lock_ta(ta_p, &return_val)) == NULL)
1600 1601                  return (return_val);
1601 1602          if (ps_pstop(ph_p) != PS_OK) {
1602 1603                  ph_unlock(ta_p);
1603 1604                  return (TD_DBERR);
1604 1605          }
1605 1606          if (ps_pdread(ph_p, ta_p->tdb_eventmask_addr,
1606 1607              &targ_eventset, sizeof (targ_eventset)) != PS_OK)
1607 1608                  return_val = TD_DBERR;
1608 1609          else {
1609 1610                  if (onoff)
1610 1611                          eventsetaddset(&targ_eventset, events);
1611 1612                  else
1612 1613                          eventsetdelset(&targ_eventset, events);
1613 1614                  if (ps_pdwrite(ph_p, ta_p->tdb_eventmask_addr,
1614 1615                      &targ_eventset, sizeof (targ_eventset)) != PS_OK)
1615 1616                          return_val = TD_DBERR;
1616 1617          }
1617 1618          (void) ps_pcontinue(ph_p);
1618 1619          ph_unlock(ta_p);
1619 1620          return (return_val);
1620 1621  }
1621 1622  
1622 1623  /*
1623 1624   * Enable a set of events in the process-global event mask.
1624 1625   */
1625 1626  #pragma weak td_ta_set_event = __td_ta_set_event
1626 1627  td_err_e
1627 1628  __td_ta_set_event(td_thragent_t *ta_p, td_thr_events_t *events)
1628 1629  {
1629 1630          return (td_ta_mod_event(ta_p, events, 1));
1630 1631  }
1631 1632  
1632 1633  /*
1633 1634   * Set event mask to disable the given event set; these events are cleared
1634 1635   * from the event mask of the thread.  Events that occur for a thread
1635 1636   * with the event masked off will not cause notification to be
1636 1637   * sent to the debugger (see td_thr_set_event for fuller description).
1637 1638   */
1638 1639  #pragma weak td_thr_clear_event = __td_thr_clear_event
1639 1640  td_err_e
1640 1641  __td_thr_clear_event(td_thrhandle_t *th_p, td_thr_events_t *events)
1641 1642  {
1642 1643          return (mod_eventset(th_p, events, 0));
1643 1644  }
1644 1645  
1645 1646  /*
1646 1647   * Disable a set of events in the process-global event mask.
1647 1648   */
1648 1649  #pragma weak td_ta_clear_event = __td_ta_clear_event
1649 1650  td_err_e
1650 1651  __td_ta_clear_event(td_thragent_t *ta_p, td_thr_events_t *events)
1651 1652  {
1652 1653          return (td_ta_mod_event(ta_p, events, 0));
1653 1654  }
1654 1655  
1655 1656  /*
1656 1657   * This function returns the most recent event message, if any,
1657 1658   * associated with a thread.  Given a thread handle, return the message
1658 1659   * corresponding to the event encountered by the thread.  Only one
1659 1660   * message per thread is saved.  Messages from earlier events are lost
1660 1661   * when later events occur.
1661 1662   */
1662 1663  #pragma weak td_thr_event_getmsg = __td_thr_event_getmsg
1663 1664  td_err_e
1664 1665  __td_thr_event_getmsg(td_thrhandle_t *th_p, td_event_msg_t *msg)
1665 1666  {
1666 1667          struct ps_prochandle *ph_p;
1667 1668          td_err_e        return_val = TD_OK;
1668 1669          psaddr_t        psaddr;
1669 1670  
1670 1671          if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
1671 1672                  return (return_val);
1672 1673          if (ps_pstop(ph_p) != PS_OK) {
1673 1674                  ph_unlock(th_p->th_ta_p);
1674 1675                  return (TD_BADTA);
1675 1676          }
1676 1677          if (th_p->th_ta_p->model == PR_MODEL_NATIVE) {
1677 1678                  ulwp_t *ulwp = (ulwp_t *)th_p->th_unique;
1678 1679                  td_evbuf_t evbuf;
1679 1680  
1680 1681                  psaddr = (psaddr_t)&ulwp->ul_td_evbuf;
1681 1682                  if (ps_pdread(ph_p, psaddr, &evbuf, sizeof (evbuf)) != PS_OK) {
1682 1683                          return_val = TD_DBERR;
1683 1684                  } else if (evbuf.eventnum == TD_EVENT_NONE) {
1684 1685                          return_val = TD_NOEVENT;
1685 1686                  } else {
1686 1687                          msg->event = evbuf.eventnum;
1687 1688                          msg->th_p = (td_thrhandle_t *)th_p;
1688 1689                          msg->msg.data = (uintptr_t)evbuf.eventdata;
1689 1690                          /* "Consume" the message */
1690 1691                          evbuf.eventnum = TD_EVENT_NONE;
1691 1692                          evbuf.eventdata = NULL;
1692 1693                          if (ps_pdwrite(ph_p, psaddr, &evbuf, sizeof (evbuf))
1693 1694                              != PS_OK)
1694 1695                                  return_val = TD_DBERR;
1695 1696                  }
1696 1697          } else {
1697 1698  #if defined(_LP64) && defined(_SYSCALL32)
1698 1699                  ulwp32_t *ulwp = (ulwp32_t *)th_p->th_unique;
1699 1700                  td_evbuf32_t evbuf;
1700 1701  
1701 1702                  psaddr = (psaddr_t)&ulwp->ul_td_evbuf;
1702 1703                  if (ps_pdread(ph_p, psaddr, &evbuf, sizeof (evbuf)) != PS_OK) {
1703 1704                          return_val = TD_DBERR;
1704 1705                  } else if (evbuf.eventnum == TD_EVENT_NONE) {
1705 1706                          return_val = TD_NOEVENT;
1706 1707                  } else {
1707 1708                          msg->event = evbuf.eventnum;
1708 1709                          msg->th_p = (td_thrhandle_t *)th_p;
1709 1710                          msg->msg.data = (uintptr_t)evbuf.eventdata;
1710 1711                          /* "Consume" the message */
1711 1712                          evbuf.eventnum = TD_EVENT_NONE;
1712 1713                          evbuf.eventdata = 0;
1713 1714                          if (ps_pdwrite(ph_p, psaddr, &evbuf, sizeof (evbuf))
1714 1715                              != PS_OK)
1715 1716                                  return_val = TD_DBERR;
1716 1717                  }
1717 1718  #else
1718 1719                  return_val = TD_ERR;
1719 1720  #endif  /* _SYSCALL32 */
1720 1721          }
1721 1722  
1722 1723          (void) ps_pcontinue(ph_p);
1723 1724          ph_unlock(th_p->th_ta_p);
1724 1725          return (return_val);
1725 1726  }
1726 1727  
1727 1728  /*
1728 1729   * The callback function td_ta_event_getmsg uses when looking for
1729 1730   * a thread with an event.  A thin wrapper around td_thr_event_getmsg.
1730 1731   */
1731 1732  static int
1732 1733  event_msg_cb(const td_thrhandle_t *th_p, void *arg)
1733 1734  {
1734 1735          static td_thrhandle_t th;
1735 1736          td_event_msg_t *msg = arg;
1736 1737  
1737 1738          if (__td_thr_event_getmsg((td_thrhandle_t *)th_p, msg) == TD_OK) {
1738 1739                  /*
1739 1740                   * Got an event, stop iterating.
1740 1741                   *
1741 1742                   * Because of past mistakes in interface definition,
1742 1743                   * we are forced to pass back a static local variable
1743 1744                   * for the thread handle because th_p is a pointer
1744 1745                   * to a local variable in __td_ta_thr_iter().
1745 1746                   * Grr...
1746 1747                   */
1747 1748                  th = *th_p;
1748 1749                  msg->th_p = &th;
1749 1750                  return (1);
1750 1751          }
1751 1752          return (0);
1752 1753  }
1753 1754  
1754 1755  /*
1755 1756   * This function is just like td_thr_event_getmsg, except that it is
1756 1757   * passed a process handle rather than a thread handle, and returns
1757 1758   * an event message for some thread in the process that has an event
1758 1759   * message pending.  If no thread has an event message pending, this
1759 1760   * routine returns TD_NOEVENT.  Thus, all pending event messages may
1760 1761   * be collected from a process by repeatedly calling this routine
1761 1762   * until it returns TD_NOEVENT.
1762 1763   */
1763 1764  #pragma weak td_ta_event_getmsg = __td_ta_event_getmsg
1764 1765  td_err_e
1765 1766  __td_ta_event_getmsg(td_thragent_t *ta_p, td_event_msg_t *msg)
1766 1767  {
1767 1768          td_err_e return_val;
1768 1769  
1769 1770          if (ta_p == NULL)
1770 1771                  return (TD_BADTA);
1771 1772          if (ta_p->ph_p == NULL)
1772 1773                  return (TD_BADPH);
1773 1774          if (msg == NULL)
1774 1775                  return (TD_ERR);
1775 1776          msg->event = TD_EVENT_NONE;
1776 1777          if ((return_val = __td_ta_thr_iter(ta_p, event_msg_cb, msg,
1777 1778              TD_THR_ANY_STATE, TD_THR_LOWEST_PRIORITY, TD_SIGNO_MASK,
1778 1779              TD_THR_ANY_USER_FLAGS)) != TD_OK)
1779 1780                  return (return_val);
1780 1781          if (msg->event == TD_EVENT_NONE)
1781 1782                  return (TD_NOEVENT);
1782 1783          return (TD_OK);
1783 1784  }
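
The collection loop described above looks roughly like this sketch
(ta_p is an assumed, already-initialized agent handle):

    td_event_msg_t  msg;
    td_err_e        err;

    /* Drain every pending event message from the process. */
    while ((err = td_ta_event_getmsg(ta_p, &msg)) == TD_OK)
            (void) printf("event %d pending\n", (int)msg.event);
    /* err == TD_NOEVENT once no thread has a message left */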
1784 1785  
1785 1786  static lwpid_t
1786 1787  thr_to_lwpid(const td_thrhandle_t *th_p)
1787 1788  {
1788 1789          struct ps_prochandle *ph_p = th_p->th_ta_p->ph_p;
1789 1790          lwpid_t lwpid;
1790 1791  
1791 1792          /*
1792 1793           * The caller holds the prochandle lock
1793 1794           * and has already verfied everything.
1794 1795           */
1795 1796          if (th_p->th_ta_p->model == PR_MODEL_NATIVE) {
1796 1797                  ulwp_t *ulwp = (ulwp_t *)th_p->th_unique;
1797 1798  
1798 1799                  if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_lwpid,
1799 1800                      &lwpid, sizeof (lwpid)) != PS_OK)
1800 1801                          lwpid = 0;
1801 1802                  else if (lwpid == 0)
1802 1803                          lwpid = 1;
1803 1804          } else {
1804 1805  #if defined(_LP64) && defined(_SYSCALL32)
1805 1806                  ulwp32_t *ulwp = (ulwp32_t *)th_p->th_unique;
1806 1807  
1807 1808                  if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_lwpid,
1808 1809                      &lwpid, sizeof (lwpid)) != PS_OK)
1809 1810                          lwpid = 0;
1810 1811                  else if (lwpid == 0)
1811 1812                          lwpid = 1;
1812 1813  #else
1813 1814                  lwpid = 0;
1814 1815  #endif  /* _SYSCALL32 */
1815 1816          }
1816 1817  
1817 1818          return (lwpid);
1818 1819  }
1819 1820  
1820 1821  /*
1821 1822   * Suspend a thread.
1822 1823   * XXX: What does this mean in a one-level model?
1823 1824   */
1824 1825  #pragma weak td_thr_dbsuspend = __td_thr_dbsuspend
1825 1826  td_err_e
1826 1827  __td_thr_dbsuspend(const td_thrhandle_t *th_p)
1827 1828  {
1828 1829          struct ps_prochandle *ph_p;
1829 1830          td_err_e return_val;
1830 1831  
1831 1832          if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
1832 1833                  return (return_val);
1833 1834          if (ps_lstop(ph_p, thr_to_lwpid(th_p)) != PS_OK)
1834 1835                  return_val = TD_DBERR;
1835 1836          ph_unlock(th_p->th_ta_p);
1836 1837          return (return_val);
1837 1838  }
1838 1839  
1839 1840  /*
1840 1841   * Resume a suspended thread.
1841 1842   * XXX: What does this mean in a one-level model?
1842 1843   */
1843 1844  #pragma weak td_thr_dbresume = __td_thr_dbresume
1844 1845  td_err_e
1845 1846  __td_thr_dbresume(const td_thrhandle_t *th_p)
1846 1847  {
1847 1848          struct ps_prochandle *ph_p;
1848 1849          td_err_e return_val;
1849 1850  
1850 1851          if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
1851 1852                  return (return_val);
1852 1853          if (ps_lcontinue(ph_p, thr_to_lwpid(th_p)) != PS_OK)
1853 1854                  return_val = TD_DBERR;
1854 1855          ph_unlock(th_p->th_ta_p);
1855 1856          return (return_val);
1856 1857  }
1857 1858  
1858 1859  /*
1859 1860   * Set a thread's signal mask.
1860 1861   * Currently unused by dbx.
1861 1862   */
1862 1863  #pragma weak td_thr_sigsetmask = __td_thr_sigsetmask
1863 1864  /* ARGSUSED */
1864 1865  td_err_e
1865 1866  __td_thr_sigsetmask(const td_thrhandle_t *th_p, const sigset_t ti_sigmask)
1866 1867  {
1867 1868          return (TD_NOCAPAB);
1868 1869  }
1869 1870  
1870 1871  /*
1871 1872   * Set a thread's "signals-pending" set.
1872 1873   * Currently unused by dbx.
1873 1874   */
1874 1875  #pragma weak td_thr_setsigpending = __td_thr_setsigpending
1875 1876  /* ARGSUSED */
1876 1877  td_err_e
1877 1878  __td_thr_setsigpending(const td_thrhandle_t *th_p,
1878 1879      uchar_t ti_pending_flag, const sigset_t ti_pending)
1879 1880  {
1880 1881          return (TD_NOCAPAB);
1881 1882  }
1882 1883  
1883 1884  /*
1884 1885   * Get a thread's general register set.
1885 1886   */
1886 1887  #pragma weak td_thr_getgregs = __td_thr_getgregs
1887 1888  td_err_e
1888 1889  __td_thr_getgregs(td_thrhandle_t *th_p, prgregset_t regset)
1889 1890  {
1890 1891          struct ps_prochandle *ph_p;
1891 1892          td_err_e return_val;
1892 1893  
1893 1894          if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
1894 1895                  return (return_val);
1895 1896          if (ps_pstop(ph_p) != PS_OK) {
1896 1897                  ph_unlock(th_p->th_ta_p);
1897 1898                  return (TD_DBERR);
1898 1899          }
1899 1900  
1900 1901          if (ps_lgetregs(ph_p, thr_to_lwpid(th_p), regset) != PS_OK)
1901 1902                  return_val = TD_DBERR;
1902 1903  
1903 1904          (void) ps_pcontinue(ph_p);
1904 1905          ph_unlock(th_p->th_ta_p);
1905 1906          return (return_val);
1906 1907  }
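
For example, a debugger that has stopped a thread might read its
program counter as in this sketch; the register indices come from
<sys/regset.h> and are architecture-specific (amd64 assumed here).

    #include <stdio.h>
    #include <sys/regset.h>

    prgregset_t regs;

    if (td_thr_getgregs(th_p, regs) == TD_OK) {
    #if defined(__amd64)
            (void) printf("%%rip = 0x%lx\n",
                (unsigned long)regs[REG_RIP]);
    #endif
    }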
1907 1908  
1908 1909  /*
1909 1910   * Set a thread's general register set.
1910 1911   */
1911 1912  #pragma weak td_thr_setgregs = __td_thr_setgregs
1912 1913  td_err_e
1913 1914  __td_thr_setgregs(td_thrhandle_t *th_p, const prgregset_t regset)
1914 1915  {
1915 1916          struct ps_prochandle *ph_p;
1916 1917          td_err_e return_val;
1917 1918  
1918 1919          if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
1919 1920                  return (return_val);
1920 1921          if (ps_pstop(ph_p) != PS_OK) {
1921 1922                  ph_unlock(th_p->th_ta_p);
1922 1923                  return (TD_DBERR);
1923 1924          }
1924 1925  
1925 1926          if (ps_lsetregs(ph_p, thr_to_lwpid(th_p), regset) != PS_OK)
1926 1927                  return_val = TD_DBERR;
1927 1928  
1928 1929          (void) ps_pcontinue(ph_p);
1929 1930          ph_unlock(th_p->th_ta_p);
1930 1931          return (return_val);
1931 1932  }
1932 1933  
1933 1934  /*
1934 1935   * Get a thread's floating-point register set.
1935 1936   */
1936 1937  #pragma weak td_thr_getfpregs = __td_thr_getfpregs
1937 1938  td_err_e
1938 1939  __td_thr_getfpregs(td_thrhandle_t *th_p, prfpregset_t *fpregset)
1939 1940  {
1940 1941          struct ps_prochandle *ph_p;
1941 1942          td_err_e return_val;
1942 1943  
1943 1944          if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
1944 1945                  return (return_val);
1945 1946          if (ps_pstop(ph_p) != PS_OK) {
1946 1947                  ph_unlock(th_p->th_ta_p);
1947 1948                  return (TD_DBERR);
1948 1949          }
1949 1950  
1950 1951          if (ps_lgetfpregs(ph_p, thr_to_lwpid(th_p), fpregset) != PS_OK)
1951 1952                  return_val = TD_DBERR;
1952 1953  
1953 1954          (void) ps_pcontinue(ph_p);
1954 1955          ph_unlock(th_p->th_ta_p);
1955 1956          return (return_val);
1956 1957  }
1957 1958  
1958 1959  /*
1959 1960   * Set a thread's floating-point register set.
1960 1961   */
1961 1962  #pragma weak td_thr_setfpregs = __td_thr_setfpregs
1962 1963  td_err_e
1963 1964  __td_thr_setfpregs(td_thrhandle_t *th_p, const prfpregset_t *fpregset)
1964 1965  {
1965 1966          struct ps_prochandle *ph_p;
1966 1967          td_err_e return_val;
1967 1968  
1968 1969          if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
1969 1970                  return (return_val);
1970 1971          if (ps_pstop(ph_p) != PS_OK) {
1971 1972                  ph_unlock(th_p->th_ta_p);
1972 1973                  return (TD_DBERR);
1973 1974          }
1974 1975  
1975 1976          if (ps_lsetfpregs(ph_p, thr_to_lwpid(th_p), fpregset) != PS_OK)
1976 1977                  return_val = TD_DBERR;
1977 1978  
1978 1979          (void) ps_pcontinue(ph_p);
1979 1980          ph_unlock(th_p->th_ta_p);
1980 1981          return (return_val);
1981 1982  }
  
1982 1983  
1983 1984  /*
1984 1985   * Get the size of the extra state register set for this architecture.
1985 1986   * Currently unused by dbx.
1986 1987   */
1987 1988  #pragma weak td_thr_getxregsize = __td_thr_getxregsize
1988 1989  /* ARGSUSED */
1989 1990  td_err_e
1990 1991  __td_thr_getxregsize(td_thrhandle_t *th_p, int *xregsize)
1991 1992  {
1992      -#if defined(__sparc)
1993 1993          struct ps_prochandle *ph_p;
1994 1994          td_err_e return_val;
1995 1995  
1996 1996          if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
1997 1997                  return (return_val);
1998 1998          if (ps_pstop(ph_p) != PS_OK) {
1999 1999                  ph_unlock(th_p->th_ta_p);
2000 2000                  return (TD_DBERR);
2001 2001          }
2002 2002  
2003 2003          if (ps_lgetxregsize(ph_p, thr_to_lwpid(th_p), xregsize) != PS_OK)
2004 2004                  return_val = TD_DBERR;
2005 2005  
     2006 +        if (return_val == TD_OK && *xregsize == 0)
     2007 +                return_val = TD_NOXREGS;
     2008 +
2006 2009          (void) ps_pcontinue(ph_p);
2007 2010          ph_unlock(th_p->th_ta_p);
2008 2011          return (return_val);
2009      -#else   /* __sparc */
2010      -        return (TD_NOXREGS);
2011      -#endif  /* __sparc */
2012 2012  }
2013 2013  
2014 2014  /*
2015 2015   * Get a thread's extra state register set.
2016 2016   */
2017 2017  #pragma weak td_thr_getxregs = __td_thr_getxregs
2018      -/* ARGSUSED */
2019 2018  td_err_e
2020 2019  __td_thr_getxregs(td_thrhandle_t *th_p, void *xregset)
2021 2020  {
2022      -#if defined(__sparc)
2023 2021          struct ps_prochandle *ph_p;
2024 2022          td_err_e return_val;
     2023 +        ps_err_e ps_err;
2025 2024  
2026 2025          if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
2027 2026                  return (return_val);
2028 2027          if (ps_pstop(ph_p) != PS_OK) {
2029 2028                  ph_unlock(th_p->th_ta_p);
2030 2029                  return (TD_DBERR);
2031 2030          }
2032 2031  
2033      -        if (ps_lgetxregs(ph_p, thr_to_lwpid(th_p), (caddr_t)xregset) != PS_OK)
     2032 +        ps_err = ps_lgetxregs(ph_p, thr_to_lwpid(th_p), (caddr_t)xregset);
     2033 +        if (ps_err == PS_NOXREGS)
     2034 +                return_val = TD_NOXREGS;
     2035 +        else if (ps_err != PS_OK)
2034 2036                  return_val = TD_DBERR;
2035 2037  
2036 2038          (void) ps_pcontinue(ph_p);
2037 2039          ph_unlock(th_p->th_ta_p);
2038 2040          return (return_val);
2039      -#else   /* __sparc */
2040      -        return (TD_NOXREGS);
2041      -#endif  /* __sparc */
2042 2041  }
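
With the sparc-only fences removed, the size-then-read protocol now
works the same way on x86.  A consumer might drive it roughly like
this sketch (th_p is an assumed thread handle; error handling is
abbreviated):

    int     xrsize = 0;
    void    *xrbuf;

    switch (td_thr_getxregsize(th_p, &xrsize)) {
    case TD_OK:
            if ((xrbuf = malloc(xrsize)) == NULL)
                    break;
            if (td_thr_getxregs(th_p, xrbuf) == TD_OK) {
                    /* xrbuf now holds xrsize bytes of xregs data */
            }
            free(xrbuf);
            break;
    case TD_NOXREGS:
            /* the target has no extended register state */
            break;
    default:
            /* debugger-side failure */
            break;
    }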
2043 2042  
2044 2043  /*
2045 2044   * Set a thread's extra state register set.
2046 2045   */
2047 2046  #pragma weak td_thr_setxregs = __td_thr_setxregs
2048 2047  /* ARGSUSED */
2049 2048  td_err_e
2050 2049  __td_thr_setxregs(td_thrhandle_t *th_p, const void *xregset)
2051 2050  {
2052      -#if defined(__sparc)
2053 2051          struct ps_prochandle *ph_p;
2054 2052          td_err_e return_val;
2055 2053  
2056 2054          if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
2057 2055                  return (return_val);
2058 2056          if (ps_pstop(ph_p) != PS_OK) {
2059 2057                  ph_unlock(th_p->th_ta_p);
2060 2058                  return (TD_DBERR);
2061 2059          }
2062 2060  
2063 2061          if (ps_lsetxregs(ph_p, thr_to_lwpid(th_p), (caddr_t)xregset) != PS_OK)
2064 2062                  return_val = TD_DBERR;
2065 2063  
2066 2064          (void) ps_pcontinue(ph_p);
2067 2065          ph_unlock(th_p->th_ta_p);
2068 2066          return (return_val);
2069      -#else   /* __sparc */
2070      -        return (TD_NOXREGS);
2071      -#endif  /* __sparc */
2072 2067  }
2073 2068  
2074 2069  struct searcher {
2075 2070          psaddr_t        addr;
2076 2071          int             status;
2077 2072  };
2078 2073  
2079 2074  /*
2080 2075   * Check the struct thread address in *th_p against the first
2081 2076   * value in "data".  If the value is found, set the second value
2082 2077   * in "data" to 1 and return 1 to terminate the iteration.
2083 2078   * This function is used by td_thr_validate() to verify that
2084 2079   * a thread handle is valid.
2085 2080   */
2086 2081  static int
2087 2082  td_searcher(const td_thrhandle_t *th_p, void *data)
2088 2083  {
2089 2084          struct searcher *searcher_data = (struct searcher *)data;
2090 2085  
2091 2086          if (searcher_data->addr == th_p->th_unique) {
2092 2087                  searcher_data->status = 1;
2093 2088                  return (1);
2094 2089          }
2095 2090          return (0);
2096 2091  }
2097 2092  
2098 2093  /*
2099 2094   * Validate the thread handle.  Check that
2100 2095   * a thread exists in the thread agent/process that
2101 2096   * corresponds to thread with handle *th_p.
2102 2097   * Currently unused by dbx.
2103 2098   */
2104 2099  #pragma weak td_thr_validate = __td_thr_validate
2105 2100  td_err_e
2106 2101  __td_thr_validate(const td_thrhandle_t *th_p)
2107 2102  {
2108 2103          td_err_e return_val;
2109 2104          struct searcher searcher_data = {0, 0};
2110 2105  
2111 2106          if (th_p == NULL)
2112 2107                  return (TD_BADTH);
2113 2108          if (th_p->th_unique == 0 || th_p->th_ta_p == NULL)
2114 2109                  return (TD_BADTH);
2115 2110  
2116 2111          /*
2117 2112           * LOCKING EXCEPTION - Locking is not required
2118 2113           * here because no use of the thread agent is made (other
2119 2114           * than the sanity check) and checking of the thread
2120 2115           * agent will be done in __td_ta_thr_iter.
2121 2116           */
2122 2117  
2123 2118          searcher_data.addr = th_p->th_unique;
2124 2119          return_val = __td_ta_thr_iter(th_p->th_ta_p,
2125 2120              td_searcher, &searcher_data,
2126 2121              TD_THR_ANY_STATE, TD_THR_LOWEST_PRIORITY,
2127 2122              TD_SIGNO_MASK, TD_THR_ANY_USER_FLAGS);
2128 2123  
2129 2124          if (return_val == TD_OK && searcher_data.status == 0)
2130 2125                  return_val = TD_NOTHR;
2131 2126  
2132 2127          return (return_val);
2133 2128  }
2134 2129  
2135 2130  /*
2136 2131   * Get a thread's private binding to a given thread specific
2137 2132   * data (TSD) key (see thr_getspecific(3C)).  If the thread doesn't
2138 2133   * have a binding for a particular key, then NULL is returned.
2139 2134   */
2140 2135  #pragma weak td_thr_tsd = __td_thr_tsd
2141 2136  td_err_e
2142 2137  __td_thr_tsd(td_thrhandle_t *th_p, thread_key_t key, void **data_pp)
2143 2138  {
2144 2139          struct ps_prochandle *ph_p;
2145 2140          td_thragent_t   *ta_p;
2146 2141          td_err_e        return_val;
2147 2142          int             maxkey;
2148 2143          int             nkey;
2149 2144          psaddr_t        tsd_paddr;
2150 2145  
2151 2146          if (data_pp == NULL)
2152 2147                  return (TD_ERR);
2153 2148          *data_pp = NULL;
2154 2149          if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
2155 2150                  return (return_val);
2156 2151          ta_p = th_p->th_ta_p;
2157 2152          if (ps_pstop(ph_p) != PS_OK) {
2158 2153                  ph_unlock(ta_p);
2159 2154                  return (TD_DBERR);
2160 2155          }
2161 2156  
2162 2157          if (ta_p->model == PR_MODEL_NATIVE) {
2163 2158                  ulwp_t *ulwp = (ulwp_t *)th_p->th_unique;
2164 2159                  tsd_metadata_t tsdm;
2165 2160                  tsd_t stsd;
2166 2161  
2167 2162                  if (ps_pdread(ph_p,
2168 2163                      ta_p->uberdata_addr + offsetof(uberdata_t, tsd_metadata),
2169 2164                      &tsdm, sizeof (tsdm)) != PS_OK)
2170 2165                          return_val = TD_DBERR;
2171 2166                  else if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_stsd,
2172 2167                      &tsd_paddr, sizeof (tsd_paddr)) != PS_OK)
2173 2168                          return_val = TD_DBERR;
2174 2169                  else if (tsd_paddr != 0 &&
2175 2170                      ps_pdread(ph_p, tsd_paddr, &stsd, sizeof (stsd)) != PS_OK)
2176 2171                          return_val = TD_DBERR;
2177 2172                  else {
2178 2173                          maxkey = tsdm.tsdm_nused;
2179 2174                          nkey = tsd_paddr == 0 ? TSD_NFAST : stsd.tsd_nalloc;
2180 2175  
2181 2176                          if (key < TSD_NFAST)
2182 2177                                  tsd_paddr = (psaddr_t)&ulwp->ul_ftsd[0];
2183 2178                  }
2184 2179          } else {
2185 2180  #if defined(_LP64) && defined(_SYSCALL32)
2186 2181                  ulwp32_t *ulwp = (ulwp32_t *)th_p->th_unique;
2187 2182                  tsd_metadata32_t tsdm;
2188 2183                  tsd32_t stsd;
2189 2184                  caddr32_t addr;
2190 2185  
2191 2186                  if (ps_pdread(ph_p,
2192 2187                      ta_p->uberdata_addr + offsetof(uberdata32_t, tsd_metadata),
2193 2188                      &tsdm, sizeof (tsdm)) != PS_OK)
2194 2189                          return_val = TD_DBERR;
2195 2190                  else if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_stsd,
2196 2191                      &addr, sizeof (addr)) != PS_OK)
2197 2192                          return_val = TD_DBERR;
2198 2193                  else if (addr != 0 &&
2199 2194                      ps_pdread(ph_p, addr, &stsd, sizeof (stsd)) != PS_OK)
2200 2195                          return_val = TD_DBERR;
2201 2196                  else {
2202 2197                          maxkey = tsdm.tsdm_nused;
2203 2198                          nkey = addr == 0 ? TSD_NFAST : stsd.tsd_nalloc;
2204 2199  
2205 2200                          if (key < TSD_NFAST) {
2206 2201                                  tsd_paddr = (psaddr_t)&ulwp->ul_ftsd[0];
2207 2202                          } else {
2208 2203                                  tsd_paddr = addr;
2209 2204                          }
2210 2205                  }
2211 2206  #else
2212 2207                  return_val = TD_ERR;
2213 2208  #endif  /* _SYSCALL32 */
2214 2209          }
2215 2210  
2216 2211          if (return_val == TD_OK && (key < 1 || key >= maxkey))
2217 2212                  return_val = TD_NOTSD;
2218 2213          if (return_val != TD_OK || key >= nkey) {
2219 2214                  /* NULL has already been stored in data_pp */
2220 2215                  (void) ps_pcontinue(ph_p);
2221 2216                  ph_unlock(ta_p);
2222 2217                  return (return_val);
2223 2218          }
2224 2219  
2225 2220          /*
2226 2221           * Read the value from the thread's tsd array.
2227 2222           */
2228 2223          if (ta_p->model == PR_MODEL_NATIVE) {
2229 2224                  void *value;
2230 2225  
2231 2226                  if (ps_pdread(ph_p, tsd_paddr + key * sizeof (void *),
2232 2227                      &value, sizeof (value)) != PS_OK)
2233 2228                          return_val = TD_DBERR;
2234 2229                  else
2235 2230                          *data_pp = value;
2236 2231  #if defined(_LP64) && defined(_SYSCALL32)
2237 2232          } else {
2238 2233                  caddr32_t value32;
2239 2234  
2240 2235                  if (ps_pdread(ph_p, tsd_paddr + key * sizeof (caddr32_t),
2241 2236                      &value32, sizeof (value32)) != PS_OK)
2242 2237                          return_val = TD_DBERR;
2243 2238                  else
2244 2239                          *data_pp = (void *)(uintptr_t)value32;
2245 2240  #endif  /* _SYSCALL32 */
2246 2241          }
2247 2242  
2248 2243          (void) ps_pcontinue(ph_p);
2249 2244          ph_unlock(ta_p);
2250 2245          return (return_val);
2251 2246  }
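
A sketch of reading one key's binding; the key itself would normally
be discovered from the target, e.g. by iterating with td_ta_tsd_iter().

    void    *value;

    switch (td_thr_tsd(th_p, key, &value)) {
    case TD_OK:
            (void) printf("key %u -> %p\n", (uint_t)key, value);
            break;
    case TD_NOTSD:
            /* the key is not allocated in the target */
            break;
    default:
            /* read error or bad handle */
            break;
    }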
2252 2247  
2253 2248  /*
2254 2249   * Get the base address of a thread's thread local storage (TLS) block
2255 2250   * for the module (executable or shared object) identified by 'moduleid'.
2256 2251   */
2257 2252  #pragma weak td_thr_tlsbase = __td_thr_tlsbase
2258 2253  td_err_e
2259 2254  __td_thr_tlsbase(td_thrhandle_t *th_p, ulong_t moduleid, psaddr_t *base)
2260 2255  {
2261 2256          struct ps_prochandle *ph_p;
2262 2257          td_thragent_t   *ta_p;
2263 2258          td_err_e        return_val;
2264 2259  
2265 2260          if (base == NULL)
2266 2261                  return (TD_ERR);
2267 2262          *base = 0;
2268 2263          if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
2269 2264                  return (return_val);
2270 2265          ta_p = th_p->th_ta_p;
2271 2266          if (ps_pstop(ph_p) != PS_OK) {
2272 2267                  ph_unlock(ta_p);
2273 2268                  return (TD_DBERR);
2274 2269          }
2275 2270  
2276 2271          if (ta_p->model == PR_MODEL_NATIVE) {
2277 2272                  ulwp_t *ulwp = (ulwp_t *)th_p->th_unique;
2278 2273                  tls_metadata_t tls_metadata;
2279 2274                  TLS_modinfo tlsmod;
2280 2275                  tls_t tls;
2281 2276  
2282 2277                  if (ps_pdread(ph_p,
2283 2278                      ta_p->uberdata_addr + offsetof(uberdata_t, tls_metadata),
2284 2279                      &tls_metadata, sizeof (tls_metadata)) != PS_OK)
2285 2280                          return_val = TD_DBERR;
2286 2281                  else if (moduleid >= tls_metadata.tls_modinfo.tls_size)
2287 2282                          return_val = TD_NOTLS;
2288 2283                  else if (ps_pdread(ph_p,
2289 2284                      (psaddr_t)((TLS_modinfo *)
2290 2285                      tls_metadata.tls_modinfo.tls_data + moduleid),
2291 2286                      &tlsmod, sizeof (tlsmod)) != PS_OK)
2292 2287                          return_val = TD_DBERR;
2293 2288                  else if (tlsmod.tm_memsz == 0)
2294 2289                          return_val = TD_NOTLS;
2295 2290                  else if (tlsmod.tm_flags & TM_FLG_STATICTLS)
2296 2291                          *base = (psaddr_t)ulwp - tlsmod.tm_stattlsoffset;
2297 2292                  else if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_tls,
2298 2293                      &tls, sizeof (tls)) != PS_OK)
2299 2294                          return_val = TD_DBERR;
2300 2295                  else if (moduleid >= tls.tls_size)
2301 2296                          return_val = TD_TLSDEFER;
2302 2297                  else if (ps_pdread(ph_p,
2303 2298                      (psaddr_t)((tls_t *)tls.tls_data + moduleid),
2304 2299                      &tls, sizeof (tls)) != PS_OK)
2305 2300                          return_val = TD_DBERR;
2306 2301                  else if (tls.tls_size == 0)
2307 2302                          return_val = TD_TLSDEFER;
2308 2303                  else
2309 2304                          *base = (psaddr_t)tls.tls_data;
2310 2305          } else {
2311 2306  #if defined(_LP64) && defined(_SYSCALL32)
2312 2307                  ulwp32_t *ulwp = (ulwp32_t *)th_p->th_unique;
2313 2308                  tls_metadata32_t tls_metadata;
2314 2309                  TLS_modinfo32 tlsmod;
2315 2310                  tls32_t tls;
2316 2311  
2317 2312                  if (ps_pdread(ph_p,
2318 2313                      ta_p->uberdata_addr + offsetof(uberdata32_t, tls_metadata),
2319 2314                      &tls_metadata, sizeof (tls_metadata)) != PS_OK)
2320 2315                          return_val = TD_DBERR;
2321 2316                  else if (moduleid >= tls_metadata.tls_modinfo.tls_size)
2322 2317                          return_val = TD_NOTLS;
2323 2318                  else if (ps_pdread(ph_p,
2324 2319                      (psaddr_t)((TLS_modinfo32 *)
2325 2320                      (uintptr_t)tls_metadata.tls_modinfo.tls_data + moduleid),
2326 2321                      &tlsmod, sizeof (tlsmod)) != PS_OK)
2327 2322                          return_val = TD_DBERR;
2328 2323                  else if (tlsmod.tm_memsz == 0)
2329 2324                          return_val = TD_NOTLS;
2330 2325                  else if (tlsmod.tm_flags & TM_FLG_STATICTLS)
2331 2326                          *base = (psaddr_t)ulwp - tlsmod.tm_stattlsoffset;
2332 2327                  else if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_tls,
2333 2328                      &tls, sizeof (tls)) != PS_OK)
2334 2329                          return_val = TD_DBERR;
2335 2330                  else if (moduleid >= tls.tls_size)
2336 2331                          return_val = TD_TLSDEFER;
2337 2332                  else if (ps_pdread(ph_p,
2338 2333                      (psaddr_t)((tls32_t *)(uintptr_t)tls.tls_data + moduleid),
2339 2334                      &tls, sizeof (tls)) != PS_OK)
2340 2335                          return_val = TD_DBERR;
2341 2336                  else if (tls.tls_size == 0)
2342 2337                          return_val = TD_TLSDEFER;
2343 2338                  else
2344 2339                          *base = (psaddr_t)tls.tls_data;
2345 2340  #else
2346 2341                  return_val = TD_ERR;
2347 2342  #endif  /* _SYSCALL32 */
2348 2343          }
2349 2344  
2350 2345          (void) ps_pcontinue(ph_p);
2351 2346          ph_unlock(ta_p);
2352 2347          return (return_val);
2353 2348  }
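
To resolve a thread-local variable, a debugger combines this base with
the variable's offset inside its module's TLS block.  In the sketch
below, moduleid and tlsoffset are assumed to come from the run-time
linker's debugging interfaces.

    psaddr_t base;

    switch (td_thr_tlsbase(th_p, moduleid, &base)) {
    case TD_OK:
            /* the variable lives at base + tlsoffset */
            break;
    case TD_TLSDEFER:
            /* this thread has not yet allocated the block */
            break;
    case TD_NOTLS:
            /* the module has no TLS segment */
            break;
    default:
            break;
    }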
2354 2349  
2355 2350  /*
2356 2351   * Change a thread's priority to the value specified by ti_pri.
2357 2352   * Currently unused by dbx.
2358 2353   */
2359 2354  #pragma weak td_thr_setprio = __td_thr_setprio
2360 2355  /* ARGSUSED */
2361 2356  td_err_e
2362 2357  __td_thr_setprio(td_thrhandle_t *th_p, int ti_pri)
2363 2358  {
2364 2359          return (TD_NOCAPAB);
2365 2360  }
2366 2361  
2367 2362  /*
2368 2363   * This structure links td_thr_lockowner and the lowner_cb callback function.
2369 2364   */
2370 2365  typedef struct {
2371 2366          td_sync_iter_f  *owner_cb;
2372 2367          void            *owner_cb_arg;
2373 2368          td_thrhandle_t  *th_p;
2374 2369  } lowner_cb_ctl_t;
2375 2370  
2376 2371  static int
2377 2372  lowner_cb(const td_synchandle_t *sh_p, void *arg)
2378 2373  {
2379 2374          lowner_cb_ctl_t *ocb = arg;
2380 2375          int trunc = 0;
2381 2376          union {
2382 2377                  rwlock_t rwl;
2383 2378                  mutex_t mx;
2384 2379          } rw_m;
2385 2380  
2386 2381          if (ps_pdread(sh_p->sh_ta_p->ph_p, sh_p->sh_unique,
2387 2382              &rw_m, sizeof (rw_m)) != PS_OK) {
2388 2383                  trunc = 1;
2389 2384                  if (ps_pdread(sh_p->sh_ta_p->ph_p, sh_p->sh_unique,
2390 2385                      &rw_m.mx, sizeof (rw_m.mx)) != PS_OK)
2391 2386                          return (0);
2392 2387          }
2393 2388          if (rw_m.mx.mutex_magic == MUTEX_MAGIC &&
2394 2389              rw_m.mx.mutex_owner == ocb->th_p->th_unique)
2395 2390                  return ((ocb->owner_cb)(sh_p, ocb->owner_cb_arg));
2396 2391          if (!trunc && rw_m.rwl.magic == RWL_MAGIC) {
2397 2392                  mutex_t *rwlock = &rw_m.rwl.mutex;
2398 2393                  if (rwlock->mutex_owner == ocb->th_p->th_unique)
2399 2394                          return ((ocb->owner_cb)(sh_p, ocb->owner_cb_arg));
2400 2395          }
2401 2396          return (0);
2402 2397  }
2403 2398  
2404 2399  /*
2405 2400   * Iterate over the set of locks owned by a specified thread.
2406 2401   * If cb returns a non-zero value, terminate iterations.
2407 2402   */
2408 2403  #pragma weak td_thr_lockowner = __td_thr_lockowner
2409 2404  td_err_e
2410 2405  __td_thr_lockowner(const td_thrhandle_t *th_p, td_sync_iter_f *cb,
2411 2406      void *cb_data)
2412 2407  {
2413 2408          td_thragent_t   *ta_p;
2414 2409          td_err_e        return_val;
2415 2410          lowner_cb_ctl_t lcb;
2416 2411  
2417 2412          /*
2418 2413           * Just sanity checks.
2419 2414           */
2420 2415          if (ph_lock_th((td_thrhandle_t *)th_p, &return_val) == NULL)
2421 2416                  return (return_val);
2422 2417          ta_p = th_p->th_ta_p;
2423 2418          ph_unlock(ta_p);
2424 2419  
2425 2420          lcb.owner_cb = cb;
2426 2421          lcb.owner_cb_arg = cb_data;
2427 2422          lcb.th_p = (td_thrhandle_t *)th_p;
2428 2423          return (__td_ta_sync_iter(ta_p, lowner_cb, &lcb));
2429 2424  }
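
Note that, like td_ta_sync_iter() itself, this walk only sees objects
that were registered while sync. tracking was enabled.  A minimal
sketch of counting the locks a thread holds (count_lock is a name
assumed for the example):

    /* Hypothetical callback: bump a counter for each owned lock. */
    static int
    count_lock(const td_synchandle_t *sh_p, void *arg)
    {
            (*(int *)arg)++;
            return (0);
    }

    int nlocks = 0;
    (void) td_thr_lockowner(th_p, count_lock, &nlocks);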
2430 2425  
2431 2426  /*
2432 2427   * If a thread is asleep on a synchronization variable,
2433 2428   * then get the synchronization handle.
2434 2429   */
2435 2430  #pragma weak td_thr_sleepinfo = __td_thr_sleepinfo
2436 2431  td_err_e
2437 2432  __td_thr_sleepinfo(const td_thrhandle_t *th_p, td_synchandle_t *sh_p)
2438 2433  {
2439 2434          struct ps_prochandle *ph_p;
2440 2435          td_err_e        return_val = TD_OK;
2441 2436          uintptr_t       wchan;
2442 2437  
2443 2438          if (sh_p == NULL)
2444 2439                  return (TD_ERR);
2445 2440          if ((ph_p = ph_lock_th((td_thrhandle_t *)th_p, &return_val)) == NULL)
2446 2441                  return (return_val);
2447 2442  
2448 2443          /*
2449 2444           * No need to stop the process for a simple read.
2450 2445           */
2451 2446          if (th_p->th_ta_p->model == PR_MODEL_NATIVE) {
2452 2447                  ulwp_t *ulwp = (ulwp_t *)th_p->th_unique;
2453 2448  
2454 2449                  if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_wchan,
2455 2450                      &wchan, sizeof (wchan)) != PS_OK)
2456 2451                          return_val = TD_DBERR;
2457 2452          } else {
2458 2453  #if defined(_LP64) && defined(_SYSCALL32)
2459 2454                  ulwp32_t *ulwp = (ulwp32_t *)th_p->th_unique;
2460 2455                  caddr32_t wchan32;
2461 2456  
2462 2457                  if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_wchan,
2463 2458                      &wchan32, sizeof (wchan32)) != PS_OK)
2464 2459                          return_val = TD_DBERR;
2465 2460                  wchan = wchan32;
2466 2461  #else
2467 2462                  return_val = TD_ERR;
2468 2463  #endif  /* _SYSCALL32 */
2469 2464          }
2470 2465  
2471 2466          if (return_val != TD_OK || wchan == 0) {
2472 2467                  sh_p->sh_ta_p = NULL;
2473 2468                  sh_p->sh_unique = 0;
2474 2469                  if (return_val == TD_OK)
2475 2470                          return_val = TD_ERR;
2476 2471          } else {
2477 2472                  sh_p->sh_ta_p = th_p->th_ta_p;
2478 2473                  sh_p->sh_unique = (psaddr_t)wchan;
2479 2474          }
2480 2475  
2481 2476          ph_unlock(th_p->th_ta_p);
2482 2477          return (return_val);
2483 2478  }
2484 2479  
2485 2480  /*
2486 2481   * Which thread is running on an lwp?
2487 2482   */
2488 2483  #pragma weak td_ta_map_lwp2thr = __td_ta_map_lwp2thr
2489 2484  td_err_e
2490 2485  __td_ta_map_lwp2thr(td_thragent_t *ta_p, lwpid_t lwpid,
2491 2486      td_thrhandle_t *th_p)
2492 2487  {
2493 2488          return (__td_ta_map_id2thr(ta_p, lwpid, th_p));
2494 2489  }
2495 2490  
2496 2491  /*
2497 2492   * Common code for td_sync_get_info() and td_sync_get_stats()
2498 2493   */
2499 2494  static td_err_e
2500 2495  sync_get_info_common(const td_synchandle_t *sh_p, struct ps_prochandle *ph_p,
2501 2496      td_syncinfo_t *si_p)
2502 2497  {
2503 2498          int trunc = 0;
2504 2499          td_so_un_t generic_so;
2505 2500  
2506 2501          /*
2507 2502           * Determine the sync. object type; a little type fudgery here.
2508 2503           * First attempt to read the whole union.  If that fails, attempt
2509 2504           * to read just the condvar.  A condvar is the smallest sync. object.
2510 2505           */
2511 2506          if (ps_pdread(ph_p, sh_p->sh_unique,
2512 2507              &generic_so, sizeof (generic_so)) != PS_OK) {
2513 2508                  trunc = 1;
2514 2509                  if (ps_pdread(ph_p, sh_p->sh_unique, &generic_so.condition,
2515 2510                      sizeof (generic_so.condition)) != PS_OK)
2516 2511                          return (TD_DBERR);
2517 2512          }
2518 2513  
2519 2514          switch (generic_so.condition.cond_magic) {
2520 2515          case MUTEX_MAGIC:
2521 2516                  if (trunc && ps_pdread(ph_p, sh_p->sh_unique,
2522 2517                      &generic_so.lock, sizeof (generic_so.lock)) != PS_OK)
2523 2518                          return (TD_DBERR);
2524 2519                  si_p->si_type = TD_SYNC_MUTEX;
2525 2520                  si_p->si_shared_type =
2526 2521                      (generic_so.lock.mutex_type & USYNC_PROCESS);
2527 2522                  (void) memcpy(si_p->si_flags, &generic_so.lock.mutex_flag,
2528 2523                      sizeof (generic_so.lock.mutex_flag));
2529 2524                  si_p->si_state.mutex_locked =
2530 2525                      (generic_so.lock.mutex_lockw != 0);
2531 2526                  si_p->si_size = sizeof (generic_so.lock);
2532 2527                  si_p->si_has_waiters = generic_so.lock.mutex_waiters;
2533 2528                  si_p->si_rcount = generic_so.lock.mutex_rcount;
2534 2529                  si_p->si_prioceiling = generic_so.lock.mutex_ceiling;
2535 2530                  if (si_p->si_state.mutex_locked) {
2536 2531                          if (si_p->si_shared_type & USYNC_PROCESS)
2537 2532                                  si_p->si_ownerpid =
2538 2533                                      generic_so.lock.mutex_ownerpid;
2539 2534                          si_p->si_owner.th_ta_p = sh_p->sh_ta_p;
2540 2535                          si_p->si_owner.th_unique = generic_so.lock.mutex_owner;
2541 2536                  }
2542 2537                  break;
2543 2538          case COND_MAGIC:
2544 2539                  si_p->si_type = TD_SYNC_COND;
2545 2540                  si_p->si_shared_type =
2546 2541                      (generic_so.condition.cond_type & USYNC_PROCESS);
2547 2542                  (void) memcpy(si_p->si_flags, generic_so.condition.flags.flag,
2548 2543                      sizeof (generic_so.condition.flags.flag));
2549 2544                  si_p->si_size = sizeof (generic_so.condition);
2550 2545                  si_p->si_has_waiters =
2551 2546                      (generic_so.condition.cond_waiters_user |
2552 2547                      generic_so.condition.cond_waiters_kernel)? 1 : 0;
2553 2548                  break;
2554 2549          case SEMA_MAGIC:
2555 2550                  if (trunc && ps_pdread(ph_p, sh_p->sh_unique,
2556 2551                      &generic_so.semaphore, sizeof (generic_so.semaphore))
2557 2552                      != PS_OK)
2558 2553                          return (TD_DBERR);
2559 2554                  si_p->si_type = TD_SYNC_SEMA;
2560 2555                  si_p->si_shared_type =
2561 2556                      (generic_so.semaphore.type & USYNC_PROCESS);
2562 2557                  si_p->si_state.sem_count = generic_so.semaphore.count;
2563 2558                  si_p->si_size = sizeof (generic_so.semaphore);
2564 2559                  si_p->si_has_waiters =
2565 2560                      ((lwp_sema_t *)&generic_so.semaphore)->flags[7];
2566 2561                  /* this is useless but the old interface provided it */
2567 2562                  si_p->si_data = (psaddr_t)generic_so.semaphore.count;
2568 2563                  break;
2569 2564          case RWL_MAGIC:
2570 2565          {
2571 2566                  uint32_t rwstate;
2572 2567  
2573 2568                  if (trunc && ps_pdread(ph_p, sh_p->sh_unique,
2574 2569                      &generic_so.rwlock, sizeof (generic_so.rwlock)) != PS_OK)
2575 2570                          return (TD_DBERR);
2576 2571                  si_p->si_type = TD_SYNC_RWLOCK;
2577 2572                  si_p->si_shared_type =
2578 2573                      (generic_so.rwlock.rwlock_type & USYNC_PROCESS);
2579 2574                  si_p->si_size = sizeof (generic_so.rwlock);
2580 2575  
2581 2576                  rwstate = (uint32_t)generic_so.rwlock.rwlock_readers;
2582 2577                  if (rwstate & URW_WRITE_LOCKED) {
2583 2578                          si_p->si_state.nreaders = -1;
2584 2579                          si_p->si_is_wlock = 1;
2585 2580                          si_p->si_owner.th_ta_p = sh_p->sh_ta_p;
2586 2581                          si_p->si_owner.th_unique =
2587 2582                              generic_so.rwlock.rwlock_owner;
2588 2583                          if (si_p->si_shared_type & USYNC_PROCESS)
2589 2584                                  si_p->si_ownerpid =
2590 2585                                      generic_so.rwlock.rwlock_ownerpid;
2591 2586                  } else {
2592 2587                          si_p->si_state.nreaders = (rwstate & URW_READERS_MASK);
2593 2588                  }
2594 2589                  si_p->si_has_waiters = ((rwstate & URW_HAS_WAITERS) != 0);
2595 2590  
2596 2591                  /* this is useless but the old interface provided it */
2597 2592                  si_p->si_data = (psaddr_t)generic_so.rwlock.readers;
2598 2593                  break;
2599 2594          }
2600 2595          default:
2601 2596                  return (TD_BADSH);
2602 2597          }
2603 2598  
2604 2599          si_p->si_ta_p = sh_p->sh_ta_p;
2605 2600          si_p->si_sv_addr = sh_p->sh_unique;
2606 2601          return (TD_OK);
2607 2602  }
2608 2603  
2609 2604  /*
2610 2605   * Given a synchronization handle, fill in the
2611 2606   * information for the synchronization variable into *si_p.
2612 2607   */
2613 2608  #pragma weak td_sync_get_info = __td_sync_get_info
2614 2609  td_err_e
2615 2610  __td_sync_get_info(const td_synchandle_t *sh_p, td_syncinfo_t *si_p)
2616 2611  {
2617 2612          struct ps_prochandle *ph_p;
2618 2613          td_err_e return_val;
2619 2614  
2620 2615          if (si_p == NULL)
2621 2616                  return (TD_ERR);
2622 2617          (void) memset(si_p, 0, sizeof (*si_p));
2623 2618          if ((ph_p = ph_lock_sh(sh_p, &return_val)) == NULL)
2624 2619                  return (return_val);
2625 2620          if (ps_pstop(ph_p) != PS_OK) {
2626 2621                  ph_unlock(sh_p->sh_ta_p);
2627 2622                  return (TD_DBERR);
2628 2623          }
2629 2624  
2630 2625          return_val = sync_get_info_common(sh_p, ph_p, si_p);
2631 2626  
2632 2627          (void) ps_pcontinue(ph_p);
2633 2628          ph_unlock(sh_p->sh_ta_p);
2634 2629          return (return_val);
2635 2630  }
2636 2631  
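/*
 * Editor's sketch (hypothetical): classify a sync object using
 * td_sync_get_info() above.  si_type is decoded from the magic
 * number read out of the target process.
 */
static const char *
sync_type_name(const td_synchandle_t *sh_p)
{
        td_syncinfo_t si;

        if (td_sync_get_info(sh_p, &si) != TD_OK)
                return ("unreadable");
        switch (si.si_type) {
        case TD_SYNC_MUTEX:
                return ("mutex");
        case TD_SYNC_COND:
                return ("condvar");
        case TD_SYNC_SEMA:
                return ("semaphore");
        case TD_SYNC_RWLOCK:
                return ("rwlock");
        default:
                return ("unknown");
        }
}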
2637 2632  static uint_t
2638 2633  tdb_addr_hash64(uint64_t addr)
2639 2634  {
2640 2635          uint64_t value60 = (addr >> 4);
2641 2636          uint32_t value30 = (value60 >> 30) ^ (value60 & 0x3fffffff);
2642 2637          return ((value30 >> 15) ^ (value30 & 0x7fff));
2643 2638  }
2644 2639  
2645 2640  static uint_t
2646 2641  tdb_addr_hash32(uint64_t addr)
2647 2642  {
2648 2643          uint32_t value30 = (addr >> 2);         /* 30 bits */
2649 2644          return ((value30 >> 15) ^ (value30 & 0x7fff));
2650 2645  }
2651 2646  
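/*
 * Editor's note with a worked sketch (hypothetical): both hashes
 * discard the low alignment bits of the address (four bits in the
 * 64-bit case, two in the 32-bit case) and XOR-fold what remains
 * down to a 15-bit bucket index, so the result is always in the
 * range [0, 0x7fff].
 */
static uint_t
demo_fold64(void)
{
        uint64_t addr = 0xfffffd7fff0e2f40ULL;  /* arbitrary example */
        uint64_t value60 = addr >> 4;           /* drop alignment bits */
        uint32_t value30 = (value60 >> 30) ^ (value60 & 0x3fffffff);

        /* The final fold keeps 15 bits: 0 <= ix <= 0x7fff. */
        return ((value30 >> 15) ^ (value30 & 0x7fff));
}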
2652 2647  static td_err_e
2653 2648  read_sync_stats(td_thragent_t *ta_p, psaddr_t hash_table,
2654 2649      psaddr_t sync_obj_addr, tdb_sync_stats_t *sync_stats)
2655 2650  {
2656 2651          psaddr_t next_desc;
2657 2652          uint64_t first;
2658 2653          uint_t ix;
2659 2654  
2660 2655          /*
2661 2656           * Compute the hash table index from the synch object's address.
2662 2657           */
2663 2658          if (ta_p->model == PR_MODEL_LP64)
2664 2659                  ix = tdb_addr_hash64(sync_obj_addr);
2665 2660          else
2666 2661                  ix = tdb_addr_hash32(sync_obj_addr);
2667 2662  
2668 2663          /*
2669 2664           * Get the address of the first element in the linked list.
2670 2665           */
2671 2666          if (ps_pdread(ta_p->ph_p, hash_table + ix * sizeof (uint64_t),
2672 2667              &first, sizeof (first)) != PS_OK)
2673 2668                  return (TD_DBERR);
2674 2669  
2675 2670          /*
2676 2671           * Search the linked list for an entry for the synch object.
2677 2672           */
2678 2673          for (next_desc = (psaddr_t)first; next_desc != 0;
2679 2674              next_desc = (psaddr_t)sync_stats->next) {
2680 2675                  if (ps_pdread(ta_p->ph_p, next_desc,
2681 2676                      sync_stats, sizeof (*sync_stats)) != PS_OK)
2682 2677                          return (TD_DBERR);
2683 2678                  if (sync_stats->sync_addr == sync_obj_addr)
2684 2679                          return (TD_OK);
2685 2680          }
2686 2681  
2687 2682          (void) memset(sync_stats, 0, sizeof (*sync_stats));
2688 2683          return (TD_OK);
2689 2684  }
2690 2685  
2691 2686  /*
2692 2687   * Given a synchronization handle, fill in the
2693 2688   * statistics for the synchronization variable into *ss_p.
2694 2689   */
2695 2690  #pragma weak td_sync_get_stats = __td_sync_get_stats
2696 2691  td_err_e
2697 2692  __td_sync_get_stats(const td_synchandle_t *sh_p, td_syncstats_t *ss_p)
2698 2693  {
2699 2694          struct ps_prochandle *ph_p;
2700 2695          td_thragent_t *ta_p;
2701 2696          td_err_e return_val;
2702 2697          register_sync_t enable;
2703 2698          psaddr_t hashaddr;
2704 2699          tdb_sync_stats_t sync_stats;
2705 2700          size_t ix;
2706 2701  
2707 2702          if (ss_p == NULL)
2708 2703                  return (TD_ERR);
2709 2704          (void) memset(ss_p, 0, sizeof (*ss_p));
2710 2705          if ((ph_p = ph_lock_sh(sh_p, &return_val)) == NULL)
2711 2706                  return (return_val);
2712 2707          ta_p = sh_p->sh_ta_p;
2713 2708          if (ps_pstop(ph_p) != PS_OK) {
2714 2709                  ph_unlock(ta_p);
2715 2710                  return (TD_DBERR);
2716 2711          }
2717 2712  
2718 2713          if ((return_val = sync_get_info_common(sh_p, ph_p, &ss_p->ss_info))
2719 2714              != TD_OK) {
2720 2715                  if (return_val != TD_BADSH)
2721 2716                          goto out;
2722 2717                  /* we can correct TD_BADSH */
2723 2718                  (void) memset(&ss_p->ss_info, 0, sizeof (ss_p->ss_info));
2724 2719                  ss_p->ss_info.si_ta_p = sh_p->sh_ta_p;
2725 2720                  ss_p->ss_info.si_sv_addr = sh_p->sh_unique;
2726 2721                  /* we correct si_type and si_size below */
2727 2722                  return_val = TD_OK;
2728 2723          }
2729 2724          if (ps_pdread(ph_p, ta_p->tdb_register_sync_addr,
2730 2725              &enable, sizeof (enable)) != PS_OK) {
2731 2726                  return_val = TD_DBERR;
2732 2727                  goto out;
2733 2728          }
2734 2729          if (enable != REGISTER_SYNC_ON)
2735 2730                  goto out;
2736 2731  
2737 2732          /*
2738 2733           * Get the address of the hash table in the target process.
2739 2734           */
2740 2735          if (ta_p->model == PR_MODEL_NATIVE) {
2741 2736                  if (ps_pdread(ph_p, ta_p->uberdata_addr +
2742 2737                      offsetof(uberdata_t, tdb.tdb_sync_addr_hash),
2743 2738                      &hashaddr, sizeof (hashaddr)) != PS_OK) {
2744 2739                          return_val = TD_DBERR;
2745 2740                          goto out;
2746 2741                  }
2747 2742          } else {
2748 2743  #if defined(_LP64) && defined(_SYSCALL32)
2749 2744                  caddr32_t addr;
2750 2745  
2751 2746                  if (ps_pdread(ph_p, ta_p->uberdata_addr +
2752 2747                      offsetof(uberdata32_t, tdb.tdb_sync_addr_hash),
2753 2748                      &addr, sizeof (addr)) != PS_OK) {
2754 2749                          return_val = TD_DBERR;
2755 2750                          goto out;
2756 2751                  }
2757 2752                  hashaddr = addr;
2758 2753  #else
2759 2754                  return_val = TD_ERR;
2760 2755                  goto out;
2761 2756  #endif  /* _SYSCALL32 */
2762 2757          }
2763 2758  
2764 2759          if (hashaddr == 0)
2765 2760                  return_val = TD_BADSH;
2766 2761          else
2767 2762                  return_val = read_sync_stats(ta_p, hashaddr,
2768 2763                      sh_p->sh_unique, &sync_stats);
2769 2764          if (return_val != TD_OK)
2770 2765                  goto out;
2771 2766  
2772 2767          /*
2773 2768           * We have the hash table entry.  Transfer the data to
2774 2769           * the td_syncstats_t structure provided by the caller.
2775 2770           */
2776 2771          switch (sync_stats.un.type) {
2777 2772          case TDB_MUTEX:
2778 2773          {
2779 2774                  td_mutex_stats_t *msp = &ss_p->ss_un.mutex;
2780 2775  
2781 2776                  ss_p->ss_info.si_type = TD_SYNC_MUTEX;
2782 2777                  ss_p->ss_info.si_size = sizeof (mutex_t);
2783 2778                  msp->mutex_lock =
2784 2779                      sync_stats.un.mutex.mutex_lock;
2785 2780                  msp->mutex_sleep =
2786 2781                      sync_stats.un.mutex.mutex_sleep;
2787 2782                  msp->mutex_sleep_time =
2788 2783                      sync_stats.un.mutex.mutex_sleep_time;
2789 2784                  msp->mutex_hold_time =
2790 2785                      sync_stats.un.mutex.mutex_hold_time;
2791 2786                  msp->mutex_try =
2792 2787                      sync_stats.un.mutex.mutex_try;
2793 2788                  msp->mutex_try_fail =
2794 2789                      sync_stats.un.mutex.mutex_try_fail;
2795 2790                  if (sync_stats.sync_addr >= ta_p->hash_table_addr &&
2796 2791                      (ix = sync_stats.sync_addr - ta_p->hash_table_addr)
2797 2792                      < ta_p->hash_size * sizeof (thr_hash_table_t))
2798 2793                          msp->mutex_internal =
2799 2794                              ix / sizeof (thr_hash_table_t) + 1;
2800 2795                  break;
2801 2796          }
2802 2797          case TDB_COND:
2803 2798          {
2804 2799                  td_cond_stats_t *csp = &ss_p->ss_un.cond;
2805 2800  
2806 2801                  ss_p->ss_info.si_type = TD_SYNC_COND;
2807 2802                  ss_p->ss_info.si_size = sizeof (cond_t);
2808 2803                  csp->cond_wait =
2809 2804                      sync_stats.un.cond.cond_wait;
2810 2805                  csp->cond_timedwait =
2811 2806                      sync_stats.un.cond.cond_timedwait;
2812 2807                  csp->cond_wait_sleep_time =
2813 2808                      sync_stats.un.cond.cond_wait_sleep_time;
2814 2809                  csp->cond_timedwait_sleep_time =
2815 2810                      sync_stats.un.cond.cond_timedwait_sleep_time;
2816 2811                  csp->cond_timedwait_timeout =
2817 2812                      sync_stats.un.cond.cond_timedwait_timeout;
2818 2813                  csp->cond_signal =
2819 2814                      sync_stats.un.cond.cond_signal;
2820 2815                  csp->cond_broadcast =
2821 2816                      sync_stats.un.cond.cond_broadcast;
2822 2817                  if (sync_stats.sync_addr >= ta_p->hash_table_addr &&
2823 2818                      (ix = sync_stats.sync_addr - ta_p->hash_table_addr)
2824 2819                      < ta_p->hash_size * sizeof (thr_hash_table_t))
2825 2820                          csp->cond_internal =
2826 2821                              ix / sizeof (thr_hash_table_t) + 1;
2827 2822                  break;
2828 2823          }
2829 2824          case TDB_RWLOCK:
2830 2825          {
2831 2826                  td_rwlock_stats_t *rwsp = &ss_p->ss_un.rwlock;
2832 2827  
2833 2828                  ss_p->ss_info.si_type = TD_SYNC_RWLOCK;
2834 2829                  ss_p->ss_info.si_size = sizeof (rwlock_t);
2835 2830                  rwsp->rw_rdlock =
2836 2831                      sync_stats.un.rwlock.rw_rdlock;
2837 2832                  rwsp->rw_rdlock_try =
2838 2833                      sync_stats.un.rwlock.rw_rdlock_try;
2839 2834                  rwsp->rw_rdlock_try_fail =
2840 2835                      sync_stats.un.rwlock.rw_rdlock_try_fail;
2841 2836                  rwsp->rw_wrlock =
2842 2837                      sync_stats.un.rwlock.rw_wrlock;
2843 2838                  rwsp->rw_wrlock_hold_time =
2844 2839                      sync_stats.un.rwlock.rw_wrlock_hold_time;
2845 2840                  rwsp->rw_wrlock_try =
2846 2841                      sync_stats.un.rwlock.rw_wrlock_try;
2847 2842                  rwsp->rw_wrlock_try_fail =
2848 2843                      sync_stats.un.rwlock.rw_wrlock_try_fail;
2849 2844                  break;
2850 2845          }
2851 2846          case TDB_SEMA:
2852 2847          {
2853 2848                  td_sema_stats_t *ssp = &ss_p->ss_un.sema;
2854 2849  
2855 2850                  ss_p->ss_info.si_type = TD_SYNC_SEMA;
2856 2851                  ss_p->ss_info.si_size = sizeof (sema_t);
2857 2852                  ssp->sema_wait =
2858 2853                      sync_stats.un.sema.sema_wait;
2859 2854                  ssp->sema_wait_sleep =
2860 2855                      sync_stats.un.sema.sema_wait_sleep;
2861 2856                  ssp->sema_wait_sleep_time =
2862 2857                      sync_stats.un.sema.sema_wait_sleep_time;
2863 2858                  ssp->sema_trywait =
2864 2859                      sync_stats.un.sema.sema_trywait;
2865 2860                  ssp->sema_trywait_fail =
2866 2861                      sync_stats.un.sema.sema_trywait_fail;
2867 2862                  ssp->sema_post =
2868 2863                      sync_stats.un.sema.sema_post;
2869 2864                  ssp->sema_max_count =
2870 2865                      sync_stats.un.sema.sema_max_count;
2871 2866                  ssp->sema_min_count =
2872 2867                      sync_stats.un.sema.sema_min_count;
2873 2868                  break;
2874 2869          }
2875 2870          default:
2876 2871                  return_val = TD_BADSH;
2877 2872                  break;
2878 2873          }
2879 2874  
2880 2875  out:
2881 2876          (void) ps_pcontinue(ph_p);
2882 2877          ph_unlock(ta_p);
2883 2878          return (return_val);
2884 2879  }
2885 2880  
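/*
 * Editor's sketch (hypothetical): a consumer of td_sync_get_stats()
 * above.  Counters only accumulate while tracking is enabled in the
 * target (REGISTER_SYNC_ON), which a client requests through
 * td_ta_enable_stats().  Assumes <stdio.h>.
 */
static void
show_mutex_stats(const td_synchandle_t *sh_p)
{
        td_syncstats_t ss;

        if (td_sync_get_stats(sh_p, &ss) != TD_OK ||
            ss.ss_info.si_type != TD_SYNC_MUTEX)
                return;
        (void) printf("lock %u sleep %u try-fail %u\n",
            ss.ss_un.mutex.mutex_lock,
            ss.ss_un.mutex.mutex_sleep,
            ss.ss_un.mutex.mutex_try_fail);
}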
2886 2881  /*
2887 2882   * Change the state of a synchronization variable.
2888 2883   *      1) mutex lock state set to value
2889 2884   *      2) semaphore's count set to value
2890 2885   *      3) writer's lock set by value < 0
2891 2886   *      4) reader's lock number of readers set to value >= 0
2892 2887   * Currently unused by dbx.
2893 2888   */
2894 2889  #pragma weak td_sync_setstate = __td_sync_setstate
2895 2890  td_err_e
2896 2891  __td_sync_setstate(const td_synchandle_t *sh_p, int value)
2897 2892  {
2898 2893          struct ps_prochandle *ph_p;
2899 2894          int             trunc = 0;
2900 2895          td_err_e        return_val;
2901 2896          td_so_un_t      generic_so;
2902 2897          uint32_t        *rwstate;
2903 2898  
2904 2899          if ((ph_p = ph_lock_sh(sh_p, &return_val)) == NULL)
2905 2900                  return (return_val);
2906 2901          if (ps_pstop(ph_p) != PS_OK) {
2907 2902                  ph_unlock(sh_p->sh_ta_p);
2908 2903                  return (TD_DBERR);
2909 2904          }
2910 2905  
2911 2906          /*
2912 2907           * Read the synch. variable information.
2913 2908           * First attempt to read the whole union and if that fails
2914 2909           * fall back to reading only the smallest member, the condvar.
2915 2910           */
2916 2911          if (ps_pdread(ph_p, sh_p->sh_unique, &generic_so,
2917 2912              sizeof (generic_so)) != PS_OK) {
2918 2913                  trunc = 1;
2919 2914                  if (ps_pdread(ph_p, sh_p->sh_unique, &generic_so.condition,
2920 2915                      sizeof (generic_so.condition)) != PS_OK) {
2921 2916                          (void) ps_pcontinue(ph_p);
2922 2917                          ph_unlock(sh_p->sh_ta_p);
2923 2918                          return (TD_DBERR);
2924 2919                  }
2925 2920          }
2926 2921  
2927 2922          /*
2928 2923           * Set the new value in the sync. variable: read the synch. variable
2929 2924           * information from the process, reset its value, and write it back.
2930 2925           */
2931 2926          switch (generic_so.condition.cond_magic) {
2932 2927          case MUTEX_MAGIC:
2933 2928                  if (trunc && ps_pdread(ph_p, sh_p->sh_unique,
2934 2929                      &generic_so.lock, sizeof (generic_so.lock)) != PS_OK) {
2935 2930                          return_val = TD_DBERR;
2936 2931                          break;
2937 2932                  }
2938 2933                  generic_so.lock.mutex_lockw = (uint8_t)value;
2939 2934                  if (ps_pdwrite(ph_p, sh_p->sh_unique, &generic_so.lock,
2940 2935                      sizeof (generic_so.lock)) != PS_OK)
2941 2936                          return_val = TD_DBERR;
2942 2937                  break;
2943 2938          case SEMA_MAGIC:
2944 2939                  if (trunc && ps_pdread(ph_p, sh_p->sh_unique,
2945 2940                      &generic_so.semaphore, sizeof (generic_so.semaphore))
2946 2941                      != PS_OK) {
2947 2942                          return_val = TD_DBERR;
2948 2943                          break;
2949 2944                  }
2950 2945                  generic_so.semaphore.count = value;
2951 2946                  if (ps_pdwrite(ph_p, sh_p->sh_unique, &generic_so.semaphore,
2952 2947                      sizeof (generic_so.semaphore)) != PS_OK)
2953 2948                          return_val = TD_DBERR;
2954 2949                  break;
2955 2950          case COND_MAGIC:
2956 2951                  /* Operation not supported on a condition variable */
2957 2952                  return_val = TD_ERR;
2958 2953                  break;
2959 2954          case RWL_MAGIC:
2960 2955                  if (trunc && ps_pdread(ph_p, sh_p->sh_unique,
2961 2956                      &generic_so.rwlock, sizeof (generic_so.rwlock)) != PS_OK) {
2962 2957                          return_val = TD_DBERR;
2963 2958                          break;
2964 2959                  }
2965 2960                  rwstate = (uint32_t *)&generic_so.rwlock.readers;
2966 2961                  *rwstate &= URW_HAS_WAITERS;
2967 2962                  if (value < 0)
2968 2963                          *rwstate |= URW_WRITE_LOCKED;
2969 2964                  else
2970 2965                          *rwstate |= (value & URW_READERS_MASK);
2971 2966                  if (ps_pdwrite(ph_p, sh_p->sh_unique, &generic_so.rwlock,
2972 2967                      sizeof (generic_so.rwlock)) != PS_OK)
2973 2968                          return_val = TD_DBERR;
2974 2969                  break;
2975 2970          default:
2976 2971                  /* Bad sync. object type */
2977 2972                  return_val = TD_BADSH;
2978 2973                  break;
2979 2974          }
2980 2975  
2981 2976          (void) ps_pcontinue(ph_p);
2982 2977          ph_unlock(sh_p->sh_ta_p);
2983 2978          return (return_val);
2984 2979  }
2985 2980  
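/*
 * Editor's sketch (hypothetical): per the comment above, the meaning
 * of `value' depends on the object type.  Clearing a mutex lock word
 * force-releases it; a negative value write-locks an rwlock.
 */
static td_err_e
force_unlock_mutex(const td_synchandle_t *sh_p)
{
        return (td_sync_setstate(sh_p, 0));     /* mutex_lockw = 0 */
}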
2986 2981  typedef struct {
2987 2982          td_thr_iter_f   *waiter_cb;
2988 2983          psaddr_t        sync_obj_addr;
2989 2984          uint16_t        sync_magic;
2990 2985          void            *waiter_cb_arg;
2991 2986          td_err_e        errcode;
2992 2987  } waiter_cb_ctl_t;
2993 2988  
2994 2989  static int
2995 2990  waiters_cb(const td_thrhandle_t *th_p, void *arg)
2996 2991  {
2997 2992          td_thragent_t   *ta_p = th_p->th_ta_p;
2998 2993          struct ps_prochandle *ph_p = ta_p->ph_p;
2999 2994          waiter_cb_ctl_t *wcb = arg;
3000 2995          caddr_t         wchan;
3001 2996  
3002 2997          if (ta_p->model == PR_MODEL_NATIVE) {
3003 2998                  ulwp_t *ulwp = (ulwp_t *)th_p->th_unique;
3004 2999  
3005 3000                  if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_wchan,
3006 3001                      &wchan, sizeof (wchan)) != PS_OK) {
3007 3002                          wcb->errcode = TD_DBERR;
3008 3003                          return (1);
3009 3004                  }
3010 3005          } else {
3011 3006  #if defined(_LP64) && defined(_SYSCALL32)
3012 3007                  ulwp32_t *ulwp = (ulwp32_t *)th_p->th_unique;
3013 3008                  caddr32_t wchan32;
3014 3009  
3015 3010                  if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_wchan,
3016 3011                      &wchan32, sizeof (wchan32)) != PS_OK) {
3017 3012                          wcb->errcode = TD_DBERR;
3018 3013                          return (1);
3019 3014                  }
3020 3015                  wchan = (caddr_t)(uintptr_t)wchan32;
3021 3016  #else
3022 3017                  wcb->errcode = TD_ERR;
3023 3018                  return (1);
3024 3019  #endif  /* _SYSCALL32 */
3025 3020          }
3026 3021  
3027 3022          if (wchan == NULL)
3028 3023                  return (0);
3029 3024  
3030 3025          if (wchan == (caddr_t)wcb->sync_obj_addr)
3031 3026                  return ((*wcb->waiter_cb)(th_p, wcb->waiter_cb_arg));
3032 3027  
3033 3028          return (0);
3034 3029  }
3035 3030  
3036 3031  /*
3037 3032   * For a given synchronization variable, iterate over the
3038 3033   * set of waiting threads.  The call back function is passed
3039 3034   * two parameters, a pointer to a thread handle and a pointer
3040 3035   * to extra call back data.
3041 3036   */
3042 3037  #pragma weak td_sync_waiters = __td_sync_waiters
3043 3038  td_err_e
3044 3039  __td_sync_waiters(const td_synchandle_t *sh_p, td_thr_iter_f *cb, void *cb_data)
3045 3040  {
3046 3041          struct ps_prochandle *ph_p;
3047 3042          waiter_cb_ctl_t wcb;
3048 3043          td_err_e        return_val;
3049 3044  
3050 3045          if ((ph_p = ph_lock_sh(sh_p, &return_val)) == NULL)
3051 3046                  return (return_val);
3052 3047          if (ps_pdread(ph_p,
3053 3048              (psaddr_t)&((mutex_t *)sh_p->sh_unique)->mutex_magic,
3054 3049              (caddr_t)&wcb.sync_magic, sizeof (wcb.sync_magic)) != PS_OK) {
3055 3050                  ph_unlock(sh_p->sh_ta_p);
3056 3051                  return (TD_DBERR);
3057 3052          }
3058 3053          ph_unlock(sh_p->sh_ta_p);
3059 3054  
3060 3055          switch (wcb.sync_magic) {
3061 3056          case MUTEX_MAGIC:
3062 3057          case COND_MAGIC:
3063 3058          case SEMA_MAGIC:
3064 3059          case RWL_MAGIC:
3065 3060                  break;
3066 3061          default:
3067 3062                  return (TD_BADSH);
3068 3063          }
3069 3064  
3070 3065          wcb.waiter_cb = cb;
3071 3066          wcb.sync_obj_addr = sh_p->sh_unique;
3072 3067          wcb.waiter_cb_arg = cb_data;
3073 3068          wcb.errcode = TD_OK;
3074 3069          return_val = __td_ta_thr_iter(sh_p->sh_ta_p, waiters_cb, &wcb,
3075 3070              TD_THR_SLEEP, TD_THR_LOWEST_PRIORITY,
3076 3071              TD_SIGNO_MASK, TD_THR_ANY_USER_FLAGS);
3077 3072  
3078 3073          if (return_val != TD_OK)
3079 3074                  return (return_val);
3080 3075  
3081 3076          return (wcb.errcode);
3082 3077  }
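/*
 * Editor's sketch (hypothetical): count the threads blocked on a
 * sync object via td_sync_waiters() above.  Returning 0 from the
 * td_thr_iter_f callback keeps the iteration going.
 */
static int
count_waiter(const td_thrhandle_t *th_p, void *arg)
{
        (*(int *)arg)++;
        return (0);
}

static int
nwaiters(const td_synchandle_t *sh_p)
{
        int n = 0;

        (void) td_sync_waiters(sh_p, count_waiter, &n);
        return (n);
}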
  