1 /*
   2  * CDDL HEADER START
   3  *
   4  * The contents of this file are subject to the terms of the
   5  * Common Development and Distribution License (the "License").
   6  * You may not use this file except in compliance with the License.
   7  *
   8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
   9  * or http://www.opensolaris.org/os/licensing.
  10  * See the License for the specific language governing permissions
  11  * and limitations under the License.
  12  *
  13  * When distributing Covered Code, include this CDDL HEADER in each
  14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
  15  * If applicable, add the following below this CDDL HEADER, with the
  16  * fields enclosed by brackets "[]" replaced with your own identifying
  17  * information: Portions Copyright [yyyy] [name of copyright owner]
  18  *
  19  * CDDL HEADER END
  20  */
  21 
  22 /*
  23  * Copyright (c) 1990, 2010, Oracle and/or its affiliates. All rights reserved.
  24  */
  25 
  26 #include <sys/note.h>
  27 #include <sys/types.h>
  28 #include <sys/param.h>
  29 #include <sys/systm.h>
  30 #include <sys/buf.h>
  31 #include <sys/uio.h>
  32 #include <sys/cred.h>
  33 #include <sys/poll.h>
  34 #include <sys/mman.h>
  35 #include <sys/kmem.h>
  36 #include <sys/model.h>
  37 #include <sys/file.h>
  38 #include <sys/proc.h>
  39 #include <sys/open.h>
  40 #include <sys/user.h>
  41 #include <sys/t_lock.h>
  42 #include <sys/vm.h>
  43 #include <sys/stat.h>
  44 #include <vm/hat.h>
  45 #include <vm/seg.h>
  46 #include <vm/seg_vn.h>
  47 #include <vm/seg_dev.h>
  48 #include <vm/as.h>
  49 #include <sys/cmn_err.h>
  50 #include <sys/cpuvar.h>
  51 #include <sys/debug.h>
  52 #include <sys/autoconf.h>
  53 #include <sys/sunddi.h>
  54 #include <sys/esunddi.h>
  55 #include <sys/sunndi.h>
  56 #include <sys/kstat.h>
  57 #include <sys/conf.h>
  58 #include <sys/ddi_impldefs.h>     /* include implementation structure defs */
  59 #include <sys/ndi_impldefs.h>     /* include prototypes */
  60 #include <sys/ddi_timer.h>
  61 #include <sys/hwconf.h>
  62 #include <sys/pathname.h>
  63 #include <sys/modctl.h>
  64 #include <sys/epm.h>
  65 #include <sys/devctl.h>
  66 #include <sys/callb.h>
  67 #include <sys/cladm.h>
  68 #include <sys/sysevent.h>
  69 #include <sys/dacf_impl.h>
  70 #include <sys/ddidevmap.h>
  71 #include <sys/bootconf.h>
  72 #include <sys/disp.h>
  73 #include <sys/atomic.h>
  74 #include <sys/promif.h>
  75 #include <sys/instance.h>
  76 #include <sys/sysevent/eventdefs.h>
  77 #include <sys/task.h>
  78 #include <sys/project.h>
  79 #include <sys/taskq.h>
  80 #include <sys/devpolicy.h>
  81 #include <sys/ctype.h>
  82 #include <net/if.h>
  83 #include <sys/rctl.h>
  84 #include <sys/zone.h>
  85 #include <sys/clock_impl.h>
  86 #include <sys/ddi.h>
  87 #include <sys/modhash.h>
  88 #include <sys/sunldi_impl.h>
  89 #include <sys/fs/dv_node.h>
  90 #include <sys/fs/snode.h>
  91 
  92 extern  pri_t   minclsyspri;
  93 
  94 extern  rctl_hndl_t rc_project_locked_mem;
  95 extern  rctl_hndl_t rc_zone_locked_mem;
  96 
  97 #ifdef DEBUG
  98 static int sunddi_debug = 0;
  99 #endif /* DEBUG */
 100 
 101 /* ddi_umem_unlock miscellaneous */
 102 
 103 static  void    i_ddi_umem_unlock_thread_start(void);
 104 
 105 static  kmutex_t        ddi_umem_unlock_mutex; /* unlock list mutex */
 106 static  kcondvar_t      ddi_umem_unlock_cv; /* unlock list block/unblock */
 107 static  kthread_t       *ddi_umem_unlock_thread;
 108 /*
 109  * The ddi_umem_unlock FIFO list.  NULL head pointer indicates empty list.
 110  */
 111 static  struct  ddi_umem_cookie *ddi_umem_unlock_head = NULL;
 112 static  struct  ddi_umem_cookie *ddi_umem_unlock_tail = NULL;
 113 
 114 /*
 115  * DDI(Sun) Function and flag definitions:
 116  */
 117 
 118 #if defined(__x86)
 119 /*
 120  * Used to indicate which entries were chosen from a range.
 121  */
 122 char    *chosen_reg = "chosen-reg";
 123 #endif
 124 
 125 /*
 126  * Function used to ring system console bell
 127  */
 128 void (*ddi_console_bell_func)(clock_t duration);
 129 
 130 /*
 131  * Creating register mappings and handling interrupts:
 132  */
 133 
 134 /*
 135  * Generic ddi_map: Call parent to fulfill request...
 136  */
 137 
 138 int
 139 ddi_map(dev_info_t *dp, ddi_map_req_t *mp, off_t offset,
 140     off_t len, caddr_t *addrp)
 141 {
 142         dev_info_t *pdip;
 143 
 144         ASSERT(dp);
 145         pdip = (dev_info_t *)DEVI(dp)->devi_parent;
 146         return ((DEVI(pdip)->devi_ops->devo_bus_ops->bus_map)(pdip,
 147             dp, mp, offset, len, addrp));
 148 }
 149 
 150 /*
 151  * ddi_apply_range: (Called by nexi only.)
 152  * Apply ranges in parent node dp, to child regspec rp...
 153  */
 154 
 155 int
 156 ddi_apply_range(dev_info_t *dp, dev_info_t *rdip, struct regspec *rp)
 157 {
 158         return (i_ddi_apply_range(dp, rdip, rp));
 159 }
 160 
 161 int
 162 ddi_map_regs(dev_info_t *dip, uint_t rnumber, caddr_t *kaddrp, off_t offset,
 163     off_t len)
 164 {
 165         ddi_map_req_t mr;
 166 #if defined(__x86)
 167         struct {
 168                 int     bus;
 169                 int     addr;
 170                 int     size;
 171         } reg, *reglist;
 172         uint_t  length;
 173         int     rc;
 174 
 175         /*
 176          * get the 'registers' or the 'reg' property.
 177          * We look up the reg property as an array of
 178          * int's.
 179          */
 180         rc = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip,
 181             DDI_PROP_DONTPASS, "registers", (int **)&reglist, &length);
 182         if (rc != DDI_PROP_SUCCESS)
 183                 rc = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip,
 184                     DDI_PROP_DONTPASS, "reg", (int **)&reglist, &length);
 185         if (rc == DDI_PROP_SUCCESS) {
 186                 /*
 187                  * point to the required entry.
 188                  */
 189                 reg = reglist[rnumber];
 190                 reg.addr += offset;
 191                 if (len != 0)
 192                         reg.size = len;
 193                 /*
 194                  * make a new property containing ONLY the required tuple.
 195                  */
 196                 if (ddi_prop_update_int_array(DDI_DEV_T_NONE, dip,
 197                     chosen_reg, (int *)&reg, (sizeof (reg)/sizeof (int)))
 198                     != DDI_PROP_SUCCESS) {
 199                         cmn_err(CE_WARN, "%s%d: cannot create '%s' "
 200                             "property", DEVI(dip)->devi_name,
 201                             DEVI(dip)->devi_instance, chosen_reg);
 202                 }
 203                 /*
 204                  * free the memory allocated by
 205                  * ddi_prop_lookup_int_array ().
 206                  */
 207                 ddi_prop_free((void *)reglist);
 208         }
 209 #endif
 210         mr.map_op = DDI_MO_MAP_LOCKED;
 211         mr.map_type = DDI_MT_RNUMBER;
 212         mr.map_obj.rnumber = rnumber;
 213         mr.map_prot = PROT_READ | PROT_WRITE;
 214         mr.map_flags = DDI_MF_KERNEL_MAPPING;
 215         mr.map_handlep = NULL;
 216         mr.map_vers = DDI_MAP_VERSION;
 217 
 218         /*
 219          * Call my parent to map in my regs.
 220          */
 221 
 222         return (ddi_map(dip, &mr, offset, len, kaddrp));
 223 }
 224 
 225 void
 226 ddi_unmap_regs(dev_info_t *dip, uint_t rnumber, caddr_t *kaddrp, off_t offset,
 227     off_t len)
 228 {
 229         ddi_map_req_t mr;
 230 
 231         mr.map_op = DDI_MO_UNMAP;
 232         mr.map_type = DDI_MT_RNUMBER;
 233         mr.map_flags = DDI_MF_KERNEL_MAPPING;
 234         mr.map_prot = PROT_READ | PROT_WRITE;   /* who cares? */
 235         mr.map_obj.rnumber = rnumber;
 236         mr.map_handlep = NULL;
 237         mr.map_vers = DDI_MAP_VERSION;
 238 
 239         /*
 240          * Call my parent to unmap my regs.
 241          */
 242 
 243         (void) ddi_map(dip, &mr, offset, len, kaddrp);
 244         *kaddrp = (caddr_t)0;
 245 #if defined(__x86)
 246         (void) ddi_prop_remove(DDI_DEV_T_NONE, dip, chosen_reg);
 247 #endif
 248 }
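/*
 * Usage sketch (illustrative only, not compiled here; the xx_* names and
 * softstate fields are hypothetical):  a leaf driver maps register set 0
 * at attach time and unmaps it again at detach time.
 *
 *	static int
 *	xx_map_device(dev_info_t *dip, struct xx_state *xsp)
 *	{
 *		if (ddi_map_regs(dip, 0, &xsp->xs_regs, 0, 0) != DDI_SUCCESS)
 *			return (DDI_FAILURE);
 *		return (DDI_SUCCESS);
 *	}
 *
 *	static void
 *	xx_unmap_device(dev_info_t *dip, struct xx_state *xsp)
 *	{
 *		ddi_unmap_regs(dip, 0, &xsp->xs_regs, 0, 0);
 *	}
 *
 * New drivers should normally use ddi_regs_map_setup(9F) instead, which
 * also carries device access attributes.
 */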
 249 
 250 int
 251 ddi_bus_map(dev_info_t *dip, dev_info_t *rdip, ddi_map_req_t *mp,
 252         off_t offset, off_t len, caddr_t *vaddrp)
 253 {
 254         return (i_ddi_bus_map(dip, rdip, mp, offset, len, vaddrp));
 255 }
 256 
 257 /*
 258  * nullbusmap:  The DDI default bus_map entry point for nexi
 259  *              not conforming to the reg/range paradigm (i.e. scsi, etc.)
 260  *              with no HAT/MMU layer to be programmed at this level.
 261  *
 262  *              If the call is to map by rnumber, return an error,
 263  *              otherwise pass anything else up the tree to my parent.
 264  */
 265 int
 266 nullbusmap(dev_info_t *dip, dev_info_t *rdip, ddi_map_req_t *mp,
 267         off_t offset, off_t len, caddr_t *vaddrp)
 268 {
 269         _NOTE(ARGUNUSED(rdip))
 270         if (mp->map_type == DDI_MT_RNUMBER)
 271                 return (DDI_ME_UNSUPPORTED);
 272 
 273         return (ddi_map(dip, mp, offset, len, vaddrp));
 274 }
 275 
 276 /*
 277  * ddi_rnumber_to_regspec: Not for use by leaf drivers.
 278  *                         Only for use by nexi using the reg/range paradigm.
 279  */
 280 struct regspec *
 281 ddi_rnumber_to_regspec(dev_info_t *dip, int rnumber)
 282 {
 283         return (i_ddi_rnumber_to_regspec(dip, rnumber));
 284 }
 285 
 286 
 287 /*
 288  * Note that we allow the dip to be nil because we may be called
 289  * prior even to the instantiation of the devinfo tree itself - all
 290  * regular leaf and nexus drivers should always use a non-nil dip!
 291  *
 292  * We treat peek in a somewhat cavalier fashion .. assuming that we'll
 293  * simply get a synchronous fault as soon as we touch a missing address.
 294  *
 295  * Poke is rather more carefully handled because we might poke to a write
 296  * buffer, "succeed", then only find some time later that we got an
 297  * asynchronous fault that indicated that the address we were writing to
 298  * was not really backed by hardware.
 299  */
 300 
 301 static int
 302 i_ddi_peekpoke(dev_info_t *devi, ddi_ctl_enum_t cmd, size_t size,
 303     void *addr, void *value_p)
 304 {
 305         union {
 306                 uint64_t        u64;
 307                 uint32_t        u32;
 308                 uint16_t        u16;
 309                 uint8_t         u8;
 310         } peekpoke_value;
 311 
 312         peekpoke_ctlops_t peekpoke_args;
 313         uint64_t dummy_result;
 314         int rval;
 315 
 316         /* Note: size is assumed to be correct;  it is not checked. */
 317         peekpoke_args.size = size;
 318         peekpoke_args.dev_addr = (uintptr_t)addr;
 319         peekpoke_args.handle = NULL;
 320         peekpoke_args.repcount = 1;
 321         peekpoke_args.flags = 0;
 322 
 323         if (cmd == DDI_CTLOPS_POKE) {
 324                 switch (size) {
 325                 case sizeof (uint8_t):
 326                         peekpoke_value.u8 = *(uint8_t *)value_p;
 327                         break;
 328                 case sizeof (uint16_t):
 329                         peekpoke_value.u16 = *(uint16_t *)value_p;
 330                         break;
 331                 case sizeof (uint32_t):
 332                         peekpoke_value.u32 = *(uint32_t *)value_p;
 333                         break;
 334                 case sizeof (uint64_t):
 335                         peekpoke_value.u64 = *(uint64_t *)value_p;
 336                         break;
 337                 }
 338         }
 339 
 340         peekpoke_args.host_addr = (uintptr_t)&peekpoke_value.u64;
 341 
 342         if (devi != NULL)
 343                 rval = ddi_ctlops(devi, devi, cmd, &peekpoke_args,
 344                     &dummy_result);
 345         else
 346                 rval = peekpoke_mem(cmd, &peekpoke_args);
 347 
 348         /*
 349          * A NULL value_p is permitted by ddi_peek(9F); discard the result.
 350          */
 351         if ((cmd == DDI_CTLOPS_PEEK) && (value_p != NULL)) {
 352                 switch (size) {
 353                 case sizeof (uint8_t):
 354                         *(uint8_t *)value_p = peekpoke_value.u8;
 355                         break;
 356                 case sizeof (uint16_t):
 357                         *(uint16_t *)value_p = peekpoke_value.u16;
 358                         break;
 359                 case sizeof (uint32_t):
 360                         *(uint32_t *)value_p = peekpoke_value.u32;
 361                         break;
 362                 case sizeof (uint64_t):
 363                         *(uint64_t *)value_p = peekpoke_value.u64;
 364                         break;
 365                 }
 366         }
 367 
 368         return (rval);
 369 }
 370 
 371 /*
 372  * Keep ddi_peek() and ddi_poke() in case third parties are calling them.
 373  * They shouldn't be, but the 9F man pages effectively expose them.
 374  */
 375 int
 376 ddi_peek(dev_info_t *devi, size_t size, void *addr, void *value_p)
 377 {
 378         switch (size) {
 379         case sizeof (uint8_t):
 380         case sizeof (uint16_t):
 381         case sizeof (uint32_t):
 382         case sizeof (uint64_t):
 383                 break;
 384         default:
 385                 return (DDI_FAILURE);
 386         }
 387 
 388         return (i_ddi_peekpoke(devi, DDI_CTLOPS_PEEK, size, addr, value_p));
 389 }
 390 
 391 int
 392 ddi_poke(dev_info_t *devi, size_t size, void *addr, void *value_p)
 393 {
 394         switch (size) {
 395         case sizeof (uint8_t):
 396         case sizeof (uint16_t):
 397         case sizeof (uint32_t):
 398         case sizeof (uint64_t):
 399                 break;
 400         default:
 401                 return (DDI_FAILURE);
 402         }
 403 
 404         return (i_ddi_peekpoke(devi, DDI_CTLOPS_POKE, size, addr, value_p));
 405 }
 406 
 407 int
 408 ddi_peek8(dev_info_t *dip, int8_t *addr, int8_t *val_p)
 409 {
 410         return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr,
 411             val_p));
 412 }
 413 
 414 int
 415 ddi_peek16(dev_info_t *dip, int16_t *addr, int16_t *val_p)
 416 {
 417         return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr,
 418             val_p));
 419 }
 420 
 421 int
 422 ddi_peek32(dev_info_t *dip, int32_t *addr, int32_t *val_p)
 423 {
 424         return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr,
 425             val_p));
 426 }
 427 
 428 int
 429 ddi_peek64(dev_info_t *dip, int64_t *addr, int64_t *val_p)
 430 {
 431         return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr,
 432             val_p));
 433 }
 434 
 435 
 436 /*
 437  * We need to separate the old interfaces from the new ones and leave them
 438  * in here for a while.  Previous versions of the OS defined the new
 439  * interfaces as aliases for the old interfaces.  This way we can fix
 440  * things up so that we can eventually remove these interfaces.
 441  * e.g. A 3rd party module/driver using ddi_peek8 and built against S10
 442  * or earlier will actually have a reference to ddi_peekc in the binary.
 443  */
 444 #ifdef _ILP32
 445 int
 446 ddi_peekc(dev_info_t *dip, int8_t *addr, int8_t *val_p)
 447 {
 448         return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr,
 449             val_p));
 450 }
 451 
 452 int
 453 ddi_peeks(dev_info_t *dip, int16_t *addr, int16_t *val_p)
 454 {
 455         return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr,
 456             val_p));
 457 }
 458 
 459 int
 460 ddi_peekl(dev_info_t *dip, int32_t *addr, int32_t *val_p)
 461 {
 462         return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr,
 463             val_p));
 464 }
 465 
 466 int
 467 ddi_peekd(dev_info_t *dip, int64_t *addr, int64_t *val_p)
 468 {
 469         return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr,
 470             val_p));
 471 }
 472 #endif /* _ILP32 */
 473 
 474 int
 475 ddi_poke8(dev_info_t *dip, int8_t *addr, int8_t val)
 476 {
 477         return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val));
 478 }
 479 
 480 int
 481 ddi_poke16(dev_info_t *dip, int16_t *addr, int16_t val)
 482 {
 483         return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val));
 484 }
 485 
 486 int
 487 ddi_poke32(dev_info_t *dip, int32_t *addr, int32_t val)
 488 {
 489         return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val));
 490 }
 491 
 492 int
 493 ddi_poke64(dev_info_t *dip, int64_t *addr, int64_t val)
 494 {
 495         return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val));
 496 }
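/*
 * Usage sketch (illustrative only; xsp->xs_regs is a hypothetical,
 * already-mapped register address):  probing a register that may not be
 * backed by hardware.  As noted above, a peek faults synchronously,
 * whereas a failed poke may only surface later as an asynchronous fault.
 *
 *	int32_t csr;
 *
 *	if (ddi_peek32(dip, (int32_t *)xsp->xs_regs, &csr) != DDI_SUCCESS) {
 *		cmn_err(CE_WARN, "xx%d: device not responding",
 *		    ddi_get_instance(dip));
 *		return (DDI_FAILURE);
 *	}
 */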
 497 
 498 /*
 499  * We need to separate the old interfaces from the new ones and leave them
 500  * in here for a while.  Previous versions of the OS defined the new
 501  * interfaces as aliases for the old interfaces.  This way we can fix
 502  * things up so that we can eventually remove these interfaces.
 503  * e.g. A 3rd party module/driver using ddi_poke8 and built against S10
 504  * or earlier will actually have a reference to ddi_pokec in the binary.
 505  */
 506 #ifdef _ILP32
 507 int
 508 ddi_pokec(dev_info_t *dip, int8_t *addr, int8_t val)
 509 {
 510         return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val));
 511 }
 512 
 513 int
 514 ddi_pokes(dev_info_t *dip, int16_t *addr, int16_t val)
 515 {
 516         return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val));
 517 }
 518 
 519 int
 520 ddi_pokel(dev_info_t *dip, int32_t *addr, int32_t val)
 521 {
 522         return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val));
 523 }
 524 
 525 int
 526 ddi_poked(dev_info_t *dip, int64_t *addr, int64_t val)
 527 {
 528         return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val));
 529 }
 530 #endif /* _ILP32 */
 531 
 532 /*
 533  * ddi_peekpokeio() is used primarily by the mem drivers for moving
 534  * data to and from uio structures via peek and poke.  Note that we
 535  * use the internal i_ddi_peekpoke() path to make this go
 536  * slightly faster, avoiding the extra call overhead.
 537  */
 538 int
 539 ddi_peekpokeio(dev_info_t *devi, struct uio *uio, enum uio_rw rw,
 540     caddr_t addr, size_t len, uint_t xfersize)
 541 {
 542         int64_t ibuffer;
 543         int8_t w8;
 544         size_t sz;
 545         int o;
 546 
 547         if (xfersize > sizeof (long))
 548                 xfersize = sizeof (long);
 549 
 550         while (len != 0) {
 551                 if ((len | (uintptr_t)addr) & 1) {
 552                         sz = sizeof (int8_t);
 553                         if (rw == UIO_WRITE) {
 554                                 if ((o = uwritec(uio)) == -1)
 555                                         return (DDI_FAILURE);
 556                                 if (ddi_poke8(devi, (int8_t *)addr,
 557                                     (int8_t)o) != DDI_SUCCESS)
 558                                         return (DDI_FAILURE);
 559                         } else {
 560                                 if (i_ddi_peekpoke(devi, DDI_CTLOPS_PEEK, sz,
 561                                     (int8_t *)addr, &w8) != DDI_SUCCESS)
 562                                         return (DDI_FAILURE);
 563                                 if (ureadc(w8, uio))
 564                                         return (DDI_FAILURE);
 565                         }
 566                 } else {
 567                         switch (xfersize) {
 568                         case sizeof (int64_t):
 569                                 if (((len | (uintptr_t)addr) &
 570                                     (sizeof (int64_t) - 1)) == 0) {
 571                                         sz = xfersize;
 572                                         break;
 573                                 }
 574                                 /*FALLTHROUGH*/
 575                         case sizeof (int32_t):
 576                                 if (((len | (uintptr_t)addr) &
 577                                     (sizeof (int32_t) - 1)) == 0) {
 578                                         sz = xfersize;
 579                                         break;
 580                                 }
 581                                 /*FALLTHROUGH*/
 582                         default:
 583                                 /*
 584                                  * This still assumes that we might have an
 585                                  * I/O bus out there that permits 16-bit
 586                                  * transfers (and that it would be upset by
 587                                  * 32-bit transfers from such locations).
 588                                  */
 589                                 sz = sizeof (int16_t);
 590                                 break;
 591                         }
 592 
 593                         if (rw == UIO_READ) {
 594                                 if (i_ddi_peekpoke(devi, DDI_CTLOPS_PEEK, sz,
 595                                     addr, &ibuffer) != DDI_SUCCESS)
 596                                         return (DDI_FAILURE);
 597                         }
 598 
 599                         if (uiomove(&ibuffer, sz, rw, uio))
 600                                 return (DDI_FAILURE);
 601 
 602                         if (rw == UIO_WRITE) {
 603                                 if (i_ddi_peekpoke(devi, DDI_CTLOPS_POKE, sz,
 604                                     addr, &ibuffer) != DDI_SUCCESS)
 605                                         return (DDI_FAILURE);
 606                         }
 607                 }
 608                 addr += sz;
 609                 len -= sz;
 610         }
 611         return (DDI_SUCCESS);
 612 }
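/*
 * Usage sketch (illustrative only; xx_dip and xx_base are hypothetical):
 * a mem-style driver's read(9E) routine can hand the whole transfer to
 * ddi_peekpokeio(), which picks a safe transfer size for each chunk.
 *
 *	static int
 *	xx_read(dev_t dev, struct uio *uio, cred_t *credp)
 *	{
 *		caddr_t addr = xx_base + uio->uio_offset;
 *		size_t len = uio->uio_resid;
 *
 *		if (ddi_peekpokeio(xx_dip, uio, UIO_READ, addr, len,
 *		    sizeof (int)) != DDI_SUCCESS)
 *			return (EFAULT);
 *		return (0);
 *	}
 */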
 613 
 614 /*
 615  * These routines are used by drivers that do layered ioctls.
 616  * On sparc, they're implemented in assembler to avoid spilling
 617  * register windows in the common (copyin) case.
 618  */
 619 #if !defined(__sparc)
 620 int
 621 ddi_copyin(const void *buf, void *kernbuf, size_t size, int flags)
 622 {
 623         if (flags & FKIOCTL)
 624                 return (kcopy(buf, kernbuf, size) ? -1 : 0);
 625         return (copyin(buf, kernbuf, size));
 626 }
 627 
 628 int
 629 ddi_copyout(const void *buf, void *kernbuf, size_t size, int flags)
 630 {
 631         if (flags & FKIOCTL)
 632                 return (kcopy(buf, kernbuf, size) ? -1 : 0);
 633         return (copyout(buf, kernbuf, size));
 634 }
 635 #endif  /* !__sparc */
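/*
 * Usage sketch (illustrative only; the xx_* names are hypothetical):
 * an ioctl(9E) handler that passes its mode flags through to
 * ddi_copyin()/ddi_copyout().  This is what makes layered (kernel
 * internal) ioctls work: when FKIOCTL is set, the "user" address is
 * really a kernel address and must be kcopy()ed, not copyin()ed.
 *
 *	static int
 *	xx_ioctl(dev_t dev, int cmd, intptr_t arg, int mode,
 *	    cred_t *crp, int *rvalp)
 *	{
 *		struct xx_args xa;
 *
 *		if (ddi_copyin((void *)arg, &xa, sizeof (xa), mode) != 0)
 *			return (EFAULT);
 *		...
 *	}
 */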
 636 
 637 /*
 638  * Conversions in nexus pagesize units.  We don't duplicate the
 639  * 'nil dip' semantics of peek/poke because btopr/btop/ptob are DDI/DKI
 640  * routines anyway.
 641  */
 642 unsigned long
 643 ddi_btop(dev_info_t *dip, unsigned long bytes)
 644 {
 645         unsigned long pages;
 646 
 647         (void) ddi_ctlops(dip, dip, DDI_CTLOPS_BTOP, &bytes, &pages);
 648         return (pages);
 649 }
 650 
 651 unsigned long
 652 ddi_btopr(dev_info_t *dip, unsigned long bytes)
 653 {
 654         unsigned long pages;
 655 
 656         (void) ddi_ctlops(dip, dip, DDI_CTLOPS_BTOPR, &bytes, &pages);
 657         return (pages);
 658 }
 659 
 660 unsigned long
 661 ddi_ptob(dev_info_t *dip, unsigned long pages)
 662 {
 663         unsigned long bytes;
 664 
 665         (void) ddi_ctlops(dip, dip, DDI_CTLOPS_PTOB, &pages, &bytes);
 666         return (bytes);
 667 }
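/*
 * For example (sketch only), rounding a transfer length up to whole pages
 * as seen by the parent nexus, then converting back to bytes, yields a
 * byte count that is always >= the original length:
 *
 *	unsigned long npages = ddi_btopr(dip, (unsigned long)len);
 *	unsigned long nbytes = ddi_ptob(dip, npages);
 */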
 668 
 669 unsigned int
 670 ddi_enter_critical(void)
 671 {
 672         return ((uint_t)spl7());
 673 }
 674 
 675 void
 676 ddi_exit_critical(unsigned int spl)
 677 {
 678         splx((int)spl);
 679 }
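/*
 * Usage sketch: the value returned by ddi_enter_critical() must be passed
 * back to ddi_exit_critical(), and the code in between must be short and
 * must not block:
 *
 *	unsigned int s;
 *
 *	s = ddi_enter_critical();
 *	...a few time-critical instructions...
 *	ddi_exit_critical(s);
 */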
 680 
 681 /*
 682  * Nexus ctlops punter
 683  */
 684 
 685 #if !defined(__sparc)
 686 /*
 687  * Request bus_ctl parent to handle a bus_ctl request
 688  *
 689  * (The sparc version is in sparc_ddi.s)
 690  */
 691 int
 692 ddi_ctlops(dev_info_t *d, dev_info_t *r, ddi_ctl_enum_t op, void *a, void *v)
 693 {
 694         int (*fp)();
 695 
 696         if (!d || !r)
 697                 return (DDI_FAILURE);
 698 
 699         if ((d = (dev_info_t *)DEVI(d)->devi_bus_ctl) == NULL)
 700                 return (DDI_FAILURE);
 701 
 702         fp = DEVI(d)->devi_ops->devo_bus_ops->bus_ctl;
 703         return ((*fp)(d, r, op, a, v));
 704 }
 705 
 706 #endif
 707 
 708 /*
 709  * DMA/DVMA setup
 710  */
 711 
 712 #if defined(__sparc)
 713 static ddi_dma_lim_t standard_limits = {
 714         (uint_t)0,      /* addr_t dlim_addr_lo */
 715         (uint_t)-1,     /* addr_t dlim_addr_hi */
 716         (uint_t)-1,     /* uint_t dlim_cntr_max */
 717         (uint_t)1,      /* uint_t dlim_burstsizes */
 718         (uint_t)1,      /* uint_t dlim_minxfer */
 719         0               /* uint_t dlim_dmaspeed */
 720 };
 721 #elif defined(__x86)
 722 static ddi_dma_lim_t standard_limits = {
 723         (uint_t)0,              /* addr_t dlim_addr_lo */
 724         (uint_t)0xffffff,       /* addr_t dlim_addr_hi */
 725         (uint_t)0,              /* uint_t dlim_cntr_max */
 726         (uint_t)0x00000001,     /* uint_t dlim_burstsizes */
 727         (uint_t)DMA_UNIT_8,     /* uint_t dlim_minxfer */
 728         (uint_t)0,              /* uint_t dlim_dmaspeed */
 729         ((uint_t)0x86 << 24) + 0, /* uint_t dlim_version */
 730         (uint_t)0xffff,         /* uint_t dlim_adreg_max */
 731         (uint_t)0xffff,         /* uint_t dlim_ctreg_max */
 732         (uint_t)512,            /* uint_t dlim_granular */
 733         (int)1,                 /* int dlim_sgllen */
 734         (uint_t)0xffffffff      /* uint_t dlim_reqsizes */
 735 };
 736 
 737 #endif
 738 
 739 int
 740 ddi_dma_setup(dev_info_t *dip, struct ddi_dma_req *dmareqp,
 741     ddi_dma_handle_t *handlep)
 742 {
 743         int (*funcp)() = ddi_dma_map;
 744         struct bus_ops *bop;
 745 #if defined(__sparc)
 746         auto ddi_dma_lim_t dma_lim;
 747 
 748         if (dmareqp->dmar_limits == (ddi_dma_lim_t *)0) {
 749                 dma_lim = standard_limits;
 750         } else {
 751                 dma_lim = *dmareqp->dmar_limits;
 752         }
 753         dmareqp->dmar_limits = &dma_lim;
 754 #endif
 755 #if defined(__x86)
 756         if (dmareqp->dmar_limits == (ddi_dma_lim_t *)0)
 757                 return (DDI_FAILURE);
 758 #endif
 759 
 760         /*
 761          * Handle the case that the requester is both a leaf
 762          * and a nexus driver simultaneously by calling the
 763          * requester's bus_dma_map function directly instead
 764          * of ddi_dma_map.
 765          */
 766         bop = DEVI(dip)->devi_ops->devo_bus_ops;
 767         if (bop && bop->bus_dma_map)
 768                 funcp = bop->bus_dma_map;
 769         return ((*funcp)(dip, dip, dmareqp, handlep));
 770 }
 771 
 772 int
 773 ddi_dma_addr_setup(dev_info_t *dip, struct as *as, caddr_t addr, size_t len,
 774     uint_t flags, int (*waitfp)(), caddr_t arg,
 775     ddi_dma_lim_t *limits, ddi_dma_handle_t *handlep)
 776 {
 777         int (*funcp)() = ddi_dma_map;
 778         ddi_dma_lim_t dma_lim;
 779         struct ddi_dma_req dmareq;
 780         struct bus_ops *bop;
 781 
 782         if (len == 0) {
 783                 return (DDI_DMA_NOMAPPING);
 784         }
 785         if (limits == (ddi_dma_lim_t *)0) {
 786                 dma_lim = standard_limits;
 787         } else {
 788                 dma_lim = *limits;
 789         }
 790         dmareq.dmar_limits = &dma_lim;
 791         dmareq.dmar_flags = flags;
 792         dmareq.dmar_fp = waitfp;
 793         dmareq.dmar_arg = arg;
 794         dmareq.dmar_object.dmao_size = len;
 795         dmareq.dmar_object.dmao_type = DMA_OTYP_VADDR;
 796         dmareq.dmar_object.dmao_obj.virt_obj.v_as = as;
 797         dmareq.dmar_object.dmao_obj.virt_obj.v_addr = addr;
 798         dmareq.dmar_object.dmao_obj.virt_obj.v_priv = NULL;
 799 
 800         /*
 801          * Handle the case that the requester is both a leaf
 802          * and a nexus driver simultaneously by calling the
 803          * requester's bus_dma_map function directly instead
 804          * of ddi_dma_map.
 805          */
 806         bop = DEVI(dip)->devi_ops->devo_bus_ops;
 807         if (bop && bop->bus_dma_map)
 808                 funcp = bop->bus_dma_map;
 809 
 810         return ((*funcp)(dip, dip, &dmareq, handlep));
 811 }
 812 
 813 int
 814 ddi_dma_buf_setup(dev_info_t *dip, struct buf *bp, uint_t flags,
 815     int (*waitfp)(), caddr_t arg, ddi_dma_lim_t *limits,
 816     ddi_dma_handle_t *handlep)
 817 {
 818         int (*funcp)() = ddi_dma_map;
 819         ddi_dma_lim_t dma_lim;
 820         struct ddi_dma_req dmareq;
 821         struct bus_ops *bop;
 822 
 823         if (limits == (ddi_dma_lim_t *)0) {
 824                 dma_lim = standard_limits;
 825         } else {
 826                 dma_lim = *limits;
 827         }
 828         dmareq.dmar_limits = &dma_lim;
 829         dmareq.dmar_flags = flags;
 830         dmareq.dmar_fp = waitfp;
 831         dmareq.dmar_arg = arg;
 832         dmareq.dmar_object.dmao_size = (uint_t)bp->b_bcount;
 833 
 834         if (bp->b_flags & B_PAGEIO) {
 835                 dmareq.dmar_object.dmao_type = DMA_OTYP_PAGES;
 836                 dmareq.dmar_object.dmao_obj.pp_obj.pp_pp = bp->b_pages;
 837                 dmareq.dmar_object.dmao_obj.pp_obj.pp_offset =
 838                     (uint_t)(((uintptr_t)bp->b_un.b_addr) & MMU_PAGEOFFSET);
 839         } else {
 840                 dmareq.dmar_object.dmao_type = DMA_OTYP_BUFVADDR;
 841                 dmareq.dmar_object.dmao_obj.virt_obj.v_addr = bp->b_un.b_addr;
 842                 if (bp->b_flags & B_SHADOW) {
 843                         dmareq.dmar_object.dmao_obj.virt_obj.v_priv =
 844                             bp->b_shadow;
 845                 } else {
 846                         dmareq.dmar_object.dmao_obj.virt_obj.v_priv = NULL;
 847                 }
 848 
 849                 /*
 850                  * If the buffer has no proc pointer, or the proc
 851                  * struct has the kernel address space, or the buffer has
 852                  * been marked B_REMAPPED (meaning that it is now
 853                  * mapped into the kernel's address space), then
 854                  * the address space is kas (kernel address space).
 855                  */
 856                 if ((bp->b_proc == NULL) || (bp->b_proc->p_as == &kas) ||
 857                     (bp->b_flags & B_REMAPPED)) {
 858                         dmareq.dmar_object.dmao_obj.virt_obj.v_as = 0;
 859                 } else {
 860                         dmareq.dmar_object.dmao_obj.virt_obj.v_as =
 861                             bp->b_proc->p_as;
 862                 }
 863         }
 864 
 865         /*
 866          * Handle the case that the requester is both a leaf
 867          * and a nexus driver simultaneously by calling the
 868          * requester's bus_dma_map function directly instead
 869          * of ddi_dma_map.
 870          */
 871         bop = DEVI(dip)->devi_ops->devo_bus_ops;
 872         if (bop && bop->bus_dma_map)
 873                 funcp = bop->bus_dma_map;
 874 
 875         return ((*funcp)(dip, dip, &dmareq, handlep));
 876 }
 877 
 878 #if !defined(__sparc)
 879 /*
 880  * Request bus_dma_ctl parent to fiddle with a dma request.
 881  *
 882  * (The sparc version is in sparc_subr.s)
 883  */
 884 int
 885 ddi_dma_mctl(dev_info_t *dip, dev_info_t *rdip,
 886     ddi_dma_handle_t handle, enum ddi_dma_ctlops request,
 887     off_t *offp, size_t *lenp, caddr_t *objp, uint_t flags)
 888 {
 889         int (*fp)();
 890 
 891         if (dip != ddi_root_node())
 892                 dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_ctl;
 893         fp = DEVI(dip)->devi_ops->devo_bus_ops->bus_dma_ctl;
 894         return ((*fp) (dip, rdip, handle, request, offp, lenp, objp, flags));
 895 }
 896 #endif
 897 
 898 /*
 899  * For all DMA control functions, call the DMA control
 900  * routine and return status.
 901  *
 902  * We simply assume that the parent is the node to be called.
 903  * If a nexus driver, or a thread outside the framework of a
 904  * nexus or leaf driver, calls these functions, it is up to the
 905  * caller to deal with the fact that the parent's
 906  * bus_dma_ctl function will be the first one called.
 907  */
 908 
 909 #define HD      ((ddi_dma_impl_t *)h)->dmai_rdip
 910 
 911 int
 912 ddi_dma_kvaddrp(ddi_dma_handle_t h, off_t off, size_t len, caddr_t *kp)
 913 {
 914         return (ddi_dma_mctl(HD, HD, h, DDI_DMA_KVADDR, &off, &len, kp, 0));
 915 }
 916 
 917 int
 918 ddi_dma_htoc(ddi_dma_handle_t h, off_t o, ddi_dma_cookie_t *c)
 919 {
 920         return (ddi_dma_mctl(HD, HD, h, DDI_DMA_HTOC, &o, 0, (caddr_t *)c, 0));
 921 }
 922 
 923 int
 924 ddi_dma_coff(ddi_dma_handle_t h, ddi_dma_cookie_t *c, off_t *o)
 925 {
 926         return (ddi_dma_mctl(HD, HD, h, DDI_DMA_COFF,
 927             (off_t *)c, 0, (caddr_t *)o, 0));
 928 }
 929 
 930 int
 931 ddi_dma_movwin(ddi_dma_handle_t h, off_t *o, size_t *l, ddi_dma_cookie_t *c)
 932 {
 933         return (ddi_dma_mctl(HD, HD, h, DDI_DMA_MOVWIN, o,
 934             l, (caddr_t *)c, 0));
 935 }
 936 
 937 int
 938 ddi_dma_curwin(ddi_dma_handle_t h, off_t *o, size_t *l)
 939 {
 940         if ((((ddi_dma_impl_t *)h)->dmai_rflags & DDI_DMA_PARTIAL) == 0)
 941                 return (DDI_FAILURE);
 942         return (ddi_dma_mctl(HD, HD, h, DDI_DMA_REPWIN, o, l, 0, 0));
 943 }
 944 
 945 int
 946 ddi_dma_nextwin(ddi_dma_handle_t h, ddi_dma_win_t win,
 947     ddi_dma_win_t *nwin)
 948 {
 949         return (ddi_dma_mctl(HD, HD, h, DDI_DMA_NEXTWIN, (off_t *)&win, 0,
 950             (caddr_t *)nwin, 0));
 951 }
 952 
 953 int
 954 ddi_dma_nextseg(ddi_dma_win_t win, ddi_dma_seg_t seg, ddi_dma_seg_t *nseg)
 955 {
 956         ddi_dma_handle_t h = (ddi_dma_handle_t)win;
 957 
 958         return (ddi_dma_mctl(HD, HD, h, DDI_DMA_NEXTSEG, (off_t *)&win,
 959             (size_t *)&seg, (caddr_t *)nseg, 0));
 960 }
 961 
 962 #if (defined(__i386) && !defined(__amd64)) || defined(__sparc)
 963 /*
 964  * This routine is Obsolete and should be removed from ALL architectures
 965  * in a future release of Solaris.
 966  *
 967  * It is deliberately NOT ported to amd64; please fix the code that
 968  * depends on this routine to use ddi_dma_nextcookie(9F).
 969  *
 970  * NOTE: even though the pointer-through-a-32-bit-param issue has been fixed
 971  * (as a side effect of some other cleanup), we're still not going to support
 972  * this interface on amd64.
 973  */
 974 int
 975 ddi_dma_segtocookie(ddi_dma_seg_t seg, off_t *o, off_t *l,
 976     ddi_dma_cookie_t *cookiep)
 977 {
 978         ddi_dma_handle_t h = (ddi_dma_handle_t)seg;
 979 
 980         return (ddi_dma_mctl(HD, HD, h, DDI_DMA_SEGTOC, o, (size_t *)l,
 981             (caddr_t *)cookiep, 0));
 982 }
 983 #endif  /* (__i386 && !__amd64) || __sparc */
 984 
 985 #if !defined(__sparc)
 986 
 987 /*
 988  * The SPARC versions of these routines are done in assembler to
 989  * save register windows, so they're in sparc_subr.s.
 990  */
 991 
 992 int
 993 ddi_dma_map(dev_info_t *dip, dev_info_t *rdip,
 994         struct ddi_dma_req *dmareqp, ddi_dma_handle_t *handlep)
 995 {
 996         int (*funcp)(dev_info_t *, dev_info_t *, struct ddi_dma_req *,
 997             ddi_dma_handle_t *);
 998 
 999         if (dip != ddi_root_node())
1000                 dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_map;
1001 
1002         funcp = DEVI(dip)->devi_ops->devo_bus_ops->bus_dma_map;
1003         return ((*funcp)(dip, rdip, dmareqp, handlep));
1004 }
1005 
1006 int
1007 ddi_dma_allochdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_attr_t *attr,
1008     int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *handlep)
1009 {
1010         int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_attr_t *,
1011             int (*)(caddr_t), caddr_t, ddi_dma_handle_t *);
1012 
1013         if (dip != ddi_root_node())
1014                 dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_allochdl;
1015 
1016         funcp = DEVI(dip)->devi_ops->devo_bus_ops->bus_dma_allochdl;
1017         return ((*funcp)(dip, rdip, attr, waitfp, arg, handlep));
1018 }
1019 
1020 int
1021 ddi_dma_freehdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handlep)
1022 {
1023         int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t);
1024 
1025         if (dip != ddi_root_node())
1026                 dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_allochdl;
1027 
1028         funcp = DEVI(dip)->devi_ops->devo_bus_ops->bus_dma_freehdl;
1029         return ((*funcp)(dip, rdip, handlep));
1030 }
1031 
1032 int
1033 ddi_dma_bindhdl(dev_info_t *dip, dev_info_t *rdip,
1034     ddi_dma_handle_t handle, struct ddi_dma_req *dmareq,
1035     ddi_dma_cookie_t *cp, uint_t *ccountp)
1036 {
1037         int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t,
1038             struct ddi_dma_req *, ddi_dma_cookie_t *, uint_t *);
1039 
1040         if (dip != ddi_root_node())
1041                 dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_bindhdl;
1042 
1043         funcp = DEVI(dip)->devi_ops->devo_bus_ops->bus_dma_bindhdl;
1044         return ((*funcp)(dip, rdip, handle, dmareq, cp, ccountp));
1045 }
1046 
1047 int
1048 ddi_dma_unbindhdl(dev_info_t *dip, dev_info_t *rdip,
1049     ddi_dma_handle_t handle)
1050 {
1051         int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t);
1052 
1053         if (dip != ddi_root_node())
1054                 dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_unbindhdl;
1055 
1056         funcp = DEVI(dip)->devi_ops->devo_bus_ops->bus_dma_unbindhdl;
1057         return ((*funcp)(dip, rdip, handle));
1058 }
1059 
1060 
1061 int
1062 ddi_dma_flush(dev_info_t *dip, dev_info_t *rdip,
1063     ddi_dma_handle_t handle, off_t off, size_t len,
1064     uint_t cache_flags)
1065 {
1066         int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t,
1067             off_t, size_t, uint_t);
1068 
1069         if (dip != ddi_root_node())
1070                 dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_flush;
1071 
1072         funcp = DEVI(dip)->devi_ops->devo_bus_ops->bus_dma_flush;
1073         return ((*funcp)(dip, rdip, handle, off, len, cache_flags));
1074 }
1075 
1076 int
1077 ddi_dma_win(dev_info_t *dip, dev_info_t *rdip,
1078     ddi_dma_handle_t handle, uint_t win, off_t *offp,
1079     size_t *lenp, ddi_dma_cookie_t *cookiep, uint_t *ccountp)
1080 {
1081         int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t,
1082             uint_t, off_t *, size_t *, ddi_dma_cookie_t *, uint_t *);
1083 
1084         if (dip != ddi_root_node())
1085                 dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_win;
1086 
1087         funcp = DEVI(dip)->devi_ops->devo_bus_ops->bus_dma_win;
1088         return ((*funcp)(dip, rdip, handle, win, offp, lenp,
1089             cookiep, ccountp));
1090 }
1091 
1092 int
1093 ddi_dma_sync(ddi_dma_handle_t h, off_t o, size_t l, uint_t whom)
1094 {
1095         ddi_dma_impl_t *hp = (ddi_dma_impl_t *)h;
1096         dev_info_t *dip, *rdip;
1097         int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t, off_t,
1098             size_t, uint_t);
1099 
1100         /*
1101          * The DMA nexus driver will set DMP_NOSYNC if the
1102          * platform does not require any sync operation; for
1103          * example, if the memory is uncached, or is consistent
1104          * with no I/O write buffers involved.
1105          */
1106         if ((hp->dmai_rflags & DMP_NOSYNC) == DMP_NOSYNC)
1107                 return (DDI_SUCCESS);
1108 
1109         dip = rdip = hp->dmai_rdip;
1110         if (dip != ddi_root_node())
1111                 dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_flush;
1112         funcp = DEVI(dip)->devi_ops->devo_bus_ops->bus_dma_flush;
1113         return ((*funcp)(dip, rdip, h, o, l, whom));
1114 }
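/*
 * Usage sketch:  after a device-to-memory transfer completes, and before
 * the CPU looks at the data, the handle is synchronized for the kernel;
 * an offset and length of 0 sync the entire object:
 *
 *	(void) ddi_dma_sync(handle, 0, 0, DDI_DMA_SYNC_FORKERNEL);
 */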
1115 
1116 int
1117 ddi_dma_unbind_handle(ddi_dma_handle_t h)
1118 {
1119         ddi_dma_impl_t *hp = (ddi_dma_impl_t *)h;
1120         dev_info_t *dip, *rdip;
1121         int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t);
1122 
1123         dip = rdip = hp->dmai_rdip;
1124         if (dip != ddi_root_node())
1125                 dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_unbindhdl;
1126         funcp = DEVI(rdip)->devi_bus_dma_unbindfunc;
1127         return ((*funcp)(dip, rdip, h));
1128 }
1129 
1130 #endif  /* !__sparc */
1131 
1132 int
1133 ddi_dma_free(ddi_dma_handle_t h)
1134 {
1135         return (ddi_dma_mctl(HD, HD, h, DDI_DMA_FREE, 0, 0, 0, 0));
1136 }
1137 
1138 int
1139 ddi_iopb_alloc(dev_info_t *dip, ddi_dma_lim_t *limp, uint_t len, caddr_t *iopbp)
1140 {
1141         ddi_dma_lim_t defalt;
1142         size_t size = len;
1143 
1144         if (!limp) {
1145                 defalt = standard_limits;
1146                 limp = &defalt;
1147         }
1148         return (i_ddi_mem_alloc_lim(dip, limp, size, 0, 0, 0,
1149             iopbp, NULL, NULL));
1150 }
1151 
1152 void
1153 ddi_iopb_free(caddr_t iopb)
1154 {
1155         i_ddi_mem_free(iopb, NULL);
1156 }
1157 
1158 int
1159 ddi_mem_alloc(dev_info_t *dip, ddi_dma_lim_t *limits, uint_t length,
1160         uint_t flags, caddr_t *kaddrp, uint_t *real_length)
1161 {
1162         ddi_dma_lim_t defalt;
1163         size_t size = length;
1164 
1165         if (!limits) {
1166                 defalt = standard_limits;
1167                 limits = &defalt;
1168         }
1169         return (i_ddi_mem_alloc_lim(dip, limits, size, flags & 0x1,
1170             1, 0, kaddrp, real_length, NULL));
1171 }
1172 
1173 void
1174 ddi_mem_free(caddr_t kaddr)
1175 {
1176         i_ddi_mem_free(kaddr, NULL);
1177 }
1178 
1179 /*
1180  * DMA attributes, alignment, burst sizes, and transfer minimums
1181  */
1182 int
1183 ddi_dma_get_attr(ddi_dma_handle_t handle, ddi_dma_attr_t *attrp)
1184 {
1185         ddi_dma_impl_t *dimp = (ddi_dma_impl_t *)handle;
1186 
1187         if (attrp == NULL)
1188                 return (DDI_FAILURE);
1189         *attrp = dimp->dmai_attr;
1190         return (DDI_SUCCESS);
1191 }
1192 
1193 int
1194 ddi_dma_burstsizes(ddi_dma_handle_t handle)
1195 {
1196         ddi_dma_impl_t *dimp = (ddi_dma_impl_t *)handle;
1197 
1198         if (!dimp)
1199                 return (0);
1200         else
1201                 return (dimp->dmai_burstsizes);
1202 }
1203 
1204 int
1205 ddi_dma_devalign(ddi_dma_handle_t handle, uint_t *alignment, uint_t *mineffect)
1206 {
1207         ddi_dma_impl_t *dimp = (ddi_dma_impl_t *)handle;
1208 
1209         if (!dimp || !alignment || !mineffect)
1210                 return (DDI_FAILURE);
1211         if (!(dimp->dmai_rflags & DDI_DMA_SBUS_64BIT)) {
1212                 *alignment = 1 << ddi_ffs(dimp->dmai_burstsizes);
1213         } else {
1214                 if (dimp->dmai_burstsizes & 0xff0000) {
1215                         *alignment = 1 << ddi_ffs(dimp->dmai_burstsizes >> 16);
1216                 } else {
1217                         *alignment = 1 << ddi_ffs(dimp->dmai_burstsizes);
1218                 }
1219         }
1220         *mineffect = dimp->dmai_minxfer;
1221         return (DDI_SUCCESS);
1222 }
1223 
1224 int
1225 ddi_iomin(dev_info_t *a, int i, int stream)
1226 {
1227         int r;
1228 
1229         /*
1230          * Make sure that the initial value is sane
1231          */
1232         if (i & (i - 1))
1233                 return (0);
1234         if (i == 0)
1235                 i = (stream) ? 4 : 1;
1236 
1237         r = ddi_ctlops(a, a,
1238             DDI_CTLOPS_IOMIN, (void *)(uintptr_t)stream, (void *)&i);
1239         if (r != DDI_SUCCESS || (i & (i - 1)))
1240                 return (0);
1241         return (i);
1242 }
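/*
 * For example (sketch only; XX_DEFAULT_MINXFER is hypothetical), a driver
 * can ask its parent nexus for the minimum streaming transfer size it
 * should use, falling back to its own default when the nexus cannot say:
 *
 *	int minxfer;
 *
 *	if ((minxfer = ddi_iomin(dip, 0, 1)) == 0)
 *		minxfer = XX_DEFAULT_MINXFER;
 */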
1243 
1244 /*
1245  * Given two DMA attribute structures, apply the attributes
1246  * of one to the other, following the rules of attributes
1247  * and the wishes of the caller.
1248  *
1249  * The rules of DMA attribute structures are that you cannot
1250  * make things *less* restrictive as you apply one set
1251  * of attributes to another.
1252  *
1253  */
1254 void
1255 ddi_dma_attr_merge(ddi_dma_attr_t *attr, ddi_dma_attr_t *mod)
1256 {
1257         attr->dma_attr_addr_lo =
1258             MAX(attr->dma_attr_addr_lo, mod->dma_attr_addr_lo);
1259         attr->dma_attr_addr_hi =
1260             MIN(attr->dma_attr_addr_hi, mod->dma_attr_addr_hi);
1261         attr->dma_attr_count_max =
1262             MIN(attr->dma_attr_count_max, mod->dma_attr_count_max);
1263         attr->dma_attr_align =
1264             MAX(attr->dma_attr_align,  mod->dma_attr_align);
1265         attr->dma_attr_burstsizes =
1266             (uint_t)(attr->dma_attr_burstsizes & mod->dma_attr_burstsizes);
1267         attr->dma_attr_minxfer =
1268             maxbit(attr->dma_attr_minxfer, mod->dma_attr_minxfer);
1269         attr->dma_attr_maxxfer =
1270             MIN(attr->dma_attr_maxxfer, mod->dma_attr_maxxfer);
1271         attr->dma_attr_seg = MIN(attr->dma_attr_seg, mod->dma_attr_seg);
1272         attr->dma_attr_sgllen = MIN((uint_t)attr->dma_attr_sgllen,
1273             (uint_t)mod->dma_attr_sgllen);
1274         attr->dma_attr_granular =
1275             MAX(attr->dma_attr_granular, mod->dma_attr_granular);
1276 }
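/*
 * A short worked example (values are illustrative):  if attr allows
 * addresses up to 0xffffffff with 4-byte alignment, and mod only allows
 * addresses up to 0xffffff with 64-byte alignment, then after
 *
 *	ddi_dma_attr_merge(&attr, &mod);
 *
 * attr.dma_attr_addr_hi is 0xffffff (the MIN) and attr.dma_attr_align
 * is 64 (the MAX); in every field the more restrictive value wins.
 */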
1277 
1278 /*
1279  * mmap/segmap interface:
1280  */
1281 
1282 /*
1283  * ddi_segmap:          setup the default segment driver. Calls the driver's
1284  *                      XXmmap routine to validate the range to be mapped.
1285  *                      Returns ENXIO if the range is not valid.  Creates
1286  *                      a seg_dev segment that contains all of the
1287  *                      necessary information and will reference the
1288  *                      default segment driver routines. It returns zero
1289  *                      on success or non-zero on failure.
1290  */
1291 int
1292 ddi_segmap(dev_t dev, off_t offset, struct as *asp, caddr_t *addrp, off_t len,
1293     uint_t prot, uint_t maxprot, uint_t flags, cred_t *credp)
1294 {
1295         extern int spec_segmap(dev_t, off_t, struct as *, caddr_t *,
1296             off_t, uint_t, uint_t, uint_t, struct cred *);
1297 
1298         return (spec_segmap(dev, offset, asp, addrp, len,
1299             prot, maxprot, flags, credp));
1300 }
1301 
1302 /*
1303  * ddi_map_fault:       Resolve mappings at fault time.  Used by segment
1304  *                      drivers. Allows each successive parent to resolve
1305  *                      address translations and add its mappings to the
1306  *                      mapping list supplied in the page structure. It
1307  *                      returns zero on success or non-zero on failure.
1308  */
1309 
1310 int
1311 ddi_map_fault(dev_info_t *dip, struct hat *hat, struct seg *seg,
1312     caddr_t addr, struct devpage *dp, pfn_t pfn, uint_t prot, uint_t lock)
1313 {
1314         return (i_ddi_map_fault(dip, dip, hat, seg, addr, dp, pfn, prot, lock));
1315 }
1316 
1317 /*
1318  * ddi_device_mapping_check:    Called from ddi_segmap_setup.
1319  *      Invokes platform specific DDI to determine whether attributes specified
1320  *      in attr(9s) are valid for the region of memory that will be made
1321  *      available for direct access to a user process via the mmap(2) system call.
1322  */
1323 int
1324 ddi_device_mapping_check(dev_t dev, ddi_device_acc_attr_t *accattrp,
1325     uint_t rnumber, uint_t *hat_flags)
1326 {
1327         ddi_acc_handle_t handle;
1328         ddi_map_req_t mr;
1329         ddi_acc_hdl_t *hp;
1330         int result;
1331         dev_info_t *dip;
1332 
1333         /*
1334          * we use e_ddi_hold_devi_by_dev to search for the devi.  We
1335          * release it immediately since it should already be held by
1336          * a devfs vnode.
1337          */
1338         if ((dip =
1339             e_ddi_hold_devi_by_dev(dev, E_DDI_HOLD_DEVI_NOATTACH)) == NULL)
1340                 return (-1);
1341         ddi_release_devi(dip);          /* for e_ddi_hold_devi_by_dev() */
1342 
1343         /*
1344          * Allocate and initialize the common elements of data
1345          * access handle.
1346          */
1347         handle = impl_acc_hdl_alloc(KM_SLEEP, NULL);
1348         if (handle == NULL)
1349                 return (-1);
1350 
1351         hp = impl_acc_hdl_get(handle);
1352         hp->ah_vers = VERS_ACCHDL;
1353         hp->ah_dip = dip;
1354         hp->ah_rnumber = rnumber;
1355         hp->ah_offset = 0;
1356         hp->ah_len = 0;
1357         hp->ah_acc = *accattrp;
1358 
1359         /*
1360          * Set up the mapping request and call to parent.
1361          */
1362         mr.map_op = DDI_MO_MAP_HANDLE;
1363         mr.map_type = DDI_MT_RNUMBER;
1364         mr.map_obj.rnumber = rnumber;
1365         mr.map_prot = PROT_READ | PROT_WRITE;
1366         mr.map_flags = DDI_MF_KERNEL_MAPPING;
1367         mr.map_handlep = hp;
1368         mr.map_vers = DDI_MAP_VERSION;
1369         result = ddi_map(dip, &mr, 0, 0, NULL);
1370 
1371         /*
1372          * Region must be mappable, pick up flags from the framework.
1373          */
1374         *hat_flags = hp->ah_hat_flags;
1375 
1376         impl_acc_hdl_free(handle);
1377 
1378         /*
1379          * check for end result.
1380          */
1381         if (result != DDI_SUCCESS)
1382                 return (-1);
1383         return (0);
1384 }
1385 
1386 
1387 /*
1388  * Property functions:   See also, ddipropdefs.h.
1389  *
1390  * These functions are the framework for the property functions,
1391  * i.e. they support software defined properties.  All implementation
1392  * specific property handling (i.e. self-identifying devices and
1393  * PROM defined properties) is handled in the implementation specific
1394  * functions (defined in ddi_implfuncs.h).
1395  */
1396 
1397 /*
1398  * nopropop:    Shouldn't be called, right?
1399  */
1400 int
1401 nopropop(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op, int mod_flags,
1402     char *name, caddr_t valuep, int *lengthp)
1403 {
1404         _NOTE(ARGUNUSED(dev, dip, prop_op, mod_flags, name, valuep, lengthp))
1405         return (DDI_PROP_NOT_FOUND);
1406 }
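/*
 * Usage sketch (the property name "xx-config" is hypothetical):  the
 * typical consumer-side pattern built on this framework is lookup,
 * use, free:
 *
 *	int *ints;
 *	uint_t nints;
 *
 *	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
 *	    "xx-config", &ints, &nints) == DDI_PROP_SUCCESS) {
 *		...use ints[0] through ints[nints - 1]...
 *		ddi_prop_free(ints);
 *	}
 */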
1407 
1408 #ifdef  DDI_PROP_DEBUG
1409 int ddi_prop_debug_flag = 0;
1410 
1411 int
1412 ddi_prop_debug(int enable)
1413 {
1414         int prev = ddi_prop_debug_flag;
1415 
1416         if ((enable != 0) || (prev != 0))
1417                 printf("ddi_prop_debug: debugging %s\n",
1418                     enable ? "enabled" : "disabled");
1419         ddi_prop_debug_flag = enable;
1420         return (prev);
1421 }
1422 
1423 #endif  /* DDI_PROP_DEBUG */
1424 
1425 /*
1426  * Search a property list for a match, if found return pointer
1427  * to matching prop struct, else return NULL.
1428  */
1429 
1430 ddi_prop_t *
1431 i_ddi_prop_search(dev_t dev, char *name, uint_t flags, ddi_prop_t **list_head)
1432 {
1433         ddi_prop_t      *propp;
1434 
1435         /*
1436          * find the property in child's devinfo:
1437          * This function returns the first property that matches on all of:
1438          * dev (an input dev of DDI_DEV_T_ANY matches any prop_dev, otherwise
1439          * dev must equal propp->prop_dev), name (propp->prop_name), and the
1440          * data type specified in the flags.  If a DDI_DEV_T_NONE dev
1441          * value made it this far, then it implies a DDI_DEV_T_ANY search.
1442          */
1443         if (dev == DDI_DEV_T_NONE)
1444                 dev = DDI_DEV_T_ANY;
1445 
1446         for (propp = *list_head; propp != NULL; propp = propp->prop_next)  {
1447 
1448                 if (!DDI_STRSAME(propp->prop_name, name))
1449                         continue;
1450 
1451                 if ((dev != DDI_DEV_T_ANY) && (propp->prop_dev != dev))
1452                         continue;
1453 
1454                 if (((propp->prop_flags & flags) & DDI_PROP_TYPE_MASK) == 0)
1455                         continue;
1456 
1457                 return (propp);
1458         }
1459 
1460         return ((ddi_prop_t *)0);
1461 }
1462 
1463 /*
1464  * Search for property within devnames structures
1465  */
1466 ddi_prop_t *
1467 i_ddi_search_global_prop(dev_t dev, char *name, uint_t flags)
1468 {
1469         major_t         major;
1470         struct devnames *dnp;
1471         ddi_prop_t      *propp;
1472 
1473         /*
1474          * Valid dev_t value is needed to index into the
1475          * correct devnames entry, therefore a dev_t
1476          * value of DDI_DEV_T_ANY is not appropriate.
1477          */
1478         ASSERT(dev != DDI_DEV_T_ANY);
1479         if (dev == DDI_DEV_T_ANY) {
1480                 return ((ddi_prop_t *)0);
1481         }
1482 
1483         major = getmajor(dev);
1484         dnp = &(devnamesp[major]);
1485 
1486         if (dnp->dn_global_prop_ptr == NULL)
1487                 return ((ddi_prop_t *)0);
1488 
1489         LOCK_DEV_OPS(&dnp->dn_lock);
1490 
1491         for (propp = dnp->dn_global_prop_ptr->prop_list;
1492             propp != NULL;
1493             propp = (ddi_prop_t *)propp->prop_next) {
1494 
1495                 if (!DDI_STRSAME(propp->prop_name, name))
1496                         continue;
1497 
1498                 if ((!(flags & DDI_PROP_ROOTNEX_GLOBAL)) &&
1499                     (!(flags & LDI_DEV_T_ANY)) && (propp->prop_dev != dev))
1500                         continue;
1501 
1502                 if (((propp->prop_flags & flags) & DDI_PROP_TYPE_MASK) == 0)
1503                         continue;
1504 
1505                 /* Property found, return it */
1506                 UNLOCK_DEV_OPS(&dnp->dn_lock);
1507                 return (propp);
1508         }
1509 
1510         UNLOCK_DEV_OPS(&dnp->dn_lock);
1511         return ((ddi_prop_t *)0);
1512 }
1513 
1514 static char prop_no_mem_msg[] = "can't allocate memory for ddi property <%s>";
1515 
1516 /*
1517  * ddi_prop_search_global:
1518  *      Search the global property list within devnames
1519  *      for the named property.  Return the encoded value.
1520  */
1521 static int
1522 i_ddi_prop_search_global(dev_t dev, uint_t flags, char *name,
1523     void *valuep, uint_t *lengthp)
1524 {
1525         ddi_prop_t      *propp;
1526         caddr_t         buffer;
1527 
1528         propp =  i_ddi_search_global_prop(dev, name, flags);
1529 
1530         /* Property NOT found, bail */
1531         if (propp == (ddi_prop_t *)0)
1532                 return (DDI_PROP_NOT_FOUND);
1533 
1534         if (propp->prop_flags & DDI_PROP_UNDEF_IT)
1535                 return (DDI_PROP_UNDEFINED);
1536 
1537         if ((buffer = kmem_alloc(propp->prop_len,
1538             (flags & DDI_PROP_CANSLEEP) ? KM_SLEEP : KM_NOSLEEP)) == NULL) {
1539                 cmn_err(CE_CONT, prop_no_mem_msg, name);
1540                 return (DDI_PROP_NO_MEMORY);
1541         }
1542 
1543         /*
1544          * Return the encoded data
1545          */
1546         *(caddr_t *)valuep = buffer;
1547         *lengthp = propp->prop_len;
1548         bcopy(propp->prop_val, buffer, propp->prop_len);
1549 
1550         return (DDI_PROP_SUCCESS);
1551 }
1552 
1553 /*
1554  * ddi_prop_search_common:      Lookup and return the encoded value
1555  */
1556 int
1557 ddi_prop_search_common(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
1558     uint_t flags, char *name, void *valuep, uint_t *lengthp)
1559 {
1560         ddi_prop_t      *propp;
1561         int             i;
1562         caddr_t         buffer;
1563         caddr_t         prealloc = NULL;
1564         int             plength = 0;
1565         dev_info_t      *pdip;
1566         int             (*bop)();
1567 
1568         /*CONSTANTCONDITION*/
1569         while (1)  {
1570 
1571                 mutex_enter(&(DEVI(dip)->devi_lock));
1572 
1573 
1574                 /*
1575                  * find the property in child's devinfo:
1576                  * Search order is:
1577                  *      1. driver defined properties
1578                  *      2. system defined properties
1579                  *      3. driver global properties
1580                  *      4. boot defined properties
1581                  */
1582 
1583                 propp = i_ddi_prop_search(dev, name, flags,
1584                     &(DEVI(dip)->devi_drv_prop_ptr));
1585                 if (propp == NULL)  {
1586                         propp = i_ddi_prop_search(dev, name, flags,
1587                             &(DEVI(dip)->devi_sys_prop_ptr));
1588                 }
1589                 if ((propp == NULL) && DEVI(dip)->devi_global_prop_list) {
1590                         propp = i_ddi_prop_search(dev, name, flags,
1591                             &DEVI(dip)->devi_global_prop_list->prop_list);
1592                 }
1593 
1594                 if (propp == NULL)  {
1595                         propp = i_ddi_prop_search(dev, name, flags,
1596                             &(DEVI(dip)->devi_hw_prop_ptr));
1597                 }
1598 
1599                 /*
1600                  * Software property found?
1601                  */
1602                 if (propp != (ddi_prop_t *)0)   {
1603 
1604                         /*
1605                          * If explicit undefine, return now.
1606                          */
1607                         if (propp->prop_flags & DDI_PROP_UNDEF_IT) {
1608                                 mutex_exit(&(DEVI(dip)->devi_lock));
1609                                 if (prealloc)
1610                                         kmem_free(prealloc, plength);
1611                                 return (DDI_PROP_UNDEFINED);
1612                         }
1613 
1614                         /*
1615                          * If we only want to know if it exists, return now
1616                          */
1617                         if (prop_op == PROP_EXISTS) {
1618                                 mutex_exit(&(DEVI(dip)->devi_lock));
1619                                 ASSERT(prealloc == NULL);
1620                                 return (DDI_PROP_SUCCESS);
1621                         }
1622 
1623                         /*
1624                          * If length only request or prop length == 0,
1625                          * service request and return now.
1626                          */
1627                         if ((prop_op == PROP_LEN) || (propp->prop_len == 0)) {
1628                                 *lengthp = propp->prop_len;
1629 
1630                                 /*
1631                                  * if prop_op is PROP_LEN_AND_VAL_ALLOC
1632                                  * that means prop_len is 0, so set valuep
1633                                  * also to NULL
1634                                  */
1635                                 if (prop_op == PROP_LEN_AND_VAL_ALLOC)
1636                                         *(caddr_t *)valuep = NULL;
1637 
1638                                 mutex_exit(&(DEVI(dip)->devi_lock));
1639                                 if (prealloc)
1640                                         kmem_free(prealloc, plength);
1641                                 return (DDI_PROP_SUCCESS);
1642                         }
1643 
1644                         /*
1645                          * If LEN_AND_VAL_ALLOC and the request can sleep,
1646                          * drop the mutex, allocate the buffer, and go
1647                          * through the loop again.  If we already allocated
1648                          * the buffer, and the size of the property changed,
1649                          * keep trying...
1650                          */
1651                         if ((prop_op == PROP_LEN_AND_VAL_ALLOC) &&
1652                             (flags & DDI_PROP_CANSLEEP))  {
1653                                 if (prealloc && (propp->prop_len != plength)) {
1654                                         kmem_free(prealloc, plength);
1655                                         prealloc = NULL;
1656                                 }
1657                                 if (prealloc == NULL)  {
1658                                         plength = propp->prop_len;
1659                                         mutex_exit(&(DEVI(dip)->devi_lock));
1660                                         prealloc = kmem_alloc(plength,
1661                                             KM_SLEEP);
1662                                         continue;
1663                                 }
1664                         }
1665 
1666                         /*
1667                          * Allocate buffer, if required.  Either way,
1668                          * set `buffer' variable.
1669                          */
1670                         i = *lengthp;                   /* Get callers length */
1671                         *lengthp = propp->prop_len;  /* Set callers length */
1672 
1673                         switch (prop_op) {
1674 
1675                         case PROP_LEN_AND_VAL_ALLOC:
1676 
1677                                 if (prealloc == NULL) {
1678                                         buffer = kmem_alloc(propp->prop_len,
1679                                             KM_NOSLEEP);
1680                                 } else {
1681                                         buffer = prealloc;
1682                                 }
1683 
1684                                 if (buffer == NULL)  {
1685                                         mutex_exit(&(DEVI(dip)->devi_lock));
1686                                         cmn_err(CE_CONT, prop_no_mem_msg, name);
1687                                         return (DDI_PROP_NO_MEMORY);
1688                                 }
1689                                 /* Set callers buf ptr */
1690                                 *(caddr_t *)valuep = buffer;
1691                                 break;
1692 
1693                         case PROP_LEN_AND_VAL_BUF:
1694 
1695                                 if (propp->prop_len > (i)) {
1696                                         mutex_exit(&(DEVI(dip)->devi_lock));
1697                                         return (DDI_PROP_BUF_TOO_SMALL);
1698                                 }
1699 
1700                                 buffer = valuep;  /* Get callers buf ptr */
1701                                 break;
1702 
1703                         default:
1704                                 break;
1705                         }
1706 
1707                         /*
1708                          * Do the copy.
1709                          */
1710                         bcopy(propp->prop_val, buffer, propp->prop_len);
1711                         mutex_exit(&(DEVI(dip)->devi_lock));
1712                         return (DDI_PROP_SUCCESS);
1713                 }
1714 
1715                 mutex_exit(&(DEVI(dip)->devi_lock));
1716                 if (prealloc)
1717                         kmem_free(prealloc, plength);
1718                 prealloc = NULL;
1719 
1720                 /*
1721                  * Prop not found, call parent bus_ops to deal with possible
1722                  * h/w layer (possible PROM defined props, etc.) and to
1723                  * possibly ascend the hierarchy, if allowed by flags.
1724                  */
1725                 pdip = (dev_info_t *)DEVI(dip)->devi_parent;
1726 
1727                 /*
1728                  * One last call for the root driver PROM props?
1729                  */
1730                 if (dip == ddi_root_node())  {
1731                         return (ddi_bus_prop_op(dev, dip, dip, prop_op,
1732                             flags, name, valuep, (int *)lengthp));
1733                 }
1734 
1735                 /*
1736                  * We may have been called to check for properties
1737                  * within a single devinfo node that has no parent -
1738                  * see make_prop()
1739                  */
1740                 if (pdip == NULL) {
1741                         ASSERT((flags &
1742                             (DDI_PROP_DONTPASS | DDI_PROP_NOTPROM)) ==
1743                             (DDI_PROP_DONTPASS | DDI_PROP_NOTPROM));
1744                         return (DDI_PROP_NOT_FOUND);
1745                 }
1746 
1747                 /*
1748                  * Instead of recursing, we do iterative calls up the tree.
1749                  * As a bit of optimization, skip the bus_op level if the
1750                  * node is a s/w node and if the parent's bus_prop_op function
1751                  * is `ddi_bus_prop_op', because we know that in this case,
1752                  * this function does nothing.
1753                  *
1754                  * 4225415: If the parent isn't attached, or the child
1755                  * hasn't been named by the parent yet, use the default
1756                  * ddi_bus_prop_op as a proxy for the parent.  This
1757                  * allows property lookups in any child/parent state to
1758                  * include 'prom' and inherited properties, even when
1759                  * there are no drivers attached to the child or parent.
1760                  */
1761 
1762                 bop = ddi_bus_prop_op;
1763                 if (i_ddi_devi_attached(pdip) &&
1764                     (i_ddi_node_state(dip) >= DS_INITIALIZED))
1765                         bop = DEVI(pdip)->devi_ops->devo_bus_ops->bus_prop_op;
1766 
1767                 i = DDI_PROP_NOT_FOUND;
1768 
1769                 if ((bop != ddi_bus_prop_op) || ndi_dev_is_prom_node(dip)) {
1770                         i = (*bop)(dev, pdip, dip, prop_op,
1771                             flags | DDI_PROP_DONTPASS,
1772                             name, valuep, lengthp);
1773                 }
1774 
1775                 if ((flags & DDI_PROP_DONTPASS) ||
1776                     (i != DDI_PROP_NOT_FOUND))
1777                         return (i);
1778 
1779                 dip = pdip;
1780         }
1781         /*NOTREACHED*/
1782 }
1783 
1784 
1785 /*
1786  * ddi_prop_op: The basic property operator for drivers.
1787  *
1788  * In ddi_prop_op, the type of valuep is interpreted based on prop_op:
1789  *
1790  *      prop_op                 valuep
1791  *      ------                  ------
1792  *
1793  *      PROP_LEN                <unused>
1794  *
1795  *      PROP_LEN_AND_VAL_BUF    Pointer to callers buffer
1796  *
1797  *      PROP_LEN_AND_VAL_ALLOC  Address of callers pointer (will be set to
1798  *                              address of allocated buffer, if successful)
1799  */
1800 int
1801 ddi_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op, int mod_flags,
1802     char *name, caddr_t valuep, int *lengthp)
1803 {
1804         int     i;
1805 
1806         ASSERT((mod_flags & DDI_PROP_TYPE_MASK) == 0);
1807 
1808         /*
1809          * If this was originally an LDI prop lookup then we bail here.
1810          * The reason is that the LDI property lookup interfaces first call
1811          * a driver's prop_op() entry point to allow it to override
1812          * properties.  But if we've made it here, then the driver hasn't
1813          * overridden any properties.  We don't want to continue with the
1814          * property search here because we don't have any type information.
1815          * When we return failure, the LDI interfaces will then proceed to
1816          * call the typed property interfaces to look up the property.
1817          */
1818         if (mod_flags & DDI_PROP_DYNAMIC)
1819                 return (DDI_PROP_NOT_FOUND);
1820 
1821         /*
1822          * check for pre-typed property consumer asking for typed property:
1823          * see e_ddi_getprop_int64.
1824          */
1825         if (mod_flags & DDI_PROP_CONSUMER_TYPED)
1826                 mod_flags |= DDI_PROP_TYPE_INT64;
1827         mod_flags |= DDI_PROP_TYPE_ANY;
1828 
1829         i = ddi_prop_search_common(dev, dip, prop_op,
1830             mod_flags, name, valuep, (uint_t *)lengthp);
1831         if (i == DDI_PROP_FOUND_1275)
1832                 return (DDI_PROP_SUCCESS);
1833         return (i);
1834 }
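
/*
 * Usage sketch (hypothetical "xx" driver): a leaf driver that exports no
 * dynamic properties typically forwards its prop_op(9E) entry point straight
 * to ddi_prop_op, or simply sets cb_prop_op to ddi_prop_op:
 *
 *      static int
 *      xx_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
 *          int mod_flags, char *name, caddr_t valuep, int *lengthp)
 *      {
 *              return (ddi_prop_op(dev, dip, prop_op, mod_flags, name,
 *                  valuep, lengthp));
 *      }
 */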
1835 
1836 /*
1837  * ddi_prop_op_nblocks_blksize: The basic property operator for drivers that
1838  * maintain size in number of blksize blocks.  Provides a dynamic property
1839  * implementation for size oriented properties based on nblocks64 and blksize
1840  * values passed in by the driver.  Falls back to ddi_prop_op if nblocks64
1841  * is too large.  This interface should not be used with an nblocks64 value
1842  * that represents the driver's idea of "unknown"; if nblocks is unknown,
1843  * use ddi_prop_op.
1844  */
1845 int
1846 ddi_prop_op_nblocks_blksize(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
1847     int mod_flags, char *name, caddr_t valuep, int *lengthp,
1848     uint64_t nblocks64, uint_t blksize)
1849 {
1850         uint64_t size64;
1851         int     blkshift;
1852 
1853         /* convert block size to shift value */
1854         ASSERT(BIT_ONLYONESET(blksize));
1855         blkshift = highbit(blksize) - 1;
1856 
1857         /*
1858          * There is no point in supporting nblocks64 values that don't have
1859          * an accurate uint64_t byte count representation.
1860          */
1861         if (nblocks64 >= (UINT64_MAX >> blkshift))
1862                 return (ddi_prop_op(dev, dip, prop_op, mod_flags,
1863                     name, valuep, lengthp));
1864 
1865         size64 = nblocks64 << blkshift;
1866         return (ddi_prop_op_size_blksize(dev, dip, prop_op, mod_flags,
1867             name, valuep, lengthp, size64, blksize));
1868 }
1869 
1870 /*
1871  * ddi_prop_op_nblocks: ddi_prop_op_nblocks_blksize with DEV_BSIZE blksize.
1872  */
1873 int
1874 ddi_prop_op_nblocks(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
1875     int mod_flags, char *name, caddr_t valuep, int *lengthp, uint64_t nblocks64)
1876 {
1877         return (ddi_prop_op_nblocks_blksize(dev, dip, prop_op,
1878             mod_flags, name, valuep, lengthp, nblocks64, DEV_BSIZE));
1879 }
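
/*
 * Usage sketch (hypothetical "xx" block driver): a driver that tracks its
 * device capacity in DEV_BSIZE blocks can let ddi_prop_op_nblocks answer
 * the size(9P) property requests; xx_get_nblocks() is a made-up helper
 * standing in for however the driver records that capacity.
 *
 *      static int
 *      xx_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
 *          int mod_flags, char *name, caddr_t valuep, int *lengthp)
 *      {
 *              uint64_t nblocks64 = xx_get_nblocks(dip);
 *
 *              return (ddi_prop_op_nblocks(dev, dip, prop_op, mod_flags,
 *                  name, valuep, lengthp, nblocks64));
 *      }
 */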
1880 
1881 /*
1882  * ddi_prop_op_size_blksize: The basic property operator for block drivers that
1883  * maintain size in bytes.  Provides a dynamic property implementation for
1884  * size oriented properties based on the size64 value and blksize passed in
1885  * by the driver.  Falls back to ddi_prop_op if size64 is too large.  This
1886  * interface should not be used with a size64 value that represents the
1887  * driver's idea of "unknown"; if size is unknown, use ddi_prop_op.
1888  *
1889  * NOTE: the legacy "nblocks"/"size" properties are treated as 32-bit unsigned
1890  * integers.  While the most likely interface to request them ([bc]devi_size)
1891  * is declared int (signed), there is no enforcement of this, which means we
1892  * can't enforce limitations here without risking regression.
1893  */
1894 int
1895 ddi_prop_op_size_blksize(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
1896     int mod_flags, char *name, caddr_t valuep, int *lengthp, uint64_t size64,
1897     uint_t blksize)
1898 {
1899         uint64_t nblocks64;
1900         int     callers_length;
1901         caddr_t buffer;
1902         int     blkshift;
1903 
1904         /*
1905          * This is a kludge to support capture of size(9P) pure dynamic
1906          * properties in snapshots for non-cmlb code (without exposing
1907          * i_ddi_prop_dyn changes). When everyone uses cmlb, this code
1908          * should be removed.
1909          */
1910         if (i_ddi_prop_dyn_driver_get(dip) == NULL) {
1911                 static i_ddi_prop_dyn_t prop_dyn_size[] = {
1912                     {"Size",            DDI_PROP_TYPE_INT64,    S_IFCHR},
1913                     {"Nblocks",         DDI_PROP_TYPE_INT64,    S_IFBLK},
1914                     {NULL}
1915                 };
1916                 i_ddi_prop_dyn_driver_set(dip, prop_dyn_size);
1917         }
1918 
1919         /* convert block size to shift value */
1920         ASSERT(BIT_ONLYONESET(blksize));
1921         blkshift = highbit(blksize) - 1;
1922 
1923         /* compute DEV_BSIZE nblocks value */
1924         nblocks64 = size64 >> blkshift;
1925 
1926         /* get callers length, establish length of our dynamic properties */
1927         callers_length = *lengthp;
1928 
1929         if (strcmp(name, "Nblocks") == 0)
1930                 *lengthp = sizeof (uint64_t);
1931         else if (strcmp(name, "Size") == 0)
1932                 *lengthp = sizeof (uint64_t);
1933         else if ((strcmp(name, "nblocks") == 0) && (nblocks64 < UINT_MAX))
1934                 *lengthp = sizeof (uint32_t);
1935         else if ((strcmp(name, "size") == 0) && (size64 < UINT_MAX))
1936                 *lengthp = sizeof (uint32_t);
1937         else if ((strcmp(name, "blksize") == 0) && (blksize < UINT_MAX))
1938                 *lengthp = sizeof (uint32_t);
1939         else {
1940                 /* fallback to ddi_prop_op */
1941                 return (ddi_prop_op(dev, dip, prop_op, mod_flags,
1942                     name, valuep, lengthp));
1943         }
1944 
1945         /* service request for the length of the property */
1946         if (prop_op == PROP_LEN)
1947                 return (DDI_PROP_SUCCESS);
1948 
1949         switch (prop_op) {
1950         case PROP_LEN_AND_VAL_ALLOC:
1951                 if ((buffer = kmem_alloc(*lengthp,
1952                     (mod_flags & DDI_PROP_CANSLEEP) ?
1953                     KM_SLEEP : KM_NOSLEEP)) == NULL)
1954                         return (DDI_PROP_NO_MEMORY);
1955 
1956                 *(caddr_t *)valuep = buffer;    /* set callers buf ptr */
1957                 break;
1958 
1959         case PROP_LEN_AND_VAL_BUF:
1960                 /* the length of the property and the request must match */
1961                 if (callers_length != *lengthp)
1962                         return (DDI_PROP_INVAL_ARG);
1963 
1964                 buffer = valuep;                /* get callers buf ptr */
1965                 break;
1966 
1967         default:
1968                 return (DDI_PROP_INVAL_ARG);
1969         }
1970 
1971         /* transfer the value into the buffer */
1972         if (strcmp(name, "Nblocks") == 0)
1973                 *((uint64_t *)buffer) = nblocks64;
1974         else if (strcmp(name, "Size") == 0)
1975                 *((uint64_t *)buffer) = size64;
1976         else if (strcmp(name, "nblocks") == 0)
1977                 *((uint32_t *)buffer) = (uint32_t)nblocks64;
1978         else if (strcmp(name, "size") == 0)
1979                 *((uint32_t *)buffer) = (uint32_t)size64;
1980         else if (strcmp(name, "blksize") == 0)
1981                 *((uint32_t *)buffer) = (uint32_t)blksize;
1982         return (DDI_PROP_SUCCESS);
1983 }
1984 
1985 /*
1986  * ddi_prop_op_size: ddi_prop_op_size_blksize with DEV_BSIZE block size.
1987  */
1988 int
1989 ddi_prop_op_size(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
1990     int mod_flags, char *name, caddr_t valuep, int *lengthp, uint64_t size64)
1991 {
1992         return (ddi_prop_op_size_blksize(dev, dip, prop_op,
1993             mod_flags, name, valuep, lengthp, size64, DEV_BSIZE));
1994 }
1995 
1996 /*
1997  * Variable length props...
1998  */
1999 
2000 /*
2001  * ddi_getlongprop:     Get variable length property len+val into a buffer
2002  *              allocated by property provider via kmem_alloc. Requester
2003  *              is responsible for freeing returned property via kmem_free.
2004  *
2005  *      Arguments:
2006  *
2007  *      dev_t:  Input:  dev_t of property.
2008  *      dip:    Input:  dev_info_t pointer of child.
2009  *      flags:  Input:  Possible flag modifiers are:
2010  *              DDI_PROP_DONTPASS:      Don't pass to parent if prop not found.
2011  *              DDI_PROP_CANSLEEP:      Memory allocation may sleep.
2012  *      name:   Input:  name of property.
2013  *      valuep: Output: Addr of callers buffer pointer.
2014  *      lengthp: Output: *lengthp will contain prop length on exit.
2015  *
2016  *      Possible Returns:
2017  *
2018  *              DDI_PROP_SUCCESS:       Prop found and returned.
2019  *              DDI_PROP_NOT_FOUND:     Prop not found
2020  *              DDI_PROP_UNDEFINED:     Prop explicitly undefined.
2021  *              DDI_PROP_NO_MEMORY:     Prop found, but unable to alloc mem.
2022  */
2023 
2024 int
2025 ddi_getlongprop(dev_t dev, dev_info_t *dip, int flags,
2026     char *name, caddr_t valuep, int *lengthp)
2027 {
2028         return (ddi_prop_op(dev, dip, PROP_LEN_AND_VAL_ALLOC,
2029             flags, name, valuep, lengthp));
2030 }
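
/*
 * Usage sketch: fetch a variable length property ("reg" is only an example
 * name) and free the buffer the property provider allocated on our behalf.
 *
 *      int     *regs;
 *      int     reglen;
 *
 *      if (ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, "reg",
 *          (caddr_t)&regs, &reglen) == DDI_PROP_SUCCESS) {
 *              ... use regs ...
 *              kmem_free(regs, reglen);
 *      }
 */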
2031 
2032 /*
2033  *
2034  * ddi_getlongprop_buf:         Get long prop into pre-allocated callers
2035  *                              buffer. (no memory allocation by provider).
2036  *
2037  *      dev_t:  Input:  dev_t of property.
2038  *      dip:    Input:  dev_info_t pointer of child.
2039  *      flags:  Input:  DDI_PROP_DONTPASS or NULL
2040  *      name:   Input:  name of property
2041  *      valuep: Input:  ptr to callers buffer.
2042  *      lengthp: I/O:   ptr to length of callers buffer on entry,
2043  *                      actual length of property on exit.
2044  *
2045  *      Possible returns:
2046  *
2047  *              DDI_PROP_SUCCESS        Prop found and returned
2048  *              DDI_PROP_NOT_FOUND      Prop not found
2049  *              DDI_PROP_UNDEFINED      Prop explicitly undefined.
2050  *              DDI_PROP_BUF_TOO_SMALL  Prop found, callers buf too small,
2051  *                                      no value returned, but actual prop
2052  *                                      length returned in *lengthp
2053  *
2054  */
2055 
2056 int
2057 ddi_getlongprop_buf(dev_t dev, dev_info_t *dip, int flags,
2058     char *name, caddr_t valuep, int *lengthp)
2059 {
2060         return (ddi_prop_op(dev, dip, PROP_LEN_AND_VAL_BUF,
2061             flags, name, valuep, lengthp));
2062 }
2063 
2064 /*
2065  * Integer/boolean sized props.
2066  *
2067  * Call is value only... returns the found boolean or int sized prop value,
2068  * or defvalue if the prop is not found, is the wrong length, or is
2069  * explicitly undefined.  Only flag is DDI_PROP_DONTPASS...
2070  *
2071  * By convention, this interface returns boolean (0) sized properties
2072  * as value (int)1.
2073  *
2074  * This never returns an error; if the property is not found or specifically
2075  * undefined, the input `defvalue' is returned.
2076  */
2077 
2078 int
2079 ddi_getprop(dev_t dev, dev_info_t *dip, int flags, char *name, int defvalue)
2080 {
2081         int     propvalue = defvalue;
2082         int     proplength = sizeof (int);
2083         int     error;
2084 
2085         error = ddi_prop_op(dev, dip, PROP_LEN_AND_VAL_BUF,
2086             flags, name, (caddr_t)&propvalue, &proplength);
2087 
2088         if ((error == DDI_PROP_SUCCESS) && (proplength == 0))
2089                 propvalue = 1;
2090 
2091         return (propvalue);
2092 }
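
/*
 * Usage sketch: look up an integer property with a built-in default; the
 * property name "burst-size" and the default of 16 are illustrative only.
 *
 *      int     burst;
 *
 *      burst = ddi_getprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
 *          "burst-size", 16);
 */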
2093 
2094 /*
2095  * Get prop length interface: flags are 0 or DDI_PROP_DONTPASS
2096  * if returns DDI_PROP_SUCCESS, length returned in *lengthp.
2097  */
2098 
2099 int
2100 ddi_getproplen(dev_t dev, dev_info_t *dip, int flags, char *name, int *lengthp)
2101 {
2102         return (ddi_prop_op(dev, dip, PROP_LEN, flags, name, NULL, lengthp));
2103 }
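
/*
 * Usage sketch: size a caller-supplied buffer with ddi_getproplen, then fill
 * it with ddi_getlongprop_buf; "model" is only an example property name.
 *
 *      int     proplen, buflen;
 *      caddr_t buf;
 *
 *      if (ddi_getproplen(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, "model",
 *          &proplen) == DDI_PROP_SUCCESS) {
 *              buf = kmem_alloc(proplen, KM_SLEEP);
 *              buflen = proplen;
 *              if (ddi_getlongprop_buf(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
 *                  "model", buf, &buflen) == DDI_PROP_SUCCESS) {
 *                      ... use buf ...
 *              }
 *              kmem_free(buf, proplen);
 *      }
 */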
2104 
2105 /*
2106  * Allocate a struct prop_driver_data, along with 'size' bytes
2107  * for decoded property data.  This structure is freed by
2108  * calling ddi_prop_free(9F).
2109  */
2110 static void *
2111 ddi_prop_decode_alloc(size_t size, void (*prop_free)(struct prop_driver_data *))
2112 {
2113         struct prop_driver_data *pdd;
2114 
2115         /*
2116          * Allocate a structure with enough memory to store the decoded data.
2117          */
2118         pdd = kmem_zalloc(sizeof (struct prop_driver_data) + size, KM_SLEEP);
2119         pdd->pdd_size = (sizeof (struct prop_driver_data) + size);
2120         pdd->pdd_prop_free = prop_free;
2121 
2122         /*
2123          * Return a pointer to the location to put the decoded data.
2124          */
2125         return ((void *)((caddr_t)pdd + sizeof (struct prop_driver_data)));
2126 }
2127 
2128 /*
2129  * Allocate the memory needed to store the encoded data in the property
2130  * handle.
2131  */
2132 static int
2133 ddi_prop_encode_alloc(prop_handle_t *ph, size_t size)
2134 {
2135         /*
2136          * If size is zero, then set data to NULL and size to 0.  This
2137          * is a boolean property.
2138          */
2139         if (size == 0) {
2140                 ph->ph_size = 0;
2141                 ph->ph_data = NULL;
2142                 ph->ph_cur_pos = NULL;
2143                 ph->ph_save_pos = NULL;
2144         } else {
2145                 if (ph->ph_flags == DDI_PROP_DONTSLEEP) {
2146                         ph->ph_data = kmem_zalloc(size, KM_NOSLEEP);
2147                         if (ph->ph_data == NULL)
2148                                 return (DDI_PROP_NO_MEMORY);
2149                 } else
2150                         ph->ph_data = kmem_zalloc(size, KM_SLEEP);
2151                 ph->ph_size = size;
2152                 ph->ph_cur_pos = ph->ph_data;
2153                 ph->ph_save_pos = ph->ph_data;
2154         }
2155         return (DDI_PROP_SUCCESS);
2156 }
2157 
2158 /*
2159  * Free the space allocated by the lookup routines.  Each lookup routine
2160  * returns a pointer to the decoded data to the driver.  The driver then
2161  * passes this pointer back to us.  This data actually lives in a struct
2162  * prop_driver_data.  We use negative indexing to find the beginning of
2163  * the structure and then free the entire structure using the size and
2164  * the free routine stored in the structure.
2165  */
2166 void
2167 ddi_prop_free(void *datap)
2168 {
2169         struct prop_driver_data *pdd;
2170 
2171         /*
2172          * Get the structure
2173          */
2174         pdd = (struct prop_driver_data *)
2175             ((caddr_t)datap - sizeof (struct prop_driver_data));
2176         /*
2177          * Call the free routine to free it
2178          */
2179         (*pdd->pdd_prop_free)(pdd);
2180 }
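
/*
 * Usage sketch: the typed lookup interfaces return memory carved out of a
 * prop_driver_data allocation, so callers release it with ddi_prop_free
 * rather than kmem_free; "interrupts" is only an example property name.
 *
 *      int     *ints;
 *      uint_t  nints;
 *
 *      if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
 *          "interrupts", &ints, &nints) == DDI_PROP_SUCCESS) {
 *              ... use ints[0] through ints[nints - 1] ...
 *              ddi_prop_free(ints);
 *      }
 */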
2181 
2182 /*
2183  * Free the data associated with an array of ints,
2184  * allocated with ddi_prop_decode_alloc().
2185  */
2186 static void
2187 ddi_prop_free_ints(struct prop_driver_data *pdd)
2188 {
2189         kmem_free(pdd, pdd->pdd_size);
2190 }
2191 
2192 /*
2193  * Free a single string property or a single string contained within
2194  * the argv style return value of an array of strings.
2195  */
2196 static void
2197 ddi_prop_free_string(struct prop_driver_data *pdd)
2198 {
2199         kmem_free(pdd, pdd->pdd_size);
2200 
2201 }
2202 
2203 /*
2204  * Free an array of strings.
2205  */
2206 static void
2207 ddi_prop_free_strings(struct prop_driver_data *pdd)
2208 {
2209         kmem_free(pdd, pdd->pdd_size);
2210 }
2211 
2212 /*
2213  * Free the data associated with an array of bytes.
2214  */
2215 static void
2216 ddi_prop_free_bytes(struct prop_driver_data *pdd)
2217 {
2218         kmem_free(pdd, pdd->pdd_size);
2219 }
2220 
2221 /*
2222  * Reset the current location pointer in the property handle to the
2223  * beginning of the data.
2224  */
2225 void
2226 ddi_prop_reset_pos(prop_handle_t *ph)
2227 {
2228         ph->ph_cur_pos = ph->ph_data;
2229         ph->ph_save_pos = ph->ph_data;
2230 }
2231 
2232 /*
2233  * Save the position that the current location pointer in the property
2234  * handle is pointing to.
2235  */
2236 void
2237 ddi_prop_save_pos(prop_handle_t *ph)
2238 {
2239         ph->ph_save_pos = ph->ph_cur_pos;
2240 }
2241 
2242 /*
2243  * Restore the current location pointer to the previously saved position.
2244  */
2245 void
2246 ddi_prop_restore_pos(prop_handle_t *ph)
2247 {
2248         ph->ph_cur_pos = ph->ph_save_pos;
2249 }
2250 
2251 /*
2252  * Property encode/decode functions
2253  */
2254 
2255 /*
2256  * Decode a single integer property
2257  */
2258 static int
2259 ddi_prop_fm_decode_int(prop_handle_t *ph, void *data, uint_t *nelements)
2260 {
2261         int     i;
2262         int     tmp;
2263 
2264         /*
2265          * If there is nothing to decode return an error
2266          */
2267         if (ph->ph_size == 0)
2268                 return (DDI_PROP_END_OF_DATA);
2269 
2270         /*
2271          * Decode the property as a single integer and return it
2272          * in data if we were able to decode it.
2273          */
2274         i = DDI_PROP_INT(ph, DDI_PROP_CMD_DECODE, &tmp);
2275         if (i < DDI_PROP_RESULT_OK) {
2276                 switch (i) {
2277                 case DDI_PROP_RESULT_EOF:
2278                         return (DDI_PROP_END_OF_DATA);
2279 
2280                 case DDI_PROP_RESULT_ERROR:
2281                         return (DDI_PROP_CANNOT_DECODE);
2282                 }
2283         }
2284 
2285         *(int *)data = tmp;
2286         *nelements = 1;
2287         return (DDI_PROP_SUCCESS);
2288 }
2289 
2290 /*
2291  * Decode a single 64 bit integer property
2292  */
2293 static int
2294 ddi_prop_fm_decode_int64(prop_handle_t *ph, void *data, uint_t *nelements)
2295 {
2296         int     i;
2297         int64_t tmp;
2298 
2299         /*
2300          * If there is nothing to decode return an error
2301          */
2302         if (ph->ph_size == 0)
2303                 return (DDI_PROP_END_OF_DATA);
2304 
2305         /*
2306          * Decode the property as a single integer and return it
2307          * in data if we were able to decode it.
2308          */
2309         i = DDI_PROP_INT64(ph, DDI_PROP_CMD_DECODE, &tmp);
2310         if (i < DDI_PROP_RESULT_OK) {
2311                 switch (i) {
2312                 case DDI_PROP_RESULT_EOF:
2313                         return (DDI_PROP_END_OF_DATA);
2314 
2315                 case DDI_PROP_RESULT_ERROR:
2316                         return (DDI_PROP_CANNOT_DECODE);
2317                 }
2318         }
2319 
2320         *(int64_t *)data = tmp;
2321         *nelements = 1;
2322         return (DDI_PROP_SUCCESS);
2323 }
2324 
2325 /*
2326  * Decode an array of integers property
2327  */
2328 static int
2329 ddi_prop_fm_decode_ints(prop_handle_t *ph, void *data, uint_t *nelements)
2330 {
2331         int     i;
2332         int     cnt = 0;
2333         int     *tmp;
2334         int     *intp;
2335         int     n;
2336 
2337         /*
2338          * Figure out how many array elements there are by going through the
2339          * data without decoding it first and counting.
2340          */
2341         for (;;) {
2342                 i = DDI_PROP_INT(ph, DDI_PROP_CMD_SKIP, NULL);
2343                 if (i < 0)
2344                         break;
2345                 cnt++;
2346         }
2347 
2348         /*
2349          * If there are no elements return an error
2350          */
2351         if (cnt == 0)
2352                 return (DDI_PROP_END_OF_DATA);
2353 
2354         /*
2355          * If we cannot skip through the data, we cannot decode it
2356          */
2357         if (i == DDI_PROP_RESULT_ERROR)
2358                 return (DDI_PROP_CANNOT_DECODE);
2359 
2360         /*
2361          * Reset the data pointer to the beginning of the encoded data
2362          */
2363         ddi_prop_reset_pos(ph);
2364 
2365         /*
2366          * Allocate memory to store the decoded value in.
2367          */
2368         intp = ddi_prop_decode_alloc((cnt * sizeof (int)),
2369             ddi_prop_free_ints);
2370 
2371         /*
2372          * Decode each element and place it in the space we just allocated
2373          */
2374         tmp = intp;
2375         for (n = 0; n < cnt; n++, tmp++) {
2376                 i = DDI_PROP_INT(ph, DDI_PROP_CMD_DECODE, tmp);
2377                 if (i < DDI_PROP_RESULT_OK) {
2378                         /*
2379                          * Free the space we just allocated
2380                          * and return an error.
2381                          */
2382                         ddi_prop_free(intp);
2383                         switch (i) {
2384                         case DDI_PROP_RESULT_EOF:
2385                                 return (DDI_PROP_END_OF_DATA);
2386 
2387                         case DDI_PROP_RESULT_ERROR:
2388                                 return (DDI_PROP_CANNOT_DECODE);
2389                         }
2390                 }
2391         }
2392 
2393         *nelements = cnt;
2394         *(int **)data = intp;
2395 
2396         return (DDI_PROP_SUCCESS);
2397 }
2398 
2399 /*
2400  * Decode a 64 bit integer array property
2401  */
2402 static int
2403 ddi_prop_fm_decode_int64_array(prop_handle_t *ph, void *data, uint_t *nelements)
2404 {
2405         int     i;
2406         int     n;
2407         int     cnt = 0;
2408         int64_t *tmp;
2409         int64_t *intp;
2410 
2411         /*
2412          * Count the number of array elements by going
2413          * through the data without decoding it.
2414          */
2415         for (;;) {
2416                 i = DDI_PROP_INT64(ph, DDI_PROP_CMD_SKIP, NULL);
2417                 if (i < 0)
2418                         break;
2419                 cnt++;
2420         }
2421 
2422         /*
2423          * If there are no elements return an error
2424          */
2425         if (cnt == 0)
2426                 return (DDI_PROP_END_OF_DATA);
2427 
2428         /*
2429          * If we cannot skip through the data, we cannot decode it
2430          */
2431         if (i == DDI_PROP_RESULT_ERROR)
2432                 return (DDI_PROP_CANNOT_DECODE);
2433 
2434         /*
2435          * Reset the data pointer to the beginning of the encoded data
2436          */
2437         ddi_prop_reset_pos(ph);
2438 
2439         /*
2440          * Allocate memory to store the decoded value.
2441          */
2442         intp = ddi_prop_decode_alloc((cnt * sizeof (int64_t)),
2443             ddi_prop_free_ints);
2444 
2445         /*
2446          * Decode each element and place it in the space allocated
2447          */
2448         tmp = intp;
2449         for (n = 0; n < cnt; n++, tmp++) {
2450                 i = DDI_PROP_INT64(ph, DDI_PROP_CMD_DECODE, tmp);
2451                 if (i < DDI_PROP_RESULT_OK) {
2452                         /*
2453                          * Free the space we just allocated
2454                          * and return an error.
2455                          */
2456                         ddi_prop_free(intp);
2457                         switch (i) {
2458                         case DDI_PROP_RESULT_EOF:
2459                                 return (DDI_PROP_END_OF_DATA);
2460 
2461                         case DDI_PROP_RESULT_ERROR:
2462                                 return (DDI_PROP_CANNOT_DECODE);
2463                         }
2464                 }
2465         }
2466 
2467         *nelements = cnt;
2468         *(int64_t **)data = intp;
2469 
2470         return (DDI_PROP_SUCCESS);
2471 }
2472 
2473 /*
2474  * Encode an array of integers property (Can be one element)
2475  */
2476 int
2477 ddi_prop_fm_encode_ints(prop_handle_t *ph, void *data, uint_t nelements)
2478 {
2479         int     i;
2480         int     *tmp;
2481         int     cnt;
2482         int     size;
2483 
2484         /*
2485          * If there is no data, we cannot do anything
2486          */
2487         if (nelements == 0)
2488                 return (DDI_PROP_CANNOT_ENCODE);
2489 
2490         /*
2491          * Get the size of an encoded int.
2492          */
2493         size = DDI_PROP_INT(ph, DDI_PROP_CMD_GET_ESIZE, NULL);
2494 
2495         if (size < DDI_PROP_RESULT_OK) {
2496                 switch (size) {
2497                 case DDI_PROP_RESULT_EOF:
2498                         return (DDI_PROP_END_OF_DATA);
2499 
2500                 case DDI_PROP_RESULT_ERROR:
2501                         return (DDI_PROP_CANNOT_ENCODE);
2502                 }
2503         }
2504 
2505         /*
2506          * Allocate space in the handle to store the encoded int.
2507          */
2508         if (ddi_prop_encode_alloc(ph, size * nelements) !=
2509             DDI_PROP_SUCCESS)
2510                 return (DDI_PROP_NO_MEMORY);
2511 
2512         /*
2513          * Encode the array of ints.
2514          */
2515         tmp = (int *)data;
2516         for (cnt = 0; cnt < nelements; cnt++, tmp++) {
2517                 i = DDI_PROP_INT(ph, DDI_PROP_CMD_ENCODE, tmp);
2518                 if (i < DDI_PROP_RESULT_OK) {
2519                         switch (i) {
2520                         case DDI_PROP_RESULT_EOF:
2521                                 return (DDI_PROP_END_OF_DATA);
2522 
2523                         case DDI_PROP_RESULT_ERROR:
2524                                 return (DDI_PROP_CANNOT_ENCODE);
2525                         }
2526                 }
2527         }
2528 
2529         return (DDI_PROP_SUCCESS);
2530 }
2531 
2532 
2533 /*
2534  * Encode a 64 bit integer array property
2535  */
2536 int
2537 ddi_prop_fm_encode_int64(prop_handle_t *ph, void *data, uint_t nelements)
2538 {
2539         int i;
2540         int cnt;
2541         int size;
2542         int64_t *tmp;
2543 
2544         /*
2545          * If there is no data, we cannot do anything
2546          */
2547         if (nelements == 0)
2548                 return (DDI_PROP_CANNOT_ENCODE);
2549 
2550         /*
2551          * Get the size of an encoded 64 bit int.
2552          */
2553         size = DDI_PROP_INT64(ph, DDI_PROP_CMD_GET_ESIZE, NULL);
2554 
2555         if (size < DDI_PROP_RESULT_OK) {
2556                 switch (size) {
2557                 case DDI_PROP_RESULT_EOF:
2558                         return (DDI_PROP_END_OF_DATA);
2559 
2560                 case DDI_PROP_RESULT_ERROR:
2561                         return (DDI_PROP_CANNOT_ENCODE);
2562                 }
2563         }
2564 
2565         /*
2566          * Allocate space in the handle to store the encoded int.
2567          */
2568         if (ddi_prop_encode_alloc(ph, size * nelements) !=
2569             DDI_PROP_SUCCESS)
2570                 return (DDI_PROP_NO_MEMORY);
2571 
2572         /*
2573          * Encode the array of ints.
2574          */
2575         tmp = (int64_t *)data;
2576         for (cnt = 0; cnt < nelements; cnt++, tmp++) {
2577                 i = DDI_PROP_INT64(ph, DDI_PROP_CMD_ENCODE, tmp);
2578                 if (i < DDI_PROP_RESULT_OK) {
2579                         switch (i) {
2580                         case DDI_PROP_RESULT_EOF:
2581                                 return (DDI_PROP_END_OF_DATA);
2582 
2583                         case DDI_PROP_RESULT_ERROR:
2584                                 return (DDI_PROP_CANNOT_ENCODE);
2585                         }
2586                 }
2587         }
2588 
2589         return (DDI_PROP_SUCCESS);
2590 }
2591 
2592 /*
2593  * Decode a single string property
2594  */
2595 static int
2596 ddi_prop_fm_decode_string(prop_handle_t *ph, void *data, uint_t *nelements)
2597 {
2598         char            *tmp;
2599         char            *str;
2600         int             i;
2601         int             size;
2602 
2603         /*
2604          * If there is nothing to decode return an error
2605          */
2606         if (ph->ph_size == 0)
2607                 return (DDI_PROP_END_OF_DATA);
2608 
2609         /*
2610          * Get the decoded size of the encoded string.
2611          */
2612         size = DDI_PROP_STR(ph, DDI_PROP_CMD_GET_DSIZE, NULL);
2613         if (size < DDI_PROP_RESULT_OK) {
2614                 switch (size) {
2615                 case DDI_PROP_RESULT_EOF:
2616                         return (DDI_PROP_END_OF_DATA);
2617 
2618                 case DDI_PROP_RESULT_ERROR:
2619                         return (DDI_PROP_CANNOT_DECODE);
2620                 }
2621         }
2622 
2623         /*
2624          * Allocate memory to store the decoded value in.
2625          */
2626         str = ddi_prop_decode_alloc((size_t)size, ddi_prop_free_string);
2627 
2628         ddi_prop_reset_pos(ph);
2629 
2630         /*
2631          * Decode the str and place it in the space we just allocated
2632          */
2633         tmp = str;
2634         i = DDI_PROP_STR(ph, DDI_PROP_CMD_DECODE, tmp);
2635         if (i < DDI_PROP_RESULT_OK) {
2636                 /*
2637                  * Free the space we just allocated
2638                  * and return an error.
2639                  */
2640                 ddi_prop_free(str);
2641                 switch (i) {
2642                 case DDI_PROP_RESULT_EOF:
2643                         return (DDI_PROP_END_OF_DATA);
2644 
2645                 case DDI_PROP_RESULT_ERROR:
2646                         return (DDI_PROP_CANNOT_DECODE);
2647                 }
2648         }
2649 
2650         *(char **)data = str;
2651         *nelements = 1;
2652 
2653         return (DDI_PROP_SUCCESS);
2654 }
2655 
2656 /*
2657  * Decode an array of strings.
2658  */
2659 int
2660 ddi_prop_fm_decode_strings(prop_handle_t *ph, void *data, uint_t *nelements)
2661 {
2662         int             cnt = 0;
2663         char            **strs;
2664         char            **tmp;
2665         char            *ptr;
2666         int             i;
2667         int             n;
2668         int             size;
2669         size_t          nbytes;
2670 
2671         /*
2672          * Figure out how many array elements there are by going through the
2673          * data without decoding it first and counting.
2674          */
2675         for (;;) {
2676                 i = DDI_PROP_STR(ph, DDI_PROP_CMD_SKIP, NULL);
2677                 if (i < 0)
2678                         break;
2679                 cnt++;
2680         }
2681 
2682         /*
2683          * If there are no elements return an error
2684          */
2685         if (cnt == 0)
2686                 return (DDI_PROP_END_OF_DATA);
2687 
2688         /*
2689          * If we cannot skip through the data, we cannot decode it
2690          */
2691         if (i == DDI_PROP_RESULT_ERROR)
2692                 return (DDI_PROP_CANNOT_DECODE);
2693 
2694         /*
2695          * Reset the data pointer to the beginning of the encoded data
2696          */
2697         ddi_prop_reset_pos(ph);
2698 
2699         /*
2700          * Figure out how much memory we need for the sum total
2701          */
2702         nbytes = (cnt + 1) * sizeof (char *);
2703 
2704         for (n = 0; n < cnt; n++) {
2705                 /*
2706                  * Get the decoded size of the current encoded string.
2707                  */
2708                 size = DDI_PROP_STR(ph, DDI_PROP_CMD_GET_DSIZE, NULL);
2709                 if (size < DDI_PROP_RESULT_OK) {
2710                         switch (size) {
2711                         case DDI_PROP_RESULT_EOF:
2712                                 return (DDI_PROP_END_OF_DATA);
2713 
2714                         case DDI_PROP_RESULT_ERROR:
2715                                 return (DDI_PROP_CANNOT_DECODE);
2716                         }
2717                 }
2718 
2719                 nbytes += size;
2720         }
2721 
2722         /*
2723          * Allocate memory in which to store the decoded strings.
2724          */
2725         strs = ddi_prop_decode_alloc(nbytes, ddi_prop_free_strings);
2726 
2727         /*
2728          * Set up pointers for each string by figuring out yet
2729          * again how long each string is.
2730          */
2731         ddi_prop_reset_pos(ph);
2732         ptr = (caddr_t)strs + ((cnt + 1) * sizeof (char *));
2733         for (tmp = strs, n = 0; n < cnt; n++, tmp++) {
2734                 /*
2735                  * Get the decoded size of the current encoded string.
2736                  */
2737                 size = DDI_PROP_STR(ph, DDI_PROP_CMD_GET_DSIZE, NULL);
2738                 if (size < DDI_PROP_RESULT_OK) {
2739                         ddi_prop_free(strs);
2740                         switch (size) {
2741                         case DDI_PROP_RESULT_EOF:
2742                                 return (DDI_PROP_END_OF_DATA);
2743 
2744                         case DDI_PROP_RESULT_ERROR:
2745                                 return (DDI_PROP_CANNOT_DECODE);
2746                         }
2747                 }
2748 
2749                 *tmp = ptr;
2750                 ptr += size;
2751         }
2752 
2753         /*
2754          * String array is terminated by a NULL
2755          */
2756         *tmp = NULL;
2757 
2758         /*
2759          * Finally, we can decode each string
2760          */
2761         ddi_prop_reset_pos(ph);
2762         for (tmp = strs, n = 0; n < cnt; n++, tmp++) {
2763                 i = DDI_PROP_STR(ph, DDI_PROP_CMD_DECODE, *tmp);
2764                 if (i < DDI_PROP_RESULT_OK) {
2765                         /*
2766                          * Free the space we just allocated
2767                          * and return an error
2768                          */
2769                         ddi_prop_free(strs);
2770                         switch (i) {
2771                         case DDI_PROP_RESULT_EOF:
2772                                 return (DDI_PROP_END_OF_DATA);
2773 
2774                         case DDI_PROP_RESULT_ERROR:
2775                                 return (DDI_PROP_CANNOT_DECODE);
2776                         }
2777                 }
2778         }
2779 
2780         *(char ***)data = strs;
2781         *nelements = cnt;
2782 
2783         return (DDI_PROP_SUCCESS);
2784 }
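
/*
 * Usage sketch: the decoded string array is a single allocation holding a
 * NULL-terminated pointer vector followed by the strings themselves, so a
 * caller of ddi_prop_lookup_string_array releases everything with one
 * ddi_prop_free of the vector; "compatible" is only an example name.
 *
 *      char    **strs;
 *      uint_t  nstrs, n;
 *
 *      if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY, dip,
 *          DDI_PROP_DONTPASS, "compatible", &strs, &nstrs) ==
 *          DDI_PROP_SUCCESS) {
 *              for (n = 0; n < nstrs; n++)
 *                      cmn_err(CE_CONT, "%s\n", strs[n]);
 *              ddi_prop_free(strs);
 *      }
 */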
2785 
2786 /*
2787  * Encode a string.
2788  */
2789 int
2790 ddi_prop_fm_encode_string(prop_handle_t *ph, void *data, uint_t nelements)
2791 {
2792         char            **tmp;
2793         int             size;
2794         int             i;
2795 
2796         /*
2797          * If there is no data, we cannot do anything
2798          */
2799         if (nelements == 0)
2800                 return (DDI_PROP_CANNOT_ENCODE);
2801 
2802         /*
2803          * Get the size of the encoded string.
2804          */
2805         tmp = (char **)data;
2806         size = DDI_PROP_STR(ph, DDI_PROP_CMD_GET_ESIZE, *tmp);
2807         if (size < DDI_PROP_RESULT_OK) {
2808                 switch (size) {
2809                 case DDI_PROP_RESULT_EOF:
2810                         return (DDI_PROP_END_OF_DATA);
2811 
2812                 case DDI_PROP_RESULT_ERROR:
2813                         return (DDI_PROP_CANNOT_ENCODE);
2814                 }
2815         }
2816 
2817         /*
2818          * Allocate space in the handle to store the encoded string.
2819          */
2820         if (ddi_prop_encode_alloc(ph, size) != DDI_PROP_SUCCESS)
2821                 return (DDI_PROP_NO_MEMORY);
2822 
2823         ddi_prop_reset_pos(ph);
2824 
2825         /*
2826          * Encode the string.
2827          */
2828         tmp = (char **)data;
2829         i = DDI_PROP_STR(ph, DDI_PROP_CMD_ENCODE, *tmp);
2830         if (i < DDI_PROP_RESULT_OK) {
2831                 switch (i) {
2832                 case DDI_PROP_RESULT_EOF:
2833                         return (DDI_PROP_END_OF_DATA);
2834 
2835                 case DDI_PROP_RESULT_ERROR:
2836                         return (DDI_PROP_CANNOT_ENCODE);
2837                 }
2838         }
2839 
2840         return (DDI_PROP_SUCCESS);
2841 }
2842 
2843 
2844 /*
2845  * Encode an array of strings.
2846  */
2847 int
2848 ddi_prop_fm_encode_strings(prop_handle_t *ph, void *data, uint_t nelements)
2849 {
2850         int             cnt = 0;
2851         char            **tmp;
2852         int             size;
2853         uint_t          total_size;
2854         int             i;
2855 
2856         /*
2857          * If there is no data, we cannot do anything
2858          */
2859         if (nelements == 0)
2860                 return (DDI_PROP_CANNOT_ENCODE);
2861 
2862         /*
2863          * Get the total size required to encode all the strings.
2864          */
2865         total_size = 0;
2866         tmp = (char **)data;
2867         for (cnt = 0; cnt < nelements; cnt++, tmp++) {
2868                 size = DDI_PROP_STR(ph, DDI_PROP_CMD_GET_ESIZE, *tmp);
2869                 if (size < DDI_PROP_RESULT_OK) {
2870                         switch (size) {
2871                         case DDI_PROP_RESULT_EOF:
2872                                 return (DDI_PROP_END_OF_DATA);
2873 
2874                         case DDI_PROP_RESULT_ERROR:
2875                                 return (DDI_PROP_CANNOT_ENCODE);
2876                         }
2877                 }
2878                 total_size += (uint_t)size;
2879         }
2880 
2881         /*
2882          * Allocate space in the handle to store the encoded strings.
2883          */
2884         if (ddi_prop_encode_alloc(ph, total_size) != DDI_PROP_SUCCESS)
2885                 return (DDI_PROP_NO_MEMORY);
2886 
2887         ddi_prop_reset_pos(ph);
2888 
2889         /*
2890          * Encode the array of strings.
2891          */
2892         tmp = (char **)data;
2893         for (cnt = 0; cnt < nelements; cnt++, tmp++) {
2894                 i = DDI_PROP_STR(ph, DDI_PROP_CMD_ENCODE, *tmp);
2895                 if (i < DDI_PROP_RESULT_OK) {
2896                         switch (i) {
2897                         case DDI_PROP_RESULT_EOF:
2898                                 return (DDI_PROP_END_OF_DATA);
2899 
2900                         case DDI_PROP_RESULT_ERROR:
2901                                 return (DDI_PROP_CANNOT_ENCODE);
2902                         }
2903                 }
2904         }
2905 
2906         return (DDI_PROP_SUCCESS);
2907 }
2908 
2909 
2910 /*
2911  * Decode an array of bytes.
2912  */
2913 static int
2914 ddi_prop_fm_decode_bytes(prop_handle_t *ph, void *data, uint_t *nelements)
2915 {
2916         uchar_t         *tmp;
2917         int             nbytes;
2918         int             i;
2919 
2920         /*
2921          * If there are no elements return an error
2922          */
2923         if (ph->ph_size == 0)
2924                 return (DDI_PROP_END_OF_DATA);
2925 
2926         /*
2927          * Get the size of the encoded array of bytes.
2928          */
2929         nbytes = DDI_PROP_BYTES(ph, DDI_PROP_CMD_GET_DSIZE,
2930             data, ph->ph_size);
2931         if (nbytes < DDI_PROP_RESULT_OK) {
2932                 switch (nbytes) {
2933                 case DDI_PROP_RESULT_EOF:
2934                         return (DDI_PROP_END_OF_DATA);
2935 
2936                 case DDI_PROP_RESULT_ERROR:
2937                         return (DDI_PROP_CANNOT_DECODE);
2938                 }
2939         }
2940 
2941         /*
2942          * Allocate memory to store the decoded value in.
2943          */
2944         tmp = ddi_prop_decode_alloc(nbytes, ddi_prop_free_bytes);
2945 
2946         /*
2947          * Decode each element and place it in the space we just allocated
2948          */
2949         i = DDI_PROP_BYTES(ph, DDI_PROP_CMD_DECODE, tmp, nbytes);
2950         if (i < DDI_PROP_RESULT_OK) {
2951                 /*
2952                  * Free the space we just allocated
2953                  * and return an error
2954                  */
2955                 ddi_prop_free(tmp);
2956                 switch (i) {
2957                 case DDI_PROP_RESULT_EOF:
2958                         return (DDI_PROP_END_OF_DATA);
2959 
2960                 case DDI_PROP_RESULT_ERROR:
2961                         return (DDI_PROP_CANNOT_DECODE);
2962                 }
2963         }
2964 
2965         *(uchar_t **)data = tmp;
2966         *nelements = nbytes;
2967 
2968         return (DDI_PROP_SUCCESS);
2969 }
2970 
2971 /*
2972  * Encode an array of bytes.
2973  */
2974 int
2975 ddi_prop_fm_encode_bytes(prop_handle_t *ph, void *data, uint_t nelements)
2976 {
2977         int             size;
2978         int             i;
2979 
2980         /*
2981          * If there are no elements, then this is a boolean property,
2982          * so just create a property handle with no data and return.
2983          */
2984         if (nelements == 0) {
2985                 (void) ddi_prop_encode_alloc(ph, 0);
2986                 return (DDI_PROP_SUCCESS);
2987         }
2988 
2989         /*
2990          * Get the size of the encoded array of bytes.
2991          */
2992         size = DDI_PROP_BYTES(ph, DDI_PROP_CMD_GET_ESIZE, (uchar_t *)data,
2993             nelements);
2994         if (size < DDI_PROP_RESULT_OK) {
2995                 switch (size) {
2996                 case DDI_PROP_RESULT_EOF:
2997                         return (DDI_PROP_END_OF_DATA);
2998 
2999                 case DDI_PROP_RESULT_ERROR:
3000                         return (DDI_PROP_CANNOT_ENCODE);
3001                 }
3002         }
3003 
3004         /*
3005          * Allocate space in the handle to store the encoded bytes.
3006          */
3007         if (ddi_prop_encode_alloc(ph, (uint_t)size) != DDI_PROP_SUCCESS)
3008                 return (DDI_PROP_NO_MEMORY);
3009 
3010         /*
3011          * Encode the array of bytes.
3012          */
3013         i = DDI_PROP_BYTES(ph, DDI_PROP_CMD_ENCODE, (uchar_t *)data,
3014             nelements);
3015         if (i < DDI_PROP_RESULT_OK) {
3016                 switch (i) {
3017                 case DDI_PROP_RESULT_EOF:
3018                         return (DDI_PROP_END_OF_DATA);
3019 
3020                 case DDI_PROP_RESULT_ERROR:
3021                         return (DDI_PROP_CANNOT_ENCODE);
3022                 }
3023         }
3024 
3025         return (DDI_PROP_SUCCESS);
3026 }
3027 
3028 /*
3029  * OBP 1275 integer, string and byte operators.
3030  *
3031  * DDI_PROP_CMD_DECODE:
3032  *
3033  *      DDI_PROP_RESULT_ERROR:          cannot decode the data
3034  *      DDI_PROP_RESULT_EOF:            end of data
3035  *      DDI_PROP_RESULT_OK:             data was decoded
3036  *
3037  * DDI_PROP_CMD_ENCODE:
3038  *
3039  *      DDI_PROP_RESULT_ERROR:          cannot encode the data
3040  *      DDI_PROP_RESULT_EOF:            end of data
3041  *      DDI_PROP_RESULT_OK:             data was encoded
3042  *
3043  * DDI_PROP_CMD_SKIP:
3044  *
3045  *      DDI_PROP_RESULT_ERROR:          cannot skip the data
3046  *      DDI_PROP_RESULT_EOF:            end of data
3047  *      DDI_PROP_RESULT_OK:             data was skipped
3048  *
3049  * DDI_PROP_CMD_GET_ESIZE:
3050  *
3051  *      DDI_PROP_RESULT_ERROR:          cannot get encoded size
3052  *      DDI_PROP_RESULT_EOF:            end of data
3053  *      > 0:                         the encoded size
3054  *
3055  * DDI_PROP_CMD_GET_DSIZE:
3056  *
3057  *      DDI_PROP_RESULT_ERROR:          cannot get decoded size
3058  *      DDI_PROP_RESULT_EOF:            end of data
3059  *      > 0:                         the decoded size
3060  */
3061 
3062 /*
3063  * OBP 1275 integer operator
3064  *
3065  * OBP properties are a byte stream of data, so integers may not be
3066  * properly aligned.  Therefore we need to copy them one byte at a time.
3067  */
3068 int
3069 ddi_prop_1275_int(prop_handle_t *ph, uint_t cmd, int *data)
3070 {
3071         int     i;
3072 
3073         switch (cmd) {
3074         case DDI_PROP_CMD_DECODE:
3075                 /*
3076                  * Check that there is encoded data
3077                  */
3078                 if (ph->ph_cur_pos == NULL || ph->ph_size == 0)
3079                         return (DDI_PROP_RESULT_ERROR);
3080                 if (ph->ph_flags & PH_FROM_PROM) {
3081                         i = MIN(ph->ph_size, PROP_1275_INT_SIZE);
3082                         if ((int *)ph->ph_cur_pos > ((int *)ph->ph_data +
3083                             ph->ph_size - i))
3084                                 return (DDI_PROP_RESULT_ERROR);
3085                 } else {
3086                         if (ph->ph_size < sizeof (int) ||
3087                             ((int *)ph->ph_cur_pos > ((int *)ph->ph_data +
3088                             ph->ph_size - sizeof (int))))
3089                                 return (DDI_PROP_RESULT_ERROR);
3090                 }
3091 
3092                 /*
3093                  * Copy the integer, using the implementation-specific
3094                  * copy function if the property is coming from the PROM.
3095                  */
3096                 if (ph->ph_flags & PH_FROM_PROM) {
3097                         *data = impl_ddi_prop_int_from_prom(
3098                             (uchar_t *)ph->ph_cur_pos,
3099                             (ph->ph_size < PROP_1275_INT_SIZE) ?
3100                             ph->ph_size : PROP_1275_INT_SIZE);
3101                 } else {
3102                         bcopy(ph->ph_cur_pos, data, sizeof (int));
3103                 }
3104 
3105                 /*
3106                  * Move the current location to the start of the next
3107                  * bit of undecoded data.
3108                  */
3109                 ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos +
3110                     PROP_1275_INT_SIZE;
3111                 return (DDI_PROP_RESULT_OK);
3112 
3113         case DDI_PROP_CMD_ENCODE:
3114                 /*
3115                  * Check that there is room to encode the data
3116                  */
3117                 if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
3118                     ph->ph_size < PROP_1275_INT_SIZE ||
3119                     ((int *)ph->ph_cur_pos > ((int *)ph->ph_data +
3120                     ph->ph_size - sizeof (int))))
3121                         return (DDI_PROP_RESULT_ERROR);
3122 
3123                 /*
3124                  * Encode the integer into the byte stream one byte at a
3125                  * time.
3126                  */
3127                 bcopy(data, ph->ph_cur_pos, sizeof (int));
3128 
3129                 /*
3130                  * Move the current location to the start of the next bit of
3131                  * space where we can store encoded data.
3132                  */
3133                 ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos + PROP_1275_INT_SIZE;
3134                 return (DDI_PROP_RESULT_OK);
3135 
3136         case DDI_PROP_CMD_SKIP:
3137                 /*
3138                  * Check that there is encoded data
3139                  */
3140                 if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
3141                     ph->ph_size < PROP_1275_INT_SIZE)
3142                         return (DDI_PROP_RESULT_ERROR);
3143 
3145                 if ((caddr_t)ph->ph_cur_pos ==
3146                     (caddr_t)ph->ph_data + ph->ph_size) {
3147                         return (DDI_PROP_RESULT_EOF);
3148                 } else if ((caddr_t)ph->ph_cur_pos >
3149                     (caddr_t)ph->ph_data + ph->ph_size) {
3150                         return (DDI_PROP_RESULT_EOF);
3151                 }
3152 
3153                 /*
3154                  * Move the current location to the start of the next bit of
3155                  * undecoded data.
3156                  */
3157                 ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos + PROP_1275_INT_SIZE;
3158                 return (DDI_PROP_RESULT_OK);
3159 
3160         case DDI_PROP_CMD_GET_ESIZE:
3161                 /*
3162                  * Return the size of an encoded integer on OBP
3163                  */
3164                 return (PROP_1275_INT_SIZE);
3165 
3166         case DDI_PROP_CMD_GET_DSIZE:
3167                 /*
3168                  * Return the size of a decoded integer on the system.
3169                  */
3170                 return (sizeof (int));
3171 
3172         default:
3173 #ifdef DEBUG
3174                 panic("ddi_prop_1275_int: %x impossible", cmd);
3175                 /*NOTREACHED*/
3176 #else
3177                 return (DDI_PROP_RESULT_ERROR);
3178 #endif  /* DEBUG */
3179         }
3180 }
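
     /*
      * Illustrative sketch (not taken from this file): because the encoded
      * OBP byte stream offers no alignment guarantees, a PROM integer is
      * assembled a byte at a time.  Assuming the usual 32-bit big-endian
      * 1275 cell encoding, a copy helper along these lines would do; the
      * function name below is hypothetical, not the DDI implementation.
      *
      *        static int
      *        example_int_from_bytes(const uchar_t *bytes, size_t len)
      *        {
      *                uint32_t v = 0;
      *                size_t n;
      *
      *                for (n = 0; n < len && n < PROP_1275_INT_SIZE; n++)
      *                        v = (v << 8) | bytes[n];
      *                return ((int)v);
      *        }
      */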
3181 
3182 /*
3183  * 64 bit integer operator.
3184  *
3185  * This is an extension, defined by Sun, to the 1275 integer
3186  * operator.  This routine handles the encoding/decoding of
3187  * 64 bit integer properties.
3188  */
3189 int
3190 ddi_prop_int64_op(prop_handle_t *ph, uint_t cmd, int64_t *data)
3191 {
3192 
3193         switch (cmd) {
3194         case DDI_PROP_CMD_DECODE:
3195                 /*
3196                  * Check that there is encoded data
3197                  */
3198                 if (ph->ph_cur_pos == NULL || ph->ph_size == 0)
3199                         return (DDI_PROP_RESULT_ERROR);
3200                 if (ph->ph_flags & PH_FROM_PROM) {
3201                         return (DDI_PROP_RESULT_ERROR);
3202                 } else {
3203                         if (ph->ph_size < sizeof (int64_t) ||
3204                             ((int64_t *)ph->ph_cur_pos >
3205                             ((int64_t *)ph->ph_data +
3206                             ph->ph_size - sizeof (int64_t))))
3207                                 return (DDI_PROP_RESULT_ERROR);
3208                 }
3209                 /*
3210                  * Copy the integer.  64-bit integer properties are
3211                  * not supported from the PROM (rejected above).
3212                  */
3213                 if (ph->ph_flags & PH_FROM_PROM) {
3214                         return (DDI_PROP_RESULT_ERROR);
3215                 } else {
3216                         bcopy(ph->ph_cur_pos, data, sizeof (int64_t));
3217                 }
3218 
3219                 /*
3220                  * Move the current location to the start of the next
3221                  * bit of undecoded data.
3222                  */
3223                 ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos +
3224                     sizeof (int64_t);
3225                 return (DDI_PROP_RESULT_OK);
3226 
3227         case DDI_PROP_CMD_ENCODE:
3228                 /*
3229                  * Check that there is room to encode the data
3230                  */
3231                 if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
3232                     ph->ph_size < sizeof (int64_t) ||
3233                     ((int64_t *)ph->ph_cur_pos > ((int64_t *)ph->ph_data +
3234                     ph->ph_size - sizeof (int64_t))))
3235                         return (DDI_PROP_RESULT_ERROR);
3236 
3237                 /*
3238                  * Encode the integer into the byte stream one byte at a
3239                  * time.
3240                  */
3241                 bcopy(data, ph->ph_cur_pos, sizeof (int64_t));
3242 
3243                 /*
3244                  * Move the current location to the start of the next bit of
3245                  * space where we can store encoded data.
3246                  */
3247                 ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos +
3248                     sizeof (int64_t);
3249                 return (DDI_PROP_RESULT_OK);
3250 
3251         case DDI_PROP_CMD_SKIP:
3252                 /*
3253                  * Check that there is encoded data
3254                  */
3255                 if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
3256                     ph->ph_size < sizeof (int64_t))
3257                         return (DDI_PROP_RESULT_ERROR);
3258 
3259                 if ((caddr_t)ph->ph_cur_pos ==
3260                     (caddr_t)ph->ph_data + ph->ph_size) {
3261                         return (DDI_PROP_RESULT_EOF);
3262                 } else if ((caddr_t)ph->ph_cur_pos >
3263                     (caddr_t)ph->ph_data + ph->ph_size) {
3264                         return (DDI_PROP_RESULT_EOF);
3265                 }
3266 
3267                 /*
3268                  * Move the current location to the start of
3269                  * the next bit of undecoded data.
3270                  */
3271                 ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos +
3272                     sizeof (int64_t);
3273                 return (DDI_PROP_RESULT_OK);
3274 
3275         case DDI_PROP_CMD_GET_ESIZE:
3276                 /*
3277                  * Return the size of an encoded integer on OBP
3278                  */
3279                 return (sizeof (int64_t));
3280 
3281         case DDI_PROP_CMD_GET_DSIZE:
3282                 /*
3283                  * Return the size of a decoded integer on the system.
3284                  */
3285                 return (sizeof (int64_t));
3286 
3287         default:
3288 #ifdef DEBUG
3289                 panic("ddi_prop_int64_op: %x impossible", cmd);
3290                 /*NOTREACHED*/
3291 #else
3292                 return (DDI_PROP_RESULT_ERROR);
3293 #endif  /* DEBUG */
3294         }
3295 }
3296 
3297 /*
3298  * OBP 1275 string operator.
3299  *
3300  * OBP strings are NULL terminated.
3301  */
3302 int
3303 ddi_prop_1275_string(prop_handle_t *ph, uint_t cmd, char *data)
3304 {
3305         int     n;
3306         char    *p;
3307         char    *end;
3308 
3309         switch (cmd) {
3310         case DDI_PROP_CMD_DECODE:
3311                 /*
3312                  * Check that there is encoded data
3313                  */
3314                 if (ph->ph_cur_pos == NULL || ph->ph_size == 0) {
3315                         return (DDI_PROP_RESULT_ERROR);
3316                 }
3317 
3318                 /*
3319                  * Match DDI_PROP_CMD_GET_DSIZE logic for when to stop and
3320                  * how to NULL terminate result.
3321                  */
3322                 p = (char *)ph->ph_cur_pos;
3323                 end = (char *)ph->ph_data + ph->ph_size;
3324                 if (p >= end)
3325                         return (DDI_PROP_RESULT_EOF);
3326 
3327                 while (p < end) {
3328                         *data++ = *p;
3329                         if (*p++ == 0) {        /* NULL from OBP */
3330                                 ph->ph_cur_pos = p;
3331                                 return (DDI_PROP_RESULT_OK);
3332                         }
3333                 }
3334 
3335                 /*
3336                  * If OBP did not NULL terminate string, which happens
3337                  * (at least) for 'true'/'false' boolean values, account for
3338                  * the space and store null termination on decode.
3339                  */
3340                 ph->ph_cur_pos = p;
3341                 *data = 0;
3342                 return (DDI_PROP_RESULT_OK);
3343 
3344         case DDI_PROP_CMD_ENCODE:
3345                 /*
3346                  * Check that there is room to encode the data
3347                  */
3348                 if (ph->ph_cur_pos == NULL || ph->ph_size == 0) {
3349                         return (DDI_PROP_RESULT_ERROR);
3350                 }
3351 
3352                 n = strlen(data) + 1;
3353                 if ((char *)ph->ph_cur_pos > ((char *)ph->ph_data +
3354                     ph->ph_size - n)) {
3355                         return (DDI_PROP_RESULT_ERROR);
3356                 }
3357 
3358                 /*
3359                  * Copy the NULL terminated string
3360                  */
3361                 bcopy(data, ph->ph_cur_pos, n);
3362 
3363                 /*
3364                  * Move the current location to the start of the next bit of
3365                  * space where we can store encoded data.
3366                  */
3367                 ph->ph_cur_pos = (char *)ph->ph_cur_pos + n;
3368                 return (DDI_PROP_RESULT_OK);
3369 
3370         case DDI_PROP_CMD_SKIP:
3371                 /*
3372                  * Check that there is encoded data
3373                  */
3374                 if (ph->ph_cur_pos == NULL || ph->ph_size == 0) {
3375                         return (DDI_PROP_RESULT_ERROR);
3376                 }
3377 
3378                 /*
3379                  * Skip the NULL terminated string.  We know the size
3380                  * of the property, but we need to ensure that the
3381                  * string is properly formatted, since we may be
3382                  * looking up random OBP data.
3383                  */
3384                 p = (char *)ph->ph_cur_pos;
3385                 end = (char *)ph->ph_data + ph->ph_size;
3386                 if (p >= end)
3387                         return (DDI_PROP_RESULT_EOF);
3388 
3389                 while (p < end) {
3390                         if (*p++ == 0) {        /* NULL from OBP */
3391                                 ph->ph_cur_pos = p;
3392                                 return (DDI_PROP_RESULT_OK);
3393                         }
3394                 }
3395 
3396                 /*
3397                  * Accommodate the fact that OBP does not always NULL
3398                  * terminate strings.
3399                  */
3400                 ph->ph_cur_pos = p;
3401                 return (DDI_PROP_RESULT_OK);
3402 
3403         case DDI_PROP_CMD_GET_ESIZE:
3404                 /*
3405                  * Return the size of the encoded string on OBP.
3406                  */
3407                 return (strlen(data) + 1);
3408 
3409         case DDI_PROP_CMD_GET_DSIZE:
3410                 /*
3411                  * Return the string length plus one for the NULL.
3412                  * We know the size of the property, but we need to
3413                  * ensure that the string is properly formatted,
3414                  * since we may be looking up random OBP data.
3415                  */
3416                 p = (char *)ph->ph_cur_pos;
3417                 end = (char *)ph->ph_data + ph->ph_size;
3418                 if (p >= end)
3419                         return (DDI_PROP_RESULT_EOF);
3420 
3421                 for (n = 0; p < end; n++) {
3422                         if (*p++ == 0) {        /* NULL from OBP */
3423                                 ph->ph_cur_pos = p;
3424                                 return (n + 1);
3425                         }
3426                 }
3427 
3428                 /*
3429                  * If OBP did not NULL terminate string, which happens for
3430                  * 'true'/'false' boolean values, account for the space
3431                  * to store null termination here.
3432                  */
3433                 ph->ph_cur_pos = p;
3434                 return (n + 1);
3435 
3436         default:
3437 #ifdef DEBUG
3438                 panic("ddi_prop_1275_string: %x impossible", cmd);
3439                 /*NOTREACHED*/
3440 #else
3441                 return (DDI_PROP_RESULT_ERROR);
3442 #endif  /* DEBUG */
3443         }
3444 }
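
     /*
      * Illustrative sketch (an assumption about the encoding, not taken
      * from this file): an encoded string-array property is simply the
      * member strings laid end to end, each normally followed by its NULL
      * terminator.  Counting the elements of such an encoding could look
      * like this, where 'buf' and 'size' stand for the raw property value
      * and its length:
      *
      *        uint_t nstrings = 0;
      *        char *p;
      *
      *        for (p = buf; p < buf + size; p++)
      *                if (*p == '\0')
      *                        nstrings++;
      */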
3445 
3446 /*
3447  * OBP 1275 byte operator
3448  *
3449  * Caller must specify the number of bytes to get.  OBP encodes each
3450  * byte as a byte, so there is a 1-to-1 translation.
3451  */
3452 int
3453 ddi_prop_1275_bytes(prop_handle_t *ph, uint_t cmd, uchar_t *data,
3454         uint_t nelements)
3455 {
3456         switch (cmd) {
3457         case DDI_PROP_CMD_DECODE:
3458                 /*
3459                  * Check that there is encoded data
3460                  */
3461                 if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
3462                     ph->ph_size < nelements ||
3463                     ((char *)ph->ph_cur_pos > ((char *)ph->ph_data +
3464                     ph->ph_size - nelements)))
3465                         return (DDI_PROP_RESULT_ERROR);
3466 
3467                 /*
3468                  * Copy out the bytes
3469                  */
3470                 bcopy(ph->ph_cur_pos, data, nelements);
3471 
3472                 /*
3473                  * Move the current location
3474                  */
3475                 ph->ph_cur_pos = (char *)ph->ph_cur_pos + nelements;
3476                 return (DDI_PROP_RESULT_OK);
3477 
3478         case DDI_PROP_CMD_ENCODE:
3479                 /*
3480                  * Check that there is room to encode the data
3481                  */
3482                 if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
3483                     ph->ph_size < nelements ||
3484                     ((char *)ph->ph_cur_pos > ((char *)ph->ph_data +
3485                     ph->ph_size - nelements)))
3486                         return (DDI_PROP_RESULT_ERROR);
3487 
3488                 /*
3489                  * Copy in the bytes
3490                  */
3491                 bcopy(data, ph->ph_cur_pos, nelements);
3492 
3493                 /*
3494                  * Move the current location to the start of the next bit of
3495                  * space where we can store encoded data.
3496                  */
3497                 ph->ph_cur_pos = (char *)ph->ph_cur_pos + nelements;
3498                 return (DDI_PROP_RESULT_OK);
3499 
3500         case DDI_PROP_CMD_SKIP:
3501                 /*
3502                  * Check that there is encoded data
3503                  */
3504                 if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
3505                     ph->ph_size < nelements)
3506                         return (DDI_PROP_RESULT_ERROR);
3507 
3508                 if ((char *)ph->ph_cur_pos > ((char *)ph->ph_data +
3509                     ph->ph_size - nelements))
3510                         return (DDI_PROP_RESULT_EOF);
3511 
3512                 /*
3513                  * Move the current location
3514                  */
3515                 ph->ph_cur_pos = (char *)ph->ph_cur_pos + nelements;
3516                 return (DDI_PROP_RESULT_OK);
3517 
3518         case DDI_PROP_CMD_GET_ESIZE:
3519                 /*
3520                  * The encoded size in bytes is the same as the
3521                  * decoded size provided by the caller.
3522                  */
3523                 return (nelements);
3524 
3525         case DDI_PROP_CMD_GET_DSIZE:
3526                 /*
3527                  * Just return the number of bytes specified by the caller.
3528                  */
3529                 return (nelements);
3530 
3531         default:
3532 #ifdef DEBUG
3533                 panic("ddi_prop_1275_bytes: %x impossible", cmd);
3534                 /*NOTREACHED*/
3535 #else
3536                 return (DDI_PROP_RESULT_ERROR);
3537 #endif  /* DEBUG */
3538         }
3539 }
3540 
3541 /*
3542  * Used for properties that come from the OBP, hardware configuration files,
3543  * or that are created by calls to ddi_prop_update(9F).
3544  */
3545 static struct prop_handle_ops prop_1275_ops = {
3546         ddi_prop_1275_int,
3547         ddi_prop_1275_string,
3548         ddi_prop_1275_bytes,
3549         ddi_prop_int64_op
3550 };
3551 
3552 
3553 /*
3554  * Interface to create/modify a managed property on child's behalf...
3555  * Flags interpreted are:
3556  *      DDI_PROP_CANSLEEP:      Allow memory allocation to sleep.
3557  *      DDI_PROP_SYSTEM_DEF:    Manipulate system list rather than driver list.
3558  *
3559  * Use same dev_t when modifying or undefining a property.
3560  * Search for properties with DDI_DEV_T_ANY to match first named
3561  * property on the list.
3562  *
3563  * Properties are stored LIFO, so a search will match the first
3564  * `matching' instance.
3565  */
3566 
3567 /*
3568  * ddi_prop_add:        Add a software defined property
3569  */
3570 
3571 /*
3572  * Macro to allocate a new, zeroed ddi_prop_t.
3573  * km_flags is either KM_SLEEP or KM_NOSLEEP.
3574  */
3575 
3576 #define DDI_NEW_PROP_T(km_flags)        \
3577         (kmem_zalloc(sizeof (ddi_prop_t), km_flags))
3578 
3579 static int
3580 ddi_prop_add(dev_t dev, dev_info_t *dip, int flags,
3581     char *name, caddr_t value, int length)
3582 {
3583         ddi_prop_t      *new_propp, *propp;
3584         ddi_prop_t      **list_head = &(DEVI(dip)->devi_drv_prop_ptr);
3585         int             km_flags = KM_NOSLEEP;
3586         int             name_buf_len;
3587 
3588         /*
3589          * If dev_t is DDI_DEV_T_ANY or name's length is zero, return error.
3590          */
3591 
3592         if (dev == DDI_DEV_T_ANY || name == (char *)0 || strlen(name) == 0)
3593                 return (DDI_PROP_INVAL_ARG);
3594 
3595         if (flags & DDI_PROP_CANSLEEP)
3596                 km_flags = KM_SLEEP;
3597 
3598         if (flags & DDI_PROP_SYSTEM_DEF)
3599                 list_head = &(DEVI(dip)->devi_sys_prop_ptr);
3600         else if (flags & DDI_PROP_HW_DEF)
3601                 list_head = &(DEVI(dip)->devi_hw_prop_ptr);
3602 
3603         if ((new_propp = DDI_NEW_PROP_T(km_flags)) == NULL)  {
3604                 cmn_err(CE_CONT, prop_no_mem_msg, name);
3605                 return (DDI_PROP_NO_MEMORY);
3606         }
3607 
3608         /*
3609          * If dev has major number 0 (DDI_MAJOR_T_UNKNOWN), then we need to
3610          * do a ddi_name_to_major to get the real major number for the
3611          * device.  This needs to be done because some drivers need to call
3612          * ddi_prop_create in their attach routines but they don't have a
3613          * dev.  By creating the dev ourselves when the major number is 0,
3614          * drivers will not have to know what their major number is.  They
3615          * can just create a dev with major number 0 and pass it in.  For
3616          * device 0, we will be doing a little extra work by recreating the
3617          * same dev that we already have, but it's the price you pay :-).
3618          *
3619          * This fixes bug #1098060.
3620          */
3621         if (getmajor(dev) == DDI_MAJOR_T_UNKNOWN) {
3622                 new_propp->prop_dev =
3623                     makedevice(ddi_name_to_major(DEVI(dip)->devi_binding_name),
3624                     getminor(dev));
3625         } else
3626                 new_propp->prop_dev = dev;
3627 
3628         /*
3629          * Allocate space for property name and copy it in...
3630          */
3631 
3632         name_buf_len = strlen(name) + 1;
3633         new_propp->prop_name = kmem_alloc(name_buf_len, km_flags);
3634         if (new_propp->prop_name == 0)       {
3635                 kmem_free(new_propp, sizeof (ddi_prop_t));
3636                 cmn_err(CE_CONT, prop_no_mem_msg, name);
3637                 return (DDI_PROP_NO_MEMORY);
3638         }
3639         bcopy(name, new_propp->prop_name, name_buf_len);
3640 
3641         /*
3642          * Set the property type
3643          */
3644         new_propp->prop_flags = flags & DDI_PROP_TYPE_MASK;
3645 
3646         /*
3647          * Set length and value ONLY if not an explicit property undefine:
3648          * NOTE: value and length are zero for explicit undefines.
3649          */
3650 
3651         if (flags & DDI_PROP_UNDEF_IT) {
3652                 new_propp->prop_flags |= DDI_PROP_UNDEF_IT;
3653         } else {
3654                 if ((new_propp->prop_len = length) != 0) {
3655                         new_propp->prop_val = kmem_alloc(length, km_flags);
3656                         if (new_propp->prop_val == 0)  {
3657                                 kmem_free(new_propp->prop_name, name_buf_len);
3658                                 kmem_free(new_propp, sizeof (ddi_prop_t));
3659                                 cmn_err(CE_CONT, prop_no_mem_msg, name);
3660                                 return (DDI_PROP_NO_MEMORY);
3661                         }
3662                         bcopy(value, new_propp->prop_val, length);
3663                 }
3664         }
3665 
3666         /*
3667          * Link property into beginning of list. (Properties are LIFO order.)
3668          */
3669 
3670         mutex_enter(&(DEVI(dip)->devi_lock));
3671         propp = *list_head;
3672         new_propp->prop_next = propp;
3673         *list_head = new_propp;
3674         mutex_exit(&(DEVI(dip)->devi_lock));
3675         return (DDI_PROP_SUCCESS);
3676 }
3677 
3678 
3679 /*
3680  * ddi_prop_change:     Modify a software managed property value
3681  *
3682  *                      Set new length and value if found.
3683  *                      Returns DDI_PROP_INVAL_ARG if dev is DDI_DEV_T_ANY or
3684  *                      the input name is the NULL string.
3685  *                      Returns DDI_PROP_NO_MEMORY if unable to allocate memory.
3686  *
3687  *                      Note: an undef can be modified to be a define
3688  *                      (you can't go the other way).
3689  */
3690 
3691 static int
3692 ddi_prop_change(dev_t dev, dev_info_t *dip, int flags,
3693     char *name, caddr_t value, int length)
3694 {
3695         ddi_prop_t      *propp;
3696         ddi_prop_t      **ppropp;
3697         caddr_t         p = NULL;
3698 
3699         if ((dev == DDI_DEV_T_ANY) || (name == NULL) || (strlen(name) == 0))
3700                 return (DDI_PROP_INVAL_ARG);
3701 
3702         /*
3703          * Preallocate buffer, even if we don't need it...
3704          */
3705         if (length != 0)  {
3706                 p = kmem_alloc(length, (flags & DDI_PROP_CANSLEEP) ?
3707                     KM_SLEEP : KM_NOSLEEP);
3708                 if (p == NULL)  {
3709                         cmn_err(CE_CONT, prop_no_mem_msg, name);
3710                         return (DDI_PROP_NO_MEMORY);
3711                 }
3712         }
3713 
3714         /*
3715          * If the dev_t value contains DDI_MAJOR_T_UNKNOWN for the major
3716          * number, a real dev_t value should be created based upon the dip's
3717          * binding driver.  See ddi_prop_add...
3718          */
3719         if (getmajor(dev) == DDI_MAJOR_T_UNKNOWN)
3720                 dev = makedevice(
3721                     ddi_name_to_major(DEVI(dip)->devi_binding_name),
3722                     getminor(dev));
3723 
3724         /*
3725          * Check to see if the property exists.  If so we modify it.
3726          * Else we create it by calling ddi_prop_add().
3727          */
3728         mutex_enter(&(DEVI(dip)->devi_lock));
3729         ppropp = &DEVI(dip)->devi_drv_prop_ptr;
3730         if (flags & DDI_PROP_SYSTEM_DEF)
3731                 ppropp = &DEVI(dip)->devi_sys_prop_ptr;
3732         else if (flags & DDI_PROP_HW_DEF)
3733                 ppropp = &DEVI(dip)->devi_hw_prop_ptr;
3734 
3735         if ((propp = i_ddi_prop_search(dev, name, flags, ppropp)) != NULL) {
3736                 /*
3737                  * The property exists: copy the new value into the
3738                  * preallocated buffer, free the old value, and
3739                  * install the new length and value.
3740                  */
3741                 if (length != 0)
3742                         bcopy(value, p, length);
3743 
3744                 if (propp->prop_len != 0)
3745                         kmem_free(propp->prop_val, propp->prop_len);
3746 
3747                 propp->prop_len = length;
3748                 propp->prop_val = p;
3749                 propp->prop_flags &= ~DDI_PROP_UNDEF_IT;
3750                 mutex_exit(&(DEVI(dip)->devi_lock));
3751                 return (DDI_PROP_SUCCESS);
3752         }
3753 
3754         mutex_exit(&(DEVI(dip)->devi_lock));
3755         if (length != 0)
3756                 kmem_free(p, length);
3757 
3758         return (ddi_prop_add(dev, dip, flags, name, value, length));
3759 }
3760 
3761 /*
3762  * Common update routine used to update and encode a property.  Creates
3763  * a property handle, calls the property encode routine, figures out if
3764  * the property already exists, updates it if it does, and creates it
3765  * if it does not.
3766  */
3767 int
3768 ddi_prop_update_common(dev_t match_dev, dev_info_t *dip, int flags,
3769     char *name, void *data, uint_t nelements,
3770     int (*prop_create)(prop_handle_t *, void *data, uint_t nelements))
3771 {
3772         prop_handle_t   ph;
3773         int             rval;
3774         uint_t          ourflags;
3775 
3776         /*
3777          * If dev_t is DDI_DEV_T_ANY or name's length is zero,
3778          * return error.
3779          */
3780         if (match_dev == DDI_DEV_T_ANY || name == NULL || strlen(name) == 0)
3781                 return (DDI_PROP_INVAL_ARG);
3782 
3783         /*
3784          * Create the handle
3785          */
3786         ph.ph_data = NULL;
3787         ph.ph_cur_pos = NULL;
3788         ph.ph_save_pos = NULL;
3789         ph.ph_size = 0;
3790         ph.ph_ops = &prop_1275_ops;
3791 
3792         /*
3793          * ourflags:
3794          * For compatibility with the old interfaces.  The old interfaces
3795          * didn't sleep by default and slept when the flag was set.  These
3796          * interfaces do the opposite.  So the old interfaces now set the
3797          * DDI_PROP_DONTSLEEP flag by default, which tells us not to sleep.
3798          *
3799          * ph.ph_flags:
3800          * Blocked data or unblocked data allocation
3801          * for ph.ph_data in ddi_prop_encode_alloc()
3802          */
3803         if (flags & DDI_PROP_DONTSLEEP) {
3804                 ourflags = flags;
3805                 ph.ph_flags = DDI_PROP_DONTSLEEP;
3806         } else {
3807                 ourflags = flags | DDI_PROP_CANSLEEP;
3808                 ph.ph_flags = DDI_PROP_CANSLEEP;
3809         }
3810 
3811         /*
3812          * Encode the data and store it in the property handle by
3813          * calling the prop_encode routine.
3814          */
3815         if ((rval = (*prop_create)(&ph, data, nelements)) !=
3816             DDI_PROP_SUCCESS) {
3817                 if (rval == DDI_PROP_NO_MEMORY)
3818                         cmn_err(CE_CONT, prop_no_mem_msg, name);
3819                 if (ph.ph_size != 0)
3820                         kmem_free(ph.ph_data, ph.ph_size);
3821                 return (rval);
3822         }
3823 
3824         /*
3825          * The old interfaces use a stacking approach to creating
3826          * properties.  If we are being called from the old interfaces,
3827          * the DDI_PROP_STACK_CREATE flag will be set, so we just do a
3828          * create without checking.
3829          */
3830         if (flags & DDI_PROP_STACK_CREATE) {
3831                 rval = ddi_prop_add(match_dev, dip,
3832                     ourflags, name, ph.ph_data, ph.ph_size);
3833         } else {
3834                 rval = ddi_prop_change(match_dev, dip,
3835                     ourflags, name, ph.ph_data, ph.ph_size);
3836         }
3837 
3838         /*
3839          * Free the encoded data allocated in the prop_encode routine.
3840          */
3841         if (ph.ph_size != 0)
3842                 kmem_free(ph.ph_data, ph.ph_size);
3843 
3844         return (rval);
3845 }
3846 
3847 
3848 /*
3849  * ddi_prop_create:     Define a managed property:
3850  *                      See above for details.
3851  */
3852 
3853 int
3854 ddi_prop_create(dev_t dev, dev_info_t *dip, int flag,
3855     char *name, caddr_t value, int length)
3856 {
3857         if (!(flag & DDI_PROP_CANSLEEP)) {
3858                 flag |= DDI_PROP_DONTSLEEP;
3859 #ifdef DDI_PROP_DEBUG
3860                 if (length != 0)
3861                         cmn_err(CE_NOTE, "!ddi_prop_create: interface obsolete, "
3862                             "use ddi_prop_update (prop = %s, node = %s%d)",
3863                             name, ddi_driver_name(dip), ddi_get_instance(dip));
3864 #endif /* DDI_PROP_DEBUG */
3865         }
3866         flag &= ~DDI_PROP_SYSTEM_DEF;
3867         flag |= DDI_PROP_STACK_CREATE | DDI_PROP_TYPE_ANY;
3868         return (ddi_prop_update_common(dev, dip, flag, name,
3869             value, length, ddi_prop_fm_encode_bytes));
3870 }
3871 
3872 int
3873 e_ddi_prop_create(dev_t dev, dev_info_t *dip, int flag,
3874     char *name, caddr_t value, int length)
3875 {
3876         if (!(flag & DDI_PROP_CANSLEEP))
3877                 flag |= DDI_PROP_DONTSLEEP;
3878         flag |= DDI_PROP_SYSTEM_DEF | DDI_PROP_STACK_CREATE | DDI_PROP_TYPE_ANY;
3879         return (ddi_prop_update_common(dev, dip, flag,
3880             name, value, length, ddi_prop_fm_encode_bytes));
3881 }
3882 
3883 int
3884 ddi_prop_modify(dev_t dev, dev_info_t *dip, int flag,
3885     char *name, caddr_t value, int length)
3886 {
3887         ASSERT((flag & DDI_PROP_TYPE_MASK) == 0);
3888 
3889         /*
3890          * If dev_t is DDI_DEV_T_ANY or name's length is zero,
3891          * return error.
3892          */
3893         if (dev == DDI_DEV_T_ANY || name == NULL || strlen(name) == 0)
3894                 return (DDI_PROP_INVAL_ARG);
3895 
3896         if (!(flag & DDI_PROP_CANSLEEP))
3897                 flag |= DDI_PROP_DONTSLEEP;
3898         flag &= ~DDI_PROP_SYSTEM_DEF;
3899         if (ddi_prop_exists(dev, dip, (flag | DDI_PROP_NOTPROM), name) == 0)
3900                 return (DDI_PROP_NOT_FOUND);
3901 
3902         return (ddi_prop_update_common(dev, dip,
3903             (flag | DDI_PROP_TYPE_BYTE), name,
3904             value, length, ddi_prop_fm_encode_bytes));
3905 }
3906 
3907 int
3908 e_ddi_prop_modify(dev_t dev, dev_info_t *dip, int flag,
3909     char *name, caddr_t value, int length)
3910 {
3911         ASSERT((flag & DDI_PROP_TYPE_MASK) == 0);
3912 
3913         /*
3914          * If dev_t is DDI_DEV_T_ANY or name's length is zero,
3915          * return error.
3916          */
3917         if (dev == DDI_DEV_T_ANY || name == NULL || strlen(name) == 0)
3918                 return (DDI_PROP_INVAL_ARG);
3919 
3920         if (ddi_prop_exists(dev, dip, (flag | DDI_PROP_SYSTEM_DEF), name) == 0)
3921                 return (DDI_PROP_NOT_FOUND);
3922 
3923         if (!(flag & DDI_PROP_CANSLEEP))
3924                 flag |= DDI_PROP_DONTSLEEP;
3925         return (ddi_prop_update_common(dev, dip,
3926             (flag | DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_BYTE),
3927             name, value, length, ddi_prop_fm_encode_bytes));
3928 }
3929 
3930 
3931 /*
3932  * Common lookup routine used to lookup and decode a property.
3933  * Creates a property handle, searches for the raw encoded data,
3934  * fills in the handle, and calls the property decode function
3935  * passed in.
3936  *
3937  * This routine is not static because ddi_bus_prop_op(), which lives in
3938  * ddi_impl.c, calls it.  No driver should be calling this routine.
3939  */
3940 int
3941 ddi_prop_lookup_common(dev_t match_dev, dev_info_t *dip,
3942     uint_t flags, char *name, void *data, uint_t *nelements,
3943     int (*prop_decoder)(prop_handle_t *, void *data, uint_t *nelements))
3944 {
3945         int             rval;
3946         uint_t          ourflags;
3947         prop_handle_t   ph;
3948 
3949         if ((match_dev == DDI_DEV_T_NONE) ||
3950             (name == NULL) || (strlen(name) == 0))
3951                 return (DDI_PROP_INVAL_ARG);
3952 
3953         ourflags = (flags & DDI_PROP_DONTSLEEP) ? flags :
3954             flags | DDI_PROP_CANSLEEP;
3955 
3956         /*
3957          * Get the encoded data
3958          */
3959         bzero(&ph, sizeof (prop_handle_t));
3960 
3961         if ((flags & DDI_UNBND_DLPI2) || (flags & DDI_PROP_ROOTNEX_GLOBAL)) {
3962                 /*
3963                  * For rootnex and unbound dlpi style-2 devices, index into
3964                  * the devnames array and search the global
3965                  * property list.
3966                  */
3967                 ourflags &= ~DDI_UNBND_DLPI2;
3968                 rval = i_ddi_prop_search_global(match_dev,
3969                     ourflags, name, &ph.ph_data, &ph.ph_size);
3970         } else {
3971                 rval = ddi_prop_search_common(match_dev, dip,
3972                     PROP_LEN_AND_VAL_ALLOC, ourflags, name,
3973                     &ph.ph_data, &ph.ph_size);
3975         }
3976 
3977         if (rval != DDI_PROP_SUCCESS && rval != DDI_PROP_FOUND_1275) {
3978                 ASSERT(ph.ph_data == NULL);
3979                 ASSERT(ph.ph_size == 0);
3980                 return (rval);
3981         }
3982 
3983         /*
3984          * If the encoded data came from OBP or software,
3985          * use the 1275 OBP decode/encode routines.
3986          */
3987         ph.ph_cur_pos = ph.ph_data;
3988         ph.ph_save_pos = ph.ph_data;
3989         ph.ph_ops = &prop_1275_ops;
3990         ph.ph_flags = (rval == DDI_PROP_FOUND_1275) ? PH_FROM_PROM : 0;
3991 
3992         rval = (*prop_decoder)(&ph, data, nelements);
3993 
3994         /*
3995          * Free the encoded data
3996          */
3997         if (ph.ph_size != 0)
3998                 kmem_free(ph.ph_data, ph.ph_size);
3999 
4000         return (rval);
4001 }
4002 
4003 /*
4004  * Lookup and return an array of composite properties.  The driver must
4005  * provide the decode routine.
4006  */
4007 int
4008 ddi_prop_lookup(dev_t match_dev, dev_info_t *dip,
4009     uint_t flags, char *name, void *data, uint_t *nelements,
4010     int (*prop_decoder)(prop_handle_t *, void *data, uint_t *nelements))
4011 {
4012         return (ddi_prop_lookup_common(match_dev, dip,
4013             (flags | DDI_PROP_TYPE_COMPOSITE), name,
4014             data, nelements, prop_decoder));
4015 }
4016 
4017 /*
4018  * Return 1 if a property exists (no type checking done).
4019  * Return 0 if it does not exist.
4020  */
4021 int
4022 ddi_prop_exists(dev_t match_dev, dev_info_t *dip, uint_t flags, char *name)
4023 {
4024         int     i;
4025         uint_t  x = 0;
4026 
4027         i = ddi_prop_search_common(match_dev, dip, PROP_EXISTS,
4028             flags | DDI_PROP_TYPE_MASK, name, NULL, &x);
4029         return (i == DDI_PROP_SUCCESS || i == DDI_PROP_FOUND_1275);
4030 }
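
     /*
      * Example (hedged sketch): a driver that only needs to know whether a
      * property is present, not its value, can test for it directly; the
      * property name below is hypothetical.
      *
      *        if (ddi_prop_exists(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
      *            "example-feature-enabled")) {
      *                ... enable the optional feature ...
      *        }
      */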
4031 
4032 
4033 /*
4034  * Update an array of composite properties.  The driver must
4035  * provide the encode routine.
4036  */
4037 int
4038 ddi_prop_update(dev_t match_dev, dev_info_t *dip,
4039     char *name, void *data, uint_t nelements,
4040     int (*prop_create)(prop_handle_t *, void *data, uint_t nelements))
4041 {
4042         return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_COMPOSITE,
4043             name, data, nelements, prop_create));
4044 }
4045 
4046 /*
4047  * Get a single integer or boolean property and return it.
4048  * If the property does not exist, or cannot be decoded,
4049  * then return the defvalue passed in.
4050  *
4051  * This routine always succeeds.
4052  */
4053 int
4054 ddi_prop_get_int(dev_t match_dev, dev_info_t *dip, uint_t flags,
4055     char *name, int defvalue)
4056 {
4057         int     data;
4058         uint_t  nelements;
4059         int     rval;
4060 
4061         if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
4062             LDI_DEV_T_ANY | DDI_UNBND_DLPI2 | DDI_PROP_ROOTNEX_GLOBAL)) {
4063 #ifdef DEBUG
4064                 if (dip != NULL) {
4065                         cmn_err(CE_WARN, "ddi_prop_get_int: invalid flag"
4066                             " 0x%x (prop = %s, node = %s%d)", flags,
4067                             name, ddi_driver_name(dip), ddi_get_instance(dip));
4068                 }
4069 #endif /* DEBUG */
4070                 flags &= DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
4071                     LDI_DEV_T_ANY | DDI_UNBND_DLPI2;
4072         }
4073 
4074         if ((rval = ddi_prop_lookup_common(match_dev, dip,
4075             (flags | DDI_PROP_TYPE_INT), name, &data, &nelements,
4076             ddi_prop_fm_decode_int)) != DDI_PROP_SUCCESS) {
4077                 if (rval == DDI_PROP_END_OF_DATA)
4078                         data = 1;
4079                 else
4080                         data = defvalue;
4081         }
4082         return (data);
4083 }
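
     /*
      * Example (hedged sketch of typical use, not taken from this file):
      * a driver's attach(9E) path can read a tunable and fall back to a
      * default; the property name and default below are hypothetical.
      *
      *        int nthreads;
      *
      *        nthreads = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
      *            DDI_PROP_DONTPASS, "example-nthreads", 4);
      */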
4084 
4085 /*
4086  * Get a single 64 bit integer or boolean property and return it.
4087  * If the property does not exist, or cannot be decoded,
4088  * then return the defvalue passed in.
4089  *
4090  * This routine always succeeds.
4091  */
4092 int64_t
4093 ddi_prop_get_int64(dev_t match_dev, dev_info_t *dip, uint_t flags,
4094     char *name, int64_t defvalue)
4095 {
4096         int64_t data;
4097         uint_t  nelements;
4098         int     rval;
4099 
4100         if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
4101             LDI_DEV_T_ANY | DDI_UNBND_DLPI2 | DDI_PROP_ROOTNEX_GLOBAL)) {
4102 #ifdef DEBUG
4103                 if (dip != NULL) {
4104                         cmn_err(CE_WARN, "ddi_prop_get_int64: invalid flag"
4105                             " 0x%x (prop = %s, node = %s%d)", flags,
4106                             name, ddi_driver_name(dip), ddi_get_instance(dip));
4107                 }
4108 #endif /* DEBUG */
4109                 return (DDI_PROP_INVAL_ARG);
4110         }
4111 
4112         if ((rval = ddi_prop_lookup_common(match_dev, dip,
4113             (flags | DDI_PROP_TYPE_INT64 | DDI_PROP_NOTPROM),
4114             name, &data, &nelements, ddi_prop_fm_decode_int64))
4115             != DDI_PROP_SUCCESS) {
4116                 if (rval == DDI_PROP_END_OF_DATA)
4117                         data = 1;
4118                 else
4119                         data = defvalue;
4120         }
4121         return (data);
4122 }
4123 
4124 /*
4125  * Get an array of integer properties
4126  */
4127 int
4128 ddi_prop_lookup_int_array(dev_t match_dev, dev_info_t *dip, uint_t flags,
4129     char *name, int **data, uint_t *nelements)
4130 {
4131         if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
4132             LDI_DEV_T_ANY | DDI_UNBND_DLPI2 | DDI_PROP_ROOTNEX_GLOBAL)) {
4133 #ifdef DEBUG
4134                 if (dip != NULL) {
4135                         cmn_err(CE_WARN, "ddi_prop_lookup_int_array: "
4136                             "invalid flag 0x%x (prop = %s, node = %s%d)",
4137                             flags, name, ddi_driver_name(dip),
4138                             ddi_get_instance(dip));
4139                 }
4140 #endif /* DEBUG */
4141                 flags &= DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
4142                     LDI_DEV_T_ANY | DDI_UNBND_DLPI2;
4143         }
4144 
4145         return (ddi_prop_lookup_common(match_dev, dip,
4146             (flags | DDI_PROP_TYPE_INT), name, data,
4147             nelements, ddi_prop_fm_decode_ints));
4148 }
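
     /*
      * Example (hedged sketch): an integer-array lookup allocates the
      * result, so the caller releases it with ddi_prop_free(9F) when done;
      * the property name below is hypothetical.
      *
      *        int *vals;
      *        uint_t nvals;
      *
      *        if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip,
      *            DDI_PROP_DONTPASS, "example-ranges", &vals, &nvals) ==
      *            DDI_PROP_SUCCESS) {
      *                ... use vals[0] .. vals[nvals - 1] ...
      *                ddi_prop_free(vals);
      *        }
      */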
4149 
4150 /*
4151  * Get an array of 64 bit integer properties
4152  */
4153 int
4154 ddi_prop_lookup_int64_array(dev_t match_dev, dev_info_t *dip, uint_t flags,
4155     char *name, int64_t **data, uint_t *nelements)
4156 {
4157         if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
4158             LDI_DEV_T_ANY | DDI_UNBND_DLPI2 | DDI_PROP_ROOTNEX_GLOBAL)) {
4159 #ifdef DEBUG
4160                 if (dip != NULL) {
4161                         cmn_err(CE_WARN, "ddi_prop_lookup_int64_array: "
4162                             "invalid flag 0x%x (prop = %s, node = %s%d)",
4163                             flags, name, ddi_driver_name(dip),
4164                             ddi_get_instance(dip));
4165                 }
4166 #endif /* DEBUG */
4167                 return (DDI_PROP_INVAL_ARG);
4168         }
4169 
4170         return (ddi_prop_lookup_common(match_dev, dip,
4171             (flags | DDI_PROP_TYPE_INT64 | DDI_PROP_NOTPROM),
4172             name, data, nelements, ddi_prop_fm_decode_int64_array));
4173 }
4174 
4175 /*
4176  * Update a single integer property.  If the property exists on the driver's
4177  * property list it is updated, else it is created.
4178  */
4179 int
4180 ddi_prop_update_int(dev_t match_dev, dev_info_t *dip,
4181     char *name, int data)
4182 {
4183         return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_INT,
4184             name, &data, 1, ddi_prop_fm_encode_ints));
4185 }
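
     /*
      * Example (hedged sketch): an update that is not tied to a particular
      * minor device conventionally passes DDI_DEV_T_NONE as the matching
      * dev_t; the property name and value below are hypothetical.
      *
      *        (void) ddi_prop_update_int(DDI_DEV_T_NONE, dip,
      *            "example-link-speed", 1000);
      */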
4186 
4187 /*
4188  * Update a single 64 bit integer property.
4189  * Update the driver property list if it exists, else create it.
4190  */
4191 int
4192 ddi_prop_update_int64(dev_t match_dev, dev_info_t *dip,
4193     char *name, int64_t data)
4194 {
4195         return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_INT64,
4196             name, &data, 1, ddi_prop_fm_encode_int64));
4197 }
4198 
4199 int
4200 e_ddi_prop_update_int(dev_t match_dev, dev_info_t *dip,
4201     char *name, int data)
4202 {
4203         return (ddi_prop_update_common(match_dev, dip,
4204             DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_INT,
4205             name, &data, 1, ddi_prop_fm_encode_ints));
4206 }
4207 
4208 int
4209 e_ddi_prop_update_int64(dev_t match_dev, dev_info_t *dip,
4210     char *name, int64_t data)
4211 {
4212         return (ddi_prop_update_common(match_dev, dip,
4213             DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_INT64,
4214             name, &data, 1, ddi_prop_fm_encode_int64));
4215 }
4216 
4217 /*
4218  * Update an array of integer properties.  If the property exists on the
4219  * driver's property list it is updated, else it is created.
4220  */
4221 int
4222 ddi_prop_update_int_array(dev_t match_dev, dev_info_t *dip,
4223     char *name, int *data, uint_t nelements)
4224 {
4225         return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_INT,
4226             name, data, nelements, ddi_prop_fm_encode_ints));
4227 }
4228 
4229 /*
4230  * Update an array of 64 bit integer properties.
4231  * Update the driver property list if it exists, else create it.
4232  */
4233 int
4234 ddi_prop_update_int64_array(dev_t match_dev, dev_info_t *dip,
4235     char *name, int64_t *data, uint_t nelements)
4236 {
4237         return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_INT64,
4238             name, data, nelements, ddi_prop_fm_encode_int64));
4239 }
4240 
4241 int
4242 e_ddi_prop_update_int64_array(dev_t match_dev, dev_info_t *dip,
4243     char *name, int64_t *data, uint_t nelements)
4244 {
4245         return (ddi_prop_update_common(match_dev, dip,
4246             DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_INT64,
4247             name, data, nelements, ddi_prop_fm_encode_int64));
4248 }
4249 
4250 int
4251 e_ddi_prop_update_int_array(dev_t match_dev, dev_info_t *dip,
4252     char *name, int *data, uint_t nelements)
4253 {
4254         return (ddi_prop_update_common(match_dev, dip,
4255             DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_INT,
4256             name, data, nelements, ddi_prop_fm_encode_ints));
4257 }
4258 
4259 /*
4260  * Get a single string property.
4261  */
4262 int
4263 ddi_prop_lookup_string(dev_t match_dev, dev_info_t *dip, uint_t flags,
4264     char *name, char **data)
4265 {
4266         uint_t x;
4267 
4268         if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
4269             LDI_DEV_T_ANY | DDI_UNBND_DLPI2 | DDI_PROP_ROOTNEX_GLOBAL)) {
4270 #ifdef DEBUG
4271                 if (dip != NULL) {
4272                         cmn_err(CE_WARN, "%s: invalid flag 0x%x "
4273                             "(prop = %s, node = %s%d); invalid bits ignored",
4274                             "ddi_prop_lookup_string", flags, name,
4275                             ddi_driver_name(dip), ddi_get_instance(dip));
4276                 }
4277 #endif /* DEBUG */
4278                 flags &= DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
4279                     LDI_DEV_T_ANY | DDI_UNBND_DLPI2;
4280         }
4281 
4282         return (ddi_prop_lookup_common(match_dev, dip,
4283             (flags | DDI_PROP_TYPE_STRING), name, data,
4284             &x, ddi_prop_fm_decode_string));
4285 }
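
     /*
      * Example (hedged sketch): a looked-up string is allocated on the
      * caller's behalf and must be released with ddi_prop_free(9F); the
      * property name below is hypothetical.
      *
      *        char *model;
      *
      *        if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip,
      *            DDI_PROP_DONTPASS, "example-model", &model) ==
      *            DDI_PROP_SUCCESS) {
      *                cmn_err(CE_CONT, "?model: %s\n", model);
      *                ddi_prop_free(model);
      *        }
      */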
4286 
4287 /*
4288  * Get an array of strings property.
4289  */
4290 int
4291 ddi_prop_lookup_string_array(dev_t match_dev, dev_info_t *dip, uint_t flags,
4292     char *name, char ***data, uint_t *nelements)
4293 {
4294         if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
4295             LDI_DEV_T_ANY | DDI_UNBND_DLPI2 | DDI_PROP_ROOTNEX_GLOBAL)) {
4296 #ifdef DEBUG
4297                 if (dip != NULL) {
4298                         cmn_err(CE_WARN, "ddi_prop_lookup_string_array: "
4299                             "invalid flag 0x%x (prop = %s, node = %s%d)",
4300                             flags, name, ddi_driver_name(dip),
4301                             ddi_get_instance(dip));
4302                 }
4303 #endif /* DEBUG */
4304                 flags &= DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
4305                     LDI_DEV_T_ANY | DDI_UNBND_DLPI2;
4306         }
4307 
4308         return (ddi_prop_lookup_common(match_dev, dip,
4309             (flags | DDI_PROP_TYPE_STRING), name, data,
4310             nelements, ddi_prop_fm_decode_strings));
4311 }
4312 
4313 /*
4314  * Update a single string property.
4315  */
4316 int
4317 ddi_prop_update_string(dev_t match_dev, dev_info_t *dip,
4318     char *name, char *data)
4319 {
4320         return (ddi_prop_update_common(match_dev, dip,
4321             DDI_PROP_TYPE_STRING, name, &data, 1,
4322             ddi_prop_fm_encode_string));
4323 }
4324 
4325 int
4326 e_ddi_prop_update_string(dev_t match_dev, dev_info_t *dip,
4327     char *name, char *data)
4328 {
4329         return (ddi_prop_update_common(match_dev, dip,
4330             DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_STRING,
4331             name, &data, 1, ddi_prop_fm_encode_string));
4332 }
4333 
4334 
4335 /*
4336  * Update an array of strings property.
4337  */
4338 int
4339 ddi_prop_update_string_array(dev_t match_dev, dev_info_t *dip,
4340     char *name, char **data, uint_t nelements)
4341 {
4342         return (ddi_prop_update_common(match_dev, dip,
4343             DDI_PROP_TYPE_STRING, name, data, nelements,
4344             ddi_prop_fm_encode_strings));
4345 }
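
     /*
      * Example (hedged sketch): a string array is passed as an array of
      * pointers plus an element count; the property name and strings
      * below are hypothetical.
      *
      *        char *compat[] = { "vendor,devA", "vendor,devB" };
      *
      *        (void) ddi_prop_update_string_array(DDI_DEV_T_NONE, dip,
      *            "example-compatible", compat, 2);
      */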
4346 
4347 int
4348 e_ddi_prop_update_string_array(dev_t match_dev, dev_info_t *dip,
4349     char *name, char **data, uint_t nelements)
4350 {
4351         return (ddi_prop_update_common(match_dev, dip,
4352             DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_STRING,
4353             name, data, nelements,
4354             ddi_prop_fm_encode_strings));
4355 }
4356 
4357 
4358 /*
4359  * Get an array of bytes property.
4360  */
4361 int
4362 ddi_prop_lookup_byte_array(dev_t match_dev, dev_info_t *dip, uint_t flags,
4363     char *name, uchar_t **data, uint_t *nelements)
4364 {
4365         if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
4366             LDI_DEV_T_ANY | DDI_UNBND_DLPI2 | DDI_PROP_ROOTNEX_GLOBAL)) {
4367 #ifdef DEBUG
4368                 if (dip != NULL) {
4369                         cmn_err(CE_WARN, "ddi_prop_lookup_byte_array: "
4370                             "invalid flag 0x%x (prop = %s, node = %s%d)",
4371                             flags, name, ddi_driver_name(dip),
4372                             ddi_get_instance(dip));
4373                 }
4374 #endif /* DEBUG */
4375                 flags &= DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
4376                     LDI_DEV_T_ANY | DDI_UNBND_DLPI2;
4377         }
4378 
4379         return (ddi_prop_lookup_common(match_dev, dip,
4380             (flags | DDI_PROP_TYPE_BYTE), name, data,
4381             nelements, ddi_prop_fm_decode_bytes));
4382 }
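
     /*
      * Example (hedged sketch): byte arrays suit opaque data such as a
      * factory-programmed station address; the property name below is
      * hypothetical and the result, as with the other lookups, is freed
      * with ddi_prop_free(9F).
      *
      *        uchar_t *mac;
      *        uint_t maclen;
      *
      *        if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, dip,
      *            DDI_PROP_DONTPASS, "example-mac-address", &mac,
      *            &maclen) == DDI_PROP_SUCCESS) {
      *                ... maclen bytes at mac ...
      *                ddi_prop_free(mac);
      *        }
      */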
4383 
4384 /*
4385  * Update an array of bytes property.
4386  */
4387 int
4388 ddi_prop_update_byte_array(dev_t match_dev, dev_info_t *dip,
4389     char *name, uchar_t *data, uint_t nelements)
4390 {
4391         if (nelements == 0)
4392                 return (DDI_PROP_INVAL_ARG);
4393 
4394         return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_BYTE,
4395             name, data, nelements, ddi_prop_fm_encode_bytes));
4396 }
4397 
4398 
4399 int
4400 e_ddi_prop_update_byte_array(dev_t match_dev, dev_info_t *dip,
4401     char *name, uchar_t *data, uint_t nelements)
4402 {
4403         if (nelements == 0)
4404                 return (DDI_PROP_INVAL_ARG);
4405 
4406         return (ddi_prop_update_common(match_dev, dip,
4407             DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_BYTE,
4408             name, data, nelements, ddi_prop_fm_encode_bytes));
4409 }
4410 
4411 
4412 /*
4413  * ddi_prop_remove_common:      Undefine a managed property:
4414  *                      Input dev_t must match dev_t when defined.
4415  *                      Returns DDI_PROP_NOT_FOUND if the property is not found.
4416  *                      DDI_PROP_INVAL_ARG is also possible if dev is
4417  *                      DDI_DEV_T_ANY or incoming name is the NULL string.
4418  */
4419 int
4420 ddi_prop_remove_common(dev_t dev, dev_info_t *dip, char *name, int flag)
4421 {
4422         ddi_prop_t      **list_head = &(DEVI(dip)->devi_drv_prop_ptr);
4423         ddi_prop_t      *propp;
4424         ddi_prop_t      *lastpropp = NULL;
4425 
4426         if ((dev == DDI_DEV_T_ANY) || (name == (char *)0) ||
4427             (strlen(name) == 0)) {
4428                 return (DDI_PROP_INVAL_ARG);
4429         }
4430 
4431         if (flag & DDI_PROP_SYSTEM_DEF)
4432                 list_head = &(DEVI(dip)->devi_sys_prop_ptr);
4433         else if (flag & DDI_PROP_HW_DEF)
4434                 list_head = &(DEVI(dip)->devi_hw_prop_ptr);
4435 
4436         mutex_enter(&(DEVI(dip)->devi_lock));
4437 
4438         for (propp = *list_head; propp != NULL; propp = propp->prop_next)  {
4439                 if (DDI_STRSAME(propp->prop_name, name) &&
4440                     (dev == propp->prop_dev)) {
4441                         /*
4442                          * Unlink this propp allowing for it to
4443                          * be first in the list:
4444                          */
4445 
4446                         if (lastpropp == NULL)
4447                                 *list_head = propp->prop_next;
4448                         else
4449                                 lastpropp->prop_next = propp->prop_next;
4450 
4451                         mutex_exit(&(DEVI(dip)->devi_lock));
4452 
4453                         /*
4454                          * Free memory and return...
4455                          */
4456                         kmem_free(propp->prop_name,
4457                             strlen(propp->prop_name) + 1);
4458                         if (propp->prop_len != 0)
4459                                 kmem_free(propp->prop_val, propp->prop_len);
4460                         kmem_free(propp, sizeof (ddi_prop_t));
4461                         return (DDI_PROP_SUCCESS);
4462                 }
4463                 lastpropp = propp;
4464         }
4465         mutex_exit(&(DEVI(dip)->devi_lock));
4466         return (DDI_PROP_NOT_FOUND);
4467 }
4468 
4469 int
4470 ddi_prop_remove(dev_t dev, dev_info_t *dip, char *name)
4471 {
4472         return (ddi_prop_remove_common(dev, dip, name, 0));
4473 }
4474 
4475 int
4476 e_ddi_prop_remove(dev_t dev, dev_info_t *dip, char *name)
4477 {
4478         return (ddi_prop_remove_common(dev, dip, name, DDI_PROP_SYSTEM_DEF));
4479 }
4480 
4481 /*
4482  * e_ddi_prop_list_delete: remove a list of properties
4483  *      Note that the caller needs to provide the required protection
4484  *      (eg. devi_lock if these properties are still attached to a devi)
4485  */
4486 void
4487 e_ddi_prop_list_delete(ddi_prop_t *props)
4488 {
4489         i_ddi_prop_list_delete(props);
4490 }
4491 
4492 /*
4493  * ddi_prop_remove_all_common:
4494  *      Used before unloading a driver to remove
4495  *      all properties. (undefines all dev_t's props.)
4496  *      Also removes `explicitly undefined' props.
4497  *      No errors possible.
4498  */
4499 void
4500 ddi_prop_remove_all_common(dev_info_t *dip, int flag)
4501 {
4502         ddi_prop_t      **list_head;
4503 
4504         mutex_enter(&(DEVI(dip)->devi_lock));
4505         if (flag & DDI_PROP_SYSTEM_DEF) {
4506                 list_head = &(DEVI(dip)->devi_sys_prop_ptr);
4507         } else if (flag & DDI_PROP_HW_DEF) {
4508                 list_head = &(DEVI(dip)->devi_hw_prop_ptr);
4509         } else {
4510                 list_head = &(DEVI(dip)->devi_drv_prop_ptr);
4511         }
4512         i_ddi_prop_list_delete(*list_head);
4513         *list_head = NULL;
4514         mutex_exit(&(DEVI(dip)->devi_lock));
4515 }
4516 
4517 
4518 /*
4519  * ddi_prop_remove_all:         Remove all driver prop definitions.
4520  */
4521 
4522 void
4523 ddi_prop_remove_all(dev_info_t *dip)
4524 {
4525         i_ddi_prop_dyn_driver_set(dip, NULL);
4526         ddi_prop_remove_all_common(dip, 0);
4527 }
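
     /*
      * Example (hedged sketch): a driver's detach(9E) path conventionally
      * removes the properties it created before the node goes away.
      *
      *        case DDI_DETACH:
      *                ddi_prop_remove_all(dip);
      *                ...
      */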
4528 
4529 /*
4530  * e_ddi_prop_remove_all:       Remove all system prop definitions.
4531  */
4532 
4533 void
4534 e_ddi_prop_remove_all(dev_info_t *dip)
4535 {
4536         ddi_prop_remove_all_common(dip, (int)DDI_PROP_SYSTEM_DEF);
4537 }
4538 
4539 
4540 /*
4541  * ddi_prop_undefine:   Explicitly undefine a property.  Property
4542  *                      searches which match this property return
4543  *                      the error code DDI_PROP_UNDEFINED.
4544  *
4545  *                      Use ddi_prop_remove to negate effect of
4546  *                      ddi_prop_undefine
4547  *
4548  *                      See above for error returns.
4549  */
4550 
4551 int
4552 ddi_prop_undefine(dev_t dev, dev_info_t *dip, int flag, char *name)
4553 {
4554         if (!(flag & DDI_PROP_CANSLEEP))
4555                 flag |= DDI_PROP_DONTSLEEP;
4556         flag |= DDI_PROP_STACK_CREATE | DDI_PROP_UNDEF_IT | DDI_PROP_TYPE_ANY;
4557         return (ddi_prop_update_common(dev, dip, flag,
4558             name, NULL, 0, ddi_prop_fm_encode_bytes));
4559 }
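
     /*
      * Example (hedged sketch): after an explicit undefine, matching
      * searches return DDI_PROP_UNDEFINED; a later ddi_prop_remove of the
      * same dev_t and name negates the undefine.  The names below are
      * hypothetical.
      *
      *        (void) ddi_prop_undefine(dev, dip, 0, "example-inherited");
      *        ...
      *        (void) ddi_prop_remove(dev, dip, "example-inherited");
      */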
4560 
4561 int
4562 e_ddi_prop_undefine(dev_t dev, dev_info_t *dip, int flag, char *name)
4563 {
4564         if (!(flag & DDI_PROP_CANSLEEP))
4565                 flag |= DDI_PROP_DONTSLEEP;
4566         flag |= DDI_PROP_SYSTEM_DEF | DDI_PROP_STACK_CREATE |
4567             DDI_PROP_UNDEF_IT | DDI_PROP_TYPE_ANY;
4568         return (ddi_prop_update_common(dev, dip, flag,
4569             name, NULL, 0, ddi_prop_fm_encode_bytes));
4570 }
4571 
4572 /*
4573  * Support for gathering dynamic properties in devinfo snapshot.
4574  */
4575 void
4576 i_ddi_prop_dyn_driver_set(dev_info_t *dip, i_ddi_prop_dyn_t *dp)
4577 {
4578         DEVI(dip)->devi_prop_dyn_driver = dp;
4579 }
4580 
4581 i_ddi_prop_dyn_t *
4582 i_ddi_prop_dyn_driver_get(dev_info_t *dip)
4583 {
4584         return (DEVI(dip)->devi_prop_dyn_driver);
4585 }
4586 
4587 void
4588 i_ddi_prop_dyn_parent_set(dev_info_t *dip, i_ddi_prop_dyn_t *dp)
4589 {
4590         DEVI(dip)->devi_prop_dyn_parent = dp;
4591 }
4592 
4593 i_ddi_prop_dyn_t *
4594 i_ddi_prop_dyn_parent_get(dev_info_t *dip)
4595 {
4596         return (DEVI(dip)->devi_prop_dyn_parent);
4597 }
4598 
4599 void
4600 i_ddi_prop_dyn_cache_invalidate(dev_info_t *dip, i_ddi_prop_dyn_t *dp)
4601 {
4602         /* for now we invalidate the entire cached snapshot */
4603         if (dip && dp)
4604                 i_ddi_di_cache_invalidate();
4605 }
4606 
4607 /* ARGSUSED */
4608 void
4609 ddi_prop_cache_invalidate(dev_t dev, dev_info_t *dip, char *name, int flags)
4610 {
4611         /* for now we invalidate the entire cached snapshot */
4612         i_ddi_di_cache_invalidate();
4613 }
4614 
4615 
4616 /*
 * Code to search the hardware layer (PROM), if it exists, on behalf of child.
 *
 * If input dip != child_dip, then the call is on behalf of the child:
 * search the PROM via ddi_prop_search_common() and ascend only
 * if allowed.
 *
 * If input dip == ch_dip (child_dip), the call is on behalf of the root
 * driver, to search for PROM defined props only.
4625  *
4626  * Note that the PROM search is done only if the requested dev
4627  * is either DDI_DEV_T_ANY or DDI_DEV_T_NONE. PROM properties
4628  * have no associated dev, thus are automatically associated with
4629  * DDI_DEV_T_NONE.
4630  *
4631  * Modifying flag DDI_PROP_NOTPROM inhibits the search in the h/w layer.
4632  *
 * Returns DDI_PROP_FOUND_1275 if found, to indicate to the framework
 * that the property resides in the PROM.
4635  */
4636 int
4637 impl_ddi_bus_prop_op(dev_t dev, dev_info_t *dip, dev_info_t *ch_dip,
4638     ddi_prop_op_t prop_op, int mod_flags,
4639     char *name, caddr_t valuep, int *lengthp)
4640 {
4641         int     len;
4642         caddr_t buffer;
4643 
4644         /*
         * If the requested dev is DDI_DEV_T_NONE or DDI_DEV_T_ANY, then
         * look in the caller's PROM if it's a self-identifying device...
         *
         * Note that this is very similar to ddi_prop_op, but we
         * search the PROM instead of the s/w defined properties,
         * and we are called by the parent driver to do this for
         * the child.
4652          */
4653 
4654         if (((dev == DDI_DEV_T_NONE) || (dev == DDI_DEV_T_ANY)) &&
4655             ndi_dev_is_prom_node(ch_dip) &&
4656             ((mod_flags & DDI_PROP_NOTPROM) == 0)) {
4657                 len = prom_getproplen((pnode_t)DEVI(ch_dip)->devi_nodeid, name);
4658                 if (len == -1) {
4659                         return (DDI_PROP_NOT_FOUND);
4660                 }
4661 
4662                 /*
4663                  * If exists only request, we're done
4664                  */
4665                 if (prop_op == PROP_EXISTS) {
4666                         return (DDI_PROP_FOUND_1275);
4667                 }
4668 
4669                 /*
4670                  * If length only request or prop length == 0, get out
4671                  */
4672                 if ((prop_op == PROP_LEN) || (len == 0)) {
4673                         *lengthp = len;
4674                         return (DDI_PROP_FOUND_1275);
4675                 }
4676 
4677                 /*
                 * Allocate a buffer if required... (either way, `buffer'
                 * receives the address).
4680                  */
4681 
4682                 switch (prop_op) {
4683 
4684                 case PROP_LEN_AND_VAL_ALLOC:
4685 
4686                         buffer = kmem_alloc((size_t)len,
4687                             mod_flags & DDI_PROP_CANSLEEP ?
4688                             KM_SLEEP : KM_NOSLEEP);
4689                         if (buffer == NULL) {
4690                                 return (DDI_PROP_NO_MEMORY);
4691                         }
4692                         *(caddr_t *)valuep = buffer;
4693                         break;
4694 
4695                 case PROP_LEN_AND_VAL_BUF:
4696 
4697                         if (len > (*lengthp)) {
4698                                 *lengthp = len;
4699                                 return (DDI_PROP_BUF_TOO_SMALL);
4700                         }
4701 
4702                         buffer = valuep;
4703                         break;
4704 
4705                 default:
4706                         break;
4707                 }
4708 
4709                 /*
4710                  * Call the PROM function to do the copy.
4711                  */
4712                 (void) prom_getprop((pnode_t)DEVI(ch_dip)->devi_nodeid,
4713                     name, buffer);
4714 
4715                 *lengthp = len; /* return the actual length to the caller */
4716                 (void) impl_fix_props(dip, ch_dip, name, len, buffer);
4717                 return (DDI_PROP_FOUND_1275);
4718         }
4719 
4720         return (DDI_PROP_NOT_FOUND);
4721 }
4722 
4723 /*
 * ddi_bus_prop_op: the default bus nexus prop_op function.
 *
 * Searches the hardware layer (PROM), if it exists,
 * on behalf of the child; then, if appropriate, ascends and checks
 * this node's own software defined properties...
4729  */
4730 int
4731 ddi_bus_prop_op(dev_t dev, dev_info_t *dip, dev_info_t *ch_dip,
4732     ddi_prop_op_t prop_op, int mod_flags,
4733     char *name, caddr_t valuep, int *lengthp)
4734 {
4735         int     error;
4736 
4737         error = impl_ddi_bus_prop_op(dev, dip, ch_dip, prop_op, mod_flags,
4738             name, valuep, lengthp);
4739 
4740         if (error == DDI_PROP_SUCCESS || error == DDI_PROP_FOUND_1275 ||
4741             error == DDI_PROP_BUF_TOO_SMALL)
4742                 return (error);
4743 
4744         if (error == DDI_PROP_NO_MEMORY) {
4745                 cmn_err(CE_CONT, prop_no_mem_msg, name);
4746                 return (DDI_PROP_NO_MEMORY);
4747         }
4748 
4749         /*
4750          * Check the 'options' node as a last resort
4751          */
4752         if ((mod_flags & DDI_PROP_DONTPASS) != 0)
4753                 return (DDI_PROP_NOT_FOUND);
4754 
4755         if (ch_dip == ddi_root_node())  {
4756                 /*
4757                  * As a last resort, when we've reached
4758                  * the top and still haven't found the
4759                  * property, see if the desired property
4760                  * is attached to the options node.
4761                  *
4762                  * The options dip is attached right after boot.
4763                  */
4764                 ASSERT(options_dip != NULL);
4765                 /*
4766                  * Force the "don't pass" flag to *just* see
4767                  * what the options node has to offer.
4768                  */
4769                 return (ddi_prop_search_common(dev, options_dip, prop_op,
4770                     mod_flags|DDI_PROP_DONTPASS, name, valuep,
4771                     (uint_t *)lengthp));
4772         }
4773 
4774         /*
4775          * Otherwise, continue search with parent's s/w defined properties...
4776          * NOTE: Using `dip' in following call increments the level.
4777          */
4778 
4779         return (ddi_prop_search_common(dev, dip, prop_op, mod_flags,
4780             name, valuep, (uint_t *)lengthp));
4781 }
4782 
4783 /*
4784  * External property functions used by other parts of the kernel...
4785  */
4786 
4787 /*
 * e_ddi_getlongprop: See comments for ddi_getlongprop.
4789  */
4790 
4791 int
4792 e_ddi_getlongprop(dev_t dev, vtype_t type, char *name, int flags,
4793     caddr_t valuep, int *lengthp)
4794 {
4795         _NOTE(ARGUNUSED(type))
4796         dev_info_t *devi;
4797         ddi_prop_op_t prop_op = PROP_LEN_AND_VAL_ALLOC;
4798         int error;
4799 
4800         if ((devi = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
4801                 return (DDI_PROP_NOT_FOUND);
4802 
4803         error = cdev_prop_op(dev, devi, prop_op, flags, name, valuep, lengthp);
4804         ddi_release_devi(devi);
4805         return (error);
4806 }
4807 
4808 /*
4809  * e_ddi_getlongprop_buf:       See comments for ddi_getlongprop_buf.
4810  */
4811 
4812 int
4813 e_ddi_getlongprop_buf(dev_t dev, vtype_t type, char *name, int flags,
4814     caddr_t valuep, int *lengthp)
4815 {
4816         _NOTE(ARGUNUSED(type))
4817         dev_info_t *devi;
4818         ddi_prop_op_t prop_op = PROP_LEN_AND_VAL_BUF;
4819         int error;
4820 
4821         if ((devi = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
4822                 return (DDI_PROP_NOT_FOUND);
4823 
4824         error = cdev_prop_op(dev, devi, prop_op, flags, name, valuep, lengthp);
4825         ddi_release_devi(devi);
4826         return (error);
4827 }
4828 
4829 /*
4830  * e_ddi_getprop:       See comments for ddi_getprop.
4831  */
4832 int
4833 e_ddi_getprop(dev_t dev, vtype_t type, char *name, int flags, int defvalue)
4834 {
4835         _NOTE(ARGUNUSED(type))
4836         dev_info_t *devi;
4837         ddi_prop_op_t prop_op = PROP_LEN_AND_VAL_BUF;
4838         int     propvalue = defvalue;
4839         int     proplength = sizeof (int);
4840         int     error;
4841 
4842         if ((devi = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
4843                 return (defvalue);
4844 
4845         error = cdev_prop_op(dev, devi, prop_op,
4846             flags, name, (caddr_t)&propvalue, &proplength);
4847         ddi_release_devi(devi);
4848 
4849         if ((error == DDI_PROP_SUCCESS) && (proplength == 0))
4850                 propvalue = 1;
4851 
4852         return (propvalue);
4853 }
4854 
4855 /*
4856  * e_ddi_getprop_int64:
4857  *
 * This is a typed interface, but predates typed properties. With the
 * introduction of typed properties the framework tries to ensure
 * consistent use of typed interfaces. This is why TYPE_INT64 is not
 * part of TYPE_ANY.  e_ddi_getprop_int64() is a special case where a
 * typed interface invokes legacy (non-typed) interfaces:
 * cdev_prop_op(), prop_op(9E), ddi_prop_op(9F).  In this case the
4864  * fact that TYPE_INT64 is not part of TYPE_ANY matters.  To support
4865  * this type of lookup as a single operation we invoke the legacy
4866  * non-typed interfaces with the special CONSUMER_TYPED bit set. The
4867  * framework ddi_prop_op(9F) implementation is expected to check for
4868  * CONSUMER_TYPED and, if set, expand type bits beyond TYPE_ANY
4869  * (currently TYPE_INT64).
4870  */
4871 int64_t
4872 e_ddi_getprop_int64(dev_t dev, vtype_t type, char *name,
4873     int flags, int64_t defvalue)
4874 {
4875         _NOTE(ARGUNUSED(type))
4876         dev_info_t      *devi;
4877         ddi_prop_op_t   prop_op = PROP_LEN_AND_VAL_BUF;
4878         int64_t         propvalue = defvalue;
4879         int             proplength = sizeof (propvalue);
4880         int             error;
4881 
4882         if ((devi = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
4883                 return (defvalue);
4884 
4885         error = cdev_prop_op(dev, devi, prop_op, flags |
4886             DDI_PROP_CONSUMER_TYPED, name, (caddr_t)&propvalue, &proplength);
4887         ddi_release_devi(devi);
4888 
4889         if ((error == DDI_PROP_SUCCESS) && (proplength == 0))
4890                 propvalue = 1;
4891 
4892         return (propvalue);
4893 }
4894 
4895 /*
4896  * e_ddi_getproplen:    See comments for ddi_getproplen.
4897  */
4898 int
4899 e_ddi_getproplen(dev_t dev, vtype_t type, char *name, int flags, int *lengthp)
4900 {
4901         _NOTE(ARGUNUSED(type))
4902         dev_info_t *devi;
4903         ddi_prop_op_t prop_op = PROP_LEN;
4904         int error;
4905 
4906         if ((devi = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
4907                 return (DDI_PROP_NOT_FOUND);
4908 
4909         error = cdev_prop_op(dev, devi, prop_op, flags, name, NULL, lengthp);
4910         ddi_release_devi(devi);
4911         return (error);
4912 }
4913 
4914 /*
4915  * Routines to get at elements of the dev_info structure
4916  */
4917 
4918 /*
 * ddi_binding_name: Return the driver binding name of the devinfo node.
4920  *              This is the name the OS used to bind the node to a driver.
4921  */
4922 char *
4923 ddi_binding_name(dev_info_t *dip)
4924 {
4925         return (DEVI(dip)->devi_binding_name);
4926 }
4927 
4928 /*
4929  * ddi_driver_major: Return the major number of the driver that
4930  *      the supplied devinfo is bound to.  If not yet bound,
4931  *      DDI_MAJOR_T_NONE.
4932  *
4933  * When used by the driver bound to 'devi', this
4934  * function will reliably return the driver major number.
4935  * Other ways of determining the driver major number, such as
4936  *      major = ddi_name_to_major(ddi_get_name(devi));
4937  *      major = ddi_name_to_major(ddi_binding_name(devi));
4938  * can return a different result as the driver/alias binding
4939  * can change dynamically, and thus should be avoided.
4940  */
4941 major_t
4942 ddi_driver_major(dev_info_t *devi)
4943 {
4944         return (DEVI(devi)->devi_major);
4945 }
4946 
4947 /*
 * ddi_driver_name: Return the normalized driver name; this is the
 *              actual driver name.
4950  */
4951 const char *
4952 ddi_driver_name(dev_info_t *devi)
4953 {
4954         major_t major;
4955 
4956         if ((major = ddi_driver_major(devi)) != DDI_MAJOR_T_NONE)
4957                 return (ddi_major_to_name(major));
4958 
4959         return (ddi_node_name(devi));
4960 }
4961 
4962 /*
4963  * i_ddi_set_binding_name:      Set binding name.
4964  *
4965  *      Set the binding name to the given name.
4966  *      This routine is for use by the ddi implementation, not by drivers.
4967  */
4968 void
4969 i_ddi_set_binding_name(dev_info_t *dip, char *name)
4970 {
        DEVI(dip)->devi_binding_name = name;
}
4974 
4975 /*
4976  * ddi_get_name: A synonym of ddi_binding_name() ... returns a name
4977  * the implementation has used to bind the node to a driver.
4978  */
4979 char *
4980 ddi_get_name(dev_info_t *dip)
4981 {
4982         return (DEVI(dip)->devi_binding_name);
4983 }
4984 
4985 /*
 * ddi_node_name: Return the name property of the devinfo node.
4987  *              This may differ from ddi_binding_name if the node name
4988  *              does not define a binding to a driver (i.e. generic names).
4989  */
4990 char *
4991 ddi_node_name(dev_info_t *dip)
4992 {
4993         return (DEVI(dip)->devi_node_name);
4994 }
4995 
4996 
4997 /*
4998  * ddi_get_nodeid:      Get nodeid stored in dev_info structure.
4999  */
5000 int
5001 ddi_get_nodeid(dev_info_t *dip)
5002 {
5003         return (DEVI(dip)->devi_nodeid);
5004 }
5005 
5006 int
5007 ddi_get_instance(dev_info_t *dip)
5008 {
5009         return (DEVI(dip)->devi_instance);
5010 }
5011 
5012 struct dev_ops *
5013 ddi_get_driver(dev_info_t *dip)
5014 {
5015         return (DEVI(dip)->devi_ops);
5016 }
5017 
5018 void
5019 ddi_set_driver(dev_info_t *dip, struct dev_ops *devo)
5020 {
5021         DEVI(dip)->devi_ops = devo;
5022 }
5023 
5024 /*
5025  * ddi_set_driver_private/ddi_get_driver_private:
5026  * Get/set device driver private data in devinfo.
5027  */
5028 void
5029 ddi_set_driver_private(dev_info_t *dip, void *data)
5030 {
5031         DEVI(dip)->devi_driver_data = data;
5032 }
5033 
5034 void *
5035 ddi_get_driver_private(dev_info_t *dip)
5036 {
5037         return (DEVI(dip)->devi_driver_data);
5038 }
5039 
5040 /*
5041  * ddi_get_parent, ddi_get_child, ddi_get_next_sibling
5042  */
5043 
5044 dev_info_t *
5045 ddi_get_parent(dev_info_t *dip)
5046 {
5047         return ((dev_info_t *)DEVI(dip)->devi_parent);
5048 }
5049 
5050 dev_info_t *
5051 ddi_get_child(dev_info_t *dip)
5052 {
5053         return ((dev_info_t *)DEVI(dip)->devi_child);
5054 }
5055 
5056 dev_info_t *
5057 ddi_get_next_sibling(dev_info_t *dip)
5058 {
5059         return ((dev_info_t *)DEVI(dip)->devi_sibling);
5060 }
5061 
5062 dev_info_t *
5063 ddi_get_next(dev_info_t *dip)
5064 {
5065         return ((dev_info_t *)DEVI(dip)->devi_next);
5066 }
5067 
5068 void
5069 ddi_set_next(dev_info_t *dip, dev_info_t *nextdip)
5070 {
5071         DEVI(dip)->devi_next = DEVI(nextdip);
5072 }
5073 
5074 /*
5075  * ddi_root_node:               Return root node of devinfo tree
5076  */
5077 
5078 dev_info_t *
5079 ddi_root_node(void)
5080 {
5081         extern dev_info_t *top_devinfo;
5082 
5083         return (top_devinfo);
5084 }
5085 
5086 /*
5087  * Miscellaneous functions:
5088  */
5089 
5090 /*
5091  * Implementation specific hooks
5092  */
5093 
5094 void
5095 ddi_report_dev(dev_info_t *d)
5096 {
5097         char *b;
5098 
5099         (void) ddi_ctlops(d, d, DDI_CTLOPS_REPORTDEV, (void *)0, (void *)0);
5100 
5101         /*
5102          * If this devinfo node has cb_ops, it's implicitly accessible from
5103          * userland, so we print its full name together with the instance
5104          * number 'abbreviation' that the driver may use internally.
5105          */
5106         if (DEVI(d)->devi_ops->devo_cb_ops != (struct cb_ops *)0 &&
5107             (b = kmem_zalloc(MAXPATHLEN, KM_NOSLEEP))) {
5108                 cmn_err(CE_CONT, "?%s%d is %s\n",
5109                     ddi_driver_name(d), ddi_get_instance(d),
5110                     ddi_pathname(d, b));
5111                 kmem_free(b, MAXPATHLEN);
5112         }
5113 }
5114 
5115 /*
5116  * ddi_ctlops() is described in the assembler not to buy a new register
5117  * window when it's called and can reduce cost in climbing the device tree
5118  * without using the tail call optimization.
5119  */
5120 int
5121 ddi_dev_regsize(dev_info_t *dev, uint_t rnumber, off_t *result)
5122 {
5123         int ret;
5124 
5125         ret = ddi_ctlops(dev, dev, DDI_CTLOPS_REGSIZE,
5126             (void *)&rnumber, (void *)result);
5127 
5128         return (ret == DDI_SUCCESS ? DDI_SUCCESS : DDI_FAILURE);
5129 }
5130 
5131 int
5132 ddi_dev_nregs(dev_info_t *dev, int *result)
5133 {
5134         return (ddi_ctlops(dev, dev, DDI_CTLOPS_NREGS, 0, (void *)result));
5135 }
5136 
5137 int
5138 ddi_dev_is_sid(dev_info_t *d)
5139 {
5140         return (ddi_ctlops(d, d, DDI_CTLOPS_SIDDEV, (void *)0, (void *)0));
5141 }
5142 
5143 int
5144 ddi_slaveonly(dev_info_t *d)
5145 {
5146         return (ddi_ctlops(d, d, DDI_CTLOPS_SLAVEONLY, (void *)0, (void *)0));
5147 }
5148 
5149 int
5150 ddi_dev_affinity(dev_info_t *a, dev_info_t *b)
5151 {
5152         return (ddi_ctlops(a, a, DDI_CTLOPS_AFFINITY, (void *)b, (void *)0));
5153 }
5154 
5155 int
5156 ddi_streams_driver(dev_info_t *dip)
5157 {
5158         if (i_ddi_devi_attached(dip) &&
5159             (DEVI(dip)->devi_ops->devo_cb_ops != NULL) &&
5160             (DEVI(dip)->devi_ops->devo_cb_ops->cb_str != NULL))
5161                 return (DDI_SUCCESS);
5162         return (DDI_FAILURE);
5163 }
5164 
5165 /*
5166  * callback free list
5167  */
5168 
5169 static int ncallbacks;
5170 static int nc_low = 170;
5171 static int nc_med = 512;
5172 static int nc_high = 2048;
5173 static struct ddi_callback *callbackq;
5174 static struct ddi_callback *callbackqfree;
5175 
5176 /*
5177  * set/run callback lists
5178  */
5179 struct  cbstats {
5180         kstat_named_t   cb_asked;
5181         kstat_named_t   cb_new;
5182         kstat_named_t   cb_run;
5183         kstat_named_t   cb_delete;
5184         kstat_named_t   cb_maxreq;
5185         kstat_named_t   cb_maxlist;
5186         kstat_named_t   cb_alloc;
5187         kstat_named_t   cb_runouts;
5188         kstat_named_t   cb_L2;
5189         kstat_named_t   cb_grow;
5190 } cbstats = {
5191         {"asked",       KSTAT_DATA_UINT32},
5192         {"new",         KSTAT_DATA_UINT32},
5193         {"run",         KSTAT_DATA_UINT32},
5194         {"delete",      KSTAT_DATA_UINT32},
5195         {"maxreq",      KSTAT_DATA_UINT32},
5196         {"maxlist",     KSTAT_DATA_UINT32},
5197         {"alloc",       KSTAT_DATA_UINT32},
5198         {"runouts",     KSTAT_DATA_UINT32},
5199         {"L2",          KSTAT_DATA_UINT32},
5200         {"grow",        KSTAT_DATA_UINT32},
5201 };
5202 
5203 #define nc_asked        cb_asked.value.ui32
5204 #define nc_new          cb_new.value.ui32
5205 #define nc_run          cb_run.value.ui32
5206 #define nc_delete       cb_delete.value.ui32
5207 #define nc_maxreq       cb_maxreq.value.ui32
5208 #define nc_maxlist      cb_maxlist.value.ui32
5209 #define nc_alloc        cb_alloc.value.ui32
5210 #define nc_runouts      cb_runouts.value.ui32
5211 #define nc_L2           cb_L2.value.ui32
5212 #define nc_grow         cb_grow.value.ui32
5213 
5214 static kmutex_t ddi_callback_mutex;
5215 
5216 /*
 * Callbacks are handled using an L1/L2 cache. The L1 cache
5218  * comes out of kmem_cache_alloc and can expand/shrink dynamically. If
5219  * we can't get callbacks from the L1 cache [because pageout is doing
5220  * I/O at the time freemem is 0], we allocate callbacks out of the
5221  * L2 cache. The L2 cache is static and depends on the memory size.
5222  * [We might also count the number of devices at probe time and
5223  * allocate one structure per device and adjust for deferred attach]
5224  */
5225 void
5226 impl_ddi_callback_init(void)
5227 {
5228         int     i;
5229         uint_t  physmegs;
5230         kstat_t *ksp;
5231 
5232         physmegs = physmem >> (20 - PAGESHIFT);
5233         if (physmegs < 48) {
5234                 ncallbacks = nc_low;
5235         } else if (physmegs < 128) {
5236                 ncallbacks = nc_med;
5237         } else {
5238                 ncallbacks = nc_high;
5239         }
5240 
5241         /*
5242          * init free list
5243          */
5244         callbackq = kmem_zalloc(
5245             ncallbacks * sizeof (struct ddi_callback), KM_SLEEP);
5246         for (i = 0; i < ncallbacks-1; i++)
5247                 callbackq[i].c_nfree = &callbackq[i+1];
5248         callbackqfree = callbackq;
5249 
5250         /* init kstats */
5251         if (ksp = kstat_create("unix", 0, "cbstats", "misc", KSTAT_TYPE_NAMED,
5252             sizeof (cbstats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL)) {
5253                 ksp->ks_data = (void *) &cbstats;
5254                 kstat_install(ksp);
5255         }
5256 
5257 }
5258 
5259 static void
5260 callback_insert(int (*funcp)(caddr_t), caddr_t arg, uintptr_t *listid,
5261         int count)
5262 {
5263         struct ddi_callback *list, *marker, *new;
5264         size_t size = sizeof (struct ddi_callback);
5265 
5266         list = marker = (struct ddi_callback *)*listid;
5267         while (list != NULL) {
5268                 if (list->c_call == funcp && list->c_arg == arg) {
5269                         list->c_count += count;
5270                         return;
5271                 }
5272                 marker = list;
5273                 list = list->c_nlist;
5274         }
5275         new = kmem_alloc(size, KM_NOSLEEP);
5276         if (new == NULL) {
5277                 new = callbackqfree;
5278                 if (new == NULL) {
5279                         new = kmem_alloc_tryhard(sizeof (struct ddi_callback),
5280                             &size, KM_NOSLEEP | KM_PANIC);
5281                         cbstats.nc_grow++;
5282                 } else {
5283                         callbackqfree = new->c_nfree;
5284                         cbstats.nc_L2++;
5285                 }
5286         }
5287         if (marker != NULL) {
5288                 marker->c_nlist = new;
5289         } else {
5290                 *listid = (uintptr_t)new;
5291         }
5292         new->c_size = size;
5293         new->c_nlist = NULL;
5294         new->c_call = funcp;
5295         new->c_arg = arg;
5296         new->c_count = count;
5297         cbstats.nc_new++;
5298         cbstats.nc_alloc++;
5299         if (cbstats.nc_alloc > cbstats.nc_maxlist)
5300                 cbstats.nc_maxlist = cbstats.nc_alloc;
5301 }
5302 
5303 void
5304 ddi_set_callback(int (*funcp)(caddr_t), caddr_t arg, uintptr_t *listid)
5305 {
5306         mutex_enter(&ddi_callback_mutex);
5307         cbstats.nc_asked++;
5308         if ((cbstats.nc_asked - cbstats.nc_run) > cbstats.nc_maxreq)
5309                 cbstats.nc_maxreq = (cbstats.nc_asked - cbstats.nc_run);
5310         (void) callback_insert(funcp, arg, listid, 1);
5311         mutex_exit(&ddi_callback_mutex);
5312 }
5313 
5314 static void
5315 real_callback_run(void *Queue)
5316 {
5317         int (*funcp)(caddr_t);
5318         caddr_t arg;
5319         int count, rval;
5320         uintptr_t *listid;
5321         struct ddi_callback *list, *marker;
5322         int check_pending = 1;
5323         int pending = 0;
5324 
5325         do {
5326                 mutex_enter(&ddi_callback_mutex);
5327                 listid = Queue;
5328                 list = (struct ddi_callback *)*listid;
5329                 if (list == NULL) {
5330                         mutex_exit(&ddi_callback_mutex);
5331                         return;
5332                 }
5333                 if (check_pending) {
5334                         marker = list;
5335                         while (marker != NULL) {
5336                                 pending += marker->c_count;
5337                                 marker = marker->c_nlist;
5338                         }
5339                         check_pending = 0;
5340                 }
5341                 ASSERT(pending > 0);
5342                 ASSERT(list->c_count > 0);
5343                 funcp = list->c_call;
5344                 arg = list->c_arg;
5345                 count = list->c_count;
5346                 *(uintptr_t *)Queue = (uintptr_t)list->c_nlist;
5347                 if (list >= &callbackq[0] &&
5348                     list <= &callbackq[ncallbacks-1]) {
5349                         list->c_nfree = callbackqfree;
5350                         callbackqfree = list;
5351                 } else
5352                         kmem_free(list, list->c_size);
5353 
5354                 cbstats.nc_delete++;
5355                 cbstats.nc_alloc--;
5356                 mutex_exit(&ddi_callback_mutex);
5357 
5358                 do {
5359                         if ((rval = (*funcp)(arg)) == 0) {
5360                                 pending -= count;
5361                                 mutex_enter(&ddi_callback_mutex);
5362                                 (void) callback_insert(funcp, arg, listid,
5363                                     count);
5364                                 cbstats.nc_runouts++;
5365                         } else {
5366                                 pending--;
5367                                 mutex_enter(&ddi_callback_mutex);
5368                                 cbstats.nc_run++;
5369                         }
5370                         mutex_exit(&ddi_callback_mutex);
5371                 } while (rval != 0 && (--count > 0));
5372         } while (pending > 0);
5373 }
5374 
5375 void
5376 ddi_run_callback(uintptr_t *listid)
5377 {
5378         softcall(real_callback_run, listid);
5379 }
5380 
5381 /*
5382  * ddi_periodic_t
5383  * ddi_periodic_add(void (*func)(void *), void *arg, hrtime_t interval,
5384  *     int level)
5385  *
5386  * INTERFACE LEVEL
5387  *      Solaris DDI specific (Solaris DDI)
5388  *
5389  * PARAMETERS
5390  *      func: the callback function
5391  *
5392  *            The callback function will be invoked. The function is invoked
5393  *            in kernel context if the argument level passed is the zero.
5394  *            Otherwise it's invoked in interrupt context at the specified
5395  *            level.
5396  *
5397  *       arg: the argument passed to the callback function
5398  *
5399  *  interval: interval time
5400  *
5401  *    level : callback interrupt level
5402  *
5403  *            If the value is the zero, the callback function is invoked
5404  *            in kernel context. If the value is more than the zero, but
5405  *            less than or equal to ten, the callback function is invoked in
5406  *            interrupt context at the specified interrupt level, which may
5407  *            be used for real time applications.
5408  *
5409  *            This value must be in range of 0-10, which can be a numeric
5410  *            number or a pre-defined macro (DDI_IPL_0, ... , DDI_IPL_10).
5411  *
5412  * DESCRIPTION
5413  *      ddi_periodic_add(9F) schedules the specified function to be
5414  *      periodically invoked in the interval time.
5415  *
5416  *      As well as timeout(9F), the exact time interval over which the function
5417  *      takes effect cannot be guaranteed, but the value given is a close
5418  *      approximation.
5419  *
5420  *      Drivers waiting on behalf of processes with real-time constraints must
5421  *      pass non-zero value with the level argument to ddi_periodic_add(9F).
5422  *
5423  * RETURN VALUES
5424  *      ddi_periodic_add(9F) returns a non-zero opaque value (ddi_periodic_t),
5425  *      which must be used for ddi_periodic_delete(9F) to specify the request.
5426  *
5427  * CONTEXT
5428  *      ddi_periodic_add(9F) can be called in user or kernel context, but
5429  *      it cannot be called in interrupt context, which is different from
5430  *      timeout(9F).
5431  */
5432 ddi_periodic_t
5433 ddi_periodic_add(void (*func)(void *), void *arg, hrtime_t interval, int level)
5434 {
5435         /*
5436          * Sanity check of the argument level.
5437          */
5438         if (level < DDI_IPL_0 || level > DDI_IPL_10)
5439                 cmn_err(CE_PANIC,
5440                     "ddi_periodic_add: invalid interrupt level (%d).", level);
5441 
5442         /*
5443          * Sanity check of the context. ddi_periodic_add() cannot be
5444          * called in either interrupt context or high interrupt context.
5445          */
5446         if (servicing_interrupt())
5447                 cmn_err(CE_PANIC,
5448                     "ddi_periodic_add: called in (high) interrupt context.");
5449 
5450         return ((ddi_periodic_t)i_timeout(func, arg, interval, level));
5451 }
5452 
5453 /*
5454  * void
5455  * ddi_periodic_delete(ddi_periodic_t req)
5456  *
5457  * INTERFACE LEVEL
5458  *     Solaris DDI specific (Solaris DDI)
5459  *
5460  * PARAMETERS
5461  *     req: ddi_periodic_t opaque value ddi_periodic_add(9F) returned
5462  *     previously.
5463  *
5464  * DESCRIPTION
5465  *     ddi_periodic_delete(9F) cancels the ddi_periodic_add(9F) request
5466  *     previously requested.
5467  *
5468  *     ddi_periodic_delete(9F) will not return until the pending request
5469  *     is canceled or executed.
5470  *
5471  *     As well as untimeout(9F), calling ddi_periodic_delete(9F) for a
5472  *     timeout which is either running on another CPU, or has already
5473  *     completed causes no problems. However, unlike untimeout(9F), there is
5474  *     no restrictions on the lock which might be held across the call to
5475  *     ddi_periodic_delete(9F).
5476  *
5477  *     Drivers should be structured with the understanding that the arrival of
5478  *     both an interrupt and a timeout for that interrupt can occasionally
5479  *     occur, in either order.
5480  *
5481  * CONTEXT
5482  *     ddi_periodic_delete(9F) can be called in user or kernel context, but
5483  *     it cannot be called in interrupt context, which is different from
5484  *     untimeout(9F).
5485  */
5486 void
5487 ddi_periodic_delete(ddi_periodic_t req)
5488 {
5489         /*
5490          * Sanity check of the context. ddi_periodic_delete() cannot be
5491          * called in either interrupt context or high interrupt context.
5492          */
5493         if (servicing_interrupt())
5494                 cmn_err(CE_PANIC,
5495                     "ddi_periodic_delete: called in (high) interrupt context.");
5496 
5497         i_untimeout((timeout_t)req);
5498 }
5499 
5500 dev_info_t *
5501 nodevinfo(dev_t dev, int otyp)
5502 {
5503         _NOTE(ARGUNUSED(dev, otyp))
5504         return ((dev_info_t *)0);
5505 }
5506 
5507 /*
5508  * A driver should support its own getinfo(9E) entry point. This function
5509  * is provided as a convenience for ON drivers that don't expect their
5510  * getinfo(9E) entry point to be called. A driver that uses this must not
5511  * call ddi_create_minor_node.
5512  */
5513 int
5514 ddi_no_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
5515 {
5516         _NOTE(ARGUNUSED(dip, infocmd, arg, result))
5517         return (DDI_FAILURE);
5518 }
5519 
5520 /*
5521  * A driver should support its own getinfo(9E) entry point. This function
 * is provided as a convenience for ON drivers where the minor number
 * is the instance. Drivers that do not have a 1:1 mapping must implement
 * their own getinfo(9E) function.
5525  */
5526 int
5527 ddi_getinfo_1to1(dev_info_t *dip, ddi_info_cmd_t infocmd,
5528     void *arg, void **result)
5529 {
5530         _NOTE(ARGUNUSED(dip))
5531         int     instance;
5532 
5533         if (infocmd != DDI_INFO_DEVT2INSTANCE)
5534                 return (DDI_FAILURE);
5535 
5536         instance = getminor((dev_t)(uintptr_t)arg);
5537         *result = (void *)(uintptr_t)instance;
5538         return (DDI_SUCCESS);
5539 }
5540 
5541 int
5542 ddifail(dev_info_t *devi, ddi_attach_cmd_t cmd)
5543 {
5544         _NOTE(ARGUNUSED(devi, cmd))
5545         return (DDI_FAILURE);
5546 }
5547 
5548 int
5549 ddi_no_dma_map(dev_info_t *dip, dev_info_t *rdip,
5550     struct ddi_dma_req *dmareqp, ddi_dma_handle_t *handlep)
5551 {
5552         _NOTE(ARGUNUSED(dip, rdip, dmareqp, handlep))
5553         return (DDI_DMA_NOMAPPING);
5554 }
5555 
5556 int
5557 ddi_no_dma_allochdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_attr_t *attr,
5558     int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *handlep)
5559 {
5560         _NOTE(ARGUNUSED(dip, rdip, attr, waitfp, arg, handlep))
5561         return (DDI_DMA_BADATTR);
5562 }
5563 
5564 int
5565 ddi_no_dma_freehdl(dev_info_t *dip, dev_info_t *rdip,
5566     ddi_dma_handle_t handle)
5567 {
5568         _NOTE(ARGUNUSED(dip, rdip, handle))
5569         return (DDI_FAILURE);
5570 }
5571 
5572 int
5573 ddi_no_dma_bindhdl(dev_info_t *dip, dev_info_t *rdip,
5574     ddi_dma_handle_t handle, struct ddi_dma_req *dmareq,
5575     ddi_dma_cookie_t *cp, uint_t *ccountp)
5576 {
5577         _NOTE(ARGUNUSED(dip, rdip, handle, dmareq, cp, ccountp))
5578         return (DDI_DMA_NOMAPPING);
5579 }
5580 
5581 int
5582 ddi_no_dma_unbindhdl(dev_info_t *dip, dev_info_t *rdip,
5583     ddi_dma_handle_t handle)
5584 {
5585         _NOTE(ARGUNUSED(dip, rdip, handle))
5586         return (DDI_FAILURE);
5587 }
5588 
5589 int
5590 ddi_no_dma_flush(dev_info_t *dip, dev_info_t *rdip,
5591     ddi_dma_handle_t handle, off_t off, size_t len,
5592     uint_t cache_flags)
5593 {
5594         _NOTE(ARGUNUSED(dip, rdip, handle, off, len, cache_flags))
5595         return (DDI_FAILURE);
5596 }
5597 
5598 int
5599 ddi_no_dma_win(dev_info_t *dip, dev_info_t *rdip,
5600     ddi_dma_handle_t handle, uint_t win, off_t *offp,
5601     size_t *lenp, ddi_dma_cookie_t *cookiep, uint_t *ccountp)
5602 {
5603         _NOTE(ARGUNUSED(dip, rdip, handle, win, offp, lenp, cookiep, ccountp))
5604         return (DDI_FAILURE);
5605 }
5606 
5607 int
5608 ddi_no_dma_mctl(dev_info_t *dip, dev_info_t *rdip,
5609     ddi_dma_handle_t handle, enum ddi_dma_ctlops request,
5610     off_t *offp, size_t *lenp, caddr_t *objp, uint_t flags)
5611 {
5612         _NOTE(ARGUNUSED(dip, rdip, handle, request, offp, lenp, objp, flags))
5613         return (DDI_FAILURE);
5614 }
5615 
5616 void
5617 ddivoid(void)
5618 {}
5619 
5620 int
5621 nochpoll(dev_t dev, short events, int anyyet, short *reventsp,
5622     struct pollhead **pollhdrp)
5623 {
5624         _NOTE(ARGUNUSED(dev, events, anyyet, reventsp, pollhdrp))
5625         return (ENXIO);
5626 }
5627 
5628 cred_t *
5629 ddi_get_cred(void)
5630 {
5631         return (CRED());
5632 }
5633 
5634 clock_t
5635 ddi_get_lbolt(void)
5636 {
5637         return ((clock_t)lbolt_hybrid());
5638 }
5639 
5640 int64_t
5641 ddi_get_lbolt64(void)
5642 {
5643         return (lbolt_hybrid());
5644 }
5645 
5646 time_t
5647 ddi_get_time(void)
5648 {
5649         time_t  now;
5650 
5651         if ((now = gethrestime_sec()) == 0) {
5652                 timestruc_t ts;
5653                 mutex_enter(&tod_lock);
5654                 ts = tod_get();
5655                 mutex_exit(&tod_lock);
5656                 return (ts.tv_sec);
5657         } else {
5658                 return (now);
5659         }
5660 }
5661 
5662 pid_t
5663 ddi_get_pid(void)
5664 {
5665         return (ttoproc(curthread)->p_pid);
5666 }
5667 
5668 kt_did_t
5669 ddi_get_kt_did(void)
5670 {
5671         return (curthread->t_did);
5672 }
5673 
5674 /*
5675  * This function returns B_TRUE if the caller can reasonably expect that a call
5676  * to cv_wait_sig(9F), cv_timedwait_sig(9F), or qwait_sig(9F) could be awakened
 * by a user-level signal.  If it returns B_FALSE, then the caller should use
5678  * other means to make certain that the wait will not hang "forever."
5679  *
5680  * It does not check the signal mask, nor for reception of any particular
5681  * signal.
5682  *
5683  * Currently, a thread can receive a signal if it's not a kernel thread and it
5684  * is not in the middle of exit(2) tear-down.  Threads that are in that
5685  * tear-down effectively convert cv_wait_sig to cv_wait, cv_timedwait_sig to
5686  * cv_timedwait, and qwait_sig to qwait.
5687  */
5688 boolean_t
5689 ddi_can_receive_sig(void)
5690 {
5691         proc_t *pp;
5692 
5693         if (curthread->t_proc_flag & TP_LWPEXIT)
5694                 return (B_FALSE);
5695         if ((pp = ttoproc(curthread)) == NULL)
5696                 return (B_FALSE);
5697         return (pp->p_as != &kas);
5698 }
5699 
5700 /*
5701  * Swap bytes in 16-bit [half-]words
5702  */
5703 void
5704 swab(void *src, void *dst, size_t nbytes)
5705 {
5706         uchar_t *pf = (uchar_t *)src;
5707         uchar_t *pt = (uchar_t *)dst;
5708         uchar_t tmp;
5709         int nshorts;
5710 
5711         nshorts = nbytes >> 1;
5712 
5713         while (--nshorts >= 0) {
5714                 tmp = *pf++;
5715                 *pt++ = *pf++;
5716                 *pt++ = tmp;
5717         }
5718 }
5719 
5720 static void
5721 ddi_append_minor_node(dev_info_t *ddip, struct ddi_minor_data *dmdp)
5722 {
5723         int                     circ;
5724         struct ddi_minor_data   *dp;
5725 
5726         ndi_devi_enter(ddip, &circ);
5727         if ((dp = DEVI(ddip)->devi_minor) == (struct ddi_minor_data *)NULL) {
5728                 DEVI(ddip)->devi_minor = dmdp;
5729         } else {
5730                 while (dp->next != (struct ddi_minor_data *)NULL)
5731                         dp = dp->next;
5732                 dp->next = dmdp;
5733         }
5734         ndi_devi_exit(ddip, circ);
5735 }
5736 
5737 /*
5738  * Part of the obsolete SunCluster DDI Hooks.
 * Kept for binary compatibility.
5740  */
5741 minor_t
5742 ddi_getiminor(dev_t dev)
5743 {
5744         return (getminor(dev));
5745 }
5746 
5747 static int
5748 i_log_devfs_minor_create(dev_info_t *dip, char *minor_name)
5749 {
5750         int se_flag;
5751         int kmem_flag;
5752         int se_err;
5753         char *pathname, *class_name;
5754         sysevent_t *ev = NULL;
5755         sysevent_id_t eid;
5756         sysevent_value_t se_val;
5757         sysevent_attr_list_t *ev_attr_list = NULL;
5758 
5759         /* determine interrupt context */
5760         se_flag = (servicing_interrupt()) ? SE_NOSLEEP : SE_SLEEP;
5761         kmem_flag = (se_flag == SE_SLEEP) ? KM_SLEEP : KM_NOSLEEP;
5762 
5763         i_ddi_di_cache_invalidate();
5764 
5765 #ifdef DEBUG
5766         if ((se_flag == SE_NOSLEEP) && sunddi_debug) {
5767                 cmn_err(CE_CONT, "ddi_create_minor_node: called from "
5768                     "interrupt level by driver %s",
5769                     ddi_driver_name(dip));
5770         }
5771 #endif /* DEBUG */
5772 
5773         ev = sysevent_alloc(EC_DEVFS, ESC_DEVFS_MINOR_CREATE, EP_DDI, se_flag);
5774         if (ev == NULL) {
5775                 goto fail;
5776         }
5777 
5778         pathname = kmem_alloc(MAXPATHLEN, kmem_flag);
5779         if (pathname == NULL) {
5780                 sysevent_free(ev);
5781                 goto fail;
5782         }
5783 
5784         (void) ddi_pathname(dip, pathname);
5785         ASSERT(strlen(pathname));
5786         se_val.value_type = SE_DATA_TYPE_STRING;
5787         se_val.value.sv_string = pathname;
5788         if (sysevent_add_attr(&ev_attr_list, DEVFS_PATHNAME,
5789             &se_val, se_flag) != 0) {
5790                 kmem_free(pathname, MAXPATHLEN);
5791                 sysevent_free(ev);
5792                 goto fail;
5793         }
5794         kmem_free(pathname, MAXPATHLEN);
5795 
5796         /* add the device class attribute */
5797         if ((class_name = i_ddi_devi_class(dip)) != NULL) {
5798                 se_val.value_type = SE_DATA_TYPE_STRING;
5799                 se_val.value.sv_string = class_name;
5800                 if (sysevent_add_attr(&ev_attr_list,
5801                     DEVFS_DEVI_CLASS, &se_val, SE_SLEEP) != 0) {
5802                         sysevent_free_attr(ev_attr_list);
5803                         goto fail;
5804                 }
5805         }
5806 
5807         /*
5808          * allow for NULL minor names
5809          */
5810         if (minor_name != NULL) {
5811                 se_val.value.sv_string = minor_name;
5812                 if (sysevent_add_attr(&ev_attr_list, DEVFS_MINOR_NAME,
5813                     &se_val, se_flag) != 0) {
5814                         sysevent_free_attr(ev_attr_list);
5815                         sysevent_free(ev);
5816                         goto fail;
5817                 }
5818         }
5819 
5820         if (sysevent_attach_attributes(ev, ev_attr_list) != 0) {
5821                 sysevent_free_attr(ev_attr_list);
5822                 sysevent_free(ev);
5823                 goto fail;
5824         }
5825 
5826         if ((se_err = log_sysevent(ev, se_flag, &eid)) != 0) {
5827                 if (se_err == SE_NO_TRANSPORT) {
5828                         cmn_err(CE_WARN, "/devices or /dev may not be current "
5829                             "for driver %s (%s). Run devfsadm -i %s",
5830                             ddi_driver_name(dip), "syseventd not responding",
5831                             ddi_driver_name(dip));
5832                 } else {
5833                         sysevent_free(ev);
5834                         goto fail;
5835                 }
5836         }
5837 
5838         sysevent_free(ev);
5839         return (DDI_SUCCESS);
5840 fail:
5841         cmn_err(CE_WARN, "/devices or /dev may not be current "
5842             "for driver %s. Run devfsadm -i %s",
5843             ddi_driver_name(dip), ddi_driver_name(dip));
5844         return (DDI_SUCCESS);
5845 }
5846 
5847 /*
 * Failing to remove a minor node is not of interest;
 * therefore we do not generate an error message.
5850  */
5851 static int
5852 i_log_devfs_minor_remove(dev_info_t *dip, char *minor_name)
5853 {
5854         char *pathname, *class_name;
5855         sysevent_t *ev;
5856         sysevent_id_t eid;
5857         sysevent_value_t se_val;
5858         sysevent_attr_list_t *ev_attr_list = NULL;
5859 
5860         /*
5861          * only log ddi_remove_minor_node() calls outside the scope
5862          * of attach/detach reconfigurations and when the dip is
5863          * still initialized.
5864          */
5865         if (DEVI_IS_ATTACHING(dip) || DEVI_IS_DETACHING(dip) ||
5866             (i_ddi_node_state(dip) < DS_INITIALIZED)) {
5867                 return (DDI_SUCCESS);
5868         }
5869 
5870         i_ddi_di_cache_invalidate();
5871 
5872         ev = sysevent_alloc(EC_DEVFS, ESC_DEVFS_MINOR_REMOVE, EP_DDI, SE_SLEEP);
5873         if (ev == NULL) {
5874                 return (DDI_SUCCESS);
5875         }
5876 
5877         pathname = kmem_alloc(MAXPATHLEN, KM_SLEEP);
5878         if (pathname == NULL) {
5879                 sysevent_free(ev);
5880                 return (DDI_SUCCESS);
5881         }
5882 
5883         (void) ddi_pathname(dip, pathname);
5884         ASSERT(strlen(pathname));
5885         se_val.value_type = SE_DATA_TYPE_STRING;
5886         se_val.value.sv_string = pathname;
5887         if (sysevent_add_attr(&ev_attr_list, DEVFS_PATHNAME,
5888             &se_val, SE_SLEEP) != 0) {
5889                 kmem_free(pathname, MAXPATHLEN);
5890                 sysevent_free(ev);
5891                 return (DDI_SUCCESS);
5892         }
5893 
5894         kmem_free(pathname, MAXPATHLEN);
5895 
5896         /*
5897          * allow for NULL minor names
5898          */
5899         if (minor_name != NULL) {
5900                 se_val.value.sv_string = minor_name;
5901                 if (sysevent_add_attr(&ev_attr_list, DEVFS_MINOR_NAME,
5902                     &se_val, SE_SLEEP) != 0) {
5903                         sysevent_free_attr(ev_attr_list);
5904                         goto fail;
5905                 }
5906         }
5907 
5908         if ((class_name = i_ddi_devi_class(dip)) != NULL) {
5909                 /* add the device class, driver name and instance attributes */
5910 
5911                 se_val.value_type = SE_DATA_TYPE_STRING;
5912                 se_val.value.sv_string = class_name;
5913                 if (sysevent_add_attr(&ev_attr_list,
5914                     DEVFS_DEVI_CLASS, &se_val, SE_SLEEP) != 0) {
5915                         sysevent_free_attr(ev_attr_list);
5916                         goto fail;
5917                 }
5918 
5919                 se_val.value_type = SE_DATA_TYPE_STRING;
5920                 se_val.value.sv_string = (char *)ddi_driver_name(dip);
5921                 if (sysevent_add_attr(&ev_attr_list,
5922                     DEVFS_DRIVER_NAME, &se_val, SE_SLEEP) != 0) {
5923                         sysevent_free_attr(ev_attr_list);
5924                         goto fail;
5925                 }
5926 
5927                 se_val.value_type = SE_DATA_TYPE_INT32;
5928                 se_val.value.sv_int32 = ddi_get_instance(dip);
5929                 if (sysevent_add_attr(&ev_attr_list,
5930                     DEVFS_INSTANCE, &se_val, SE_SLEEP) != 0) {
5931                         sysevent_free_attr(ev_attr_list);
5932                         goto fail;
5933                 }
5934 
5935         }
5936 
5937         if (sysevent_attach_attributes(ev, ev_attr_list) != 0) {
5938                 sysevent_free_attr(ev_attr_list);
5939         } else {
5940                 (void) log_sysevent(ev, SE_SLEEP, &eid);
5941         }
5942 fail:
5943         sysevent_free(ev);
5944         return (DDI_SUCCESS);
5945 }
5946 
5947 /*
5948  * Derive the device class of the node.
5949  * Device class names aren't defined yet. Until this is done we use
5950  * devfs event subclass names as device class names.
5951  */
5952 static int
5953 derive_devi_class(dev_info_t *dip, char *node_type, int flag)
5954 {
5955         int rv = DDI_SUCCESS;
5956 
5957         if (i_ddi_devi_class(dip) == NULL) {
5958                 if (strncmp(node_type, DDI_NT_BLOCK,
5959                     sizeof (DDI_NT_BLOCK) - 1) == 0 &&
5960                     (node_type[sizeof (DDI_NT_BLOCK) - 1] == '\0' ||
5961                     node_type[sizeof (DDI_NT_BLOCK) - 1] == ':') &&
5962                     strcmp(node_type, DDI_NT_FD) != 0) {
5963 
5964                         rv = i_ddi_set_devi_class(dip, ESC_DISK, flag);
5965 
5966                 } else if (strncmp(node_type, DDI_NT_NET,
5967                     sizeof (DDI_NT_NET) - 1) == 0 &&
5968                     (node_type[sizeof (DDI_NT_NET) - 1] == '\0' ||
5969                     node_type[sizeof (DDI_NT_NET) - 1] == ':')) {
5970 
5971                         rv = i_ddi_set_devi_class(dip, ESC_NETWORK, flag);
5972 
5973                 } else if (strncmp(node_type, DDI_NT_PRINTER,
5974                     sizeof (DDI_NT_PRINTER) - 1) == 0 &&
5975                     (node_type[sizeof (DDI_NT_PRINTER) - 1] == '\0' ||
5976                     node_type[sizeof (DDI_NT_PRINTER) - 1] == ':')) {
5977 
5978                         rv = i_ddi_set_devi_class(dip, ESC_PRINTER, flag);
5979 
5980                 } else if (strncmp(node_type, DDI_PSEUDO,
5981                     sizeof (DDI_PSEUDO) -1) == 0 &&
5982                     (strncmp(ESC_LOFI, ddi_node_name(dip),
5983                     sizeof (ESC_LOFI) -1) == 0)) {
5984                         rv = i_ddi_set_devi_class(dip, ESC_LOFI, flag);
5985                 }
5986         }
5987 
5988         return (rv);
5989 }
5990 
5991 /*
5992  * Check compliance with PSARC 2003/375:
5993  *
5994  * The name must contain only characters a-z, A-Z, 0-9 or _ and it must not
5995  * exceed IFNAMSIZ (16) characters in length.
5996  */
5997 static boolean_t
5998 verify_name(char *name)
5999 {
6000         size_t  len = strlen(name);
6001         char    *cp;
6002 
6003         if (len == 0 || len > IFNAMSIZ)
6004                 return (B_FALSE);
6005 
6006         for (cp = name; *cp != '\0'; cp++) {
6007                 if (!isalnum(*cp) && *cp != '_')
6008                         return (B_FALSE);
6009         }
6010 
6011         return (B_TRUE);
6012 }
6013 
6014 /*
 * ddi_create_minor_common:     Create a ddi_minor_data structure and
6016  *                              attach it to the given devinfo node.
6017  */
6018 
6019 int
6020 ddi_create_minor_common(dev_info_t *dip, char *name, int spec_type,
6021     minor_t minor_num, char *node_type, int flag, ddi_minor_type mtype,
6022     const char *read_priv, const char *write_priv, mode_t priv_mode)
6023 {
6024         struct ddi_minor_data *dmdp;
6025         major_t major;
6026 
6027         if (spec_type != S_IFCHR && spec_type != S_IFBLK)
6028                 return (DDI_FAILURE);
6029 
6030         if (name == NULL)
6031                 return (DDI_FAILURE);
6032 
6033         /*
6034          * Log a message if the minor number the driver is creating
6035          * is not expressible on the on-disk filesystem (currently
         * this is limited to 18 bits by UFS). The device can
6037          * be opened via devfs, but not by device special files created
6038          * via mknod().
6039          */
6040         if (minor_num > L_MAXMIN32) {
6041                 cmn_err(CE_WARN,
6042                     "%s%d:%s minor 0x%x too big for 32-bit applications",
6043                     ddi_driver_name(dip), ddi_get_instance(dip),
6044                     name, minor_num);
6045                 return (DDI_FAILURE);
6046         }
6047 
6048         /* dip must be bound and attached */
6049         major = ddi_driver_major(dip);
6050         ASSERT(major != DDI_MAJOR_T_NONE);
6051 
6052         /*
6053          * Default node_type to DDI_PSEUDO and issue notice in debug mode
6054          */
6055         if (node_type == NULL) {
6056                 node_type = DDI_PSEUDO;
6057                 NDI_CONFIG_DEBUG((CE_NOTE, "!illegal node_type NULL for %s%d "
6058                     " minor node %s; default to DDI_PSEUDO",
6059                     ddi_driver_name(dip), ddi_get_instance(dip), name));
6060         }
6061 
6062         /*
6063          * If the driver is a network driver, ensure that the name falls within
6064          * the interface naming constraints specified by PSARC/2003/375.
6065          */
6066         if (strcmp(node_type, DDI_NT_NET) == 0) {
6067                 if (!verify_name(name))
6068                         return (DDI_FAILURE);
6069 
6070                 if (mtype == DDM_MINOR) {
6071                         struct devnames *dnp = &devnamesp[major];
6072 
6073                         /* Mark driver as a network driver */
6074                         LOCK_DEV_OPS(&dnp->dn_lock);
6075                         dnp->dn_flags |= DN_NETWORK_DRIVER;
6076 
6077                         /*
6078                          * If this minor node is created during the device
6079                          * attachment, this is a physical network device.
6080                          * Mark the driver as a physical network driver.
6081                          */
6082                         if (DEVI_IS_ATTACHING(dip))
6083                                 dnp->dn_flags |= DN_NETWORK_PHYSDRIVER;
6084                         UNLOCK_DEV_OPS(&dnp->dn_lock);
6085                 }
6086         }
6087 
6088         if (mtype == DDM_MINOR) {
6089                 if (derive_devi_class(dip,  node_type, KM_NOSLEEP) !=
6090                     DDI_SUCCESS)
6091                         return (DDI_FAILURE);
6092         }
6093 
6094         /*
6095          * Take care of minor number information for the node.
6096          */
6097 
6098         if ((dmdp = kmem_zalloc(sizeof (struct ddi_minor_data),
6099             KM_NOSLEEP)) == NULL) {
6100                 return (DDI_FAILURE);
6101         }
6102         if ((dmdp->ddm_name = i_ddi_strdup(name, KM_NOSLEEP)) == NULL) {
6103                 kmem_free(dmdp, sizeof (struct ddi_minor_data));
6104                 return (DDI_FAILURE);
6105         }
6106         dmdp->dip = dip;
6107         dmdp->ddm_dev = makedevice(major, minor_num);
6108         dmdp->ddm_spec_type = spec_type;
6109         dmdp->ddm_node_type = node_type;
6110         dmdp->type = mtype;
6111         if (flag & CLONE_DEV) {
6112                 dmdp->type = DDM_ALIAS;
6113                 dmdp->ddm_dev = makedevice(ddi_driver_major(clone_dip), major);
6114         }
6115         if (flag & PRIVONLY_DEV) {
6116                 dmdp->ddm_flags |= DM_NO_FSPERM;
6117         }
6118         if (read_priv || write_priv) {
6119                 dmdp->ddm_node_priv =
6120                     devpolicy_priv_by_name(read_priv, write_priv);
6121         }
6122         dmdp->ddm_priv_mode = priv_mode;
6123 
6124         ddi_append_minor_node(dip, dmdp);
6125 
6126         /*
6127          * only log ddi_create_minor_node() calls which occur
6128          * outside the scope of attach(9e)/detach(9e) reconfigurations
6129          */
6130         if (!(DEVI_IS_ATTACHING(dip) || DEVI_IS_DETACHING(dip)) &&
6131             mtype != DDM_INTERNAL_PATH) {
6132                 (void) i_log_devfs_minor_create(dip, name);
6133         }
6134 
6135         /*
6136          * Check if any dacf rules match the creation of this minor node
6137          */
6138         dacfc_match_create_minor(name, node_type, dip, dmdp, flag);
6139         return (DDI_SUCCESS);
6140 }
6141 
6142 int
6143 ddi_create_minor_node(dev_info_t *dip, char *name, int spec_type,
6144     minor_t minor_num, char *node_type, int flag)
6145 {
6146         return (ddi_create_minor_common(dip, name, spec_type, minor_num,
6147             node_type, flag, DDM_MINOR, NULL, NULL, 0));
6148 }
6149 
6150 int
6151 ddi_create_priv_minor_node(dev_info_t *dip, char *name, int spec_type,
6152     minor_t minor_num, char *node_type, int flag,
6153     const char *rdpriv, const char *wrpriv, mode_t priv_mode)
6154 {
6155         return (ddi_create_minor_common(dip, name, spec_type, minor_num,
6156             node_type, flag, DDM_MINOR, rdpriv, wrpriv, priv_mode));
6157 }
6158 
6159 int
6160 ddi_create_default_minor_node(dev_info_t *dip, char *name, int spec_type,
6161     minor_t minor_num, char *node_type, int flag)
6162 {
6163         return (ddi_create_minor_common(dip, name, spec_type, minor_num,
6164             node_type, flag, DDM_DEFAULT, NULL, NULL, 0));
6165 }
6166 
6167 /*
6168  * Internal (non-ddi) routine for drivers to export names known
6169  * to the kernel (especially ddi_pathname_to_dev_t and friends)
6170  * but not exported externally to /dev
6171  */
6172 int
6173 ddi_create_internal_pathname(dev_info_t *dip, char *name, int spec_type,
6174     minor_t minor_num)
6175 {
6176         return (ddi_create_minor_common(dip, name, spec_type, minor_num,
6177             "internal", 0, DDM_INTERNAL_PATH, NULL, NULL, 0));
6178 }
6179 
6180 void
6181 ddi_remove_minor_node(dev_info_t *dip, char *name)
6182 {
6183         int                     circ;
6184         struct ddi_minor_data   *dmdp, *dmdp1;
6185         struct ddi_minor_data   **dmdp_prev;
6186 
6187         ndi_devi_enter(dip, &circ);
6188         dmdp_prev = &DEVI(dip)->devi_minor;
6189         dmdp = DEVI(dip)->devi_minor;
6190         while (dmdp != NULL) {
6191                 dmdp1 = dmdp->next;
6192                 if ((name == NULL || (dmdp->ddm_name != NULL &&
6193                     strcmp(name, dmdp->ddm_name) == 0))) {
6194                         if (dmdp->ddm_name != NULL) {
6195                                 if (dmdp->type != DDM_INTERNAL_PATH)
6196                                         (void) i_log_devfs_minor_remove(dip,
6197                                             dmdp->ddm_name);
6198                                 kmem_free(dmdp->ddm_name,
6199                                     strlen(dmdp->ddm_name) + 1);
6200                         }
6201                         /*
6202                          * Release device privilege, if any.
6203                          * Release dacf client data associated with this minor
6204                          * node by storing NULL.
6205                          */
6206                         if (dmdp->ddm_node_priv)
6207                                 dpfree(dmdp->ddm_node_priv);
6208                         dacf_store_info((dacf_infohdl_t)dmdp, NULL);
6209                         kmem_free(dmdp, sizeof (struct ddi_minor_data));
6210                         *dmdp_prev = dmdp1;
6211                         /*
6212                          * OK, we found it, so get out now -- if we drive on,
6213                          * we will strcmp against garbage.  See 1139209.
6214                          */
6215                         if (name != NULL)
6216                                 break;
6217                 } else {
6218                         dmdp_prev = &dmdp->next;
6219                 }
6220                 dmdp = dmdp1;
6221         }
6222         ndi_devi_exit(dip, circ);
6223 }
6224 
6225 
6226 int
6227 ddi_in_panic()
6228 {
6229         return (panicstr != NULL);
6230 }
6231 
6232 
6233 /*
6234  * Find first bit set in a mask (returned counting from 1 up)
6235  */
6236 
6237 int
6238 ddi_ffs(long mask)
6239 {
6240         return (ffs(mask));
6241 }
6242 
6243 /*
6244  * Find last bit set. Take mask and clear
6245  * all but the most significant bit, and
6246  * then let ffs do the rest of the work.
6247  *
6248  * Algorithm courtesy of Steve Chessin.
6249  */
6250 
6251 int
6252 ddi_fls(long mask)
6253 {
6254         while (mask) {
6255                 long nx;
6256 
6257                 if ((nx = (mask & (mask - 1))) == 0)
6258                         break;
6259                 mask = nx;
6260         }
6261         return (ffs(mask));
6262 }
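
/*
 * For example, with mask 0x58 (binary 0101 1000):
 *	ddi_ffs(0x58) == 4	(lowest set bit is 0x08)
 *	ddi_fls(0x58) == 7	(highest set bit is 0x40)
 * Both return 0 for a zero mask.
 */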
6263 
6264 /*
6265  * The ddi_soft_state_* routines comprise generic storage management utilities
6266  * for driver soft state structures (in "the old days," this was done with
6267  * a statically sized array - big systems and dynamic loading and unloading
6268  * make heap allocation more attractive).
6269  */
6270 
6271 /*
6272  * Allocate a set of pointers to 'n_items' objects of size 'size'
6273  * bytes.  Each pointer is initialized to nil.
6274  *
6275  * The 'size' and 'n_items' values are stashed in the opaque
6276  * handle returned to the caller.
6277  *
6278  * This implementation interprets 'set of pointers' to mean 'array
6279  * of pointers' but note that nothing in the interface definition
6280  * precludes an implementation that uses, for example, a linked list.
6281  * However, there should be a small efficiency gain from using an array
6282  * at lookup time.
6283  *
6284  * NOTE As an optimization, we make our growable array allocations in
6285  *      powers of two (bytes), since that's how much kmem_alloc (currently)
6286  *      gives us anyway.  It should save us some free/realloc's ..
6287  *
6288  *      As a further optimization, we make the growable array start out
6289  *      with MIN_N_ITEMS in it.
6290  */
6291 
6292 #define MIN_N_ITEMS     8       /* 8 void *'s (32 or 64 bytes) */
6293 
6294 int
6295 ddi_soft_state_init(void **state_p, size_t size, size_t n_items)
6296 {
6297         i_ddi_soft_state        *ss;
6298 
6299         if (state_p == NULL || size == 0)
6300                 return (EINVAL);
6301 
6302         ss = kmem_zalloc(sizeof (*ss), KM_SLEEP);
6303         mutex_init(&ss->lock, NULL, MUTEX_DRIVER, NULL);
6304         ss->size = size;
6305 
6306         if (n_items < MIN_N_ITEMS)
6307                 ss->n_items = MIN_N_ITEMS;
6308         else {
6309                 int bitlog;
6310 
6311                 if ((bitlog = ddi_fls(n_items)) == ddi_ffs(n_items))
6312                         bitlog--;
6313                 ss->n_items = 1 << bitlog;
6314         }
6315 
6316         ASSERT(ss->n_items >= n_items);
6317 
6318         ss->array = kmem_zalloc(ss->n_items * sizeof (void *), KM_SLEEP);
6319 
6320         *state_p = ss;
6321         return (0);
6322 }
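
/*
 * For example (with a hypothetical handle statep),
 * ddi_soft_state_init(&statep, size, 10) starts with a 16-slot array (the
 * next power of two), a request for 16 stays at 16, and anything below
 * MIN_N_ITEMS is raised to MIN_N_ITEMS.
 */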
6323 
6324 /*
6325  * Allocate a state structure of size 'size' to be associated
6326  * with item 'item'.
6327  *
6328  * In this implementation, the array is extended to
6329  * allow the requested offset, if needed.
6330  */
6331 int
6332 ddi_soft_state_zalloc(void *state, int item)
6333 {
6334         i_ddi_soft_state        *ss = (i_ddi_soft_state *)state;
6335         void                    **array;
6336         void                    *new_element;
6337 
6338         if ((state == NULL) || (item < 0))
6339                 return (DDI_FAILURE);
6340 
6341         mutex_enter(&ss->lock);
6342         if (ss->size == 0) {
6343                 mutex_exit(&ss->lock);
6344                 cmn_err(CE_WARN, "ddi_soft_state_zalloc: bad handle: %s",
6345                     mod_containing_pc(caller()));
6346                 return (DDI_FAILURE);
6347         }
6348 
6349         array = ss->array;   /* NULL if ss->n_items == 0 */
6350         ASSERT(ss->n_items != 0 && array != NULL);
6351 
6352         /*
6353          * refuse to tread on an existing element
6354          */
6355         if (item < ss->n_items && array[item] != NULL) {
6356                 mutex_exit(&ss->lock);
6357                 return (DDI_FAILURE);
6358         }
6359 
6360         /*
6361          * Allocate a new element to plug in
6362          */
6363         new_element = kmem_zalloc(ss->size, KM_SLEEP);
6364 
6365         /*
6366          * Check if the array is big enough, if not, grow it.
6367          */
6368         if (item >= ss->n_items) {
6369                 void                    **new_array;
6370                 size_t                  new_n_items;
6371                 struct i_ddi_soft_state *dirty;
6372 
6373                 /*
6374                  * Allocate a new array of the right length, copy
6375                  * all the old pointers to the new array, then
6376                  * if it exists at all, put the old array on the
6377                  * dirty list.
6378                  *
6379                  * Note that we can't kmem_free() the old array.
6380                  *
6381                  * Why -- well the 'get' operation is 'mutex-free', so we
6382                  * can't easily catch a suspended thread that is just about
6383                  * to dereference the array we just grew out of.  So we
6384                  * cons up a header and put it on a list of 'dirty'
6385                  * pointer arrays.  (Dirty in the sense that there may
6386                  * be suspended threads somewhere that are in the middle
6387                  * of referencing them).  Fortunately, we -can- garbage
6388                  * collect it all at ddi_soft_state_fini time.
6389                  */
6390                 new_n_items = ss->n_items;
6391                 while (new_n_items < (1 + item))
6392                         new_n_items <<= 1;        /* double array size .. */
6393 
6394                 ASSERT(new_n_items >= (1 + item));   /* sanity check! */
6395 
6396                 new_array = kmem_zalloc(new_n_items * sizeof (void *),
6397                     KM_SLEEP);
6398                 /*
6399                  * Copy the pointers into the new array
6400                  */
6401                 bcopy(array, new_array, ss->n_items * sizeof (void *));
6402 
6403                 /*
6404                  * Save the old array on the dirty list
6405                  */
6406                 dirty = kmem_zalloc(sizeof (*dirty), KM_SLEEP);
6407                 dirty->array = ss->array;
6408                 dirty->n_items = ss->n_items;
6409                 dirty->next = ss->next;
6410                 ss->next = dirty;
6411 
6412                 ss->array = (array = new_array);
6413                 ss->n_items = new_n_items;
6414         }
6415 
6416         ASSERT(array != NULL && item < ss->n_items && array[item] == NULL);
6417 
6418         array[item] = new_element;
6419 
6420         mutex_exit(&ss->lock);
6421         return (DDI_SUCCESS);
6422 }
6423 
6424 /*
6425  * Fetch a pointer to the allocated soft state structure.
6426  *
6427  * This is designed to be cheap.
6428  *
6429  * There's an argument that there should be more checking for
6430  * nil pointers and out-of-bounds array indices, but we do a lot
6431  * of that in the alloc/free routines.
6432  *
6433  * An array has the convenience that we don't need to lock read-access
6434  * to it, unlike a linked list.  However, our "expanding array" strategy
6435  * means that we should hold a readers lock on the i_ddi_soft_state
6436  * structure.
6437  *
6438  * However, from a performance viewpoint, we need to do it without
6439  * any locks at all -- this also makes it a leaf routine.  The algorithm
6440  * is 'lock-free' because we only discard the pointer arrays at
6441  * ddi_soft_state_fini() time.
6442  */
6443 void *
6444 ddi_get_soft_state(void *state, int item)
6445 {
6446         i_ddi_soft_state        *ss = (i_ddi_soft_state *)state;
6447 
6448         ASSERT((ss != NULL) && (item >= 0));
6449 
6450         if (item < ss->n_items && ss->array != NULL)
6451                 return (ss->array[item]);
6452         return (NULL);
6453 }
6454 
6455 /*
6456  * Free the state structure corresponding to 'item.'   Freeing an
6457  * element that has either gone or was never allocated is not
6458  * considered an error.  Note that we free the state structure, but
6459  * we don't shrink our pointer array, or discard 'dirty' arrays,
6460  * since even a few pointers don't really waste too much memory.
6461  *
6462  * Passing an item number that is out of bounds, or a null pointer,
6463  * will provoke an error message.
6464  */
6465 void
6466 ddi_soft_state_free(void *state, int item)
6467 {
6468         i_ddi_soft_state        *ss = (i_ddi_soft_state *)state;
6469         void                    **array;
6470         void                    *element;
6471         static char             msg[] = "ddi_soft_state_free:";
6472 
6473         if (ss == NULL) {
6474                 cmn_err(CE_WARN, "%s null handle: %s",
6475                     msg, mod_containing_pc(caller()));
6476                 return;
6477         }
6478 
6479         element = NULL;
6480 
6481         mutex_enter(&ss->lock);
6482 
6483         if ((array = ss->array) == NULL || ss->size == 0) {
6484                 cmn_err(CE_WARN, "%s bad handle: %s",
6485                     msg, mod_containing_pc(caller()));
6486         } else if (item < 0 || item >= ss->n_items) {
6487                 cmn_err(CE_WARN, "%s item %d not in range [0..%lu]: %s",
6488                     msg, item, ss->n_items - 1, mod_containing_pc(caller()));
6489         } else if (array[item] != NULL) {
6490                 element = array[item];
6491                 array[item] = NULL;
6492         }
6493 
6494         mutex_exit(&ss->lock);
6495 
6496         if (element)
6497                 kmem_free(element, ss->size);
6498 }
6499 
6500 /*
6501  * Free the entire set of pointers, and any
6502  * soft state structures contained therein.
6503  *
6504  * Note that we don't grab the ss->lock mutex, even though
6505  * we're inspecting the various fields of the data structure.
6506  *
6507  * There is an implicit assumption that this routine will
6508  * never run concurrently with any of the above on this
6509  * particular state structure i.e. by the time the driver
6510  * calls this routine, there should be no other threads
6511  * running in the driver.
6512  */
6513 void
6514 ddi_soft_state_fini(void **state_p)
6515 {
6516         i_ddi_soft_state        *ss, *dirty;
6517         int                     item;
6518         static char             msg[] = "ddi_soft_state_fini:";
6519 
6520         if (state_p == NULL ||
6521             (ss = (i_ddi_soft_state *)(*state_p)) == NULL) {
6522                 cmn_err(CE_WARN, "%s null handle: %s",
6523                     msg, mod_containing_pc(caller()));
6524                 return;
6525         }
6526 
6527         if (ss->size == 0) {
6528                 cmn_err(CE_WARN, "%s bad handle: %s",
6529                     msg, mod_containing_pc(caller()));
6530                 return;
6531         }
6532 
6533         if (ss->n_items > 0) {
6534                 for (item = 0; item < ss->n_items; item++)
6535                         ddi_soft_state_free(ss, item);
6536                 kmem_free(ss->array, ss->n_items * sizeof (void *));
6537         }
6538 
6539         /*
6540          * Now delete any dirty arrays from previous 'grow' operations
6541          */
6542         for (dirty = ss->next; dirty; dirty = ss->next) {
6543                 ss->next = dirty->next;
6544                 kmem_free(dirty->array, dirty->n_items * sizeof (void *));
6545                 kmem_free(dirty, sizeof (*dirty));
6546         }
6547 
6548         mutex_destroy(&ss->lock);
6549         kmem_free(ss, sizeof (*ss));
6550 
6551         *state_p = NULL;
6552 }
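
/*
 * Illustrative life cycle (a sketch only; xx_state_t, xx_statep and the
 * driver entry points are hypothetical names):
 *
 *	static void *xx_statep;
 *
 *	_init(9E):	ddi_soft_state_init(&xx_statep,
 *			    sizeof (xx_state_t), 1);
 *	attach(9E):	instance = ddi_get_instance(dip);
 *			(void) ddi_soft_state_zalloc(xx_statep, instance);
 *			xsp = ddi_get_soft_state(xx_statep, instance);
 *	detach(9E):	ddi_soft_state_free(xx_statep, instance);
 *	_fini(9E):	ddi_soft_state_fini(&xx_statep);
 */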
6553 
6554 #define SS_N_ITEMS_PER_HASH     16
6555 #define SS_MIN_HASH_SZ          16
6556 #define SS_MAX_HASH_SZ          4096
6557 
6558 int
6559 ddi_soft_state_bystr_init(ddi_soft_state_bystr **state_p, size_t size,
6560     int n_items)
6561 {
6562         i_ddi_soft_state_bystr  *sss;
6563         int                     hash_sz;
6564 
6565         ASSERT(state_p && size && n_items);
6566         if ((state_p == NULL) || (size == 0) || (n_items == 0))
6567                 return (EINVAL);
6568 
6569         /* current implementation is hash-based; convert n_items to a hash size */
6570         hash_sz = n_items / SS_N_ITEMS_PER_HASH;
6571         if (hash_sz < SS_MIN_HASH_SZ)
6572                 hash_sz = SS_MIN_HASH_SZ;
6573         else if (hash_sz > SS_MAX_HASH_SZ)
6574                 hash_sz = SS_MAX_HASH_SZ;
6575 
6576         /* allocate soft_state pool */
6577         sss = kmem_zalloc(sizeof (*sss), KM_SLEEP);
6578         sss->ss_size = size;
6579         sss->ss_mod_hash = mod_hash_create_strhash("soft_state_bystr",
6580             hash_sz, mod_hash_null_valdtor);
6581         *state_p = (ddi_soft_state_bystr *)sss;
6582         return (0);
6583 }
6584 
6585 int
6586 ddi_soft_state_bystr_zalloc(ddi_soft_state_bystr *state, const char *str)
6587 {
6588         i_ddi_soft_state_bystr  *sss = (i_ddi_soft_state_bystr *)state;
6589         void                    *sso;
6590         char                    *dup_str;
6591 
6592         ASSERT(sss && str && sss->ss_mod_hash);
6593         if ((sss == NULL) || (str == NULL) || (sss->ss_mod_hash == NULL))
6594                 return (DDI_FAILURE);
6595         sso = kmem_zalloc(sss->ss_size, KM_SLEEP);
6596         dup_str = i_ddi_strdup((char *)str, KM_SLEEP);
6597         if (mod_hash_insert(sss->ss_mod_hash,
6598             (mod_hash_key_t)dup_str, (mod_hash_val_t)sso) == 0)
6599                 return (DDI_SUCCESS);
6600 
6601         /*
6602          * The only error from a strhash insert is caused by a duplicate key.
6603          * We refuse to tread on an existing element, so free and fail.
6604          */
6605         kmem_free(dup_str, strlen(dup_str) + 1);
6606         kmem_free(sso, sss->ss_size);
6607         return (DDI_FAILURE);
6608 }
6609 
6610 void *
6611 ddi_soft_state_bystr_get(ddi_soft_state_bystr *state, const char *str)
6612 {
6613         i_ddi_soft_state_bystr  *sss = (i_ddi_soft_state_bystr *)state;
6614         void                    *sso;
6615 
6616         ASSERT(sss && str && sss->ss_mod_hash);
6617         if ((sss == NULL) || (str == NULL) || (sss->ss_mod_hash == NULL))
6618                 return (NULL);
6619 
6620         if (mod_hash_find(sss->ss_mod_hash,
6621             (mod_hash_key_t)str, (mod_hash_val_t *)&sso) == 0)
6622                 return (sso);
6623         return (NULL);
6624 }
6625 
6626 void
6627 ddi_soft_state_bystr_free(ddi_soft_state_bystr *state, const char *str)
6628 {
6629         i_ddi_soft_state_bystr  *sss = (i_ddi_soft_state_bystr *)state;
6630         void                    *sso;
6631 
6632         ASSERT(sss && str && sss->ss_mod_hash);
6633         if ((sss == NULL) || (str == NULL) || (sss->ss_mod_hash == NULL))
6634                 return;
6635 
6636         (void) mod_hash_remove(sss->ss_mod_hash,
6637             (mod_hash_key_t)str, (mod_hash_val_t *)&sso);
6638         kmem_free(sso, sss->ss_size);
6639 }
6640 
6641 void
6642 ddi_soft_state_bystr_fini(ddi_soft_state_bystr **state_p)
6643 {
6644         i_ddi_soft_state_bystr  *sss;
6645 
6646         ASSERT(state_p);
6647         if (state_p == NULL)
6648                 return;
6649 
6650         sss = (i_ddi_soft_state_bystr *)(*state_p);
6651         if (sss == NULL)
6652                 return;
6653 
6654         ASSERT(sss->ss_mod_hash);
6655         if (sss->ss_mod_hash) {
6656                 mod_hash_destroy_strhash(sss->ss_mod_hash);
6657                 sss->ss_mod_hash = NULL;
6658         }
6659 
6660         kmem_free(sss, sizeof (*sss));
6661         *state_p = NULL;
6662 }
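
/*
 * Illustrative sketch of the by-string variant (xx_bystr, xx_node_t and
 * unit_address are hypothetical):
 *
 *	ddi_soft_state_bystr	*xx_bystr;
 *
 *	(void) ddi_soft_state_bystr_init(&xx_bystr, sizeof (xx_node_t), 32);
 *	(void) ddi_soft_state_bystr_zalloc(xx_bystr, unit_address);
 *	np = ddi_soft_state_bystr_get(xx_bystr, unit_address);
 *	ddi_soft_state_bystr_free(xx_bystr, unit_address);
 *	ddi_soft_state_bystr_fini(&xx_bystr);
 */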
6663 
6664 /*
6665  * The ddi_strid_* routines provide string-to-index management utilities.
6666  */
6667 /* allocate and initialize an strid set */
6668 int
6669 ddi_strid_init(ddi_strid **strid_p, int n_items)
6670 {
6671         i_ddi_strid     *ss;
6672         int             hash_sz;
6673 
6674         if (strid_p == NULL)
6675                 return (DDI_FAILURE);
6676 
6677         /* current implementation is based on hash, convert n_items to hash */
6678         /* current implementation is hash-based; convert n_items to a hash size */
6679         if (hash_sz < SS_MIN_HASH_SZ)
6680                 hash_sz = SS_MIN_HASH_SZ;
6681         else if (hash_sz > SS_MAX_HASH_SZ)
6682                 hash_sz = SS_MAX_HASH_SZ;
6683 
6684         ss = kmem_alloc(sizeof (*ss), KM_SLEEP);
6685         ss->strid_chunksz = n_items;
6686         ss->strid_spacesz = n_items;
6687         ss->strid_space = id_space_create("strid", 1, n_items);
6688         ss->strid_bystr = mod_hash_create_strhash("strid_bystr", hash_sz,
6689             mod_hash_null_valdtor);
6690         ss->strid_byid = mod_hash_create_idhash("strid_byid", hash_sz,
6691             mod_hash_null_valdtor);
6692         *strid_p = (ddi_strid *)ss;
6693         return (DDI_SUCCESS);
6694 }
6695 
6696 /* allocate an id mapping within the specified set for str, return id */
6697 static id_t
6698 i_ddi_strid_alloc(ddi_strid *strid, char *str)
6699 {
6700         i_ddi_strid     *ss = (i_ddi_strid *)strid;
6701         id_t            id;
6702         char            *s;
6703 
6704         ASSERT(ss && str);
6705         if ((ss == NULL) || (str == NULL))
6706                 return (0);
6707 
6708         /*
6709          * Allocate an id using VM_FIRSTFIT in order to keep allocated id
6710          * range as compressed as possible.  This is important to minimize
6711          * the amount of space used when the id is used as a ddi_soft_state
6712          * index by the caller.
6713          *
6714          * If the id list is exhausted, increase the size of the list
6715          * by the chunk size specified in ddi_strid_init and reattempt
6716          * the allocation.
6717          */
6718         if ((id = id_allocff_nosleep(ss->strid_space)) == (id_t)-1) {
6719                 id_space_extend(ss->strid_space, ss->strid_spacesz,
6720                     ss->strid_spacesz + ss->strid_chunksz);
6721                 ss->strid_spacesz += ss->strid_chunksz;
6722                 if ((id = id_allocff_nosleep(ss->strid_space)) == (id_t)-1)
6723                         return (0);
6724         }
6725 
6726         /*
6727          * NOTE: since we create and destroy in unison we can save space by
6728          * using bystr key as the byid value.  This means destroy must occur
6729          * in (byid, bystr) order.
6730          */
6731         s = i_ddi_strdup(str, KM_SLEEP);
6732         if (mod_hash_insert(ss->strid_bystr, (mod_hash_key_t)s,
6733             (mod_hash_val_t)(intptr_t)id) != 0) {
6734                 ddi_strid_free(strid, id);
6735                 return (0);
6736         }
6737         if (mod_hash_insert(ss->strid_byid, (mod_hash_key_t)(intptr_t)id,
6738             (mod_hash_val_t)s) != 0) {
6739                 ddi_strid_free(strid, id);
6740                 return (0);
6741         }
6742 
6743         /* NOTE: s is freed on mod_hash_destroy by mod_hash_strval_dtor */
6744         return (id);
6745 }
6746 
6747 /* allocate an id mapping within the specified set for str, return id */
6748 id_t
6749 ddi_strid_alloc(ddi_strid *strid, char *str)
6750 {
6751         return (i_ddi_strid_alloc(strid, str));
6752 }
6753 
6754 /* return the id within the specified strid given the str */
6755 id_t
6756 ddi_strid_str2id(ddi_strid *strid, char *str)
6757 {
6758         i_ddi_strid     *ss = (i_ddi_strid *)strid;
6759         id_t            id = 0;
6760         mod_hash_val_t  hv;
6761 
6762         ASSERT(ss && str);
6763         if (ss && str && (mod_hash_find(ss->strid_bystr,
6764             (mod_hash_key_t)str, &hv) == 0))
6765                 id = (int)(intptr_t)hv;
6766         return (id);
6767 }
6768 
6769 /* return str within the specified strid given the id */
6770 char *
6771 ddi_strid_id2str(ddi_strid *strid, id_t id)
6772 {
6773         i_ddi_strid     *ss = (i_ddi_strid *)strid;
6774         char            *str = NULL;
6775         mod_hash_val_t  hv;
6776 
6777         ASSERT(ss && id > 0);
6778         if (ss && (id > 0) && (mod_hash_find(ss->strid_byid,
6779             (mod_hash_key_t)(uintptr_t)id, &hv) == 0))
6780                 str = (char *)hv;
6781         return (str);
6782 }
6783 
6784 /* free the id mapping within the specified strid */
6785 void
6786 ddi_strid_free(ddi_strid *strid, id_t id)
6787 {
6788         i_ddi_strid     *ss = (i_ddi_strid *)strid;
6789         char            *str;
6790 
6791         ASSERT(ss && id > 0);
6792         if ((ss == NULL) || (id <= 0))
6793                 return;
6794 
6795         /* bystr key is byid value: destroy order must be (byid, bystr) */
6796         str = ddi_strid_id2str(strid, id);
6797         (void) mod_hash_destroy(ss->strid_byid, (mod_hash_key_t)(uintptr_t)id);
6798         id_free(ss->strid_space, id);
6799 
6800         if (str)
6801                 (void) mod_hash_destroy(ss->strid_bystr, (mod_hash_key_t)str);
6802 }
6803 
6804 /* destroy the strid set */
6805 void
6806 ddi_strid_fini(ddi_strid **strid_p)
6807 {
6808         i_ddi_strid     *ss;
6809 
6810         ASSERT(strid_p);
6811         if (strid_p == NULL)
6812                 return;
6813 
6814         ss = (i_ddi_strid *)(*strid_p);
6815         if (ss == NULL)
6816                 return;
6817 
6818         /* bystr key is byid value: destroy order must be (byid, bystr) */
6819         if (ss->strid_byid)
6820                 mod_hash_destroy_hash(ss->strid_byid);
6821         if (ss->strid_bystr)
6822                 mod_hash_destroy_hash(ss->strid_bystr);
6823         if (ss->strid_space)
6824                 id_space_destroy(ss->strid_space);
6825         kmem_free(ss, sizeof (*ss));
6826         *strid_p = NULL;
6827 }
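
/*
 * Illustrative sketch: a strid set maps a unit-address string to a compact
 * id, which can then double as a ddi_soft_state index (names hypothetical):
 *
 *	id = ddi_strid_alloc(xx_strid, unit_address);
 *	(void) ddi_soft_state_zalloc(xx_statep, id);
 *	...
 *	id = ddi_strid_str2id(xx_strid, unit_address);
 *	xsp = ddi_get_soft_state(xx_statep, id);
 *	...
 *	ddi_strid_free(xx_strid, id);
 */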
6828 
6829 /*
6830  * This sets the devi_addr entry in the dev_info structure 'dip' to 'name'.
6831  * Storage is double buffered to prevent updates during devi_addr use -
6832  * double buffering is adequate for reliable ddi_deviname() consumption.
6833  * The double buffer is not freed until dev_info structure destruction
6834  * (by i_ddi_free_node).
6835  */
6836 void
6837 ddi_set_name_addr(dev_info_t *dip, char *name)
6838 {
6839         char    *buf = DEVI(dip)->devi_addr_buf;
6840         char    *newaddr;
6841 
6842         if (buf == NULL) {
6843                 buf = kmem_zalloc(2 * MAXNAMELEN, KM_SLEEP);
6844                 DEVI(dip)->devi_addr_buf = buf;
6845         }
6846 
6847         if (name) {
6848                 ASSERT(strlen(name) < MAXNAMELEN);
6849                 newaddr = (DEVI(dip)->devi_addr == buf) ?
6850                     (buf + MAXNAMELEN) : buf;
6851                 (void) strlcpy(newaddr, name, MAXNAMELEN);
6852         } else
6853                 newaddr = NULL;
6854 
6855         DEVI(dip)->devi_addr = newaddr;
6856 }
6857 
6858 char *
6859 ddi_get_name_addr(dev_info_t *dip)
6860 {
6861         return (DEVI(dip)->devi_addr);
6862 }
6863 
6864 void
6865 ddi_set_parent_data(dev_info_t *dip, void *pd)
6866 {
6867         DEVI(dip)->devi_parent_data = pd;
6868 }
6869 
6870 void *
6871 ddi_get_parent_data(dev_info_t *dip)
6872 {
6873         return (DEVI(dip)->devi_parent_data);
6874 }
6875 
6876 /*
6877  * ddi_name_to_major: returns the major number of a named module,
6878  * derived from the current driver alias binding.
6879  *
6880  * Caveat: drivers should avoid the use of this function, in particular
6881  * together with ddi_get_name/ddi_binding_name, as per
6882  *      major = ddi_name_to_major(ddi_get_name(devi));
6883  * ddi_name_to_major() relies on the state of the device/alias binding,
6884  * which can and does change dynamically as aliases are administered
6885  * over time.  An attached device instance cannot rely on the major
6886  * number returned by ddi_name_to_major() to match its own major number.
6887  *
6888  * For driver use, ddi_driver_major() reliably returns the major number
6889  * for the module to which the device was bound at attach time over
6890  * the life of the instance.
6891  *      major = ddi_driver_major(dev_info_t *)
6892  */
6893 major_t
6894 ddi_name_to_major(char *name)
6895 {
6896         return (mod_name_to_major(name));
6897 }
6898 
6899 /*
6900  * ddi_major_to_name: Returns the module name bound to a major number.
6901  */
6902 char *
6903 ddi_major_to_name(major_t major)
6904 {
6905         return (mod_major_to_name(major));
6906 }
6907 
6908 /*
6909  * Return the name of the devinfo node pointed at by 'dip' in the buffer
6910  * pointed at by 'name.'  A devinfo node is named as a result of calling
6911  * ddi_initchild().
6912  *
6913  * Note: the driver must be held before calling this function!
6914  */
6915 char *
6916 ddi_deviname(dev_info_t *dip, char *name)
6917 {
6918         char *addrname;
6919         char none = '\0';
6920 
6921         if (dip == ddi_root_node()) {
6922                 *name = '\0';
6923                 return (name);
6924         }
6925 
6926         if (i_ddi_node_state(dip) < DS_BOUND) {
6927                 addrname = &none;
6928         } else {
6929                 /*
6930                  * Use ddi_get_name_addr() without checking state so we get
6931                  * a unit-address if we are called after ddi_set_name_addr()
6932                  * by nexus DDI_CTL_INITCHILD code, but before completing
6933                  * node promotion to DS_INITIALIZED.  We currently have
6934                  * two situations where we are called in this state:
6935                  *   o  For framework processing of a path-oriented alias.
6936                  *   o  If a SCSA nexus driver calls ddi_devid_register()
6937          *      from its tran_tgt_init(9E) implementation.
6938                  */
6939                 addrname = ddi_get_name_addr(dip);
6940                 if (addrname == NULL)
6941                         addrname = &none;
6942         }
6943 
6944         if (*addrname == '\0') {
6945                 (void) sprintf(name, "/%s", ddi_node_name(dip));
6946         } else {
6947                 (void) sprintf(name, "/%s@%s", ddi_node_name(dip), addrname);
6948         }
6949 
6950         return (name);
6951 }
6952 
6953 /*
6954  * Spits out the name of the device node, typically name@addr, for a given node,
6955  * using the driver name, not the nodename.
6956  *
6957  * Used by match_parent. Not to be used elsewhere.
6958  */
6959 char *
6960 i_ddi_parname(dev_info_t *dip, char *name)
6961 {
6962         char *addrname;
6963 
6964         if (dip == ddi_root_node()) {
6965                 *name = '\0';
6966                 return (name);
6967         }
6968 
6969         ASSERT(i_ddi_node_state(dip) >= DS_INITIALIZED);
6970 
6971         if (*(addrname = ddi_get_name_addr(dip)) == '\0')
6972                 (void) sprintf(name, "%s", ddi_binding_name(dip));
6973         else
6974                 (void) sprintf(name, "%s@%s", ddi_binding_name(dip), addrname);
6975         return (name);
6976 }
6977 
6978 static char *
6979 pathname_work(dev_info_t *dip, char *path)
6980 {
6981         char *bp;
6982 
6983         if (dip == ddi_root_node()) {
6984                 *path = '\0';
6985                 return (path);
6986         }
6987         (void) pathname_work(ddi_get_parent(dip), path);
6988         bp = path + strlen(path);
6989         (void) ddi_deviname(dip, bp);
6990         return (path);
6991 }
6992 
6993 char *
6994 ddi_pathname(dev_info_t *dip, char *path)
6995 {
6996         return (pathname_work(dip, path));
6997 }
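
/*
 * Example (illustrative): callers supply a buffer of at least MAXPATHLEN
 * bytes, e.g.
 *
 *	path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
 *	(void) ddi_pathname(dip, path);
 *	cmn_err(CE_CONT, "?device path: %s\n", path);
 *	kmem_free(path, MAXPATHLEN);
 */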
6998 
6999 char *
7000 ddi_pathname_minor(struct ddi_minor_data *dmdp, char *path)
7001 {
7002         if (dmdp->dip == NULL)
7003                 *path = '\0';
7004         else {
7005                 (void) ddi_pathname(dmdp->dip, path);
7006                 if (dmdp->ddm_name) {
7007                         (void) strcat(path, ":");
7008                         (void) strcat(path, dmdp->ddm_name);
7009                 }
7010         }
7011         return (path);
7012 }
7013 
7014 static char *
7015 pathname_work_obp(dev_info_t *dip, char *path)
7016 {
7017         char *bp;
7018         char *obp_path;
7019 
7020         /*
7021          * look up the "obp-path" property, return the path if it exists
7022          */
7023         if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
7024             "obp-path", &obp_path) == DDI_PROP_SUCCESS) {
7025                 (void) strcpy(path, obp_path);
7026                 ddi_prop_free(obp_path);
7027                 return (path);
7028         }
7029 
7030         /*
7031          * stop at root, no obp path
7032          */
7033         if (dip == ddi_root_node()) {
7034                 return (NULL);
7035         }
7036 
7037         obp_path = pathname_work_obp(ddi_get_parent(dip), path);
7038         if (obp_path == NULL)
7039                 return (NULL);
7040 
7041         /*
7042          * append our component to parent's obp path
7043          */
7044         bp = path + strlen(path);
7045         if (*(bp - 1) != '/')
7046                 (void) strcat(bp++, "/");
7047         (void) ddi_deviname(dip, bp);
7048         return (path);
7049 }
7050 
7051 /*
7052  * return the 'obp-path' based path for the given node, or NULL if the node
7053  * does not have a different obp path. NOTE: Unlike ddi_pathname, this
7054  * function can't be called from interrupt context (since we need to
7055  * look up a string property).
7056  */
7057 char *
7058 ddi_pathname_obp(dev_info_t *dip, char *path)
7059 {
7060         ASSERT(!servicing_interrupt());
7061         if (dip == NULL || path == NULL)
7062                 return (NULL);
7063 
7064         /* split work into a separate function to aid debugging */
7065         return (pathname_work_obp(dip, path));
7066 }
7067 
7068 int
7069 ddi_pathname_obp_set(dev_info_t *dip, char *component)
7070 {
7071         dev_info_t *pdip;
7072         char *obp_path = NULL;
7073         int rc = DDI_FAILURE;
7074 
7075         if (dip == NULL)
7076                 return (DDI_FAILURE);
7077 
7078         obp_path = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
7079 
7080         pdip = ddi_get_parent(dip);
7081 
7082         if (ddi_pathname_obp(pdip, obp_path) == NULL) {
7083                 (void) ddi_pathname(pdip, obp_path);
7084         }
7085 
7086         if (component) {
7087                 (void) strncat(obp_path, "/", MAXPATHLEN);
7088                 (void) strncat(obp_path, component, MAXPATHLEN);
7089         }
7090         rc = ndi_prop_update_string(DDI_DEV_T_NONE, dip, "obp-path",
7091             obp_path);
7092 
7093         if (obp_path)
7094                 kmem_free(obp_path, MAXPATHLEN);
7095 
7096         return (rc);
7097 }
7098 
7099 /*
7100  * Given a dev_t, return the pathname of the corresponding device in the
7101  * buffer pointed at by "path."  The buffer is assumed to be large enough
7102  * to hold the pathname of the device (MAXPATHLEN).
7103  *
7104  * The pathname of a device is the pathname of the devinfo node to which
7105  * the device "belongs," concatenated with the character ':' and the name
7106  * of the minor node corresponding to the dev_t.  If spec_type is 0 then
7107  * just the pathname of the devinfo node is returned without driving attach
7108  * of that node.  For a non-zero spec_type, an attach is performed and a
7109  * search of the minor list occurs.
7110  *
7111  * It is possible that the path associated with the dev_t is not
7112  * currently available in the devinfo tree.  In order to have a
7113  * dev_t, a device must have been discovered before, which means
7114  * that the path is always in the instance tree.  The one exception
7115  * to this is if the dev_t is associated with a pseudo driver, in
7116  * which case the device must exist on the pseudo branch of the
7117  * devinfo tree as a result of parsing .conf files.
7118  */
7119 int
7120 ddi_dev_pathname(dev_t devt, int spec_type, char *path)
7121 {
7122         int             circ;
7123         major_t         major = getmajor(devt);
7124         int             instance;
7125         dev_info_t      *dip;
7126         char            *minorname;
7127         char            *drvname;
7128 
7129         if (major >= devcnt)
7130                 goto fail;
7131         if (major == clone_major) {
7132                 /* clone has no minor nodes, manufacture the path here */
7133                 if ((drvname = ddi_major_to_name(getminor(devt))) == NULL)
7134                         goto fail;
7135 
7136                 (void) snprintf(path, MAXPATHLEN, "%s:%s", CLONE_PATH, drvname);
7137                 return (DDI_SUCCESS);
7138         }
7139 
7140         /* extract instance from devt (getinfo(9E) DDI_INFO_DEVT2INSTANCE). */
7141         if ((instance = dev_to_instance(devt)) == -1)
7142                 goto fail;
7143 
7144         /* reconstruct the path given the major/instance */
7145         if (e_ddi_majorinstance_to_path(major, instance, path) != DDI_SUCCESS)
7146                 goto fail;
7147 
7148         /* if spec_type given we must drive attach and search minor nodes */
7149         if ((spec_type == S_IFCHR) || (spec_type == S_IFBLK)) {
7150                 /* attach the path so we can search minors */
7151                 if ((dip = e_ddi_hold_devi_by_path(path, 0)) == NULL)
7152                         goto fail;
7153 
7154                 /* Add minorname to path. */
7155                 ndi_devi_enter(dip, &circ);
7156                 minorname = i_ddi_devtspectype_to_minorname(dip,
7157                     devt, spec_type);
7158                 if (minorname) {
7159                         (void) strcat(path, ":");
7160                         (void) strcat(path, minorname);
7161                 }
7162                 ndi_devi_exit(dip, circ);
7163                 ddi_release_devi(dip);
7164                 if (minorname == NULL)
7165                         goto fail;
7166         }
7167         ASSERT(strlen(path) < MAXPATHLEN);
7168         return (DDI_SUCCESS);
7169 
7170 fail:   *path = 0;
7171         return (DDI_FAILURE);
7172 }
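
/*
 * Example (illustrative): resolve a character-special dev_t, including the
 * minor name, into a caller-supplied MAXPATHLEN buffer:
 *
 *	path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
 *	if (ddi_dev_pathname(dev, S_IFCHR, path) == DDI_SUCCESS)
 *		cmn_err(CE_CONT, "?%s\n", path);
 *	kmem_free(path, MAXPATHLEN);
 */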
7173 
7174 /*
7175  * Given a major number and an instance, return the path.
7176  * This interface does NOT drive attach.
7177  */
7178 int
7179 e_ddi_majorinstance_to_path(major_t major, int instance, char *path)
7180 {
7181         struct devnames *dnp;
7182         dev_info_t      *dip;
7183 
7184         if ((major >= devcnt) || (instance == -1)) {
7185                 *path = 0;
7186                 return (DDI_FAILURE);
7187         }
7188 
7189         /* look for the major/instance in the instance tree */
7190         if (e_ddi_instance_majorinstance_to_path(major, instance,
7191             path) == DDI_SUCCESS) {
7192                 ASSERT(strlen(path) < MAXPATHLEN);
7193                 return (DDI_SUCCESS);
7194         }
7195 
7196         /*
7197          * Not in the instance tree; find the instance on the per-driver list
7198          * and construct the path to it via ddi_pathname(). This is how paths
7199          * down the 'pseudo' branch are constructed.
7200          */
7201         dnp = &(devnamesp[major]);
7202         LOCK_DEV_OPS(&(dnp->dn_lock));
7203         for (dip = dnp->dn_head; dip;
7204             dip = (dev_info_t *)DEVI(dip)->devi_next) {
7205                 /* Skip if instance does not match. */
7206                 if (DEVI(dip)->devi_instance != instance)
7207                         continue;
7208 
7209                 /*
7210                  * An ndi_hold_devi() does not prevent DS_INITIALIZED->DS_BOUND
7211                  * node demotion, so it is not an effective way of ensuring
7212                  * that the ddi_pathname result has a unit-address.  Instead,
7213                  * we reverify the node state after calling ddi_pathname().
7214                  */
7215                 if (i_ddi_node_state(dip) >= DS_INITIALIZED) {
7216                         (void) ddi_pathname(dip, path);
7217                         if (i_ddi_node_state(dip) < DS_INITIALIZED)
7218                                 continue;
7219                         UNLOCK_DEV_OPS(&(dnp->dn_lock));
7220                         ASSERT(strlen(path) < MAXPATHLEN);
7221                         return (DDI_SUCCESS);
7222                 }
7223         }
7224         UNLOCK_DEV_OPS(&(dnp->dn_lock));
7225 
7226         /* can't reconstruct the path */
7227         *path = 0;
7228         return (DDI_FAILURE);
7229 }
7230 
7231 #define GLD_DRIVER_PPA "SUNW,gld_v0_ppa"
7232 
7233 /*
7234  * Given the dip for a network interface return the ppa for that interface.
7235  *
7236  * In all cases except GLD v0 drivers, the ppa == instance.
7237  * In the case of GLD v0 drivers, the ppa is equal to the attach order.
7238  * So for these drivers when the attach routine calls gld_register(),
7239  * the GLD framework creates an integer property, GLD_DRIVER_PPA
7240  * ("SUNW,gld_v0_ppa"), that can be queried here.
7241  *
7242  * The only time this function is used is when a system is booting over nfs.
7243  * In this case the system has to resolve the pathname of the boot device
7244  * to its ppa.
7245  */
7246 int
7247 i_ddi_devi_get_ppa(dev_info_t *dip)
7248 {
7249         return (ddi_prop_get_int(DDI_DEV_T_ANY, dip,
7250             DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
7251             GLD_DRIVER_PPA, ddi_get_instance(dip)));
7252 }
7253 
7254 /*
7255  * i_ddi_devi_set_ppa() should only be called from gld_register()
7256  * and only for GLD v0 drivers
7257  */
7258 void
7259 i_ddi_devi_set_ppa(dev_info_t *dip, int ppa)
7260 {
7261         (void) e_ddi_prop_update_int(DDI_DEV_T_NONE, dip, GLD_DRIVER_PPA, ppa);
7262 }
7263 
7264 
7265 /*
7266  * Private DDI Console bell functions.
7267  */
7268 void
7269 ddi_ring_console_bell(clock_t duration)
7270 {
7271         if (ddi_console_bell_func != NULL)
7272                 (*ddi_console_bell_func)(duration);
7273 }
7274 
7275 void
7276 ddi_set_console_bell(void (*bellfunc)(clock_t duration))
7277 {
7278         ddi_console_bell_func = bellfunc;
7279 }
7280 
7281 int
7282 ddi_dma_alloc_handle(dev_info_t *dip, ddi_dma_attr_t *attr,
7283         int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *handlep)
7284 {
7285         int (*funcp)() = ddi_dma_allochdl;
7286         ddi_dma_attr_t dma_attr;
7287         struct bus_ops *bop;
7288 
7289         if (attr == (ddi_dma_attr_t *)0)
7290                 return (DDI_DMA_BADATTR);
7291 
7292         dma_attr = *attr;
7293 
7294         bop = DEVI(dip)->devi_ops->devo_bus_ops;
7295         if (bop && bop->bus_dma_allochdl)
7296                 funcp = bop->bus_dma_allochdl;
7297 
7298         return ((*funcp)(dip, dip, &dma_attr, waitfp, arg, handlep));
7299 }
7300 
7301 void
7302 ddi_dma_free_handle(ddi_dma_handle_t *handlep)
7303 {
7304         ddi_dma_handle_t h = *handlep;
7305         (void) ddi_dma_freehdl(HD, HD, h);
7306 }
7307 
7308 static uintptr_t dma_mem_list_id = 0;
7309 
7310 
7311 int
7312 ddi_dma_mem_alloc(ddi_dma_handle_t handle, size_t length,
7313         ddi_device_acc_attr_t *accattrp, uint_t flags,
7314         int (*waitfp)(caddr_t), caddr_t arg, caddr_t *kaddrp,
7315         size_t *real_length, ddi_acc_handle_t *handlep)
7316 {
7317         ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
7318         dev_info_t *dip = hp->dmai_rdip;
7319         ddi_acc_hdl_t *ap;
7320         ddi_dma_attr_t *attrp = &hp->dmai_attr;
7321         uint_t sleepflag, xfermodes;
7322         int (*fp)(caddr_t);
7323         int rval;
7324 
7325         if (waitfp == DDI_DMA_SLEEP)
7326                 fp = (int (*)())KM_SLEEP;
7327         else if (waitfp == DDI_DMA_DONTWAIT)
7328                 fp = (int (*)())KM_NOSLEEP;
7329         else
7330                 fp = waitfp;
7331         *handlep = impl_acc_hdl_alloc(fp, arg);
7332         if (*handlep == NULL)
7333                 return (DDI_FAILURE);
7334 
7335 /* SPARC mappings are always cacheable, as SPARC guarantees cache coherency. */
7336 #ifndef __sparc
7337         /* Transform attributes into correct cache flags. */
7338         if ((flags & IOMEM_DATA_MASK) == 0) {
7339                 switch (accattrp->devacc_attr_dataorder) {
7340                 case DDI_STRICTORDER_ACC:
7341                         flags |= IOMEM_DATA_UNCACHED;
7342                         break;
7343                 case DDI_MERGING_OK_ACC:
7344                         flags |= IOMEM_DATA_UC_WR_COMBINE;
7345                         break;
7346                 default:
7347                         flags |= IOMEM_DATA_CACHED;
7348                         break;
7349                 }
7350         }
7351 #endif
7352 
7353         /* check if the cache attributes are supported */
7354         if (i_ddi_check_cache_attr(flags) == B_FALSE)
7355                 return (DDI_FAILURE);
7356 
7357         /*
7358          * Transfer the meaningful bits to xfermodes.
7359          * Double-check if the 3rd party driver correctly sets the bits.
7360          * If not, set DDI_DMA_STREAMING to keep compatibility.
7361          */
7362         xfermodes = flags & (DDI_DMA_CONSISTENT | DDI_DMA_STREAMING);
7363         if (xfermodes == 0) {
7364                 xfermodes = DDI_DMA_STREAMING;
7365         }
7366 
7367         /*
7368          * initialize the common elements of the data access handle
7369          */
7370         ap = impl_acc_hdl_get(*handlep);
7371         ap->ah_vers = VERS_ACCHDL;
7372         ap->ah_dip = dip;
7373         ap->ah_offset = 0;
7374         ap->ah_len = 0;
7375         ap->ah_xfermodes = flags;
7376         ap->ah_acc = *accattrp;
7377 
7378         sleepflag = ((waitfp == DDI_DMA_SLEEP) ? 1 : 0);
7379         if (xfermodes == DDI_DMA_CONSISTENT) {
7380                 rval = i_ddi_mem_alloc(dip, attrp, length, sleepflag,
7381                     flags, accattrp, kaddrp, NULL, ap);
7382                 *real_length = length;
7383         } else {
7384                 rval = i_ddi_mem_alloc(dip, attrp, length, sleepflag,
7385                     flags, accattrp, kaddrp, real_length, ap);
7386         }
7387         if (rval == DDI_SUCCESS) {
7388                 ap->ah_len = (off_t)(*real_length);
7389                 ap->ah_addr = *kaddrp;
7390         } else {
7391                 impl_acc_hdl_free(*handlep);
7392                 *handlep = (ddi_acc_handle_t)NULL;
7393                 if (waitfp != DDI_DMA_SLEEP && waitfp != DDI_DMA_DONTWAIT) {
7394                         ddi_set_callback(waitfp, arg, &dma_mem_list_id);
7395                 }
7396                 rval = DDI_FAILURE;
7397         }
7398         return (rval);
7399 }
7400 
7401 void
7402 ddi_dma_mem_free(ddi_acc_handle_t *handlep)
7403 {
7404         ddi_acc_hdl_t *ap;
7405 
7406         ap = impl_acc_hdl_get(*handlep);
7407         ASSERT(ap);
7408 
7409         i_ddi_mem_free((caddr_t)ap->ah_addr, ap);
7410 
7411         /*
7412          * free the handle
7413          */
7414         impl_acc_hdl_free(*handlep);
7415         *handlep = (ddi_acc_handle_t)NULL;
7416 
7417         if (dma_mem_list_id != 0) {
7418                 ddi_run_callback(&dma_mem_list_id);
7419         }
7420 }
7421 
7422 int
7423 ddi_dma_buf_bind_handle(ddi_dma_handle_t handle, struct buf *bp,
7424         uint_t flags, int (*waitfp)(caddr_t), caddr_t arg,
7425         ddi_dma_cookie_t *cookiep, uint_t *ccountp)
7426 {
7427         ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
7428         dev_info_t *dip, *rdip;
7429         struct ddi_dma_req dmareq;
7430         int (*funcp)();
7431 
7432         dmareq.dmar_flags = flags;
7433         dmareq.dmar_fp = waitfp;
7434         dmareq.dmar_arg = arg;
7435         dmareq.dmar_object.dmao_size = (uint_t)bp->b_bcount;
7436 
7437         if (bp->b_flags & B_PAGEIO) {
7438                 dmareq.dmar_object.dmao_type = DMA_OTYP_PAGES;
7439                 dmareq.dmar_object.dmao_obj.pp_obj.pp_pp = bp->b_pages;
7440                 dmareq.dmar_object.dmao_obj.pp_obj.pp_offset =
7441                     (uint_t)(((uintptr_t)bp->b_un.b_addr) & MMU_PAGEOFFSET);
7442         } else {
7443                 dmareq.dmar_object.dmao_obj.virt_obj.v_addr = bp->b_un.b_addr;
7444                 if (bp->b_flags & B_SHADOW) {
7445                         dmareq.dmar_object.dmao_obj.virt_obj.v_priv =
7446                             bp->b_shadow;
7447                         dmareq.dmar_object.dmao_type = DMA_OTYP_BUFVADDR;
7448                 } else {
7449                         dmareq.dmar_object.dmao_type =
7450                             (bp->b_flags & (B_PHYS | B_REMAPPED)) ?
7451                             DMA_OTYP_BUFVADDR : DMA_OTYP_VADDR;
7452                         dmareq.dmar_object.dmao_obj.virt_obj.v_priv = NULL;
7453                 }
7454 
7455                 /*
7456                  * If the buffer has no proc pointer, or the proc
7457                  * struct has the kernel address space, or the buffer has
7458                  * been marked B_REMAPPED (meaning that it is now
7459                  * mapped into the kernel's address space), then
7460                  * the address space is kas (kernel address space).
7461                  */
7462                 if ((bp->b_proc == NULL) || (bp->b_proc->p_as == &kas) ||
7463                     (bp->b_flags & B_REMAPPED)) {
7464                         dmareq.dmar_object.dmao_obj.virt_obj.v_as = 0;
7465                 } else {
7466                         dmareq.dmar_object.dmao_obj.virt_obj.v_as =
7467                             bp->b_proc->p_as;
7468                 }
7469         }
7470 
7471         dip = rdip = hp->dmai_rdip;
7472         if (dip != ddi_root_node())
7473                 dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_bindhdl;
7474         funcp = DEVI(rdip)->devi_bus_dma_bindfunc;
7475         return ((*funcp)(dip, rdip, handle, &dmareq, cookiep, ccountp));
7476 }
7477 
7478 int
7479 ddi_dma_addr_bind_handle(ddi_dma_handle_t handle, struct as *as,
7480         caddr_t addr, size_t len, uint_t flags, int (*waitfp)(caddr_t),
7481         caddr_t arg, ddi_dma_cookie_t *cookiep, uint_t *ccountp)
7482 {
7483         ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
7484         dev_info_t *dip, *rdip;
7485         struct ddi_dma_req dmareq;
7486         int (*funcp)();
7487 
7488         if (len == (uint_t)0) {
7489                 return (DDI_DMA_NOMAPPING);
7490         }
7491         dmareq.dmar_flags = flags;
7492         dmareq.dmar_fp = waitfp;
7493         dmareq.dmar_arg = arg;
7494         dmareq.dmar_object.dmao_size = len;
7495         dmareq.dmar_object.dmao_type = DMA_OTYP_VADDR;
7496         dmareq.dmar_object.dmao_obj.virt_obj.v_as = as;
7497         dmareq.dmar_object.dmao_obj.virt_obj.v_addr = addr;
7498         dmareq.dmar_object.dmao_obj.virt_obj.v_priv = NULL;
7499 
7500         dip = rdip = hp->dmai_rdip;
7501         if (dip != ddi_root_node())
7502                 dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_bindhdl;
7503         funcp = DEVI(rdip)->devi_bus_dma_bindfunc;
7504         return ((*funcp)(dip, rdip, handle, &dmareq, cookiep, ccountp));
7505 }
7506 
7507 void
7508 ddi_dma_nextcookie(ddi_dma_handle_t handle, ddi_dma_cookie_t *cookiep)
7509 {
7510         ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
7511         ddi_dma_cookie_t *cp;
7512 
7513         cp = hp->dmai_cookie;
7514         ASSERT(cp);
7515 
7516         cookiep->dmac_notused = cp->dmac_notused;
7517         cookiep->dmac_type = cp->dmac_type;
7518         cookiep->dmac_address = cp->dmac_address;
7519         cookiep->dmac_size = cp->dmac_size;
7520         hp->dmai_cookie++;
7521 }
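
/*
 * Illustrative bind/cookie sequence (a sketch only; xx_dma_attr,
 * xx_acc_attr and the local variables are hypothetical, and error
 * handling is elided):
 *
 *	(void) ddi_dma_alloc_handle(dip, &xx_dma_attr, DDI_DMA_SLEEP,
 *	    NULL, &hdl);
 *	(void) ddi_dma_mem_alloc(hdl, size, &xx_acc_attr, DDI_DMA_CONSISTENT,
 *	    DDI_DMA_SLEEP, NULL, &kaddr, &real_len, &acc_hdl);
 *	(void) ddi_dma_addr_bind_handle(hdl, NULL, kaddr, real_len,
 *	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
 *	    &cookie, &ccount);
 *
 * The bind returns the first cookie; the remaining (ccount - 1) cookies
 * are retrieved with ddi_dma_nextcookie():
 *
 *	for (i = 1; i < ccount; i++)
 *		ddi_dma_nextcookie(hdl, &cookie);
 */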
7522 
7523 int
7524 ddi_dma_numwin(ddi_dma_handle_t handle, uint_t *nwinp)
7525 {
7526         ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
7527         if ((hp->dmai_rflags & DDI_DMA_PARTIAL) == 0) {
7528                 return (DDI_FAILURE);
7529         } else {
7530                 *nwinp = hp->dmai_nwin;
7531                 return (DDI_SUCCESS);
7532         }
7533 }
7534 
7535 int
7536 ddi_dma_getwin(ddi_dma_handle_t h, uint_t win, off_t *offp,
7537         size_t *lenp, ddi_dma_cookie_t *cookiep, uint_t *ccountp)
7538 {
7539         int (*funcp)() = ddi_dma_win;
7540         struct bus_ops *bop;
7541 
7542         bop = DEVI(HD)->devi_ops->devo_bus_ops;
7543         if (bop && bop->bus_dma_win)
7544                 funcp = bop->bus_dma_win;
7545 
7546         return ((*funcp)(HD, HD, h, win, offp, lenp, cookiep, ccountp));
7547 }
7548 
7549 int
7550 ddi_dma_set_sbus64(ddi_dma_handle_t h, ulong_t burstsizes)
7551 {
7552         return (ddi_dma_mctl(HD, HD, h, DDI_DMA_SET_SBUS64, 0,
7553             &burstsizes, 0, 0));
7554 }
7555 
7556 int
7557 i_ddi_dma_fault_check(ddi_dma_impl_t *hp)
7558 {
7559         return (hp->dmai_fault);
7560 }
7561 
7562 int
7563 ddi_check_dma_handle(ddi_dma_handle_t handle)
7564 {
7565         ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
7566         int (*check)(ddi_dma_impl_t *);
7567 
7568         if ((check = hp->dmai_fault_check) == NULL)
7569                 check = i_ddi_dma_fault_check;
7570 
7571         return (((*check)(hp) == DDI_SUCCESS) ? DDI_SUCCESS : DDI_FAILURE);
7572 }
7573 
7574 void
7575 i_ddi_dma_set_fault(ddi_dma_handle_t handle)
7576 {
7577         ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
7578         void (*notify)(ddi_dma_impl_t *);
7579 
7580         if (!hp->dmai_fault) {
7581                 hp->dmai_fault = 1;
7582                 if ((notify = hp->dmai_fault_notify) != NULL)
7583                         (*notify)(hp);
7584         }
7585 }
7586 
7587 void
7588 i_ddi_dma_clr_fault(ddi_dma_handle_t handle)
7589 {
7590         ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
7591         void (*notify)(ddi_dma_impl_t *);
7592 
7593         if (hp->dmai_fault) {
7594                 hp->dmai_fault = 0;
7595                 if ((notify = hp->dmai_fault_notify) != NULL)
7596                         (*notify)(hp);
7597         }
7598 }
7599 
7600 /*
7601  * register mapping routines.
7602  */
7603 int
7604 ddi_regs_map_setup(dev_info_t *dip, uint_t rnumber, caddr_t *addrp,
7605         offset_t offset, offset_t len, ddi_device_acc_attr_t *accattrp,
7606         ddi_acc_handle_t *handle)
7607 {
7608         ddi_map_req_t mr;
7609         ddi_acc_hdl_t *hp;
7610         int result;
7611 
7612         /*
7613          * Allocate and initialize the common elements of the data access handle.
7614          */
7615         *handle = impl_acc_hdl_alloc(KM_SLEEP, NULL);
7616         hp = impl_acc_hdl_get(*handle);
7617         hp->ah_vers = VERS_ACCHDL;
7618         hp->ah_dip = dip;
7619         hp->ah_rnumber = rnumber;
7620         hp->ah_offset = offset;
7621         hp->ah_len = len;
7622         hp->ah_acc = *accattrp;
7623 
7624         /*
7625          * Set up the mapping request and call to parent.
7626          */
7627         mr.map_op = DDI_MO_MAP_LOCKED;
7628         mr.map_type = DDI_MT_RNUMBER;
7629         mr.map_obj.rnumber = rnumber;
7630         mr.map_prot = PROT_READ | PROT_WRITE;
7631         mr.map_flags = DDI_MF_KERNEL_MAPPING;
7632         mr.map_handlep = hp;
7633         mr.map_vers = DDI_MAP_VERSION;
7634         result = ddi_map(dip, &mr, offset, len, addrp);
7635 
7636         /*
7637          * check for end result
7638          */
7639         if (result != DDI_SUCCESS) {
7640                 impl_acc_hdl_free(*handle);
7641                 *handle = (ddi_acc_handle_t)NULL;
7642         } else {
7643                 hp->ah_addr = *addrp;
7644         }
7645 
7646         return (result);
7647 }
7648 
7649 void
7650 ddi_regs_map_free(ddi_acc_handle_t *handlep)
7651 {
7652         ddi_map_req_t mr;
7653         ddi_acc_hdl_t *hp;
7654 
7655         hp = impl_acc_hdl_get(*handlep);
7656         ASSERT(hp);
7657 
7658         mr.map_op = DDI_MO_UNMAP;
7659         mr.map_type = DDI_MT_RNUMBER;
7660         mr.map_obj.rnumber = hp->ah_rnumber;
7661         mr.map_prot = PROT_READ | PROT_WRITE;
7662         mr.map_flags = DDI_MF_KERNEL_MAPPING;
7663         mr.map_handlep = hp;
7664         mr.map_vers = DDI_MAP_VERSION;
7665 
7666         /*
7667          * Call my parent to unmap my regs.
7668          */
7669         (void) ddi_map(hp->ah_dip, &mr, hp->ah_offset,
7670             hp->ah_len, &hp->ah_addr);
7671         /*
7672          * free the handle
7673          */
7674         impl_acc_hdl_free(*handlep);
7675         *handlep = (ddi_acc_handle_t)NULL;
7676 }
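
/*
 * Typical usage from attach(9e) (illustrative sketch; the register number,
 * access attributes and XX_* names are hypothetical):
 *
 *	if (ddi_regs_map_setup(dip, 1, &regs, 0, 0, &xx_acc_attr,
 *	    &acc_hdl) != DDI_SUCCESS)
 *		return (DDI_FAILURE);
 *	csr = ddi_get32(acc_hdl, (uint32_t *)(regs + XX_CSR_OFFSET));
 *	ddi_put32(acc_hdl, (uint32_t *)(regs + XX_CSR_OFFSET),
 *	    csr | XX_ENABLE);
 *	...
 *	ddi_regs_map_free(&acc_hdl);
 */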
7677 
7678 int
7679 ddi_device_zero(ddi_acc_handle_t handle, caddr_t dev_addr, size_t bytecount,
7680         ssize_t dev_advcnt, uint_t dev_datasz)
7681 {
7682         uint8_t *b;
7683         uint16_t *w;
7684         uint32_t *l;
7685         uint64_t *ll;
7686 
7687         /* the total byte count must be a multiple of the data transfer size */
7688         if (bytecount != ((bytecount / dev_datasz) * dev_datasz))
7689                 return (DDI_FAILURE);
7690 
7691         switch (dev_datasz) {
7692         case DDI_DATA_SZ01_ACC:
7693                 for (b = (uint8_t *)dev_addr;
7694                     bytecount != 0; bytecount -= 1, b += dev_advcnt)
7695                         ddi_put8(handle, b, 0);
7696                 break;
7697         case DDI_DATA_SZ02_ACC:
7698                 for (w = (uint16_t *)dev_addr;
7699                     bytecount != 0; bytecount -= 2, w += dev_advcnt)
7700                         ddi_put16(handle, w, 0);
7701                 break;
7702         case DDI_DATA_SZ04_ACC:
7703                 for (l = (uint32_t *)dev_addr;
7704                     bytecount != 0; bytecount -= 4, l += dev_advcnt)
7705                         ddi_put32(handle, l, 0);
7706                 break;
7707         case DDI_DATA_SZ08_ACC:
7708                 for (ll = (uint64_t *)dev_addr;
7709                     bytecount != 0; bytecount -= 8, ll += dev_advcnt)
7710                         ddi_put64(handle, ll, 0x0ll);
7711                 break;
7712         default:
7713                 return (DDI_FAILURE);
7714         }
7715         return (DDI_SUCCESS);
7716 }
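
/*
 * Example (illustrative, with a hypothetical handle and device address):
 * clear 512 bytes of device memory using 32-bit writes, advancing one
 * 32-bit word per access:
 *
 *	(void) ddi_device_zero(acc_hdl, dev_addr, 512, 1, DDI_DATA_SZ04_ACC);
 */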
7717 
7718 int
7719 ddi_device_copy(
7720         ddi_acc_handle_t src_handle, caddr_t src_addr, ssize_t src_advcnt,
7721         ddi_acc_handle_t dest_handle, caddr_t dest_addr, ssize_t dest_advcnt,
7722         size_t bytecount, uint_t dev_datasz)
7723 {
7724         uint8_t *b_src, *b_dst;
7725         uint16_t *w_src, *w_dst;
7726         uint32_t *l_src, *l_dst;
7727         uint64_t *ll_src, *ll_dst;
7728 
7729         /* the total byte count must be a multiple of the data transfer size */
7730         if (bytecount != ((bytecount / dev_datasz) * dev_datasz))
7731                 return (DDI_FAILURE);
7732 
7733         switch (dev_datasz) {
7734         case DDI_DATA_SZ01_ACC:
7735                 b_src = (uint8_t *)src_addr;
7736                 b_dst = (uint8_t *)dest_addr;
7737 
7738                 for (; bytecount != 0; bytecount -= 1) {
7739                         ddi_put8(dest_handle, b_dst,
7740                             ddi_get8(src_handle, b_src));
7741                         b_dst += dest_advcnt;
7742                         b_src += src_advcnt;
7743                 }
7744                 break;
7745         case DDI_DATA_SZ02_ACC:
7746                 w_src = (uint16_t *)src_addr;
7747                 w_dst = (uint16_t *)dest_addr;
7748 
7749                 for (; bytecount != 0; bytecount -= 2) {
7750                         ddi_put16(dest_handle, w_dst,
7751                             ddi_get16(src_handle, w_src));
7752                         w_dst += dest_advcnt;
7753                         w_src += src_advcnt;
7754                 }
7755                 break;
7756         case DDI_DATA_SZ04_ACC:
7757                 l_src = (uint32_t *)src_addr;
7758                 l_dst = (uint32_t *)dest_addr;
7759 
7760                 for (; bytecount != 0; bytecount -= 4) {
7761                         ddi_put32(dest_handle, l_dst,
7762                             ddi_get32(src_handle, l_src));
7763                         l_dst += dest_advcnt;
7764                         l_src += src_advcnt;
7765                 }
7766                 break;
7767         case DDI_DATA_SZ08_ACC:
7768                 ll_src = (uint64_t *)src_addr;
7769                 ll_dst = (uint64_t *)dest_addr;
7770 
7771                 for (; bytecount != 0; bytecount -= 8) {
7772                         ddi_put64(dest_handle, ll_dst,
7773                             ddi_get64(src_handle, ll_src));
7774                         ll_dst += dest_advcnt;
7775                         ll_src += src_advcnt;
7776                 }
7777                 break;
7778         default:
7779                 return (DDI_FAILURE);
7780         }
7781         return (DDI_SUCCESS);
7782 }
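
/*
 * Illustrative usage sketch (not part of the original file): copying 512
 * bytes between two mapped device regions in 64-bit units.  The handle and
 * address names are hypothetical and would come from prior
 * ddi_regs_map_setup() calls; an advance count of 1 walks both regions
 * sequentially.
 *
 *	if (ddi_device_copy(src_hdl, src, 1, dst_hdl, dst, 1, 512,
 *	    DDI_DATA_SZ08_ACC) != DDI_SUCCESS)
 *		cmn_err(CE_WARN, "device copy failed");
 */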
7783 
7784 #define swap16(value)  \
7785         ((((value) & 0xff) << 8) | ((value) >> 8))
7786 
7787 #define swap32(value)   \
7788         (((uint32_t)swap16((uint16_t)((value) & 0xffff)) << 16) | \
7789         (uint32_t)swap16((uint16_t)((value) >> 16)))
7790 
7791 #define swap64(value)   \
7792         (((uint64_t)swap32((uint32_t)((value) & 0xffffffff)) \
7793             << 32) | \
7794         (uint64_t)swap32((uint32_t)((value) >> 32)))
7795 
7796 uint16_t
7797 ddi_swap16(uint16_t value)
7798 {
7799         return (swap16(value));
7800 }
7801 
7802 uint32_t
7803 ddi_swap32(uint32_t value)
7804 {
7805         return (swap32(value));
7806 }
7807 
7808 uint64_t
7809 ddi_swap64(uint64_t value)
7810 {
7811         return (swap64(value));
7812 }
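
/*
 * Illustrative sketch (not part of the original file): the byte-swap
 * routines reverse byte order regardless of host endianness.
 *
 *	uint32_t v = ddi_swap32(0x12345678);
 *
 * After the call, v holds 0x78563412 on any host.  Drivers that only need
 * endian conversion for device accesses usually prefer setting
 * devacc_attr_endian_flags in the ddi_device_acc_attr_t passed to
 * ddi_regs_map_setup(), which lets ddi_get/ddi_put do the swapping
 * implicitly.
 */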
7813 
7814 /*
7815  * Convert a binding name to a driver name.
7816  * A binding name is the name used to determine the driver for a
7817  * device - it may be either an alias for the driver or the name
7818  * of the driver itself.
7819  */
7820 char *
7821 i_binding_to_drv_name(char *bname)
7822 {
7823         major_t major_no;
7824 
7825         ASSERT(bname != NULL);
7826 
7827         if ((major_no = ddi_name_to_major(bname)) == DDI_MAJOR_T_NONE)
7828                 return (NULL);
7829         return (ddi_major_to_name(major_no));
7830 }
7831 
7832 /*
7833  * Search for minor name that has specified dev_t and spec_type.
7834  * If spec_type is zero then any dev_t match works.  Since we
7835  * are returning a pointer to the minor name string, we require the
7836  * caller to do the locking.
7837  */
7838 char *
7839 i_ddi_devtspectype_to_minorname(dev_info_t *dip, dev_t dev, int spec_type)
7840 {
7841         struct ddi_minor_data   *dmdp;
7842 
7843         /*
7844          * The did layered driver currently intentionally returns a
7845          * devinfo ptr for an underlying sd instance based on a did
7846          * dev_t. In this case it is not an error.
7847          *
7848          * The did layered driver is associated with Sun Cluster.
7849          */
7850         ASSERT((ddi_driver_major(dip) == getmajor(dev)) ||
7851             (strcmp(ddi_major_to_name(getmajor(dev)), "did") == 0));
7852 
7853         ASSERT(DEVI_BUSY_OWNED(dip));
7854         for (dmdp = DEVI(dip)->devi_minor; dmdp; dmdp = dmdp->next) {
7855                 if (((dmdp->type == DDM_MINOR) ||
7856                     (dmdp->type == DDM_INTERNAL_PATH) ||
7857                     (dmdp->type == DDM_DEFAULT)) &&
7858                     (dmdp->ddm_dev == dev) &&
7859                     ((((spec_type & (S_IFCHR|S_IFBLK))) == 0) ||
7860                     (dmdp->ddm_spec_type == spec_type)))
7861                         return (dmdp->ddm_name);
7862         }
7863 
7864         return (NULL);
7865 }
7866 
7867 /*
7868  * Find the devt and spectype of the specified minor_name.
7869  * Return DDI_FAILURE if the minor_name is not found.  Since we return
7870  * everything through the arguments, we can do the locking here ourselves.
7871  */
7872 int
7873 i_ddi_minorname_to_devtspectype(dev_info_t *dip, char *minor_name,
7874         dev_t *devtp, int *spectypep)
7875 {
7876         int                     circ;
7877         struct ddi_minor_data   *dmdp;
7878 
7879         /* deal with clone minor nodes */
7880         if (dip == clone_dip) {
7881                 major_t major;
7882                 /*
7883                  * Make sure minor_name is a STREAMS driver.
7884                  * We load the driver but don't attach to any instances.
7885                  */
7886 
7887                 major = ddi_name_to_major(minor_name);
7888                 if (major == DDI_MAJOR_T_NONE)
7889                         return (DDI_FAILURE);
7890 
7891                 if (ddi_hold_driver(major) == NULL)
7892                         return (DDI_FAILURE);
7893 
7894                 if (STREAMSTAB(major) == NULL) {
7895                         ddi_rele_driver(major);
7896                         return (DDI_FAILURE);
7897                 }
7898                 ddi_rele_driver(major);
7899 
7900                 if (devtp)
7901                         *devtp = makedevice(clone_major, (minor_t)major);
7902 
7903                 if (spectypep)
7904                         *spectypep = S_IFCHR;
7905 
7906                 return (DDI_SUCCESS);
7907         }
7908 
7909         ndi_devi_enter(dip, &circ);
7910         for (dmdp = DEVI(dip)->devi_minor; dmdp; dmdp = dmdp->next) {
7911                 if (((dmdp->type != DDM_MINOR) &&
7912                     (dmdp->type != DDM_INTERNAL_PATH) &&
7913                     (dmdp->type != DDM_DEFAULT)) ||
7914                     strcmp(minor_name, dmdp->ddm_name))
7915                         continue;
7916 
7917                 if (devtp)
7918                         *devtp = dmdp->ddm_dev;
7919 
7920                 if (spectypep)
7921                         *spectypep = dmdp->ddm_spec_type;
7922 
7923                 ndi_devi_exit(dip, circ);
7924                 return (DDI_SUCCESS);
7925         }
7926         ndi_devi_exit(dip, circ);
7927 
7928         return (DDI_FAILURE);
7929 }
7930 
7931 static kmutex_t devid_gen_mutex;
7932 static short    devid_gen_number;
7933 
7934 #ifdef DEBUG
7935 
7936 static int      devid_register_corrupt = 0;
7937 static int      devid_register_corrupt_major = 0;
7938 static int      devid_register_corrupt_hint = 0;
7939 static int      devid_register_corrupt_hint_major = 0;
7940 
7941 static int devid_lyr_debug = 0;
7942 
7943 #define DDI_DEBUG_DEVID_DEVTS(msg, ndevs, devs)         \
7944         if (devid_lyr_debug)                                    \
7945                 ddi_debug_devid_devts(msg, ndevs, devs)
7946 
7947 #else
7948 
7949 #define DDI_DEBUG_DEVID_DEVTS(msg, ndevs, devs)
7950 
7951 #endif /* DEBUG */
7952 
7953 
7954 #ifdef  DEBUG
7955 
7956 static void
7957 ddi_debug_devid_devts(char *msg, int ndevs, dev_t *devs)
7958 {
7959         int i;
7960 
7961         cmn_err(CE_CONT, "%s:\n", msg);
7962         for (i = 0; i < ndevs; i++) {
7963                 cmn_err(CE_CONT, "    0x%lx\n", devs[i]);
7964         }
7965 }
7966 
7967 static void
7968 ddi_debug_devid_paths(char *msg, int npaths, char **paths)
7969 {
7970         int i;
7971 
7972         cmn_err(CE_CONT, "%s:\n", msg);
7973         for (i = 0; i < npaths; i++) {
7974                 cmn_err(CE_CONT, "    %s\n", paths[i]);
7975         }
7976 }
7977 
7978 static void
7979 ddi_debug_devid_devts_per_path(char *path, int ndevs, dev_t *devs)
7980 {
7981         int i;
7982 
7983         cmn_err(CE_CONT, "dev_ts per path %s\n", path);
7984         for (i = 0; i < ndevs; i++) {
7985                 cmn_err(CE_CONT, "    0x%lx\n", devs[i]);
7986         }
7987 }
7988 
7989 #endif  /* DEBUG */
7990 
7991 /*
7992  * Register device id into DDI framework.
7993  * Must be called when the driver is bound.
7994  */
7995 static int
7996 i_ddi_devid_register(dev_info_t *dip, ddi_devid_t devid)
7997 {
7998         impl_devid_t    *i_devid = (impl_devid_t *)devid;
7999         size_t          driver_len;
8000         const char      *driver_name;
8001         char            *devid_str;
8002         major_t         major;
8003 
8004         if ((dip == NULL) ||
8005             ((major = ddi_driver_major(dip)) == DDI_MAJOR_T_NONE))
8006                 return (DDI_FAILURE);
8007 
8008         /* verify that the devid is valid */
8009         if (ddi_devid_valid(devid) != DDI_SUCCESS)
8010                 return (DDI_FAILURE);
8011 
8012         /* Update the driver name hint in the devid */
8013         driver_name = ddi_driver_name(dip);
8014         driver_len = strlen(driver_name);
8015         if (driver_len > DEVID_HINT_SIZE) {
8016                 /* Pick up last four characters of driver name */
8017                 driver_name += driver_len - DEVID_HINT_SIZE;
8018                 driver_len = DEVID_HINT_SIZE;
8019         }
8020         bzero(i_devid->did_driver, DEVID_HINT_SIZE);
8021         bcopy(driver_name, i_devid->did_driver, driver_len);
8022 
8023 #ifdef DEBUG
8024         /* Corrupt the devid for testing. */
8025         if (devid_register_corrupt)
8026                 i_devid->did_id[0] += devid_register_corrupt;
8027         if (devid_register_corrupt_major &&
8028             (major == devid_register_corrupt_major))
8029                 i_devid->did_id[0] += 1;
8030         if (devid_register_corrupt_hint)
8031                 i_devid->did_driver[0] += devid_register_corrupt_hint;
8032         if (devid_register_corrupt_hint_major &&
8033             (major == devid_register_corrupt_hint_major))
8034                 i_devid->did_driver[0] += 1;
8035 #endif /* DEBUG */
8036 
8037         /* encode the devid as a string */
8038         if ((devid_str = ddi_devid_str_encode(devid, NULL)) == NULL)
8039                 return (DDI_FAILURE);
8040 
8041         /* add string as a string property */
8042         if (ndi_prop_update_string(DDI_DEV_T_NONE, dip,
8043             DEVID_PROP_NAME, devid_str) != DDI_SUCCESS) {
8044                 cmn_err(CE_WARN, "%s%d: devid property update failed",
8045                     ddi_driver_name(dip), ddi_get_instance(dip));
8046                 ddi_devid_str_free(devid_str);
8047                 return (DDI_FAILURE);
8048         }
8049 
8050         /* keep pointer to devid string for interrupt context fma code */
8051         if (DEVI(dip)->devi_devid_str)
8052                 ddi_devid_str_free(DEVI(dip)->devi_devid_str);
8053         DEVI(dip)->devi_devid_str = devid_str;
8054         return (DDI_SUCCESS);
8055 }
8056 
8057 int
8058 ddi_devid_register(dev_info_t *dip, ddi_devid_t devid)
8059 {
8060         int rval;
8061 
8062         rval = i_ddi_devid_register(dip, devid);
8063         if (rval == DDI_SUCCESS) {
8064                 /*
8065                  * Register devid in devid-to-path cache
8066                  */
8067                 if (e_devid_cache_register(dip, devid) == DDI_SUCCESS) {
8068                         mutex_enter(&DEVI(dip)->devi_lock);
8069                         DEVI(dip)->devi_flags |= DEVI_CACHED_DEVID;
8070                         mutex_exit(&DEVI(dip)->devi_lock);
8071                 } else if (ddi_get_name_addr(dip)) {
8072                         /*
8073                          * We only expect cache_register DDI_FAILURE when we
8074                          * can't form the full path because of NULL devi_addr.
8075                          */
8076                         cmn_err(CE_WARN, "%s%d: failed to cache devid",
8077                             ddi_driver_name(dip), ddi_get_instance(dip));
8078                 }
8079         } else {
8080                 cmn_err(CE_WARN, "%s%d: failed to register devid",
8081                     ddi_driver_name(dip), ddi_get_instance(dip));
8082         }
8083         return (rval);
8084 }
8085 
8086 /*
8087  * Remove (unregister) device id from DDI framework.
8088  * Must be called when device is detached.
8089  */
8090 static void
8091 i_ddi_devid_unregister(dev_info_t *dip)
8092 {
8093         if (DEVI(dip)->devi_devid_str) {
8094                 ddi_devid_str_free(DEVI(dip)->devi_devid_str);
8095                 DEVI(dip)->devi_devid_str = NULL;
8096         }
8097 
8098         /* remove the devid property */
8099         (void) ndi_prop_remove(DDI_DEV_T_NONE, dip, DEVID_PROP_NAME);
8100 }
8101 
8102 void
8103 ddi_devid_unregister(dev_info_t *dip)
8104 {
8105         mutex_enter(&DEVI(dip)->devi_lock);
8106         DEVI(dip)->devi_flags &= ~DEVI_CACHED_DEVID;
8107         mutex_exit(&DEVI(dip)->devi_lock);
8108         e_devid_cache_unregister(dip);
8109         i_ddi_devid_unregister(dip);
8110 }
8111 
8112 /*
8113  * Allocate and initialize a device id.
8114  */
8115 int
8116 ddi_devid_init(
8117         dev_info_t      *dip,
8118         ushort_t        devid_type,
8119         ushort_t        nbytes,
8120         void            *id,
8121         ddi_devid_t     *ret_devid)
8122 {
8123         impl_devid_t    *i_devid;
8124         int             sz = sizeof (*i_devid) + nbytes - sizeof (char);
8125         int             driver_len;
8126         const char      *driver_name;
8127 
8128         switch (devid_type) {
8129         case DEVID_SCSI3_WWN:
8130                 /*FALLTHRU*/
8131         case DEVID_SCSI_SERIAL:
8132                 /*FALLTHRU*/
8133         case DEVID_ATA_SERIAL:
8134                 /*FALLTHRU*/
8135         case DEVID_ENCAP:
8136                 if (nbytes == 0)
8137                         return (DDI_FAILURE);
8138                 if (id == NULL)
8139                         return (DDI_FAILURE);
8140                 break;
8141         case DEVID_FAB:
8142                 if (nbytes != 0)
8143                         return (DDI_FAILURE);
8144                 if (id != NULL)
8145                         return (DDI_FAILURE);
8146                 nbytes = sizeof (int) +
8147                     sizeof (struct timeval32) + sizeof (short);
8148                 sz += nbytes;
8149                 break;
8150         default:
8151                 return (DDI_FAILURE);
8152         }
8153 
8154         if ((i_devid = kmem_zalloc(sz, KM_SLEEP)) == NULL)
8155                 return (DDI_FAILURE);
8156 
8157         i_devid->did_magic_hi = DEVID_MAGIC_MSB;
8158         i_devid->did_magic_lo = DEVID_MAGIC_LSB;
8159         i_devid->did_rev_hi = DEVID_REV_MSB;
8160         i_devid->did_rev_lo = DEVID_REV_LSB;
8161         DEVID_FORMTYPE(i_devid, devid_type);
8162         DEVID_FORMLEN(i_devid, nbytes);
8163 
8164         /* Fill in driver name hint */
8165         driver_name = ddi_driver_name(dip);
8166         driver_len = strlen(driver_name);
8167         if (driver_len > DEVID_HINT_SIZE) {
8168                 /* Pick up last four characters of driver name */
8169                 driver_name += driver_len - DEVID_HINT_SIZE;
8170                 driver_len = DEVID_HINT_SIZE;
8171         }
8172 
8173         bcopy(driver_name, i_devid->did_driver, driver_len);
8174 
8175         /* Fill in id field */
8176         if (devid_type == DEVID_FAB) {
8177                 char            *cp;
8178                 uint32_t        hostid;
8179                 struct timeval32 timestamp32;
8180                 int             i;
8181                 int             *ip;
8182                 short           gen;
8183 
8184                 /* increase the generation number */
8185                 mutex_enter(&devid_gen_mutex);
8186                 gen = devid_gen_number++;
8187                 mutex_exit(&devid_gen_mutex);
8188 
8189                 cp = i_devid->did_id;
8190 
8191                 /* Fill in host id (big-endian byte ordering) */
8192                 hostid = zone_get_hostid(NULL);
8193                 *cp++ = hibyte(hiword(hostid));
8194                 *cp++ = lobyte(hiword(hostid));
8195                 *cp++ = hibyte(loword(hostid));
8196                 *cp++ = lobyte(loword(hostid));
8197 
8198                 /*
8199                  * Fill in timestamp (big-endian byte ordering)
8200                  *
8201                  * (Note that the format may have to be changed
8202                  * before 2038 comes around, though it's arguably
8203                  * unique enough as it is..)
8204                  */
8205                 uniqtime32(&timestamp32);
8206                 ip = (int *)&timestamp32;
8207                 for (i = 0;
8208                     i < sizeof (timestamp32) / sizeof (int); i++, ip++) {
8209                         int     val;
8210                         val = *ip;
8211                         *cp++ = hibyte(hiword(val));
8212                         *cp++ = lobyte(hiword(val));
8213                         *cp++ = hibyte(loword(val));
8214                         *cp++ = lobyte(loword(val));
8215                 }
8216 
8217                 /* fill in the generation number */
8218                 *cp++ = hibyte(gen);
8219                 *cp++ = lobyte(gen);
8220         } else
8221                 bcopy(id, i_devid->did_id, nbytes);
8222 
8223         /* return device id */
8224         *ret_devid = (ddi_devid_t)i_devid;
8225         return (DDI_SUCCESS);
8226 }
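
/*
 * Illustrative usage sketch (not part of the original file): a driver's
 * attach(9E) path might fabricate and register a devid from a unit serial
 * number.  The "serial" buffer and its contents are hypothetical.
 *
 *	ddi_devid_t devid;
 *
 *	if (ddi_devid_init(dip, DEVID_SCSI_SERIAL, sizeof (serial),
 *	    serial, &devid) == DDI_SUCCESS)
 *		(void) ddi_devid_register(dip, devid);
 *
 * The devid is typically kept in the soft state and released with
 * ddi_devid_free() at detach time, after ddi_devid_unregister().
 */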
8227 
8228 int
8229 ddi_devid_get(dev_info_t *dip, ddi_devid_t *ret_devid)
8230 {
8231         return (i_ddi_devi_get_devid(DDI_DEV_T_ANY, dip, ret_devid));
8232 }
8233 
8234 int
8235 i_ddi_devi_get_devid(dev_t dev, dev_info_t *dip, ddi_devid_t *ret_devid)
8236 {
8237         char            *devidstr;
8238 
8239         ASSERT(dev != DDI_DEV_T_NONE);
8240 
8241         /* look up the property, devt specific first */
8242         if (ddi_prop_lookup_string(dev, dip, DDI_PROP_DONTPASS,
8243             DEVID_PROP_NAME, &devidstr) != DDI_PROP_SUCCESS) {
8244                 if ((dev == DDI_DEV_T_ANY) ||
8245                     (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip,
8246                     DDI_PROP_DONTPASS, DEVID_PROP_NAME, &devidstr) !=
8247                     DDI_PROP_SUCCESS)) {
8248                         return (DDI_FAILURE);
8249                 }
8250         }
8251 
8252         /* convert to binary form */
8253         if (ddi_devid_str_decode(devidstr, ret_devid, NULL) == -1) {
8254                 ddi_prop_free(devidstr);
8255                 return (DDI_FAILURE);
8256         }
8257         ddi_prop_free(devidstr);
8258         return (DDI_SUCCESS);
8259 }
8260 
8261 /*
8262  * Return a copy of the device id for dev_t
8263  */
8264 int
8265 ddi_lyr_get_devid(dev_t dev, ddi_devid_t *ret_devid)
8266 {
8267         dev_info_t      *dip;
8268         int             rval;
8269 
8270         /* get the dip */
8271         if ((dip = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
8272                 return (DDI_FAILURE);
8273 
8274         rval = i_ddi_devi_get_devid(dev, dip, ret_devid);
8275 
8276         ddi_release_devi(dip);          /* e_ddi_hold_devi_by_dev() */
8277         return (rval);
8278 }
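
/*
 * Illustrative usage sketch (not part of the original file): a layered
 * consumer can fetch and print the devid of an underlying dev_t.
 *
 *	ddi_devid_t devid;
 *	char *str;
 *
 *	if (ddi_lyr_get_devid(dev, &devid) == DDI_SUCCESS) {
 *		if ((str = ddi_devid_str_encode(devid, NULL)) != NULL) {
 *			cmn_err(CE_CONT, "devid: %s\n", str);
 *			ddi_devid_str_free(str);
 *		}
 *		ddi_devid_free(devid);
 *	}
 */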
8279 
8280 /*
8281  * Return a copy of the minor name for dev_t and spec_type
8282  */
8283 int
8284 ddi_lyr_get_minor_name(dev_t dev, int spec_type, char **minor_name)
8285 {
8286         char            *buf;
8287         int             circ;
8288         dev_info_t      *dip;
8289         char            *nm;
8290         int             rval;
8291 
8292         if ((dip = e_ddi_hold_devi_by_dev(dev, 0)) == NULL) {
8293                 *minor_name = NULL;
8294                 return (DDI_FAILURE);
8295         }
8296 
8297         /* Find the minor name and copy into max size buf */
8298         buf = kmem_alloc(MAXNAMELEN, KM_SLEEP);
8299         ndi_devi_enter(dip, &circ);
8300         nm = i_ddi_devtspectype_to_minorname(dip, dev, spec_type);
8301         if (nm)
8302                 (void) strcpy(buf, nm);
8303         ndi_devi_exit(dip, circ);
8304         ddi_release_devi(dip);  /* e_ddi_hold_devi_by_dev() */
8305 
8306         if (nm) {
8307                 /* duplicate into min size buf for return result */
8308                 *minor_name = i_ddi_strdup(buf, KM_SLEEP);
8309                 rval = DDI_SUCCESS;
8310         } else {
8311                 *minor_name = NULL;
8312                 rval = DDI_FAILURE;
8313         }
8314 
8315         /* free max size buf and return */
8316         kmem_free(buf, MAXNAMELEN);
8317         return (rval);
8318 }
8319 
8320 int
8321 ddi_lyr_devid_to_devlist(
8322         ddi_devid_t     devid,
8323         char            *minor_name,
8324         int             *retndevs,
8325         dev_t           **retdevs)
8326 {
8327         ASSERT(ddi_devid_valid(devid) == DDI_SUCCESS);
8328 
8329         if (e_devid_cache_to_devt_list(devid, minor_name,
8330             retndevs, retdevs) == DDI_SUCCESS) {
8331                 ASSERT(*retndevs > 0);
8332                 DDI_DEBUG_DEVID_DEVTS("ddi_lyr_devid_to_devlist",
8333                     *retndevs, *retdevs);
8334                 return (DDI_SUCCESS);
8335         }
8336 
8337         if (e_ddi_devid_discovery(devid) == DDI_FAILURE) {
8338                 return (DDI_FAILURE);
8339         }
8340 
8341         if (e_devid_cache_to_devt_list(devid, minor_name,
8342             retndevs, retdevs) == DDI_SUCCESS) {
8343                 ASSERT(*retndevs > 0);
8344                 DDI_DEBUG_DEVID_DEVTS("ddi_lyr_devid_to_devlist",
8345                     *retndevs, *retdevs);
8346                 return (DDI_SUCCESS);
8347         }
8348 
8349         return (DDI_FAILURE);
8350 }
8351 
8352 void
8353 ddi_lyr_free_devlist(dev_t *devlist, int ndevs)
8354 {
8355         kmem_free(devlist, sizeof (dev_t) * ndevs);
8356 }
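
/*
 * Illustrative usage sketch (not part of the original file): translating a
 * devid/minor-name pair back to the matching dev_t list and releasing it.
 *
 *	dev_t	*devs;
 *	int	ndevs, i;
 *
 *	if (ddi_lyr_devid_to_devlist(devid, minor_name, &ndevs, &devs) ==
 *	    DDI_SUCCESS) {
 *		for (i = 0; i < ndevs; i++)
 *			cmn_err(CE_CONT, "    0x%lx\n", devs[i]);
 *		ddi_lyr_free_devlist(devs, ndevs);
 *	}
 */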
8357 
8358 /*
8359  * Note: This will need to be fixed if we ever allow processes to
8360  * have more than one data model per exec.
8361  */
8362 model_t
8363 ddi_mmap_get_model(void)
8364 {
8365         return (get_udatamodel());
8366 }
8367 
8368 model_t
8369 ddi_model_convert_from(model_t model)
8370 {
8371         return ((model & DDI_MODEL_MASK) & ~DDI_MODEL_NATIVE);
8372 }
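
/*
 * Illustrative usage sketch (not part of the original file): an ioctl(9E)
 * routine commonly uses ddi_model_convert_from() to copy in a structure
 * whose layout differs between ILP32 and LP64 callers.  The xx_args and
 * xx_args32 structures and their fields below are hypothetical.
 *
 *	struct xx_args args;
 *	struct xx_args32 args32;
 *
 *	switch (ddi_model_convert_from(mode & FMODELS)) {
 *	case DDI_MODEL_ILP32:
 *		if (ddi_copyin((void *)arg, &args32, sizeof (args32), mode))
 *			return (EFAULT);
 *		args.offset = (off_t)args32.offset;
 *		break;
 *	case DDI_MODEL_NONE:
 *		if (ddi_copyin((void *)arg, &args, sizeof (args), mode))
 *			return (EFAULT);
 *		break;
 *	}
 */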
8373 
8374 /*
8375  * ddi interfaces managing storage and retrieval of eventcookies.
8376  */
8377 
8378 /*
8379  * Invoke bus nexus driver's implementation of the
8380  * (*bus_remove_eventcall)() interface to remove a registered
8381  * callback handler for "event".
8382  */
8383 int
8384 ddi_remove_event_handler(ddi_callback_id_t id)
8385 {
8386         ndi_event_callbacks_t *cb = (ndi_event_callbacks_t *)id;
8387         dev_info_t *ddip;
8388 
8389         ASSERT(cb);
8390         if (!cb) {
8391                 return (DDI_FAILURE);
8392         }
8393 
8394         ddip = NDI_EVENT_DDIP(cb->ndi_evtcb_cookie);
8395         return (ndi_busop_remove_eventcall(ddip, id));
8396 }
8397 
8398 /*
8399  * Invoke bus nexus driver's implementation of the
8400  * (*bus_add_eventcall)() interface to register a callback handler
8401  * for "event".
8402  */
8403 int
8404 ddi_add_event_handler(dev_info_t *dip, ddi_eventcookie_t event,
8405     void (*handler)(dev_info_t *, ddi_eventcookie_t, void *, void *),
8406     void *arg, ddi_callback_id_t *id)
8407 {
8408         return (ndi_busop_add_eventcall(dip, dip, event, handler, arg, id));
8409 }
8410 
8411 
8412 /*
8413  * Return a handle for event "name" by calling up the device tree
8414  * hierarchy via  (*bus_get_eventcookie)() interface until claimed
8415  * by a bus nexus or top of dev_info tree is reached.
8416  */
8417 int
8418 ddi_get_eventcookie(dev_info_t *dip, char *name,
8419     ddi_eventcookie_t *event_cookiep)
8420 {
8421         return (ndi_busop_get_eventcookie(dip, dip,
8422             name, event_cookiep));
8423 }
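
/*
 * Illustrative usage sketch (not part of the original file): registering
 * for an NDI-defined event, e.g. device removal, typically from attach(9E).
 * xx_remove_handler and statep are hypothetical.
 *
 *	ddi_eventcookie_t cookie;
 *	ddi_callback_id_t cb_id;
 *
 *	if (ddi_get_eventcookie(dip, DDI_DEVI_REMOVE_EVENT, &cookie) ==
 *	    DDI_SUCCESS)
 *		(void) ddi_add_event_handler(dip, cookie, xx_remove_handler,
 *		    statep, &cb_id);
 *
 * The matching detach(9E) path would call ddi_remove_event_handler(cb_id).
 */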
8424 
8425 /*
8426  * This procedure is provided as the general callback function when
8427  * umem_lockmemory calls as_add_callback for long term memory locking.
8428  * When as_unmap, as_setprot, or as_free encounter segments which have
8429  * locked memory, this callback will be invoked.
8430  */
8431 void
8432 umem_lock_undo(struct as *as, void *arg, uint_t event)
8433 {
8434         _NOTE(ARGUNUSED(as, event))
8435         struct ddi_umem_cookie *cp = (struct ddi_umem_cookie *)arg;
8436 
8437         /*
8438          * Call the cleanup function.  Decrement the cookie reference
8439          * count, if it goes to zero, return the memory for the cookie.
8440          * The i_ddi_umem_unlock for this cookie may or may not have been
8441          * called already.  It is the responsibility of the caller of
8442          * umem_lockmemory to handle the case of the cleanup routine
8443          * being called after a ddi_umem_unlock for the cookie
8444          * was called.
8445          */
8446 
8447         (*cp->callbacks.cbo_umem_lock_cleanup)((ddi_umem_cookie_t)cp);
8448 
8449         /* remove the cookie if reference goes to zero */
8450         if (atomic_add_long_nv((ulong_t *)(&(cp->cook_refcnt)), -1) == 0) {
8451                 kmem_free(cp, sizeof (struct ddi_umem_cookie));
8452         }
8453 }
8454 
8455 /*
8456  * The following two Consolidation Private routines provide generic
8457  * interfaces to increase/decrease the amount of device-locked memory.
8458  *
8459  * To keep project_rele and project_hold consistent, i_ddi_decr_locked_memory()
8460  * must be called every time i_ddi_incr_locked_memory() is called.
8461  */
8462 /* ARGSUSED */
8463 int
8464 i_ddi_incr_locked_memory(proc_t *procp, rctl_qty_t inc)
8465 {
8466         ASSERT(procp != NULL);
8467         mutex_enter(&procp->p_lock);
8468         if (rctl_incr_locked_mem(procp, NULL, inc, 1)) {
8469                 mutex_exit(&procp->p_lock);
8470                 return (ENOMEM);
8471         }
8472         mutex_exit(&procp->p_lock);
8473         return (0);
8474 }
8475 
8476 /*
8477  * To keep project_rele and project_hold consistent, i_ddi_incr_locked_memory()
8478  * must be called every time i_ddi_decr_locked_memory() is called.
8479  */
8480 /* ARGSUSED */
8481 void
8482 i_ddi_decr_locked_memory(proc_t *procp, rctl_qty_t dec)
8483 {
8484         ASSERT(procp != NULL);
8485         mutex_enter(&procp->p_lock);
8486         rctl_decr_locked_mem(procp, NULL, dec, 1);
8487         mutex_exit(&procp->p_lock);
8488 }
8489 
8490 /*
8491  * The cookie->upd_max_lock_rctl flag is used to determine if we should
8492  * charge device locked memory to the max-locked-memory rctl.  Tracking
8493  * device locked memory causes the rctl locks to get hot under high-speed
8494  * I/O such as RDSv3 over IB.  If there is no max-locked-memory rctl limit,
8495  * we bypass charging the locked memory to the rctl altogether.  The cookie's
8496  * flag tells us if the rctl value should be updated when unlocking the memory,
8497  * in case the rctl gets changed after the memory was locked.  Any device
8498  * locked memory in that rare case will not be counted toward the rctl limit.
8499  *
8500  * When tracking the locked memory, the kproject_t parameter is always NULL
8501  * in the code paths:
8502  *      i_ddi_incr_locked_memory -> rctl_incr_locked_mem
8503  *      i_ddi_decr_locked_memory -> rctl_decr_locked_mem
8504  * Thus, we always use the tk_proj member to check the projp setting.
8505  */
8506 static void
8507 init_lockedmem_rctl_flag(struct ddi_umem_cookie *cookie)
8508 {
8509         proc_t          *p;
8510         kproject_t      *projp;
8511         zone_t          *zonep;
8512 
8513         ASSERT(cookie);
8514         p = cookie->procp;
8515         ASSERT(p);
8516 
8517         zonep = p->p_zone;
8518         projp = p->p_task->tk_proj;
8519 
8520         ASSERT(zonep);
8521         ASSERT(projp);
8522 
8523         if (zonep->zone_locked_mem_ctl == UINT64_MAX &&
8524             projp->kpj_data.kpd_locked_mem_ctl == UINT64_MAX)
8525                 cookie->upd_max_lock_rctl = 0;
8526         else
8527                 cookie->upd_max_lock_rctl = 1;
8528 }
8529 
8530 /*
8531  * This routine checks whether the max-locked-memory resource control
8532  * would be exceeded; if not, it increments the count and takes a hold
8533  * on the project.  Returns 0 on success, otherwise an error code.
8534  */
8535 static int
8536 umem_incr_devlockmem(struct ddi_umem_cookie *cookie)
8537 {
8538         proc_t          *procp;
8539         int             ret;
8540 
8541         ASSERT(cookie);
8542         if (cookie->upd_max_lock_rctl == 0)
8543                 return (0);
8544 
8545         procp = cookie->procp;
8546         ASSERT(procp);
8547 
8548         if ((ret = i_ddi_incr_locked_memory(procp,
8549             cookie->size)) != 0) {
8550                 return (ret);
8551         }
8552         return (0);
8553 }
8554 
8555 /*
8556  * Decrements the max-locked-memory resource control and releases
8557  * the hold on the project that was acquired during umem_incr_devlockmem().
8558  */
8559 static void
8560 umem_decr_devlockmem(struct ddi_umem_cookie *cookie)
8561 {
8562         proc_t          *proc;
8563 
8564         if (cookie->upd_max_lock_rctl == 0)
8565                 return;
8566 
8567         proc = (proc_t *)cookie->procp;
8568         if (!proc)
8569                 return;
8570 
8571         i_ddi_decr_locked_memory(proc, cookie->size);
8572 }
8573 
8574 /*
8575  * A consolidation private function which is essentially equivalent to
8576  * ddi_umem_lock but with the addition of arguments ops_vector and procp.
8577  * A call to as_add_callback is done if DDI_UMEMLOCK_LONGTERM is set, and
8578  * the ops_vector is valid.
8579  *
8580  * Lock the virtual address range in the current process and create a
8581  * ddi_umem_cookie (of type UMEM_LOCKED). This can be used to pass to
8582  * ddi_umem_iosetup to create a buf or do devmap_umem_setup/remap to export
8583  * to user space.
8584  *
8585  * Note: The resource control accounting currently uses a full-charge model;
8586  * in other words, attempts to lock the same or overlapping areas of memory
8587  * will deduct the full size of the buffer from the project's running
8588  * counter for device-locked memory.
8589  *
8590  * addr and len must be PAGESIZE-aligned
8591  *
8592  * flags - DDI_UMEMLOCK_READ, DDI_UMEMLOCK_WRITE or both
8593  *      identifies whether the locked memory will be read or written or both
8594  *      DDI_UMEMLOCK_LONGTERM  must be set when the locking will
8595  * be maintained for an indefinitely long period (essentially permanent),
8596  * rather than for what would be required for a typical I/O completion.
8597  * When DDI_UMEMLOCK_LONGTERM is set, umem_lockmemory will return EFAULT
8598  * if the memory pertains to a regular file which is mapped MAP_SHARED.
8599  * This is to prevent a deadlock if a file truncation is attempted
8600  * after the locking is done.
8601  *
8602  * Returns 0 on success
8603  *      EINVAL - for invalid parameters
8604  *      EPERM, ENOMEM and other error codes returned by as_pagelock
8605  *      ENOMEM - is returned if the current request to lock memory exceeds
8606  *              *.max-locked-memory resource control value.
8607  *      EFAULT - memory pertains to a regular file mapped shared and
8608  *              the DDI_UMEMLOCK_LONGTERM flag is set
8609  *      EAGAIN - could not start the ddi_umem_unlock list processing thread
8610  */
8611 int
8612 umem_lockmemory(caddr_t addr, size_t len, int flags, ddi_umem_cookie_t *cookie,
8613                 struct umem_callback_ops *ops_vector,
8614                 proc_t *procp)
8615 {
8616         int     error;
8617         struct ddi_umem_cookie *p;
8618         void    (*driver_callback)() = NULL;
8619         struct as *as;
8620         struct seg              *seg;
8621         vnode_t                 *vp;
8622 
8623         /* Allow device drivers to not have to reference "curproc" */
8624         if (procp == NULL)
8625                 procp = curproc;
8626         as = procp->p_as;
8627         *cookie = NULL;         /* in case of any error return */
8628 
8629         /* These are the only three valid flags */
8630         if ((flags & ~(DDI_UMEMLOCK_READ | DDI_UMEMLOCK_WRITE |
8631             DDI_UMEMLOCK_LONGTERM)) != 0)
8632                 return (EINVAL);
8633 
8634         /* At least one (can be both) of the two access flags must be set */
8635         if ((flags & (DDI_UMEMLOCK_READ | DDI_UMEMLOCK_WRITE)) == 0)
8636                 return (EINVAL);
8637 
8638         /* addr and len must be page-aligned */
8639         if (((uintptr_t)addr & PAGEOFFSET) != 0)
8640                 return (EINVAL);
8641 
8642         if ((len & PAGEOFFSET) != 0)
8643                 return (EINVAL);
8644 
8645         /*
8646          * For longterm locking a driver callback must be specified; if
8647          * not longterm then a callback is optional.
8648          */
8649         if (ops_vector != NULL) {
8650                 if (ops_vector->cbo_umem_callback_version !=
8651                     UMEM_CALLBACK_VERSION)
8652                         return (EINVAL);
8653                 else
8654                         driver_callback = ops_vector->cbo_umem_lock_cleanup;
8655         }
8656         if ((driver_callback == NULL) && (flags & DDI_UMEMLOCK_LONGTERM))
8657                 return (EINVAL);
8658 
8659         /*
8660          * Call i_ddi_umem_unlock_thread_start if necessary.  It will
8661          * be called on first ddi_umem_lock or umem_lockmemory call.
8662          */
8663         if (ddi_umem_unlock_thread == NULL)
8664                 i_ddi_umem_unlock_thread_start();
8665 
8666         /* Allocate memory for the cookie */
8667         p = kmem_zalloc(sizeof (struct ddi_umem_cookie), KM_SLEEP);
8668 
8669         /* Convert the flags to seg_rw type */
8670         if (flags & DDI_UMEMLOCK_WRITE) {
8671                 p->s_flags = S_WRITE;
8672         } else {
8673                 p->s_flags = S_READ;
8674         }
8675 
8676         /* Store procp in cookie for later iosetup/unlock */
8677         p->procp = (void *)procp;
8678 
8679         /*
8680          * Store the struct as pointer in cookie for later use by
8681          * ddi_umem_unlock.  The proc->p_as will be stale if ddi_umem_unlock
8682          * is called after relvm is called.
8683          */
8684         p->asp = as;
8685 
8686         /*
8687          * The size field is needed for lockmem accounting.
8688          */
8689         p->size = len;
8690         init_lockedmem_rctl_flag(p);
8691 
8692         if (umem_incr_devlockmem(p) != 0) {
8693                 /*
8694                  * The requested memory cannot be locked
8695                  */
8696                 kmem_free(p, sizeof (struct ddi_umem_cookie));
8697                 *cookie = (ddi_umem_cookie_t)NULL;
8698                 return (ENOMEM);
8699         }
8700 
8701         /* Lock the pages corresponding to addr, len in memory */
8702         error = as_pagelock(as, &(p->pparray), addr, len, p->s_flags);
8703         if (error != 0) {
8704                 umem_decr_devlockmem(p);
8705                 kmem_free(p, sizeof (struct ddi_umem_cookie));
8706                 *cookie = (ddi_umem_cookie_t)NULL;
8707                 return (error);
8708         }
8709 
8710         /*
8711  * For longterm locking the addr must pertain to a seg_vn segment
8712  * or a seg_spt segment.
8713          * If the segment pertains to a regular file, it cannot be
8714          * mapped MAP_SHARED.
8715          * This is to prevent a deadlock if a file truncation is attempted
8716          * after the locking is done.
8717          * Doing this after as_pagelock guarantees persistence of the as; if
8718          * an unacceptable segment is found, the cleanup includes calling
8719          * as_pageunlock before returning EFAULT.
8720          *
8721          * segdev is allowed here as it is already locked.  This allows
8722          * for memory exported by drivers through mmap() (which is already
8723          * locked) to be allowed for LONGTERM.
8724          */
8725         if (flags & DDI_UMEMLOCK_LONGTERM) {
8726                 extern  struct seg_ops segspt_shmops;
8727                 extern  struct seg_ops segdev_ops;
8728                 AS_LOCK_ENTER(as, &as->a_lock, RW_READER);
8729                 for (seg = as_segat(as, addr); ; seg = AS_SEGNEXT(as, seg)) {
8730                         if (seg == NULL || seg->s_base > addr + len)
8731                                 break;
8732                         if (seg->s_ops == &segdev_ops)
8733                                 continue;
8734                         if (((seg->s_ops != &segvn_ops) &&
8735                             (seg->s_ops != &segspt_shmops)) ||
8736                             ((SEGOP_GETVP(seg, addr, &vp) == 0 &&
8737                             vp != NULL && vp->v_type == VREG) &&
8738                             (SEGOP_GETTYPE(seg, addr) & MAP_SHARED))) {
8739                                 as_pageunlock(as, p->pparray,
8740                                     addr, len, p->s_flags);
8741                                 AS_LOCK_EXIT(as, &as->a_lock);
8742                                 umem_decr_devlockmem(p);
8743                                 kmem_free(p, sizeof (struct ddi_umem_cookie));
8744                                 *cookie = (ddi_umem_cookie_t)NULL;
8745                                 return (EFAULT);
8746                         }
8747                 }
8748                 AS_LOCK_EXIT(as, &as->a_lock);
8749         }
8750 
8751 
8752         /* Initialize the fields in the ddi_umem_cookie */
8753         p->cvaddr = addr;
8754         p->type = UMEM_LOCKED;
8755         if (driver_callback != NULL) {
8756                 /* i_ddi_umem_unlock and umem_lock_undo may need the cookie */
8757                 p->cook_refcnt = 2;
8758                 p->callbacks = *ops_vector;
8759         } else {
8760                 /* only i_ddi_umem_unlock needs the cookie */
8761                 p->cook_refcnt = 1;
8762         }
8763 
8764         *cookie = (ddi_umem_cookie_t)p;
8765 
8766         /*
8767          * If a driver callback was specified, add an entry to the
8768          * as struct callback list. The as_pagelock above guarantees
8769          * the persistence of as.
8770          */
8771         if (driver_callback) {
8772                 error = as_add_callback(as, umem_lock_undo, p, AS_ALL_EVENT,
8773                     addr, len, KM_SLEEP);
8774                 if (error != 0) {
8775                         as_pageunlock(as, p->pparray,
8776                             addr, len, p->s_flags);
8777                         umem_decr_devlockmem(p);
8778                         kmem_free(p, sizeof (struct ddi_umem_cookie));
8779                         *cookie = (ddi_umem_cookie_t)NULL;
8780                 }
8781         }
8782         return (error);
8783 }
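
/*
 * Illustrative usage sketch (not part of the original file): a consumer of
 * this Consolidation Private interface supplies a cleanup callback so that
 * long-term locked memory can be reclaimed if the address space goes away.
 * xx_umem_cleanup is a hypothetical function of type
 * void (*)(ddi_umem_cookie_t); uaddr and len are page-aligned values owned
 * by the caller.
 *
 *	struct umem_callback_ops ops;
 *	ddi_umem_cookie_t cookie;
 *
 *	ops.cbo_umem_callback_version = UMEM_CALLBACK_VERSION;
 *	ops.cbo_umem_lock_cleanup = xx_umem_cleanup;
 *
 *	if (umem_lockmemory(uaddr, len,
 *	    DDI_UMEMLOCK_READ | DDI_UMEMLOCK_WRITE | DDI_UMEMLOCK_LONGTERM,
 *	    &cookie, &ops, NULL) != 0)
 *		return (EFAULT);
 */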
8784 
8785 /*
8786  * Unlock the pages locked by ddi_umem_lock or umem_lockmemory and free
8787  * the cookie.  Called from i_ddi_umem_unlock_thread.
8788  */
8789 
8790 static void
8791 i_ddi_umem_unlock(struct ddi_umem_cookie *p)
8792 {
8793         uint_t  rc;
8794 
8795         /*
8796          * There is no way to determine whether a callback to
8797          * umem_lock_undo was registered via as_add_callback.
8798  * (i.e. umem_lockmemory was called with DDI_UMEMLOCK_LONGTERM and
8799          * a valid callback function structure.)  as_delete_callback
8800          * is called to delete a possible registered callback.  If the
8801  * return from as_delete_callback is AS_CALLBACK_DELETED, it
8802  * indicates that there was a callback registered, and that it was
8803          * successfully deleted.  Thus, the cookie reference count
8804          * will never be decremented by umem_lock_undo.  Just return the
8805          * memory for the cookie, since both users of the cookie are done.
8806          * A return of AS_CALLBACK_NOTFOUND indicates a callback was
8807          * never registered.  A return of AS_CALLBACK_DELETE_DEFERRED
8808  * indicates that callback processing is taking place and that
8809  * umem_lock_undo is, or will be, executing and will thus decrement
8810  * the cookie reference count when it is complete.
8811          *
8812          * This needs to be done before as_pageunlock so that the
8813          * persistence of as is guaranteed because of the locked pages.
8814          *
8815          */
8816         rc = as_delete_callback(p->asp, p);
8817 
8818 
8819         /*
8820          * The proc->p_as will be stale if i_ddi_umem_unlock is called
8821          * after relvm is called so use p->asp.
8822          */
8823         as_pageunlock(p->asp, p->pparray, p->cvaddr, p->size, p->s_flags);
8824 
8825         /*
8826          * Now that we have unlocked the memory decrement the
8827          * *.max-locked-memory rctl
8828          */
8829         umem_decr_devlockmem(p);
8830 
8831         if (rc == AS_CALLBACK_DELETED) {
8832                 /* umem_lock_undo will not happen, return the cookie memory */
8833                 ASSERT(p->cook_refcnt == 2);
8834                 kmem_free(p, sizeof (struct ddi_umem_cookie));
8835         } else {
8836                 /*
8837                  * umem_lock_undo may happen if as_delete_callback returned
8838                  * AS_CALLBACK_DELETE_DEFERRED.  In that case, decrement the
8839                  * reference count, atomically, and return the cookie
8840                  * memory if the reference count goes to zero.  The only
8841                  * other value for rc is AS_CALLBACK_NOTFOUND.  In that
8842                  * case, just return the cookie memory.
8843                  */
8844                 if ((rc != AS_CALLBACK_DELETE_DEFERRED) ||
8845                     (atomic_add_long_nv((ulong_t *)(&(p->cook_refcnt)), -1)
8846                     == 0)) {
8847                         kmem_free(p, sizeof (struct ddi_umem_cookie));
8848                 }
8849         }
8850 }
8851 
8852 /*
8853  * i_ddi_umem_unlock_thread - deferred ddi_umem_unlock list handler.
8854  *
8855  * Call i_ddi_umem_unlock for entries in the ddi_umem_unlock list
8856  * until it is empty.  Then, wait for more to be added.  This thread is awoken
8857  * via calls to ddi_umem_unlock.
8858  */
8859 
8860 static void
8861 i_ddi_umem_unlock_thread(void)
8862 {
8863         struct ddi_umem_cookie  *ret_cookie;
8864         callb_cpr_t     cprinfo;
8865 
8866         /* process the ddi_umem_unlock list */
8867         CALLB_CPR_INIT(&cprinfo, &ddi_umem_unlock_mutex,
8868             callb_generic_cpr, "unlock_thread");
8869         for (;;) {
8870                 mutex_enter(&ddi_umem_unlock_mutex);
8871                 if (ddi_umem_unlock_head != NULL) {     /* list not empty */
8872                         ret_cookie = ddi_umem_unlock_head;
8873                         /* take it off the list */
8874                         if ((ddi_umem_unlock_head =
8875                             ddi_umem_unlock_head->unl_forw) == NULL) {
8876                                 ddi_umem_unlock_tail = NULL;
8877                         }
8878                         mutex_exit(&ddi_umem_unlock_mutex);
8879                         /* unlock the pages in this cookie */
8880                         (void) i_ddi_umem_unlock(ret_cookie);
8881                 } else {   /* list is empty, wait for next ddi_umem_unlock */
8882                         CALLB_CPR_SAFE_BEGIN(&cprinfo);
8883                         cv_wait(&ddi_umem_unlock_cv, &ddi_umem_unlock_mutex);
8884                         CALLB_CPR_SAFE_END(&cprinfo, &ddi_umem_unlock_mutex);
8885                         mutex_exit(&ddi_umem_unlock_mutex);
8886                 }
8887         }
8888         /* ddi_umem_unlock_thread does not exit */
8889         /* NOTREACHED */
8890 }
8891 
8892 /*
8893  * Start the thread that will process the ddi_umem_unlock list if it is
8894  * not already started (i_ddi_umem_unlock_thread).
8895  */
8896 static void
8897 i_ddi_umem_unlock_thread_start(void)
8898 {
8899         mutex_enter(&ddi_umem_unlock_mutex);
8900         if (ddi_umem_unlock_thread == NULL) {
8901                 ddi_umem_unlock_thread = thread_create(NULL, 0,
8902                     i_ddi_umem_unlock_thread, NULL, 0, &p0,
8903                     TS_RUN, minclsyspri);
8904         }
8905         mutex_exit(&ddi_umem_unlock_mutex);
8906 }
8907 
8908 /*
8909  * Lock the virtual address range in the current process and create a
8910  * ddi_umem_cookie (of type UMEM_LOCKED). This can be used to pass to
8911  * ddi_umem_iosetup to create a buf or do devmap_umem_setup/remap to export
8912  * to user space.
8913  *
8914  * Note: The resource control accounting currently uses a full-charge model;
8915  * in other words, attempts to lock the same or overlapping areas of memory
8916  * will deduct the full size of the buffer from the project's running
8917  * counter for device-locked memory.  This applies to umem_lockmemory too.
8918  *
8919  * addr and len must be PAGESIZE-aligned
8920  * flags - DDI_UMEMLOCK_READ, DDI_UMEMLOCK_WRITE or both
8921  *      identifies whether the locked memory will be read or written or both
8922  *
8923  * Returns 0 on success
8924  *      EINVAL - for invalid parameters
8925  *      EPERM, ENOMEM and other error codes returned by as_pagelock
8926  *      ENOMEM - is returned if the current request to lock memory exceeds
8927  *              *.max-locked-memory resource control value.
8928  *      EAGAIN - could not start the ddi_umem_unlock list processing thread
8929  */
8930 int
8931 ddi_umem_lock(caddr_t addr, size_t len, int flags, ddi_umem_cookie_t *cookie)
8932 {
8933         int     error;
8934         struct ddi_umem_cookie *p;
8935 
8936         *cookie = NULL;         /* in case of any error return */
8937 
8938         /* These are the only two valid flags */
8939         if ((flags & ~(DDI_UMEMLOCK_READ | DDI_UMEMLOCK_WRITE)) != 0) {
8940                 return (EINVAL);
8941         }
8942 
8943         /* At least one of the two flags (or both) must be set */
8944         if ((flags & (DDI_UMEMLOCK_READ | DDI_UMEMLOCK_WRITE)) == 0) {
8945                 return (EINVAL);
8946         }
8947 
8948         /* addr and len must be page-aligned */
8949         if (((uintptr_t)addr & PAGEOFFSET) != 0) {
8950                 return (EINVAL);
8951         }
8952 
8953         if ((len & PAGEOFFSET) != 0) {
8954                 return (EINVAL);
8955         }
8956 
8957         /*
8958          * Call i_ddi_umem_unlock_thread_start if necessary.  It will
8959          * be called on first ddi_umem_lock or umem_lockmemory call.
8960          */
8961         if (ddi_umem_unlock_thread == NULL)
8962                 i_ddi_umem_unlock_thread_start();
8963 
8964         /* Allocate memory for the cookie */
8965         p = kmem_zalloc(sizeof (struct ddi_umem_cookie), KM_SLEEP);
8966 
8967         /* Convert the flags to seg_rw type */
8968         if (flags & DDI_UMEMLOCK_WRITE) {
8969                 p->s_flags = S_WRITE;
8970         } else {
8971                 p->s_flags = S_READ;
8972         }
8973 
8974         /* Store curproc in cookie for later iosetup/unlock */
8975         p->procp = (void *)curproc;
8976 
8977         /*
8978          * Store the struct as pointer in cookie for later use by
8979          * ddi_umem_unlock.  The proc->p_as will be stale if ddi_umem_unlock
8980          * is called after relvm is called.
8981          */
8982         p->asp = curproc->p_as;
8983         /*
8984          * The size field is needed for lockmem accounting.
8985          */
8986         p->size = len;
8987         init_lockedmem_rctl_flag(p);
8988 
8989         if (umem_incr_devlockmem(p) != 0) {
8990                 /*
8991                  * The requested memory cannot be locked
8992                  */
8993                 kmem_free(p, sizeof (struct ddi_umem_cookie));
8994                 *cookie = (ddi_umem_cookie_t)NULL;
8995                 return (ENOMEM);
8996         }
8997 
8998         /* Lock the pages corresponding to addr, len in memory */
8999         error = as_pagelock(((proc_t *)p->procp)->p_as, &(p->pparray),
9000             addr, len, p->s_flags);
9001         if (error != 0) {
9002                 umem_decr_devlockmem(p);
9003                 kmem_free(p, sizeof (struct ddi_umem_cookie));
9004                 *cookie = (ddi_umem_cookie_t)NULL;
9005                 return (error);
9006         }
9007 
9008         /* Initialize the fields in the ddi_umem_cookie */
9009         p->cvaddr = addr;
9010         p->type = UMEM_LOCKED;
9011         p->cook_refcnt = 1;
9012 
9013         *cookie = (ddi_umem_cookie_t)p;
9014         return (error);
9015 }
9016 
9017 /*
9018  * Add the cookie to the ddi_umem_unlock list.  Pages will be
9019  * unlocked by i_ddi_umem_unlock_thread.
9020  */
9021 
9022 void
9023 ddi_umem_unlock(ddi_umem_cookie_t cookie)
9024 {
9025         struct ddi_umem_cookie  *p = (struct ddi_umem_cookie *)cookie;
9026 
9027         ASSERT(p->type == UMEM_LOCKED);
9028         ASSERT(CPU_ON_INTR(CPU) == 0); /* cannot be high level */
9029         ASSERT(ddi_umem_unlock_thread != NULL);
9030 
9031         p->unl_forw = (struct ddi_umem_cookie *)NULL;        /* end of list */
9032         /*
9033          * If we are called in interrupt context, queue the unlock request
9034          * and notify the i_ddi_umem_unlock thread; otherwise, unlock the
9035          * pages immediately.
9036          */
9037         if (servicing_interrupt()) {
9038                 /* queue the unlock request and notify the thread */
9039                 mutex_enter(&ddi_umem_unlock_mutex);
9040                 if (ddi_umem_unlock_head == NULL) {
9041                         ddi_umem_unlock_head = ddi_umem_unlock_tail = p;
9042                         cv_broadcast(&ddi_umem_unlock_cv);
9043                 } else {
9044                         ddi_umem_unlock_tail->unl_forw = p;
9045                         ddi_umem_unlock_tail = p;
9046                 }
9047                 mutex_exit(&ddi_umem_unlock_mutex);
9048         } else {
9049                 /* unlock the pages right away */
9050                 (void) i_ddi_umem_unlock(p);
9051         }
9052 }
9053 
9054 /*
9055  * Create a buf structure from a ddi_umem_cookie
9056  * cookie - is a ddi_umem_cookie from ddi_umem_lock or ddi_umem_alloc
9057  *              (only UMEM_LOCKED & KMEM_NON_PAGEABLE types supported)
9058  * off, len - identifies the portion of the memory represented by the cookie
9059  *              that the buf points to.
9060  *      NOTE: off, len need to follow the alignment/size restrictions of the
9061  *              device (dev) that this buf will be passed to. Some devices
9062  *              will accept unrestricted alignment/size, whereas others (such as
9063  *              st) require some block-size alignment/size. It is the caller's
9064  *              responsibility to ensure that the alignment/size restrictions
9065  *              are met (we cannot assert as we do not know the restrictions)
9066  *
9067  * direction - is one of B_READ or B_WRITE and needs to be compatible with
9068  *              the flags used in ddi_umem_lock
9069  *
9070  * The following three arguments are used to initialize fields in the
9071  * buf structure and are uninterpreted by this routine.
9072  *
9073  * dev
9074  * blkno
9075  * iodone
9076  *
9077  * sleepflag - is one of DDI_UMEM_SLEEP or DDI_UMEM_NOSLEEP
9078  *
9079  * Returns a buf structure pointer on success (to be freed by freerbuf)
9080  *      NULL on any parameter error or memory alloc failure
9081  *
9082  */
9083 struct buf *
9084 ddi_umem_iosetup(ddi_umem_cookie_t cookie, off_t off, size_t len,
9085         int direction, dev_t dev, daddr_t blkno,
9086         int (*iodone)(struct buf *), int sleepflag)
9087 {
9088         struct ddi_umem_cookie *p = (struct ddi_umem_cookie *)cookie;
9089         struct buf *bp;
9090 
9091         /*
9092          * check for valid cookie offset, len
9093          */
9094         if ((off + len) > p->size) {
9095                 return (NULL);
9096         }
9097 
9098         if (len > p->size) {
9099                 return (NULL);
9100         }
9101 
9102         /* direction has to be one of B_READ or B_WRITE */
9103         if ((direction != B_READ) && (direction != B_WRITE)) {
9104                 return (NULL);
9105         }
9106 
9107         /* These are the only two valid sleepflags */
9108         if ((sleepflag != DDI_UMEM_SLEEP) && (sleepflag != DDI_UMEM_NOSLEEP)) {
9109                 return (NULL);
9110         }
9111 
9112         /*
9113          * Only cookies of type UMEM_LOCKED and KMEM_NON_PAGEABLE are supported
9114          */
9115         if ((p->type != UMEM_LOCKED) && (p->type != KMEM_NON_PAGEABLE)) {
9116                 return (NULL);
9117         }
9118 
9119         /* If type is KMEM_NON_PAGEABLE procp is NULL */
9120         ASSERT((p->type == KMEM_NON_PAGEABLE) ?
9121             (p->procp == NULL) : (p->procp != NULL));
9122 
9123         bp = kmem_alloc(sizeof (struct buf), sleepflag);
9124         if (bp == NULL) {
9125                 return (NULL);
9126         }
9127         bioinit(bp);
9128 
9129         bp->b_flags = B_BUSY | B_PHYS | direction;
9130         bp->b_edev = dev;
9131         bp->b_lblkno = blkno;
9132         bp->b_iodone = iodone;
9133         bp->b_bcount = len;
9134         bp->b_proc = (proc_t *)p->procp;
9135         ASSERT(((uintptr_t)(p->cvaddr) & PAGEOFFSET) == 0);
9136         bp->b_un.b_addr = (caddr_t)((uintptr_t)(p->cvaddr) + off);
9137         if (p->pparray != NULL) {
9138                 bp->b_flags |= B_SHADOW;
9139                 ASSERT(((uintptr_t)(p->cvaddr) & PAGEOFFSET) == 0);
9140                 bp->b_shadow = p->pparray + btop(off);
9141         }
9142         return (bp);
9143 }
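
/*
 * Illustrative usage sketch (not part of the original file): a typical
 * flow that locks a user buffer, wraps it in a buf(9S) for a transfer
 * toward the device, and then tears everything down.  uaddr, len, dev and
 * blkno are hypothetical; addr and len are page-aligned as required.
 *
 *	ddi_umem_cookie_t cookie;
 *	struct buf *bp;
 *
 *	if (ddi_umem_lock(uaddr, len, DDI_UMEMLOCK_READ, &cookie) != 0)
 *		return (EFAULT);
 *	bp = ddi_umem_iosetup(cookie, 0, len, B_WRITE, dev, blkno,
 *	    NULL, DDI_UMEM_SLEEP);
 *	if (bp != NULL) {
 *		... start the transfer and biowait(bp) ...
 *		freerbuf(bp);
 *	}
 *	ddi_umem_unlock(cookie);
 */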
9144 
9145 /*
9146  * Fault-handling and related routines
9147  */
9148 
9149 ddi_devstate_t
9150 ddi_get_devstate(dev_info_t *dip)
9151 {
9152         if (DEVI_IS_DEVICE_OFFLINE(dip))
9153                 return (DDI_DEVSTATE_OFFLINE);
9154         else if (DEVI_IS_DEVICE_DOWN(dip) || DEVI_IS_BUS_DOWN(dip))
9155                 return (DDI_DEVSTATE_DOWN);
9156         else if (DEVI_IS_BUS_QUIESCED(dip))
9157                 return (DDI_DEVSTATE_QUIESCED);
9158         else if (DEVI_IS_DEVICE_DEGRADED(dip))
9159                 return (DDI_DEVSTATE_DEGRADED);
9160         else
9161                 return (DDI_DEVSTATE_UP);
9162 }
9163 
9164 void
9165 ddi_dev_report_fault(dev_info_t *dip, ddi_fault_impact_t impact,
9166         ddi_fault_location_t location, const char *message)
9167 {
9168         struct ddi_fault_event_data fd;
9169         ddi_eventcookie_t ec;
9170 
9171         /*
9172          * Assemble all the information into a fault-event-data structure
9173          */
9174         fd.f_dip = dip;
9175         fd.f_impact = impact;
9176         fd.f_location = location;
9177         fd.f_message = message;
9178         fd.f_oldstate = ddi_get_devstate(dip);
9179 
9180         /*
9181          * Get eventcookie from defining parent.
9182          */
9183         if (ddi_get_eventcookie(dip, DDI_DEVI_FAULT_EVENT, &ec) !=
9184             DDI_SUCCESS)
9185                 return;
9186 
9187         (void) ndi_post_event(dip, dip, ec, &fd);
9188 }
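
/*
 * Illustrative usage sketch (not part of the original file): a driver that
 * detects a hardware problem can report it and later check the resulting
 * device state before starting new work.  xx_command_timed_out and statep
 * are hypothetical.
 *
 *	if (xx_command_timed_out(statep))
 *		ddi_dev_report_fault(dip, DDI_SERVICE_LOST,
 *		    DDI_DEVICE_FAULT, "command timeout");
 *
 *	if (ddi_get_devstate(dip) != DDI_DEVSTATE_UP)
 *		return (EIO);
 */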
9189 
9190 char *
9191 i_ddi_devi_class(dev_info_t *dip)
9192 {
9193         return (DEVI(dip)->devi_device_class);
9194 }
9195 
9196 int
9197 i_ddi_set_devi_class(dev_info_t *dip, char *devi_class, int flag)
9198 {
9199         struct dev_info *devi = DEVI(dip);
9200 
9201         mutex_enter(&devi->devi_lock);
9202 
9203         if (devi->devi_device_class)
9204                 kmem_free(devi->devi_device_class,
9205                     strlen(devi->devi_device_class) + 1);
9206 
9207         if ((devi->devi_device_class = i_ddi_strdup(devi_class, flag))
9208             != NULL) {
9209                 mutex_exit(&devi->devi_lock);
9210                 return (DDI_SUCCESS);
9211         }
9212 
9213         mutex_exit(&devi->devi_lock);
9214 
9215         return (DDI_FAILURE);
9216 }
9217 
9218 
9219 /*
9220  * Task Queues DDI interfaces.
9221  */
9222 
9223 /* ARGSUSED */
9224 ddi_taskq_t *
9225 ddi_taskq_create(dev_info_t *dip, const char *name, int nthreads,
9226     pri_t pri, uint_t cflags)
9227 {
9228         char full_name[TASKQ_NAMELEN];
9229         const char *tq_name;
9230         int nodeid = 0;
9231 
9232         if (dip == NULL)
9233                 tq_name = name;
9234         else {
9235                 nodeid = ddi_get_instance(dip);
9236 
9237                 if (name == NULL)
9238                         name = "tq";
9239 
9240                 (void) snprintf(full_name, sizeof (full_name), "%s_%s",
9241                     ddi_driver_name(dip), name);
9242 
9243                 tq_name = full_name;
9244         }
9245 
9246         return ((ddi_taskq_t *)taskq_create_instance(tq_name, nodeid, nthreads,
9247             pri == TASKQ_DEFAULTPRI ? minclsyspri : pri,
9248             nthreads, INT_MAX, TASKQ_PREPOPULATE));
9249 }
9250 
9251 void
9252 ddi_taskq_destroy(ddi_taskq_t *tq)
9253 {
9254         taskq_destroy((taskq_t *)tq);
9255 }
9256 
9257 int
9258 ddi_taskq_dispatch(ddi_taskq_t *tq, void (* func)(void *),
9259     void *arg, uint_t dflags)
9260 {
9261         taskqid_t id = taskq_dispatch((taskq_t *)tq, func, arg,
9262             dflags == DDI_SLEEP ? TQ_SLEEP : TQ_NOSLEEP);
9263 
9264         return (id != 0 ? DDI_SUCCESS : DDI_FAILURE);
9265 }
9266 
9267 void
9268 ddi_taskq_wait(ddi_taskq_t *tq)
9269 {
9270         taskq_wait((taskq_t *)tq);
9271 }
9272 
9273 void
9274 ddi_taskq_suspend(ddi_taskq_t *tq)
9275 {
9276         taskq_suspend((taskq_t *)tq);
9277 }
9278 
9279 boolean_t
9280 ddi_taskq_suspended(ddi_taskq_t *tq)
9281 {
9282         return (taskq_suspended((taskq_t *)tq));
9283 }
9284 
9285 void
9286 ddi_taskq_resume(ddi_taskq_t *tq)
9287 {
9288         taskq_resume((taskq_t *)tq);
9289 }
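
/*
 * Example (illustrative sketch): typical ddi_taskq lifecycle.  A driver
 * creates the queue at attach time, dispatches deferred work from
 * interrupt or ioctl context, and drains and destroys the queue at
 * detach time.  xx_work() and xsp are hypothetical driver names.
 *
 *	static void
 *	xx_work(void *arg)
 *	{
 *		struct xx_soft_state *xsp = arg;
 *
 *		... process deferred work ...
 *	}
 *
 *	ddi_taskq_t *tq;
 *
 *	tq = ddi_taskq_create(dip, "work", 1, TASKQ_DEFAULTPRI, 0);
 *
 *	if (ddi_taskq_dispatch(tq, xx_work, xsp, DDI_NOSLEEP) != DDI_SUCCESS)
 *		... handle dispatch failure ...
 *
 *	ddi_taskq_wait(tq);
 *	ddi_taskq_destroy(tq);
 */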
9290 
9291 int
9292 ddi_parse(
9293         const char      *ifname,
9294         char            *alnum,
9295         uint_t          *nump)
9296 {
9297         const char      *p;
9298         int             l;
9299         ulong_t         num;
9300         boolean_t       nonum = B_TRUE;
9301         char            c;
9302 
9303         l = strlen(ifname);
9304         for (p = ifname + l; p != ifname; l--) {
9305                 c = *--p;
9306                 if (!isdigit(c)) {
9307                         (void) strlcpy(alnum, ifname, l + 1);
9308                         if (ddi_strtoul(p + 1, NULL, 10, &num) != 0)
9309                                 return (DDI_FAILURE);
9310                         break;
9311                 }
9312                 nonum = B_FALSE;
9313         }
9314         if (l == 0 || nonum)
9315                 return (DDI_FAILURE);
9316 
9317         *nump = num;
9318         return (DDI_SUCCESS);
9319 }
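
/*
 * Example: ddi_parse() splits a name of the form <driver><unit> into its
 * alphabetic prefix and numeric suffix.  Given "bge2", alnum is set to
 * "bge" and *nump to 2; a name with no trailing digits fails with
 * DDI_FAILURE.  The buffer below is sized with LIFNAMSIZ purely for
 * illustration.
 *
 *	char	drv[LIFNAMSIZ];
 *	uint_t	unit;
 *
 *	if (ddi_parse("bge2", drv, &unit) == DDI_SUCCESS) {
 *		... drv is "bge", unit is 2 ...
 *	}
 */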
9320 
9321 /*
9322  * Default quiesce(9E) implementation for drivers that don't need to quiesce.
9323  */
9324 /* ARGSUSED */
9325 int
9326 ddi_quiesce_not_needed(dev_info_t *dip)
9327 {
9328         return (DDI_SUCCESS);
9329 }
9330 
9331 /*
9332  * quiesce(9E) implementation for drivers that should implement quiesce()
9333  * but have not done so yet; it always returns DDI_FAILURE.
9334  */
9335 /* ARGSUSED */
9336 int
9337 ddi_quiesce_not_supported(dev_info_t *dip)
9338 {
9339         return (DDI_FAILURE);
9340 }
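
/*
 * Example (illustrative sketch): these routines are intended to be plugged
 * into the devo_quiesce field of a driver's dev_ops structure; only that
 * field of the hypothetical initializer is shown.
 *
 *	static struct dev_ops xx_dev_ops = {
 *		...
 *		.devo_quiesce = ddi_quiesce_not_needed
 *	};
 */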
9341 
9342 char *
9343 ddi_strdup(const char *str, int flag)
9344 {
9345         int     n;
9346         char    *ptr;
9347 
9348         ASSERT(str != NULL);
9349         ASSERT((flag == KM_SLEEP) || (flag == KM_NOSLEEP));
9350 
9351         n = strlen(str);
9352         if ((ptr = kmem_alloc(n + 1, flag)) == NULL)
9353                 return (NULL);
9354         bcopy(str, ptr, n + 1);
9355         return (ptr);
9356 }
9357 
9358 char *
9359 strdup(const char *str)
9360 {
9361         return (ddi_strdup(str, KM_SLEEP));
9362 }
9363 
9364 void
9365 strfree(char *str)
9366 {
9367         ASSERT(str != NULL);
9368         kmem_free(str, strlen(str) + 1);
9369 }
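
/*
 * Example: ddi_strdup() and strfree() pair up the way kmem_alloc() and
 * kmem_free() do, but hide the length bookkeeping.  With KM_SLEEP the
 * allocation cannot fail.
 *
 *	char *copy;
 *
 *	copy = ddi_strdup(name, KM_SLEEP);
 *	...
 *	strfree(copy);
 */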
9370 
9371 /*
9372  * Generic DDI callback interfaces.
9373  */
9374 
9375 int
9376 ddi_cb_register(dev_info_t *dip, ddi_cb_flags_t flags, ddi_cb_func_t cbfunc,
9377     void *arg1, void *arg2, ddi_cb_handle_t *ret_hdlp)
9378 {
9379         ddi_cb_t        *cbp;
9380 
9381         ASSERT(dip != NULL);
9382         ASSERT(DDI_CB_FLAG_VALID(flags));
9383         ASSERT(cbfunc != NULL);
9384         ASSERT(ret_hdlp != NULL);
9385 
9386         /* Sanity check the context */
9387         ASSERT(!servicing_interrupt());
9388         if (servicing_interrupt())
9389                 return (DDI_FAILURE);
9390 
9391         /* Validate parameters */
9392         if ((dip == NULL) || !DDI_CB_FLAG_VALID(flags) ||
9393             (cbfunc == NULL) || (ret_hdlp == NULL))
9394                 return (DDI_EINVAL);
9395 
9396         /* Check for previous registration */
9397         if (DEVI(dip)->devi_cb_p != NULL)
9398                 return (DDI_EALREADY);
9399 
9400         /* Allocate and initialize callback */
9401         cbp = kmem_zalloc(sizeof (ddi_cb_t), KM_SLEEP);
9402         cbp->cb_dip = dip;
9403         cbp->cb_func = cbfunc;
9404         cbp->cb_arg1 = arg1;
9405         cbp->cb_arg2 = arg2;
9406         cbp->cb_flags = flags;
9407         DEVI(dip)->devi_cb_p = cbp;
9408 
9409         /* If adding an IRM callback, notify IRM */
9410         if (flags & DDI_CB_FLAG_INTR)
9411                 i_ddi_irm_set_cb(dip, B_TRUE);
9412 
9413         *ret_hdlp = (ddi_cb_handle_t)&(DEVI(dip)->devi_cb_p);
9414         return (DDI_SUCCESS);
9415 }
9416 
9417 int
9418 ddi_cb_unregister(ddi_cb_handle_t hdl)
9419 {
9420         ddi_cb_t        *cbp;
9421         dev_info_t      *dip;
9422 
9423         ASSERT(hdl != NULL);
9424 
9425         /* Sanity check the context */
9426         ASSERT(!servicing_interrupt());
9427         if (servicing_interrupt())
9428                 return (DDI_FAILURE);
9429 
9430         /* Validate parameters */
9431         if ((hdl == NULL) || ((cbp = *(ddi_cb_t **)hdl) == NULL) ||
9432             ((dip = cbp->cb_dip) == NULL))
9433                 return (DDI_EINVAL);
9434 
9435         /* If removing an IRM callback, notify IRM */
9436         if (cbp->cb_flags & DDI_CB_FLAG_INTR)
9437                 i_ddi_irm_set_cb(dip, B_FALSE);
9438 
9439         /* Destroy the callback */
9440         kmem_free(cbp, sizeof (ddi_cb_t));
9441         DEVI(dip)->devi_cb_p = NULL;
9442 
9443         return (DDI_SUCCESS);
9444 }
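
/*
 * Example (illustrative sketch): register for interrupt resource
 * management (IRM) callbacks.  The callback prototype matches
 * ddi_cb_func_t; xx_cb(), xsp and the action handling shown are
 * hypothetical.
 *
 *	static int
 *	xx_cb(dev_info_t *dip, ddi_cb_action_t action, void *cbarg,
 *	    void *arg1, void *arg2)
 *	{
 *		switch (action) {
 *		case DDI_CB_INTR_ADD:
 *		case DDI_CB_INTR_REMOVE:
 *			... adjust interrupt usage by (int)(uintptr_t)cbarg ...
 *			return (DDI_SUCCESS);
 *		default:
 *			return (DDI_ENOTSUP);
 *		}
 *	}
 *
 *	ddi_cb_handle_t hdl;
 *
 *	(void) ddi_cb_register(dip, DDI_CB_FLAG_INTR, xx_cb, xsp, NULL, &hdl);
 *	...
 *	(void) ddi_cb_unregister(hdl);
 */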
9445 
9446 /*
9447  * Platform independent DR routines
9448  */
9449 
9450 static int
9451 ndi2errno(int n)
9452 {
9453         int err = 0;
9454 
9455         switch (n) {
9456                 case NDI_NOMEM:
9457                         err = ENOMEM;
9458                         break;
9459                 case NDI_BUSY:
9460                         err = EBUSY;
9461                         break;
9462                 case NDI_FAULT:
9463                         err = EFAULT;
9464                         break;
9465                 case NDI_FAILURE:
9466                         err = EIO;
9467                         break;
9468                 case NDI_SUCCESS:
9469                         break;
9470                 case NDI_BADHANDLE:
9471                 default:
9472                         err = EINVAL;
9473                         break;
9474         }
9475         return (err);
9476 }
9477 
9478 /*
9479  * Prom tree node list
9480  */
9481 struct ptnode {
9482         pnode_t         nodeid;
9483         struct ptnode   *next;
9484 };
9485 
9486 /*
9487  * Prom tree walk arg
9488  */
9489 struct pta {
9490         dev_info_t      *pdip;
9491         devi_branch_t   *bp;
9492         uint_t          flags;
9493         dev_info_t      *fdip;
9494         struct ptnode   *head;
9495 };
9496 
9497 static void
9498 visit_node(pnode_t nodeid, struct pta *ap)
9499 {
9500         struct ptnode   **nextp;
9501         int             (*select)(pnode_t, void *, uint_t);
9502 
9503         ASSERT(nodeid != OBP_NONODE && nodeid != OBP_BADNODE);
9504 
9505         select = ap->bp->create.prom_branch_select;
9506 
9507         ASSERT(select);
9508 
9509         if (select(nodeid, ap->bp->arg, 0) == DDI_SUCCESS) {
9510 
9511                 for (nextp = &ap->head; *nextp; nextp = &(*nextp)->next)
9512                         ;
9513 
9514                 *nextp = kmem_zalloc(sizeof (struct ptnode), KM_SLEEP);
9515 
9516                 (*nextp)->nodeid = nodeid;
9517         }
9518 
9519         if ((ap->flags & DEVI_BRANCH_CHILD) == DEVI_BRANCH_CHILD)
9520                 return;
9521 
9522         nodeid = prom_childnode(nodeid);
9523         while (nodeid != OBP_NONODE && nodeid != OBP_BADNODE) {
9524                 visit_node(nodeid, ap);
9525                 nodeid = prom_nextnode(nodeid);
9526         }
9527 }
9528 
9529 /*
9530  * NOTE: The caller of this function must check for device contracts
9531  * or LDI callbacks against this dip before setting the dip offline.
9532  */
9533 static int
9534 set_infant_dip_offline(dev_info_t *dip, void *arg)
9535 {
9536         char    *path = (char *)arg;
9537 
9538         ASSERT(dip);
9539         ASSERT(arg);
9540 
9541         if (i_ddi_node_state(dip) >= DS_ATTACHED) {
9542                 (void) ddi_pathname(dip, path);
9543                 cmn_err(CE_WARN, "Attempt to set offline flag on attached "
9544                     "node: %s", path);
9545                 return (DDI_FAILURE);
9546         }
9547 
9548         mutex_enter(&(DEVI(dip)->devi_lock));
9549         if (!DEVI_IS_DEVICE_OFFLINE(dip))
9550                 DEVI_SET_DEVICE_OFFLINE(dip);
9551         mutex_exit(&(DEVI(dip)->devi_lock));
9552 
9553         return (DDI_SUCCESS);
9554 }
9555 
9556 typedef struct result {
9557         char    *path;
9558         int     result;
9559 } result_t;
9560 
9561 static int
9562 dip_set_offline(dev_info_t *dip, void *arg)
9563 {
9564         int end;
9565         result_t *resp = (result_t *)arg;
9566 
9567         ASSERT(dip);
9568         ASSERT(resp);
9569 
9570         /*
9571          * We stop the walk if e_ddi_offline_notify() returns
9572          * failure, because this implies that one or more consumers
9573          * (either LDI or contract based) have blocked the offline,
9574          * so there is no point in continuing the walk.
9575          */
9576         if (e_ddi_offline_notify(dip) == DDI_FAILURE) {
9577                 resp->result = DDI_FAILURE;
9578                 return (DDI_WALK_TERMINATE);
9579         }
9580 
9581         /*
9582          * If set_infant_dip_offline() returns failure, it implies
9583          * that we failed to set a particular dip offline. This
9584          * does not imply that the offline as a whole should fail.
9585          * We want to do the best we can, so we continue the walk.
9586          */
9587         if (set_infant_dip_offline(dip, resp->path) == DDI_SUCCESS)
9588                 end = DDI_SUCCESS;
9589         else
9590                 end = DDI_FAILURE;
9591 
9592         e_ddi_offline_finalize(dip, end);
9593 
9594         return (DDI_WALK_CONTINUE);
9595 }
9596 
9597 /*
9598  * The call to e_ddi_offline_notify() exists for the
9599  * unlikely error case that a branch we are trying to
9600  * create already exists and has device contracts or LDI
9601  * event callbacks against it.
9602  *
9603  * We allow create to succeed for such branches only if
9604  * no constraints block the offline.
9605  */
9606 static int
9607 branch_set_offline(dev_info_t *dip, char *path)
9608 {
9609         int             circ;
9610         int             end;
9611         result_t        res;
9612 
9613 
9614         if (e_ddi_offline_notify(dip) == DDI_FAILURE) {
9615                 return (DDI_FAILURE);
9616         }
9617 
9618         if (set_infant_dip_offline(dip, path) == DDI_SUCCESS)
9619                 end = DDI_SUCCESS;
9620         else
9621                 end = DDI_FAILURE;
9622 
9623         e_ddi_offline_finalize(dip, end);
9624 
9625         if (end == DDI_FAILURE)
9626                 return (DDI_FAILURE);
9627 
9628         res.result = DDI_SUCCESS;
9629         res.path = path;
9630 
9631         ndi_devi_enter(dip, &circ);
9632         ddi_walk_devs(ddi_get_child(dip), dip_set_offline, &res);
9633         ndi_devi_exit(dip, circ);
9634 
9635         return (res.result);
9636 }
9637 
9638 /*ARGSUSED*/
9639 static int
9640 create_prom_branch(void *arg, int has_changed)
9641 {
9642         int             circ;
9643         int             exists, rv;
9644         pnode_t         nodeid;
9645         struct ptnode   *tnp;
9646         dev_info_t      *dip;
9647         struct pta      *ap = arg;
9648         devi_branch_t   *bp;
9649         char            *path;
9650 
9651         ASSERT(ap);
9652         ASSERT(ap->fdip == NULL);
9653         ASSERT(ap->pdip && ndi_dev_is_prom_node(ap->pdip));
9654 
9655         bp = ap->bp;
9656 
9657         nodeid = ddi_get_nodeid(ap->pdip);
9658         if (nodeid == OBP_NONODE || nodeid == OBP_BADNODE) {
9659                 cmn_err(CE_WARN, "create_prom_branch: invalid "
9660                     "nodeid: 0x%x", nodeid);
9661                 return (EINVAL);
9662         }
9663 
9664         ap->head = NULL;
9665 
9666         nodeid = prom_childnode(nodeid);
9667         while (nodeid != OBP_NONODE && nodeid != OBP_BADNODE) {
9668                 visit_node(nodeid, ap);
9669                 nodeid = prom_nextnode(nodeid);
9670         }
9671 
9672         if (ap->head == NULL)
9673                 return (ENODEV);
9674 
9675         path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
9676         rv = 0;
9677         while ((tnp = ap->head) != NULL) {
9678                 ap->head = tnp->next;
9679 
9680                 ndi_devi_enter(ap->pdip, &circ);
9681 
9682                 /*
9683                  * Check if the branch already exists.
9684                  */
9685                 exists = 0;
9686                 dip = e_ddi_nodeid_to_dip(tnp->nodeid);
9687                 if (dip != NULL) {
9688                         exists = 1;
9689 
9690                         /* Parent is held busy, so release hold */
9691                         ndi_rele_devi(dip);
9692 #ifdef  DEBUG
9693                         cmn_err(CE_WARN, "create_prom_branch: dip(%p) exists"
9694                             " for nodeid 0x%x", (void *)dip, tnp->nodeid);
9695 #endif
9696                 } else {
9697                         dip = i_ddi_create_branch(ap->pdip, tnp->nodeid);
9698                 }
9699 
9700                 kmem_free(tnp, sizeof (struct ptnode));
9701 
9702                 /*
9703                  * Hold the branch if it is not already held
9704                  */
9705                 if (dip && !exists) {
9706                         e_ddi_branch_hold(dip);
9707                 }
9708 
9709                 ASSERT(dip == NULL || e_ddi_branch_held(dip));
9710 
9711                 /*
9712                  * Set all dips in the newly created branch offline so that
9713                  * only a "configure" operation can attach
9714                  * the branch
9715                  */
9716                 if (dip == NULL || branch_set_offline(dip, path)
9717                     == DDI_FAILURE) {
9718                         ndi_devi_exit(ap->pdip, circ);
9719                         rv = EIO;
9720                         continue;
9721                 }
9722 
9723                 ASSERT(ddi_get_parent(dip) == ap->pdip);
9724 
9725                 ndi_devi_exit(ap->pdip, circ);
9726 
9727                 if (ap->flags & DEVI_BRANCH_CONFIGURE) {
9728                         int error = e_ddi_branch_configure(dip, &ap->fdip, 0);
9729                         if (error && rv == 0)
9730                                 rv = error;
9731                 }
9732 
9733                 /*
9734                  * Invoke devi_branch_callback() (if it exists) only for
9735                  * newly created branches
9736                  */
9737                 if (bp->devi_branch_callback && !exists)
9738                         bp->devi_branch_callback(dip, bp->arg, 0);
9739         }
9740 
9741         kmem_free(path, MAXPATHLEN);
9742 
9743         return (rv);
9744 }
9745 
9746 static int
9747 sid_node_create(dev_info_t *pdip, devi_branch_t *bp, dev_info_t **rdipp)
9748 {
9749         int                     rv, circ, len;
9750         int                     i, flags, ret;
9751         dev_info_t              *dip;
9752         char                    *nbuf;
9753         char                    *path;
9754         static const char       *noname = "<none>";
9755 
9756         ASSERT(pdip);
9757         ASSERT(DEVI_BUSY_OWNED(pdip));
9758 
9759         flags = 0;
9760 
9761         /*
9762          * Creating the root of a branch?
9763          */
9764         if (rdipp) {
9765                 *rdipp = NULL;
9766                 flags = DEVI_BRANCH_ROOT;
9767         }
9768 
9769         ndi_devi_alloc_sleep(pdip, (char *)noname, DEVI_SID_NODEID, &dip);
9770         rv = bp->create.sid_branch_create(dip, bp->arg, flags);
9771 
9772         nbuf = kmem_alloc(OBP_MAXDRVNAME, KM_SLEEP);
9773 
9774         if (rv == DDI_WALK_ERROR) {
9775                 cmn_err(CE_WARN, "e_ddi_branch_create: Error setting"
9776                     " properties on devinfo node %p",  (void *)dip);
9777                 goto fail;
9778         }
9779 
9780         len = OBP_MAXDRVNAME;
9781         if (ddi_getlongprop_buf(DDI_DEV_T_ANY, dip,
9782             DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "name", nbuf, &len)
9783             != DDI_PROP_SUCCESS) {
9784                 cmn_err(CE_WARN, "e_ddi_branch_create: devinfo node %p has"
9785                     " no name property", (void *)dip);
9786                 goto fail;
9787         }
9788 
9789         ASSERT(i_ddi_node_state(dip) == DS_PROTO);
9790         if (ndi_devi_set_nodename(dip, nbuf, 0) != NDI_SUCCESS) {
9791                 cmn_err(CE_WARN, "e_ddi_branch_create: cannot set name (%s)"
9792                     " for devinfo node %p", nbuf, (void *)dip);
9793                 goto fail;
9794         }
9795 
9796         kmem_free(nbuf, OBP_MAXDRVNAME);
9797 
9798         /*
9799          * Ignore bind failures just like boot does
9800          */
9801         (void) ndi_devi_bind_driver(dip, 0);
9802 
9803         switch (rv) {
9804         case DDI_WALK_CONTINUE:
9805         case DDI_WALK_PRUNESIB:
9806                 ndi_devi_enter(dip, &circ);
9807 
9808                 i = DDI_WALK_CONTINUE;
9809                 for (; i == DDI_WALK_CONTINUE; ) {
9810                         i = sid_node_create(dip, bp, NULL);
9811                 }
9812 
9813                 ASSERT(i == DDI_WALK_ERROR || i == DDI_WALK_PRUNESIB);
9814                 if (i == DDI_WALK_ERROR)
9815                         rv = i;
9816                 /*
9817                  * If PRUNESIB was returned, stop creating siblings
9818                  * of dip's child.  Subsequent walk behavior
9819                  * is determined by the rv returned by dip.
9820                  */
9821 
9822                 ndi_devi_exit(dip, circ);
9823                 break;
9824         case DDI_WALK_TERMINATE:
9825                 /*
9826                  * Don't create children and ask our parent
9827                  * to not create siblings either.
9828                  */
9829                 rv = DDI_WALK_PRUNESIB;
9830                 break;
9831         case DDI_WALK_PRUNECHILD:
9832                 /*
9833                  * Don't create children, but ask parent to continue
9834                  * with siblings.
9835                  */
9836                 rv = DDI_WALK_CONTINUE;
9837                 break;
9838         default:
9839                 ASSERT(0);
9840                 break;
9841         }
9842 
9843         if (rdipp)
9844                 *rdipp = dip;
9845 
9846         /*
9847          * Set device offline - only the "configure" op should cause an attach.
9848          * Note that it is safe to set the dip offline without checking
9849          * for either device contract or layered driver (LDI) based constraints
9850          * since there cannot be any contracts or LDI opens of this device.
9851          * This is because this node is a newly created dip with the parent busy
9852          * held, so no other thread can come in and attach this dip. A dip that
9853          * has never been attached cannot have contracts since by definition
9854          * a device contract (an agreement between a process and a device minor
9855          * node) can only be created against a device that has minor nodes
9856          * i.e. is attached. Similarly, an LDI open will only succeed if the
9857          * dip is attached. We assert below that the dip is not attached.
9858          */
9859         ASSERT(i_ddi_node_state(dip) < DS_ATTACHED);
9860         path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
9861         ret = set_infant_dip_offline(dip, path);
9862         ASSERT(ret == DDI_SUCCESS);
9863         kmem_free(path, MAXPATHLEN);
9864 
9865         return (rv);
9866 fail:
9867         (void) ndi_devi_free(dip);
9868         kmem_free(nbuf, OBP_MAXDRVNAME);
9869         return (DDI_WALK_ERROR);
9870 }
9871 
9872 static int
9873 create_sid_branch(
9874         dev_info_t      *pdip,
9875         devi_branch_t   *bp,
9876         dev_info_t      **dipp,
9877         uint_t          flags)
9878 {
9879         int             rv = 0, state = DDI_WALK_CONTINUE;
9880         dev_info_t      *rdip;
9881 
9882         while (state == DDI_WALK_CONTINUE) {
9883                 int     circ;
9884 
9885                 ndi_devi_enter(pdip, &circ);
9886 
9887                 state = sid_node_create(pdip, bp, &rdip);
9888                 if (rdip == NULL) {
9889                         ndi_devi_exit(pdip, circ);
9890                         ASSERT(state == DDI_WALK_ERROR);
9891                         break;
9892                 }
9893 
9894                 e_ddi_branch_hold(rdip);
9895 
9896                 ndi_devi_exit(pdip, circ);
9897 
9898                 if (flags & DEVI_BRANCH_CONFIGURE) {
9899                         int error = e_ddi_branch_configure(rdip, dipp, 0);
9900                         if (error && rv == 0)
9901                                 rv = error;
9902                 }
9903 
9904                 /*
9905                  * devi_branch_callback() is optional
9906                  */
9907                 if (bp->devi_branch_callback)
9908                         bp->devi_branch_callback(rdip, bp->arg, 0);
9909         }
9910 
9911         ASSERT(state == DDI_WALK_ERROR || state == DDI_WALK_PRUNESIB);
9912 
9913         return (state == DDI_WALK_ERROR ? EIO : rv);
9914 }
9915 
9916 int
9917 e_ddi_branch_create(
9918         dev_info_t      *pdip,
9919         devi_branch_t   *bp,
9920         dev_info_t      **dipp,
9921         uint_t          flags)
9922 {
9923         int prom_devi, sid_devi, error;
9924 
9925         if (pdip == NULL || bp == NULL || bp->type == 0)
9926                 return (EINVAL);
9927 
9928         prom_devi = (bp->type == DEVI_BRANCH_PROM) ? 1 : 0;
9929         sid_devi = (bp->type == DEVI_BRANCH_SID) ? 1 : 0;
9930 
9931         if (prom_devi && bp->create.prom_branch_select == NULL)
9932                 return (EINVAL);
9933         else if (sid_devi && bp->create.sid_branch_create == NULL)
9934                 return (EINVAL);
9935         else if (!prom_devi && !sid_devi)
9936                 return (EINVAL);
9937 
9938         if (flags & DEVI_BRANCH_EVENT)
9939                 return (EINVAL);
9940 
9941         if (prom_devi) {
9942                 struct pta pta = {0};
9943 
9944                 pta.pdip = pdip;
9945                 pta.bp = bp;
9946                 pta.flags = flags;
9947 
9948                 error = prom_tree_access(create_prom_branch, &pta, NULL);
9949 
9950                 if (dipp)
9951                         *dipp = pta.fdip;
9952                 else if (pta.fdip)
9953                         ndi_rele_devi(pta.fdip);
9954         } else {
9955                 error = create_sid_branch(pdip, bp, dipp, flags);
9956         }
9957 
9958         return (error);
9959 }
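
/*
 * Example (illustrative sketch): create a self-identifying (SID) branch
 * under pdip and configure it.  xx_sid_create() is a caller-supplied
 * routine that decorates each newly allocated node and returns one of
 * the DDI_WALK_* codes described above; xsp is hypothetical.
 *
 *	devi_branch_t	branch;
 *	dev_info_t	*fdip = NULL;
 *	int		err;
 *
 *	bzero(&branch, sizeof (branch));
 *	branch.type = DEVI_BRANCH_SID;
 *	branch.arg = xsp;
 *	branch.create.sid_branch_create = xx_sid_create;
 *	branch.devi_branch_callback = NULL;
 *
 *	err = e_ddi_branch_create(pdip, &branch, &fdip, DEVI_BRANCH_CONFIGURE);
 */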
9960 
9961 int
9962 e_ddi_branch_configure(dev_info_t *rdip, dev_info_t **dipp, uint_t flags)
9963 {
9964         int             rv;
9965         char            *devnm;
9966         dev_info_t      *pdip;
9967 
9968         if (dipp)
9969                 *dipp = NULL;
9970 
9971         if (rdip == NULL || flags != 0 || (flags & DEVI_BRANCH_EVENT))
9972                 return (EINVAL);
9973 
9974         pdip = ddi_get_parent(rdip);
9975 
9976         ndi_hold_devi(pdip);
9977 
9978         if (!e_ddi_branch_held(rdip)) {
9979                 ndi_rele_devi(pdip);
9980                 cmn_err(CE_WARN, "e_ddi_branch_configure: "
9981                     "dip(%p) not held", (void *)rdip);
9982                 return (EINVAL);
9983         }
9984 
9985         if (i_ddi_node_state(rdip) < DS_INITIALIZED) {
9986                 /*
9987                  * First attempt to bind a driver. If we fail, return
9988                  * success (on some platforms, dips for some device
9989                  * types, such as CPUs, may not have a driver).
9990                  */
9991                 if (ndi_devi_bind_driver(rdip, 0) != NDI_SUCCESS) {
9992                         ndi_rele_devi(pdip);
9993                         return (0);
9994                 }
9995 
9996                 if (ddi_initchild(pdip, rdip) != DDI_SUCCESS) {
9997                         rv = NDI_FAILURE;
9998                         goto out;
9999                 }
10000         }
10001 
10002         ASSERT(i_ddi_node_state(rdip) >= DS_INITIALIZED);
10003 
10004         devnm = kmem_alloc(MAXNAMELEN + 1, KM_SLEEP);
10005 
10006         (void) ddi_deviname(rdip, devnm);
10007 
10008         if ((rv = ndi_devi_config_one(pdip, devnm+1, &rdip,
10009             NDI_DEVI_ONLINE | NDI_CONFIG)) == NDI_SUCCESS) {
10010                 /* release hold from ndi_devi_config_one() */
10011                 ndi_rele_devi(rdip);
10012         }
10013 
10014         kmem_free(devnm, MAXNAMELEN + 1);
10015 out:
10016         if (rv != NDI_SUCCESS && dipp && rdip) {
10017                 ndi_hold_devi(rdip);
10018                 *dipp = rdip;
10019         }
10020         ndi_rele_devi(pdip);
10021         return (ndi2errno(rv));
10022 }
10023 
10024 void
10025 e_ddi_branch_hold(dev_info_t *rdip)
10026 {
10027         if (e_ddi_branch_held(rdip)) {
10028                 cmn_err(CE_WARN, "e_ddi_branch_hold: branch already held");
10029                 return;
10030         }
10031 
10032         mutex_enter(&DEVI(rdip)->devi_lock);
10033         if ((DEVI(rdip)->devi_flags & DEVI_BRANCH_HELD) == 0) {
10034                 DEVI(rdip)->devi_flags |= DEVI_BRANCH_HELD;
10035                 DEVI(rdip)->devi_ref++;
10036         }
10037         ASSERT(DEVI(rdip)->devi_ref > 0);
10038         mutex_exit(&DEVI(rdip)->devi_lock);
10039 }
10040 
10041 int
10042 e_ddi_branch_held(dev_info_t *rdip)
10043 {
10044         int rv = 0;
10045 
10046         mutex_enter(&DEVI(rdip)->devi_lock);
10047         if ((DEVI(rdip)->devi_flags & DEVI_BRANCH_HELD) &&
10048             DEVI(rdip)->devi_ref > 0) {
10049                 rv = 1;
10050         }
10051         mutex_exit(&DEVI(rdip)->devi_lock);
10052 
10053         return (rv);
10054 }
10055 
10056 void
10057 e_ddi_branch_rele(dev_info_t *rdip)
10058 {
10059         mutex_enter(&DEVI(rdip)->devi_lock);
10060         DEVI(rdip)->devi_flags &= ~DEVI_BRANCH_HELD;
10061         DEVI(rdip)->devi_ref--;
10062         mutex_exit(&DEVI(rdip)->devi_lock);
10063 }
10064 
10065 int
10066 e_ddi_branch_unconfigure(
10067         dev_info_t *rdip,
10068         dev_info_t **dipp,
10069         uint_t flags)
10070 {
10071         int     circ, rv;
10072         int     destroy;
10073         char    *devnm;
10074         uint_t  nflags;
10075         dev_info_t *pdip;
10076 
10077         if (dipp)
10078                 *dipp = NULL;
10079 
10080         if (rdip == NULL)
10081                 return (EINVAL);
10082 
10083         pdip = ddi_get_parent(rdip);
10084 
10085         ASSERT(pdip);
10086 
10087         /*
10088          * Check if caller holds pdip busy - can cause deadlocks during
10089          * devfs_clean()
10090          */
10091         if (DEVI_BUSY_OWNED(pdip)) {
10092                 cmn_err(CE_WARN, "e_ddi_branch_unconfigure: failed: parent"
10093                     " devinfo node(%p) is busy held", (void *)pdip);
10094                 return (EINVAL);
10095         }
10096 
10097         destroy = (flags & DEVI_BRANCH_DESTROY) ? 1 : 0;
10098 
10099         devnm = kmem_alloc(MAXNAMELEN + 1, KM_SLEEP);
10100 
10101         ndi_devi_enter(pdip, &circ);
10102         (void) ddi_deviname(rdip, devnm);
10103         ndi_devi_exit(pdip, circ);
10104 
10105         /*
10106          * ddi_deviname() returns a component name with / prepended.
10107          */
10108         (void) devfs_clean(pdip, devnm + 1, DV_CLEAN_FORCE);
10109 
10110         ndi_devi_enter(pdip, &circ);
10111 
10112         /*
10113          * Recreate the device name, as the node may have changed state
10114          * (init/uninit) while the parent busy lock was dropped for devfs_clean().
10115          */
10116         (void) ddi_deviname(rdip, devnm);
10117 
10118         if (!e_ddi_branch_held(rdip)) {
10119                 kmem_free(devnm, MAXNAMELEN + 1);
10120                 ndi_devi_exit(pdip, circ);
10121                 cmn_err(CE_WARN, "e_ddi_%s_branch: dip(%p) not held",
10122                     destroy ? "destroy" : "unconfigure", (void *)rdip);
10123                 return (EINVAL);
10124         }
10125 
10126         /*
10127          * Release hold on the branch. This is ok since we are holding the
10128          * parent busy. If rdip is not removed, we must do a hold on the
10129          * branch before returning.
10130          */
10131         e_ddi_branch_rele(rdip);
10132 
10133         nflags = NDI_DEVI_OFFLINE;
10134         if (destroy || (flags & DEVI_BRANCH_DESTROY)) {
10135                 nflags |= NDI_DEVI_REMOVE;
10136                 destroy = 1;
10137         } else {
10138                 nflags |= NDI_UNCONFIG;         /* uninit but don't remove */
10139         }
10140 
10141         if (flags & DEVI_BRANCH_EVENT)
10142                 nflags |= NDI_POST_EVENT;
10143 
10144         if (i_ddi_devi_attached(pdip) &&
10145             (i_ddi_node_state(rdip) >= DS_INITIALIZED)) {
10146                 rv = ndi_devi_unconfig_one(pdip, devnm+1, dipp, nflags);
10147         } else {
10148                 rv = e_ddi_devi_unconfig(rdip, dipp, nflags);
10149                 if (rv == NDI_SUCCESS) {
10150                         ASSERT(!destroy || ddi_get_child(rdip) == NULL);
10151                         rv = ndi_devi_offline(rdip, nflags);
10152                 }
10153         }
10154 
10155         if (!destroy || rv != NDI_SUCCESS) {
10156                 /* The dip still exists, so do a hold */
10157                 e_ddi_branch_hold(rdip);
10158         }
10159 out:
10160         kmem_free(devnm, MAXNAMELEN + 1);
10161         ndi_devi_exit(pdip, circ);
10162         return (ndi2errno(rv));
10163 }
10164 
10165 int
10166 e_ddi_branch_destroy(dev_info_t *rdip, dev_info_t **dipp, uint_t flag)
10167 {
10168         return (e_ddi_branch_unconfigure(rdip, dipp,
10169             flag|DEVI_BRANCH_DESTROY));
10170 }
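
/*
 * Example (illustrative sketch): tear down a branch created with
 * e_ddi_branch_create().  On failure, *dipp (if non-NULL) identifies a
 * node that blocked the operation.
 *
 *	dev_info_t	*blocker = NULL;
 *
 *	if (e_ddi_branch_destroy(rdip, &blocker, 0) != 0) {
 *		... unconfigure/removal failed, possibly because the
 *		    branch is still referenced ...
 *	}
 */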
10171 
10172 /*
10173  * Number of chains for hash table
10174  */
10175 #define NUMCHAINS       17
10176 
10177 /*
10178  * Devinfo busy arg
10179  */
10180 struct devi_busy {
10181         int dv_total;
10182         int s_total;
10183         mod_hash_t *dv_hash;
10184         mod_hash_t *s_hash;
10185         int (*callback)(dev_info_t *, void *, uint_t);
10186         void *arg;
10187 };
10188 
10189 static int
10190 visit_dip(dev_info_t *dip, void *arg)
10191 {
10192         uintptr_t sbusy, dvbusy, ref;
10193         struct devi_busy *bsp = arg;
10194 
10195         ASSERT(bsp->callback);
10196 
10197         /*
10198          * A dip cannot be busy if its reference count is 0
10199          */
10200         if ((ref = e_ddi_devi_holdcnt(dip)) == 0) {
10201                 return (bsp->callback(dip, bsp->arg, 0));
10202         }
10203 
10204         if (mod_hash_find(bsp->dv_hash, dip, (mod_hash_val_t *)&dvbusy))
10205                 dvbusy = 0;
10206 
10207         /*
10208          * To catch device opens currently maintained on specfs common snodes.
10209          */
10210         if (mod_hash_find(bsp->s_hash, dip, (mod_hash_val_t *)&sbusy))
10211                 sbusy = 0;
10212 
10213 #ifdef  DEBUG
10214         if (ref < sbusy || ref < dvbusy) {
10215                 cmn_err(CE_WARN, "dip(%p): sopen = %lu, dvopen = %lu "
10216                     "dip ref = %lu\n", (void *)dip, sbusy, dvbusy, ref);
10217         }
10218 #endif
10219 
10220         dvbusy = (sbusy > dvbusy) ? sbusy : dvbusy;
10221 
10222         return (bsp->callback(dip, bsp->arg, dvbusy));
10223 }
10224 
10225 static int
10226 visit_snode(struct snode *sp, void *arg)
10227 {
10228         uintptr_t sbusy;
10229         dev_info_t *dip;
10230         int count;
10231         struct devi_busy *bsp = arg;
10232 
10233         ASSERT(sp);
10234 
10235         /*
10236          * The stable lock is held. This prevents
10237          * the snode and its associated dip from
10238          * going away.
10239          */
10240         dip = NULL;
10241         count = spec_devi_open_count(sp, &dip);
10242 
10243         if (count <= 0)
10244                 return (DDI_WALK_CONTINUE);
10245 
10246         ASSERT(dip);
10247 
10248         if (mod_hash_remove(bsp->s_hash, dip, (mod_hash_val_t *)&sbusy))
10249                 sbusy = count;
10250         else
10251                 sbusy += count;
10252 
10253         if (mod_hash_insert(bsp->s_hash, dip, (mod_hash_val_t)sbusy)) {
10254                 cmn_err(CE_WARN, "%s: s_hash insert failed: dip=0x%p, "
10255                     "sbusy = %lu", "e_ddi_branch_referenced",
10256                     (void *)dip, sbusy);
10257         }
10258 
10259         bsp->s_total += count;
10260 
10261         return (DDI_WALK_CONTINUE);
10262 }
10263 
10264 static void
10265 visit_dvnode(struct dv_node *dv, void *arg)
10266 {
10267         uintptr_t dvbusy;
10268         uint_t count;
10269         struct vnode *vp;
10270         struct devi_busy *bsp = arg;
10271 
10272         ASSERT(dv && dv->dv_devi);
10273 
10274         vp = DVTOV(dv);
10275 
10276         mutex_enter(&vp->v_lock);
10277         count = vp->v_count;
10278         mutex_exit(&vp->v_lock);
10279 
10280         if (!count)
10281                 return;
10282 
10283         if (mod_hash_remove(bsp->dv_hash, dv->dv_devi,
10284             (mod_hash_val_t *)&dvbusy))
10285                 dvbusy = count;
10286         else
10287                 dvbusy += count;
10288 
10289         if (mod_hash_insert(bsp->dv_hash, dv->dv_devi,
10290             (mod_hash_val_t)dvbusy)) {
10291                 cmn_err(CE_WARN, "%s: dv_hash insert failed: dip=0x%p, "
10292                     "dvbusy=%lu", "e_ddi_branch_referenced",
10293                     (void *)dv->dv_devi, dvbusy);
10294         }
10295 
10296         bsp->dv_total += count;
10297 }
10298 
10299 /*
10300  * Returns reference count on success or -1 on failure.
10301  */
10302 int
10303 e_ddi_branch_referenced(
10304         dev_info_t *rdip,
10305         int (*callback)(dev_info_t *dip, void *arg, uint_t ref),
10306         void *arg)
10307 {
10308         int circ;
10309         char *path;
10310         dev_info_t *pdip;
10311         struct devi_busy bsa = {0};
10312 
10313         ASSERT(rdip);
10314 
10315         path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
10316 
10317         ndi_hold_devi(rdip);
10318 
10319         pdip = ddi_get_parent(rdip);
10320 
10321         ASSERT(pdip);
10322 
10323         /*
10324          * Check if caller holds pdip busy - can cause deadlocks during
10325          * devfs_walk()
10326          */
10327         if (!e_ddi_branch_held(rdip) || DEVI_BUSY_OWNED(pdip)) {
10328                 cmn_err(CE_WARN, "e_ddi_branch_referenced: failed: "
10329                     "devinfo branch(%p) not held or parent busy held",
10330                     (void *)rdip);
10331                 ndi_rele_devi(rdip);
10332                 kmem_free(path, MAXPATHLEN);
10333                 return (-1);
10334         }
10335 
10336         ndi_devi_enter(pdip, &circ);
10337         (void) ddi_pathname(rdip, path);
10338         ndi_devi_exit(pdip, circ);
10339 
10340         bsa.dv_hash = mod_hash_create_ptrhash("dv_node busy hash", NUMCHAINS,
10341             mod_hash_null_valdtor, sizeof (struct dev_info));
10342 
10343         bsa.s_hash = mod_hash_create_ptrhash("snode busy hash", NUMCHAINS,
10344             mod_hash_null_valdtor, sizeof (struct snode));
10345 
10346         if (devfs_walk(path, visit_dvnode, &bsa)) {
10347                 cmn_err(CE_WARN, "e_ddi_branch_referenced: "
10348                     "devfs walk failed for: %s", path);
10349                 kmem_free(path, MAXPATHLEN);
10350                 bsa.s_total = bsa.dv_total = -1;
10351                 goto out;
10352         }
10353 
10354         kmem_free(path, MAXPATHLEN);
10355 
10356         /*
10357          * Walk the snode table to detect device opens, which are currently
10358          * maintained on specfs common snodes.
10359          */
10360         spec_snode_walk(visit_snode, &bsa);
10361 
10362         if (callback == NULL)
10363                 goto out;
10364 
10365         bsa.callback = callback;
10366         bsa.arg = arg;
10367 
10368         if (visit_dip(rdip, &bsa) == DDI_WALK_CONTINUE) {
10369                 ndi_devi_enter(rdip, &circ);
10370                 ddi_walk_devs(ddi_get_child(rdip), visit_dip, &bsa);
10371                 ndi_devi_exit(rdip, circ);
10372         }
10373 
10374 out:
10375         ndi_rele_devi(rdip);
10376         mod_hash_destroy_ptrhash(bsa.s_hash);
10377         mod_hash_destroy_ptrhash(bsa.dv_hash);
10378         return (bsa.s_total > bsa.dv_total ? bsa.s_total : bsa.dv_total);
10379 }
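
/*
 * Example (illustrative sketch): count references on a held branch and
 * log each busy dip.  xx_busy_cb() is a caller-supplied callback; the
 * ref argument is the reference/open count computed above.
 *
 *	static int
 *	xx_busy_cb(dev_info_t *dip, void *arg, uint_t ref)
 *	{
 *		if (ref != 0)
 *			cmn_err(CE_NOTE, "dip %p busy (ref = %u)",
 *			    (void *)dip, ref);
 *		return (DDI_WALK_CONTINUE);
 *	}
 *
 *	if (e_ddi_branch_referenced(rdip, xx_busy_cb, NULL) > 0)
 *		... branch is still referenced; defer unconfigure ...
 */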