1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright (c) 1990, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Copyright 2014 Garrett D'Amore <garrett@damore.org>
25 */
26
27 #include <sys/note.h>
28 #include <sys/types.h>
29 #include <sys/param.h>
30 #include <sys/systm.h>
31 #include <sys/buf.h>
32 #include <sys/uio.h>
33 #include <sys/cred.h>
34 #include <sys/poll.h>
35 #include <sys/mman.h>
36 #include <sys/kmem.h>
37 #include <sys/model.h>
38 #include <sys/file.h>
39 #include <sys/proc.h>
40 #include <sys/open.h>
41 #include <sys/user.h>
42 #include <sys/t_lock.h>
43 #include <sys/vm.h>
44 #include <sys/stat.h>
45 #include <vm/hat.h>
46 #include <vm/seg.h>
47 #include <vm/seg_vn.h>
48 #include <vm/seg_dev.h>
49 #include <vm/as.h>
50 #include <sys/cmn_err.h>
51 #include <sys/cpuvar.h>
52 #include <sys/debug.h>
53 #include <sys/autoconf.h>
54 #include <sys/sunddi.h>
55 #include <sys/esunddi.h>
56 #include <sys/sunndi.h>
57 #include <sys/kstat.h>
58 #include <sys/conf.h>
59 #include <sys/ddi_impldefs.h> /* include implementation structure defs */
60 #include <sys/ndi_impldefs.h> /* include prototypes */
61 #include <sys/ddi_periodic.h>
62 #include <sys/hwconf.h>
63 #include <sys/pathname.h>
64 #include <sys/modctl.h>
65 #include <sys/epm.h>
66 #include <sys/devctl.h>
67 #include <sys/callb.h>
68 #include <sys/cladm.h>
69 #include <sys/sysevent.h>
70 #include <sys/dacf_impl.h>
71 #include <sys/ddidevmap.h>
72 #include <sys/bootconf.h>
73 #include <sys/disp.h>
74 #include <sys/atomic.h>
75 #include <sys/promif.h>
76 #include <sys/instance.h>
77 #include <sys/sysevent/eventdefs.h>
78 #include <sys/task.h>
79 #include <sys/project.h>
80 #include <sys/taskq.h>
81 #include <sys/devpolicy.h>
82 #include <sys/ctype.h>
83 #include <net/if.h>
84 #include <sys/rctl.h>
85 #include <sys/zone.h>
86 #include <sys/clock_impl.h>
87 #include <sys/ddi.h>
88 #include <sys/modhash.h>
89 #include <sys/sunldi_impl.h>
90 #include <sys/fs/dv_node.h>
91 #include <sys/fs/snode.h>
92
extern pri_t minclsyspri;

/* Resource controls charged when user memory is locked down (umem locking) */
extern rctl_hndl_t rc_project_locked_mem;
extern rctl_hndl_t rc_zone_locked_mem;

#ifdef DEBUG
static int sunddi_debug = 0;	/* set non-zero to enable debug tracing */
#endif /* DEBUG */

/* ddi_umem_unlock miscellaneous */

static void i_ddi_umem_unlock_thread_start(void);

static kmutex_t ddi_umem_unlock_mutex; /* unlock list mutex */
static kcondvar_t ddi_umem_unlock_cv; /* unlock list block/unblock */
static kthread_t *ddi_umem_unlock_thread;	/* worker that drains the list */
/*
 * The ddi_umem_unlock FIFO list. NULL head pointer indicates empty list.
 */
static struct ddi_umem_cookie *ddi_umem_unlock_head = NULL;
static struct ddi_umem_cookie *ddi_umem_unlock_tail = NULL;

/*
 * DDI(Sun) Function and flag definitions:
 */

#if defined(__x86)
/*
 * Used to indicate which entries were chosen from a range.
 */
char *chosen_reg = "chosen-reg";
#endif

/*
 * Function used to ring system console bell
 */
void (*ddi_console_bell_func)(clock_t duration);
134
135 /*
136 * Generic ddi_map: Call parent to fulfill request...
137 */
138
139 int
140 ddi_map(dev_info_t *dp, ddi_map_req_t *mp, off_t offset,
141 off_t len, caddr_t *addrp)
142 {
143 dev_info_t *pdip;
144
145 ASSERT(dp);
146 pdip = (dev_info_t *)DEVI(dp)->devi_parent;
147 return ((DEVI(pdip)->devi_ops->devo_bus_ops->bus_map)(pdip,
148 dp, mp, offset, len, addrp));
149 }
150
151 /*
152 * ddi_apply_range: (Called by nexi only.)
153 * Apply ranges in parent node dp, to child regspec rp...
154 */
155
int
ddi_apply_range(dev_info_t *dp, dev_info_t *rdip, struct regspec *rp)
{
	/* Thin wrapper over the implementation-specific range translation. */
	return (i_ddi_apply_range(dp, rdip, rp));
}
161
int
ddi_map_regs(dev_info_t *dip, uint_t rnumber, caddr_t *kaddrp, off_t offset,
    off_t len)
{
	ddi_map_req_t mr;
#if defined(__x86)
	struct {
		int bus;
		int addr;
		int size;
	} reg, *reglist;
	uint_t length;
	int rc;

	/*
	 * get the 'registers' or the 'reg' property.
	 * We look up the reg property as an array of
	 * int's.
	 */
	rc = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS, "registers", (int **)&reglist, &length);
	if (rc != DDI_PROP_SUCCESS)
		rc = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip,
		    DDI_PROP_DONTPASS, "reg", (int **)&reglist, &length);
	if (rc == DDI_PROP_SUCCESS) {
		/*
		 * point to the required entry.
		 * NOTE(review): rnumber is not validated against 'length';
		 * callers are trusted to pass an in-range register number.
		 */
		reg = reglist[rnumber];
		reg.addr += offset;
		if (len != 0)
			reg.size = len;
		/*
		 * make a new property containing ONLY the required tuple.
		 */
		if (ddi_prop_update_int_array(DDI_DEV_T_NONE, dip,
		    chosen_reg, (int *)&reg, (sizeof (reg)/sizeof (int)))
		    != DDI_PROP_SUCCESS) {
			cmn_err(CE_WARN, "%s%d: cannot create '%s' "
			    "property", DEVI(dip)->devi_name,
			    DEVI(dip)->devi_instance, chosen_reg);
		}
		/*
		 * free the memory allocated by
		 * ddi_prop_lookup_int_array ().
		 */
		ddi_prop_free((void *)reglist);
	}
#endif
	/* build a locked kernel mapping request for register set 'rnumber' */
	mr.map_op = DDI_MO_MAP_LOCKED;
	mr.map_type = DDI_MT_RNUMBER;
	mr.map_obj.rnumber = rnumber;
	mr.map_prot = PROT_READ | PROT_WRITE;
	mr.map_flags = DDI_MF_KERNEL_MAPPING;
	mr.map_handlep = NULL;
	mr.map_vers = DDI_MAP_VERSION;

	/*
	 * Call my parent to map in my regs.
	 */

	return (ddi_map(dip, &mr, offset, len, kaddrp));
}
225
226 void
227 ddi_unmap_regs(dev_info_t *dip, uint_t rnumber, caddr_t *kaddrp, off_t offset,
228 off_t len)
229 {
230 ddi_map_req_t mr;
231
232 mr.map_op = DDI_MO_UNMAP;
233 mr.map_type = DDI_MT_RNUMBER;
234 mr.map_flags = DDI_MF_KERNEL_MAPPING;
235 mr.map_prot = PROT_READ | PROT_WRITE; /* who cares? */
236 mr.map_obj.rnumber = rnumber;
237 mr.map_handlep = NULL;
238 mr.map_vers = DDI_MAP_VERSION;
239
240 /*
241 * Call my parent to unmap my regs.
242 */
243
244 (void) ddi_map(dip, &mr, offset, len, kaddrp);
245 *kaddrp = (caddr_t)0;
246 #if defined(__x86)
247 (void) ddi_prop_remove(DDI_DEV_T_NONE, dip, chosen_reg);
248 #endif
249 }
250
int
ddi_bus_map(dev_info_t *dip, dev_info_t *rdip, ddi_map_req_t *mp,
    off_t offset, off_t len, caddr_t *vaddrp)
{
	/* Default bus_map(9E): defer to the implementation-common code. */
	return (i_ddi_bus_map(dip, rdip, mp, offset, len, vaddrp));
}
257
258 /*
259 * nullbusmap: The/DDI default bus_map entry point for nexi
260 * not conforming to the reg/range paradigm (i.e. scsi, etc.)
261 * with no HAT/MMU layer to be programmed at this level.
262 *
263 * If the call is to map by rnumber, return an error,
264 * otherwise pass anything else up the tree to my parent.
265 */
266 int
267 nullbusmap(dev_info_t *dip, dev_info_t *rdip, ddi_map_req_t *mp,
268 off_t offset, off_t len, caddr_t *vaddrp)
269 {
270 _NOTE(ARGUNUSED(rdip))
271 if (mp->map_type == DDI_MT_RNUMBER)
272 return (DDI_ME_UNSUPPORTED);
273
274 return (ddi_map(dip, mp, offset, len, vaddrp));
275 }
276
277 /*
278 * ddi_rnumber_to_regspec: Not for use by leaf drivers.
279 * Only for use by nexi using the reg/range paradigm.
280 */
struct regspec *
ddi_rnumber_to_regspec(dev_info_t *dip, int rnumber)
{
	/* Defer to the implementation-specific rnumber lookup. */
	return (i_ddi_rnumber_to_regspec(dip, rnumber));
}
286
287
288 /*
289 * Note that we allow the dip to be nil because we may be called
290 * prior even to the instantiation of the devinfo tree itself - all
291 * regular leaf and nexus drivers should always use a non-nil dip!
292 *
293 * We treat peek in a somewhat cavalier fashion .. assuming that we'll
294 * simply get a synchronous fault as soon as we touch a missing address.
295 *
296 * Poke is rather more carefully handled because we might poke to a write
297 * buffer, "succeed", then only find some time later that we got an
298 * asynchronous fault that indicated that the address we were writing to
299 * was not really backed by hardware.
300 */
301
302 static int
303 i_ddi_peekpoke(dev_info_t *devi, ddi_ctl_enum_t cmd, size_t size,
304 void *addr, void *value_p)
305 {
306 union {
307 uint64_t u64;
308 uint32_t u32;
309 uint16_t u16;
310 uint8_t u8;
311 } peekpoke_value;
312
313 peekpoke_ctlops_t peekpoke_args;
314 uint64_t dummy_result;
315 int rval;
316
317 /* Note: size is assumed to be correct; it is not checked. */
318 peekpoke_args.size = size;
319 peekpoke_args.dev_addr = (uintptr_t)addr;
320 peekpoke_args.handle = NULL;
321 peekpoke_args.repcount = 1;
322 peekpoke_args.flags = 0;
323
324 if (cmd == DDI_CTLOPS_POKE) {
325 switch (size) {
326 case sizeof (uint8_t):
327 peekpoke_value.u8 = *(uint8_t *)value_p;
328 break;
329 case sizeof (uint16_t):
330 peekpoke_value.u16 = *(uint16_t *)value_p;
331 break;
332 case sizeof (uint32_t):
333 peekpoke_value.u32 = *(uint32_t *)value_p;
334 break;
335 case sizeof (uint64_t):
336 peekpoke_value.u64 = *(uint64_t *)value_p;
337 break;
338 }
339 }
340
341 peekpoke_args.host_addr = (uintptr_t)&peekpoke_value.u64;
342
343 if (devi != NULL)
344 rval = ddi_ctlops(devi, devi, cmd, &peekpoke_args,
345 &dummy_result);
346 else
347 rval = peekpoke_mem(cmd, &peekpoke_args);
348
349 /*
350 * A NULL value_p is permitted by ddi_peek(9F); discard the result.
351 */
352 if ((cmd == DDI_CTLOPS_PEEK) & (value_p != NULL)) {
353 switch (size) {
354 case sizeof (uint8_t):
355 *(uint8_t *)value_p = peekpoke_value.u8;
356 break;
357 case sizeof (uint16_t):
358 *(uint16_t *)value_p = peekpoke_value.u16;
359 break;
360 case sizeof (uint32_t):
361 *(uint32_t *)value_p = peekpoke_value.u32;
362 break;
363 case sizeof (uint64_t):
364 *(uint64_t *)value_p = peekpoke_value.u64;
365 break;
366 }
367 }
368
369 return (rval);
370 }
371
372 /*
373 * Keep ddi_peek() and ddi_poke() in case 3rd parties are calling this.
374 * they shouldn't be, but the 9f manpage kind of pseudo exposes it.
375 */
376 int
377 ddi_peek(dev_info_t *devi, size_t size, void *addr, void *value_p)
378 {
379 switch (size) {
380 case sizeof (uint8_t):
381 case sizeof (uint16_t):
382 case sizeof (uint32_t):
383 case sizeof (uint64_t):
384 break;
385 default:
386 return (DDI_FAILURE);
387 }
388
389 return (i_ddi_peekpoke(devi, DDI_CTLOPS_PEEK, size, addr, value_p));
390 }
391
392 int
393 ddi_poke(dev_info_t *devi, size_t size, void *addr, void *value_p)
394 {
395 switch (size) {
396 case sizeof (uint8_t):
397 case sizeof (uint16_t):
398 case sizeof (uint32_t):
399 case sizeof (uint64_t):
400 break;
401 default:
402 return (DDI_FAILURE);
403 }
404
405 return (i_ddi_peekpoke(devi, DDI_CTLOPS_POKE, size, addr, value_p));
406 }
407
/* ddi_peek{8,16,32,64}(9F): type-safe peek wrappers over i_ddi_peekpoke(). */
int
ddi_peek8(dev_info_t *dip, int8_t *addr, int8_t *val_p)
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr,
	    val_p));
}

int
ddi_peek16(dev_info_t *dip, int16_t *addr, int16_t *val_p)
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr,
	    val_p));
}

int
ddi_peek32(dev_info_t *dip, int32_t *addr, int32_t *val_p)
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr,
	    val_p));
}

int
ddi_peek64(dev_info_t *dip, int64_t *addr, int64_t *val_p)
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr,
	    val_p));
}
435
436
437 /*
438 * We need to separate the old interfaces from the new ones and leave them
439 * in here for a while. Previous versions of the OS defined the new interfaces
440 * to the old interfaces. This way we can fix things up so that we can
441 * eventually remove these interfaces.
442 * e.g. A 3rd party module/driver using ddi_peek8 and built against S10
443 * or earlier will actually have a reference to ddi_peekc in the binary.
444 */
#ifdef _ILP32
/* Legacy S10-era peek entry points; kept only for old 32-bit binaries. */
int
ddi_peekc(dev_info_t *dip, int8_t *addr, int8_t *val_p)
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr,
	    val_p));
}

int
ddi_peeks(dev_info_t *dip, int16_t *addr, int16_t *val_p)
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr,
	    val_p));
}

int
ddi_peekl(dev_info_t *dip, int32_t *addr, int32_t *val_p)
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr,
	    val_p));
}

int
ddi_peekd(dev_info_t *dip, int64_t *addr, int64_t *val_p)
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr,
	    val_p));
}
#endif /* _ILP32 */
474
/* ddi_poke{8,16,32,64}(9F): type-safe poke wrappers over i_ddi_peekpoke(). */
int
ddi_poke8(dev_info_t *dip, int8_t *addr, int8_t val)
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val));
}

int
ddi_poke16(dev_info_t *dip, int16_t *addr, int16_t val)
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val));
}

int
ddi_poke32(dev_info_t *dip, int32_t *addr, int32_t val)
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val));
}

int
ddi_poke64(dev_info_t *dip, int64_t *addr, int64_t val)
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val));
}
498
499 /*
500 * We need to separate the old interfaces from the new ones and leave them
501 * in here for a while. Previous versions of the OS defined the new interfaces
502 * to the old interfaces. This way we can fix things up so that we can
503 * eventually remove these interfaces.
504 * e.g. A 3rd party module/driver using ddi_poke8 and built against S10
505 * or earlier will actually have a reference to ddi_pokec in the binary.
506 */
#ifdef _ILP32
/* Legacy S10-era poke entry points; kept only for old 32-bit binaries. */
int
ddi_pokec(dev_info_t *dip, int8_t *addr, int8_t val)
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val));
}

int
ddi_pokes(dev_info_t *dip, int16_t *addr, int16_t val)
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val));
}

int
ddi_pokel(dev_info_t *dip, int32_t *addr, int32_t val)
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val));
}

int
ddi_poked(dev_info_t *dip, int64_t *addr, int64_t val)
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val));
}
#endif /* _ILP32 */
532
533 /*
534 * ddi_peekpokeio() is used primarily by the mem drivers for moving
535 * data to and from uio structures via peek and poke. Note that we
536 * use "internal" routines ddi_peek and ddi_poke to make this go
537 * slightly faster, avoiding the call overhead ..
538 */
int
ddi_peekpokeio(dev_info_t *devi, struct uio *uio, enum uio_rw rw,
    caddr_t addr, size_t len, uint_t xfersize)
{
	int64_t	ibuffer;	/* staging buffer for one transfer unit */
	int8_t w8;
	size_t sz;
	int o;

	/* cap the unit size at the native word size */
	if (xfersize > sizeof (long))
		xfersize = sizeof (long);

	while (len != 0) {
		/* odd address or odd residual: move a single byte */
		if ((len | (uintptr_t)addr) & 1) {
			sz = sizeof (int8_t);
			if (rw == UIO_WRITE) {
				if ((o = uwritec(uio)) == -1)
					return (DDI_FAILURE);
				if (ddi_poke8(devi, (int8_t *)addr,
				    (int8_t)o) != DDI_SUCCESS)
					return (DDI_FAILURE);
			} else {
				if (i_ddi_peekpoke(devi, DDI_CTLOPS_PEEK, sz,
				    (int8_t *)addr, &w8) != DDI_SUCCESS)
					return (DDI_FAILURE);
				if (ureadc(w8, uio))
					return (DDI_FAILURE);
			}
		} else {
			/*
			 * Pick the widest unit (up to xfersize) that both
			 * the address and the remaining length are aligned
			 * to; each case falls through to the next narrower
			 * width on an alignment mismatch.
			 */
			switch (xfersize) {
			case sizeof (int64_t):
				if (((len | (uintptr_t)addr) &
				    (sizeof (int64_t) - 1)) == 0) {
					sz = xfersize;
					break;
				}
				/*FALLTHROUGH*/
			case sizeof (int32_t):
				if (((len | (uintptr_t)addr) &
				    (sizeof (int32_t) - 1)) == 0) {
					sz = xfersize;
					break;
				}
				/*FALLTHROUGH*/
			default:
				/*
				 * This still assumes that we might have an
				 * I/O bus out there that permits 16-bit
				 * transfers (and that it would be upset by
				 * 32-bit transfers from such locations).
				 */
				sz = sizeof (int16_t);
				break;
			}

			if (rw == UIO_READ) {
				if (i_ddi_peekpoke(devi, DDI_CTLOPS_PEEK, sz,
				    addr, &ibuffer) != DDI_SUCCESS)
					return (DDI_FAILURE);
			}

			/* move the staged unit to/from the caller's uio */
			if (uiomove(&ibuffer, sz, rw, uio))
				return (DDI_FAILURE);

			if (rw == UIO_WRITE) {
				if (i_ddi_peekpoke(devi, DDI_CTLOPS_POKE, sz,
				    addr, &ibuffer) != DDI_SUCCESS)
					return (DDI_FAILURE);
			}
		}
		addr += sz;
		len -= sz;
	}
	return (DDI_SUCCESS);
}
614
615 /*
616 * These routines are used by drivers that do layered ioctls
617 * On sparc, they're implemented in assembler to avoid spilling
618 * register windows in the common (copyin) case ..
619 */
620 #if !defined(__sparc)
621 int
622 ddi_copyin(const void *buf, void *kernbuf, size_t size, int flags)
623 {
624 if (flags & FKIOCTL)
625 return (kcopy(buf, kernbuf, size) ? -1 : 0);
626 return (copyin(buf, kernbuf, size));
627 }
628
629 int
630 ddi_copyout(const void *buf, void *kernbuf, size_t size, int flags)
631 {
632 if (flags & FKIOCTL)
633 return (kcopy(buf, kernbuf, size) ? -1 : 0);
634 return (copyout(buf, kernbuf, size));
635 }
636 #endif /* !__sparc */
637
638 /*
639 * Conversions in nexus pagesize units. We don't duplicate the
640 * 'nil dip' semantics of peek/poke because btopr/btop/ptob are DDI/DKI
641 * routines anyway.
642 */
unsigned long
ddi_btop(dev_info_t *dip, unsigned long bytes)
{
	unsigned long pages;

	/* Let the parent nexus convert bytes to pages (round down). */
	(void) ddi_ctlops(dip, dip, DDI_CTLOPS_BTOP, &bytes, &pages);
	return (pages);
}
651
unsigned long
ddi_btopr(dev_info_t *dip, unsigned long bytes)
{
	unsigned long pages;

	/* Let the parent nexus convert bytes to pages (round up). */
	(void) ddi_ctlops(dip, dip, DDI_CTLOPS_BTOPR, &bytes, &pages);
	return (pages);
}
660
unsigned long
ddi_ptob(dev_info_t *dip, unsigned long pages)
{
	unsigned long bytes;

	/* Let the parent nexus convert pages to bytes. */
	(void) ddi_ctlops(dip, dip, DDI_CTLOPS_PTOB, &pages, &bytes);
	return (bytes);
}
669
/*
 * ddi_enter_critical: begin a short critical section by raising the
 * processor interrupt level to its maximum (spl7); the returned value
 * must be handed back to ddi_exit_critical().
 */
unsigned int
ddi_enter_critical(void)
{
	return ((uint_t)spl7());
}

void
ddi_exit_critical(unsigned int spl)
{
	/* Restore the interrupt priority saved by ddi_enter_critical(). */
	splx((int)spl);
}
681
682 /*
683 * Nexus ctlops punter
684 */
685
686 #if !defined(__sparc)
687 /*
688 * Request bus_ctl parent to handle a bus_ctl request
689 *
690 * (The sparc version is in sparc_ddi.s)
691 */
692 int
693 ddi_ctlops(dev_info_t *d, dev_info_t *r, ddi_ctl_enum_t op, void *a, void *v)
694 {
695 int (*fp)();
696
697 if (!d || !r)
698 return (DDI_FAILURE);
699
700 if ((d = (dev_info_t *)DEVI(d)->devi_bus_ctl) == NULL)
701 return (DDI_FAILURE);
702
703 fp = DEVI(d)->devi_ops->devo_bus_ops->bus_ctl;
704 return ((*fp)(d, r, op, a, v));
705 }
706
707 #endif
708
709 /*
710 * DMA/DVMA setup
711 */
712
713 #if !defined(__sparc)
714 /*
715 * Request bus_dma_ctl parent to fiddle with a dma request.
716 *
717 * (The sparc version is in sparc_subr.s)
718 */
int
ddi_dma_mctl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, enum ddi_dma_ctlops request,
    off_t *offp, size_t *lenp, caddr_t *objp, uint_t flags)
{
	int (*fp)();

	/* route to the closest ancestor implementing bus_dma_ctl */
	if (dip != ddi_root_node())
		dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_ctl;
	fp = DEVI(dip)->devi_ops->devo_bus_ops->bus_dma_ctl;
	return ((*fp) (dip, rdip, handle, request, offp, lenp, objp, flags));
}
731 #endif
732
733 /*
734 * For all DMA control functions, call the DMA control
735 * routine and return status.
736 *
737 * Just plain assume that the parent is to be called.
738 * If a nexus driver or a thread outside the framework
739 * of a nexus driver or a leaf driver calls these functions,
740 * it is up to them to deal with the fact that the parent's
741 * bus_dma_ctl function will be the first one called.
742 */
743
744 #define HD ((ddi_dma_impl_t *)h)->dmai_rdip
745
746 /*
747 * This routine is left in place to satisfy link dependencies
748 * for any 3rd party nexus drivers that rely on it. It is never
749 * called, though.
750 */
/*ARGSUSED*/
int
ddi_dma_map(dev_info_t *dip, dev_info_t *rdip,
    struct ddi_dma_req *dmareqp, ddi_dma_handle_t *handlep)
{
	/* Obsolete interface: unconditionally fails (see comment above). */
	return (DDI_FAILURE);
}
758
759 #if !defined(__sparc)
760
761 /*
762 * The SPARC versions of these routines are done in assembler to
763 * save register windows, so they're in sparc_subr.s.
764 */
765
766 int
767 ddi_dma_allochdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_attr_t *attr,
768 int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *handlep)
769 {
770 int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_attr_t *,
771 int (*)(caddr_t), caddr_t, ddi_dma_handle_t *);
772
773 if (dip != ddi_root_node())
774 dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_allochdl;
775
776 funcp = DEVI(dip)->devi_ops->devo_bus_ops->bus_dma_allochdl;
777 return ((*funcp)(dip, rdip, attr, waitfp, arg, handlep));
778 }
779
780 int
781 ddi_dma_freehdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handlep)
782 {
783 int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t);
784
785 if (dip != ddi_root_node())
786 dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_allochdl;
787
788 funcp = DEVI(dip)->devi_ops->devo_bus_ops->bus_dma_freehdl;
789 return ((*funcp)(dip, rdip, handlep));
790 }
791
int
ddi_dma_bindhdl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, struct ddi_dma_req *dmareq,
    ddi_dma_cookie_t *cp, uint_t *ccountp)
{
	int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t,
	    struct ddi_dma_req *, ddi_dma_cookie_t *, uint_t *);

	/* route to the closest ancestor implementing bus_dma_bindhdl */
	if (dip != ddi_root_node())
		dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_bindhdl;

	funcp = DEVI(dip)->devi_ops->devo_bus_ops->bus_dma_bindhdl;
	return ((*funcp)(dip, rdip, handle, dmareq, cp, ccountp));
}
806
int
ddi_dma_unbindhdl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle)
{
	int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t);

	/* route to the closest ancestor implementing bus_dma_unbindhdl */
	if (dip != ddi_root_node())
		dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_unbindhdl;

	funcp = DEVI(dip)->devi_ops->devo_bus_ops->bus_dma_unbindhdl;
	return ((*funcp)(dip, rdip, handle));
}
819
820
int
ddi_dma_flush(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, off_t off, size_t len,
    uint_t cache_flags)
{
	int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t,
	    off_t, size_t, uint_t);

	/* route to the closest ancestor implementing bus_dma_flush */
	if (dip != ddi_root_node())
		dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_flush;

	funcp = DEVI(dip)->devi_ops->devo_bus_ops->bus_dma_flush;
	return ((*funcp)(dip, rdip, handle, off, len, cache_flags));
}
835
int
ddi_dma_win(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, uint_t win, off_t *offp,
    size_t *lenp, ddi_dma_cookie_t *cookiep, uint_t *ccountp)
{
	int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t,
	    uint_t, off_t *, size_t *, ddi_dma_cookie_t *, uint_t *);

	/* route to the closest ancestor implementing bus_dma_win */
	if (dip != ddi_root_node())
		dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_win;

	funcp = DEVI(dip)->devi_ops->devo_bus_ops->bus_dma_win;
	return ((*funcp)(dip, rdip, handle, win, offp, lenp,
	    cookiep, ccountp));
}
851
int
ddi_dma_sync(ddi_dma_handle_t h, off_t o, size_t l, uint_t whom)
{
	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)h;
	dev_info_t *dip, *rdip;
	int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t, off_t,
	    size_t, uint_t);

	/*
	 * the DMA nexus driver will set DMP_NOSYNC if the
	 * platform does not require any sync operation. For
	 * example if the memory is uncached or consistent
	 * and without any I/O write buffers involved.
	 */
	if ((hp->dmai_rflags & DMP_NOSYNC) == DMP_NOSYNC)
		return (DDI_SUCCESS);

	dip = rdip = hp->dmai_rdip;
	/* sync is implemented through the parent's bus_dma_flush entry */
	if (dip != ddi_root_node())
		dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_flush;
	funcp = DEVI(dip)->devi_ops->devo_bus_ops->bus_dma_flush;
	return ((*funcp)(dip, rdip, h, o, l, whom));
}
875
int
ddi_dma_unbind_handle(ddi_dma_handle_t h)
{
	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)h;
	dev_info_t *dip, *rdip;
	int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t);

	dip = rdip = hp->dmai_rdip;
	if (dip != ddi_root_node())
		dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_unbindhdl;
	/* use the unbind function cached on the requesting device */
	funcp = DEVI(rdip)->devi_bus_dma_unbindfunc;
	return ((*funcp)(dip, rdip, h));
}
889
890 #endif /* !__sparc */
891
892 /*
893 * DMA burst sizes, and transfer minimums
894 */
895
896 int
897 ddi_dma_burstsizes(ddi_dma_handle_t handle)
898 {
899 ddi_dma_impl_t *dimp = (ddi_dma_impl_t *)handle;
900
901 if (!dimp)
902 return (0);
903 else
904 return (dimp->dmai_burstsizes);
905 }
906
907 /*
908 * Given two DMA attribute structures, apply the attributes
909 * of one to the other, following the rules of attributes
910 * and the wishes of the caller.
911 *
912 * The rules of DMA attribute structures are that you cannot
913 * make things *less* restrictive as you apply one set
914 * of attributes to another.
915 *
916 */
void
ddi_dma_attr_merge(ddi_dma_attr_t *attr, ddi_dma_attr_t *mod)
{
	/* address window: intersect the two ranges */
	attr->dma_attr_addr_lo =
	    MAX(attr->dma_attr_addr_lo, mod->dma_attr_addr_lo);
	attr->dma_attr_addr_hi =
	    MIN(attr->dma_attr_addr_hi, mod->dma_attr_addr_hi);
	attr->dma_attr_count_max =
	    MIN(attr->dma_attr_count_max, mod->dma_attr_count_max);
	/* alignment: the stricter (larger) requirement wins */
	attr->dma_attr_align =
	    MAX(attr->dma_attr_align, mod->dma_attr_align);
	/* burst sizes: only those supported by both remain */
	attr->dma_attr_burstsizes =
	    (uint_t)(attr->dma_attr_burstsizes & mod->dma_attr_burstsizes);
	attr->dma_attr_minxfer =
	    maxbit(attr->dma_attr_minxfer, mod->dma_attr_minxfer);
	attr->dma_attr_maxxfer =
	    MIN(attr->dma_attr_maxxfer, mod->dma_attr_maxxfer);
	attr->dma_attr_seg = MIN(attr->dma_attr_seg, mod->dma_attr_seg);
	attr->dma_attr_sgllen = MIN((uint_t)attr->dma_attr_sgllen,
	    (uint_t)mod->dma_attr_sgllen);
	attr->dma_attr_granular =
	    MAX(attr->dma_attr_granular, mod->dma_attr_granular);
}
940
941 /*
942 * mmap/segmap interface:
943 */
944
945 /*
946 * ddi_segmap: setup the default segment driver. Calls the drivers
947 * XXmmap routine to validate the range to be mapped.
948 * Return ENXIO of the range is not valid. Create
949 * a seg_dev segment that contains all of the
950 * necessary information and will reference the
951 * default segment driver routines. It returns zero
952 * on success or non-zero on failure.
953 */
int
ddi_segmap(dev_t dev, off_t offset, struct as *asp, caddr_t *addrp, off_t len,
    uint_t prot, uint_t maxprot, uint_t flags, cred_t *credp)
{
	extern int spec_segmap(dev_t, off_t, struct as *, caddr_t *,
	    off_t, uint_t, uint_t, uint_t, struct cred *);

	/* Let specfs build the default seg_dev mapping on our behalf. */
	return (spec_segmap(dev, offset, asp, addrp, len,
	    prot, maxprot, flags, credp));
}
964
965 /*
966 * ddi_map_fault: Resolve mappings at fault time. Used by segment
967 * drivers. Allows each successive parent to resolve
968 * address translations and add its mappings to the
969 * mapping list supplied in the page structure. It
970 * returns zero on success or non-zero on failure.
971 */
972
int
ddi_map_fault(dev_info_t *dip, struct hat *hat, struct seg *seg,
    caddr_t addr, struct devpage *dp, pfn_t pfn, uint_t prot, uint_t lock)
{
	/* Defer to the implementation-common fault resolution code. */
	return (i_ddi_map_fault(dip, dip, hat, seg, addr, dp, pfn, prot, lock));
}
979
980 /*
981 * ddi_device_mapping_check: Called from ddi_segmap_setup.
982 * Invokes platform specific DDI to determine whether attributes specified
983 * in attr(9s) are valid for the region of memory that will be made
984 * available for direct access to user process via the mmap(2) system call.
985 */
int
ddi_device_mapping_check(dev_t dev, ddi_device_acc_attr_t *accattrp,
    uint_t rnumber, uint_t *hat_flags)
{
	ddi_acc_handle_t handle;
	ddi_map_req_t mr;
	ddi_acc_hdl_t *hp;
	int result;
	dev_info_t *dip;

	/*
	 * we use e_ddi_hold_devi_by_dev to search for the devi. We
	 * release it immediately since it should already be held by
	 * a devfs vnode.
	 */
	if ((dip =
	    e_ddi_hold_devi_by_dev(dev, E_DDI_HOLD_DEVI_NOATTACH)) == NULL)
		return (-1);
	ddi_release_devi(dip);		/* for e_ddi_hold_devi_by_dev() */

	/*
	 * Allocate and initialize the common elements of data
	 * access handle.
	 */
	handle = impl_acc_hdl_alloc(KM_SLEEP, NULL);
	if (handle == NULL)
		return (-1);

	hp = impl_acc_hdl_get(handle);
	hp->ah_vers = VERS_ACCHDL;
	hp->ah_dip = dip;
	hp->ah_rnumber = rnumber;
	hp->ah_offset = 0;
	hp->ah_len = 0;
	hp->ah_acc = *accattrp;

	/*
	 * Set up the mapping request and call to parent.
	 * DDI_MO_MAP_HANDLE only validates/characterizes the mapping;
	 * no actual mapping is established.
	 */
	mr.map_op = DDI_MO_MAP_HANDLE;
	mr.map_type = DDI_MT_RNUMBER;
	mr.map_obj.rnumber = rnumber;
	mr.map_prot = PROT_READ | PROT_WRITE;
	mr.map_flags = DDI_MF_KERNEL_MAPPING;
	mr.map_handlep = hp;
	mr.map_vers = DDI_MAP_VERSION;
	result = ddi_map(dip, &mr, 0, 0, NULL);

	/*
	 * Region must be mappable, pick up flags from the framework.
	 * NOTE(review): ah_hat_flags is copied out before 'result' is
	 * checked; on failure the caller must ignore *hat_flags.
	 */
	*hat_flags = hp->ah_hat_flags;

	impl_acc_hdl_free(handle);

	/*
	 * check for end result.
	 */
	if (result != DDI_SUCCESS)
		return (-1);
	return (0);
}
1048
1049
1050 /*
1051 * Property functions: See also, ddipropdefs.h.
1052 *
1053 * These functions are the framework for the property functions,
1054 * i.e. they support software defined properties. All implementation
1055 * specific property handling (i.e.: self-identifying devices and
1056 * PROM defined properties are handled in the implementation specific
1057 * functions (defined in ddi_implfuncs.h).
1058 */
1059
1060 /*
1061 * nopropop: Shouldn't be called, right?
1062 */
int
nopropop(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op, int mod_flags,
    char *name, caddr_t valuep, int *lengthp)
{
	_NOTE(ARGUNUSED(dev, dip, prop_op, mod_flags, name, valuep, lengthp))
	/* Placeholder prop_op(9E): every lookup reports "not found". */
	return (DDI_PROP_NOT_FOUND);
}
1070
#ifdef DDI_PROP_DEBUG
int ddi_prop_debug_flag = 0;	/* non-zero enables property debug output */

/*
 * ddi_prop_debug: enable (non-zero) or disable (zero) property debugging;
 * returns the previous setting.
 */
int
ddi_prop_debug(int enable)
{
	int prev = ddi_prop_debug_flag;

	if ((enable != 0) || (prev != 0))
		printf("ddi_prop_debug: debugging %s\n",
		    enable ? "enabled" : "disabled");
	ddi_prop_debug_flag = enable;
	return (prev);
}

#endif /* DDI_PROP_DEBUG */
1087
1088 /*
1089 * Search a property list for a match, if found return pointer
1090 * to matching prop struct, else return NULL.
1091 */
1092
1093 ddi_prop_t *
1094 i_ddi_prop_search(dev_t dev, char *name, uint_t flags, ddi_prop_t **list_head)
1095 {
1096 ddi_prop_t *propp;
1097
1098 /*
1099 * find the property in child's devinfo:
1100 * Search order defined by this search function is first matching
1101 * property with input dev == DDI_DEV_T_ANY matching any dev or
1102 * dev == propp->prop_dev, name == propp->name, and the correct
1103 * data type as specified in the flags. If a DDI_DEV_T_NONE dev
1104 * value made it this far then it implies a DDI_DEV_T_ANY search.
1105 */
1106 if (dev == DDI_DEV_T_NONE)
1107 dev = DDI_DEV_T_ANY;
1108
1109 for (propp = *list_head; propp != NULL; propp = propp->prop_next) {
1110
1111 if (!DDI_STRSAME(propp->prop_name, name))
1112 continue;
1113
1114 if ((dev != DDI_DEV_T_ANY) && (propp->prop_dev != dev))
1115 continue;
1116
1117 if (((propp->prop_flags & flags) & DDI_PROP_TYPE_MASK) == 0)
1118 continue;
1119
1120 return (propp);
1121 }
1122
1123 return ((ddi_prop_t *)0);
1124 }
1125
1126 /*
1127 * Search for property within devnames structures
1128 */
1129 ddi_prop_t *
1130 i_ddi_search_global_prop(dev_t dev, char *name, uint_t flags)
1131 {
1132 major_t major;
1133 struct devnames *dnp;
1134 ddi_prop_t *propp;
1135
1136 /*
1137 * Valid dev_t value is needed to index into the
1138 * correct devnames entry, therefore a dev_t
1139 * value of DDI_DEV_T_ANY is not appropriate.
1140 */
1141 ASSERT(dev != DDI_DEV_T_ANY);
1142 if (dev == DDI_DEV_T_ANY) {
1143 return ((ddi_prop_t *)0);
1144 }
1145
1146 major = getmajor(dev);
1147 dnp = &(devnamesp[major]);
1148
1149 if (dnp->dn_global_prop_ptr == NULL)
1150 return ((ddi_prop_t *)0);
1151
1152 LOCK_DEV_OPS(&dnp->dn_lock);
1153
1154 for (propp = dnp->dn_global_prop_ptr->prop_list;
1155 propp != NULL;
1156 propp = (ddi_prop_t *)propp->prop_next) {
1157
1158 if (!DDI_STRSAME(propp->prop_name, name))
1159 continue;
1160
1161 if ((!(flags & DDI_PROP_ROOTNEX_GLOBAL)) &&
1162 (!(flags & LDI_DEV_T_ANY)) && (propp->prop_dev != dev))
1163 continue;
1164
1165 if (((propp->prop_flags & flags) & DDI_PROP_TYPE_MASK) == 0)
1166 continue;
1167
1168 /* Property found, return it */
1169 UNLOCK_DEV_OPS(&dnp->dn_lock);
1170 return (propp);
1171 }
1172
1173 UNLOCK_DEV_OPS(&dnp->dn_lock);
1174 return ((ddi_prop_t *)0);
1175 }
1176
/* Warning format used when a property value buffer cannot be allocated. */
static char prop_no_mem_msg[] = "can't allocate memory for ddi property <%s>";
1178
1179 /*
1180 * ddi_prop_search_global:
1181 * Search the global property list within devnames
1182 * for the named property. Return the encoded value.
1183 */
static int
i_ddi_prop_search_global(dev_t dev, uint_t flags, char *name,
    void *valuep, uint_t *lengthp)
{
	ddi_prop_t *propp;
	caddr_t buffer;

	propp = i_ddi_search_global_prop(dev, name, flags);

	/* Property NOT found, bail */
	if (propp == (ddi_prop_t *)0)
		return (DDI_PROP_NOT_FOUND);

	if (propp->prop_flags & DDI_PROP_UNDEF_IT)
		return (DDI_PROP_UNDEFINED);

	/* Allocation may sleep only if the caller said it can. */
	if ((buffer = kmem_alloc(propp->prop_len,
	    (flags & DDI_PROP_CANSLEEP) ? KM_SLEEP : KM_NOSLEEP)) == NULL) {
		cmn_err(CE_CONT, prop_no_mem_msg, name);
		return (DDI_PROP_NO_MEMORY);
	}

	/*
	 * Return the encoded data.
	 *
	 * NOTE(review): propp is dereferenced here after
	 * i_ddi_search_global_prop() has released dn_lock; this
	 * presumably relies on global properties being stable once
	 * created — confirm before changing property teardown.
	 */
	*(caddr_t *)valuep = buffer;
	*lengthp = propp->prop_len;
	bcopy(propp->prop_val, buffer, propp->prop_len);

	return (DDI_PROP_SUCCESS);
}
1215
1216 /*
1217 * ddi_prop_search_common: Lookup and return the encoded value
1218 */
int
ddi_prop_search_common(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
    uint_t flags, char *name, void *valuep, uint_t *lengthp)
{
	ddi_prop_t *propp;
	int i;
	caddr_t buffer;
	caddr_t prealloc = NULL;	/* buffer allocated while unlocked */
	int plength = 0;		/* size of prealloc, for kmem_free */
	dev_info_t *pdip;
	int (*bop)();

	/*
	 * The loop exists for two reasons: to retry after dropping
	 * devi_lock for a sleeping allocation (the property may have
	 * changed size meanwhile), and to iterate up the devinfo tree
	 * instead of recursing when a property is not found locally.
	 */
	/*CONSTANTCONDITION*/
	while (1) {

		mutex_enter(&(DEVI(dip)->devi_lock));


		/*
		 * find the property in child's devinfo:
		 * Search order is:
		 *	1. driver defined properties
		 *	2. system defined properties
		 *	3. driver global properties
		 *	4. boot defined properties
		 */

		propp = i_ddi_prop_search(dev, name, flags,
		    &(DEVI(dip)->devi_drv_prop_ptr));
		if (propp == NULL) {
			propp = i_ddi_prop_search(dev, name, flags,
			    &(DEVI(dip)->devi_sys_prop_ptr));
		}
		if ((propp == NULL) && DEVI(dip)->devi_global_prop_list) {
			propp = i_ddi_prop_search(dev, name, flags,
			    &DEVI(dip)->devi_global_prop_list->prop_list);
		}

		if (propp == NULL) {
			propp = i_ddi_prop_search(dev, name, flags,
			    &(DEVI(dip)->devi_hw_prop_ptr));
		}

		/*
		 * Software property found?
		 */
		if (propp != (ddi_prop_t *)0) {

			/*
			 * If explicit undefine, return now.
			 */
			if (propp->prop_flags & DDI_PROP_UNDEF_IT) {
				mutex_exit(&(DEVI(dip)->devi_lock));
				if (prealloc)
					kmem_free(prealloc, plength);
				return (DDI_PROP_UNDEFINED);
			}

			/*
			 * If we only want to know if it exists, return now
			 */
			if (prop_op == PROP_EXISTS) {
				mutex_exit(&(DEVI(dip)->devi_lock));
				ASSERT(prealloc == NULL);
				return (DDI_PROP_SUCCESS);
			}

			/*
			 * If length only request or prop length == 0,
			 * service request and return now.
			 */
			if ((prop_op == PROP_LEN) ||(propp->prop_len == 0)) {
				*lengthp = propp->prop_len;

				/*
				 * if prop_op is PROP_LEN_AND_VAL_ALLOC
				 * that means prop_len is 0, so set valuep
				 * also to NULL
				 */
				if (prop_op == PROP_LEN_AND_VAL_ALLOC)
					*(caddr_t *)valuep = NULL;

				mutex_exit(&(DEVI(dip)->devi_lock));
				if (prealloc)
					kmem_free(prealloc, plength);
				return (DDI_PROP_SUCCESS);
			}

			/*
			 * If LEN_AND_VAL_ALLOC and the request can sleep,
			 * drop the mutex, allocate the buffer, and go
			 * through the loop again.  If we already allocated
			 * the buffer, and the size of the property changed,
			 * keep trying...
			 */
			if ((prop_op == PROP_LEN_AND_VAL_ALLOC) &&
			    (flags & DDI_PROP_CANSLEEP)) {
				if (prealloc && (propp->prop_len != plength)) {
					kmem_free(prealloc, plength);
					prealloc = NULL;
				}
				if (prealloc == NULL) {
					plength = propp->prop_len;
					mutex_exit(&(DEVI(dip)->devi_lock));
					/* sleep unlocked, then retry */
					prealloc = kmem_alloc(plength,
					    KM_SLEEP);
					continue;
				}
			}

			/*
			 * Allocate buffer, if required.  Either way,
			 * set `buffer' variable.
			 */
			i = *lengthp; /* Get callers length */
			*lengthp = propp->prop_len; /* Set callers length */

			switch (prop_op) {

			case PROP_LEN_AND_VAL_ALLOC:

				if (prealloc == NULL) {
					buffer = kmem_alloc(propp->prop_len,
					    KM_NOSLEEP);
				} else {
					buffer = prealloc;
				}

				if (buffer == NULL) {
					mutex_exit(&(DEVI(dip)->devi_lock));
					cmn_err(CE_CONT, prop_no_mem_msg, name);
					return (DDI_PROP_NO_MEMORY);
				}
				/* Set callers buf ptr */
				*(caddr_t *)valuep = buffer;
				break;

			case PROP_LEN_AND_VAL_BUF:

				if (propp->prop_len > (i)) {
					mutex_exit(&(DEVI(dip)->devi_lock));
					return (DDI_PROP_BUF_TOO_SMALL);
				}

				buffer = valuep; /* Get callers buf ptr */
				break;

			default:
				break;
			}

			/*
			 * Do the copy.
			 */
			bcopy(propp->prop_val, buffer, propp->prop_len);
			mutex_exit(&(DEVI(dip)->devi_lock));
			return (DDI_PROP_SUCCESS);
		}

		mutex_exit(&(DEVI(dip)->devi_lock));
		if (prealloc)
			kmem_free(prealloc, plength);
		prealloc = NULL;

		/*
		 * Prop not found, call parent bus_ops to deal with possible
		 * h/w layer (possible PROM defined props, etc.) and to
		 * possibly ascend the hierarchy, if allowed by flags.
		 */
		pdip = (dev_info_t *)DEVI(dip)->devi_parent;

		/*
		 * One last call for the root driver PROM props?
		 */
		if (dip == ddi_root_node()) {
			return (ddi_bus_prop_op(dev, dip, dip, prop_op,
			    flags, name, valuep, (int *)lengthp));
		}

		/*
		 * We may have been called to check for properties
		 * within a single devinfo node that has no parent -
		 * see make_prop()
		 */
		if (pdip == NULL) {
			ASSERT((flags &
			    (DDI_PROP_DONTPASS | DDI_PROP_NOTPROM)) ==
			    (DDI_PROP_DONTPASS | DDI_PROP_NOTPROM));
			return (DDI_PROP_NOT_FOUND);
		}

		/*
		 * Instead of recursing, we do iterative calls up the tree.
		 * As a bit of optimization, skip the bus_op level if the
		 * node is a s/w node and if the parent's bus_prop_op function
		 * is `ddi_bus_prop_op', because we know that in this case,
		 * this function does nothing.
		 *
		 * 4225415: If the parent isn't attached, or the child
		 * hasn't been named by the parent yet, use the default
		 * ddi_bus_prop_op as a proxy for the parent.  This
		 * allows property lookups in any child/parent state to
		 * include 'prom' and inherited properties, even when
		 * there are no drivers attached to the child or parent.
		 */

		bop = ddi_bus_prop_op;
		if (i_ddi_devi_attached(pdip) &&
		    (i_ddi_node_state(dip) >= DS_INITIALIZED))
			bop = DEVI(pdip)->devi_ops->devo_bus_ops->bus_prop_op;

		i = DDI_PROP_NOT_FOUND;

		if ((bop != ddi_bus_prop_op) || ndi_dev_is_prom_node(dip)) {
			i = (*bop)(dev, pdip, dip, prop_op,
			    flags | DDI_PROP_DONTPASS,
			    name, valuep, lengthp);
		}

		if ((flags & DDI_PROP_DONTPASS) ||
		    (i != DDI_PROP_NOT_FOUND))
			return (i);

		/* ascend one level and search the parent's lists */
		dip = pdip;
	}
	/*NOTREACHED*/
}
1446
1447
1448 /*
1449 * ddi_prop_op: The basic property operator for drivers.
1450 *
1451 * In ddi_prop_op, the type of valuep is interpreted based on prop_op:
1452 *
1453 * prop_op valuep
1454 * ------ ------
1455 *
1456 * PROP_LEN <unused>
1457 *
1458 * PROP_LEN_AND_VAL_BUF Pointer to callers buffer
1459 *
1460 * PROP_LEN_AND_VAL_ALLOC Address of callers pointer (will be set to
1461 * address of allocated buffer, if successful)
1462 */
int
ddi_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op, int mod_flags,
    char *name, caddr_t valuep, int *lengthp)
{
	int i;

	/* Callers must not pass explicit type bits; they are set below. */
	ASSERT((mod_flags & DDI_PROP_TYPE_MASK) == 0);

	/*
	 * If this was originally an LDI prop lookup then we bail here.
	 * The reason is that the LDI property lookup interfaces first call
	 * a drivers prop_op() entry point to allow it to override
	 * properties.  But if we've made it here, then the driver hasn't
	 * overridden any properties.  We don't want to continue with the
	 * property search here because we don't have any type information.
	 * When we return failure, the LDI interfaces will then proceed to
	 * call the typed property interfaces to look up the property.
	 */
	if (mod_flags & DDI_PROP_DYNAMIC)
		return (DDI_PROP_NOT_FOUND);

	/*
	 * check for pre-typed property consumer asking for typed property:
	 * see e_ddi_getprop_int64.
	 */
	if (mod_flags & DDI_PROP_CONSUMER_TYPED)
		mod_flags |= DDI_PROP_TYPE_INT64;
	mod_flags |= DDI_PROP_TYPE_ANY;

	i = ddi_prop_search_common(dev, dip, prop_op,
	    mod_flags, name, valuep, (uint_t *)lengthp);
	/* a 1275 (PROM) hit is still a success from the caller's view */
	if (i == DDI_PROP_FOUND_1275)
		return (DDI_PROP_SUCCESS);
	return (i);
}
1498
1499 /*
1500 * ddi_prop_op_nblocks_blksize: The basic property operator for drivers that
1501 * maintain size in number of blksize blocks. Provides a dynamic property
1502 * implementation for size oriented properties based on nblocks64 and blksize
1503 * values passed in by the driver. Fallback to ddi_prop_op if the nblocks64
1504 * is too large. This interface should not be used with a nblocks64 that
1505 * represents the driver's idea of how to represent unknown, if nblocks is
1506 * unknown use ddi_prop_op.
1507 */
1508 int
1509 ddi_prop_op_nblocks_blksize(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
1510 int mod_flags, char *name, caddr_t valuep, int *lengthp,
1511 uint64_t nblocks64, uint_t blksize)
1512 {
1513 uint64_t size64;
1514 int blkshift;
1515
1516 /* convert block size to shift value */
1517 ASSERT(BIT_ONLYONESET(blksize));
1518 blkshift = highbit(blksize) - 1;
1519
1520 /*
1521 * There is no point in supporting nblocks64 values that don't have
1522 * an accurate uint64_t byte count representation.
1523 */
1524 if (nblocks64 >= (UINT64_MAX >> blkshift))
1525 return (ddi_prop_op(dev, dip, prop_op, mod_flags,
1526 name, valuep, lengthp));
1527
1528 size64 = nblocks64 << blkshift;
1529 return (ddi_prop_op_size_blksize(dev, dip, prop_op, mod_flags,
1530 name, valuep, lengthp, size64, blksize));
1531 }
1532
1533 /*
1534 * ddi_prop_op_nblocks: ddi_prop_op_nblocks_blksize with DEV_BSIZE blksize.
1535 */
int
ddi_prop_op_nblocks(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
    int mod_flags, char *name, caddr_t valuep, int *lengthp, uint64_t nblocks64)
{
	/* Convenience wrapper: fixed DEV_BSIZE block size. */
	return (ddi_prop_op_nblocks_blksize(dev, dip, prop_op,
	    mod_flags, name, valuep, lengthp, nblocks64, DEV_BSIZE));
}
1543
1544 /*
1545 * ddi_prop_op_size_blksize: The basic property operator for block drivers that
1546 * maintain size in bytes. Provides a of dynamic property implementation for
1547 * size oriented properties based on size64 value and blksize passed in by the
1548 * driver. Fallback to ddi_prop_op if the size64 is too large. This interface
1549 * should not be used with a size64 that represents the driver's idea of how
1550 * to represent unknown, if size is unknown use ddi_prop_op.
1551 *
1552 * NOTE: the legacy "nblocks"/"size" properties are treated as 32-bit unsigned
1553 * integers. While the most likely interface to request them ([bc]devi_size)
1554 * is declared int (signed) there is no enforcement of this, which means we
1555 * can't enforce limitations here without risking regression.
1556 */
int
ddi_prop_op_size_blksize(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
    int mod_flags, char *name, caddr_t valuep, int *lengthp, uint64_t size64,
    uint_t blksize)
{
	uint64_t nblocks64;
	int callers_length;
	caddr_t buffer;
	int blkshift;

	/*
	 * This is a kludge to support capture of size(9P) pure dynamic
	 * properties in snapshots for non-cmlb code (without exposing
	 * i_ddi_prop_dyn changes).  When everyone uses cmlb, this code
	 * should be removed.
	 */
	if (i_ddi_prop_dyn_driver_get(dip) == NULL) {
		static i_ddi_prop_dyn_t prop_dyn_size[] = {
			{"Size", DDI_PROP_TYPE_INT64, S_IFCHR},
			{"Nblocks", DDI_PROP_TYPE_INT64, S_IFBLK},
			{NULL}
		};
		i_ddi_prop_dyn_driver_set(dip, prop_dyn_size);
	}

	/* convert block size to shift value */
	ASSERT(BIT_ONLYONESET(blksize));
	blkshift = highbit(blksize) - 1;

	/* compute the nblocks value in units of blksize */
	nblocks64 = size64 >> blkshift;

	/* get callers length, establish length of our dynamic properties */
	callers_length = *lengthp;

	/*
	 * The 64-bit "Nblocks"/"Size" are always served; the legacy
	 * 32-bit "nblocks"/"size"/"blksize" only when the value fits.
	 * Anything else falls back to the static property search.
	 */
	if (strcmp(name, "Nblocks") == 0)
		*lengthp = sizeof (uint64_t);
	else if (strcmp(name, "Size") == 0)
		*lengthp = sizeof (uint64_t);
	else if ((strcmp(name, "nblocks") == 0) && (nblocks64 < UINT_MAX))
		*lengthp = sizeof (uint32_t);
	else if ((strcmp(name, "size") == 0) && (size64 < UINT_MAX))
		*lengthp = sizeof (uint32_t);
	else if ((strcmp(name, "blksize") == 0) && (blksize < UINT_MAX))
		*lengthp = sizeof (uint32_t);
	else {
		/* fallback to ddi_prop_op */
		return (ddi_prop_op(dev, dip, prop_op, mod_flags,
		    name, valuep, lengthp));
	}

	/* service request for the length of the property */
	if (prop_op == PROP_LEN)
		return (DDI_PROP_SUCCESS);

	switch (prop_op) {
	case PROP_LEN_AND_VAL_ALLOC:
		if ((buffer = kmem_alloc(*lengthp,
		    (mod_flags & DDI_PROP_CANSLEEP) ?
		    KM_SLEEP : KM_NOSLEEP)) == NULL)
			return (DDI_PROP_NO_MEMORY);

		*(caddr_t *)valuep = buffer;	/* set callers buf ptr */
		break;

	case PROP_LEN_AND_VAL_BUF:
		/* the length of the property and the request must match */
		if (callers_length != *lengthp)
			return (DDI_PROP_INVAL_ARG);

		buffer = valuep;		/* get callers buf ptr */
		break;

	default:
		return (DDI_PROP_INVAL_ARG);
	}

	/* transfer the value into the buffer */
	if (strcmp(name, "Nblocks") == 0)
		*((uint64_t *)buffer) = nblocks64;
	else if (strcmp(name, "Size") == 0)
		*((uint64_t *)buffer) = size64;
	else if (strcmp(name, "nblocks") == 0)
		*((uint32_t *)buffer) = (uint32_t)nblocks64;
	else if (strcmp(name, "size") == 0)
		*((uint32_t *)buffer) = (uint32_t)size64;
	else if (strcmp(name, "blksize") == 0)
		*((uint32_t *)buffer) = (uint32_t)blksize;
	return (DDI_PROP_SUCCESS);
}
1647
1648 /*
1649 * ddi_prop_op_size: ddi_prop_op_size_blksize with DEV_BSIZE block size.
1650 */
int
ddi_prop_op_size(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
    int mod_flags, char *name, caddr_t valuep, int *lengthp, uint64_t size64)
{
	/* Convenience wrapper: fixed DEV_BSIZE block size. */
	return (ddi_prop_op_size_blksize(dev, dip, prop_op,
	    mod_flags, name, valuep, lengthp, size64, DEV_BSIZE));
}
1658
1659 /*
1660 * Variable length props...
1661 */
1662
1663 /*
1664 * ddi_getlongprop: Get variable length property len+val into a buffer
1665 * allocated by property provider via kmem_alloc. Requester
1666 * is responsible for freeing returned property via kmem_free.
1667 *
1668 * Arguments:
1669 *
1670 * dev_t: Input: dev_t of property.
1671 * dip: Input: dev_info_t pointer of child.
1672 * flags: Input: Possible flag modifiers are:
1673 * DDI_PROP_DONTPASS: Don't pass to parent if prop not found.
1674 * DDI_PROP_CANSLEEP: Memory allocation may sleep.
1675 * name: Input: name of property.
1676 * valuep: Output: Addr of callers buffer pointer.
1677 * lengthp:Output: *lengthp will contain prop length on exit.
1678 *
1679 * Possible Returns:
1680 *
1681 * DDI_PROP_SUCCESS: Prop found and returned.
1682 * DDI_PROP_NOT_FOUND: Prop not found
1683 * DDI_PROP_UNDEFINED: Prop explicitly undefined.
1684 * DDI_PROP_NO_MEMORY: Prop found, but unable to alloc mem.
1685 */
1686
int
ddi_getlongprop(dev_t dev, dev_info_t *dip, int flags,
    char *name, caddr_t valuep, int *lengthp)
{
	/* Allocating lookup: caller must kmem_free the returned buffer. */
	return (ddi_prop_op(dev, dip, PROP_LEN_AND_VAL_ALLOC,
	    flags, name, valuep, lengthp));
}
1694
1695 /*
1696 *
1697 * ddi_getlongprop_buf: Get long prop into pre-allocated callers
1698 * buffer. (no memory allocation by provider).
1699 *
1700 * dev_t: Input: dev_t of property.
1701 * dip: Input: dev_info_t pointer of child.
1702 * flags: Input: DDI_PROP_DONTPASS or NULL
1703 * name: Input: name of property
1704 * valuep: Input: ptr to callers buffer.
1705 * lengthp:I/O: ptr to length of callers buffer on entry,
1706 * actual length of property on exit.
1707 *
1708 * Possible returns:
1709 *
1710 * DDI_PROP_SUCCESS Prop found and returned
1711 * DDI_PROP_NOT_FOUND Prop not found
1712 * DDI_PROP_UNDEFINED Prop explicitly undefined.
1713 * DDI_PROP_BUF_TOO_SMALL Prop found, callers buf too small,
1714 * no value returned, but actual prop
1715 * length returned in *lengthp
1716 *
1717 */
1718
int
ddi_getlongprop_buf(dev_t dev, dev_info_t *dip, int flags,
    char *name, caddr_t valuep, int *lengthp)
{
	/* Non-allocating lookup into the caller-supplied buffer. */
	return (ddi_prop_op(dev, dip, PROP_LEN_AND_VAL_BUF,
	    flags, name, valuep, lengthp));
}
1726
1727 /*
1728 * Integer/boolean sized props.
1729 *
1730 * Call is value only... returns found boolean or int sized prop value or
1731 * defvalue if prop not found or is wrong length or is explicitly undefined.
1732 * Only flag is DDI_PROP_DONTPASS...
1733 *
1734 * By convention, this interface returns boolean (0) sized properties
1735 * as value (int)1.
1736 *
1737 * This never returns an error, if property not found or specifically
1738 * undefined, the input `defvalue' is returned.
1739 */
1740
1741 int
1742 ddi_getprop(dev_t dev, dev_info_t *dip, int flags, char *name, int defvalue)
1743 {
1744 int propvalue = defvalue;
1745 int proplength = sizeof (int);
1746 int error;
1747
1748 error = ddi_prop_op(dev, dip, PROP_LEN_AND_VAL_BUF,
1749 flags, name, (caddr_t)&propvalue, &proplength);
1750
1751 if ((error == DDI_PROP_SUCCESS) && (proplength == 0))
1752 propvalue = 1;
1753
1754 return (propvalue);
1755 }
1756
1757 /*
1758 * Get prop length interface: flags are 0 or DDI_PROP_DONTPASS
1759 * if returns DDI_PROP_SUCCESS, length returned in *lengthp.
1760 */
1761
int
ddi_getproplen(dev_t dev, dev_info_t *dip, int flags, char *name, int *lengthp)
{
	/* Length-only query; no value buffer is needed (valuep is NULL). */
	return (ddi_prop_op(dev, dip, PROP_LEN, flags, name, NULL, lengthp));
}
1767
1768 /*
1769 * Allocate a struct prop_driver_data, along with 'size' bytes
1770 * for decoded property data. This structure is freed by
1771 * calling ddi_prop_free(9F).
1772 */
1773 static void *
1774 ddi_prop_decode_alloc(size_t size, void (*prop_free)(struct prop_driver_data *))
1775 {
1776 struct prop_driver_data *pdd;
1777
1778 /*
1779 * Allocate a structure with enough memory to store the decoded data.
1780 */
1781 pdd = kmem_zalloc(sizeof (struct prop_driver_data) + size, KM_SLEEP);
1782 pdd->pdd_size = (sizeof (struct prop_driver_data) + size);
1783 pdd->pdd_prop_free = prop_free;
1784
1785 /*
1786 * Return a pointer to the location to put the decoded data.
1787 */
1788 return ((void *)((caddr_t)pdd + sizeof (struct prop_driver_data)));
1789 }
1790
1791 /*
1792 * Allocated the memory needed to store the encoded data in the property
1793 * handle.
1794 */
1795 static int
1796 ddi_prop_encode_alloc(prop_handle_t *ph, size_t size)
1797 {
1798 /*
1799 * If size is zero, then set data to NULL and size to 0. This
1800 * is a boolean property.
1801 */
1802 if (size == 0) {
1803 ph->ph_size = 0;
1804 ph->ph_data = NULL;
1805 ph->ph_cur_pos = NULL;
1806 ph->ph_save_pos = NULL;
1807 } else {
1808 if (ph->ph_flags == DDI_PROP_DONTSLEEP) {
1809 ph->ph_data = kmem_zalloc(size, KM_NOSLEEP);
1810 if (ph->ph_data == NULL)
1811 return (DDI_PROP_NO_MEMORY);
1812 } else
1813 ph->ph_data = kmem_zalloc(size, KM_SLEEP);
1814 ph->ph_size = size;
1815 ph->ph_cur_pos = ph->ph_data;
1816 ph->ph_save_pos = ph->ph_data;
1817 }
1818 return (DDI_PROP_SUCCESS);
1819 }
1820
1821 /*
1822 * Free the space allocated by the lookup routines. Each lookup routine
1823 * returns a pointer to the decoded data to the driver. The driver then
1824 * passes this pointer back to us. This data actually lives in a struct
1825 * prop_driver_data. We use negative indexing to find the beginning of
1826 * the structure and then free the entire structure using the size and
1827 * the free routine stored in the structure.
1828 */
void
ddi_prop_free(void *datap)
{
	struct prop_driver_data *pdd;

	/*
	 * Get the structure: the decoded data handed to the driver
	 * lives immediately after a prop_driver_data header, so back
	 * up over the header to recover it.
	 */
	pdd = (struct prop_driver_data *)
	    ((caddr_t)datap - sizeof (struct prop_driver_data));
	/*
	 * Call the free routine recorded at allocation time; it frees
	 * the header and data in one piece using pdd_size.
	 */
	(*pdd->pdd_prop_free)(pdd);
}
1844
1845 /*
1846 * Free the data associated with an array of ints,
1847 * allocated with ddi_prop_decode_alloc().
1848 */
static void
ddi_prop_free_ints(struct prop_driver_data *pdd)
{
	/* header + data were allocated as one block of pdd_size bytes */
	kmem_free(pdd, pdd->pdd_size);
}
1854
1855 /*
1856 * Free a single string property or a single string contained within
1857 * the argv style return value of an array of strings.
1858 */
static void
ddi_prop_free_string(struct prop_driver_data *pdd)
{
	/* header + string were allocated as one block of pdd_size bytes */
	kmem_free(pdd, pdd->pdd_size);

}
1865
1866 /*
1867 * Free an array of strings.
1868 */
static void
ddi_prop_free_strings(struct prop_driver_data *pdd)
{
	/* header + pointer array + strings live in one pdd_size block */
	kmem_free(pdd, pdd->pdd_size);
}
1874
1875 /*
1876 * Free the data associated with an array of bytes.
1877 */
static void
ddi_prop_free_bytes(struct prop_driver_data *pdd)
{
	/* header + byte array were allocated as one block of pdd_size bytes */
	kmem_free(pdd, pdd->pdd_size);
}
1883
1884 /*
1885 * Reset the current location pointer in the property handle to the
1886 * beginning of the data.
1887 */
void
ddi_prop_reset_pos(prop_handle_t *ph)
{
	/* Rewind both the current and the saved position to the start. */
	ph->ph_cur_pos = ph->ph_data;
	ph->ph_save_pos = ph->ph_data;
}
1894
1895 /*
1896 * Restore the current location pointer in the property handle to the
1897 * saved position.
1898 */
void
ddi_prop_save_pos(prop_handle_t *ph)
{
	/* Record the current position so it can be restored later. */
	ph->ph_save_pos = ph->ph_cur_pos;
}
1904
1905 /*
1906 * Save the location that the current location pointer is pointing to..
1907 */
void
ddi_prop_restore_pos(prop_handle_t *ph)
{
	/* Rewind to the position recorded by ddi_prop_save_pos(). */
	ph->ph_cur_pos = ph->ph_save_pos;
}
1913
1914 /*
1915 * Property encode/decode functions
1916 */
1917
1918 /*
1919 * Decode a single integer property
1920 */
1921 static int
1922 ddi_prop_fm_decode_int(prop_handle_t *ph, void *data, uint_t *nelements)
1923 {
1924 int i;
1925 int tmp;
1926
1927 /*
1928 * If there is nothing to decode return an error
1929 */
1930 if (ph->ph_size == 0)
1931 return (DDI_PROP_END_OF_DATA);
1932
1933 /*
1934 * Decode the property as a single integer and return it
1935 * in data if we were able to decode it.
1936 */
1937 i = DDI_PROP_INT(ph, DDI_PROP_CMD_DECODE, &tmp);
1938 if (i < DDI_PROP_RESULT_OK) {
1939 switch (i) {
1940 case DDI_PROP_RESULT_EOF:
1941 return (DDI_PROP_END_OF_DATA);
1942
1943 case DDI_PROP_RESULT_ERROR:
1944 return (DDI_PROP_CANNOT_DECODE);
1945 }
1946 }
1947
1948 *(int *)data = tmp;
1949 *nelements = 1;
1950 return (DDI_PROP_SUCCESS);
1951 }
1952
1953 /*
1954 * Decode a single 64 bit integer property
1955 */
1956 static int
1957 ddi_prop_fm_decode_int64(prop_handle_t *ph, void *data, uint_t *nelements)
1958 {
1959 int i;
1960 int64_t tmp;
1961
1962 /*
1963 * If there is nothing to decode return an error
1964 */
1965 if (ph->ph_size == 0)
1966 return (DDI_PROP_END_OF_DATA);
1967
1968 /*
1969 * Decode the property as a single integer and return it
1970 * in data if we were able to decode it.
1971 */
1972 i = DDI_PROP_INT64(ph, DDI_PROP_CMD_DECODE, &tmp);
1973 if (i < DDI_PROP_RESULT_OK) {
1974 switch (i) {
1975 case DDI_PROP_RESULT_EOF:
1976 return (DDI_PROP_END_OF_DATA);
1977
1978 case DDI_PROP_RESULT_ERROR:
1979 return (DDI_PROP_CANNOT_DECODE);
1980 }
1981 }
1982
1983 *(int64_t *)data = tmp;
1984 *nelements = 1;
1985 return (DDI_PROP_SUCCESS);
1986 }
1987
1988 /*
1989 * Decode an array of integers property
1990 */
static int
ddi_prop_fm_decode_ints(prop_handle_t *ph, void *data, uint_t *nelements)
{
	int i;
	int cnt = 0;
	int *tmp;
	int *intp;
	int n;

	/*
	 * Figure out how many array elements there are by going through the
	 * data without decoding it first and counting.
	 */
	for (;;) {
		i = DDI_PROP_INT(ph, DDI_PROP_CMD_SKIP, NULL);
		if (i < 0)
			break;
		cnt++;
	}

	/*
	 * If there are no elements return an error
	 */
	if (cnt == 0)
		return (DDI_PROP_END_OF_DATA);

	/*
	 * If we cannot skip through the data, we cannot decode it
	 */
	if (i == DDI_PROP_RESULT_ERROR)
		return (DDI_PROP_CANNOT_DECODE);

	/*
	 * Reset the data pointer to the beginning of the encoded data
	 */
	ddi_prop_reset_pos(ph);

	/*
	 * Allocate memory to store the decoded value in.
	 */
	intp = ddi_prop_decode_alloc((cnt * sizeof (int)),
	    ddi_prop_free_ints);

	/*
	 * Decode each element and place it in the space we just allocated
	 */
	tmp = intp;
	for (n = 0; n < cnt; n++, tmp++) {
		i = DDI_PROP_INT(ph, DDI_PROP_CMD_DECODE, tmp);
		if (i < DDI_PROP_RESULT_OK) {
			/*
			 * Free the space we just allocated
			 * and return an error.
			 */
			ddi_prop_free(intp);
			switch (i) {
			case DDI_PROP_RESULT_EOF:
				return (DDI_PROP_END_OF_DATA);

			case DDI_PROP_RESULT_ERROR:
				return (DDI_PROP_CANNOT_DECODE);
			}
		}
	}

	/* hand the element count and the allocated array to the caller */
	*nelements = cnt;
	*(int **)data = intp;

	return (DDI_PROP_SUCCESS);
}
2061
2062 /*
2063 * Decode a 64 bit integer array property
2064 */
static int
ddi_prop_fm_decode_int64_array(prop_handle_t *ph, void *data, uint_t *nelements)
{
	int i;
	int n;
	int cnt = 0;
	int64_t *tmp;
	int64_t *intp;

	/*
	 * Count the number of array elements by going
	 * through the data without decoding it.
	 */
	for (;;) {
		i = DDI_PROP_INT64(ph, DDI_PROP_CMD_SKIP, NULL);
		if (i < 0)
			break;
		cnt++;
	}

	/*
	 * If there are no elements return an error
	 */
	if (cnt == 0)
		return (DDI_PROP_END_OF_DATA);

	/*
	 * If we cannot skip through the data, we cannot decode it
	 */
	if (i == DDI_PROP_RESULT_ERROR)
		return (DDI_PROP_CANNOT_DECODE);

	/*
	 * Reset the data pointer to the beginning of the encoded data
	 */
	ddi_prop_reset_pos(ph);

	/*
	 * Allocate memory to store the decoded value.
	 */
	intp = ddi_prop_decode_alloc((cnt * sizeof (int64_t)),
	    ddi_prop_free_ints);

	/*
	 * Decode each element and place it in the space allocated
	 */
	tmp = intp;
	for (n = 0; n < cnt; n++, tmp++) {
		i = DDI_PROP_INT64(ph, DDI_PROP_CMD_DECODE, tmp);
		if (i < DDI_PROP_RESULT_OK) {
			/*
			 * Free the space we just allocated
			 * and return an error.
			 */
			ddi_prop_free(intp);
			switch (i) {
			case DDI_PROP_RESULT_EOF:
				return (DDI_PROP_END_OF_DATA);

			case DDI_PROP_RESULT_ERROR:
				return (DDI_PROP_CANNOT_DECODE);
			}
		}
	}

	/* hand the element count and the allocated array to the caller */
	*nelements = cnt;
	*(int64_t **)data = intp;

	return (DDI_PROP_SUCCESS);
}
2135
2136 /*
2137 * Encode an array of integers property (Can be one element)
2138 */
int
ddi_prop_fm_encode_ints(prop_handle_t *ph, void *data, uint_t nelements)
{
	int i;
	int *tmp;
	int cnt;
	int size;

	/*
	 * If there is no data, we cannot do anything
	 */
	if (nelements == 0)
		return (DDI_PROP_CANNOT_ENCODE);

	/*
	 * Get the size of an encoded int.
	 */
	size = DDI_PROP_INT(ph, DDI_PROP_CMD_GET_ESIZE, NULL);

	if (size < DDI_PROP_RESULT_OK) {
		switch (size) {
		case DDI_PROP_RESULT_EOF:
			return (DDI_PROP_END_OF_DATA);

		case DDI_PROP_RESULT_ERROR:
			return (DDI_PROP_CANNOT_ENCODE);
		}
	}

	/*
	 * Allocate space in the handle to store the encoded int.
	 */
	if (ddi_prop_encode_alloc(ph, size * nelements) !=
	    DDI_PROP_SUCCESS)
		return (DDI_PROP_NO_MEMORY);

	/*
	 * Encode the array of ints.
	 */
	tmp = (int *)data;
	for (cnt = 0; cnt < nelements; cnt++, tmp++) {
		i = DDI_PROP_INT(ph, DDI_PROP_CMD_ENCODE, tmp);
		if (i < DDI_PROP_RESULT_OK) {
			/*
			 * NOTE(review): the handle's buffer is not freed
			 * on this error path — presumably the caller
			 * discards the handle; confirm before reuse.
			 */
			switch (i) {
			case DDI_PROP_RESULT_EOF:
				return (DDI_PROP_END_OF_DATA);

			case DDI_PROP_RESULT_ERROR:
				return (DDI_PROP_CANNOT_ENCODE);
			}
		}
	}

	return (DDI_PROP_SUCCESS);
}
2194
2195
2196 /*
2197 * Encode a 64 bit integer array property
2198 */
int
ddi_prop_fm_encode_int64(prop_handle_t *ph, void *data, uint_t nelements)
{
	int i;
	int cnt;
	int size;
	int64_t *tmp;

	/*
	 * If there is no data, we cannot do anything
	 */
	if (nelements == 0)
		return (DDI_PROP_CANNOT_ENCODE);

	/*
	 * Get the size of an encoded 64 bit int.
	 */
	size = DDI_PROP_INT64(ph, DDI_PROP_CMD_GET_ESIZE, NULL);

	if (size < DDI_PROP_RESULT_OK) {
		switch (size) {
		case DDI_PROP_RESULT_EOF:
			return (DDI_PROP_END_OF_DATA);

		case DDI_PROP_RESULT_ERROR:
			return (DDI_PROP_CANNOT_ENCODE);
		}
	}

	/*
	 * Allocate space in the handle to store the encoded int.
	 */
	if (ddi_prop_encode_alloc(ph, size * nelements) !=
	    DDI_PROP_SUCCESS)
		return (DDI_PROP_NO_MEMORY);

	/*
	 * Encode the array of ints.
	 */
	tmp = (int64_t *)data;
	for (cnt = 0; cnt < nelements; cnt++, tmp++) {
		i = DDI_PROP_INT64(ph, DDI_PROP_CMD_ENCODE, tmp);
		if (i < DDI_PROP_RESULT_OK) {
			/*
			 * NOTE(review): the handle's buffer is not freed
			 * on this error path — presumably the caller
			 * discards the handle; confirm before reuse.
			 */
			switch (i) {
			case DDI_PROP_RESULT_EOF:
				return (DDI_PROP_END_OF_DATA);

			case DDI_PROP_RESULT_ERROR:
				return (DDI_PROP_CANNOT_ENCODE);
			}
		}
	}

	return (DDI_PROP_SUCCESS);
}
2254
2255 /*
2256 * Decode a single string property
2257 */
static int
ddi_prop_fm_decode_string(prop_handle_t *ph, void *data, uint_t *nelements)
{
	char *tmp;
	char *str;
	int i;
	int size;

	/*
	 * If there is nothing to decode return an error
	 */
	if (ph->ph_size == 0)
		return (DDI_PROP_END_OF_DATA);

	/*
	 * Get the decoded size of the encoded string.
	 */
	size = DDI_PROP_STR(ph, DDI_PROP_CMD_GET_DSIZE, NULL);
	if (size < DDI_PROP_RESULT_OK) {
		switch (size) {
		case DDI_PROP_RESULT_EOF:
			return (DDI_PROP_END_OF_DATA);

		case DDI_PROP_RESULT_ERROR:
			return (DDI_PROP_CANNOT_DECODE);
		}
	}

	/*
	 * Allocate memory to store the decoded value in.
	 */
	str = ddi_prop_decode_alloc((size_t)size, ddi_prop_free_string);

	/* rewind: GET_DSIZE above advanced the handle's position */
	ddi_prop_reset_pos(ph);

	/*
	 * Decode the str and place it in the space we just allocated
	 */
	tmp = str;
	i = DDI_PROP_STR(ph, DDI_PROP_CMD_DECODE, tmp);
	if (i < DDI_PROP_RESULT_OK) {
		/*
		 * Free the space we just allocated
		 * and return an error.
		 */
		ddi_prop_free(str);
		switch (i) {
		case DDI_PROP_RESULT_EOF:
			return (DDI_PROP_END_OF_DATA);

		case DDI_PROP_RESULT_ERROR:
			return (DDI_PROP_CANNOT_DECODE);
		}
	}

	*(char **)data = str;
	*nelements = 1;

	return (DDI_PROP_SUCCESS);
}
2318
2319 /*
2320 * Decode an array of strings.
2321 */
2322 int
2323 ddi_prop_fm_decode_strings(prop_handle_t *ph, void *data, uint_t *nelements)
2324 {
2325 int cnt = 0;
2326 char **strs;
2327 char **tmp;
2328 char *ptr;
2329 int i;
2330 int n;
2331 int size;
2332 size_t nbytes;
2333
2334 /*
2335 * Figure out how many array elements there are by going through the
2336 * data without decoding it first and counting.
2337 */
2338 for (;;) {
2339 i = DDI_PROP_STR(ph, DDI_PROP_CMD_SKIP, NULL);
2340 if (i < 0)
2341 break;
2342 cnt++;
2343 }
2344
2345 /*
2346 * If there are no elements return an error
2347 */
2348 if (cnt == 0)
2349 return (DDI_PROP_END_OF_DATA);
2350
2351 /*
2352 * If we cannot skip through the data, we cannot decode it
2353 */
2354 if (i == DDI_PROP_RESULT_ERROR)
2355 return (DDI_PROP_CANNOT_DECODE);
2356
2357 /*
2358 * Reset the data pointer to the beginning of the encoded data
2359 */
2360 ddi_prop_reset_pos(ph);
2361
2362 /*
2363 * Figure out how much memory we need for the sum total
2364 */
2365 nbytes = (cnt + 1) * sizeof (char *);
2366
2367 for (n = 0; n < cnt; n++) {
2368 /*
2369 * Get the decoded size of the current encoded string.
2370 */
2371 size = DDI_PROP_STR(ph, DDI_PROP_CMD_GET_DSIZE, NULL);
2372 if (size < DDI_PROP_RESULT_OK) {
2373 switch (size) {
2374 case DDI_PROP_RESULT_EOF:
2375 return (DDI_PROP_END_OF_DATA);
2376
2377 case DDI_PROP_RESULT_ERROR:
2378 return (DDI_PROP_CANNOT_DECODE);
2379 }
2380 }
2381
2382 nbytes += size;
2383 }
2384
2385 /*
2386 * Allocate memory in which to store the decoded strings.
2387 */
2388 strs = ddi_prop_decode_alloc(nbytes, ddi_prop_free_strings);
2389
2390 /*
2391 * Set up pointers for each string by figuring out yet
2392 * again how long each string is.
2393 */
2394 ddi_prop_reset_pos(ph);
2395 ptr = (caddr_t)strs + ((cnt + 1) * sizeof (char *));
2396 for (tmp = strs, n = 0; n < cnt; n++, tmp++) {
2397 /*
2398 * Get the decoded size of the current encoded string.
2399 */
2400 size = DDI_PROP_STR(ph, DDI_PROP_CMD_GET_DSIZE, NULL);
2401 if (size < DDI_PROP_RESULT_OK) {
2402 ddi_prop_free(strs);
2403 switch (size) {
2404 case DDI_PROP_RESULT_EOF:
2405 return (DDI_PROP_END_OF_DATA);
2406
2407 case DDI_PROP_RESULT_ERROR:
2408 return (DDI_PROP_CANNOT_DECODE);
2409 }
2410 }
2411
2412 *tmp = ptr;
2413 ptr += size;
2414 }
2415
2416 /*
2417 * String array is terminated by a NULL
2418 */
2419 *tmp = NULL;
2420
2421 /*
2422 * Finally, we can decode each string
2423 */
2424 ddi_prop_reset_pos(ph);
2425 for (tmp = strs, n = 0; n < cnt; n++, tmp++) {
2426 i = DDI_PROP_STR(ph, DDI_PROP_CMD_DECODE, *tmp);
2427 if (i < DDI_PROP_RESULT_OK) {
2428 /*
2429 * Free the space we just allocated
2430 * and return an error
2431 */
2432 ddi_prop_free(strs);
2433 switch (i) {
2434 case DDI_PROP_RESULT_EOF:
2435 return (DDI_PROP_END_OF_DATA);
2436
2437 case DDI_PROP_RESULT_ERROR:
2438 return (DDI_PROP_CANNOT_DECODE);
2439 }
2440 }
2441 }
2442
2443 *(char ***)data = strs;
2444 *nelements = cnt;
2445
2446 return (DDI_PROP_SUCCESS);
2447 }
2448
2449 /*
2450 * Encode a string.
2451 */
2452 int
2453 ddi_prop_fm_encode_string(prop_handle_t *ph, void *data, uint_t nelements)
2454 {
2455 char **tmp;
2456 int size;
2457 int i;
2458
2459 /*
2460 * If there is no data, we cannot do anything
2461 */
2462 if (nelements == 0)
2463 return (DDI_PROP_CANNOT_ENCODE);
2464
2465 /*
2466 * Get the size of the encoded string.
2467 */
2468 tmp = (char **)data;
2469 size = DDI_PROP_STR(ph, DDI_PROP_CMD_GET_ESIZE, *tmp);
2470 if (size < DDI_PROP_RESULT_OK) {
2471 switch (size) {
2472 case DDI_PROP_RESULT_EOF:
2473 return (DDI_PROP_END_OF_DATA);
2474
2475 case DDI_PROP_RESULT_ERROR:
2476 return (DDI_PROP_CANNOT_ENCODE);
2477 }
2478 }
2479
2480 /*
2481 * Allocate space in the handle to store the encoded string.
2482 */
2483 if (ddi_prop_encode_alloc(ph, size) != DDI_PROP_SUCCESS)
2484 return (DDI_PROP_NO_MEMORY);
2485
2486 ddi_prop_reset_pos(ph);
2487
2488 /*
2489 * Encode the string.
2490 */
2491 tmp = (char **)data;
2492 i = DDI_PROP_STR(ph, DDI_PROP_CMD_ENCODE, *tmp);
2493 if (i < DDI_PROP_RESULT_OK) {
2494 switch (i) {
2495 case DDI_PROP_RESULT_EOF:
2496 return (DDI_PROP_END_OF_DATA);
2497
2498 case DDI_PROP_RESULT_ERROR:
2499 return (DDI_PROP_CANNOT_ENCODE);
2500 }
2501 }
2502
2503 return (DDI_PROP_SUCCESS);
2504 }
2505
2506
2507 /*
2508 * Encode an array of strings.
2509 */
2510 int
2511 ddi_prop_fm_encode_strings(prop_handle_t *ph, void *data, uint_t nelements)
2512 {
2513 int cnt = 0;
2514 char **tmp;
2515 int size;
2516 uint_t total_size;
2517 int i;
2518
2519 /*
2520 * If there is no data, we cannot do anything
2521 */
2522 if (nelements == 0)
2523 return (DDI_PROP_CANNOT_ENCODE);
2524
2525 /*
2526 * Get the total size required to encode all the strings.
2527 */
2528 total_size = 0;
2529 tmp = (char **)data;
2530 for (cnt = 0; cnt < nelements; cnt++, tmp++) {
2531 size = DDI_PROP_STR(ph, DDI_PROP_CMD_GET_ESIZE, *tmp);
2532 if (size < DDI_PROP_RESULT_OK) {
2533 switch (size) {
2534 case DDI_PROP_RESULT_EOF:
2535 return (DDI_PROP_END_OF_DATA);
2536
2537 case DDI_PROP_RESULT_ERROR:
2538 return (DDI_PROP_CANNOT_ENCODE);
2539 }
2540 }
2541 total_size += (uint_t)size;
2542 }
2543
2544 /*
2545 * Allocate space in the handle to store the encoded strings.
2546 */
2547 if (ddi_prop_encode_alloc(ph, total_size) != DDI_PROP_SUCCESS)
2548 return (DDI_PROP_NO_MEMORY);
2549
2550 ddi_prop_reset_pos(ph);
2551
2552 /*
2553 * Encode the array of strings.
2554 */
2555 tmp = (char **)data;
2556 for (cnt = 0; cnt < nelements; cnt++, tmp++) {
2557 i = DDI_PROP_STR(ph, DDI_PROP_CMD_ENCODE, *tmp);
2558 if (i < DDI_PROP_RESULT_OK) {
2559 switch (i) {
2560 case DDI_PROP_RESULT_EOF:
2561 return (DDI_PROP_END_OF_DATA);
2562
2563 case DDI_PROP_RESULT_ERROR:
2564 return (DDI_PROP_CANNOT_ENCODE);
2565 }
2566 }
2567 }
2568
2569 return (DDI_PROP_SUCCESS);
2570 }
2571
2572
2573 /*
2574 * Decode an array of bytes.
2575 */
2576 static int
2577 ddi_prop_fm_decode_bytes(prop_handle_t *ph, void *data, uint_t *nelements)
2578 {
2579 uchar_t *tmp;
2580 int nbytes;
2581 int i;
2582
2583 /*
2584 * If there are no elements return an error
2585 */
2586 if (ph->ph_size == 0)
2587 return (DDI_PROP_END_OF_DATA);
2588
2589 /*
2590 * Get the size of the encoded array of bytes.
2591 */
2592 nbytes = DDI_PROP_BYTES(ph, DDI_PROP_CMD_GET_DSIZE,
2593 data, ph->ph_size);
2594 if (nbytes < DDI_PROP_RESULT_OK) {
2595 switch (nbytes) {
2596 case DDI_PROP_RESULT_EOF:
2597 return (DDI_PROP_END_OF_DATA);
2598
2599 case DDI_PROP_RESULT_ERROR:
2600 return (DDI_PROP_CANNOT_DECODE);
2601 }
2602 }
2603
2604 /*
2605 * Allocated memory to store the decoded value in.
2606 */
2607 tmp = ddi_prop_decode_alloc(nbytes, ddi_prop_free_bytes);
2608
2609 /*
2610 * Decode each element and place it in the space we just allocated
2611 */
2612 i = DDI_PROP_BYTES(ph, DDI_PROP_CMD_DECODE, tmp, nbytes);
2613 if (i < DDI_PROP_RESULT_OK) {
2614 /*
2615 * Free the space we just allocated
2616 * and return an error
2617 */
2618 ddi_prop_free(tmp);
2619 switch (i) {
2620 case DDI_PROP_RESULT_EOF:
2621 return (DDI_PROP_END_OF_DATA);
2622
2623 case DDI_PROP_RESULT_ERROR:
2624 return (DDI_PROP_CANNOT_DECODE);
2625 }
2626 }
2627
2628 *(uchar_t **)data = tmp;
2629 *nelements = nbytes;
2630
2631 return (DDI_PROP_SUCCESS);
2632 }
2633
2634 /*
2635 * Encode an array of bytes.
2636 */
2637 int
2638 ddi_prop_fm_encode_bytes(prop_handle_t *ph, void *data, uint_t nelements)
2639 {
2640 int size;
2641 int i;
2642
2643 /*
2644 * If there are no elements, then this is a boolean property,
2645 * so just create a property handle with no data and return.
2646 */
2647 if (nelements == 0) {
2648 (void) ddi_prop_encode_alloc(ph, 0);
2649 return (DDI_PROP_SUCCESS);
2650 }
2651
2652 /*
2653 * Get the size of the encoded array of bytes.
2654 */
2655 size = DDI_PROP_BYTES(ph, DDI_PROP_CMD_GET_ESIZE, (uchar_t *)data,
2656 nelements);
2657 if (size < DDI_PROP_RESULT_OK) {
2658 switch (size) {
2659 case DDI_PROP_RESULT_EOF:
2660 return (DDI_PROP_END_OF_DATA);
2661
2662 case DDI_PROP_RESULT_ERROR:
2663 return (DDI_PROP_CANNOT_DECODE);
2664 }
2665 }
2666
2667 /*
2668 * Allocate space in the handle to store the encoded bytes.
2669 */
2670 if (ddi_prop_encode_alloc(ph, (uint_t)size) != DDI_PROP_SUCCESS)
2671 return (DDI_PROP_NO_MEMORY);
2672
2673 /*
2674 * Encode the array of bytes.
2675 */
2676 i = DDI_PROP_BYTES(ph, DDI_PROP_CMD_ENCODE, (uchar_t *)data,
2677 nelements);
2678 if (i < DDI_PROP_RESULT_OK) {
2679 switch (i) {
2680 case DDI_PROP_RESULT_EOF:
2681 return (DDI_PROP_END_OF_DATA);
2682
2683 case DDI_PROP_RESULT_ERROR:
2684 return (DDI_PROP_CANNOT_ENCODE);
2685 }
2686 }
2687
2688 return (DDI_PROP_SUCCESS);
2689 }
2690
2691 /*
2692 * OBP 1275 integer, string and byte operators.
2693 *
2694 * DDI_PROP_CMD_DECODE:
2695 *
2696 * DDI_PROP_RESULT_ERROR: cannot decode the data
2697 * DDI_PROP_RESULT_EOF: end of data
2698 * DDI_PROP_OK: data was decoded
2699 *
2700 * DDI_PROP_CMD_ENCODE:
2701 *
2702 * DDI_PROP_RESULT_ERROR: cannot encode the data
2703 * DDI_PROP_RESULT_EOF: end of data
2704 * DDI_PROP_OK: data was encoded
2705 *
2706 * DDI_PROP_CMD_SKIP:
2707 *
2708 * DDI_PROP_RESULT_ERROR: cannot skip the data
2709 * DDI_PROP_RESULT_EOF: end of data
2710 * DDI_PROP_OK: data was skipped
2711 *
2712 * DDI_PROP_CMD_GET_ESIZE:
2713 *
2714 * DDI_PROP_RESULT_ERROR: cannot get encoded size
2715 * DDI_PROP_RESULT_EOF: end of data
2716 * > 0: the encoded size
2717 *
2718 * DDI_PROP_CMD_GET_DSIZE:
2719 *
2720 * DDI_PROP_RESULT_ERROR: cannot get decoded size
2721 * DDI_PROP_RESULT_EOF: end of data
2722 * > 0: the decoded size
2723 */
2724
2725 /*
2726 * OBP 1275 integer operator
2727 *
2728 * OBP properties are a byte stream of data, so integers may not be
2729 * properly aligned. Therefore we need to copy them one byte at a time.
2730 */
2731 int
2732 ddi_prop_1275_int(prop_handle_t *ph, uint_t cmd, int *data)
2733 {
2734 int i;
2735
2736 switch (cmd) {
2737 case DDI_PROP_CMD_DECODE:
2738 /*
2739 * Check that there is encoded data
2740 */
2741 if (ph->ph_cur_pos == NULL || ph->ph_size == 0)
2742 return (DDI_PROP_RESULT_ERROR);
2743 if (ph->ph_flags & PH_FROM_PROM) {
2744 i = MIN(ph->ph_size, PROP_1275_INT_SIZE);
2745 if ((int *)ph->ph_cur_pos > ((int *)ph->ph_data +
2746 ph->ph_size - i))
2747 return (DDI_PROP_RESULT_ERROR);
2748 } else {
2749 if (ph->ph_size < sizeof (int) ||
2750 ((int *)ph->ph_cur_pos > ((int *)ph->ph_data +
2751 ph->ph_size - sizeof (int))))
2752 return (DDI_PROP_RESULT_ERROR);
2753 }
2754
2755 /*
2756 * Copy the integer, using the implementation-specific
2757 * copy function if the property is coming from the PROM.
2758 */
2759 if (ph->ph_flags & PH_FROM_PROM) {
2760 *data = impl_ddi_prop_int_from_prom(
2761 (uchar_t *)ph->ph_cur_pos,
2762 (ph->ph_size < PROP_1275_INT_SIZE) ?
2763 ph->ph_size : PROP_1275_INT_SIZE);
2764 } else {
2765 bcopy(ph->ph_cur_pos, data, sizeof (int));
2766 }
2767
2768 /*
2769 * Move the current location to the start of the next
2770 * bit of undecoded data.
2771 */
2772 ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos +
2773 PROP_1275_INT_SIZE;
2774 return (DDI_PROP_RESULT_OK);
2775
2776 case DDI_PROP_CMD_ENCODE:
2777 /*
2778 * Check that there is room to encoded the data
2779 */
2780 if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
2781 ph->ph_size < PROP_1275_INT_SIZE ||
2782 ((int *)ph->ph_cur_pos > ((int *)ph->ph_data +
2783 ph->ph_size - sizeof (int))))
2784 return (DDI_PROP_RESULT_ERROR);
2785
2786 /*
2787 * Encode the integer into the byte stream one byte at a
2788 * time.
2789 */
2790 bcopy(data, ph->ph_cur_pos, sizeof (int));
2791
2792 /*
2793 * Move the current location to the start of the next bit of
2794 * space where we can store encoded data.
2795 */
2796 ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos + PROP_1275_INT_SIZE;
2797 return (DDI_PROP_RESULT_OK);
2798
2799 case DDI_PROP_CMD_SKIP:
2800 /*
2801 * Check that there is encoded data
2802 */
2803 if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
2804 ph->ph_size < PROP_1275_INT_SIZE)
2805 return (DDI_PROP_RESULT_ERROR);
2806
2807
2808 if ((caddr_t)ph->ph_cur_pos ==
2809 (caddr_t)ph->ph_data + ph->ph_size) {
2810 return (DDI_PROP_RESULT_EOF);
2811 } else if ((caddr_t)ph->ph_cur_pos >
2812 (caddr_t)ph->ph_data + ph->ph_size) {
2813 return (DDI_PROP_RESULT_EOF);
2814 }
2815
2816 /*
2817 * Move the current location to the start of the next bit of
2818 * undecoded data.
2819 */
2820 ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos + PROP_1275_INT_SIZE;
2821 return (DDI_PROP_RESULT_OK);
2822
2823 case DDI_PROP_CMD_GET_ESIZE:
2824 /*
2825 * Return the size of an encoded integer on OBP
2826 */
2827 return (PROP_1275_INT_SIZE);
2828
2829 case DDI_PROP_CMD_GET_DSIZE:
2830 /*
2831 * Return the size of a decoded integer on the system.
2832 */
2833 return (sizeof (int));
2834
2835 default:
2836 #ifdef DEBUG
2837 panic("ddi_prop_1275_int: %x impossible", cmd);
2838 /*NOTREACHED*/
2839 #else
2840 return (DDI_PROP_RESULT_ERROR);
2841 #endif /* DEBUG */
2842 }
2843 }
2844
2845 /*
2846 * 64 bit integer operator.
2847 *
2848 * This is an extension, defined by Sun, to the 1275 integer
2849 * operator. This routine handles the encoding/decoding of
2850 * 64 bit integer properties.
2851 */
2852 int
2853 ddi_prop_int64_op(prop_handle_t *ph, uint_t cmd, int64_t *data)
2854 {
2855
2856 switch (cmd) {
2857 case DDI_PROP_CMD_DECODE:
2858 /*
2859 * Check that there is encoded data
2860 */
2861 if (ph->ph_cur_pos == NULL || ph->ph_size == 0)
2862 return (DDI_PROP_RESULT_ERROR);
2863 if (ph->ph_flags & PH_FROM_PROM) {
2864 return (DDI_PROP_RESULT_ERROR);
2865 } else {
2866 if (ph->ph_size < sizeof (int64_t) ||
2867 ((int64_t *)ph->ph_cur_pos >
2868 ((int64_t *)ph->ph_data +
2869 ph->ph_size - sizeof (int64_t))))
2870 return (DDI_PROP_RESULT_ERROR);
2871 }
2872 /*
2873 * Copy the integer, using the implementation-specific
2874 * copy function if the property is coming from the PROM.
2875 */
2876 if (ph->ph_flags & PH_FROM_PROM) {
2877 return (DDI_PROP_RESULT_ERROR);
2878 } else {
2879 bcopy(ph->ph_cur_pos, data, sizeof (int64_t));
2880 }
2881
2882 /*
2883 * Move the current location to the start of the next
2884 * bit of undecoded data.
2885 */
2886 ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos +
2887 sizeof (int64_t);
2888 return (DDI_PROP_RESULT_OK);
2889
2890 case DDI_PROP_CMD_ENCODE:
2891 /*
2892 * Check that there is room to encoded the data
2893 */
2894 if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
2895 ph->ph_size < sizeof (int64_t) ||
2896 ((int64_t *)ph->ph_cur_pos > ((int64_t *)ph->ph_data +
2897 ph->ph_size - sizeof (int64_t))))
2898 return (DDI_PROP_RESULT_ERROR);
2899
2900 /*
2901 * Encode the integer into the byte stream one byte at a
2902 * time.
2903 */
2904 bcopy(data, ph->ph_cur_pos, sizeof (int64_t));
2905
2906 /*
2907 * Move the current location to the start of the next bit of
2908 * space where we can store encoded data.
2909 */
2910 ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos +
2911 sizeof (int64_t);
2912 return (DDI_PROP_RESULT_OK);
2913
2914 case DDI_PROP_CMD_SKIP:
2915 /*
2916 * Check that there is encoded data
2917 */
2918 if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
2919 ph->ph_size < sizeof (int64_t))
2920 return (DDI_PROP_RESULT_ERROR);
2921
2922 if ((caddr_t)ph->ph_cur_pos ==
2923 (caddr_t)ph->ph_data + ph->ph_size) {
2924 return (DDI_PROP_RESULT_EOF);
2925 } else if ((caddr_t)ph->ph_cur_pos >
2926 (caddr_t)ph->ph_data + ph->ph_size) {
2927 return (DDI_PROP_RESULT_EOF);
2928 }
2929
2930 /*
2931 * Move the current location to the start of
2932 * the next bit of undecoded data.
2933 */
2934 ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos +
2935 sizeof (int64_t);
2936 return (DDI_PROP_RESULT_OK);
2937
2938 case DDI_PROP_CMD_GET_ESIZE:
2939 /*
2940 * Return the size of an encoded integer on OBP
2941 */
2942 return (sizeof (int64_t));
2943
2944 case DDI_PROP_CMD_GET_DSIZE:
2945 /*
2946 * Return the size of a decoded integer on the system.
2947 */
2948 return (sizeof (int64_t));
2949
2950 default:
2951 #ifdef DEBUG
2952 panic("ddi_prop_int64_op: %x impossible", cmd);
2953 /*NOTREACHED*/
2954 #else
2955 return (DDI_PROP_RESULT_ERROR);
2956 #endif /* DEBUG */
2957 }
2958 }
2959
2960 /*
2961 * OBP 1275 string operator.
2962 *
2963 * OBP strings are NULL terminated.
2964 */
2965 int
2966 ddi_prop_1275_string(prop_handle_t *ph, uint_t cmd, char *data)
2967 {
2968 int n;
2969 char *p;
2970 char *end;
2971
2972 switch (cmd) {
2973 case DDI_PROP_CMD_DECODE:
2974 /*
2975 * Check that there is encoded data
2976 */
2977 if (ph->ph_cur_pos == NULL || ph->ph_size == 0) {
2978 return (DDI_PROP_RESULT_ERROR);
2979 }
2980
2981 /*
2982 * Match DDI_PROP_CMD_GET_DSIZE logic for when to stop and
2983 * how to NULL terminate result.
2984 */
2985 p = (char *)ph->ph_cur_pos;
2986 end = (char *)ph->ph_data + ph->ph_size;
2987 if (p >= end)
2988 return (DDI_PROP_RESULT_EOF);
2989
2990 while (p < end) {
2991 *data++ = *p;
2992 if (*p++ == 0) { /* NULL from OBP */
2993 ph->ph_cur_pos = p;
2994 return (DDI_PROP_RESULT_OK);
2995 }
2996 }
2997
2998 /*
2999 * If OBP did not NULL terminate string, which happens
3000 * (at least) for 'true'/'false' boolean values, account for
3001 * the space and store null termination on decode.
3002 */
3003 ph->ph_cur_pos = p;
3004 *data = 0;
3005 return (DDI_PROP_RESULT_OK);
3006
3007 case DDI_PROP_CMD_ENCODE:
3008 /*
3009 * Check that there is room to encoded the data
3010 */
3011 if (ph->ph_cur_pos == NULL || ph->ph_size == 0) {
3012 return (DDI_PROP_RESULT_ERROR);
3013 }
3014
3015 n = strlen(data) + 1;
3016 if ((char *)ph->ph_cur_pos > ((char *)ph->ph_data +
3017 ph->ph_size - n)) {
3018 return (DDI_PROP_RESULT_ERROR);
3019 }
3020
3021 /*
3022 * Copy the NULL terminated string
3023 */
3024 bcopy(data, ph->ph_cur_pos, n);
3025
3026 /*
3027 * Move the current location to the start of the next bit of
3028 * space where we can store encoded data.
3029 */
3030 ph->ph_cur_pos = (char *)ph->ph_cur_pos + n;
3031 return (DDI_PROP_RESULT_OK);
3032
3033 case DDI_PROP_CMD_SKIP:
3034 /*
3035 * Check that there is encoded data
3036 */
3037 if (ph->ph_cur_pos == NULL || ph->ph_size == 0) {
3038 return (DDI_PROP_RESULT_ERROR);
3039 }
3040
3041 /*
3042 * Return the string length plus one for the NULL
3043 * We know the size of the property, we need to
3044 * ensure that the string is properly formatted,
3045 * since we may be looking up random OBP data.
3046 */
3047 p = (char *)ph->ph_cur_pos;
3048 end = (char *)ph->ph_data + ph->ph_size;
3049 if (p >= end)
3050 return (DDI_PROP_RESULT_EOF);
3051
3052 while (p < end) {
3053 if (*p++ == 0) { /* NULL from OBP */
3054 ph->ph_cur_pos = p;
3055 return (DDI_PROP_RESULT_OK);
3056 }
3057 }
3058
3059 /*
3060 * Accommodate the fact that OBP does not always NULL
3061 * terminate strings.
3062 */
3063 ph->ph_cur_pos = p;
3064 return (DDI_PROP_RESULT_OK);
3065
3066 case DDI_PROP_CMD_GET_ESIZE:
3067 /*
3068 * Return the size of the encoded string on OBP.
3069 */
3070 return (strlen(data) + 1);
3071
3072 case DDI_PROP_CMD_GET_DSIZE:
3073 /*
3074 * Return the string length plus one for the NULL.
3075 * We know the size of the property, we need to
3076 * ensure that the string is properly formatted,
3077 * since we may be looking up random OBP data.
3078 */
3079 p = (char *)ph->ph_cur_pos;
3080 end = (char *)ph->ph_data + ph->ph_size;
3081 if (p >= end)
3082 return (DDI_PROP_RESULT_EOF);
3083
3084 for (n = 0; p < end; n++) {
3085 if (*p++ == 0) { /* NULL from OBP */
3086 ph->ph_cur_pos = p;
3087 return (n + 1);
3088 }
3089 }
3090
3091 /*
3092 * If OBP did not NULL terminate string, which happens for
3093 * 'true'/'false' boolean values, account for the space
3094 * to store null termination here.
3095 */
3096 ph->ph_cur_pos = p;
3097 return (n + 1);
3098
3099 default:
3100 #ifdef DEBUG
3101 panic("ddi_prop_1275_string: %x impossible", cmd);
3102 /*NOTREACHED*/
3103 #else
3104 return (DDI_PROP_RESULT_ERROR);
3105 #endif /* DEBUG */
3106 }
3107 }
3108
3109 /*
3110 * OBP 1275 byte operator
3111 *
3112 * Caller must specify the number of bytes to get. OBP encodes bytes
3113 * as a byte so there is a 1-to-1 translation.
3114 */
3115 int
3116 ddi_prop_1275_bytes(prop_handle_t *ph, uint_t cmd, uchar_t *data,
3117 uint_t nelements)
3118 {
3119 switch (cmd) {
3120 case DDI_PROP_CMD_DECODE:
3121 /*
3122 * Check that there is encoded data
3123 */
3124 if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
3125 ph->ph_size < nelements ||
3126 ((char *)ph->ph_cur_pos > ((char *)ph->ph_data +
3127 ph->ph_size - nelements)))
3128 return (DDI_PROP_RESULT_ERROR);
3129
3130 /*
3131 * Copy out the bytes
3132 */
3133 bcopy(ph->ph_cur_pos, data, nelements);
3134
3135 /*
3136 * Move the current location
3137 */
3138 ph->ph_cur_pos = (char *)ph->ph_cur_pos + nelements;
3139 return (DDI_PROP_RESULT_OK);
3140
3141 case DDI_PROP_CMD_ENCODE:
3142 /*
3143 * Check that there is room to encode the data
3144 */
3145 if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
3146 ph->ph_size < nelements ||
3147 ((char *)ph->ph_cur_pos > ((char *)ph->ph_data +
3148 ph->ph_size - nelements)))
3149 return (DDI_PROP_RESULT_ERROR);
3150
3151 /*
3152 * Copy in the bytes
3153 */
3154 bcopy(data, ph->ph_cur_pos, nelements);
3155
3156 /*
3157 * Move the current location to the start of the next bit of
3158 * space where we can store encoded data.
3159 */
3160 ph->ph_cur_pos = (char *)ph->ph_cur_pos + nelements;
3161 return (DDI_PROP_RESULT_OK);
3162
3163 case DDI_PROP_CMD_SKIP:
3164 /*
3165 * Check that there is encoded data
3166 */
3167 if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
3168 ph->ph_size < nelements)
3169 return (DDI_PROP_RESULT_ERROR);
3170
3171 if ((char *)ph->ph_cur_pos > ((char *)ph->ph_data +
3172 ph->ph_size - nelements))
3173 return (DDI_PROP_RESULT_EOF);
3174
3175 /*
3176 * Move the current location
3177 */
3178 ph->ph_cur_pos = (char *)ph->ph_cur_pos + nelements;
3179 return (DDI_PROP_RESULT_OK);
3180
3181 case DDI_PROP_CMD_GET_ESIZE:
3182 /*
3183 * The size in bytes of the encoded size is the
3184 * same as the decoded size provided by the caller.
3185 */
3186 return (nelements);
3187
3188 case DDI_PROP_CMD_GET_DSIZE:
3189 /*
3190 * Just return the number of bytes specified by the caller.
3191 */
3192 return (nelements);
3193
3194 default:
3195 #ifdef DEBUG
3196 panic("ddi_prop_1275_bytes: %x impossible", cmd);
3197 /*NOTREACHED*/
3198 #else
3199 return (DDI_PROP_RESULT_ERROR);
3200 #endif /* DEBUG */
3201 }
3202 }
3203
3204 /*
3205 * Used for properties that come from the OBP, hardware configuration files,
3206 * or that are created by calls to ddi_prop_update(9F).
3207 */
3208 static struct prop_handle_ops prop_1275_ops = {
3209 ddi_prop_1275_int,
3210 ddi_prop_1275_string,
3211 ddi_prop_1275_bytes,
3212 ddi_prop_int64_op
3213 };
3214
3215
3216 /*
3217 * Interface to create/modify a managed property on child's behalf...
3218 * Flags interpreted are:
3219 * DDI_PROP_CANSLEEP: Allow memory allocation to sleep.
3220 * DDI_PROP_SYSTEM_DEF: Manipulate system list rather than driver list.
3221 *
3222 * Use same dev_t when modifying or undefining a property.
3223 * Search for properties with DDI_DEV_T_ANY to match first named
3224 * property on the list.
3225 *
3226 * Properties are stored LIFO and subsequently will match the first
3227 * `matching' instance.
3228 */
3229
3230 /*
3231 * ddi_prop_add: Add a software defined property
3232 */
3233
3234 /*
3235 * define to get a new ddi_prop_t.
3236 * km_flags are KM_SLEEP or KM_NOSLEEP.
3237 */
3238
3239 #define DDI_NEW_PROP_T(km_flags) \
3240 (kmem_zalloc(sizeof (ddi_prop_t), km_flags))
3241
3242 static int
3243 ddi_prop_add(dev_t dev, dev_info_t *dip, int flags,
3244 char *name, caddr_t value, int length)
3245 {
3246 ddi_prop_t *new_propp, *propp;
3247 ddi_prop_t **list_head = &(DEVI(dip)->devi_drv_prop_ptr);
3248 int km_flags = KM_NOSLEEP;
3249 int name_buf_len;
3250
3251 /*
3252 * If dev_t is DDI_DEV_T_ANY or name's length is zero return error.
3253 */
3254
3255 if (dev == DDI_DEV_T_ANY || name == (char *)0 || strlen(name) == 0)
3256 return (DDI_PROP_INVAL_ARG);
3257
3258 if (flags & DDI_PROP_CANSLEEP)
3259 km_flags = KM_SLEEP;
3260
3261 if (flags & DDI_PROP_SYSTEM_DEF)
3262 list_head = &(DEVI(dip)->devi_sys_prop_ptr);
3263 else if (flags & DDI_PROP_HW_DEF)
3264 list_head = &(DEVI(dip)->devi_hw_prop_ptr);
3265
3266 if ((new_propp = DDI_NEW_PROP_T(km_flags)) == NULL) {
3267 cmn_err(CE_CONT, prop_no_mem_msg, name);
3268 return (DDI_PROP_NO_MEMORY);
3269 }
3270
3271 /*
3272 * If dev is major number 0, then we need to do a ddi_name_to_major
3273 * to get the real major number for the device. This needs to be
3274 * done because some drivers need to call ddi_prop_create in their
3275 * attach routines but they don't have a dev. By creating the dev
3276 * ourself if the major number is 0, drivers will not have to know what
3277 * their major number. They can just create a dev with major number
3278 * 0 and pass it in. For device 0, we will be doing a little extra
3279 * work by recreating the same dev that we already have, but its the
3280 * price you pay :-).
3281 *
3282 * This fixes bug #1098060.
3283 */
3284 if (getmajor(dev) == DDI_MAJOR_T_UNKNOWN) {
3285 new_propp->prop_dev =
3286 makedevice(ddi_name_to_major(DEVI(dip)->devi_binding_name),
3287 getminor(dev));
3288 } else
3289 new_propp->prop_dev = dev;
3290
3291 /*
3292 * Allocate space for property name and copy it in...
3293 */
3294
3295 name_buf_len = strlen(name) + 1;
3296 new_propp->prop_name = kmem_alloc(name_buf_len, km_flags);
3297 if (new_propp->prop_name == 0) {
3298 kmem_free(new_propp, sizeof (ddi_prop_t));
3299 cmn_err(CE_CONT, prop_no_mem_msg, name);
3300 return (DDI_PROP_NO_MEMORY);
3301 }
3302 bcopy(name, new_propp->prop_name, name_buf_len);
3303
3304 /*
3305 * Set the property type
3306 */
3307 new_propp->prop_flags = flags & DDI_PROP_TYPE_MASK;
3308
3309 /*
3310 * Set length and value ONLY if not an explicit property undefine:
3311 * NOTE: value and length are zero for explicit undefines.
3312 */
3313
3314 if (flags & DDI_PROP_UNDEF_IT) {
3315 new_propp->prop_flags |= DDI_PROP_UNDEF_IT;
3316 } else {
3317 if ((new_propp->prop_len = length) != 0) {
3318 new_propp->prop_val = kmem_alloc(length, km_flags);
3319 if (new_propp->prop_val == 0) {
3320 kmem_free(new_propp->prop_name, name_buf_len);
3321 kmem_free(new_propp, sizeof (ddi_prop_t));
3322 cmn_err(CE_CONT, prop_no_mem_msg, name);
3323 return (DDI_PROP_NO_MEMORY);
3324 }
3325 bcopy(value, new_propp->prop_val, length);
3326 }
3327 }
3328
3329 /*
3330 * Link property into beginning of list. (Properties are LIFO order.)
3331 */
3332
3333 mutex_enter(&(DEVI(dip)->devi_lock));
3334 propp = *list_head;
3335 new_propp->prop_next = propp;
3336 *list_head = new_propp;
3337 mutex_exit(&(DEVI(dip)->devi_lock));
3338 return (DDI_PROP_SUCCESS);
3339 }
3340
3341
3342 /*
3343 * ddi_prop_change: Modify a software managed property value
3344 *
3345 * Set new length and value if found.
3346 * returns DDI_PROP_INVAL_ARG if dev is DDI_DEV_T_ANY or
3347 * input name is the NULL string.
3348 * returns DDI_PROP_NO_MEMORY if unable to allocate memory
3349 *
3350 * Note: an undef can be modified to be a define,
3351 * (you can't go the other way.)
3352 */
3353
3354 static int
3355 ddi_prop_change(dev_t dev, dev_info_t *dip, int flags,
3356 char *name, caddr_t value, int length)
3357 {
3358 ddi_prop_t *propp;
3359 ddi_prop_t **ppropp;
3360 caddr_t p = NULL;
3361
3362 if ((dev == DDI_DEV_T_ANY) || (name == NULL) || (strlen(name) == 0))
3363 return (DDI_PROP_INVAL_ARG);
3364
3365 /*
3366 * Preallocate buffer, even if we don't need it...
3367 */
3368 if (length != 0) {
3369 p = kmem_alloc(length, (flags & DDI_PROP_CANSLEEP) ?
3370 KM_SLEEP : KM_NOSLEEP);
3371 if (p == NULL) {
3372 cmn_err(CE_CONT, prop_no_mem_msg, name);
3373 return (DDI_PROP_NO_MEMORY);
3374 }
3375 }
3376
3377 /*
3378 * If the dev_t value contains DDI_MAJOR_T_UNKNOWN for the major
3379 * number, a real dev_t value should be created based upon the dip's
3380 * binding driver. See ddi_prop_add...
3381 */
3382 if (getmajor(dev) == DDI_MAJOR_T_UNKNOWN)
3383 dev = makedevice(
3384 ddi_name_to_major(DEVI(dip)->devi_binding_name),
3385 getminor(dev));
3386
3387 /*
3388 * Check to see if the property exists. If so we modify it.
3389 * Else we create it by calling ddi_prop_add().
3390 */
3391 mutex_enter(&(DEVI(dip)->devi_lock));
3392 ppropp = &DEVI(dip)->devi_drv_prop_ptr;
3393 if (flags & DDI_PROP_SYSTEM_DEF)
3394 ppropp = &DEVI(dip)->devi_sys_prop_ptr;
3395 else if (flags & DDI_PROP_HW_DEF)
3396 ppropp = &DEVI(dip)->devi_hw_prop_ptr;
3397
3398 if ((propp = i_ddi_prop_search(dev, name, flags, ppropp)) != NULL) {
3399 /*
3400 * Need to reallocate buffer? If so, do it
3401 * carefully (reuse same space if new prop
3402 * is same size and non-NULL sized).
3403 */
3404 if (length != 0)
3405 bcopy(value, p, length);
3406
3407 if (propp->prop_len != 0)
3408 kmem_free(propp->prop_val, propp->prop_len);
3409
3410 propp->prop_len = length;
3411 propp->prop_val = p;
3412 propp->prop_flags &= ~DDI_PROP_UNDEF_IT;
3413 mutex_exit(&(DEVI(dip)->devi_lock));
3414 return (DDI_PROP_SUCCESS);
3415 }
3416
3417 mutex_exit(&(DEVI(dip)->devi_lock));
3418 if (length != 0)
3419 kmem_free(p, length);
3420
3421 return (ddi_prop_add(dev, dip, flags, name, value, length));
3422 }
3423
3424 /*
3425 * Common update routine used to update and encode a property. Creates
3426 * a property handle, calls the property encode routine, figures out if
3427 * the property already exists and updates if it does. Otherwise it
3428 * creates if it does not exist.
3429 */
3430 int
3431 ddi_prop_update_common(dev_t match_dev, dev_info_t *dip, int flags,
3432 char *name, void *data, uint_t nelements,
3433 int (*prop_create)(prop_handle_t *, void *data, uint_t nelements))
3434 {
3435 prop_handle_t ph;
3436 int rval;
3437 uint_t ourflags;
3438
3439 /*
3440 * If dev_t is DDI_DEV_T_ANY or name's length is zero,
3441 * return error.
3442 */
3443 if (match_dev == DDI_DEV_T_ANY || name == NULL || strlen(name) == 0)
3444 return (DDI_PROP_INVAL_ARG);
3445
3446 /*
3447 * Create the handle
3448 */
3449 ph.ph_data = NULL;
3450 ph.ph_cur_pos = NULL;
3451 ph.ph_save_pos = NULL;
3452 ph.ph_size = 0;
3453 ph.ph_ops = &prop_1275_ops;
3454
3455 /*
3456 * ourflags:
3457 * For compatibility with the old interfaces. The old interfaces
3458 * didn't sleep by default and slept when the flag was set. These
3459 * interfaces to the opposite. So the old interfaces now set the
3460 * DDI_PROP_DONTSLEEP flag by default which tells us not to sleep.
3461 *
3462 * ph.ph_flags:
3463 * Blocked data or unblocked data allocation
3464 * for ph.ph_data in ddi_prop_encode_alloc()
3465 */
3466 if (flags & DDI_PROP_DONTSLEEP) {
3467 ourflags = flags;
3468 ph.ph_flags = DDI_PROP_DONTSLEEP;
3469 } else {
3470 ourflags = flags | DDI_PROP_CANSLEEP;
3471 ph.ph_flags = DDI_PROP_CANSLEEP;
3472 }
3473
3474 /*
3475 * Encode the data and store it in the property handle by
3476 * calling the prop_encode routine.
3477 */
3478 if ((rval = (*prop_create)(&ph, data, nelements)) !=
3479 DDI_PROP_SUCCESS) {
3480 if (rval == DDI_PROP_NO_MEMORY)
3481 cmn_err(CE_CONT, prop_no_mem_msg, name);
3482 if (ph.ph_size != 0)
3483 kmem_free(ph.ph_data, ph.ph_size);
3484 return (rval);
3485 }
3486
3487 /*
3488 * The old interfaces use a stacking approach to creating
3489 * properties. If we are being called from the old interfaces,
3490 * the DDI_PROP_STACK_CREATE flag will be set, so we just do a
3491 * create without checking.
3492 */
3493 if (flags & DDI_PROP_STACK_CREATE) {
3494 rval = ddi_prop_add(match_dev, dip,
3495 ourflags, name, ph.ph_data, ph.ph_size);
3496 } else {
3497 rval = ddi_prop_change(match_dev, dip,
3498 ourflags, name, ph.ph_data, ph.ph_size);
3499 }
3500
3501 /*
3502 * Free the encoded data allocated in the prop_encode routine.
3503 */
3504 if (ph.ph_size != 0)
3505 kmem_free(ph.ph_data, ph.ph_size);
3506
3507 return (rval);
3508 }
3509
3510
3511 /*
3512 * ddi_prop_create: Define a managed property:
3513 * See above for details.
3514 */
3515
3516 int
3517 ddi_prop_create(dev_t dev, dev_info_t *dip, int flag,
3518 char *name, caddr_t value, int length)
3519 {
3520 if (!(flag & DDI_PROP_CANSLEEP)) {
3521 flag |= DDI_PROP_DONTSLEEP;
3522 #ifdef DDI_PROP_DEBUG
3523 if (length != 0)
3524 cmn_err(CE_NOTE, "!ddi_prop_create: interface obsolete,"
3525 "use ddi_prop_update (prop = %s, node = %s%d)",
3526 name, ddi_driver_name(dip), ddi_get_instance(dip));
3527 #endif /* DDI_PROP_DEBUG */
3528 }
3529 flag &= ~DDI_PROP_SYSTEM_DEF;
3530 flag |= DDI_PROP_STACK_CREATE | DDI_PROP_TYPE_ANY;
3531 return (ddi_prop_update_common(dev, dip, flag, name,
3532 value, length, ddi_prop_fm_encode_bytes));
3533 }
3534
3535 int
3536 e_ddi_prop_create(dev_t dev, dev_info_t *dip, int flag,
3537 char *name, caddr_t value, int length)
3538 {
3539 if (!(flag & DDI_PROP_CANSLEEP))
3540 flag |= DDI_PROP_DONTSLEEP;
3541 flag |= DDI_PROP_SYSTEM_DEF | DDI_PROP_STACK_CREATE | DDI_PROP_TYPE_ANY;
3542 return (ddi_prop_update_common(dev, dip, flag,
3543 name, value, length, ddi_prop_fm_encode_bytes));
3544 }
3545
3546 int
3547 ddi_prop_modify(dev_t dev, dev_info_t *dip, int flag,
3548 char *name, caddr_t value, int length)
3549 {
3550 ASSERT((flag & DDI_PROP_TYPE_MASK) == 0);
3551
3552 /*
3553 * If dev_t is DDI_DEV_T_ANY or name's length is zero,
3554 * return error.
3555 */
3556 if (dev == DDI_DEV_T_ANY || name == NULL || strlen(name) == 0)
3557 return (DDI_PROP_INVAL_ARG);
3558
3559 if (!(flag & DDI_PROP_CANSLEEP))
3560 flag |= DDI_PROP_DONTSLEEP;
3561 flag &= ~DDI_PROP_SYSTEM_DEF;
3562 if (ddi_prop_exists(dev, dip, (flag | DDI_PROP_NOTPROM), name) == 0)
3563 return (DDI_PROP_NOT_FOUND);
3564
3565 return (ddi_prop_update_common(dev, dip,
3566 (flag | DDI_PROP_TYPE_BYTE), name,
3567 value, length, ddi_prop_fm_encode_bytes));
3568 }
3569
3570 int
3571 e_ddi_prop_modify(dev_t dev, dev_info_t *dip, int flag,
3572 char *name, caddr_t value, int length)
3573 {
3574 ASSERT((flag & DDI_PROP_TYPE_MASK) == 0);
3575
3576 /*
3577 * If dev_t is DDI_DEV_T_ANY or name's length is zero,
3578 * return error.
3579 */
3580 if (dev == DDI_DEV_T_ANY || name == NULL || strlen(name) == 0)
3581 return (DDI_PROP_INVAL_ARG);
3582
3583 if (ddi_prop_exists(dev, dip, (flag | DDI_PROP_SYSTEM_DEF), name) == 0)
3584 return (DDI_PROP_NOT_FOUND);
3585
3586 if (!(flag & DDI_PROP_CANSLEEP))
3587 flag |= DDI_PROP_DONTSLEEP;
3588 return (ddi_prop_update_common(dev, dip,
3589 (flag | DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_BYTE),
3590 name, value, length, ddi_prop_fm_encode_bytes));
3591 }
3592
3593
/*
 * Common lookup routine used to lookup and decode a property.
 * Creates a property handle, searches for the raw encoded data,
 * fills in the handle, and calls the property decode functions
 * passed in.
 *
 * This routine is not static because ddi_bus_prop_op() which lives in
 * ddi_impl.c calls it. No driver should be calling this routine.
 *
 * Returns DDI_PROP_SUCCESS, or an error from the search or from the
 * caller-supplied decoder.
 */
int
ddi_prop_lookup_common(dev_t match_dev, dev_info_t *dip,
    uint_t flags, char *name, void *data, uint_t *nelements,
    int (*prop_decoder)(prop_handle_t *, void *data, uint_t *nelements))
{
    int rval;
    uint_t ourflags;
    prop_handle_t ph;

    /* DDI_DEV_T_NONE wildcard and missing/empty names are invalid. */
    if ((match_dev == DDI_DEV_T_NONE) ||
        (name == NULL) || (strlen(name) == 0))
        return (DDI_PROP_INVAL_ARG);

    /* Lookups may sleep unless the caller explicitly forbids it. */
    ourflags = (flags & DDI_PROP_DONTSLEEP) ? flags :
        flags | DDI_PROP_CANSLEEP;

    /*
     * Get the encoded data
     */
    bzero(&ph, sizeof (prop_handle_t));

    if ((flags & DDI_UNBND_DLPI2) || (flags & DDI_PROP_ROOTNEX_GLOBAL)) {
        /*
         * For rootnex and unbound dlpi style-2 devices, index into
         * the devnames' array and search the global
         * property list.
         */
        ourflags &= ~DDI_UNBND_DLPI2;
        rval = i_ddi_prop_search_global(match_dev,
            ourflags, name, &ph.ph_data, &ph.ph_size);
    } else {
        rval = ddi_prop_search_common(match_dev, dip,
            PROP_LEN_AND_VAL_ALLOC, ourflags, name,
            &ph.ph_data, &ph.ph_size);

    }

    if (rval != DDI_PROP_SUCCESS && rval != DDI_PROP_FOUND_1275) {
        /* On failure the search must not have allocated anything. */
        ASSERT(ph.ph_data == NULL);
        ASSERT(ph.ph_size == 0);
        return (rval);
    }

    /*
     * If the encoded data came from a OBP or software
     * use the 1275 OBP decode/encode routines.
     * PH_FROM_PROM tells the decoder the bytes are PROM-encoded.
     */
    ph.ph_cur_pos = ph.ph_data;
    ph.ph_save_pos = ph.ph_data;
    ph.ph_ops = &prop_1275_ops;
    ph.ph_flags = (rval == DDI_PROP_FOUND_1275) ? PH_FROM_PROM : 0;

    rval = (*prop_decoder)(&ph, data, nelements);

    /*
     * Free the encoded data (the decoder made its own copy for
     * the caller).
     */
    if (ph.ph_size != 0)
        kmem_free(ph.ph_data, ph.ph_size);

    return (rval);
}
3665
3666 /*
3667 * Lookup and return an array of composite properties. The driver must
3668 * provide the decode routine.
3669 */
3670 int
3671 ddi_prop_lookup(dev_t match_dev, dev_info_t *dip,
3672 uint_t flags, char *name, void *data, uint_t *nelements,
3673 int (*prop_decoder)(prop_handle_t *, void *data, uint_t *nelements))
3674 {
3675 return (ddi_prop_lookup_common(match_dev, dip,
3676 (flags | DDI_PROP_TYPE_COMPOSITE), name,
3677 data, nelements, prop_decoder));
3678 }
3679
3680 /*
3681 * Return 1 if a property exists (no type checking done).
3682 * Return 0 if it does not exist.
3683 */
3684 int
3685 ddi_prop_exists(dev_t match_dev, dev_info_t *dip, uint_t flags, char *name)
3686 {
3687 int i;
3688 uint_t x = 0;
3689
3690 i = ddi_prop_search_common(match_dev, dip, PROP_EXISTS,
3691 flags | DDI_PROP_TYPE_MASK, name, NULL, &x);
3692 return (i == DDI_PROP_SUCCESS || i == DDI_PROP_FOUND_1275);
3693 }
3694
3695
3696 /*
3697 * Update an array of composite properties. The driver must
3698 * provide the encode routine.
3699 */
3700 int
3701 ddi_prop_update(dev_t match_dev, dev_info_t *dip,
3702 char *name, void *data, uint_t nelements,
3703 int (*prop_create)(prop_handle_t *, void *data, uint_t nelements))
3704 {
3705 return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_COMPOSITE,
3706 name, data, nelements, prop_create));
3707 }
3708
/*
 * Get a single integer or boolean property and return it.
 * If the property does not exists, or cannot be decoded,
 * then return the defvalue passed in.
 *
 * This routine always succeeds.
 */
int
ddi_prop_get_int(dev_t match_dev, dev_info_t *dip, uint_t flags,
    char *name, int defvalue)
{
    int data;
    uint_t nelements;
    int rval;

    /*
     * Warn (DEBUG kernels only) about unsupported flag bits, then
     * strip them and continue rather than failing the lookup.
     * NOTE(review): the mask applied below drops
     * DDI_PROP_ROOTNEX_GLOBAL even though it is accepted by the
     * check above -- confirm whether that is intentional.
     */
    if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
        LDI_DEV_T_ANY | DDI_UNBND_DLPI2 | DDI_PROP_ROOTNEX_GLOBAL)) {
#ifdef DEBUG
        if (dip != NULL) {
            cmn_err(CE_WARN, "ddi_prop_get_int: invalid flag"
                " 0x%x (prop = %s, node = %s%d)", flags,
                name, ddi_driver_name(dip), ddi_get_instance(dip));
        }
#endif /* DEBUG */
        flags &= DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
            LDI_DEV_T_ANY | DDI_UNBND_DLPI2;
    }

    if ((rval = ddi_prop_lookup_common(match_dev, dip,
        (flags | DDI_PROP_TYPE_INT), name, &data, &nelements,
        ddi_prop_fm_decode_int)) != DDI_PROP_SUCCESS) {
        /* a zero-length (boolean) property reads as "true" (1) */
        if (rval == DDI_PROP_END_OF_DATA)
            data = 1;
        else
            data = defvalue;
    }
    return (data);
}
3747
/*
 * Get a single 64 bit integer or boolean property and return it.
 * If the property does not exists, or cannot be decoded,
 * then return the defvalue passed in.
 *
 * This routine always succeeds.
 * (Unlike ddi_prop_get_int(), invalid flag bits cause an immediate
 * DDI_PROP_INVAL_ARG return rather than being stripped.)
 */
int64_t
ddi_prop_get_int64(dev_t match_dev, dev_info_t *dip, uint_t flags,
    char *name, int64_t defvalue)
{
    int64_t data;
    uint_t nelements;
    int rval;

    if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
        LDI_DEV_T_ANY | DDI_UNBND_DLPI2 | DDI_PROP_ROOTNEX_GLOBAL)) {
#ifdef DEBUG
        if (dip != NULL) {
            cmn_err(CE_WARN, "ddi_prop_get_int64: invalid flag"
                " 0x%x (prop = %s, node = %s%d)", flags,
                name, ddi_driver_name(dip), ddi_get_instance(dip));
        }
#endif /* DEBUG */
        return (DDI_PROP_INVAL_ARG);
    }

    /* 64-bit properties are software-only: force DDI_PROP_NOTPROM. */
    if ((rval = ddi_prop_lookup_common(match_dev, dip,
        (flags | DDI_PROP_TYPE_INT64 | DDI_PROP_NOTPROM),
        name, &data, &nelements, ddi_prop_fm_decode_int64))
        != DDI_PROP_SUCCESS) {
        /* a zero-length (boolean) property reads as "true" (1) */
        if (rval == DDI_PROP_END_OF_DATA)
            data = 1;
        else
            data = defvalue;
    }
    return (data);
}
3786
/*
 * Get an array of integer property.
 * On success the decoder allocates *data; per DDI convention the
 * caller releases it (presumably via ddi_prop_free(9F) -- confirm).
 */
int
ddi_prop_lookup_int_array(dev_t match_dev, dev_info_t *dip, uint_t flags,
    char *name, int **data, uint_t *nelements)
{
    /*
     * Warn (DEBUG kernels only) about unsupported flag bits, then
     * strip them and continue.
     */
    if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
        LDI_DEV_T_ANY | DDI_UNBND_DLPI2 | DDI_PROP_ROOTNEX_GLOBAL)) {
#ifdef DEBUG
        if (dip != NULL) {
            cmn_err(CE_WARN, "ddi_prop_lookup_int_array: "
                "invalid flag 0x%x (prop = %s, node = %s%d)",
                flags, name, ddi_driver_name(dip),
                ddi_get_instance(dip));
        }
#endif /* DEBUG */
        flags &= DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
            LDI_DEV_T_ANY | DDI_UNBND_DLPI2;
    }

    return (ddi_prop_lookup_common(match_dev, dip,
        (flags | DDI_PROP_TYPE_INT), name, data,
        nelements, ddi_prop_fm_decode_ints));
}
3812
/*
 * Get an array of 64 bit integer properties.
 * Invalid flag bits are rejected outright (DDI_PROP_INVAL_ARG), and
 * the search is restricted to software properties (DDI_PROP_NOTPROM).
 */
int
ddi_prop_lookup_int64_array(dev_t match_dev, dev_info_t *dip, uint_t flags,
    char *name, int64_t **data, uint_t *nelements)
{
    if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
        LDI_DEV_T_ANY | DDI_UNBND_DLPI2 | DDI_PROP_ROOTNEX_GLOBAL)) {
#ifdef DEBUG
        if (dip != NULL) {
            cmn_err(CE_WARN, "ddi_prop_lookup_int64_array: "
                "invalid flag 0x%x (prop = %s, node = %s%d)",
                flags, name, ddi_driver_name(dip),
                ddi_get_instance(dip));
        }
#endif /* DEBUG */
        return (DDI_PROP_INVAL_ARG);
    }

    return (ddi_prop_lookup_common(match_dev, dip,
        (flags | DDI_PROP_TYPE_INT64 | DDI_PROP_NOTPROM),
        name, data, nelements, ddi_prop_fm_decode_int64_array));
}
3837
3838 /*
3839 * Update a single integer property. If the property exists on the drivers
3840 * property list it updates, else it creates it.
3841 */
3842 int
3843 ddi_prop_update_int(dev_t match_dev, dev_info_t *dip,
3844 char *name, int data)
3845 {
3846 return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_INT,
3847 name, &data, 1, ddi_prop_fm_encode_ints));
3848 }
3849
3850 /*
3851 * Update a single 64 bit integer property.
3852 * Update the driver property list if it exists, else create it.
3853 */
3854 int
3855 ddi_prop_update_int64(dev_t match_dev, dev_info_t *dip,
3856 char *name, int64_t data)
3857 {
3858 return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_INT64,
3859 name, &data, 1, ddi_prop_fm_encode_int64));
3860 }
3861
3862 int
3863 e_ddi_prop_update_int(dev_t match_dev, dev_info_t *dip,
3864 char *name, int data)
3865 {
3866 return (ddi_prop_update_common(match_dev, dip,
3867 DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_INT,
3868 name, &data, 1, ddi_prop_fm_encode_ints));
3869 }
3870
3871 int
3872 e_ddi_prop_update_int64(dev_t match_dev, dev_info_t *dip,
3873 char *name, int64_t data)
3874 {
3875 return (ddi_prop_update_common(match_dev, dip,
3876 DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_INT64,
3877 name, &data, 1, ddi_prop_fm_encode_int64));
3878 }
3879
3880 /*
3881 * Update an array of integer property. If the property exists on the drivers
3882 * property list it updates, else it creates it.
3883 */
3884 int
3885 ddi_prop_update_int_array(dev_t match_dev, dev_info_t *dip,
3886 char *name, int *data, uint_t nelements)
3887 {
3888 return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_INT,
3889 name, data, nelements, ddi_prop_fm_encode_ints));
3890 }
3891
3892 /*
3893 * Update an array of 64 bit integer properties.
3894 * Update the driver property list if it exists, else create it.
3895 */
3896 int
3897 ddi_prop_update_int64_array(dev_t match_dev, dev_info_t *dip,
3898 char *name, int64_t *data, uint_t nelements)
3899 {
3900 return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_INT64,
3901 name, data, nelements, ddi_prop_fm_encode_int64));
3902 }
3903
3904 int
3905 e_ddi_prop_update_int64_array(dev_t match_dev, dev_info_t *dip,
3906 char *name, int64_t *data, uint_t nelements)
3907 {
3908 return (ddi_prop_update_common(match_dev, dip,
3909 DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_INT64,
3910 name, data, nelements, ddi_prop_fm_encode_int64));
3911 }
3912
3913 int
3914 e_ddi_prop_update_int_array(dev_t match_dev, dev_info_t *dip,
3915 char *name, int *data, uint_t nelements)
3916 {
3917 return (ddi_prop_update_common(match_dev, dip,
3918 DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_INT,
3919 name, data, nelements, ddi_prop_fm_encode_ints));
3920 }
3921
/*
 * Get a single string property.
 * On success the decoder allocates *data; per DDI convention the
 * caller releases it (presumably via ddi_prop_free(9F) -- confirm).
 */
int
ddi_prop_lookup_string(dev_t match_dev, dev_info_t *dip, uint_t flags,
    char *name, char **data)
{
    uint_t x;    /* element count; discarded for a single string */

    /*
     * Warn (DEBUG kernels only) about unsupported flag bits, then
     * strip them and continue.
     */
    if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
        LDI_DEV_T_ANY | DDI_UNBND_DLPI2 | DDI_PROP_ROOTNEX_GLOBAL)) {
#ifdef DEBUG
        if (dip != NULL) {
            cmn_err(CE_WARN, "%s: invalid flag 0x%x "
                "(prop = %s, node = %s%d); invalid bits ignored",
                "ddi_prop_lookup_string", flags, name,
                ddi_driver_name(dip), ddi_get_instance(dip));
        }
#endif /* DEBUG */
        flags &= DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
            LDI_DEV_T_ANY | DDI_UNBND_DLPI2;
    }

    return (ddi_prop_lookup_common(match_dev, dip,
        (flags | DDI_PROP_TYPE_STRING), name, data,
        &x, ddi_prop_fm_decode_string));
}
3949
/*
 * Get an array of strings property.
 * On success the decoder allocates *data; per DDI convention the
 * caller releases it (presumably via ddi_prop_free(9F) -- confirm).
 */
int
ddi_prop_lookup_string_array(dev_t match_dev, dev_info_t *dip, uint_t flags,
    char *name, char ***data, uint_t *nelements)
{
    /*
     * Warn (DEBUG kernels only) about unsupported flag bits, then
     * strip them and continue.
     */
    if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
        LDI_DEV_T_ANY | DDI_UNBND_DLPI2 | DDI_PROP_ROOTNEX_GLOBAL)) {
#ifdef DEBUG
        if (dip != NULL) {
            cmn_err(CE_WARN, "ddi_prop_lookup_string_array: "
                "invalid flag 0x%x (prop = %s, node = %s%d)",
                flags, name, ddi_driver_name(dip),
                ddi_get_instance(dip));
        }
#endif /* DEBUG */
        flags &= DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
            LDI_DEV_T_ANY | DDI_UNBND_DLPI2;
    }

    return (ddi_prop_lookup_common(match_dev, dip,
        (flags | DDI_PROP_TYPE_STRING), name, data,
        nelements, ddi_prop_fm_decode_strings));
}
3975
3976 /*
3977 * Update a single string property.
3978 */
3979 int
3980 ddi_prop_update_string(dev_t match_dev, dev_info_t *dip,
3981 char *name, char *data)
3982 {
3983 return (ddi_prop_update_common(match_dev, dip,
3984 DDI_PROP_TYPE_STRING, name, &data, 1,
3985 ddi_prop_fm_encode_string));
3986 }
3987
3988 int
3989 e_ddi_prop_update_string(dev_t match_dev, dev_info_t *dip,
3990 char *name, char *data)
3991 {
3992 return (ddi_prop_update_common(match_dev, dip,
3993 DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_STRING,
3994 name, &data, 1, ddi_prop_fm_encode_string));
3995 }
3996
3997
3998 /*
3999 * Update an array of strings property.
4000 */
4001 int
4002 ddi_prop_update_string_array(dev_t match_dev, dev_info_t *dip,
4003 char *name, char **data, uint_t nelements)
4004 {
4005 return (ddi_prop_update_common(match_dev, dip,
4006 DDI_PROP_TYPE_STRING, name, data, nelements,
4007 ddi_prop_fm_encode_strings));
4008 }
4009
4010 int
4011 e_ddi_prop_update_string_array(dev_t match_dev, dev_info_t *dip,
4012 char *name, char **data, uint_t nelements)
4013 {
4014 return (ddi_prop_update_common(match_dev, dip,
4015 DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_STRING,
4016 name, data, nelements,
4017 ddi_prop_fm_encode_strings));
4018 }
4019
4020
/*
 * Get an array of bytes property.
 * On success the decoder allocates *data; per DDI convention the
 * caller releases it (presumably via ddi_prop_free(9F) -- confirm).
 */
int
ddi_prop_lookup_byte_array(dev_t match_dev, dev_info_t *dip, uint_t flags,
    char *name, uchar_t **data, uint_t *nelements)
{
    /*
     * Warn (DEBUG kernels only) about unsupported flag bits, then
     * strip them and continue.
     */
    if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
        LDI_DEV_T_ANY | DDI_UNBND_DLPI2 | DDI_PROP_ROOTNEX_GLOBAL)) {
#ifdef DEBUG
        if (dip != NULL) {
            cmn_err(CE_WARN, "ddi_prop_lookup_byte_array: "
                " invalid flag 0x%x (prop = %s, node = %s%d)",
                flags, name, ddi_driver_name(dip),
                ddi_get_instance(dip));
        }
#endif /* DEBUG */
        flags &= DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
            LDI_DEV_T_ANY | DDI_UNBND_DLPI2;
    }

    return (ddi_prop_lookup_common(match_dev, dip,
        (flags | DDI_PROP_TYPE_BYTE), name, data,
        nelements, ddi_prop_fm_decode_bytes));
}
4046
4047 /*
4048 * Update an array of bytes property.
4049 */
4050 int
4051 ddi_prop_update_byte_array(dev_t match_dev, dev_info_t *dip,
4052 char *name, uchar_t *data, uint_t nelements)
4053 {
4054 if (nelements == 0)
4055 return (DDI_PROP_INVAL_ARG);
4056
4057 return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_BYTE,
4058 name, data, nelements, ddi_prop_fm_encode_bytes));
4059 }
4060
4061
4062 int
4063 e_ddi_prop_update_byte_array(dev_t match_dev, dev_info_t *dip,
4064 char *name, uchar_t *data, uint_t nelements)
4065 {
4066 if (nelements == 0)
4067 return (DDI_PROP_INVAL_ARG);
4068
4069 return (ddi_prop_update_common(match_dev, dip,
4070 DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_BYTE,
4071 name, data, nelements, ddi_prop_fm_encode_bytes));
4072 }
4073
4074
/*
 * ddi_prop_remove_common:	Undefine a managed property:
 *			Input dev_t must match dev_t when defined.
 *			Returns DDI_PROP_NOT_FOUND, possibly.
 *			DDI_PROP_INVAL_ARG is also possible if dev is
 *			DDI_DEV_T_ANY or incoming name is the NULL string.
 *
 * The flag argument selects which of the node's three property lists
 * (driver, system, hardware) to search.
 */
int
ddi_prop_remove_common(dev_t dev, dev_info_t *dip, char *name, int flag)
{
    ddi_prop_t **list_head = &(DEVI(dip)->devi_drv_prop_ptr);
    ddi_prop_t *propp;
    ddi_prop_t *lastpropp = NULL;

    if ((dev == DDI_DEV_T_ANY) || (name == (char *)0) ||
        (strlen(name) == 0)) {
        return (DDI_PROP_INVAL_ARG);
    }

    /* select the property list to search (driver list by default) */
    if (flag & DDI_PROP_SYSTEM_DEF)
        list_head = &(DEVI(dip)->devi_sys_prop_ptr);
    else if (flag & DDI_PROP_HW_DEF)
        list_head = &(DEVI(dip)->devi_hw_prop_ptr);

    mutex_enter(&(DEVI(dip)->devi_lock));

    for (propp = *list_head; propp != NULL; propp = propp->prop_next) {
        if (DDI_STRSAME(propp->prop_name, name) &&
            (dev == propp->prop_dev)) {
            /*
             * Unlink this propp allowing for it to
             * be first in the list:
             */

            if (lastpropp == NULL)
                *list_head = propp->prop_next;
            else
                lastpropp->prop_next = propp->prop_next;

            /* drop the lock before freeing; propp is now private */
            mutex_exit(&(DEVI(dip)->devi_lock));

            /*
             * Free memory and return...
             */
            kmem_free(propp->prop_name,
                strlen(propp->prop_name) + 1);
            if (propp->prop_len != 0)
                kmem_free(propp->prop_val, propp->prop_len);
            kmem_free(propp, sizeof (ddi_prop_t));
            return (DDI_PROP_SUCCESS);
        }
        lastpropp = propp;
    }
    mutex_exit(&(DEVI(dip)->devi_lock));
    return (DDI_PROP_NOT_FOUND);
}
4131
/*
 * Remove a driver-defined property (flag 0 selects the driver list).
 */
int
ddi_prop_remove(dev_t dev, dev_info_t *dip, char *name)
{
    return (ddi_prop_remove_common(dev, dip, name, 0));
}
4137
/*
 * Remove a system-defined property.
 */
int
e_ddi_prop_remove(dev_t dev, dev_info_t *dip, char *name)
{
    return (ddi_prop_remove_common(dev, dip, name, DDI_PROP_SYSTEM_DEF));
}
4143
/*
 * e_ddi_prop_list_delete: remove a list of properties
 *	Note that the caller needs to provide the required protection
 *	(eg. devi_lock if these properties are still attached to a devi)
 */
void
e_ddi_prop_list_delete(ddi_prop_t *props)
{
    i_ddi_prop_list_delete(props);
}
4154
4155 /*
4156 * ddi_prop_remove_all_common:
4157 * Used before unloading a driver to remove
4158 * all properties. (undefines all dev_t's props.)
4159 * Also removes `explicitly undefined' props.
4160 * No errors possible.
4161 */
4162 void
4163 ddi_prop_remove_all_common(dev_info_t *dip, int flag)
4164 {
4165 ddi_prop_t **list_head;
4166
4167 mutex_enter(&(DEVI(dip)->devi_lock));
4168 if (flag & DDI_PROP_SYSTEM_DEF) {
4169 list_head = &(DEVI(dip)->devi_sys_prop_ptr);
4170 } else if (flag & DDI_PROP_HW_DEF) {
4171 list_head = &(DEVI(dip)->devi_hw_prop_ptr);
4172 } else {
4173 list_head = &(DEVI(dip)->devi_drv_prop_ptr);
4174 }
4175 i_ddi_prop_list_delete(*list_head);
4176 *list_head = NULL;
4177 mutex_exit(&(DEVI(dip)->devi_lock));
4178 }
4179
4180
/*
 * ddi_prop_remove_all:		Remove all driver prop definitions.
 */

void
ddi_prop_remove_all(dev_info_t *dip)
{
    /* drop the dynamic-property registration first, then the list */
    i_ddi_prop_dyn_driver_set(dip, NULL);
    ddi_prop_remove_all_common(dip, 0);
}
4191
/*
 * e_ddi_prop_remove_all:	Remove all system prop definitions.
 */

void
e_ddi_prop_remove_all(dev_info_t *dip)
{
    ddi_prop_remove_all_common(dip, (int)DDI_PROP_SYSTEM_DEF);
}
4201
4202
4203 /*
4204 * ddi_prop_undefine: Explicitly undefine a property. Property
4205 * searches which match this property return
4206 * the error code DDI_PROP_UNDEFINED.
4207 *
4208 * Use ddi_prop_remove to negate effect of
4209 * ddi_prop_undefine
4210 *
4211 * See above for error returns.
4212 */
4213
4214 int
4215 ddi_prop_undefine(dev_t dev, dev_info_t *dip, int flag, char *name)
4216 {
4217 if (!(flag & DDI_PROP_CANSLEEP))
4218 flag |= DDI_PROP_DONTSLEEP;
4219 flag |= DDI_PROP_STACK_CREATE | DDI_PROP_UNDEF_IT | DDI_PROP_TYPE_ANY;
4220 return (ddi_prop_update_common(dev, dip, flag,
4221 name, NULL, 0, ddi_prop_fm_encode_bytes));
4222 }
4223
4224 int
4225 e_ddi_prop_undefine(dev_t dev, dev_info_t *dip, int flag, char *name)
4226 {
4227 if (!(flag & DDI_PROP_CANSLEEP))
4228 flag |= DDI_PROP_DONTSLEEP;
4229 flag |= DDI_PROP_SYSTEM_DEF | DDI_PROP_STACK_CREATE |
4230 DDI_PROP_UNDEF_IT | DDI_PROP_TYPE_ANY;
4231 return (ddi_prop_update_common(dev, dip, flag,
4232 name, NULL, 0, ddi_prop_fm_encode_bytes));
4233 }
4234
/*
 * Support for gathering dynamic properties in devinfo snapshot.
 */

/* Register the driver's dynamic-property descriptor on the node. */
void
i_ddi_prop_dyn_driver_set(dev_info_t *dip, i_ddi_prop_dyn_t *dp)
{
    DEVI(dip)->devi_prop_dyn_driver = dp;
}
4243
/* Return the driver's registered dynamic-property descriptor. */
i_ddi_prop_dyn_t *
i_ddi_prop_dyn_driver_get(dev_info_t *dip)
{
    return (DEVI(dip)->devi_prop_dyn_driver);
}
4249
/* Register the parent's dynamic-property descriptor on the node. */
void
i_ddi_prop_dyn_parent_set(dev_info_t *dip, i_ddi_prop_dyn_t *dp)
{
    DEVI(dip)->devi_prop_dyn_parent = dp;
}
4255
/* Return the parent's registered dynamic-property descriptor. */
i_ddi_prop_dyn_t *
i_ddi_prop_dyn_parent_get(dev_info_t *dip)
{
    return (DEVI(dip)->devi_prop_dyn_parent);
}
4261
4262 void
4263 i_ddi_prop_dyn_cache_invalidate(dev_info_t *dip, i_ddi_prop_dyn_t *dp)
4264 {
4265 /* for now we invalidate the entire cached snapshot */
4266 if (dip && dp)
4267 i_ddi_di_cache_invalidate();
4268 }
4269
/*
 * Invalidate cached devinfo snapshot state after a property change;
 * the arguments are currently unused (the whole cache is dropped).
 */
/* ARGSUSED */
void
ddi_prop_cache_invalidate(dev_t dev, dev_info_t *dip, char *name, int flags)
{
    /* for now we invalidate the entire cached snapshot */
    i_ddi_di_cache_invalidate();
}
4277
4278
/*
 * Code to search hardware layer (PROM), if it exists, on behalf of child.
 *
 * if input dip != child_dip, then call is on behalf of child
 * to search PROM, do it via ddi_prop_search_common() and ascend only
 * if allowed.
 *
 * if input dip == ch_dip (child_dip), call is on behalf of root driver,
 * to search for PROM defined props only.
 *
 * Note that the PROM search is done only if the requested dev
 * is either DDI_DEV_T_ANY or DDI_DEV_T_NONE. PROM properties
 * have no associated dev, thus are automatically associated with
 * DDI_DEV_T_NONE.
 *
 * Modifying flag DDI_PROP_NOTPROM inhibits the search in the h/w layer.
 *
 * Returns DDI_PROP_FOUND_1275 if found to indicate to framework
 * that the property resides in the prom.
 */
int
impl_ddi_bus_prop_op(dev_t dev, dev_info_t *dip, dev_info_t *ch_dip,
    ddi_prop_op_t prop_op, int mod_flags,
    char *name, caddr_t valuep, int *lengthp)
{
    int len;
    caddr_t buffer;

    /*
     * If requested dev is DDI_DEV_T_NONE or DDI_DEV_T_ANY, then
     * look in caller's PROM if it's a self identifying device...
     *
     * Note that this is very similar to ddi_prop_op, but we
     * search the PROM instead of the s/w defined properties,
     * and we are called on by the parent driver to do this for
     * the child.
     */

    if (((dev == DDI_DEV_T_NONE) || (dev == DDI_DEV_T_ANY)) &&
        ndi_dev_is_prom_node(ch_dip) &&
        ((mod_flags & DDI_PROP_NOTPROM) == 0)) {
        len = prom_getproplen((pnode_t)DEVI(ch_dip)->devi_nodeid, name);
        if (len == -1) {
            /* -1 from the PROM means "no such property" */
            return (DDI_PROP_NOT_FOUND);
        }

        /*
         * If exists only request, we're done
         */
        if (prop_op == PROP_EXISTS) {
            return (DDI_PROP_FOUND_1275);
        }

        /*
         * If length only request or prop length == 0, get out
         */
        if ((prop_op == PROP_LEN) || (len == 0)) {
            *lengthp = len;
            return (DDI_PROP_FOUND_1275);
        }

        /*
         * Allocate buffer if required... (either way `buffer'
         * is receiving address).
         *
         * NOTE(review): an unexpected prop_op value falls through
         * the default case and reaches prom_getprop() with
         * `buffer' uninitialized -- confirm callers only pass the
         * enumerated PROP_* operations.
         */

        switch (prop_op) {

        case PROP_LEN_AND_VAL_ALLOC:

            buffer = kmem_alloc((size_t)len,
                mod_flags & DDI_PROP_CANSLEEP ?
                KM_SLEEP : KM_NOSLEEP);
            if (buffer == NULL) {
                return (DDI_PROP_NO_MEMORY);
            }
            /* hand the allocated buffer back to the caller */
            *(caddr_t *)valuep = buffer;
            break;

        case PROP_LEN_AND_VAL_BUF:

            if (len > (*lengthp)) {
                *lengthp = len;
                return (DDI_PROP_BUF_TOO_SMALL);
            }

            buffer = valuep;
            break;

        default:
            break;
        }

        /*
         * Call the PROM function to do the copy.
         */
        (void) prom_getprop((pnode_t)DEVI(ch_dip)->devi_nodeid,
            name, buffer);

        *lengthp = len; /* return the actual length to the caller */
        (void) impl_fix_props(dip, ch_dip, name, len, buffer);
        return (DDI_PROP_FOUND_1275);
    }

    return (DDI_PROP_NOT_FOUND);
}
4385
/*
 * The ddi_bus_prop_op default bus nexus prop op function.
 *
 * Code to search hardware layer (PROM), if it exists,
 * on behalf of child, then, if appropriate, ascend and check
 * my own software defined properties...
 */
int
ddi_bus_prop_op(dev_t dev, dev_info_t *dip, dev_info_t *ch_dip,
    ddi_prop_op_t prop_op, int mod_flags,
    char *name, caddr_t valuep, int *lengthp)
{
    int error;

    /* first try the child's PROM properties */
    error = impl_ddi_bus_prop_op(dev, dip, ch_dip, prop_op, mod_flags,
        name, valuep, lengthp);

    if (error == DDI_PROP_SUCCESS || error == DDI_PROP_FOUND_1275 ||
        error == DDI_PROP_BUF_TOO_SMALL)
        return (error);

    if (error == DDI_PROP_NO_MEMORY) {
        cmn_err(CE_CONT, prop_no_mem_msg, name);
        return (DDI_PROP_NO_MEMORY);
    }

    /*
     * Check the 'options' node as a last resort
     */
    if ((mod_flags & DDI_PROP_DONTPASS) != 0)
        return (DDI_PROP_NOT_FOUND);

    if (ch_dip == ddi_root_node()) {
        /*
         * As a last resort, when we've reached
         * the top and still haven't found the
         * property, see if the desired property
         * is attached to the options node.
         *
         * The options dip is attached right after boot.
         */
        ASSERT(options_dip != NULL);
        /*
         * Force the "don't pass" flag to *just* see
         * what the options node has to offer.
         */
        return (ddi_prop_search_common(dev, options_dip, prop_op,
            mod_flags|DDI_PROP_DONTPASS, name, valuep,
            (uint_t *)lengthp));
    }

    /*
     * Otherwise, continue search with parent's s/w defined properties...
     * NOTE: Using `dip' in following call increments the level.
     */

    return (ddi_prop_search_common(dev, dip, prop_op, mod_flags,
        name, valuep, (uint_t *)lengthp));
}
4445
4446 /*
4447 * External property functions used by other parts of the kernel...
4448 */
4449
4450 /*
4451 * e_ddi_getlongprop: See comments for ddi_get_longprop.
4452 */
4453
4454 int
4455 e_ddi_getlongprop(dev_t dev, vtype_t type, char *name, int flags,
4456 caddr_t valuep, int *lengthp)
4457 {
4458 _NOTE(ARGUNUSED(type))
4459 dev_info_t *devi;
4460 ddi_prop_op_t prop_op = PROP_LEN_AND_VAL_ALLOC;
4461 int error;
4462
4463 if ((devi = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
4464 return (DDI_PROP_NOT_FOUND);
4465
4466 error = cdev_prop_op(dev, devi, prop_op, flags, name, valuep, lengthp);
4467 ddi_release_devi(devi);
4468 return (error);
4469 }
4470
4471 /*
4472 * e_ddi_getlongprop_buf: See comments for ddi_getlongprop_buf.
4473 */
4474
4475 int
4476 e_ddi_getlongprop_buf(dev_t dev, vtype_t type, char *name, int flags,
4477 caddr_t valuep, int *lengthp)
4478 {
4479 _NOTE(ARGUNUSED(type))
4480 dev_info_t *devi;
4481 ddi_prop_op_t prop_op = PROP_LEN_AND_VAL_BUF;
4482 int error;
4483
4484 if ((devi = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
4485 return (DDI_PROP_NOT_FOUND);
4486
4487 error = cdev_prop_op(dev, devi, prop_op, flags, name, valuep, lengthp);
4488 ddi_release_devi(devi);
4489 return (error);
4490 }
4491
4492 /*
4493 * e_ddi_getprop: See comments for ddi_getprop.
4494 */
4495 int
4496 e_ddi_getprop(dev_t dev, vtype_t type, char *name, int flags, int defvalue)
4497 {
4498 _NOTE(ARGUNUSED(type))
4499 dev_info_t *devi;
4500 ddi_prop_op_t prop_op = PROP_LEN_AND_VAL_BUF;
4501 int propvalue = defvalue;
4502 int proplength = sizeof (int);
4503 int error;
4504
4505 if ((devi = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
4506 return (defvalue);
4507
4508 error = cdev_prop_op(dev, devi, prop_op,
4509 flags, name, (caddr_t)&propvalue, &proplength);
4510 ddi_release_devi(devi);
4511
4512 if ((error == DDI_PROP_SUCCESS) && (proplength == 0))
4513 propvalue = 1;
4514
4515 return (propvalue);
4516 }
4517
4518 /*
4519 * e_ddi_getprop_int64:
4520 *
4521 * This is a typed interfaces, but predates typed properties. With the
4522 * introduction of typed properties the framework tries to ensure
4523 * consistent use of typed interfaces. This is why TYPE_INT64 is not
4524 * part of TYPE_ANY. E_ddi_getprop_int64 is a special case where a
4525 * typed interface invokes legacy (non-typed) interfaces:
4526 * cdev_prop_op(), prop_op(9E), ddi_prop_op(9F)). In this case the
4527 * fact that TYPE_INT64 is not part of TYPE_ANY matters. To support
4528 * this type of lookup as a single operation we invoke the legacy
4529 * non-typed interfaces with the special CONSUMER_TYPED bit set. The
4530 * framework ddi_prop_op(9F) implementation is expected to check for
4531 * CONSUMER_TYPED and, if set, expand type bits beyond TYPE_ANY
4532 * (currently TYPE_INT64).
4533 */
4534 int64_t
4535 e_ddi_getprop_int64(dev_t dev, vtype_t type, char *name,
4536 int flags, int64_t defvalue)
4537 {
4538 _NOTE(ARGUNUSED(type))
4539 dev_info_t *devi;
4540 ddi_prop_op_t prop_op = PROP_LEN_AND_VAL_BUF;
4541 int64_t propvalue = defvalue;
4542 int proplength = sizeof (propvalue);
4543 int error;
4544
4545 if ((devi = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
4546 return (defvalue);
4547
4548 error = cdev_prop_op(dev, devi, prop_op, flags |
4549 DDI_PROP_CONSUMER_TYPED, name, (caddr_t)&propvalue, &proplength);
4550 ddi_release_devi(devi);
4551
4552 if ((error == DDI_PROP_SUCCESS) && (proplength == 0))
4553 propvalue = 1;
4554
4555 return (propvalue);
4556 }
4557
4558 /*
4559 * e_ddi_getproplen: See comments for ddi_getproplen.
4560 */
4561 int
4562 e_ddi_getproplen(dev_t dev, vtype_t type, char *name, int flags, int *lengthp)
4563 {
4564 _NOTE(ARGUNUSED(type))
4565 dev_info_t *devi;
4566 ddi_prop_op_t prop_op = PROP_LEN;
4567 int error;
4568
4569 if ((devi = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
4570 return (DDI_PROP_NOT_FOUND);
4571
4572 error = cdev_prop_op(dev, devi, prop_op, flags, name, NULL, lengthp);
4573 ddi_release_devi(devi);
4574 return (error);
4575 }
4576
4577 /*
4578 * Routines to get at elements of the dev_info structure
4579 */
4580
4581 /*
4582 * ddi_binding_name: Return the driver binding name of the devinfo node
4583 * This is the name the OS used to bind the node to a driver.
4584 */
4585 char *
4586 ddi_binding_name(dev_info_t *dip)
4587 {
4588 return (DEVI(dip)->devi_binding_name);
4589 }
4590
4591 /*
4592 * ddi_driver_major: Return the major number of the driver that
4593 * the supplied devinfo is bound to. If not yet bound,
4594 * DDI_MAJOR_T_NONE.
4595 *
4596 * When used by the driver bound to 'devi', this
4597 * function will reliably return the driver major number.
4598 * Other ways of determining the driver major number, such as
4599 * major = ddi_name_to_major(ddi_get_name(devi));
4600 * major = ddi_name_to_major(ddi_binding_name(devi));
4601 * can return a different result as the driver/alias binding
4602 * can change dynamically, and thus should be avoided.
4603 */
4604 major_t
4605 ddi_driver_major(dev_info_t *devi)
4606 {
4607 return (DEVI(devi)->devi_major);
4608 }
4609
4610 /*
4611 * ddi_driver_name: Return the normalized driver name. this is the
4612 * actual driver name
4613 */
4614 const char *
4615 ddi_driver_name(dev_info_t *devi)
4616 {
4617 major_t major;
4618
4619 if ((major = ddi_driver_major(devi)) != DDI_MAJOR_T_NONE)
4620 return (ddi_major_to_name(major));
4621
4622 return (ddi_node_name(devi));
4623 }
4624
4625 /*
4626 * i_ddi_set_binding_name: Set binding name.
4627 *
4628 * Set the binding name to the given name.
4629 * This routine is for use by the ddi implementation, not by drivers.
4630 */
4631 void
4632 i_ddi_set_binding_name(dev_info_t *dip, char *name)
4633 {
4634 DEVI(dip)->devi_binding_name = name;
4635
4636 }
4637
4638 /*
4639 * ddi_get_name: A synonym of ddi_binding_name() ... returns a name
4640 * the implementation has used to bind the node to a driver.
4641 */
4642 char *
4643 ddi_get_name(dev_info_t *dip)
4644 {
4645 return (DEVI(dip)->devi_binding_name);
4646 }
4647
4648 /*
4649 * ddi_node_name: Return the name property of the devinfo node
4650 * This may differ from ddi_binding_name if the node name
4651 * does not define a binding to a driver (i.e. generic names).
4652 */
4653 char *
4654 ddi_node_name(dev_info_t *dip)
4655 {
4656 return (DEVI(dip)->devi_node_name);
4657 }
4658
4659
4660 /*
4661 * ddi_get_nodeid: Get nodeid stored in dev_info structure.
4662 */
4663 int
4664 ddi_get_nodeid(dev_info_t *dip)
4665 {
4666 return (DEVI(dip)->devi_nodeid);
4667 }
4668
4669 int
4670 ddi_get_instance(dev_info_t *dip)
4671 {
4672 return (DEVI(dip)->devi_instance);
4673 }
4674
4675 struct dev_ops *
4676 ddi_get_driver(dev_info_t *dip)
4677 {
4678 return (DEVI(dip)->devi_ops);
4679 }
4680
4681 void
4682 ddi_set_driver(dev_info_t *dip, struct dev_ops *devo)
4683 {
4684 DEVI(dip)->devi_ops = devo;
4685 }
4686
4687 /*
4688 * ddi_set_driver_private/ddi_get_driver_private:
4689 * Get/set device driver private data in devinfo.
4690 */
4691 void
4692 ddi_set_driver_private(dev_info_t *dip, void *data)
4693 {
4694 DEVI(dip)->devi_driver_data = data;
4695 }
4696
4697 void *
4698 ddi_get_driver_private(dev_info_t *dip)
4699 {
4700 return (DEVI(dip)->devi_driver_data);
4701 }
4702
4703 /*
4704 * ddi_get_parent, ddi_get_child, ddi_get_next_sibling
4705 */
4706
4707 dev_info_t *
4708 ddi_get_parent(dev_info_t *dip)
4709 {
4710 return ((dev_info_t *)DEVI(dip)->devi_parent);
4711 }
4712
4713 dev_info_t *
4714 ddi_get_child(dev_info_t *dip)
4715 {
4716 return ((dev_info_t *)DEVI(dip)->devi_child);
4717 }
4718
4719 dev_info_t *
4720 ddi_get_next_sibling(dev_info_t *dip)
4721 {
4722 return ((dev_info_t *)DEVI(dip)->devi_sibling);
4723 }
4724
4725 dev_info_t *
4726 ddi_get_next(dev_info_t *dip)
4727 {
4728 return ((dev_info_t *)DEVI(dip)->devi_next);
4729 }
4730
4731 void
4732 ddi_set_next(dev_info_t *dip, dev_info_t *nextdip)
4733 {
4734 DEVI(dip)->devi_next = DEVI(nextdip);
4735 }
4736
4737 /*
4738 * ddi_root_node: Return root node of devinfo tree
4739 */
4740
4741 dev_info_t *
4742 ddi_root_node(void)
4743 {
4744 extern dev_info_t *top_devinfo;
4745
4746 return (top_devinfo);
4747 }
4748
4749 /*
4750 * Miscellaneous functions:
4751 */
4752
4753 /*
4754 * Implementation specific hooks
4755 */
4756
4757 void
4758 ddi_report_dev(dev_info_t *d)
4759 {
4760 (void) ddi_ctlops(d, d, DDI_CTLOPS_REPORTDEV, (void *)0, (void *)0);
4761 }
4762
4763 /*
4764 * ddi_ctlops() is described in the assembler not to buy a new register
4765 * window when it's called and can reduce cost in climbing the device tree
4766 * without using the tail call optimization.
4767 */
4768 int
4769 ddi_dev_regsize(dev_info_t *dev, uint_t rnumber, off_t *result)
4770 {
4771 int ret;
4772
4773 ret = ddi_ctlops(dev, dev, DDI_CTLOPS_REGSIZE,
4774 (void *)&rnumber, (void *)result);
4775
4776 return (ret == DDI_SUCCESS ? DDI_SUCCESS : DDI_FAILURE);
4777 }
4778
4779 int
4780 ddi_dev_nregs(dev_info_t *dev, int *result)
4781 {
4782 return (ddi_ctlops(dev, dev, DDI_CTLOPS_NREGS, 0, (void *)result));
4783 }
4784
4785 int
4786 ddi_dev_is_sid(dev_info_t *d)
4787 {
4788 return (ddi_ctlops(d, d, DDI_CTLOPS_SIDDEV, (void *)0, (void *)0));
4789 }
4790
4791 int
4792 ddi_slaveonly(dev_info_t *d)
4793 {
4794 return (ddi_ctlops(d, d, DDI_CTLOPS_SLAVEONLY, (void *)0, (void *)0));
4795 }
4796
4797 int
4798 ddi_dev_affinity(dev_info_t *a, dev_info_t *b)
4799 {
4800 return (ddi_ctlops(a, a, DDI_CTLOPS_AFFINITY, (void *)b, (void *)0));
4801 }
4802
4803 int
4804 ddi_streams_driver(dev_info_t *dip)
4805 {
4806 if (i_ddi_devi_attached(dip) &&
4807 (DEVI(dip)->devi_ops->devo_cb_ops != NULL) &&
4808 (DEVI(dip)->devi_ops->devo_cb_ops->cb_str != NULL))
4809 return (DDI_SUCCESS);
4810 return (DDI_FAILURE);
4811 }
4812
4813 /*
4814 * callback free list
4815 */
4816
4817 static int ncallbacks;
4818 static int nc_low = 170;
4819 static int nc_med = 512;
4820 static int nc_high = 2048;
4821 static struct ddi_callback *callbackq;
4822 static struct ddi_callback *callbackqfree;
4823
4824 /*
4825 * set/run callback lists
4826 */
4827 struct cbstats {
4828 kstat_named_t cb_asked;
4829 kstat_named_t cb_new;
4830 kstat_named_t cb_run;
4831 kstat_named_t cb_delete;
4832 kstat_named_t cb_maxreq;
4833 kstat_named_t cb_maxlist;
4834 kstat_named_t cb_alloc;
4835 kstat_named_t cb_runouts;
4836 kstat_named_t cb_L2;
4837 kstat_named_t cb_grow;
4838 } cbstats = {
4839 {"asked", KSTAT_DATA_UINT32},
4840 {"new", KSTAT_DATA_UINT32},
4841 {"run", KSTAT_DATA_UINT32},
4842 {"delete", KSTAT_DATA_UINT32},
4843 {"maxreq", KSTAT_DATA_UINT32},
4844 {"maxlist", KSTAT_DATA_UINT32},
4845 {"alloc", KSTAT_DATA_UINT32},
4846 {"runouts", KSTAT_DATA_UINT32},
4847 {"L2", KSTAT_DATA_UINT32},
4848 {"grow", KSTAT_DATA_UINT32},
4849 };
4850
4851 #define nc_asked cb_asked.value.ui32
4852 #define nc_new cb_new.value.ui32
4853 #define nc_run cb_run.value.ui32
4854 #define nc_delete cb_delete.value.ui32
4855 #define nc_maxreq cb_maxreq.value.ui32
4856 #define nc_maxlist cb_maxlist.value.ui32
4857 #define nc_alloc cb_alloc.value.ui32
4858 #define nc_runouts cb_runouts.value.ui32
4859 #define nc_L2 cb_L2.value.ui32
4860 #define nc_grow cb_grow.value.ui32
4861
4862 static kmutex_t ddi_callback_mutex;
4863
4864 /*
4865 * callbacks are handled using a L1/L2 cache. The L1 cache
4866 * comes out of kmem_cache_alloc and can expand/shrink dynamically. If
4867 * we can't get callbacks from the L1 cache [because pageout is doing
4868 * I/O at the time freemem is 0], we allocate callbacks out of the
4869 * L2 cache. The L2 cache is static and depends on the memory size.
4870 * [We might also count the number of devices at probe time and
4871 * allocate one structure per device and adjust for deferred attach]
4872 */
4873 void
4874 impl_ddi_callback_init(void)
4875 {
4876 int i;
4877 uint_t physmegs;
4878 kstat_t *ksp;
4879
4880 physmegs = physmem >> (20 - PAGESHIFT);
4881 if (physmegs < 48) {
4882 ncallbacks = nc_low;
4883 } else if (physmegs < 128) {
4884 ncallbacks = nc_med;
4885 } else {
4886 ncallbacks = nc_high;
4887 }
4888
4889 /*
4890 * init free list
4891 */
4892 callbackq = kmem_zalloc(
4893 ncallbacks * sizeof (struct ddi_callback), KM_SLEEP);
4894 for (i = 0; i < ncallbacks-1; i++)
4895 callbackq[i].c_nfree = &callbackq[i+1];
4896 callbackqfree = callbackq;
4897
4898 /* init kstats */
4899 if (ksp = kstat_create("unix", 0, "cbstats", "misc", KSTAT_TYPE_NAMED,
4900 sizeof (cbstats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL)) {
4901 ksp->ks_data = (void *) &cbstats;
4902 kstat_install(ksp);
4903 }
4904
4905 }
4906
/*
 * callback_insert: add a callback request (funcp, arg) with the given
 * count to the list headed by *listid.  If an entry for the same
 * function/argument pair already exists, just bump its count.
 *
 * Allocation fallback order: L1 (kmem_alloc), then the static L2 pool
 * (callbackqfree), then kmem_alloc_tryhard with KM_PANIC as a last
 * resort.
 *
 * NOTE(review): callers in this file invoke this with ddi_callback_mutex
 * held; confirm that all external callers do the same.
 */
static void
callback_insert(int (*funcp)(caddr_t), caddr_t arg, uintptr_t *listid,
	int count)
{
	struct ddi_callback *list, *marker, *new;
	size_t size = sizeof (struct ddi_callback);

	/* coalesce with an existing entry for the same (funcp, arg) */
	list = marker = (struct ddi_callback *)*listid;
	while (list != NULL) {
		if (list->c_call == funcp && list->c_arg == arg) {
			list->c_count += count;
			return;
		}
		marker = list;
		list = list->c_nlist;
	}
	new = kmem_alloc(size, KM_NOSLEEP);
	if (new == NULL) {
		/* L1 failed; fall back to the static L2 pool */
		new = callbackqfree;
		if (new == NULL) {
			/* L2 exhausted too; force an allocation */
			new = kmem_alloc_tryhard(sizeof (struct ddi_callback),
			    &size, KM_NOSLEEP | KM_PANIC);
			cbstats.nc_grow++;
		} else {
			callbackqfree = new->c_nfree;
			cbstats.nc_L2++;
		}
	}
	/* link the new entry at the tail (or as the new head) */
	if (marker != NULL) {
		marker->c_nlist = new;
	} else {
		*listid = (uintptr_t)new;
	}
	new->c_size = size;
	new->c_nlist = NULL;
	new->c_call = funcp;
	new->c_arg = arg;
	new->c_count = count;
	cbstats.nc_new++;
	cbstats.nc_alloc++;
	if (cbstats.nc_alloc > cbstats.nc_maxlist)
		cbstats.nc_maxlist = cbstats.nc_alloc;
}
4950
4951 void
4952 ddi_set_callback(int (*funcp)(caddr_t), caddr_t arg, uintptr_t *listid)
4953 {
4954 mutex_enter(&ddi_callback_mutex);
4955 cbstats.nc_asked++;
4956 if ((cbstats.nc_asked - cbstats.nc_run) > cbstats.nc_maxreq)
4957 cbstats.nc_maxreq = (cbstats.nc_asked - cbstats.nc_run);
4958 (void) callback_insert(funcp, arg, listid, 1);
4959 mutex_exit(&ddi_callback_mutex);
4960 }
4961
/*
 * real_callback_run: softcall handler that drains the callback list
 * headed by *(uintptr_t *)Queue.  For each list entry it removes the
 * entry (returning it to the L2 pool if it came from there, otherwise
 * kmem_free'ing it), then invokes the callback up to c_count times.
 * A callback that returns 0 signals the resource is still unavailable;
 * the remaining count is re-inserted on the list and the inner loop
 * stops.  The outer loop runs until the pending total (snapshotted on
 * the first pass) is exhausted.
 */
static void
real_callback_run(void *Queue)
{
	int (*funcp)(caddr_t);
	caddr_t arg;
	int count, rval;
	uintptr_t *listid;
	struct ddi_callback *list, *marker;
	int check_pending = 1;
	int pending = 0;

	do {
		mutex_enter(&ddi_callback_mutex);
		listid = Queue;
		list = (struct ddi_callback *)*listid;
		if (list == NULL) {
			mutex_exit(&ddi_callback_mutex);
			return;
		}
		if (check_pending) {
			/* first pass: total up all queued invocations */
			marker = list;
			while (marker != NULL) {
				pending += marker->c_count;
				marker = marker->c_nlist;
			}
			check_pending = 0;
		}
		ASSERT(pending > 0);
		ASSERT(list->c_count > 0);
		/* capture the entry, then unlink and free it */
		funcp = list->c_call;
		arg = list->c_arg;
		count = list->c_count;
		*(uintptr_t *)Queue = (uintptr_t)list->c_nlist;
		if (list >= &callbackq[0] &&
		    list <= &callbackq[ncallbacks-1]) {
			/* entry came from the static L2 pool; recycle it */
			list->c_nfree = callbackqfree;
			callbackqfree = list;
		} else
			kmem_free(list, list->c_size);

		cbstats.nc_delete++;
		cbstats.nc_alloc--;
		mutex_exit(&ddi_callback_mutex);

		/* run the callback without holding the mutex */
		do {
			if ((rval = (*funcp)(arg)) == 0) {
				/* resource still unavailable; requeue */
				pending -= count;
				mutex_enter(&ddi_callback_mutex);
				(void) callback_insert(funcp, arg, listid,
				    count);
				cbstats.nc_runouts++;
			} else {
				pending--;
				mutex_enter(&ddi_callback_mutex);
				cbstats.nc_run++;
			}
			mutex_exit(&ddi_callback_mutex);
		} while (rval != 0 && (--count > 0));
	} while (pending > 0);
}
5022
/*
 * ddi_run_callback: schedule the callback list headed by *listid to be
 * drained by real_callback_run() via softcall().
 */
void
ddi_run_callback(uintptr_t *listid)
{
	softcall(real_callback_run, listid);
}
5028
5029 /*
5030 * ddi_periodic_t
5031 * ddi_periodic_add(void (*func)(void *), void *arg, hrtime_t interval,
5032 * int level)
5033 *
5034 * INTERFACE LEVEL
5035 * Solaris DDI specific (Solaris DDI)
5036 *
5037 * PARAMETERS
5038 * func: the callback function
5039 *
5040 * The callback function will be invoked. The function is invoked
5041 * in kernel context if the argument level passed is the zero.
5042 * Otherwise it's invoked in interrupt context at the specified
5043 * level.
5044 *
5045 * arg: the argument passed to the callback function
5046 *
5047 * interval: interval time
5048 *
5049 * level : callback interrupt level
5050 *
5051 * If the value is the zero, the callback function is invoked
5052 * in kernel context. If the value is more than the zero, but
5053 * less than or equal to ten, the callback function is invoked in
5054 * interrupt context at the specified interrupt level, which may
5055 * be used for real time applications.
5056 *
5057 * This value must be in range of 0-10, which can be a numeric
5058 * number or a pre-defined macro (DDI_IPL_0, ... , DDI_IPL_10).
5059 *
5060 * DESCRIPTION
5061 * ddi_periodic_add(9F) schedules the specified function to be
5062 * periodically invoked in the interval time.
5063 *
5064 * As well as timeout(9F), the exact time interval over which the function
5065 * takes effect cannot be guaranteed, but the value given is a close
5066 * approximation.
5067 *
5068 * Drivers waiting on behalf of processes with real-time constraints must
5069 * pass non-zero value with the level argument to ddi_periodic_add(9F).
5070 *
5071 * RETURN VALUES
5072 * ddi_periodic_add(9F) returns a non-zero opaque value (ddi_periodic_t),
5073 * which must be used for ddi_periodic_delete(9F) to specify the request.
5074 *
5075 * CONTEXT
5076 * ddi_periodic_add(9F) can be called in user or kernel context, but
5077 * it cannot be called in interrupt context, which is different from
5078 * timeout(9F).
5079 */
5080 ddi_periodic_t
5081 ddi_periodic_add(void (*func)(void *), void *arg, hrtime_t interval, int level)
5082 {
5083 /*
5084 * Sanity check of the argument level.
5085 */
5086 if (level < DDI_IPL_0 || level > DDI_IPL_10)
5087 cmn_err(CE_PANIC,
5088 "ddi_periodic_add: invalid interrupt level (%d).", level);
5089
5090 /*
5091 * Sanity check of the context. ddi_periodic_add() cannot be
5092 * called in either interrupt context or high interrupt context.
5093 */
5094 if (servicing_interrupt())
5095 cmn_err(CE_PANIC,
5096 "ddi_periodic_add: called in (high) interrupt context.");
5097
5098 return ((ddi_periodic_t)i_timeout(func, arg, interval, level));
5099 }
5100
5101 /*
5102 * void
5103 * ddi_periodic_delete(ddi_periodic_t req)
5104 *
5105 * INTERFACE LEVEL
5106 * Solaris DDI specific (Solaris DDI)
5107 *
5108 * PARAMETERS
5109 * req: ddi_periodic_t opaque value ddi_periodic_add(9F) returned
5110 * previously.
5111 *
5112 * DESCRIPTION
5113 * ddi_periodic_delete(9F) cancels the ddi_periodic_add(9F) request
5114 * previously requested.
5115 *
5116 * ddi_periodic_delete(9F) will not return until the pending request
5117 * is canceled or executed.
5118 *
5119 * As well as untimeout(9F), calling ddi_periodic_delete(9F) for a
5120 * timeout which is either running on another CPU, or has already
5121 * completed causes no problems. However, unlike untimeout(9F), there is
5122 * no restrictions on the lock which might be held across the call to
5123 * ddi_periodic_delete(9F).
5124 *
5125 * Drivers should be structured with the understanding that the arrival of
5126 * both an interrupt and a timeout for that interrupt can occasionally
5127 * occur, in either order.
5128 *
5129 * CONTEXT
5130 * ddi_periodic_delete(9F) can be called in user or kernel context, but
5131 * it cannot be called in interrupt context, which is different from
5132 * untimeout(9F).
5133 */
5134 void
5135 ddi_periodic_delete(ddi_periodic_t req)
5136 {
5137 /*
5138 * Sanity check of the context. ddi_periodic_delete() cannot be
5139 * called in either interrupt context or high interrupt context.
5140 */
5141 if (servicing_interrupt())
5142 cmn_err(CE_PANIC,
5143 "ddi_periodic_delete: called in (high) interrupt context.");
5144
5145 i_untimeout((timeout_t)req);
5146 }
5147
5148 dev_info_t *
5149 nodevinfo(dev_t dev, int otyp)
5150 {
5151 _NOTE(ARGUNUSED(dev, otyp))
5152 return ((dev_info_t *)0);
5153 }
5154
5155 /*
5156 * A driver should support its own getinfo(9E) entry point. This function
5157 * is provided as a convenience for ON drivers that don't expect their
5158 * getinfo(9E) entry point to be called. A driver that uses this must not
5159 * call ddi_create_minor_node.
5160 */
5161 int
5162 ddi_no_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
5163 {
5164 _NOTE(ARGUNUSED(dip, infocmd, arg, result))
5165 return (DDI_FAILURE);
5166 }
5167
5168 /*
5169 * A driver should support its own getinfo(9E) entry point. This function
5170 * is provided as a convenience for ON drivers that where the minor number
5171 * is the instance. Drivers that do not have 1:1 mapping must implement
5172 * their own getinfo(9E) function.
5173 */
5174 int
5175 ddi_getinfo_1to1(dev_info_t *dip, ddi_info_cmd_t infocmd,
5176 void *arg, void **result)
5177 {
5178 _NOTE(ARGUNUSED(dip))
5179 int instance;
5180
5181 if (infocmd != DDI_INFO_DEVT2INSTANCE)
5182 return (DDI_FAILURE);
5183
5184 instance = getminor((dev_t)(uintptr_t)arg);
5185 *result = (void *)(uintptr_t)instance;
5186 return (DDI_SUCCESS);
5187 }
5188
5189 int
5190 ddifail(dev_info_t *devi, ddi_attach_cmd_t cmd)
5191 {
5192 _NOTE(ARGUNUSED(devi, cmd))
5193 return (DDI_FAILURE);
5194 }
5195
/*
 * Default "no DMA" bus ops: each of the following stubs simply fails
 * the corresponding DMA request with an appropriate error code, for
 * nexus drivers that do not support DMA.
 */
int
ddi_no_dma_map(dev_info_t *dip, dev_info_t *rdip,
    struct ddi_dma_req *dmareqp, ddi_dma_handle_t *handlep)
{
	_NOTE(ARGUNUSED(dip, rdip, dmareqp, handlep))
	return (DDI_DMA_NOMAPPING);
}

int
ddi_no_dma_allochdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_attr_t *attr,
    int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *handlep)
{
	_NOTE(ARGUNUSED(dip, rdip, attr, waitfp, arg, handlep))
	return (DDI_DMA_BADATTR);
}

int
ddi_no_dma_freehdl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle)
{
	_NOTE(ARGUNUSED(dip, rdip, handle))
	return (DDI_FAILURE);
}

int
ddi_no_dma_bindhdl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, struct ddi_dma_req *dmareq,
    ddi_dma_cookie_t *cp, uint_t *ccountp)
{
	_NOTE(ARGUNUSED(dip, rdip, handle, dmareq, cp, ccountp))
	return (DDI_DMA_NOMAPPING);
}

int
ddi_no_dma_unbindhdl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle)
{
	_NOTE(ARGUNUSED(dip, rdip, handle))
	return (DDI_FAILURE);
}

int
ddi_no_dma_flush(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, off_t off, size_t len,
    uint_t cache_flags)
{
	_NOTE(ARGUNUSED(dip, rdip, handle, off, len, cache_flags))
	return (DDI_FAILURE);
}

int
ddi_no_dma_win(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, uint_t win, off_t *offp,
    size_t *lenp, ddi_dma_cookie_t *cookiep, uint_t *ccountp)
{
	_NOTE(ARGUNUSED(dip, rdip, handle, win, offp, lenp, cookiep, ccountp))
	return (DDI_FAILURE);
}

int
ddi_no_dma_mctl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, enum ddi_dma_ctlops request,
    off_t *offp, size_t *lenp, caddr_t *objp, uint_t flags)
{
	_NOTE(ARGUNUSED(dip, rdip, handle, request, offp, lenp, objp, flags))
	return (DDI_FAILURE);
}
5263
/*
 * ddivoid: a do-nothing stub.
 */
void
ddivoid(void)
{}

/*
 * nochpoll: chpoll stub for drivers that do not support polling;
 * always fails with ENXIO.
 */
int
nochpoll(dev_t dev, short events, int anyyet, short *reventsp,
    struct pollhead **pollhdrp)
{
	_NOTE(ARGUNUSED(dev, events, anyyet, reventsp, pollhdrp))
	return (ENXIO);
}

/*
 * ddi_get_cred: return the credentials of the current thread.
 */
cred_t *
ddi_get_cred(void)
{
	return (CRED());
}
5281
5282 clock_t
5283 ddi_get_lbolt(void)
5284 {
5285 return ((clock_t)lbolt_hybrid());
5286 }
5287
5288 int64_t
5289 ddi_get_lbolt64(void)
5290 {
5291 return (lbolt_hybrid());
5292 }
5293
5294 time_t
5295 ddi_get_time(void)
5296 {
5297 time_t now;
5298
5299 if ((now = gethrestime_sec()) == 0) {
5300 timestruc_t ts;
5301 mutex_enter(&tod_lock);
5302 ts = tod_get();
5303 mutex_exit(&tod_lock);
5304 return (ts.tv_sec);
5305 } else {
5306 return (now);
5307 }
5308 }
5309
5310 pid_t
5311 ddi_get_pid(void)
5312 {
5313 return (ttoproc(curthread)->p_pid);
5314 }
5315
5316 kt_did_t
5317 ddi_get_kt_did(void)
5318 {
5319 return (curthread->t_did);
5320 }
5321
5322 /*
5323 * This function returns B_TRUE if the caller can reasonably expect that a call
5324 * to cv_wait_sig(9F), cv_timedwait_sig(9F), or qwait_sig(9F) could be awakened
5325 * by user-level signal. If it returns B_FALSE, then the caller should use
5326 * other means to make certain that the wait will not hang "forever."
5327 *
5328 * It does not check the signal mask, nor for reception of any particular
5329 * signal.
5330 *
5331 * Currently, a thread can receive a signal if it's not a kernel thread and it
5332 * is not in the middle of exit(2) tear-down. Threads that are in that
5333 * tear-down effectively convert cv_wait_sig to cv_wait, cv_timedwait_sig to
5334 * cv_timedwait, and qwait_sig to qwait.
5335 */
5336 boolean_t
5337 ddi_can_receive_sig(void)
5338 {
5339 proc_t *pp;
5340
5341 if (curthread->t_proc_flag & TP_LWPEXIT)
5342 return (B_FALSE);
5343 if ((pp = ttoproc(curthread)) == NULL)
5344 return (B_FALSE);
5345 return (pp->p_as != &kas);
5346 }
5347
5348 /*
5349 * Swap bytes in 16-bit [half-]words
5350 */
5351 void
5352 swab(void *src, void *dst, size_t nbytes)
5353 {
5354 uchar_t *pf = (uchar_t *)src;
5355 uchar_t *pt = (uchar_t *)dst;
5356 uchar_t tmp;
5357 int nshorts;
5358
5359 nshorts = nbytes >> 1;
5360
5361 while (--nshorts >= 0) {
5362 tmp = *pf++;
5363 *pt++ = *pf++;
5364 *pt++ = tmp;
5365 }
5366 }
5367
5368 static void
5369 ddi_append_minor_node(dev_info_t *ddip, struct ddi_minor_data *dmdp)
5370 {
5371 int circ;
5372 struct ddi_minor_data *dp;
5373
5374 ndi_devi_enter(ddip, &circ);
5375 if ((dp = DEVI(ddip)->devi_minor) == (struct ddi_minor_data *)NULL) {
5376 DEVI(ddip)->devi_minor = dmdp;
5377 } else {
5378 while (dp->next != (struct ddi_minor_data *)NULL)
5379 dp = dp->next;
5380 dp->next = dmdp;
5381 }
5382 ndi_devi_exit(ddip, circ);
5383 }
5384
5385 /*
5386 * Part of the obsolete SunCluster DDI Hooks.
5387 * Keep for binary compatibility
5388 */
5389 minor_t
5390 ddi_getiminor(dev_t dev)
5391 {
5392 return (getminor(dev));
5393 }
5394
5395 static int
5396 i_log_devfs_minor_create(dev_info_t *dip, char *minor_name)
5397 {
5398 int se_flag;
5399 int kmem_flag;
5400 int se_err;
5401 char *pathname, *class_name;
5402 sysevent_t *ev = NULL;
5403 sysevent_id_t eid;
5404 sysevent_value_t se_val;
5405 sysevent_attr_list_t *ev_attr_list = NULL;
5406
5407 /* determine interrupt context */
5408 se_flag = (servicing_interrupt()) ? SE_NOSLEEP : SE_SLEEP;
5409 kmem_flag = (se_flag == SE_SLEEP) ? KM_SLEEP : KM_NOSLEEP;
5410
5411 i_ddi_di_cache_invalidate();
5412
5413 #ifdef DEBUG
5414 if ((se_flag == SE_NOSLEEP) && sunddi_debug) {
5415 cmn_err(CE_CONT, "ddi_create_minor_node: called from "
5416 "interrupt level by driver %s",
5417 ddi_driver_name(dip));
5418 }
5419 #endif /* DEBUG */
5420
5421 ev = sysevent_alloc(EC_DEVFS, ESC_DEVFS_MINOR_CREATE, EP_DDI, se_flag);
5422 if (ev == NULL) {
5423 goto fail;
5424 }
5425
5426 pathname = kmem_alloc(MAXPATHLEN, kmem_flag);
5427 if (pathname == NULL) {
5428 sysevent_free(ev);
5429 goto fail;
5430 }
5431
5432 (void) ddi_pathname(dip, pathname);
5433 ASSERT(strlen(pathname));
5434 se_val.value_type = SE_DATA_TYPE_STRING;
5435 se_val.value.sv_string = pathname;
5436 if (sysevent_add_attr(&ev_attr_list, DEVFS_PATHNAME,
5437 &se_val, se_flag) != 0) {
5438 kmem_free(pathname, MAXPATHLEN);
5439 sysevent_free(ev);
5440 goto fail;
5441 }
5442 kmem_free(pathname, MAXPATHLEN);
5443
5444 /* add the device class attribute */
5445 if ((class_name = i_ddi_devi_class(dip)) != NULL) {
5446 se_val.value_type = SE_DATA_TYPE_STRING;
5447 se_val.value.sv_string = class_name;
5448 if (sysevent_add_attr(&ev_attr_list,
5449 DEVFS_DEVI_CLASS, &se_val, SE_SLEEP) != 0) {
5450 sysevent_free_attr(ev_attr_list);
5451 goto fail;
5452 }
5453 }
5454
5455 /*
5456 * allow for NULL minor names
5457 */
5458 if (minor_name != NULL) {
5459 se_val.value.sv_string = minor_name;
5460 if (sysevent_add_attr(&ev_attr_list, DEVFS_MINOR_NAME,
5461 &se_val, se_flag) != 0) {
5462 sysevent_free_attr(ev_attr_list);
5463 sysevent_free(ev);
5464 goto fail;
5465 }
5466 }
5467
5468 if (sysevent_attach_attributes(ev, ev_attr_list) != 0) {
5469 sysevent_free_attr(ev_attr_list);
5470 sysevent_free(ev);
5471 goto fail;
5472 }
5473
5474 if ((se_err = log_sysevent(ev, se_flag, &eid)) != 0) {
5475 if (se_err == SE_NO_TRANSPORT) {
5476 cmn_err(CE_WARN, "/devices or /dev may not be current "
5477 "for driver %s (%s). Run devfsadm -i %s",
5478 ddi_driver_name(dip), "syseventd not responding",
5479 ddi_driver_name(dip));
5480 } else {
5481 sysevent_free(ev);
5482 goto fail;
5483 }
5484 }
5485
5486 sysevent_free(ev);
5487 return (DDI_SUCCESS);
5488 fail:
5489 cmn_err(CE_WARN, "/devices or /dev may not be current "
5490 "for driver %s. Run devfsadm -i %s",
5491 ddi_driver_name(dip), ddi_driver_name(dip));
5492 return (DDI_SUCCESS);
5493 }
5494
5495 /*
5496 * failing to remove a minor node is not of interest
5497 * therefore we do not generate an error message
5498 */
5499 static int
5500 i_log_devfs_minor_remove(dev_info_t *dip, char *minor_name)
5501 {
5502 char *pathname, *class_name;
5503 sysevent_t *ev;
5504 sysevent_id_t eid;
5505 sysevent_value_t se_val;
5506 sysevent_attr_list_t *ev_attr_list = NULL;
5507
5508 /*
5509 * only log ddi_remove_minor_node() calls outside the scope
5510 * of attach/detach reconfigurations and when the dip is
5511 * still initialized.
5512 */
5513 if (DEVI_IS_ATTACHING(dip) || DEVI_IS_DETACHING(dip) ||
5514 (i_ddi_node_state(dip) < DS_INITIALIZED)) {
5515 return (DDI_SUCCESS);
5516 }
5517
5518 i_ddi_di_cache_invalidate();
5519
5520 ev = sysevent_alloc(EC_DEVFS, ESC_DEVFS_MINOR_REMOVE, EP_DDI, SE_SLEEP);
5521 if (ev == NULL) {
5522 return (DDI_SUCCESS);
5523 }
5524
5525 pathname = kmem_alloc(MAXPATHLEN, KM_SLEEP);
5526 if (pathname == NULL) {
5527 sysevent_free(ev);
5528 return (DDI_SUCCESS);
5529 }
5530
5531 (void) ddi_pathname(dip, pathname);
5532 ASSERT(strlen(pathname));
5533 se_val.value_type = SE_DATA_TYPE_STRING;
5534 se_val.value.sv_string = pathname;
5535 if (sysevent_add_attr(&ev_attr_list, DEVFS_PATHNAME,
5536 &se_val, SE_SLEEP) != 0) {
5537 kmem_free(pathname, MAXPATHLEN);
5538 sysevent_free(ev);
5539 return (DDI_SUCCESS);
5540 }
5541
5542 kmem_free(pathname, MAXPATHLEN);
5543
5544 /*
5545 * allow for NULL minor names
5546 */
5547 if (minor_name != NULL) {
5548 se_val.value.sv_string = minor_name;
5549 if (sysevent_add_attr(&ev_attr_list, DEVFS_MINOR_NAME,
5550 &se_val, SE_SLEEP) != 0) {
5551 sysevent_free_attr(ev_attr_list);
5552 goto fail;
5553 }
5554 }
5555
5556 if ((class_name = i_ddi_devi_class(dip)) != NULL) {
5557 /* add the device class, driver name and instance attributes */
5558
5559 se_val.value_type = SE_DATA_TYPE_STRING;
5560 se_val.value.sv_string = class_name;
5561 if (sysevent_add_attr(&ev_attr_list,
5562 DEVFS_DEVI_CLASS, &se_val, SE_SLEEP) != 0) {
5563 sysevent_free_attr(ev_attr_list);
5564 goto fail;
5565 }
5566
5567 se_val.value_type = SE_DATA_TYPE_STRING;
5568 se_val.value.sv_string = (char *)ddi_driver_name(dip);
5569 if (sysevent_add_attr(&ev_attr_list,
5570 DEVFS_DRIVER_NAME, &se_val, SE_SLEEP) != 0) {
5571 sysevent_free_attr(ev_attr_list);
5572 goto fail;
5573 }
5574
5575 se_val.value_type = SE_DATA_TYPE_INT32;
5576 se_val.value.sv_int32 = ddi_get_instance(dip);
5577 if (sysevent_add_attr(&ev_attr_list,
5578 DEVFS_INSTANCE, &se_val, SE_SLEEP) != 0) {
5579 sysevent_free_attr(ev_attr_list);
5580 goto fail;
5581 }
5582
5583 }
5584
5585 if (sysevent_attach_attributes(ev, ev_attr_list) != 0) {
5586 sysevent_free_attr(ev_attr_list);
5587 } else {
5588 (void) log_sysevent(ev, SE_SLEEP, &eid);
5589 }
5590 fail:
5591 sysevent_free(ev);
5592 return (DDI_SUCCESS);
5593 }
5594
5595 /*
5596 * Derive the device class of the node.
5597 * Device class names aren't defined yet. Until this is done we use
5598 * devfs event subclass names as device class names.
5599 */
5600 static int
5601 derive_devi_class(dev_info_t *dip, char *node_type, int flag)
5602 {
5603 int rv = DDI_SUCCESS;
5604
5605 if (i_ddi_devi_class(dip) == NULL) {
5606 if (strncmp(node_type, DDI_NT_BLOCK,
5607 sizeof (DDI_NT_BLOCK) - 1) == 0 &&
5608 (node_type[sizeof (DDI_NT_BLOCK) - 1] == '\0' ||
5609 node_type[sizeof (DDI_NT_BLOCK) - 1] == ':') &&
5610 strcmp(node_type, DDI_NT_FD) != 0) {
5611
5612 rv = i_ddi_set_devi_class(dip, ESC_DISK, flag);
5613
5614 } else if (strncmp(node_type, DDI_NT_NET,
5615 sizeof (DDI_NT_NET) - 1) == 0 &&
5616 (node_type[sizeof (DDI_NT_NET) - 1] == '\0' ||
5617 node_type[sizeof (DDI_NT_NET) - 1] == ':')) {
5618
5619 rv = i_ddi_set_devi_class(dip, ESC_NETWORK, flag);
5620
5621 } else if (strncmp(node_type, DDI_NT_PRINTER,
5622 sizeof (DDI_NT_PRINTER) - 1) == 0 &&
5623 (node_type[sizeof (DDI_NT_PRINTER) - 1] == '\0' ||
5624 node_type[sizeof (DDI_NT_PRINTER) - 1] == ':')) {
5625
5626 rv = i_ddi_set_devi_class(dip, ESC_PRINTER, flag);
5627
5628 } else if (strncmp(node_type, DDI_PSEUDO,
5629 sizeof (DDI_PSEUDO) -1) == 0 &&
5630 (strncmp(ESC_LOFI, ddi_node_name(dip),
5631 sizeof (ESC_LOFI) -1) == 0)) {
5632 rv = i_ddi_set_devi_class(dip, ESC_LOFI, flag);
5633 }
5634 }
5635
5636 return (rv);
5637 }
5638
5639 /*
5640 * Check compliance with PSARC 2003/375:
5641 *
5642 * The name must contain only characters a-z, A-Z, 0-9 or _ and it must not
5643 * exceed IFNAMSIZ (16) characters in length.
5644 */
5645 static boolean_t
5646 verify_name(char *name)
5647 {
5648 size_t len = strlen(name);
5649 char *cp;
5650
5651 if (len == 0 || len > IFNAMSIZ)
5652 return (B_FALSE);
5653
5654 for (cp = name; *cp != '\0'; cp++) {
5655 if (!isalnum(*cp) && *cp != '_')
5656 return (B_FALSE);
5657 }
5658
5659 return (B_TRUE);
5660 }
5661
5662 /*
5663 * ddi_create_minor_common: Create a ddi_minor_data structure and
5664 * attach it to the given devinfo node.
5665 */
5666
int
ddi_create_minor_common(dev_info_t *dip, char *name, int spec_type,
    minor_t minor_num, char *node_type, int flag, ddi_minor_type mtype,
    const char *read_priv, const char *write_priv, mode_t priv_mode)
{
	struct ddi_minor_data *dmdp;
	major_t major;

	/* Only character and block special nodes are supported. */
	if (spec_type != S_IFCHR && spec_type != S_IFBLK)
		return (DDI_FAILURE);

	if (name == NULL)
		return (DDI_FAILURE);

	/*
	 * Log a message if the minor number the driver is creating
	 * is not expressible on the on-disk filesystem (currently
	 * this is limited to 18 bits by UFS). The device can
	 * be opened via devfs, but not by device special files created
	 * via mknod().
	 */
	if (minor_num > L_MAXMIN32) {
		cmn_err(CE_WARN,
		    "%s%d:%s minor 0x%x too big for 32-bit applications",
		    ddi_driver_name(dip), ddi_get_instance(dip),
		    name, minor_num);
		return (DDI_FAILURE);
	}

	/* dip must be bound and attached */
	major = ddi_driver_major(dip);
	ASSERT(major != DDI_MAJOR_T_NONE);

	/*
	 * Default node_type to DDI_PSEUDO and issue notice in debug mode
	 */
	if (node_type == NULL) {
		node_type = DDI_PSEUDO;
		NDI_CONFIG_DEBUG((CE_NOTE, "!illegal node_type NULL for %s%d "
		    " minor node %s; default to DDI_PSEUDO",
		    ddi_driver_name(dip), ddi_get_instance(dip), name));
	}

	/*
	 * If the driver is a network driver, ensure that the name falls within
	 * the interface naming constraints specified by PSARC/2003/375.
	 */
	if (strcmp(node_type, DDI_NT_NET) == 0) {
		if (!verify_name(name))
			return (DDI_FAILURE);

		if (mtype == DDM_MINOR) {
			struct devnames *dnp = &devnamesp[major];

			/* Mark driver as a network driver */
			LOCK_DEV_OPS(&dnp->dn_lock);
			dnp->dn_flags |= DN_NETWORK_DRIVER;

			/*
			 * If this minor node is created during the device
			 * attachment, this is a physical network device.
			 * Mark the driver as a physical network driver.
			 */
			if (DEVI_IS_ATTACHING(dip))
				dnp->dn_flags |= DN_NETWORK_PHYSDRIVER;
			UNLOCK_DEV_OPS(&dnp->dn_lock);
		}
	}

	/* Derive the device class from node_type for regular minor nodes. */
	if (mtype == DDM_MINOR) {
		if (derive_devi_class(dip, node_type, KM_NOSLEEP) !=
		    DDI_SUCCESS)
			return (DDI_FAILURE);
	}

	/*
	 * Take care of minor number information for the node.
	 * Allocations are KM_NOSLEEP since this path must not block.
	 */

	if ((dmdp = kmem_zalloc(sizeof (struct ddi_minor_data),
	    KM_NOSLEEP)) == NULL) {
		return (DDI_FAILURE);
	}
	if ((dmdp->ddm_name = i_ddi_strdup(name, KM_NOSLEEP)) == NULL) {
		kmem_free(dmdp, sizeof (struct ddi_minor_data));
		return (DDI_FAILURE);
	}
	dmdp->dip = dip;
	dmdp->ddm_dev = makedevice(major, minor_num);
	dmdp->ddm_spec_type = spec_type;
	dmdp->ddm_node_type = node_type;
	dmdp->type = mtype;
	if (flag & CLONE_DEV) {
		/*
		 * Clone devices are reached via the clone driver's major;
		 * this driver's major becomes the minor of the alias node.
		 */
		dmdp->type = DDM_ALIAS;
		dmdp->ddm_dev = makedevice(ddi_driver_major(clone_dip), major);
	}
	if (flag & PRIVONLY_DEV) {
		dmdp->ddm_flags |= DM_NO_FSPERM;
	}
	if (read_priv || write_priv) {
		dmdp->ddm_node_priv =
		    devpolicy_priv_by_name(read_priv, write_priv);
	}
	dmdp->ddm_priv_mode = priv_mode;

	ddi_append_minor_node(dip, dmdp);

	/*
	 * only log ddi_create_minor_node() calls which occur
	 * outside the scope of attach(9e)/detach(9e) reconfigurations
	 */
	if (!(DEVI_IS_ATTACHING(dip) || DEVI_IS_DETACHING(dip)) &&
	    mtype != DDM_INTERNAL_PATH) {
		(void) i_log_devfs_minor_create(dip, name);
	}

	/*
	 * Check if any dacf rules match the creation of this minor node
	 */
	dacfc_match_create_minor(name, node_type, dip, dmdp, flag);
	return (DDI_SUCCESS);
}
5789
5790 int
5791 ddi_create_minor_node(dev_info_t *dip, char *name, int spec_type,
5792 minor_t minor_num, char *node_type, int flag)
5793 {
5794 return (ddi_create_minor_common(dip, name, spec_type, minor_num,
5795 node_type, flag, DDM_MINOR, NULL, NULL, 0));
5796 }
5797
5798 int
5799 ddi_create_priv_minor_node(dev_info_t *dip, char *name, int spec_type,
5800 minor_t minor_num, char *node_type, int flag,
5801 const char *rdpriv, const char *wrpriv, mode_t priv_mode)
5802 {
5803 return (ddi_create_minor_common(dip, name, spec_type, minor_num,
5804 node_type, flag, DDM_MINOR, rdpriv, wrpriv, priv_mode));
5805 }
5806
5807 int
5808 ddi_create_default_minor_node(dev_info_t *dip, char *name, int spec_type,
5809 minor_t minor_num, char *node_type, int flag)
5810 {
5811 return (ddi_create_minor_common(dip, name, spec_type, minor_num,
5812 node_type, flag, DDM_DEFAULT, NULL, NULL, 0));
5813 }
5814
5815 /*
5816 * Internal (non-ddi) routine for drivers to export names known
5817 * to the kernel (especially ddi_pathname_to_dev_t and friends)
5818 * but not exported externally to /dev
5819 */
5820 int
5821 ddi_create_internal_pathname(dev_info_t *dip, char *name, int spec_type,
5822 minor_t minor_num)
5823 {
5824 return (ddi_create_minor_common(dip, name, spec_type, minor_num,
5825 "internal", 0, DDM_INTERNAL_PATH, NULL, NULL, 0));
5826 }
5827
/*
 * Remove the minor node named 'name' from dip's minor list, or all of
 * dip's minor nodes if 'name' is NULL.  Frees the node's name string,
 * device privilege, and dacf client data.
 */
void
ddi_remove_minor_node(dev_info_t *dip, char *name)
{
	int circ;
	struct ddi_minor_data *dmdp, *dmdp1;
	struct ddi_minor_data **dmdp_prev;

	ndi_devi_enter(dip, &circ);
	/* dmdp_prev tracks the link to patch when we unlink a node. */
	dmdp_prev = &DEVI(dip)->devi_minor;
	dmdp = DEVI(dip)->devi_minor;
	while (dmdp != NULL) {
		/* Save the successor before we potentially free dmdp. */
		dmdp1 = dmdp->next;
		if ((name == NULL || (dmdp->ddm_name != NULL &&
		    strcmp(name, dmdp->ddm_name) == 0))) {
			if (dmdp->ddm_name != NULL) {
				/* Internal paths were never logged. */
				if (dmdp->type != DDM_INTERNAL_PATH)
					(void) i_log_devfs_minor_remove(dip,
					    dmdp->ddm_name);
				kmem_free(dmdp->ddm_name,
				    strlen(dmdp->ddm_name) + 1);
			}
			/*
			 * Release device privilege, if any.
			 * Release dacf client data associated with this minor
			 * node by storing NULL.
			 */
			if (dmdp->ddm_node_priv)
				dpfree(dmdp->ddm_node_priv);
			dacf_store_info((dacf_infohdl_t)dmdp, NULL);
			kmem_free(dmdp, sizeof (struct ddi_minor_data));
			/* Unlink the freed node from the list. */
			*dmdp_prev = dmdp1;
			/*
			 * OK, we found it, so get out now -- if we drive on,
			 * we will strcmp against garbage. See 1139209.
			 */
			if (name != NULL)
				break;
		} else {
			dmdp_prev = &dmdp->next;
		}
		dmdp = dmdp1;
	}
	ndi_devi_exit(dip, circ);
}
5872
5873
5874 int
5875 ddi_in_panic()
5876 {
5877 return (panicstr != NULL);
5878 }
5879
5880
5881 /*
5882 * Find first bit set in a mask (returned counting from 1 up)
5883 */
5884
int
ddi_ffs(long mask)
{
	/*
	 * Delegate to ffs(): returns 0 when no bit is set, otherwise the
	 * 1-based index of the least significant set bit.
	 */
	return (ffs(mask));
}
5890
5891 /*
5892 * Find last bit set. Take mask and clear
5893 * all but the most significant bit, and
5894 * then let ffs do the rest of the work.
5895 *
5896 * Algorithm courtesy of Steve Chessin.
5897 */
5898
int
ddi_fls(long mask)
{
	long stripped;

	/*
	 * Repeatedly strip the lowest set bit (x & (x - 1)) until only the
	 * most significant bit remains, then let ffs() report its 1-based
	 * position.  A zero mask falls straight through to ffs(0) == 0.
	 */
	for (stripped = mask; stripped != 0; stripped &= (stripped - 1))
		mask = stripped;

	return (ffs(mask));
}
5911
5912 /*
5913 * The ddi_soft_state_* routines comprise generic storage management utilities
5914 * for driver soft state structures (in "the old days," this was done with
5915 * statically sized array - big systems and dynamic loading and unloading
5916 * make heap allocation more attractive).
5917 */
5918
5919 /*
5920 * Allocate a set of pointers to 'n_items' objects of size 'size'
5921 * bytes. Each pointer is initialized to nil.
5922 *
5923 * The 'size' and 'n_items' values are stashed in the opaque
5924 * handle returned to the caller.
5925 *
5926 * This implementation interprets 'set of pointers' to mean 'array
5927 * of pointers' but note that nothing in the interface definition
5928 * precludes an implementation that uses, for example, a linked list.
5929 * However there should be a small efficiency gain from using an array
5930 * at lookup time.
5931 *
5932 * NOTE As an optimization, we make our growable array allocations in
5933 * powers of two (bytes), since that's how much kmem_alloc (currently)
5934 * gives us anyway. It should save us some free/realloc's ..
5935 *
5936 * As a further optimization, we make the growable array start out
5937 * with MIN_N_ITEMS in it.
5938 */
5939
5940 #define MIN_N_ITEMS 8 /* 8 void *'s == 32 bytes */
5941
/*
 * Initialize a soft-state set: allocate the handle and an initial array
 * of n_items (rounded up) NULL pointers.  Returns 0 or EINVAL.
 */
int
ddi_soft_state_init(void **state_p, size_t size, size_t n_items)
{
	i_ddi_soft_state *ss;

	if (state_p == NULL || size == 0)
		return (EINVAL);

	ss = kmem_zalloc(sizeof (*ss), KM_SLEEP);
	mutex_init(&ss->lock, NULL, MUTEX_DRIVER, NULL);
	ss->size = size;

	if (n_items < MIN_N_ITEMS)
		ss->n_items = MIN_N_ITEMS;
	else {
		int bitlog;

		/*
		 * Round n_items up to a power of two: when n_items already
		 * is one, fls == ffs and we keep it unchanged (1 << (fls-1));
		 * otherwise 1 << fls(n_items) is the next power of two.
		 */
		if ((bitlog = ddi_fls(n_items)) == ddi_ffs(n_items))
			bitlog--;
		ss->n_items = 1 << bitlog;
	}

	ASSERT(ss->n_items >= n_items);

	ss->array = kmem_zalloc(ss->n_items * sizeof (void *), KM_SLEEP);

	*state_p = ss;
	return (0);
}
5971
5972 /*
5973 * Allocate a state structure of size 'size' to be associated
5974 * with item 'item'.
5975 *
5976 * In this implementation, the array is extended to
5977 * allow the requested offset, if needed.
5978 */
int
ddi_soft_state_zalloc(void *state, int item)
{
	i_ddi_soft_state *ss = (i_ddi_soft_state *)state;
	void **array;
	void *new_element;

	if ((state == NULL) || (item < 0))
		return (DDI_FAILURE);

	mutex_enter(&ss->lock);
	/* size == 0 means the handle was never initialized (or was torn down) */
	if (ss->size == 0) {
		mutex_exit(&ss->lock);
		cmn_err(CE_WARN, "ddi_soft_state_zalloc: bad handle: %s",
		    mod_containing_pc(caller()));
		return (DDI_FAILURE);
	}

	array = ss->array;	/* NULL if ss->n_items == 0 */
	ASSERT(ss->n_items != 0 && array != NULL);

	/*
	 * refuse to tread on an existing element
	 */
	if (item < ss->n_items && array[item] != NULL) {
		mutex_exit(&ss->lock);
		return (DDI_FAILURE);
	}

	/*
	 * Allocate a new element to plug in
	 */
	new_element = kmem_zalloc(ss->size, KM_SLEEP);

	/*
	 * Check if the array is big enough, if not, grow it.
	 */
	if (item >= ss->n_items) {
		void **new_array;
		size_t new_n_items;
		struct i_ddi_soft_state *dirty;

		/*
		 * Allocate a new array of the right length, copy
		 * all the old pointers to the new array, then
		 * if it exists at all, put the old array on the
		 * dirty list.
		 *
		 * Note that we can't kmem_free() the old array.
		 *
		 * Why -- well the 'get' operation is 'mutex-free', so we
		 * can't easily catch a suspended thread that is just about
		 * to dereference the array we just grew out of. So we
		 * cons up a header and put it on a list of 'dirty'
		 * pointer arrays. (Dirty in the sense that there may
		 * be suspended threads somewhere that are in the middle
		 * of referencing them). Fortunately, we -can- garbage
		 * collect it all at ddi_soft_state_fini time.
		 */
		new_n_items = ss->n_items;
		while (new_n_items < (1 + item))
			new_n_items <<= 1;	/* double array size .. */

		ASSERT(new_n_items >= (1 + item));	/* sanity check! */

		new_array = kmem_zalloc(new_n_items * sizeof (void *),
		    KM_SLEEP);
		/*
		 * Copy the pointers into the new array
		 */
		bcopy(array, new_array, ss->n_items * sizeof (void *));

		/*
		 * Save the old array on the dirty list
		 */
		dirty = kmem_zalloc(sizeof (*dirty), KM_SLEEP);
		dirty->array = ss->array;
		dirty->n_items = ss->n_items;
		dirty->next = ss->next;
		ss->next = dirty;

		/* Publish the new array; readers may still use old ones. */
		ss->array = (array = new_array);
		ss->n_items = new_n_items;
	}

	ASSERT(array != NULL && item < ss->n_items && array[item] == NULL);

	array[item] = new_element;

	mutex_exit(&ss->lock);
	return (DDI_SUCCESS);
}
6071
6072 /*
6073 * Fetch a pointer to the allocated soft state structure.
6074 *
6075 * This is designed to be cheap.
6076 *
6077 * There's an argument that there should be more checking for
6078 * nil pointers and out of bounds on the array.. but we do a lot
6079 * of that in the alloc/free routines.
6080 *
6081 * An array has the convenience that we don't need to lock read-access
6082 * to it c.f. a linked list. However our "expanding array" strategy
6083 * means that we should hold a readers lock on the i_ddi_soft_state
6084 * structure.
6085 *
6086 * However, from a performance viewpoint, we need to do it without
6087 * any locks at all -- this also makes it a leaf routine. The algorithm
6088 * is 'lock-free' because we only discard the pointer arrays at
6089 * ddi_soft_state_fini() time.
6090 */
6091 void *
6092 ddi_get_soft_state(void *state, int item)
6093 {
6094 i_ddi_soft_state *ss = (i_ddi_soft_state *)state;
6095
6096 ASSERT((ss != NULL) && (item >= 0));
6097
6098 if (item < ss->n_items && ss->array != NULL)
6099 return (ss->array[item]);
6100 return (NULL);
6101 }
6102
6103 /*
6104 * Free the state structure corresponding to 'item.' Freeing an
6105 * element that has either gone or was never allocated is not
6106 * considered an error. Note that we free the state structure, but
6107 * we don't shrink our pointer array, or discard 'dirty' arrays,
6108 * since even a few pointers don't really waste too much memory.
6109 *
6110 * Passing an item number that is out of bounds, or a null pointer will
6111 * provoke an error message.
6112 */
void
ddi_soft_state_free(void *state, int item)
{
	i_ddi_soft_state *ss = (i_ddi_soft_state *)state;
	void **array;
	void *element;
	static char msg[] = "ddi_soft_state_free:";

	if (ss == NULL) {
		cmn_err(CE_WARN, "%s null handle: %s",
		    msg, mod_containing_pc(caller()));
		return;
	}

	element = NULL;

	mutex_enter(&ss->lock);

	/* Diagnose bad handles and out-of-range items; neither frees. */
	if ((array = ss->array) == NULL || ss->size == 0) {
		cmn_err(CE_WARN, "%s bad handle: %s",
		    msg, mod_containing_pc(caller()));
	} else if (item < 0 || item >= ss->n_items) {
		cmn_err(CE_WARN, "%s item %d not in range [0..%lu]: %s",
		    msg, item, ss->n_items - 1, mod_containing_pc(caller()));
	} else if (array[item] != NULL) {
		/* Detach under the lock; free after dropping it. */
		element = array[item];
		array[item] = NULL;
	}

	mutex_exit(&ss->lock);

	/*
	 * ss->size is set once at init time and not modified by
	 * alloc/free, so reading it after dropping the lock is safe.
	 */
	if (element)
		kmem_free(element, ss->size);
}
6147
6148 /*
6149 * Free the entire set of pointers, and any
6150 * soft state structures contained therein.
6151 *
6152 * Note that we don't grab the ss->lock mutex, even though
6153 * we're inspecting the various fields of the data structure.
6154 *
6155 * There is an implicit assumption that this routine will
6156 * never run concurrently with any of the above on this
6157 * particular state structure i.e. by the time the driver
6158 * calls this routine, there should be no other threads
6159 * running in the driver.
6160 */
void
ddi_soft_state_fini(void **state_p)
{
	i_ddi_soft_state *ss, *dirty;
	int item;
	static char msg[] = "ddi_soft_state_fini:";

	if (state_p == NULL ||
	    (ss = (i_ddi_soft_state *)(*state_p)) == NULL) {
		cmn_err(CE_WARN, "%s null handle: %s",
		    msg, mod_containing_pc(caller()));
		return;
	}

	/* size == 0 means the handle was never properly initialized. */
	if (ss->size == 0) {
		cmn_err(CE_WARN, "%s bad handle: %s",
		    msg, mod_containing_pc(caller()));
		return;
	}

	/* Free every remaining element, then the current array itself. */
	if (ss->n_items > 0) {
		for (item = 0; item < ss->n_items; item++)
			ddi_soft_state_free(ss, item);
		kmem_free(ss->array, ss->n_items * sizeof (void *));
	}

	/*
	 * Now delete any dirty arrays from previous 'grow' operations
	 */
	for (dirty = ss->next; dirty; dirty = ss->next) {
		ss->next = dirty->next;
		kmem_free(dirty->array, dirty->n_items * sizeof (void *));
		kmem_free(dirty, sizeof (*dirty));
	}

	mutex_destroy(&ss->lock);
	kmem_free(ss, sizeof (*ss));

	*state_p = NULL;
}
6201
6202 #define SS_N_ITEMS_PER_HASH 16
6203 #define SS_MIN_HASH_SZ 16
6204 #define SS_MAX_HASH_SZ 4096
6205
6206 int
6207 ddi_soft_state_bystr_init(ddi_soft_state_bystr **state_p, size_t size,
6208 int n_items)
6209 {
6210 i_ddi_soft_state_bystr *sss;
6211 int hash_sz;
6212
6213 ASSERT(state_p && size && n_items);
6214 if ((state_p == NULL) || (size == 0) || (n_items == 0))
6215 return (EINVAL);
6216
6217 /* current implementation is based on hash, convert n_items to hash */
6218 hash_sz = n_items / SS_N_ITEMS_PER_HASH;
6219 if (hash_sz < SS_MIN_HASH_SZ)
6220 hash_sz = SS_MIN_HASH_SZ;
6221 else if (hash_sz > SS_MAX_HASH_SZ)
6222 hash_sz = SS_MAX_HASH_SZ;
6223
6224 /* allocate soft_state pool */
6225 sss = kmem_zalloc(sizeof (*sss), KM_SLEEP);
6226 sss->ss_size = size;
6227 sss->ss_mod_hash = mod_hash_create_strhash("soft_state_bystr",
6228 hash_sz, mod_hash_null_valdtor);
6229 *state_p = (ddi_soft_state_bystr *)sss;
6230 return (0);
6231 }
6232
/*
 * Allocate a zeroed soft-state element keyed by string 'str'.
 * Fails if the handle is bad or an element already exists for 'str'.
 */
int
ddi_soft_state_bystr_zalloc(ddi_soft_state_bystr *state, const char *str)
{
	i_ddi_soft_state_bystr *sss = (i_ddi_soft_state_bystr *)state;
	void *sso;
	char *dup_str;

	ASSERT(sss && str && sss->ss_mod_hash);
	if ((sss == NULL) || (str == NULL) || (sss->ss_mod_hash == NULL))
		return (DDI_FAILURE);
	sso = kmem_zalloc(sss->ss_size, KM_SLEEP);
	/* The hash owns its own copy of the key string. */
	dup_str = i_ddi_strdup((char *)str, KM_SLEEP);
	if (mod_hash_insert(sss->ss_mod_hash,
	    (mod_hash_key_t)dup_str, (mod_hash_val_t)sso) == 0)
		return (DDI_SUCCESS);

	/*
	 * The only error from an strhash insert is caused by a duplicate key.
	 * We refuse to tread on an existing element, so free and fail.
	 */
	kmem_free(dup_str, strlen(dup_str) + 1);
	kmem_free(sso, sss->ss_size);
	return (DDI_FAILURE);
}
6257
6258 void *
6259 ddi_soft_state_bystr_get(ddi_soft_state_bystr *state, const char *str)
6260 {
6261 i_ddi_soft_state_bystr *sss = (i_ddi_soft_state_bystr *)state;
6262 void *sso;
6263
6264 ASSERT(sss && str && sss->ss_mod_hash);
6265 if ((sss == NULL) || (str == NULL) || (sss->ss_mod_hash == NULL))
6266 return (NULL);
6267
6268 if (mod_hash_find(sss->ss_mod_hash,
6269 (mod_hash_key_t)str, (mod_hash_val_t *)&sso) == 0)
6270 return (sso);
6271 return (NULL);
6272 }
6273
6274 void
6275 ddi_soft_state_bystr_free(ddi_soft_state_bystr *state, const char *str)
6276 {
6277 i_ddi_soft_state_bystr *sss = (i_ddi_soft_state_bystr *)state;
6278 void *sso;
6279
6280 ASSERT(sss && str && sss->ss_mod_hash);
6281 if ((sss == NULL) || (str == NULL) || (sss->ss_mod_hash == NULL))
6282 return;
6283
6284 (void) mod_hash_remove(sss->ss_mod_hash,
6285 (mod_hash_key_t)str, (mod_hash_val_t *)&sso);
6286 kmem_free(sso, sss->ss_size);
6287 }
6288
6289 void
6290 ddi_soft_state_bystr_fini(ddi_soft_state_bystr **state_p)
6291 {
6292 i_ddi_soft_state_bystr *sss;
6293
6294 ASSERT(state_p);
6295 if (state_p == NULL)
6296 return;
6297
6298 sss = (i_ddi_soft_state_bystr *)(*state_p);
6299 if (sss == NULL)
6300 return;
6301
6302 ASSERT(sss->ss_mod_hash);
6303 if (sss->ss_mod_hash) {
6304 mod_hash_destroy_strhash(sss->ss_mod_hash);
6305 sss->ss_mod_hash = NULL;
6306 }
6307
6308 kmem_free(sss, sizeof (*sss));
6309 *state_p = NULL;
6310 }
6311
6312 /*
6313 * The ddi_strid_* routines provide string-to-index management utilities.
6314 */
6315 /* allocate and initialize an strid set */
6316 int
6317 ddi_strid_init(ddi_strid **strid_p, int n_items)
6318 {
6319 i_ddi_strid *ss;
6320 int hash_sz;
6321
6322 if (strid_p == NULL)
6323 return (DDI_FAILURE);
6324
6325 /* current implementation is based on hash, convert n_items to hash */
6326 hash_sz = n_items / SS_N_ITEMS_PER_HASH;
6327 if (hash_sz < SS_MIN_HASH_SZ)
6328 hash_sz = SS_MIN_HASH_SZ;
6329 else if (hash_sz > SS_MAX_HASH_SZ)
6330 hash_sz = SS_MAX_HASH_SZ;
6331
6332 ss = kmem_alloc(sizeof (*ss), KM_SLEEP);
6333 ss->strid_chunksz = n_items;
6334 ss->strid_spacesz = n_items;
6335 ss->strid_space = id_space_create("strid", 1, n_items);
6336 ss->strid_bystr = mod_hash_create_strhash("strid_bystr", hash_sz,
6337 mod_hash_null_valdtor);
6338 ss->strid_byid = mod_hash_create_idhash("strid_byid", hash_sz,
6339 mod_hash_null_valdtor);
6340 *strid_p = (ddi_strid *)ss;
6341 return (DDI_SUCCESS);
6342 }
6343
6344 /* allocate an id mapping within the specified set for str, return id */
static id_t
i_ddi_strid_alloc(ddi_strid *strid, char *str)
{
	i_ddi_strid *ss = (i_ddi_strid *)strid;
	id_t id;
	char *s;

	ASSERT(ss && str);
	if ((ss == NULL) || (str == NULL))
		return (0);

	/*
	 * Allocate an id using VM_FIRSTFIT in order to keep allocated id
	 * range as compressed as possible. This is important to minimize
	 * the amount of space used when the id is used as a ddi_soft_state
	 * index by the caller.
	 *
	 * If the id list is exhausted, increase the size of the list
	 * by the chunk size specified in ddi_strid_init and reattempt
	 * the allocation
	 */
	if ((id = id_allocff_nosleep(ss->strid_space)) == (id_t)-1) {
		id_space_extend(ss->strid_space, ss->strid_spacesz,
		    ss->strid_spacesz + ss->strid_chunksz);
		ss->strid_spacesz += ss->strid_chunksz;
		if ((id = id_allocff_nosleep(ss->strid_space)) == (id_t)-1)
			return (0);
	}

	/*
	 * NOTE: since we create and destroy in unison we can save space by
	 * using bystr key as the byid value. This means destroy must occur
	 * in (byid, bystr) order.
	 */
	s = i_ddi_strdup(str, KM_SLEEP);
	/* On either insert failure, back out and report failure (0). */
	if (mod_hash_insert(ss->strid_bystr, (mod_hash_key_t)s,
	    (mod_hash_val_t)(intptr_t)id) != 0) {
		ddi_strid_free(strid, id);
		return (0);
	}
	if (mod_hash_insert(ss->strid_byid, (mod_hash_key_t)(intptr_t)id,
	    (mod_hash_val_t)s) != 0) {
		ddi_strid_free(strid, id);
		return (0);
	}

	/* NOTE: s is freed on mod_hash_destroy by mod_hash_strval_dtor */
	return (id);
}
6394
6395 /* allocate an id mapping within the specified set for str, return id */
6396 id_t
6397 ddi_strid_alloc(ddi_strid *strid, char *str)
6398 {
6399 return (i_ddi_strid_alloc(strid, str));
6400 }
6401
6402 /* return the id within the specified strid given the str */
6403 id_t
6404 ddi_strid_str2id(ddi_strid *strid, char *str)
6405 {
6406 i_ddi_strid *ss = (i_ddi_strid *)strid;
6407 id_t id = 0;
6408 mod_hash_val_t hv;
6409
6410 ASSERT(ss && str);
6411 if (ss && str && (mod_hash_find(ss->strid_bystr,
6412 (mod_hash_key_t)str, &hv) == 0))
6413 id = (int)(intptr_t)hv;
6414 return (id);
6415 }
6416
6417 /* return str within the specified strid given the id */
6418 char *
6419 ddi_strid_id2str(ddi_strid *strid, id_t id)
6420 {
6421 i_ddi_strid *ss = (i_ddi_strid *)strid;
6422 char *str = NULL;
6423 mod_hash_val_t hv;
6424
6425 ASSERT(ss && id > 0);
6426 if (ss && (id > 0) && (mod_hash_find(ss->strid_byid,
6427 (mod_hash_key_t)(uintptr_t)id, &hv) == 0))
6428 str = (char *)hv;
6429 return (str);
6430 }
6431
6432 /* free the id mapping within the specified strid */
void
ddi_strid_free(ddi_strid *strid, id_t id)
{
	i_ddi_strid *ss = (i_ddi_strid *)strid;
	char *str;

	ASSERT(ss && id > 0);
	if ((ss == NULL) || (id <= 0))
		return;

	/* bystr key is byid value: destroy order must be (byid, bystr) */
	str = ddi_strid_id2str(strid, id);
	(void) mod_hash_destroy(ss->strid_byid, (mod_hash_key_t)(uintptr_t)id);
	id_free(ss->strid_space, id);

	/*
	 * 'str' is still valid here: the byid hash was created with
	 * mod_hash_null_valdtor, so destroying its entry above did not
	 * free the string.  The bystr removal releases it.
	 */
	if (str)
		(void) mod_hash_destroy(ss->strid_bystr, (mod_hash_key_t)str);
}
6451
6452 /* destroy the strid set */
6453 void
6454 ddi_strid_fini(ddi_strid **strid_p)
6455 {
6456 i_ddi_strid *ss;
6457
6458 ASSERT(strid_p);
6459 if (strid_p == NULL)
6460 return;
6461
6462 ss = (i_ddi_strid *)(*strid_p);
6463 if (ss == NULL)
6464 return;
6465
6466 /* bystr key is byid value: destroy order must be (byid, bystr) */
6467 if (ss->strid_byid)
6468 mod_hash_destroy_hash(ss->strid_byid);
6469 if (ss->strid_byid)
6470 mod_hash_destroy_hash(ss->strid_bystr);
6471 if (ss->strid_space)
6472 id_space_destroy(ss->strid_space);
6473 kmem_free(ss, sizeof (*ss));
6474 *strid_p = NULL;
6475 }
6476
6477 /*
6478 * This sets the devi_addr entry in the dev_info structure 'dip' to 'name'.
6479 * Storage is double buffered to prevent updates during devi_addr use -
 * double buffering is adequate for reliable ddi_deviname() consumption.
6481 * The double buffer is not freed until dev_info structure destruction
6482 * (by i_ddi_free_node).
6483 */
void
ddi_set_name_addr(dev_info_t *dip, char *name)
{
	char *buf = DEVI(dip)->devi_addr_buf;
	char *newaddr;

	/* Lazily allocate the two MAXNAMELEN halves of the double buffer. */
	if (buf == NULL) {
		buf = kmem_zalloc(2 * MAXNAMELEN, KM_SLEEP);
		DEVI(dip)->devi_addr_buf = buf;
	}

	if (name) {
		ASSERT(strlen(name) < MAXNAMELEN);
		/*
		 * Alternate between the two halves so a concurrent reader
		 * of the old devi_addr keeps seeing stable contents.
		 */
		newaddr = (DEVI(dip)->devi_addr == buf) ?
		    (buf + MAXNAMELEN) : buf;
		(void) strlcpy(newaddr, name, MAXNAMELEN);
	} else
		newaddr = NULL;

	DEVI(dip)->devi_addr = newaddr;
}
6505
6506 char *
6507 ddi_get_name_addr(dev_info_t *dip)
6508 {
6509 return (DEVI(dip)->devi_addr);
6510 }
6511
6512 void
6513 ddi_set_parent_data(dev_info_t *dip, void *pd)
6514 {
6515 DEVI(dip)->devi_parent_data = pd;
6516 }
6517
6518 void *
6519 ddi_get_parent_data(dev_info_t *dip)
6520 {
6521 return (DEVI(dip)->devi_parent_data);
6522 }
6523
6524 /*
6525 * ddi_name_to_major: returns the major number of a named module,
6526 * derived from the current driver alias binding.
6527 *
6528 * Caveat: drivers should avoid the use of this function, in particular
6529 * together with ddi_get_name/ddi_binding name, as per
6530 * major = ddi_name_to_major(ddi_get_name(devi));
6531 * ddi_name_to_major() relies on the state of the device/alias binding,
6532 * which can and does change dynamically as aliases are administered
6533 * over time. An attached device instance cannot rely on the major
6534 * number returned by ddi_name_to_major() to match its own major number.
6535 *
6536 * For driver use, ddi_driver_major() reliably returns the major number
6537 * for the module to which the device was bound at attach time over
6538 * the life of the instance.
6539 * major = ddi_driver_major(dev_info_t *)
6540 */
major_t
ddi_name_to_major(char *name)
{
	/* Resolve a module name via the current driver/alias binding. */
	return (mod_name_to_major(name));
}
6546
6547 /*
6548 * ddi_major_to_name: Returns the module name bound to a major number.
6549 */
char *
ddi_major_to_name(major_t major)
{
	/* Reverse mapping of ddi_name_to_major(). */
	return (mod_major_to_name(major));
}
6555
6556 /*
6557 * Return the name of the devinfo node pointed at by 'dip' in the buffer
6558 * pointed at by 'name.' A devinfo node is named as a result of calling
6559 * ddi_initchild().
6560 *
6561 * Note: the driver must be held before calling this function!
6562 */
char *
ddi_deviname(dev_info_t *dip, char *name)
{
	char *addrname;
	char none = '\0';

	/* The root node contributes the empty string. */
	if (dip == ddi_root_node()) {
		*name = '\0';
		return (name);
	}

	if (i_ddi_node_state(dip) < DS_BOUND) {
		addrname = &none;
	} else {
		/*
		 * Use ddi_get_name_addr() without checking state so we get
		 * a unit-address if we are called after ddi_set_name_addr()
		 * by nexus DDI_CTL_INITCHILD code, but before completing
		 * node promotion to DS_INITIALIZED. We currently have
		 * two situations where we are called in this state:
		 *   o For framework processing of a path-oriented alias.
		 *   o If a SCSA nexus driver calls ddi_devid_register()
		 *     from it's tran_tgt_init(9E) implementation.
		 */
		addrname = ddi_get_name_addr(dip);
		if (addrname == NULL)
			addrname = &none;
	}

	/*
	 * NOTE(review): sprintf() assumes 'name' is large enough for
	 * "/<node>[@<addr>]"; callers appear to pass MAXPATHLEN-class
	 * buffers -- confirm before shrinking any caller's buffer.
	 */
	if (*addrname == '\0') {
		(void) sprintf(name, "/%s", ddi_node_name(dip));
	} else {
		(void) sprintf(name, "/%s@%s", ddi_node_name(dip), addrname);
	}

	return (name);
}
6600
6601 /*
6602 * Spits out the name of device node, typically name@addr, for a given node,
6603 * using the driver name, not the nodename.
6604 *
6605 * Used by match_parent. Not to be used elsewhere.
6606 */
char *
i_ddi_parname(dev_info_t *dip, char *name)
{
	char *addrname;

	/* The root node contributes the empty string. */
	if (dip == ddi_root_node()) {
		*name = '\0';
		return (name);
	}

	/*
	 * Node must be initialized: the ASSERT also justifies the
	 * unchecked dereference of ddi_get_name_addr() below.
	 */
	ASSERT(i_ddi_node_state(dip) >= DS_INITIALIZED);

	if (*(addrname = ddi_get_name_addr(dip)) == '\0')
		(void) sprintf(name, "%s", ddi_binding_name(dip));
	else
		(void) sprintf(name, "%s@%s", ddi_binding_name(dip), addrname);
	return (name);
}
6625
6626 static char *
6627 pathname_work(dev_info_t *dip, char *path)
6628 {
6629 char *bp;
6630
6631 if (dip == ddi_root_node()) {
6632 *path = '\0';
6633 return (path);
6634 }
6635 (void) pathname_work(ddi_get_parent(dip), path);
6636 bp = path + strlen(path);
6637 (void) ddi_deviname(dip, bp);
6638 return (path);
6639 }
6640
6641 char *
6642 ddi_pathname(dev_info_t *dip, char *path)
6643 {
6644 return (pathname_work(dip, path));
6645 }
6646
6647 char *
6648 ddi_pathname_minor(struct ddi_minor_data *dmdp, char *path)
6649 {
6650 if (dmdp->dip == NULL)
6651 *path = '\0';
6652 else {
6653 (void) ddi_pathname(dmdp->dip, path);
6654 if (dmdp->ddm_name) {
6655 (void) strcat(path, ":");
6656 (void) strcat(path, dmdp->ddm_name);
6657 }
6658 }
6659 return (path);
6660 }
6661
static char *
pathname_work_obp(dev_info_t *dip, char *path)
{
	char *bp;
	char *obp_path;

	/*
	 * look up the "obp-path" property, return the path if it exists
	 */
	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "obp-path", &obp_path) == DDI_PROP_SUCCESS) {
		(void) strcpy(path, obp_path);
		ddi_prop_free(obp_path);
		return (path);
	}

	/*
	 * stop at root, no obp path
	 */
	if (dip == ddi_root_node()) {
		return (NULL);
	}

	/* A NULL from any ancestor means no obp path exists at all. */
	obp_path = pathname_work_obp(ddi_get_parent(dip), path);
	if (obp_path == NULL)
		return (NULL);

	/*
	 * append our component to parent's obp path
	 *
	 * NOTE(review): *(bp - 1) assumes the parent's path is non-empty
	 * (i.e. its "obp-path" property is never "") -- confirm, since an
	 * empty string would read one byte before 'path'.
	 */
	bp = path + strlen(path);
	if (*(bp - 1) != '/')
		(void) strcat(bp++, "/");
	(void) ddi_deviname(dip, bp);
	return (path);
}
6698
6699 /*
6700 * return the 'obp-path' based path for the given node, or NULL if the node
6701 * does not have a different obp path. NOTE: Unlike ddi_pathname, this
6702 * function can't be called from interrupt context (since we need to
6703 * lookup a string property).
6704 */
char *
ddi_pathname_obp(dev_info_t *dip, char *path)
{
	/* Property lookup may block, so interrupt context is illegal. */
	ASSERT(!servicing_interrupt());
	if (dip == NULL || path == NULL)
		return (NULL);

	/* split work into a separate function to aid debugging */
	return (pathname_work_obp(dip, path));
}
6715
6716 int
6717 ddi_pathname_obp_set(dev_info_t *dip, char *component)
6718 {
6719 dev_info_t *pdip;
6720 char *obp_path = NULL;
6721 int rc = DDI_FAILURE;
6722
6723 if (dip == NULL)
6724 return (DDI_FAILURE);
6725
6726 obp_path = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
6727
6728 pdip = ddi_get_parent(dip);
6729
6730 if (ddi_pathname_obp(pdip, obp_path) == NULL) {
6731 (void) ddi_pathname(pdip, obp_path);
6732 }
6733
6734 if (component) {
6735 (void) strncat(obp_path, "/", MAXPATHLEN);
6736 (void) strncat(obp_path, component, MAXPATHLEN);
6737 }
6738 rc = ndi_prop_update_string(DDI_DEV_T_NONE, dip, "obp-path",
6739 obp_path);
6740
6741 if (obp_path)
6742 kmem_free(obp_path, MAXPATHLEN);
6743
6744 return (rc);
6745 }
6746
6747 /*
6748 * Given a dev_t, return the pathname of the corresponding device in the
6749 * buffer pointed at by "path." The buffer is assumed to be large enough
6750 * to hold the pathname of the device (MAXPATHLEN).
6751 *
6752 * The pathname of a device is the pathname of the devinfo node to which
6753 * the device "belongs," concatenated with the character ':' and the name
6754 * of the minor node corresponding to the dev_t. If spec_type is 0 then
6755 * just the pathname of the devinfo node is returned without driving attach
6756 * of that node. For a non-zero spec_type, an attach is performed and a
6757 * search of the minor list occurs.
6758 *
6759 * It is possible that the path associated with the dev_t is not
6760 * currently available in the devinfo tree. In order to have a
6761 * dev_t, a device must have been discovered before, which means
6762 * that the path is always in the instance tree. The one exception
6763 * to this is if the dev_t is associated with a pseudo driver, in
6764 * which case the device must exist on the pseudo branch of the
6765 * devinfo tree as a result of parsing .conf files.
6766 */
int
ddi_dev_pathname(dev_t devt, int spec_type, char *path)
{
	int circ;
	major_t major = getmajor(devt);
	int instance;
	dev_info_t *dip;
	char *minorname;
	char *drvname;

	if (major >= devcnt)
		goto fail;
	if (major == clone_major) {
		/* clone has no minor nodes, manufacture the path here */
		if ((drvname = ddi_major_to_name(getminor(devt))) == NULL)
			goto fail;

		(void) snprintf(path, MAXPATHLEN, "%s:%s", CLONE_PATH, drvname);
		return (DDI_SUCCESS);
	}

	/* extract instance from devt (getinfo(9E) DDI_INFO_DEVT2INSTANCE). */
	if ((instance = dev_to_instance(devt)) == -1)
		goto fail;

	/* reconstruct the path given the major/instance */
	if (e_ddi_majorinstance_to_path(major, instance, path) != DDI_SUCCESS)
		goto fail;

	/* if spec_type given we must drive attach and search minor nodes */
	if ((spec_type == S_IFCHR) || (spec_type == S_IFBLK)) {
		/* attach the path so we can search minors */
		if ((dip = e_ddi_hold_devi_by_path(path, 0)) == NULL)
			goto fail;

		/* Add minorname to path. */
		ndi_devi_enter(dip, &circ);
		minorname = i_ddi_devtspectype_to_minorname(dip,
		    devt, spec_type);
		if (minorname) {
			(void) strcat(path, ":");
			(void) strcat(path, minorname);
		}
		ndi_devi_exit(dip, circ);
		ddi_release_devi(dip);
		/* No matching minor node means the devt is unresolvable. */
		if (minorname == NULL)
			goto fail;
	}
	ASSERT(strlen(path) < MAXPATHLEN);
	return (DDI_SUCCESS);

	/* All failure paths return an empty string in 'path'. */
fail:	*path = 0;
	return (DDI_FAILURE);
}
6821
6822 /*
6823 * Given a major number and an instance, return the path.
6824 * This interface does NOT drive attach.
6825 */
int
e_ddi_majorinstance_to_path(major_t major, int instance, char *path)
{
	struct devnames *dnp;
	dev_info_t *dip;

	/* reject bogus majors and the "no instance" sentinel up front */
	if ((major >= devcnt) || (instance == -1)) {
		*path = 0;
		return (DDI_FAILURE);
	}

	/* look for the major/instance in the instance tree */
	if (e_ddi_instance_majorinstance_to_path(major, instance,
	    path) == DDI_SUCCESS) {
		ASSERT(strlen(path) < MAXPATHLEN);
		return (DDI_SUCCESS);
	}

	/*
	 * Not in instance tree, find the instance on the per driver list and
	 * construct path to instance via ddi_pathname(). This is how paths
	 * down the 'pseudo' branch are constructed.
	 */
	dnp = &(devnamesp[major]);
	LOCK_DEV_OPS(&(dnp->dn_lock));
	for (dip = dnp->dn_head; dip;
	    dip = (dev_info_t *)DEVI(dip)->devi_next) {
		/* Skip if instance does not match. */
		if (DEVI(dip)->devi_instance != instance)
			continue;

		/*
		 * An ndi_hold_devi() does not prevent DS_INITIALIZED->DS_BOUND
		 * node demotion, so it is not an effective way of ensuring
		 * that the ddi_pathname result has a unit-address. Instead,
		 * we reverify the node state after calling ddi_pathname().
		 */
		if (i_ddi_node_state(dip) >= DS_INITIALIZED) {
			(void) ddi_pathname(dip, path);
			if (i_ddi_node_state(dip) < DS_INITIALIZED)
				continue;
			UNLOCK_DEV_OPS(&(dnp->dn_lock));
			ASSERT(strlen(path) < MAXPATHLEN);
			return (DDI_SUCCESS);
		}
	}
	UNLOCK_DEV_OPS(&(dnp->dn_lock));

	/* can't reconstruct the path */
	*path = 0;
	return (DDI_FAILURE);
}
6878
6879 #define GLD_DRIVER_PPA "SUNW,gld_v0_ppa"
6880
6881 /*
6882 * Given the dip for a network interface return the ppa for that interface.
6883 *
6884 * In all cases except GLD v0 drivers, the ppa == instance.
6885 * In the case of GLD v0 drivers, the ppa is equal to the attach order.
6886 * So for these drivers when the attach routine calls gld_register(),
6887 * the GLD framework creates an integer property called "gld_driver_ppa"
6888 * that can be queried here.
6889 *
6890 * The only time this function is used is when a system is booting over nfs.
6891 * In this case the system has to resolve the pathname of the boot device
6892 * to it's ppa.
6893 */
6894 int
6895 i_ddi_devi_get_ppa(dev_info_t *dip)
6896 {
6897 return (ddi_prop_get_int(DDI_DEV_T_ANY, dip,
6898 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
6899 GLD_DRIVER_PPA, ddi_get_instance(dip)));
6900 }
6901
6902 /*
6903 * i_ddi_devi_set_ppa() should only be called from gld_register()
6904 * and only for GLD v0 drivers
6905 */
6906 void
6907 i_ddi_devi_set_ppa(dev_info_t *dip, int ppa)
6908 {
6909 (void) e_ddi_prop_update_int(DDI_DEV_T_NONE, dip, GLD_DRIVER_PPA, ppa);
6910 }
6911
6912
6913 /*
6914 * Private DDI Console bell functions.
6915 */
6916 void
6917 ddi_ring_console_bell(clock_t duration)
6918 {
6919 if (ddi_console_bell_func != NULL)
6920 (*ddi_console_bell_func)(duration);
6921 }
6922
6923 void
6924 ddi_set_console_bell(void (*bellfunc)(clock_t duration))
6925 {
6926 ddi_console_bell_func = bellfunc;
6927 }
6928
6929 int
6930 ddi_dma_alloc_handle(dev_info_t *dip, ddi_dma_attr_t *attr,
6931 int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *handlep)
6932 {
6933 int (*funcp)() = ddi_dma_allochdl;
6934 ddi_dma_attr_t dma_attr;
6935 struct bus_ops *bop;
6936
6937 if (attr == (ddi_dma_attr_t *)0)
6938 return (DDI_DMA_BADATTR);
6939
6940 dma_attr = *attr;
6941
6942 bop = DEVI(dip)->devi_ops->devo_bus_ops;
6943 if (bop && bop->bus_dma_allochdl)
6944 funcp = bop->bus_dma_allochdl;
6945
6946 return ((*funcp)(dip, dip, &dma_attr, waitfp, arg, handlep));
6947 }
6948
void
ddi_dma_free_handle(ddi_dma_handle_t *handlep)
{
	/*
	 * NOTE: the HD macro (defined elsewhere in this file) expands in
	 * terms of the local identifier 'h' — presumably the handle's
	 * rdip; keep the local name as-is.
	 */
	ddi_dma_handle_t h = *handlep;
	(void) ddi_dma_freehdl(HD, HD, h);
}
6955
/* callback list id used to retry failed DMA memory allocations */
static uintptr_t dma_mem_list_id = 0;
6957
6958
6959 int
6960 ddi_dma_mem_alloc(ddi_dma_handle_t handle, size_t length,
6961 ddi_device_acc_attr_t *accattrp, uint_t flags,
6962 int (*waitfp)(caddr_t), caddr_t arg, caddr_t *kaddrp,
6963 size_t *real_length, ddi_acc_handle_t *handlep)
6964 {
6965 ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
6966 dev_info_t *dip = hp->dmai_rdip;
6967 ddi_acc_hdl_t *ap;
6968 ddi_dma_attr_t *attrp = &hp->dmai_attr;
6969 uint_t sleepflag, xfermodes;
6970 int (*fp)(caddr_t);
6971 int rval;
6972
6973 if (waitfp == DDI_DMA_SLEEP)
6974 fp = (int (*)())KM_SLEEP;
6975 else if (waitfp == DDI_DMA_DONTWAIT)
6976 fp = (int (*)())KM_NOSLEEP;
6977 else
6978 fp = waitfp;
6979 *handlep = impl_acc_hdl_alloc(fp, arg);
6980 if (*handlep == NULL)
6981 return (DDI_FAILURE);
6982
6983 /* check if the cache attributes are supported */
6984 if (i_ddi_check_cache_attr(flags) == B_FALSE)
6985 return (DDI_FAILURE);
6986
6987 /*
6988 * Transfer the meaningful bits to xfermodes.
6989 * Double-check if the 3rd party driver correctly sets the bits.
6990 * If not, set DDI_DMA_STREAMING to keep compatibility.
6991 */
6992 xfermodes = flags & (DDI_DMA_CONSISTENT | DDI_DMA_STREAMING);
6993 if (xfermodes == 0) {
6994 xfermodes = DDI_DMA_STREAMING;
6995 }
6996
6997 /*
6998 * initialize the common elements of data access handle
6999 */
7000 ap = impl_acc_hdl_get(*handlep);
7001 ap->ah_vers = VERS_ACCHDL;
7002 ap->ah_dip = dip;
7003 ap->ah_offset = 0;
7004 ap->ah_len = 0;
7005 ap->ah_xfermodes = flags;
7006 ap->ah_acc = *accattrp;
7007
7008 sleepflag = ((waitfp == DDI_DMA_SLEEP) ? 1 : 0);
7009 if (xfermodes == DDI_DMA_CONSISTENT) {
7010 rval = i_ddi_mem_alloc(dip, attrp, length, sleepflag,
7011 flags, accattrp, kaddrp, NULL, ap);
7012 *real_length = length;
7013 } else {
7014 rval = i_ddi_mem_alloc(dip, attrp, length, sleepflag,
7015 flags, accattrp, kaddrp, real_length, ap);
7016 }
7017 if (rval == DDI_SUCCESS) {
7018 ap->ah_len = (off_t)(*real_length);
7019 ap->ah_addr = *kaddrp;
7020 } else {
7021 impl_acc_hdl_free(*handlep);
7022 *handlep = (ddi_acc_handle_t)NULL;
7023 if (waitfp != DDI_DMA_SLEEP && waitfp != DDI_DMA_DONTWAIT) {
7024 ddi_set_callback(waitfp, arg, &dma_mem_list_id);
7025 }
7026 rval = DDI_FAILURE;
7027 }
7028 return (rval);
7029 }
7030
7031 void
7032 ddi_dma_mem_free(ddi_acc_handle_t *handlep)
7033 {
7034 ddi_acc_hdl_t *ap;
7035
7036 ap = impl_acc_hdl_get(*handlep);
7037 ASSERT(ap);
7038
7039 i_ddi_mem_free((caddr_t)ap->ah_addr, ap);
7040
7041 /*
7042 * free the handle
7043 */
7044 impl_acc_hdl_free(*handlep);
7045 *handlep = (ddi_acc_handle_t)NULL;
7046
7047 if (dma_mem_list_id != 0) {
7048 ddi_run_callback(&dma_mem_list_id);
7049 }
7050 }
7051
int
ddi_dma_buf_bind_handle(ddi_dma_handle_t handle, struct buf *bp,
    uint_t flags, int (*waitfp)(caddr_t), caddr_t arg,
    ddi_dma_cookie_t *cookiep, uint_t *ccountp)
{
	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
	dev_info_t *dip, *rdip;
	struct ddi_dma_req dmareq;
	int (*funcp)();

	/* build a DMA request describing the buf(9S) memory object */
	dmareq.dmar_flags = flags;
	dmareq.dmar_fp = waitfp;
	dmareq.dmar_arg = arg;
	dmareq.dmar_object.dmao_size = (uint_t)bp->b_bcount;

	if (bp->b_flags & B_PAGEIO) {
		/* paged I/O: object is the page list, plus in-page offset */
		dmareq.dmar_object.dmao_type = DMA_OTYP_PAGES;
		dmareq.dmar_object.dmao_obj.pp_obj.pp_pp = bp->b_pages;
		dmareq.dmar_object.dmao_obj.pp_obj.pp_offset =
		    (uint_t)(((uintptr_t)bp->b_un.b_addr) & MMU_PAGEOFFSET);
	} else {
		/* virtual-address object */
		dmareq.dmar_object.dmao_obj.virt_obj.v_addr = bp->b_un.b_addr;
		if (bp->b_flags & B_SHADOW) {
			/* shadow page list supplies the backing pages */
			dmareq.dmar_object.dmao_obj.virt_obj.v_priv =
			    bp->b_shadow;
			dmareq.dmar_object.dmao_type = DMA_OTYP_BUFVADDR;
		} else {
			dmareq.dmar_object.dmao_type =
			    (bp->b_flags & (B_PHYS | B_REMAPPED)) ?
			    DMA_OTYP_BUFVADDR : DMA_OTYP_VADDR;
			dmareq.dmar_object.dmao_obj.virt_obj.v_priv = NULL;
		}

		/*
		 * If the buffer has no proc pointer, or the proc
		 * struct has the kernel address space, or the buffer has
		 * been marked B_REMAPPED (meaning that it is now
		 * mapped into the kernel's address space), then
		 * the address space is kas (kernel address space).
		 */
		if ((bp->b_proc == NULL) || (bp->b_proc->p_as == &kas) ||
		    (bp->b_flags & B_REMAPPED)) {
			dmareq.dmar_object.dmao_obj.virt_obj.v_as = 0;
		} else {
			dmareq.dmar_object.dmao_obj.virt_obj.v_as =
			    bp->b_proc->p_as;
		}
	}

	/* dispatch to the nexus bind routine cached in the devinfo node */
	dip = rdip = hp->dmai_rdip;
	if (dip != ddi_root_node())
		dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_bindhdl;
	funcp = DEVI(rdip)->devi_bus_dma_bindfunc;
	return ((*funcp)(dip, rdip, handle, &dmareq, cookiep, ccountp));
}
7107
7108 int
7109 ddi_dma_addr_bind_handle(ddi_dma_handle_t handle, struct as *as,
7110 caddr_t addr, size_t len, uint_t flags, int (*waitfp)(caddr_t),
7111 caddr_t arg, ddi_dma_cookie_t *cookiep, uint_t *ccountp)
7112 {
7113 ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
7114 dev_info_t *dip, *rdip;
7115 struct ddi_dma_req dmareq;
7116 int (*funcp)();
7117
7118 if (len == (uint_t)0) {
7119 return (DDI_DMA_NOMAPPING);
7120 }
7121 dmareq.dmar_flags = flags;
7122 dmareq.dmar_fp = waitfp;
7123 dmareq.dmar_arg = arg;
7124 dmareq.dmar_object.dmao_size = len;
7125 dmareq.dmar_object.dmao_type = DMA_OTYP_VADDR;
7126 dmareq.dmar_object.dmao_obj.virt_obj.v_as = as;
7127 dmareq.dmar_object.dmao_obj.virt_obj.v_addr = addr;
7128 dmareq.dmar_object.dmao_obj.virt_obj.v_priv = NULL;
7129
7130 dip = rdip = hp->dmai_rdip;
7131 if (dip != ddi_root_node())
7132 dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_bindhdl;
7133 funcp = DEVI(rdip)->devi_bus_dma_bindfunc;
7134 return ((*funcp)(dip, rdip, handle, &dmareq, cookiep, ccountp));
7135 }
7136
7137 void
7138 ddi_dma_nextcookie(ddi_dma_handle_t handle, ddi_dma_cookie_t *cookiep)
7139 {
7140 ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
7141 ddi_dma_cookie_t *cp;
7142
7143 cp = hp->dmai_cookie;
7144 ASSERT(cp);
7145
7146 cookiep->dmac_notused = cp->dmac_notused;
7147 cookiep->dmac_type = cp->dmac_type;
7148 cookiep->dmac_address = cp->dmac_address;
7149 cookiep->dmac_size = cp->dmac_size;
7150 hp->dmai_cookie++;
7151 }
7152
7153 int
7154 ddi_dma_numwin(ddi_dma_handle_t handle, uint_t *nwinp)
7155 {
7156 ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
7157 if ((hp->dmai_rflags & DDI_DMA_PARTIAL) == 0) {
7158 return (DDI_FAILURE);
7159 } else {
7160 *nwinp = hp->dmai_nwin;
7161 return (DDI_SUCCESS);
7162 }
7163 }
7164
int
ddi_dma_getwin(ddi_dma_handle_t h, uint_t win, off_t *offp,
    size_t *lenp, ddi_dma_cookie_t *cookiep, uint_t *ccountp)
{
	/* default to the generic window routine, ... */
	int (*funcp)() = ddi_dma_win;
	struct bus_ops *bop;

	/*
	 * ... but prefer the bus nexus implementation if one exists.
	 * NOTE: HD (defined elsewhere in this file) expands in terms of
	 * the parameter name 'h' — keep that name.
	 */
	bop = DEVI(HD)->devi_ops->devo_bus_ops;
	if (bop && bop->bus_dma_win)
		funcp = bop->bus_dma_win;

	return ((*funcp)(HD, HD, h, win, offp, lenp, cookiep, ccountp));
}
7178
int
ddi_dma_set_sbus64(ddi_dma_handle_t h, ulong_t burstsizes)
{
	/*
	 * Request 64-bit SBus transfers with the given burst sizes via
	 * the miscellaneous-control entry point.  HD expands in terms
	 * of parameter 'h'.
	 */
	return (ddi_dma_mctl(HD, HD, h, DDI_DMA_SET_SBUS64, 0,
	    &burstsizes, 0, 0));
}
7185
7186 int
7187 i_ddi_dma_fault_check(ddi_dma_impl_t *hp)
7188 {
7189 return (hp->dmai_fault);
7190 }
7191
7192 int
7193 ddi_check_dma_handle(ddi_dma_handle_t handle)
7194 {
7195 ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
7196 int (*check)(ddi_dma_impl_t *);
7197
7198 if ((check = hp->dmai_fault_check) == NULL)
7199 check = i_ddi_dma_fault_check;
7200
7201 return (((*check)(hp) == DDI_SUCCESS) ? DDI_SUCCESS : DDI_FAILURE);
7202 }
7203
7204 void
7205 i_ddi_dma_set_fault(ddi_dma_handle_t handle)
7206 {
7207 ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
7208 void (*notify)(ddi_dma_impl_t *);
7209
7210 if (!hp->dmai_fault) {
7211 hp->dmai_fault = 1;
7212 if ((notify = hp->dmai_fault_notify) != NULL)
7213 (*notify)(hp);
7214 }
7215 }
7216
7217 void
7218 i_ddi_dma_clr_fault(ddi_dma_handle_t handle)
7219 {
7220 ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
7221 void (*notify)(ddi_dma_impl_t *);
7222
7223 if (hp->dmai_fault) {
7224 hp->dmai_fault = 0;
7225 if ((notify = hp->dmai_fault_notify) != NULL)
7226 (*notify)(hp);
7227 }
7228 }
7229
7230 /*
7231 * register mapping routines.
7232 */
7233 int
7234 ddi_regs_map_setup(dev_info_t *dip, uint_t rnumber, caddr_t *addrp,
7235 offset_t offset, offset_t len, ddi_device_acc_attr_t *accattrp,
7236 ddi_acc_handle_t *handle)
7237 {
7238 ddi_map_req_t mr;
7239 ddi_acc_hdl_t *hp;
7240 int result;
7241
7242 /*
7243 * Allocate and initialize the common elements of data access handle.
7244 */
7245 *handle = impl_acc_hdl_alloc(KM_SLEEP, NULL);
7246 hp = impl_acc_hdl_get(*handle);
7247 hp->ah_vers = VERS_ACCHDL;
7248 hp->ah_dip = dip;
7249 hp->ah_rnumber = rnumber;
7250 hp->ah_offset = offset;
7251 hp->ah_len = len;
7252 hp->ah_acc = *accattrp;
7253
7254 /*
7255 * Set up the mapping request and call to parent.
7256 */
7257 mr.map_op = DDI_MO_MAP_LOCKED;
7258 mr.map_type = DDI_MT_RNUMBER;
7259 mr.map_obj.rnumber = rnumber;
7260 mr.map_prot = PROT_READ | PROT_WRITE;
7261 mr.map_flags = DDI_MF_KERNEL_MAPPING;
7262 mr.map_handlep = hp;
7263 mr.map_vers = DDI_MAP_VERSION;
7264 result = ddi_map(dip, &mr, offset, len, addrp);
7265
7266 /*
7267 * check for end result
7268 */
7269 if (result != DDI_SUCCESS) {
7270 impl_acc_hdl_free(*handle);
7271 *handle = (ddi_acc_handle_t)NULL;
7272 } else {
7273 hp->ah_addr = *addrp;
7274 }
7275
7276 return (result);
7277 }
7278
7279 void
7280 ddi_regs_map_free(ddi_acc_handle_t *handlep)
7281 {
7282 ddi_map_req_t mr;
7283 ddi_acc_hdl_t *hp;
7284
7285 hp = impl_acc_hdl_get(*handlep);
7286 ASSERT(hp);
7287
7288 mr.map_op = DDI_MO_UNMAP;
7289 mr.map_type = DDI_MT_RNUMBER;
7290 mr.map_obj.rnumber = hp->ah_rnumber;
7291 mr.map_prot = PROT_READ | PROT_WRITE;
7292 mr.map_flags = DDI_MF_KERNEL_MAPPING;
7293 mr.map_handlep = hp;
7294 mr.map_vers = DDI_MAP_VERSION;
7295
7296 /*
7297 * Call my parent to unmap my regs.
7298 */
7299 (void) ddi_map(hp->ah_dip, &mr, hp->ah_offset,
7300 hp->ah_len, &hp->ah_addr);
7301 /*
7302 * free the handle
7303 */
7304 impl_acc_hdl_free(*handlep);
7305 *handlep = (ddi_acc_handle_t)NULL;
7306 }
7307
7308 int
7309 ddi_device_zero(ddi_acc_handle_t handle, caddr_t dev_addr, size_t bytecount,
7310 ssize_t dev_advcnt, uint_t dev_datasz)
7311 {
7312 uint8_t *b;
7313 uint16_t *w;
7314 uint32_t *l;
7315 uint64_t *ll;
7316
7317 /* check for total byte count is multiple of data transfer size */
7318 if (bytecount != ((bytecount / dev_datasz) * dev_datasz))
7319 return (DDI_FAILURE);
7320
7321 switch (dev_datasz) {
7322 case DDI_DATA_SZ01_ACC:
7323 for (b = (uint8_t *)dev_addr;
7324 bytecount != 0; bytecount -= 1, b += dev_advcnt)
7325 ddi_put8(handle, b, 0);
7326 break;
7327 case DDI_DATA_SZ02_ACC:
7328 for (w = (uint16_t *)dev_addr;
7329 bytecount != 0; bytecount -= 2, w += dev_advcnt)
7330 ddi_put16(handle, w, 0);
7331 break;
7332 case DDI_DATA_SZ04_ACC:
7333 for (l = (uint32_t *)dev_addr;
7334 bytecount != 0; bytecount -= 4, l += dev_advcnt)
7335 ddi_put32(handle, l, 0);
7336 break;
7337 case DDI_DATA_SZ08_ACC:
7338 for (ll = (uint64_t *)dev_addr;
7339 bytecount != 0; bytecount -= 8, ll += dev_advcnt)
7340 ddi_put64(handle, ll, 0x0ll);
7341 break;
7342 default:
7343 return (DDI_FAILURE);
7344 }
7345 return (DDI_SUCCESS);
7346 }
7347
7348 int
7349 ddi_device_copy(
7350 ddi_acc_handle_t src_handle, caddr_t src_addr, ssize_t src_advcnt,
7351 ddi_acc_handle_t dest_handle, caddr_t dest_addr, ssize_t dest_advcnt,
7352 size_t bytecount, uint_t dev_datasz)
7353 {
7354 uint8_t *b_src, *b_dst;
7355 uint16_t *w_src, *w_dst;
7356 uint32_t *l_src, *l_dst;
7357 uint64_t *ll_src, *ll_dst;
7358
7359 /* check for total byte count is multiple of data transfer size */
7360 if (bytecount != ((bytecount / dev_datasz) * dev_datasz))
7361 return (DDI_FAILURE);
7362
7363 switch (dev_datasz) {
7364 case DDI_DATA_SZ01_ACC:
7365 b_src = (uint8_t *)src_addr;
7366 b_dst = (uint8_t *)dest_addr;
7367
7368 for (; bytecount != 0; bytecount -= 1) {
7369 ddi_put8(dest_handle, b_dst,
7370 ddi_get8(src_handle, b_src));
7371 b_dst += dest_advcnt;
7372 b_src += src_advcnt;
7373 }
7374 break;
7375 case DDI_DATA_SZ02_ACC:
7376 w_src = (uint16_t *)src_addr;
7377 w_dst = (uint16_t *)dest_addr;
7378
7379 for (; bytecount != 0; bytecount -= 2) {
7380 ddi_put16(dest_handle, w_dst,
7381 ddi_get16(src_handle, w_src));
7382 w_dst += dest_advcnt;
7383 w_src += src_advcnt;
7384 }
7385 break;
7386 case DDI_DATA_SZ04_ACC:
7387 l_src = (uint32_t *)src_addr;
7388 l_dst = (uint32_t *)dest_addr;
7389
7390 for (; bytecount != 0; bytecount -= 4) {
7391 ddi_put32(dest_handle, l_dst,
7392 ddi_get32(src_handle, l_src));
7393 l_dst += dest_advcnt;
7394 l_src += src_advcnt;
7395 }
7396 break;
7397 case DDI_DATA_SZ08_ACC:
7398 ll_src = (uint64_t *)src_addr;
7399 ll_dst = (uint64_t *)dest_addr;
7400
7401 for (; bytecount != 0; bytecount -= 8) {
7402 ddi_put64(dest_handle, ll_dst,
7403 ddi_get64(src_handle, ll_src));
7404 ll_dst += dest_advcnt;
7405 ll_src += src_advcnt;
7406 }
7407 break;
7408 default:
7409 return (DDI_FAILURE);
7410 }
7411 return (DDI_SUCCESS);
7412 }
7413
#define	swap16(value)  \
	((((value) & 0xff) << 8) | ((value) >> 8))

#define	swap32(value)	\
	(((uint32_t)swap16((uint16_t)((value) & 0xffff)) << 16) | \
	(uint32_t)swap16((uint16_t)((value) >> 16)))

#define	swap64(value)	\
	(((uint64_t)swap32((uint32_t)((value) & 0xffffffff))	\
	    << 32) | \
	(uint64_t)swap32((uint32_t)((value) >> 32)))

/* reverse the byte order of a 16-bit quantity */
uint16_t
ddi_swap16(uint16_t value)
{
	return ((uint16_t)(((value & 0x00ff) << 8) | ((value & 0xff00) >> 8)));
}

/* reverse the byte order of a 32-bit quantity */
uint32_t
ddi_swap32(uint32_t value)
{
	return (((value & 0x000000ffU) << 24) |
	    ((value & 0x0000ff00U) << 8) |
	    ((value & 0x00ff0000U) >> 8) |
	    ((value & 0xff000000U) >> 24));
}

/* reverse the byte order of a 64-bit quantity */
uint64_t
ddi_swap64(uint64_t value)
{
	return (((uint64_t)ddi_swap32((uint32_t)(value & 0xffffffffULL))
	    << 32) |
	    (uint64_t)ddi_swap32((uint32_t)(value >> 32)));
}
7443
7444 /*
7445 * Convert a binding name to a driver name.
7446 * A binding name is the name used to determine the driver for a
7447 * device - it may be either an alias for the driver or the name
7448 * of the driver itself.
7449 */
7450 char *
7451 i_binding_to_drv_name(char *bname)
7452 {
7453 major_t major_no;
7454
7455 ASSERT(bname != NULL);
7456
7457 if ((major_no = ddi_name_to_major(bname)) == -1)
7458 return (NULL);
7459 return (ddi_major_to_name(major_no));
7460 }
7461
7462 /*
7463 * Search for minor name that has specified dev_t and spec_type.
7464 * If spec_type is zero then any dev_t match works. Since we
7465 * are returning a pointer to the minor name string, we require the
7466 * caller to do the locking.
7467 */
7468 char *
7469 i_ddi_devtspectype_to_minorname(dev_info_t *dip, dev_t dev, int spec_type)
7470 {
7471 struct ddi_minor_data *dmdp;
7472
7473 /*
7474 * The did layered driver currently intentionally returns a
7475 * devinfo ptr for an underlying sd instance based on a did
7476 * dev_t. In this case it is not an error.
7477 *
7478 * The did layered driver is associated with Sun Cluster.
7479 */
7480 ASSERT((ddi_driver_major(dip) == getmajor(dev)) ||
7481 (strcmp(ddi_major_to_name(getmajor(dev)), "did") == 0));
7482
7483 ASSERT(DEVI_BUSY_OWNED(dip));
7484 for (dmdp = DEVI(dip)->devi_minor; dmdp; dmdp = dmdp->next) {
7485 if (((dmdp->type == DDM_MINOR) ||
7486 (dmdp->type == DDM_INTERNAL_PATH) ||
7487 (dmdp->type == DDM_DEFAULT)) &&
7488 (dmdp->ddm_dev == dev) &&
7489 ((((spec_type & (S_IFCHR|S_IFBLK))) == 0) ||
7490 (dmdp->ddm_spec_type == spec_type)))
7491 return (dmdp->ddm_name);
7492 }
7493
7494 return (NULL);
7495 }
7496
7497 /*
7498 * Find the devt and spectype of the specified minor_name.
7499 * Return DDI_FAILURE if minor_name not found. Since we are
7500 * returning everything via arguments we can do the locking.
7501 */
int
i_ddi_minorname_to_devtspectype(dev_info_t *dip, char *minor_name,
    dev_t *devtp, int *spectypep)
{
	int circ;
	struct ddi_minor_data *dmdp;

	/* deal with clone minor nodes */
	if (dip == clone_dip) {
		major_t major;
		/*
		 * Make sure minor_name is a STREAMS driver.
		 * We load the driver but don't attach to any instances.
		 */

		major = ddi_name_to_major(minor_name);
		if (major == DDI_MAJOR_T_NONE)
			return (DDI_FAILURE);

		if (ddi_hold_driver(major) == NULL)
			return (DDI_FAILURE);

		/* only STREAMS drivers can sit behind clone */
		if (STREAMSTAB(major) == NULL) {
			ddi_rele_driver(major);
			return (DDI_FAILURE);
		}
		ddi_rele_driver(major);

		/* clone dev_t encodes the target driver's major as minor */
		if (devtp)
			*devtp = makedevice(clone_major, (minor_t)major);

		if (spectypep)
			*spectypep = S_IFCHR;

		return (DDI_SUCCESS);
	}

	/* hold the node busy while walking its minor list */
	ndi_devi_enter(dip, &circ);
	for (dmdp = DEVI(dip)->devi_minor; dmdp; dmdp = dmdp->next) {
		if (((dmdp->type != DDM_MINOR) &&
		    (dmdp->type != DDM_INTERNAL_PATH) &&
		    (dmdp->type != DDM_DEFAULT)) ||
		    strcmp(minor_name, dmdp->ddm_name))
			continue;

		if (devtp)
			*devtp = dmdp->ddm_dev;

		if (spectypep)
			*spectypep = dmdp->ddm_spec_type;

		ndi_devi_exit(dip, circ);
		return (DDI_SUCCESS);
	}
	ndi_devi_exit(dip, circ);

	return (DDI_FAILURE);
}
7560
/* protects devid_gen_number, the DEVID_FAB generation counter */
static kmutex_t devid_gen_mutex;
static short devid_gen_number;

#ifdef DEBUG

/* tunables that deliberately corrupt registered devids for testing */
static int devid_register_corrupt = 0;
static int devid_register_corrupt_major = 0;
static int devid_register_corrupt_hint = 0;
static int devid_register_corrupt_hint_major = 0;

/* set non-zero to trace devid-layer lookups via cmn_err */
static int devid_lyr_debug = 0;

#define	DDI_DEBUG_DEVID_DEVTS(msg, ndevs, devs)		\
	if (devid_lyr_debug)				\
		ddi_debug_devid_devts(msg, ndevs, devs)

#else

#define	DDI_DEBUG_DEVID_DEVTS(msg, ndevs, devs)

#endif /* DEBUG */
7582
7583
7584 #ifdef DEBUG
7585
7586 static void
7587 ddi_debug_devid_devts(char *msg, int ndevs, dev_t *devs)
7588 {
7589 int i;
7590
7591 cmn_err(CE_CONT, "%s:\n", msg);
7592 for (i = 0; i < ndevs; i++) {
7593 cmn_err(CE_CONT, " 0x%lx\n", devs[i]);
7594 }
7595 }
7596
7597 static void
7598 ddi_debug_devid_paths(char *msg, int npaths, char **paths)
7599 {
7600 int i;
7601
7602 cmn_err(CE_CONT, "%s:\n", msg);
7603 for (i = 0; i < npaths; i++) {
7604 cmn_err(CE_CONT, " %s\n", paths[i]);
7605 }
7606 }
7607
7608 static void
7609 ddi_debug_devid_devts_per_path(char *path, int ndevs, dev_t *devs)
7610 {
7611 int i;
7612
7613 cmn_err(CE_CONT, "dev_ts per path %s\n", path);
7614 for (i = 0; i < ndevs; i++) {
7615 cmn_err(CE_CONT, " 0x%lx\n", devs[i]);
7616 }
7617 }
7618
7619 #endif /* DEBUG */
7620
7621 /*
7622 * Register device id into DDI framework.
7623 * Must be called when the driver is bound.
7624 */
static int
i_ddi_devid_register(dev_info_t *dip, ddi_devid_t devid)
{
	impl_devid_t *i_devid = (impl_devid_t *)devid;
	size_t driver_len;
	const char *driver_name;
	char *devid_str;
	major_t major;

	/* the node must exist and be bound to a driver */
	if ((dip == NULL) ||
	    ((major = ddi_driver_major(dip)) == DDI_MAJOR_T_NONE))
		return (DDI_FAILURE);

	/* verify that the devid is valid */
	if (ddi_devid_valid(devid) != DDI_SUCCESS)
		return (DDI_FAILURE);

	/* Updating driver name hint in devid */
	driver_name = ddi_driver_name(dip);
	driver_len = strlen(driver_name);
	if (driver_len > DEVID_HINT_SIZE) {
		/* Pick up last four characters of driver name */
		driver_name += driver_len - DEVID_HINT_SIZE;
		driver_len = DEVID_HINT_SIZE;
	}
	/* zero first so a short name leaves no stale hint bytes */
	bzero(i_devid->did_driver, DEVID_HINT_SIZE);
	bcopy(driver_name, i_devid->did_driver, driver_len);

#ifdef DEBUG
	/* Corrupt the devid for testing. */
	if (devid_register_corrupt)
		i_devid->did_id[0] += devid_register_corrupt;
	if (devid_register_corrupt_major &&
	    (major == devid_register_corrupt_major))
		i_devid->did_id[0] += 1;
	if (devid_register_corrupt_hint)
		i_devid->did_driver[0] += devid_register_corrupt_hint;
	if (devid_register_corrupt_hint_major &&
	    (major == devid_register_corrupt_hint_major))
		i_devid->did_driver[0] += 1;
#endif /* DEBUG */

	/* encode the devid as a string */
	if ((devid_str = ddi_devid_str_encode(devid, NULL)) == NULL)
		return (DDI_FAILURE);

	/* add string as a string property */
	if (ndi_prop_update_string(DDI_DEV_T_NONE, dip,
	    DEVID_PROP_NAME, devid_str) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s%d: devid property update failed",
		    ddi_driver_name(dip), ddi_get_instance(dip));
		ddi_devid_str_free(devid_str);
		return (DDI_FAILURE);
	}

	/* keep pointer to devid string for interrupt context fma code */
	if (DEVI(dip)->devi_devid_str)
		ddi_devid_str_free(DEVI(dip)->devi_devid_str);
	DEVI(dip)->devi_devid_str = devid_str;
	return (DDI_SUCCESS);
}
7686
7687 int
7688 ddi_devid_register(dev_info_t *dip, ddi_devid_t devid)
7689 {
7690 int rval;
7691
7692 rval = i_ddi_devid_register(dip, devid);
7693 if (rval == DDI_SUCCESS) {
7694 /*
7695 * Register devid in devid-to-path cache
7696 */
7697 if (e_devid_cache_register(dip, devid) == DDI_SUCCESS) {
7698 mutex_enter(&DEVI(dip)->devi_lock);
7699 DEVI(dip)->devi_flags |= DEVI_CACHED_DEVID;
7700 mutex_exit(&DEVI(dip)->devi_lock);
7701 } else if (ddi_get_name_addr(dip)) {
7702 /*
7703 * We only expect cache_register DDI_FAILURE when we
7704 * can't form the full path because of NULL devi_addr.
7705 */
7706 cmn_err(CE_WARN, "%s%d: failed to cache devid",
7707 ddi_driver_name(dip), ddi_get_instance(dip));
7708 }
7709 } else {
7710 cmn_err(CE_WARN, "%s%d: failed to register devid",
7711 ddi_driver_name(dip), ddi_get_instance(dip));
7712 }
7713 return (rval);
7714 }
7715
7716 /*
7717 * Remove (unregister) device id from DDI framework.
7718 * Must be called when device is detached.
7719 */
7720 static void
7721 i_ddi_devid_unregister(dev_info_t *dip)
7722 {
7723 if (DEVI(dip)->devi_devid_str) {
7724 ddi_devid_str_free(DEVI(dip)->devi_devid_str);
7725 DEVI(dip)->devi_devid_str = NULL;
7726 }
7727
7728 /* remove the devid property */
7729 (void) ndi_prop_remove(DDI_DEV_T_NONE, dip, DEVID_PROP_NAME);
7730 }
7731
7732 void
7733 ddi_devid_unregister(dev_info_t *dip)
7734 {
7735 mutex_enter(&DEVI(dip)->devi_lock);
7736 DEVI(dip)->devi_flags &= ~DEVI_CACHED_DEVID;
7737 mutex_exit(&DEVI(dip)->devi_lock);
7738 e_devid_cache_unregister(dip);
7739 i_ddi_devid_unregister(dip);
7740 }
7741
7742 /*
7743 * Allocate and initialize a device id.
7744 */
int
ddi_devid_init(
	dev_info_t	*dip,
	ushort_t	devid_type,
	ushort_t	nbytes,
	void		*id,
	ddi_devid_t	*ret_devid)
{
	impl_devid_t	*i_devid;
	/* base size already accounts for one id byte in the struct */
	int		sz = sizeof (*i_devid) + nbytes - sizeof (char);
	int		driver_len;
	const char	*driver_name;

	switch (devid_type) {
	case DEVID_SCSI3_WWN:
		/*FALLTHRU*/
	case DEVID_SCSI_SERIAL:
		/*FALLTHRU*/
	case DEVID_ATA_SERIAL:
		/*FALLTHRU*/
	case DEVID_ENCAP:
		/* caller-supplied id types require id bytes */
		if (nbytes == 0)
			return (DDI_FAILURE);
		if (id == NULL)
			return (DDI_FAILURE);
		break;
	case DEVID_FAB:
		/* fabricated ids are generated here; no caller data */
		if (nbytes != 0)
			return (DDI_FAILURE);
		if (id != NULL)
			return (DDI_FAILURE);
		/* hostid + timestamp + generation number */
		nbytes = sizeof (int) +
		    sizeof (struct timeval32) + sizeof (short);
		sz += nbytes;
		break;
	default:
		return (DDI_FAILURE);
	}

	if ((i_devid = kmem_zalloc(sz, KM_SLEEP)) == NULL)
		return (DDI_FAILURE);

	i_devid->did_magic_hi = DEVID_MAGIC_MSB;
	i_devid->did_magic_lo = DEVID_MAGIC_LSB;
	i_devid->did_rev_hi = DEVID_REV_MSB;
	i_devid->did_rev_lo = DEVID_REV_LSB;
	DEVID_FORMTYPE(i_devid, devid_type);
	DEVID_FORMLEN(i_devid, nbytes);

	/* Fill in driver name hint */
	driver_name = ddi_driver_name(dip);
	driver_len = strlen(driver_name);
	if (driver_len > DEVID_HINT_SIZE) {
		/* Pick up last four characters of driver name */
		driver_name += driver_len - DEVID_HINT_SIZE;
		driver_len = DEVID_HINT_SIZE;
	}

	bcopy(driver_name, i_devid->did_driver, driver_len);

	/* Fill in id field */
	if (devid_type == DEVID_FAB) {
		char		*cp;
		uint32_t	hostid;
		struct timeval32 timestamp32;
		int		i;
		int		*ip;
		short		gen;

		/* increase the generation number */
		mutex_enter(&devid_gen_mutex);
		gen = devid_gen_number++;
		mutex_exit(&devid_gen_mutex);

		cp = i_devid->did_id;

		/* Fill in host id (big-endian byte ordering) */
		hostid = zone_get_hostid(NULL);
		*cp++ = hibyte(hiword(hostid));
		*cp++ = lobyte(hiword(hostid));
		*cp++ = hibyte(loword(hostid));
		*cp++ = lobyte(loword(hostid));

		/*
		 * Fill in timestamp (big-endian byte ordering)
		 *
		 * (Note that the format may have to be changed
		 * before 2038 comes around, though it's arguably
		 * unique enough as it is..)
		 */
		uniqtime32(&timestamp32);
		ip = (int *)&timestamp32;
		for (i = 0;
		    i < sizeof (timestamp32) / sizeof (int); i++, ip++) {
			int	val;
			val = *ip;
			*cp++ = hibyte(hiword(val));
			*cp++ = lobyte(hiword(val));
			*cp++ = hibyte(loword(val));
			*cp++ = lobyte(loword(val));
		}

		/* fill in the generation number */
		*cp++ = hibyte(gen);
		*cp++ = lobyte(gen);
	} else
		bcopy(id, i_devid->did_id, nbytes);

	/* return device id */
	*ret_devid = (ddi_devid_t)i_devid;
	return (DDI_SUCCESS);
}
7857
7858 int
7859 ddi_devid_get(dev_info_t *dip, ddi_devid_t *ret_devid)
7860 {
7861 return (i_ddi_devi_get_devid(DDI_DEV_T_ANY, dip, ret_devid));
7862 }
7863
7864 int
7865 i_ddi_devi_get_devid(dev_t dev, dev_info_t *dip, ddi_devid_t *ret_devid)
7866 {
7867 char *devidstr;
7868
7869 ASSERT(dev != DDI_DEV_T_NONE);
7870
7871 /* look up the property, devt specific first */
7872 if (ddi_prop_lookup_string(dev, dip, DDI_PROP_DONTPASS,
7873 DEVID_PROP_NAME, &devidstr) != DDI_PROP_SUCCESS) {
7874 if ((dev == DDI_DEV_T_ANY) ||
7875 (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip,
7876 DDI_PROP_DONTPASS, DEVID_PROP_NAME, &devidstr) !=
7877 DDI_PROP_SUCCESS)) {
7878 return (DDI_FAILURE);
7879 }
7880 }
7881
7882 /* convert to binary form */
7883 if (ddi_devid_str_decode(devidstr, ret_devid, NULL) == -1) {
7884 ddi_prop_free(devidstr);
7885 return (DDI_FAILURE);
7886 }
7887 ddi_prop_free(devidstr);
7888 return (DDI_SUCCESS);
7889 }
7890
7891 /*
7892 * Return a copy of the device id for dev_t
7893 */
7894 int
7895 ddi_lyr_get_devid(dev_t dev, ddi_devid_t *ret_devid)
7896 {
7897 dev_info_t *dip;
7898 int rval;
7899
7900 /* get the dip */
7901 if ((dip = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
7902 return (DDI_FAILURE);
7903
7904 rval = i_ddi_devi_get_devid(dev, dip, ret_devid);
7905
7906 ddi_release_devi(dip); /* e_ddi_hold_devi_by_dev() */
7907 return (rval);
7908 }
7909
7910 /*
7911 * Return a copy of the minor name for dev_t and spec_type
7912 */
7913 int
7914 ddi_lyr_get_minor_name(dev_t dev, int spec_type, char **minor_name)
7915 {
7916 char *buf;
7917 int circ;
7918 dev_info_t *dip;
7919 char *nm;
7920 int rval;
7921
7922 if ((dip = e_ddi_hold_devi_by_dev(dev, 0)) == NULL) {
7923 *minor_name = NULL;
7924 return (DDI_FAILURE);
7925 }
7926
7927 /* Find the minor name and copy into max size buf */
7928 buf = kmem_alloc(MAXNAMELEN, KM_SLEEP);
7929 ndi_devi_enter(dip, &circ);
7930 nm = i_ddi_devtspectype_to_minorname(dip, dev, spec_type);
7931 if (nm)
7932 (void) strcpy(buf, nm);
7933 ndi_devi_exit(dip, circ);
7934 ddi_release_devi(dip); /* e_ddi_hold_devi_by_dev() */
7935
7936 if (nm) {
7937 /* duplicate into min size buf for return result */
7938 *minor_name = i_ddi_strdup(buf, KM_SLEEP);
7939 rval = DDI_SUCCESS;
7940 } else {
7941 *minor_name = NULL;
7942 rval = DDI_FAILURE;
7943 }
7944
7945 /* free max size buf and return */
7946 kmem_free(buf, MAXNAMELEN);
7947 return (rval);
7948 }
7949
7950 int
7951 ddi_lyr_devid_to_devlist(
7952 ddi_devid_t devid,
7953 char *minor_name,
7954 int *retndevs,
7955 dev_t **retdevs)
7956 {
7957 ASSERT(ddi_devid_valid(devid) == DDI_SUCCESS);
7958
7959 if (e_devid_cache_to_devt_list(devid, minor_name,
7960 retndevs, retdevs) == DDI_SUCCESS) {
7961 ASSERT(*retndevs > 0);
7962 DDI_DEBUG_DEVID_DEVTS("ddi_lyr_devid_to_devlist",
7963 *retndevs, *retdevs);
7964 return (DDI_SUCCESS);
7965 }
7966
7967 if (e_ddi_devid_discovery(devid) == DDI_FAILURE) {
7968 return (DDI_FAILURE);
7969 }
7970
7971 if (e_devid_cache_to_devt_list(devid, minor_name,
7972 retndevs, retdevs) == DDI_SUCCESS) {
7973 ASSERT(*retndevs > 0);
7974 DDI_DEBUG_DEVID_DEVTS("ddi_lyr_devid_to_devlist",
7975 *retndevs, *retdevs);
7976 return (DDI_SUCCESS);
7977 }
7978
7979 return (DDI_FAILURE);
7980 }
7981
/*
 * Free a dev_t list returned by ddi_lyr_devid_to_devlist.
 * ndevs must match the count returned through retndevs.
 */
void
ddi_lyr_free_devlist(dev_t *devlist, int ndevs)
{
	kmem_free(devlist, sizeof (dev_t) * ndevs);
}
7987
7988 /*
7989 * Note: This will need to be fixed if we ever allow processes to
7990 * have more than one data model per exec.
7991 */
model_t
ddi_mmap_get_model(void)
{
	/* data model of the current user process */
	return (get_udatamodel());
}
7997
model_t
ddi_model_convert_from(model_t model)
{
	/*
	 * Reduce to the bare model id: mask off bits outside
	 * DDI_MODEL_MASK and clear DDI_MODEL_NATIVE (see sys/model.h
	 * for the encoding these macros assume).
	 */
	return ((model & DDI_MODEL_MASK) & ~DDI_MODEL_NATIVE);
}
8003
8004 /*
8005 * ddi interfaces managing storage and retrieval of eventcookies.
8006 */
8007
8008 /*
8009 * Invoke bus nexus driver's implementation of the
8010 * (*bus_remove_eventcall)() interface to remove a registered
8011 * callback handler for "event".
8012 */
8013 int
8014 ddi_remove_event_handler(ddi_callback_id_t id)
8015 {
8016 ndi_event_callbacks_t *cb = (ndi_event_callbacks_t *)id;
8017 dev_info_t *ddip;
8018
8019 ASSERT(cb);
8020 if (!cb) {
8021 return (DDI_FAILURE);
8022 }
8023
8024 ddip = NDI_EVENT_DDIP(cb->ndi_evtcb_cookie);
8025 return (ndi_busop_remove_eventcall(ddip, id));
8026 }
8027
8028 /*
8029 * Invoke bus nexus driver's implementation of the
8030 * (*bus_add_eventcall)() interface to register a callback handler
8031 * for "event".
8032 */
/*
 * Register "handler" for "event" on dip; the registration id is
 * returned through *id and can later be passed to
 * ddi_remove_event_handler().
 */
int
ddi_add_event_handler(dev_info_t *dip, ddi_eventcookie_t event,
    void (*handler)(dev_info_t *, ddi_eventcookie_t, void *, void *),
    void *arg, ddi_callback_id_t *id)
{
	return (ndi_busop_add_eventcall(dip, dip, event, handler, arg, id));
}
8040
8041
8042 /*
8043 * Return a handle for event "name" by calling up the device tree
8044 * hierarchy via (*bus_get_eventcookie)() interface until claimed
8045 * by a bus nexus or top of dev_info tree is reached.
8046 */
int
ddi_get_eventcookie(dev_info_t *dip, char *name,
    ddi_eventcookie_t *event_cookiep)
{
	/* dip acts as both the requesting and the subject device */
	return (ndi_busop_get_eventcookie(dip, dip,
	    name, event_cookiep));
}
8054
8055 /*
8056 * This procedure is provided as the general callback function when
8057 * umem_lockmemory calls as_add_callback for long term memory locking.
8058 * When as_unmap, as_setprot, or as_free encounter segments which have
8059 * locked memory, this callback will be invoked.
8060 */
void
umem_lock_undo(struct as *as, void *arg, uint_t event)
{
	_NOTE(ARGUNUSED(as, event))
	struct ddi_umem_cookie *cp = (struct ddi_umem_cookie *)arg;

	/*
	 * Call the cleanup function.  Decrement the cookie reference
	 * count, if it goes to zero, return the memory for the cookie.
	 * The i_ddi_umem_unlock for this cookie may or may not have been
	 * called already.  It is the responsibility of the caller of
	 * umem_lockmemory to handle the case of the cleanup routine
	 * being called after a ddi_umem_unlock for the cookie
	 * was called.
	 */

	(*cp->callbacks.cbo_umem_lock_cleanup)((ddi_umem_cookie_t)cp);

	/*
	 * Remove the cookie if reference goes to zero; the other
	 * reference holder is i_ddi_umem_unlock, which decrements
	 * the same counter.
	 */
	if (atomic_dec_ulong_nv((ulong_t *)(&(cp->cook_refcnt))) == 0) {
		kmem_free(cp, sizeof (struct ddi_umem_cookie));
	}
}
8084
8085 /*
8086 * The following two Consolidation Private routines provide generic
8087 * interfaces to increase/decrease the amount of device-locked memory.
8088 *
8089 * To keep project_rele and project_hold consistent, i_ddi_decr_locked_memory()
8090 * must be called every time i_ddi_incr_locked_memory() is called.
8091 */
8092 int
8093 /* ARGSUSED */
8094 i_ddi_incr_locked_memory(proc_t *procp, rctl_qty_t inc)
8095 {
8096 ASSERT(procp != NULL);
8097 mutex_enter(&procp->p_lock);
8098 if (rctl_incr_locked_mem(procp, NULL, inc, 1)) {
8099 mutex_exit(&procp->p_lock);
8100 return (ENOMEM);
8101 }
8102 mutex_exit(&procp->p_lock);
8103 return (0);
8104 }
8105
8106 /*
8107 * To keep project_rele and project_hold consistent, i_ddi_incr_locked_memory()
8108 * must be called every time i_ddi_decr_locked_memory() is called.
8109 */
/* ARGSUSED */
void
i_ddi_decr_locked_memory(proc_t *procp, rctl_qty_t dec)
{
	ASSERT(procp != NULL);
	/* rctl accounting requires the process lock */
	mutex_enter(&procp->p_lock);
	rctl_decr_locked_mem(procp, NULL, dec, 1);
	mutex_exit(&procp->p_lock);
}
8119
8120 /*
8121 * The cookie->upd_max_lock_rctl flag is used to determine if we should
8122 * charge device locked memory to the max-locked-memory rctl. Tracking
8123 * device locked memory causes the rctl locks to get hot under high-speed
8124 * I/O such as RDSv3 over IB. If there is no max-locked-memory rctl limit,
8125 * we bypass charging the locked memory to the rctl altogether. The cookie's
8126 * flag tells us if the rctl value should be updated when unlocking the memory,
8127 * in case the rctl gets changed after the memory was locked. Any device
8128 * locked memory in that rare case will not be counted toward the rctl limit.
8129 *
8130 * When tracking the locked memory, the kproject_t parameter is always NULL
8131 * in the code paths:
8132 * i_ddi_incr_locked_memory -> rctl_incr_locked_mem
8133 * i_ddi_decr_locked_memory -> rctl_decr_locked_mem
8134 * Thus, we always use the tk_proj member to check the projp setting.
8135 */
8136 static void
8137 init_lockedmem_rctl_flag(struct ddi_umem_cookie *cookie)
8138 {
8139 proc_t *p;
8140 kproject_t *projp;
8141 zone_t *zonep;
8142
8143 ASSERT(cookie);
8144 p = cookie->procp;
8145 ASSERT(p);
8146
8147 zonep = p->p_zone;
8148 projp = p->p_task->tk_proj;
8149
8150 ASSERT(zonep);
8151 ASSERT(projp);
8152
8153 if (zonep->zone_locked_mem_ctl == UINT64_MAX &&
8154 projp->kpj_data.kpd_locked_mem_ctl == UINT64_MAX)
8155 cookie->upd_max_lock_rctl = 0;
8156 else
8157 cookie->upd_max_lock_rctl = 1;
8158 }
8159
8160 /*
8161 * This routine checks if the max-locked-memory resource ctl is
8162 * exceeded, if not increments it, grabs a hold on the project.
8163 * Returns 0 if successful otherwise returns error code
8164 */
8165 static int
8166 umem_incr_devlockmem(struct ddi_umem_cookie *cookie)
8167 {
8168 proc_t *procp;
8169 int ret;
8170
8171 ASSERT(cookie);
8172 if (cookie->upd_max_lock_rctl == 0)
8173 return (0);
8174
8175 procp = cookie->procp;
8176 ASSERT(procp);
8177
8178 if ((ret = i_ddi_incr_locked_memory(procp,
8179 cookie->size)) != 0) {
8180 return (ret);
8181 }
8182 return (0);
8183 }
8184
8185 /*
8186 * Decrements the max-locked-memory resource ctl and releases
8187 * the hold on the project that was acquired during umem_incr_devlockmem
8188 */
8189 static void
8190 umem_decr_devlockmem(struct ddi_umem_cookie *cookie)
8191 {
8192 proc_t *proc;
8193
8194 if (cookie->upd_max_lock_rctl == 0)
8195 return;
8196
8197 proc = (proc_t *)cookie->procp;
8198 if (!proc)
8199 return;
8200
8201 i_ddi_decr_locked_memory(proc, cookie->size);
8202 }
8203
8204 /*
8205 * A consolidation private function which is essentially equivalent to
8206 * ddi_umem_lock but with the addition of arguments ops_vector and procp.
8207 * A call to as_add_callback is done if DDI_UMEMLOCK_LONGTERM is set, and
8208 * the ops_vector is valid.
8209 *
8210 * Lock the virtual address range in the current process and create a
8211 * ddi_umem_cookie (of type UMEM_LOCKED). This can be used to pass to
8212 * ddi_umem_iosetup to create a buf or do devmap_umem_setup/remap to export
8213 * to user space.
8214 *
8215 * Note: The resource control accounting currently uses a full charge model
8216 * in other words attempts to lock the same/overlapping areas of memory
8217 * will deduct the full size of the buffer from the projects running
8218 * counter for the device locked memory.
8219 *
8220 * addr, size should be PAGESIZE aligned
8221 *
8222 * flags - DDI_UMEMLOCK_READ, DDI_UMEMLOCK_WRITE or both
8223 * identifies whether the locked memory will be read or written or both
8224 * DDI_UMEMLOCK_LONGTERM must be set when the locking will
8225 * be maintained for an indefinitely long period (essentially permanent),
8226 * rather than for what would be required for a typical I/O completion.
8227 * When DDI_UMEMLOCK_LONGTERM is set, umem_lockmemory will return EFAULT
8228 * if the memory pertains to a regular file which is mapped MAP_SHARED.
8229 * This is to prevent a deadlock if a file truncation is attempted after
8230 * after the locking is done.
8231 *
8232 * Returns 0 on success
8233 * EINVAL - for invalid parameters
8234 * EPERM, ENOMEM and other error codes returned by as_pagelock
8235 * ENOMEM - is returned if the current request to lock memory exceeds
8236 * *.max-locked-memory resource control value.
8237 * EFAULT - memory pertains to a regular file mapped shared and
8238 * and DDI_UMEMLOCK_LONGTERM flag is set
8239 * EAGAIN - could not start the ddi_umem_unlock list processing thread
8240 */
int
umem_lockmemory(caddr_t addr, size_t len, int flags, ddi_umem_cookie_t *cookie,
    struct umem_callback_ops *ops_vector,
    proc_t *procp)
{
	int	error;
	struct ddi_umem_cookie *p;
	void	(*driver_callback)() = NULL;
	struct as *as;
	struct seg		*seg;
	vnode_t			*vp;

	/* Allow device drivers to not have to reference "curproc" */
	if (procp == NULL)
		procp = curproc;
	as = procp->p_as;
	*cookie = NULL;		/* in case of any error return */

	/* These are the only three valid flags */
	if ((flags & ~(DDI_UMEMLOCK_READ | DDI_UMEMLOCK_WRITE |
	    DDI_UMEMLOCK_LONGTERM)) != 0)
		return (EINVAL);

	/* At least one (can be both) of the two access flags must be set */
	if ((flags & (DDI_UMEMLOCK_READ | DDI_UMEMLOCK_WRITE)) == 0)
		return (EINVAL);

	/* addr and len must be page-aligned */
	if (((uintptr_t)addr & PAGEOFFSET) != 0)
		return (EINVAL);

	if ((len & PAGEOFFSET) != 0)
		return (EINVAL);

	/*
	 * For longterm locking a driver callback must be specified; if
	 * not longterm then a callback is optional.
	 */
	if (ops_vector != NULL) {
		if (ops_vector->cbo_umem_callback_version !=
		    UMEM_CALLBACK_VERSION)
			return (EINVAL);
		else
			driver_callback = ops_vector->cbo_umem_lock_cleanup;
	}
	if ((driver_callback == NULL) && (flags & DDI_UMEMLOCK_LONGTERM))
		return (EINVAL);

	/*
	 * Call i_ddi_umem_unlock_thread_start if necessary.  It will
	 * be called on first ddi_umem_lock or umem_lockmemory call.
	 */
	if (ddi_umem_unlock_thread == NULL)
		i_ddi_umem_unlock_thread_start();

	/* Allocate memory for the cookie */
	p = kmem_zalloc(sizeof (struct ddi_umem_cookie), KM_SLEEP);

	/* Convert the flags to seg_rw type */
	if (flags & DDI_UMEMLOCK_WRITE) {
		p->s_flags = S_WRITE;
	} else {
		p->s_flags = S_READ;
	}

	/* Store procp in cookie for later iosetup/unlock */
	p->procp = (void *)procp;

	/*
	 * Store the struct as pointer in cookie for later use by
	 * ddi_umem_unlock.  The proc->p_as will be stale if ddi_umem_unlock
	 * is called after relvm is called.
	 */
	p->asp = as;

	/*
	 * The size field is needed for lockmem accounting.
	 */
	p->size = len;
	init_lockedmem_rctl_flag(p);

	/* charge the *.max-locked-memory rctl before locking the pages */
	if (umem_incr_devlockmem(p) != 0) {
		/*
		 * The requested memory cannot be locked
		 */
		kmem_free(p, sizeof (struct ddi_umem_cookie));
		*cookie = (ddi_umem_cookie_t)NULL;
		return (ENOMEM);
	}

	/* Lock the pages corresponding to addr, len in memory */
	error = as_pagelock(as, &(p->pparray), addr, len, p->s_flags);
	if (error != 0) {
		umem_decr_devlockmem(p);
		kmem_free(p, sizeof (struct ddi_umem_cookie));
		*cookie = (ddi_umem_cookie_t)NULL;
		return (error);
	}

	/*
	 * For longterm locking the addr must pertain to a seg_vn segment or
	 * a seg_spt segment.
	 * If the segment pertains to a regular file, it cannot be
	 * mapped MAP_SHARED.
	 * This is to prevent a deadlock if a file truncation is attempted
	 * after the locking is done.
	 * Doing this after as_pagelock guarantees persistence of the as; if
	 * an unacceptable segment is found, the cleanup includes calling
	 * as_pageunlock before returning EFAULT.
	 *
	 * segdev is allowed here as it is already locked.  This allows
	 * for memory exported by drivers through mmap() (which is already
	 * locked) to be allowed for LONGTERM.
	 */
	if (flags & DDI_UMEMLOCK_LONGTERM) {
		extern struct seg_ops segspt_shmops;
		extern struct seg_ops segdev_ops;
		AS_LOCK_ENTER(as, RW_READER);
		/* walk every segment that overlaps [addr, addr + len) */
		for (seg = as_segat(as, addr); ; seg = AS_SEGNEXT(as, seg)) {
			if (seg == NULL || seg->s_base > addr + len)
				break;
			if (seg->s_ops == &segdev_ops)
				continue;
			/*
			 * Reject the range if this segment is neither
			 * seg_vn nor seg_spt, or if it maps a regular
			 * file MAP_SHARED.
			 */
			if (((seg->s_ops != &segvn_ops) &&
			    (seg->s_ops != &segspt_shmops)) ||
			    ((SEGOP_GETVP(seg, addr, &vp) == 0 &&
			    vp != NULL && vp->v_type == VREG) &&
			    (SEGOP_GETTYPE(seg, addr) & MAP_SHARED))) {
				as_pageunlock(as, p->pparray,
				    addr, len, p->s_flags);
				AS_LOCK_EXIT(as);
				umem_decr_devlockmem(p);
				kmem_free(p, sizeof (struct ddi_umem_cookie));
				*cookie = (ddi_umem_cookie_t)NULL;
				return (EFAULT);
			}
		}
		AS_LOCK_EXIT(as);
	}


	/* Initialize the fields in the ddi_umem_cookie */
	p->cvaddr = addr;
	p->type = UMEM_LOCKED;
	if (driver_callback != NULL) {
		/* i_ddi_umem_unlock and umem_lock_undo may need the cookie */
		p->cook_refcnt = 2;
		p->callbacks = *ops_vector;
	} else {
		/* only i_ddi_umem_unlock needs the cookie */
		p->cook_refcnt = 1;
	}

	*cookie = (ddi_umem_cookie_t)p;

	/*
	 * If a driver callback was specified, add an entry to the
	 * as struct callback list.  The as_pagelock above guarantees
	 * the persistence of as.
	 */
	if (driver_callback) {
		error = as_add_callback(as, umem_lock_undo, p, AS_ALL_EVENT,
		    addr, len, KM_SLEEP);
		if (error != 0) {
			as_pageunlock(as, p->pparray,
			    addr, len, p->s_flags);
			umem_decr_devlockmem(p);
			kmem_free(p, sizeof (struct ddi_umem_cookie));
			*cookie = (ddi_umem_cookie_t)NULL;
		}
	}
	return (error);
}
8414
8415 /*
8416 * Unlock the pages locked by ddi_umem_lock or umem_lockmemory and free
8417 * the cookie. Called from i_ddi_umem_unlock_thread.
8418 */
8419
static void
i_ddi_umem_unlock(struct ddi_umem_cookie *p)
{
	uint_t	rc;

	/*
	 * There is no way to determine whether a callback to
	 * umem_lock_undo was registered via as_add_callback.
	 * (i.e. umem_lockmemory was called with DDI_MEMLOCK_LONGTERM and
	 * a valid callback function structure.)  as_delete_callback
	 * is called to delete a possible registered callback.  If the
	 * return from as_delete_callbacks is AS_CALLBACK_DELETED, it
	 * indicates that there was a callback registered, and that it was
	 * successfully deleted.  Thus, the cookie reference count
	 * will never be decremented by umem_lock_undo.  Just return the
	 * memory for the cookie, since both users of the cookie are done.
	 * A return of AS_CALLBACK_NOTFOUND indicates a callback was
	 * never registered.  A return of AS_CALLBACK_DELETE_DEFERRED
	 * indicates that callback processing is taking place and
	 * umem_lock_undo is, or will be, executing, and thus decrementing
	 * the cookie reference count when it is complete.
	 *
	 * This needs to be done before as_pageunlock so that the
	 * persistence of as is guaranteed because of the locked pages.
	 *
	 */
	rc = as_delete_callback(p->asp, p);


	/*
	 * The proc->p_as will be stale if i_ddi_umem_unlock is called
	 * after relvm is called so use p->asp.
	 */
	as_pageunlock(p->asp, p->pparray, p->cvaddr, p->size, p->s_flags);

	/*
	 * Now that we have unlocked the memory decrement the
	 * *.max-locked-memory rctl
	 */
	umem_decr_devlockmem(p);

	if (rc == AS_CALLBACK_DELETED) {
		/* umem_lock_undo will not happen, return the cookie memory */
		ASSERT(p->cook_refcnt == 2);
		kmem_free(p, sizeof (struct ddi_umem_cookie));
	} else {
		/*
		 * umem_undo_lock may happen if as_delete_callback returned
		 * AS_CALLBACK_DELETE_DEFERRED.  In that case, decrement the
		 * reference count, atomically, and return the cookie
		 * memory if the reference count goes to zero.  The only
		 * other value for rc is AS_CALLBACK_NOTFOUND.  In that
		 * case, just return the cookie memory.
		 */
		if ((rc != AS_CALLBACK_DELETE_DEFERRED) ||
		    (atomic_dec_ulong_nv((ulong_t *)(&(p->cook_refcnt)))
		    == 0)) {
			kmem_free(p, sizeof (struct ddi_umem_cookie));
		}
	}
}
8481
8482 /*
8483 * i_ddi_umem_unlock_thread - deferred ddi_umem_unlock list handler.
8484 *
8485 * Call i_ddi_umem_unlock for entries in the ddi_umem_unlock list
8486 * until it is empty. Then, wait for more to be added. This thread is awoken
8487 * via calls to ddi_umem_unlock.
8488 */
8489
static void
i_ddi_umem_unlock_thread(void)
{
	struct ddi_umem_cookie	*ret_cookie;
	callb_cpr_t	cprinfo;

	/* process the ddi_umem_unlock list */
	CALLB_CPR_INIT(&cprinfo, &ddi_umem_unlock_mutex,
	    callb_generic_cpr, "unlock_thread");
	for (;;) {
		mutex_enter(&ddi_umem_unlock_mutex);
		if (ddi_umem_unlock_head != NULL) {	/* list not empty */
			ret_cookie = ddi_umem_unlock_head;
			/* take it off the list */
			if ((ddi_umem_unlock_head =
			    ddi_umem_unlock_head->unl_forw) == NULL) {
				ddi_umem_unlock_tail = NULL;
			}
			mutex_exit(&ddi_umem_unlock_mutex);
			/* unlock the pages in this cookie */
			(void) i_ddi_umem_unlock(ret_cookie);
		} else {	/* list is empty, wait for next ddi_umem_unlock */
			/* mark CPR-safe while blocked on the cv */
			CALLB_CPR_SAFE_BEGIN(&cprinfo);
			cv_wait(&ddi_umem_unlock_cv, &ddi_umem_unlock_mutex);
			CALLB_CPR_SAFE_END(&cprinfo, &ddi_umem_unlock_mutex);
			mutex_exit(&ddi_umem_unlock_mutex);
		}
	}
	/* ddi_umem_unlock_thread does not exit */
	/* NOTREACHED */
}
8521
8522 /*
8523 * Start the thread that will process the ddi_umem_unlock list if it is
8524 * not already started (i_ddi_umem_unlock_thread).
8525 */
static void
i_ddi_umem_unlock_thread_start(void)
{
	mutex_enter(&ddi_umem_unlock_mutex);
	/* re-check under the lock: a racing caller may have started it */
	if (ddi_umem_unlock_thread == NULL) {
		ddi_umem_unlock_thread = thread_create(NULL, 0,
		    i_ddi_umem_unlock_thread, NULL, 0, &p0,
		    TS_RUN, minclsyspri);
	}
	mutex_exit(&ddi_umem_unlock_mutex);
}
8537
8538 /*
8539 * Lock the virtual address range in the current process and create a
8540 * ddi_umem_cookie (of type UMEM_LOCKED). This can be used to pass to
8541 * ddi_umem_iosetup to create a buf or do devmap_umem_setup/remap to export
8542 * to user space.
8543 *
8544 * Note: The resource control accounting currently uses a full charge model
8545 * in other words attempts to lock the same/overlapping areas of memory
8546 * will deduct the full size of the buffer from the projects running
8547 * counter for the device locked memory. This applies to umem_lockmemory too.
8548 *
8549 * addr, size should be PAGESIZE aligned
8550 * flags - DDI_UMEMLOCK_READ, DDI_UMEMLOCK_WRITE or both
8551 * identifies whether the locked memory will be read or written or both
8552 *
8553 * Returns 0 on success
8554 * EINVAL - for invalid parameters
8555 * EPERM, ENOMEM and other error codes returned by as_pagelock
8556 * ENOMEM - is returned if the current request to lock memory exceeds
8557 * *.max-locked-memory resource control value.
8558 * EAGAIN - could not start the ddi_umem_unlock list processing thread
8559 */
8560 int
8561 ddi_umem_lock(caddr_t addr, size_t len, int flags, ddi_umem_cookie_t *cookie)
8562 {
8563 int error;
8564 struct ddi_umem_cookie *p;
8565
8566 *cookie = NULL; /* in case of any error return */
8567
8568 /* These are the only two valid flags */
8569 if ((flags & ~(DDI_UMEMLOCK_READ | DDI_UMEMLOCK_WRITE)) != 0) {
8570 return (EINVAL);
8571 }
8572
8573 /* At least one of the two flags (or both) must be set */
8574 if ((flags & (DDI_UMEMLOCK_READ | DDI_UMEMLOCK_WRITE)) == 0) {
8575 return (EINVAL);
8576 }
8577
8578 /* addr and len must be page-aligned */
8579 if (((uintptr_t)addr & PAGEOFFSET) != 0) {
8580 return (EINVAL);
8581 }
8582
8583 if ((len & PAGEOFFSET) != 0) {
8584 return (EINVAL);
8585 }
8586
8587 /*
8588 * Call i_ddi_umem_unlock_thread_start if necessary. It will
8589 * be called on first ddi_umem_lock or umem_lockmemory call.
8590 */
8591 if (ddi_umem_unlock_thread == NULL)
8592 i_ddi_umem_unlock_thread_start();
8593
8594 /* Allocate memory for the cookie */
8595 p = kmem_zalloc(sizeof (struct ddi_umem_cookie), KM_SLEEP);
8596
8597 /* Convert the flags to seg_rw type */
8598 if (flags & DDI_UMEMLOCK_WRITE) {
8599 p->s_flags = S_WRITE;
8600 } else {
8601 p->s_flags = S_READ;
8602 }
8603
8604 /* Store curproc in cookie for later iosetup/unlock */
8605 p->procp = (void *)curproc;
8606
8607 /*
8608 * Store the struct as pointer in cookie for later use by
8609 * ddi_umem_unlock. The proc->p_as will be stale if ddi_umem_unlock
8610 * is called after relvm is called.
8611 */
8612 p->asp = curproc->p_as;
8613 /*
8614 * The size field is needed for lockmem accounting.
8615 */
8616 p->size = len;
8617 init_lockedmem_rctl_flag(p);
8618
8619 if (umem_incr_devlockmem(p) != 0) {
8620 /*
8621 * The requested memory cannot be locked
8622 */
8623 kmem_free(p, sizeof (struct ddi_umem_cookie));
8624 *cookie = (ddi_umem_cookie_t)NULL;
8625 return (ENOMEM);
8626 }
8627
8628 /* Lock the pages corresponding to addr, len in memory */
8629 error = as_pagelock(((proc_t *)p->procp)->p_as, &(p->pparray),
8630 addr, len, p->s_flags);
8631 if (error != 0) {
8632 umem_decr_devlockmem(p);
8633 kmem_free(p, sizeof (struct ddi_umem_cookie));
8634 *cookie = (ddi_umem_cookie_t)NULL;
8635 return (error);
8636 }
8637
8638 /* Initialize the fields in the ddi_umem_cookie */
8639 p->cvaddr = addr;
8640 p->type = UMEM_LOCKED;
8641 p->cook_refcnt = 1;
8642
8643 *cookie = (ddi_umem_cookie_t)p;
8644 return (error);
8645 }
8646
8647 /*
8648 * Add the cookie to the ddi_umem_unlock list. Pages will be
8649 * unlocked by i_ddi_umem_unlock_thread.
8650 */
8651
8652 void
8653 ddi_umem_unlock(ddi_umem_cookie_t cookie)
8654 {
8655 struct ddi_umem_cookie *p = (struct ddi_umem_cookie *)cookie;
8656
8657 ASSERT(p->type == UMEM_LOCKED);
8658 ASSERT(CPU_ON_INTR(CPU) == 0); /* cannot be high level */
8659 ASSERT(ddi_umem_unlock_thread != NULL);
8660
8661 p->unl_forw = (struct ddi_umem_cookie *)NULL; /* end of list */
8662 /*
8663 * Queue the unlock request and notify i_ddi_umem_unlock thread
8664 * if it's called in the interrupt context. Otherwise, unlock pages
8665 * immediately.
8666 */
8667 if (servicing_interrupt()) {
8668 /* queue the unlock request and notify the thread */
8669 mutex_enter(&ddi_umem_unlock_mutex);
8670 if (ddi_umem_unlock_head == NULL) {
8671 ddi_umem_unlock_head = ddi_umem_unlock_tail = p;
8672 cv_broadcast(&ddi_umem_unlock_cv);
8673 } else {
8674 ddi_umem_unlock_tail->unl_forw = p;
8675 ddi_umem_unlock_tail = p;
8676 }
8677 mutex_exit(&ddi_umem_unlock_mutex);
8678 } else {
8679 /* unlock the pages right away */
8680 (void) i_ddi_umem_unlock(p);
8681 }
8682 }
8683
8684 /*
8685 * Create a buf structure from a ddi_umem_cookie
8686 * cookie - is a ddi_umem_cookie for from ddi_umem_lock and ddi_umem_alloc
8687 * (only UMEM_LOCKED & KMEM_NON_PAGEABLE types supported)
8688 * off, len - identifies the portion of the memory represented by the cookie
8689 * that the buf points to.
8690 * NOTE: off, len need to follow the alignment/size restrictions of the
8691 * device (dev) that this buf will be passed to. Some devices
8692 * will accept unrestricted alignment/size, whereas others (such as
8693 * st) require some block-size alignment/size. It is the caller's
8694 * responsibility to ensure that the alignment/size restrictions
8695 * are met (we cannot assert as we do not know the restrictions)
8696 *
8697 * direction - is one of B_READ or B_WRITE and needs to be compatible with
8698 * the flags used in ddi_umem_lock
8699 *
8700 * The following three arguments are used to initialize fields in the
8701 * buf structure and are uninterpreted by this routine.
8702 *
8703 * dev
8704 * blkno
8705 * iodone
8706 *
8707 * sleepflag - is one of DDI_UMEM_SLEEP or DDI_UMEM_NOSLEEP
8708 *
8709 * Returns a buf structure pointer on success (to be freed by freerbuf)
8710 * NULL on any parameter error or memory alloc failure
8711 *
8712 */
8713 struct buf *
8714 ddi_umem_iosetup(ddi_umem_cookie_t cookie, off_t off, size_t len,
8715 int direction, dev_t dev, daddr_t blkno,
8716 int (*iodone)(struct buf *), int sleepflag)
8717 {
8718 struct ddi_umem_cookie *p = (struct ddi_umem_cookie *)cookie;
8719 struct buf *bp;
8720
8721 /*
8722 * check for valid cookie offset, len
8723 */
8724 if ((off + len) > p->size) {
8725 return (NULL);
8726 }
8727
8728 if (len > p->size) {
8729 return (NULL);
8730 }
8731
8732 /* direction has to be one of B_READ or B_WRITE */
8733 if ((direction != B_READ) && (direction != B_WRITE)) {
8734 return (NULL);
8735 }
8736
8737 /* These are the only two valid sleepflags */
8738 if ((sleepflag != DDI_UMEM_SLEEP) && (sleepflag != DDI_UMEM_NOSLEEP)) {
8739 return (NULL);
8740 }
8741
8742 /*
8743 * Only cookies of type UMEM_LOCKED and KMEM_NON_PAGEABLE are supported
8744 */
8745 if ((p->type != UMEM_LOCKED) && (p->type != KMEM_NON_PAGEABLE)) {
8746 return (NULL);
8747 }
8748
8749 /* If type is KMEM_NON_PAGEABLE procp is NULL */
8750 ASSERT((p->type == KMEM_NON_PAGEABLE) ?
8751 (p->procp == NULL) : (p->procp != NULL));
8752
8753 bp = kmem_alloc(sizeof (struct buf), sleepflag);
8754 if (bp == NULL) {
8755 return (NULL);
8756 }
8757 bioinit(bp);
8758
8759 bp->b_flags = B_BUSY | B_PHYS | direction;
8760 bp->b_edev = dev;
8761 bp->b_lblkno = blkno;
8762 bp->b_iodone = iodone;
8763 bp->b_bcount = len;
8764 bp->b_proc = (proc_t *)p->procp;
8765 ASSERT(((uintptr_t)(p->cvaddr) & PAGEOFFSET) == 0);
8766 bp->b_un.b_addr = (caddr_t)((uintptr_t)(p->cvaddr) + off);
8767 if (p->pparray != NULL) {
8768 bp->b_flags |= B_SHADOW;
8769 ASSERT(((uintptr_t)(p->cvaddr) & PAGEOFFSET) == 0);
8770 bp->b_shadow = p->pparray + btop(off);
8771 }
8772 return (bp);
8773 }
8774
8775 /*
8776 * Fault-handling and related routines
8777 */
8778
8779 ddi_devstate_t
8780 ddi_get_devstate(dev_info_t *dip)
8781 {
8782 if (DEVI_IS_DEVICE_OFFLINE(dip))
8783 return (DDI_DEVSTATE_OFFLINE);
8784 else if (DEVI_IS_DEVICE_DOWN(dip) || DEVI_IS_BUS_DOWN(dip))
8785 return (DDI_DEVSTATE_DOWN);
8786 else if (DEVI_IS_BUS_QUIESCED(dip))
8787 return (DDI_DEVSTATE_QUIESCED);
8788 else if (DEVI_IS_DEVICE_DEGRADED(dip))
8789 return (DDI_DEVSTATE_DEGRADED);
8790 else
8791 return (DDI_DEVSTATE_UP);
8792 }
8793
void
ddi_dev_report_fault(dev_info_t *dip, ddi_fault_impact_t impact,
    ddi_fault_location_t location, const char *message)
{
	struct ddi_fault_event_data fd;
	ddi_eventcookie_t ec;

	/*
	 * Assemble all the information into a fault-event-data structure
	 */
	fd.f_dip = dip;
	fd.f_impact = impact;
	fd.f_location = location;
	fd.f_message = message;
	fd.f_oldstate = ddi_get_devstate(dip);

	/*
	 * Get eventcookie from defining parent; if no ancestor defines
	 * the fault event the report is silently dropped.
	 */
	if (ddi_get_eventcookie(dip, DDI_DEVI_FAULT_EVENT, &ec) !=
	    DDI_SUCCESS)
		return;

	(void) ndi_post_event(dip, dip, ec, &fd);
}
8819
/*
 * Return the device-class string stored on dip (may be NULL if the
 * class was never set); the caller must not free it.
 */
char *
i_ddi_devi_class(dev_info_t *dip)
{
	return (DEVI(dip)->devi_device_class);
}
8825
8826 int
8827 i_ddi_set_devi_class(dev_info_t *dip, char *devi_class, int flag)
8828 {
8829 struct dev_info *devi = DEVI(dip);
8830
8831 mutex_enter(&devi->devi_lock);
8832
8833 if (devi->devi_device_class)
8834 kmem_free(devi->devi_device_class,
8835 strlen(devi->devi_device_class) + 1);
8836
8837 if ((devi->devi_device_class = i_ddi_strdup(devi_class, flag))
8838 != NULL) {
8839 mutex_exit(&devi->devi_lock);
8840 return (DDI_SUCCESS);
8841 }
8842
8843 mutex_exit(&devi->devi_lock);
8844
8845 return (DDI_FAILURE);
8846 }
8847
8848
8849 /*
8850 * Task Queues DDI interfaces.
8851 */
8852
8853 /* ARGSUSED */
8854 ddi_taskq_t *
8855 ddi_taskq_create(dev_info_t *dip, const char *name, int nthreads,
8856 pri_t pri, uint_t cflags)
8857 {
8858 char full_name[TASKQ_NAMELEN];
8859 const char *tq_name;
8860 int nodeid = 0;
8861
8862 if (dip == NULL)
8863 tq_name = name;
8864 else {
8865 nodeid = ddi_get_instance(dip);
8866
8867 if (name == NULL)
8868 name = "tq";
8869
8870 (void) snprintf(full_name, sizeof (full_name), "%s_%s",
8871 ddi_driver_name(dip), name);
8872
8873 tq_name = full_name;
8874 }
8875
8876 return ((ddi_taskq_t *)taskq_create_instance(tq_name, nodeid, nthreads,
8877 pri == TASKQ_DEFAULTPRI ? minclsyspri : pri,
8878 nthreads, INT_MAX, TASKQ_PREPOPULATE));
8879 }
8880
/* Destroy a task queue created by ddi_taskq_create(). */
void
ddi_taskq_destroy(ddi_taskq_t *tq)
{
	taskq_destroy((taskq_t *)tq);
}
8886
8887 int
8888 ddi_taskq_dispatch(ddi_taskq_t *tq, void (* func)(void *),
8889 void *arg, uint_t dflags)
8890 {
8891 taskqid_t id = taskq_dispatch((taskq_t *)tq, func, arg,
8892 dflags == DDI_SLEEP ? TQ_SLEEP : TQ_NOSLEEP);
8893
8894 return (id != 0 ? DDI_SUCCESS : DDI_FAILURE);
8895 }
8896
/* Wait for all dispatched tasks on tq to complete. */
void
ddi_taskq_wait(ddi_taskq_t *tq)
{
	taskq_wait((taskq_t *)tq);
}
8902
/* Suspend execution of tasks on tq (see ddi_taskq_resume). */
void
ddi_taskq_suspend(ddi_taskq_t *tq)
{
	taskq_suspend((taskq_t *)tq);
}
8908
/* Return B_TRUE if tq is currently suspended. */
boolean_t
ddi_taskq_suspended(ddi_taskq_t *tq)
{
	return (taskq_suspended((taskq_t *)tq));
}
8914
/* Resume task execution on a suspended tq. */
void
ddi_taskq_resume(ddi_taskq_t *tq)
{
	taskq_resume((taskq_t *)tq);
}
8920
8921 int
8922 ddi_parse(
8923 const char *ifname,
8924 char *alnum,
8925 uint_t *nump)
8926 {
8927 const char *p;
8928 int l;
8929 ulong_t num;
8930 boolean_t nonum = B_TRUE;
8931 char c;
8932
8933 l = strlen(ifname);
8934 for (p = ifname + l; p != ifname; l--) {
8935 c = *--p;
8936 if (!isdigit(c)) {
8937 (void) strlcpy(alnum, ifname, l + 1);
8938 if (ddi_strtoul(p + 1, NULL, 10, &num) != 0)
8939 return (DDI_FAILURE);
8940 break;
8941 }
8942 nonum = B_FALSE;
8943 }
8944 if (l == 0 || nonum)
8945 return (DDI_FAILURE);
8946
8947 *nump = num;
8948 return (DDI_SUCCESS);
8949 }
8950
8951 /*
8952 * Default initialization function for drivers that don't need to quiesce.
8953 */
/* ARGSUSED */
int
ddi_quiesce_not_needed(dev_info_t *dip)
{
	/* nothing to quiesce; always succeeds */
	return (DDI_SUCCESS);
}
8960
8961 /*
8962 * Initialization function for drivers that should implement quiesce()
8963 * but haven't yet.
8964 */
/* ARGSUSED */
int
ddi_quiesce_not_supported(dev_info_t *dip)
{
	/* Driver lacks a quiesce(9E) implementation; always fail. */
	return (DDI_FAILURE);
}
8971
8972 char *
8973 ddi_strdup(const char *str, int flag)
8974 {
8975 int n;
8976 char *ptr;
8977
8978 ASSERT(str != NULL);
8979 ASSERT((flag == KM_SLEEP) || (flag == KM_NOSLEEP));
8980
8981 n = strlen(str);
8982 if ((ptr = kmem_alloc(n + 1, flag)) == NULL)
8983 return (NULL);
8984 bcopy(str, ptr, n + 1);
8985 return (ptr);
8986 }
8987
/*
 * Kernel strdup: allocate a KM_SLEEP copy of str via ddi_strdup().
 */
char *
strdup(const char *str)
{
	return (ddi_strdup(str, KM_SLEEP));
}
8993
/*
 * Free a string allocated by strdup()/ddi_strdup().  The size passed
 * to kmem_free() is recomputed from the contents, so str must be
 * NUL-terminated and its length unchanged since allocation.
 */
void
strfree(char *str)
{
	ASSERT(str != NULL);
	kmem_free(str, strlen(str) + 1);
}
9000
9001 /*
9002 * Generic DDI callback interfaces.
9003 */
9004
9005 int
9006 ddi_cb_register(dev_info_t *dip, ddi_cb_flags_t flags, ddi_cb_func_t cbfunc,
9007 void *arg1, void *arg2, ddi_cb_handle_t *ret_hdlp)
9008 {
9009 ddi_cb_t *cbp;
9010
9011 ASSERT(dip != NULL);
9012 ASSERT(DDI_CB_FLAG_VALID(flags));
9013 ASSERT(cbfunc != NULL);
9014 ASSERT(ret_hdlp != NULL);
9015
9016 /* Sanity check the context */
9017 ASSERT(!servicing_interrupt());
9018 if (servicing_interrupt())
9019 return (DDI_FAILURE);
9020
9021 /* Validate parameters */
9022 if ((dip == NULL) || !DDI_CB_FLAG_VALID(flags) ||
9023 (cbfunc == NULL) || (ret_hdlp == NULL))
9024 return (DDI_EINVAL);
9025
9026 /* Check for previous registration */
9027 if (DEVI(dip)->devi_cb_p != NULL)
9028 return (DDI_EALREADY);
9029
9030 /* Allocate and initialize callback */
9031 cbp = kmem_zalloc(sizeof (ddi_cb_t), KM_SLEEP);
9032 cbp->cb_dip = dip;
9033 cbp->cb_func = cbfunc;
9034 cbp->cb_arg1 = arg1;
9035 cbp->cb_arg2 = arg2;
9036 cbp->cb_flags = flags;
9037 DEVI(dip)->devi_cb_p = cbp;
9038
9039 /* If adding an IRM callback, notify IRM */
9040 if (flags & DDI_CB_FLAG_INTR)
9041 i_ddi_irm_set_cb(dip, B_TRUE);
9042
9043 *ret_hdlp = (ddi_cb_handle_t)&(DEVI(dip)->devi_cb_p);
9044 return (DDI_SUCCESS);
9045 }
9046
9047 int
9048 ddi_cb_unregister(ddi_cb_handle_t hdl)
9049 {
9050 ddi_cb_t *cbp;
9051 dev_info_t *dip;
9052
9053 ASSERT(hdl != NULL);
9054
9055 /* Sanity check the context */
9056 ASSERT(!servicing_interrupt());
9057 if (servicing_interrupt())
9058 return (DDI_FAILURE);
9059
9060 /* Validate parameters */
9061 if ((hdl == NULL) || ((cbp = *(ddi_cb_t **)hdl) == NULL) ||
9062 ((dip = cbp->cb_dip) == NULL))
9063 return (DDI_EINVAL);
9064
9065 /* If removing an IRM callback, notify IRM */
9066 if (cbp->cb_flags & DDI_CB_FLAG_INTR)
9067 i_ddi_irm_set_cb(dip, B_FALSE);
9068
9069 /* Destroy the callback */
9070 kmem_free(cbp, sizeof (ddi_cb_t));
9071 DEVI(dip)->devi_cb_p = NULL;
9072
9073 return (DDI_SUCCESS);
9074 }
9075
9076 /*
9077 * Platform independent DR routines
9078 */
9079
9080 static int
9081 ndi2errno(int n)
9082 {
9083 int err = 0;
9084
9085 switch (n) {
9086 case NDI_NOMEM:
9087 err = ENOMEM;
9088 break;
9089 case NDI_BUSY:
9090 err = EBUSY;
9091 break;
9092 case NDI_FAULT:
9093 err = EFAULT;
9094 break;
9095 case NDI_FAILURE:
9096 err = EIO;
9097 break;
9098 case NDI_SUCCESS:
9099 break;
9100 case NDI_BADHANDLE:
9101 default:
9102 err = EINVAL;
9103 break;
9104 }
9105 return (err);
9106 }
9107
9108 /*
9109 * Prom tree node list
9110 */
struct ptnode {
	pnode_t nodeid;		/* PROM node identifier */
	struct ptnode *next;	/* next entry in the singly-linked list */
};
9115
9116 /*
9117 * Prom tree walk arg
9118 */
struct pta {
	dev_info_t *pdip;	/* parent under which branches are created */
	devi_branch_t *bp;	/* caller's branch description/callbacks */
	uint_t flags;		/* DEVI_BRANCH_* flags */
	dev_info_t *fdip;	/* dip handed back by branch configure */
	struct ptnode *head;	/* list of PROM nodes selected by the walk */
};
9126
9127 static void
9128 visit_node(pnode_t nodeid, struct pta *ap)
9129 {
9130 struct ptnode **nextp;
9131 int (*select)(pnode_t, void *, uint_t);
9132
9133 ASSERT(nodeid != OBP_NONODE && nodeid != OBP_BADNODE);
9134
9135 select = ap->bp->create.prom_branch_select;
9136
9137 ASSERT(select);
9138
9139 if (select(nodeid, ap->bp->arg, 0) == DDI_SUCCESS) {
9140
9141 for (nextp = &ap->head; *nextp; nextp = &(*nextp)->next)
9142 ;
9143
9144 *nextp = kmem_zalloc(sizeof (struct ptnode), KM_SLEEP);
9145
9146 (*nextp)->nodeid = nodeid;
9147 }
9148
9149 if ((ap->flags & DEVI_BRANCH_CHILD) == DEVI_BRANCH_CHILD)
9150 return;
9151
9152 nodeid = prom_childnode(nodeid);
9153 while (nodeid != OBP_NONODE && nodeid != OBP_BADNODE) {
9154 visit_node(nodeid, ap);
9155 nodeid = prom_nextnode(nodeid);
9156 }
9157 }
9158
9159 /*
9160 * NOTE: The caller of this function must check for device contracts
9161 * or LDI callbacks against this dip before setting the dip offline.
9162 */
9163 static int
9164 set_infant_dip_offline(dev_info_t *dip, void *arg)
9165 {
9166 char *path = (char *)arg;
9167
9168 ASSERT(dip);
9169 ASSERT(arg);
9170
9171 if (i_ddi_node_state(dip) >= DS_ATTACHED) {
9172 (void) ddi_pathname(dip, path);
9173 cmn_err(CE_WARN, "Attempt to set offline flag on attached "
9174 "node: %s", path);
9175 return (DDI_FAILURE);
9176 }
9177
9178 mutex_enter(&(DEVI(dip)->devi_lock));
9179 if (!DEVI_IS_DEVICE_OFFLINE(dip))
9180 DEVI_SET_DEVICE_OFFLINE(dip);
9181 mutex_exit(&(DEVI(dip)->devi_lock));
9182
9183 return (DDI_SUCCESS);
9184 }
9185
typedef struct result {
	char *path;	/* scratch buffer for device pathnames */
	int result;	/* accumulated DDI_SUCCESS/DDI_FAILURE status */
} result_t;
9190
9191 static int
9192 dip_set_offline(dev_info_t *dip, void *arg)
9193 {
9194 int end;
9195 result_t *resp = (result_t *)arg;
9196
9197 ASSERT(dip);
9198 ASSERT(resp);
9199
9200 /*
9201 * We stop the walk if e_ddi_offline_notify() returns
9202 * failure, because this implies that one or more consumers
9203 * (either LDI or contract based) has blocked the offline.
9204 * So there is no point in conitnuing the walk
9205 */
9206 if (e_ddi_offline_notify(dip) == DDI_FAILURE) {
9207 resp->result = DDI_FAILURE;
9208 return (DDI_WALK_TERMINATE);
9209 }
9210
9211 /*
9212 * If set_infant_dip_offline() returns failure, it implies
9213 * that we failed to set a particular dip offline. This
9214 * does not imply that the offline as a whole should fail.
9215 * We want to do the best we can, so we continue the walk.
9216 */
9217 if (set_infant_dip_offline(dip, resp->path) == DDI_SUCCESS)
9218 end = DDI_SUCCESS;
9219 else
9220 end = DDI_FAILURE;
9221
9222 e_ddi_offline_finalize(dip, end);
9223
9224 return (DDI_WALK_CONTINUE);
9225 }
9226
9227 /*
9228 * The call to e_ddi_offline_notify() exists for the
9229 * unlikely error case that a branch we are trying to
9230 * create already exists and has device contracts or LDI
9231 * event callbacks against it.
9232 *
9233 * We allow create to succeed for such branches only if
9234 * no constraints block the offline.
9235 */
9236 static int
9237 branch_set_offline(dev_info_t *dip, char *path)
9238 {
9239 int circ;
9240 int end;
9241 result_t res;
9242
9243
9244 if (e_ddi_offline_notify(dip) == DDI_FAILURE) {
9245 return (DDI_FAILURE);
9246 }
9247
9248 if (set_infant_dip_offline(dip, path) == DDI_SUCCESS)
9249 end = DDI_SUCCESS;
9250 else
9251 end = DDI_FAILURE;
9252
9253 e_ddi_offline_finalize(dip, end);
9254
9255 if (end == DDI_FAILURE)
9256 return (DDI_FAILURE);
9257
9258 res.result = DDI_SUCCESS;
9259 res.path = path;
9260
9261 ndi_devi_enter(dip, &circ);
9262 ddi_walk_devs(ddi_get_child(dip), dip_set_offline, &res);
9263 ndi_devi_exit(dip, circ);
9264
9265 return (res.result);
9266 }
9267
/*
 * prom_tree_access() callback: create held devinfo branches for the
 * PROM nodes under ap->pdip that were accepted by the caller's
 * prom_branch_select() function.  Newly created branches are set
 * offline; DEVI_BRANCH_CONFIGURE additionally configures each one.
 * Returns 0 or an errno value (the first error encountered; later
 * nodes are still processed).
 */
/*ARGSUSED*/
static int
create_prom_branch(void *arg, int has_changed)
{
	int circ;
	int exists, rv;
	pnode_t nodeid;
	struct ptnode *tnp;
	dev_info_t *dip;
	struct pta *ap = arg;
	devi_branch_t *bp;
	char *path;

	ASSERT(ap);
	ASSERT(ap->fdip == NULL);
	ASSERT(ap->pdip && ndi_dev_is_prom_node(ap->pdip));

	bp = ap->bp;

	nodeid = ddi_get_nodeid(ap->pdip);
	if (nodeid == OBP_NONODE || nodeid == OBP_BADNODE) {
		cmn_err(CE_WARN, "create_prom_branch: invalid "
		    "nodeid: 0x%x", nodeid);
		return (EINVAL);
	}

	/* Collect the selected PROM child nodes into ap->head */
	ap->head = NULL;

	nodeid = prom_childnode(nodeid);
	while (nodeid != OBP_NONODE && nodeid != OBP_BADNODE) {
		visit_node(nodeid, ap);
		nodeid = prom_nextnode(nodeid);
	}

	if (ap->head == NULL)
		return (ENODEV);

	path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
	rv = 0;
	/* Process (and free) each collected node in turn */
	while ((tnp = ap->head) != NULL) {
		ap->head = tnp->next;

		ndi_devi_enter(ap->pdip, &circ);

		/*
		 * Check if the branch already exists.
		 */
		exists = 0;
		dip = e_ddi_nodeid_to_dip(tnp->nodeid);
		if (dip != NULL) {
			exists = 1;

			/* Parent is held busy, so release hold */
			ndi_rele_devi(dip);
#ifdef DEBUG
			cmn_err(CE_WARN, "create_prom_branch: dip(%p) exists"
			    " for nodeid 0x%x", (void *)dip, tnp->nodeid);
#endif
		} else {
			dip = i_ddi_create_branch(ap->pdip, tnp->nodeid);
		}

		kmem_free(tnp, sizeof (struct ptnode));

		/*
		 * Hold the branch if it is not already held
		 */
		if (dip && !exists) {
			e_ddi_branch_hold(dip);
		}

		ASSERT(dip == NULL || e_ddi_branch_held(dip));

		/*
		 * Set all dips in the newly created branch offline so that
		 * only a "configure" operation can attach
		 * the branch
		 */
		if (dip == NULL || branch_set_offline(dip, path)
		    == DDI_FAILURE) {
			ndi_devi_exit(ap->pdip, circ);
			rv = EIO;
			continue;
		}

		ASSERT(ddi_get_parent(dip) == ap->pdip);

		ndi_devi_exit(ap->pdip, circ);

		if (ap->flags & DEVI_BRANCH_CONFIGURE) {
			int error = e_ddi_branch_configure(dip, &ap->fdip, 0);
			if (error && rv == 0)
				rv = error;
		}

		/*
		 * Invoke devi_branch_callback() (if it exists) only for
		 * newly created branches
		 */
		if (bp->devi_branch_callback && !exists)
			bp->devi_branch_callback(dip, bp->arg, 0);
	}

	kmem_free(path, MAXPATHLEN);

	return (rv);
}
9375
/*
 * Create a single self-identifying (SID) devinfo node under pdip and,
 * depending on the DDI_WALK_* code returned by the caller's
 * sid_branch_create() callback, recursively create its children.
 * The parent must be busy-held on entry.  A non-NULL rdipp marks this
 * node as a branch root (DEVI_BRANCH_ROOT) and returns the created
 * dip through it.  The new dip is set offline before returning.
 * Returns a DDI_WALK_* code that directs sibling creation in the
 * caller.
 */
static int
sid_node_create(dev_info_t *pdip, devi_branch_t *bp, dev_info_t **rdipp)
{
	int rv, circ, len;
	int i, flags, ret;
	dev_info_t *dip;
	char *nbuf;
	char *path;
	static const char *noname = "<none>";

	ASSERT(pdip);
	ASSERT(DEVI_BUSY_OWNED(pdip));

	flags = 0;

	/*
	 * Creating the root of a branch ?
	 */
	if (rdipp) {
		*rdipp = NULL;
		flags = DEVI_BRANCH_ROOT;
	}

	/* Allocate a placeholder node; the callback fills in properties */
	ndi_devi_alloc_sleep(pdip, (char *)noname, DEVI_SID_NODEID, &dip);
	rv = bp->create.sid_branch_create(dip, bp->arg, flags);

	nbuf = kmem_alloc(OBP_MAXDRVNAME, KM_SLEEP);

	if (rv == DDI_WALK_ERROR) {
		cmn_err(CE_WARN, "e_ddi_branch_create: Error setting"
		    " properties on devinfo node %p", (void *)dip);
		goto fail;
	}

	/* The callback must have set a "name" property on the node */
	len = OBP_MAXDRVNAME;
	if (ddi_getlongprop_buf(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "name", nbuf, &len)
	    != DDI_PROP_SUCCESS) {
		cmn_err(CE_WARN, "e_ddi_branch_create: devinfo node %p has"
		    "no name property", (void *)dip);
		goto fail;
	}

	ASSERT(i_ddi_node_state(dip) == DS_PROTO);
	if (ndi_devi_set_nodename(dip, nbuf, 0) != NDI_SUCCESS) {
		cmn_err(CE_WARN, "e_ddi_branch_create: cannot set name (%s)"
		    " for devinfo node %p", nbuf, (void *)dip);
		goto fail;
	}

	kmem_free(nbuf, OBP_MAXDRVNAME);

	/*
	 * Ignore bind failures just like boot does
	 */
	(void) ndi_devi_bind_driver(dip, 0);

	switch (rv) {
	case DDI_WALK_CONTINUE:
	case DDI_WALK_PRUNESIB:
		ndi_devi_enter(dip, &circ);

		/* Create children of dip until one declines to continue */
		i = DDI_WALK_CONTINUE;
		for (; i == DDI_WALK_CONTINUE; ) {
			i = sid_node_create(dip, bp, NULL);
		}

		ASSERT(i == DDI_WALK_ERROR || i == DDI_WALK_PRUNESIB);
		if (i == DDI_WALK_ERROR)
			rv = i;
		/*
		 * If PRUNESIB stop creating siblings
		 * of dip's child. Subsequent walk behavior
		 * is determined by rv returned by dip.
		 */

		ndi_devi_exit(dip, circ);
		break;
	case DDI_WALK_TERMINATE:
		/*
		 * Don't create children and ask our parent
		 * to not create siblings either.
		 */
		rv = DDI_WALK_PRUNESIB;
		break;
	case DDI_WALK_PRUNECHILD:
		/*
		 * Don't create children, but ask parent to continue
		 * with siblings.
		 */
		rv = DDI_WALK_CONTINUE;
		break;
	default:
		ASSERT(0);
		break;
	}

	if (rdipp)
		*rdipp = dip;

	/*
	 * Set device offline - only the "configure" op should cause an attach.
	 * Note that it is safe to set the dip offline without checking
	 * for either device contract or layered driver (LDI) based constraints
	 * since there cannot be any contracts or LDI opens of this device.
	 * This is because this node is a newly created dip with the parent busy
	 * held, so no other thread can come in and attach this dip. A dip that
	 * has never been attached cannot have contracts since by definition
	 * a device contract (an agreement between a process and a device minor
	 * node) can only be created against a device that has minor nodes
	 * i.e is attached. Similarly an LDI open will only succeed if the
	 * dip is attached. We assert below that the dip is not attached.
	 */
	ASSERT(i_ddi_node_state(dip) < DS_ATTACHED);
	path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
	ret = set_infant_dip_offline(dip, path);
	ASSERT(ret == DDI_SUCCESS);
	kmem_free(path, MAXPATHLEN);

	return (rv);
fail:
	(void) ndi_devi_free(dip);
	kmem_free(nbuf, OBP_MAXDRVNAME);
	return (DDI_WALK_ERROR);
}
9501
/*
 * Create one or more held SID branches under pdip by repeatedly
 * calling sid_node_create() until the creation callback stops
 * returning DDI_WALK_CONTINUE.  DEVI_BRANCH_CONFIGURE additionally
 * configures each branch.  Returns 0 or an errno value.
 */
static int
create_sid_branch(
	dev_info_t *pdip,
	devi_branch_t *bp,
	dev_info_t **dipp,
	uint_t flags)
{
	int rv = 0, state = DDI_WALK_CONTINUE;
	dev_info_t *rdip;

	while (state == DDI_WALK_CONTINUE) {
		int circ;

		ndi_devi_enter(pdip, &circ);

		/* sid_node_create() sets rdip (NULL on early failure) */
		state = sid_node_create(pdip, bp, &rdip);
		if (rdip == NULL) {
			ndi_devi_exit(pdip, circ);
			ASSERT(state == DDI_WALK_ERROR);
			break;
		}

		e_ddi_branch_hold(rdip);

		ndi_devi_exit(pdip, circ);

		if (flags & DEVI_BRANCH_CONFIGURE) {
			int error = e_ddi_branch_configure(rdip, dipp, 0);
			if (error && rv == 0)
				rv = error;
		}

		/*
		 * devi_branch_callback() is optional
		 */
		if (bp->devi_branch_callback)
			bp->devi_branch_callback(rdip, bp->arg, 0);
	}

	ASSERT(state == DDI_WALK_ERROR || state == DDI_WALK_PRUNESIB);

	return (state == DDI_WALK_ERROR ? EIO : rv);
}
9545
9546 int
9547 e_ddi_branch_create(
9548 dev_info_t *pdip,
9549 devi_branch_t *bp,
9550 dev_info_t **dipp,
9551 uint_t flags)
9552 {
9553 int prom_devi, sid_devi, error;
9554
9555 if (pdip == NULL || bp == NULL || bp->type == 0)
9556 return (EINVAL);
9557
9558 prom_devi = (bp->type == DEVI_BRANCH_PROM) ? 1 : 0;
9559 sid_devi = (bp->type == DEVI_BRANCH_SID) ? 1 : 0;
9560
9561 if (prom_devi && bp->create.prom_branch_select == NULL)
9562 return (EINVAL);
9563 else if (sid_devi && bp->create.sid_branch_create == NULL)
9564 return (EINVAL);
9565 else if (!prom_devi && !sid_devi)
9566 return (EINVAL);
9567
9568 if (flags & DEVI_BRANCH_EVENT)
9569 return (EINVAL);
9570
9571 if (prom_devi) {
9572 struct pta pta = {0};
9573
9574 pta.pdip = pdip;
9575 pta.bp = bp;
9576 pta.flags = flags;
9577
9578 error = prom_tree_access(create_prom_branch, &pta, NULL);
9579
9580 if (dipp)
9581 *dipp = pta.fdip;
9582 else if (pta.fdip)
9583 ndi_rele_devi(pta.fdip);
9584 } else {
9585 error = create_sid_branch(pdip, bp, dipp, flags);
9586 }
9587
9588 return (error);
9589 }
9590
9591 int
9592 e_ddi_branch_configure(dev_info_t *rdip, dev_info_t **dipp, uint_t flags)
9593 {
9594 int rv;
9595 char *devnm;
9596 dev_info_t *pdip;
9597
9598 if (dipp)
9599 *dipp = NULL;
9600
9601 if (rdip == NULL || flags != 0 || (flags & DEVI_BRANCH_EVENT))
9602 return (EINVAL);
9603
9604 pdip = ddi_get_parent(rdip);
9605
9606 ndi_hold_devi(pdip);
9607
9608 if (!e_ddi_branch_held(rdip)) {
9609 ndi_rele_devi(pdip);
9610 cmn_err(CE_WARN, "e_ddi_branch_configure: "
9611 "dip(%p) not held", (void *)rdip);
9612 return (EINVAL);
9613 }
9614
9615 if (i_ddi_node_state(rdip) < DS_INITIALIZED) {
9616 /*
9617 * First attempt to bind a driver. If we fail, return
9618 * success (On some platforms, dips for some device
9619 * types (CPUs) may not have a driver)
9620 */
9621 if (ndi_devi_bind_driver(rdip, 0) != NDI_SUCCESS) {
9622 ndi_rele_devi(pdip);
9623 return (0);
9624 }
9625
9626 if (ddi_initchild(pdip, rdip) != DDI_SUCCESS) {
9627 rv = NDI_FAILURE;
9628 goto out;
9629 }
9630 }
9631
9632 ASSERT(i_ddi_node_state(rdip) >= DS_INITIALIZED);
9633
9634 devnm = kmem_alloc(MAXNAMELEN + 1, KM_SLEEP);
9635
9636 (void) ddi_deviname(rdip, devnm);
9637
9638 if ((rv = ndi_devi_config_one(pdip, devnm+1, &rdip,
9639 NDI_DEVI_ONLINE | NDI_CONFIG)) == NDI_SUCCESS) {
9640 /* release hold from ndi_devi_config_one() */
9641 ndi_rele_devi(rdip);
9642 }
9643
9644 kmem_free(devnm, MAXNAMELEN + 1);
9645 out:
9646 if (rv != NDI_SUCCESS && dipp && rdip) {
9647 ndi_hold_devi(rdip);
9648 *dipp = rdip;
9649 }
9650 ndi_rele_devi(pdip);
9651 return (ndi2errno(rv));
9652 }
9653
9654 void
9655 e_ddi_branch_hold(dev_info_t *rdip)
9656 {
9657 if (e_ddi_branch_held(rdip)) {
9658 cmn_err(CE_WARN, "e_ddi_branch_hold: branch already held");
9659 return;
9660 }
9661
9662 mutex_enter(&DEVI(rdip)->devi_lock);
9663 if ((DEVI(rdip)->devi_flags & DEVI_BRANCH_HELD) == 0) {
9664 DEVI(rdip)->devi_flags |= DEVI_BRANCH_HELD;
9665 DEVI(rdip)->devi_ref++;
9666 }
9667 ASSERT(DEVI(rdip)->devi_ref > 0);
9668 mutex_exit(&DEVI(rdip)->devi_lock);
9669 }
9670
9671 int
9672 e_ddi_branch_held(dev_info_t *rdip)
9673 {
9674 int rv = 0;
9675
9676 mutex_enter(&DEVI(rdip)->devi_lock);
9677 if ((DEVI(rdip)->devi_flags & DEVI_BRANCH_HELD) &&
9678 DEVI(rdip)->devi_ref > 0) {
9679 rv = 1;
9680 }
9681 mutex_exit(&DEVI(rdip)->devi_lock);
9682
9683 return (rv);
9684 }
9685
/*
 * Release the branch hold on rdip: clear DEVI_BRANCH_HELD and drop
 * the reference taken by e_ddi_branch_hold().
 */
void
e_ddi_branch_rele(dev_info_t *rdip)
{
	mutex_enter(&DEVI(rdip)->devi_lock);
	DEVI(rdip)->devi_flags &= ~DEVI_BRANCH_HELD;
	DEVI(rdip)->devi_ref--;
	mutex_exit(&DEVI(rdip)->devi_lock);
}
9694
/*
 * Unconfigure (and, with DEVI_BRANCH_DESTROY, remove) the held branch
 * rooted at rdip.  The caller must NOT hold the parent busy, because
 * devfs_clean() is run with the parent lock dropped.  If the dip
 * survives (not destroyed, or the operation failed) the branch hold
 * is re-acquired before returning.  Returns 0 or an errno value.
 */
int
e_ddi_branch_unconfigure(
	dev_info_t *rdip,
	dev_info_t **dipp,
	uint_t flags)
{
	int circ, rv;
	int destroy;
	char *devnm;
	uint_t nflags;
	dev_info_t *pdip;

	if (dipp)
		*dipp = NULL;

	if (rdip == NULL)
		return (EINVAL);

	pdip = ddi_get_parent(rdip);

	ASSERT(pdip);

	/*
	 * Check if caller holds pdip busy - can cause deadlocks during
	 * devfs_clean()
	 */
	if (DEVI_BUSY_OWNED(pdip)) {
		cmn_err(CE_WARN, "e_ddi_branch_unconfigure: failed: parent"
		    " devinfo node(%p) is busy held", (void *)pdip);
		return (EINVAL);
	}

	destroy = (flags & DEVI_BRANCH_DESTROY) ? 1 : 0;

	devnm = kmem_alloc(MAXNAMELEN + 1, KM_SLEEP);

	ndi_devi_enter(pdip, &circ);
	(void) ddi_deviname(rdip, devnm);
	ndi_devi_exit(pdip, circ);

	/*
	 * ddi_deviname() returns a component name with / prepended.
	 */
	(void) devfs_clean(pdip, devnm + 1, DV_CLEAN_FORCE);

	ndi_devi_enter(pdip, &circ);

	/*
	 * Recreate device name as it may have changed state (init/uninit)
	 * when parent busy lock was dropped for devfs_clean()
	 */
	(void) ddi_deviname(rdip, devnm);

	if (!e_ddi_branch_held(rdip)) {
		kmem_free(devnm, MAXNAMELEN + 1);
		ndi_devi_exit(pdip, circ);
		cmn_err(CE_WARN, "e_ddi_%s_branch: dip(%p) not held",
		    destroy ? "destroy" : "unconfigure", (void *)rdip);
		return (EINVAL);
	}

	/*
	 * Release hold on the branch. This is ok since we are holding the
	 * parent busy. If rdip is not removed, we must do a hold on the
	 * branch before returning.
	 */
	e_ddi_branch_rele(rdip);

	nflags = NDI_DEVI_OFFLINE;
	/*
	 * NOTE(review): "destroy" was already derived from
	 * (flags & DEVI_BRANCH_DESTROY) above, so the second test in
	 * this condition is redundant.
	 */
	if (destroy || (flags & DEVI_BRANCH_DESTROY)) {
		nflags |= NDI_DEVI_REMOVE;
		destroy = 1;
	} else {
		nflags |= NDI_UNCONFIG;		/* uninit but don't remove */
	}

	if (flags & DEVI_BRANCH_EVENT)
		nflags |= NDI_POST_EVENT;

	if (i_ddi_devi_attached(pdip) &&
	    (i_ddi_node_state(rdip) >= DS_INITIALIZED)) {
		rv = ndi_devi_unconfig_one(pdip, devnm+1, dipp, nflags);
	} else {
		rv = e_ddi_devi_unconfig(rdip, dipp, nflags);
		if (rv == NDI_SUCCESS) {
			ASSERT(!destroy || ddi_get_child(rdip) == NULL);
			rv = ndi_devi_offline(rdip, nflags);
		}
	}

	if (!destroy || rv != NDI_SUCCESS) {
		/* The dip still exists, so do a hold */
		e_ddi_branch_hold(rdip);
	}
out:	/* NOTE(review): no goto targets this label in this function */
	kmem_free(devnm, MAXNAMELEN + 1);
	ndi_devi_exit(pdip, circ);
	return (ndi2errno(rv));
}
9794
/*
 * Convenience wrapper: unconfigure and remove (DEVI_BRANCH_DESTROY)
 * the branch rooted at rdip.
 */
int
e_ddi_branch_destroy(dev_info_t *rdip, dev_info_t **dipp, uint_t flag)
{
	return (e_ddi_branch_unconfigure(rdip, dipp,
	    flag|DEVI_BRANCH_DESTROY));
}
9801
9802 /*
9803 * Number of chains for hash table
9804 */
9805 #define NUMCHAINS 17
9806
9807 /*
9808 * Devinfo busy arg
9809 */
struct devi_busy {
	int dv_total;		/* sum of dv_node vnode hold counts */
	int s_total;		/* sum of specfs open counts */
	mod_hash_t *dv_hash;	/* per-dip dv_node hold counts */
	mod_hash_t *s_hash;	/* per-dip snode open counts */
	int (*callback)(dev_info_t *, void *, uint_t);	/* user callback */
	void *arg;		/* user callback argument */
};
9818
9819 static int
9820 visit_dip(dev_info_t *dip, void *arg)
9821 {
9822 uintptr_t sbusy, dvbusy, ref;
9823 struct devi_busy *bsp = arg;
9824
9825 ASSERT(bsp->callback);
9826
9827 /*
9828 * A dip cannot be busy if its reference count is 0
9829 */
9830 if ((ref = e_ddi_devi_holdcnt(dip)) == 0) {
9831 return (bsp->callback(dip, bsp->arg, 0));
9832 }
9833
9834 if (mod_hash_find(bsp->dv_hash, dip, (mod_hash_val_t *)&dvbusy))
9835 dvbusy = 0;
9836
9837 /*
9838 * To catch device opens currently maintained on specfs common snodes.
9839 */
9840 if (mod_hash_find(bsp->s_hash, dip, (mod_hash_val_t *)&sbusy))
9841 sbusy = 0;
9842
9843 #ifdef DEBUG
9844 if (ref < sbusy || ref < dvbusy) {
9845 cmn_err(CE_WARN, "dip(%p): sopen = %lu, dvopen = %lu "
9846 "dip ref = %lu\n", (void *)dip, sbusy, dvbusy, ref);
9847 }
9848 #endif
9849
9850 dvbusy = (sbusy > dvbusy) ? sbusy : dvbusy;
9851
9852 return (bsp->callback(dip, bsp->arg, dvbusy));
9853 }
9854
9855 static int
9856 visit_snode(struct snode *sp, void *arg)
9857 {
9858 uintptr_t sbusy;
9859 dev_info_t *dip;
9860 int count;
9861 struct devi_busy *bsp = arg;
9862
9863 ASSERT(sp);
9864
9865 /*
9866 * The stable lock is held. This prevents
9867 * the snode and its associated dip from
9868 * going away.
9869 */
9870 dip = NULL;
9871 count = spec_devi_open_count(sp, &dip);
9872
9873 if (count <= 0)
9874 return (DDI_WALK_CONTINUE);
9875
9876 ASSERT(dip);
9877
9878 if (mod_hash_remove(bsp->s_hash, dip, (mod_hash_val_t *)&sbusy))
9879 sbusy = count;
9880 else
9881 sbusy += count;
9882
9883 if (mod_hash_insert(bsp->s_hash, dip, (mod_hash_val_t)sbusy)) {
9884 cmn_err(CE_WARN, "%s: s_hash insert failed: dip=0x%p, "
9885 "sbusy = %lu", "e_ddi_branch_referenced",
9886 (void *)dip, sbusy);
9887 }
9888
9889 bsp->s_total += count;
9890
9891 return (DDI_WALK_CONTINUE);
9892 }
9893
9894 static void
9895 visit_dvnode(struct dv_node *dv, void *arg)
9896 {
9897 uintptr_t dvbusy;
9898 uint_t count;
9899 struct vnode *vp;
9900 struct devi_busy *bsp = arg;
9901
9902 ASSERT(dv && dv->dv_devi);
9903
9904 vp = DVTOV(dv);
9905
9906 mutex_enter(&vp->v_lock);
9907 count = vp->v_count;
9908 mutex_exit(&vp->v_lock);
9909
9910 if (!count)
9911 return;
9912
9913 if (mod_hash_remove(bsp->dv_hash, dv->dv_devi,
9914 (mod_hash_val_t *)&dvbusy))
9915 dvbusy = count;
9916 else
9917 dvbusy += count;
9918
9919 if (mod_hash_insert(bsp->dv_hash, dv->dv_devi,
9920 (mod_hash_val_t)dvbusy)) {
9921 cmn_err(CE_WARN, "%s: dv_hash insert failed: dip=0x%p, "
9922 "dvbusy=%lu", "e_ddi_branch_referenced",
9923 (void *)dv->dv_devi, dvbusy);
9924 }
9925
9926 bsp->dv_total += count;
9927 }
9928
9929 /*
9930 * Returns reference count on success or -1 on failure.
9931 */
/*
 * Walk the held branch rooted at rdip, tallying devfs vnode holds and
 * specfs device opens per dip, and (optionally) report each dip's
 * busy count through the caller's callback.  The caller must NOT hold
 * the parent busy (deadlock risk in devfs_walk()).
 */
int
e_ddi_branch_referenced(
	dev_info_t *rdip,
	int (*callback)(dev_info_t *dip, void *arg, uint_t ref),
	void *arg)
{
	int circ;
	char *path;
	dev_info_t *pdip;
	struct devi_busy bsa = {0};

	ASSERT(rdip);

	path = kmem_alloc(MAXPATHLEN, KM_SLEEP);

	ndi_hold_devi(rdip);

	pdip = ddi_get_parent(rdip);

	ASSERT(pdip);

	/*
	 * Check if caller holds pdip busy - can cause deadlocks during
	 * devfs_walk()
	 */
	if (!e_ddi_branch_held(rdip) || DEVI_BUSY_OWNED(pdip)) {
		cmn_err(CE_WARN, "e_ddi_branch_referenced: failed: "
		    "devinfo branch(%p) not held or parent busy held",
		    (void *)rdip);
		ndi_rele_devi(rdip);
		kmem_free(path, MAXPATHLEN);
		return (-1);
	}

	ndi_devi_enter(pdip, &circ);
	(void) ddi_pathname(rdip, path);
	ndi_devi_exit(pdip, circ);

	bsa.dv_hash = mod_hash_create_ptrhash("dv_node busy hash", NUMCHAINS,
	    mod_hash_null_valdtor, sizeof (struct dev_info));

	bsa.s_hash = mod_hash_create_ptrhash("snode busy hash", NUMCHAINS,
	    mod_hash_null_valdtor, sizeof (struct snode));

	/* Gather per-dip devfs vnode hold counts for the branch */
	if (devfs_walk(path, visit_dvnode, &bsa)) {
		cmn_err(CE_WARN, "e_ddi_branch_referenced: "
		    "devfs walk failed for: %s", path);
		kmem_free(path, MAXPATHLEN);
		bsa.s_total = bsa.dv_total = -1;
		goto out;
	}

	kmem_free(path, MAXPATHLEN);

	/*
	 * Walk the snode table to detect device opens, which are currently
	 * maintained on specfs common snodes.
	 */
	spec_snode_walk(visit_snode, &bsa);

	if (callback == NULL)
		goto out;

	bsa.callback = callback;
	bsa.arg = arg;

	/* Report per-dip busy counts across the whole branch */
	if (visit_dip(rdip, &bsa) == DDI_WALK_CONTINUE) {
		ndi_devi_enter(rdip, &circ);
		ddi_walk_devs(ddi_get_child(rdip), visit_dip, &bsa);
		ndi_devi_exit(rdip, circ);
	}

out:
	ndi_rele_devi(rdip);
	mod_hash_destroy_ptrhash(bsa.s_hash);
	mod_hash_destroy_ptrhash(bsa.dv_hash);
	return (bsa.s_total > bsa.dv_total ? bsa.s_total : bsa.dv_total);
}