Print this page
NEX-15925 pseudonex, rootnex, and friends don't need to log useless device announcements
Reviewed by: Dan Fields <dan.fields@nexenta.com>
Reviewed by: Roman Strashkin <roman.strashkin@nexenta.com>
NEX-5164 backport illumos 6514 AS_* lock macros simplification
Reviewed by: Kevin Crowe <kevin.crowe@nexenta.com>
6514 AS_* lock macros simplification
Reviewed by: Piotr Jasiukajtis <estibi@me.com>
Reviewed by: Yuri Pankov <yuri.pankov@nexenta.com>
Reviewed by: Albert Lee <trisk@omniti.com>
Approved by: Dan McDonald <danmcd@omniti.com>
| Split |
Close |
| Expand all |
| Collapse all |
--- old/usr/src/uts/common/os/sunddi.c
+++ new/usr/src/uts/common/os/sunddi.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright (c) 1990, 2010, Oracle and/or its affiliates. All rights reserved.
24 24 * Copyright 2014 Garrett D'Amore <garrett@damore.org>
25 25 */
26 26
27 27 #include <sys/note.h>
28 28 #include <sys/types.h>
29 29 #include <sys/param.h>
30 30 #include <sys/systm.h>
31 31 #include <sys/buf.h>
32 32 #include <sys/uio.h>
33 33 #include <sys/cred.h>
34 34 #include <sys/poll.h>
35 35 #include <sys/mman.h>
36 36 #include <sys/kmem.h>
37 37 #include <sys/model.h>
38 38 #include <sys/file.h>
39 39 #include <sys/proc.h>
40 40 #include <sys/open.h>
41 41 #include <sys/user.h>
42 42 #include <sys/t_lock.h>
43 43 #include <sys/vm.h>
44 44 #include <sys/stat.h>
45 45 #include <vm/hat.h>
46 46 #include <vm/seg.h>
47 47 #include <vm/seg_vn.h>
48 48 #include <vm/seg_dev.h>
49 49 #include <vm/as.h>
50 50 #include <sys/cmn_err.h>
51 51 #include <sys/cpuvar.h>
52 52 #include <sys/debug.h>
53 53 #include <sys/autoconf.h>
54 54 #include <sys/sunddi.h>
55 55 #include <sys/esunddi.h>
56 56 #include <sys/sunndi.h>
57 57 #include <sys/kstat.h>
58 58 #include <sys/conf.h>
59 59 #include <sys/ddi_impldefs.h> /* include implementation structure defs */
60 60 #include <sys/ndi_impldefs.h> /* include prototypes */
61 61 #include <sys/ddi_periodic.h>
62 62 #include <sys/hwconf.h>
63 63 #include <sys/pathname.h>
64 64 #include <sys/modctl.h>
65 65 #include <sys/epm.h>
66 66 #include <sys/devctl.h>
67 67 #include <sys/callb.h>
68 68 #include <sys/cladm.h>
69 69 #include <sys/sysevent.h>
70 70 #include <sys/dacf_impl.h>
71 71 #include <sys/ddidevmap.h>
72 72 #include <sys/bootconf.h>
73 73 #include <sys/disp.h>
74 74 #include <sys/atomic.h>
75 75 #include <sys/promif.h>
76 76 #include <sys/instance.h>
77 77 #include <sys/sysevent/eventdefs.h>
78 78 #include <sys/task.h>
79 79 #include <sys/project.h>
80 80 #include <sys/taskq.h>
81 81 #include <sys/devpolicy.h>
82 82 #include <sys/ctype.h>
83 83 #include <net/if.h>
84 84 #include <sys/rctl.h>
85 85 #include <sys/zone.h>
86 86 #include <sys/clock_impl.h>
87 87 #include <sys/ddi.h>
88 88 #include <sys/modhash.h>
89 89 #include <sys/sunldi_impl.h>
90 90 #include <sys/fs/dv_node.h>
91 91 #include <sys/fs/snode.h>
92 92
93 93 extern pri_t minclsyspri;
94 94
95 95 extern rctl_hndl_t rc_project_locked_mem;
96 96 extern rctl_hndl_t rc_zone_locked_mem;
97 97
98 98 #ifdef DEBUG
99 99 static int sunddi_debug = 0;
100 100 #endif /* DEBUG */
101 101
102 102 /* ddi_umem_unlock miscellaneous */
103 103
104 104 static void i_ddi_umem_unlock_thread_start(void);
105 105
106 106 static kmutex_t ddi_umem_unlock_mutex; /* unlock list mutex */
107 107 static kcondvar_t ddi_umem_unlock_cv; /* unlock list block/unblock */
108 108 static kthread_t *ddi_umem_unlock_thread;
109 109 /*
110 110 * The ddi_umem_unlock FIFO list. NULL head pointer indicates empty list.
111 111 */
112 112 static struct ddi_umem_cookie *ddi_umem_unlock_head = NULL;
113 113 static struct ddi_umem_cookie *ddi_umem_unlock_tail = NULL;
114 114
115 115 /*
116 116 * DDI(Sun) Function and flag definitions:
117 117 */
118 118
119 119 #if defined(__x86)
120 120 /*
121 121 * Used to indicate which entries were chosen from a range.
122 122 */
123 123 char *chosen_reg = "chosen-reg";
124 124 #endif
125 125
126 126 /*
127 127 * Function used to ring system console bell
128 128 */
129 129 void (*ddi_console_bell_func)(clock_t duration);
130 130
131 131 /*
132 132 * Creating register mappings and handling interrupts:
133 133 */
134 134
135 135 /*
136 136 * Generic ddi_map: Call parent to fulfill request...
137 137 */
138 138
139 139 int
140 140 ddi_map(dev_info_t *dp, ddi_map_req_t *mp, off_t offset,
141 141 off_t len, caddr_t *addrp)
142 142 {
143 143 dev_info_t *pdip;
144 144
145 145 ASSERT(dp);
146 146 pdip = (dev_info_t *)DEVI(dp)->devi_parent;
147 147 return ((DEVI(pdip)->devi_ops->devo_bus_ops->bus_map)(pdip,
148 148 dp, mp, offset, len, addrp));
149 149 }
150 150
151 151 /*
152 152 * ddi_apply_range: (Called by nexi only.)
153 153 * Apply ranges in parent node dp, to child regspec rp...
154 154 */
155 155
156 156 int
157 157 ddi_apply_range(dev_info_t *dp, dev_info_t *rdip, struct regspec *rp)
158 158 {
159 159 return (i_ddi_apply_range(dp, rdip, rp));
160 160 }
161 161
162 162 int
163 163 ddi_map_regs(dev_info_t *dip, uint_t rnumber, caddr_t *kaddrp, off_t offset,
164 164 off_t len)
165 165 {
166 166 ddi_map_req_t mr;
167 167 #if defined(__x86)
168 168 struct {
169 169 int bus;
170 170 int addr;
171 171 int size;
172 172 } reg, *reglist;
173 173 uint_t length;
174 174 int rc;
175 175
176 176 /*
177 177 * get the 'registers' or the 'reg' property.
178 178 * We look up the reg property as an array of
179 179 * int's.
180 180 */
181 181 rc = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip,
182 182 DDI_PROP_DONTPASS, "registers", (int **)®list, &length);
183 183 if (rc != DDI_PROP_SUCCESS)
184 184 rc = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip,
185 185 DDI_PROP_DONTPASS, "reg", (int **)®list, &length);
186 186 if (rc == DDI_PROP_SUCCESS) {
187 187 /*
188 188 * point to the required entry.
189 189 */
190 190 reg = reglist[rnumber];
191 191 reg.addr += offset;
192 192 if (len != 0)
193 193 reg.size = len;
194 194 /*
195 195 * make a new property containing ONLY the required tuple.
196 196 */
197 197 if (ddi_prop_update_int_array(DDI_DEV_T_NONE, dip,
198 198 chosen_reg, (int *)®, (sizeof (reg)/sizeof (int)))
199 199 != DDI_PROP_SUCCESS) {
200 200 cmn_err(CE_WARN, "%s%d: cannot create '%s' "
201 201 "property", DEVI(dip)->devi_name,
202 202 DEVI(dip)->devi_instance, chosen_reg);
203 203 }
204 204 /*
205 205 * free the memory allocated by
206 206 * ddi_prop_lookup_int_array ().
207 207 */
208 208 ddi_prop_free((void *)reglist);
209 209 }
210 210 #endif
211 211 mr.map_op = DDI_MO_MAP_LOCKED;
212 212 mr.map_type = DDI_MT_RNUMBER;
213 213 mr.map_obj.rnumber = rnumber;
214 214 mr.map_prot = PROT_READ | PROT_WRITE;
215 215 mr.map_flags = DDI_MF_KERNEL_MAPPING;
216 216 mr.map_handlep = NULL;
217 217 mr.map_vers = DDI_MAP_VERSION;
218 218
219 219 /*
220 220 * Call my parent to map in my regs.
221 221 */
222 222
223 223 return (ddi_map(dip, &mr, offset, len, kaddrp));
224 224 }
225 225
226 226 void
227 227 ddi_unmap_regs(dev_info_t *dip, uint_t rnumber, caddr_t *kaddrp, off_t offset,
228 228 off_t len)
229 229 {
230 230 ddi_map_req_t mr;
231 231
232 232 mr.map_op = DDI_MO_UNMAP;
233 233 mr.map_type = DDI_MT_RNUMBER;
234 234 mr.map_flags = DDI_MF_KERNEL_MAPPING;
235 235 mr.map_prot = PROT_READ | PROT_WRITE; /* who cares? */
236 236 mr.map_obj.rnumber = rnumber;
237 237 mr.map_handlep = NULL;
238 238 mr.map_vers = DDI_MAP_VERSION;
239 239
240 240 /*
241 241 * Call my parent to unmap my regs.
242 242 */
243 243
244 244 (void) ddi_map(dip, &mr, offset, len, kaddrp);
245 245 *kaddrp = (caddr_t)0;
246 246 #if defined(__x86)
247 247 (void) ddi_prop_remove(DDI_DEV_T_NONE, dip, chosen_reg);
248 248 #endif
249 249 }
250 250
251 251 int
252 252 ddi_bus_map(dev_info_t *dip, dev_info_t *rdip, ddi_map_req_t *mp,
253 253 off_t offset, off_t len, caddr_t *vaddrp)
254 254 {
255 255 return (i_ddi_bus_map(dip, rdip, mp, offset, len, vaddrp));
256 256 }
257 257
258 258 /*
259 259 * nullbusmap: The/DDI default bus_map entry point for nexi
260 260 * not conforming to the reg/range paradigm (i.e. scsi, etc.)
261 261 * with no HAT/MMU layer to be programmed at this level.
262 262 *
263 263 * If the call is to map by rnumber, return an error,
264 264 * otherwise pass anything else up the tree to my parent.
265 265 */
266 266 int
267 267 nullbusmap(dev_info_t *dip, dev_info_t *rdip, ddi_map_req_t *mp,
268 268 off_t offset, off_t len, caddr_t *vaddrp)
269 269 {
270 270 _NOTE(ARGUNUSED(rdip))
271 271 if (mp->map_type == DDI_MT_RNUMBER)
272 272 return (DDI_ME_UNSUPPORTED);
273 273
274 274 return (ddi_map(dip, mp, offset, len, vaddrp));
275 275 }
276 276
277 277 /*
278 278 * ddi_rnumber_to_regspec: Not for use by leaf drivers.
279 279 * Only for use by nexi using the reg/range paradigm.
280 280 */
281 281 struct regspec *
282 282 ddi_rnumber_to_regspec(dev_info_t *dip, int rnumber)
283 283 {
284 284 return (i_ddi_rnumber_to_regspec(dip, rnumber));
285 285 }
286 286
287 287
288 288 /*
289 289 * Note that we allow the dip to be nil because we may be called
290 290 * prior even to the instantiation of the devinfo tree itself - all
291 291 * regular leaf and nexus drivers should always use a non-nil dip!
292 292 *
293 293 * We treat peek in a somewhat cavalier fashion .. assuming that we'll
294 294 * simply get a synchronous fault as soon as we touch a missing address.
295 295 *
296 296 * Poke is rather more carefully handled because we might poke to a write
297 297 * buffer, "succeed", then only find some time later that we got an
298 298 * asynchronous fault that indicated that the address we were writing to
299 299 * was not really backed by hardware.
300 300 */
301 301
302 302 static int
303 303 i_ddi_peekpoke(dev_info_t *devi, ddi_ctl_enum_t cmd, size_t size,
304 304 void *addr, void *value_p)
305 305 {
306 306 union {
307 307 uint64_t u64;
308 308 uint32_t u32;
309 309 uint16_t u16;
310 310 uint8_t u8;
311 311 } peekpoke_value;
312 312
313 313 peekpoke_ctlops_t peekpoke_args;
314 314 uint64_t dummy_result;
315 315 int rval;
316 316
317 317 /* Note: size is assumed to be correct; it is not checked. */
318 318 peekpoke_args.size = size;
319 319 peekpoke_args.dev_addr = (uintptr_t)addr;
320 320 peekpoke_args.handle = NULL;
321 321 peekpoke_args.repcount = 1;
322 322 peekpoke_args.flags = 0;
323 323
324 324 if (cmd == DDI_CTLOPS_POKE) {
325 325 switch (size) {
326 326 case sizeof (uint8_t):
327 327 peekpoke_value.u8 = *(uint8_t *)value_p;
328 328 break;
329 329 case sizeof (uint16_t):
330 330 peekpoke_value.u16 = *(uint16_t *)value_p;
331 331 break;
332 332 case sizeof (uint32_t):
333 333 peekpoke_value.u32 = *(uint32_t *)value_p;
334 334 break;
335 335 case sizeof (uint64_t):
336 336 peekpoke_value.u64 = *(uint64_t *)value_p;
337 337 break;
338 338 }
339 339 }
340 340
341 341 peekpoke_args.host_addr = (uintptr_t)&peekpoke_value.u64;
342 342
343 343 if (devi != NULL)
344 344 rval = ddi_ctlops(devi, devi, cmd, &peekpoke_args,
345 345 &dummy_result);
346 346 else
347 347 rval = peekpoke_mem(cmd, &peekpoke_args);
348 348
349 349 /*
350 350 * A NULL value_p is permitted by ddi_peek(9F); discard the result.
351 351 */
352 352 if ((cmd == DDI_CTLOPS_PEEK) & (value_p != NULL)) {
353 353 switch (size) {
354 354 case sizeof (uint8_t):
355 355 *(uint8_t *)value_p = peekpoke_value.u8;
356 356 break;
357 357 case sizeof (uint16_t):
358 358 *(uint16_t *)value_p = peekpoke_value.u16;
359 359 break;
360 360 case sizeof (uint32_t):
361 361 *(uint32_t *)value_p = peekpoke_value.u32;
362 362 break;
363 363 case sizeof (uint64_t):
364 364 *(uint64_t *)value_p = peekpoke_value.u64;
365 365 break;
366 366 }
367 367 }
368 368
369 369 return (rval);
370 370 }
371 371
372 372 /*
373 373 * Keep ddi_peek() and ddi_poke() in case 3rd parties are calling this.
374 374 * they shouldn't be, but the 9f manpage kind of pseudo exposes it.
375 375 */
376 376 int
377 377 ddi_peek(dev_info_t *devi, size_t size, void *addr, void *value_p)
378 378 {
379 379 switch (size) {
380 380 case sizeof (uint8_t):
381 381 case sizeof (uint16_t):
382 382 case sizeof (uint32_t):
383 383 case sizeof (uint64_t):
384 384 break;
385 385 default:
386 386 return (DDI_FAILURE);
387 387 }
388 388
389 389 return (i_ddi_peekpoke(devi, DDI_CTLOPS_PEEK, size, addr, value_p));
390 390 }
391 391
392 392 int
393 393 ddi_poke(dev_info_t *devi, size_t size, void *addr, void *value_p)
394 394 {
395 395 switch (size) {
396 396 case sizeof (uint8_t):
397 397 case sizeof (uint16_t):
398 398 case sizeof (uint32_t):
399 399 case sizeof (uint64_t):
400 400 break;
401 401 default:
402 402 return (DDI_FAILURE);
403 403 }
404 404
405 405 return (i_ddi_peekpoke(devi, DDI_CTLOPS_POKE, size, addr, value_p));
406 406 }
407 407
408 408 int
409 409 ddi_peek8(dev_info_t *dip, int8_t *addr, int8_t *val_p)
410 410 {
411 411 return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr,
412 412 val_p));
413 413 }
414 414
415 415 int
416 416 ddi_peek16(dev_info_t *dip, int16_t *addr, int16_t *val_p)
417 417 {
418 418 return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr,
419 419 val_p));
420 420 }
421 421
422 422 int
423 423 ddi_peek32(dev_info_t *dip, int32_t *addr, int32_t *val_p)
424 424 {
425 425 return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr,
426 426 val_p));
427 427 }
428 428
429 429 int
430 430 ddi_peek64(dev_info_t *dip, int64_t *addr, int64_t *val_p)
431 431 {
432 432 return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr,
433 433 val_p));
434 434 }
435 435
436 436
/*
 * We need to separate the old interfaces from the new ones and leave them
 * in here for a while. Previous versions of the OS defined the new interfaces
 * to the old interfaces. This way we can fix things up so that we can
 * eventually remove these interfaces.
 * e.g. A 3rd party module/driver using ddi_peek8 and built against S10
 * or earlier will actually have a reference to ddi_peekc in the binary.
 */
#ifdef _ILP32
int
ddi_peekc(dev_info_t *dip, int8_t *addr, int8_t *val_p)
{
	const size_t width = sizeof (*val_p);

	return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, width, addr, val_p));
}

int
ddi_peeks(dev_info_t *dip, int16_t *addr, int16_t *val_p)
{
	const size_t width = sizeof (*val_p);

	return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, width, addr, val_p));
}

int
ddi_peekl(dev_info_t *dip, int32_t *addr, int32_t *val_p)
{
	const size_t width = sizeof (*val_p);

	return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, width, addr, val_p));
}

int
ddi_peekd(dev_info_t *dip, int64_t *addr, int64_t *val_p)
{
	const size_t width = sizeof (*val_p);

	return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, width, addr, val_p));
}
#endif /* _ILP32 */
474 474
475 475 int
476 476 ddi_poke8(dev_info_t *dip, int8_t *addr, int8_t val)
477 477 {
478 478 return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val));
479 479 }
480 480
481 481 int
482 482 ddi_poke16(dev_info_t *dip, int16_t *addr, int16_t val)
483 483 {
484 484 return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val));
485 485 }
486 486
487 487 int
488 488 ddi_poke32(dev_info_t *dip, int32_t *addr, int32_t val)
489 489 {
490 490 return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val));
491 491 }
492 492
493 493 int
494 494 ddi_poke64(dev_info_t *dip, int64_t *addr, int64_t val)
495 495 {
496 496 return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val));
497 497 }
498 498
/*
 * We need to separate the old interfaces from the new ones and leave them
 * in here for a while. Previous versions of the OS defined the new interfaces
 * to the old interfaces. This way we can fix things up so that we can
 * eventually remove these interfaces.
 * e.g. A 3rd party module/driver using ddi_poke8 and built against S10
 * or earlier will actually have a reference to ddi_pokec in the binary.
 */
#ifdef _ILP32
int
ddi_pokec(dev_info_t *dip, int8_t *addr, int8_t val)
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val),
	    addr, &val));
}

int
ddi_pokes(dev_info_t *dip, int16_t *addr, int16_t val)
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val),
	    addr, &val));
}

int
ddi_pokel(dev_info_t *dip, int32_t *addr, int32_t val)
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val),
	    addr, &val));
}

int
ddi_poked(dev_info_t *dip, int64_t *addr, int64_t val)
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val),
	    addr, &val));
}
#endif /* _ILP32 */
532 532
/*
 * ddi_peekpokeio() is used primarily by the mem drivers for moving
 * data to and from uio structures via peek and poke.  Note that we
 * use "internal" routines ddi_peek and ddi_poke to make this go
 * slightly faster, avoiding the call overhead ..
 *
 * rw selects direction: UIO_WRITE pokes uio data to the device at
 * addr; UIO_READ peeks device data into the uio.  xfersize is the
 * preferred access width (clamped to sizeof (long)); the loop drops
 * to narrower accesses whenever the remaining length or the current
 * address is insufficiently aligned.  Returns DDI_SUCCESS, or
 * DDI_FAILURE on the first peek/poke/uio error.
 */
int
ddi_peekpokeio(dev_info_t *devi, struct uio *uio, enum uio_rw rw,
    caddr_t addr, size_t len, uint_t xfersize)
{
	int64_t	ibuffer;	/* staging buffer for multi-byte accesses */
	int8_t w8;		/* staging byte for unaligned accesses */
	size_t sz;		/* width actually used this iteration */
	int o;

	/* Never transfer wider than the native word. */
	if (xfersize > sizeof (long))
		xfersize = sizeof (long);

	while (len != 0) {
		/* Odd length or odd address: fall back to byte access. */
		if ((len | (uintptr_t)addr) & 1) {
			sz = sizeof (int8_t);
			if (rw == UIO_WRITE) {
				if ((o = uwritec(uio)) == -1)
					return (DDI_FAILURE);
				if (ddi_poke8(devi, (int8_t *)addr,
				    (int8_t)o) != DDI_SUCCESS)
					return (DDI_FAILURE);
			} else {
				if (i_ddi_peekpoke(devi, DDI_CTLOPS_PEEK, sz,
				    (int8_t *)addr, &w8) != DDI_SUCCESS)
					return (DDI_FAILURE);
				if (ureadc(w8, uio))
					return (DDI_FAILURE);
			}
		} else {
			/*
			 * Pick the widest access <= xfersize for which
			 * both the remaining length and the address are
			 * aligned; each case falls through to the next
			 * narrower candidate on misalignment.
			 */
			switch (xfersize) {
			case sizeof (int64_t):
				if (((len | (uintptr_t)addr) &
				    (sizeof (int64_t) - 1)) == 0) {
					sz = xfersize;
					break;
				}
				/*FALLTHROUGH*/
			case sizeof (int32_t):
				if (((len | (uintptr_t)addr) &
				    (sizeof (int32_t) - 1)) == 0) {
					sz = xfersize;
					break;
				}
				/*FALLTHROUGH*/
			default:
				/*
				 * This still assumes that we might have an
				 * I/O bus out there that permits 16-bit
				 * transfers (and that it would be upset by
				 * 32-bit transfers from such locations).
				 */
				sz = sizeof (int16_t);
				break;
			}

			/* Read: device -> staging buffer -> uio. */
			if (rw == UIO_READ) {
				if (i_ddi_peekpoke(devi, DDI_CTLOPS_PEEK, sz,
				    addr, &ibuffer) != DDI_SUCCESS)
					return (DDI_FAILURE);
			}

			if (uiomove(&ibuffer, sz, rw, uio))
				return (DDI_FAILURE);

			/* Write: uio -> staging buffer -> device. */
			if (rw == UIO_WRITE) {
				if (i_ddi_peekpoke(devi, DDI_CTLOPS_POKE, sz,
				    addr, &ibuffer) != DDI_SUCCESS)
					return (DDI_FAILURE);
			}
		}
		addr += sz;
		len -= sz;
	}
	return (DDI_SUCCESS);
}
614 614
615 615 /*
616 616 * These routines are used by drivers that do layered ioctls
617 617 * On sparc, they're implemented in assembler to avoid spilling
618 618 * register windows in the common (copyin) case ..
619 619 */
620 620 #if !defined(__sparc)
621 621 int
622 622 ddi_copyin(const void *buf, void *kernbuf, size_t size, int flags)
623 623 {
624 624 if (flags & FKIOCTL)
625 625 return (kcopy(buf, kernbuf, size) ? -1 : 0);
626 626 return (copyin(buf, kernbuf, size));
627 627 }
628 628
629 629 int
630 630 ddi_copyout(const void *buf, void *kernbuf, size_t size, int flags)
631 631 {
632 632 if (flags & FKIOCTL)
633 633 return (kcopy(buf, kernbuf, size) ? -1 : 0);
634 634 return (copyout(buf, kernbuf, size));
635 635 }
636 636 #endif /* !__sparc */
637 637
638 638 /*
639 639 * Conversions in nexus pagesize units. We don't duplicate the
640 640 * 'nil dip' semantics of peek/poke because btopr/btop/ptob are DDI/DKI
641 641 * routines anyway.
642 642 */
643 643 unsigned long
644 644 ddi_btop(dev_info_t *dip, unsigned long bytes)
645 645 {
646 646 unsigned long pages;
647 647
648 648 (void) ddi_ctlops(dip, dip, DDI_CTLOPS_BTOP, &bytes, &pages);
649 649 return (pages);
650 650 }
651 651
652 652 unsigned long
653 653 ddi_btopr(dev_info_t *dip, unsigned long bytes)
654 654 {
655 655 unsigned long pages;
656 656
657 657 (void) ddi_ctlops(dip, dip, DDI_CTLOPS_BTOPR, &bytes, &pages);
658 658 return (pages);
659 659 }
660 660
661 661 unsigned long
662 662 ddi_ptob(dev_info_t *dip, unsigned long pages)
663 663 {
664 664 unsigned long bytes;
665 665
666 666 (void) ddi_ctlops(dip, dip, DDI_CTLOPS_PTOB, &pages, &bytes);
667 667 return (bytes);
668 668 }
669 669
670 670 unsigned int
671 671 ddi_enter_critical(void)
672 672 {
673 673 return ((uint_t)spl7());
674 674 }
675 675
676 676 void
677 677 ddi_exit_critical(unsigned int spl)
678 678 {
679 679 splx((int)spl);
680 680 }
681 681
682 682 /*
683 683 * Nexus ctlops punter
684 684 */
685 685
686 686 #if !defined(__sparc)
687 687 /*
688 688 * Request bus_ctl parent to handle a bus_ctl request
689 689 *
690 690 * (The sparc version is in sparc_ddi.s)
691 691 */
692 692 int
693 693 ddi_ctlops(dev_info_t *d, dev_info_t *r, ddi_ctl_enum_t op, void *a, void *v)
694 694 {
695 695 int (*fp)();
696 696
697 697 if (!d || !r)
698 698 return (DDI_FAILURE);
699 699
700 700 if ((d = (dev_info_t *)DEVI(d)->devi_bus_ctl) == NULL)
701 701 return (DDI_FAILURE);
702 702
703 703 fp = DEVI(d)->devi_ops->devo_bus_ops->bus_ctl;
704 704 return ((*fp)(d, r, op, a, v));
705 705 }
706 706
707 707 #endif
708 708
709 709 /*
710 710 * DMA/DVMA setup
711 711 */
712 712
713 713 #if !defined(__sparc)
714 714 /*
715 715 * Request bus_dma_ctl parent to fiddle with a dma request.
716 716 *
717 717 * (The sparc version is in sparc_subr.s)
718 718 */
719 719 int
720 720 ddi_dma_mctl(dev_info_t *dip, dev_info_t *rdip,
721 721 ddi_dma_handle_t handle, enum ddi_dma_ctlops request,
722 722 off_t *offp, size_t *lenp, caddr_t *objp, uint_t flags)
723 723 {
724 724 int (*fp)();
725 725
726 726 if (dip != ddi_root_node())
727 727 dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_ctl;
728 728 fp = DEVI(dip)->devi_ops->devo_bus_ops->bus_dma_ctl;
729 729 return ((*fp) (dip, rdip, handle, request, offp, lenp, objp, flags));
730 730 }
731 731 #endif
732 732
733 733 /*
734 734 * For all DMA control functions, call the DMA control
735 735 * routine and return status.
736 736 *
737 737 * Just plain assume that the parent is to be called.
738 738 * If a nexus driver or a thread outside the framework
739 739 * of a nexus driver or a leaf driver calls these functions,
740 740 * it is up to them to deal with the fact that the parent's
741 741 * bus_dma_ctl function will be the first one called.
742 742 */
743 743
744 744 #define HD ((ddi_dma_impl_t *)h)->dmai_rdip
745 745
746 746 /*
747 747 * This routine is left in place to satisfy link dependencies
748 748 * for any 3rd party nexus drivers that rely on it. It is never
749 749 * called, though.
750 750 */
751 751 /*ARGSUSED*/
752 752 int
753 753 ddi_dma_map(dev_info_t *dip, dev_info_t *rdip,
754 754 struct ddi_dma_req *dmareqp, ddi_dma_handle_t *handlep)
755 755 {
756 756 return (DDI_FAILURE);
757 757 }
758 758
759 759 #if !defined(__sparc)
760 760
761 761 /*
762 762 * The SPARC versions of these routines are done in assembler to
763 763 * save register windows, so they're in sparc_subr.s.
764 764 */
765 765
766 766 int
767 767 ddi_dma_allochdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_attr_t *attr,
768 768 int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *handlep)
769 769 {
770 770 int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_attr_t *,
771 771 int (*)(caddr_t), caddr_t, ddi_dma_handle_t *);
772 772
773 773 if (dip != ddi_root_node())
774 774 dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_allochdl;
775 775
776 776 funcp = DEVI(dip)->devi_ops->devo_bus_ops->bus_dma_allochdl;
777 777 return ((*funcp)(dip, rdip, attr, waitfp, arg, handlep));
778 778 }
779 779
780 780 int
781 781 ddi_dma_freehdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handlep)
782 782 {
783 783 int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t);
784 784
785 785 if (dip != ddi_root_node())
786 786 dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_allochdl;
787 787
788 788 funcp = DEVI(dip)->devi_ops->devo_bus_ops->bus_dma_freehdl;
789 789 return ((*funcp)(dip, rdip, handlep));
790 790 }
791 791
792 792 int
793 793 ddi_dma_bindhdl(dev_info_t *dip, dev_info_t *rdip,
794 794 ddi_dma_handle_t handle, struct ddi_dma_req *dmareq,
795 795 ddi_dma_cookie_t *cp, uint_t *ccountp)
796 796 {
797 797 int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t,
798 798 struct ddi_dma_req *, ddi_dma_cookie_t *, uint_t *);
799 799
800 800 if (dip != ddi_root_node())
801 801 dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_bindhdl;
802 802
803 803 funcp = DEVI(dip)->devi_ops->devo_bus_ops->bus_dma_bindhdl;
804 804 return ((*funcp)(dip, rdip, handle, dmareq, cp, ccountp));
805 805 }
806 806
807 807 int
808 808 ddi_dma_unbindhdl(dev_info_t *dip, dev_info_t *rdip,
809 809 ddi_dma_handle_t handle)
810 810 {
811 811 int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t);
812 812
813 813 if (dip != ddi_root_node())
814 814 dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_unbindhdl;
815 815
816 816 funcp = DEVI(dip)->devi_ops->devo_bus_ops->bus_dma_unbindhdl;
817 817 return ((*funcp)(dip, rdip, handle));
818 818 }
819 819
820 820
821 821 int
822 822 ddi_dma_flush(dev_info_t *dip, dev_info_t *rdip,
823 823 ddi_dma_handle_t handle, off_t off, size_t len,
824 824 uint_t cache_flags)
825 825 {
826 826 int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t,
827 827 off_t, size_t, uint_t);
828 828
829 829 if (dip != ddi_root_node())
830 830 dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_flush;
831 831
832 832 funcp = DEVI(dip)->devi_ops->devo_bus_ops->bus_dma_flush;
833 833 return ((*funcp)(dip, rdip, handle, off, len, cache_flags));
834 834 }
835 835
836 836 int
837 837 ddi_dma_win(dev_info_t *dip, dev_info_t *rdip,
838 838 ddi_dma_handle_t handle, uint_t win, off_t *offp,
839 839 size_t *lenp, ddi_dma_cookie_t *cookiep, uint_t *ccountp)
840 840 {
841 841 int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t,
842 842 uint_t, off_t *, size_t *, ddi_dma_cookie_t *, uint_t *);
843 843
844 844 if (dip != ddi_root_node())
845 845 dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_win;
846 846
847 847 funcp = DEVI(dip)->devi_ops->devo_bus_ops->bus_dma_win;
848 848 return ((*funcp)(dip, rdip, handle, win, offp, lenp,
849 849 cookiep, ccountp));
850 850 }
851 851
852 852 int
853 853 ddi_dma_sync(ddi_dma_handle_t h, off_t o, size_t l, uint_t whom)
854 854 {
855 855 ddi_dma_impl_t *hp = (ddi_dma_impl_t *)h;
856 856 dev_info_t *dip, *rdip;
857 857 int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t, off_t,
858 858 size_t, uint_t);
859 859
860 860 /*
861 861 * the DMA nexus driver will set DMP_NOSYNC if the
862 862 * platform does not require any sync operation. For
863 863 * example if the memory is uncached or consistent
864 864 * and without any I/O write buffers involved.
865 865 */
866 866 if ((hp->dmai_rflags & DMP_NOSYNC) == DMP_NOSYNC)
867 867 return (DDI_SUCCESS);
868 868
869 869 dip = rdip = hp->dmai_rdip;
870 870 if (dip != ddi_root_node())
871 871 dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_flush;
872 872 funcp = DEVI(dip)->devi_ops->devo_bus_ops->bus_dma_flush;
873 873 return ((*funcp)(dip, rdip, h, o, l, whom));
874 874 }
875 875
876 876 int
877 877 ddi_dma_unbind_handle(ddi_dma_handle_t h)
878 878 {
879 879 ddi_dma_impl_t *hp = (ddi_dma_impl_t *)h;
880 880 dev_info_t *dip, *rdip;
881 881 int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t);
882 882
883 883 dip = rdip = hp->dmai_rdip;
884 884 if (dip != ddi_root_node())
885 885 dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_unbindhdl;
886 886 funcp = DEVI(rdip)->devi_bus_dma_unbindfunc;
887 887 return ((*funcp)(dip, rdip, h));
888 888 }
889 889
890 890 #endif /* !__sparc */
891 891
892 892 /*
893 893 * DMA burst sizes, and transfer minimums
894 894 */
895 895
896 896 int
897 897 ddi_dma_burstsizes(ddi_dma_handle_t handle)
898 898 {
899 899 ddi_dma_impl_t *dimp = (ddi_dma_impl_t *)handle;
900 900
901 901 if (!dimp)
902 902 return (0);
903 903 else
904 904 return (dimp->dmai_burstsizes);
905 905 }
906 906
/*
 * Given two DMA attribute structures, apply the attributes
 * of one to the other, following the rules of attributes
 * and the wishes of the caller.
 *
 * The rules of DMA attribute structures are that you cannot
 * make things *less* restrictive as you apply one set
 * of attributes to another.
 *
 * Each field of 'attr' is merged in place with the corresponding
 * field of 'mod' in the restrictive direction: bounds tighten
 * (MAX of lows, MIN of highs), bitmasks intersect, and alignment/
 * granularity requirements grow.
 */
void
ddi_dma_attr_merge(ddi_dma_attr_t *attr, ddi_dma_attr_t *mod)
{
	/* Address window can only shrink. */
	attr->dma_attr_addr_lo =
	    MAX(attr->dma_attr_addr_lo, mod->dma_attr_addr_lo);
	attr->dma_attr_addr_hi =
	    MIN(attr->dma_attr_addr_hi, mod->dma_attr_addr_hi);
	attr->dma_attr_count_max =
	    MIN(attr->dma_attr_count_max, mod->dma_attr_count_max);
	/* Alignment requirements can only grow. */
	attr->dma_attr_align =
	    MAX(attr->dma_attr_align, mod->dma_attr_align);
	/* Only burst sizes supported by both remain. */
	attr->dma_attr_burstsizes =
	    (uint_t)(attr->dma_attr_burstsizes & mod->dma_attr_burstsizes);
	attr->dma_attr_minxfer =
	    maxbit(attr->dma_attr_minxfer, mod->dma_attr_minxfer);
	attr->dma_attr_maxxfer =
	    MIN(attr->dma_attr_maxxfer, mod->dma_attr_maxxfer);
	attr->dma_attr_seg = MIN(attr->dma_attr_seg, mod->dma_attr_seg);
	/* Scatter/gather list length is capped by the smaller limit. */
	attr->dma_attr_sgllen = MIN((uint_t)attr->dma_attr_sgllen,
	    (uint_t)mod->dma_attr_sgllen);
	attr->dma_attr_granular =
	    MAX(attr->dma_attr_granular, mod->dma_attr_granular);
}
940 940
941 941 /*
942 942 * mmap/segmap interface:
943 943 */
944 944
945 945 /*
946 946 * ddi_segmap: setup the default segment driver. Calls the drivers
947 947 * XXmmap routine to validate the range to be mapped.
948 948 * Return ENXIO of the range is not valid. Create
949 949 * a seg_dev segment that contains all of the
950 950 * necessary information and will reference the
951 951 * default segment driver routines. It returns zero
952 952 * on success or non-zero on failure.
953 953 */
954 954 int
955 955 ddi_segmap(dev_t dev, off_t offset, struct as *asp, caddr_t *addrp, off_t len,
956 956 uint_t prot, uint_t maxprot, uint_t flags, cred_t *credp)
957 957 {
958 958 extern int spec_segmap(dev_t, off_t, struct as *, caddr_t *,
959 959 off_t, uint_t, uint_t, uint_t, struct cred *);
960 960
961 961 return (spec_segmap(dev, offset, asp, addrp, len,
962 962 prot, maxprot, flags, credp));
963 963 }
964 964
965 965 /*
966 966 * ddi_map_fault: Resolve mappings at fault time. Used by segment
967 967 * drivers. Allows each successive parent to resolve
968 968 * address translations and add its mappings to the
969 969 * mapping list supplied in the page structure. It
970 970 * returns zero on success or non-zero on failure.
971 971 */
972 972
973 973 int
974 974 ddi_map_fault(dev_info_t *dip, struct hat *hat, struct seg *seg,
975 975 caddr_t addr, struct devpage *dp, pfn_t pfn, uint_t prot, uint_t lock)
976 976 {
977 977 return (i_ddi_map_fault(dip, dip, hat, seg, addr, dp, pfn, prot, lock));
978 978 }
979 979
980 980 /*
981 981 * ddi_device_mapping_check: Called from ddi_segmap_setup.
982 982 * Invokes platform specific DDI to determine whether attributes specified
983 983 * in attr(9s) are valid for the region of memory that will be made
984 984 * available for direct access to user process via the mmap(2) system call.
985 985 */
int
ddi_device_mapping_check(dev_t dev, ddi_device_acc_attr_t *accattrp,
    uint_t rnumber, uint_t *hat_flags)
{
	ddi_acc_handle_t handle;
	ddi_map_req_t mr;
	ddi_acc_hdl_t *hp;
	int result;
	dev_info_t *dip;

	/*
	 * we use e_ddi_hold_devi_by_dev to search for the devi. We
	 * release it immediately since it should already be held by
	 * a devfs vnode.
	 */
	if ((dip =
	    e_ddi_hold_devi_by_dev(dev, E_DDI_HOLD_DEVI_NOATTACH)) == NULL)
		return (-1);
	ddi_release_devi(dip);	/* for e_ddi_hold_devi_by_dev() */

	/*
	 * Allocate and initialize the common elements of data
	 * access handle.
	 */
	handle = impl_acc_hdl_alloc(KM_SLEEP, NULL);
	/* defensive: KM_SLEEP allocations are not expected to fail */
	if (handle == NULL)
		return (-1);

	hp = impl_acc_hdl_get(handle);
	hp->ah_vers = VERS_ACCHDL;
	hp->ah_dip = dip;
	hp->ah_rnumber = rnumber;
	hp->ah_offset = 0;		/* whole register set ... */
	hp->ah_len = 0;			/* ... (offset 0, length 0) */
	hp->ah_acc = *accattrp;

	/*
	 * Set up the mapping request and call to parent.
	 */
	mr.map_op = DDI_MO_MAP_HANDLE;
	mr.map_type = DDI_MT_RNUMBER;
	mr.map_obj.rnumber = rnumber;
	mr.map_prot = PROT_READ | PROT_WRITE;
	mr.map_flags = DDI_MF_KERNEL_MAPPING;
	mr.map_handlep = hp;
	mr.map_vers = DDI_MAP_VERSION;
	result = ddi_map(dip, &mr, 0, 0, NULL);

	/*
	 * Region must be mappable, pick up flags from the framework.
	 * Note: hat flags are copied out before the handle is freed,
	 * and before the ddi_map() result is inspected.
	 */
	*hat_flags = hp->ah_hat_flags;

	impl_acc_hdl_free(handle);

	/*
	 * check for end result.
	 */
	if (result != DDI_SUCCESS)
		return (-1);
	return (0);
}
1048 1048
1049 1049
1050 1050 /*
1051 1051 * Property functions: See also, ddipropdefs.h.
1052 1052 *
1053 1053 * These functions are the framework for the property functions,
1054 1054 * i.e. they support software defined properties. All implementation
 * specific property handling (i.e. self-identifying devices and
 * PROM defined properties) is handled in the implementation specific
 * functions (defined in ddi_implfuncs.h).
1058 1058 */
1059 1059
1060 1060 /*
1061 1061 * nopropop: Shouldn't be called, right?
1062 1062 */
int
nopropop(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op, int mod_flags,
    char *name, caddr_t valuep, int *lengthp)
{
	/*
	 * Stub prop_op(9E) entry point: unconditionally reports that
	 * the property was not found.  All arguments are ignored.
	 */
	_NOTE(ARGUNUSED(dev, dip, prop_op, mod_flags, name, valuep, lengthp))
	return (DDI_PROP_NOT_FOUND);
}
1070 1070
1071 1071 #ifdef DDI_PROP_DEBUG
/* Non-zero while DDI property debugging output is enabled. */
int ddi_prop_debug_flag = 0;

/*
 * Set the property-debugging state to `enable' and return the previous
 * setting.  A message is printed whenever either the old or the new
 * state is "enabled".
 */
int
ddi_prop_debug(int enable)
{
	int previous = ddi_prop_debug_flag;

	if (enable != 0 || previous != 0) {
		printf("ddi_prop_debug: debugging %s\n",
		    (enable != 0) ? "enabled" : "disabled");
	}
	ddi_prop_debug_flag = enable;
	return (previous);
}
1085 1085
1086 1086 #endif /* DDI_PROP_DEBUG */
1087 1087
1088 1088 /*
1089 1089 * Search a property list for a match, if found return pointer
1090 1090 * to matching prop struct, else return NULL.
1091 1091 */
1092 1092
1093 1093 ddi_prop_t *
1094 1094 i_ddi_prop_search(dev_t dev, char *name, uint_t flags, ddi_prop_t **list_head)
1095 1095 {
1096 1096 ddi_prop_t *propp;
1097 1097
1098 1098 /*
1099 1099 * find the property in child's devinfo:
1100 1100 * Search order defined by this search function is first matching
1101 1101 * property with input dev == DDI_DEV_T_ANY matching any dev or
1102 1102 * dev == propp->prop_dev, name == propp->name, and the correct
1103 1103 * data type as specified in the flags. If a DDI_DEV_T_NONE dev
1104 1104 * value made it this far then it implies a DDI_DEV_T_ANY search.
1105 1105 */
1106 1106 if (dev == DDI_DEV_T_NONE)
1107 1107 dev = DDI_DEV_T_ANY;
1108 1108
1109 1109 for (propp = *list_head; propp != NULL; propp = propp->prop_next) {
1110 1110
1111 1111 if (!DDI_STRSAME(propp->prop_name, name))
1112 1112 continue;
1113 1113
1114 1114 if ((dev != DDI_DEV_T_ANY) && (propp->prop_dev != dev))
1115 1115 continue;
1116 1116
1117 1117 if (((propp->prop_flags & flags) & DDI_PROP_TYPE_MASK) == 0)
1118 1118 continue;
1119 1119
1120 1120 return (propp);
1121 1121 }
1122 1122
1123 1123 return ((ddi_prop_t *)0);
1124 1124 }
1125 1125
1126 1126 /*
1127 1127 * Search for property within devnames structures
1128 1128 */
1129 1129 ddi_prop_t *
1130 1130 i_ddi_search_global_prop(dev_t dev, char *name, uint_t flags)
1131 1131 {
1132 1132 major_t major;
1133 1133 struct devnames *dnp;
1134 1134 ddi_prop_t *propp;
1135 1135
1136 1136 /*
1137 1137 * Valid dev_t value is needed to index into the
1138 1138 * correct devnames entry, therefore a dev_t
1139 1139 * value of DDI_DEV_T_ANY is not appropriate.
1140 1140 */
1141 1141 ASSERT(dev != DDI_DEV_T_ANY);
1142 1142 if (dev == DDI_DEV_T_ANY) {
1143 1143 return ((ddi_prop_t *)0);
1144 1144 }
1145 1145
1146 1146 major = getmajor(dev);
1147 1147 dnp = &(devnamesp[major]);
1148 1148
1149 1149 if (dnp->dn_global_prop_ptr == NULL)
1150 1150 return ((ddi_prop_t *)0);
1151 1151
1152 1152 LOCK_DEV_OPS(&dnp->dn_lock);
1153 1153
1154 1154 for (propp = dnp->dn_global_prop_ptr->prop_list;
1155 1155 propp != NULL;
1156 1156 propp = (ddi_prop_t *)propp->prop_next) {
1157 1157
1158 1158 if (!DDI_STRSAME(propp->prop_name, name))
1159 1159 continue;
1160 1160
1161 1161 if ((!(flags & DDI_PROP_ROOTNEX_GLOBAL)) &&
1162 1162 (!(flags & LDI_DEV_T_ANY)) && (propp->prop_dev != dev))
1163 1163 continue;
1164 1164
1165 1165 if (((propp->prop_flags & flags) & DDI_PROP_TYPE_MASK) == 0)
1166 1166 continue;
1167 1167
1168 1168 /* Property found, return it */
1169 1169 UNLOCK_DEV_OPS(&dnp->dn_lock);
1170 1170 return (propp);
1171 1171 }
1172 1172
1173 1173 UNLOCK_DEV_OPS(&dnp->dn_lock);
1174 1174 return ((ddi_prop_t *)0);
1175 1175 }
1176 1176
1177 1177 static char prop_no_mem_msg[] = "can't allocate memory for ddi property <%s>";
1178 1178
1179 1179 /*
1180 1180 * ddi_prop_search_global:
1181 1181 * Search the global property list within devnames
1182 1182 * for the named property. Return the encoded value.
1183 1183 */
1184 1184 static int
1185 1185 i_ddi_prop_search_global(dev_t dev, uint_t flags, char *name,
1186 1186 void *valuep, uint_t *lengthp)
1187 1187 {
1188 1188 ddi_prop_t *propp;
1189 1189 caddr_t buffer;
1190 1190
1191 1191 propp = i_ddi_search_global_prop(dev, name, flags);
1192 1192
1193 1193 /* Property NOT found, bail */
1194 1194 if (propp == (ddi_prop_t *)0)
1195 1195 return (DDI_PROP_NOT_FOUND);
1196 1196
1197 1197 if (propp->prop_flags & DDI_PROP_UNDEF_IT)
1198 1198 return (DDI_PROP_UNDEFINED);
1199 1199
1200 1200 if ((buffer = kmem_alloc(propp->prop_len,
1201 1201 (flags & DDI_PROP_CANSLEEP) ? KM_SLEEP : KM_NOSLEEP)) == NULL) {
1202 1202 cmn_err(CE_CONT, prop_no_mem_msg, name);
1203 1203 return (DDI_PROP_NO_MEMORY);
1204 1204 }
1205 1205
1206 1206 /*
1207 1207 * Return the encoded data
1208 1208 */
1209 1209 *(caddr_t *)valuep = buffer;
1210 1210 *lengthp = propp->prop_len;
1211 1211 bcopy(propp->prop_val, buffer, propp->prop_len);
1212 1212
1213 1213 return (DDI_PROP_SUCCESS);
1214 1214 }
1215 1215
1216 1216 /*
1217 1217 * ddi_prop_search_common: Lookup and return the encoded value
1218 1218 */
int
ddi_prop_search_common(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
    uint_t flags, char *name, void *valuep, uint_t *lengthp)
{
	ddi_prop_t	*propp;
	int		i;
	caddr_t		buffer;
	caddr_t		prealloc = NULL;	/* buffer allocated unlocked */
	int		plength = 0;		/* size of prealloc */
	dev_info_t	*pdip;
	int		(*bop)();

	/*
	 * This loop serves two purposes: it walks up the devinfo tree
	 * (dip = pdip at the bottom), and it restarts the search on the
	 * same node after devi_lock was dropped to perform a sleeping
	 * allocation for PROP_LEN_AND_VAL_ALLOC.
	 */
	/*CONSTANTCONDITION*/
	while (1) {

		mutex_enter(&(DEVI(dip)->devi_lock));


		/*
		 * find the property in child's devinfo:
		 * Search order is:
		 * 1. driver defined properties
		 * 2. system defined properties
		 * 3. driver global properties
		 * 4. boot defined properties
		 */

		propp = i_ddi_prop_search(dev, name, flags,
		    &(DEVI(dip)->devi_drv_prop_ptr));
		if (propp == NULL) {
			propp = i_ddi_prop_search(dev, name, flags,
			    &(DEVI(dip)->devi_sys_prop_ptr));
		}
		if ((propp == NULL) && DEVI(dip)->devi_global_prop_list) {
			propp = i_ddi_prop_search(dev, name, flags,
			    &DEVI(dip)->devi_global_prop_list->prop_list);
		}

		if (propp == NULL) {
			propp = i_ddi_prop_search(dev, name, flags,
			    &(DEVI(dip)->devi_hw_prop_ptr));
		}

		/*
		 * Software property found?
		 */
		if (propp != (ddi_prop_t *)0) {

			/*
			 * If explicit undefine, return now.
			 */
			if (propp->prop_flags & DDI_PROP_UNDEF_IT) {
				mutex_exit(&(DEVI(dip)->devi_lock));
				if (prealloc)
					kmem_free(prealloc, plength);
				return (DDI_PROP_UNDEFINED);
			}

			/*
			 * If we only want to know if it exists, return now
			 */
			if (prop_op == PROP_EXISTS) {
				mutex_exit(&(DEVI(dip)->devi_lock));
				ASSERT(prealloc == NULL);
				return (DDI_PROP_SUCCESS);
			}

			/*
			 * If length only request or prop length == 0,
			 * service request and return now.
			 */
			if ((prop_op == PROP_LEN) ||(propp->prop_len == 0)) {
				*lengthp = propp->prop_len;

				/*
				 * if prop_op is PROP_LEN_AND_VAL_ALLOC
				 * that means prop_len is 0, so set valuep
				 * also to NULL
				 */
				if (prop_op == PROP_LEN_AND_VAL_ALLOC)
					*(caddr_t *)valuep = NULL;

				mutex_exit(&(DEVI(dip)->devi_lock));
				if (prealloc)
					kmem_free(prealloc, plength);
				return (DDI_PROP_SUCCESS);
			}

			/*
			 * If LEN_AND_VAL_ALLOC and the request can sleep,
			 * drop the mutex, allocate the buffer, and go
			 * through the loop again.  If we already allocated
			 * the buffer, and the size of the property changed,
			 * keep trying...
			 */
			if ((prop_op == PROP_LEN_AND_VAL_ALLOC) &&
			    (flags & DDI_PROP_CANSLEEP)) {
				if (prealloc && (propp->prop_len != plength)) {
					kmem_free(prealloc, plength);
					prealloc = NULL;
				}
				if (prealloc == NULL) {
					plength = propp->prop_len;
					mutex_exit(&(DEVI(dip)->devi_lock));
					prealloc = kmem_alloc(plength,
					    KM_SLEEP);
					/* retry: size may have changed */
					continue;
				}
			}

			/*
			 * Allocate buffer, if required.  Either way,
			 * set `buffer' variable.
			 */
			i = *lengthp; /* Get callers length */
			*lengthp = propp->prop_len; /* Set callers length */

			switch (prop_op) {

			case PROP_LEN_AND_VAL_ALLOC:

				if (prealloc == NULL) {
					buffer = kmem_alloc(propp->prop_len,
					    KM_NOSLEEP);
				} else {
					buffer = prealloc;
				}

				if (buffer == NULL) {
					mutex_exit(&(DEVI(dip)->devi_lock));
					cmn_err(CE_CONT, prop_no_mem_msg, name);
					return (DDI_PROP_NO_MEMORY);
				}
				/* Set callers buf ptr */
				*(caddr_t *)valuep = buffer;
				break;

			case PROP_LEN_AND_VAL_BUF:

				if (propp->prop_len > (i)) {
					mutex_exit(&(DEVI(dip)->devi_lock));
					return (DDI_PROP_BUF_TOO_SMALL);
				}

				buffer = valuep; /* Get callers buf ptr */
				break;

			default:
				/*
				 * NOTE: PROP_EXISTS and PROP_LEN returned
				 * above, so only the two cases above are
				 * expected here; any other prop_op value
				 * would reach the bcopy() below with
				 * `buffer' uninitialized.
				 */
				break;
			}

			/*
			 * Do the copy.
			 */
			bcopy(propp->prop_val, buffer, propp->prop_len);
			mutex_exit(&(DEVI(dip)->devi_lock));
			return (DDI_PROP_SUCCESS);
		}

		mutex_exit(&(DEVI(dip)->devi_lock));
		if (prealloc)
			kmem_free(prealloc, plength);
		prealloc = NULL;

		/*
		 * Prop not found, call parent bus_ops to deal with possible
		 * h/w layer (possible PROM defined props, etc.) and to
		 * possibly ascend the hierarchy, if allowed by flags.
		 */
		pdip = (dev_info_t *)DEVI(dip)->devi_parent;

		/*
		 * One last call for the root driver PROM props?
		 */
		if (dip == ddi_root_node()) {
			return (ddi_bus_prop_op(dev, dip, dip, prop_op,
			    flags, name, valuep, (int *)lengthp));
		}

		/*
		 * We may have been called to check for properties
		 * within a single devinfo node that has no parent -
		 * see make_prop()
		 */
		if (pdip == NULL) {
			ASSERT((flags &
			    (DDI_PROP_DONTPASS | DDI_PROP_NOTPROM)) ==
			    (DDI_PROP_DONTPASS | DDI_PROP_NOTPROM));
			return (DDI_PROP_NOT_FOUND);
		}

		/*
		 * Instead of recursing, we do iterative calls up the tree.
		 * As a bit of optimization, skip the bus_op level if the
		 * node is a s/w node and if the parent's bus_prop_op function
		 * is `ddi_bus_prop_op', because we know that in this case,
		 * this function does nothing.
		 *
		 * 4225415: If the parent isn't attached, or the child
		 * hasn't been named by the parent yet, use the default
		 * ddi_bus_prop_op as a proxy for the parent.  This
		 * allows property lookups in any child/parent state to
		 * include 'prom' and inherited properties, even when
		 * there are no drivers attached to the child or parent.
		 */

		bop = ddi_bus_prop_op;
		if (i_ddi_devi_attached(pdip) &&
		    (i_ddi_node_state(dip) >= DS_INITIALIZED))
			bop = DEVI(pdip)->devi_ops->devo_bus_ops->bus_prop_op;

		i = DDI_PROP_NOT_FOUND;

		if ((bop != ddi_bus_prop_op) || ndi_dev_is_prom_node(dip)) {
			i = (*bop)(dev, pdip, dip, prop_op,
			    flags | DDI_PROP_DONTPASS,
			    name, valuep, lengthp);
		}

		if ((flags & DDI_PROP_DONTPASS) ||
		    (i != DDI_PROP_NOT_FOUND))
			return (i);

		/* ascend one level and try again */
		dip = pdip;
	}
	/*NOTREACHED*/
}
1446 1446
1447 1447
1448 1448 /*
1449 1449 * ddi_prop_op: The basic property operator for drivers.
1450 1450 *
1451 1451 * In ddi_prop_op, the type of valuep is interpreted based on prop_op:
1452 1452 *
1453 1453 * prop_op valuep
1454 1454 * ------ ------
1455 1455 *
1456 1456 * PROP_LEN <unused>
1457 1457 *
1458 1458 * PROP_LEN_AND_VAL_BUF Pointer to callers buffer
1459 1459 *
1460 1460 * PROP_LEN_AND_VAL_ALLOC Address of callers pointer (will be set to
1461 1461 * address of allocated buffer, if successful)
1462 1462 */
1463 1463 int
1464 1464 ddi_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op, int mod_flags,
1465 1465 char *name, caddr_t valuep, int *lengthp)
1466 1466 {
1467 1467 int i;
1468 1468
1469 1469 ASSERT((mod_flags & DDI_PROP_TYPE_MASK) == 0);
1470 1470
1471 1471 /*
1472 1472 * If this was originally an LDI prop lookup then we bail here.
1473 1473 * The reason is that the LDI property lookup interfaces first call
1474 1474 * a drivers prop_op() entry point to allow it to override
1475 1475 * properties. But if we've made it here, then the driver hasn't
1476 1476 * overriden any properties. We don't want to continue with the
1477 1477 * property search here because we don't have any type inforamtion.
1478 1478 * When we return failure, the LDI interfaces will then proceed to
1479 1479 * call the typed property interfaces to look up the property.
1480 1480 */
1481 1481 if (mod_flags & DDI_PROP_DYNAMIC)
1482 1482 return (DDI_PROP_NOT_FOUND);
1483 1483
1484 1484 /*
1485 1485 * check for pre-typed property consumer asking for typed property:
1486 1486 * see e_ddi_getprop_int64.
1487 1487 */
1488 1488 if (mod_flags & DDI_PROP_CONSUMER_TYPED)
1489 1489 mod_flags |= DDI_PROP_TYPE_INT64;
1490 1490 mod_flags |= DDI_PROP_TYPE_ANY;
1491 1491
1492 1492 i = ddi_prop_search_common(dev, dip, prop_op,
1493 1493 mod_flags, name, valuep, (uint_t *)lengthp);
1494 1494 if (i == DDI_PROP_FOUND_1275)
1495 1495 return (DDI_PROP_SUCCESS);
1496 1496 return (i);
1497 1497 }
1498 1498
1499 1499 /*
1500 1500 * ddi_prop_op_nblocks_blksize: The basic property operator for drivers that
1501 1501 * maintain size in number of blksize blocks. Provides a dynamic property
1502 1502 * implementation for size oriented properties based on nblocks64 and blksize
1503 1503 * values passed in by the driver. Fallback to ddi_prop_op if the nblocks64
1504 1504 * is too large. This interface should not be used with a nblocks64 that
1505 1505 * represents the driver's idea of how to represent unknown, if nblocks is
1506 1506 * unknown use ddi_prop_op.
1507 1507 */
1508 1508 int
1509 1509 ddi_prop_op_nblocks_blksize(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
1510 1510 int mod_flags, char *name, caddr_t valuep, int *lengthp,
1511 1511 uint64_t nblocks64, uint_t blksize)
1512 1512 {
1513 1513 uint64_t size64;
1514 1514 int blkshift;
1515 1515
1516 1516 /* convert block size to shift value */
1517 1517 ASSERT(BIT_ONLYONESET(blksize));
1518 1518 blkshift = highbit(blksize) - 1;
1519 1519
1520 1520 /*
1521 1521 * There is no point in supporting nblocks64 values that don't have
1522 1522 * an accurate uint64_t byte count representation.
1523 1523 */
1524 1524 if (nblocks64 >= (UINT64_MAX >> blkshift))
1525 1525 return (ddi_prop_op(dev, dip, prop_op, mod_flags,
1526 1526 name, valuep, lengthp));
1527 1527
1528 1528 size64 = nblocks64 << blkshift;
1529 1529 return (ddi_prop_op_size_blksize(dev, dip, prop_op, mod_flags,
1530 1530 name, valuep, lengthp, size64, blksize));
1531 1531 }
1532 1532
1533 1533 /*
1534 1534 * ddi_prop_op_nblocks: ddi_prop_op_nblocks_blksize with DEV_BSIZE blksize.
1535 1535 */
1536 1536 int
1537 1537 ddi_prop_op_nblocks(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
1538 1538 int mod_flags, char *name, caddr_t valuep, int *lengthp, uint64_t nblocks64)
1539 1539 {
1540 1540 return (ddi_prop_op_nblocks_blksize(dev, dip, prop_op,
1541 1541 mod_flags, name, valuep, lengthp, nblocks64, DEV_BSIZE));
1542 1542 }
1543 1543
1544 1544 /*
1545 1545 * ddi_prop_op_size_blksize: The basic property operator for block drivers that
 * maintain size in bytes. Provides a dynamic property implementation for
1547 1547 * size oriented properties based on size64 value and blksize passed in by the
1548 1548 * driver. Fallback to ddi_prop_op if the size64 is too large. This interface
1549 1549 * should not be used with a size64 that represents the driver's idea of how
1550 1550 * to represent unknown, if size is unknown use ddi_prop_op.
1551 1551 *
1552 1552 * NOTE: the legacy "nblocks"/"size" properties are treated as 32-bit unsigned
1553 1553 * integers. While the most likely interface to request them ([bc]devi_size)
1554 1554 * is declared int (signed) there is no enforcement of this, which means we
1555 1555 * can't enforce limitations here without risking regression.
1556 1556 */
int
ddi_prop_op_size_blksize(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
    int mod_flags, char *name, caddr_t valuep, int *lengthp, uint64_t size64,
    uint_t blksize)
{
	uint64_t nblocks64;
	int callers_length;
	caddr_t buffer;
	int blkshift;

	/*
	 * This is a kludge to support capture of size(9P) pure dynamic
	 * properties in snapshots for non-cmlb code (without exposing
	 * i_ddi_prop_dyn changes). When everyone uses cmlb, this code
	 * should be removed.
	 */
	if (i_ddi_prop_dyn_driver_get(dip) == NULL) {
		static i_ddi_prop_dyn_t prop_dyn_size[] = {
			{"Size", DDI_PROP_TYPE_INT64, S_IFCHR},
			{"Nblocks", DDI_PROP_TYPE_INT64, S_IFBLK},
			{NULL}
		};
		i_ddi_prop_dyn_driver_set(dip, prop_dyn_size);
	}

	/* convert block size to shift value (blksize is a power of two) */
	ASSERT(BIT_ONLYONESET(blksize));
	blkshift = highbit(blksize) - 1;

	/* compute the block count in units of blksize */
	nblocks64 = size64 >> blkshift;

	/* get callers length, establish length of our dynamic properties */
	callers_length = *lengthp;

	/*
	 * The 64-bit "Nblocks"/"Size" properties are always served; the
	 * legacy 32-bit "nblocks"/"size"/"blksize" ones only while the
	 * value still fits in a uint32_t.  Any other name falls back to
	 * the static property search.
	 */
	if (strcmp(name, "Nblocks") == 0)
		*lengthp = sizeof (uint64_t);
	else if (strcmp(name, "Size") == 0)
		*lengthp = sizeof (uint64_t);
	else if ((strcmp(name, "nblocks") == 0) && (nblocks64 < UINT_MAX))
		*lengthp = sizeof (uint32_t);
	else if ((strcmp(name, "size") == 0) && (size64 < UINT_MAX))
		*lengthp = sizeof (uint32_t);
	else if ((strcmp(name, "blksize") == 0) && (blksize < UINT_MAX))
		*lengthp = sizeof (uint32_t);
	else {
		/* fallback to ddi_prop_op */
		return (ddi_prop_op(dev, dip, prop_op, mod_flags,
		    name, valuep, lengthp));
	}

	/* service request for the length of the property */
	if (prop_op == PROP_LEN)
		return (DDI_PROP_SUCCESS);

	switch (prop_op) {
	case PROP_LEN_AND_VAL_ALLOC:
		if ((buffer = kmem_alloc(*lengthp,
		    (mod_flags & DDI_PROP_CANSLEEP) ?
		    KM_SLEEP : KM_NOSLEEP)) == NULL)
			return (DDI_PROP_NO_MEMORY);

		*(caddr_t *)valuep = buffer; /* set callers buf ptr */
		break;

	case PROP_LEN_AND_VAL_BUF:
		/* the length of the property and the request must match */
		if (callers_length != *lengthp)
			return (DDI_PROP_INVAL_ARG);

		buffer = valuep; /* get callers buf ptr */
		break;

	default:
		return (DDI_PROP_INVAL_ARG);
	}

	/* transfer the value into the buffer */
	if (strcmp(name, "Nblocks") == 0)
		*((uint64_t *)buffer) = nblocks64;
	else if (strcmp(name, "Size") == 0)
		*((uint64_t *)buffer) = size64;
	else if (strcmp(name, "nblocks") == 0)
		*((uint32_t *)buffer) = (uint32_t)nblocks64;
	else if (strcmp(name, "size") == 0)
		*((uint32_t *)buffer) = (uint32_t)size64;
	else if (strcmp(name, "blksize") == 0)
		*((uint32_t *)buffer) = (uint32_t)blksize;
	return (DDI_PROP_SUCCESS);
}
1647 1647
1648 1648 /*
1649 1649 * ddi_prop_op_size: ddi_prop_op_size_blksize with DEV_BSIZE block size.
1650 1650 */
1651 1651 int
1652 1652 ddi_prop_op_size(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
1653 1653 int mod_flags, char *name, caddr_t valuep, int *lengthp, uint64_t size64)
1654 1654 {
1655 1655 return (ddi_prop_op_size_blksize(dev, dip, prop_op,
1656 1656 mod_flags, name, valuep, lengthp, size64, DEV_BSIZE));
1657 1657 }
1658 1658
1659 1659 /*
1660 1660 * Variable length props...
1661 1661 */
1662 1662
1663 1663 /*
1664 1664 * ddi_getlongprop: Get variable length property len+val into a buffer
1665 1665 * allocated by property provider via kmem_alloc. Requester
1666 1666 * is responsible for freeing returned property via kmem_free.
1667 1667 *
1668 1668 * Arguments:
1669 1669 *
1670 1670 * dev_t: Input: dev_t of property.
1671 1671 * dip: Input: dev_info_t pointer of child.
1672 1672 * flags: Input: Possible flag modifiers are:
1673 1673 * DDI_PROP_DONTPASS: Don't pass to parent if prop not found.
1674 1674 * DDI_PROP_CANSLEEP: Memory allocation may sleep.
1675 1675 * name: Input: name of property.
1676 1676 * valuep: Output: Addr of callers buffer pointer.
1677 1677 * lengthp:Output: *lengthp will contain prop length on exit.
1678 1678 *
1679 1679 * Possible Returns:
1680 1680 *
1681 1681 * DDI_PROP_SUCCESS: Prop found and returned.
1682 1682 * DDI_PROP_NOT_FOUND: Prop not found
1683 1683 * DDI_PROP_UNDEFINED: Prop explicitly undefined.
1684 1684 * DDI_PROP_NO_MEMORY: Prop found, but unable to alloc mem.
1685 1685 */
1686 1686
1687 1687 int
1688 1688 ddi_getlongprop(dev_t dev, dev_info_t *dip, int flags,
1689 1689 char *name, caddr_t valuep, int *lengthp)
1690 1690 {
1691 1691 return (ddi_prop_op(dev, dip, PROP_LEN_AND_VAL_ALLOC,
1692 1692 flags, name, valuep, lengthp));
1693 1693 }
1694 1694
1695 1695 /*
1696 1696 *
1697 1697 * ddi_getlongprop_buf: Get long prop into pre-allocated callers
1698 1698 * buffer. (no memory allocation by provider).
1699 1699 *
1700 1700 * dev_t: Input: dev_t of property.
1701 1701 * dip: Input: dev_info_t pointer of child.
1702 1702 * flags: Input: DDI_PROP_DONTPASS or NULL
1703 1703 * name: Input: name of property
1704 1704 * valuep: Input: ptr to callers buffer.
1705 1705 * lengthp:I/O: ptr to length of callers buffer on entry,
1706 1706 * actual length of property on exit.
1707 1707 *
1708 1708 * Possible returns:
1709 1709 *
1710 1710 * DDI_PROP_SUCCESS Prop found and returned
1711 1711 * DDI_PROP_NOT_FOUND Prop not found
1712 1712 * DDI_PROP_UNDEFINED Prop explicitly undefined.
1713 1713 * DDI_PROP_BUF_TOO_SMALL Prop found, callers buf too small,
1714 1714 * no value returned, but actual prop
1715 1715 * length returned in *lengthp
1716 1716 *
1717 1717 */
1718 1718
1719 1719 int
1720 1720 ddi_getlongprop_buf(dev_t dev, dev_info_t *dip, int flags,
1721 1721 char *name, caddr_t valuep, int *lengthp)
1722 1722 {
1723 1723 return (ddi_prop_op(dev, dip, PROP_LEN_AND_VAL_BUF,
1724 1724 flags, name, valuep, lengthp));
1725 1725 }
1726 1726
1727 1727 /*
1728 1728 * Integer/boolean sized props.
1729 1729 *
1730 1730 * Call is value only... returns found boolean or int sized prop value or
1731 1731 * defvalue if prop not found or is wrong length or is explicitly undefined.
1732 1732 * Only flag is DDI_PROP_DONTPASS...
1733 1733 *
1734 1734 * By convention, this interface returns boolean (0) sized properties
1735 1735 * as value (int)1.
1736 1736 *
1737 1737 * This never returns an error, if property not found or specifically
1738 1738 * undefined, the input `defvalue' is returned.
1739 1739 */
1740 1740
1741 1741 int
1742 1742 ddi_getprop(dev_t dev, dev_info_t *dip, int flags, char *name, int defvalue)
1743 1743 {
1744 1744 int propvalue = defvalue;
1745 1745 int proplength = sizeof (int);
1746 1746 int error;
1747 1747
1748 1748 error = ddi_prop_op(dev, dip, PROP_LEN_AND_VAL_BUF,
1749 1749 flags, name, (caddr_t)&propvalue, &proplength);
1750 1750
1751 1751 if ((error == DDI_PROP_SUCCESS) && (proplength == 0))
1752 1752 propvalue = 1;
1753 1753
1754 1754 return (propvalue);
1755 1755 }
1756 1756
1757 1757 /*
1758 1758 * Get prop length interface: flags are 0 or DDI_PROP_DONTPASS
1759 1759 * if returns DDI_PROP_SUCCESS, length returned in *lengthp.
1760 1760 */
1761 1761
1762 1762 int
1763 1763 ddi_getproplen(dev_t dev, dev_info_t *dip, int flags, char *name, int *lengthp)
1764 1764 {
1765 1765 return (ddi_prop_op(dev, dip, PROP_LEN, flags, name, NULL, lengthp));
1766 1766 }
1767 1767
1768 1768 /*
1769 1769 * Allocate a struct prop_driver_data, along with 'size' bytes
1770 1770 * for decoded property data. This structure is freed by
1771 1771 * calling ddi_prop_free(9F).
1772 1772 */
1773 1773 static void *
1774 1774 ddi_prop_decode_alloc(size_t size, void (*prop_free)(struct prop_driver_data *))
1775 1775 {
1776 1776 struct prop_driver_data *pdd;
1777 1777
1778 1778 /*
1779 1779 * Allocate a structure with enough memory to store the decoded data.
1780 1780 */
1781 1781 pdd = kmem_zalloc(sizeof (struct prop_driver_data) + size, KM_SLEEP);
1782 1782 pdd->pdd_size = (sizeof (struct prop_driver_data) + size);
1783 1783 pdd->pdd_prop_free = prop_free;
1784 1784
1785 1785 /*
1786 1786 * Return a pointer to the location to put the decoded data.
1787 1787 */
1788 1788 return ((void *)((caddr_t)pdd + sizeof (struct prop_driver_data)));
1789 1789 }
1790 1790
1791 1791 /*
1792 1792 * Allocated the memory needed to store the encoded data in the property
1793 1793 * handle.
1794 1794 */
1795 1795 static int
1796 1796 ddi_prop_encode_alloc(prop_handle_t *ph, size_t size)
1797 1797 {
1798 1798 /*
1799 1799 * If size is zero, then set data to NULL and size to 0. This
1800 1800 * is a boolean property.
1801 1801 */
1802 1802 if (size == 0) {
1803 1803 ph->ph_size = 0;
1804 1804 ph->ph_data = NULL;
1805 1805 ph->ph_cur_pos = NULL;
1806 1806 ph->ph_save_pos = NULL;
1807 1807 } else {
1808 1808 if (ph->ph_flags == DDI_PROP_DONTSLEEP) {
1809 1809 ph->ph_data = kmem_zalloc(size, KM_NOSLEEP);
1810 1810 if (ph->ph_data == NULL)
1811 1811 return (DDI_PROP_NO_MEMORY);
1812 1812 } else
1813 1813 ph->ph_data = kmem_zalloc(size, KM_SLEEP);
1814 1814 ph->ph_size = size;
1815 1815 ph->ph_cur_pos = ph->ph_data;
1816 1816 ph->ph_save_pos = ph->ph_data;
1817 1817 }
1818 1818 return (DDI_PROP_SUCCESS);
1819 1819 }
1820 1820
1821 1821 /*
1822 1822 * Free the space allocated by the lookup routines. Each lookup routine
1823 1823 * returns a pointer to the decoded data to the driver. The driver then
1824 1824 * passes this pointer back to us. This data actually lives in a struct
1825 1825 * prop_driver_data. We use negative indexing to find the beginning of
1826 1826 * the structure and then free the entire structure using the size and
1827 1827 * the free routine stored in the structure.
1828 1828 */
1829 1829 void
1830 1830 ddi_prop_free(void *datap)
1831 1831 {
1832 1832 struct prop_driver_data *pdd;
1833 1833
1834 1834 /*
1835 1835 * Get the structure
1836 1836 */
1837 1837 pdd = (struct prop_driver_data *)
1838 1838 ((caddr_t)datap - sizeof (struct prop_driver_data));
1839 1839 /*
1840 1840 * Call the free routine to free it
1841 1841 */
1842 1842 (*pdd->pdd_prop_free)(pdd);
1843 1843 }
1844 1844
1845 1845 /*
1846 1846 * Free the data associated with an array of ints,
1847 1847 * allocated with ddi_prop_decode_alloc().
1848 1848 */
static void
ddi_prop_free_ints(struct prop_driver_data *pdd)
{
	/* Header and decoded int data share one allocation of pdd_size. */
	kmem_free(pdd, pdd->pdd_size);
}
1854 1854
1855 1855 /*
1856 1856 * Free a single string property or a single string contained within
1857 1857 * the argv style return value of an array of strings.
1858 1858 */
static void
ddi_prop_free_string(struct prop_driver_data *pdd)
{
	/* pdd_size covers the header plus the decoded string storage. */
	kmem_free(pdd, pdd->pdd_size);

}
1865 1865
1866 1866 /*
1867 1867 * Free an array of strings.
1868 1868 */
static void
ddi_prop_free_strings(struct prop_driver_data *pdd)
{
	/* The argv-style pointer table and strings are freed together. */
	kmem_free(pdd, pdd->pdd_size);
}
1874 1874
1875 1875 /*
1876 1876 * Free the data associated with an array of bytes.
1877 1877 */
static void
ddi_prop_free_bytes(struct prop_driver_data *pdd)
{
	/* Header and decoded byte data share one allocation of pdd_size. */
	kmem_free(pdd, pdd->pdd_size);
}
1883 1883
/*
 * Reset the current location pointer in the property handle to the
 * beginning of the data.  Note this also discards any previously
 * saved position (ph_save_pos is rewound as well).
 */
void
ddi_prop_reset_pos(prop_handle_t *ph)
{
	ph->ph_cur_pos = ph->ph_data;
	ph->ph_save_pos = ph->ph_data;
}
1894 1894
/*
 * Save the current location pointer so it can later be restored with
 * ddi_prop_restore_pos().  (The previous comment here described the
 * restore operation — the comments on these two routines were swapped.)
 */
void
ddi_prop_save_pos(prop_handle_t *ph)
{
	ph->ph_save_pos = ph->ph_cur_pos;
}
1904 1904
/*
 * Restore the current location pointer in the property handle to the
 * position recorded by ddi_prop_save_pos().  (The previous comment here
 * described the save operation — the comments on these two routines
 * were swapped.)
 */
void
ddi_prop_restore_pos(prop_handle_t *ph)
{
	ph->ph_cur_pos = ph->ph_save_pos;
}
1913 1913
1914 1914 /*
1915 1915 * Property encode/decode functions
1916 1916 */
1917 1917
1918 1918 /*
1919 1919 * Decode a single integer property
1920 1920 */
1921 1921 static int
1922 1922 ddi_prop_fm_decode_int(prop_handle_t *ph, void *data, uint_t *nelements)
1923 1923 {
1924 1924 int i;
1925 1925 int tmp;
1926 1926
1927 1927 /*
1928 1928 * If there is nothing to decode return an error
1929 1929 */
1930 1930 if (ph->ph_size == 0)
1931 1931 return (DDI_PROP_END_OF_DATA);
1932 1932
1933 1933 /*
1934 1934 * Decode the property as a single integer and return it
1935 1935 * in data if we were able to decode it.
1936 1936 */
1937 1937 i = DDI_PROP_INT(ph, DDI_PROP_CMD_DECODE, &tmp);
1938 1938 if (i < DDI_PROP_RESULT_OK) {
1939 1939 switch (i) {
1940 1940 case DDI_PROP_RESULT_EOF:
1941 1941 return (DDI_PROP_END_OF_DATA);
1942 1942
1943 1943 case DDI_PROP_RESULT_ERROR:
1944 1944 return (DDI_PROP_CANNOT_DECODE);
1945 1945 }
1946 1946 }
1947 1947
1948 1948 *(int *)data = tmp;
1949 1949 *nelements = 1;
1950 1950 return (DDI_PROP_SUCCESS);
1951 1951 }
1952 1952
1953 1953 /*
1954 1954 * Decode a single 64 bit integer property
1955 1955 */
1956 1956 static int
1957 1957 ddi_prop_fm_decode_int64(prop_handle_t *ph, void *data, uint_t *nelements)
1958 1958 {
1959 1959 int i;
1960 1960 int64_t tmp;
1961 1961
1962 1962 /*
1963 1963 * If there is nothing to decode return an error
1964 1964 */
1965 1965 if (ph->ph_size == 0)
1966 1966 return (DDI_PROP_END_OF_DATA);
1967 1967
1968 1968 /*
1969 1969 * Decode the property as a single integer and return it
1970 1970 * in data if we were able to decode it.
1971 1971 */
1972 1972 i = DDI_PROP_INT64(ph, DDI_PROP_CMD_DECODE, &tmp);
1973 1973 if (i < DDI_PROP_RESULT_OK) {
1974 1974 switch (i) {
1975 1975 case DDI_PROP_RESULT_EOF:
1976 1976 return (DDI_PROP_END_OF_DATA);
1977 1977
1978 1978 case DDI_PROP_RESULT_ERROR:
1979 1979 return (DDI_PROP_CANNOT_DECODE);
1980 1980 }
1981 1981 }
1982 1982
1983 1983 *(int64_t *)data = tmp;
1984 1984 *nelements = 1;
1985 1985 return (DDI_PROP_SUCCESS);
1986 1986 }
1987 1987
/*
 * Decode an array of integers property.
 *
 * On success *data receives a pointer to a newly allocated int array
 * (caller frees with ddi_prop_free()) and *nelements its length.
 */
static int
ddi_prop_fm_decode_ints(prop_handle_t *ph, void *data, uint_t *nelements)
{
	int i;
	int cnt = 0;
	int *tmp;
	int *intp;
	int n;

	/*
	 * Figure out how many array elements there are by going through the
	 * data without decoding it first and counting.  Each successful
	 * DDI_PROP_CMD_SKIP advances the handle's cursor past one element;
	 * the loop ends on EOF or error (negative result).
	 */
	for (;;) {
		i = DDI_PROP_INT(ph, DDI_PROP_CMD_SKIP, NULL);
		if (i < 0)
			break;
		cnt++;
	}

	/*
	 * If there are no elements return an error
	 */
	if (cnt == 0)
		return (DDI_PROP_END_OF_DATA);

	/*
	 * If we cannot skip through the data, we cannot decode it
	 */
	if (i == DDI_PROP_RESULT_ERROR)
		return (DDI_PROP_CANNOT_DECODE);

	/*
	 * Reset the cursor to the beginning of the encoded data for
	 * the decode pass below.
	 */
	ddi_prop_reset_pos(ph);

	/*
	 * Allocate memory to store the decoded values in; the caller
	 * releases it with ddi_prop_free().
	 */
	intp = ddi_prop_decode_alloc((cnt * sizeof (int)),
	    ddi_prop_free_ints);

	/*
	 * Decode each element and place it in the space we just allocated
	 */
	tmp = intp;
	for (n = 0; n < cnt; n++, tmp++) {
		i = DDI_PROP_INT(ph, DDI_PROP_CMD_DECODE, tmp);
		if (i < DDI_PROP_RESULT_OK) {
			/*
			 * Free the partially filled buffer
			 * and return an error.
			 */
			ddi_prop_free(intp);
			switch (i) {
			case DDI_PROP_RESULT_EOF:
				return (DDI_PROP_END_OF_DATA);

			case DDI_PROP_RESULT_ERROR:
				return (DDI_PROP_CANNOT_DECODE);
			}
		}
	}

	*nelements = cnt;
	*(int **)data = intp;

	return (DDI_PROP_SUCCESS);
}
2061 2061
/*
 * Decode a 64 bit integer array property.
 *
 * On success *data receives a pointer to a newly allocated int64_t
 * array (caller frees with ddi_prop_free()) and *nelements its length.
 */
static int
ddi_prop_fm_decode_int64_array(prop_handle_t *ph, void *data, uint_t *nelements)
{
	int i;
	int n;
	int cnt = 0;
	int64_t *tmp;
	int64_t *intp;

	/*
	 * Count the number of array elements by going
	 * through the data without decoding it.  Each successful
	 * DDI_PROP_CMD_SKIP advances the cursor past one element.
	 */
	for (;;) {
		i = DDI_PROP_INT64(ph, DDI_PROP_CMD_SKIP, NULL);
		if (i < 0)
			break;
		cnt++;
	}

	/*
	 * If there are no elements return an error
	 */
	if (cnt == 0)
		return (DDI_PROP_END_OF_DATA);

	/*
	 * If we cannot skip through the data, we cannot decode it
	 */
	if (i == DDI_PROP_RESULT_ERROR)
		return (DDI_PROP_CANNOT_DECODE);

	/*
	 * Reset the cursor to the beginning of the encoded data for
	 * the decode pass below.
	 */
	ddi_prop_reset_pos(ph);

	/*
	 * Allocate memory to store the decoded values in; the caller
	 * releases it with ddi_prop_free().
	 */
	intp = ddi_prop_decode_alloc((cnt * sizeof (int64_t)),
	    ddi_prop_free_ints);

	/*
	 * Decode each element and place it in the space allocated
	 */
	tmp = intp;
	for (n = 0; n < cnt; n++, tmp++) {
		i = DDI_PROP_INT64(ph, DDI_PROP_CMD_DECODE, tmp);
		if (i < DDI_PROP_RESULT_OK) {
			/*
			 * Free the partially filled buffer
			 * and return an error.
			 */
			ddi_prop_free(intp);
			switch (i) {
			case DDI_PROP_RESULT_EOF:
				return (DDI_PROP_END_OF_DATA);

			case DDI_PROP_RESULT_ERROR:
				return (DDI_PROP_CANNOT_DECODE);
			}
		}
	}

	*nelements = cnt;
	*(int64_t **)data = intp;

	return (DDI_PROP_SUCCESS);
}
2135 2135
2136 2136 /*
2137 2137 * Encode an array of integers property (Can be one element)
2138 2138 */
2139 2139 int
2140 2140 ddi_prop_fm_encode_ints(prop_handle_t *ph, void *data, uint_t nelements)
2141 2141 {
2142 2142 int i;
2143 2143 int *tmp;
2144 2144 int cnt;
2145 2145 int size;
2146 2146
2147 2147 /*
2148 2148 * If there is no data, we cannot do anything
2149 2149 */
2150 2150 if (nelements == 0)
2151 2151 return (DDI_PROP_CANNOT_ENCODE);
2152 2152
2153 2153 /*
2154 2154 * Get the size of an encoded int.
2155 2155 */
2156 2156 size = DDI_PROP_INT(ph, DDI_PROP_CMD_GET_ESIZE, NULL);
2157 2157
2158 2158 if (size < DDI_PROP_RESULT_OK) {
2159 2159 switch (size) {
2160 2160 case DDI_PROP_RESULT_EOF:
2161 2161 return (DDI_PROP_END_OF_DATA);
2162 2162
2163 2163 case DDI_PROP_RESULT_ERROR:
2164 2164 return (DDI_PROP_CANNOT_ENCODE);
2165 2165 }
2166 2166 }
2167 2167
2168 2168 /*
2169 2169 * Allocate space in the handle to store the encoded int.
2170 2170 */
2171 2171 if (ddi_prop_encode_alloc(ph, size * nelements) !=
2172 2172 DDI_PROP_SUCCESS)
2173 2173 return (DDI_PROP_NO_MEMORY);
2174 2174
2175 2175 /*
2176 2176 * Encode the array of ints.
2177 2177 */
2178 2178 tmp = (int *)data;
2179 2179 for (cnt = 0; cnt < nelements; cnt++, tmp++) {
2180 2180 i = DDI_PROP_INT(ph, DDI_PROP_CMD_ENCODE, tmp);
2181 2181 if (i < DDI_PROP_RESULT_OK) {
2182 2182 switch (i) {
2183 2183 case DDI_PROP_RESULT_EOF:
2184 2184 return (DDI_PROP_END_OF_DATA);
2185 2185
2186 2186 case DDI_PROP_RESULT_ERROR:
2187 2187 return (DDI_PROP_CANNOT_ENCODE);
2188 2188 }
2189 2189 }
2190 2190 }
2191 2191
2192 2192 return (DDI_PROP_SUCCESS);
2193 2193 }
2194 2194
2195 2195
2196 2196 /*
2197 2197 * Encode a 64 bit integer array property
2198 2198 */
2199 2199 int
2200 2200 ddi_prop_fm_encode_int64(prop_handle_t *ph, void *data, uint_t nelements)
2201 2201 {
2202 2202 int i;
2203 2203 int cnt;
2204 2204 int size;
2205 2205 int64_t *tmp;
2206 2206
2207 2207 /*
2208 2208 * If there is no data, we cannot do anything
2209 2209 */
2210 2210 if (nelements == 0)
2211 2211 return (DDI_PROP_CANNOT_ENCODE);
2212 2212
2213 2213 /*
2214 2214 * Get the size of an encoded 64 bit int.
2215 2215 */
2216 2216 size = DDI_PROP_INT64(ph, DDI_PROP_CMD_GET_ESIZE, NULL);
2217 2217
2218 2218 if (size < DDI_PROP_RESULT_OK) {
2219 2219 switch (size) {
2220 2220 case DDI_PROP_RESULT_EOF:
2221 2221 return (DDI_PROP_END_OF_DATA);
2222 2222
2223 2223 case DDI_PROP_RESULT_ERROR:
2224 2224 return (DDI_PROP_CANNOT_ENCODE);
2225 2225 }
2226 2226 }
2227 2227
2228 2228 /*
2229 2229 * Allocate space in the handle to store the encoded int.
2230 2230 */
2231 2231 if (ddi_prop_encode_alloc(ph, size * nelements) !=
2232 2232 DDI_PROP_SUCCESS)
2233 2233 return (DDI_PROP_NO_MEMORY);
2234 2234
2235 2235 /*
2236 2236 * Encode the array of ints.
2237 2237 */
2238 2238 tmp = (int64_t *)data;
2239 2239 for (cnt = 0; cnt < nelements; cnt++, tmp++) {
2240 2240 i = DDI_PROP_INT64(ph, DDI_PROP_CMD_ENCODE, tmp);
2241 2241 if (i < DDI_PROP_RESULT_OK) {
2242 2242 switch (i) {
2243 2243 case DDI_PROP_RESULT_EOF:
2244 2244 return (DDI_PROP_END_OF_DATA);
2245 2245
2246 2246 case DDI_PROP_RESULT_ERROR:
2247 2247 return (DDI_PROP_CANNOT_ENCODE);
2248 2248 }
2249 2249 }
2250 2250 }
2251 2251
2252 2252 return (DDI_PROP_SUCCESS);
2253 2253 }
2254 2254
/*
 * Decode a single string property.
 *
 * On success *data receives a pointer to a newly allocated string
 * (caller frees with ddi_prop_free()) and *nelements is set to 1.
 */
static int
ddi_prop_fm_decode_string(prop_handle_t *ph, void *data, uint_t *nelements)
{
	char *tmp;
	char *str;
	int i;
	int size;

	/*
	 * If there is nothing to decode return an error
	 */
	if (ph->ph_size == 0)
		return (DDI_PROP_END_OF_DATA);

	/*
	 * Get the decoded size of the encoded string
	 * (DDI_PROP_CMD_GET_DSIZE, not the encoded size).
	 */
	size = DDI_PROP_STR(ph, DDI_PROP_CMD_GET_DSIZE, NULL);
	if (size < DDI_PROP_RESULT_OK) {
		switch (size) {
		case DDI_PROP_RESULT_EOF:
			return (DDI_PROP_END_OF_DATA);

		case DDI_PROP_RESULT_ERROR:
			return (DDI_PROP_CANNOT_DECODE);
		}
	}

	/*
	 * Allocate memory to store the decoded string in; the caller
	 * releases it with ddi_prop_free().
	 */
	str = ddi_prop_decode_alloc((size_t)size, ddi_prop_free_string);

	/* Rewind the cursor to the start of the encoded data. */
	ddi_prop_reset_pos(ph);

	/*
	 * Decode the str and place it in the space we just allocated
	 */
	tmp = str;
	i = DDI_PROP_STR(ph, DDI_PROP_CMD_DECODE, tmp);
	if (i < DDI_PROP_RESULT_OK) {
		/*
		 * Free the space we just allocated
		 * and return an error.
		 */
		ddi_prop_free(str);
		switch (i) {
		case DDI_PROP_RESULT_EOF:
			return (DDI_PROP_END_OF_DATA);

		case DDI_PROP_RESULT_ERROR:
			return (DDI_PROP_CANNOT_DECODE);
		}
	}

	*(char **)data = str;
	*nelements = 1;

	return (DDI_PROP_SUCCESS);
}
2318 2318
/*
 * Decode an array of strings.
 *
 * On success *data receives an argv-style, NULL-terminated vector of
 * string pointers and *nelements the string count.  The vector and all
 * of the string bytes live in ONE allocation (pointers first, then the
 * packed strings), so the caller frees everything with a single
 * ddi_prop_free() of the vector.
 */
int
ddi_prop_fm_decode_strings(prop_handle_t *ph, void *data, uint_t *nelements)
{
	int cnt = 0;
	char **strs;
	char **tmp;
	char *ptr;
	int i;
	int n;
	int size;
	size_t nbytes;

	/*
	 * Pass 1: count the array elements by skipping through the
	 * data without decoding it.
	 */
	for (;;) {
		i = DDI_PROP_STR(ph, DDI_PROP_CMD_SKIP, NULL);
		if (i < 0)
			break;
		cnt++;
	}

	/*
	 * If there are no elements return an error
	 */
	if (cnt == 0)
		return (DDI_PROP_END_OF_DATA);

	/*
	 * If we cannot skip through the data, we cannot decode it
	 */
	if (i == DDI_PROP_RESULT_ERROR)
		return (DDI_PROP_CANNOT_DECODE);

	/*
	 * Reset the data pointer to the beginning of the encoded data
	 */
	ddi_prop_reset_pos(ph);

	/*
	 * Pass 2: total the memory needed — (cnt + 1) pointers for the
	 * NULL-terminated vector, plus the decoded size of each string.
	 */
	nbytes = (cnt + 1) * sizeof (char *);

	for (n = 0; n < cnt; n++) {
		/*
		 * Get the decoded size of the current encoded string.
		 */
		size = DDI_PROP_STR(ph, DDI_PROP_CMD_GET_DSIZE, NULL);
		if (size < DDI_PROP_RESULT_OK) {
			switch (size) {
			case DDI_PROP_RESULT_EOF:
				return (DDI_PROP_END_OF_DATA);

			case DDI_PROP_RESULT_ERROR:
				return (DDI_PROP_CANNOT_DECODE);
			}
		}

		nbytes += size;
	}

	/*
	 * Allocate the combined vector + string buffer; the caller
	 * releases it with ddi_prop_free().
	 */
	strs = ddi_prop_decode_alloc(nbytes, ddi_prop_free_strings);

	/*
	 * Pass 3: set up the vector entries by sizing each string yet
	 * again; string bytes start right after the (cnt + 1) pointers.
	 */
	ddi_prop_reset_pos(ph);
	ptr = (caddr_t)strs + ((cnt + 1) * sizeof (char *));
	for (tmp = strs, n = 0; n < cnt; n++, tmp++) {
		/*
		 * Get the decoded size of the current encoded string.
		 */
		size = DDI_PROP_STR(ph, DDI_PROP_CMD_GET_DSIZE, NULL);
		if (size < DDI_PROP_RESULT_OK) {
			ddi_prop_free(strs);
			switch (size) {
			case DDI_PROP_RESULT_EOF:
				return (DDI_PROP_END_OF_DATA);

			case DDI_PROP_RESULT_ERROR:
				return (DDI_PROP_CANNOT_DECODE);
			}
		}

		*tmp = ptr;
		ptr += size;
	}

	/*
	 * String array is terminated by a NULL
	 */
	*tmp = NULL;

	/*
	 * Pass 4: finally decode each string into its slot.
	 */
	ddi_prop_reset_pos(ph);
	for (tmp = strs, n = 0; n < cnt; n++, tmp++) {
		i = DDI_PROP_STR(ph, DDI_PROP_CMD_DECODE, *tmp);
		if (i < DDI_PROP_RESULT_OK) {
			/*
			 * Free the space we just allocated
			 * and return an error
			 */
			ddi_prop_free(strs);
			switch (i) {
			case DDI_PROP_RESULT_EOF:
				return (DDI_PROP_END_OF_DATA);

			case DDI_PROP_RESULT_ERROR:
				return (DDI_PROP_CANNOT_DECODE);
			}
		}
	}

	*(char ***)data = strs;
	*nelements = cnt;

	return (DDI_PROP_SUCCESS);
}
2448 2448
2449 2449 /*
2450 2450 * Encode a string.
2451 2451 */
2452 2452 int
2453 2453 ddi_prop_fm_encode_string(prop_handle_t *ph, void *data, uint_t nelements)
2454 2454 {
2455 2455 char **tmp;
2456 2456 int size;
2457 2457 int i;
2458 2458
2459 2459 /*
2460 2460 * If there is no data, we cannot do anything
2461 2461 */
2462 2462 if (nelements == 0)
2463 2463 return (DDI_PROP_CANNOT_ENCODE);
2464 2464
2465 2465 /*
2466 2466 * Get the size of the encoded string.
2467 2467 */
2468 2468 tmp = (char **)data;
2469 2469 size = DDI_PROP_STR(ph, DDI_PROP_CMD_GET_ESIZE, *tmp);
2470 2470 if (size < DDI_PROP_RESULT_OK) {
2471 2471 switch (size) {
2472 2472 case DDI_PROP_RESULT_EOF:
2473 2473 return (DDI_PROP_END_OF_DATA);
2474 2474
2475 2475 case DDI_PROP_RESULT_ERROR:
2476 2476 return (DDI_PROP_CANNOT_ENCODE);
2477 2477 }
2478 2478 }
2479 2479
2480 2480 /*
2481 2481 * Allocate space in the handle to store the encoded string.
2482 2482 */
2483 2483 if (ddi_prop_encode_alloc(ph, size) != DDI_PROP_SUCCESS)
2484 2484 return (DDI_PROP_NO_MEMORY);
2485 2485
2486 2486 ddi_prop_reset_pos(ph);
2487 2487
2488 2488 /*
2489 2489 * Encode the string.
2490 2490 */
2491 2491 tmp = (char **)data;
2492 2492 i = DDI_PROP_STR(ph, DDI_PROP_CMD_ENCODE, *tmp);
2493 2493 if (i < DDI_PROP_RESULT_OK) {
2494 2494 switch (i) {
2495 2495 case DDI_PROP_RESULT_EOF:
2496 2496 return (DDI_PROP_END_OF_DATA);
2497 2497
2498 2498 case DDI_PROP_RESULT_ERROR:
2499 2499 return (DDI_PROP_CANNOT_ENCODE);
2500 2500 }
2501 2501 }
2502 2502
2503 2503 return (DDI_PROP_SUCCESS);
2504 2504 }
2505 2505
2506 2506
2507 2507 /*
2508 2508 * Encode an array of strings.
2509 2509 */
2510 2510 int
2511 2511 ddi_prop_fm_encode_strings(prop_handle_t *ph, void *data, uint_t nelements)
2512 2512 {
2513 2513 int cnt = 0;
2514 2514 char **tmp;
2515 2515 int size;
2516 2516 uint_t total_size;
2517 2517 int i;
2518 2518
2519 2519 /*
2520 2520 * If there is no data, we cannot do anything
2521 2521 */
2522 2522 if (nelements == 0)
2523 2523 return (DDI_PROP_CANNOT_ENCODE);
2524 2524
2525 2525 /*
2526 2526 * Get the total size required to encode all the strings.
2527 2527 */
2528 2528 total_size = 0;
2529 2529 tmp = (char **)data;
2530 2530 for (cnt = 0; cnt < nelements; cnt++, tmp++) {
2531 2531 size = DDI_PROP_STR(ph, DDI_PROP_CMD_GET_ESIZE, *tmp);
2532 2532 if (size < DDI_PROP_RESULT_OK) {
2533 2533 switch (size) {
2534 2534 case DDI_PROP_RESULT_EOF:
2535 2535 return (DDI_PROP_END_OF_DATA);
2536 2536
2537 2537 case DDI_PROP_RESULT_ERROR:
2538 2538 return (DDI_PROP_CANNOT_ENCODE);
2539 2539 }
2540 2540 }
2541 2541 total_size += (uint_t)size;
2542 2542 }
2543 2543
2544 2544 /*
2545 2545 * Allocate space in the handle to store the encoded strings.
2546 2546 */
2547 2547 if (ddi_prop_encode_alloc(ph, total_size) != DDI_PROP_SUCCESS)
2548 2548 return (DDI_PROP_NO_MEMORY);
2549 2549
2550 2550 ddi_prop_reset_pos(ph);
2551 2551
2552 2552 /*
2553 2553 * Encode the array of strings.
2554 2554 */
2555 2555 tmp = (char **)data;
2556 2556 for (cnt = 0; cnt < nelements; cnt++, tmp++) {
2557 2557 i = DDI_PROP_STR(ph, DDI_PROP_CMD_ENCODE, *tmp);
2558 2558 if (i < DDI_PROP_RESULT_OK) {
2559 2559 switch (i) {
2560 2560 case DDI_PROP_RESULT_EOF:
2561 2561 return (DDI_PROP_END_OF_DATA);
2562 2562
2563 2563 case DDI_PROP_RESULT_ERROR:
2564 2564 return (DDI_PROP_CANNOT_ENCODE);
2565 2565 }
2566 2566 }
2567 2567 }
2568 2568
2569 2569 return (DDI_PROP_SUCCESS);
2570 2570 }
2571 2571
2572 2572
/*
 * Decode an array of bytes.
 *
 * On success *data receives a pointer to a newly allocated byte buffer
 * (caller frees with ddi_prop_free()) and *nelements its length.
 */
static int
ddi_prop_fm_decode_bytes(prop_handle_t *ph, void *data, uint_t *nelements)
{
	uchar_t *tmp;
	int nbytes;
	int i;

	/*
	 * If there are no elements return an error
	 */
	if (ph->ph_size == 0)
		return (DDI_PROP_END_OF_DATA);

	/*
	 * Get the decoded size of the encoded array of bytes
	 * (DDI_PROP_CMD_GET_DSIZE).
	 */
	nbytes = DDI_PROP_BYTES(ph, DDI_PROP_CMD_GET_DSIZE,
	    data, ph->ph_size);
	if (nbytes < DDI_PROP_RESULT_OK) {
		switch (nbytes) {
		case DDI_PROP_RESULT_EOF:
			return (DDI_PROP_END_OF_DATA);

		case DDI_PROP_RESULT_ERROR:
			return (DDI_PROP_CANNOT_DECODE);
		}
	}

	/*
	 * Allocate memory to store the decoded bytes in; the caller
	 * releases it with ddi_prop_free().
	 */
	tmp = ddi_prop_decode_alloc(nbytes, ddi_prop_free_bytes);

	/*
	 * Decode the whole byte array into the space we just allocated
	 */
	i = DDI_PROP_BYTES(ph, DDI_PROP_CMD_DECODE, tmp, nbytes);
	if (i < DDI_PROP_RESULT_OK) {
		/*
		 * Free the space we just allocated
		 * and return an error
		 */
		ddi_prop_free(tmp);
		switch (i) {
		case DDI_PROP_RESULT_EOF:
			return (DDI_PROP_END_OF_DATA);

		case DDI_PROP_RESULT_ERROR:
			return (DDI_PROP_CANNOT_DECODE);
		}
	}

	*(uchar_t **)data = tmp;
	*nelements = nbytes;

	return (DDI_PROP_SUCCESS);
}
2633 2633
2634 2634 /*
2635 2635 * Encode an array of bytes.
2636 2636 */
2637 2637 int
2638 2638 ddi_prop_fm_encode_bytes(prop_handle_t *ph, void *data, uint_t nelements)
2639 2639 {
2640 2640 int size;
2641 2641 int i;
2642 2642
2643 2643 /*
2644 2644 * If there are no elements, then this is a boolean property,
2645 2645 * so just create a property handle with no data and return.
2646 2646 */
2647 2647 if (nelements == 0) {
2648 2648 (void) ddi_prop_encode_alloc(ph, 0);
2649 2649 return (DDI_PROP_SUCCESS);
2650 2650 }
2651 2651
2652 2652 /*
2653 2653 * Get the size of the encoded array of bytes.
2654 2654 */
2655 2655 size = DDI_PROP_BYTES(ph, DDI_PROP_CMD_GET_ESIZE, (uchar_t *)data,
2656 2656 nelements);
2657 2657 if (size < DDI_PROP_RESULT_OK) {
2658 2658 switch (size) {
2659 2659 case DDI_PROP_RESULT_EOF:
2660 2660 return (DDI_PROP_END_OF_DATA);
2661 2661
2662 2662 case DDI_PROP_RESULT_ERROR:
2663 2663 return (DDI_PROP_CANNOT_DECODE);
2664 2664 }
2665 2665 }
2666 2666
2667 2667 /*
2668 2668 * Allocate space in the handle to store the encoded bytes.
2669 2669 */
2670 2670 if (ddi_prop_encode_alloc(ph, (uint_t)size) != DDI_PROP_SUCCESS)
2671 2671 return (DDI_PROP_NO_MEMORY);
2672 2672
2673 2673 /*
2674 2674 * Encode the array of bytes.
2675 2675 */
2676 2676 i = DDI_PROP_BYTES(ph, DDI_PROP_CMD_ENCODE, (uchar_t *)data,
2677 2677 nelements);
2678 2678 if (i < DDI_PROP_RESULT_OK) {
2679 2679 switch (i) {
2680 2680 case DDI_PROP_RESULT_EOF:
2681 2681 return (DDI_PROP_END_OF_DATA);
2682 2682
2683 2683 case DDI_PROP_RESULT_ERROR:
2684 2684 return (DDI_PROP_CANNOT_ENCODE);
2685 2685 }
2686 2686 }
2687 2687
2688 2688 return (DDI_PROP_SUCCESS);
2689 2689 }
2690 2690
2691 2691 /*
2692 2692 * OBP 1275 integer, string and byte operators.
2693 2693 *
 * DDI_PROP_CMD_DECODE:
 *
 *	DDI_PROP_RESULT_ERROR:		cannot decode the data
 *	DDI_PROP_RESULT_EOF:		end of data
 *	DDI_PROP_RESULT_OK:		data was decoded
 *
 * DDI_PROP_CMD_ENCODE:
 *
 *	DDI_PROP_RESULT_ERROR:		cannot encode the data
 *	DDI_PROP_RESULT_EOF:		end of data
 *	DDI_PROP_RESULT_OK:		data was encoded
 *
 * DDI_PROP_CMD_SKIP:
 *
 *	DDI_PROP_RESULT_ERROR:		cannot skip the data
 *	DDI_PROP_RESULT_EOF:		end of data
 *	DDI_PROP_RESULT_OK:		data was skipped
2711 2711 *
2712 2712 * DDI_PROP_CMD_GET_ESIZE:
2713 2713 *
2714 2714 * DDI_PROP_RESULT_ERROR: cannot get encoded size
2715 2715 * DDI_PROP_RESULT_EOF: end of data
2716 2716 * > 0: the encoded size
2717 2717 *
2718 2718 * DDI_PROP_CMD_GET_DSIZE:
2719 2719 *
2720 2720 * DDI_PROP_RESULT_ERROR: cannot get decoded size
2721 2721 * DDI_PROP_RESULT_EOF: end of data
2722 2722 * > 0: the decoded size
2723 2723 */
2724 2724
/*
 * OBP 1275 integer operator
 *
 * OBP properties are a byte stream of data, so integers may not be
 * properly aligned. Therefore we need to copy them one byte at a time.
 */
int
ddi_prop_1275_int(prop_handle_t *ph, uint_t cmd, int *data)
{
	int	i;

	switch (cmd) {
	case DDI_PROP_CMD_DECODE:
		/*
		 * Check that there is encoded data.
		 *
		 * NOTE(review): the bounds checks below perform
		 * (int *) arithmetic with ph_size, which scales the
		 * byte count by sizeof (int); this looks over-permissive
		 * but is long-standing — confirm before changing.
		 */
		if (ph->ph_cur_pos == NULL || ph->ph_size == 0)
			return (DDI_PROP_RESULT_ERROR);
		if (ph->ph_flags & PH_FROM_PROM) {
			/*
			 * A PROM-encoded int may be shorter than
			 * PROP_1275_INT_SIZE; clamp to what is present.
			 */
			i = MIN(ph->ph_size, PROP_1275_INT_SIZE);
			if ((int *)ph->ph_cur_pos > ((int *)ph->ph_data +
			    ph->ph_size - i))
				return (DDI_PROP_RESULT_ERROR);
		} else {
			if (ph->ph_size < sizeof (int) ||
			    ((int *)ph->ph_cur_pos > ((int *)ph->ph_data +
			    ph->ph_size - sizeof (int))))
				return (DDI_PROP_RESULT_ERROR);
		}

		/*
		 * Copy the integer, using the implementation-specific
		 * copy function if the property is coming from the PROM.
		 */
		if (ph->ph_flags & PH_FROM_PROM) {
			*data = impl_ddi_prop_int_from_prom(
			    (uchar_t *)ph->ph_cur_pos,
			    (ph->ph_size < PROP_1275_INT_SIZE) ?
			    ph->ph_size : PROP_1275_INT_SIZE);
		} else {
			bcopy(ph->ph_cur_pos, data, sizeof (int));
		}

		/*
		 * Move the current location to the start of the next
		 * bit of undecoded data.
		 */
		ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos +
		    PROP_1275_INT_SIZE;
		return (DDI_PROP_RESULT_OK);

	case DDI_PROP_CMD_ENCODE:
		/*
		 * Check that there is room to encode the data.
		 */
		if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
		    ph->ph_size < PROP_1275_INT_SIZE ||
		    ((int *)ph->ph_cur_pos > ((int *)ph->ph_data +
		    ph->ph_size - sizeof (int))))
			return (DDI_PROP_RESULT_ERROR);

		/*
		 * Encode the integer into the byte stream.
		 */
		bcopy(data, ph->ph_cur_pos, sizeof (int));

		/*
		 * Move the current location to the start of the next bit of
		 * space where we can store encoded data.
		 */
		ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos + PROP_1275_INT_SIZE;
		return (DDI_PROP_RESULT_OK);

	case DDI_PROP_CMD_SKIP:
		/*
		 * Check that there is encoded data
		 */
		if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
		    ph->ph_size < PROP_1275_INT_SIZE)
			return (DDI_PROP_RESULT_ERROR);


		/*
		 * EOF both when the cursor sits exactly at the end of
		 * the data and when it has somehow run past it.
		 */
		if ((caddr_t)ph->ph_cur_pos ==
		    (caddr_t)ph->ph_data + ph->ph_size) {
			return (DDI_PROP_RESULT_EOF);
		} else if ((caddr_t)ph->ph_cur_pos >
		    (caddr_t)ph->ph_data + ph->ph_size) {
			return (DDI_PROP_RESULT_EOF);
		}

		/*
		 * Move the current location to the start of the next bit of
		 * undecoded data.
		 */
		ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos + PROP_1275_INT_SIZE;
		return (DDI_PROP_RESULT_OK);

	case DDI_PROP_CMD_GET_ESIZE:
		/*
		 * Return the size of an encoded integer on OBP
		 */
		return (PROP_1275_INT_SIZE);

	case DDI_PROP_CMD_GET_DSIZE:
		/*
		 * Return the size of a decoded integer on the system.
		 */
		return (sizeof (int));

	default:
#ifdef DEBUG
		panic("ddi_prop_1275_int: %x impossible", cmd);
		/*NOTREACHED*/
#else
		return (DDI_PROP_RESULT_ERROR);
#endif	/* DEBUG */
	}
}
2844 2844
/*
 * 64 bit integer operator.
 *
 * This is an extension, defined by Sun, to the 1275 integer
 * operator. This routine handles the encoding/decoding of
 * 64 bit integer properties.  64-bit values are never accepted
 * from the PROM (PH_FROM_PROM is rejected below).
 */
int
ddi_prop_int64_op(prop_handle_t *ph, uint_t cmd, int64_t *data)
{

	switch (cmd) {
	case DDI_PROP_CMD_DECODE:
		/*
		 * Check that there is encoded data.
		 *
		 * NOTE(review): as in ddi_prop_1275_int(), the bounds
		 * checks do (int64_t *) arithmetic with ph_size (a byte
		 * count), scaling it by sizeof (int64_t); long-standing,
		 * confirm before changing.
		 */
		if (ph->ph_cur_pos == NULL || ph->ph_size == 0)
			return (DDI_PROP_RESULT_ERROR);
		if (ph->ph_flags & PH_FROM_PROM) {
			/* 64-bit ints are not supported from the PROM. */
			return (DDI_PROP_RESULT_ERROR);
		} else {
			if (ph->ph_size < sizeof (int64_t) ||
			    ((int64_t *)ph->ph_cur_pos >
			    ((int64_t *)ph->ph_data +
			    ph->ph_size - sizeof (int64_t))))
				return (DDI_PROP_RESULT_ERROR);
		}
		/*
		 * Copy the integer.  NOTE(review): this PH_FROM_PROM
		 * branch is unreachable — the PROM case already
		 * returned DDI_PROP_RESULT_ERROR above.
		 */
		if (ph->ph_flags & PH_FROM_PROM) {
			return (DDI_PROP_RESULT_ERROR);
		} else {
			bcopy(ph->ph_cur_pos, data, sizeof (int64_t));
		}

		/*
		 * Move the current location to the start of the next
		 * bit of undecoded data.
		 */
		ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos +
		    sizeof (int64_t);
		return (DDI_PROP_RESULT_OK);

	case DDI_PROP_CMD_ENCODE:
		/*
		 * Check that there is room to encode the data.
		 */
		if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
		    ph->ph_size < sizeof (int64_t) ||
		    ((int64_t *)ph->ph_cur_pos > ((int64_t *)ph->ph_data +
		    ph->ph_size - sizeof (int64_t))))
			return (DDI_PROP_RESULT_ERROR);

		/*
		 * Encode the integer into the byte stream.
		 */
		bcopy(data, ph->ph_cur_pos, sizeof (int64_t));

		/*
		 * Move the current location to the start of the next bit of
		 * space where we can store encoded data.
		 */
		ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos +
		    sizeof (int64_t);
		return (DDI_PROP_RESULT_OK);

	case DDI_PROP_CMD_SKIP:
		/*
		 * Check that there is encoded data
		 */
		if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
		    ph->ph_size < sizeof (int64_t))
			return (DDI_PROP_RESULT_ERROR);

		/*
		 * EOF both when the cursor sits exactly at the end of
		 * the data and when it has run past it.
		 */
		if ((caddr_t)ph->ph_cur_pos ==
		    (caddr_t)ph->ph_data + ph->ph_size) {
			return (DDI_PROP_RESULT_EOF);
		} else if ((caddr_t)ph->ph_cur_pos >
		    (caddr_t)ph->ph_data + ph->ph_size) {
			return (DDI_PROP_RESULT_EOF);
		}

		/*
		 * Move the current location to the start of
		 * the next bit of undecoded data.
		 */
		ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos +
		    sizeof (int64_t);
		return (DDI_PROP_RESULT_OK);

	case DDI_PROP_CMD_GET_ESIZE:
		/*
		 * Return the size of an encoded 64-bit integer.
		 */
		return (sizeof (int64_t));

	case DDI_PROP_CMD_GET_DSIZE:
		/*
		 * Return the size of a decoded integer on the system.
		 */
		return (sizeof (int64_t));

	default:
#ifdef DEBUG
		panic("ddi_prop_int64_op: %x impossible", cmd);
		/*NOTREACHED*/
#else
		return (DDI_PROP_RESULT_ERROR);
#endif	/* DEBUG */
	}
}
2959 2959
2960 2960 /*
2961 2961 * OBP 1275 string operator.
2962 2962 *
2963 2963 * OBP strings are NULL terminated.
2964 2964 */
int
ddi_prop_1275_string(prop_handle_t *ph, uint_t cmd, char *data)
{
	int n;		/* scratch length/count */
	char *p;	/* cursor into the encoded buffer */
	char *end;	/* first byte past the encoded buffer */

	switch (cmd) {
	case DDI_PROP_CMD_DECODE:
		/*
		 * Check that there is encoded data to decode.
		 */
		if (ph->ph_cur_pos == NULL || ph->ph_size == 0) {
			return (DDI_PROP_RESULT_ERROR);
		}

		/*
		 * Match DDI_PROP_CMD_GET_DSIZE logic for when to stop and
		 * how to NULL terminate result.  The caller's buffer is
		 * presumably sized via GET_DSIZE, so the copy below should
		 * not overrun it -- TODO confirm against callers.
		 */
		p = (char *)ph->ph_cur_pos;
		end = (char *)ph->ph_data + ph->ph_size;
		if (p >= end)
			return (DDI_PROP_RESULT_EOF);

		/* Copy bytes up to and including OBP's NUL terminator. */
		while (p < end) {
			*data++ = *p;
			if (*p++ == 0) {	/* NULL from OBP */
				ph->ph_cur_pos = p;
				return (DDI_PROP_RESULT_OK);
			}
		}

		/*
		 * If OBP did not NULL terminate string, which happens
		 * (at least) for 'true'/'false' boolean values, account for
		 * the space and store null termination on decode.
		 */
		ph->ph_cur_pos = p;
		*data = 0;
		return (DDI_PROP_RESULT_OK);

	case DDI_PROP_CMD_ENCODE:
		/*
		 * Check that there is room to encode the data.
		 */
		if (ph->ph_cur_pos == NULL || ph->ph_size == 0) {
			return (DDI_PROP_RESULT_ERROR);
		}

		/* Fail if the string plus its NUL will not fit. */
		n = strlen(data) + 1;
		if ((char *)ph->ph_cur_pos > ((char *)ph->ph_data +
		    ph->ph_size - n)) {
			return (DDI_PROP_RESULT_ERROR);
		}

		/*
		 * Copy the NULL terminated string
		 */
		bcopy(data, ph->ph_cur_pos, n);

		/*
		 * Move the current location to the start of the next bit of
		 * space where we can store encoded data.
		 */
		ph->ph_cur_pos = (char *)ph->ph_cur_pos + n;
		return (DDI_PROP_RESULT_OK);

	case DDI_PROP_CMD_SKIP:
		/*
		 * Check that there is encoded data to skip over.
		 */
		if (ph->ph_cur_pos == NULL || ph->ph_size == 0) {
			return (DDI_PROP_RESULT_ERROR);
		}

		/*
		 * Advance past the string and its NULL terminator.
		 * We know the size of the property, we need to
		 * ensure that the string is properly formatted,
		 * since we may be looking up random OBP data.
		 */
		p = (char *)ph->ph_cur_pos;
		end = (char *)ph->ph_data + ph->ph_size;
		if (p >= end)
			return (DDI_PROP_RESULT_EOF);

		while (p < end) {
			if (*p++ == 0) {	/* NULL from OBP */
				ph->ph_cur_pos = p;
				return (DDI_PROP_RESULT_OK);
			}
		}

		/*
		 * Accommodate the fact that OBP does not always NULL
		 * terminate strings.
		 */
		ph->ph_cur_pos = p;
		return (DDI_PROP_RESULT_OK);

	case DDI_PROP_CMD_GET_ESIZE:
		/*
		 * Return the size of the encoded string on OBP.
		 */
		return (strlen(data) + 1);

	case DDI_PROP_CMD_GET_DSIZE:
		/*
		 * Return the string length plus one for the NULL.
		 * We know the size of the property, we need to
		 * ensure that the string is properly formatted,
		 * since we may be looking up random OBP data.
		 */
		p = (char *)ph->ph_cur_pos;
		end = (char *)ph->ph_data + ph->ph_size;
		if (p >= end)
			return (DDI_PROP_RESULT_EOF);

		/* Count bytes up to the NUL (or the end of the buffer). */
		for (n = 0; p < end; n++) {
			if (*p++ == 0) {	/* NULL from OBP */
				ph->ph_cur_pos = p;
				return (n + 1);
			}
		}

		/*
		 * If OBP did not NULL terminate string, which happens for
		 * 'true'/'false' boolean values, account for the space
		 * to store null termination here.
		 */
		ph->ph_cur_pos = p;
		return (n + 1);

	default:
#ifdef DEBUG
		panic("ddi_prop_1275_string: %x impossible", cmd);
		/*NOTREACHED*/
#else
		return (DDI_PROP_RESULT_ERROR);
#endif /* DEBUG */
	}
}
3108 3108
3109 3109 /*
3110 3110 * OBP 1275 byte operator
3111 3111 *
3112 3112 * Caller must specify the number of bytes to get. OBP encodes bytes
3113 3113 * as a byte so there is a 1-to-1 translation.
3114 3114 */
3115 3115 int
3116 3116 ddi_prop_1275_bytes(prop_handle_t *ph, uint_t cmd, uchar_t *data,
3117 3117 uint_t nelements)
3118 3118 {
3119 3119 switch (cmd) {
3120 3120 case DDI_PROP_CMD_DECODE:
3121 3121 /*
3122 3122 * Check that there is encoded data
3123 3123 */
3124 3124 if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
3125 3125 ph->ph_size < nelements ||
3126 3126 ((char *)ph->ph_cur_pos > ((char *)ph->ph_data +
3127 3127 ph->ph_size - nelements)))
3128 3128 return (DDI_PROP_RESULT_ERROR);
3129 3129
3130 3130 /*
3131 3131 * Copy out the bytes
3132 3132 */
3133 3133 bcopy(ph->ph_cur_pos, data, nelements);
3134 3134
3135 3135 /*
3136 3136 * Move the current location
3137 3137 */
3138 3138 ph->ph_cur_pos = (char *)ph->ph_cur_pos + nelements;
3139 3139 return (DDI_PROP_RESULT_OK);
3140 3140
3141 3141 case DDI_PROP_CMD_ENCODE:
3142 3142 /*
3143 3143 * Check that there is room to encode the data
3144 3144 */
3145 3145 if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
3146 3146 ph->ph_size < nelements ||
3147 3147 ((char *)ph->ph_cur_pos > ((char *)ph->ph_data +
3148 3148 ph->ph_size - nelements)))
3149 3149 return (DDI_PROP_RESULT_ERROR);
3150 3150
3151 3151 /*
3152 3152 * Copy in the bytes
3153 3153 */
3154 3154 bcopy(data, ph->ph_cur_pos, nelements);
3155 3155
3156 3156 /*
3157 3157 * Move the current location to the start of the next bit of
3158 3158 * space where we can store encoded data.
3159 3159 */
3160 3160 ph->ph_cur_pos = (char *)ph->ph_cur_pos + nelements;
3161 3161 return (DDI_PROP_RESULT_OK);
3162 3162
3163 3163 case DDI_PROP_CMD_SKIP:
3164 3164 /*
3165 3165 * Check that there is encoded data
3166 3166 */
3167 3167 if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
3168 3168 ph->ph_size < nelements)
3169 3169 return (DDI_PROP_RESULT_ERROR);
3170 3170
3171 3171 if ((char *)ph->ph_cur_pos > ((char *)ph->ph_data +
3172 3172 ph->ph_size - nelements))
3173 3173 return (DDI_PROP_RESULT_EOF);
3174 3174
3175 3175 /*
3176 3176 * Move the current location
3177 3177 */
3178 3178 ph->ph_cur_pos = (char *)ph->ph_cur_pos + nelements;
3179 3179 return (DDI_PROP_RESULT_OK);
3180 3180
3181 3181 case DDI_PROP_CMD_GET_ESIZE:
3182 3182 /*
3183 3183 * The size in bytes of the encoded size is the
3184 3184 * same as the decoded size provided by the caller.
3185 3185 */
3186 3186 return (nelements);
3187 3187
3188 3188 case DDI_PROP_CMD_GET_DSIZE:
3189 3189 /*
3190 3190 * Just return the number of bytes specified by the caller.
3191 3191 */
3192 3192 return (nelements);
3193 3193
3194 3194 default:
3195 3195 #ifdef DEBUG
3196 3196 panic("ddi_prop_1275_bytes: %x impossible", cmd);
3197 3197 /*NOTREACHED*/
3198 3198 #else
3199 3199 return (DDI_PROP_RESULT_ERROR);
3200 3200 #endif /* DEBUG */
3201 3201 }
3202 3202 }
3203 3203
3204 3204 /*
3205 3205 * Used for properties that come from the OBP, hardware configuration files,
3206 3206 * or that are created by calls to ddi_prop_update(9F).
3207 3207 */
static struct prop_handle_ops prop_1275_ops = {
	ddi_prop_1275_int,	/* integer encode/decode */
	ddi_prop_1275_string,	/* string encode/decode */
	ddi_prop_1275_bytes,	/* byte-array encode/decode */
	ddi_prop_int64_op	/* 64-bit integer encode/decode */
};
3214 3214
3215 3215
3216 3216 /*
3217 3217 * Interface to create/modify a managed property on child's behalf...
3218 3218 * Flags interpreted are:
3219 3219 * DDI_PROP_CANSLEEP: Allow memory allocation to sleep.
3220 3220 * DDI_PROP_SYSTEM_DEF: Manipulate system list rather than driver list.
3221 3221 *
3222 3222 * Use same dev_t when modifying or undefining a property.
3223 3223 * Search for properties with DDI_DEV_T_ANY to match first named
3224 3224 * property on the list.
3225 3225 *
3226 3226 * Properties are stored LIFO and subsequently will match the first
3227 3227 * `matching' instance.
3228 3228 */
3229 3229
3230 3230 /*
3231 3231 * ddi_prop_add: Add a software defined property
3232 3232 */
3233 3233
3234 3234 /*
3235 3235 * define to get a new ddi_prop_t.
3236 3236 * km_flags are KM_SLEEP or KM_NOSLEEP.
3237 3237 */
3238 3238
/* Allocate a zeroed ddi_prop_t; NULL on failure when km_flags is KM_NOSLEEP */
#define	DDI_NEW_PROP_T(km_flags)	\
	(kmem_zalloc(sizeof (ddi_prop_t), km_flags))
3241 3241
3242 3242 static int
3243 3243 ddi_prop_add(dev_t dev, dev_info_t *dip, int flags,
3244 3244 char *name, caddr_t value, int length)
3245 3245 {
3246 3246 ddi_prop_t *new_propp, *propp;
3247 3247 ddi_prop_t **list_head = &(DEVI(dip)->devi_drv_prop_ptr);
3248 3248 int km_flags = KM_NOSLEEP;
3249 3249 int name_buf_len;
3250 3250
3251 3251 /*
3252 3252 * If dev_t is DDI_DEV_T_ANY or name's length is zero return error.
3253 3253 */
3254 3254
3255 3255 if (dev == DDI_DEV_T_ANY || name == (char *)0 || strlen(name) == 0)
3256 3256 return (DDI_PROP_INVAL_ARG);
3257 3257
3258 3258 if (flags & DDI_PROP_CANSLEEP)
3259 3259 km_flags = KM_SLEEP;
3260 3260
3261 3261 if (flags & DDI_PROP_SYSTEM_DEF)
3262 3262 list_head = &(DEVI(dip)->devi_sys_prop_ptr);
3263 3263 else if (flags & DDI_PROP_HW_DEF)
3264 3264 list_head = &(DEVI(dip)->devi_hw_prop_ptr);
3265 3265
3266 3266 if ((new_propp = DDI_NEW_PROP_T(km_flags)) == NULL) {
3267 3267 cmn_err(CE_CONT, prop_no_mem_msg, name);
3268 3268 return (DDI_PROP_NO_MEMORY);
3269 3269 }
3270 3270
3271 3271 /*
3272 3272 * If dev is major number 0, then we need to do a ddi_name_to_major
3273 3273 * to get the real major number for the device. This needs to be
3274 3274 * done because some drivers need to call ddi_prop_create in their
3275 3275 * attach routines but they don't have a dev. By creating the dev
3276 3276 * ourself if the major number is 0, drivers will not have to know what
3277 3277 * their major number. They can just create a dev with major number
3278 3278 * 0 and pass it in. For device 0, we will be doing a little extra
3279 3279 * work by recreating the same dev that we already have, but its the
3280 3280 * price you pay :-).
3281 3281 *
3282 3282 * This fixes bug #1098060.
3283 3283 */
3284 3284 if (getmajor(dev) == DDI_MAJOR_T_UNKNOWN) {
3285 3285 new_propp->prop_dev =
3286 3286 makedevice(ddi_name_to_major(DEVI(dip)->devi_binding_name),
3287 3287 getminor(dev));
3288 3288 } else
3289 3289 new_propp->prop_dev = dev;
3290 3290
3291 3291 /*
3292 3292 * Allocate space for property name and copy it in...
3293 3293 */
3294 3294
3295 3295 name_buf_len = strlen(name) + 1;
3296 3296 new_propp->prop_name = kmem_alloc(name_buf_len, km_flags);
3297 3297 if (new_propp->prop_name == 0) {
3298 3298 kmem_free(new_propp, sizeof (ddi_prop_t));
3299 3299 cmn_err(CE_CONT, prop_no_mem_msg, name);
3300 3300 return (DDI_PROP_NO_MEMORY);
3301 3301 }
3302 3302 bcopy(name, new_propp->prop_name, name_buf_len);
3303 3303
3304 3304 /*
3305 3305 * Set the property type
3306 3306 */
3307 3307 new_propp->prop_flags = flags & DDI_PROP_TYPE_MASK;
3308 3308
3309 3309 /*
3310 3310 * Set length and value ONLY if not an explicit property undefine:
3311 3311 * NOTE: value and length are zero for explicit undefines.
3312 3312 */
3313 3313
3314 3314 if (flags & DDI_PROP_UNDEF_IT) {
3315 3315 new_propp->prop_flags |= DDI_PROP_UNDEF_IT;
3316 3316 } else {
3317 3317 if ((new_propp->prop_len = length) != 0) {
3318 3318 new_propp->prop_val = kmem_alloc(length, km_flags);
3319 3319 if (new_propp->prop_val == 0) {
3320 3320 kmem_free(new_propp->prop_name, name_buf_len);
3321 3321 kmem_free(new_propp, sizeof (ddi_prop_t));
3322 3322 cmn_err(CE_CONT, prop_no_mem_msg, name);
3323 3323 return (DDI_PROP_NO_MEMORY);
3324 3324 }
3325 3325 bcopy(value, new_propp->prop_val, length);
3326 3326 }
3327 3327 }
3328 3328
3329 3329 /*
3330 3330 * Link property into beginning of list. (Properties are LIFO order.)
3331 3331 */
3332 3332
3333 3333 mutex_enter(&(DEVI(dip)->devi_lock));
3334 3334 propp = *list_head;
3335 3335 new_propp->prop_next = propp;
3336 3336 *list_head = new_propp;
3337 3337 mutex_exit(&(DEVI(dip)->devi_lock));
3338 3338 return (DDI_PROP_SUCCESS);
3339 3339 }
3340 3340
3341 3341
3342 3342 /*
3343 3343 * ddi_prop_change: Modify a software managed property value
3344 3344 *
3345 3345 * Set new length and value if found.
3346 3346 * returns DDI_PROP_INVAL_ARG if dev is DDI_DEV_T_ANY or
3347 3347 * input name is the NULL string.
3348 3348 * returns DDI_PROP_NO_MEMORY if unable to allocate memory
3349 3349 *
3350 3350 * Note: an undef can be modified to be a define,
3351 3351 * (you can't go the other way.)
3352 3352 */
3353 3353
static int
ddi_prop_change(dev_t dev, dev_info_t *dip, int flags,
    char *name, caddr_t value, int length)
{
	ddi_prop_t *propp;
	ddi_prop_t **ppropp;
	caddr_t p = NULL;	/* preallocated replacement value buffer */

	if ((dev == DDI_DEV_T_ANY) || (name == NULL) || (strlen(name) == 0))
		return (DDI_PROP_INVAL_ARG);

	/*
	 * Preallocate buffer, even if we don't need it...
	 * Allocating before taking devi_lock lets the allocation sleep
	 * (when DDI_PROP_CANSLEEP) without holding the lock.
	 */
	if (length != 0) {
		p = kmem_alloc(length, (flags & DDI_PROP_CANSLEEP) ?
		    KM_SLEEP : KM_NOSLEEP);
		if (p == NULL) {
			cmn_err(CE_CONT, prop_no_mem_msg, name);
			return (DDI_PROP_NO_MEMORY);
		}
	}

	/*
	 * If the dev_t value contains DDI_MAJOR_T_UNKNOWN for the major
	 * number, a real dev_t value should be created based upon the dip's
	 * binding driver. See ddi_prop_add...
	 */
	if (getmajor(dev) == DDI_MAJOR_T_UNKNOWN)
		dev = makedevice(
		    ddi_name_to_major(DEVI(dip)->devi_binding_name),
		    getminor(dev));

	/*
	 * Check to see if the property exists. If so we modify it.
	 * Else we create it by calling ddi_prop_add().
	 */
	mutex_enter(&(DEVI(dip)->devi_lock));
	ppropp = &DEVI(dip)->devi_drv_prop_ptr;
	if (flags & DDI_PROP_SYSTEM_DEF)
		ppropp = &DEVI(dip)->devi_sys_prop_ptr;
	else if (flags & DDI_PROP_HW_DEF)
		ppropp = &DEVI(dip)->devi_hw_prop_ptr;

	if ((propp = i_ddi_prop_search(dev, name, flags, ppropp)) != NULL) {
		/*
		 * Found: fill the preallocated buffer with the new value,
		 * swap it in, and free the old one.  An existing undef
		 * becomes a define here (the reverse never happens).
		 */
		if (length != 0)
			bcopy(value, p, length);

		if (propp->prop_len != 0)
			kmem_free(propp->prop_val, propp->prop_len);

		propp->prop_len = length;
		propp->prop_val = p;
		propp->prop_flags &= ~DDI_PROP_UNDEF_IT;
		mutex_exit(&(DEVI(dip)->devi_lock));
		return (DDI_PROP_SUCCESS);
	}

	/* Not found: discard the preallocation and create the property. */
	mutex_exit(&(DEVI(dip)->devi_lock));
	if (length != 0)
		kmem_free(p, length);

	return (ddi_prop_add(dev, dip, flags, name, value, length));
}
3423 3423
3424 3424 /*
3425 3425 * Common update routine used to update and encode a property. Creates
3426 3426 * a property handle, calls the property encode routine, figures out if
3427 3427 * the property already exists and updates if it does. Otherwise it
3428 3428 * creates if it does not exist.
3429 3429 */
3430 3430 int
3431 3431 ddi_prop_update_common(dev_t match_dev, dev_info_t *dip, int flags,
3432 3432 char *name, void *data, uint_t nelements,
3433 3433 int (*prop_create)(prop_handle_t *, void *data, uint_t nelements))
3434 3434 {
3435 3435 prop_handle_t ph;
3436 3436 int rval;
3437 3437 uint_t ourflags;
3438 3438
3439 3439 /*
3440 3440 * If dev_t is DDI_DEV_T_ANY or name's length is zero,
3441 3441 * return error.
3442 3442 */
3443 3443 if (match_dev == DDI_DEV_T_ANY || name == NULL || strlen(name) == 0)
3444 3444 return (DDI_PROP_INVAL_ARG);
3445 3445
3446 3446 /*
3447 3447 * Create the handle
3448 3448 */
3449 3449 ph.ph_data = NULL;
3450 3450 ph.ph_cur_pos = NULL;
3451 3451 ph.ph_save_pos = NULL;
3452 3452 ph.ph_size = 0;
3453 3453 ph.ph_ops = &prop_1275_ops;
3454 3454
3455 3455 /*
3456 3456 * ourflags:
3457 3457 * For compatibility with the old interfaces. The old interfaces
3458 3458 * didn't sleep by default and slept when the flag was set. These
3459 3459 * interfaces to the opposite. So the old interfaces now set the
3460 3460 * DDI_PROP_DONTSLEEP flag by default which tells us not to sleep.
3461 3461 *
3462 3462 * ph.ph_flags:
3463 3463 * Blocked data or unblocked data allocation
3464 3464 * for ph.ph_data in ddi_prop_encode_alloc()
3465 3465 */
3466 3466 if (flags & DDI_PROP_DONTSLEEP) {
3467 3467 ourflags = flags;
3468 3468 ph.ph_flags = DDI_PROP_DONTSLEEP;
3469 3469 } else {
3470 3470 ourflags = flags | DDI_PROP_CANSLEEP;
3471 3471 ph.ph_flags = DDI_PROP_CANSLEEP;
3472 3472 }
3473 3473
3474 3474 /*
3475 3475 * Encode the data and store it in the property handle by
3476 3476 * calling the prop_encode routine.
3477 3477 */
3478 3478 if ((rval = (*prop_create)(&ph, data, nelements)) !=
3479 3479 DDI_PROP_SUCCESS) {
3480 3480 if (rval == DDI_PROP_NO_MEMORY)
3481 3481 cmn_err(CE_CONT, prop_no_mem_msg, name);
3482 3482 if (ph.ph_size != 0)
3483 3483 kmem_free(ph.ph_data, ph.ph_size);
3484 3484 return (rval);
3485 3485 }
3486 3486
3487 3487 /*
3488 3488 * The old interfaces use a stacking approach to creating
3489 3489 * properties. If we are being called from the old interfaces,
3490 3490 * the DDI_PROP_STACK_CREATE flag will be set, so we just do a
3491 3491 * create without checking.
3492 3492 */
3493 3493 if (flags & DDI_PROP_STACK_CREATE) {
3494 3494 rval = ddi_prop_add(match_dev, dip,
3495 3495 ourflags, name, ph.ph_data, ph.ph_size);
3496 3496 } else {
3497 3497 rval = ddi_prop_change(match_dev, dip,
3498 3498 ourflags, name, ph.ph_data, ph.ph_size);
3499 3499 }
3500 3500
3501 3501 /*
3502 3502 * Free the encoded data allocated in the prop_encode routine.
3503 3503 */
3504 3504 if (ph.ph_size != 0)
3505 3505 kmem_free(ph.ph_data, ph.ph_size);
3506 3506
3507 3507 return (rval);
3508 3508 }
3509 3509
3510 3510
3511 3511 /*
3512 3512 * ddi_prop_create: Define a managed property:
3513 3513 * See above for details.
3514 3514 */
3515 3515
3516 3516 int
3517 3517 ddi_prop_create(dev_t dev, dev_info_t *dip, int flag,
3518 3518 char *name, caddr_t value, int length)
3519 3519 {
3520 3520 if (!(flag & DDI_PROP_CANSLEEP)) {
3521 3521 flag |= DDI_PROP_DONTSLEEP;
3522 3522 #ifdef DDI_PROP_DEBUG
3523 3523 if (length != 0)
3524 3524 cmn_err(CE_NOTE, "!ddi_prop_create: interface obsolete,"
3525 3525 "use ddi_prop_update (prop = %s, node = %s%d)",
3526 3526 name, ddi_driver_name(dip), ddi_get_instance(dip));
3527 3527 #endif /* DDI_PROP_DEBUG */
3528 3528 }
3529 3529 flag &= ~DDI_PROP_SYSTEM_DEF;
3530 3530 flag |= DDI_PROP_STACK_CREATE | DDI_PROP_TYPE_ANY;
3531 3531 return (ddi_prop_update_common(dev, dip, flag, name,
3532 3532 value, length, ddi_prop_fm_encode_bytes));
3533 3533 }
3534 3534
3535 3535 int
3536 3536 e_ddi_prop_create(dev_t dev, dev_info_t *dip, int flag,
3537 3537 char *name, caddr_t value, int length)
3538 3538 {
3539 3539 if (!(flag & DDI_PROP_CANSLEEP))
3540 3540 flag |= DDI_PROP_DONTSLEEP;
3541 3541 flag |= DDI_PROP_SYSTEM_DEF | DDI_PROP_STACK_CREATE | DDI_PROP_TYPE_ANY;
3542 3542 return (ddi_prop_update_common(dev, dip, flag,
3543 3543 name, value, length, ddi_prop_fm_encode_bytes));
3544 3544 }
3545 3545
3546 3546 int
3547 3547 ddi_prop_modify(dev_t dev, dev_info_t *dip, int flag,
3548 3548 char *name, caddr_t value, int length)
3549 3549 {
3550 3550 ASSERT((flag & DDI_PROP_TYPE_MASK) == 0);
3551 3551
3552 3552 /*
3553 3553 * If dev_t is DDI_DEV_T_ANY or name's length is zero,
3554 3554 * return error.
3555 3555 */
3556 3556 if (dev == DDI_DEV_T_ANY || name == NULL || strlen(name) == 0)
3557 3557 return (DDI_PROP_INVAL_ARG);
3558 3558
3559 3559 if (!(flag & DDI_PROP_CANSLEEP))
3560 3560 flag |= DDI_PROP_DONTSLEEP;
3561 3561 flag &= ~DDI_PROP_SYSTEM_DEF;
3562 3562 if (ddi_prop_exists(dev, dip, (flag | DDI_PROP_NOTPROM), name) == 0)
3563 3563 return (DDI_PROP_NOT_FOUND);
3564 3564
3565 3565 return (ddi_prop_update_common(dev, dip,
3566 3566 (flag | DDI_PROP_TYPE_BYTE), name,
3567 3567 value, length, ddi_prop_fm_encode_bytes));
3568 3568 }
3569 3569
3570 3570 int
3571 3571 e_ddi_prop_modify(dev_t dev, dev_info_t *dip, int flag,
3572 3572 char *name, caddr_t value, int length)
3573 3573 {
3574 3574 ASSERT((flag & DDI_PROP_TYPE_MASK) == 0);
3575 3575
3576 3576 /*
3577 3577 * If dev_t is DDI_DEV_T_ANY or name's length is zero,
3578 3578 * return error.
3579 3579 */
3580 3580 if (dev == DDI_DEV_T_ANY || name == NULL || strlen(name) == 0)
3581 3581 return (DDI_PROP_INVAL_ARG);
3582 3582
3583 3583 if (ddi_prop_exists(dev, dip, (flag | DDI_PROP_SYSTEM_DEF), name) == 0)
3584 3584 return (DDI_PROP_NOT_FOUND);
3585 3585
3586 3586 if (!(flag & DDI_PROP_CANSLEEP))
3587 3587 flag |= DDI_PROP_DONTSLEEP;
3588 3588 return (ddi_prop_update_common(dev, dip,
3589 3589 (flag | DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_BYTE),
3590 3590 name, value, length, ddi_prop_fm_encode_bytes));
3591 3591 }
3592 3592
3593 3593
3594 3594 /*
3595 3595 * Common lookup routine used to lookup and decode a property.
3596 3596 * Creates a property handle, searches for the raw encoded data,
3597 3597 * fills in the handle, and calls the property decode functions
3598 3598 * passed in.
3599 3599 *
3600 3600 * This routine is not static because ddi_bus_prop_op() which lives in
3601 3601 * ddi_impl.c calls it. No driver should be calling this routine.
3602 3602 */
int
ddi_prop_lookup_common(dev_t match_dev, dev_info_t *dip,
    uint_t flags, char *name, void *data, uint_t *nelements,
    int (*prop_decoder)(prop_handle_t *, void *data, uint_t *nelements))
{
	int rval;
	uint_t ourflags;
	prop_handle_t ph;

	if ((match_dev == DDI_DEV_T_NONE) ||
	    (name == NULL) || (strlen(name) == 0))
		return (DDI_PROP_INVAL_ARG);

	/* Old-interface compatibility: sleep unless told otherwise. */
	ourflags = (flags & DDI_PROP_DONTSLEEP) ? flags :
	    flags | DDI_PROP_CANSLEEP;

	/*
	 * Get the encoded data
	 */
	bzero(&ph, sizeof (prop_handle_t));

	if ((flags & DDI_UNBND_DLPI2) || (flags & DDI_PROP_ROOTNEX_GLOBAL)) {
		/*
		 * For rootnex and unbound dlpi style-2 devices, index into
		 * the devnames' array and search the global
		 * property list.
		 */
		ourflags &= ~DDI_UNBND_DLPI2;
		rval = i_ddi_prop_search_global(match_dev,
		    ourflags, name, &ph.ph_data, &ph.ph_size);
	} else {
		rval = ddi_prop_search_common(match_dev, dip,
		    PROP_LEN_AND_VAL_ALLOC, ourflags, name,
		    &ph.ph_data, &ph.ph_size);

	}

	/* On failure no encoded buffer may have been allocated. */
	if (rval != DDI_PROP_SUCCESS && rval != DDI_PROP_FOUND_1275) {
		ASSERT(ph.ph_data == NULL);
		ASSERT(ph.ph_size == 0);
		return (rval);
	}

	/*
	 * If the encoded data came from a OBP or software
	 * use the 1275 OBP decode/encode routines.
	 * PH_FROM_PROM flags data that came from the PROM.
	 */
	ph.ph_cur_pos = ph.ph_data;
	ph.ph_save_pos = ph.ph_data;
	ph.ph_ops = &prop_1275_ops;
	ph.ph_flags = (rval == DDI_PROP_FOUND_1275) ? PH_FROM_PROM : 0;

	rval = (*prop_decoder)(&ph, data, nelements);

	/*
	 * Free the encoded data
	 */
	if (ph.ph_size != 0)
		kmem_free(ph.ph_data, ph.ph_size);

	return (rval);
}
3665 3665
3666 3666 /*
3667 3667 * Lookup and return an array of composite properties. The driver must
3668 3668 * provide the decode routine.
3669 3669 */
3670 3670 int
3671 3671 ddi_prop_lookup(dev_t match_dev, dev_info_t *dip,
3672 3672 uint_t flags, char *name, void *data, uint_t *nelements,
3673 3673 int (*prop_decoder)(prop_handle_t *, void *data, uint_t *nelements))
3674 3674 {
3675 3675 return (ddi_prop_lookup_common(match_dev, dip,
3676 3676 (flags | DDI_PROP_TYPE_COMPOSITE), name,
3677 3677 data, nelements, prop_decoder));
3678 3678 }
3679 3679
3680 3680 /*
3681 3681 * Return 1 if a property exists (no type checking done).
3682 3682 * Return 0 if it does not exist.
3683 3683 */
3684 3684 int
3685 3685 ddi_prop_exists(dev_t match_dev, dev_info_t *dip, uint_t flags, char *name)
3686 3686 {
3687 3687 int i;
3688 3688 uint_t x = 0;
3689 3689
3690 3690 i = ddi_prop_search_common(match_dev, dip, PROP_EXISTS,
3691 3691 flags | DDI_PROP_TYPE_MASK, name, NULL, &x);
3692 3692 return (i == DDI_PROP_SUCCESS || i == DDI_PROP_FOUND_1275);
3693 3693 }
3694 3694
3695 3695
3696 3696 /*
3697 3697 * Update an array of composite properties. The driver must
3698 3698 * provide the encode routine.
3699 3699 */
3700 3700 int
3701 3701 ddi_prop_update(dev_t match_dev, dev_info_t *dip,
3702 3702 char *name, void *data, uint_t nelements,
3703 3703 int (*prop_create)(prop_handle_t *, void *data, uint_t nelements))
3704 3704 {
3705 3705 return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_COMPOSITE,
3706 3706 name, data, nelements, prop_create));
3707 3707 }
3708 3708
3709 3709 /*
3710 3710 * Get a single integer or boolean property and return it.
3711 3711 * If the property does not exists, or cannot be decoded,
3712 3712 * then return the defvalue passed in.
3713 3713 *
3714 3714 * This routine always succeeds.
3715 3715 */
int
ddi_prop_get_int(dev_t match_dev, dev_info_t *dip, uint_t flags,
    char *name, int defvalue)
{
	int data;
	uint_t nelements;
	int rval;

	/*
	 * Warn about (DEBUG only) and strip any flag bits outside the
	 * accepted lookup-control set.
	 * NOTE(review): the sanitizing mask below also clears
	 * DDI_PROP_ROOTNEX_GLOBAL even though it is an accepted flag --
	 * confirm this is intended.
	 */
	if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
	    LDI_DEV_T_ANY | DDI_UNBND_DLPI2 | DDI_PROP_ROOTNEX_GLOBAL)) {
#ifdef DEBUG
		if (dip != NULL) {
			cmn_err(CE_WARN, "ddi_prop_get_int: invalid flag"
			    " 0x%x (prop = %s, node = %s%d)", flags,
			    name, ddi_driver_name(dip), ddi_get_instance(dip));
		}
#endif /* DEBUG */
		flags &= DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
		    LDI_DEV_T_ANY | DDI_UNBND_DLPI2;
	}

	/*
	 * DDI_PROP_END_OF_DATA means the property exists but carries no
	 * value (a boolean property): report it as 1.  Any other failure
	 * yields the caller's default.
	 */
	if ((rval = ddi_prop_lookup_common(match_dev, dip,
	    (flags | DDI_PROP_TYPE_INT), name, &data, &nelements,
	    ddi_prop_fm_decode_int)) != DDI_PROP_SUCCESS) {
		if (rval == DDI_PROP_END_OF_DATA)
			data = 1;
		else
			data = defvalue;
	}
	return (data);
}
3747 3747
3748 3748 /*
3749 3749 * Get a single 64 bit integer or boolean property and return it.
3750 3750 * If the property does not exists, or cannot be decoded,
3751 3751 * then return the defvalue passed in.
3752 3752 *
3753 3753 * This routine always succeeds.
3754 3754 */
int64_t
ddi_prop_get_int64(dev_t match_dev, dev_info_t *dip, uint_t flags,
    char *name, int64_t defvalue)
{
	int64_t data;
	uint_t nelements;
	int rval;

	/*
	 * NOTE(review): unlike ddi_prop_get_int(), invalid flag bits here
	 * return the error code DDI_PROP_INVAL_ARG as the property value
	 * rather than sanitizing and continuing -- confirm intended.
	 */
	if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
	    LDI_DEV_T_ANY | DDI_UNBND_DLPI2 | DDI_PROP_ROOTNEX_GLOBAL)) {
#ifdef DEBUG
		if (dip != NULL) {
			cmn_err(CE_WARN, "ddi_prop_get_int64: invalid flag"
			    " 0x%x (prop = %s, node = %s%d)", flags,
			    name, ddi_driver_name(dip), ddi_get_instance(dip));
		}
#endif /* DEBUG */
		return (DDI_PROP_INVAL_ARG);
	}

	/*
	 * DDI_PROP_NOTPROM is forced: presumably 64-bit integers are never
	 * taken from the PROM -- confirm.  END_OF_DATA (boolean property)
	 * reports as 1; any other failure yields the caller's default.
	 */
	if ((rval = ddi_prop_lookup_common(match_dev, dip,
	    (flags | DDI_PROP_TYPE_INT64 | DDI_PROP_NOTPROM),
	    name, &data, &nelements, ddi_prop_fm_decode_int64))
	    != DDI_PROP_SUCCESS) {
		if (rval == DDI_PROP_END_OF_DATA)
			data = 1;
		else
			data = defvalue;
	}
	return (data);
}
3786 3786
3787 3787 /*
3788 3788 * Get an array of integer property
3789 3789 */
int
ddi_prop_lookup_int_array(dev_t match_dev, dev_info_t *dip, uint_t flags,
    char *name, int **data, uint_t *nelements)
{
	/*
	 * Warn about (DEBUG only) and strip any flag bits outside the
	 * accepted lookup-control set, then continue with the lookup.
	 */
	if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
	    LDI_DEV_T_ANY | DDI_UNBND_DLPI2 | DDI_PROP_ROOTNEX_GLOBAL)) {
#ifdef DEBUG
		if (dip != NULL) {
			cmn_err(CE_WARN, "ddi_prop_lookup_int_array: "
			    "invalid flag 0x%x (prop = %s, node = %s%d)",
			    flags, name, ddi_driver_name(dip),
			    ddi_get_instance(dip));
		}
#endif /* DEBUG */
		flags &= DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
		    LDI_DEV_T_ANY | DDI_UNBND_DLPI2;
	}

	/* On success *data is kmem-allocated; caller frees via ddi_prop_free. */
	return (ddi_prop_lookup_common(match_dev, dip,
	    (flags | DDI_PROP_TYPE_INT), name, data,
	    nelements, ddi_prop_fm_decode_ints));
}
3812 3812
3813 3813 /*
3814 3814 * Get an array of 64 bit integer properties
3815 3815 */
int
ddi_prop_lookup_int64_array(dev_t match_dev, dev_info_t *dip, uint_t flags,
    char *name, int64_t **data, uint_t *nelements)
{
	/*
	 * Invalid flag bits are an error here (DEBUG warns first),
	 * matching ddi_prop_get_int64() rather than the 32-bit lookups.
	 */
	if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
	    LDI_DEV_T_ANY | DDI_UNBND_DLPI2 | DDI_PROP_ROOTNEX_GLOBAL)) {
#ifdef DEBUG
		if (dip != NULL) {
			cmn_err(CE_WARN, "ddi_prop_lookup_int64_array: "
			    "invalid flag 0x%x (prop = %s, node = %s%d)",
			    flags, name, ddi_driver_name(dip),
			    ddi_get_instance(dip));
		}
#endif /* DEBUG */
		return (DDI_PROP_INVAL_ARG);
	}

	/* DDI_PROP_NOTPROM is forced for the 64-bit lookup path. */
	return (ddi_prop_lookup_common(match_dev, dip,
	    (flags | DDI_PROP_TYPE_INT64 | DDI_PROP_NOTPROM),
	    name, data, nelements, ddi_prop_fm_decode_int64_array));
}
3837 3837
3838 3838 /*
3839 3839 * Update a single integer property. If the property exists on the drivers
3840 3840 * property list it updates, else it creates it.
3841 3841 */
3842 3842 int
3843 3843 ddi_prop_update_int(dev_t match_dev, dev_info_t *dip,
3844 3844 char *name, int data)
3845 3845 {
3846 3846 return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_INT,
3847 3847 name, &data, 1, ddi_prop_fm_encode_ints));
3848 3848 }
3849 3849
/*
 * Update a single 64 bit integer property.
 * Update the driver property list if it exists, else create it.
 */
int
ddi_prop_update_int64(dev_t match_dev, dev_info_t *dip,
    char *name, int64_t data)
{
	return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_INT64,
	    name, &data, 1, ddi_prop_fm_encode_int64));
}

/*
 * Update a single integer property on the system-defined property list
 * (DDI_PROP_SYSTEM_DEF variant of ddi_prop_update_int).
 */
int
e_ddi_prop_update_int(dev_t match_dev, dev_info_t *dip,
    char *name, int data)
{
	return (ddi_prop_update_common(match_dev, dip,
	    DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_INT,
	    name, &data, 1, ddi_prop_fm_encode_ints));
}

/*
 * Update a single 64 bit integer property on the system-defined
 * property list.
 */
int
e_ddi_prop_update_int64(dev_t match_dev, dev_info_t *dip,
    char *name, int64_t data)
{
	return (ddi_prop_update_common(match_dev, dip,
	    DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_INT64,
	    name, &data, 1, ddi_prop_fm_encode_int64));
}

/*
 * Update an array of integer property. If the property exists on the drivers
 * property list it updates, else it creates it.
 */
int
ddi_prop_update_int_array(dev_t match_dev, dev_info_t *dip,
    char *name, int *data, uint_t nelements)
{
	return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_INT,
	    name, data, nelements, ddi_prop_fm_encode_ints));
}

/*
 * Update an array of 64 bit integer properties.
 * Update the driver property list if it exists, else create it.
 */
int
ddi_prop_update_int64_array(dev_t match_dev, dev_info_t *dip,
    char *name, int64_t *data, uint_t nelements)
{
	return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_INT64,
	    name, data, nelements, ddi_prop_fm_encode_int64));
}

/*
 * Update an array of 64 bit integer properties on the system-defined
 * property list.
 */
int
e_ddi_prop_update_int64_array(dev_t match_dev, dev_info_t *dip,
    char *name, int64_t *data, uint_t nelements)
{
	return (ddi_prop_update_common(match_dev, dip,
	    DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_INT64,
	    name, data, nelements, ddi_prop_fm_encode_int64));
}

/*
 * Update an array of integer properties on the system-defined
 * property list.
 */
int
e_ddi_prop_update_int_array(dev_t match_dev, dev_info_t *dip,
    char *name, int *data, uint_t nelements)
{
	return (ddi_prop_update_common(match_dev, dip,
	    DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_INT,
	    name, data, nelements, ddi_prop_fm_encode_ints));
}
3921 3921
3922 3922 /*
3923 3923 * Get a single string property.
3924 3924 */
3925 3925 int
3926 3926 ddi_prop_lookup_string(dev_t match_dev, dev_info_t *dip, uint_t flags,
3927 3927 char *name, char **data)
3928 3928 {
3929 3929 uint_t x;
3930 3930
3931 3931 if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
3932 3932 LDI_DEV_T_ANY | DDI_UNBND_DLPI2 | DDI_PROP_ROOTNEX_GLOBAL)) {
3933 3933 #ifdef DEBUG
3934 3934 if (dip != NULL) {
3935 3935 cmn_err(CE_WARN, "%s: invalid flag 0x%x "
3936 3936 "(prop = %s, node = %s%d); invalid bits ignored",
3937 3937 "ddi_prop_lookup_string", flags, name,
3938 3938 ddi_driver_name(dip), ddi_get_instance(dip));
3939 3939 }
3940 3940 #endif /* DEBUG */
3941 3941 flags &= DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
3942 3942 LDI_DEV_T_ANY | DDI_UNBND_DLPI2;
3943 3943 }
3944 3944
3945 3945 return (ddi_prop_lookup_common(match_dev, dip,
3946 3946 (flags | DDI_PROP_TYPE_STRING), name, data,
3947 3947 &x, ddi_prop_fm_decode_string));
3948 3948 }
3949 3949
3950 3950 /*
3951 3951 * Get an array of strings property.
3952 3952 */
3953 3953 int
3954 3954 ddi_prop_lookup_string_array(dev_t match_dev, dev_info_t *dip, uint_t flags,
3955 3955 char *name, char ***data, uint_t *nelements)
3956 3956 {
3957 3957 if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
3958 3958 LDI_DEV_T_ANY | DDI_UNBND_DLPI2 | DDI_PROP_ROOTNEX_GLOBAL)) {
3959 3959 #ifdef DEBUG
3960 3960 if (dip != NULL) {
3961 3961 cmn_err(CE_WARN, "ddi_prop_lookup_string_array: "
3962 3962 "invalid flag 0x%x (prop = %s, node = %s%d)",
3963 3963 flags, name, ddi_driver_name(dip),
3964 3964 ddi_get_instance(dip));
3965 3965 }
3966 3966 #endif /* DEBUG */
3967 3967 flags &= DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
3968 3968 LDI_DEV_T_ANY | DDI_UNBND_DLPI2;
3969 3969 }
3970 3970
3971 3971 return (ddi_prop_lookup_common(match_dev, dip,
3972 3972 (flags | DDI_PROP_TYPE_STRING), name, data,
3973 3973 nelements, ddi_prop_fm_decode_strings));
3974 3974 }
3975 3975
/*
 * Update a single string property.
 */
int
ddi_prop_update_string(dev_t match_dev, dev_info_t *dip,
    char *name, char *data)
{
	return (ddi_prop_update_common(match_dev, dip,
	    DDI_PROP_TYPE_STRING, name, &data, 1,
	    ddi_prop_fm_encode_string));
}

/*
 * Update a single string property on the system-defined property list.
 */
int
e_ddi_prop_update_string(dev_t match_dev, dev_info_t *dip,
    char *name, char *data)
{
	return (ddi_prop_update_common(match_dev, dip,
	    DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_STRING,
	    name, &data, 1, ddi_prop_fm_encode_string));
}


/*
 * Update an array of strings property.
 */
int
ddi_prop_update_string_array(dev_t match_dev, dev_info_t *dip,
    char *name, char **data, uint_t nelements)
{
	return (ddi_prop_update_common(match_dev, dip,
	    DDI_PROP_TYPE_STRING, name, data, nelements,
	    ddi_prop_fm_encode_strings));
}

/*
 * Update an array of strings property on the system-defined
 * property list.
 */
int
e_ddi_prop_update_string_array(dev_t match_dev, dev_info_t *dip,
    char *name, char **data, uint_t nelements)
{
	return (ddi_prop_update_common(match_dev, dip,
	    DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_STRING,
	    name, data, nelements,
	    ddi_prop_fm_encode_strings));
}
4019 4019
4020 4020
4021 4021 /*
4022 4022 * Get an array of bytes property.
4023 4023 */
4024 4024 int
4025 4025 ddi_prop_lookup_byte_array(dev_t match_dev, dev_info_t *dip, uint_t flags,
4026 4026 char *name, uchar_t **data, uint_t *nelements)
4027 4027 {
4028 4028 if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
4029 4029 LDI_DEV_T_ANY | DDI_UNBND_DLPI2 | DDI_PROP_ROOTNEX_GLOBAL)) {
4030 4030 #ifdef DEBUG
4031 4031 if (dip != NULL) {
4032 4032 cmn_err(CE_WARN, "ddi_prop_lookup_byte_array: "
4033 4033 " invalid flag 0x%x (prop = %s, node = %s%d)",
4034 4034 flags, name, ddi_driver_name(dip),
4035 4035 ddi_get_instance(dip));
4036 4036 }
4037 4037 #endif /* DEBUG */
4038 4038 flags &= DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
4039 4039 LDI_DEV_T_ANY | DDI_UNBND_DLPI2;
4040 4040 }
4041 4041
4042 4042 return (ddi_prop_lookup_common(match_dev, dip,
4043 4043 (flags | DDI_PROP_TYPE_BYTE), name, data,
4044 4044 nelements, ddi_prop_fm_decode_bytes));
4045 4045 }
4046 4046
/*
 * Update an array of bytes property.
 * An empty byte array is not a legal value (use ddi_prop_create/undefine
 * style interfaces for boolean properties), hence the nelements check.
 */
int
ddi_prop_update_byte_array(dev_t match_dev, dev_info_t *dip,
    char *name, uchar_t *data, uint_t nelements)
{
	if (nelements == 0)
		return (DDI_PROP_INVAL_ARG);

	return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_BYTE,
	    name, data, nelements, ddi_prop_fm_encode_bytes));
}


/*
 * Update an array of bytes property on the system-defined property list.
 */
int
e_ddi_prop_update_byte_array(dev_t match_dev, dev_info_t *dip,
    char *name, uchar_t *data, uint_t nelements)
{
	if (nelements == 0)
		return (DDI_PROP_INVAL_ARG);

	return (ddi_prop_update_common(match_dev, dip,
	    DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_BYTE,
	    name, data, nelements, ddi_prop_fm_encode_bytes));
}
4073 4073
4074 4074
/*
 * ddi_prop_remove_common:	Undefine a managed property:
 *			Input dev_t must match dev_t when defined.
 *			Returns DDI_PROP_NOT_FOUND, possibly.
 *			DDI_PROP_INVAL_ARG is also possible if dev is
 *			DDI_DEV_T_ANY or incoming name is the NULL string.
 */
int
ddi_prop_remove_common(dev_t dev, dev_info_t *dip, char *name, int flag)
{
	ddi_prop_t	**list_head = &(DEVI(dip)->devi_drv_prop_ptr);
	ddi_prop_t	*propp;
	ddi_prop_t	*lastpropp = NULL;

	if ((dev == DDI_DEV_T_ANY) || (name == (char *)0) ||
	    (strlen(name) == 0)) {
		return (DDI_PROP_INVAL_ARG);
	}

	/* Select the list (driver, system, or hardware) to search. */
	if (flag & DDI_PROP_SYSTEM_DEF)
		list_head = &(DEVI(dip)->devi_sys_prop_ptr);
	else if (flag & DDI_PROP_HW_DEF)
		list_head = &(DEVI(dip)->devi_hw_prop_ptr);

	mutex_enter(&(DEVI(dip)->devi_lock));

	for (propp = *list_head; propp != NULL; propp = propp->prop_next) {
		/* Match requires both the name and the defining dev_t. */
		if (DDI_STRSAME(propp->prop_name, name) &&
		    (dev == propp->prop_dev)) {
			/*
			 * Unlink this propp allowing for it to
			 * be first in the list:
			 */

			if (lastpropp == NULL)
				*list_head = propp->prop_next;
			else
				lastpropp->prop_next = propp->prop_next;

			/* Drop the lock before freeing the node. */
			mutex_exit(&(DEVI(dip)->devi_lock));

			/*
			 * Free memory and return...
			 */
			kmem_free(propp->prop_name,
			    strlen(propp->prop_name) + 1);
			if (propp->prop_len != 0)
				kmem_free(propp->prop_val, propp->prop_len);
			kmem_free(propp, sizeof (ddi_prop_t));
			return (DDI_PROP_SUCCESS);
		}
		lastpropp = propp;
	}
	mutex_exit(&(DEVI(dip)->devi_lock));
	return (DDI_PROP_NOT_FOUND);
}
4131 4131
/* Remove a driver-defined property (see ddi_prop_remove_common). */
int
ddi_prop_remove(dev_t dev, dev_info_t *dip, char *name)
{
	return (ddi_prop_remove_common(dev, dip, name, 0));
}

/* Remove a system-defined property (see ddi_prop_remove_common). */
int
e_ddi_prop_remove(dev_t dev, dev_info_t *dip, char *name)
{
	return (ddi_prop_remove_common(dev, dip, name, DDI_PROP_SYSTEM_DEF));
}

/*
 * e_ddi_prop_list_delete: remove a list of properties
 *	Note that the caller needs to provide the required protection
 *	(eg. devi_lock if these properties are still attached to a devi)
 */
void
e_ddi_prop_list_delete(ddi_prop_t *props)
{
	i_ddi_prop_list_delete(props);
}
4154 4154
/*
 * ddi_prop_remove_all_common:
 *	Used before unloading a driver to remove
 *	all properties. (undefines all dev_t's props.)
 *	Also removes `explicitly undefined' props.
 *	No errors possible.
 */
void
ddi_prop_remove_all_common(dev_info_t *dip, int flag)
{
	ddi_prop_t	**list_head;

	mutex_enter(&(DEVI(dip)->devi_lock));
	/* Select the list (system, hardware, or driver) to wipe. */
	if (flag & DDI_PROP_SYSTEM_DEF) {
		list_head = &(DEVI(dip)->devi_sys_prop_ptr);
	} else if (flag & DDI_PROP_HW_DEF) {
		list_head = &(DEVI(dip)->devi_hw_prop_ptr);
	} else {
		list_head = &(DEVI(dip)->devi_drv_prop_ptr);
	}
	i_ddi_prop_list_delete(*list_head);
	*list_head = NULL;
	mutex_exit(&(DEVI(dip)->devi_lock));
}


/*
 * ddi_prop_remove_all:		Remove all driver prop definitions.
 */

void
ddi_prop_remove_all(dev_info_t *dip)
{
	/* Also detach any driver dynamic-property table. */
	i_ddi_prop_dyn_driver_set(dip, NULL);
	ddi_prop_remove_all_common(dip, 0);
}

/*
 * e_ddi_prop_remove_all:	Remove all system prop definitions.
 */

void
e_ddi_prop_remove_all(dev_info_t *dip)
{
	ddi_prop_remove_all_common(dip, (int)DDI_PROP_SYSTEM_DEF);
}
4201 4201
4202 4202
4203 4203 /*
4204 4204 * ddi_prop_undefine: Explicitly undefine a property. Property
4205 4205 * searches which match this property return
4206 4206 * the error code DDI_PROP_UNDEFINED.
4207 4207 *
4208 4208 * Use ddi_prop_remove to negate effect of
4209 4209 * ddi_prop_undefine
4210 4210 *
4211 4211 * See above for error returns.
4212 4212 */
4213 4213
4214 4214 int
4215 4215 ddi_prop_undefine(dev_t dev, dev_info_t *dip, int flag, char *name)
4216 4216 {
4217 4217 if (!(flag & DDI_PROP_CANSLEEP))
4218 4218 flag |= DDI_PROP_DONTSLEEP;
4219 4219 flag |= DDI_PROP_STACK_CREATE | DDI_PROP_UNDEF_IT | DDI_PROP_TYPE_ANY;
4220 4220 return (ddi_prop_update_common(dev, dip, flag,
4221 4221 name, NULL, 0, ddi_prop_fm_encode_bytes));
4222 4222 }
4223 4223
4224 4224 int
4225 4225 e_ddi_prop_undefine(dev_t dev, dev_info_t *dip, int flag, char *name)
4226 4226 {
4227 4227 if (!(flag & DDI_PROP_CANSLEEP))
4228 4228 flag |= DDI_PROP_DONTSLEEP;
4229 4229 flag |= DDI_PROP_SYSTEM_DEF | DDI_PROP_STACK_CREATE |
4230 4230 DDI_PROP_UNDEF_IT | DDI_PROP_TYPE_ANY;
4231 4231 return (ddi_prop_update_common(dev, dip, flag,
4232 4232 name, NULL, 0, ddi_prop_fm_encode_bytes));
4233 4233 }
4234 4234
/*
 * Support for gathering dynamic properties in devinfo snapshot.
 * The set/get pairs below record per-node tables of dynamic properties
 * (driver-provided and parent-provided) used when building a snapshot.
 */
void
i_ddi_prop_dyn_driver_set(dev_info_t *dip, i_ddi_prop_dyn_t *dp)
{
	DEVI(dip)->devi_prop_dyn_driver = dp;
}

i_ddi_prop_dyn_t *
i_ddi_prop_dyn_driver_get(dev_info_t *dip)
{
	return (DEVI(dip)->devi_prop_dyn_driver);
}

void
i_ddi_prop_dyn_parent_set(dev_info_t *dip, i_ddi_prop_dyn_t *dp)
{
	DEVI(dip)->devi_prop_dyn_parent = dp;
}

i_ddi_prop_dyn_t *
i_ddi_prop_dyn_parent_get(dev_info_t *dip)
{
	return (DEVI(dip)->devi_prop_dyn_parent);
}

void
i_ddi_prop_dyn_cache_invalidate(dev_info_t *dip, i_ddi_prop_dyn_t *dp)
{
	/* for now we invalidate the entire cached snapshot */
	if (dip && dp)
		i_ddi_di_cache_invalidate();
}

/* ARGSUSED */
void
ddi_prop_cache_invalidate(dev_t dev, dev_info_t *dip, char *name, int flags)
{
	/* for now we invalidate the entire cached snapshot */
	i_ddi_di_cache_invalidate();
}
4277 4277
4278 4278
/*
 * Code to search hardware layer (PROM), if it exists, on behalf of child.
 *
 * if input dip != child_dip, then call is on behalf of child
 * to search PROM, do it via ddi_prop_search_common() and ascend only
 * if allowed.
 *
 * if input dip == ch_dip (child_dip), call is on behalf of root driver,
 * to search for PROM defined props only.
 *
 * Note that the PROM search is done only if the requested dev
 * is either DDI_DEV_T_ANY or DDI_DEV_T_NONE. PROM properties
 * have no associated dev, thus are automatically associated with
 * DDI_DEV_T_NONE.
 *
 * Modifying flag DDI_PROP_NOTPROM inhibits the search in the h/w layer.
 *
 * Returns DDI_PROP_FOUND_1275 if found to indicate to framework
 * that the property resides in the prom.
 */
int
impl_ddi_bus_prop_op(dev_t dev, dev_info_t *dip, dev_info_t *ch_dip,
    ddi_prop_op_t prop_op, int mod_flags,
    char *name, caddr_t valuep, int *lengthp)
{
	int len;
	caddr_t buffer;

	/*
	 * If requested dev is DDI_DEV_T_NONE or DDI_DEV_T_ANY, then
	 * look in caller's PROM if it's a self identifying device...
	 *
	 * Note that this is very similar to ddi_prop_op, but we
	 * search the PROM instead of the s/w defined properties,
	 * and we are called on by the parent driver to do this for
	 * the child.
	 */

	if (((dev == DDI_DEV_T_NONE) || (dev == DDI_DEV_T_ANY)) &&
	    ndi_dev_is_prom_node(ch_dip) &&
	    ((mod_flags & DDI_PROP_NOTPROM) == 0)) {
		len = prom_getproplen((pnode_t)DEVI(ch_dip)->devi_nodeid, name);
		if (len == -1) {
			return (DDI_PROP_NOT_FOUND);
		}

		/*
		 * If exists only request, we're done
		 */
		if (prop_op == PROP_EXISTS) {
			return (DDI_PROP_FOUND_1275);
		}

		/*
		 * If length only request or prop length == 0, get out
		 */
		if ((prop_op == PROP_LEN) || (len == 0)) {
			*lengthp = len;
			return (DDI_PROP_FOUND_1275);
		}

		/*
		 * Allocate buffer if required... (either way `buffer'
		 * is receiving address).
		 */

		switch (prop_op) {

		case PROP_LEN_AND_VAL_ALLOC:

			buffer = kmem_alloc((size_t)len,
			    mod_flags & DDI_PROP_CANSLEEP ?
			    KM_SLEEP : KM_NOSLEEP);
			if (buffer == NULL) {
				return (DDI_PROP_NO_MEMORY);
			}
			*(caddr_t *)valuep = buffer;
			break;

		case PROP_LEN_AND_VAL_BUF:

			if (len > (*lengthp)) {
				*lengthp = len;
				return (DDI_PROP_BUF_TOO_SMALL);
			}

			buffer = valuep;
			break;

		default:
			/*
			 * NOTE(review): any other prop_op value would leave
			 * `buffer' unset before the prom_getprop() below.
			 * PROP_EXISTS and PROP_LEN are handled above, so
			 * this arm appears unreachable for the defined
			 * ddi_prop_op_t values -- confirm no new ops are
			 * routed here.
			 */
			break;
		}

		/*
		 * Call the PROM function to do the copy.
		 */
		(void) prom_getprop((pnode_t)DEVI(ch_dip)->devi_nodeid,
		    name, buffer);

		*lengthp = len; /* return the actual length to the caller */
		(void) impl_fix_props(dip, ch_dip, name, len, buffer);
		return (DDI_PROP_FOUND_1275);
	}

	return (DDI_PROP_NOT_FOUND);
}
4385 4385
/*
 * The ddi_bus_prop_op default bus nexus prop op function.
 *
 * Code to search hardware layer (PROM), if it exists,
 * on behalf of child, then, if appropriate, ascend and check
 * my own software defined properties...
 */
int
ddi_bus_prop_op(dev_t dev, dev_info_t *dip, dev_info_t *ch_dip,
    ddi_prop_op_t prop_op, int mod_flags,
    char *name, caddr_t valuep, int *lengthp)
{
	int	error;

	/* Try the PROM (hardware layer) first. */
	error = impl_ddi_bus_prop_op(dev, dip, ch_dip, prop_op, mod_flags,
	    name, valuep, lengthp);

	if (error == DDI_PROP_SUCCESS || error == DDI_PROP_FOUND_1275 ||
	    error == DDI_PROP_BUF_TOO_SMALL)
		return (error);

	if (error == DDI_PROP_NO_MEMORY) {
		cmn_err(CE_CONT, prop_no_mem_msg, name);
		return (DDI_PROP_NO_MEMORY);
	}

	/*
	 * Check the 'options' node as a last resort
	 */
	if ((mod_flags & DDI_PROP_DONTPASS) != 0)
		return (DDI_PROP_NOT_FOUND);

	if (ch_dip == ddi_root_node())	{
		/*
		 * As a last resort, when we've reached
		 * the top and still haven't found the
		 * property, see if the desired property
		 * is attached to the options node.
		 *
		 * The options dip is attached right after boot.
		 */
		ASSERT(options_dip != NULL);
		/*
		 * Force the "don't pass" flag to *just* see
		 * what the options node has to offer.
		 */
		return (ddi_prop_search_common(dev, options_dip, prop_op,
		    mod_flags|DDI_PROP_DONTPASS, name, valuep,
		    (uint_t *)lengthp));
	}

	/*
	 * Otherwise, continue search with parent's s/w defined properties...
	 * NOTE: Using `dip' in following call increments the level.
	 */

	return (ddi_prop_search_common(dev, dip, prop_op, mod_flags,
	    name, valuep, (uint_t *)lengthp));
}
4445 4445
/*
 * External property functions used by other parts of the kernel...
 */

/*
 * e_ddi_getlongprop: See comments for ddi_get_longprop.
 * Looks the property up by dev_t (holding the devinfo for the duration)
 * and has the framework allocate the result buffer
 * (PROP_LEN_AND_VAL_ALLOC).
 */

int
e_ddi_getlongprop(dev_t dev, vtype_t type, char *name, int flags,
    caddr_t valuep, int *lengthp)
{
	_NOTE(ARGUNUSED(type))
	dev_info_t *devi;
	ddi_prop_op_t prop_op = PROP_LEN_AND_VAL_ALLOC;
	int error;

	if ((devi = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
		return (DDI_PROP_NOT_FOUND);

	error = cdev_prop_op(dev, devi, prop_op, flags, name, valuep, lengthp);
	ddi_release_devi(devi);
	return (error);
}

/*
 * e_ddi_getlongprop_buf:	See comments for ddi_getlongprop_buf.
 * Same as e_ddi_getlongprop() but the caller supplies the buffer
 * (PROP_LEN_AND_VAL_BUF).
 */

int
e_ddi_getlongprop_buf(dev_t dev, vtype_t type, char *name, int flags,
    caddr_t valuep, int *lengthp)
{
	_NOTE(ARGUNUSED(type))
	dev_info_t *devi;
	ddi_prop_op_t prop_op = PROP_LEN_AND_VAL_BUF;
	int error;

	if ((devi = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
		return (DDI_PROP_NOT_FOUND);

	error = cdev_prop_op(dev, devi, prop_op, flags, name, valuep, lengthp);
	ddi_release_devi(devi);
	return (error);
}
4491 4491
4492 4492 /*
4493 4493 * e_ddi_getprop: See comments for ddi_getprop.
4494 4494 */
4495 4495 int
4496 4496 e_ddi_getprop(dev_t dev, vtype_t type, char *name, int flags, int defvalue)
4497 4497 {
4498 4498 _NOTE(ARGUNUSED(type))
4499 4499 dev_info_t *devi;
4500 4500 ddi_prop_op_t prop_op = PROP_LEN_AND_VAL_BUF;
4501 4501 int propvalue = defvalue;
4502 4502 int proplength = sizeof (int);
4503 4503 int error;
4504 4504
4505 4505 if ((devi = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
4506 4506 return (defvalue);
4507 4507
4508 4508 error = cdev_prop_op(dev, devi, prop_op,
4509 4509 flags, name, (caddr_t)&propvalue, &proplength);
4510 4510 ddi_release_devi(devi);
4511 4511
4512 4512 if ((error == DDI_PROP_SUCCESS) && (proplength == 0))
4513 4513 propvalue = 1;
4514 4514
4515 4515 return (propvalue);
4516 4516 }
4517 4517
/*
 * e_ddi_getprop_int64:
 *
 * This is a typed interfaces, but predates typed properties. With the
 * introduction of typed properties the framework tries to ensure
 * consistent use of typed interfaces. This is why TYPE_INT64 is not
 * part of TYPE_ANY. E_ddi_getprop_int64 is a special case where a
 * typed interface invokes legacy (non-typed) interfaces:
 * cdev_prop_op(), prop_op(9E), ddi_prop_op(9F)). In this case the
 * fact that TYPE_INT64 is not part of TYPE_ANY matters. To support
 * this type of lookup as a single operation we invoke the legacy
 * non-typed interfaces with the special CONSUMER_TYPED bit set. The
 * framework ddi_prop_op(9F) implementation is expected to check for
 * CONSUMER_TYPED and, if set, expand type bits beyond TYPE_ANY
 * (currently TYPE_INT64).
 */
int64_t
e_ddi_getprop_int64(dev_t dev, vtype_t type, char *name,
    int flags, int64_t defvalue)
{
	_NOTE(ARGUNUSED(type))
	dev_info_t	*devi;
	ddi_prop_op_t	prop_op = PROP_LEN_AND_VAL_BUF;
	int64_t		propvalue = defvalue;
	int		proplength = sizeof (propvalue);
	int		error;

	/* Return the default if the device cannot be held. */
	if ((devi = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
		return (defvalue);

	error = cdev_prop_op(dev, devi, prop_op, flags |
	    DDI_PROP_CONSUMER_TYPED, name, (caddr_t)&propvalue, &proplength);
	ddi_release_devi(devi);

	/* A zero-length property exists but carries no value: report 1. */
	if ((error == DDI_PROP_SUCCESS) && (proplength == 0))
		propvalue = 1;

	return (propvalue);
}
4557 4557
4558 4558 /*
4559 4559 * e_ddi_getproplen: See comments for ddi_getproplen.
4560 4560 */
4561 4561 int
4562 4562 e_ddi_getproplen(dev_t dev, vtype_t type, char *name, int flags, int *lengthp)
4563 4563 {
4564 4564 _NOTE(ARGUNUSED(type))
4565 4565 dev_info_t *devi;
4566 4566 ddi_prop_op_t prop_op = PROP_LEN;
4567 4567 int error;
4568 4568
4569 4569 if ((devi = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
4570 4570 return (DDI_PROP_NOT_FOUND);
4571 4571
4572 4572 error = cdev_prop_op(dev, devi, prop_op, flags, name, NULL, lengthp);
4573 4573 ddi_release_devi(devi);
4574 4574 return (error);
4575 4575 }
4576 4576
/*
 * Routines to get at elements of the dev_info structure
 */

/*
 * ddi_binding_name: Return the driver binding name of the devinfo node
 *		This is the name the OS used to bind the node to a driver.
 */
char *
ddi_binding_name(dev_info_t *dip)
{
	return (DEVI(dip)->devi_binding_name);
}

/*
 * ddi_driver_major: Return the major number of the driver that
 *	the supplied devinfo is bound to.  If not yet bound,
 *	DDI_MAJOR_T_NONE.
 *
 * When used by the driver bound to 'devi', this
 * function will reliably return the driver major number.
 * Other ways of determining the driver major number, such as
 *	major = ddi_name_to_major(ddi_get_name(devi));
 *	major = ddi_name_to_major(ddi_binding_name(devi));
 * can return a different result as the driver/alias binding
 * can change dynamically, and thus should be avoided.
 */
major_t
ddi_driver_major(dev_info_t *devi)
{
	return (DEVI(devi)->devi_major);
}

/*
 * ddi_driver_name: Return the normalized driver name. this is the
 *		actual driver name
 * Falls back to the node name when the node is not bound to a driver.
 */
const char *
ddi_driver_name(dev_info_t *devi)
{
	major_t major;

	if ((major = ddi_driver_major(devi)) != DDI_MAJOR_T_NONE)
		return (ddi_major_to_name(major));

	return (ddi_node_name(devi));
}

/*
 * i_ddi_set_binding_name:	Set binding name.
 *
 *	Set the binding name to the given name.
 *	This routine is for use by the ddi implementation, not by drivers.
 */
void
i_ddi_set_binding_name(dev_info_t *dip, char *name)
{
	DEVI(dip)->devi_binding_name = name;

}

/*
 * ddi_get_name: A synonym of ddi_binding_name() ... returns a name
 * the implementation has used to bind the node to a driver.
 */
char *
ddi_get_name(dev_info_t *dip)
{
	return (DEVI(dip)->devi_binding_name);
}

/*
 * ddi_node_name: Return the name property of the devinfo node
 *		This may differ from ddi_binding_name if the node name
 *		does not define a binding to a driver (i.e. generic names).
 */
char *
ddi_node_name(dev_info_t *dip)
{
	return (DEVI(dip)->devi_node_name);
}
4658 4658
4659 4659
/*
 * ddi_get_nodeid:	Get nodeid stored in dev_info structure.
 */
int
ddi_get_nodeid(dev_info_t *dip)
{
	return (DEVI(dip)->devi_nodeid);
}

/* Return the instance number assigned to this devinfo node. */
int
ddi_get_instance(dev_info_t *dip)
{
	return (DEVI(dip)->devi_instance);
}

/* Return the node's dev_ops pointer. */
struct dev_ops *
ddi_get_driver(dev_info_t *dip)
{
	return (DEVI(dip)->devi_ops);
}

/* Set the node's dev_ops pointer. */
void
ddi_set_driver(dev_info_t *dip, struct dev_ops *devo)
{
	DEVI(dip)->devi_ops = devo;
}

/*
 * ddi_set_driver_private/ddi_get_driver_private:
 * Get/set device driver private data in devinfo.
 */
void
ddi_set_driver_private(dev_info_t *dip, void *data)
{
	DEVI(dip)->devi_driver_data = data;
}

void *
ddi_get_driver_private(dev_info_t *dip)
{
	return (DEVI(dip)->devi_driver_data);
}

/*
 * ddi_get_parent, ddi_get_child, ddi_get_next_sibling
 * Devinfo tree traversal accessors.
 */

dev_info_t *
ddi_get_parent(dev_info_t *dip)
{
	return ((dev_info_t *)DEVI(dip)->devi_parent);
}

dev_info_t *
ddi_get_child(dev_info_t *dip)
{
	return ((dev_info_t *)DEVI(dip)->devi_child);
}

dev_info_t *
ddi_get_next_sibling(dev_info_t *dip)
{
	return ((dev_info_t *)DEVI(dip)->devi_sibling);
}

/*
 * Accessors for devi_next linkage.
 * NOTE(review): devi_next appears to chain nodes outside the
 * parent/child/sibling tree (presumably the per-driver instance
 * list) -- confirm against the devinfo definition.
 */
dev_info_t *
ddi_get_next(dev_info_t *dip)
{
	return ((dev_info_t *)DEVI(dip)->devi_next);
}

void
ddi_set_next(dev_info_t *dip, dev_info_t *nextdip)
{
	DEVI(dip)->devi_next = DEVI(nextdip);
}

/*
 * ddi_root_node:		Return root node of devinfo tree
 */

dev_info_t *
ddi_root_node(void)
{
	extern dev_info_t *top_devinfo;

	return (top_devinfo);
}
4748 4748
/*
 * Miscellaneous functions:
 */

/*
 * Implementation specific hooks
 */

/*
 * ddi_report_dev: hand the REPORTDEV ctlop to the node's nexus chain.
 * The former "<driver><instance> is <path>" console announcement was
 * removed (NEX-15925) as a useless device announcement.
 */
void
ddi_report_dev(dev_info_t *d)
{
	(void) ddi_ctlops(d, d, DDI_CTLOPS_REPORTDEV, (void *)0, (void *)0);
}
4777 4762
/*
 * ddi_ctlops() is described in the assembler not to buy a new register
 * window when it's called and can reduce cost in climbing the device tree
 * without using the tail call optimization.
 */
int
ddi_dev_regsize(dev_info_t *dev, uint_t rnumber, off_t *result)
{
	int ret;

	/* Ask the parent nexus for the size of register set `rnumber'. */
	ret = ddi_ctlops(dev, dev, DDI_CTLOPS_REGSIZE,
	    (void *)&rnumber, (void *)result);

	return (ret == DDI_SUCCESS ? DDI_SUCCESS : DDI_FAILURE);
}

/* Return the number of register sets via the NREGS ctlop. */
int
ddi_dev_nregs(dev_info_t *dev, int *result)
{
	return (ddi_ctlops(dev, dev, DDI_CTLOPS_NREGS, 0, (void *)result));
}

/* Ask the parent nexus whether the device is self-identifying. */
int
ddi_dev_is_sid(dev_info_t *d)
{
	return (ddi_ctlops(d, d, DDI_CTLOPS_SIDDEV, (void *)0, (void *)0));
}

/* Ask the parent nexus whether the device is slave-only. */
int
ddi_slaveonly(dev_info_t *d)
{
	return (ddi_ctlops(d, d, DDI_CTLOPS_SLAVEONLY, (void *)0, (void *)0));
}

/* Ask the parent nexus whether devices `a' and `b' have affinity. */
int
ddi_dev_affinity(dev_info_t *a, dev_info_t *b)
{
	return (ddi_ctlops(a, a, DDI_CTLOPS_AFFINITY, (void *)b, (void *)0));
}

/*
 * Return DDI_SUCCESS iff the node is attached and its cb_ops declares
 * a STREAMS entry (cb_str), i.e. it is a STREAMS driver.
 */
int
ddi_streams_driver(dev_info_t *dip)
{
	if (i_ddi_devi_attached(dip) &&
	    (DEVI(dip)->devi_ops->devo_cb_ops != NULL) &&
	    (DEVI(dip)->devi_ops->devo_cb_ops->cb_str != NULL))
		return (DDI_SUCCESS);
	return (DDI_FAILURE);
}
4827 4812
/*
 * callback free list
 */

static int ncallbacks;			/* total callbacks allocated */
static int nc_low = 170;		/* pool size for < 48MB systems */
static int nc_med = 512;		/* pool size for < 128MB systems */
static int nc_high = 2048;		/* pool size otherwise */
static struct ddi_callback *callbackq;		/* backing array */
static struct ddi_callback *callbackqfree;	/* head of free list */

/*
 * set/run callback lists
 * Named kstats exported as unix:0:cbstats.
 */
struct	cbstats	{
	kstat_named_t	cb_asked;
	kstat_named_t	cb_new;
	kstat_named_t	cb_run;
	kstat_named_t	cb_delete;
	kstat_named_t	cb_maxreq;
	kstat_named_t	cb_maxlist;
	kstat_named_t	cb_alloc;
	kstat_named_t	cb_runouts;
	kstat_named_t	cb_L2;
	kstat_named_t	cb_grow;
} cbstats = {
	{"asked",	KSTAT_DATA_UINT32},
	{"new",		KSTAT_DATA_UINT32},
	{"run",		KSTAT_DATA_UINT32},
	{"delete",	KSTAT_DATA_UINT32},
	{"maxreq",	KSTAT_DATA_UINT32},
	{"maxlist",	KSTAT_DATA_UINT32},
	{"alloc",	KSTAT_DATA_UINT32},
	{"runouts",	KSTAT_DATA_UINT32},
	{"L2",		KSTAT_DATA_UINT32},
	{"grow",	KSTAT_DATA_UINT32},
};

/* Shorthand accessors for the kstat counters above. */
#define	nc_asked	cb_asked.value.ui32
#define	nc_new		cb_new.value.ui32
#define	nc_run		cb_run.value.ui32
#define	nc_delete	cb_delete.value.ui32
#define	nc_maxreq	cb_maxreq.value.ui32
#define	nc_maxlist	cb_maxlist.value.ui32
#define	nc_alloc	cb_alloc.value.ui32
#define	nc_runouts	cb_runouts.value.ui32
#define	nc_L2		cb_L2.value.ui32
#define	nc_grow		cb_grow.value.ui32

static kmutex_t ddi_callback_mutex;	/* protects the callback free list */
4878 4863
4879 4864 /*
4880 4865 * callbacks are handled using a L1/L2 cache. The L1 cache
4881 4866 * comes out of kmem_cache_alloc and can expand/shrink dynamically. If
4882 4867 * we can't get callbacks from the L1 cache [because pageout is doing
4883 4868 * I/O at the time freemem is 0], we allocate callbacks out of the
4884 4869 * L2 cache. The L2 cache is static and depends on the memory size.
4885 4870 * [We might also count the number of devices at probe time and
4886 4871 * allocate one structure per device and adjust for deferred attach]
4887 4872 */
4888 4873 void
4889 4874 impl_ddi_callback_init(void)
4890 4875 {
4891 4876 int i;
4892 4877 uint_t physmegs;
4893 4878 kstat_t *ksp;
4894 4879
4895 4880 physmegs = physmem >> (20 - PAGESHIFT);
4896 4881 if (physmegs < 48) {
4897 4882 ncallbacks = nc_low;
4898 4883 } else if (physmegs < 128) {
4899 4884 ncallbacks = nc_med;
4900 4885 } else {
4901 4886 ncallbacks = nc_high;
4902 4887 }
4903 4888
4904 4889 /*
4905 4890 * init free list
4906 4891 */
4907 4892 callbackq = kmem_zalloc(
4908 4893 ncallbacks * sizeof (struct ddi_callback), KM_SLEEP);
4909 4894 for (i = 0; i < ncallbacks-1; i++)
4910 4895 callbackq[i].c_nfree = &callbackq[i+1];
4911 4896 callbackqfree = callbackq;
4912 4897
4913 4898 /* init kstats */
4914 4899 if (ksp = kstat_create("unix", 0, "cbstats", "misc", KSTAT_TYPE_NAMED,
4915 4900 sizeof (cbstats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL)) {
4916 4901 ksp->ks_data = (void *) &cbstats;
4917 4902 kstat_install(ksp);
4918 4903 }
4919 4904
4920 4905 }
4921 4906
/*
 * Record a callback request on the list identified by *listid.  The
 * caller must hold ddi_callback_mutex.  If an entry for the same
 * (function, argument) pair already exists, its count is bumped;
 * otherwise a new entry is allocated -- first from kmem, then from the
 * static L2 pool, and as a last resort via kmem_alloc_tryhard with
 * KM_PANIC (so this routine cannot fail; it may panic instead).
 */
static void
callback_insert(int (*funcp)(caddr_t), caddr_t arg, uintptr_t *listid,
	int count)
{
	struct ddi_callback *list, *marker, *new;
	size_t size = sizeof (struct ddi_callback);

	/* look for an existing entry for this (funcp, arg) pair */
	list = marker = (struct ddi_callback *)*listid;
	while (list != NULL) {
		if (list->c_call == funcp && list->c_arg == arg) {
			/* coalesce with the existing entry */
			list->c_count += count;
			return;
		}
		marker = list;
		list = list->c_nlist;
	}
	new = kmem_alloc(size, KM_NOSLEEP);
	if (new == NULL) {
		new = callbackqfree;
		if (new == NULL) {
			/* both caches empty: dig deep, panic on failure */
			new = kmem_alloc_tryhard(sizeof (struct ddi_callback),
			    &size, KM_NOSLEEP | KM_PANIC);
			cbstats.nc_grow++;
		} else {
			/* take the head of the static L2 free list */
			callbackqfree = new->c_nfree;
			cbstats.nc_L2++;
		}
	}
	/* append to the list; marker is the current tail, if any */
	if (marker != NULL) {
		marker->c_nlist = new;
	} else {
		*listid = (uintptr_t)new;
	}
	new->c_size = size;
	new->c_nlist = NULL;
	new->c_call = funcp;
	new->c_arg = arg;
	new->c_count = count;
	cbstats.nc_new++;
	cbstats.nc_alloc++;
	if (cbstats.nc_alloc > cbstats.nc_maxlist)
		cbstats.nc_maxlist = cbstats.nc_alloc;
}
4965 4950
4966 4951 void
4967 4952 ddi_set_callback(int (*funcp)(caddr_t), caddr_t arg, uintptr_t *listid)
4968 4953 {
4969 4954 mutex_enter(&ddi_callback_mutex);
4970 4955 cbstats.nc_asked++;
4971 4956 if ((cbstats.nc_asked - cbstats.nc_run) > cbstats.nc_maxreq)
4972 4957 cbstats.nc_maxreq = (cbstats.nc_asked - cbstats.nc_run);
4973 4958 (void) callback_insert(funcp, arg, listid, 1);
4974 4959 mutex_exit(&ddi_callback_mutex);
4975 4960 }
4976 4961
/*
 * Drain the callback list passed (via softcall) in Queue.  Each entry
 * is unlinked and released under ddi_callback_mutex, then its function
 * is invoked with the lock dropped.  A callback returning 0 is treated
 * as "not ready yet" and the remaining count is reinserted for a later
 * pass.  The outer loop runs until all requests counted in the initial
 * snapshot have been satisfied.
 */
static void
real_callback_run(void *Queue)
{
	int (*funcp)(caddr_t);
	caddr_t arg;
	int count, rval;
	uintptr_t *listid;
	struct ddi_callback *list, *marker;
	int check_pending = 1;
	int pending = 0;

	do {
		mutex_enter(&ddi_callback_mutex);
		listid = Queue;
		list = (struct ddi_callback *)*listid;
		if (list == NULL) {
			mutex_exit(&ddi_callback_mutex);
			return;
		}
		if (check_pending) {
			/* one-time snapshot of the total backlog */
			marker = list;
			while (marker != NULL) {
				pending += marker->c_count;
				marker = marker->c_nlist;
			}
			check_pending = 0;
		}
		ASSERT(pending > 0);
		ASSERT(list->c_count > 0);
		funcp = list->c_call;
		arg = list->c_arg;
		count = list->c_count;
		/* unlink the head entry before dropping the lock */
		*(uintptr_t *)Queue = (uintptr_t)list->c_nlist;
		/* pool entries go back on the L2 free list, rest to kmem */
		if (list >= &callbackq[0] &&
		    list <= &callbackq[ncallbacks-1]) {
			list->c_nfree = callbackqfree;
			callbackqfree = list;
		} else
			kmem_free(list, list->c_size);

		cbstats.nc_delete++;
		cbstats.nc_alloc--;
		mutex_exit(&ddi_callback_mutex);

		/* invoke the callback up to count times without the lock */
		do {
			if ((rval = (*funcp)(arg)) == 0) {
				/* ran out of resources again: requeue */
				pending -= count;
				mutex_enter(&ddi_callback_mutex);
				(void) callback_insert(funcp, arg, listid,
				    count);
				cbstats.nc_runouts++;
			} else {
				pending--;
				mutex_enter(&ddi_callback_mutex);
				cbstats.nc_run++;
			}
			mutex_exit(&ddi_callback_mutex);
		} while (rval != 0 && (--count > 0));
	} while (pending > 0);
}
5037 5022
/*
 * Schedule the callbacks registered on *listid to be run from a soft
 * interrupt (see real_callback_run).
 */
void
ddi_run_callback(uintptr_t *listid)
{
	softcall(real_callback_run, listid);
}
5043 5028
5044 5029 /*
5045 5030 * ddi_periodic_t
5046 5031 * ddi_periodic_add(void (*func)(void *), void *arg, hrtime_t interval,
5047 5032 * int level)
5048 5033 *
5049 5034 * INTERFACE LEVEL
5050 5035 * Solaris DDI specific (Solaris DDI)
5051 5036 *
5052 5037 * PARAMETERS
5053 5038 * func: the callback function
5054 5039 *
5055 5040 * The callback function will be invoked. The function is invoked
5056 5041 * in kernel context if the argument level passed is the zero.
5057 5042 * Otherwise it's invoked in interrupt context at the specified
5058 5043 * level.
5059 5044 *
5060 5045 * arg: the argument passed to the callback function
5061 5046 *
5062 5047 * interval: interval time
5063 5048 *
5064 5049 * level : callback interrupt level
5065 5050 *
5066 5051 * If the value is the zero, the callback function is invoked
5067 5052 * in kernel context. If the value is more than the zero, but
5068 5053 * less than or equal to ten, the callback function is invoked in
5069 5054 * interrupt context at the specified interrupt level, which may
5070 5055 * be used for real time applications.
5071 5056 *
5072 5057 * This value must be in range of 0-10, which can be a numeric
5073 5058 * number or a pre-defined macro (DDI_IPL_0, ... , DDI_IPL_10).
5074 5059 *
5075 5060 * DESCRIPTION
5076 5061 * ddi_periodic_add(9F) schedules the specified function to be
5077 5062 * periodically invoked in the interval time.
5078 5063 *
5079 5064 * As well as timeout(9F), the exact time interval over which the function
5080 5065 * takes effect cannot be guaranteed, but the value given is a close
5081 5066 * approximation.
5082 5067 *
5083 5068 * Drivers waiting on behalf of processes with real-time constraints must
5084 5069 * pass non-zero value with the level argument to ddi_periodic_add(9F).
5085 5070 *
5086 5071 * RETURN VALUES
5087 5072 * ddi_periodic_add(9F) returns a non-zero opaque value (ddi_periodic_t),
5088 5073 * which must be used for ddi_periodic_delete(9F) to specify the request.
5089 5074 *
5090 5075 * CONTEXT
5091 5076 * ddi_periodic_add(9F) can be called in user or kernel context, but
5092 5077 * it cannot be called in interrupt context, which is different from
5093 5078 * timeout(9F).
5094 5079 */
5095 5080 ddi_periodic_t
5096 5081 ddi_periodic_add(void (*func)(void *), void *arg, hrtime_t interval, int level)
5097 5082 {
5098 5083 /*
5099 5084 * Sanity check of the argument level.
5100 5085 */
5101 5086 if (level < DDI_IPL_0 || level > DDI_IPL_10)
5102 5087 cmn_err(CE_PANIC,
5103 5088 "ddi_periodic_add: invalid interrupt level (%d).", level);
5104 5089
5105 5090 /*
5106 5091 * Sanity check of the context. ddi_periodic_add() cannot be
5107 5092 * called in either interrupt context or high interrupt context.
5108 5093 */
5109 5094 if (servicing_interrupt())
5110 5095 cmn_err(CE_PANIC,
5111 5096 "ddi_periodic_add: called in (high) interrupt context.");
5112 5097
5113 5098 return ((ddi_periodic_t)i_timeout(func, arg, interval, level));
5114 5099 }
5115 5100
5116 5101 /*
5117 5102 * void
5118 5103 * ddi_periodic_delete(ddi_periodic_t req)
5119 5104 *
5120 5105 * INTERFACE LEVEL
5121 5106 * Solaris DDI specific (Solaris DDI)
5122 5107 *
5123 5108 * PARAMETERS
5124 5109 * req: ddi_periodic_t opaque value ddi_periodic_add(9F) returned
5125 5110 * previously.
5126 5111 *
5127 5112 * DESCRIPTION
5128 5113 * ddi_periodic_delete(9F) cancels the ddi_periodic_add(9F) request
5129 5114 * previously requested.
5130 5115 *
5131 5116 * ddi_periodic_delete(9F) will not return until the pending request
5132 5117 * is canceled or executed.
5133 5118 *
5134 5119 * As well as untimeout(9F), calling ddi_periodic_delete(9F) for a
5135 5120 * timeout which is either running on another CPU, or has already
5136 5121 * completed causes no problems. However, unlike untimeout(9F), there is
5137 5122 * no restrictions on the lock which might be held across the call to
5138 5123 * ddi_periodic_delete(9F).
5139 5124 *
5140 5125 * Drivers should be structured with the understanding that the arrival of
5141 5126 * both an interrupt and a timeout for that interrupt can occasionally
5142 5127 * occur, in either order.
5143 5128 *
5144 5129 * CONTEXT
5145 5130 * ddi_periodic_delete(9F) can be called in user or kernel context, but
5146 5131 * it cannot be called in interrupt context, which is different from
5147 5132 * untimeout(9F).
5148 5133 */
5149 5134 void
5150 5135 ddi_periodic_delete(ddi_periodic_t req)
5151 5136 {
5152 5137 /*
5153 5138 * Sanity check of the context. ddi_periodic_delete() cannot be
5154 5139 * called in either interrupt context or high interrupt context.
5155 5140 */
5156 5141 if (servicing_interrupt())
5157 5142 cmn_err(CE_PANIC,
5158 5143 "ddi_periodic_delete: called in (high) interrupt context.");
5159 5144
5160 5145 i_untimeout((timeout_t)req);
5161 5146 }
5162 5147
/* getinfo-style helper that never yields a devinfo node */
dev_info_t *
nodevinfo(dev_t dev, int otyp)
{
	_NOTE(ARGUNUSED(dev, otyp))
	return ((dev_info_t *)0);
}

/*
 * A driver should support its own getinfo(9E) entry point. This function
 * is provided as a convenience for ON drivers that don't expect their
 * getinfo(9E) entry point to be called. A driver that uses this must not
 * call ddi_create_minor_node.
 */
int
ddi_no_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
{
	_NOTE(ARGUNUSED(dip, infocmd, arg, result))
	return (DDI_FAILURE);
}
5182 5167
5183 5168 /*
5184 5169 * A driver should support its own getinfo(9E) entry point. This function
5185 5170 * is provided as a convenience for ON drivers that where the minor number
5186 5171 * is the instance. Drivers that do not have 1:1 mapping must implement
5187 5172 * their own getinfo(9E) function.
5188 5173 */
5189 5174 int
5190 5175 ddi_getinfo_1to1(dev_info_t *dip, ddi_info_cmd_t infocmd,
5191 5176 void *arg, void **result)
5192 5177 {
5193 5178 _NOTE(ARGUNUSED(dip))
5194 5179 int instance;
5195 5180
5196 5181 if (infocmd != DDI_INFO_DEVT2INSTANCE)
5197 5182 return (DDI_FAILURE);
5198 5183
5199 5184 instance = getminor((dev_t)(uintptr_t)arg);
5200 5185 *result = (void *)(uintptr_t)instance;
5201 5186 return (DDI_SUCCESS);
5202 5187 }
5203 5188
/* attach/detach(9E) entry point that unconditionally fails */
int
ddifail(dev_info_t *devi, ddi_attach_cmd_t cmd)
{
	_NOTE(ARGUNUSED(devi, cmd))
	return (DDI_FAILURE);
}

/*
 * DMA bus_ops stubs for nexus drivers that do not support DMA; each
 * simply fails with the error code appropriate to its operation.
 */
int
ddi_no_dma_map(dev_info_t *dip, dev_info_t *rdip,
    struct ddi_dma_req *dmareqp, ddi_dma_handle_t *handlep)
{
	_NOTE(ARGUNUSED(dip, rdip, dmareqp, handlep))
	return (DDI_DMA_NOMAPPING);
}

int
ddi_no_dma_allochdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_attr_t *attr,
    int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *handlep)
{
	_NOTE(ARGUNUSED(dip, rdip, attr, waitfp, arg, handlep))
	return (DDI_DMA_BADATTR);
}

int
ddi_no_dma_freehdl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle)
{
	_NOTE(ARGUNUSED(dip, rdip, handle))
	return (DDI_FAILURE);
}

int
ddi_no_dma_bindhdl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, struct ddi_dma_req *dmareq,
    ddi_dma_cookie_t *cp, uint_t *ccountp)
{
	_NOTE(ARGUNUSED(dip, rdip, handle, dmareq, cp, ccountp))
	return (DDI_DMA_NOMAPPING);
}

int
ddi_no_dma_unbindhdl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle)
{
	_NOTE(ARGUNUSED(dip, rdip, handle))
	return (DDI_FAILURE);
}

int
ddi_no_dma_flush(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, off_t off, size_t len,
    uint_t cache_flags)
{
	_NOTE(ARGUNUSED(dip, rdip, handle, off, len, cache_flags))
	return (DDI_FAILURE);
}

int
ddi_no_dma_win(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, uint_t win, off_t *offp,
    size_t *lenp, ddi_dma_cookie_t *cookiep, uint_t *ccountp)
{
	_NOTE(ARGUNUSED(dip, rdip, handle, win, offp, lenp, cookiep, ccountp))
	return (DDI_FAILURE);
}

int
ddi_no_dma_mctl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, enum ddi_dma_ctlops request,
    off_t *offp, size_t *lenp, caddr_t *objp, uint_t flags)
{
	_NOTE(ARGUNUSED(dip, rdip, handle, request, offp, lenp, objp, flags))
	return (DDI_FAILURE);
}

/* intentionally empty routine, usable wherever a void hook is required */
void
ddivoid(void)
{}

/* chpoll(9E) stub for devices that do not support polling */
int
nochpoll(dev_t dev, short events, int anyyet, short *reventsp,
    struct pollhead **pollhdrp)
{
	_NOTE(ARGUNUSED(dev, events, anyyet, reventsp, pollhdrp))
	return (ENXIO);
}
5290 5275
/* Return the credentials of the current context (CRED()). */
cred_t *
ddi_get_cred(void)
{
	return (CRED());
}

/* Return the hybrid lbolt value, truncated to clock_t. */
clock_t
ddi_get_lbolt(void)
{
	return ((clock_t)lbolt_hybrid());
}

/* Return the hybrid lbolt value as a full 64-bit quantity. */
int64_t
ddi_get_lbolt64(void)
{
	return (lbolt_hybrid());
}
5308 5293
5309 5294 time_t
5310 5295 ddi_get_time(void)
5311 5296 {
5312 5297 time_t now;
5313 5298
5314 5299 if ((now = gethrestime_sec()) == 0) {
5315 5300 timestruc_t ts;
5316 5301 mutex_enter(&tod_lock);
5317 5302 ts = tod_get();
5318 5303 mutex_exit(&tod_lock);
5319 5304 return (ts.tv_sec);
5320 5305 } else {
5321 5306 return (now);
5322 5307 }
5323 5308 }
5324 5309
/* Return the process id of the current thread's process. */
pid_t
ddi_get_pid(void)
{
	return (ttoproc(curthread)->p_pid);
}

/* Return the kernel thread id (t_did) of the current thread. */
kt_did_t
ddi_get_kt_did(void)
{
	return (curthread->t_did);
}
5336 5321
5337 5322 /*
5338 5323 * This function returns B_TRUE if the caller can reasonably expect that a call
5339 5324 * to cv_wait_sig(9F), cv_timedwait_sig(9F), or qwait_sig(9F) could be awakened
5340 5325 * by user-level signal. If it returns B_FALSE, then the caller should use
5341 5326 * other means to make certain that the wait will not hang "forever."
5342 5327 *
5343 5328 * It does not check the signal mask, nor for reception of any particular
5344 5329 * signal.
5345 5330 *
5346 5331 * Currently, a thread can receive a signal if it's not a kernel thread and it
5347 5332 * is not in the middle of exit(2) tear-down. Threads that are in that
5348 5333 * tear-down effectively convert cv_wait_sig to cv_wait, cv_timedwait_sig to
5349 5334 * cv_timedwait, and qwait_sig to qwait.
5350 5335 */
boolean_t
ddi_can_receive_sig(void)
{
	proc_t *pp;

	/* lwp is in exit(2) tear-down: signals no longer wake it */
	if (curthread->t_proc_flag & TP_LWPEXIT)
		return (B_FALSE);
	/* no associated process at all */
	if ((pp = ttoproc(curthread)) == NULL)
		return (B_FALSE);
	/* processes sharing the kernel address space never get signals */
	return (pp->p_as != &kas);
}
5362 5347
/*
 * Swap bytes in 16-bit [half-]words: each pair of source bytes is
 * written to the destination in reversed order.  An odd trailing byte
 * (nbytes not a multiple of two) is ignored.  src and dst may be the
 * same buffer, but must not otherwise overlap.
 *
 * Parameters:
 *	src	- source buffer
 *	dst	- destination buffer (same size as src)
 *	nbytes	- number of bytes to process
 */
void
swab(void *src, void *dst, size_t nbytes)
{
	uchar_t *pf = (uchar_t *)src;
	uchar_t *pt = (uchar_t *)dst;
	uchar_t tmp;
	size_t nshorts;

	/*
	 * Count pairs in a size_t: the previous "int" counter silently
	 * truncated nbytes >> 1 for buffers of 4GB and beyond.
	 */
	nshorts = nbytes >> 1;

	while (nshorts-- > 0) {
		tmp = *pf++;
		*pt++ = *pf++;
		*pt++ = tmp;
	}
}
5382 5367
/*
 * Append dmdp to the tail of ddip's minor node list, holding the
 * devinfo node busy (ndi_devi_enter) across the list walk.
 */
static void
ddi_append_minor_node(dev_info_t *ddip, struct ddi_minor_data *dmdp)
{
	int circ;
	struct ddi_minor_data *dp;

	ndi_devi_enter(ddip, &circ);
	if ((dp = DEVI(ddip)->devi_minor) == (struct ddi_minor_data *)NULL) {
		/* first minor node on this dip */
		DEVI(ddip)->devi_minor = dmdp;
	} else {
		/* walk to the tail and link the new node there */
		while (dp->next != (struct ddi_minor_data *)NULL)
			dp = dp->next;
		dp->next = dmdp;
	}
	ndi_devi_exit(ddip, circ);
}
5399 5384
/*
 * Part of the obsolete SunCluster DDI Hooks.
 * Keep for binary compatibility
 */
minor_t
ddi_getiminor(dev_t dev)
{
	/* today this is simply the ordinary minor number */
	return (getminor(dev));
}
5409 5394
5410 5395 static int
5411 5396 i_log_devfs_minor_create(dev_info_t *dip, char *minor_name)
5412 5397 {
5413 5398 int se_flag;
5414 5399 int kmem_flag;
5415 5400 int se_err;
5416 5401 char *pathname, *class_name;
5417 5402 sysevent_t *ev = NULL;
5418 5403 sysevent_id_t eid;
5419 5404 sysevent_value_t se_val;
5420 5405 sysevent_attr_list_t *ev_attr_list = NULL;
5421 5406
5422 5407 /* determine interrupt context */
5423 5408 se_flag = (servicing_interrupt()) ? SE_NOSLEEP : SE_SLEEP;
5424 5409 kmem_flag = (se_flag == SE_SLEEP) ? KM_SLEEP : KM_NOSLEEP;
5425 5410
5426 5411 i_ddi_di_cache_invalidate();
5427 5412
5428 5413 #ifdef DEBUG
5429 5414 if ((se_flag == SE_NOSLEEP) && sunddi_debug) {
5430 5415 cmn_err(CE_CONT, "ddi_create_minor_node: called from "
5431 5416 "interrupt level by driver %s",
5432 5417 ddi_driver_name(dip));
5433 5418 }
5434 5419 #endif /* DEBUG */
5435 5420
5436 5421 ev = sysevent_alloc(EC_DEVFS, ESC_DEVFS_MINOR_CREATE, EP_DDI, se_flag);
5437 5422 if (ev == NULL) {
5438 5423 goto fail;
5439 5424 }
5440 5425
5441 5426 pathname = kmem_alloc(MAXPATHLEN, kmem_flag);
5442 5427 if (pathname == NULL) {
5443 5428 sysevent_free(ev);
5444 5429 goto fail;
5445 5430 }
5446 5431
5447 5432 (void) ddi_pathname(dip, pathname);
5448 5433 ASSERT(strlen(pathname));
5449 5434 se_val.value_type = SE_DATA_TYPE_STRING;
5450 5435 se_val.value.sv_string = pathname;
5451 5436 if (sysevent_add_attr(&ev_attr_list, DEVFS_PATHNAME,
5452 5437 &se_val, se_flag) != 0) {
5453 5438 kmem_free(pathname, MAXPATHLEN);
5454 5439 sysevent_free(ev);
5455 5440 goto fail;
5456 5441 }
5457 5442 kmem_free(pathname, MAXPATHLEN);
5458 5443
5459 5444 /* add the device class attribute */
5460 5445 if ((class_name = i_ddi_devi_class(dip)) != NULL) {
5461 5446 se_val.value_type = SE_DATA_TYPE_STRING;
5462 5447 se_val.value.sv_string = class_name;
5463 5448 if (sysevent_add_attr(&ev_attr_list,
5464 5449 DEVFS_DEVI_CLASS, &se_val, SE_SLEEP) != 0) {
5465 5450 sysevent_free_attr(ev_attr_list);
5466 5451 goto fail;
5467 5452 }
5468 5453 }
5469 5454
5470 5455 /*
5471 5456 * allow for NULL minor names
5472 5457 */
5473 5458 if (minor_name != NULL) {
5474 5459 se_val.value.sv_string = minor_name;
5475 5460 if (sysevent_add_attr(&ev_attr_list, DEVFS_MINOR_NAME,
5476 5461 &se_val, se_flag) != 0) {
5477 5462 sysevent_free_attr(ev_attr_list);
5478 5463 sysevent_free(ev);
5479 5464 goto fail;
5480 5465 }
5481 5466 }
5482 5467
5483 5468 if (sysevent_attach_attributes(ev, ev_attr_list) != 0) {
5484 5469 sysevent_free_attr(ev_attr_list);
5485 5470 sysevent_free(ev);
5486 5471 goto fail;
5487 5472 }
5488 5473
5489 5474 if ((se_err = log_sysevent(ev, se_flag, &eid)) != 0) {
5490 5475 if (se_err == SE_NO_TRANSPORT) {
5491 5476 cmn_err(CE_WARN, "/devices or /dev may not be current "
5492 5477 "for driver %s (%s). Run devfsadm -i %s",
5493 5478 ddi_driver_name(dip), "syseventd not responding",
5494 5479 ddi_driver_name(dip));
5495 5480 } else {
5496 5481 sysevent_free(ev);
5497 5482 goto fail;
5498 5483 }
5499 5484 }
5500 5485
5501 5486 sysevent_free(ev);
5502 5487 return (DDI_SUCCESS);
5503 5488 fail:
5504 5489 cmn_err(CE_WARN, "/devices or /dev may not be current "
5505 5490 "for driver %s. Run devfsadm -i %s",
5506 5491 ddi_driver_name(dip), ddi_driver_name(dip));
5507 5492 return (DDI_SUCCESS);
5508 5493 }
5509 5494
/*
 * Generate an EC_DEVFS/ESC_DEVFS_MINOR_REMOVE sysevent when a minor
 * node is removed.  Failing to remove a minor node is not of interest,
 * therefore we do not generate an error message: every exit path
 * returns DDI_SUCCESS.
 */
static int
i_log_devfs_minor_remove(dev_info_t *dip, char *minor_name)
{
	char *pathname, *class_name;
	sysevent_t *ev;
	sysevent_id_t eid;
	sysevent_value_t se_val;
	sysevent_attr_list_t *ev_attr_list = NULL;

	/*
	 * only log ddi_remove_minor_node() calls outside the scope
	 * of attach/detach reconfigurations and when the dip is
	 * still initialized.
	 */
	if (DEVI_IS_ATTACHING(dip) || DEVI_IS_DETACHING(dip) ||
	    (i_ddi_node_state(dip) < DS_INITIALIZED)) {
		return (DDI_SUCCESS);
	}

	i_ddi_di_cache_invalidate();

	ev = sysevent_alloc(EC_DEVFS, ESC_DEVFS_MINOR_REMOVE, EP_DDI, SE_SLEEP);
	if (ev == NULL) {
		return (DDI_SUCCESS);
	}

	pathname = kmem_alloc(MAXPATHLEN, KM_SLEEP);
	if (pathname == NULL) {
		sysevent_free(ev);
		return (DDI_SUCCESS);
	}

	/* attribute: /devices path of the node */
	(void) ddi_pathname(dip, pathname);
	ASSERT(strlen(pathname));
	se_val.value_type = SE_DATA_TYPE_STRING;
	se_val.value.sv_string = pathname;
	if (sysevent_add_attr(&ev_attr_list, DEVFS_PATHNAME,
	    &se_val, SE_SLEEP) != 0) {
		kmem_free(pathname, MAXPATHLEN);
		sysevent_free(ev);
		return (DDI_SUCCESS);
	}

	kmem_free(pathname, MAXPATHLEN);

	/*
	 * allow for NULL minor names
	 */
	if (minor_name != NULL) {
		se_val.value.sv_string = minor_name;
		if (sysevent_add_attr(&ev_attr_list, DEVFS_MINOR_NAME,
		    &se_val, SE_SLEEP) != 0) {
			sysevent_free_attr(ev_attr_list);
			goto fail;
		}
	}

	if ((class_name = i_ddi_devi_class(dip)) != NULL) {
		/* add the device class, driver name and instance attributes */

		se_val.value_type = SE_DATA_TYPE_STRING;
		se_val.value.sv_string = class_name;
		if (sysevent_add_attr(&ev_attr_list,
		    DEVFS_DEVI_CLASS, &se_val, SE_SLEEP) != 0) {
			sysevent_free_attr(ev_attr_list);
			goto fail;
		}

		se_val.value_type = SE_DATA_TYPE_STRING;
		se_val.value.sv_string = (char *)ddi_driver_name(dip);
		if (sysevent_add_attr(&ev_attr_list,
		    DEVFS_DRIVER_NAME, &se_val, SE_SLEEP) != 0) {
			sysevent_free_attr(ev_attr_list);
			goto fail;
		}

		se_val.value_type = SE_DATA_TYPE_INT32;
		se_val.value.sv_int32 = ddi_get_instance(dip);
		if (sysevent_add_attr(&ev_attr_list,
		    DEVFS_INSTANCE, &se_val, SE_SLEEP) != 0) {
			sysevent_free_attr(ev_attr_list);
			goto fail;
		}

	}

	if (sysevent_attach_attributes(ev, ev_attr_list) != 0) {
		sysevent_free_attr(ev_attr_list);
	} else {
		(void) log_sysevent(ev, SE_SLEEP, &eid);
	}
fail:
	/* the event buffer is freed on both the success and failure paths */
	sysevent_free(ev);
	return (DDI_SUCCESS);
}
5609 5594
/*
 * Derive the device class of the node.
 * Device class names aren't defined yet. Until this is done we use
 * devfs event subclass names as device class names.
 *
 * A node_type of the form "<prefix>" or "<prefix>:<qualifier>" is
 * matched against the known prefixes below; nothing is done when the
 * node already has a class or no rule applies.
 */
static int
derive_devi_class(dev_info_t *dip, char *node_type, int flag)
{
	int rv = DDI_SUCCESS;

	if (i_ddi_devi_class(dip) == NULL) {
		/* block devices (except floppy) become ESC_DISK */
		if (strncmp(node_type, DDI_NT_BLOCK,
		    sizeof (DDI_NT_BLOCK) - 1) == 0 &&
		    (node_type[sizeof (DDI_NT_BLOCK) - 1] == '\0' ||
		    node_type[sizeof (DDI_NT_BLOCK) - 1] == ':') &&
		    strcmp(node_type, DDI_NT_FD) != 0) {

			rv = i_ddi_set_devi_class(dip, ESC_DISK, flag);

		/* network devices become ESC_NETWORK */
		} else if (strncmp(node_type, DDI_NT_NET,
		    sizeof (DDI_NT_NET) - 1) == 0 &&
		    (node_type[sizeof (DDI_NT_NET) - 1] == '\0' ||
		    node_type[sizeof (DDI_NT_NET) - 1] == ':')) {

			rv = i_ddi_set_devi_class(dip, ESC_NETWORK, flag);

		/* printer devices become ESC_PRINTER */
		} else if (strncmp(node_type, DDI_NT_PRINTER,
		    sizeof (DDI_NT_PRINTER) - 1) == 0 &&
		    (node_type[sizeof (DDI_NT_PRINTER) - 1] == '\0' ||
		    node_type[sizeof (DDI_NT_PRINTER) - 1] == ':')) {

			rv = i_ddi_set_devi_class(dip, ESC_PRINTER, flag);

		/* the lofi pseudo device, identified by its node name */
		} else if (strncmp(node_type, DDI_PSEUDO,
		    sizeof (DDI_PSEUDO) -1) == 0 &&
		    (strncmp(ESC_LOFI, ddi_node_name(dip),
		    sizeof (ESC_LOFI) -1) == 0)) {
			rv = i_ddi_set_devi_class(dip, ESC_LOFI, flag);
		}
	}

	return (rv);
}
5653 5638
/*
 * Check compliance with PSARC 2003/375:
 *
 * The name must contain only characters a-z, A-Z, 0-9 or _ and it must not
 * exceed IFNAMSIZ (16) characters in length.
 */
static boolean_t
verify_name(char *name)
{
	size_t len = strlen(name);
	char *cp;

	if (len == 0 || len > IFNAMSIZ)
		return (B_FALSE);

	for (cp = name; *cp != '\0'; cp++) {
		/*
		 * Cast to unsigned char before isalnum(): passing a
		 * negative plain-char value is undefined behavior
		 * (CERT STR37-C).
		 */
		if (!isalnum((unsigned char)*cp) && *cp != '_')
			return (B_FALSE);
	}

	return (B_TRUE);
}
5676 5661
/*
 * ddi_create_minor_common:	Create a ddi_minor_data structure and
 *				attach it to the given devinfo node.
 *
 *	dip:		bound and attached devinfo node
 *	name:		minor node name (must not be NULL)
 *	spec_type:	S_IFCHR or S_IFBLK
 *	minor_num:	minor number; must fit in L_MAXMIN32
 *	node_type:	DDI_NT_* string; NULL defaults to DDI_PSEUDO
 *	flag:		CLONE_DEV and/or PRIVONLY_DEV
 *	mtype:		DDM_MINOR, DDM_DEFAULT or DDM_INTERNAL_PATH
 *	read_priv, write_priv, priv_mode: device privilege policy
 *
 * Returns DDI_SUCCESS, or DDI_FAILURE on invalid arguments or
 * allocation failure (allocations here are KM_NOSLEEP).
 */

int
ddi_create_minor_common(dev_info_t *dip, char *name, int spec_type,
    minor_t minor_num, char *node_type, int flag, ddi_minor_type mtype,
    const char *read_priv, const char *write_priv, mode_t priv_mode)
{
	struct ddi_minor_data *dmdp;
	major_t major;

	if (spec_type != S_IFCHR && spec_type != S_IFBLK)
		return (DDI_FAILURE);

	if (name == NULL)
		return (DDI_FAILURE);

	/*
	 * Log a message if the minor number the driver is creating
	 * is not expressible on the on-disk filesystem (currently
	 * this is limited to 18 bits both by UFS). The device can
	 * be opened via devfs, but not by device special files created
	 * via mknod().
	 */
	if (minor_num > L_MAXMIN32) {
		cmn_err(CE_WARN,
		    "%s%d:%s minor 0x%x too big for 32-bit applications",
		    ddi_driver_name(dip), ddi_get_instance(dip),
		    name, minor_num);
		return (DDI_FAILURE);
	}

	/* dip must be bound and attached */
	major = ddi_driver_major(dip);
	ASSERT(major != DDI_MAJOR_T_NONE);

	/*
	 * Default node_type to DDI_PSEUDO and issue notice in debug mode
	 */
	if (node_type == NULL) {
		node_type = DDI_PSEUDO;
		NDI_CONFIG_DEBUG((CE_NOTE, "!illegal node_type NULL for %s%d "
		    " minor node %s; default to DDI_PSEUDO",
		    ddi_driver_name(dip), ddi_get_instance(dip), name));
	}

	/*
	 * If the driver is a network driver, ensure that the name falls within
	 * the interface naming constraints specified by PSARC/2003/375.
	 */
	if (strcmp(node_type, DDI_NT_NET) == 0) {
		if (!verify_name(name))
			return (DDI_FAILURE);

		if (mtype == DDM_MINOR) {
			struct devnames *dnp = &devnamesp[major];

			/* Mark driver as a network driver */
			LOCK_DEV_OPS(&dnp->dn_lock);
			dnp->dn_flags |= DN_NETWORK_DRIVER;

			/*
			 * If this minor node is created during the device
			 * attachment, this is a physical network device.
			 * Mark the driver as a physical network driver.
			 */
			if (DEVI_IS_ATTACHING(dip))
				dnp->dn_flags |= DN_NETWORK_PHYSDRIVER;
			UNLOCK_DEV_OPS(&dnp->dn_lock);
		}
	}

	/* derive the device class from node_type for regular minors */
	if (mtype == DDM_MINOR) {
		if (derive_devi_class(dip, node_type, KM_NOSLEEP) !=
		    DDI_SUCCESS)
			return (DDI_FAILURE);
	}

	/*
	 * Take care of minor number information for the node.
	 */

	if ((dmdp = kmem_zalloc(sizeof (struct ddi_minor_data),
	    KM_NOSLEEP)) == NULL) {
		return (DDI_FAILURE);
	}
	if ((dmdp->ddm_name = i_ddi_strdup(name, KM_NOSLEEP)) == NULL) {
		kmem_free(dmdp, sizeof (struct ddi_minor_data));
		return (DDI_FAILURE);
	}
	dmdp->dip = dip;
	dmdp->ddm_dev = makedevice(major, minor_num);
	dmdp->ddm_spec_type = spec_type;
	dmdp->ddm_node_type = node_type;
	dmdp->type = mtype;
	if (flag & CLONE_DEV) {
		/* clone devices route through the clone driver's major */
		dmdp->type = DDM_ALIAS;
		dmdp->ddm_dev = makedevice(ddi_driver_major(clone_dip), major);
	}
	if (flag & PRIVONLY_DEV) {
		dmdp->ddm_flags |= DM_NO_FSPERM;
	}
	if (read_priv || write_priv) {
		dmdp->ddm_node_priv =
		    devpolicy_priv_by_name(read_priv, write_priv);
	}
	dmdp->ddm_priv_mode = priv_mode;

	ddi_append_minor_node(dip, dmdp);

	/*
	 * only log ddi_create_minor_node() calls which occur
	 * outside the scope of attach(9e)/detach(9e) reconfigurations
	 */
	if (!(DEVI_IS_ATTACHING(dip) || DEVI_IS_DETACHING(dip)) &&
	    mtype != DDM_INTERNAL_PATH) {
		(void) i_log_devfs_minor_create(dip, name);
	}

	/*
	 * Check if any dacf rules match the creation of this minor node
	 */
	dacfc_match_create_minor(name, node_type, dip, dmdp, flag);
	return (DDI_SUCCESS);
}
5804 5789
5805 5790 int
5806 5791 ddi_create_minor_node(dev_info_t *dip, char *name, int spec_type,
5807 5792 minor_t minor_num, char *node_type, int flag)
5808 5793 {
5809 5794 return (ddi_create_minor_common(dip, name, spec_type, minor_num,
5810 5795 node_type, flag, DDM_MINOR, NULL, NULL, 0));
5811 5796 }
5812 5797
/*
 * Create a DDM_MINOR minor node that additionally carries read/write
 * privilege names ('rdpriv'/'wrpriv') and an access mode ('priv_mode');
 * the privilege arguments are forwarded to ddi_create_minor_common().
 */
int
ddi_create_priv_minor_node(dev_info_t *dip, char *name, int spec_type,
    minor_t minor_num, char *node_type, int flag,
    const char *rdpriv, const char *wrpriv, mode_t priv_mode)
{
	return (ddi_create_minor_common(dip, name, spec_type, minor_num,
	    node_type, flag, DDM_MINOR, rdpriv, wrpriv, priv_mode));
}
5821 5806
/*
 * Create a DDM_DEFAULT minor node (the node used when a device is opened
 * by dev_t without an explicit minor name).  Thin wrapper around
 * ddi_create_minor_common().
 */
int
ddi_create_default_minor_node(dev_info_t *dip, char *name, int spec_type,
    minor_t minor_num, char *node_type, int flag)
{
	return (ddi_create_minor_common(dip, name, spec_type, minor_num,
	    node_type, flag, DDM_DEFAULT, NULL, NULL, 0));
}
5829 5814
/*
 * Internal (non-ddi) routine for drivers to export names known
 * to the kernel (especially ddi_pathname_to_dev_t and friends)
 * but not exported externally to /dev
 *
 * Creates a DDM_INTERNAL_PATH node with node type "internal"; such nodes
 * are excluded from devfs minor-node event logging (see the
 * DDM_INTERNAL_PATH checks in the create/remove paths above).
 */
int
ddi_create_internal_pathname(dev_info_t *dip, char *name, int spec_type,
    minor_t minor_num)
{
	return (ddi_create_minor_common(dip, name, spec_type, minor_num,
	    "internal", 0, DDM_INTERNAL_PATH, NULL, NULL, 0));
}
5842 5827
/*
 * Remove the minor node named 'name' from 'dip', or all of dip's minor
 * nodes when 'name' is NULL.  For each removed node this logs a devfs
 * remove event (unless it is a DDM_INTERNAL_PATH node), releases any
 * device privilege, clears the dacf client data, and frees the node.
 * Runs with the devinfo node busy (ndi_devi_enter).
 */
void
ddi_remove_minor_node(dev_info_t *dip, char *name)
{
	int circ;
	struct ddi_minor_data *dmdp, *dmdp1;
	struct ddi_minor_data **dmdp_prev;

	ndi_devi_enter(dip, &circ);
	dmdp_prev = &DEVI(dip)->devi_minor;
	dmdp = DEVI(dip)->devi_minor;
	while (dmdp != NULL) {
		/* save successor before we potentially free dmdp */
		dmdp1 = dmdp->next;
		if ((name == NULL || (dmdp->ddm_name != NULL &&
		    strcmp(name, dmdp->ddm_name) == 0))) {
			if (dmdp->ddm_name != NULL) {
				/* internal paths were never announced */
				if (dmdp->type != DDM_INTERNAL_PATH)
					(void) i_log_devfs_minor_remove(dip,
					    dmdp->ddm_name);
				kmem_free(dmdp->ddm_name,
				    strlen(dmdp->ddm_name) + 1);
			}
			/*
			 * Release device privilege, if any.
			 * Release dacf client data associated with this minor
			 * node by storing NULL.
			 */
			if (dmdp->ddm_node_priv)
				dpfree(dmdp->ddm_node_priv);
			dacf_store_info((dacf_infohdl_t)dmdp, NULL);
			kmem_free(dmdp, sizeof (struct ddi_minor_data));
			/* unlink: predecessor's next now skips dmdp */
			*dmdp_prev = dmdp1;
			/*
			 * OK, we found it, so get out now -- if we drive on,
			 * we will strcmp against garbage. See 1139209.
			 */
			if (name != NULL)
				break;
		} else {
			dmdp_prev = &dmdp->next;
		}
		dmdp = dmdp1;
	}
	ndi_devi_exit(dip, circ);
}
5887 5872
5888 5873
5889 5874 int
5890 5875 ddi_in_panic()
5891 5876 {
5892 5877 return (panicstr != NULL);
5893 5878 }
5894 5879
5895 5880
/*
 * Find first bit set in a mask (returned counting from 1 up)
 *
 * Returns 0 when no bits are set; otherwise the 1-based position of the
 * least significant set bit, as reported by ffs().
 */

int
ddi_ffs(long mask)
{
	return (ffs(mask));
}
5905 5890
/*
 * Find last bit set in a mask (positions count from 1; 0 means no bits
 * set).  Repeatedly clear the lowest set bit until only the most
 * significant bit remains, then let ffs() report its position.
 *
 * Algorithm courtesy of Steve Chessin.
 */

int
ddi_fls(long mask)
{
	long lower;

	while (mask != 0 && (lower = (mask & (mask - 1))) != 0)
		mask = lower;
	return (ffs(mask));
}
5926 5911
5927 5912 /*
5928 5913 * The ddi_soft_state_* routines comprise generic storage management utilities
 * for driver soft state structures (in "the old days," this was done with
 * a statically sized array - big systems and dynamic loading and unloading
5931 5916 * make heap allocation more attractive).
5932 5917 */
5933 5918
5934 5919 /*
5935 5920 * Allocate a set of pointers to 'n_items' objects of size 'size'
5936 5921 * bytes. Each pointer is initialized to nil.
5937 5922 *
5938 5923 * The 'size' and 'n_items' values are stashed in the opaque
5939 5924 * handle returned to the caller.
5940 5925 *
5941 5926 * This implementation interprets 'set of pointers' to mean 'array
5942 5927 * of pointers' but note that nothing in the interface definition
5943 5928 * precludes an implementation that uses, for example, a linked list.
5944 5929 * However there should be a small efficiency gain from using an array
5945 5930 * at lookup time.
5946 5931 *
5947 5932 * NOTE As an optimization, we make our growable array allocations in
5948 5933 * powers of two (bytes), since that's how much kmem_alloc (currently)
5949 5934 * gives us anyway. It should save us some free/realloc's ..
5950 5935 *
5951 5936 * As a further optimization, we make the growable array start out
5952 5937 * with MIN_N_ITEMS in it.
5953 5938 */
5954 5939
5955 5940 #define MIN_N_ITEMS 8 /* 8 void *'s == 32 bytes */
5956 5941
/*
 * Initialize a soft state set: allocate the i_ddi_soft_state header and
 * an initial pointer array, and return the opaque handle via 'state_p'.
 * 'size' is the per-item structure size; 'n_items' is a sizing hint.
 * Returns 0 on success or EINVAL for a NULL handle pointer / zero size.
 */
int
ddi_soft_state_init(void **state_p, size_t size, size_t n_items)
{
	i_ddi_soft_state *ss;

	if (state_p == NULL || size == 0)
		return (EINVAL);

	ss = kmem_zalloc(sizeof (*ss), KM_SLEEP);
	mutex_init(&ss->lock, NULL, MUTEX_DRIVER, NULL);
	ss->size = size;

	if (n_items < MIN_N_ITEMS)
		ss->n_items = MIN_N_ITEMS;
	else {
		int bitlog;

		/*
		 * Round n_items up to a power of two:  when
		 * ddi_fls() == ddi_ffs() exactly one bit is set, so
		 * n_items is already a power of two and 1 << (bitlog - 1)
		 * preserves it; otherwise 1 << ddi_fls() rounds up.
		 */
		if ((bitlog = ddi_fls(n_items)) == ddi_ffs(n_items))
			bitlog--;
		ss->n_items = 1 << bitlog;
	}

	ASSERT(ss->n_items >= n_items);

	ss->array = kmem_zalloc(ss->n_items * sizeof (void *), KM_SLEEP);

	*state_p = ss;
	return (0);
}
5986 5971
/*
 * Allocate a state structure of size 'size' to be associated
 * with item 'item'.
 *
 * In this implementation, the array is extended to
 * allow the requested offset, if needed.
 *
 * Returns DDI_SUCCESS, or DDI_FAILURE for a bad handle, negative item,
 * or an item that is already allocated.
 */
int
ddi_soft_state_zalloc(void *state, int item)
{
	i_ddi_soft_state *ss = (i_ddi_soft_state *)state;
	void **array;
	void *new_element;

	if ((state == NULL) || (item < 0))
		return (DDI_FAILURE);

	mutex_enter(&ss->lock);
	if (ss->size == 0) {
		/* size is set at init time; 0 here means a stale/bad handle */
		mutex_exit(&ss->lock);
		cmn_err(CE_WARN, "ddi_soft_state_zalloc: bad handle: %s",
		    mod_containing_pc(caller()));
		return (DDI_FAILURE);
	}

	array = ss->array;	/* NULL if ss->n_items == 0 */
	ASSERT(ss->n_items != 0 && array != NULL);

	/*
	 * refuse to tread on an existing element
	 */
	if (item < ss->n_items && array[item] != NULL) {
		mutex_exit(&ss->lock);
		return (DDI_FAILURE);
	}

	/*
	 * Allocate a new element to plug in
	 */
	new_element = kmem_zalloc(ss->size, KM_SLEEP);

	/*
	 * Check if the array is big enough, if not, grow it.
	 */
	if (item >= ss->n_items) {
		void **new_array;
		size_t new_n_items;
		struct i_ddi_soft_state *dirty;

		/*
		 * Allocate a new array of the right length, copy
		 * all the old pointers to the new array, then
		 * if it exists at all, put the old array on the
		 * dirty list.
		 *
		 * Note that we can't kmem_free() the old array.
		 *
		 * Why -- well the 'get' operation is 'mutex-free', so we
		 * can't easily catch a suspended thread that is just about
		 * to dereference the array we just grew out of. So we
		 * cons up a header and put it on a list of 'dirty'
		 * pointer arrays. (Dirty in the sense that there may
		 * be suspended threads somewhere that are in the middle
		 * of referencing them). Fortunately, we -can- garbage
		 * collect it all at ddi_soft_state_fini time.
		 */
		new_n_items = ss->n_items;
		while (new_n_items < (1 + item))
			new_n_items <<= 1; /* double array size .. */

		ASSERT(new_n_items >= (1 + item));	/* sanity check! */

		new_array = kmem_zalloc(new_n_items * sizeof (void *),
		    KM_SLEEP);
		/*
		 * Copy the pointers into the new array
		 */
		bcopy(array, new_array, ss->n_items * sizeof (void *));

		/*
		 * Save the old array on the dirty list
		 */
		dirty = kmem_zalloc(sizeof (*dirty), KM_SLEEP);
		dirty->array = ss->array;
		dirty->n_items = ss->n_items;
		dirty->next = ss->next;
		ss->next = dirty;

		/* publish the new array before the new n_items */
		ss->array = (array = new_array);
		ss->n_items = new_n_items;
	}

	ASSERT(array != NULL && item < ss->n_items && array[item] == NULL);

	array[item] = new_element;

	mutex_exit(&ss->lock);
	return (DDI_SUCCESS);
}
6086 6071
6087 6072 /*
6088 6073 * Fetch a pointer to the allocated soft state structure.
6089 6074 *
6090 6075 * This is designed to be cheap.
6091 6076 *
6092 6077 * There's an argument that there should be more checking for
6093 6078 * nil pointers and out of bounds on the array.. but we do a lot
6094 6079 * of that in the alloc/free routines.
6095 6080 *
6096 6081 * An array has the convenience that we don't need to lock read-access
6097 6082 * to it c.f. a linked list. However our "expanding array" strategy
6098 6083 * means that we should hold a readers lock on the i_ddi_soft_state
6099 6084 * structure.
6100 6085 *
6101 6086 * However, from a performance viewpoint, we need to do it without
6102 6087 * any locks at all -- this also makes it a leaf routine. The algorithm
6103 6088 * is 'lock-free' because we only discard the pointer arrays at
6104 6089 * ddi_soft_state_fini() time.
6105 6090 */
6106 6091 void *
6107 6092 ddi_get_soft_state(void *state, int item)
6108 6093 {
6109 6094 i_ddi_soft_state *ss = (i_ddi_soft_state *)state;
6110 6095
6111 6096 ASSERT((ss != NULL) && (item >= 0));
6112 6097
6113 6098 if (item < ss->n_items && ss->array != NULL)
6114 6099 return (ss->array[item]);
6115 6100 return (NULL);
6116 6101 }
6117 6102
/*
 * Free the state structure corresponding to 'item.' Freeing an
 * element that has either gone or was never allocated is not
 * considered an error. Note that we free the state structure, but
 * we don't shrink our pointer array, or discard 'dirty' arrays,
 * since even a few pointers don't really waste too much memory.
 *
 * Passing an item number that is out of bounds, or a null pointer will
 * provoke an error message.
 */
void
ddi_soft_state_free(void *state, int item)
{
	i_ddi_soft_state *ss = (i_ddi_soft_state *)state;
	void **array;
	void *element;
	static char msg[] = "ddi_soft_state_free:";

	if (ss == NULL) {
		cmn_err(CE_WARN, "%s null handle: %s",
		    msg, mod_containing_pc(caller()));
		return;
	}

	element = NULL;

	mutex_enter(&ss->lock);

	if ((array = ss->array) == NULL || ss->size == 0) {
		cmn_err(CE_WARN, "%s bad handle: %s",
		    msg, mod_containing_pc(caller()));
	} else if (item < 0 || item >= ss->n_items) {
		cmn_err(CE_WARN, "%s item %d not in range [0..%lu]: %s",
		    msg, item, ss->n_items - 1, mod_containing_pc(caller()));
	} else if (array[item] != NULL) {
		/* detach under the lock; free after dropping it */
		element = array[item];
		array[item] = NULL;
	}

	mutex_exit(&ss->lock);

	if (element)
		kmem_free(element, ss->size);
}
6162 6147
/*
 * Free the entire set of pointers, and any
 * soft state structures contained therein.
 *
 * Note that we don't grab the ss->lock mutex, even though
 * we're inspecting the various fields of the data structure.
 *
 * There is an implicit assumption that this routine will
 * never run concurrently with any of the above on this
 * particular state structure i.e. by the time the driver
 * calls this routine, there should be no other threads
 * running in the driver.
 */
void
ddi_soft_state_fini(void **state_p)
{
	i_ddi_soft_state *ss, *dirty;
	int item;
	static char msg[] = "ddi_soft_state_fini:";

	if (state_p == NULL ||
	    (ss = (i_ddi_soft_state *)(*state_p)) == NULL) {
		cmn_err(CE_WARN, "%s null handle: %s",
		    msg, mod_containing_pc(caller()));
		return;
	}

	if (ss->size == 0) {
		/* size is set at init; 0 means a stale or corrupt handle */
		cmn_err(CE_WARN, "%s bad handle: %s",
		    msg, mod_containing_pc(caller()));
		return;
	}

	if (ss->n_items > 0) {
		/* free every remaining element, then the current array */
		for (item = 0; item < ss->n_items; item++)
			ddi_soft_state_free(ss, item);
		kmem_free(ss->array, ss->n_items * sizeof (void *));
	}

	/*
	 * Now delete any dirty arrays from previous 'grow' operations
	 */
	for (dirty = ss->next; dirty; dirty = ss->next) {
		ss->next = dirty->next;
		kmem_free(dirty->array, dirty->n_items * sizeof (void *));
		kmem_free(dirty, sizeof (*dirty));
	}

	mutex_destroy(&ss->lock);
	kmem_free(ss, sizeof (*ss));

	*state_p = NULL;
}
6216 6201
6217 6202 #define SS_N_ITEMS_PER_HASH 16
6218 6203 #define SS_MIN_HASH_SZ 16
6219 6204 #define SS_MAX_HASH_SZ 4096
6220 6205
/*
 * Initialize a by-string soft state set.  'size' is the per-item
 * structure size and 'n_items' a sizing hint that is converted into a
 * string-hash bucket count clamped to [SS_MIN_HASH_SZ, SS_MAX_HASH_SZ].
 * Returns 0 on success or EINVAL for bad arguments.
 */
int
ddi_soft_state_bystr_init(ddi_soft_state_bystr **state_p, size_t size,
    int n_items)
{
	i_ddi_soft_state_bystr *sss;
	int hash_sz;

	ASSERT(state_p && size && n_items);
	if ((state_p == NULL) || (size == 0) || (n_items == 0))
		return (EINVAL);

	/* current implementation is based on hash, convert n_items to hash */
	hash_sz = n_items / SS_N_ITEMS_PER_HASH;
	if (hash_sz < SS_MIN_HASH_SZ)
		hash_sz = SS_MIN_HASH_SZ;
	else if (hash_sz > SS_MAX_HASH_SZ)
		hash_sz = SS_MAX_HASH_SZ;

	/* allocate soft_state pool */
	sss = kmem_zalloc(sizeof (*sss), KM_SLEEP);
	sss->ss_size = size;
	sss->ss_mod_hash = mod_hash_create_strhash("soft_state_bystr",
	    hash_sz, mod_hash_null_valdtor);
	*state_p = (ddi_soft_state_bystr *)sss;
	return (0);
}
6247 6232
/*
 * Allocate a zeroed soft state structure keyed by the string 'str'.
 * The key is duplicated, so the caller's string need not persist.
 * Returns DDI_SUCCESS, or DDI_FAILURE on bad arguments or when 'str'
 * already has an associated element.
 */
int
ddi_soft_state_bystr_zalloc(ddi_soft_state_bystr *state, const char *str)
{
	i_ddi_soft_state_bystr *sss = (i_ddi_soft_state_bystr *)state;
	void *sso;
	char *dup_str;

	ASSERT(sss && str && sss->ss_mod_hash);
	if ((sss == NULL) || (str == NULL) || (sss->ss_mod_hash == NULL))
		return (DDI_FAILURE);
	sso = kmem_zalloc(sss->ss_size, KM_SLEEP);
	dup_str = i_ddi_strdup((char *)str, KM_SLEEP);
	if (mod_hash_insert(sss->ss_mod_hash,
	    (mod_hash_key_t)dup_str, (mod_hash_val_t)sso) == 0)
		return (DDI_SUCCESS);

	/*
	 * The only error from an strhash insert is caused by a duplicate key.
	 * We refuse to tread on an existing element, so free and fail.
	 */
	kmem_free(dup_str, strlen(dup_str) + 1);
	kmem_free(sso, sss->ss_size);
	return (DDI_FAILURE);
}
6272 6257
6273 6258 void *
6274 6259 ddi_soft_state_bystr_get(ddi_soft_state_bystr *state, const char *str)
6275 6260 {
6276 6261 i_ddi_soft_state_bystr *sss = (i_ddi_soft_state_bystr *)state;
6277 6262 void *sso;
6278 6263
6279 6264 ASSERT(sss && str && sss->ss_mod_hash);
6280 6265 if ((sss == NULL) || (str == NULL) || (sss->ss_mod_hash == NULL))
6281 6266 return (NULL);
6282 6267
6283 6268 if (mod_hash_find(sss->ss_mod_hash,
6284 6269 (mod_hash_key_t)str, (mod_hash_val_t *)&sso) == 0)
6285 6270 return (sso);
6286 6271 return (NULL);
6287 6272 }
6288 6273
6289 6274 void
6290 6275 ddi_soft_state_bystr_free(ddi_soft_state_bystr *state, const char *str)
6291 6276 {
6292 6277 i_ddi_soft_state_bystr *sss = (i_ddi_soft_state_bystr *)state;
6293 6278 void *sso;
6294 6279
6295 6280 ASSERT(sss && str && sss->ss_mod_hash);
6296 6281 if ((sss == NULL) || (str == NULL) || (sss->ss_mod_hash == NULL))
6297 6282 return;
6298 6283
6299 6284 (void) mod_hash_remove(sss->ss_mod_hash,
6300 6285 (mod_hash_key_t)str, (mod_hash_val_t *)&sso);
6301 6286 kmem_free(sso, sss->ss_size);
6302 6287 }
6303 6288
6304 6289 void
6305 6290 ddi_soft_state_bystr_fini(ddi_soft_state_bystr **state_p)
6306 6291 {
6307 6292 i_ddi_soft_state_bystr *sss;
6308 6293
6309 6294 ASSERT(state_p);
6310 6295 if (state_p == NULL)
6311 6296 return;
6312 6297
6313 6298 sss = (i_ddi_soft_state_bystr *)(*state_p);
6314 6299 if (sss == NULL)
6315 6300 return;
6316 6301
6317 6302 ASSERT(sss->ss_mod_hash);
6318 6303 if (sss->ss_mod_hash) {
6319 6304 mod_hash_destroy_strhash(sss->ss_mod_hash);
6320 6305 sss->ss_mod_hash = NULL;
6321 6306 }
6322 6307
6323 6308 kmem_free(sss, sizeof (*sss));
6324 6309 *state_p = NULL;
6325 6310 }
6326 6311
/*
 * The ddi_strid_* routines provide string-to-index management utilities.
 */
/*
 * Allocate and initialize an strid set.  'n_items' sizes both the id
 * space (ids start at 1) and the growth chunk used when the space is
 * later extended; it is also converted into a hash bucket count clamped
 * to [SS_MIN_HASH_SZ, SS_MAX_HASH_SZ].  Two hashes are kept: string->id
 * and id->string.  Returns DDI_SUCCESS or DDI_FAILURE.
 */
int
ddi_strid_init(ddi_strid **strid_p, int n_items)
{
	i_ddi_strid *ss;
	int hash_sz;

	if (strid_p == NULL)
		return (DDI_FAILURE);

	/* current implementation is based on hash, convert n_items to hash */
	hash_sz = n_items / SS_N_ITEMS_PER_HASH;
	if (hash_sz < SS_MIN_HASH_SZ)
		hash_sz = SS_MIN_HASH_SZ;
	else if (hash_sz > SS_MAX_HASH_SZ)
		hash_sz = SS_MAX_HASH_SZ;

	ss = kmem_alloc(sizeof (*ss), KM_SLEEP);
	ss->strid_chunksz = n_items;
	ss->strid_spacesz = n_items;
	ss->strid_space = id_space_create("strid", 1, n_items);
	ss->strid_bystr = mod_hash_create_strhash("strid_bystr", hash_sz,
	    mod_hash_null_valdtor);
	ss->strid_byid = mod_hash_create_idhash("strid_byid", hash_sz,
	    mod_hash_null_valdtor);
	*strid_p = (ddi_strid *)ss;
	return (DDI_SUCCESS);
}
6358 6343
/*
 * Allocate an id mapping within the specified set for str, return id.
 * Returns 0 on failure (bad arguments, id space exhausted even after
 * extension, or hash insertion failure).
 */
static id_t
i_ddi_strid_alloc(ddi_strid *strid, char *str)
{
	i_ddi_strid *ss = (i_ddi_strid *)strid;
	id_t id;
	char *s;

	ASSERT(ss && str);
	if ((ss == NULL) || (str == NULL))
		return (0);

	/*
	 * Allocate an id using VM_FIRSTFIT in order to keep allocated id
	 * range as compressed as possible. This is important to minimize
	 * the amount of space used when the id is used as a ddi_soft_state
	 * index by the caller.
	 *
	 * If the id list is exhausted, increase the size of the list
	 * by the chunk size specified in ddi_strid_init and reattempt
	 * the allocation
	 */
	if ((id = id_allocff_nosleep(ss->strid_space)) == (id_t)-1) {
		id_space_extend(ss->strid_space, ss->strid_spacesz,
		    ss->strid_spacesz + ss->strid_chunksz);
		ss->strid_spacesz += ss->strid_chunksz;
		if ((id = id_allocff_nosleep(ss->strid_space)) == (id_t)-1)
			return (0);
	}

	/*
	 * NOTE: since we create and destroy in unison we can save space by
	 * using bystr key as the byid value. This means destroy must occur
	 * in (byid, bystr) order.
	 */
	s = i_ddi_strdup(str, KM_SLEEP);
	if (mod_hash_insert(ss->strid_bystr, (mod_hash_key_t)s,
	    (mod_hash_val_t)(intptr_t)id) != 0) {
		/*
		 * NOTE(review): on this path 's' was never inserted and is
		 * not freed here — looks like a small memory leak on the
		 * duplicate-key failure case; confirm against mod_hash
		 * ownership rules.
		 */
		ddi_strid_free(strid, id);
		return (0);
	}
	if (mod_hash_insert(ss->strid_byid, (mod_hash_key_t)(intptr_t)id,
	    (mod_hash_val_t)s) != 0) {
		ddi_strid_free(strid, id);
		return (0);
	}

	/* NOTE: s is freed on mod_hash_destroy by mod_hash_strval_dtor */
	return (id);
}
6409 6394
/*
 * Allocate an id mapping within the specified set for str, return id.
 * Public wrapper for i_ddi_strid_alloc(); returns 0 on failure.
 */
id_t
ddi_strid_alloc(ddi_strid *strid, char *str)
{
	return (i_ddi_strid_alloc(strid, str));
}
6416 6401
6417 6402 /* return the id within the specified strid given the str */
6418 6403 id_t
6419 6404 ddi_strid_str2id(ddi_strid *strid, char *str)
6420 6405 {
6421 6406 i_ddi_strid *ss = (i_ddi_strid *)strid;
6422 6407 id_t id = 0;
6423 6408 mod_hash_val_t hv;
6424 6409
6425 6410 ASSERT(ss && str);
6426 6411 if (ss && str && (mod_hash_find(ss->strid_bystr,
6427 6412 (mod_hash_key_t)str, &hv) == 0))
6428 6413 id = (int)(intptr_t)hv;
6429 6414 return (id);
6430 6415 }
6431 6416
6432 6417 /* return str within the specified strid given the id */
6433 6418 char *
6434 6419 ddi_strid_id2str(ddi_strid *strid, id_t id)
6435 6420 {
6436 6421 i_ddi_strid *ss = (i_ddi_strid *)strid;
6437 6422 char *str = NULL;
6438 6423 mod_hash_val_t hv;
6439 6424
6440 6425 ASSERT(ss && id > 0);
6441 6426 if (ss && (id > 0) && (mod_hash_find(ss->strid_byid,
6442 6427 (mod_hash_key_t)(uintptr_t)id, &hv) == 0))
6443 6428 str = (char *)hv;
6444 6429 return (str);
6445 6430 }
6446 6431
/*
 * Free the id mapping within the specified strid: remove both hash
 * entries and return the id to the id space.  The shared string (the
 * bystr key doubling as the byid value) is freed by the bystr hash's
 * key destructor on mod_hash_destroy().
 */
void
ddi_strid_free(ddi_strid *strid, id_t id)
{
	i_ddi_strid *ss = (i_ddi_strid *)strid;
	char *str;

	ASSERT(ss && id > 0);
	if ((ss == NULL) || (id <= 0))
		return;

	/* bystr key is byid value: destroy order must be (byid, bystr) */
	str = ddi_strid_id2str(strid, id);
	(void) mod_hash_destroy(ss->strid_byid, (mod_hash_key_t)(uintptr_t)id);
	id_free(ss->strid_space, id);

	if (str)
		(void) mod_hash_destroy(ss->strid_bystr, (mod_hash_key_t)str);
}
6466 6451
6467 6452 /* destroy the strid set */
6468 6453 void
6469 6454 ddi_strid_fini(ddi_strid **strid_p)
6470 6455 {
6471 6456 i_ddi_strid *ss;
6472 6457
6473 6458 ASSERT(strid_p);
6474 6459 if (strid_p == NULL)
6475 6460 return;
6476 6461
6477 6462 ss = (i_ddi_strid *)(*strid_p);
6478 6463 if (ss == NULL)
6479 6464 return;
6480 6465
6481 6466 /* bystr key is byid value: destroy order must be (byid, bystr) */
6482 6467 if (ss->strid_byid)
6483 6468 mod_hash_destroy_hash(ss->strid_byid);
6484 6469 if (ss->strid_byid)
6485 6470 mod_hash_destroy_hash(ss->strid_bystr);
6486 6471 if (ss->strid_space)
6487 6472 id_space_destroy(ss->strid_space);
6488 6473 kmem_free(ss, sizeof (*ss));
6489 6474 *strid_p = NULL;
6490 6475 }
6491 6476
/*
 * This sets the devi_addr entry in the dev_info structure 'dip' to 'name'.
 * Storage is double buffered to prevent updates during devi_addr use -
 * double buffering is adequate for reliable ddi_deviname() consumption.
 * The double buffer is not freed until dev_info structure destruction
 * (by i_ddi_free_node).
 */
void
ddi_set_name_addr(dev_info_t *dip, char *name)
{
	char *buf = DEVI(dip)->devi_addr_buf;
	char *newaddr;

	/* lazily allocate the two MAXNAMELEN halves of the double buffer */
	if (buf == NULL) {
		buf = kmem_zalloc(2 * MAXNAMELEN, KM_SLEEP);
		DEVI(dip)->devi_addr_buf = buf;
	}

	if (name) {
		ASSERT(strlen(name) < MAXNAMELEN);
		/* write into whichever half is not currently published */
		newaddr = (DEVI(dip)->devi_addr == buf) ?
		    (buf + MAXNAMELEN) : buf;
		(void) strlcpy(newaddr, name, MAXNAMELEN);
	} else
		newaddr = NULL;

	DEVI(dip)->devi_addr = newaddr;
}
6520 6505
/*
 * Return the unit-address most recently set by ddi_set_name_addr(),
 * or NULL if none is set.
 */
char *
ddi_get_name_addr(dev_info_t *dip)
{
	return (DEVI(dip)->devi_addr);
}
6526 6511
/* Attach parent-private data 'pd' to the devinfo node. */
void
ddi_set_parent_data(dev_info_t *dip, void *pd)
{
	DEVI(dip)->devi_parent_data = pd;
}
6532 6517
/* Return the parent-private data set by ddi_set_parent_data(). */
void *
ddi_get_parent_data(dev_info_t *dip)
{
	return (DEVI(dip)->devi_parent_data);
}
6538 6523
/*
 * ddi_name_to_major: returns the major number of a named module,
 * derived from the current driver alias binding.
 *
 * Caveat: drivers should avoid the use of this function, in particular
 * together with ddi_get_name/ddi_binding name, as per
 *	major = ddi_name_to_major(ddi_get_name(devi));
 * ddi_name_to_major() relies on the state of the device/alias binding,
 * which can and does change dynamically as aliases are administered
 * over time. An attached device instance cannot rely on the major
 * number returned by ddi_name_to_major() to match its own major number.
 *
 * For driver use, ddi_driver_major() reliably returns the major number
 * for the module to which the device was bound at attach time over
 * the life of the instance.
 *	major = ddi_driver_major(dev_info_t *)
 */
major_t
ddi_name_to_major(char *name)
{
	return (mod_name_to_major(name));
}
6561 6546
/*
 * ddi_major_to_name: Returns the module name bound to a major number.
 * Thin wrapper around mod_major_to_name().
 */
char *
ddi_major_to_name(major_t major)
{
	return (mod_major_to_name(major));
}
6570 6555
/*
 * Return the name of the devinfo node pointed at by 'dip' in the buffer
 * pointed at by 'name.' A devinfo node is named as a result of calling
 * ddi_initchild().
 *
 * Note: the driver must be held before calling this function!
 *
 * The result has the form "/nodename" or "/nodename@unit-address"; the
 * root node yields the empty string.  The caller's buffer must be large
 * enough for the composed name -- sprintf() here is unbounded.
 */
char *
ddi_deviname(dev_info_t *dip, char *name)
{
	char *addrname;
	char none = '\0';

	if (dip == ddi_root_node()) {
		*name = '\0';
		return (name);
	}

	if (i_ddi_node_state(dip) < DS_BOUND) {
		/* unbound nodes have no unit-address yet */
		addrname = &none;
	} else {
		/*
		 * Use ddi_get_name_addr() without checking state so we get
		 * a unit-address if we are called after ddi_set_name_addr()
		 * by nexus DDI_CTL_INITCHILD code, but before completing
		 * node promotion to DS_INITIALIZED.  We currently have
		 * two situations where we are called in this state:
		 *   o  For framework processing of a path-oriented alias.
		 *   o  If a SCSA nexus driver calls ddi_devid_register()
		 *	from it's tran_tgt_init(9E) implementation.
		 */
		addrname = ddi_get_name_addr(dip);
		if (addrname == NULL)
			addrname = &none;
	}

	if (*addrname == '\0') {
		(void) sprintf(name, "/%s", ddi_node_name(dip));
	} else {
		(void) sprintf(name, "/%s@%s", ddi_node_name(dip), addrname);
	}

	return (name);
}
6615 6600
/*
 * Spits out the name of device node, typically name@addr, for a given node,
 * using the driver name, not the nodename.
 *
 * Used by match_parent. Not to be used elsewhere.
 *
 * Unlike ddi_deviname(), the result has no leading '/' and uses the
 * binding (driver) name.  The node must be at least DS_INITIALIZED.
 */
char *
i_ddi_parname(dev_info_t *dip, char *name)
{
	char *addrname;

	if (dip == ddi_root_node()) {
		*name = '\0';
		return (name);
	}

	ASSERT(i_ddi_node_state(dip) >= DS_INITIALIZED);

	if (*(addrname = ddi_get_name_addr(dip)) == '\0')
		(void) sprintf(name, "%s", ddi_binding_name(dip));
	else
		(void) sprintf(name, "%s@%s", ddi_binding_name(dip), addrname);
	return (name);
}
6640 6625
/*
 * Build the full /devices-style path for 'dip' into 'path' by recursing
 * to the root and appending each node's "/name@addr" component on the
 * way back down (via ddi_deviname()).  The root node contributes the
 * empty string.
 */
static char *
pathname_work(dev_info_t *dip, char *path)
{
	char *bp;

	if (dip == ddi_root_node()) {
		*path = '\0';
		return (path);
	}
	(void) pathname_work(ddi_get_parent(dip), path);
	/* append this node's component after the parent's path */
	bp = path + strlen(path);
	(void) ddi_deviname(dip, bp);
	return (path);
}
6655 6640
/*
 * Public entry point: write the full device path of 'dip' into 'path'
 * (caller-supplied buffer) and return 'path'.
 */
char *
ddi_pathname(dev_info_t *dip, char *path)
{
	return (pathname_work(dip, path));
}
6661 6646
6662 6647 char *
6663 6648 ddi_pathname_minor(struct ddi_minor_data *dmdp, char *path)
6664 6649 {
6665 6650 if (dmdp->dip == NULL)
6666 6651 *path = '\0';
6667 6652 else {
6668 6653 (void) ddi_pathname(dmdp->dip, path);
6669 6654 if (dmdp->ddm_name) {
6670 6655 (void) strcat(path, ":");
6671 6656 (void) strcat(path, dmdp->ddm_name);
6672 6657 }
6673 6658 }
6674 6659 return (path);
6675 6660 }
6676 6661
/*
 * Build the OBP (firmware) path for 'dip' into 'path'.  If a node in the
 * ancestry carries an "obp-path" property, that becomes the prefix and
 * descendant components are appended; reaching the root without finding
 * one means there is no OBP path, and NULL is returned.
 */
static char *
pathname_work_obp(dev_info_t *dip, char *path)
{
	char *bp;
	char *obp_path;

	/*
	 * look up the "obp-path" property, return the path if it exists
	 */
	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "obp-path", &obp_path) == DDI_PROP_SUCCESS) {
		(void) strcpy(path, obp_path);
		ddi_prop_free(obp_path);
		return (path);
	}

	/*
	 * stop at root, no obp path
	 */
	if (dip == ddi_root_node()) {
		return (NULL);
	}

	obp_path = pathname_work_obp(ddi_get_parent(dip), path);
	if (obp_path == NULL)
		return (NULL);

	/*
	 * append our component to parent's obp path
	 */
	bp = path + strlen(path);
	/*
	 * NOTE(review): 'bp - 1' assumes the parent's path is non-empty;
	 * that appears to hold because a NULL parent result returns above,
	 * but confirm an "obp-path" property can never be "".
	 */
	if (*(bp - 1) != '/')
		(void) strcat(bp++, "/");
	(void) ddi_deviname(dip, bp);
	return (path);
}
6713 6698
/*
 * return the 'obp-path' based path for the given node, or NULL if the node
 * does not have a different obp path.  NOTE: Unlike ddi_pathname, this
 * function can't be called from interrupt context (since we need to
 * lookup a string property).
 */
char *
ddi_pathname_obp(dev_info_t *dip, char *path)
{
	ASSERT(!servicing_interrupt());
	if (dip == NULL || path == NULL)
		return (NULL);

	/* split work into a separate function to aid debugging */
	return (pathname_work_obp(dip, path));
}
6730 6715
6731 6716 int
6732 6717 ddi_pathname_obp_set(dev_info_t *dip, char *component)
6733 6718 {
6734 6719 dev_info_t *pdip;
6735 6720 char *obp_path = NULL;
6736 6721 int rc = DDI_FAILURE;
6737 6722
6738 6723 if (dip == NULL)
6739 6724 return (DDI_FAILURE);
6740 6725
6741 6726 obp_path = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
6742 6727
6743 6728 pdip = ddi_get_parent(dip);
6744 6729
6745 6730 if (ddi_pathname_obp(pdip, obp_path) == NULL) {
6746 6731 (void) ddi_pathname(pdip, obp_path);
6747 6732 }
6748 6733
6749 6734 if (component) {
6750 6735 (void) strncat(obp_path, "/", MAXPATHLEN);
6751 6736 (void) strncat(obp_path, component, MAXPATHLEN);
6752 6737 }
6753 6738 rc = ndi_prop_update_string(DDI_DEV_T_NONE, dip, "obp-path",
6754 6739 obp_path);
6755 6740
6756 6741 if (obp_path)
6757 6742 kmem_free(obp_path, MAXPATHLEN);
6758 6743
6759 6744 return (rc);
6760 6745 }
6761 6746
6762 6747 /*
6763 6748 * Given a dev_t, return the pathname of the corresponding device in the
6764 6749 * buffer pointed at by "path." The buffer is assumed to be large enough
6765 6750 * to hold the pathname of the device (MAXPATHLEN).
6766 6751 *
6767 6752 * The pathname of a device is the pathname of the devinfo node to which
6768 6753 * the device "belongs," concatenated with the character ':' and the name
6769 6754 * of the minor node corresponding to the dev_t. If spec_type is 0 then
6770 6755 * just the pathname of the devinfo node is returned without driving attach
6771 6756 * of that node. For a non-zero spec_type, an attach is performed and a
6772 6757 * search of the minor list occurs.
6773 6758 *
6774 6759 * It is possible that the path associated with the dev_t is not
6775 6760 * currently available in the devinfo tree. In order to have a
6776 6761 * dev_t, a device must have been discovered before, which means
6777 6762 * that the path is always in the instance tree. The one exception
6778 6763 * to this is if the dev_t is associated with a pseudo driver, in
6779 6764 * which case the device must exist on the pseudo branch of the
6780 6765 * devinfo tree as a result of parsing .conf files.
6781 6766 */
int
ddi_dev_pathname(dev_t devt, int spec_type, char *path)
{
	int circ;
	major_t major = getmajor(devt);
	int instance;
	dev_info_t *dip;
	char *minorname;
	char *drvname;

	/* an out-of-range major number cannot name a device */
	if (major >= devcnt)
		goto fail;
	if (major == clone_major) {
		/* clone has no minor nodes, manufacture the path here */
		if ((drvname = ddi_major_to_name(getminor(devt))) == NULL)
			goto fail;

		(void) snprintf(path, MAXPATHLEN, "%s:%s", CLONE_PATH, drvname);
		return (DDI_SUCCESS);
	}

	/* extract instance from devt (getinfo(9E) DDI_INFO_DEVT2INSTANCE). */
	if ((instance = dev_to_instance(devt)) == -1)
		goto fail;

	/* reconstruct the path given the major/instance */
	if (e_ddi_majorinstance_to_path(major, instance, path) != DDI_SUCCESS)
		goto fail;

	/* if spec_type given we must drive attach and search minor nodes */
	if ((spec_type == S_IFCHR) || (spec_type == S_IFBLK)) {
		/* attach the path so we can search minors */
		if ((dip = e_ddi_hold_devi_by_path(path, 0)) == NULL)
			goto fail;

		/* Add minorname to path. */
		ndi_devi_enter(dip, &circ);
		minorname = i_ddi_devtspectype_to_minorname(dip,
		    devt, spec_type);
		if (minorname) {
			(void) strcat(path, ":");
			(void) strcat(path, minorname);
		}
		/* drop the busy hold and the devi hold before failing */
		ndi_devi_exit(dip, circ);
		ddi_release_devi(dip);
		if (minorname == NULL)
			goto fail;
	}
	ASSERT(strlen(path) < MAXPATHLEN);
	return (DDI_SUCCESS);

	/* common failure exit: hand back an empty path */
fail:	*path = 0;
	return (DDI_FAILURE);
}
6836 6821
6837 6822 /*
6838 6823 * Given a major number and an instance, return the path.
6839 6824 * This interface does NOT drive attach.
6840 6825 */
int
e_ddi_majorinstance_to_path(major_t major, int instance, char *path)
{
	struct devnames *dnp;
	dev_info_t *dip;

	/* reject a bogus major or instance up front */
	if ((major >= devcnt) || (instance == -1)) {
		*path = 0;
		return (DDI_FAILURE);
	}

	/* look for the major/instance in the instance tree */
	if (e_ddi_instance_majorinstance_to_path(major, instance,
	    path) == DDI_SUCCESS) {
		ASSERT(strlen(path) < MAXPATHLEN);
		return (DDI_SUCCESS);
	}

	/*
	 * Not in instance tree, find the instance on the per driver list and
	 * construct path to instance via ddi_pathname(). This is how paths
	 * down the 'pseudo' branch are constructed.
	 */
	dnp = &(devnamesp[major]);
	LOCK_DEV_OPS(&(dnp->dn_lock));
	for (dip = dnp->dn_head; dip;
	    dip = (dev_info_t *)DEVI(dip)->devi_next) {
		/* Skip if instance does not match. */
		if (DEVI(dip)->devi_instance != instance)
			continue;

		/*
		 * An ndi_hold_devi() does not prevent DS_INITIALIZED->DS_BOUND
		 * node demotion, so it is not an effective way of ensuring
		 * that the ddi_pathname result has a unit-address. Instead,
		 * we reverify the node state after calling ddi_pathname().
		 */
		if (i_ddi_node_state(dip) >= DS_INITIALIZED) {
			(void) ddi_pathname(dip, path);
			if (i_ddi_node_state(dip) < DS_INITIALIZED)
				continue;
			UNLOCK_DEV_OPS(&(dnp->dn_lock));
			ASSERT(strlen(path) < MAXPATHLEN);
			return (DDI_SUCCESS);
		}
	}
	UNLOCK_DEV_OPS(&(dnp->dn_lock));

	/* can't reconstruct the path */
	*path = 0;
	return (DDI_FAILURE);
}
6893 6878
6894 6879 #define GLD_DRIVER_PPA "SUNW,gld_v0_ppa"
6895 6880
6896 6881 /*
6897 6882 * Given the dip for a network interface return the ppa for that interface.
6898 6883 *
6899 6884 * In all cases except GLD v0 drivers, the ppa == instance.
6900 6885 * In the case of GLD v0 drivers, the ppa is equal to the attach order.
6901 6886 * So for these drivers when the attach routine calls gld_register(),
6902 6887 * the GLD framework creates an integer property called "gld_driver_ppa"
6903 6888 * that can be queried here.
6904 6889 *
6905 6890 * The only time this function is used is when a system is booting over nfs.
6906 6891 * In this case the system has to resolve the pathname of the boot device
6907 6892 * to it's ppa.
6908 6893 */
6909 6894 int
6910 6895 i_ddi_devi_get_ppa(dev_info_t *dip)
6911 6896 {
6912 6897 return (ddi_prop_get_int(DDI_DEV_T_ANY, dip,
6913 6898 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
6914 6899 GLD_DRIVER_PPA, ddi_get_instance(dip)));
6915 6900 }
6916 6901
6917 6902 /*
6918 6903 * i_ddi_devi_set_ppa() should only be called from gld_register()
6919 6904 * and only for GLD v0 drivers
6920 6905 */
void
i_ddi_devi_set_ppa(dev_info_t *dip, int ppa)
{
	/* record the GLD v0 ppa as a property for i_ddi_devi_get_ppa() */
	(void) e_ddi_prop_update_int(DDI_DEV_T_NONE, dip, GLD_DRIVER_PPA, ppa);
}
6926 6911
6927 6912
6928 6913 /*
6929 6914 * Private DDI Console bell functions.
6930 6915 */
6931 6916 void
6932 6917 ddi_ring_console_bell(clock_t duration)
6933 6918 {
6934 6919 if (ddi_console_bell_func != NULL)
6935 6920 (*ddi_console_bell_func)(duration);
6936 6921 }
6937 6922
void
ddi_set_console_bell(void (*bellfunc)(clock_t duration))
{
	/* install (or clear, with NULL) the handler ddi_ring_console_bell uses */
	ddi_console_bell_func = bellfunc;
}
6943 6928
6944 6929 int
6945 6930 ddi_dma_alloc_handle(dev_info_t *dip, ddi_dma_attr_t *attr,
6946 6931 int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *handlep)
6947 6932 {
6948 6933 int (*funcp)() = ddi_dma_allochdl;
6949 6934 ddi_dma_attr_t dma_attr;
6950 6935 struct bus_ops *bop;
6951 6936
6952 6937 if (attr == (ddi_dma_attr_t *)0)
6953 6938 return (DDI_DMA_BADATTR);
6954 6939
6955 6940 dma_attr = *attr;
6956 6941
6957 6942 bop = DEVI(dip)->devi_ops->devo_bus_ops;
6958 6943 if (bop && bop->bus_dma_allochdl)
6959 6944 funcp = bop->bus_dma_allochdl;
6960 6945
6961 6946 return ((*funcp)(dip, dip, &dma_attr, waitfp, arg, handlep));
6962 6947 }
6963 6948
/*
 * Free a DMA handle allocated by ddi_dma_alloc_handle().
 * NOTE(review): the local must be named 'h' — the HD macro used below
 * appears to expand in terms of a variable of that name; confirm against
 * the HD definition earlier in this file.
 */
void
ddi_dma_free_handle(ddi_dma_handle_t *handlep)
{
	ddi_dma_handle_t h = *handlep;
	(void) ddi_dma_freehdl(HD, HD, h);
}
6970 6955
6971 6956 static uintptr_t dma_mem_list_id = 0;
6972 6957
6973 6958
6974 6959 int
6975 6960 ddi_dma_mem_alloc(ddi_dma_handle_t handle, size_t length,
6976 6961 ddi_device_acc_attr_t *accattrp, uint_t flags,
6977 6962 int (*waitfp)(caddr_t), caddr_t arg, caddr_t *kaddrp,
6978 6963 size_t *real_length, ddi_acc_handle_t *handlep)
6979 6964 {
6980 6965 ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
6981 6966 dev_info_t *dip = hp->dmai_rdip;
6982 6967 ddi_acc_hdl_t *ap;
6983 6968 ddi_dma_attr_t *attrp = &hp->dmai_attr;
6984 6969 uint_t sleepflag, xfermodes;
6985 6970 int (*fp)(caddr_t);
6986 6971 int rval;
6987 6972
6988 6973 if (waitfp == DDI_DMA_SLEEP)
6989 6974 fp = (int (*)())KM_SLEEP;
6990 6975 else if (waitfp == DDI_DMA_DONTWAIT)
6991 6976 fp = (int (*)())KM_NOSLEEP;
6992 6977 else
6993 6978 fp = waitfp;
6994 6979 *handlep = impl_acc_hdl_alloc(fp, arg);
6995 6980 if (*handlep == NULL)
6996 6981 return (DDI_FAILURE);
6997 6982
6998 6983 /* check if the cache attributes are supported */
6999 6984 if (i_ddi_check_cache_attr(flags) == B_FALSE)
7000 6985 return (DDI_FAILURE);
7001 6986
7002 6987 /*
7003 6988 * Transfer the meaningful bits to xfermodes.
7004 6989 * Double-check if the 3rd party driver correctly sets the bits.
7005 6990 * If not, set DDI_DMA_STREAMING to keep compatibility.
7006 6991 */
7007 6992 xfermodes = flags & (DDI_DMA_CONSISTENT | DDI_DMA_STREAMING);
7008 6993 if (xfermodes == 0) {
7009 6994 xfermodes = DDI_DMA_STREAMING;
7010 6995 }
7011 6996
7012 6997 /*
7013 6998 * initialize the common elements of data access handle
7014 6999 */
7015 7000 ap = impl_acc_hdl_get(*handlep);
7016 7001 ap->ah_vers = VERS_ACCHDL;
7017 7002 ap->ah_dip = dip;
7018 7003 ap->ah_offset = 0;
7019 7004 ap->ah_len = 0;
7020 7005 ap->ah_xfermodes = flags;
7021 7006 ap->ah_acc = *accattrp;
7022 7007
7023 7008 sleepflag = ((waitfp == DDI_DMA_SLEEP) ? 1 : 0);
7024 7009 if (xfermodes == DDI_DMA_CONSISTENT) {
7025 7010 rval = i_ddi_mem_alloc(dip, attrp, length, sleepflag,
7026 7011 flags, accattrp, kaddrp, NULL, ap);
7027 7012 *real_length = length;
7028 7013 } else {
7029 7014 rval = i_ddi_mem_alloc(dip, attrp, length, sleepflag,
7030 7015 flags, accattrp, kaddrp, real_length, ap);
7031 7016 }
7032 7017 if (rval == DDI_SUCCESS) {
7033 7018 ap->ah_len = (off_t)(*real_length);
7034 7019 ap->ah_addr = *kaddrp;
7035 7020 } else {
7036 7021 impl_acc_hdl_free(*handlep);
7037 7022 *handlep = (ddi_acc_handle_t)NULL;
7038 7023 if (waitfp != DDI_DMA_SLEEP && waitfp != DDI_DMA_DONTWAIT) {
7039 7024 ddi_set_callback(waitfp, arg, &dma_mem_list_id);
7040 7025 }
7041 7026 rval = DDI_FAILURE;
7042 7027 }
7043 7028 return (rval);
7044 7029 }
7045 7030
7046 7031 void
7047 7032 ddi_dma_mem_free(ddi_acc_handle_t *handlep)
7048 7033 {
7049 7034 ddi_acc_hdl_t *ap;
7050 7035
7051 7036 ap = impl_acc_hdl_get(*handlep);
7052 7037 ASSERT(ap);
7053 7038
7054 7039 i_ddi_mem_free((caddr_t)ap->ah_addr, ap);
7055 7040
7056 7041 /*
7057 7042 * free the handle
7058 7043 */
7059 7044 impl_acc_hdl_free(*handlep);
7060 7045 *handlep = (ddi_acc_handle_t)NULL;
7061 7046
7062 7047 if (dma_mem_list_id != 0) {
7063 7048 ddi_run_callback(&dma_mem_list_id);
7064 7049 }
7065 7050 }
7066 7051
/*
 * Bind the memory described by buf 'bp' to the DMA handle.  Builds a
 * ddi_dma_req describing the buffer (page list, shadow page list, or
 * plain virtual address) and routes it to the parent nexus bind routine.
 */
int
ddi_dma_buf_bind_handle(ddi_dma_handle_t handle, struct buf *bp,
    uint_t flags, int (*waitfp)(caddr_t), caddr_t arg,
    ddi_dma_cookie_t *cookiep, uint_t *ccountp)
{
	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
	dev_info_t *dip, *rdip;
	struct ddi_dma_req dmareq;
	int (*funcp)();

	dmareq.dmar_flags = flags;
	dmareq.dmar_fp = waitfp;
	dmareq.dmar_arg = arg;
	dmareq.dmar_object.dmao_size = (uint_t)bp->b_bcount;

	if (bp->b_flags & B_PAGEIO) {
		/* paged I/O: describe the buffer by its page list */
		dmareq.dmar_object.dmao_type = DMA_OTYP_PAGES;
		dmareq.dmar_object.dmao_obj.pp_obj.pp_pp = bp->b_pages;
		dmareq.dmar_object.dmao_obj.pp_obj.pp_offset =
		    (uint_t)(((uintptr_t)bp->b_un.b_addr) & MMU_PAGEOFFSET);
	} else {
		dmareq.dmar_object.dmao_obj.virt_obj.v_addr = bp->b_un.b_addr;
		if (bp->b_flags & B_SHADOW) {
			/* shadow list of pre-looked-up pages */
			dmareq.dmar_object.dmao_obj.virt_obj.v_priv =
			    bp->b_shadow;
			dmareq.dmar_object.dmao_type = DMA_OTYP_BUFVADDR;
		} else {
			dmareq.dmar_object.dmao_type =
			    (bp->b_flags & (B_PHYS | B_REMAPPED)) ?
			    DMA_OTYP_BUFVADDR : DMA_OTYP_VADDR;
			dmareq.dmar_object.dmao_obj.virt_obj.v_priv = NULL;
		}

		/*
		 * If the buffer has no proc pointer, or the proc
		 * struct has the kernel address space, or the buffer has
		 * been marked B_REMAPPED (meaning that it is now
		 * mapped into the kernel's address space), then
		 * the address space is kas (kernel address space).
		 */
		if ((bp->b_proc == NULL) || (bp->b_proc->p_as == &kas) ||
		    (bp->b_flags & B_REMAPPED)) {
			dmareq.dmar_object.dmao_obj.virt_obj.v_as = 0;
		} else {
			dmareq.dmar_object.dmao_obj.virt_obj.v_as =
			    bp->b_proc->p_as;
		}
	}

	/* route the request to the parent nexus bind routine */
	dip = rdip = hp->dmai_rdip;
	if (dip != ddi_root_node())
		dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_bindhdl;
	funcp = DEVI(rdip)->devi_bus_dma_bindfunc;
	return ((*funcp)(dip, rdip, handle, &dmareq, cookiep, ccountp));
}
7122 7107
7123 7108 int
7124 7109 ddi_dma_addr_bind_handle(ddi_dma_handle_t handle, struct as *as,
7125 7110 caddr_t addr, size_t len, uint_t flags, int (*waitfp)(caddr_t),
7126 7111 caddr_t arg, ddi_dma_cookie_t *cookiep, uint_t *ccountp)
7127 7112 {
7128 7113 ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
7129 7114 dev_info_t *dip, *rdip;
7130 7115 struct ddi_dma_req dmareq;
7131 7116 int (*funcp)();
7132 7117
7133 7118 if (len == (uint_t)0) {
7134 7119 return (DDI_DMA_NOMAPPING);
7135 7120 }
7136 7121 dmareq.dmar_flags = flags;
7137 7122 dmareq.dmar_fp = waitfp;
7138 7123 dmareq.dmar_arg = arg;
7139 7124 dmareq.dmar_object.dmao_size = len;
7140 7125 dmareq.dmar_object.dmao_type = DMA_OTYP_VADDR;
7141 7126 dmareq.dmar_object.dmao_obj.virt_obj.v_as = as;
7142 7127 dmareq.dmar_object.dmao_obj.virt_obj.v_addr = addr;
7143 7128 dmareq.dmar_object.dmao_obj.virt_obj.v_priv = NULL;
7144 7129
7145 7130 dip = rdip = hp->dmai_rdip;
7146 7131 if (dip != ddi_root_node())
7147 7132 dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_bindhdl;
7148 7133 funcp = DEVI(rdip)->devi_bus_dma_bindfunc;
7149 7134 return ((*funcp)(dip, rdip, handle, &dmareq, cookiep, ccountp));
7150 7135 }
7151 7136
/*
 * Copy the next cookie from the handle's cookie array into *cookiep and
 * advance the handle's internal cookie pointer.  Fields are copied
 * individually rather than by struct assignment — NOTE(review): this
 * presumably leaves any other (e.g. overlapping union) members of the
 * caller's cookie untouched; confirm against the ddi_dma_cookie_t
 * definition before restructuring.
 */
void
ddi_dma_nextcookie(ddi_dma_handle_t handle, ddi_dma_cookie_t *cookiep)
{
	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
	ddi_dma_cookie_t *cp;

	cp = hp->dmai_cookie;
	ASSERT(cp);

	cookiep->dmac_notused = cp->dmac_notused;
	cookiep->dmac_type = cp->dmac_type;
	cookiep->dmac_address = cp->dmac_address;
	cookiep->dmac_size = cp->dmac_size;
	hp->dmai_cookie++;
}
7167 7152
7168 7153 int
7169 7154 ddi_dma_numwin(ddi_dma_handle_t handle, uint_t *nwinp)
7170 7155 {
7171 7156 ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
7172 7157 if ((hp->dmai_rflags & DDI_DMA_PARTIAL) == 0) {
7173 7158 return (DDI_FAILURE);
7174 7159 } else {
7175 7160 *nwinp = hp->dmai_nwin;
7176 7161 return (DDI_SUCCESS);
7177 7162 }
7178 7163 }
7179 7164
7180 7165 int
7181 7166 ddi_dma_getwin(ddi_dma_handle_t h, uint_t win, off_t *offp,
7182 7167 size_t *lenp, ddi_dma_cookie_t *cookiep, uint_t *ccountp)
7183 7168 {
7184 7169 int (*funcp)() = ddi_dma_win;
7185 7170 struct bus_ops *bop;
7186 7171
7187 7172 bop = DEVI(HD)->devi_ops->devo_bus_ops;
7188 7173 if (bop && bop->bus_dma_win)
7189 7174 funcp = bop->bus_dma_win;
7190 7175
7191 7176 return ((*funcp)(HD, HD, h, win, offp, lenp, cookiep, ccountp));
7192 7177 }
7193 7178
/*
 * Request 64-bit SBus transfers for the handle via the DDI_DMA_SET_SBUS64
 * DMA mctl operation.  (The parameter must remain named 'h' for HD.)
 */
int
ddi_dma_set_sbus64(ddi_dma_handle_t h, ulong_t burstsizes)
{
	return (ddi_dma_mctl(HD, HD, h, DDI_DMA_SET_SBUS64, 0,
	    &burstsizes, 0, 0));
}
7200 7185
/*
 * Default fault-check routine: report the handle's recorded fault state.
 */
int
i_ddi_dma_fault_check(ddi_dma_impl_t *hp)
{
	return (hp->dmai_fault);
}
7206 7191
7207 7192 int
7208 7193 ddi_check_dma_handle(ddi_dma_handle_t handle)
7209 7194 {
7210 7195 ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
7211 7196 int (*check)(ddi_dma_impl_t *);
7212 7197
7213 7198 if ((check = hp->dmai_fault_check) == NULL)
7214 7199 check = i_ddi_dma_fault_check;
7215 7200
7216 7201 return (((*check)(hp) == DDI_SUCCESS) ? DDI_SUCCESS : DDI_FAILURE);
7217 7202 }
7218 7203
7219 7204 void
7220 7205 i_ddi_dma_set_fault(ddi_dma_handle_t handle)
7221 7206 {
7222 7207 ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
7223 7208 void (*notify)(ddi_dma_impl_t *);
7224 7209
7225 7210 if (!hp->dmai_fault) {
7226 7211 hp->dmai_fault = 1;
7227 7212 if ((notify = hp->dmai_fault_notify) != NULL)
7228 7213 (*notify)(hp);
7229 7214 }
7230 7215 }
7231 7216
7232 7217 void
7233 7218 i_ddi_dma_clr_fault(ddi_dma_handle_t handle)
7234 7219 {
7235 7220 ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
7236 7221 void (*notify)(ddi_dma_impl_t *);
7237 7222
7238 7223 if (hp->dmai_fault) {
7239 7224 hp->dmai_fault = 0;
7240 7225 if ((notify = hp->dmai_fault_notify) != NULL)
7241 7226 (*notify)(hp);
7242 7227 }
7243 7228 }
7244 7229
7245 7230 /*
7246 7231 * register mapping routines.
7247 7232 */
/*
 * Map register set 'rnumber' of dip into kernel virtual address space,
 * returning the mapped address in *addrp and an access handle in
 * *handle.  On failure *handle is set to NULL.
 */
int
ddi_regs_map_setup(dev_info_t *dip, uint_t rnumber, caddr_t *addrp,
    offset_t offset, offset_t len, ddi_device_acc_attr_t *accattrp,
    ddi_acc_handle_t *handle)
{
	ddi_map_req_t mr;
	ddi_acc_hdl_t *hp;
	int result;

	/*
	 * Allocate and initialize the common elements of data access handle.
	 */
	*handle = impl_acc_hdl_alloc(KM_SLEEP, NULL);
	hp = impl_acc_hdl_get(*handle);
	hp->ah_vers = VERS_ACCHDL;
	hp->ah_dip = dip;
	hp->ah_rnumber = rnumber;
	hp->ah_offset = offset;
	hp->ah_len = len;
	hp->ah_acc = *accattrp;

	/*
	 * Set up the mapping request and call to parent.
	 */
	mr.map_op = DDI_MO_MAP_LOCKED;
	mr.map_type = DDI_MT_RNUMBER;
	mr.map_obj.rnumber = rnumber;
	mr.map_prot = PROT_READ | PROT_WRITE;
	mr.map_flags = DDI_MF_KERNEL_MAPPING;
	mr.map_handlep = hp;
	mr.map_vers = DDI_MAP_VERSION;
	result = ddi_map(dip, &mr, offset, len, addrp);

	/*
	 * check for end result
	 */
	if (result != DDI_SUCCESS) {
		/* mapping failed: release the handle allocated above */
		impl_acc_hdl_free(*handle);
		*handle = (ddi_acc_handle_t)NULL;
	} else {
		hp->ah_addr = *addrp;
	}

	return (result);
}
7293 7278
7294 7279 void
7295 7280 ddi_regs_map_free(ddi_acc_handle_t *handlep)
7296 7281 {
7297 7282 ddi_map_req_t mr;
7298 7283 ddi_acc_hdl_t *hp;
7299 7284
7300 7285 hp = impl_acc_hdl_get(*handlep);
7301 7286 ASSERT(hp);
7302 7287
7303 7288 mr.map_op = DDI_MO_UNMAP;
7304 7289 mr.map_type = DDI_MT_RNUMBER;
7305 7290 mr.map_obj.rnumber = hp->ah_rnumber;
7306 7291 mr.map_prot = PROT_READ | PROT_WRITE;
7307 7292 mr.map_flags = DDI_MF_KERNEL_MAPPING;
7308 7293 mr.map_handlep = hp;
7309 7294 mr.map_vers = DDI_MAP_VERSION;
7310 7295
7311 7296 /*
7312 7297 * Call my parent to unmap my regs.
7313 7298 */
7314 7299 (void) ddi_map(hp->ah_dip, &mr, hp->ah_offset,
7315 7300 hp->ah_len, &hp->ah_addr);
7316 7301 /*
7317 7302 * free the handle
7318 7303 */
7319 7304 impl_acc_hdl_free(*handlep);
7320 7305 *handlep = (ddi_acc_handle_t)NULL;
7321 7306 }
7322 7307
7323 7308 int
7324 7309 ddi_device_zero(ddi_acc_handle_t handle, caddr_t dev_addr, size_t bytecount,
7325 7310 ssize_t dev_advcnt, uint_t dev_datasz)
7326 7311 {
7327 7312 uint8_t *b;
7328 7313 uint16_t *w;
7329 7314 uint32_t *l;
7330 7315 uint64_t *ll;
7331 7316
7332 7317 /* check for total byte count is multiple of data transfer size */
7333 7318 if (bytecount != ((bytecount / dev_datasz) * dev_datasz))
7334 7319 return (DDI_FAILURE);
7335 7320
7336 7321 switch (dev_datasz) {
7337 7322 case DDI_DATA_SZ01_ACC:
7338 7323 for (b = (uint8_t *)dev_addr;
7339 7324 bytecount != 0; bytecount -= 1, b += dev_advcnt)
7340 7325 ddi_put8(handle, b, 0);
7341 7326 break;
7342 7327 case DDI_DATA_SZ02_ACC:
7343 7328 for (w = (uint16_t *)dev_addr;
7344 7329 bytecount != 0; bytecount -= 2, w += dev_advcnt)
7345 7330 ddi_put16(handle, w, 0);
7346 7331 break;
7347 7332 case DDI_DATA_SZ04_ACC:
7348 7333 for (l = (uint32_t *)dev_addr;
7349 7334 bytecount != 0; bytecount -= 4, l += dev_advcnt)
7350 7335 ddi_put32(handle, l, 0);
7351 7336 break;
7352 7337 case DDI_DATA_SZ08_ACC:
7353 7338 for (ll = (uint64_t *)dev_addr;
7354 7339 bytecount != 0; bytecount -= 8, ll += dev_advcnt)
7355 7340 ddi_put64(handle, ll, 0x0ll);
7356 7341 break;
7357 7342 default:
7358 7343 return (DDI_FAILURE);
7359 7344 }
7360 7345 return (DDI_SUCCESS);
7361 7346 }
7362 7347
7363 7348 int
7364 7349 ddi_device_copy(
7365 7350 ddi_acc_handle_t src_handle, caddr_t src_addr, ssize_t src_advcnt,
7366 7351 ddi_acc_handle_t dest_handle, caddr_t dest_addr, ssize_t dest_advcnt,
7367 7352 size_t bytecount, uint_t dev_datasz)
7368 7353 {
7369 7354 uint8_t *b_src, *b_dst;
7370 7355 uint16_t *w_src, *w_dst;
7371 7356 uint32_t *l_src, *l_dst;
7372 7357 uint64_t *ll_src, *ll_dst;
7373 7358
7374 7359 /* check for total byte count is multiple of data transfer size */
7375 7360 if (bytecount != ((bytecount / dev_datasz) * dev_datasz))
7376 7361 return (DDI_FAILURE);
7377 7362
7378 7363 switch (dev_datasz) {
7379 7364 case DDI_DATA_SZ01_ACC:
7380 7365 b_src = (uint8_t *)src_addr;
7381 7366 b_dst = (uint8_t *)dest_addr;
7382 7367
7383 7368 for (; bytecount != 0; bytecount -= 1) {
7384 7369 ddi_put8(dest_handle, b_dst,
7385 7370 ddi_get8(src_handle, b_src));
7386 7371 b_dst += dest_advcnt;
7387 7372 b_src += src_advcnt;
7388 7373 }
7389 7374 break;
7390 7375 case DDI_DATA_SZ02_ACC:
7391 7376 w_src = (uint16_t *)src_addr;
7392 7377 w_dst = (uint16_t *)dest_addr;
7393 7378
7394 7379 for (; bytecount != 0; bytecount -= 2) {
7395 7380 ddi_put16(dest_handle, w_dst,
7396 7381 ddi_get16(src_handle, w_src));
7397 7382 w_dst += dest_advcnt;
7398 7383 w_src += src_advcnt;
7399 7384 }
7400 7385 break;
7401 7386 case DDI_DATA_SZ04_ACC:
7402 7387 l_src = (uint32_t *)src_addr;
7403 7388 l_dst = (uint32_t *)dest_addr;
7404 7389
7405 7390 for (; bytecount != 0; bytecount -= 4) {
7406 7391 ddi_put32(dest_handle, l_dst,
7407 7392 ddi_get32(src_handle, l_src));
7408 7393 l_dst += dest_advcnt;
7409 7394 l_src += src_advcnt;
7410 7395 }
7411 7396 break;
7412 7397 case DDI_DATA_SZ08_ACC:
7413 7398 ll_src = (uint64_t *)src_addr;
7414 7399 ll_dst = (uint64_t *)dest_addr;
7415 7400
7416 7401 for (; bytecount != 0; bytecount -= 8) {
7417 7402 ddi_put64(dest_handle, ll_dst,
7418 7403 ddi_get64(src_handle, ll_src));
7419 7404 ll_dst += dest_advcnt;
7420 7405 ll_src += src_advcnt;
7421 7406 }
7422 7407 break;
7423 7408 default:
7424 7409 return (DDI_FAILURE);
7425 7410 }
7426 7411 return (DDI_SUCCESS);
7427 7412 }
7428 7413
/*
 * Byte-swap macros and functions.  The macros are retained for use
 * elsewhere in this file; the ddi_swap* functions below implement the
 * same byte reversal directly.
 */
#define	swap16(value)  \
	((((value) & 0xff) << 8) | ((value) >> 8))

#define	swap32(value)	\
	(((uint32_t)swap16((uint16_t)((value) & 0xffff)) << 16) | \
	(uint32_t)swap16((uint16_t)((value) >> 16)))

#define	swap64(value)	\
	(((uint64_t)swap32((uint32_t)((value) & 0xffffffff)) \
	    << 32) | \
	(uint64_t)swap32((uint32_t)((value) >> 32)))

/* Reverse the two bytes of a 16-bit value. */
uint16_t
ddi_swap16(uint16_t value)
{
	return ((uint16_t)(((value & 0xff) << 8) | (value >> 8)));
}

/* Reverse the four bytes of a 32-bit value. */
uint32_t
ddi_swap32(uint32_t value)
{
	return (((value & 0x000000ffU) << 24) |
	    ((value & 0x0000ff00U) << 8) |
	    ((value & 0x00ff0000U) >> 8) |
	    (value >> 24));
}

/* Reverse the eight bytes of a 64-bit value. */
uint64_t
ddi_swap64(uint64_t value)
{
	uint64_t lo = ddi_swap32((uint32_t)(value & 0xffffffffU));
	uint64_t hi = ddi_swap32((uint32_t)(value >> 32));

	return ((lo << 32) | hi);
}
7458 7443
7459 7444 /*
7460 7445 * Convert a binding name to a driver name.
7461 7446 * A binding name is the name used to determine the driver for a
7462 7447 * device - it may be either an alias for the driver or the name
7463 7448 * of the driver itself.
7464 7449 */
7465 7450 char *
7466 7451 i_binding_to_drv_name(char *bname)
7467 7452 {
7468 7453 major_t major_no;
7469 7454
7470 7455 ASSERT(bname != NULL);
7471 7456
7472 7457 if ((major_no = ddi_name_to_major(bname)) == -1)
7473 7458 return (NULL);
7474 7459 return (ddi_major_to_name(major_no));
7475 7460 }
7476 7461
7477 7462 /*
7478 7463 * Search for minor name that has specified dev_t and spec_type.
7479 7464 * If spec_type is zero then any dev_t match works. Since we
7480 7465 * are returning a pointer to the minor name string, we require the
7481 7466 * caller to do the locking.
7482 7467 */
char *
i_ddi_devtspectype_to_minorname(dev_info_t *dip, dev_t dev, int spec_type)
{
	struct ddi_minor_data *dmdp;

	/*
	 * The did layered driver currently intentionally returns a
	 * devinfo ptr for an underlying sd instance based on a did
	 * dev_t. In this case it is not an error.
	 *
	 * The did layered driver is associated with Sun Cluster.
	 */
	ASSERT((ddi_driver_major(dip) == getmajor(dev)) ||
	    (strcmp(ddi_major_to_name(getmajor(dev)), "did") == 0));

	/* caller must hold the devinfo node busy while we walk its minors */
	ASSERT(DEVI_BUSY_OWNED(dip));
	for (dmdp = DEVI(dip)->devi_minor; dmdp; dmdp = dmdp->next) {
		/* match on minor type, dev_t, and (when given) spec_type */
		if (((dmdp->type == DDM_MINOR) ||
		    (dmdp->type == DDM_INTERNAL_PATH) ||
		    (dmdp->type == DDM_DEFAULT)) &&
		    (dmdp->ddm_dev == dev) &&
		    ((((spec_type & (S_IFCHR|S_IFBLK))) == 0) ||
		    (dmdp->ddm_spec_type == spec_type)))
			return (dmdp->ddm_name);
	}

	/* no matching minor node */
	return (NULL);
}
7511 7496
7512 7497 /*
7513 7498 * Find the devt and spectype of the specified minor_name.
7514 7499 * Return DDI_FAILURE if minor_name not found. Since we are
7515 7500 * returning everything via arguments we can do the locking.
7516 7501 */
int
i_ddi_minorname_to_devtspectype(dev_info_t *dip, char *minor_name,
    dev_t *devtp, int *spectypep)
{
	int circ;
	struct ddi_minor_data *dmdp;

	/* deal with clone minor nodes */
	if (dip == clone_dip) {
		major_t major;
		/*
		 * Make sure minor_name is a STREAMS driver.
		 * We load the driver but don't attach to any instances.
		 */

		major = ddi_name_to_major(minor_name);
		if (major == DDI_MAJOR_T_NONE)
			return (DDI_FAILURE);

		if (ddi_hold_driver(major) == NULL)
			return (DDI_FAILURE);

		/* non-STREAMS drivers cannot be accessed through clone */
		if (STREAMSTAB(major) == NULL) {
			ddi_rele_driver(major);
			return (DDI_FAILURE);
		}
		ddi_rele_driver(major);

		if (devtp)
			*devtp = makedevice(clone_major, (minor_t)major);

		if (spectypep)
			*spectypep = S_IFCHR;

		return (DDI_SUCCESS);
	}

	/* hold the node busy while we walk its minor list */
	ndi_devi_enter(dip, &circ);
	for (dmdp = DEVI(dip)->devi_minor; dmdp; dmdp = dmdp->next) {
		/* skip non-matching types and names */
		if (((dmdp->type != DDM_MINOR) &&
		    (dmdp->type != DDM_INTERNAL_PATH) &&
		    (dmdp->type != DDM_DEFAULT)) ||
		    strcmp(minor_name, dmdp->ddm_name))
			continue;

		if (devtp)
			*devtp = dmdp->ddm_dev;

		if (spectypep)
			*spectypep = dmdp->ddm_spec_type;

		ndi_devi_exit(dip, circ);
		return (DDI_SUCCESS);
	}
	ndi_devi_exit(dip, circ);

	return (DDI_FAILURE);
}
7575 7560
7576 7561 static kmutex_t devid_gen_mutex;
7577 7562 static short devid_gen_number;
7578 7563
7579 7564 #ifdef DEBUG
7580 7565
7581 7566 static int devid_register_corrupt = 0;
7582 7567 static int devid_register_corrupt_major = 0;
7583 7568 static int devid_register_corrupt_hint = 0;
7584 7569 static int devid_register_corrupt_hint_major = 0;
7585 7570
7586 7571 static int devid_lyr_debug = 0;
7587 7572
7588 7573 #define DDI_DEBUG_DEVID_DEVTS(msg, ndevs, devs) \
7589 7574 if (devid_lyr_debug) \
7590 7575 ddi_debug_devid_devts(msg, ndevs, devs)
7591 7576
7592 7577 #else
7593 7578
7594 7579 #define DDI_DEBUG_DEVID_DEVTS(msg, ndevs, devs)
7595 7580
7596 7581 #endif /* DEBUG */
7597 7582
7598 7583
7599 7584 #ifdef DEBUG
7600 7585
7601 7586 static void
7602 7587 ddi_debug_devid_devts(char *msg, int ndevs, dev_t *devs)
7603 7588 {
7604 7589 int i;
7605 7590
7606 7591 cmn_err(CE_CONT, "%s:\n", msg);
7607 7592 for (i = 0; i < ndevs; i++) {
7608 7593 cmn_err(CE_CONT, " 0x%lx\n", devs[i]);
7609 7594 }
7610 7595 }
7611 7596
7612 7597 static void
7613 7598 ddi_debug_devid_paths(char *msg, int npaths, char **paths)
7614 7599 {
7615 7600 int i;
7616 7601
7617 7602 cmn_err(CE_CONT, "%s:\n", msg);
7618 7603 for (i = 0; i < npaths; i++) {
7619 7604 cmn_err(CE_CONT, " %s\n", paths[i]);
7620 7605 }
7621 7606 }
7622 7607
7623 7608 static void
7624 7609 ddi_debug_devid_devts_per_path(char *path, int ndevs, dev_t *devs)
7625 7610 {
7626 7611 int i;
7627 7612
7628 7613 cmn_err(CE_CONT, "dev_ts per path %s\n", path);
7629 7614 for (i = 0; i < ndevs; i++) {
7630 7615 cmn_err(CE_CONT, " 0x%lx\n", devs[i]);
7631 7616 }
7632 7617 }
7633 7618
7634 7619 #endif /* DEBUG */
7635 7620
7636 7621 /*
7637 7622 * Register device id into DDI framework.
7638 7623 * Must be called when the driver is bound.
7639 7624 */
static int
i_ddi_devid_register(dev_info_t *dip, ddi_devid_t devid)
{
	impl_devid_t *i_devid = (impl_devid_t *)devid;
	size_t driver_len;
	const char *driver_name;
	char *devid_str;
	major_t major;

	/* the node must be bound to a driver */
	if ((dip == NULL) ||
	    ((major = ddi_driver_major(dip)) == DDI_MAJOR_T_NONE))
		return (DDI_FAILURE);

	/* verify that the devid is valid */
	if (ddi_devid_valid(devid) != DDI_SUCCESS)
		return (DDI_FAILURE);

	/* Updating driver name hint in devid */
	driver_name = ddi_driver_name(dip);
	driver_len = strlen(driver_name);
	if (driver_len > DEVID_HINT_SIZE) {
		/* Pick up last four characters of driver name */
		driver_name += driver_len - DEVID_HINT_SIZE;
		driver_len = DEVID_HINT_SIZE;
	}
	/* overwrite the hint field with the (possibly truncated) name */
	bzero(i_devid->did_driver, DEVID_HINT_SIZE);
	bcopy(driver_name, i_devid->did_driver, driver_len);

#ifdef DEBUG
	/* Corrupt the devid for testing. */
	if (devid_register_corrupt)
		i_devid->did_id[0] += devid_register_corrupt;
	if (devid_register_corrupt_major &&
	    (major == devid_register_corrupt_major))
		i_devid->did_id[0] += 1;
	if (devid_register_corrupt_hint)
		i_devid->did_driver[0] += devid_register_corrupt_hint;
	if (devid_register_corrupt_hint_major &&
	    (major == devid_register_corrupt_hint_major))
		i_devid->did_driver[0] += 1;
#endif /* DEBUG */

	/* encode the devid as a string */
	if ((devid_str = ddi_devid_str_encode(devid, NULL)) == NULL)
		return (DDI_FAILURE);

	/* add string as a string property */
	if (ndi_prop_update_string(DDI_DEV_T_NONE, dip,
	    DEVID_PROP_NAME, devid_str) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s%d: devid property update failed",
		    ddi_driver_name(dip), ddi_get_instance(dip));
		ddi_devid_str_free(devid_str);
		return (DDI_FAILURE);
	}

	/* keep pointer to devid string for interrupt context fma code */
	if (DEVI(dip)->devi_devid_str)
		ddi_devid_str_free(DEVI(dip)->devi_devid_str);
	DEVI(dip)->devi_devid_str = devid_str;
	return (DDI_SUCCESS);
}
7701 7686
7702 7687 int
7703 7688 ddi_devid_register(dev_info_t *dip, ddi_devid_t devid)
7704 7689 {
7705 7690 int rval;
7706 7691
7707 7692 rval = i_ddi_devid_register(dip, devid);
7708 7693 if (rval == DDI_SUCCESS) {
7709 7694 /*
7710 7695 * Register devid in devid-to-path cache
7711 7696 */
7712 7697 if (e_devid_cache_register(dip, devid) == DDI_SUCCESS) {
7713 7698 mutex_enter(&DEVI(dip)->devi_lock);
7714 7699 DEVI(dip)->devi_flags |= DEVI_CACHED_DEVID;
7715 7700 mutex_exit(&DEVI(dip)->devi_lock);
7716 7701 } else if (ddi_get_name_addr(dip)) {
7717 7702 /*
7718 7703 * We only expect cache_register DDI_FAILURE when we
7719 7704 * can't form the full path because of NULL devi_addr.
7720 7705 */
7721 7706 cmn_err(CE_WARN, "%s%d: failed to cache devid",
7722 7707 ddi_driver_name(dip), ddi_get_instance(dip));
7723 7708 }
7724 7709 } else {
7725 7710 cmn_err(CE_WARN, "%s%d: failed to register devid",
7726 7711 ddi_driver_name(dip), ddi_get_instance(dip));
7727 7712 }
7728 7713 return (rval);
7729 7714 }
7730 7715
7731 7716 /*
7732 7717 * Remove (unregister) device id from DDI framework.
7733 7718 * Must be called when device is detached.
7734 7719 */
7735 7720 static void
7736 7721 i_ddi_devid_unregister(dev_info_t *dip)
7737 7722 {
7738 7723 if (DEVI(dip)->devi_devid_str) {
7739 7724 ddi_devid_str_free(DEVI(dip)->devi_devid_str);
7740 7725 DEVI(dip)->devi_devid_str = NULL;
7741 7726 }
7742 7727
7743 7728 /* remove the devid property */
7744 7729 (void) ndi_prop_remove(DDI_DEV_T_NONE, dip, DEVID_PROP_NAME);
7745 7730 }
7746 7731
/*
 * Public devid unregistration: clear the cached-devid flag, remove the
 * entry from the devid-to-path cache, then strip the devid from the node.
 */
void
ddi_devid_unregister(dev_info_t *dip)
{
	/* flag must be cleared under devi_lock before the cache is purged */
	mutex_enter(&DEVI(dip)->devi_lock);
	DEVI(dip)->devi_flags &= ~DEVI_CACHED_DEVID;
	mutex_exit(&DEVI(dip)->devi_lock);
	e_devid_cache_unregister(dip);
	i_ddi_devid_unregister(dip);
}
7756 7741
/*
 * Allocate and initialize a device id.
 *
 * For the serial-number-based types (DEVID_SCSI3_WWN, DEVID_SCSI_SERIAL,
 * DEVID_ATA_SERIAL, DEVID_ENCAP) the caller supplies the id bytes; for
 * DEVID_FAB the id is fabricated here from hostid + timestamp + a
 * generation number.  On success *ret_devid holds the new devid and
 * DDI_SUCCESS is returned; the caller frees it with ddi_devid_free().
 */
int
ddi_devid_init(
	dev_info_t	*dip,
	ushort_t	devid_type,
	ushort_t	nbytes,
	void		*id,
	ddi_devid_t	*ret_devid)
{
	impl_devid_t	*i_devid;
	/* did_id is declared as one char; subtract it before adding nbytes */
	int		sz = sizeof (*i_devid) + nbytes - sizeof (char);
	int		driver_len;
	const char	*driver_name;

	/* validate the type/nbytes/id combination */
	switch (devid_type) {
	case DEVID_SCSI3_WWN:
		/*FALLTHRU*/
	case DEVID_SCSI_SERIAL:
		/*FALLTHRU*/
	case DEVID_ATA_SERIAL:
		/*FALLTHRU*/
	case DEVID_ENCAP:
		/* caller-supplied id bytes are mandatory for these types */
		if (nbytes == 0)
			return (DDI_FAILURE);
		if (id == NULL)
			return (DDI_FAILURE);
		break;
	case DEVID_FAB:
		/* fabricated ids take no caller data; we build the payload */
		if (nbytes != 0)
			return (DDI_FAILURE);
		if (id != NULL)
			return (DDI_FAILURE);
		/* hostid + timestamp + generation number */
		nbytes = sizeof (int) +
		    sizeof (struct timeval32) + sizeof (short);
		sz += nbytes;
		break;
	default:
		return (DDI_FAILURE);
	}

	/* NOTE(review): KM_SLEEP allocations do not fail, check is defensive */
	if ((i_devid = kmem_zalloc(sz, KM_SLEEP)) == NULL)
		return (DDI_FAILURE);

	/* fill in the fixed header: magic, revision, type and length */
	i_devid->did_magic_hi = DEVID_MAGIC_MSB;
	i_devid->did_magic_lo = DEVID_MAGIC_LSB;
	i_devid->did_rev_hi = DEVID_REV_MSB;
	i_devid->did_rev_lo = DEVID_REV_LSB;
	DEVID_FORMTYPE(i_devid, devid_type);
	DEVID_FORMLEN(i_devid, nbytes);

	/* Fill in driver name hint */
	driver_name = ddi_driver_name(dip);
	driver_len = strlen(driver_name);
	if (driver_len > DEVID_HINT_SIZE) {
		/* Pick up last four characters of driver name */
		driver_name += driver_len - DEVID_HINT_SIZE;
		driver_len = DEVID_HINT_SIZE;
	}

	bcopy(driver_name, i_devid->did_driver, driver_len);

	/* Fill in id field */
	if (devid_type == DEVID_FAB) {
		char		*cp;
		uint32_t	hostid;
		struct timeval32 timestamp32;
		int		i;
		int		*ip;
		short		gen;

		/* increase the generation number */
		mutex_enter(&devid_gen_mutex);
		gen = devid_gen_number++;
		mutex_exit(&devid_gen_mutex);

		cp = i_devid->did_id;

		/* Fill in host id (big-endian byte ordering) */
		hostid = zone_get_hostid(NULL);
		*cp++ = hibyte(hiword(hostid));
		*cp++ = lobyte(hiword(hostid));
		*cp++ = hibyte(loword(hostid));
		*cp++ = lobyte(loword(hostid));

		/*
		 * Fill in timestamp (big-endian byte ordering)
		 *
		 * (Note that the format may have to be changed
		 * before 2038 comes around, though it's arguably
		 * unique enough as it is..)
		 */
		uniqtime32(&timestamp32);
		ip = (int *)&timestamp32;
		for (i = 0;
		    i < sizeof (timestamp32) / sizeof (int); i++, ip++) {
			int	val;
			val = *ip;
			*cp++ = hibyte(hiword(val));
			*cp++ = lobyte(hiword(val));
			*cp++ = hibyte(loword(val));
			*cp++ = lobyte(loword(val));
		}

		/* fill in the generation number */
		*cp++ = hibyte(gen);
		*cp++ = lobyte(gen);
	} else
		bcopy(id, i_devid->did_id, nbytes);

	/* return device id */
	*ret_devid = (ddi_devid_t)i_devid;
	return (DDI_SUCCESS);
}
7872 7857
/*
 * Return a copy of this node's devid, looked up without regard to any
 * particular dev_t (DDI_DEV_T_ANY).
 */
int
ddi_devid_get(dev_info_t *dip, ddi_devid_t *ret_devid)
{
	return (i_ddi_devi_get_devid(DDI_DEV_T_ANY, dip, ret_devid));
}
7878 7863
7879 7864 int
7880 7865 i_ddi_devi_get_devid(dev_t dev, dev_info_t *dip, ddi_devid_t *ret_devid)
7881 7866 {
7882 7867 char *devidstr;
7883 7868
7884 7869 ASSERT(dev != DDI_DEV_T_NONE);
7885 7870
7886 7871 /* look up the property, devt specific first */
7887 7872 if (ddi_prop_lookup_string(dev, dip, DDI_PROP_DONTPASS,
7888 7873 DEVID_PROP_NAME, &devidstr) != DDI_PROP_SUCCESS) {
7889 7874 if ((dev == DDI_DEV_T_ANY) ||
7890 7875 (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip,
7891 7876 DDI_PROP_DONTPASS, DEVID_PROP_NAME, &devidstr) !=
7892 7877 DDI_PROP_SUCCESS)) {
7893 7878 return (DDI_FAILURE);
7894 7879 }
7895 7880 }
7896 7881
7897 7882 /* convert to binary form */
7898 7883 if (ddi_devid_str_decode(devidstr, ret_devid, NULL) == -1) {
7899 7884 ddi_prop_free(devidstr);
7900 7885 return (DDI_FAILURE);
7901 7886 }
7902 7887 ddi_prop_free(devidstr);
7903 7888 return (DDI_SUCCESS);
7904 7889 }
7905 7890
7906 7891 /*
7907 7892 * Return a copy of the device id for dev_t
7908 7893 */
7909 7894 int
7910 7895 ddi_lyr_get_devid(dev_t dev, ddi_devid_t *ret_devid)
7911 7896 {
7912 7897 dev_info_t *dip;
7913 7898 int rval;
7914 7899
7915 7900 /* get the dip */
7916 7901 if ((dip = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
7917 7902 return (DDI_FAILURE);
7918 7903
7919 7904 rval = i_ddi_devi_get_devid(dev, dip, ret_devid);
7920 7905
7921 7906 ddi_release_devi(dip); /* e_ddi_hold_devi_by_dev() */
7922 7907 return (rval);
7923 7908 }
7924 7909
/*
 * Return a copy of the minor name for dev_t and spec_type
 *
 * On success *minor_name points to a freshly-allocated copy which the
 * caller is responsible for freeing; on failure it is set to NULL.
 */
int
ddi_lyr_get_minor_name(dev_t dev, int spec_type, char **minor_name)
{
	char		*buf;
	int		circ;
	dev_info_t	*dip;
	char		*nm;
	int		rval;

	if ((dip = e_ddi_hold_devi_by_dev(dev, 0)) == NULL) {
		*minor_name = NULL;
		return (DDI_FAILURE);
	}

	/* Find the minor name and copy into max size buf */
	buf = kmem_alloc(MAXNAMELEN, KM_SLEEP);
	/* hold the node single-threaded while walking its minor list */
	ndi_devi_enter(dip, &circ);
	nm = i_ddi_devtspectype_to_minorname(dip, dev, spec_type);
	/* copy out before ndi_devi_exit: nm points into node state */
	if (nm)
		(void) strcpy(buf, nm);
	ndi_devi_exit(dip, circ);
	ddi_release_devi(dip);	/* e_ddi_hold_devi_by_dev() */

	if (nm) {
		/* duplicate into min size buf for return result */
		*minor_name = i_ddi_strdup(buf, KM_SLEEP);
		rval = DDI_SUCCESS;
	} else {
		*minor_name = NULL;
		rval = DDI_FAILURE;
	}

	/* free max size buf and return */
	kmem_free(buf, MAXNAMELEN);
	return (rval);
}
7964 7949
7965 7950 int
7966 7951 ddi_lyr_devid_to_devlist(
7967 7952 ddi_devid_t devid,
7968 7953 char *minor_name,
7969 7954 int *retndevs,
7970 7955 dev_t **retdevs)
7971 7956 {
7972 7957 ASSERT(ddi_devid_valid(devid) == DDI_SUCCESS);
7973 7958
7974 7959 if (e_devid_cache_to_devt_list(devid, minor_name,
7975 7960 retndevs, retdevs) == DDI_SUCCESS) {
7976 7961 ASSERT(*retndevs > 0);
7977 7962 DDI_DEBUG_DEVID_DEVTS("ddi_lyr_devid_to_devlist",
7978 7963 *retndevs, *retdevs);
7979 7964 return (DDI_SUCCESS);
7980 7965 }
7981 7966
7982 7967 if (e_ddi_devid_discovery(devid) == DDI_FAILURE) {
7983 7968 return (DDI_FAILURE);
7984 7969 }
7985 7970
7986 7971 if (e_devid_cache_to_devt_list(devid, minor_name,
7987 7972 retndevs, retdevs) == DDI_SUCCESS) {
7988 7973 ASSERT(*retndevs > 0);
7989 7974 DDI_DEBUG_DEVID_DEVTS("ddi_lyr_devid_to_devlist",
7990 7975 *retndevs, *retdevs);
7991 7976 return (DDI_SUCCESS);
7992 7977 }
7993 7978
7994 7979 return (DDI_FAILURE);
7995 7980 }
7996 7981
7997 7982 void
7998 7983 ddi_lyr_free_devlist(dev_t *devlist, int ndevs)
7999 7984 {
8000 7985 kmem_free(devlist, sizeof (dev_t) * ndevs);
8001 7986 }
8002 7987
/*
 * Note: This will need to be fixed if we ever allow processes to
 * have more than one data model per exec.
 */
model_t
ddi_mmap_get_model(void)
{
	/* data model (e.g. ILP32/LP64) of the current user process */
	return (get_udatamodel());
}
8012 7997
/*
 * Mask the model to the defined model bits and clear DDI_MODEL_NATIVE,
 * so the result can be compared directly against the non-native models.
 */
model_t
ddi_model_convert_from(model_t model)
{
	return ((model & DDI_MODEL_MASK) & ~DDI_MODEL_NATIVE);
}
8018 8003
8019 8004 /*
8020 8005 * ddi interfaces managing storage and retrieval of eventcookies.
8021 8006 */
8022 8007
8023 8008 /*
8024 8009 * Invoke bus nexus driver's implementation of the
8025 8010 * (*bus_remove_eventcall)() interface to remove a registered
8026 8011 * callback handler for "event".
8027 8012 */
8028 8013 int
8029 8014 ddi_remove_event_handler(ddi_callback_id_t id)
8030 8015 {
8031 8016 ndi_event_callbacks_t *cb = (ndi_event_callbacks_t *)id;
8032 8017 dev_info_t *ddip;
8033 8018
8034 8019 ASSERT(cb);
8035 8020 if (!cb) {
8036 8021 return (DDI_FAILURE);
8037 8022 }
8038 8023
8039 8024 ddip = NDI_EVENT_DDIP(cb->ndi_evtcb_cookie);
8040 8025 return (ndi_busop_remove_eventcall(ddip, id));
8041 8026 }
8042 8027
/*
 * Invoke bus nexus driver's implementation of the
 * (*bus_add_eventcall)() interface to register a callback handler
 * for "event".  On success *id receives a callback id that can later
 * be passed to ddi_remove_event_handler().
 */
int
ddi_add_event_handler(dev_info_t *dip, ddi_eventcookie_t event,
    void (*handler)(dev_info_t *, ddi_eventcookie_t, void *, void *),
    void *arg, ddi_callback_id_t *id)
{
	return (ndi_busop_add_eventcall(dip, dip, event, handler, arg, id));
}
8055 8040
8056 8041
/*
 * Return a handle for event "name" by calling up the device tree
 * hierarchy via (*bus_get_eventcookie)() interface until claimed
 * by a bus nexus or top of dev_info tree is reached.
 */
int
ddi_get_eventcookie(dev_info_t *dip, char *name,
    ddi_eventcookie_t *event_cookiep)
{
	return (ndi_busop_get_eventcookie(dip, dip,
	    name, event_cookiep));
}
8069 8054
/*
 * This procedure is provided as the general callback function when
 * umem_lockmemory calls as_add_callback for long term memory locking.
 * When as_unmap, as_setprot, or as_free encounter segments which have
 * locked memory, this callback will be invoked.
 */
void
umem_lock_undo(struct as *as, void *arg, uint_t event)
{
	_NOTE(ARGUNUSED(as, event))
	struct ddi_umem_cookie *cp = (struct ddi_umem_cookie *)arg;

	/*
	 * Call the cleanup function.  Decrement the cookie reference
	 * count, if it goes to zero, return the memory for the cookie.
	 * The i_ddi_umem_unlock for this cookie may or may not have been
	 * called already.  It is the responsibility of the caller of
	 * umem_lockmemory to handle the case of the cleanup routine
	 * being called after a ddi_umem_unlock for the cookie
	 * was called.
	 */

	/* driver-supplied cleanup (set up in umem_lockmemory) */
	(*cp->callbacks.cbo_umem_lock_cleanup)((ddi_umem_cookie_t)cp);

	/* remove the cookie if reference goes to zero */
	if (atomic_dec_ulong_nv((ulong_t *)(&(cp->cook_refcnt))) == 0) {
		kmem_free(cp, sizeof (struct ddi_umem_cookie));
	}
}
8099 8084
8100 8085 /*
8101 8086 * The following two Consolidation Private routines provide generic
8102 8087 * interfaces to increase/decrease the amount of device-locked memory.
8103 8088 *
8104 8089 * To keep project_rele and project_hold consistent, i_ddi_decr_locked_memory()
8105 8090 * must be called every time i_ddi_incr_locked_memory() is called.
8106 8091 */
8107 8092 int
8108 8093 /* ARGSUSED */
8109 8094 i_ddi_incr_locked_memory(proc_t *procp, rctl_qty_t inc)
8110 8095 {
8111 8096 ASSERT(procp != NULL);
8112 8097 mutex_enter(&procp->p_lock);
8113 8098 if (rctl_incr_locked_mem(procp, NULL, inc, 1)) {
8114 8099 mutex_exit(&procp->p_lock);
8115 8100 return (ENOMEM);
8116 8101 }
8117 8102 mutex_exit(&procp->p_lock);
8118 8103 return (0);
8119 8104 }
8120 8105
/*
 * To keep project_rele and project_hold consistent, i_ddi_incr_locked_memory()
 * must be called every time i_ddi_decr_locked_memory() is called.
 */
/* ARGSUSED */
void
i_ddi_decr_locked_memory(proc_t *procp, rctl_qty_t dec)
{
	ASSERT(procp != NULL);
	/* p_lock protects the rctl accounting for this process */
	mutex_enter(&procp->p_lock);
	rctl_decr_locked_mem(procp, NULL, dec, 1);
	mutex_exit(&procp->p_lock);
}
8134 8119
8135 8120 /*
8136 8121 * The cookie->upd_max_lock_rctl flag is used to determine if we should
8137 8122 * charge device locked memory to the max-locked-memory rctl. Tracking
8138 8123 * device locked memory causes the rctl locks to get hot under high-speed
8139 8124 * I/O such as RDSv3 over IB. If there is no max-locked-memory rctl limit,
8140 8125 * we bypass charging the locked memory to the rctl altogether. The cookie's
8141 8126 * flag tells us if the rctl value should be updated when unlocking the memory,
8142 8127 * in case the rctl gets changed after the memory was locked. Any device
8143 8128 * locked memory in that rare case will not be counted toward the rctl limit.
8144 8129 *
8145 8130 * When tracking the locked memory, the kproject_t parameter is always NULL
8146 8131 * in the code paths:
8147 8132 * i_ddi_incr_locked_memory -> rctl_incr_locked_mem
8148 8133 * i_ddi_decr_locked_memory -> rctl_decr_locked_mem
8149 8134 * Thus, we always use the tk_proj member to check the projp setting.
8150 8135 */
8151 8136 static void
8152 8137 init_lockedmem_rctl_flag(struct ddi_umem_cookie *cookie)
8153 8138 {
8154 8139 proc_t *p;
8155 8140 kproject_t *projp;
8156 8141 zone_t *zonep;
8157 8142
8158 8143 ASSERT(cookie);
8159 8144 p = cookie->procp;
8160 8145 ASSERT(p);
8161 8146
8162 8147 zonep = p->p_zone;
8163 8148 projp = p->p_task->tk_proj;
8164 8149
8165 8150 ASSERT(zonep);
8166 8151 ASSERT(projp);
8167 8152
8168 8153 if (zonep->zone_locked_mem_ctl == UINT64_MAX &&
8169 8154 projp->kpj_data.kpd_locked_mem_ctl == UINT64_MAX)
8170 8155 cookie->upd_max_lock_rctl = 0;
8171 8156 else
8172 8157 cookie->upd_max_lock_rctl = 1;
8173 8158 }
8174 8159
8175 8160 /*
8176 8161 * This routine checks if the max-locked-memory resource ctl is
8177 8162 * exceeded, if not increments it, grabs a hold on the project.
8178 8163 * Returns 0 if successful otherwise returns error code
8179 8164 */
8180 8165 static int
8181 8166 umem_incr_devlockmem(struct ddi_umem_cookie *cookie)
8182 8167 {
8183 8168 proc_t *procp;
8184 8169 int ret;
8185 8170
8186 8171 ASSERT(cookie);
8187 8172 if (cookie->upd_max_lock_rctl == 0)
8188 8173 return (0);
8189 8174
8190 8175 procp = cookie->procp;
8191 8176 ASSERT(procp);
8192 8177
8193 8178 if ((ret = i_ddi_incr_locked_memory(procp,
8194 8179 cookie->size)) != 0) {
8195 8180 return (ret);
8196 8181 }
8197 8182 return (0);
8198 8183 }
8199 8184
8200 8185 /*
8201 8186 * Decrements the max-locked-memory resource ctl and releases
8202 8187 * the hold on the project that was acquired during umem_incr_devlockmem
8203 8188 */
8204 8189 static void
8205 8190 umem_decr_devlockmem(struct ddi_umem_cookie *cookie)
8206 8191 {
8207 8192 proc_t *proc;
8208 8193
8209 8194 if (cookie->upd_max_lock_rctl == 0)
8210 8195 return;
8211 8196
8212 8197 proc = (proc_t *)cookie->procp;
8213 8198 if (!proc)
8214 8199 return;
8215 8200
8216 8201 i_ddi_decr_locked_memory(proc, cookie->size);
8217 8202 }
8218 8203
/*
 * A consolidation private function which is essentially equivalent to
 * ddi_umem_lock but with the addition of arguments ops_vector and procp.
 * A call to as_add_callback is done if DDI_UMEMLOCK_LONGTERM is set, and
 * the ops_vector is valid.
 *
 * Lock the virtual address range in the current process and create a
 * ddi_umem_cookie (of type UMEM_LOCKED). This can be used to pass to
 * ddi_umem_iosetup to create a buf or do devmap_umem_setup/remap to export
 * to user space.
 *
 * Note: The resource control accounting currently uses a full charge model
 * in other words attempts to lock the same/overlapping areas of memory
 * will deduct the full size of the buffer from the projects running
 * counter for the device locked memory.
 *
 * addr, size should be PAGESIZE aligned
 *
 * flags - DDI_UMEMLOCK_READ, DDI_UMEMLOCK_WRITE or both
 *	identifies whether the locked memory will be read or written or both
 *      DDI_UMEMLOCK_LONGTERM  must be set when the locking will
 * be maintained for an indefinitely long period (essentially permanent),
 * rather than for what would be required for a typical I/O completion.
 * When DDI_UMEMLOCK_LONGTERM is set, umem_lockmemory will return EFAULT
 * if the memory pertains to a regular file which is mapped MAP_SHARED.
 * This is to prevent a deadlock if a file truncation is attempted
 * after the locking is done.
 *
 * Returns 0 on success
 *	EINVAL - for invalid parameters
 *	EPERM, ENOMEM and other error codes returned by as_pagelock
 *	ENOMEM - is returned if the current request to lock memory exceeds
 *		*.max-locked-memory resource control value.
 *      EFAULT - memory pertains to a regular file mapped shared and
 *		and DDI_UMEMLOCK_LONGTERM flag is set
 *	EAGAIN - could not start the ddi_umem_unlock list processing thread
 */
int
umem_lockmemory(caddr_t addr, size_t len, int flags, ddi_umem_cookie_t *cookie,
    struct umem_callback_ops *ops_vector,
    proc_t *procp)
{
	int	error;
	struct ddi_umem_cookie *p;
	void	(*driver_callback)() = NULL;
	struct as *as;
	struct seg		*seg;
	vnode_t			*vp;

	/* Allow device drivers to not have to reference "curproc" */
	if (procp == NULL)
		procp = curproc;
	as = procp->p_as;
	*cookie = NULL;		/* in case of any error return */

	/* These are the only three valid flags */
	if ((flags & ~(DDI_UMEMLOCK_READ | DDI_UMEMLOCK_WRITE |
	    DDI_UMEMLOCK_LONGTERM)) != 0)
		return (EINVAL);

	/* At least one (can be both) of the two access flags must be set */
	if ((flags & (DDI_UMEMLOCK_READ | DDI_UMEMLOCK_WRITE)) == 0)
		return (EINVAL);

	/* addr and len must be page-aligned */
	if (((uintptr_t)addr & PAGEOFFSET) != 0)
		return (EINVAL);

	if ((len & PAGEOFFSET) != 0)
		return (EINVAL);

	/*
	 * For longterm locking a driver callback must be specified; if
	 * not longterm then a callback is optional.
	 */
	if (ops_vector != NULL) {
		if (ops_vector->cbo_umem_callback_version !=
		    UMEM_CALLBACK_VERSION)
			return (EINVAL);
		else
			driver_callback = ops_vector->cbo_umem_lock_cleanup;
	}
	if ((driver_callback == NULL) && (flags & DDI_UMEMLOCK_LONGTERM))
		return (EINVAL);

	/*
	 * Call i_ddi_umem_unlock_thread_start if necessary.  It will
	 * be called on first ddi_umem_lock or umem_lockmemory call.
	 */
	if (ddi_umem_unlock_thread == NULL)
		i_ddi_umem_unlock_thread_start();

	/* Allocate memory for the cookie */
	p = kmem_zalloc(sizeof (struct ddi_umem_cookie), KM_SLEEP);

	/* Convert the flags to seg_rw type */
	if (flags & DDI_UMEMLOCK_WRITE) {
		p->s_flags = S_WRITE;
	} else {
		p->s_flags = S_READ;
	}

	/* Store procp in cookie for later iosetup/unlock */
	p->procp = (void *)procp;

	/*
	 * Store the struct as pointer in cookie for later use by
	 * ddi_umem_unlock.  The proc->p_as will be stale if ddi_umem_unlock
	 * is called after relvm is called.
	 */
	p->asp = as;

	/*
	 * The size field is needed for lockmem accounting.
	 */
	p->size = len;
	init_lockedmem_rctl_flag(p);

	if (umem_incr_devlockmem(p) != 0) {
		/*
		 * The requested memory cannot be locked
		 */
		kmem_free(p, sizeof (struct ddi_umem_cookie));
		*cookie = (ddi_umem_cookie_t)NULL;
		return (ENOMEM);
	}

	/* Lock the pages corresponding to addr, len in memory */
	error = as_pagelock(as, &(p->pparray), addr, len, p->s_flags);
	if (error != 0) {
		/* undo the rctl charge made above */
		umem_decr_devlockmem(p);
		kmem_free(p, sizeof (struct ddi_umem_cookie));
		*cookie = (ddi_umem_cookie_t)NULL;
		return (error);
	}

	/*
	 * For longterm locking the addr must pertain to a seg_vn segment
	 * or a seg_spt segment.
	 * If the segment pertains to a regular file, it cannot be
	 * mapped MAP_SHARED.
	 * This is to prevent a deadlock if a file truncation is attempted
	 * after the locking is done.
	 * Doing this after as_pagelock guarantees persistence of the as; if
	 * an unacceptable segment is found, the cleanup includes calling
	 * as_pageunlock before returning EFAULT.
	 *
	 * segdev is allowed here as it is already locked.  This allows
	 * for memory exported by drivers through mmap() (which is already
	 * locked) to be allowed for LONGTERM.
	 */
	if (flags & DDI_UMEMLOCK_LONGTERM) {
		extern  struct seg_ops segspt_shmops;
		extern	struct seg_ops segdev_ops;
		AS_LOCK_ENTER(as, RW_READER);
		/* walk every segment overlapping [addr, addr + len) */
		for (seg = as_segat(as, addr); ; seg = AS_SEGNEXT(as, seg)) {
			if (seg == NULL || seg->s_base > addr + len)
				break;
			if (seg->s_ops == &segdev_ops)
				continue;
			/* reject non-vn/spt segs and shared regular files */
			if (((seg->s_ops != &segvn_ops) &&
			    (seg->s_ops != &segspt_shmops)) ||
			    ((SEGOP_GETVP(seg, addr, &vp) == 0 &&
			    vp != NULL && vp->v_type == VREG) &&
			    (SEGOP_GETTYPE(seg, addr) & MAP_SHARED))) {
				as_pageunlock(as, p->pparray,
				    addr, len, p->s_flags);
				AS_LOCK_EXIT(as);
				umem_decr_devlockmem(p);
				kmem_free(p, sizeof (struct ddi_umem_cookie));
				*cookie = (ddi_umem_cookie_t)NULL;
				return (EFAULT);
			}
		}
		AS_LOCK_EXIT(as);
	}


	/* Initialize the fields in the ddi_umem_cookie */
	p->cvaddr = addr;
	p->type = UMEM_LOCKED;
	if (driver_callback != NULL) {
		/* i_ddi_umem_unlock and umem_lock_undo may need the cookie */
		p->cook_refcnt = 2;
		p->callbacks = *ops_vector;
	} else {
		/* only i_ddi_umem_unlock needs the cookie */
		p->cook_refcnt = 1;
	}

	*cookie = (ddi_umem_cookie_t)p;

	/*
	 * If a driver callback was specified, add an entry to the
	 * as struct callback list.  The as_pagelock above guarantees
	 * the persistence of as.
	 */
	if (driver_callback) {
		error = as_add_callback(as, umem_lock_undo, p, AS_ALL_EVENT,
		    addr, len, KM_SLEEP);
		if (error != 0) {
			as_pageunlock(as, p->pparray,
			    addr, len, p->s_flags);
			umem_decr_devlockmem(p);
			kmem_free(p, sizeof (struct ddi_umem_cookie));
			*cookie = (ddi_umem_cookie_t)NULL;
		}
	}
	return (error);
}
8429 8414
/*
 * Unlock the pages locked by ddi_umem_lock or umem_lockmemory and free
 * the cookie.  Called from i_ddi_umem_unlock_thread.
 */

static void
i_ddi_umem_unlock(struct ddi_umem_cookie *p)
{
	uint_t	rc;

	/*
	 * There is no way to determine whether a callback to
	 * umem_lock_undo was registered via as_add_callback.
	 * (i.e. umem_lockmemory was called with DDI_MEMLOCK_LONGTERM and
	 * a valid callback function structure.)  as_delete_callback
	 * is called to delete a possible registered callback.  If the
	 * return from as_delete_callbacks is AS_CALLBACK_DELETED, it
	 * indicates that there was a callback registered, and that it was
	 * successfully deleted.  Thus, the cookie reference count
	 * will never be decremented by umem_lock_undo.  Just return the
	 * memory for the cookie, since both users of the cookie are done.
	 * A return of AS_CALLBACK_NOTFOUND indicates a callback was
	 * never registered.  A return of AS_CALLBACK_DELETE_DEFERRED
	 * indicates that callback processing is taking place and
	 * umem_lock_undo is, or will be, executing, and thus decrementing
	 * the cookie reference count when it is complete.
	 *
	 * This needs to be done before as_pageunlock so that the
	 * persistence of as is guaranteed because of the locked pages.
	 *
	 */
	rc = as_delete_callback(p->asp, p);


	/*
	 * The proc->p_as will be stale if i_ddi_umem_unlock is called
	 * after relvm is called so use p->asp.
	 */
	as_pageunlock(p->asp, p->pparray, p->cvaddr, p->size, p->s_flags);

	/*
	 * Now that we have unlocked the memory decrement the
	 * *.max-locked-memory rctl
	 */
	umem_decr_devlockmem(p);

	if (rc == AS_CALLBACK_DELETED) {
		/* umem_lock_undo will not happen, return the cookie memory */
		ASSERT(p->cook_refcnt == 2);
		kmem_free(p, sizeof (struct ddi_umem_cookie));
	} else {
		/*
		 * umem_lock_undo may happen if as_delete_callback returned
		 * AS_CALLBACK_DELETE_DEFERRED.  In that case, decrement the
		 * reference count, atomically, and return the cookie
		 * memory if the reference count goes to zero.  The only
		 * other value for rc is AS_CALLBACK_NOTFOUND.  In that
		 * case, just return the cookie memory.
		 */
		if ((rc != AS_CALLBACK_DELETE_DEFERRED) ||
		    (atomic_dec_ulong_nv((ulong_t *)(&(p->cook_refcnt)))
		    == 0)) {
			kmem_free(p, sizeof (struct ddi_umem_cookie));
		}
	}
}
8496 8481
/*
 * i_ddi_umem_unlock_thread - deferred ddi_umem_unlock list handler.
 *
 * Call i_ddi_umem_unlock for entries in the ddi_umem_unlock list
 * until it is empty.  Then, wait for more to be added.  This thread is
 * awoken via calls to ddi_umem_unlock.
 */

static void
i_ddi_umem_unlock_thread(void)
{
	struct ddi_umem_cookie	*ret_cookie;
	callb_cpr_t	cprinfo;

	/* process the ddi_umem_unlock list */
	CALLB_CPR_INIT(&cprinfo, &ddi_umem_unlock_mutex,
	    callb_generic_cpr, "unlock_thread");
	for (;;) {
		mutex_enter(&ddi_umem_unlock_mutex);
		if (ddi_umem_unlock_head != NULL) {	/* list not empty */
			ret_cookie = ddi_umem_unlock_head;
			/* take it off the list */
			if ((ddi_umem_unlock_head =
			    ddi_umem_unlock_head->unl_forw) == NULL) {
				ddi_umem_unlock_tail = NULL;
			}
			/* drop the mutex before the (possibly long) unlock */
			mutex_exit(&ddi_umem_unlock_mutex);
			/* unlock the pages in this cookie */
			(void) i_ddi_umem_unlock(ret_cookie);
		} else {	/* list is empty, wait for next ddi_umem_unlock */
			/* mark CPR-safe while blocked in cv_wait */
			CALLB_CPR_SAFE_BEGIN(&cprinfo);
			cv_wait(&ddi_umem_unlock_cv, &ddi_umem_unlock_mutex);
			CALLB_CPR_SAFE_END(&cprinfo, &ddi_umem_unlock_mutex);
			mutex_exit(&ddi_umem_unlock_mutex);
		}
	}
	/* ddi_umem_unlock_thread does not exit */
	/* NOTREACHED */
}
8536 8521
8537 8522 /*
8538 8523 * Start the thread that will process the ddi_umem_unlock list if it is
8539 8524 * not already started (i_ddi_umem_unlock_thread).
8540 8525 */
8541 8526 static void
8542 8527 i_ddi_umem_unlock_thread_start(void)
8543 8528 {
8544 8529 mutex_enter(&ddi_umem_unlock_mutex);
8545 8530 if (ddi_umem_unlock_thread == NULL) {
8546 8531 ddi_umem_unlock_thread = thread_create(NULL, 0,
8547 8532 i_ddi_umem_unlock_thread, NULL, 0, &p0,
8548 8533 TS_RUN, minclsyspri);
8549 8534 }
8550 8535 mutex_exit(&ddi_umem_unlock_mutex);
8551 8536 }
8552 8537
/*
 * Lock the virtual address range in the current process and create a
 * ddi_umem_cookie (of type UMEM_LOCKED). This can be used to pass to
 * ddi_umem_iosetup to create a buf or do devmap_umem_setup/remap to export
 * to user space.
 *
 * Note: The resource control accounting currently uses a full charge model
 * in other words attempts to lock the same/overlapping areas of memory
 * will deduct the full size of the buffer from the projects running
 * counter for the device locked memory. This applies to umem_lockmemory too.
 *
 * addr, size should be PAGESIZE aligned
 * flags - DDI_UMEMLOCK_READ, DDI_UMEMLOCK_WRITE or both
 *	identifies whether the locked memory will be read or written or both
 *
 * Returns 0 on success
 *	EINVAL - for invalid parameters
 *	EPERM, ENOMEM and other error codes returned by as_pagelock
 *	ENOMEM - is returned if the current request to lock memory exceeds
 *	*.max-locked-memory resource control value.
 *	EAGAIN - could not start the ddi_umem_unlock list processing thread
 *	NOTE(review): no path below actually returns EAGAIN;
 *	i_ddi_umem_unlock_thread_start() uses thread_create(), which sleeps
 *	rather than failing — confirm before relying on EAGAIN.
 */
int
ddi_umem_lock(caddr_t addr, size_t len, int flags, ddi_umem_cookie_t *cookie)
{
	int	error;
	struct ddi_umem_cookie *p;

	*cookie = NULL;		/* in case of any error return */

	/* These are the only two valid flags */
	if ((flags & ~(DDI_UMEMLOCK_READ | DDI_UMEMLOCK_WRITE)) != 0) {
		return (EINVAL);
	}

	/* At least one of the two flags (or both) must be set */
	if ((flags & (DDI_UMEMLOCK_READ | DDI_UMEMLOCK_WRITE)) == 0) {
		return (EINVAL);
	}

	/* addr and len must be page-aligned */
	if (((uintptr_t)addr & PAGEOFFSET) != 0) {
		return (EINVAL);
	}

	if ((len & PAGEOFFSET) != 0) {
		return (EINVAL);
	}

	/*
	 * Call i_ddi_umem_unlock_thread_start if necessary.  It will
	 * be called on first ddi_umem_lock or umem_lockmemory call.
	 * (Unlocked check is fine: the start routine re-checks under
	 * ddi_umem_unlock_mutex.)
	 */
	if (ddi_umem_unlock_thread == NULL)
		i_ddi_umem_unlock_thread_start();

	/* Allocate memory for the cookie */
	p = kmem_zalloc(sizeof (struct ddi_umem_cookie), KM_SLEEP);

	/* Convert the flags to seg_rw type */
	if (flags & DDI_UMEMLOCK_WRITE) {
		p->s_flags = S_WRITE;
	} else {
		p->s_flags = S_READ;
	}

	/* Store curproc in cookie for later iosetup/unlock */
	p->procp = (void *)curproc;

	/*
	 * Store the struct as pointer in cookie for later use by
	 * ddi_umem_unlock. The proc->p_as will be stale if ddi_umem_unlock
	 * is called after relvm is called.
	 */
	p->asp = curproc->p_as;
	/*
	 * The size field is needed for lockmem accounting.
	 */
	p->size = len;
	init_lockedmem_rctl_flag(p);

	/* Charge the locked memory against the resource controls first. */
	if (umem_incr_devlockmem(p) != 0) {
		/*
		 * The requested memory cannot be locked
		 */
		kmem_free(p, sizeof (struct ddi_umem_cookie));
		*cookie = (ddi_umem_cookie_t)NULL;
		return (ENOMEM);
	}

	/* Lock the pages corresponding to addr, len in memory */
	error = as_pagelock(((proc_t *)p->procp)->p_as, &(p->pparray),
	    addr, len, p->s_flags);
	if (error != 0) {
		/* Roll back the lockedmem accounting before bailing out. */
		umem_decr_devlockmem(p);
		kmem_free(p, sizeof (struct ddi_umem_cookie));
		*cookie = (ddi_umem_cookie_t)NULL;
		return (error);
	}

	/* Initialize the fields in the ddi_umem_cookie */
	p->cvaddr = addr;
	p->type = UMEM_LOCKED;
	p->cook_refcnt = 1;

	*cookie = (ddi_umem_cookie_t)p;
	return (error);
}
8661 8646
/*
 * Release a UMEM_LOCKED cookie obtained from ddi_umem_lock().
 *
 * From interrupt context the cookie is queued on the ddi_umem_unlock
 * list and the pages are unlocked later by i_ddi_umem_unlock_thread;
 * from base level the pages are unlocked immediately.
 */

void
ddi_umem_unlock(ddi_umem_cookie_t cookie)
{
	struct ddi_umem_cookie	*p = (struct ddi_umem_cookie *)cookie;

	ASSERT(p->type == UMEM_LOCKED);
	ASSERT(CPU_ON_INTR(CPU) == 0); /* cannot be high level */
	ASSERT(ddi_umem_unlock_thread != NULL);

	p->unl_forw = (struct ddi_umem_cookie *)NULL;	/* end of list */
	/*
	 * Queue the unlock request and notify i_ddi_umem_unlock thread
	 * if it's called in the interrupt context. Otherwise, unlock pages
	 * immediately.
	 */
	if (servicing_interrupt()) {
		/* queue the unlock request and notify the thread */
		mutex_enter(&ddi_umem_unlock_mutex);
		if (ddi_umem_unlock_head == NULL) {
			/* List was empty: wake the worker thread. */
			ddi_umem_unlock_head = ddi_umem_unlock_tail = p;
			cv_broadcast(&ddi_umem_unlock_cv);
		} else {
			/* Worker already has pending work; just append. */
			ddi_umem_unlock_tail->unl_forw = p;
			ddi_umem_unlock_tail = p;
		}
		mutex_exit(&ddi_umem_unlock_mutex);
	} else {
		/* unlock the pages right away */
		(void) i_ddi_umem_unlock(p);
	}
}
8698 8683
8699 8684 /*
8700 8685 * Create a buf structure from a ddi_umem_cookie
8701 8686 * cookie - is a ddi_umem_cookie for from ddi_umem_lock and ddi_umem_alloc
8702 8687 * (only UMEM_LOCKED & KMEM_NON_PAGEABLE types supported)
8703 8688 * off, len - identifies the portion of the memory represented by the cookie
8704 8689 * that the buf points to.
8705 8690 * NOTE: off, len need to follow the alignment/size restrictions of the
8706 8691 * device (dev) that this buf will be passed to. Some devices
8707 8692 * will accept unrestricted alignment/size, whereas others (such as
8708 8693 * st) require some block-size alignment/size. It is the caller's
8709 8694 * responsibility to ensure that the alignment/size restrictions
8710 8695 * are met (we cannot assert as we do not know the restrictions)
8711 8696 *
8712 8697 * direction - is one of B_READ or B_WRITE and needs to be compatible with
8713 8698 * the flags used in ddi_umem_lock
8714 8699 *
8715 8700 * The following three arguments are used to initialize fields in the
8716 8701 * buf structure and are uninterpreted by this routine.
8717 8702 *
8718 8703 * dev
8719 8704 * blkno
8720 8705 * iodone
8721 8706 *
8722 8707 * sleepflag - is one of DDI_UMEM_SLEEP or DDI_UMEM_NOSLEEP
8723 8708 *
8724 8709 * Returns a buf structure pointer on success (to be freed by freerbuf)
8725 8710 * NULL on any parameter error or memory alloc failure
8726 8711 *
8727 8712 */
8728 8713 struct buf *
8729 8714 ddi_umem_iosetup(ddi_umem_cookie_t cookie, off_t off, size_t len,
8730 8715 int direction, dev_t dev, daddr_t blkno,
8731 8716 int (*iodone)(struct buf *), int sleepflag)
8732 8717 {
8733 8718 struct ddi_umem_cookie *p = (struct ddi_umem_cookie *)cookie;
8734 8719 struct buf *bp;
8735 8720
8736 8721 /*
8737 8722 * check for valid cookie offset, len
8738 8723 */
8739 8724 if ((off + len) > p->size) {
8740 8725 return (NULL);
8741 8726 }
8742 8727
8743 8728 if (len > p->size) {
8744 8729 return (NULL);
8745 8730 }
8746 8731
8747 8732 /* direction has to be one of B_READ or B_WRITE */
8748 8733 if ((direction != B_READ) && (direction != B_WRITE)) {
8749 8734 return (NULL);
8750 8735 }
8751 8736
8752 8737 /* These are the only two valid sleepflags */
8753 8738 if ((sleepflag != DDI_UMEM_SLEEP) && (sleepflag != DDI_UMEM_NOSLEEP)) {
8754 8739 return (NULL);
8755 8740 }
8756 8741
8757 8742 /*
8758 8743 * Only cookies of type UMEM_LOCKED and KMEM_NON_PAGEABLE are supported
8759 8744 */
8760 8745 if ((p->type != UMEM_LOCKED) && (p->type != KMEM_NON_PAGEABLE)) {
8761 8746 return (NULL);
8762 8747 }
8763 8748
8764 8749 /* If type is KMEM_NON_PAGEABLE procp is NULL */
8765 8750 ASSERT((p->type == KMEM_NON_PAGEABLE) ?
8766 8751 (p->procp == NULL) : (p->procp != NULL));
8767 8752
8768 8753 bp = kmem_alloc(sizeof (struct buf), sleepflag);
8769 8754 if (bp == NULL) {
8770 8755 return (NULL);
8771 8756 }
8772 8757 bioinit(bp);
8773 8758
8774 8759 bp->b_flags = B_BUSY | B_PHYS | direction;
8775 8760 bp->b_edev = dev;
8776 8761 bp->b_lblkno = blkno;
8777 8762 bp->b_iodone = iodone;
8778 8763 bp->b_bcount = len;
8779 8764 bp->b_proc = (proc_t *)p->procp;
8780 8765 ASSERT(((uintptr_t)(p->cvaddr) & PAGEOFFSET) == 0);
8781 8766 bp->b_un.b_addr = (caddr_t)((uintptr_t)(p->cvaddr) + off);
8782 8767 if (p->pparray != NULL) {
8783 8768 bp->b_flags |= B_SHADOW;
8784 8769 ASSERT(((uintptr_t)(p->cvaddr) & PAGEOFFSET) == 0);
8785 8770 bp->b_shadow = p->pparray + btop(off);
8786 8771 }
8787 8772 return (bp);
8788 8773 }
8789 8774
8790 8775 /*
8791 8776 * Fault-handling and related routines
8792 8777 */
8793 8778
8794 8779 ddi_devstate_t
8795 8780 ddi_get_devstate(dev_info_t *dip)
8796 8781 {
8797 8782 if (DEVI_IS_DEVICE_OFFLINE(dip))
8798 8783 return (DDI_DEVSTATE_OFFLINE);
8799 8784 else if (DEVI_IS_DEVICE_DOWN(dip) || DEVI_IS_BUS_DOWN(dip))
8800 8785 return (DDI_DEVSTATE_DOWN);
8801 8786 else if (DEVI_IS_BUS_QUIESCED(dip))
8802 8787 return (DDI_DEVSTATE_QUIESCED);
8803 8788 else if (DEVI_IS_DEVICE_DEGRADED(dip))
8804 8789 return (DDI_DEVSTATE_DEGRADED);
8805 8790 else
8806 8791 return (DDI_DEVSTATE_UP);
8807 8792 }
8808 8793
8809 8794 void
8810 8795 ddi_dev_report_fault(dev_info_t *dip, ddi_fault_impact_t impact,
8811 8796 ddi_fault_location_t location, const char *message)
8812 8797 {
8813 8798 struct ddi_fault_event_data fd;
8814 8799 ddi_eventcookie_t ec;
8815 8800
8816 8801 /*
8817 8802 * Assemble all the information into a fault-event-data structure
8818 8803 */
8819 8804 fd.f_dip = dip;
8820 8805 fd.f_impact = impact;
8821 8806 fd.f_location = location;
8822 8807 fd.f_message = message;
8823 8808 fd.f_oldstate = ddi_get_devstate(dip);
8824 8809
8825 8810 /*
8826 8811 * Get eventcookie from defining parent.
8827 8812 */
8828 8813 if (ddi_get_eventcookie(dip, DDI_DEVI_FAULT_EVENT, &ec) !=
8829 8814 DDI_SUCCESS)
8830 8815 return;
8831 8816
8832 8817 (void) ndi_post_event(dip, dip, ec, &fd);
8833 8818 }
8834 8819
/*
 * Return the device-class string stored on a devinfo node.  May be NULL
 * if no class has been assigned (see i_ddi_set_devi_class()).
 */
char *
i_ddi_devi_class(dev_info_t *dip)
{
	return (DEVI(dip)->devi_device_class);
}
8840 8825
8841 8826 int
8842 8827 i_ddi_set_devi_class(dev_info_t *dip, char *devi_class, int flag)
8843 8828 {
8844 8829 struct dev_info *devi = DEVI(dip);
8845 8830
8846 8831 mutex_enter(&devi->devi_lock);
8847 8832
8848 8833 if (devi->devi_device_class)
8849 8834 kmem_free(devi->devi_device_class,
8850 8835 strlen(devi->devi_device_class) + 1);
8851 8836
8852 8837 if ((devi->devi_device_class = i_ddi_strdup(devi_class, flag))
8853 8838 != NULL) {
8854 8839 mutex_exit(&devi->devi_lock);
8855 8840 return (DDI_SUCCESS);
8856 8841 }
8857 8842
8858 8843 mutex_exit(&devi->devi_lock);
8859 8844
8860 8845 return (DDI_FAILURE);
8861 8846 }
8862 8847
8863 8848
8864 8849 /*
8865 8850 * Task Queues DDI interfaces.
8866 8851 */
8867 8852
8868 8853 /* ARGSUSED */
8869 8854 ddi_taskq_t *
8870 8855 ddi_taskq_create(dev_info_t *dip, const char *name, int nthreads,
8871 8856 pri_t pri, uint_t cflags)
8872 8857 {
8873 8858 char full_name[TASKQ_NAMELEN];
8874 8859 const char *tq_name;
8875 8860 int nodeid = 0;
8876 8861
8877 8862 if (dip == NULL)
8878 8863 tq_name = name;
8879 8864 else {
8880 8865 nodeid = ddi_get_instance(dip);
8881 8866
8882 8867 if (name == NULL)
8883 8868 name = "tq";
8884 8869
8885 8870 (void) snprintf(full_name, sizeof (full_name), "%s_%s",
8886 8871 ddi_driver_name(dip), name);
8887 8872
8888 8873 tq_name = full_name;
8889 8874 }
8890 8875
8891 8876 return ((ddi_taskq_t *)taskq_create_instance(tq_name, nodeid, nthreads,
8892 8877 pri == TASKQ_DEFAULTPRI ? minclsyspri : pri,
8893 8878 nthreads, INT_MAX, TASKQ_PREPOPULATE));
8894 8879 }
8895 8880
/*
 * Destroy a task queue created with ddi_taskq_create(); waits for any
 * executing tasks (behavior of taskq_destroy()).
 */
void
ddi_taskq_destroy(ddi_taskq_t *tq)
{
	taskq_destroy((taskq_t *)tq);
}
8901 8886
8902 8887 int
8903 8888 ddi_taskq_dispatch(ddi_taskq_t *tq, void (* func)(void *),
8904 8889 void *arg, uint_t dflags)
8905 8890 {
8906 8891 taskqid_t id = taskq_dispatch((taskq_t *)tq, func, arg,
8907 8892 dflags == DDI_SLEEP ? TQ_SLEEP : TQ_NOSLEEP);
8908 8893
8909 8894 return (id != 0 ? DDI_SUCCESS : DDI_FAILURE);
8910 8895 }
8911 8896
/*
 * Wait for all tasks currently on the queue to complete.
 */
void
ddi_taskq_wait(ddi_taskq_t *tq)
{
	taskq_wait((taskq_t *)tq);
}
8917 8902
/*
 * Suspend execution of tasks on the queue (see taskq_suspend()).
 */
void
ddi_taskq_suspend(ddi_taskq_t *tq)
{
	taskq_suspend((taskq_t *)tq);
}
8923 8908
/*
 * Return B_TRUE if the task queue is currently suspended.
 */
boolean_t
ddi_taskq_suspended(ddi_taskq_t *tq)
{
	return (taskq_suspended((taskq_t *)tq));
}
8929 8914
/*
 * Resume a task queue previously suspended with ddi_taskq_suspend().
 */
void
ddi_taskq_resume(ddi_taskq_t *tq)
{
	taskq_resume((taskq_t *)tq);
}
8935 8920
/*
 * Split a driver-style name of the form "<prefix><number>" (e.g.
 * "e1000g0") into its prefix and trailing decimal number.
 *
 *   ifname - name to parse
 *   alnum  - output buffer for the prefix; assumed by the callers to be
 *            at least as large as ifname (not checked here)
 *   nump   - receives the parsed trailing number
 *
 * Returns DDI_SUCCESS, or DDI_FAILURE when the name has no trailing
 * digits, consists entirely of digits, or the number fails to parse.
 */
int
ddi_parse(
	const char *ifname,
	char *alnum,
	uint_t *nump)
{
	const char	*p;
	int		l;
	ulong_t		num;
	boolean_t	nonum = B_TRUE;
	char		c;

	l = strlen(ifname);
	/* Scan backwards over the trailing run of digits. */
	for (p = ifname + l; p != ifname; l--) {
		c = *--p;
		if (!isdigit(c)) {
			/* Copy the l-byte prefix, then parse the suffix. */
			(void) strlcpy(alnum, ifname, l + 1);
			if (ddi_strtoul(p + 1, NULL, 10, &num) != 0)
				return (DDI_FAILURE);
			break;
		}
		nonum = B_FALSE;
	}
	/* l == 0: all digits (or empty); nonum: no digits at all. */
	if (l == 0 || nonum)
		return (DDI_FAILURE);

	/* NOTE(review): num (ulong_t) is silently truncated to uint_t. */
	*nump = num;
	return (DDI_SUCCESS);
}
8965 8950
/*
 * Default initialization function for drivers that don't need to quiesce.
 */
/* ARGSUSED */
int
ddi_quiesce_not_needed(dev_info_t *dip)
{
	/* Nothing to quiesce; always report success. */
	return (DDI_SUCCESS);
}
8975 8960
/*
 * Initialization function for drivers that should implement quiesce()
 * but haven't yet.
 */
/* ARGSUSED */
int
ddi_quiesce_not_supported(dev_info_t *dip)
{
	/* Quiesce is unimplemented for this driver; always fail. */
	return (DDI_FAILURE);
}
8986 8971
8987 8972 char *
8988 8973 ddi_strdup(const char *str, int flag)
8989 8974 {
8990 8975 int n;
8991 8976 char *ptr;
8992 8977
8993 8978 ASSERT(str != NULL);
8994 8979 ASSERT((flag == KM_SLEEP) || (flag == KM_NOSLEEP));
8995 8980
8996 8981 n = strlen(str);
8997 8982 if ((ptr = kmem_alloc(n + 1, flag)) == NULL)
8998 8983 return (NULL);
8999 8984 bcopy(str, ptr, n + 1);
9000 8985 return (ptr);
9001 8986 }
9002 8987
/*
 * Kernel strdup(): always sleeps for memory, so it cannot return NULL.
 * Free the result with strfree().
 */
char *
strdup(const char *str)
{
	return (ddi_strdup(str, KM_SLEEP));
}
9008 8993
/*
 * Free a string allocated by ddi_strdup()/strdup().  The allocation
 * size is recomputed from the string length, so the string must not
 * have been shortened in place.
 */
void
strfree(char *str)
{
	ASSERT(str != NULL);
	kmem_free(str, strlen(str) + 1);
}
9015 9000
9016 9001 /*
9017 9002 * Generic DDI callback interfaces.
9018 9003 */
9019 9004
9020 9005 int
9021 9006 ddi_cb_register(dev_info_t *dip, ddi_cb_flags_t flags, ddi_cb_func_t cbfunc,
9022 9007 void *arg1, void *arg2, ddi_cb_handle_t *ret_hdlp)
9023 9008 {
9024 9009 ddi_cb_t *cbp;
9025 9010
9026 9011 ASSERT(dip != NULL);
9027 9012 ASSERT(DDI_CB_FLAG_VALID(flags));
9028 9013 ASSERT(cbfunc != NULL);
9029 9014 ASSERT(ret_hdlp != NULL);
9030 9015
9031 9016 /* Sanity check the context */
9032 9017 ASSERT(!servicing_interrupt());
9033 9018 if (servicing_interrupt())
9034 9019 return (DDI_FAILURE);
9035 9020
9036 9021 /* Validate parameters */
9037 9022 if ((dip == NULL) || !DDI_CB_FLAG_VALID(flags) ||
9038 9023 (cbfunc == NULL) || (ret_hdlp == NULL))
9039 9024 return (DDI_EINVAL);
9040 9025
9041 9026 /* Check for previous registration */
9042 9027 if (DEVI(dip)->devi_cb_p != NULL)
9043 9028 return (DDI_EALREADY);
9044 9029
9045 9030 /* Allocate and initialize callback */
9046 9031 cbp = kmem_zalloc(sizeof (ddi_cb_t), KM_SLEEP);
9047 9032 cbp->cb_dip = dip;
9048 9033 cbp->cb_func = cbfunc;
9049 9034 cbp->cb_arg1 = arg1;
9050 9035 cbp->cb_arg2 = arg2;
9051 9036 cbp->cb_flags = flags;
9052 9037 DEVI(dip)->devi_cb_p = cbp;
9053 9038
9054 9039 /* If adding an IRM callback, notify IRM */
9055 9040 if (flags & DDI_CB_FLAG_INTR)
9056 9041 i_ddi_irm_set_cb(dip, B_TRUE);
9057 9042
9058 9043 *ret_hdlp = (ddi_cb_handle_t)&(DEVI(dip)->devi_cb_p);
9059 9044 return (DDI_SUCCESS);
9060 9045 }
9061 9046
9062 9047 int
9063 9048 ddi_cb_unregister(ddi_cb_handle_t hdl)
9064 9049 {
9065 9050 ddi_cb_t *cbp;
9066 9051 dev_info_t *dip;
9067 9052
9068 9053 ASSERT(hdl != NULL);
9069 9054
9070 9055 /* Sanity check the context */
9071 9056 ASSERT(!servicing_interrupt());
9072 9057 if (servicing_interrupt())
9073 9058 return (DDI_FAILURE);
9074 9059
9075 9060 /* Validate parameters */
9076 9061 if ((hdl == NULL) || ((cbp = *(ddi_cb_t **)hdl) == NULL) ||
9077 9062 ((dip = cbp->cb_dip) == NULL))
9078 9063 return (DDI_EINVAL);
9079 9064
9080 9065 /* If removing an IRM callback, notify IRM */
9081 9066 if (cbp->cb_flags & DDI_CB_FLAG_INTR)
9082 9067 i_ddi_irm_set_cb(dip, B_FALSE);
9083 9068
9084 9069 /* Destroy the callback */
9085 9070 kmem_free(cbp, sizeof (ddi_cb_t));
9086 9071 DEVI(dip)->devi_cb_p = NULL;
9087 9072
9088 9073 return (DDI_SUCCESS);
9089 9074 }
9090 9075
9091 9076 /*
9092 9077 * Platform independent DR routines
9093 9078 */
9094 9079
9095 9080 static int
9096 9081 ndi2errno(int n)
9097 9082 {
9098 9083 int err = 0;
9099 9084
9100 9085 switch (n) {
9101 9086 case NDI_NOMEM:
9102 9087 err = ENOMEM;
9103 9088 break;
9104 9089 case NDI_BUSY:
9105 9090 err = EBUSY;
9106 9091 break;
9107 9092 case NDI_FAULT:
9108 9093 err = EFAULT;
9109 9094 break;
9110 9095 case NDI_FAILURE:
9111 9096 err = EIO;
9112 9097 break;
9113 9098 case NDI_SUCCESS:
9114 9099 break;
9115 9100 case NDI_BADHANDLE:
9116 9101 default:
9117 9102 err = EINVAL;
9118 9103 break;
9119 9104 }
9120 9105 return (err);
9121 9106 }
9122 9107
/*
 * Prom tree node list: singly-linked list of PROM nodes selected by
 * visit_node() for branch creation.
 */
struct ptnode {
	pnode_t nodeid;		/* PROM node identifier */
	struct ptnode *next;	/* next selected node, in visit order */
};
9130 9115
/*
 * Prom tree walk arg: state shared between e_ddi_branch_create(),
 * create_prom_branch(), and visit_node().
 */
struct pta {
	dev_info_t *pdip;	/* parent under which branches are created */
	devi_branch_t *bp;	/* caller's branch descriptor and callbacks */
	uint_t flags;		/* DEVI_BRANCH_* flags */
	dev_info_t *fdip;	/* held dip returned by configure, if any */
	struct ptnode *head;	/* list of selected PROM nodes */
};
9141 9126
/*
 * Recursively walk the PROM subtree rooted at nodeid.  Each node is
 * offered to the caller's prom_branch_select() callback; accepted nodes
 * are appended, in visit order, to the list headed at ap->head.  With
 * DEVI_BRANCH_CHILD set, the walk does not descend past nodeid itself.
 */
static void
visit_node(pnode_t nodeid, struct pta *ap)
{
	struct ptnode	**nextp;
	int		(*select)(pnode_t, void *, uint_t);

	ASSERT(nodeid != OBP_NONODE && nodeid != OBP_BADNODE);

	select = ap->bp->create.prom_branch_select;

	ASSERT(select);

	if (select(nodeid, ap->bp->arg, 0) == DDI_SUCCESS) {

		/* Find the list tail so nodes stay in visit order. */
		for (nextp = &ap->head; *nextp; nextp = &(*nextp)->next)
			;

		*nextp = kmem_zalloc(sizeof (struct ptnode), KM_SLEEP);

		(*nextp)->nodeid = nodeid;
	}

	/* DEVI_BRANCH_CHILD restricts the walk to this level. */
	if ((ap->flags & DEVI_BRANCH_CHILD) == DEVI_BRANCH_CHILD)
		return;

	nodeid = prom_childnode(nodeid);
	while (nodeid != OBP_NONODE && nodeid != OBP_BADNODE) {
		visit_node(nodeid, ap);
		nodeid = prom_nextnode(nodeid);
	}
}
9173 9158
/*
 * Mark a never-attached ("infant") dip offline so that only an explicit
 * configure operation can attach it later.
 *
 *   dip - devinfo node to mark offline
 *   arg - scratch char buffer (MAXPATHLEN) used only to render the
 *         device path for the warning message
 *
 * Returns DDI_SUCCESS, or DDI_FAILURE if the node is already attached.
 *
 * NOTE: The caller of this function must check for device contracts
 * or LDI callbacks against this dip before setting the dip offline.
 */
static int
set_infant_dip_offline(dev_info_t *dip, void *arg)
{
	char *path = (char *)arg;

	ASSERT(dip);
	ASSERT(arg);

	/* Refuse to force an already-attached node offline this way. */
	if (i_ddi_node_state(dip) >= DS_ATTACHED) {
		(void) ddi_pathname(dip, path);
		cmn_err(CE_WARN, "Attempt to set offline flag on attached "
		    "node: %s", path);
		return (DDI_FAILURE);
	}

	mutex_enter(&(DEVI(dip)->devi_lock));
	if (!DEVI_IS_DEVICE_OFFLINE(dip))
		DEVI_SET_DEVICE_OFFLINE(dip);
	mutex_exit(&(DEVI(dip)->devi_lock));

	return (DDI_SUCCESS);
}
9200 9185
/* Shared state for the dip_set_offline() device-tree walk. */
typedef struct result {
	char *path;	/* scratch buffer for building device paths */
	int result;	/* sticky overall status of the walk */
} result_t;
9205 9190
9206 9191 static int
9207 9192 dip_set_offline(dev_info_t *dip, void *arg)
9208 9193 {
9209 9194 int end;
9210 9195 result_t *resp = (result_t *)arg;
9211 9196
9212 9197 ASSERT(dip);
9213 9198 ASSERT(resp);
9214 9199
9215 9200 /*
9216 9201 * We stop the walk if e_ddi_offline_notify() returns
9217 9202 * failure, because this implies that one or more consumers
9218 9203 * (either LDI or contract based) has blocked the offline.
9219 9204 * So there is no point in conitnuing the walk
9220 9205 */
9221 9206 if (e_ddi_offline_notify(dip) == DDI_FAILURE) {
9222 9207 resp->result = DDI_FAILURE;
9223 9208 return (DDI_WALK_TERMINATE);
9224 9209 }
9225 9210
9226 9211 /*
9227 9212 * If set_infant_dip_offline() returns failure, it implies
9228 9213 * that we failed to set a particular dip offline. This
9229 9214 * does not imply that the offline as a whole should fail.
9230 9215 * We want to do the best we can, so we continue the walk.
9231 9216 */
9232 9217 if (set_infant_dip_offline(dip, resp->path) == DDI_SUCCESS)
9233 9218 end = DDI_SUCCESS;
9234 9219 else
9235 9220 end = DDI_FAILURE;
9236 9221
9237 9222 e_ddi_offline_finalize(dip, end);
9238 9223
9239 9224 return (DDI_WALK_CONTINUE);
9240 9225 }
9241 9226
9242 9227 /*
9243 9228 * The call to e_ddi_offline_notify() exists for the
9244 9229 * unlikely error case that a branch we are trying to
9245 9230 * create already exists and has device contracts or LDI
9246 9231 * event callbacks against it.
9247 9232 *
9248 9233 * We allow create to succeed for such branches only if
9249 9234 * no constraints block the offline.
9250 9235 */
9251 9236 static int
9252 9237 branch_set_offline(dev_info_t *dip, char *path)
9253 9238 {
9254 9239 int circ;
9255 9240 int end;
9256 9241 result_t res;
9257 9242
9258 9243
9259 9244 if (e_ddi_offline_notify(dip) == DDI_FAILURE) {
9260 9245 return (DDI_FAILURE);
9261 9246 }
9262 9247
9263 9248 if (set_infant_dip_offline(dip, path) == DDI_SUCCESS)
9264 9249 end = DDI_SUCCESS;
9265 9250 else
9266 9251 end = DDI_FAILURE;
9267 9252
9268 9253 e_ddi_offline_finalize(dip, end);
9269 9254
9270 9255 if (end == DDI_FAILURE)
9271 9256 return (DDI_FAILURE);
9272 9257
9273 9258 res.result = DDI_SUCCESS;
9274 9259 res.path = path;
9275 9260
9276 9261 ndi_devi_enter(dip, &circ);
9277 9262 ddi_walk_devs(ddi_get_child(dip), dip_set_offline, &res);
9278 9263 ndi_devi_exit(dip, circ);
9279 9264
9280 9265 return (res.result);
9281 9266 }
9282 9267
/*
 * prom_tree_access() callback: create devinfo branches under ap->pdip
 * for every PROM node accepted by the caller's prom_branch_select()
 * hook (gathered by visit_node()).  Each branch is created (or found
 * pre-existing), branch-held, set offline, and — with
 * DEVI_BRANCH_CONFIGURE — configured.  Returns 0 on success, EINVAL
 * for a bad parent nodeid, ENODEV if no PROM node was selected, or the
 * first error recorded while building/configuring branches.
 */
/*ARGSUSED*/
static int
create_prom_branch(void *arg, int has_changed)
{
	int		circ;
	int		exists, rv;
	pnode_t		nodeid;
	struct ptnode	*tnp;
	dev_info_t	*dip;
	struct pta	*ap = arg;
	devi_branch_t	*bp;
	char		*path;

	ASSERT(ap);
	ASSERT(ap->fdip == NULL);
	ASSERT(ap->pdip && ndi_dev_is_prom_node(ap->pdip));

	bp = ap->bp;

	nodeid = ddi_get_nodeid(ap->pdip);
	if (nodeid == OBP_NONODE || nodeid == OBP_BADNODE) {
		cmn_err(CE_WARN, "create_prom_branch: invalid "
		    "nodeid: 0x%x", nodeid);
		return (EINVAL);
	}

	ap->head = NULL;

	/* Collect the selected PROM nodes below the parent. */
	nodeid = prom_childnode(nodeid);
	while (nodeid != OBP_NONODE && nodeid != OBP_BADNODE) {
		visit_node(nodeid, ap);
		nodeid = prom_nextnode(nodeid);
	}

	if (ap->head == NULL)
		return (ENODEV);

	path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
	rv = 0;
	/* Consume the node list, creating one branch per entry. */
	while ((tnp = ap->head) != NULL) {
		ap->head = tnp->next;

		ndi_devi_enter(ap->pdip, &circ);

		/*
		 * Check if the branch already exists.
		 */
		exists = 0;
		dip = e_ddi_nodeid_to_dip(tnp->nodeid);
		if (dip != NULL) {
			exists = 1;

			/* Parent is held busy, so release hold */
			ndi_rele_devi(dip);
#ifdef DEBUG
			cmn_err(CE_WARN, "create_prom_branch: dip(%p) exists"
			    " for nodeid 0x%x", (void *)dip, tnp->nodeid);
#endif
		} else {
			dip = i_ddi_create_branch(ap->pdip, tnp->nodeid);
		}

		kmem_free(tnp, sizeof (struct ptnode));

		/*
		 * Hold the branch if it is not already held
		 */
		if (dip && !exists) {
			e_ddi_branch_hold(dip);
		}

		ASSERT(dip == NULL || e_ddi_branch_held(dip));

		/*
		 * Set all dips in the newly created branch offline so that
		 * only a "configure" operation can attach
		 * the branch
		 */
		if (dip == NULL || branch_set_offline(dip, path)
		    == DDI_FAILURE) {
			ndi_devi_exit(ap->pdip, circ);
			rv = EIO;
			continue;
		}

		ASSERT(ddi_get_parent(dip) == ap->pdip);

		ndi_devi_exit(ap->pdip, circ);

		if (ap->flags & DEVI_BRANCH_CONFIGURE) {
			int error = e_ddi_branch_configure(dip, &ap->fdip, 0);
			/* Keep the first error; later successes don't clear it. */
			if (error && rv == 0)
				rv = error;
		}

		/*
		 * Invoke devi_branch_callback() (if it exists) only for
		 * newly created branches
		 */
		if (bp->devi_branch_callback && !exists)
			bp->devi_branch_callback(dip, bp->arg, 0);
	}

	kmem_free(path, MAXPATHLEN);

	return (rv);
}
9390 9375
/*
 * Recursively create one self-identifying (SID) devinfo node under pdip
 * and, as directed by the caller's sid_branch_create() walk code, its
 * children.  pdip must be held busy by the caller.  On success the new
 * node is named (from its "name" property), bound, returned through
 * rdipp (if non-NULL, this is a branch root), and set offline so that
 * only a later "configure" attaches it.
 *
 * Returns a DDI_WALK_* code directing the parent's sibling creation:
 * DDI_WALK_CONTINUE / DDI_WALK_PRUNESIB on success, DDI_WALK_ERROR on
 * failure (node freed via the fail: path).
 */
static int
sid_node_create(dev_info_t *pdip, devi_branch_t *bp, dev_info_t **rdipp)
{
	int		rv, circ, len;
	int		i, flags, ret;
	dev_info_t	*dip;
	char		*nbuf;
	char		*path;
	static const char	*noname = "<none>";

	ASSERT(pdip);
	ASSERT(DEVI_BUSY_OWNED(pdip));

	flags = 0;

	/*
	 * Creating the root of a branch ?
	 */
	if (rdipp) {
		*rdipp = NULL;
		flags = DEVI_BRANCH_ROOT;
	}

	/* Allocate a placeholder node; the callback sets its properties. */
	ndi_devi_alloc_sleep(pdip, (char *)noname, DEVI_SID_NODEID, &dip);
	rv = bp->create.sid_branch_create(dip, bp->arg, flags);

	nbuf = kmem_alloc(OBP_MAXDRVNAME, KM_SLEEP);

	if (rv == DDI_WALK_ERROR) {
		cmn_err(CE_WARN, "e_ddi_branch_create: Error setting"
		    " properties on devinfo node %p", (void *)dip);
		goto fail;
	}

	len = OBP_MAXDRVNAME;
	if (ddi_getlongprop_buf(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "name", nbuf, &len)
	    != DDI_PROP_SUCCESS) {
		/* NOTE(review): message lacks a space between "has"/"no". */
		cmn_err(CE_WARN, "e_ddi_branch_create: devinfo node %p has"
		    "no name property", (void *)dip);
		goto fail;
	}

	ASSERT(i_ddi_node_state(dip) == DS_PROTO);
	if (ndi_devi_set_nodename(dip, nbuf, 0) != NDI_SUCCESS) {
		cmn_err(CE_WARN, "e_ddi_branch_create: cannot set name (%s)"
		    " for devinfo node %p", nbuf, (void *)dip);
		goto fail;
	}

	kmem_free(nbuf, OBP_MAXDRVNAME);

	/*
	 * Ignore bind failures just like boot does
	 */
	(void) ndi_devi_bind_driver(dip, 0);

	switch (rv) {
	case DDI_WALK_CONTINUE:
	case DDI_WALK_PRUNESIB:
		ndi_devi_enter(dip, &circ);

		/* Create children until one of them stops the walk. */
		i = DDI_WALK_CONTINUE;
		for (; i == DDI_WALK_CONTINUE; ) {
			i = sid_node_create(dip, bp, NULL);
		}

		ASSERT(i == DDI_WALK_ERROR || i == DDI_WALK_PRUNESIB);
		if (i == DDI_WALK_ERROR)
			rv = i;
		/*
		 * If PRUNESIB stop creating siblings
		 * of dip's child. Subsequent walk behavior
		 * is determined by rv returned by dip.
		 */

		ndi_devi_exit(dip, circ);
		break;
	case DDI_WALK_TERMINATE:
		/*
		 * Don't create children and ask our parent
		 * to not create siblings either.
		 */
		rv = DDI_WALK_PRUNESIB;
		break;
	case DDI_WALK_PRUNECHILD:
		/*
		 * Don't create children, but ask parent to continue
		 * with siblings.
		 */
		rv = DDI_WALK_CONTINUE;
		break;
	default:
		ASSERT(0);
		break;
	}

	if (rdipp)
		*rdipp = dip;

	/*
	 * Set device offline - only the "configure" op should cause an attach.
	 * Note that it is safe to set the dip offline without checking
	 * for either device contract or layered driver (LDI) based constraints
	 * since there cannot be any contracts or LDI opens of this device.
	 * This is because this node is a newly created dip with the parent busy
	 * held, so no other thread can come in and attach this dip. A dip that
	 * has never been attached cannot have contracts since by definition
	 * a device contract (an agreement between a process and a device minor
	 * node) can only be created against a device that has minor nodes
	 * i.e is attached. Similarly an LDI open will only succeed if the
	 * dip is attached. We assert below that the dip is not attached.
	 */
	ASSERT(i_ddi_node_state(dip) < DS_ATTACHED);
	path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
	ret = set_infant_dip_offline(dip, path);
	ASSERT(ret == DDI_SUCCESS);
	kmem_free(path, MAXPATHLEN);

	return (rv);
fail:
	(void) ndi_devi_free(dip);
	kmem_free(nbuf, OBP_MAXDRVNAME);
	return (DDI_WALK_ERROR);
}
9516 9501
/*
 * Create self-identifying (SID) branches under pdip, driven by the
 * caller's sid_branch_create() callback.  Keeps creating sibling branch
 * roots until the walk is stopped (DDI_WALK_PRUNESIB) or errors out
 * (DDI_WALK_ERROR).  Each created root is branch-held and, with
 * DEVI_BRANCH_CONFIGURE, configured.  Returns 0, EIO on walk error, or
 * the first configure error encountered.
 */
static int
create_sid_branch(
	dev_info_t *pdip,
	devi_branch_t *bp,
	dev_info_t **dipp,
	uint_t flags)
{
	int		rv = 0, state = DDI_WALK_CONTINUE;
	dev_info_t	*rdip;

	while (state == DDI_WALK_CONTINUE) {
		int	circ;

		ndi_devi_enter(pdip, &circ);

		state = sid_node_create(pdip, bp, &rdip);
		if (rdip == NULL) {
			ndi_devi_exit(pdip, circ);
			ASSERT(state == DDI_WALK_ERROR);
			break;
		}

		e_ddi_branch_hold(rdip);

		ndi_devi_exit(pdip, circ);

		if (flags & DEVI_BRANCH_CONFIGURE) {
			int error = e_ddi_branch_configure(rdip, dipp, 0);
			/* Keep the first error; don't let later ones clobber. */
			if (error && rv == 0)
				rv = error;
		}

		/*
		 * devi_branch_callback() is optional
		 */
		if (bp->devi_branch_callback)
			bp->devi_branch_callback(rdip, bp->arg, 0);
	}

	ASSERT(state == DDI_WALK_ERROR || state == DDI_WALK_PRUNESIB);

	return (state == DDI_WALK_ERROR ? EIO : rv);
}
9560 9545
9561 9546 int
9562 9547 e_ddi_branch_create(
9563 9548 dev_info_t *pdip,
9564 9549 devi_branch_t *bp,
9565 9550 dev_info_t **dipp,
9566 9551 uint_t flags)
9567 9552 {
9568 9553 int prom_devi, sid_devi, error;
9569 9554
9570 9555 if (pdip == NULL || bp == NULL || bp->type == 0)
9571 9556 return (EINVAL);
9572 9557
9573 9558 prom_devi = (bp->type == DEVI_BRANCH_PROM) ? 1 : 0;
9574 9559 sid_devi = (bp->type == DEVI_BRANCH_SID) ? 1 : 0;
9575 9560
9576 9561 if (prom_devi && bp->create.prom_branch_select == NULL)
9577 9562 return (EINVAL);
9578 9563 else if (sid_devi && bp->create.sid_branch_create == NULL)
9579 9564 return (EINVAL);
9580 9565 else if (!prom_devi && !sid_devi)
9581 9566 return (EINVAL);
9582 9567
9583 9568 if (flags & DEVI_BRANCH_EVENT)
9584 9569 return (EINVAL);
9585 9570
9586 9571 if (prom_devi) {
9587 9572 struct pta pta = {0};
9588 9573
9589 9574 pta.pdip = pdip;
9590 9575 pta.bp = bp;
9591 9576 pta.flags = flags;
9592 9577
9593 9578 error = prom_tree_access(create_prom_branch, &pta, NULL);
9594 9579
9595 9580 if (dipp)
9596 9581 *dipp = pta.fdip;
9597 9582 else if (pta.fdip)
9598 9583 ndi_rele_devi(pta.fdip);
9599 9584 } else {
9600 9585 error = create_sid_branch(pdip, bp, dipp, flags);
9601 9586 }
9602 9587
9603 9588 return (error);
9604 9589 }
9605 9590
9606 9591 int
9607 9592 e_ddi_branch_configure(dev_info_t *rdip, dev_info_t **dipp, uint_t flags)
9608 9593 {
9609 9594 int rv;
9610 9595 char *devnm;
9611 9596 dev_info_t *pdip;
9612 9597
9613 9598 if (dipp)
9614 9599 *dipp = NULL;
9615 9600
9616 9601 if (rdip == NULL || flags != 0 || (flags & DEVI_BRANCH_EVENT))
9617 9602 return (EINVAL);
9618 9603
9619 9604 pdip = ddi_get_parent(rdip);
9620 9605
9621 9606 ndi_hold_devi(pdip);
9622 9607
9623 9608 if (!e_ddi_branch_held(rdip)) {
9624 9609 ndi_rele_devi(pdip);
9625 9610 cmn_err(CE_WARN, "e_ddi_branch_configure: "
9626 9611 "dip(%p) not held", (void *)rdip);
9627 9612 return (EINVAL);
9628 9613 }
9629 9614
9630 9615 if (i_ddi_node_state(rdip) < DS_INITIALIZED) {
9631 9616 /*
9632 9617 * First attempt to bind a driver. If we fail, return
9633 9618 * success (On some platforms, dips for some device
9634 9619 * types (CPUs) may not have a driver)
9635 9620 */
9636 9621 if (ndi_devi_bind_driver(rdip, 0) != NDI_SUCCESS) {
9637 9622 ndi_rele_devi(pdip);
9638 9623 return (0);
9639 9624 }
9640 9625
9641 9626 if (ddi_initchild(pdip, rdip) != DDI_SUCCESS) {
9642 9627 rv = NDI_FAILURE;
9643 9628 goto out;
9644 9629 }
9645 9630 }
9646 9631
9647 9632 ASSERT(i_ddi_node_state(rdip) >= DS_INITIALIZED);
9648 9633
9649 9634 devnm = kmem_alloc(MAXNAMELEN + 1, KM_SLEEP);
9650 9635
9651 9636 (void) ddi_deviname(rdip, devnm);
9652 9637
9653 9638 if ((rv = ndi_devi_config_one(pdip, devnm+1, &rdip,
9654 9639 NDI_DEVI_ONLINE | NDI_CONFIG)) == NDI_SUCCESS) {
9655 9640 /* release hold from ndi_devi_config_one() */
9656 9641 ndi_rele_devi(rdip);
9657 9642 }
9658 9643
9659 9644 kmem_free(devnm, MAXNAMELEN + 1);
9660 9645 out:
9661 9646 if (rv != NDI_SUCCESS && dipp && rdip) {
9662 9647 ndi_hold_devi(rdip);
9663 9648 *dipp = rdip;
9664 9649 }
9665 9650 ndi_rele_devi(pdip);
9666 9651 return (ndi2errno(rv));
9667 9652 }
9668 9653
9669 9654 void
9670 9655 e_ddi_branch_hold(dev_info_t *rdip)
9671 9656 {
9672 9657 if (e_ddi_branch_held(rdip)) {
9673 9658 cmn_err(CE_WARN, "e_ddi_branch_hold: branch already held");
9674 9659 return;
9675 9660 }
9676 9661
9677 9662 mutex_enter(&DEVI(rdip)->devi_lock);
9678 9663 if ((DEVI(rdip)->devi_flags & DEVI_BRANCH_HELD) == 0) {
9679 9664 DEVI(rdip)->devi_flags |= DEVI_BRANCH_HELD;
9680 9665 DEVI(rdip)->devi_ref++;
9681 9666 }
9682 9667 ASSERT(DEVI(rdip)->devi_ref > 0);
9683 9668 mutex_exit(&DEVI(rdip)->devi_lock);
9684 9669 }
9685 9670
9686 9671 int
9687 9672 e_ddi_branch_held(dev_info_t *rdip)
9688 9673 {
9689 9674 int rv = 0;
9690 9675
9691 9676 mutex_enter(&DEVI(rdip)->devi_lock);
9692 9677 if ((DEVI(rdip)->devi_flags & DEVI_BRANCH_HELD) &&
9693 9678 DEVI(rdip)->devi_ref > 0) {
9694 9679 rv = 1;
9695 9680 }
9696 9681 mutex_exit(&DEVI(rdip)->devi_lock);
9697 9682
9698 9683 return (rv);
9699 9684 }
9700 9685
9701 9686 void
9702 9687 e_ddi_branch_rele(dev_info_t *rdip)
9703 9688 {
9704 9689 mutex_enter(&DEVI(rdip)->devi_lock);
9705 9690 DEVI(rdip)->devi_flags &= ~DEVI_BRANCH_HELD;
9706 9691 DEVI(rdip)->devi_ref--;
9707 9692 mutex_exit(&DEVI(rdip)->devi_lock);
9708 9693 }
9709 9694
9710 9695 int
9711 9696 e_ddi_branch_unconfigure(
9712 9697 dev_info_t *rdip,
9713 9698 dev_info_t **dipp,
9714 9699 uint_t flags)
9715 9700 {
9716 9701 int circ, rv;
9717 9702 int destroy;
9718 9703 char *devnm;
9719 9704 uint_t nflags;
9720 9705 dev_info_t *pdip;
9721 9706
9722 9707 if (dipp)
9723 9708 *dipp = NULL;
9724 9709
9725 9710 if (rdip == NULL)
9726 9711 return (EINVAL);
9727 9712
9728 9713 pdip = ddi_get_parent(rdip);
9729 9714
9730 9715 ASSERT(pdip);
9731 9716
9732 9717 /*
9733 9718 * Check if caller holds pdip busy - can cause deadlocks during
9734 9719 * devfs_clean()
9735 9720 */
9736 9721 if (DEVI_BUSY_OWNED(pdip)) {
9737 9722 cmn_err(CE_WARN, "e_ddi_branch_unconfigure: failed: parent"
9738 9723 " devinfo node(%p) is busy held", (void *)pdip);
9739 9724 return (EINVAL);
9740 9725 }
9741 9726
9742 9727 destroy = (flags & DEVI_BRANCH_DESTROY) ? 1 : 0;
9743 9728
9744 9729 devnm = kmem_alloc(MAXNAMELEN + 1, KM_SLEEP);
9745 9730
9746 9731 ndi_devi_enter(pdip, &circ);
9747 9732 (void) ddi_deviname(rdip, devnm);
9748 9733 ndi_devi_exit(pdip, circ);
9749 9734
9750 9735 /*
9751 9736 * ddi_deviname() returns a component name with / prepended.
9752 9737 */
9753 9738 (void) devfs_clean(pdip, devnm + 1, DV_CLEAN_FORCE);
9754 9739
9755 9740 ndi_devi_enter(pdip, &circ);
9756 9741
9757 9742 /*
9758 9743 * Recreate device name as it may have changed state (init/uninit)
9759 9744 * when parent busy lock was dropped for devfs_clean()
9760 9745 */
9761 9746 (void) ddi_deviname(rdip, devnm);
9762 9747
9763 9748 if (!e_ddi_branch_held(rdip)) {
9764 9749 kmem_free(devnm, MAXNAMELEN + 1);
9765 9750 ndi_devi_exit(pdip, circ);
9766 9751 cmn_err(CE_WARN, "e_ddi_%s_branch: dip(%p) not held",
9767 9752 destroy ? "destroy" : "unconfigure", (void *)rdip);
9768 9753 return (EINVAL);
9769 9754 }
9770 9755
9771 9756 /*
9772 9757 * Release hold on the branch. This is ok since we are holding the
9773 9758 * parent busy. If rdip is not removed, we must do a hold on the
9774 9759 * branch before returning.
9775 9760 */
9776 9761 e_ddi_branch_rele(rdip);
9777 9762
9778 9763 nflags = NDI_DEVI_OFFLINE;
9779 9764 if (destroy || (flags & DEVI_BRANCH_DESTROY)) {
9780 9765 nflags |= NDI_DEVI_REMOVE;
9781 9766 destroy = 1;
9782 9767 } else {
9783 9768 nflags |= NDI_UNCONFIG; /* uninit but don't remove */
9784 9769 }
9785 9770
9786 9771 if (flags & DEVI_BRANCH_EVENT)
9787 9772 nflags |= NDI_POST_EVENT;
9788 9773
9789 9774 if (i_ddi_devi_attached(pdip) &&
9790 9775 (i_ddi_node_state(rdip) >= DS_INITIALIZED)) {
9791 9776 rv = ndi_devi_unconfig_one(pdip, devnm+1, dipp, nflags);
9792 9777 } else {
9793 9778 rv = e_ddi_devi_unconfig(rdip, dipp, nflags);
9794 9779 if (rv == NDI_SUCCESS) {
9795 9780 ASSERT(!destroy || ddi_get_child(rdip) == NULL);
9796 9781 rv = ndi_devi_offline(rdip, nflags);
9797 9782 }
9798 9783 }
9799 9784
9800 9785 if (!destroy || rv != NDI_SUCCESS) {
9801 9786 /* The dip still exists, so do a hold */
9802 9787 e_ddi_branch_hold(rdip);
9803 9788 }
9804 9789 out:
9805 9790 kmem_free(devnm, MAXNAMELEN + 1);
9806 9791 ndi_devi_exit(pdip, circ);
9807 9792 return (ndi2errno(rv));
9808 9793 }
9809 9794
9810 9795 int
9811 9796 e_ddi_branch_destroy(dev_info_t *rdip, dev_info_t **dipp, uint_t flag)
9812 9797 {
9813 9798 return (e_ddi_branch_unconfigure(rdip, dipp,
9814 9799 flag|DEVI_BRANCH_DESTROY));
9815 9800 }
9816 9801
/*
 * Number of chains for hash table
 */
#define	NUMCHAINS	17

/*
 * Devinfo busy arg
 *
 * Walk state shared by visit_dvnode()/visit_snode()/visit_dip() when
 * e_ddi_branch_referenced() counts references against a branch.
 */
struct devi_busy {
	int dv_total;		/* sum of devfs vnode v_count values seen */
	int s_total;		/* sum of specfs snode open counts seen */
	mod_hash_t *dv_hash;	/* per-dip devfs vnode reference totals */
	mod_hash_t *s_hash;	/* per-dip specfs open-count totals */
	int (*callback)(dev_info_t *, void *, uint_t); /* user callback */
	void *arg;		/* opaque argument passed to callback */
};
9833 9818
9834 9819 static int
9835 9820 visit_dip(dev_info_t *dip, void *arg)
9836 9821 {
9837 9822 uintptr_t sbusy, dvbusy, ref;
9838 9823 struct devi_busy *bsp = arg;
9839 9824
9840 9825 ASSERT(bsp->callback);
9841 9826
9842 9827 /*
9843 9828 * A dip cannot be busy if its reference count is 0
9844 9829 */
9845 9830 if ((ref = e_ddi_devi_holdcnt(dip)) == 0) {
9846 9831 return (bsp->callback(dip, bsp->arg, 0));
9847 9832 }
9848 9833
9849 9834 if (mod_hash_find(bsp->dv_hash, dip, (mod_hash_val_t *)&dvbusy))
9850 9835 dvbusy = 0;
9851 9836
9852 9837 /*
9853 9838 * To catch device opens currently maintained on specfs common snodes.
9854 9839 */
9855 9840 if (mod_hash_find(bsp->s_hash, dip, (mod_hash_val_t *)&sbusy))
9856 9841 sbusy = 0;
9857 9842
9858 9843 #ifdef DEBUG
9859 9844 if (ref < sbusy || ref < dvbusy) {
9860 9845 cmn_err(CE_WARN, "dip(%p): sopen = %lu, dvopen = %lu "
9861 9846 "dip ref = %lu\n", (void *)dip, sbusy, dvbusy, ref);
9862 9847 }
9863 9848 #endif
9864 9849
9865 9850 dvbusy = (sbusy > dvbusy) ? sbusy : dvbusy;
9866 9851
9867 9852 return (bsp->callback(dip, bsp->arg, dvbusy));
9868 9853 }
9869 9854
9870 9855 static int
9871 9856 visit_snode(struct snode *sp, void *arg)
9872 9857 {
9873 9858 uintptr_t sbusy;
9874 9859 dev_info_t *dip;
9875 9860 int count;
9876 9861 struct devi_busy *bsp = arg;
9877 9862
9878 9863 ASSERT(sp);
9879 9864
9880 9865 /*
9881 9866 * The stable lock is held. This prevents
9882 9867 * the snode and its associated dip from
9883 9868 * going away.
9884 9869 */
9885 9870 dip = NULL;
9886 9871 count = spec_devi_open_count(sp, &dip);
9887 9872
9888 9873 if (count <= 0)
9889 9874 return (DDI_WALK_CONTINUE);
9890 9875
9891 9876 ASSERT(dip);
9892 9877
9893 9878 if (mod_hash_remove(bsp->s_hash, dip, (mod_hash_val_t *)&sbusy))
9894 9879 sbusy = count;
9895 9880 else
9896 9881 sbusy += count;
9897 9882
9898 9883 if (mod_hash_insert(bsp->s_hash, dip, (mod_hash_val_t)sbusy)) {
9899 9884 cmn_err(CE_WARN, "%s: s_hash insert failed: dip=0x%p, "
9900 9885 "sbusy = %lu", "e_ddi_branch_referenced",
9901 9886 (void *)dip, sbusy);
9902 9887 }
9903 9888
9904 9889 bsp->s_total += count;
9905 9890
9906 9891 return (DDI_WALK_CONTINUE);
9907 9892 }
9908 9893
9909 9894 static void
9910 9895 visit_dvnode(struct dv_node *dv, void *arg)
9911 9896 {
9912 9897 uintptr_t dvbusy;
9913 9898 uint_t count;
9914 9899 struct vnode *vp;
9915 9900 struct devi_busy *bsp = arg;
9916 9901
9917 9902 ASSERT(dv && dv->dv_devi);
9918 9903
9919 9904 vp = DVTOV(dv);
9920 9905
9921 9906 mutex_enter(&vp->v_lock);
9922 9907 count = vp->v_count;
9923 9908 mutex_exit(&vp->v_lock);
9924 9909
9925 9910 if (!count)
9926 9911 return;
9927 9912
9928 9913 if (mod_hash_remove(bsp->dv_hash, dv->dv_devi,
9929 9914 (mod_hash_val_t *)&dvbusy))
9930 9915 dvbusy = count;
9931 9916 else
9932 9917 dvbusy += count;
9933 9918
9934 9919 if (mod_hash_insert(bsp->dv_hash, dv->dv_devi,
9935 9920 (mod_hash_val_t)dvbusy)) {
9936 9921 cmn_err(CE_WARN, "%s: dv_hash insert failed: dip=0x%p, "
9937 9922 "dvbusy=%lu", "e_ddi_branch_referenced",
9938 9923 (void *)dv->dv_devi, dvbusy);
9939 9924 }
9940 9925
9941 9926 bsp->dv_total += count;
9942 9927 }
9943 9928
/*
 * Returns reference count on success or -1 on failure.
 *
 * Counts the outstanding references against the held branch rooted at
 * rdip: devfs vnode reference counts (via devfs_walk) plus specfs
 * device opens (via spec_snode_walk).  If "callback" is non-NULL it is
 * invoked for rdip and each node below it with that node's per-dip
 * busy count; a callback return other than DDI_WALK_CONTINUE from the
 * root stops the descent.  The returned value is the larger of the
 * two totals.
 */
int
e_ddi_branch_referenced(
	dev_info_t *rdip,
	int (*callback)(dev_info_t *dip, void *arg, uint_t ref),
	void *arg)
{
	int circ;
	char *path;
	dev_info_t *pdip;
	struct devi_busy bsa = {0};

	ASSERT(rdip);

	path = kmem_alloc(MAXPATHLEN, KM_SLEEP);

	/* Keep the branch root from going away during the walks */
	ndi_hold_devi(rdip);

	pdip = ddi_get_parent(rdip);

	ASSERT(pdip);

	/*
	 * Check if caller holds pdip busy - can cause deadlocks during
	 * devfs_walk()
	 */
	if (!e_ddi_branch_held(rdip) || DEVI_BUSY_OWNED(pdip)) {
		cmn_err(CE_WARN, "e_ddi_branch_referenced: failed: "
		    "devinfo branch(%p) not held or parent busy held",
		    (void *)rdip);
		ndi_rele_devi(rdip);
		kmem_free(path, MAXPATHLEN);
		return (-1);
	}

	/* Snapshot the branch root's full pathname under the parent lock */
	ndi_devi_enter(pdip, &circ);
	(void) ddi_pathname(rdip, path);
	ndi_devi_exit(pdip, circ);

	bsa.dv_hash = mod_hash_create_ptrhash("dv_node busy hash", NUMCHAINS,
	    mod_hash_null_valdtor, sizeof (struct dev_info));

	bsa.s_hash = mod_hash_create_ptrhash("snode busy hash", NUMCHAINS,
	    mod_hash_null_valdtor, sizeof (struct snode));

	/* Accumulate devfs vnode reference counts for nodes in the branch */
	if (devfs_walk(path, visit_dvnode, &bsa)) {
		cmn_err(CE_WARN, "e_ddi_branch_referenced: "
		    "devfs walk failed for: %s", path);
		kmem_free(path, MAXPATHLEN);
		/* -1 totals propagate a -1 return value below */
		bsa.s_total = bsa.dv_total = -1;
		goto out;
	}

	kmem_free(path, MAXPATHLEN);

	/*
	 * Walk the snode table to detect device opens, which are currently
	 * maintained on specfs common snodes.
	 */
	spec_snode_walk(visit_snode, &bsa);

	if (callback == NULL)
		goto out;

	bsa.callback = callback;
	bsa.arg = arg;

	/* Report per-dip busy counts for the root and its descendants */
	if (visit_dip(rdip, &bsa) == DDI_WALK_CONTINUE) {
		ndi_devi_enter(rdip, &circ);
		ddi_walk_devs(ddi_get_child(rdip), visit_dip, &bsa);
		ndi_devi_exit(rdip, circ);
	}

out:
	ndi_rele_devi(rdip);
	mod_hash_destroy_ptrhash(bsa.s_hash);
	mod_hash_destroy_ptrhash(bsa.dv_hash);
	return (bsa.s_total > bsa.dv_total ? bsa.s_total : bsa.dv_total);
}
|
↓ open down ↓ |
5239 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX