xref: /illumos-gate/usr/src/uts/common/os/sunddi.c (revision 79033acb)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 #include <sys/note.h>
30 #include <sys/types.h>
31 #include <sys/param.h>
32 #include <sys/systm.h>
33 #include <sys/buf.h>
34 #include <sys/uio.h>
35 #include <sys/cred.h>
36 #include <sys/poll.h>
37 #include <sys/mman.h>
38 #include <sys/kmem.h>
39 #include <sys/model.h>
40 #include <sys/file.h>
41 #include <sys/proc.h>
42 #include <sys/open.h>
43 #include <sys/user.h>
44 #include <sys/t_lock.h>
45 #include <sys/vm.h>
46 #include <sys/stat.h>
47 #include <vm/hat.h>
48 #include <vm/seg.h>
49 #include <vm/seg_vn.h>
50 #include <vm/seg_dev.h>
51 #include <vm/as.h>
52 #include <sys/cmn_err.h>
53 #include <sys/cpuvar.h>
54 #include <sys/debug.h>
55 #include <sys/autoconf.h>
56 #include <sys/sunddi.h>
57 #include <sys/esunddi.h>
58 #include <sys/sunndi.h>
59 #include <sys/kstat.h>
60 #include <sys/conf.h>
61 #include <sys/ddi_impldefs.h>	/* include implementation structure defs */
62 #include <sys/ndi_impldefs.h>	/* include prototypes */
63 #include <sys/hwconf.h>
64 #include <sys/pathname.h>
65 #include <sys/modctl.h>
66 #include <sys/epm.h>
67 #include <sys/devctl.h>
68 #include <sys/callb.h>
69 #include <sys/cladm.h>
70 #include <sys/sysevent.h>
71 #include <sys/dacf_impl.h>
72 #include <sys/ddidevmap.h>
73 #include <sys/bootconf.h>
74 #include <sys/disp.h>
75 #include <sys/atomic.h>
76 #include <sys/promif.h>
77 #include <sys/instance.h>
78 #include <sys/sysevent/eventdefs.h>
79 #include <sys/task.h>
80 #include <sys/project.h>
81 #include <sys/taskq.h>
82 #include <sys/devpolicy.h>
83 #include <sys/ctype.h>
84 #include <net/if.h>
85 
86 extern	pri_t	minclsyspri;
87 
88 extern	rctl_hndl_t rc_project_devlockmem;
89 
90 #ifdef DEBUG
91 static int sunddi_debug = 0;
92 #endif /* DEBUG */
93 
94 /* ddi_umem_unlock miscellaneous */
95 
96 static	void	i_ddi_umem_unlock_thread_start(void);
97 
98 static	kmutex_t	ddi_umem_unlock_mutex; /* unlock list mutex */
99 static	kcondvar_t	ddi_umem_unlock_cv; /* unlock list block/unblock */
100 static	kthread_t	*ddi_umem_unlock_thread;
101 /*
102  * The ddi_umem_unlock FIFO list.  NULL head pointer indicates empty list.
103  */
104 static	struct	ddi_umem_cookie *ddi_umem_unlock_head = NULL;
105 static	struct	ddi_umem_cookie *ddi_umem_unlock_tail = NULL;
106 
107 /*
108  * This lock protects the project.max-device-locked-memory counter.
109  * When both p_lock (proc_t) and this lock need to acquired, p_lock
110  * should be acquired first.
111  */
112 static kmutex_t umem_devlockmem_rctl_lock;
113 
114 
115 /*
116  * DDI(Sun) Function and flag definitions:
117  */
118 
119 #if defined(__x86)
120 /*
121  * Used to indicate which entries were chosen from a range.
122  */
123 char	*chosen_reg = "chosen-reg";
124 #endif
125 
126 /*
127  * Function used to ring system console bell
128  */
129 void (*ddi_console_bell_func)(clock_t duration);
130 
131 /*
132  * Creating register mappings and handling interrupts:
133  */
134 
135 /*
136  * Generic ddi_map: Call parent to fulfill request...
137  */
138 
139 int
140 ddi_map(dev_info_t *dp, ddi_map_req_t *mp, off_t offset,
141     off_t len, caddr_t *addrp)
142 {
143 	dev_info_t *pdip;
144 
145 	ASSERT(dp);
146 	pdip = (dev_info_t *)DEVI(dp)->devi_parent;
147 	return ((DEVI(pdip)->devi_ops->devo_bus_ops->bus_map)(pdip,
148 	    dp, mp, offset, len, addrp));
149 }
150 
151 /*
152  * ddi_apply_range: (Called by nexi only.)
153  * Apply ranges in parent node dp, to child regspec rp...
154  */
155 
156 int
157 ddi_apply_range(dev_info_t *dp, dev_info_t *rdip, struct regspec *rp)
158 {
159 	return (i_ddi_apply_range(dp, rdip, rp));
160 }
161 
/*
 * ddi_map_regs: map register set 'rnumber' of dip into kernel virtual
 * space, returning the address through kaddrp.  'offset'/'len' select a
 * sub-range of the register set (len == 0 means the whole set).
 */
int
ddi_map_regs(dev_info_t *dip, uint_t rnumber, caddr_t *kaddrp, off_t offset,
    off_t len)
{
	ddi_map_req_t mr;
#if defined(__x86)
	/*
	 * On x86 we additionally publish the selected tuple as the
	 * "chosen-reg" property (see chosen_reg above) so it is visible
	 * which entry of the range was actually mapped.
	 */
	struct {
		int	bus;
		int	addr;
		int	size;
	} reg, *reglist;
	uint_t	length;
	int	rc;

	/*
	 * get the 'registers' or the 'reg' property.
	 * We look up the reg property as an array of
	 * int's.
	 */
	rc = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS, "registers", (int **)&reglist, &length);
	if (rc != DDI_PROP_SUCCESS)
		rc = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip,
		    DDI_PROP_DONTPASS, "reg", (int **)&reglist, &length);
	if (rc == DDI_PROP_SUCCESS) {
		/*
		 * point to the required entry.
		 * NOTE(review): rnumber is not validated against
		 * length/3 here — presumably callers guarantee it is
		 * in range; confirm before relying on it.
		 */
		reg = reglist[rnumber];
		reg.addr += offset;
		if (len != 0)
			reg.size = len;
		/*
		 * make a new property containing ONLY the required tuple.
		 */
		if (ddi_prop_update_int_array(DDI_DEV_T_NONE, dip,
		    chosen_reg, (int *)&reg, (sizeof (reg)/sizeof (int)))
		    != DDI_PROP_SUCCESS) {
			cmn_err(CE_WARN, "%s%d: cannot create '%s' "
			    "property", DEVI(dip)->devi_name,
			    DEVI(dip)->devi_instance, chosen_reg);
		}
		/*
		 * free the memory allocated by
		 * ddi_prop_lookup_int_array ().
		 */
		ddi_prop_free((void *)reglist);
	}
#endif
	/* Build a locked kernel mapping request, keyed by register number. */
	mr.map_op = DDI_MO_MAP_LOCKED;
	mr.map_type = DDI_MT_RNUMBER;
	mr.map_obj.rnumber = rnumber;
	mr.map_prot = PROT_READ | PROT_WRITE;
	mr.map_flags = DDI_MF_KERNEL_MAPPING;
	mr.map_handlep = NULL;
	mr.map_vers = DDI_MAP_VERSION;

	/*
	 * Call my parent to map in my regs.
	 */

	return (ddi_map(dip, &mr, offset, len, kaddrp));
}
225 
226 void
227 ddi_unmap_regs(dev_info_t *dip, uint_t rnumber, caddr_t *kaddrp, off_t offset,
228     off_t len)
229 {
230 	ddi_map_req_t mr;
231 
232 	mr.map_op = DDI_MO_UNMAP;
233 	mr.map_type = DDI_MT_RNUMBER;
234 	mr.map_flags = DDI_MF_KERNEL_MAPPING;
235 	mr.map_prot = PROT_READ | PROT_WRITE;	/* who cares? */
236 	mr.map_obj.rnumber = rnumber;
237 	mr.map_handlep = NULL;
238 	mr.map_vers = DDI_MAP_VERSION;
239 
240 	/*
241 	 * Call my parent to unmap my regs.
242 	 */
243 
244 	(void) ddi_map(dip, &mr, offset, len, kaddrp);
245 	*kaddrp = (caddr_t)0;
246 #if defined(__x86)
247 	(void) ddi_prop_remove(DDI_DEV_T_NONE, dip, chosen_reg);
248 #endif
249 }
250 
251 int
252 ddi_bus_map(dev_info_t *dip, dev_info_t *rdip, ddi_map_req_t *mp,
253 	off_t offset, off_t len, caddr_t *vaddrp)
254 {
255 	return (i_ddi_bus_map(dip, rdip, mp, offset, len, vaddrp));
256 }
257 
258 /*
259  * nullbusmap:	The/DDI default bus_map entry point for nexi
260  *		not conforming to the reg/range paradigm (i.e. scsi, etc.)
261  *		with no HAT/MMU layer to be programmed at this level.
262  *
263  *		If the call is to map by rnumber, return an error,
264  *		otherwise pass anything else up the tree to my parent.
265  */
266 int
267 nullbusmap(dev_info_t *dip, dev_info_t *rdip, ddi_map_req_t *mp,
268 	off_t offset, off_t len, caddr_t *vaddrp)
269 {
270 	_NOTE(ARGUNUSED(rdip))
271 	if (mp->map_type == DDI_MT_RNUMBER)
272 		return (DDI_ME_UNSUPPORTED);
273 
274 	return (ddi_map(dip, mp, offset, len, vaddrp));
275 }
276 
277 /*
278  * ddi_rnumber_to_regspec: Not for use by leaf drivers.
279  *			   Only for use by nexi using the reg/range paradigm.
280  */
struct regspec *
ddi_rnumber_to_regspec(dev_info_t *dip, int rnumber)
{
	/* Pure delegation to the implementation-private routine. */
	return (i_ddi_rnumber_to_regspec(dip, rnumber));
}
286 
287 
288 /*
289  * Note that we allow the dip to be nil because we may be called
290  * prior even to the instantiation of the devinfo tree itself - all
291  * regular leaf and nexus drivers should always use a non-nil dip!
292  *
293  * We treat peek in a somewhat cavalier fashion .. assuming that we'll
294  * simply get a synchronous fault as soon as we touch a missing address.
295  *
296  * Poke is rather more carefully handled because we might poke to a write
297  * buffer, "succeed", then only find some time later that we got an
298  * asynchronous fault that indicated that the address we were writing to
299  * was not really backed by hardware.
300  */
301 
302 static int
303 i_ddi_peekpoke(dev_info_t *devi, ddi_ctl_enum_t cmd, size_t size,
304     void *addr, void *value_p)
305 {
306 	union {
307 		uint64_t	u64;
308 		uint32_t	u32;
309 		uint16_t	u16;
310 		uint8_t		u8;
311 	} peekpoke_value;
312 
313 	peekpoke_ctlops_t peekpoke_args;
314 	uint64_t dummy_result;
315 	int rval;
316 
317 	/* Note: size is assumed to be correct;  it is not checked. */
318 	peekpoke_args.size = size;
319 	peekpoke_args.dev_addr = (uintptr_t)addr;
320 	peekpoke_args.handle = NULL;
321 	peekpoke_args.repcount = 1;
322 	peekpoke_args.flags = 0;
323 
324 	if (cmd == DDI_CTLOPS_POKE) {
325 		switch (size) {
326 		case sizeof (uint8_t):
327 			peekpoke_value.u8 = *(uint8_t *)value_p;
328 			break;
329 		case sizeof (uint16_t):
330 			peekpoke_value.u16 = *(uint16_t *)value_p;
331 			break;
332 		case sizeof (uint32_t):
333 			peekpoke_value.u32 = *(uint32_t *)value_p;
334 			break;
335 		case sizeof (uint64_t):
336 			peekpoke_value.u64 = *(uint64_t *)value_p;
337 			break;
338 		}
339 	}
340 
341 	peekpoke_args.host_addr = (uintptr_t)&peekpoke_value.u64;
342 
343 	if (devi != NULL)
344 		rval = ddi_ctlops(devi, devi, cmd, &peekpoke_args,
345 		    &dummy_result);
346 	else
347 		rval = peekpoke_mem(cmd, &peekpoke_args);
348 
349 	/*
350 	 * A NULL value_p is permitted by ddi_peek(9F); discard the result.
351 	 */
352 	if ((cmd == DDI_CTLOPS_PEEK) & (value_p != NULL)) {
353 		switch (size) {
354 		case sizeof (uint8_t):
355 			*(uint8_t *)value_p = peekpoke_value.u8;
356 			break;
357 		case sizeof (uint16_t):
358 			*(uint16_t *)value_p = peekpoke_value.u16;
359 			break;
360 		case sizeof (uint32_t):
361 			*(uint32_t *)value_p = peekpoke_value.u32;
362 			break;
363 		case sizeof (uint64_t):
364 			*(uint64_t *)value_p = peekpoke_value.u64;
365 			break;
366 		}
367 	}
368 
369 	return (rval);
370 }
371 
372 /*
373  * Keep ddi_peek() and ddi_poke() in case 3rd parties are calling this.
374  * they shouldn't be, but the 9f manpage kind of pseudo exposes it.
375  */
376 int
377 ddi_peek(dev_info_t *devi, size_t size, void *addr, void *value_p)
378 {
379 	switch (size) {
380 	case sizeof (uint8_t):
381 	case sizeof (uint16_t):
382 	case sizeof (uint32_t):
383 	case sizeof (uint64_t):
384 		break;
385 	default:
386 		return (DDI_FAILURE);
387 	}
388 
389 	return (i_ddi_peekpoke(devi, DDI_CTLOPS_PEEK, size, addr, value_p));
390 }
391 
392 int
393 ddi_poke(dev_info_t *devi, size_t size, void *addr, void *value_p)
394 {
395 	switch (size) {
396 	case sizeof (uint8_t):
397 	case sizeof (uint16_t):
398 	case sizeof (uint32_t):
399 	case sizeof (uint64_t):
400 		break;
401 	default:
402 		return (DDI_FAILURE);
403 	}
404 
405 	return (i_ddi_peekpoke(devi, DDI_CTLOPS_POKE, size, addr, value_p));
406 }
407 
/*
 * Fixed-width peek wrappers; see ddi_peek(9F).  Each reads one value of
 * the named width from device address 'addr' into *val_p.
 */
int
ddi_peek8(dev_info_t *dip, int8_t *addr, int8_t *val_p)
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr,
	    val_p));
}

int
ddi_peek16(dev_info_t *dip, int16_t *addr, int16_t *val_p)
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr,
	    val_p));
}

int
ddi_peek32(dev_info_t *dip, int32_t *addr, int32_t *val_p)
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr,
	    val_p));
}

int
ddi_peek64(dev_info_t *dip, int64_t *addr, int64_t *val_p)
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr,
	    val_p));
}
435 
436 
437 /*
438  * We need to separate the old interfaces from the new ones and leave them
439  * in here for a while. Previous versions of the OS defined the new interfaces
440  * to the old interfaces. This way we can fix things up so that we can
441  * eventually remove these interfaces.
442  * e.g. A 3rd party module/driver using ddi_peek8 and built against S10
443  * or earlier will actually have a reference to ddi_peekc in the binary.
444  */
#ifdef _ILP32
/*
 * Legacy 32-bit-only peek entry points (ddi_peekc/s/l/d).  Kept so
 * pre-S10 binaries that reference these symbols keep working; see the
 * comment above.  Identical in behavior to ddi_peek8/16/32/64.
 */
int
ddi_peekc(dev_info_t *dip, int8_t *addr, int8_t *val_p)
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr,
	    val_p));
}

int
ddi_peeks(dev_info_t *dip, int16_t *addr, int16_t *val_p)
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr,
	    val_p));
}

int
ddi_peekl(dev_info_t *dip, int32_t *addr, int32_t *val_p)
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr,
	    val_p));
}

int
ddi_peekd(dev_info_t *dip, int64_t *addr, int64_t *val_p)
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr,
	    val_p));
}
#endif /* _ILP32 */
474 
/*
 * Fixed-width poke wrappers; see ddi_poke(9F).  Each writes 'val' to
 * device address 'addr' at the named width.
 */
int
ddi_poke8(dev_info_t *dip, int8_t *addr, int8_t val)
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val));
}

int
ddi_poke16(dev_info_t *dip, int16_t *addr, int16_t val)
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val));
}

int
ddi_poke32(dev_info_t *dip, int32_t *addr, int32_t val)
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val));
}

int
ddi_poke64(dev_info_t *dip, int64_t *addr, int64_t val)
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val));
}
498 
499 /*
500  * We need to separate the old interfaces from the new ones and leave them
501  * in here for a while. Previous versions of the OS defined the new interfaces
502  * to the old interfaces. This way we can fix things up so that we can
503  * eventually remove these interfaces.
504  * e.g. A 3rd party module/driver using ddi_poke8 and built against S10
505  * or earlier will actually have a reference to ddi_pokec in the binary.
506  */
#ifdef _ILP32
/*
 * Legacy 32-bit-only poke entry points (ddi_pokec/s/l/d), kept for
 * binary compatibility with pre-S10 consumers; see the comment above.
 * Identical in behavior to ddi_poke8/16/32/64.
 */
int
ddi_pokec(dev_info_t *dip, int8_t *addr, int8_t val)
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val));
}

int
ddi_pokes(dev_info_t *dip, int16_t *addr, int16_t val)
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val));
}

int
ddi_pokel(dev_info_t *dip, int32_t *addr, int32_t val)
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val));
}

int
ddi_poked(dev_info_t *dip, int64_t *addr, int64_t val)
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val));
}
#endif /* _ILP32 */
532 
533 /*
534  * ddi_peekpokeio() is used primarily by the mem drivers for moving
535  * data to and from uio structures via peek and poke.  Note that we
536  * use "internal" routines ddi_peek and ddi_poke to make this go
537  * slightly faster, avoiding the call overhead ..
538  */
int
ddi_peekpokeio(dev_info_t *devi, struct uio *uio, enum uio_rw rw,
    caddr_t addr, size_t len, uint_t xfersize)
{
	int64_t	ibuffer;	/* aligned bounce buffer for word transfers */
	int8_t w8;		/* bounce byte for odd-address transfers */
	size_t sz;		/* width chosen for this iteration */
	int o;

	/* Never transfer wider than the native word. */
	if (xfersize > sizeof (long))
		xfersize = sizeof (long);

	while (len != 0) {
		/*
		 * If either the remaining length or the device address is
		 * odd, fall back to a single byte transfer this round.
		 */
		if ((len | (uintptr_t)addr) & 1) {
			sz = sizeof (int8_t);
			if (rw == UIO_WRITE) {
				if ((o = uwritec(uio)) == -1)
					return (DDI_FAILURE);
				if (ddi_poke8(devi, (int8_t *)addr,
				    (int8_t)o) != DDI_SUCCESS)
					return (DDI_FAILURE);
			} else {
				if (i_ddi_peekpoke(devi, DDI_CTLOPS_PEEK, sz,
				    (int8_t *)addr, &w8) != DDI_SUCCESS)
					return (DDI_FAILURE);
				if (ureadc(w8, uio))
					return (DDI_FAILURE);
			}
		} else {
			/*
			 * Pick the widest transfer (up to xfersize) for
			 * which both length and address are aligned.
			 */
			switch (xfersize) {
			case sizeof (int64_t):
				if (((len | (uintptr_t)addr) &
				    (sizeof (int64_t) - 1)) == 0) {
					sz = xfersize;
					break;
				}
				/*FALLTHROUGH*/
			case sizeof (int32_t):
				if (((len | (uintptr_t)addr) &
				    (sizeof (int32_t) - 1)) == 0) {
					sz = xfersize;
					break;
				}
				/*FALLTHROUGH*/
			default:
				/*
				 * This still assumes that we might have an
				 * I/O bus out there that permits 16-bit
				 * transfers (and that it would be upset by
				 * 32-bit transfers from such locations).
				 */
				sz = sizeof (int16_t);
				break;
			}

			/* Device -> bounce buffer -> uio for reads ... */
			if (rw == UIO_READ) {
				if (i_ddi_peekpoke(devi, DDI_CTLOPS_PEEK, sz,
				    addr, &ibuffer) != DDI_SUCCESS)
					return (DDI_FAILURE);
			}

			if (uiomove(&ibuffer, sz, rw, uio))
				return (DDI_FAILURE);

			/* ... and uio -> bounce buffer -> device for writes. */
			if (rw == UIO_WRITE) {
				if (i_ddi_peekpoke(devi, DDI_CTLOPS_POKE, sz,
				    addr, &ibuffer) != DDI_SUCCESS)
					return (DDI_FAILURE);
			}
		}
		addr += sz;
		len -= sz;
	}
	return (DDI_SUCCESS);
}
614 
615 /*
616  * These routines are used by drivers that do layered ioctls
617  * On sparc, they're implemented in assembler to avoid spilling
618  * register windows in the common (copyin) case ..
619  */
#if !defined(__sparc)
/*
 * Copy data for layered ioctls.  When FKIOCTL is set the "user" buffer
 * actually lives in the kernel, so a kcopy() is done instead and its
 * nonzero failure return is mapped to -1 to match copyin/copyout.
 */
int
ddi_copyin(const void *buf, void *kernbuf, size_t size, int flags)
{
	if (flags & FKIOCTL)
		return (kcopy(buf, kernbuf, size) ? -1 : 0);
	return (copyin(buf, kernbuf, size));
}

int
ddi_copyout(const void *buf, void *kernbuf, size_t size, int flags)
{
	/*
	 * NOTE(review): for copyout, 'buf' is the kernel source and
	 * 'kernbuf' the user destination — the parameter names are
	 * misleading here; verify against ddi_copyout(9F) docs.
	 */
	if (flags & FKIOCTL)
		return (kcopy(buf, kernbuf, size) ? -1 : 0);
	return (copyout(buf, kernbuf, size));
}
#endif	/* !__sparc */
637 
638 /*
639  * Conversions in nexus pagesize units.  We don't duplicate the
640  * 'nil dip' semantics of peek/poke because btopr/btop/ptob are DDI/DKI
641  * routines anyway.
642  */
/* Convert bytes to pages in the parent nexus's pagesize units. */
unsigned long
ddi_btop(dev_info_t *dip, unsigned long bytes)
{
	unsigned long pages;

	(void) ddi_ctlops(dip, dip, DDI_CTLOPS_BTOP, &bytes, &pages);
	return (pages);
}

/* As ddi_btop, but rounds any partial page up. */
unsigned long
ddi_btopr(dev_info_t *dip, unsigned long bytes)
{
	unsigned long pages;

	(void) ddi_ctlops(dip, dip, DDI_CTLOPS_BTOPR, &bytes, &pages);
	return (pages);
}

/* Convert pages back to bytes in the parent nexus's pagesize units. */
unsigned long
ddi_ptob(dev_info_t *dip, unsigned long pages)
{
	unsigned long bytes;

	(void) ddi_ctlops(dip, dip, DDI_CTLOPS_PTOB, &pages, &bytes);
	return (bytes);
}

/* Enter a critical region by raising to spl7; returns the prior level. */
unsigned int
ddi_enter_critical(void)
{
	return ((uint_t)spl7());
}

/* Leave a critical region by restoring the level saved at entry. */
void
ddi_exit_critical(unsigned int spl)
{
	splx((int)spl);
}
681 
682 /*
683  * Nexus ctlops punter
684  */
685 
686 #if !defined(__sparc)
687 /*
688  * Request bus_ctl parent to handle a bus_ctl request
689  *
690  * (The sparc version is in sparc_ddi.s)
691  */
692 int
693 ddi_ctlops(dev_info_t *d, dev_info_t *r, ddi_ctl_enum_t op, void *a, void *v)
694 {
695 	int (*fp)();
696 
697 	if (!d || !r)
698 		return (DDI_FAILURE);
699 
700 	if ((d = (dev_info_t *)DEVI(d)->devi_bus_ctl) == NULL)
701 		return (DDI_FAILURE);
702 
703 	fp = DEVI(d)->devi_ops->devo_bus_ops->bus_ctl;
704 	return ((*fp)(d, r, op, a, v));
705 }
706 
707 #endif
708 
709 /*
710  * DMA/DVMA setup
711  */
712 
/*
 * Default DMA limits, substituted when a driver supplies none.
 */
#if defined(__sparc)
static ddi_dma_lim_t standard_limits = {
	(uint_t)0,	/* addr_t dlim_addr_lo */
	(uint_t)-1,	/* addr_t dlim_addr_hi */
	(uint_t)-1,	/* uint_t dlim_cntr_max */
	(uint_t)1,	/* uint_t dlim_burstsizes */
	(uint_t)1,	/* uint_t dlim_minxfer */
	0		/* uint_t dlim_dmaspeed */
};
#elif defined(__x86)
static ddi_dma_lim_t standard_limits = {
	(uint_t)0,		/* addr_t dlim_addr_lo */
	(uint_t)0xffffff,	/* addr_t dlim_addr_hi */
	(uint_t)0,		/* uint_t dlim_cntr_max */
	(uint_t)0x00000001,	/* uint_t dlim_burstsizes */
	(uint_t)DMA_UNIT_8,	/* uint_t dlim_minxfer */
	(uint_t)0,		/* uint_t dlim_dmaspeed */
	/*
	 * Parenthesized for clarity: the original "0x86<<24+0" parsed as
	 * 0x86 << (24 + 0) because '+' binds tighter than '<<' — same
	 * value, but the grouping was accidental.
	 */
	(uint_t)(0x86 << 24),	/* uint_t dlim_version */
	(uint_t)0xffff,		/* uint_t dlim_adreg_max */
	(uint_t)0xffff,		/* uint_t dlim_ctreg_max */
	(uint_t)512,		/* uint_t dlim_granular */
	(int)1,			/* int dlim_sgllen */
	(uint_t)0xffffffff	/* uint_t dlim_reqsizes */
};

#endif
739 
/*
 * ddi_dma_setup: obsolete limits-based interface to establish a DMA
 * mapping for the object described by dmareqp, returning the mapping
 * through handlep.
 */
int
ddi_dma_setup(dev_info_t *dip, struct ddi_dma_req *dmareqp,
    ddi_dma_handle_t *handlep)
{
	int (*funcp)() = ddi_dma_map;
	struct bus_ops *bop;
#if defined(__sparc)
	auto ddi_dma_lim_t dma_lim;

	/* Substitute the default limits if the caller supplied none. */
	if (dmareqp->dmar_limits == (ddi_dma_lim_t *)0) {
		dma_lim = standard_limits;
	} else {
		dma_lim = *dmareqp->dmar_limits;
	}
	dmareqp->dmar_limits = &dma_lim;
#endif
#if defined(__x86)
	/* On x86, callers must supply explicit limits. */
	if (dmareqp->dmar_limits == (ddi_dma_lim_t *)0)
		return (DDI_FAILURE);
#endif

	/*
	 * Handle the case that the requester is both a leaf
	 * and a nexus driver simultaneously by calling the
	 * requester's bus_dma_map function directly instead
	 * of ddi_dma_map.
	 */
	bop = DEVI(dip)->devi_ops->devo_bus_ops;
	if (bop && bop->bus_dma_map)
		funcp = bop->bus_dma_map;
	return ((*funcp)(dip, dip, dmareqp, handlep));
}
772 
/*
 * ddi_dma_addr_setup: obsolete limits-based interface mapping the
 * virtual address range [addr, addr+len) in address space 'as'
 * (NULL/kas semantics handled by the nexus) for DMA.
 */
int
ddi_dma_addr_setup(dev_info_t *dip, struct as *as, caddr_t addr, size_t len,
    uint_t flags, int (*waitfp)(), caddr_t arg,
    ddi_dma_lim_t *limits, ddi_dma_handle_t *handlep)
{
	int (*funcp)() = ddi_dma_map;
	ddi_dma_lim_t dma_lim;
	struct ddi_dma_req dmareq;
	struct bus_ops *bop;

	/* A zero-length range can never be mapped. */
	if (len == 0) {
		return (DDI_DMA_NOMAPPING);
	}
	/* Substitute the default limits if the caller supplied none. */
	if (limits == (ddi_dma_lim_t *)0) {
		dma_lim = standard_limits;
	} else {
		dma_lim = *limits;
	}
	/* Describe the request: a plain virtual-address object. */
	dmareq.dmar_limits = &dma_lim;
	dmareq.dmar_flags = flags;
	dmareq.dmar_fp = waitfp;
	dmareq.dmar_arg = arg;
	dmareq.dmar_object.dmao_size = len;
	dmareq.dmar_object.dmao_type = DMA_OTYP_VADDR;
	dmareq.dmar_object.dmao_obj.virt_obj.v_as = as;
	dmareq.dmar_object.dmao_obj.virt_obj.v_addr = addr;
	dmareq.dmar_object.dmao_obj.virt_obj.v_priv = NULL;

	/*
	 * Handle the case that the requester is both a leaf
	 * and a nexus driver simultaneously by calling the
	 * requester's bus_dma_map function directly instead
	 * of ddi_dma_map.
	 */
	bop = DEVI(dip)->devi_ops->devo_bus_ops;
	if (bop && bop->bus_dma_map)
		funcp = bop->bus_dma_map;

	return ((*funcp)(dip, dip, &dmareq, handlep));
}
813 
/*
 * ddi_dma_buf_setup: obsolete limits-based interface mapping the data
 * described by a buf(9S) structure for DMA.  Chooses the DMA object
 * type from the buf's flags: page list (B_PAGEIO), shadow page list
 * (B_SHADOW), or plain buffer virtual address.
 */
int
ddi_dma_buf_setup(dev_info_t *dip, struct buf *bp, uint_t flags,
    int (*waitfp)(), caddr_t arg, ddi_dma_lim_t *limits,
    ddi_dma_handle_t *handlep)
{
	int (*funcp)() = ddi_dma_map;
	ddi_dma_lim_t dma_lim;
	struct ddi_dma_req dmareq;
	struct bus_ops *bop;

	/* Substitute the default limits if the caller supplied none. */
	if (limits == (ddi_dma_lim_t *)0) {
		dma_lim = standard_limits;
	} else {
		dma_lim = *limits;
	}
	dmareq.dmar_limits = &dma_lim;
	dmareq.dmar_flags = flags;
	dmareq.dmar_fp = waitfp;
	dmareq.dmar_arg = arg;
	dmareq.dmar_object.dmao_size = (uint_t)bp->b_bcount;

	if (bp->b_flags & B_PAGEIO) {
		/* Paged I/O: object is the buf's page list. */
		dmareq.dmar_object.dmao_type = DMA_OTYP_PAGES;
		dmareq.dmar_object.dmao_obj.pp_obj.pp_pp = bp->b_pages;
		dmareq.dmar_object.dmao_obj.pp_obj.pp_offset =
		    (uint_t)(((uintptr_t)bp->b_un.b_addr) & MMU_PAGEOFFSET);
	} else {
		dmareq.dmar_object.dmao_type = DMA_OTYP_BUFVADDR;
		dmareq.dmar_object.dmao_obj.virt_obj.v_addr = bp->b_un.b_addr;
		if (bp->b_flags & B_SHADOW) {
			/* Shadow list of pre-looked-up pages is available. */
			dmareq.dmar_object.dmao_obj.virt_obj.v_priv =
							bp->b_shadow;
		} else {
			dmareq.dmar_object.dmao_obj.virt_obj.v_priv = NULL;
		}

		/*
		 * If the buffer has no proc pointer, or the proc
		 * struct has the kernel address space, or the buffer has
		 * been marked B_REMAPPED (meaning that it is now
		 * mapped into the kernel's address space), then
		 * the address space is kas (kernel address space).
		 */
		if ((bp->b_proc == NULL) || (bp->b_proc->p_as == &kas) ||
		    (bp->b_flags & B_REMAPPED)) {
			dmareq.dmar_object.dmao_obj.virt_obj.v_as = 0;
		} else {
			dmareq.dmar_object.dmao_obj.virt_obj.v_as =
			    bp->b_proc->p_as;
		}
	}

	/*
	 * Handle the case that the requester is both a leaf
	 * and a nexus driver simultaneously by calling the
	 * requester's bus_dma_map function directly instead
	 * of ddi_dma_map.
	 */
	bop = DEVI(dip)->devi_ops->devo_bus_ops;
	if (bop && bop->bus_dma_map)
		funcp = bop->bus_dma_map;

	return ((*funcp)(dip, dip, &dmareq, handlep));
}
878 
#if !defined(__sparc)
/*
 * Request bus_dma_ctl parent to fiddle with a dma request.
 *
 * (The sparc version is in sparc_subr.s)
 */
int
ddi_dma_mctl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, enum ddi_dma_ctlops request,
    off_t *offp, size_t *lenp, caddr_t *objp, uint_t flags)
{
	int (*fp)();

	/* Jump to the cached ancestor that implements bus_dma_ctl. */
	dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_ctl;
	fp = DEVI(dip)->devi_ops->devo_bus_ops->bus_dma_ctl;
	return ((*fp) (dip, rdip, handle, request, offp, lenp, objp, flags));
}
#endif
897 
898 /*
899  * For all DMA control functions, call the DMA control
900  * routine and return status.
901  *
902  * Just plain assume that the parent is to be called.
903  * If a nexus driver or a thread outside the framework
904  * of a nexus driver or a leaf driver calls these functions,
905  * it is up to them to deal with the fact that the parent's
906  * bus_dma_ctl function will be the first one called.
907  */
908 
/* Shorthand for the requesting dip recorded in DMA handle 'h'. */
#define	HD	((ddi_dma_impl_t *)h)->dmai_rdip
910 
/*
 * Thin wrappers that express each legacy DMA control operation as a
 * ddi_dma_mctl() call with the matching DDI_DMA_* request code, using
 * the handle's recorded dip (HD) as both requester and target.
 */
int
ddi_dma_kvaddrp(ddi_dma_handle_t h, off_t off, size_t len, caddr_t *kp)
{
	return (ddi_dma_mctl(HD, HD, h, DDI_DMA_KVADDR, &off, &len, kp, 0));
}

int
ddi_dma_htoc(ddi_dma_handle_t h, off_t o, ddi_dma_cookie_t *c)
{
	return (ddi_dma_mctl(HD, HD, h, DDI_DMA_HTOC, &o, 0, (caddr_t *)c, 0));
}

int
ddi_dma_coff(ddi_dma_handle_t h, ddi_dma_cookie_t *c, off_t *o)
{
	return (ddi_dma_mctl(HD, HD, h, DDI_DMA_COFF,
	    (off_t *)c, 0, (caddr_t *)o, 0));
}

int
ddi_dma_movwin(ddi_dma_handle_t h, off_t *o, size_t *l, ddi_dma_cookie_t *c)
{
	return (ddi_dma_mctl(HD, HD, h, DDI_DMA_MOVWIN, o,
	    l, (caddr_t *)c, 0));
}

int
ddi_dma_curwin(ddi_dma_handle_t h, off_t *o, size_t *l)
{
	/* Window queries only make sense on a partial mapping. */
	if ((((ddi_dma_impl_t *)h)->dmai_rflags & DDI_DMA_PARTIAL) == 0)
		return (DDI_FAILURE);
	return (ddi_dma_mctl(HD, HD, h, DDI_DMA_REPWIN, o, l, 0, 0));
}

int
ddi_dma_nextwin(ddi_dma_handle_t h, ddi_dma_win_t win,
    ddi_dma_win_t *nwin)
{
	return (ddi_dma_mctl(HD, HD, h, DDI_DMA_NEXTWIN, (off_t *)&win, 0,
	    (caddr_t *)nwin, 0));
}

int
ddi_dma_nextseg(ddi_dma_win_t win, ddi_dma_seg_t seg, ddi_dma_seg_t *nseg)
{
	/* A window is represented by the underlying DMA handle. */
	ddi_dma_handle_t h = (ddi_dma_handle_t)win;

	return (ddi_dma_mctl(HD, HD, h, DDI_DMA_NEXTSEG, (off_t *)&win,
	    (size_t *)&seg, (caddr_t *)nseg, 0));
}
961 
#if (defined(__i386) && !defined(__amd64)) || defined(__sparc)
/*
 * This routine is Obsolete and should be removed from ALL architectures
 * in a future release of Solaris.
 *
 * It is deliberately NOT ported to amd64; please fix the code that
 * depends on this routine to use ddi_dma_nextcookie(9F).
 *
 * NOTE: even though we fixed the pointer through a 32-bit param issue (the fix
 * is a side effect to some other cleanup), we're still not going to support
 * this interface on x64.
 */
int
ddi_dma_segtocookie(ddi_dma_seg_t seg, off_t *o, off_t *l,
    ddi_dma_cookie_t *cookiep)
{
	/* A segment is represented by the underlying DMA handle. */
	ddi_dma_handle_t h = (ddi_dma_handle_t)seg;

	return (ddi_dma_mctl(HD, HD, h, DDI_DMA_SEGTOC, o, (size_t *)l,
	    (caddr_t *)cookiep, 0));
}
#endif	/* (__i386 && !__amd64) || __sparc */
984 
985 #if !defined(__sparc)
986 
987 /*
988  * The SPARC versions of these routines are done in assembler to
989  * save register windows, so they're in sparc_subr.s.
990  */
991 
/*
 * Each of the following routines resolves the cached ancestor dip that
 * implements the relevant bus_dma_* operation (stashed on the devinfo
 * node at attach time), fetches that ancestor's bus op, and calls it on
 * behalf of rdip.
 */
int
ddi_dma_map(dev_info_t *dip, dev_info_t *rdip,
	struct ddi_dma_req *dmareqp, ddi_dma_handle_t *handlep)
{
	dev_info_t	*hdip;
	int (*funcp)(dev_info_t *, dev_info_t *, struct ddi_dma_req *,
	    ddi_dma_handle_t *);

	hdip = (dev_info_t *)DEVI(dip)->devi_bus_dma_map;

	funcp = DEVI(hdip)->devi_ops->devo_bus_ops->bus_dma_map;
	return ((*funcp)(hdip, rdip, dmareqp, handlep));
}

int
ddi_dma_allochdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_attr_t *attr,
    int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *handlep)
{
	dev_info_t	*hdip;
	int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_attr_t *,
	    int (*)(caddr_t), caddr_t, ddi_dma_handle_t *);

	hdip = (dev_info_t *)DEVI(dip)->devi_bus_dma_allochdl;

	funcp = DEVI(hdip)->devi_ops->devo_bus_ops->bus_dma_allochdl;
	return ((*funcp)(hdip, rdip, attr, waitfp, arg, handlep));
}

int
ddi_dma_freehdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handlep)
{
	dev_info_t	*hdip;
	int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t);

	/*
	 * Free is routed to the same ancestor that allocated the handle
	 * (devi_bus_dma_allochdl), then its freehdl op is invoked.
	 */
	hdip = (dev_info_t *)DEVI(dip)->devi_bus_dma_allochdl;

	funcp = DEVI(hdip)->devi_ops->devo_bus_ops->bus_dma_freehdl;
	return ((*funcp)(hdip, rdip, handlep));
}

int
ddi_dma_bindhdl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, struct ddi_dma_req *dmareq,
    ddi_dma_cookie_t *cp, uint_t *ccountp)
{
	dev_info_t	*hdip;
	int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t,
	    struct ddi_dma_req *, ddi_dma_cookie_t *, uint_t *);

	hdip = (dev_info_t *)DEVI(dip)->devi_bus_dma_bindhdl;

	funcp = DEVI(hdip)->devi_ops->devo_bus_ops->bus_dma_bindhdl;
	return ((*funcp)(hdip, rdip, handle, dmareq, cp, ccountp));
}

int
ddi_dma_unbindhdl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle)
{
	dev_info_t	*hdip;
	int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t);

	hdip = (dev_info_t *)DEVI(dip)->devi_bus_dma_unbindhdl;

	funcp = DEVI(hdip)->devi_ops->devo_bus_ops->bus_dma_unbindhdl;
	return ((*funcp)(hdip, rdip, handle));
}
1059 
1060 
/* Route a DMA flush request to the cached bus_dma_flush ancestor. */
int
ddi_dma_flush(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, off_t off, size_t len,
    uint_t cache_flags)
{
	dev_info_t	*hdip;
	int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t,
	    off_t, size_t, uint_t);

	hdip = (dev_info_t *)DEVI(dip)->devi_bus_dma_flush;

	funcp = DEVI(hdip)->devi_ops->devo_bus_ops->bus_dma_flush;
	return ((*funcp)(hdip, rdip, handle, off, len, cache_flags));
}

/* Route a DMA window request to the cached bus_dma_win ancestor. */
int
ddi_dma_win(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, uint_t win, off_t *offp,
    size_t *lenp, ddi_dma_cookie_t *cookiep, uint_t *ccountp)
{
	dev_info_t	*hdip;
	int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t,
	    uint_t, off_t *, size_t *, ddi_dma_cookie_t *, uint_t *);

	hdip = (dev_info_t *)DEVI(dip)->devi_bus_dma_win;

	funcp = DEVI(hdip)->devi_ops->devo_bus_ops->bus_dma_win;
	return ((*funcp)(hdip, rdip, handle, win, offp, lenp,
	    cookiep, ccountp));
}
1091 
/*
 * Synchronize CPU and device views of DMA memory for the given range.
 * Returns DDI_SUCCESS immediately when no platform sync is needed;
 * otherwise the operation is serviced by the nexus bus_dma_flush
 * entry point recorded in devi_bus_dma_flush.
 */
int
ddi_dma_sync(ddi_dma_handle_t h, off_t o, size_t l, uint_t whom)
{
	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)h;
	dev_info_t *hdip, *dip;
	int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t, off_t,
		size_t, uint_t);

	/*
	 * the DMA nexus driver will set DMP_NOSYNC if the
	 * platform does not require any sync operation. For
	 * example if the memory is uncached or consistent
	 * and without any I/O write buffers involved.
	 */
	if ((hp->dmai_rflags & DMP_NOSYNC) == DMP_NOSYNC)
		return (DDI_SUCCESS);

	/* Sync is implemented through the nexus flush entry point. */
	dip = hp->dmai_rdip;
	hdip = (dev_info_t *)DEVI(dip)->devi_bus_dma_flush;
	funcp = DEVI(hdip)->devi_ops->devo_bus_ops->bus_dma_flush;
	return ((*funcp)(hdip, dip, h, o, l, whom));
}
1114 
/*
 * Unbind a DMA handle on behalf of its owning device (dmai_rdip).
 */
int
ddi_dma_unbind_handle(ddi_dma_handle_t h)
{
	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)h;
	dev_info_t *hdip, *dip;
	int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t);

	/*
	 * NOTE(review): the target dip comes from devi_bus_dma_unbindhdl
	 * but the function pointer from devi_bus_dma_unbindfunc of the
	 * child dip — presumably these are kept consistent when the
	 * device attaches; confirm against the devinfo setup code.
	 */
	dip = hp->dmai_rdip;
	hdip = (dev_info_t *)DEVI(dip)->devi_bus_dma_unbindhdl;
	funcp = DEVI(dip)->devi_bus_dma_unbindfunc;
	return ((*funcp)(hdip, dip, h));
}
1127 
1128 #endif	/* !__sparc */
1129 
/*
 * Free a DMA mapping via the legacy ddi_dma_mctl() control interface.
 */
int
ddi_dma_free(ddi_dma_handle_t h)
{
	return (ddi_dma_mctl(HD, HD, h, DDI_DMA_FREE, 0, 0, 0, 0));
}
1135 
1136 int
1137 ddi_iopb_alloc(dev_info_t *dip, ddi_dma_lim_t *limp, uint_t len, caddr_t *iopbp)
1138 {
1139 	ddi_dma_lim_t defalt;
1140 	size_t size = len;
1141 
1142 	if (!limp) {
1143 		defalt = standard_limits;
1144 		limp = &defalt;
1145 	}
1146 	return (i_ddi_mem_alloc_lim(dip, limp, size, 0, 0, 0,
1147 	    iopbp, NULL, NULL));
1148 }
1149 
/*
 * Free an I/O parameter block previously allocated by ddi_iopb_alloc().
 */
void
ddi_iopb_free(caddr_t iopb)
{
	i_ddi_mem_free(iopb, NULL);
}
1155 
1156 int
1157 ddi_mem_alloc(dev_info_t *dip, ddi_dma_lim_t *limits, uint_t length,
1158 	uint_t flags, caddr_t *kaddrp, uint_t *real_length)
1159 {
1160 	ddi_dma_lim_t defalt;
1161 	size_t size = length;
1162 
1163 	if (!limits) {
1164 		defalt = standard_limits;
1165 		limits = &defalt;
1166 	}
1167 	return (i_ddi_mem_alloc_lim(dip, limits, size, flags & 0x1,
1168 	    1, 0, kaddrp, real_length, NULL));
1169 }
1170 
/*
 * Free memory previously allocated by ddi_mem_alloc().
 */
void
ddi_mem_free(caddr_t kaddr)
{
	i_ddi_mem_free(kaddr, NULL);
}
1176 
1177 /*
1178  * DMA attributes, alignment, burst sizes, and transfer minimums
1179  */
1180 int
1181 ddi_dma_get_attr(ddi_dma_handle_t handle, ddi_dma_attr_t *attrp)
1182 {
1183 	ddi_dma_impl_t *dimp = (ddi_dma_impl_t *)handle;
1184 
1185 	if (attrp == NULL)
1186 		return (DDI_FAILURE);
1187 	*attrp = dimp->dmai_attr;
1188 	return (DDI_SUCCESS);
1189 }
1190 
1191 int
1192 ddi_dma_burstsizes(ddi_dma_handle_t handle)
1193 {
1194 	ddi_dma_impl_t *dimp = (ddi_dma_impl_t *)handle;
1195 
1196 	if (!dimp)
1197 		return (0);
1198 	else
1199 		return (dimp->dmai_burstsizes);
1200 }
1201 
/*
 * Derive the device alignment and minimum effective transfer size from
 * a DMA handle's burst sizes.  Alignment is computed from the lowest
 * set burst-size bit; minimum effect is the handle's dmai_minxfer.
 */
int
ddi_dma_devalign(ddi_dma_handle_t handle, uint_t *alignment, uint_t *mineffect)
{
	ddi_dma_impl_t *dimp = (ddi_dma_impl_t *)handle;

	if (!dimp || !alignment || !mineffect)
		return (DDI_FAILURE);
	if (!(dimp->dmai_rflags & DDI_DMA_SBUS_64BIT)) {
		*alignment = 1 << ddi_ffs(dimp->dmai_burstsizes);
	} else {
		/*
		 * SBus 64-bit mode: bits above 15 of dmai_burstsizes, when
		 * present, take precedence — presumably they encode the
		 * 64-bit burst sizes (confirm against the SBus nexus).
		 */
		if (dimp->dmai_burstsizes & 0xff0000) {
			*alignment = 1 << ddi_ffs(dimp->dmai_burstsizes >> 16);
		} else {
			*alignment = 1 << ddi_ffs(dimp->dmai_burstsizes);
		}
	}
	*mineffect = dimp->dmai_minxfer;
	return (DDI_SUCCESS);
}
1221 
1222 int
1223 ddi_iomin(dev_info_t *a, int i, int stream)
1224 {
1225 	int r;
1226 
1227 	/*
1228 	 * Make sure that the initial value is sane
1229 	 */
1230 	if (i & (i - 1))
1231 		return (0);
1232 	if (i == 0)
1233 		i = (stream) ? 4 : 1;
1234 
1235 	r = ddi_ctlops(a, a,
1236 	    DDI_CTLOPS_IOMIN, (void *)(uintptr_t)stream, (void *)&i);
1237 	if (r != DDI_SUCCESS || (i & (i - 1)))
1238 		return (0);
1239 	return (i);
1240 }
1241 
1242 /*
1243  * Given two DMA attribute structures, apply the attributes
1244  * of one to the other, following the rules of attributes
1245  * and the wishes of the caller.
1246  *
1247  * The rules of DMA attribute structures are that you cannot
1248  * make things *less* restrictive as you apply one set
1249  * of attributes to another.
1250  *
1251  */
void
ddi_dma_attr_merge(ddi_dma_attr_t *attr, ddi_dma_attr_t *mod)
{
	/* The address window may only shrink. */
	attr->dma_attr_addr_lo =
	    MAX(attr->dma_attr_addr_lo, mod->dma_attr_addr_lo);
	attr->dma_attr_addr_hi =
	    MIN(attr->dma_attr_addr_hi, mod->dma_attr_addr_hi);
	attr->dma_attr_count_max =
	    MIN(attr->dma_attr_count_max, mod->dma_attr_count_max);
	/* Alignment may only become stricter (larger). */
	attr->dma_attr_align =
	    MAX(attr->dma_attr_align,  mod->dma_attr_align);
	/* Only burst sizes supported by both sides survive. */
	attr->dma_attr_burstsizes =
	    (uint_t)(attr->dma_attr_burstsizes & mod->dma_attr_burstsizes);
	attr->dma_attr_minxfer =
	    maxbit(attr->dma_attr_minxfer, mod->dma_attr_minxfer);
	attr->dma_attr_maxxfer =
	    MIN(attr->dma_attr_maxxfer, mod->dma_attr_maxxfer);
	attr->dma_attr_seg = MIN(attr->dma_attr_seg, mod->dma_attr_seg);
	/* Scatter/gather list length is bounded by the smaller limit. */
	attr->dma_attr_sgllen = MIN((uint_t)attr->dma_attr_sgllen,
	    (uint_t)mod->dma_attr_sgllen);
	attr->dma_attr_granular =
	    MAX(attr->dma_attr_granular, mod->dma_attr_granular);
}
1275 
1276 /*
1277  * mmap/segmap interface:
1278  */
1279 
1280 /*
1281  * ddi_segmap:		setup the default segment driver. Calls the drivers
1282  *			XXmmap routine to validate the range to be mapped.
1283  *			Return ENXIO of the range is not valid.  Create
1284  *			a seg_dev segment that contains all of the
1285  *			necessary information and will reference the
1286  *			default segment driver routines. It returns zero
1287  *			on success or non-zero on failure.
1288  */
int
ddi_segmap(dev_t dev, off_t offset, struct as *asp, caddr_t *addrp, off_t len,
    uint_t prot, uint_t maxprot, uint_t flags, cred_t *credp)
{
	/* Delegate entirely to the specfs segmap implementation. */
	extern int spec_segmap(dev_t, off_t, struct as *, caddr_t *,
	    off_t, uint_t, uint_t, uint_t, struct cred *);

	return (spec_segmap(dev, offset, asp, addrp, len,
	    prot, maxprot, flags, credp));
}
1299 
1300 /*
1301  * ddi_map_fault:	Resolve mappings at fault time.  Used by segment
1302  *			drivers. Allows each successive parent to resolve
1303  *			address translations and add its mappings to the
1304  *			mapping list supplied in the page structure. It
1305  *			returns zero on success	or non-zero on failure.
1306  */
1307 
int
ddi_map_fault(dev_info_t *dip, struct hat *hat, struct seg *seg,
    caddr_t addr, struct devpage *dp, pfn_t pfn, uint_t prot, uint_t lock)
{
	/* dip is passed as both the requesting and the target devinfo. */
	return (i_ddi_map_fault(dip, dip, hat, seg, addr, dp, pfn, prot, lock));
}
1314 
1315 /*
1316  * ddi_device_mapping_check:	Called from ddi_segmap_setup.
1317  *	Invokes platform specific DDI to determine whether attributes specified
1318  *	in attr(9s) are	valid for the region of memory that will be made
1319  *	available for direct access to user process via the mmap(2) system call.
1320  */
int
ddi_device_mapping_check(dev_t dev, ddi_device_acc_attr_t *accattrp,
    uint_t rnumber, uint_t *hat_flags)
{
	ddi_acc_handle_t handle;
	ddi_map_req_t mr;
	ddi_acc_hdl_t *hp;
	int result;
	dev_info_t *dip;

	/*
	 * we use e_ddi_hold_devi_by_dev to search for the devi.  We
	 * release it immediately since it should already be held by
	 * a devfs vnode.
	 */
	if ((dip =
	    e_ddi_hold_devi_by_dev(dev, E_DDI_HOLD_DEVI_NOATTACH)) == NULL)
		return (-1);
	ddi_release_devi(dip);		/* for e_ddi_hold_devi_by_dev() */

	/*
	 * Allocate and initialize the common elements of data
	 * access handle.
	 */
	handle = impl_acc_hdl_alloc(KM_SLEEP, NULL);
	if (handle == NULL)
		return (-1);

	hp = impl_acc_hdl_get(handle);
	hp->ah_vers = VERS_ACCHDL;
	hp->ah_dip = dip;
	hp->ah_rnumber = rnumber;
	hp->ah_offset = 0;
	hp->ah_len = 0;
	hp->ah_acc = *accattrp;

	/*
	 * Set up the mapping request and call to parent.
	 */
	mr.map_op = DDI_MO_MAP_HANDLE;
	mr.map_type = DDI_MT_RNUMBER;
	mr.map_obj.rnumber = rnumber;
	mr.map_prot = PROT_READ | PROT_WRITE;
	mr.map_flags = DDI_MF_KERNEL_MAPPING;
	mr.map_handlep = hp;
	mr.map_vers = DDI_MAP_VERSION;
	result = ddi_map(dip, &mr, 0, 0, NULL);

	/*
	 * Region must be mappable, pick up flags from the framework.
	 */
	*hat_flags = hp->ah_hat_flags;

	/* The temporary handle is freed whether or not the map worked. */
	impl_acc_hdl_free(handle);

	/*
	 * check for end result.
	 */
	if (result != DDI_SUCCESS)
		return (-1);
	return (0);
}
1383 
1384 
1385 /*
1386  * Property functions:	 See also, ddipropdefs.h.
1387  *
1388  * These functions are the framework for the property functions,
1389  * i.e. they support software defined properties.  All implementation
1390  * specific property handling (i.e.: self-identifying devices and
1391  * PROM defined properties are handled in the implementation specific
1392  * functions (defined in ddi_implfuncs.h).
1393  */
1394 
1395 /*
1396  * nopropop:	Shouldn't be called, right?
1397  */
int
nopropop(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op, int mod_flags,
    char *name, caddr_t valuep, int *lengthp)
{
	/* Stub prop_op: always reports the property as not found. */
	_NOTE(ARGUNUSED(dev, dip, prop_op, mod_flags, name, valuep, lengthp))
	return (DDI_PROP_NOT_FOUND);
}
1405 
1406 #ifdef	DDI_PROP_DEBUG
1407 int ddi_prop_debug_flag = 0;
1408 
1409 int
1410 ddi_prop_debug(int enable)
1411 {
1412 	int prev = ddi_prop_debug_flag;
1413 
1414 	if ((enable != 0) || (prev != 0))
1415 		printf("ddi_prop_debug: debugging %s\n",
1416 		    enable ? "enabled" : "disabled");
1417 	ddi_prop_debug_flag = enable;
1418 	return (prev);
1419 }
1420 
1421 #endif	/* DDI_PROP_DEBUG */
1422 
1423 /*
1424  * Search a property list for a match, if found return pointer
1425  * to matching prop struct, else return NULL.
1426  */
1427 
1428 ddi_prop_t *
1429 i_ddi_prop_search(dev_t dev, char *name, uint_t flags, ddi_prop_t **list_head)
1430 {
1431 	ddi_prop_t	*propp;
1432 
1433 	/*
1434 	 * find the property in child's devinfo:
1435 	 * Search order defined by this search function is first matching
1436 	 * property with input dev == DDI_DEV_T_ANY matching any dev or
1437 	 * dev == propp->prop_dev, name == propp->name, and the correct
1438 	 * data type as specified in the flags.  If a DDI_DEV_T_NONE dev
1439 	 * value made it this far then it implies a DDI_DEV_T_ANY search.
1440 	 */
1441 	if (dev == DDI_DEV_T_NONE)
1442 		dev = DDI_DEV_T_ANY;
1443 
1444 	for (propp = *list_head; propp != NULL; propp = propp->prop_next)  {
1445 
1446 		if (!DDI_STRSAME(propp->prop_name, name))
1447 			continue;
1448 
1449 		if ((dev != DDI_DEV_T_ANY) && (propp->prop_dev != dev))
1450 			continue;
1451 
1452 		if (((propp->prop_flags & flags) & DDI_PROP_TYPE_MASK) == 0)
1453 			continue;
1454 
1455 		return (propp);
1456 	}
1457 
1458 	return ((ddi_prop_t *)0);
1459 }
1460 
1461 /*
1462  * Search for property within devnames structures
1463  */
ddi_prop_t *
i_ddi_search_global_prop(dev_t dev, char *name, uint_t flags)
{
	major_t		major;
	struct devnames	*dnp;
	ddi_prop_t	*propp;

	/*
	 * Valid dev_t value is needed to index into the
	 * correct devnames entry, therefore a dev_t
	 * value of DDI_DEV_T_ANY is not appropriate.
	 */
	ASSERT(dev != DDI_DEV_T_ANY);
	if (dev == DDI_DEV_T_ANY) {
		return ((ddi_prop_t *)0);
	}

	major = getmajor(dev);
	dnp = &(devnamesp[major]);

	/* No global property list for this driver: nothing to search. */
	if (dnp->dn_global_prop_ptr == NULL)
		return ((ddi_prop_t *)0);

	/* The list is walked under the devnames lock. */
	LOCK_DEV_OPS(&dnp->dn_lock);

	for (propp = dnp->dn_global_prop_ptr->prop_list;
	    propp != NULL;
	    propp = (ddi_prop_t *)propp->prop_next) {

		if (!DDI_STRSAME(propp->prop_name, name))
			continue;

		/* LDI_DEV_T_ANY in flags relaxes the dev match. */
		if ((!(flags & LDI_DEV_T_ANY)) && (propp->prop_dev != dev))
			continue;

		if (((propp->prop_flags & flags) & DDI_PROP_TYPE_MASK) == 0)
			continue;

		/* Property found, return it */
		UNLOCK_DEV_OPS(&dnp->dn_lock);
		return (propp);
	}

	UNLOCK_DEV_OPS(&dnp->dn_lock);
	return ((ddi_prop_t *)0);
}
1510 
1511 static char prop_no_mem_msg[] = "can't allocate memory for ddi property <%s>";
1512 
1513 /*
1514  * ddi_prop_search_global:
1515  *	Search the global property list within devnames
1516  *	for the named property.  Return the encoded value.
1517  */
static int
i_ddi_prop_search_global(dev_t dev, uint_t flags, char *name,
    void *valuep, uint_t *lengthp)
{
	ddi_prop_t	*propp;
	caddr_t		buffer;

	propp =  i_ddi_search_global_prop(dev, name, flags);

	/* Property NOT found, bail */
	if (propp == (ddi_prop_t *)0)
		return (DDI_PROP_NOT_FOUND);

	if (propp->prop_flags & DDI_PROP_UNDEF_IT)
		return (DDI_PROP_UNDEFINED);

	/* Allocation may sleep only if the caller allows it. */
	if ((buffer = kmem_alloc(propp->prop_len,
	    (flags & DDI_PROP_CANSLEEP) ? KM_SLEEP : KM_NOSLEEP)) == NULL) {
		cmn_err(CE_CONT, prop_no_mem_msg, name);
		return (DDI_PROP_NO_MEMORY);
	}

	/*
	 * Return the encoded data
	 */
	*(caddr_t *)valuep = buffer;
	*lengthp = propp->prop_len;
	bcopy(propp->prop_val, buffer, propp->prop_len);

	return (DDI_PROP_SUCCESS);
}
1549 
1550 /*
1551  * ddi_prop_search_common:	Lookup and return the encoded value
1552  */
/*
 * Look up a software property and service the requested prop_op on it:
 * existence check, length query, copy into a caller buffer, or copy
 * into a freshly allocated buffer.  The search starts at dip and, if
 * allowed by flags, iterates up the devinfo tree via each parent's
 * bus_prop_op entry point.
 */
int
ddi_prop_search_common(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
    uint_t flags, char *name, void *valuep, uint_t *lengthp)
{
	ddi_prop_t	*propp;
	int		i;
	caddr_t		buffer;
	caddr_t		prealloc = NULL;
	int		plength = 0;
	dev_info_t	*pdip;
	int		(*bop)();

	/*CONSTANTCONDITION*/
	while (1)  {

		mutex_enter(&(DEVI(dip)->devi_lock));


		/*
		 * find the property in child's devinfo:
		 * Search order is:
		 *	1. driver defined properties
		 *	2. system defined properties
		 *	3. driver global properties
		 *	4. boot defined properties
		 */

		propp = i_ddi_prop_search(dev, name, flags,
		    &(DEVI(dip)->devi_drv_prop_ptr));
		if (propp == NULL)  {
			propp = i_ddi_prop_search(dev, name, flags,
			    &(DEVI(dip)->devi_sys_prop_ptr));
		}
		if ((propp == NULL) && DEVI(dip)->devi_global_prop_list) {
			propp = i_ddi_prop_search(dev, name, flags,
			    &DEVI(dip)->devi_global_prop_list->prop_list);
		}

		if (propp == NULL)  {
			propp = i_ddi_prop_search(dev, name, flags,
			    &(DEVI(dip)->devi_hw_prop_ptr));
		}

		/*
		 * Software property found?
		 */
		if (propp != (ddi_prop_t *)0)	{

			/*
			 * If explicit undefine, return now.
			 */
			if (propp->prop_flags & DDI_PROP_UNDEF_IT) {
				mutex_exit(&(DEVI(dip)->devi_lock));
				if (prealloc)
					kmem_free(prealloc, plength);
				return (DDI_PROP_UNDEFINED);
			}

			/*
			 * If we only want to know if it exists, return now
			 */
			if (prop_op == PROP_EXISTS) {
				mutex_exit(&(DEVI(dip)->devi_lock));
				ASSERT(prealloc == NULL);
				return (DDI_PROP_SUCCESS);
			}

			/*
			 * If length only request or prop length == 0,
			 * service request and return now.
			 */
			if ((prop_op == PROP_LEN) ||(propp->prop_len == 0)) {
				*lengthp = propp->prop_len;

				/*
				 * if prop_op is PROP_LEN_AND_VAL_ALLOC
				 * that means prop_len is 0, so set valuep
				 * also to NULL
				 */
				if (prop_op == PROP_LEN_AND_VAL_ALLOC)
					*(caddr_t *)valuep = NULL;

				mutex_exit(&(DEVI(dip)->devi_lock));
				if (prealloc)
					kmem_free(prealloc, plength);
				return (DDI_PROP_SUCCESS);
			}

			/*
			 * If LEN_AND_VAL_ALLOC and the request can sleep,
			 * drop the mutex, allocate the buffer, and go
			 * through the loop again.  If we already allocated
			 * the buffer, and the size of the property changed,
			 * keep trying...
			 */
			if ((prop_op == PROP_LEN_AND_VAL_ALLOC) &&
			    (flags & DDI_PROP_CANSLEEP))  {
				if (prealloc && (propp->prop_len != plength)) {
					kmem_free(prealloc, plength);
					prealloc = NULL;
				}
				if (prealloc == NULL)  {
					plength = propp->prop_len;
					mutex_exit(&(DEVI(dip)->devi_lock));
					prealloc = kmem_alloc(plength,
					    KM_SLEEP);
					continue;
				}
			}

			/*
			 * Allocate buffer, if required.  Either way,
			 * set `buffer' variable.
			 */
			i = *lengthp;			/* Get callers length */
			*lengthp = propp->prop_len;	/* Set callers length */

			switch (prop_op) {

			case PROP_LEN_AND_VAL_ALLOC:

				if (prealloc == NULL) {
					buffer = kmem_alloc(propp->prop_len,
					    KM_NOSLEEP);
				} else {
					buffer = prealloc;
				}

				if (buffer == NULL)  {
					mutex_exit(&(DEVI(dip)->devi_lock));
					cmn_err(CE_CONT, prop_no_mem_msg, name);
					return (DDI_PROP_NO_MEMORY);
				}
				/* Set callers buf ptr */
				*(caddr_t *)valuep = buffer;
				break;

			case PROP_LEN_AND_VAL_BUF:

				/* Caller's buffer must hold the property. */
				if (propp->prop_len > (i)) {
					mutex_exit(&(DEVI(dip)->devi_lock));
					return (DDI_PROP_BUF_TOO_SMALL);
				}

				buffer = valuep;  /* Get callers buf ptr */
				break;

			default:
				break;
			}

			/*
			 * Do the copy.
			 */
			bcopy(propp->prop_val, buffer, propp->prop_len);
			mutex_exit(&(DEVI(dip)->devi_lock));
			return (DDI_PROP_SUCCESS);
		}

		mutex_exit(&(DEVI(dip)->devi_lock));
		if (prealloc)
			kmem_free(prealloc, plength);
		prealloc = NULL;

		/*
		 * Prop not found, call parent bus_ops to deal with possible
		 * h/w layer (possible PROM defined props, etc.) and to
		 * possibly ascend the hierarchy, if allowed by flags.
		 */
		pdip = (dev_info_t *)DEVI(dip)->devi_parent;

		/*
		 * One last call for the root driver PROM props?
		 */
		if (dip == ddi_root_node())  {
			return (ddi_bus_prop_op(dev, dip, dip, prop_op,
			    flags, name, valuep, (int *)lengthp));
		}

		/*
		 * We may have been called to check for properties
		 * within a single devinfo node that has no parent -
		 * see make_prop()
		 */
		if (pdip == NULL) {
			ASSERT((flags &
			    (DDI_PROP_DONTPASS | DDI_PROP_NOTPROM)) ==
			    (DDI_PROP_DONTPASS | DDI_PROP_NOTPROM));
			return (DDI_PROP_NOT_FOUND);
		}

		/*
		 * Instead of recursing, we do iterative calls up the tree.
		 * As a bit of optimization, skip the bus_op level if the
		 * node is a s/w node and if the parent's bus_prop_op function
		 * is `ddi_bus_prop_op', because we know that in this case,
		 * this function does nothing.
		 *
		 * 4225415: If the parent isn't attached, or the child
		 * hasn't been named by the parent yet, use the default
		 * ddi_bus_prop_op as a proxy for the parent.  This
		 * allows property lookups in any child/parent state to
		 * include 'prom' and inherited properties, even when
		 * there are no drivers attached to the child or parent.
		 */

		bop = ddi_bus_prop_op;
		if (i_ddi_devi_attached(pdip) &&
		    (i_ddi_node_state(dip) >= DS_INITIALIZED))
			bop = DEVI(pdip)->devi_ops->devo_bus_ops->bus_prop_op;

		i = DDI_PROP_NOT_FOUND;

		if ((bop != ddi_bus_prop_op) || ndi_dev_is_prom_node(dip)) {
			i = (*bop)(dev, pdip, dip, prop_op,
			    flags | DDI_PROP_DONTPASS,
			    name, valuep, lengthp);
		}

		if ((flags & DDI_PROP_DONTPASS) ||
		    (i != DDI_PROP_NOT_FOUND))
			return (i);

		/* Ascend to the parent and search again. */
		dip = pdip;
	}
	/*NOTREACHED*/
}
1780 
1781 
1782 /*
1783  * ddi_prop_op: The basic property operator for drivers.
1784  *
1785  * In ddi_prop_op, the type of valuep is interpreted based on prop_op:
1786  *
1787  *	prop_op			valuep
1788  *	------			------
1789  *
1790  *	PROP_LEN		<unused>
1791  *
1792  *	PROP_LEN_AND_VAL_BUF	Pointer to callers buffer
1793  *
1794  *	PROP_LEN_AND_VAL_ALLOC	Address of callers pointer (will be set to
1795  *				address of allocated buffer, if successful)
1796  */
int
ddi_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op, int mod_flags,
    char *name, caddr_t valuep, int *lengthp)
{
	int	i;

	/* Callers of this interface must not pass typed-property flags. */
	ASSERT((mod_flags & DDI_PROP_TYPE_MASK) == 0);

	/*
	 * If this was originally an LDI prop lookup then we bail here.
	 * The reason is that the LDI property lookup interfaces first call
	 * a drivers prop_op() entry point to allow it to override
	 * properties.  But if we've made it here, then the driver hasn't
	 * overridden any properties.  We don't want to continue with the
	 * property search here because we don't have any type information.
	 * When we return failure, the LDI interfaces will then proceed to
	 * call the typed property interfaces to look up the property.
	 */
	if (mod_flags & DDI_PROP_DYNAMIC)
		return (DDI_PROP_NOT_FOUND);

	/*
	 * check for pre-typed property consumer asking for typed property:
	 * see e_ddi_getprop_int64.
	 */
	if (mod_flags & DDI_PROP_CONSUMER_TYPED)
		mod_flags |= DDI_PROP_TYPE_INT64;
	mod_flags |= DDI_PROP_TYPE_ANY;

	i = ddi_prop_search_common(dev, dip, prop_op,
		mod_flags, name, valuep, (uint_t *)lengthp);
	/* A 1275 (PROM) hit is reported to the caller as plain success. */
	if (i == DDI_PROP_FOUND_1275)
		return (DDI_PROP_SUCCESS);
	return (i);
}
1832 
1833 /*
1834  * ddi_prop_op_nblocks: The basic property operator for drivers that maintain
1835  * size in number of DEV_BSIZE blocks.  Provides a dynamic property
1836  * implementation for size oriented properties based on nblocks64 values passed
1837  * in by the driver.  Fallback to ddi_prop_op if the nblocks64 is too large.
1838  * This interface should not be used with a nblocks64 that represents the
1839  * driver's idea of how to represent unknown, if nblocks is unknown use
1840  * ddi_prop_op.
1841  */
1842 int
1843 ddi_prop_op_nblocks(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
1844     int mod_flags, char *name, caddr_t valuep, int *lengthp, uint64_t nblocks64)
1845 {
1846 	uint64_t size64;
1847 
1848 	/*
1849 	 * There is no point in supporting nblocks64 values that don't have
1850 	 * an accurate uint64_t byte count representation.
1851 	 */
1852 	if (nblocks64 >= (UINT64_MAX >> DEV_BSHIFT))
1853 		return (ddi_prop_op(dev, dip, prop_op, mod_flags,
1854 		    name, valuep, lengthp));
1855 
1856 	size64 = nblocks64 << DEV_BSHIFT;
1857 	return (ddi_prop_op_size(dev, dip, prop_op, mod_flags,
1858 	    name, valuep, lengthp, size64));
1859 }
1860 
1861 /*
1862  * ddi_prop_op_size: The basic property operator for drivers that maintain size
1863  * in bytes. Provides a of dynamic property implementation for size oriented
1864  * properties based on size64 values passed in by the driver.  Fallback to
1865  * ddi_prop_op if the size64 is too large. This interface should not be used
1866  * with a size64 that represents the driver's idea of how to represent unknown,
1867  * if size is unknown use ddi_prop_op.
1868  *
1869  * NOTE: the legacy "nblocks"/"size" properties are treated as 32-bit unsigned
1870  * integers. While the most likely interface to request them ([bc]devi_size)
1871  * is declared int (signed) there is no enforcement of this, which means we
1872  * can't enforce limitations here without risking regression.
1873  */
int
ddi_prop_op_size(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
    int mod_flags, char *name, caddr_t valuep, int *lengthp, uint64_t size64)
{
	uint64_t nblocks64;
	int	callers_length;
	caddr_t	buffer;

	/* compute DEV_BSIZE nblocks value */
	nblocks64 = lbtodb(size64);

	/* get callers length, establish length of our dynamic properties */
	callers_length = *lengthp;

	/*
	 * "Nblocks"/"Size" are served as 64-bit values; the legacy
	 * lowercase "nblocks"/"size" are 32-bit and only served when
	 * the value fits, otherwise we fall through to ddi_prop_op.
	 */
	if (strcmp(name, "Nblocks") == 0)
		*lengthp = sizeof (uint64_t);
	else if (strcmp(name, "Size") == 0)
		*lengthp = sizeof (uint64_t);
	else if ((strcmp(name, "nblocks") == 0) && (nblocks64 < UINT_MAX))
		*lengthp = sizeof (uint32_t);
	else if ((strcmp(name, "size") == 0) && (size64 < UINT_MAX))
		*lengthp = sizeof (uint32_t);
	else {
		/* fallback to ddi_prop_op */
		return (ddi_prop_op(dev, dip, prop_op, mod_flags,
		    name, valuep, lengthp));
	}

	/* service request for the length of the property */
	if (prop_op == PROP_LEN)
		return (DDI_PROP_SUCCESS);

	/* the length of the property and the request must match */
	if (callers_length != *lengthp)
		return (DDI_PROP_INVAL_ARG);

	switch (prop_op) {
	case PROP_LEN_AND_VAL_ALLOC:
		if ((buffer = kmem_alloc(*lengthp,
		    (mod_flags & DDI_PROP_CANSLEEP) ?
		    KM_SLEEP : KM_NOSLEEP)) == NULL)
			return (DDI_PROP_NO_MEMORY);

		*(caddr_t *)valuep = buffer;	/* set callers buf ptr */
		break;

	case PROP_LEN_AND_VAL_BUF:
		buffer = valuep;		/* get callers buf ptr */
		break;

	default:
		return (DDI_PROP_INVAL_ARG);
	}

	/* transfer the value into the buffer */
	if (strcmp(name, "Nblocks") == 0)
		*((uint64_t *)buffer) = nblocks64;
	else if (strcmp(name, "Size") == 0)
		*((uint64_t *)buffer) = size64;
	else if (strcmp(name, "nblocks") == 0)
		*((uint32_t *)buffer) = (uint32_t)nblocks64;
	else if (strcmp(name, "size") == 0)
		*((uint32_t *)buffer) = (uint32_t)size64;
	return (DDI_PROP_SUCCESS);
}
1939 
1940 /*
1941  * Variable length props...
1942  */
1943 
1944 /*
1945  * ddi_getlongprop:	Get variable length property len+val into a buffer
1946  *		allocated by property provider via kmem_alloc. Requester
1947  *		is responsible for freeing returned property via kmem_free.
1948  *
1949  *	Arguments:
1950  *
1951  *	dev_t:	Input:	dev_t of property.
1952  *	dip:	Input:	dev_info_t pointer of child.
1953  *	flags:	Input:	Possible flag modifiers are:
1954  *		DDI_PROP_DONTPASS:	Don't pass to parent if prop not found.
1955  *		DDI_PROP_CANSLEEP:	Memory allocation may sleep.
1956  *	name:	Input:	name of property.
1957  *	valuep:	Output:	Addr of callers buffer pointer.
1958  *	lengthp:Output:	*lengthp will contain prop length on exit.
1959  *
1960  *	Possible Returns:
1961  *
1962  *		DDI_PROP_SUCCESS:	Prop found and returned.
1963  *		DDI_PROP_NOT_FOUND:	Prop not found
1964  *		DDI_PROP_UNDEFINED:	Prop explicitly undefined.
1965  *		DDI_PROP_NO_MEMORY:	Prop found, but unable to alloc mem.
1966  */
1967 
int
ddi_getlongprop(dev_t dev, dev_info_t *dip, int flags,
    char *name, caddr_t valuep, int *lengthp)
{
	/* Allocating variant: provider kmem_allocs, caller kmem_frees. */
	return (ddi_prop_op(dev, dip, PROP_LEN_AND_VAL_ALLOC,
	    flags, name, valuep, lengthp));
}
1975 
1976 /*
1977  *
1978  * ddi_getlongprop_buf:		Get long prop into pre-allocated callers
1979  *				buffer. (no memory allocation by provider).
1980  *
1981  *	dev_t:	Input:	dev_t of property.
1982  *	dip:	Input:	dev_info_t pointer of child.
1983  *	flags:	Input:	DDI_PROP_DONTPASS or NULL
1984  *	name:	Input:	name of property
1985  *	valuep:	Input:	ptr to callers buffer.
1986  *	lengthp:I/O:	ptr to length of callers buffer on entry,
1987  *			actual length of property on exit.
1988  *
1989  *	Possible returns:
1990  *
1991  *		DDI_PROP_SUCCESS	Prop found and returned
1992  *		DDI_PROP_NOT_FOUND	Prop not found
1993  *		DDI_PROP_UNDEFINED	Prop explicitly undefined.
1994  *		DDI_PROP_BUF_TOO_SMALL	Prop found, callers buf too small,
1995  *					no value returned, but actual prop
1996  *					length returned in *lengthp
1997  *
1998  */
1999 
int
ddi_getlongprop_buf(dev_t dev, dev_info_t *dip, int flags,
    char *name, caddr_t valuep, int *lengthp)
{
	/* Buffered variant: value is copied into the caller's buffer. */
	return (ddi_prop_op(dev, dip, PROP_LEN_AND_VAL_BUF,
	    flags, name, valuep, lengthp));
}
2007 
2008 /*
2009  * Integer/boolean sized props.
2010  *
2011  * Call is value only... returns found boolean or int sized prop value or
2012  * defvalue if prop not found or is wrong length or is explicitly undefined.
2013  * Only flag is DDI_PROP_DONTPASS...
2014  *
2015  * By convention, this interface returns boolean (0) sized properties
2016  * as value (int)1.
2017  *
2018  * This never returns an error, if property not found or specifically
2019  * undefined, the input `defvalue' is returned.
2020  */
2021 
2022 int
2023 ddi_getprop(dev_t dev, dev_info_t *dip, int flags, char *name, int defvalue)
2024 {
2025 	int	propvalue = defvalue;
2026 	int	proplength = sizeof (int);
2027 	int	error;
2028 
2029 	error = ddi_prop_op(dev, dip, PROP_LEN_AND_VAL_BUF,
2030 	    flags, name, (caddr_t)&propvalue, &proplength);
2031 
2032 	if ((error == DDI_PROP_SUCCESS) && (proplength == 0))
2033 		propvalue = 1;
2034 
2035 	return (propvalue);
2036 }
2037 
2038 /*
2039  * Get prop length interface: flags are 0 or DDI_PROP_DONTPASS
2040  * if returns DDI_PROP_SUCCESS, length returned in *lengthp.
2041  */
2042 
int
ddi_getproplen(dev_t dev, dev_info_t *dip, int flags, char *name, int *lengthp)
{
	/* Length-only query; value is never fetched. */
	return (ddi_prop_op(dev, dip, PROP_LEN, flags, name, NULL, lengthp));
}
2048 
2049 /*
2050  * Allocate a struct prop_driver_data, along with 'size' bytes
2051  * for decoded property data.  This structure is freed by
2052  * calling ddi_prop_free(9F).
2053  */
2054 static void *
2055 ddi_prop_decode_alloc(size_t size, void (*prop_free)(struct prop_driver_data *))
2056 {
2057 	struct prop_driver_data *pdd;
2058 
2059 	/*
2060 	 * Allocate a structure with enough memory to store the decoded data.
2061 	 */
2062 	pdd = kmem_zalloc(sizeof (struct prop_driver_data) + size, KM_SLEEP);
2063 	pdd->pdd_size = (sizeof (struct prop_driver_data) + size);
2064 	pdd->pdd_prop_free = prop_free;
2065 
2066 	/*
2067 	 * Return a pointer to the location to put the decoded data.
2068 	 */
2069 	return ((void *)((caddr_t)pdd + sizeof (struct prop_driver_data)));
2070 }
2071 
2072 /*
2073  * Allocated the memory needed to store the encoded data in the property
2074  * handle.
2075  */
2076 static int
2077 ddi_prop_encode_alloc(prop_handle_t *ph, size_t size)
2078 {
2079 	/*
2080 	 * If size is zero, then set data to NULL and size to 0.  This
2081 	 * is a boolean property.
2082 	 */
2083 	if (size == 0) {
2084 		ph->ph_size = 0;
2085 		ph->ph_data = NULL;
2086 		ph->ph_cur_pos = NULL;
2087 		ph->ph_save_pos = NULL;
2088 	} else {
2089 		if (ph->ph_flags == DDI_PROP_DONTSLEEP) {
2090 			ph->ph_data = kmem_zalloc(size, KM_NOSLEEP);
2091 			if (ph->ph_data == NULL)
2092 				return (DDI_PROP_NO_MEMORY);
2093 		} else
2094 			ph->ph_data = kmem_zalloc(size, KM_SLEEP);
2095 		ph->ph_size = size;
2096 		ph->ph_cur_pos = ph->ph_data;
2097 		ph->ph_save_pos = ph->ph_data;
2098 	}
2099 	return (DDI_PROP_SUCCESS);
2100 }
2101 
2102 /*
2103  * Free the space allocated by the lookup routines.  Each lookup routine
2104  * returns a pointer to the decoded data to the driver.  The driver then
2105  * passes this pointer back to us.  This data actually lives in a struct
2106  * prop_driver_data.  We use negative indexing to find the beginning of
2107  * the structure and then free the entire structure using the size and
2108  * the free routine stored in the structure.
2109  */
2110 void
2111 ddi_prop_free(void *datap)
2112 {
2113 	struct prop_driver_data *pdd;
2114 
2115 	/*
2116 	 * Get the structure
2117 	 */
2118 	pdd = (struct prop_driver_data *)
2119 		((caddr_t)datap - sizeof (struct prop_driver_data));
2120 	/*
2121 	 * Call the free routine to free it
2122 	 */
2123 	(*pdd->pdd_prop_free)(pdd);
2124 }
2125 
2126 /*
2127  * Free the data associated with an array of ints,
2128  * allocated with ddi_prop_decode_alloc().
2129  */
2130 static void
2131 ddi_prop_free_ints(struct prop_driver_data *pdd)
2132 {
2133 	kmem_free(pdd, pdd->pdd_size);
2134 }
2135 
2136 /*
2137  * Free a single string property or a single string contained within
2138  * the argv style return value of an array of strings.
2139  */
2140 static void
2141 ddi_prop_free_string(struct prop_driver_data *pdd)
2142 {
2143 	kmem_free(pdd, pdd->pdd_size);
2144 
2145 }
2146 
2147 /*
2148  * Free an array of strings.
2149  */
2150 static void
2151 ddi_prop_free_strings(struct prop_driver_data *pdd)
2152 {
2153 	kmem_free(pdd, pdd->pdd_size);
2154 }
2155 
2156 /*
2157  * Free the data associated with an array of bytes.
2158  */
2159 static void
2160 ddi_prop_free_bytes(struct prop_driver_data *pdd)
2161 {
2162 	kmem_free(pdd, pdd->pdd_size);
2163 }
2164 
2165 /*
2166  * Reset the current location pointer in the property handle to the
2167  * beginning of the data.
2168  */
2169 void
2170 ddi_prop_reset_pos(prop_handle_t *ph)
2171 {
2172 	ph->ph_cur_pos = ph->ph_data;
2173 	ph->ph_save_pos = ph->ph_data;
2174 }
2175 
/*
 * Save the location that the current location pointer is pointing to,
 * so that it can later be restored by ddi_prop_restore_pos().
 */
2180 void
2181 ddi_prop_save_pos(prop_handle_t *ph)
2182 {
2183 	ph->ph_save_pos = ph->ph_cur_pos;
2184 }
2185 
/*
 * Restore the current location pointer in the property handle to the
 * position previously saved by ddi_prop_save_pos().
 */
2189 void
2190 ddi_prop_restore_pos(prop_handle_t *ph)
2191 {
2192 	ph->ph_cur_pos = ph->ph_save_pos;
2193 }
2194 
2195 /*
2196  * Property encode/decode functions
2197  */
2198 
2199 /*
2200  * Decode a single integer property
2201  */
2202 static int
2203 ddi_prop_fm_decode_int(prop_handle_t *ph, void *data, uint_t *nelements)
2204 {
2205 	int	i;
2206 	int	tmp;
2207 
2208 	/*
2209 	 * If there is nothing to decode return an error
2210 	 */
2211 	if (ph->ph_size == 0)
2212 		return (DDI_PROP_END_OF_DATA);
2213 
2214 	/*
2215 	 * Decode the property as a single integer and return it
2216 	 * in data if we were able to decode it.
2217 	 */
2218 	i = DDI_PROP_INT(ph, DDI_PROP_CMD_DECODE, &tmp);
2219 	if (i < DDI_PROP_RESULT_OK) {
2220 		switch (i) {
2221 		case DDI_PROP_RESULT_EOF:
2222 			return (DDI_PROP_END_OF_DATA);
2223 
2224 		case DDI_PROP_RESULT_ERROR:
2225 			return (DDI_PROP_CANNOT_DECODE);
2226 		}
2227 	}
2228 
2229 	*(int *)data = tmp;
2230 	*nelements = 1;
2231 	return (DDI_PROP_SUCCESS);
2232 }
2233 
2234 /*
2235  * Decode a single 64 bit integer property
2236  */
2237 static int
2238 ddi_prop_fm_decode_int64(prop_handle_t *ph, void *data, uint_t *nelements)
2239 {
2240 	int	i;
2241 	int64_t	tmp;
2242 
2243 	/*
2244 	 * If there is nothing to decode return an error
2245 	 */
2246 	if (ph->ph_size == 0)
2247 		return (DDI_PROP_END_OF_DATA);
2248 
2249 	/*
2250 	 * Decode the property as a single integer and return it
2251 	 * in data if we were able to decode it.
2252 	 */
2253 	i = DDI_PROP_INT64(ph, DDI_PROP_CMD_DECODE, &tmp);
2254 	if (i < DDI_PROP_RESULT_OK) {
2255 		switch (i) {
2256 		case DDI_PROP_RESULT_EOF:
2257 			return (DDI_PROP_END_OF_DATA);
2258 
2259 		case DDI_PROP_RESULT_ERROR:
2260 			return (DDI_PROP_CANNOT_DECODE);
2261 		}
2262 	}
2263 
2264 	*(int64_t *)data = tmp;
2265 	*nelements = 1;
2266 	return (DDI_PROP_SUCCESS);
2267 }
2268 
2269 /*
2270  * Decode an array of integers property
2271  */
2272 static int
2273 ddi_prop_fm_decode_ints(prop_handle_t *ph, void *data, uint_t *nelements)
2274 {
2275 	int	i;
2276 	int	cnt = 0;
2277 	int	*tmp;
2278 	int	*intp;
2279 	int	n;
2280 
2281 	/*
2282 	 * Figure out how many array elements there are by going through the
2283 	 * data without decoding it first and counting.
2284 	 */
2285 	for (;;) {
2286 		i = DDI_PROP_INT(ph, DDI_PROP_CMD_SKIP, NULL);
2287 		if (i < 0)
2288 			break;
2289 		cnt++;
2290 	}
2291 
2292 	/*
2293 	 * If there are no elements return an error
2294 	 */
2295 	if (cnt == 0)
2296 		return (DDI_PROP_END_OF_DATA);
2297 
2298 	/*
2299 	 * If we cannot skip through the data, we cannot decode it
2300 	 */
2301 	if (i == DDI_PROP_RESULT_ERROR)
2302 		return (DDI_PROP_CANNOT_DECODE);
2303 
2304 	/*
2305 	 * Reset the data pointer to the beginning of the encoded data
2306 	 */
2307 	ddi_prop_reset_pos(ph);
2308 
2309 	/*
2310 	 * Allocated memory to store the decoded value in.
2311 	 */
2312 	intp = ddi_prop_decode_alloc((cnt * sizeof (int)),
2313 		ddi_prop_free_ints);
2314 
2315 	/*
2316 	 * Decode each element and place it in the space we just allocated
2317 	 */
2318 	tmp = intp;
2319 	for (n = 0; n < cnt; n++, tmp++) {
2320 		i = DDI_PROP_INT(ph, DDI_PROP_CMD_DECODE, tmp);
2321 		if (i < DDI_PROP_RESULT_OK) {
2322 			/*
2323 			 * Free the space we just allocated
2324 			 * and return an error.
2325 			 */
2326 			ddi_prop_free(intp);
2327 			switch (i) {
2328 			case DDI_PROP_RESULT_EOF:
2329 				return (DDI_PROP_END_OF_DATA);
2330 
2331 			case DDI_PROP_RESULT_ERROR:
2332 				return (DDI_PROP_CANNOT_DECODE);
2333 			}
2334 		}
2335 	}
2336 
2337 	*nelements = cnt;
2338 	*(int **)data = intp;
2339 
2340 	return (DDI_PROP_SUCCESS);
2341 }
2342 
2343 /*
2344  * Decode a 64 bit integer array property
2345  */
2346 static int
2347 ddi_prop_fm_decode_int64_array(prop_handle_t *ph, void *data, uint_t *nelements)
2348 {
2349 	int	i;
2350 	int	n;
2351 	int	cnt = 0;
2352 	int64_t	*tmp;
2353 	int64_t	*intp;
2354 
2355 	/*
2356 	 * Count the number of array elements by going
2357 	 * through the data without decoding it.
2358 	 */
2359 	for (;;) {
2360 		i = DDI_PROP_INT64(ph, DDI_PROP_CMD_SKIP, NULL);
2361 		if (i < 0)
2362 			break;
2363 		cnt++;
2364 	}
2365 
2366 	/*
2367 	 * If there are no elements return an error
2368 	 */
2369 	if (cnt == 0)
2370 		return (DDI_PROP_END_OF_DATA);
2371 
2372 	/*
2373 	 * If we cannot skip through the data, we cannot decode it
2374 	 */
2375 	if (i == DDI_PROP_RESULT_ERROR)
2376 		return (DDI_PROP_CANNOT_DECODE);
2377 
2378 	/*
2379 	 * Reset the data pointer to the beginning of the encoded data
2380 	 */
2381 	ddi_prop_reset_pos(ph);
2382 
2383 	/*
2384 	 * Allocate memory to store the decoded value.
2385 	 */
2386 	intp = ddi_prop_decode_alloc((cnt * sizeof (int64_t)),
2387 		ddi_prop_free_ints);
2388 
2389 	/*
2390 	 * Decode each element and place it in the space allocated
2391 	 */
2392 	tmp = intp;
2393 	for (n = 0; n < cnt; n++, tmp++) {
2394 		i = DDI_PROP_INT64(ph, DDI_PROP_CMD_DECODE, tmp);
2395 		if (i < DDI_PROP_RESULT_OK) {
2396 			/*
2397 			 * Free the space we just allocated
2398 			 * and return an error.
2399 			 */
2400 			ddi_prop_free(intp);
2401 			switch (i) {
2402 			case DDI_PROP_RESULT_EOF:
2403 				return (DDI_PROP_END_OF_DATA);
2404 
2405 			case DDI_PROP_RESULT_ERROR:
2406 				return (DDI_PROP_CANNOT_DECODE);
2407 			}
2408 		}
2409 	}
2410 
2411 	*nelements = cnt;
2412 	*(int64_t **)data = intp;
2413 
2414 	return (DDI_PROP_SUCCESS);
2415 }
2416 
2417 /*
2418  * Encode an array of integers property (Can be one element)
2419  */
2420 int
2421 ddi_prop_fm_encode_ints(prop_handle_t *ph, void *data, uint_t nelements)
2422 {
2423 	int	i;
2424 	int	*tmp;
2425 	int	cnt;
2426 	int	size;
2427 
2428 	/*
2429 	 * If there is no data, we cannot do anything
2430 	 */
2431 	if (nelements == 0)
2432 		return (DDI_PROP_CANNOT_ENCODE);
2433 
2434 	/*
2435 	 * Get the size of an encoded int.
2436 	 */
2437 	size = DDI_PROP_INT(ph, DDI_PROP_CMD_GET_ESIZE, NULL);
2438 
2439 	if (size < DDI_PROP_RESULT_OK) {
2440 		switch (size) {
2441 		case DDI_PROP_RESULT_EOF:
2442 			return (DDI_PROP_END_OF_DATA);
2443 
2444 		case DDI_PROP_RESULT_ERROR:
2445 			return (DDI_PROP_CANNOT_ENCODE);
2446 		}
2447 	}
2448 
2449 	/*
2450 	 * Allocate space in the handle to store the encoded int.
2451 	 */
2452 	if (ddi_prop_encode_alloc(ph, size * nelements) !=
2453 		DDI_PROP_SUCCESS)
2454 		return (DDI_PROP_NO_MEMORY);
2455 
2456 	/*
2457 	 * Encode the array of ints.
2458 	 */
2459 	tmp = (int *)data;
2460 	for (cnt = 0; cnt < nelements; cnt++, tmp++) {
2461 		i = DDI_PROP_INT(ph, DDI_PROP_CMD_ENCODE, tmp);
2462 		if (i < DDI_PROP_RESULT_OK) {
2463 			switch (i) {
2464 			case DDI_PROP_RESULT_EOF:
2465 				return (DDI_PROP_END_OF_DATA);
2466 
2467 			case DDI_PROP_RESULT_ERROR:
2468 				return (DDI_PROP_CANNOT_ENCODE);
2469 			}
2470 		}
2471 	}
2472 
2473 	return (DDI_PROP_SUCCESS);
2474 }
2475 
2476 
2477 /*
2478  * Encode a 64 bit integer array property
2479  */
2480 int
2481 ddi_prop_fm_encode_int64(prop_handle_t *ph, void *data, uint_t nelements)
2482 {
2483 	int i;
2484 	int cnt;
2485 	int size;
2486 	int64_t *tmp;
2487 
2488 	/*
2489 	 * If there is no data, we cannot do anything
2490 	 */
2491 	if (nelements == 0)
2492 		return (DDI_PROP_CANNOT_ENCODE);
2493 
2494 	/*
2495 	 * Get the size of an encoded 64 bit int.
2496 	 */
2497 	size = DDI_PROP_INT64(ph, DDI_PROP_CMD_GET_ESIZE, NULL);
2498 
2499 	if (size < DDI_PROP_RESULT_OK) {
2500 		switch (size) {
2501 		case DDI_PROP_RESULT_EOF:
2502 			return (DDI_PROP_END_OF_DATA);
2503 
2504 		case DDI_PROP_RESULT_ERROR:
2505 			return (DDI_PROP_CANNOT_ENCODE);
2506 		}
2507 	}
2508 
2509 	/*
2510 	 * Allocate space in the handle to store the encoded int.
2511 	 */
2512 	if (ddi_prop_encode_alloc(ph, size * nelements) !=
2513 	    DDI_PROP_SUCCESS)
2514 		return (DDI_PROP_NO_MEMORY);
2515 
2516 	/*
2517 	 * Encode the array of ints.
2518 	 */
2519 	tmp = (int64_t *)data;
2520 	for (cnt = 0; cnt < nelements; cnt++, tmp++) {
2521 		i = DDI_PROP_INT64(ph, DDI_PROP_CMD_ENCODE, tmp);
2522 		if (i < DDI_PROP_RESULT_OK) {
2523 			switch (i) {
2524 			case DDI_PROP_RESULT_EOF:
2525 				return (DDI_PROP_END_OF_DATA);
2526 
2527 			case DDI_PROP_RESULT_ERROR:
2528 				return (DDI_PROP_CANNOT_ENCODE);
2529 			}
2530 		}
2531 	}
2532 
2533 	return (DDI_PROP_SUCCESS);
2534 }
2535 
2536 /*
2537  * Decode a single string property
2538  */
2539 static int
2540 ddi_prop_fm_decode_string(prop_handle_t *ph, void *data, uint_t *nelements)
2541 {
2542 	char		*tmp;
2543 	char		*str;
2544 	int		i;
2545 	int		size;
2546 
2547 	/*
2548 	 * If there is nothing to decode return an error
2549 	 */
2550 	if (ph->ph_size == 0)
2551 		return (DDI_PROP_END_OF_DATA);
2552 
2553 	/*
2554 	 * Get the decoded size of the encoded string.
2555 	 */
2556 	size = DDI_PROP_STR(ph, DDI_PROP_CMD_GET_DSIZE, NULL);
2557 	if (size < DDI_PROP_RESULT_OK) {
2558 		switch (size) {
2559 		case DDI_PROP_RESULT_EOF:
2560 			return (DDI_PROP_END_OF_DATA);
2561 
2562 		case DDI_PROP_RESULT_ERROR:
2563 			return (DDI_PROP_CANNOT_DECODE);
2564 		}
2565 	}
2566 
2567 	/*
2568 	 * Allocated memory to store the decoded value in.
2569 	 */
2570 	str = ddi_prop_decode_alloc((size_t)size, ddi_prop_free_string);
2571 
2572 	ddi_prop_reset_pos(ph);
2573 
2574 	/*
2575 	 * Decode the str and place it in the space we just allocated
2576 	 */
2577 	tmp = str;
2578 	i = DDI_PROP_STR(ph, DDI_PROP_CMD_DECODE, tmp);
2579 	if (i < DDI_PROP_RESULT_OK) {
2580 		/*
2581 		 * Free the space we just allocated
2582 		 * and return an error.
2583 		 */
2584 		ddi_prop_free(str);
2585 		switch (i) {
2586 		case DDI_PROP_RESULT_EOF:
2587 			return (DDI_PROP_END_OF_DATA);
2588 
2589 		case DDI_PROP_RESULT_ERROR:
2590 			return (DDI_PROP_CANNOT_DECODE);
2591 		}
2592 	}
2593 
2594 	*(char **)data = str;
2595 	*nelements = 1;
2596 
2597 	return (DDI_PROP_SUCCESS);
2598 }
2599 
2600 /*
2601  * Decode an array of strings.
2602  */
2603 int
2604 ddi_prop_fm_decode_strings(prop_handle_t *ph, void *data, uint_t *nelements)
2605 {
2606 	int		cnt = 0;
2607 	char		**strs;
2608 	char		**tmp;
2609 	char		*ptr;
2610 	int		i;
2611 	int		n;
2612 	int		size;
2613 	size_t		nbytes;
2614 
2615 	/*
2616 	 * Figure out how many array elements there are by going through the
2617 	 * data without decoding it first and counting.
2618 	 */
2619 	for (;;) {
2620 		i = DDI_PROP_STR(ph, DDI_PROP_CMD_SKIP, NULL);
2621 		if (i < 0)
2622 			break;
2623 		cnt++;
2624 	}
2625 
2626 	/*
2627 	 * If there are no elements return an error
2628 	 */
2629 	if (cnt == 0)
2630 		return (DDI_PROP_END_OF_DATA);
2631 
2632 	/*
2633 	 * If we cannot skip through the data, we cannot decode it
2634 	 */
2635 	if (i == DDI_PROP_RESULT_ERROR)
2636 		return (DDI_PROP_CANNOT_DECODE);
2637 
2638 	/*
2639 	 * Reset the data pointer to the beginning of the encoded data
2640 	 */
2641 	ddi_prop_reset_pos(ph);
2642 
2643 	/*
2644 	 * Figure out how much memory we need for the sum total
2645 	 */
2646 	nbytes = (cnt + 1) * sizeof (char *);
2647 
2648 	for (n = 0; n < cnt; n++) {
2649 		/*
2650 		 * Get the decoded size of the current encoded string.
2651 		 */
2652 		size = DDI_PROP_STR(ph, DDI_PROP_CMD_GET_DSIZE, NULL);
2653 		if (size < DDI_PROP_RESULT_OK) {
2654 			switch (size) {
2655 			case DDI_PROP_RESULT_EOF:
2656 				return (DDI_PROP_END_OF_DATA);
2657 
2658 			case DDI_PROP_RESULT_ERROR:
2659 				return (DDI_PROP_CANNOT_DECODE);
2660 			}
2661 		}
2662 
2663 		nbytes += size;
2664 	}
2665 
2666 	/*
2667 	 * Allocate memory in which to store the decoded strings.
2668 	 */
2669 	strs = ddi_prop_decode_alloc(nbytes, ddi_prop_free_strings);
2670 
2671 	/*
2672 	 * Set up pointers for each string by figuring out yet
2673 	 * again how long each string is.
2674 	 */
2675 	ddi_prop_reset_pos(ph);
2676 	ptr = (caddr_t)strs + ((cnt + 1) * sizeof (char *));
2677 	for (tmp = strs, n = 0; n < cnt; n++, tmp++) {
2678 		/*
2679 		 * Get the decoded size of the current encoded string.
2680 		 */
2681 		size = DDI_PROP_STR(ph, DDI_PROP_CMD_GET_DSIZE, NULL);
2682 		if (size < DDI_PROP_RESULT_OK) {
2683 			ddi_prop_free(strs);
2684 			switch (size) {
2685 			case DDI_PROP_RESULT_EOF:
2686 				return (DDI_PROP_END_OF_DATA);
2687 
2688 			case DDI_PROP_RESULT_ERROR:
2689 				return (DDI_PROP_CANNOT_DECODE);
2690 			}
2691 		}
2692 
2693 		*tmp = ptr;
2694 		ptr += size;
2695 	}
2696 
2697 	/*
2698 	 * String array is terminated by a NULL
2699 	 */
2700 	*tmp = NULL;
2701 
2702 	/*
2703 	 * Finally, we can decode each string
2704 	 */
2705 	ddi_prop_reset_pos(ph);
2706 	for (tmp = strs, n = 0; n < cnt; n++, tmp++) {
2707 		i = DDI_PROP_STR(ph, DDI_PROP_CMD_DECODE, *tmp);
2708 		if (i < DDI_PROP_RESULT_OK) {
2709 			/*
2710 			 * Free the space we just allocated
2711 			 * and return an error
2712 			 */
2713 			ddi_prop_free(strs);
2714 			switch (i) {
2715 			case DDI_PROP_RESULT_EOF:
2716 				return (DDI_PROP_END_OF_DATA);
2717 
2718 			case DDI_PROP_RESULT_ERROR:
2719 				return (DDI_PROP_CANNOT_DECODE);
2720 			}
2721 		}
2722 	}
2723 
2724 	*(char ***)data = strs;
2725 	*nelements = cnt;
2726 
2727 	return (DDI_PROP_SUCCESS);
2728 }
2729 
2730 /*
2731  * Encode a string.
2732  */
2733 int
2734 ddi_prop_fm_encode_string(prop_handle_t *ph, void *data, uint_t nelements)
2735 {
2736 	char		**tmp;
2737 	int		size;
2738 	int		i;
2739 
2740 	/*
2741 	 * If there is no data, we cannot do anything
2742 	 */
2743 	if (nelements == 0)
2744 		return (DDI_PROP_CANNOT_ENCODE);
2745 
2746 	/*
2747 	 * Get the size of the encoded string.
2748 	 */
2749 	tmp = (char **)data;
2750 	size = DDI_PROP_STR(ph, DDI_PROP_CMD_GET_ESIZE, *tmp);
2751 	if (size < DDI_PROP_RESULT_OK) {
2752 		switch (size) {
2753 		case DDI_PROP_RESULT_EOF:
2754 			return (DDI_PROP_END_OF_DATA);
2755 
2756 		case DDI_PROP_RESULT_ERROR:
2757 			return (DDI_PROP_CANNOT_ENCODE);
2758 		}
2759 	}
2760 
2761 	/*
2762 	 * Allocate space in the handle to store the encoded string.
2763 	 */
2764 	if (ddi_prop_encode_alloc(ph, size) != DDI_PROP_SUCCESS)
2765 		return (DDI_PROP_NO_MEMORY);
2766 
2767 	ddi_prop_reset_pos(ph);
2768 
2769 	/*
2770 	 * Encode the string.
2771 	 */
2772 	tmp = (char **)data;
2773 	i = DDI_PROP_STR(ph, DDI_PROP_CMD_ENCODE, *tmp);
2774 	if (i < DDI_PROP_RESULT_OK) {
2775 		switch (i) {
2776 		case DDI_PROP_RESULT_EOF:
2777 			return (DDI_PROP_END_OF_DATA);
2778 
2779 		case DDI_PROP_RESULT_ERROR:
2780 			return (DDI_PROP_CANNOT_ENCODE);
2781 		}
2782 	}
2783 
2784 	return (DDI_PROP_SUCCESS);
2785 }
2786 
2787 
2788 /*
2789  * Encode an array of strings.
2790  */
2791 int
2792 ddi_prop_fm_encode_strings(prop_handle_t *ph, void *data, uint_t nelements)
2793 {
2794 	int		cnt = 0;
2795 	char		**tmp;
2796 	int		size;
2797 	uint_t		total_size;
2798 	int		i;
2799 
2800 	/*
2801 	 * If there is no data, we cannot do anything
2802 	 */
2803 	if (nelements == 0)
2804 		return (DDI_PROP_CANNOT_ENCODE);
2805 
2806 	/*
2807 	 * Get the total size required to encode all the strings.
2808 	 */
2809 	total_size = 0;
2810 	tmp = (char **)data;
2811 	for (cnt = 0; cnt < nelements; cnt++, tmp++) {
2812 		size = DDI_PROP_STR(ph, DDI_PROP_CMD_GET_ESIZE, *tmp);
2813 		if (size < DDI_PROP_RESULT_OK) {
2814 			switch (size) {
2815 			case DDI_PROP_RESULT_EOF:
2816 				return (DDI_PROP_END_OF_DATA);
2817 
2818 			case DDI_PROP_RESULT_ERROR:
2819 				return (DDI_PROP_CANNOT_ENCODE);
2820 			}
2821 		}
2822 		total_size += (uint_t)size;
2823 	}
2824 
2825 	/*
2826 	 * Allocate space in the handle to store the encoded strings.
2827 	 */
2828 	if (ddi_prop_encode_alloc(ph, total_size) != DDI_PROP_SUCCESS)
2829 		return (DDI_PROP_NO_MEMORY);
2830 
2831 	ddi_prop_reset_pos(ph);
2832 
2833 	/*
2834 	 * Encode the array of strings.
2835 	 */
2836 	tmp = (char **)data;
2837 	for (cnt = 0; cnt < nelements; cnt++, tmp++) {
2838 		i = DDI_PROP_STR(ph, DDI_PROP_CMD_ENCODE, *tmp);
2839 		if (i < DDI_PROP_RESULT_OK) {
2840 			switch (i) {
2841 			case DDI_PROP_RESULT_EOF:
2842 				return (DDI_PROP_END_OF_DATA);
2843 
2844 			case DDI_PROP_RESULT_ERROR:
2845 				return (DDI_PROP_CANNOT_ENCODE);
2846 			}
2847 		}
2848 	}
2849 
2850 	return (DDI_PROP_SUCCESS);
2851 }
2852 
2853 
2854 /*
2855  * Decode an array of bytes.
2856  */
2857 static int
2858 ddi_prop_fm_decode_bytes(prop_handle_t *ph, void *data, uint_t *nelements)
2859 {
2860 	uchar_t		*tmp;
2861 	int		nbytes;
2862 	int		i;
2863 
2864 	/*
2865 	 * If there are no elements return an error
2866 	 */
2867 	if (ph->ph_size == 0)
2868 		return (DDI_PROP_END_OF_DATA);
2869 
2870 	/*
2871 	 * Get the size of the encoded array of bytes.
2872 	 */
2873 	nbytes = DDI_PROP_BYTES(ph, DDI_PROP_CMD_GET_DSIZE,
2874 		data, ph->ph_size);
2875 	if (nbytes < DDI_PROP_RESULT_OK) {
2876 		switch (nbytes) {
2877 		case DDI_PROP_RESULT_EOF:
2878 			return (DDI_PROP_END_OF_DATA);
2879 
2880 		case DDI_PROP_RESULT_ERROR:
2881 			return (DDI_PROP_CANNOT_DECODE);
2882 		}
2883 	}
2884 
2885 	/*
2886 	 * Allocated memory to store the decoded value in.
2887 	 */
2888 	tmp = ddi_prop_decode_alloc(nbytes, ddi_prop_free_bytes);
2889 
2890 	/*
2891 	 * Decode each element and place it in the space we just allocated
2892 	 */
2893 	i = DDI_PROP_BYTES(ph, DDI_PROP_CMD_DECODE, tmp, nbytes);
2894 	if (i < DDI_PROP_RESULT_OK) {
2895 		/*
2896 		 * Free the space we just allocated
2897 		 * and return an error
2898 		 */
2899 		ddi_prop_free(tmp);
2900 		switch (i) {
2901 		case DDI_PROP_RESULT_EOF:
2902 			return (DDI_PROP_END_OF_DATA);
2903 
2904 		case DDI_PROP_RESULT_ERROR:
2905 			return (DDI_PROP_CANNOT_DECODE);
2906 		}
2907 	}
2908 
2909 	*(uchar_t **)data = tmp;
2910 	*nelements = nbytes;
2911 
2912 	return (DDI_PROP_SUCCESS);
2913 }
2914 
2915 /*
2916  * Encode an array of bytes.
2917  */
2918 int
2919 ddi_prop_fm_encode_bytes(prop_handle_t *ph, void *data, uint_t nelements)
2920 {
2921 	int		size;
2922 	int		i;
2923 
2924 	/*
2925 	 * If there are no elements, then this is a boolean property,
2926 	 * so just create a property handle with no data and return.
2927 	 */
2928 	if (nelements == 0) {
2929 		(void) ddi_prop_encode_alloc(ph, 0);
2930 		return (DDI_PROP_SUCCESS);
2931 	}
2932 
2933 	/*
2934 	 * Get the size of the encoded array of bytes.
2935 	 */
2936 	size = DDI_PROP_BYTES(ph, DDI_PROP_CMD_GET_ESIZE, (uchar_t *)data,
2937 		nelements);
2938 	if (size < DDI_PROP_RESULT_OK) {
2939 		switch (size) {
2940 		case DDI_PROP_RESULT_EOF:
2941 			return (DDI_PROP_END_OF_DATA);
2942 
2943 		case DDI_PROP_RESULT_ERROR:
2944 			return (DDI_PROP_CANNOT_DECODE);
2945 		}
2946 	}
2947 
2948 	/*
2949 	 * Allocate space in the handle to store the encoded bytes.
2950 	 */
2951 	if (ddi_prop_encode_alloc(ph, (uint_t)size) != DDI_PROP_SUCCESS)
2952 		return (DDI_PROP_NO_MEMORY);
2953 
2954 	/*
2955 	 * Encode the array of bytes.
2956 	 */
2957 	i = DDI_PROP_BYTES(ph, DDI_PROP_CMD_ENCODE, (uchar_t *)data,
2958 		nelements);
2959 	if (i < DDI_PROP_RESULT_OK) {
2960 		switch (i) {
2961 		case DDI_PROP_RESULT_EOF:
2962 			return (DDI_PROP_END_OF_DATA);
2963 
2964 		case DDI_PROP_RESULT_ERROR:
2965 			return (DDI_PROP_CANNOT_ENCODE);
2966 		}
2967 	}
2968 
2969 	return (DDI_PROP_SUCCESS);
2970 }
2971 
2972 /*
2973  * OBP 1275 integer, string and byte operators.
2974  *
2975  * DDI_PROP_CMD_DECODE:
2976  *
2977  *	DDI_PROP_RESULT_ERROR:		cannot decode the data
2978  *	DDI_PROP_RESULT_EOF:		end of data
2979  *	DDI_PROP_OK:			data was decoded
2980  *
2981  * DDI_PROP_CMD_ENCODE:
2982  *
2983  *	DDI_PROP_RESULT_ERROR:		cannot encode the data
2984  *	DDI_PROP_RESULT_EOF:		end of data
2985  *	DDI_PROP_OK:			data was encoded
2986  *
2987  * DDI_PROP_CMD_SKIP:
2988  *
2989  *	DDI_PROP_RESULT_ERROR:		cannot skip the data
2990  *	DDI_PROP_RESULT_EOF:		end of data
2991  *	DDI_PROP_OK:			data was skipped
2992  *
2993  * DDI_PROP_CMD_GET_ESIZE:
2994  *
2995  *	DDI_PROP_RESULT_ERROR:		cannot get encoded size
2996  *	DDI_PROP_RESULT_EOF:		end of data
2997  *	> 0:				the encoded size
2998  *
2999  * DDI_PROP_CMD_GET_DSIZE:
3000  *
3001  *	DDI_PROP_RESULT_ERROR:		cannot get decoded size
3002  *	DDI_PROP_RESULT_EOF:		end of data
3003  *	> 0:				the decoded size
3004  */
3005 
3006 /*
3007  * OBP 1275 integer operator
3008  *
3009  * OBP properties are a byte stream of data, so integers may not be
3010  * properly aligned.  Therefore we need to copy them one byte at a time.
3011  */
int
ddi_prop_1275_int(prop_handle_t *ph, uint_t cmd, int *data)
{
	int	i;

	switch (cmd) {
	case DDI_PROP_CMD_DECODE:
		/*
		 * Check that there is encoded data
		 */
		if (ph->ph_cur_pos == NULL || ph->ph_size == 0)
			return (DDI_PROP_RESULT_ERROR);
		/*
		 * NOTE(review): both bounds checks below compare
		 * `int *' pointers against `(int *)ph->ph_data' plus a
		 * BYTE count (ph_size), so the offset is scaled by
		 * sizeof (int) and the check is looser than a byte-wise
		 * bound would be.  Preserved as-is; confirm intent
		 * before tightening.
		 */
		if (ph->ph_flags & PH_FROM_PROM) {
			i = MIN(ph->ph_size, PROP_1275_INT_SIZE);
			if ((int *)ph->ph_cur_pos > ((int *)ph->ph_data +
				ph->ph_size - i))
				return (DDI_PROP_RESULT_ERROR);
		} else {
			if (ph->ph_size < sizeof (int) ||
			((int *)ph->ph_cur_pos > ((int *)ph->ph_data +
				ph->ph_size - sizeof (int))))
			return (DDI_PROP_RESULT_ERROR);
		}

		/*
		 * Copy the integer, using the implementation-specific
		 * copy function if the property is coming from the PROM.
		 * PROM ints may be short or unaligned, hence the helper.
		 */
		if (ph->ph_flags & PH_FROM_PROM) {
			*data = impl_ddi_prop_int_from_prom(
				(uchar_t *)ph->ph_cur_pos,
				(ph->ph_size < PROP_1275_INT_SIZE) ?
				ph->ph_size : PROP_1275_INT_SIZE);
		} else {
			bcopy(ph->ph_cur_pos, data, sizeof (int));
		}

		/*
		 * Move the current location to the start of the next
		 * bit of undecoded data.
		 */
		ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos +
			PROP_1275_INT_SIZE;
		return (DDI_PROP_RESULT_OK);

	case DDI_PROP_CMD_ENCODE:
		/*
		 * Check that there is room to encoded the data
		 * (same scaled-pointer caveat as in the decode case).
		 */
		if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
			ph->ph_size < PROP_1275_INT_SIZE ||
			((int *)ph->ph_cur_pos > ((int *)ph->ph_data +
				ph->ph_size - sizeof (int))))
			return (DDI_PROP_RESULT_ERROR);

		/*
		 * Encode the integer into the byte stream one byte at a
		 * time.
		 */
		bcopy(data, ph->ph_cur_pos, sizeof (int));

		/*
		 * Move the current location to the start of the next bit of
		 * space where we can store encoded data.
		 */
		ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos + PROP_1275_INT_SIZE;
		return (DDI_PROP_RESULT_OK);

	case DDI_PROP_CMD_SKIP:
		/*
		 * Check that there is encoded data
		 */
		if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
				ph->ph_size < PROP_1275_INT_SIZE)
			return (DDI_PROP_RESULT_ERROR);


		/* At or past the end of the buffer: report EOF. */
		if ((caddr_t)ph->ph_cur_pos ==
				(caddr_t)ph->ph_data + ph->ph_size) {
			return (DDI_PROP_RESULT_EOF);
		} else if ((caddr_t)ph->ph_cur_pos >
				(caddr_t)ph->ph_data + ph->ph_size) {
			return (DDI_PROP_RESULT_EOF);
		}

		/*
		 * Move the current location to the start of the next bit of
		 * undecoded data.
		 */
		ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos + PROP_1275_INT_SIZE;
		return (DDI_PROP_RESULT_OK);

	case DDI_PROP_CMD_GET_ESIZE:
		/*
		 * Return the size of an encoded integer on OBP
		 */
		return (PROP_1275_INT_SIZE);

	case DDI_PROP_CMD_GET_DSIZE:
		/*
		 * Return the size of a decoded integer on the system.
		 */
		return (sizeof (int));

	default:
#ifdef DEBUG
		panic("ddi_prop_1275_int: %x impossible", cmd);
		/*NOTREACHED*/
#else
		return (DDI_PROP_RESULT_ERROR);
#endif	/* DEBUG */
	}
}
3125 
3126 /*
3127  * 64 bit integer operator.
3128  *
3129  * This is an extension, defined by Sun, to the 1275 integer
3130  * operator.  This routine handles the encoding/decoding of
3131  * 64 bit integer properties.
3132  */
int
ddi_prop_int64_op(prop_handle_t *ph, uint_t cmd, int64_t *data)
{

	switch (cmd) {
	case DDI_PROP_CMD_DECODE:
		/*
		 * Check that there is encoded data
		 */
		if (ph->ph_cur_pos == NULL || ph->ph_size == 0)
			return (DDI_PROP_RESULT_ERROR);
		/*
		 * 64-bit integers are a Sun extension; properties coming
		 * from the PROM cannot carry them, so PH_FROM_PROM is
		 * rejected outright here and again below.
		 *
		 * NOTE(review): the bounds check compares `int64_t *'
		 * pointers against `(int64_t *)ph->ph_data' plus a BYTE
		 * count (ph_size), so the offset is scaled by
		 * sizeof (int64_t).  Preserved as-is; confirm intent
		 * before tightening.
		 */
		if (ph->ph_flags & PH_FROM_PROM) {
			return (DDI_PROP_RESULT_ERROR);
		} else {
			if (ph->ph_size < sizeof (int64_t) ||
			    ((int64_t *)ph->ph_cur_pos >
			    ((int64_t *)ph->ph_data +
			    ph->ph_size - sizeof (int64_t))))
				return (DDI_PROP_RESULT_ERROR);
		}
		/*
		 * Copy the integer, using the implementation-specific
		 * copy function if the property is coming from the PROM.
		 */
		if (ph->ph_flags & PH_FROM_PROM) {
			return (DDI_PROP_RESULT_ERROR);
		} else {
			bcopy(ph->ph_cur_pos, data, sizeof (int64_t));
		}

		/*
		 * Move the current location to the start of the next
		 * bit of undecoded data.
		 */
		ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos +
		    sizeof (int64_t);
			return (DDI_PROP_RESULT_OK);

	case DDI_PROP_CMD_ENCODE:
		/*
		 * Check that there is room to encoded the data
		 * (same scaled-pointer caveat as in the decode case).
		 */
		if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
		    ph->ph_size < sizeof (int64_t) ||
		    ((int64_t *)ph->ph_cur_pos > ((int64_t *)ph->ph_data +
		    ph->ph_size - sizeof (int64_t))))
			return (DDI_PROP_RESULT_ERROR);

		/*
		 * Encode the integer into the byte stream one byte at a
		 * time.
		 */
		bcopy(data, ph->ph_cur_pos, sizeof (int64_t));

		/*
		 * Move the current location to the start of the next bit of
		 * space where we can store encoded data.
		 */
		ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos +
		    sizeof (int64_t);
		return (DDI_PROP_RESULT_OK);

	case DDI_PROP_CMD_SKIP:
		/*
		 * Check that there is encoded data
		 */
		if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
		    ph->ph_size < sizeof (int64_t))
			return (DDI_PROP_RESULT_ERROR);

		/* At or past the end of the buffer: report EOF. */
		if ((caddr_t)ph->ph_cur_pos ==
		    (caddr_t)ph->ph_data + ph->ph_size) {
			return (DDI_PROP_RESULT_EOF);
		} else if ((caddr_t)ph->ph_cur_pos >
		    (caddr_t)ph->ph_data + ph->ph_size) {
			return (DDI_PROP_RESULT_EOF);
		}

		/*
		 * Move the current location to the start of
		 * the next bit of undecoded data.
		 */
		ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos +
		    sizeof (int64_t);
			return (DDI_PROP_RESULT_OK);

	case DDI_PROP_CMD_GET_ESIZE:
		/*
		 * Return the size of an encoded integer on OBP
		 */
		return (sizeof (int64_t));

	case DDI_PROP_CMD_GET_DSIZE:
		/*
		 * Return the size of a decoded integer on the system.
		 */
		return (sizeof (int64_t));

	default:
#ifdef DEBUG
		panic("ddi_prop_int64_op: %x impossible", cmd);
		/*NOTREACHED*/
#else
		return (DDI_PROP_RESULT_ERROR);
#endif  /* DEBUG */
	}
}
3240 
3241 /*
3242  * OBP 1275 string operator.
3243  *
3244  * OBP strings are NULL terminated.
3245  */
3246 int
3247 ddi_prop_1275_string(prop_handle_t *ph, uint_t cmd, char *data)
3248 {
3249 	int	n;
3250 	char	*p;
3251 	char	*end;
3252 
3253 	switch (cmd) {
3254 	case DDI_PROP_CMD_DECODE:
3255 		/*
3256 		 * Check that there is encoded data
3257 		 */
3258 		if (ph->ph_cur_pos == NULL || ph->ph_size == 0) {
3259 			return (DDI_PROP_RESULT_ERROR);
3260 		}
3261 
3262 		/*
3263 		 * Match DDI_PROP_CMD_GET_DSIZE logic for when to stop and
3264 		 * how to NULL terminate result.
3265 		 */
3266 		p = (char *)ph->ph_cur_pos;
3267 		end = (char *)ph->ph_data + ph->ph_size;
3268 		if (p >= end)
3269 			return (DDI_PROP_RESULT_EOF);
3270 
3271 		while (p < end) {
3272 			*data++ = *p;
3273 			if (*p++ == 0) {	/* NULL from OBP */
3274 				ph->ph_cur_pos = p;
3275 				return (DDI_PROP_RESULT_OK);
3276 			}
3277 		}
3278 
3279 		/*
3280 		 * If OBP did not NULL terminate string, which happens
3281 		 * (at least) for 'true'/'false' boolean values, account for
3282 		 * the space and store null termination on decode.
3283 		 */
3284 		ph->ph_cur_pos = p;
3285 		*data = 0;
3286 		return (DDI_PROP_RESULT_OK);
3287 
3288 	case DDI_PROP_CMD_ENCODE:
3289 		/*
3290 		 * Check that there is room to encoded the data
3291 		 */
3292 		if (ph->ph_cur_pos == NULL || ph->ph_size == 0) {
3293 			return (DDI_PROP_RESULT_ERROR);
3294 		}
3295 
3296 		n = strlen(data) + 1;
3297 		if ((char *)ph->ph_cur_pos > ((char *)ph->ph_data +
3298 				ph->ph_size - n)) {
3299 			return (DDI_PROP_RESULT_ERROR);
3300 		}
3301 
3302 		/*
3303 		 * Copy the NULL terminated string
3304 		 */
3305 		bcopy(data, ph->ph_cur_pos, n);
3306 
3307 		/*
3308 		 * Move the current location to the start of the next bit of
3309 		 * space where we can store encoded data.
3310 		 */
3311 		ph->ph_cur_pos = (char *)ph->ph_cur_pos + n;
3312 		return (DDI_PROP_RESULT_OK);
3313 
3314 	case DDI_PROP_CMD_SKIP:
3315 		/*
3316 		 * Check that there is encoded data
3317 		 */
3318 		if (ph->ph_cur_pos == NULL || ph->ph_size == 0) {
3319 			return (DDI_PROP_RESULT_ERROR);
3320 		}
3321 
3322 		/*
3323 		 * Return the string length plus one for the NULL
3324 		 * We know the size of the property, we need to
3325 		 * ensure that the string is properly formatted,
3326 		 * since we may be looking up random OBP data.
3327 		 */
3328 		p = (char *)ph->ph_cur_pos;
3329 		end = (char *)ph->ph_data + ph->ph_size;
3330 		if (p >= end)
3331 			return (DDI_PROP_RESULT_EOF);
3332 
3333 		while (p < end) {
3334 			if (*p++ == 0) {	/* NULL from OBP */
3335 				ph->ph_cur_pos = p;
3336 				return (DDI_PROP_RESULT_OK);
3337 			}
3338 		}
3339 
3340 		/*
3341 		 * Accommodate the fact that OBP does not always NULL
3342 		 * terminate strings.
3343 		 */
3344 		ph->ph_cur_pos = p;
3345 		return (DDI_PROP_RESULT_OK);
3346 
3347 	case DDI_PROP_CMD_GET_ESIZE:
3348 		/*
3349 		 * Return the size of the encoded string on OBP.
3350 		 */
3351 		return (strlen(data) + 1);
3352 
3353 	case DDI_PROP_CMD_GET_DSIZE:
3354 		/*
3355 		 * Return the string length plus one for the NULL.
3356 		 * We know the size of the property, we need to
3357 		 * ensure that the string is properly formatted,
3358 		 * since we may be looking up random OBP data.
3359 		 */
3360 		p = (char *)ph->ph_cur_pos;
3361 		end = (char *)ph->ph_data + ph->ph_size;
3362 		if (p >= end)
3363 			return (DDI_PROP_RESULT_EOF);
3364 
3365 		for (n = 0; p < end; n++) {
3366 			if (*p++ == 0) {	/* NULL from OBP */
3367 				ph->ph_cur_pos = p;
3368 				return (n + 1);
3369 			}
3370 		}
3371 
3372 		/*
3373 		 * If OBP did not NULL terminate string, which happens for
3374 		 * 'true'/'false' boolean values, account for the space
3375 		 * to store null termination here.
3376 		 */
3377 		ph->ph_cur_pos = p;
3378 		return (n + 1);
3379 
3380 	default:
3381 #ifdef DEBUG
3382 		panic("ddi_prop_1275_string: %x impossible", cmd);
3383 		/*NOTREACHED*/
3384 #else
3385 		return (DDI_PROP_RESULT_ERROR);
3386 #endif	/* DEBUG */
3387 	}
3388 }
3389 
3390 /*
3391  * OBP 1275 byte operator
3392  *
3393  * Caller must specify the number of bytes to get.  OBP encodes bytes
3394  * as a byte so there is a 1-to-1 translation.
3395  */
3396 int
3397 ddi_prop_1275_bytes(prop_handle_t *ph, uint_t cmd, uchar_t *data,
3398 	uint_t nelements)
3399 {
3400 	switch (cmd) {
3401 	case DDI_PROP_CMD_DECODE:
3402 		/*
3403 		 * Check that there is encoded data
3404 		 */
3405 		if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
3406 			ph->ph_size < nelements ||
3407 			((char *)ph->ph_cur_pos > ((char *)ph->ph_data +
3408 				ph->ph_size - nelements)))
3409 			return (DDI_PROP_RESULT_ERROR);
3410 
3411 		/*
3412 		 * Copy out the bytes
3413 		 */
3414 		bcopy(ph->ph_cur_pos, data, nelements);
3415 
3416 		/*
3417 		 * Move the current location
3418 		 */
3419 		ph->ph_cur_pos = (char *)ph->ph_cur_pos + nelements;
3420 		return (DDI_PROP_RESULT_OK);
3421 
3422 	case DDI_PROP_CMD_ENCODE:
3423 		/*
3424 		 * Check that there is room to encode the data
3425 		 */
3426 		if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
3427 			ph->ph_size < nelements ||
3428 			((char *)ph->ph_cur_pos > ((char *)ph->ph_data +
3429 				ph->ph_size - nelements)))
3430 			return (DDI_PROP_RESULT_ERROR);
3431 
3432 		/*
3433 		 * Copy in the bytes
3434 		 */
3435 		bcopy(data, ph->ph_cur_pos, nelements);
3436 
3437 		/*
3438 		 * Move the current location to the start of the next bit of
3439 		 * space where we can store encoded data.
3440 		 */
3441 		ph->ph_cur_pos = (char *)ph->ph_cur_pos + nelements;
3442 		return (DDI_PROP_RESULT_OK);
3443 
3444 	case DDI_PROP_CMD_SKIP:
3445 		/*
3446 		 * Check that there is encoded data
3447 		 */
3448 		if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
3449 				ph->ph_size < nelements)
3450 			return (DDI_PROP_RESULT_ERROR);
3451 
3452 		if ((char *)ph->ph_cur_pos > ((char *)ph->ph_data +
3453 				ph->ph_size - nelements))
3454 			return (DDI_PROP_RESULT_EOF);
3455 
3456 		/*
3457 		 * Move the current location
3458 		 */
3459 		ph->ph_cur_pos = (char *)ph->ph_cur_pos + nelements;
3460 		return (DDI_PROP_RESULT_OK);
3461 
3462 	case DDI_PROP_CMD_GET_ESIZE:
3463 		/*
3464 		 * The size in bytes of the encoded size is the
3465 		 * same as the decoded size provided by the caller.
3466 		 */
3467 		return (nelements);
3468 
3469 	case DDI_PROP_CMD_GET_DSIZE:
3470 		/*
3471 		 * Just return the number of bytes specified by the caller.
3472 		 */
3473 		return (nelements);
3474 
3475 	default:
3476 #ifdef DEBUG
3477 		panic("ddi_prop_1275_bytes: %x impossible", cmd);
3478 		/*NOTREACHED*/
3479 #else
3480 		return (DDI_PROP_RESULT_ERROR);
3481 #endif	/* DEBUG */
3482 	}
3483 }
3484 
3485 /*
3486  * Used for properties that come from the OBP, hardware configuration files,
3487  * or that are created by calls to ddi_prop_update(9F).
3488  */
static struct prop_handle_ops prop_1275_ops = {
	ddi_prop_1275_int,	/* integer encode/decode operator */
	ddi_prop_1275_string,	/* string encode/decode operator */
	ddi_prop_1275_bytes,	/* byte-array encode/decode operator */
	ddi_prop_int64_op	/* 64-bit integer encode/decode operator */
};
3495 
3496 
3497 /*
3498  * Interface to create/modify a managed property on child's behalf...
3499  * Flags interpreted are:
3500  *	DDI_PROP_CANSLEEP:	Allow memory allocation to sleep.
3501  *	DDI_PROP_SYSTEM_DEF:	Manipulate system list rather than driver list.
3502  *
3503  * Use same dev_t when modifying or undefining a property.
3504  * Search for properties with DDI_DEV_T_ANY to match first named
3505  * property on the list.
3506  *
3507  * Properties are stored LIFO and subsequently will match the first
3508  * `matching' instance.
3509  */
3510 
3511 /*
3512  * ddi_prop_add:	Add a software defined property
3513  */
3514 
/*
 * DDI_NEW_PROP_T: allocate a zeroed ddi_prop_t.
 * km_flags are KM_SLEEP or KM_NOSLEEP; with KM_NOSLEEP the allocation
 * can fail and yield NULL, so callers must check the result.
 */

#define	DDI_NEW_PROP_T(km_flags)	\
	(kmem_zalloc(sizeof (ddi_prop_t), km_flags))
3522 
/*
 * Allocate a new ddi_prop_t and link it at the head of the devinfo's
 * driver, system, or hardware property list (selected by flags).
 * Returns DDI_PROP_SUCCESS, DDI_PROP_INVAL_ARG, or DDI_PROP_NO_MEMORY.
 */
static int
ddi_prop_add(dev_t dev, dev_info_t *dip, int flags,
    char *name, caddr_t value, int length)
{
	ddi_prop_t	*new_propp, *propp;
	ddi_prop_t	**list_head = &(DEVI(dip)->devi_drv_prop_ptr);
	int		km_flags = KM_NOSLEEP;
	int		name_buf_len;

	/*
	 * If dev_t is DDI_DEV_T_ANY or name's length is zero return error.
	 */

	if (dev == DDI_DEV_T_ANY || name == (char *)0 || strlen(name) == 0)
		return (DDI_PROP_INVAL_ARG);

	/* DDI_PROP_CANSLEEP allows the allocations below to block. */
	if (flags & DDI_PROP_CANSLEEP)
		km_flags = KM_SLEEP;

	/* Select the target list: driver (default), system, or hardware. */
	if (flags & DDI_PROP_SYSTEM_DEF)
		list_head = &(DEVI(dip)->devi_sys_prop_ptr);
	else if (flags & DDI_PROP_HW_DEF)
		list_head = &(DEVI(dip)->devi_hw_prop_ptr);

	if ((new_propp = DDI_NEW_PROP_T(km_flags)) == NULL)  {
		cmn_err(CE_CONT, prop_no_mem_msg, name);
		return (DDI_PROP_NO_MEMORY);
	}

	/*
	 * If dev is major number 0, then we need to do a ddi_name_to_major
	 * to get the real major number for the device.  This needs to be
	 * done because some drivers need to call ddi_prop_create in their
	 * attach routines but they don't have a dev.  By creating the dev
	 * ourself if the major number is 0, drivers will not have to know what
	 * their major number.	They can just create a dev with major number
	 * 0 and pass it in.  For device 0, we will be doing a little extra
	 * work by recreating the same dev that we already have, but its the
	 * price you pay :-).
	 *
	 * This fixes bug #1098060.
	 */
	if (getmajor(dev) == DDI_MAJOR_T_UNKNOWN) {
		new_propp->prop_dev =
		    makedevice(ddi_name_to_major(DEVI(dip)->devi_binding_name),
		    getminor(dev));
	} else
		new_propp->prop_dev = dev;

	/*
	 * Allocate space for property name and copy it in...
	 */

	name_buf_len = strlen(name) + 1;
	new_propp->prop_name = kmem_alloc(name_buf_len, km_flags);
	if (new_propp->prop_name == 0)	{
		/* Name allocation failed: unwind the property allocation. */
		kmem_free(new_propp, sizeof (ddi_prop_t));
		cmn_err(CE_CONT, prop_no_mem_msg, name);
		return (DDI_PROP_NO_MEMORY);
	}
	bcopy(name, new_propp->prop_name, name_buf_len);

	/*
	 * Set the property type
	 */
	new_propp->prop_flags = flags & DDI_PROP_TYPE_MASK;

	/*
	 * Set length and value ONLY if not an explicit property undefine:
	 * NOTE: value and length are zero for explicit undefines.
	 */

	if (flags & DDI_PROP_UNDEF_IT) {
		new_propp->prop_flags |= DDI_PROP_UNDEF_IT;
	} else {
		if ((new_propp->prop_len = length) != 0) {
			new_propp->prop_val = kmem_alloc(length, km_flags);
			if (new_propp->prop_val == 0)  {
				/* Unwind both earlier allocations. */
				kmem_free(new_propp->prop_name, name_buf_len);
				kmem_free(new_propp, sizeof (ddi_prop_t));
				cmn_err(CE_CONT, prop_no_mem_msg, name);
				return (DDI_PROP_NO_MEMORY);
			}
			bcopy(value, new_propp->prop_val, length);
		}
	}

	/*
	 * Link property into beginning of list. (Properties are LIFO order.)
	 * Only the list manipulation itself is done under devi_lock; all
	 * allocation and copying happened above, outside the lock.
	 */

	mutex_enter(&(DEVI(dip)->devi_lock));
	propp = *list_head;
	new_propp->prop_next = propp;
	*list_head = new_propp;
	mutex_exit(&(DEVI(dip)->devi_lock));
	return (DDI_PROP_SUCCESS);
}
3621 
3622 
3623 /*
3624  * ddi_prop_change:	Modify a software managed property value
3625  *
3626  *			Set new length and value if found.
3627  *			returns DDI_PROP_INVAL_ARG if dev is DDI_DEV_T_ANY or
3628  *			input name is the NULL string.
3629  *			returns DDI_PROP_NO_MEMORY if unable to allocate memory
3630  *
3631  *			Note: an undef can be modified to be a define,
3632  *			(you can't go the other way.)
3633  */
3634 
static int
ddi_prop_change(dev_t dev, dev_info_t *dip, int flags,
    char *name, caddr_t value, int length)
{
	ddi_prop_t	*propp;
	ddi_prop_t	**ppropp;
	caddr_t		p = NULL;

	if ((dev == DDI_DEV_T_ANY) || (name == NULL) || (strlen(name) == 0))
		return (DDI_PROP_INVAL_ARG);

	/*
	 * Preallocate buffer, even if we don't need it...
	 * Allocating up front means no allocation is attempted later
	 * while devi_lock is held.
	 */
	if (length != 0)  {
		p = kmem_alloc(length, (flags & DDI_PROP_CANSLEEP) ?
		    KM_SLEEP : KM_NOSLEEP);
		if (p == NULL)	{
			cmn_err(CE_CONT, prop_no_mem_msg, name);
			return (DDI_PROP_NO_MEMORY);
		}
	}

	/*
	 * If the dev_t value contains DDI_MAJOR_T_UNKNOWN for the major
	 * number, a real dev_t value should be created based upon the dip's
	 * binding driver.  See ddi_prop_add...
	 */
	if (getmajor(dev) == DDI_MAJOR_T_UNKNOWN)
		dev = makedevice(
		    ddi_name_to_major(DEVI(dip)->devi_binding_name),
		    getminor(dev));

	/*
	 * Check to see if the property exists.  If so we modify it.
	 * Else we create it by calling ddi_prop_add().
	 */
	mutex_enter(&(DEVI(dip)->devi_lock));
	ppropp = &DEVI(dip)->devi_drv_prop_ptr;
	if (flags & DDI_PROP_SYSTEM_DEF)
		ppropp = &DEVI(dip)->devi_sys_prop_ptr;
	else if (flags & DDI_PROP_HW_DEF)
		ppropp = &DEVI(dip)->devi_hw_prop_ptr;

	if ((propp = i_ddi_prop_search(dev, name, flags, ppropp)) != NULL) {
		/*
		 * Found an existing property: fill the preallocated
		 * buffer from value, free the old value buffer, and swap
		 * in the new one.  Clearing DDI_PROP_UNDEF_IT turns a
		 * previous undef into a define (the reverse is not done
		 * here; see the block comment above).
		 */
		if (length != 0)
			bcopy(value, p, length);

		if (propp->prop_len != 0)
			kmem_free(propp->prop_val, propp->prop_len);

		propp->prop_len = length;
		propp->prop_val = p;
		propp->prop_flags &= ~DDI_PROP_UNDEF_IT;
		mutex_exit(&(DEVI(dip)->devi_lock));
		return (DDI_PROP_SUCCESS);
	}

	/*
	 * Not found: discard the preallocated buffer and create the
	 * property instead (ddi_prop_add does its own allocation).
	 */
	mutex_exit(&(DEVI(dip)->devi_lock));
	if (length != 0)
		kmem_free(p, length);

	return (ddi_prop_add(dev, dip, flags, name, value, length));
}
3704 
3705 /*
3706  * Common update routine used to update and encode a property.	Creates
3707  * a property handle, calls the property encode routine, figures out if
3708  * the property already exists and updates if it does.	Otherwise it
3709  * creates if it does not exist.
3710  */
int
ddi_prop_update_common(dev_t match_dev, dev_info_t *dip, int flags,
    char *name, void *data, uint_t nelements,
    int (*prop_create)(prop_handle_t *, void *data, uint_t nelements))
{
	prop_handle_t	ph;
	int		rval;
	uint_t		ourflags;

	/*
	 * If dev_t is DDI_DEV_T_ANY or name's length is zero,
	 * return error.
	 */
	if (match_dev == DDI_DEV_T_ANY || name == NULL || strlen(name) == 0)
		return (DDI_PROP_INVAL_ARG);

	/*
	 * Create the handle
	 */
	ph.ph_data = NULL;
	ph.ph_cur_pos = NULL;
	ph.ph_save_pos = NULL;
	ph.ph_size = 0;
	ph.ph_ops = &prop_1275_ops;

	/*
	 * ourflags:
	 * For compatibility with the old interfaces.  The old interfaces
	 * didn't sleep by default and slept when the flag was set.  These
	 * interfaces do the opposite.  So the old interfaces now set the
	 * DDI_PROP_DONTSLEEP flag by default which tells us not to sleep.
	 *
	 * ph.ph_flags:
	 * Blocked data or unblocked data allocation
	 * for ph.ph_data in ddi_prop_encode_alloc()
	 */
	if (flags & DDI_PROP_DONTSLEEP) {
		ourflags = flags;
		ph.ph_flags = DDI_PROP_DONTSLEEP;
	} else {
		ourflags = flags | DDI_PROP_CANSLEEP;
		ph.ph_flags = DDI_PROP_CANSLEEP;
	}

	/*
	 * Encode the data and store it in the property handle by
	 * calling the prop_encode routine.  On failure, free any
	 * buffer the encoder may have allocated before bailing out.
	 */
	if ((rval = (*prop_create)(&ph, data, nelements)) !=
	    DDI_PROP_SUCCESS) {
		if (rval == DDI_PROP_NO_MEMORY)
			cmn_err(CE_CONT, prop_no_mem_msg, name);
		if (ph.ph_size != 0)
			kmem_free(ph.ph_data, ph.ph_size);
		return (rval);
	}

	/*
	 * The old interfaces use a stacking approach to creating
	 * properties.	If we are being called from the old interfaces,
	 * the DDI_PROP_STACK_CREATE flag will be set, so we just do a
	 * create without checking.
	 */
	if (flags & DDI_PROP_STACK_CREATE) {
		rval = ddi_prop_add(match_dev, dip,
		    ourflags, name, ph.ph_data, ph.ph_size);
	} else {
		rval = ddi_prop_change(match_dev, dip,
		    ourflags, name, ph.ph_data, ph.ph_size);
	}

	/*
	 * Free the encoded data allocated in the prop_encode routine.
	 * (ddi_prop_add/ddi_prop_change made their own copies.)
	 */
	if (ph.ph_size != 0)
		kmem_free(ph.ph_data, ph.ph_size);

	return (rval);
}
3790 
3791 
3792 /*
3793  * ddi_prop_create:	Define a managed property:
3794  *			See above for details.
3795  */
3796 
3797 int
3798 ddi_prop_create(dev_t dev, dev_info_t *dip, int flag,
3799     char *name, caddr_t value, int length)
3800 {
3801 	if (!(flag & DDI_PROP_CANSLEEP)) {
3802 		flag |= DDI_PROP_DONTSLEEP;
3803 #ifdef DDI_PROP_DEBUG
3804 		if (length != 0)
3805 			cmn_err(CE_NOTE, "!ddi_prop_create: interface obsolete,"
3806 			    "use ddi_prop_update (prop = %s, node = %s%d)",
3807 			    name, ddi_driver_name(dip), ddi_get_instance(dip));
3808 #endif /* DDI_PROP_DEBUG */
3809 	}
3810 	flag &= ~DDI_PROP_SYSTEM_DEF;
3811 	return (ddi_prop_update_common(dev, dip,
3812 	    (flag | DDI_PROP_STACK_CREATE | DDI_PROP_TYPE_ANY), name,
3813 	    value, length, ddi_prop_fm_encode_bytes));
3814 }
3815 
3816 int
3817 e_ddi_prop_create(dev_t dev, dev_info_t *dip, int flag,
3818     char *name, caddr_t value, int length)
3819 {
3820 	if (!(flag & DDI_PROP_CANSLEEP))
3821 		flag |= DDI_PROP_DONTSLEEP;
3822 	return (ddi_prop_update_common(dev, dip,
3823 	    (flag | DDI_PROP_SYSTEM_DEF | DDI_PROP_STACK_CREATE |
3824 	    DDI_PROP_TYPE_ANY),
3825 	    name, value, length, ddi_prop_fm_encode_bytes));
3826 }
3827 
3828 int
3829 ddi_prop_modify(dev_t dev, dev_info_t *dip, int flag,
3830     char *name, caddr_t value, int length)
3831 {
3832 	ASSERT((flag & DDI_PROP_TYPE_MASK) == 0);
3833 
3834 	/*
3835 	 * If dev_t is DDI_DEV_T_ANY or name's length is zero,
3836 	 * return error.
3837 	 */
3838 	if (dev == DDI_DEV_T_ANY || name == NULL || strlen(name) == 0)
3839 		return (DDI_PROP_INVAL_ARG);
3840 
3841 	if (!(flag & DDI_PROP_CANSLEEP))
3842 		flag |= DDI_PROP_DONTSLEEP;
3843 	flag &= ~DDI_PROP_SYSTEM_DEF;
3844 	if (ddi_prop_exists(dev, dip, (flag | DDI_PROP_NOTPROM), name) == 0)
3845 		return (DDI_PROP_NOT_FOUND);
3846 
3847 	return (ddi_prop_update_common(dev, dip,
3848 	    (flag | DDI_PROP_TYPE_BYTE), name,
3849 	    value, length, ddi_prop_fm_encode_bytes));
3850 }
3851 
3852 int
3853 e_ddi_prop_modify(dev_t dev, dev_info_t *dip, int flag,
3854     char *name, caddr_t value, int length)
3855 {
3856 	ASSERT((flag & DDI_PROP_TYPE_MASK) == 0);
3857 
3858 	/*
3859 	 * If dev_t is DDI_DEV_T_ANY or name's length is zero,
3860 	 * return error.
3861 	 */
3862 	if (dev == DDI_DEV_T_ANY || name == NULL || strlen(name) == 0)
3863 		return (DDI_PROP_INVAL_ARG);
3864 
3865 	if (ddi_prop_exists(dev, dip, (flag | DDI_PROP_SYSTEM_DEF), name) == 0)
3866 		return (DDI_PROP_NOT_FOUND);
3867 
3868 	if (!(flag & DDI_PROP_CANSLEEP))
3869 		flag |= DDI_PROP_DONTSLEEP;
3870 	return (ddi_prop_update_common(dev, dip,
3871 		(flag | DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_BYTE),
3872 		name, value, length, ddi_prop_fm_encode_bytes));
3873 }
3874 
3875 
3876 /*
3877  * Common lookup routine used to lookup and decode a property.
3878  * Creates a property handle, searches for the raw encoded data,
3879  * fills in the handle, and calls the property decode functions
3880  * passed in.
3881  *
3882  * This routine is not static because ddi_bus_prop_op() which lives in
3883  * ddi_impl.c calls it.  No driver should be calling this routine.
3884  */
int
ddi_prop_lookup_common(dev_t match_dev, dev_info_t *dip,
    uint_t flags, char *name, void *data, uint_t *nelements,
    int (*prop_decoder)(prop_handle_t *, void *data, uint_t *nelements))
{
	int		rval;
	uint_t		ourflags;
	prop_handle_t	ph;

	if ((match_dev == DDI_DEV_T_NONE) ||
	    (name == NULL) || (strlen(name) == 0))
		return (DDI_PROP_INVAL_ARG);

	/* Default to sleeping allocations unless told not to. */
	ourflags = (flags & DDI_PROP_DONTSLEEP) ? flags :
		flags | DDI_PROP_CANSLEEP;

	/*
	 * Get the encoded data
	 */
	bzero(&ph, sizeof (prop_handle_t));

	if (flags & DDI_UNBND_DLPI2) {
		/*
		 * For unbound dlpi style-2 devices, index into
		 * the devnames' array and search the global
		 * property list.
		 */
		ourflags &= ~DDI_UNBND_DLPI2;
		rval = i_ddi_prop_search_global(match_dev,
		    ourflags, name, &ph.ph_data, &ph.ph_size);
	} else {
		rval = ddi_prop_search_common(match_dev, dip,
		    PROP_LEN_AND_VAL_ALLOC, ourflags, name,
		    &ph.ph_data, &ph.ph_size);

	}

	if (rval != DDI_PROP_SUCCESS && rval != DDI_PROP_FOUND_1275) {
		ASSERT(ph.ph_data == NULL);
		ASSERT(ph.ph_size == 0);
		return (rval);
	}

	/*
	 * If the encoded data came from a OBP or software
	 * use the 1275 OBP decode/encode routines.
	 * PH_FROM_PROM records the origin for the decoder.
	 */
	ph.ph_cur_pos = ph.ph_data;
	ph.ph_save_pos = ph.ph_data;
	ph.ph_ops = &prop_1275_ops;
	ph.ph_flags = (rval == DDI_PROP_FOUND_1275) ? PH_FROM_PROM : 0;

	rval = (*prop_decoder)(&ph, data, nelements);

	/*
	 * Free the encoded data
	 * (the decoder produced its own copy for the caller).
	 */
	if (ph.ph_size != 0)
		kmem_free(ph.ph_data, ph.ph_size);

	return (rval);
}
3947 
3948 /*
3949  * Lookup and return an array of composite properties.  The driver must
3950  * provide the decode routine.
3951  */
3952 int
3953 ddi_prop_lookup(dev_t match_dev, dev_info_t *dip,
3954     uint_t flags, char *name, void *data, uint_t *nelements,
3955     int (*prop_decoder)(prop_handle_t *, void *data, uint_t *nelements))
3956 {
3957 	return (ddi_prop_lookup_common(match_dev, dip,
3958 	    (flags | DDI_PROP_TYPE_COMPOSITE), name,
3959 	    data, nelements, prop_decoder));
3960 }
3961 
3962 /*
3963  * Return 1 if a property exists (no type checking done).
3964  * Return 0 if it does not exist.
3965  */
3966 int
3967 ddi_prop_exists(dev_t match_dev, dev_info_t *dip, uint_t flags, char *name)
3968 {
3969 	int	i;
3970 	uint_t	x = 0;
3971 
3972 	i = ddi_prop_search_common(match_dev, dip, PROP_EXISTS,
3973 		flags | DDI_PROP_TYPE_MASK, name, NULL, &x);
3974 	return (i == DDI_PROP_SUCCESS || i == DDI_PROP_FOUND_1275);
3975 }
3976 
3977 
3978 /*
3979  * Update an array of composite properties.  The driver must
3980  * provide the encode routine.
3981  */
3982 int
3983 ddi_prop_update(dev_t match_dev, dev_info_t *dip,
3984     char *name, void *data, uint_t nelements,
3985     int (*prop_create)(prop_handle_t *, void *data, uint_t nelements))
3986 {
3987 	return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_COMPOSITE,
3988 	    name, data, nelements, prop_create));
3989 }
3990 
/*
 * Get a single integer or boolean property and return it.
 * If the property does not exist, or cannot be decoded,
 * then return the defvalue passed in.
 *
 * This routine always succeeds.
 */
3998 int
3999 ddi_prop_get_int(dev_t match_dev, dev_info_t *dip, uint_t flags,
4000     char *name, int defvalue)
4001 {
4002 	int	data;
4003 	uint_t	nelements;
4004 	int	rval;
4005 
4006 	if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
4007 	    LDI_DEV_T_ANY | DDI_UNBND_DLPI2)) {
4008 #ifdef DEBUG
4009 		if (dip != NULL) {
4010 			cmn_err(CE_WARN, "ddi_prop_get_int: invalid flag"
4011 			    " 0x%x (prop = %s, node = %s%d)", flags,
4012 			    name, ddi_driver_name(dip), ddi_get_instance(dip));
4013 		}
4014 #endif /* DEBUG */
4015 		flags &= DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
4016 		    LDI_DEV_T_ANY | DDI_UNBND_DLPI2;
4017 	}
4018 
4019 	if ((rval = ddi_prop_lookup_common(match_dev, dip,
4020 	    (flags | DDI_PROP_TYPE_INT), name, &data, &nelements,
4021 	    ddi_prop_fm_decode_int)) != DDI_PROP_SUCCESS) {
4022 		if (rval == DDI_PROP_END_OF_DATA)
4023 			data = 1;
4024 		else
4025 			data = defvalue;
4026 	}
4027 	return (data);
4028 }
4029 
/*
 * Get a single 64 bit integer or boolean property and return it.
 * If the property does not exist, or cannot be decoded,
 * then return the defvalue passed in.
 *
 * Unlike ddi_prop_get_int(), invalid flag bits are not silently masked
 * off: the call fails with DDI_PROP_INVAL_ARG instead.
 */
4037 int64_t
4038 ddi_prop_get_int64(dev_t match_dev, dev_info_t *dip, uint_t flags,
4039     char *name, int64_t defvalue)
4040 {
4041 	int64_t	data;
4042 	uint_t	nelements;
4043 	int	rval;
4044 
4045 	if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
4046 	    LDI_DEV_T_ANY | DDI_UNBND_DLPI2)) {
4047 #ifdef DEBUG
4048 		if (dip != NULL) {
4049 			cmn_err(CE_WARN, "ddi_prop_get_int64: invalid flag"
4050 			    " 0x%x (prop = %s, node = %s%d)", flags,
4051 			    name, ddi_driver_name(dip), ddi_get_instance(dip));
4052 		}
4053 #endif /* DEBUG */
4054 		return (DDI_PROP_INVAL_ARG);
4055 	}
4056 
4057 	if ((rval = ddi_prop_lookup_common(match_dev, dip,
4058 	    (flags | DDI_PROP_TYPE_INT64 | DDI_PROP_NOTPROM),
4059 	    name, &data, &nelements, ddi_prop_fm_decode_int64))
4060 	    != DDI_PROP_SUCCESS) {
4061 		if (rval == DDI_PROP_END_OF_DATA)
4062 			data = 1;
4063 		else
4064 			data = defvalue;
4065 	}
4066 	return (data);
4067 }
4068 
/*
 * Get an array-of-integers property.
 */
4072 int
4073 ddi_prop_lookup_int_array(dev_t match_dev, dev_info_t *dip, uint_t flags,
4074     char *name, int **data, uint_t *nelements)
4075 {
4076 	if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
4077 	    LDI_DEV_T_ANY | DDI_UNBND_DLPI2)) {
4078 #ifdef DEBUG
4079 		if (dip != NULL) {
4080 			cmn_err(CE_WARN, "ddi_prop_lookup_int_array: "
4081 			    "invalid flag 0x%x (prop = %s, node = %s%d)",
4082 			    flags, name, ddi_driver_name(dip),
4083 			    ddi_get_instance(dip));
4084 		}
4085 #endif /* DEBUG */
4086 		flags &= DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
4087 		LDI_DEV_T_ANY | DDI_UNBND_DLPI2;
4088 	}
4089 
4090 	return (ddi_prop_lookup_common(match_dev, dip,
4091 	    (flags | DDI_PROP_TYPE_INT), name, data,
4092 	    nelements, ddi_prop_fm_decode_ints));
4093 }
4094 
4095 /*
4096  * Get an array of 64 bit integer properties
4097  */
4098 int
4099 ddi_prop_lookup_int64_array(dev_t match_dev, dev_info_t *dip, uint_t flags,
4100     char *name, int64_t **data, uint_t *nelements)
4101 {
4102 	if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
4103 	    LDI_DEV_T_ANY | DDI_UNBND_DLPI2)) {
4104 #ifdef DEBUG
4105 		if (dip != NULL) {
4106 			cmn_err(CE_WARN, "ddi_prop_lookup_int64_array: "
4107 			    "invalid flag 0x%x (prop = %s, node = %s%d)",
4108 			    flags, name, ddi_driver_name(dip),
4109 			    ddi_get_instance(dip));
4110 		}
4111 #endif /* DEBUG */
4112 		return (DDI_PROP_INVAL_ARG);
4113 	}
4114 
4115 	return (ddi_prop_lookup_common(match_dev, dip,
4116 	    (flags | DDI_PROP_TYPE_INT64 | DDI_PROP_NOTPROM),
4117 	    name, data, nelements, ddi_prop_fm_decode_int64_array));
4118 }
4119 
4120 /*
4121  * Update a single integer property.  If the property exists on the drivers
4122  * property list it updates, else it creates it.
4123  */
4124 int
4125 ddi_prop_update_int(dev_t match_dev, dev_info_t *dip,
4126     char *name, int data)
4127 {
4128 	return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_INT,
4129 	    name, &data, 1, ddi_prop_fm_encode_ints));
4130 }
4131 
4132 /*
4133  * Update a single 64 bit integer property.
4134  * Update the driver property list if it exists, else create it.
4135  */
4136 int
4137 ddi_prop_update_int64(dev_t match_dev, dev_info_t *dip,
4138     char *name, int64_t data)
4139 {
4140 	return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_INT64,
4141 	    name, &data, 1, ddi_prop_fm_encode_int64));
4142 }
4143 
4144 int
4145 e_ddi_prop_update_int(dev_t match_dev, dev_info_t *dip,
4146     char *name, int data)
4147 {
4148 	return (ddi_prop_update_common(match_dev, dip,
4149 	    DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_INT,
4150 	    name, &data, 1, ddi_prop_fm_encode_ints));
4151 }
4152 
4153 int
4154 e_ddi_prop_update_int64(dev_t match_dev, dev_info_t *dip,
4155     char *name, int64_t data)
4156 {
4157 	return (ddi_prop_update_common(match_dev, dip,
4158 	    DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_INT64,
4159 	    name, &data, 1, ddi_prop_fm_encode_int64));
4160 }
4161 
4162 /*
4163  * Update an array of integer property.  If the property exists on the drivers
4164  * property list it updates, else it creates it.
4165  */
4166 int
4167 ddi_prop_update_int_array(dev_t match_dev, dev_info_t *dip,
4168     char *name, int *data, uint_t nelements)
4169 {
4170 	return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_INT,
4171 	    name, data, nelements, ddi_prop_fm_encode_ints));
4172 }
4173 
4174 /*
4175  * Update an array of 64 bit integer properties.
4176  * Update the driver property list if it exists, else create it.
4177  */
4178 int
4179 ddi_prop_update_int64_array(dev_t match_dev, dev_info_t *dip,
4180     char *name, int64_t *data, uint_t nelements)
4181 {
4182 	return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_INT64,
4183 	    name, data, nelements, ddi_prop_fm_encode_int64));
4184 }
4185 
4186 int
4187 e_ddi_prop_update_int64_array(dev_t match_dev, dev_info_t *dip,
4188     char *name, int64_t *data, uint_t nelements)
4189 {
4190 	return (ddi_prop_update_common(match_dev, dip,
4191 	    DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_INT64,
4192 	    name, data, nelements, ddi_prop_fm_encode_int64));
4193 }
4194 
4195 int
4196 e_ddi_prop_update_int_array(dev_t match_dev, dev_info_t *dip,
4197     char *name, int *data, uint_t nelements)
4198 {
4199 	return (ddi_prop_update_common(match_dev, dip,
4200 	    DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_INT,
4201 	    name, data, nelements, ddi_prop_fm_encode_ints));
4202 }
4203 
4204 /*
4205  * Get a single string property.
4206  */
4207 int
4208 ddi_prop_lookup_string(dev_t match_dev, dev_info_t *dip, uint_t flags,
4209     char *name, char **data)
4210 {
4211 	uint_t x;
4212 
4213 	if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
4214 	    LDI_DEV_T_ANY | DDI_UNBND_DLPI2)) {
4215 #ifdef DEBUG
4216 		if (dip != NULL) {
4217 			cmn_err(CE_WARN, "%s: invalid flag 0x%x "
4218 			    "(prop = %s, node = %s%d); invalid bits ignored",
4219 			    "ddi_prop_lookup_string", flags, name,
4220 			    ddi_driver_name(dip), ddi_get_instance(dip));
4221 		}
4222 #endif /* DEBUG */
4223 		flags &= DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
4224 		    LDI_DEV_T_ANY | DDI_UNBND_DLPI2;
4225 	}
4226 
4227 	return (ddi_prop_lookup_common(match_dev, dip,
4228 	    (flags | DDI_PROP_TYPE_STRING), name, data,
4229 	    &x, ddi_prop_fm_decode_string));
4230 }
4231 
4232 /*
4233  * Get an array of strings property.
4234  */
4235 int
4236 ddi_prop_lookup_string_array(dev_t match_dev, dev_info_t *dip, uint_t flags,
4237     char *name, char ***data, uint_t *nelements)
4238 {
4239 	if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
4240 	    LDI_DEV_T_ANY | DDI_UNBND_DLPI2)) {
4241 #ifdef DEBUG
4242 		if (dip != NULL) {
4243 			cmn_err(CE_WARN, "ddi_prop_lookup_string_array: "
4244 			    "invalid flag 0x%x (prop = %s, node = %s%d)",
4245 			    flags, name, ddi_driver_name(dip),
4246 			    ddi_get_instance(dip));
4247 		}
4248 #endif /* DEBUG */
4249 		flags &= DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
4250 		    LDI_DEV_T_ANY | DDI_UNBND_DLPI2;
4251 	}
4252 
4253 	return (ddi_prop_lookup_common(match_dev, dip,
4254 	    (flags | DDI_PROP_TYPE_STRING), name, data,
4255 	    nelements, ddi_prop_fm_decode_strings));
4256 }
4257 
4258 /*
4259  * Update a single string property.
4260  */
4261 int
4262 ddi_prop_update_string(dev_t match_dev, dev_info_t *dip,
4263     char *name, char *data)
4264 {
4265 	return (ddi_prop_update_common(match_dev, dip,
4266 	    DDI_PROP_TYPE_STRING, name, &data, 1,
4267 	    ddi_prop_fm_encode_string));
4268 }
4269 
4270 int
4271 e_ddi_prop_update_string(dev_t match_dev, dev_info_t *dip,
4272     char *name, char *data)
4273 {
4274 	return (ddi_prop_update_common(match_dev, dip,
4275 	    DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_STRING,
4276 	    name, &data, 1, ddi_prop_fm_encode_string));
4277 }
4278 
4279 
4280 /*
4281  * Update an array of strings property.
4282  */
4283 int
4284 ddi_prop_update_string_array(dev_t match_dev, dev_info_t *dip,
4285     char *name, char **data, uint_t nelements)
4286 {
4287 	return (ddi_prop_update_common(match_dev, dip,
4288 	    DDI_PROP_TYPE_STRING, name, data, nelements,
4289 	    ddi_prop_fm_encode_strings));
4290 }
4291 
4292 int
4293 e_ddi_prop_update_string_array(dev_t match_dev, dev_info_t *dip,
4294     char *name, char **data, uint_t nelements)
4295 {
4296 	return (ddi_prop_update_common(match_dev, dip,
4297 	    DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_STRING,
4298 	    name, data, nelements,
4299 	    ddi_prop_fm_encode_strings));
4300 }
4301 
4302 
4303 /*
4304  * Get an array of bytes property.
4305  */
4306 int
4307 ddi_prop_lookup_byte_array(dev_t match_dev, dev_info_t *dip, uint_t flags,
4308     char *name, uchar_t **data, uint_t *nelements)
4309 {
4310 	if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
4311 	    LDI_DEV_T_ANY | DDI_UNBND_DLPI2)) {
4312 #ifdef DEBUG
4313 		if (dip != NULL) {
4314 			cmn_err(CE_WARN, "ddi_prop_lookup_byte_array: "
4315 			    " invalid flag 0x%x (prop = %s, node = %s%d)",
4316 			    flags, name, ddi_driver_name(dip),
4317 			    ddi_get_instance(dip));
4318 		}
4319 #endif /* DEBUG */
4320 		flags &= DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
4321 		    LDI_DEV_T_ANY | DDI_UNBND_DLPI2;
4322 	}
4323 
4324 	return (ddi_prop_lookup_common(match_dev, dip,
4325 	    (flags | DDI_PROP_TYPE_BYTE), name, data,
4326 	    nelements, ddi_prop_fm_decode_bytes));
4327 }
4328 
4329 /*
4330  * Update an array of bytes property.
4331  */
4332 int
4333 ddi_prop_update_byte_array(dev_t match_dev, dev_info_t *dip,
4334     char *name, uchar_t *data, uint_t nelements)
4335 {
4336 	if (nelements == 0)
4337 		return (DDI_PROP_INVAL_ARG);
4338 
4339 	return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_BYTE,
4340 	    name, data, nelements, ddi_prop_fm_encode_bytes));
4341 }
4342 
4343 
4344 int
4345 e_ddi_prop_update_byte_array(dev_t match_dev, dev_info_t *dip,
4346     char *name, uchar_t *data, uint_t nelements)
4347 {
4348 	if (nelements == 0)
4349 		return (DDI_PROP_INVAL_ARG);
4350 
4351 	return (ddi_prop_update_common(match_dev, dip,
4352 	    DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_BYTE,
4353 	    name, data, nelements, ddi_prop_fm_encode_bytes));
4354 }
4355 
4356 
4357 /*
4358  * ddi_prop_remove_common:	Undefine a managed property:
4359  *			Input dev_t must match dev_t when defined.
4360  *			Returns DDI_PROP_NOT_FOUND, possibly.
4361  *			DDI_PROP_INVAL_ARG is also possible if dev is
4362  *			DDI_DEV_T_ANY or incoming name is the NULL string.
4363  */
4364 int
4365 ddi_prop_remove_common(dev_t dev, dev_info_t *dip, char *name, int flag)
4366 {
4367 	ddi_prop_t	**list_head = &(DEVI(dip)->devi_drv_prop_ptr);
4368 	ddi_prop_t	*propp;
4369 	ddi_prop_t	*lastpropp = NULL;
4370 
4371 	if ((dev == DDI_DEV_T_ANY) || (name == (char *)0) ||
4372 	    (strlen(name) == 0)) {
4373 		return (DDI_PROP_INVAL_ARG);
4374 	}
4375 
4376 	if (flag & DDI_PROP_SYSTEM_DEF)
4377 		list_head = &(DEVI(dip)->devi_sys_prop_ptr);
4378 	else if (flag & DDI_PROP_HW_DEF)
4379 		list_head = &(DEVI(dip)->devi_hw_prop_ptr);
4380 
4381 	mutex_enter(&(DEVI(dip)->devi_lock));
4382 
4383 	for (propp = *list_head; propp != NULL; propp = propp->prop_next)  {
4384 		if (DDI_STRSAME(propp->prop_name, name) &&
4385 		    (dev == propp->prop_dev)) {
4386 			/*
4387 			 * Unlink this propp allowing for it to
4388 			 * be first in the list:
4389 			 */
4390 
4391 			if (lastpropp == NULL)
4392 				*list_head = propp->prop_next;
4393 			else
4394 				lastpropp->prop_next = propp->prop_next;
4395 
4396 			mutex_exit(&(DEVI(dip)->devi_lock));
4397 
4398 			/*
4399 			 * Free memory and return...
4400 			 */
4401 			kmem_free(propp->prop_name,
4402 			    strlen(propp->prop_name) + 1);
4403 			if (propp->prop_len != 0)
4404 				kmem_free(propp->prop_val, propp->prop_len);
4405 			kmem_free(propp, sizeof (ddi_prop_t));
4406 			return (DDI_PROP_SUCCESS);
4407 		}
4408 		lastpropp = propp;
4409 	}
4410 	mutex_exit(&(DEVI(dip)->devi_lock));
4411 	return (DDI_PROP_NOT_FOUND);
4412 }
4413 
/*
 * ddi_prop_remove: Undefine a driver-defined property (flag == 0 selects
 * the driver list in ddi_prop_remove_common()).
 */
int
ddi_prop_remove(dev_t dev, dev_info_t *dip, char *name)
{
	return (ddi_prop_remove_common(dev, dip, name, 0));
}
4419 
/*
 * e_ddi_prop_remove: Undefine a system-defined property.
 */
int
e_ddi_prop_remove(dev_t dev, dev_info_t *dip, char *name)
{
	return (ddi_prop_remove_common(dev, dip, name, DDI_PROP_SYSTEM_DEF));
}
4425 
4426 /*
4427  * e_ddi_prop_list_delete: remove a list of properties
4428  *	Note that the caller needs to provide the required protection
4429  *	(eg. devi_lock if these properties are still attached to a devi)
4430  */
4431 void
4432 e_ddi_prop_list_delete(ddi_prop_t *props)
4433 {
4434 	i_ddi_prop_list_delete(props);
4435 }
4436 
4437 /*
4438  * ddi_prop_remove_all_common:
4439  *	Used before unloading a driver to remove
4440  *	all properties. (undefines all dev_t's props.)
4441  *	Also removes `explicitly undefined' props.
4442  *	No errors possible.
4443  */
4444 void
4445 ddi_prop_remove_all_common(dev_info_t *dip, int flag)
4446 {
4447 	ddi_prop_t	**list_head;
4448 
4449 	mutex_enter(&(DEVI(dip)->devi_lock));
4450 	if (flag & DDI_PROP_SYSTEM_DEF) {
4451 		list_head = &(DEVI(dip)->devi_sys_prop_ptr);
4452 	} else if (flag & DDI_PROP_HW_DEF) {
4453 		list_head = &(DEVI(dip)->devi_hw_prop_ptr);
4454 	} else {
4455 		list_head = &(DEVI(dip)->devi_drv_prop_ptr);
4456 	}
4457 	i_ddi_prop_list_delete(*list_head);
4458 	*list_head = NULL;
4459 	mutex_exit(&(DEVI(dip)->devi_lock));
4460 }
4461 
4462 
4463 /*
4464  * ddi_prop_remove_all:		Remove all driver prop definitions.
4465  */
4466 
4467 void
4468 ddi_prop_remove_all(dev_info_t *dip)
4469 {
4470 	ddi_prop_remove_all_common(dip, 0);
4471 }
4472 
4473 /*
4474  * e_ddi_prop_remove_all:	Remove all system prop definitions.
4475  */
4476 
4477 void
4478 e_ddi_prop_remove_all(dev_info_t *dip)
4479 {
4480 	ddi_prop_remove_all_common(dip, (int)DDI_PROP_SYSTEM_DEF);
4481 }
4482 
4483 
4484 /*
4485  * ddi_prop_undefine:	Explicitly undefine a property.  Property
4486  *			searches which match this property return
4487  *			the error code DDI_PROP_UNDEFINED.
4488  *
4489  *			Use ddi_prop_remove to negate effect of
4490  *			ddi_prop_undefine
4491  *
4492  *			See above for error returns.
4493  */
4494 
4495 int
4496 ddi_prop_undefine(dev_t dev, dev_info_t *dip, int flag, char *name)
4497 {
4498 	if (!(flag & DDI_PROP_CANSLEEP))
4499 		flag |= DDI_PROP_DONTSLEEP;
4500 	return (ddi_prop_update_common(dev, dip,
4501 	    (flag | DDI_PROP_STACK_CREATE | DDI_PROP_UNDEF_IT |
4502 	    DDI_PROP_TYPE_ANY), name, NULL, 0, ddi_prop_fm_encode_bytes));
4503 }
4504 
4505 int
4506 e_ddi_prop_undefine(dev_t dev, dev_info_t *dip, int flag, char *name)
4507 {
4508 	if (!(flag & DDI_PROP_CANSLEEP))
4509 		flag |= DDI_PROP_DONTSLEEP;
4510 	return (ddi_prop_update_common(dev, dip,
4511 	    (flag | DDI_PROP_SYSTEM_DEF | DDI_PROP_STACK_CREATE |
4512 	    DDI_PROP_UNDEF_IT | DDI_PROP_TYPE_ANY),
4513 	    name, NULL, 0, ddi_prop_fm_encode_bytes));
4514 }
4515 
4516 /*
4517  * Code to search hardware layer (PROM), if it exists, on behalf of child.
4518  *
4519  * if input dip != child_dip, then call is on behalf of child
4520  * to search PROM, do it via ddi_prop_search_common() and ascend only
4521  * if allowed.
4522  *
4523  * if input dip == ch_dip (child_dip), call is on behalf of root driver,
4524  * to search for PROM defined props only.
4525  *
4526  * Note that the PROM search is done only if the requested dev
4527  * is either DDI_DEV_T_ANY or DDI_DEV_T_NONE. PROM properties
4528  * have no associated dev, thus are automatically associated with
4529  * DDI_DEV_T_NONE.
4530  *
4531  * Modifying flag DDI_PROP_NOTPROM inhibits the search in the h/w layer.
4532  *
4533  * Returns DDI_PROP_FOUND_1275 if found to indicate to framework
4534  * that the property resides in the prom.
4535  */
4536 int
4537 impl_ddi_bus_prop_op(dev_t dev, dev_info_t *dip, dev_info_t *ch_dip,
4538     ddi_prop_op_t prop_op, int mod_flags,
4539     char *name, caddr_t valuep, int *lengthp)
4540 {
4541 	int	len;
4542 	caddr_t buffer;
4543 
4544 	/*
4545 	 * If requested dev is DDI_DEV_T_NONE or DDI_DEV_T_ANY, then
4546 	 * look in caller's PROM if it's a self identifying device...
4547 	 *
4548 	 * Note that this is very similar to ddi_prop_op, but we
4549 	 * search the PROM instead of the s/w defined properties,
4550 	 * and we are called on by the parent driver to do this for
4551 	 * the child.
4552 	 */
4553 
4554 	if (((dev == DDI_DEV_T_NONE) || (dev == DDI_DEV_T_ANY)) &&
4555 	    ndi_dev_is_prom_node(ch_dip) &&
4556 	    ((mod_flags & DDI_PROP_NOTPROM) == 0)) {
4557 		len = prom_getproplen((pnode_t)DEVI(ch_dip)->devi_nodeid, name);
4558 		if (len == -1) {
4559 			return (DDI_PROP_NOT_FOUND);
4560 		}
4561 
4562 		/*
4563 		 * If exists only request, we're done
4564 		 */
4565 		if (prop_op == PROP_EXISTS) {
4566 			return (DDI_PROP_FOUND_1275);
4567 		}
4568 
4569 		/*
4570 		 * If length only request or prop length == 0, get out
4571 		 */
4572 		if ((prop_op == PROP_LEN) || (len == 0)) {
4573 			*lengthp = len;
4574 			return (DDI_PROP_FOUND_1275);
4575 		}
4576 
4577 		/*
4578 		 * Allocate buffer if required... (either way `buffer'
4579 		 * is receiving address).
4580 		 */
4581 
4582 		switch (prop_op) {
4583 
4584 		case PROP_LEN_AND_VAL_ALLOC:
4585 
4586 			buffer = kmem_alloc((size_t)len,
4587 			    mod_flags & DDI_PROP_CANSLEEP ?
4588 			    KM_SLEEP : KM_NOSLEEP);
4589 			if (buffer == NULL) {
4590 				return (DDI_PROP_NO_MEMORY);
4591 			}
4592 			*(caddr_t *)valuep = buffer;
4593 			break;
4594 
4595 		case PROP_LEN_AND_VAL_BUF:
4596 
4597 			if (len > (*lengthp)) {
4598 				*lengthp = len;
4599 				return (DDI_PROP_BUF_TOO_SMALL);
4600 			}
4601 
4602 			buffer = valuep;
4603 			break;
4604 
4605 		default:
4606 			break;
4607 		}
4608 
4609 		/*
4610 		 * Call the PROM function to do the copy.
4611 		 */
4612 		(void) prom_getprop((pnode_t)DEVI(ch_dip)->devi_nodeid,
4613 			name, buffer);
4614 
4615 		*lengthp = len; /* return the actual length to the caller */
4616 		(void) impl_fix_props(dip, ch_dip, name, len, buffer);
4617 		return (DDI_PROP_FOUND_1275);
4618 	}
4619 
4620 	return (DDI_PROP_NOT_FOUND);
4621 }
4622 
4623 /*
4624  * The ddi_bus_prop_op default bus nexus prop op function.
4625  *
4626  * Code to search hardware layer (PROM), if it exists,
4627  * on behalf of child, then, if appropriate, ascend and check
4628  * my own software defined properties...
4629  */
4630 int
4631 ddi_bus_prop_op(dev_t dev, dev_info_t *dip, dev_info_t *ch_dip,
4632     ddi_prop_op_t prop_op, int mod_flags,
4633     char *name, caddr_t valuep, int *lengthp)
4634 {
4635 	int	error;
4636 
4637 	error = impl_ddi_bus_prop_op(dev, dip, ch_dip, prop_op, mod_flags,
4638 				    name, valuep, lengthp);
4639 
4640 	if (error == DDI_PROP_SUCCESS || error == DDI_PROP_FOUND_1275 ||
4641 	    error == DDI_PROP_BUF_TOO_SMALL)
4642 		return (error);
4643 
4644 	if (error == DDI_PROP_NO_MEMORY) {
4645 		cmn_err(CE_CONT, prop_no_mem_msg, name);
4646 		return (DDI_PROP_NO_MEMORY);
4647 	}
4648 
4649 	/*
4650 	 * Check the 'options' node as a last resort
4651 	 */
4652 	if ((mod_flags & DDI_PROP_DONTPASS) != 0)
4653 		return (DDI_PROP_NOT_FOUND);
4654 
4655 	if (ch_dip == ddi_root_node())	{
4656 		/*
4657 		 * As a last resort, when we've reached
4658 		 * the top and still haven't found the
4659 		 * property, see if the desired property
4660 		 * is attached to the options node.
4661 		 *
4662 		 * The options dip is attached right after boot.
4663 		 */
4664 		ASSERT(options_dip != NULL);
4665 		/*
4666 		 * Force the "don't pass" flag to *just* see
4667 		 * what the options node has to offer.
4668 		 */
4669 		return (ddi_prop_search_common(dev, options_dip, prop_op,
4670 		    mod_flags|DDI_PROP_DONTPASS, name, valuep,
4671 		    (uint_t *)lengthp));
4672 	}
4673 
4674 	/*
4675 	 * Otherwise, continue search with parent's s/w defined properties...
4676 	 * NOTE: Using `dip' in following call increments the level.
4677 	 */
4678 
4679 	return (ddi_prop_search_common(dev, dip, prop_op, mod_flags,
4680 	    name, valuep, (uint_t *)lengthp));
4681 }
4682 
4683 /*
4684  * External property functions used by other parts of the kernel...
4685  */
4686 
4687 /*
4688  * e_ddi_getlongprop: See comments for ddi_get_longprop.
4689  */
4690 
4691 int
4692 e_ddi_getlongprop(dev_t dev, vtype_t type, char *name, int flags,
4693     caddr_t valuep, int *lengthp)
4694 {
4695 	_NOTE(ARGUNUSED(type))
4696 	dev_info_t *devi;
4697 	ddi_prop_op_t prop_op = PROP_LEN_AND_VAL_ALLOC;
4698 	int error;
4699 
4700 	if ((devi = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
4701 		return (DDI_PROP_NOT_FOUND);
4702 
4703 	error = cdev_prop_op(dev, devi, prop_op, flags, name, valuep, lengthp);
4704 	ddi_release_devi(devi);
4705 	return (error);
4706 }
4707 
4708 /*
4709  * e_ddi_getlongprop_buf:	See comments for ddi_getlongprop_buf.
4710  */
4711 
4712 int
4713 e_ddi_getlongprop_buf(dev_t dev, vtype_t type, char *name, int flags,
4714     caddr_t valuep, int *lengthp)
4715 {
4716 	_NOTE(ARGUNUSED(type))
4717 	dev_info_t *devi;
4718 	ddi_prop_op_t prop_op = PROP_LEN_AND_VAL_BUF;
4719 	int error;
4720 
4721 	if ((devi = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
4722 		return (DDI_PROP_NOT_FOUND);
4723 
4724 	error = cdev_prop_op(dev, devi, prop_op, flags, name, valuep, lengthp);
4725 	ddi_release_devi(devi);
4726 	return (error);
4727 }
4728 
4729 /*
4730  * e_ddi_getprop:	See comments for ddi_getprop.
4731  */
4732 int
4733 e_ddi_getprop(dev_t dev, vtype_t type, char *name, int flags, int defvalue)
4734 {
4735 	_NOTE(ARGUNUSED(type))
4736 	dev_info_t *devi;
4737 	ddi_prop_op_t prop_op = PROP_LEN_AND_VAL_BUF;
4738 	int	propvalue = defvalue;
4739 	int	proplength = sizeof (int);
4740 	int	error;
4741 
4742 	if ((devi = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
4743 		return (defvalue);
4744 
4745 	error = cdev_prop_op(dev, devi, prop_op,
4746 	    flags, name, (caddr_t)&propvalue, &proplength);
4747 	ddi_release_devi(devi);
4748 
4749 	if ((error == DDI_PROP_SUCCESS) && (proplength == 0))
4750 		propvalue = 1;
4751 
4752 	return (propvalue);
4753 }
4754 
4755 /*
4756  * e_ddi_getprop_int64:
4757  *
4758  * This is a typed interfaces, but predates typed properties. With the
4759  * introduction of typed properties the framework tries to ensure
4760  * consistent use of typed interfaces. This is why TYPE_INT64 is not
4761  * part of TYPE_ANY.  E_ddi_getprop_int64 is a special case where a
4762  * typed interface invokes legacy (non-typed) interfaces:
4763  * cdev_prop_op(), prop_op(9E), ddi_prop_op(9F)).  In this case the
4764  * fact that TYPE_INT64 is not part of TYPE_ANY matters.  To support
4765  * this type of lookup as a single operation we invoke the legacy
4766  * non-typed interfaces with the special CONSUMER_TYPED bit set. The
4767  * framework ddi_prop_op(9F) implementation is expected to check for
4768  * CONSUMER_TYPED and, if set, expand type bits beyond TYPE_ANY
4769  * (currently TYPE_INT64).
4770  */
4771 int64_t
4772 e_ddi_getprop_int64(dev_t dev, vtype_t type, char *name,
4773     int flags, int64_t defvalue)
4774 {
4775 	_NOTE(ARGUNUSED(type))
4776 	dev_info_t	*devi;
4777 	ddi_prop_op_t	prop_op = PROP_LEN_AND_VAL_BUF;
4778 	int64_t		propvalue = defvalue;
4779 	int		proplength = sizeof (propvalue);
4780 	int		error;
4781 
4782 	if ((devi = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
4783 		return (defvalue);
4784 
4785 	error = cdev_prop_op(dev, devi, prop_op, flags |
4786 	    DDI_PROP_CONSUMER_TYPED, name, (caddr_t)&propvalue, &proplength);
4787 	ddi_release_devi(devi);
4788 
4789 	if ((error == DDI_PROP_SUCCESS) && (proplength == 0))
4790 		propvalue = 1;
4791 
4792 	return (propvalue);
4793 }
4794 
4795 /*
4796  * e_ddi_getproplen:	See comments for ddi_getproplen.
4797  */
4798 int
4799 e_ddi_getproplen(dev_t dev, vtype_t type, char *name, int flags, int *lengthp)
4800 {
4801 	_NOTE(ARGUNUSED(type))
4802 	dev_info_t *devi;
4803 	ddi_prop_op_t prop_op = PROP_LEN;
4804 	int error;
4805 
4806 	if ((devi = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
4807 		return (DDI_PROP_NOT_FOUND);
4808 
4809 	error = cdev_prop_op(dev, devi, prop_op, flags, name, NULL, lengthp);
4810 	ddi_release_devi(devi);
4811 	return (error);
4812 }
4813 
4814 /*
4815  * Routines to get at elements of the dev_info structure
4816  */
4817 
4818 /*
4819  * ddi_binding_name: Return the driver binding name of the devinfo node
4820  *		This is the name the OS used to bind the node to a driver.
4821  */
4822 char *
4823 ddi_binding_name(dev_info_t *dip)
4824 {
4825 	return (DEVI(dip)->devi_binding_name);
4826 }
4827 
4828 /*
4829  * ddi_driver_major: Return the major number of the driver that
4830  *		the supplied devinfo is bound to (-1 if none)
4831  */
4832 major_t
4833 ddi_driver_major(dev_info_t *devi)
4834 {
4835 	return (DEVI(devi)->devi_major);
4836 }
4837 
4838 /*
4839  * ddi_driver_name: Return the normalized driver name. this is the
4840  *		actual driver name
4841  */
4842 const char *
4843 ddi_driver_name(dev_info_t *devi)
4844 {
4845 	major_t major;
4846 
4847 	if ((major = ddi_driver_major(devi)) != (major_t)-1)
4848 		return (ddi_major_to_name(major));
4849 
4850 	return (ddi_node_name(devi));
4851 }
4852 
4853 /*
4854  * i_ddi_set_binding_name:	Set binding name.
4855  *
4856  *	Set the binding name to the given name.
4857  *	This routine is for use by the ddi implementation, not by drivers.
4858  */
4859 void
4860 i_ddi_set_binding_name(dev_info_t *dip, char *name)
4861 {
4862 	DEVI(dip)->devi_binding_name = name;
4863 
4864 }
4865 
4866 /*
4867  * ddi_get_name: A synonym of ddi_binding_name() ... returns a name
4868  * the implementation has used to bind the node to a driver.
4869  */
4870 char *
4871 ddi_get_name(dev_info_t *dip)
4872 {
4873 	return (DEVI(dip)->devi_binding_name);
4874 }
4875 
4876 /*
4877  * ddi_node_name: Return the name property of the devinfo node
4878  *		This may differ from ddi_binding_name if the node name
4879  *		does not define a binding to a driver (i.e. generic names).
4880  */
4881 char *
4882 ddi_node_name(dev_info_t *dip)
4883 {
4884 	return (DEVI(dip)->devi_node_name);
4885 }
4886 
4887 
4888 /*
4889  * ddi_get_nodeid:	Get nodeid stored in dev_info structure.
4890  */
4891 int
4892 ddi_get_nodeid(dev_info_t *dip)
4893 {
4894 	return (DEVI(dip)->devi_nodeid);
4895 }
4896 
4897 int
4898 ddi_get_instance(dev_info_t *dip)
4899 {
4900 	return (DEVI(dip)->devi_instance);
4901 }
4902 
4903 struct dev_ops *
4904 ddi_get_driver(dev_info_t *dip)
4905 {
4906 	return (DEVI(dip)->devi_ops);
4907 }
4908 
4909 void
4910 ddi_set_driver(dev_info_t *dip, struct dev_ops *devo)
4911 {
4912 	DEVI(dip)->devi_ops = devo;
4913 }
4914 
4915 /*
4916  * ddi_set_driver_private/ddi_get_driver_private:
4917  * Get/set device driver private data in devinfo.
4918  */
4919 void
4920 ddi_set_driver_private(dev_info_t *dip, void *data)
4921 {
4922 	DEVI(dip)->devi_driver_data = data;
4923 }
4924 
4925 void *
4926 ddi_get_driver_private(dev_info_t *dip)
4927 {
4928 	return (DEVI(dip)->devi_driver_data);
4929 }
4930 
4931 /*
4932  * ddi_get_parent, ddi_get_child, ddi_get_next_sibling
4933  */
4934 
4935 dev_info_t *
4936 ddi_get_parent(dev_info_t *dip)
4937 {
4938 	return ((dev_info_t *)DEVI(dip)->devi_parent);
4939 }
4940 
4941 dev_info_t *
4942 ddi_get_child(dev_info_t *dip)
4943 {
4944 	return ((dev_info_t *)DEVI(dip)->devi_child);
4945 }
4946 
4947 dev_info_t *
4948 ddi_get_next_sibling(dev_info_t *dip)
4949 {
4950 	return ((dev_info_t *)DEVI(dip)->devi_sibling);
4951 }
4952 
4953 dev_info_t *
4954 ddi_get_next(dev_info_t *dip)
4955 {
4956 	return ((dev_info_t *)DEVI(dip)->devi_next);
4957 }
4958 
4959 void
4960 ddi_set_next(dev_info_t *dip, dev_info_t *nextdip)
4961 {
4962 	DEVI(dip)->devi_next = DEVI(nextdip);
4963 }
4964 
4965 /*
4966  * ddi_root_node:		Return root node of devinfo tree
4967  */
4968 
4969 dev_info_t *
4970 ddi_root_node(void)
4971 {
4972 	extern dev_info_t *top_devinfo;
4973 
4974 	return (top_devinfo);
4975 }
4976 
4977 /*
4978  * Miscellaneous functions:
4979  */
4980 
4981 /*
4982  * Implementation specific hooks
4983  */
4984 
4985 void
4986 ddi_report_dev(dev_info_t *d)
4987 {
4988 	char *b;
4989 
4990 	(void) ddi_ctlops(d, d, DDI_CTLOPS_REPORTDEV, (void *)0, (void *)0);
4991 
4992 	/*
4993 	 * If this devinfo node has cb_ops, it's implicitly accessible from
4994 	 * userland, so we print its full name together with the instance
4995 	 * number 'abbreviation' that the driver may use internally.
4996 	 */
4997 	if (DEVI(d)->devi_ops->devo_cb_ops != (struct cb_ops *)0 &&
4998 	    (b = kmem_zalloc(MAXPATHLEN, KM_NOSLEEP))) {
4999 		cmn_err(CE_CONT, "?%s%d is %s\n",
5000 		    ddi_driver_name(d), ddi_get_instance(d),
5001 		    ddi_pathname(d, b));
5002 		kmem_free(b, MAXPATHLEN);
5003 	}
5004 }
5005 
5006 /*
5007  * ddi_ctlops() is described in the assembler not to buy a new register
5008  * window when it's called and can reduce cost in climbing the device tree
5009  * without using the tail call optimization.
5010  */
5011 int
5012 ddi_dev_regsize(dev_info_t *dev, uint_t rnumber, off_t *result)
5013 {
5014 	int ret;
5015 
5016 	ret = ddi_ctlops(dev, dev, DDI_CTLOPS_REGSIZE,
5017 	    (void *)&rnumber, (void *)result);
5018 
5019 	return (ret == DDI_SUCCESS ? DDI_SUCCESS : DDI_FAILURE);
5020 }
5021 
/*
 * ddi_dev_nregs: Return, via *result, the number of register sets of
 * the device; passes the request up the tree via ddi_ctlops().
 */
int
ddi_dev_nregs(dev_info_t *dev, int *result)
{
	return (ddi_ctlops(dev, dev, DDI_CTLOPS_NREGS, 0, (void *)result));
}
5027 
5028 int
5029 ddi_dev_is_sid(dev_info_t *d)
5030 {
5031 	return (ddi_ctlops(d, d, DDI_CTLOPS_SIDDEV, (void *)0, (void *)0));
5032 }
5033 
5034 int
5035 ddi_slaveonly(dev_info_t *d)
5036 {
5037 	return (ddi_ctlops(d, d, DDI_CTLOPS_SLAVEONLY, (void *)0, (void *)0));
5038 }
5039 
5040 int
5041 ddi_dev_affinity(dev_info_t *a, dev_info_t *b)
5042 {
5043 	return (ddi_ctlops(a, a, DDI_CTLOPS_AFFINITY, (void *)b, (void *)0));
5044 }
5045 
5046 int
5047 ddi_streams_driver(dev_info_t *dip)
5048 {
5049 	if (i_ddi_devi_attached(dip) &&
5050 	    (DEVI(dip)->devi_ops->devo_cb_ops != NULL) &&
5051 	    (DEVI(dip)->devi_ops->devo_cb_ops->cb_str != NULL))
5052 		return (DDI_SUCCESS);
5053 	return (DDI_FAILURE);
5054 }
5055 
5056 /*
5057  * callback free list
5058  */
5059 
5060 static int ncallbacks;
5061 static int nc_low = 170;
5062 static int nc_med = 512;
5063 static int nc_high = 2048;
5064 static struct ddi_callback *callbackq;
5065 static struct ddi_callback *callbackqfree;
5066 
5067 /*
5068  * set/run callback lists
5069  */
5070 struct	cbstats	{
5071 	kstat_named_t	cb_asked;
5072 	kstat_named_t	cb_new;
5073 	kstat_named_t	cb_run;
5074 	kstat_named_t	cb_delete;
5075 	kstat_named_t	cb_maxreq;
5076 	kstat_named_t	cb_maxlist;
5077 	kstat_named_t	cb_alloc;
5078 	kstat_named_t	cb_runouts;
5079 	kstat_named_t	cb_L2;
5080 	kstat_named_t	cb_grow;
5081 } cbstats = {
5082 	{"asked",	KSTAT_DATA_UINT32},
5083 	{"new",		KSTAT_DATA_UINT32},
5084 	{"run",		KSTAT_DATA_UINT32},
5085 	{"delete",	KSTAT_DATA_UINT32},
5086 	{"maxreq",	KSTAT_DATA_UINT32},
5087 	{"maxlist",	KSTAT_DATA_UINT32},
5088 	{"alloc",	KSTAT_DATA_UINT32},
5089 	{"runouts",	KSTAT_DATA_UINT32},
5090 	{"L2",		KSTAT_DATA_UINT32},
5091 	{"grow",	KSTAT_DATA_UINT32},
5092 };
5093 
5094 #define	nc_asked	cb_asked.value.ui32
5095 #define	nc_new		cb_new.value.ui32
5096 #define	nc_run		cb_run.value.ui32
5097 #define	nc_delete	cb_delete.value.ui32
5098 #define	nc_maxreq	cb_maxreq.value.ui32
5099 #define	nc_maxlist	cb_maxlist.value.ui32
5100 #define	nc_alloc	cb_alloc.value.ui32
5101 #define	nc_runouts	cb_runouts.value.ui32
5102 #define	nc_L2		cb_L2.value.ui32
5103 #define	nc_grow		cb_grow.value.ui32
5104 
5105 static kmutex_t ddi_callback_mutex;
5106 
5107 /*
5108  * callbacks are handled using a L1/L2 cache. The L1 cache
5109  * comes out of kmem_cache_alloc and can expand/shrink dynamically. If
5110  * we can't get callbacks from the L1 cache [because pageout is doing
5111  * I/O at the time freemem is 0], we allocate callbacks out of the
5112  * L2 cache. The L2 cache is static and depends on the memory size.
5113  * [We might also count the number of devices at probe time and
5114  * allocate one structure per device and adjust for deferred attach]
5115  */
5116 void
5117 impl_ddi_callback_init(void)
5118 {
5119 	int	i;
5120 	uint_t	physmegs;
5121 	kstat_t	*ksp;
5122 
5123 	physmegs = physmem >> (20 - PAGESHIFT);
5124 	if (physmegs < 48) {
5125 		ncallbacks = nc_low;
5126 	} else if (physmegs < 128) {
5127 		ncallbacks = nc_med;
5128 	} else {
5129 		ncallbacks = nc_high;
5130 	}
5131 
5132 	/*
5133 	 * init free list
5134 	 */
5135 	callbackq = kmem_zalloc(
5136 	    ncallbacks * sizeof (struct ddi_callback), KM_SLEEP);
5137 	for (i = 0; i < ncallbacks-1; i++)
5138 		callbackq[i].c_nfree = &callbackq[i+1];
5139 	callbackqfree = callbackq;
5140 
5141 	/* init kstats */
5142 	if (ksp = kstat_create("unix", 0, "cbstats", "misc", KSTAT_TYPE_NAMED,
5143 	    sizeof (cbstats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL)) {
5144 		ksp->ks_data = (void *) &cbstats;
5145 		kstat_install(ksp);
5146 	}
5147 
5148 }
5149 
/*
 * callback_insert: Record `count' pending invocations of funcp(arg) on
 * the callback list headed at *listid.  If an entry for the same
 * (funcp, arg) pair already exists its count is bumped; otherwise a new
 * entry is appended, taken from kmem, then the static L2 pool, then a
 * KM_PANIC tryhard allocation as a last resort.  Callers serialize with
 * ddi_callback_mutex (see ddi_set_callback() and real_callback_run()).
 */
static void
callback_insert(int (*funcp)(caddr_t), caddr_t arg, uintptr_t *listid,
	int count)
{
	struct ddi_callback *list, *marker, *new;
	size_t size = sizeof (struct ddi_callback);

	/* Look for an existing entry for this (funcp, arg) pair. */
	list = marker = (struct ddi_callback *)*listid;
	while (list != NULL) {
		if (list->c_call == funcp && list->c_arg == arg) {
			list->c_count += count;
			return;
		}
		marker = list;
		list = list->c_nlist;
	}
	/* marker now points at the list tail (or NULL if list was empty). */
	new = kmem_alloc(size, KM_NOSLEEP);
	if (new == NULL) {
		/* L1 (kmem) failed: fall back to the static L2 pool. */
		new = callbackqfree;
		if (new == NULL) {
			/* L2 exhausted too: tryhard allocation, may panic. */
			new = kmem_alloc_tryhard(sizeof (struct ddi_callback),
			    &size, KM_NOSLEEP | KM_PANIC);
			cbstats.nc_grow++;
		} else {
			callbackqfree = new->c_nfree;
			cbstats.nc_L2++;
		}
	}
	if (marker != NULL) {
		marker->c_nlist = new;
	} else {
		*listid = (uintptr_t)new;
	}
	/* c_size remembers the actual allocation size for kmem_free(). */
	new->c_size = size;
	new->c_nlist = NULL;
	new->c_call = funcp;
	new->c_arg = arg;
	new->c_count = count;
	cbstats.nc_new++;
	cbstats.nc_alloc++;
	if (cbstats.nc_alloc > cbstats.nc_maxlist)
		cbstats.nc_maxlist = cbstats.nc_alloc;
}
5193 
5194 void
5195 ddi_set_callback(int (*funcp)(caddr_t), caddr_t arg, uintptr_t *listid)
5196 {
5197 	mutex_enter(&ddi_callback_mutex);
5198 	cbstats.nc_asked++;
5199 	if ((cbstats.nc_asked - cbstats.nc_run) > cbstats.nc_maxreq)
5200 		cbstats.nc_maxreq = (cbstats.nc_asked - cbstats.nc_run);
5201 	(void) callback_insert(funcp, arg, listid, 1);
5202 	mutex_exit(&ddi_callback_mutex);
5203 }
5204 
/*
 * real_callback_run: Softcall handler that drains the callback list
 * headed at *Queue.  Each entry is unlinked and freed (back to the
 * static pool if it came from there) before its function is invoked,
 * since the function may itself re-register.  A callback returning 0
 * means "couldn't make progress": the request is re-inserted and its
 * count restored.  `pending' tracks the total count snapshotted on
 * first entry so the loop terminates once that much work has run or
 * been re-queued.
 */
static void
real_callback_run(void *Queue)
{
	int (*funcp)(caddr_t);
	caddr_t arg;
	int count, rval;
	uintptr_t *listid;
	struct ddi_callback *list, *marker;
	int check_pending = 1;
	int pending = 0;

	do {
		mutex_enter(&ddi_callback_mutex);
		listid = Queue;
		list = (struct ddi_callback *)*listid;
		if (list == NULL) {
			mutex_exit(&ddi_callback_mutex);
			return;
		}
		if (check_pending) {
			/* Snapshot the total outstanding count, once. */
			marker = list;
			while (marker != NULL) {
				pending += marker->c_count;
				marker = marker->c_nlist;
			}
			check_pending = 0;
		}
		ASSERT(pending > 0);
		ASSERT(list->c_count > 0);
		/* Capture the entry, unlink it, and release it. */
		funcp = list->c_call;
		arg = list->c_arg;
		count = list->c_count;
		*(uintptr_t *)Queue = (uintptr_t)list->c_nlist;
		if (list >= &callbackq[0] &&
		    list <= &callbackq[ncallbacks-1]) {
			/* Entry came from the static pool; return it. */
			list->c_nfree = callbackqfree;
			callbackqfree = list;
		} else
			kmem_free(list, list->c_size);

		cbstats.nc_delete++;
		cbstats.nc_alloc--;
		mutex_exit(&ddi_callback_mutex);

		/* Invoke without the lock; the callback may re-register. */
		do {
			if ((rval = (*funcp)(arg)) == 0) {
				/* No progress: re-queue the whole count. */
				pending -= count;
				mutex_enter(&ddi_callback_mutex);
				(void) callback_insert(funcp, arg, listid,
					count);
				cbstats.nc_runouts++;
			} else {
				pending--;
				mutex_enter(&ddi_callback_mutex);
				cbstats.nc_run++;
			}
			mutex_exit(&ddi_callback_mutex);
		} while (rval != 0 && (--count > 0));
	} while (pending > 0);
}
5265 
/*
 * ddi_run_callback: Schedule the callbacks registered on *listid to be
 * drained by real_callback_run() in softcall context.
 */
void
ddi_run_callback(uintptr_t *listid)
{
	softcall(real_callback_run, listid);
}
5271 
/*
 * nodevinfo: Stub routine for operations tables; always reports that no
 * devinfo node is associated with the dev_t.
 */
dev_info_t *
nodevinfo(dev_t dev, int otyp)
{
	_NOTE(ARGUNUSED(dev, otyp))
	return ((dev_info_t *)0);
}
5278 
5279 /*
5280  * A driver should support its own getinfo(9E) entry point. This function
5281  * is provided as a convenience for ON drivers that don't expect their
5282  * getinfo(9E) entry point to be called. A driver that uses this must not
5283  * call ddi_create_minor_node.
5284  */
5285 int
5286 ddi_no_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
5287 {
5288 	_NOTE(ARGUNUSED(dip, infocmd, arg, result))
5289 	return (DDI_FAILURE);
5290 }
5291 
5292 /*
5293  * A driver should support its own getinfo(9E) entry point. This function
5294  * is provided as a convenience for ON drivers that where the minor number
5295  * is the instance. Drivers that do not have 1:1 mapping must implement
5296  * their own getinfo(9E) function.
5297  */
5298 int
5299 ddi_getinfo_1to1(dev_info_t *dip, ddi_info_cmd_t infocmd,
5300     void *arg, void **result)
5301 {
5302 	_NOTE(ARGUNUSED(dip))
5303 	int	instance;
5304 
5305 	if (infocmd != DDI_INFO_DEVT2INSTANCE)
5306 		return (DDI_FAILURE);
5307 
5308 	instance = getminor((dev_t)(uintptr_t)arg);
5309 	*result = (void *)(uintptr_t)instance;
5310 	return (DDI_SUCCESS);
5311 }
5312 
/*
 * ddifail: Stub attach-style entry point that always fails.
 */
int
ddifail(dev_info_t *devi, ddi_attach_cmd_t cmd)
{
	_NOTE(ARGUNUSED(devi, cmd))
	return (DDI_FAILURE);
}
5319 
/*
 * ddi_no_dma_map: Stub DMA map routine for nexi without DMA support;
 * always fails with DDI_DMA_NOMAPPING.
 */
int
ddi_no_dma_map(dev_info_t *dip, dev_info_t *rdip,
    struct ddi_dma_req *dmareqp, ddi_dma_handle_t *handlep)
{
	_NOTE(ARGUNUSED(dip, rdip, dmareqp, handlep))
	return (DDI_DMA_NOMAPPING);
}
5327 
/*
 * ddi_no_dma_allochdl: Stub DMA handle allocation routine; always fails
 * with DDI_DMA_BADATTR.
 */
int
ddi_no_dma_allochdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_attr_t *attr,
    int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *handlep)
{
	_NOTE(ARGUNUSED(dip, rdip, attr, waitfp, arg, handlep))
	return (DDI_DMA_BADATTR);
}
5335 
/*
 * ddi_no_dma_freehdl: Stub DMA handle free routine; always fails.
 */
int
ddi_no_dma_freehdl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle)
{
	_NOTE(ARGUNUSED(dip, rdip, handle))
	return (DDI_FAILURE);
}
5343 
/*
 * ddi_no_dma_bindhdl: Stub DMA bind routine; always fails with
 * DDI_DMA_NOMAPPING.
 */
int
ddi_no_dma_bindhdl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, struct ddi_dma_req *dmareq,
    ddi_dma_cookie_t *cp, uint_t *ccountp)
{
	_NOTE(ARGUNUSED(dip, rdip, handle, dmareq, cp, ccountp))
	return (DDI_DMA_NOMAPPING);
}
5352 
/*
 * ddi_no_dma_unbindhdl: Stub DMA unbind routine; always fails.
 */
int
ddi_no_dma_unbindhdl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle)
{
	_NOTE(ARGUNUSED(dip, rdip, handle))
	return (DDI_FAILURE);
}
5360 
/*
 * ddi_no_dma_flush: Stub DMA flush/sync routine; always fails.
 */
int
ddi_no_dma_flush(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, off_t off, size_t len,
    uint_t cache_flags)
{
	_NOTE(ARGUNUSED(dip, rdip, handle, off, len, cache_flags))
	return (DDI_FAILURE);
}
5369 
/*
 * ddi_no_dma_win: Stub DMA window routine; always fails.
 */
int
ddi_no_dma_win(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, uint_t win, off_t *offp,
    size_t *lenp, ddi_dma_cookie_t *cookiep, uint_t *ccountp)
{
	_NOTE(ARGUNUSED(dip, rdip, handle, win, offp, lenp, cookiep, ccountp))
	return (DDI_FAILURE);
}
5378 
/*
 * ddi_no_dma_mctl: Stub DMA control routine; always fails.
 */
int
ddi_no_dma_mctl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, enum ddi_dma_ctlops request,
    off_t *offp, size_t *lenp, caddr_t *objp, uint_t flags)
{
	_NOTE(ARGUNUSED(dip, rdip, handle, request, offp, lenp, objp, flags))
	return (DDI_FAILURE);
}
5387 
/*
 * ddivoid: A do-nothing routine for use in operations tables.
 */
void
ddivoid(void)
{}
5391 
/*
 * nochpoll: Default chpoll(9E) entry point for drivers that do not
 * support polling; always returns ENXIO.
 */
int
nochpoll(dev_t dev, short events, int anyyet, short *reventsp,
    struct pollhead **pollhdrp)
{
	_NOTE(ARGUNUSED(dev, events, anyyet, reventsp, pollhdrp))
	return (ENXIO);
}
5399 
/*
 * ddi_get_cred: Return the credentials of the current thread.
 */
cred_t *
ddi_get_cred(void)
{
	return (CRED());
}
5405 
/*
 * ddi_get_lbolt: Return the current value of lbolt (clock ticks since
 * boot).
 */
clock_t
ddi_get_lbolt(void)
{
	return (lbolt);
}
5411 
5412 time_t
5413 ddi_get_time(void)
5414 {
5415 	time_t	now;
5416 
5417 	if ((now = gethrestime_sec()) == 0) {
5418 		timestruc_t ts;
5419 		mutex_enter(&tod_lock);
5420 		ts = tod_get();
5421 		mutex_exit(&tod_lock);
5422 		return (ts.tv_sec);
5423 	} else {
5424 		return (now);
5425 	}
5426 }
5427 
5428 pid_t
5429 ddi_get_pid(void)
5430 {
5431 	return (ttoproc(curthread)->p_pid);
5432 }
5433 
/*
 * ddi_get_kt_did: Return the unique thread id of the current thread.
 */
kt_did_t
ddi_get_kt_did(void)
{
	return (curthread->t_did);
}
5439 
5440 /*
5441  * This function returns B_TRUE if the caller can reasonably expect that a call
5442  * to cv_wait_sig(9F), cv_timedwait_sig(9F), or qwait_sig(9F) could be awakened
5443  * by user-level signal.  If it returns B_FALSE, then the caller should use
5444  * other means to make certain that the wait will not hang "forever."
5445  *
5446  * It does not check the signal mask, nor for reception of any particular
5447  * signal.
5448  *
5449  * Currently, a thread can receive a signal if it's not a kernel thread and it
5450  * is not in the middle of exit(2) tear-down.  Threads that are in that
5451  * tear-down effectively convert cv_wait_sig to cv_wait, cv_timedwait_sig to
5452  * cv_timedwait, and qwait_sig to qwait.
5453  */
5454 boolean_t
5455 ddi_can_receive_sig(void)
5456 {
5457 	proc_t *pp;
5458 
5459 	if (curthread->t_proc_flag & TP_LWPEXIT)
5460 		return (B_FALSE);
5461 	if ((pp = ttoproc(curthread)) == NULL)
5462 		return (B_FALSE);
5463 	return (pp->p_as != &kas);
5464 }
5465 
5466 /*
5467  * Swap bytes in 16-bit [half-]words
5468  */
5469 void
5470 swab(void *src, void *dst, size_t nbytes)
5471 {
5472 	uchar_t *pf = (uchar_t *)src;
5473 	uchar_t *pt = (uchar_t *)dst;
5474 	uchar_t tmp;
5475 	int nshorts;
5476 
5477 	nshorts = nbytes >> 1;
5478 
5479 	while (--nshorts >= 0) {
5480 		tmp = *pf++;
5481 		*pt++ = *pf++;
5482 		*pt++ = tmp;
5483 	}
5484 }
5485 
5486 static void
5487 ddi_append_minor_node(dev_info_t *ddip, struct ddi_minor_data *dmdp)
5488 {
5489 	struct ddi_minor_data *dp;
5490 
5491 	mutex_enter(&(DEVI(ddip)->devi_lock));
5492 	i_devi_enter(ddip, DEVI_S_MD_UPDATE, DEVI_S_MD_UPDATE, 1);
5493 
5494 	if ((dp = DEVI(ddip)->devi_minor) == (struct ddi_minor_data *)NULL) {
5495 		DEVI(ddip)->devi_minor = dmdp;
5496 	} else {
5497 		while (dp->next != (struct ddi_minor_data *)NULL)
5498 			dp = dp->next;
5499 		dp->next = dmdp;
5500 	}
5501 
5502 	i_devi_exit(ddip, DEVI_S_MD_UPDATE, 1);
5503 	mutex_exit(&(DEVI(ddip)->devi_lock));
5504 }
5505 
5506 /*
5507  * Part of the obsolete SunCluster DDI Hooks.
5508  * Keep for binary compatibility
5509  */
5510 minor_t
5511 ddi_getiminor(dev_t dev)
5512 {
5513 	return (getminor(dev));
5514 }
5515 
/*
 * Log an EC_DEVFS / ESC_DEVFS_MINOR_CREATE sysevent announcing the new
 * minor node 'minor_name' (may be NULL) on 'dip'.  Always returns
 * DDI_SUCCESS: when the event cannot be constructed or delivered, a
 * warning advising a devfsadm run is issued instead of failing the
 * minor node creation itself.
 */
static int
i_log_devfs_minor_create(dev_info_t *dip, char *minor_name)
{
	int se_flag;
	int kmem_flag;
	int se_err;
	char *pathname;
	sysevent_t *ev = NULL;
	sysevent_id_t eid;
	sysevent_value_t se_val;
	sysevent_attr_list_t *ev_attr_list = NULL;

	/* determine interrupt context */
	se_flag = (servicing_interrupt()) ? SE_NOSLEEP : SE_SLEEP;
	kmem_flag = (se_flag == SE_SLEEP) ? KM_SLEEP : KM_NOSLEEP;

	i_ddi_di_cache_invalidate(kmem_flag);

#ifdef DEBUG
	if ((se_flag == SE_NOSLEEP) && sunddi_debug) {
		cmn_err(CE_CONT, "ddi_create_minor_node: called from "
		    "interrupt level by driver %s",
		    ddi_driver_name(dip));
	}
#endif /* DEBUG */

	ev = sysevent_alloc(EC_DEVFS, ESC_DEVFS_MINOR_CREATE, EP_DDI, se_flag);
	if (ev == NULL) {
		goto fail;
	}

	pathname = kmem_alloc(MAXPATHLEN, kmem_flag);
	if (pathname == NULL) {
		sysevent_free(ev);
		goto fail;
	}

	/* attach the node's /devices path as the DEVFS_PATHNAME attribute */
	(void) ddi_pathname(dip, pathname);
	ASSERT(strlen(pathname));
	se_val.value_type = SE_DATA_TYPE_STRING;
	se_val.value.sv_string = pathname;
	if (sysevent_add_attr(&ev_attr_list, DEVFS_PATHNAME,
	    &se_val, se_flag) != 0) {
		kmem_free(pathname, MAXPATHLEN);
		sysevent_free(ev);
		goto fail;
	}
	kmem_free(pathname, MAXPATHLEN);

	/*
	 * allow for NULL minor names
	 */
	if (minor_name != NULL) {
		se_val.value.sv_string = minor_name;
		if (sysevent_add_attr(&ev_attr_list, DEVFS_MINOR_NAME,
		    &se_val, se_flag) != 0) {
			sysevent_free_attr(ev_attr_list);
			sysevent_free(ev);
			goto fail;
		}
	}

	if (sysevent_attach_attributes(ev, ev_attr_list) != 0) {
		sysevent_free_attr(ev_attr_list);
		sysevent_free(ev);
		goto fail;
	}

	if ((se_err = log_sysevent(ev, se_flag, &eid)) != 0) {
		if (se_err == SE_NO_TRANSPORT) {
			/* syseventd is not responding; warn but succeed */
			cmn_err(CE_WARN, "/devices or /dev may not be current "
			    "for driver %s (%s). Run devfsadm -i %s",
			    ddi_driver_name(dip), "syseventd not responding",
			    ddi_driver_name(dip));
		} else {
			sysevent_free(ev);
			goto fail;
		}
	}

	sysevent_free(ev);
	return (DDI_SUCCESS);
fail:
	cmn_err(CE_WARN, "/devices or /dev may not be current "
	    "for driver %s. Run devfsadm -i %s",
	    ddi_driver_name(dip), ddi_driver_name(dip));
	return (DDI_SUCCESS);
}
5604 
5605 /*
5606  * failing to remove a minor node is not of interest
5607  * therefore we do not generate an error message
5608  */
5609 static int
5610 i_log_devfs_minor_remove(dev_info_t *dip, char *minor_name)
5611 {
5612 	char *pathname;
5613 	sysevent_t *ev;
5614 	sysevent_id_t eid;
5615 	sysevent_value_t se_val;
5616 	sysevent_attr_list_t *ev_attr_list = NULL;
5617 
5618 	/*
5619 	 * only log ddi_remove_minor_node() calls outside the scope
5620 	 * of attach/detach reconfigurations and when the dip is
5621 	 * still initialized.
5622 	 */
5623 	if (DEVI_IS_ATTACHING(dip) || DEVI_IS_DETACHING(dip) ||
5624 	    (i_ddi_node_state(dip) < DS_INITIALIZED)) {
5625 		return (DDI_SUCCESS);
5626 	}
5627 
5628 	i_ddi_di_cache_invalidate(KM_SLEEP);
5629 
5630 	ev = sysevent_alloc(EC_DEVFS, ESC_DEVFS_MINOR_REMOVE, EP_DDI, SE_SLEEP);
5631 	if (ev == NULL) {
5632 		return (DDI_SUCCESS);
5633 	}
5634 
5635 	pathname = kmem_alloc(MAXPATHLEN, KM_SLEEP);
5636 	if (pathname == NULL) {
5637 		sysevent_free(ev);
5638 		return (DDI_SUCCESS);
5639 	}
5640 
5641 	(void) ddi_pathname(dip, pathname);
5642 	ASSERT(strlen(pathname));
5643 	se_val.value_type = SE_DATA_TYPE_STRING;
5644 	se_val.value.sv_string = pathname;
5645 	if (sysevent_add_attr(&ev_attr_list, DEVFS_PATHNAME,
5646 	    &se_val, SE_SLEEP) != 0) {
5647 		kmem_free(pathname, MAXPATHLEN);
5648 		sysevent_free(ev);
5649 		return (DDI_SUCCESS);
5650 	}
5651 
5652 	kmem_free(pathname, MAXPATHLEN);
5653 
5654 	/*
5655 	 * allow for NULL minor names
5656 	 */
5657 	if (minor_name != NULL) {
5658 		se_val.value.sv_string = minor_name;
5659 		if (sysevent_add_attr(&ev_attr_list, DEVFS_MINOR_NAME,
5660 		    &se_val, SE_SLEEP) != 0) {
5661 			sysevent_free_attr(ev_attr_list);
5662 			goto fail;
5663 		}
5664 	}
5665 
5666 	if (sysevent_attach_attributes(ev, ev_attr_list) != 0) {
5667 		sysevent_free_attr(ev_attr_list);
5668 	} else {
5669 		(void) log_sysevent(ev, SE_SLEEP, &eid);
5670 	}
5671 fail:
5672 	sysevent_free(ev);
5673 	return (DDI_SUCCESS);
5674 }
5675 
5676 /*
5677  * Derive the device class of the node.
5678  * Device class names aren't defined yet. Until this is done we use
5679  * devfs event subclass names as device class names.
5680  */
5681 static int
5682 derive_devi_class(dev_info_t *dip, char *node_type, int flag)
5683 {
5684 	int rv = DDI_SUCCESS;
5685 
5686 	if (i_ddi_devi_class(dip) == NULL) {
5687 		if (strncmp(node_type, DDI_NT_BLOCK,
5688 		    sizeof (DDI_NT_BLOCK) - 1) == 0 &&
5689 		    (node_type[sizeof (DDI_NT_BLOCK) - 1] == '\0' ||
5690 		    node_type[sizeof (DDI_NT_BLOCK) - 1] == ':') &&
5691 		    strcmp(node_type, DDI_NT_FD) != 0) {
5692 
5693 			rv = i_ddi_set_devi_class(dip, ESC_DISK, flag);
5694 
5695 		} else if (strncmp(node_type, DDI_NT_NET,
5696 		    sizeof (DDI_NT_NET) - 1) == 0 &&
5697 		    (node_type[sizeof (DDI_NT_NET) - 1] == '\0' ||
5698 		    node_type[sizeof (DDI_NT_NET) - 1] == ':')) {
5699 
5700 			rv = i_ddi_set_devi_class(dip, ESC_NETWORK, flag);
5701 
5702 		} else if (strncmp(node_type, DDI_NT_PRINTER,
5703 		    sizeof (DDI_NT_PRINTER) - 1) == 0 &&
5704 		    (node_type[sizeof (DDI_NT_PRINTER) - 1] == '\0' ||
5705 		    node_type[sizeof (DDI_NT_PRINTER) - 1] == ':')) {
5706 
5707 			rv = i_ddi_set_devi_class(dip, ESC_PRINTER, flag);
5708 		}
5709 	}
5710 
5711 	return (rv);
5712 }
5713 
5714 /*
5715  * Check compliance with PSARC 2003/375:
5716  *
5717  * The name must contain only characters a-z, A-Z, 0-9 or _ and it must not
5718  * exceed IFNAMSIZ (16) characters in length.
5719  */
5720 static boolean_t
5721 verify_name(char *name)
5722 {
5723 	size_t	len = strlen(name);
5724 	char	*cp;
5725 
5726 	if (len == 0 || len > IFNAMSIZ)
5727 		return (B_FALSE);
5728 
5729 	for (cp = name; *cp != '\0'; cp++) {
5730 		if (!isalnum(*cp) && *cp != '_')
5731 			return (B_FALSE);
5732 	}
5733 
5734 	return (B_TRUE);
5735 }
5736 
5737 /*
5738  * ddi_create_minor_common:	Create a  ddi_minor_data structure and
5739  *				attach it to the given devinfo node.
5740  */
5741 
5742 int
5743 ddi_create_minor_common(dev_info_t *dip, char *name, int spec_type,
5744     minor_t minor_num, char *node_type, int flag, ddi_minor_type mtype,
5745     const char *read_priv, const char *write_priv, mode_t priv_mode)
5746 {
5747 	struct ddi_minor_data *dmdp;
5748 	major_t major;
5749 
5750 	if (spec_type != S_IFCHR && spec_type != S_IFBLK)
5751 		return (DDI_FAILURE);
5752 
5753 	if (name == NULL)
5754 		return (DDI_FAILURE);
5755 
5756 	/*
5757 	 * Log a message if the minor number the driver is creating
5758 	 * is not expressible on the on-disk filesystem (currently
5759 	 * this is limited to 18 bits both by UFS). The device can
5760 	 * be opened via devfs, but not by device special files created
5761 	 * via mknod().
5762 	 */
5763 	if (minor_num > L_MAXMIN32) {
5764 		cmn_err(CE_WARN,
5765 		    "%s%d:%s minor 0x%x too big for 32-bit applications",
5766 		    ddi_driver_name(dip), ddi_get_instance(dip),
5767 		    name, minor_num);
5768 		return (DDI_FAILURE);
5769 	}
5770 
5771 	/* dip must be bound and attached */
5772 	major = ddi_driver_major(dip);
5773 	ASSERT(major != (major_t)-1);
5774 
5775 	/*
5776 	 * Default node_type to DDI_PSEUDO and issue notice in debug mode
5777 	 */
5778 	if (node_type == NULL) {
5779 		node_type = DDI_PSEUDO;
5780 		NDI_CONFIG_DEBUG((CE_NOTE, "!illegal node_type NULL for %s%d "
5781 		    " minor node %s; default to DDI_PSEUDO",
5782 		    ddi_driver_name(dip), ddi_get_instance(dip), name));
5783 	}
5784 
5785 	/*
5786 	 * If the driver is a network driver, ensure that the name falls within
5787 	 * the interface naming constraints specified by PSARC/2003/375.
5788 	 */
5789 	if (strcmp(node_type, DDI_NT_NET) == 0) {
5790 		if (!verify_name(name))
5791 			return (DDI_FAILURE);
5792 
5793 		if (mtype == DDM_MINOR) {
5794 			struct devnames *dnp = &devnamesp[major];
5795 
5796 			/* Mark driver as a network driver */
5797 			LOCK_DEV_OPS(&dnp->dn_lock);
5798 			dnp->dn_flags |= DN_NETWORK_DRIVER;
5799 			UNLOCK_DEV_OPS(&dnp->dn_lock);
5800 		}
5801 	}
5802 
5803 	if (mtype == DDM_MINOR) {
5804 		if (derive_devi_class(dip,  node_type, KM_NOSLEEP) !=
5805 		    DDI_SUCCESS)
5806 			return (DDI_FAILURE);
5807 	}
5808 
5809 	/*
5810 	 * Take care of minor number information for the node.
5811 	 */
5812 
5813 	if ((dmdp = kmem_zalloc(sizeof (struct ddi_minor_data),
5814 	    KM_NOSLEEP)) == NULL) {
5815 		return (DDI_FAILURE);
5816 	}
5817 	if ((dmdp->ddm_name = i_ddi_strdup(name, KM_NOSLEEP)) == NULL) {
5818 		kmem_free(dmdp, sizeof (struct ddi_minor_data));
5819 		return (DDI_FAILURE);
5820 	}
5821 	dmdp->dip = dip;
5822 	dmdp->ddm_dev = makedevice(major, minor_num);
5823 	dmdp->ddm_spec_type = spec_type;
5824 	dmdp->ddm_node_type = node_type;
5825 	dmdp->type = mtype;
5826 	if (flag & CLONE_DEV) {
5827 		dmdp->type = DDM_ALIAS;
5828 		dmdp->ddm_dev = makedevice(ddi_driver_major(clone_dip), major);
5829 	}
5830 	if (flag & PRIVONLY_DEV) {
5831 		dmdp->ddm_flags |= DM_NO_FSPERM;
5832 	}
5833 	if (read_priv || write_priv) {
5834 		dmdp->ddm_node_priv =
5835 		    devpolicy_priv_by_name(read_priv, write_priv);
5836 	}
5837 	dmdp->ddm_priv_mode = priv_mode;
5838 
5839 	ddi_append_minor_node(dip, dmdp);
5840 
5841 	/*
5842 	 * only log ddi_create_minor_node() calls which occur
5843 	 * outside the scope of attach(9e)/detach(9e) reconfigurations
5844 	 */
5845 	if (!(DEVI_IS_ATTACHING(dip) || DEVI_IS_DETACHING(dip)) &&
5846 	    mtype != DDM_INTERNAL_PATH) {
5847 		(void) i_log_devfs_minor_create(dip, name);
5848 	}
5849 
5850 	/*
5851 	 * Check if any dacf rules match the creation of this minor node
5852 	 */
5853 	dacfc_match_create_minor(name, node_type, dip, dmdp, flag);
5854 	return (DDI_SUCCESS);
5855 }
5856 
/*
 * Create a standard (DDM_MINOR) minor node with no privilege
 * requirements; see ddi_create_minor_common().
 */
int
ddi_create_minor_node(dev_info_t *dip, char *name, int spec_type,
    minor_t minor_num, char *node_type, int flag)
{
	return (ddi_create_minor_common(dip, name, spec_type, minor_num,
	    node_type, flag, DDM_MINOR, NULL, NULL, 0));
}
5864 
/*
 * Create a DDM_MINOR minor node whose access additionally requires the
 * given read/write privileges and mode; see ddi_create_minor_common().
 */
int
ddi_create_priv_minor_node(dev_info_t *dip, char *name, int spec_type,
    minor_t minor_num, char *node_type, int flag,
    const char *rdpriv, const char *wrpriv, mode_t priv_mode)
{
	return (ddi_create_minor_common(dip, name, spec_type, minor_num,
	    node_type, flag, DDM_MINOR, rdpriv, wrpriv, priv_mode));
}
5873 
/*
 * Create a default (DDM_DEFAULT) minor node; see
 * ddi_create_minor_common().
 */
int
ddi_create_default_minor_node(dev_info_t *dip, char *name, int spec_type,
    minor_t minor_num, char *node_type, int flag)
{
	return (ddi_create_minor_common(dip, name, spec_type, minor_num,
	    node_type, flag, DDM_DEFAULT, NULL, NULL, 0));
}
5881 
5882 /*
5883  * Internal (non-ddi) routine for drivers to export names known
5884  * to the kernel (especially ddi_pathname_to_dev_t and friends)
5885  * but not exported externally to /dev
5886  */
5887 int
5888 ddi_create_internal_pathname(dev_info_t *dip, char *name, int spec_type,
5889     minor_t minor_num)
5890 {
5891 	return (ddi_create_minor_common(dip, name, spec_type, minor_num,
5892 	    "internal", 0, DDM_INTERNAL_PATH, NULL, NULL, 0));
5893 }
5894 
/*
 * Remove the minor node named 'name' from dip's minor list, or all of
 * dip's minor nodes if name is NULL.  For each node removed: a devfs
 * remove sysevent is logged (unless the node is DDM_INTERNAL_PATH),
 * and the node's name string, device privilege, and dacf client data
 * are released.  Runs under devi_lock and the DEVI_S_MD_UPDATE state.
 */
void
ddi_remove_minor_node(dev_info_t *dip, char *name)
{
	struct ddi_minor_data *dmdp, *dmdp1;
	struct ddi_minor_data **dmdp_prev;

	mutex_enter(&(DEVI(dip)->devi_lock));
	i_devi_enter(dip, DEVI_S_MD_UPDATE, DEVI_S_MD_UPDATE, 1);

	dmdp_prev = &DEVI(dip)->devi_minor;
	dmdp = DEVI(dip)->devi_minor;
	while (dmdp != NULL) {
		dmdp1 = dmdp->next;
		/* name == NULL means "remove every minor node" */
		if ((name == NULL || (dmdp->ddm_name != NULL &&
		    strcmp(name, dmdp->ddm_name) == 0))) {
			if (dmdp->ddm_name != NULL) {
				if (dmdp->type != DDM_INTERNAL_PATH)
					(void) i_log_devfs_minor_remove(dip,
					    dmdp->ddm_name);
				kmem_free(dmdp->ddm_name,
				    strlen(dmdp->ddm_name) + 1);
			}
			/*
			 * Release device privilege, if any.
			 * Release dacf client data associated with this minor
			 * node by storing NULL.
			 */
			if (dmdp->ddm_node_priv)
				dpfree(dmdp->ddm_node_priv);
			dacf_store_info((dacf_infohdl_t)dmdp, NULL);
			kmem_free(dmdp, sizeof (struct ddi_minor_data));
			*dmdp_prev = dmdp1;
			/*
			 * OK, we found it, so get out now -- if we drive on,
			 * we will strcmp against garbage.  See 1139209.
			 */
			if (name != NULL)
				break;
		} else {
			dmdp_prev = &dmdp->next;
		}
		dmdp = dmdp1;
	}

	i_devi_exit(dip, DEVI_S_MD_UPDATE, 1);
	mutex_exit(&(DEVI(dip)->devi_lock));
}
5942 
5943 
5944 int
5945 ddi_in_panic()
5946 {
5947 	return (panicstr != NULL);
5948 }
5949 
5950 
5951 /*
5952  * Find first bit set in a mask (returned counting from 1 up)
5953  */
5954 
5955 int
5956 ddi_ffs(long mask)
5957 {
5958 	extern int ffs(long mask);
5959 	return (ffs(mask));
5960 }
5961 
5962 /*
5963  * Find last bit set. Take mask and clear
5964  * all but the most significant bit, and
5965  * then let ffs do the rest of the work.
5966  *
5967  * Algorithm courtesy of Steve Chessin.
5968  */
5969 
5970 int
5971 ddi_fls(long mask)
5972 {
5973 	extern int ffs(long);
5974 
5975 	while (mask) {
5976 		long nx;
5977 
5978 		if ((nx = (mask & (mask - 1))) == 0)
5979 			break;
5980 		mask = nx;
5981 	}
5982 	return (ffs(mask));
5983 }
5984 
5985 /*
5986  * The next five routines comprise generic storage management utilities
5987  * for driver soft state structures (in "the old days," this was done
5988  * with a statically sized array - big systems and dynamic loading
5989  * and unloading make heap allocation more attractive)
5990  */
5991 
5992 /*
5993  * Allocate a set of pointers to 'n_items' objects of size 'size'
5994  * bytes.  Each pointer is initialized to nil.
5995  *
5996  * The 'size' and 'n_items' values are stashed in the opaque
5997  * handle returned to the caller.
5998  *
5999  * This implementation interprets 'set of pointers' to mean 'array
6000  * of pointers' but note that nothing in the interface definition
6001  * precludes an implementation that uses, for example, a linked list.
6002  * However there should be a small efficiency gain from using an array
6003  * at lookup time.
6004  *
6005  * NOTE	As an optimization, we make our growable array allocations in
6006  *	powers of two (bytes), since that's how much kmem_alloc (currently)
6007  *	gives us anyway.  It should save us some free/realloc's ..
6008  *
6009  *	As a further optimization, we make the growable array start out
6010  *	with MIN_N_ITEMS in it.
6011  */
6012 
6013 #define	MIN_N_ITEMS	8	/* 8 void *'s == 32 bytes */
6014 
/*
 * Initialize a soft state set: allocate the i_ddi_soft_state handle
 * and an initial pointer array of at least n_items entries, rounded
 * up to a power of two (minimum MIN_N_ITEMS).  Stores the opaque
 * handle through state_p.
 *
 * Returns 0 on success, EINVAL on bad arguments.
 */
int
ddi_soft_state_init(void **state_p, size_t size, size_t n_items)
{
	struct i_ddi_soft_state *ss;

	if (state_p == NULL || *state_p != NULL || size == 0)
		return (EINVAL);

	ss = kmem_zalloc(sizeof (*ss), KM_SLEEP);
	mutex_init(&ss->lock, NULL, MUTEX_DRIVER, NULL);
	ss->size = size;

	if (n_items < MIN_N_ITEMS)
		ss->n_items = MIN_N_ITEMS;
	else {
		int bitlog;

		/*
		 * Round n_items up to the next power of two; when
		 * n_items is already a power of two (fls == ffs),
		 * back off one so 1 << bitlog equals n_items itself.
		 */
		if ((bitlog = ddi_fls(n_items)) == ddi_ffs(n_items))
			bitlog--;
		ss->n_items = 1 << bitlog;
	}

	ASSERT(ss->n_items >= n_items);

	ss->array = kmem_zalloc(ss->n_items * sizeof (void *), KM_SLEEP);

	*state_p = ss;

	return (0);
}
6045 
6046 
6047 /*
6048  * Allocate a state structure of size 'size' to be associated
6049  * with item 'item'.
6050  *
6051  * In this implementation, the array is extended to
6052  * allow the requested offset, if needed.
6053  */
6054 int
6055 ddi_soft_state_zalloc(void *state, int item)
6056 {
6057 	struct i_ddi_soft_state *ss;
6058 	void **array;
6059 	void *new_element;
6060 
6061 	if ((ss = state) == NULL || item < 0)
6062 		return (DDI_FAILURE);
6063 
6064 	mutex_enter(&ss->lock);
6065 	if (ss->size == 0) {
6066 		mutex_exit(&ss->lock);
6067 		cmn_err(CE_WARN, "ddi_soft_state_zalloc: bad handle: %s",
6068 		    mod_containing_pc(caller()));
6069 		return (DDI_FAILURE);
6070 	}
6071 
6072 	array = ss->array;	/* NULL if ss->n_items == 0 */
6073 	ASSERT(ss->n_items != 0 && array != NULL);
6074 
6075 	/*
6076 	 * refuse to tread on an existing element
6077 	 */
6078 	if (item < ss->n_items && array[item] != NULL) {
6079 		mutex_exit(&ss->lock);
6080 		return (DDI_FAILURE);
6081 	}
6082 
6083 	/*
6084 	 * Allocate a new element to plug in
6085 	 */
6086 	new_element = kmem_zalloc(ss->size, KM_SLEEP);
6087 
6088 	/*
6089 	 * Check if the array is big enough, if not, grow it.
6090 	 */
6091 	if (item >= ss->n_items) {
6092 		void	**new_array;
6093 		size_t	new_n_items;
6094 		struct i_ddi_soft_state *dirty;
6095 
6096 		/*
6097 		 * Allocate a new array of the right length, copy
6098 		 * all the old pointers to the new array, then
6099 		 * if it exists at all, put the old array on the
6100 		 * dirty list.
6101 		 *
6102 		 * Note that we can't kmem_free() the old array.
6103 		 *
6104 		 * Why -- well the 'get' operation is 'mutex-free', so we
6105 		 * can't easily catch a suspended thread that is just about
6106 		 * to dereference the array we just grew out of.  So we
6107 		 * cons up a header and put it on a list of 'dirty'
6108 		 * pointer arrays.  (Dirty in the sense that there may
6109 		 * be suspended threads somewhere that are in the middle
6110 		 * of referencing them).  Fortunately, we -can- garbage
6111 		 * collect it all at ddi_soft_state_fini time.
6112 		 */
6113 		new_n_items = ss->n_items;
6114 		while (new_n_items < (1 + item))
6115 			new_n_items <<= 1;	/* double array size .. */
6116 
6117 		ASSERT(new_n_items >= (1 + item));	/* sanity check! */
6118 
6119 		new_array = kmem_zalloc(new_n_items * sizeof (void *),
6120 		    KM_SLEEP);
6121 		/*
6122 		 * Copy the pointers into the new array
6123 		 */
6124 		bcopy(array, new_array, ss->n_items * sizeof (void *));
6125 
6126 		/*
6127 		 * Save the old array on the dirty list
6128 		 */
6129 		dirty = kmem_zalloc(sizeof (*dirty), KM_SLEEP);
6130 		dirty->array = ss->array;
6131 		dirty->n_items = ss->n_items;
6132 		dirty->next = ss->next;
6133 		ss->next = dirty;
6134 
6135 		ss->array = (array = new_array);
6136 		ss->n_items = new_n_items;
6137 	}
6138 
6139 	ASSERT(array != NULL && item < ss->n_items && array[item] == NULL);
6140 
6141 	array[item] = new_element;
6142 
6143 	mutex_exit(&ss->lock);
6144 	return (DDI_SUCCESS);
6145 }
6146 
6147 
6148 /*
6149  * Fetch a pointer to the allocated soft state structure.
6150  *
6151  * This is designed to be cheap.
6152  *
6153  * There's an argument that there should be more checking for
6154  * nil pointers and out of bounds on the array.. but we do a lot
6155  * of that in the alloc/free routines.
6156  *
6157  * An array has the convenience that we don't need to lock read-access
6158  * to it c.f. a linked list.  However our "expanding array" strategy
6159  * means that we should hold a readers lock on the i_ddi_soft_state
6160  * structure.
6161  *
6162  * However, from a performance viewpoint, we need to do it without
6163  * any locks at all -- this also makes it a leaf routine.  The algorithm
6164  * is 'lock-free' because we only discard the pointer arrays at
6165  * ddi_soft_state_fini() time.
6166  */
6167 void *
6168 ddi_get_soft_state(void *state, int item)
6169 {
6170 	struct i_ddi_soft_state *ss = state;
6171 
6172 	ASSERT(ss != NULL && item >= 0);
6173 
6174 	if (item < ss->n_items && ss->array != NULL)
6175 		return (ss->array[item]);
6176 	return (NULL);
6177 }
6178 
6179 /*
6180  * Free the state structure corresponding to 'item.'   Freeing an
6181  * element that has either gone or was never allocated is not
6182  * considered an error.  Note that we free the state structure, but
6183  * we don't shrink our pointer array, or discard 'dirty' arrays,
6184  * since even a few pointers don't really waste too much memory.
6185  *
6186  * Passing an item number that is out of bounds, or a null pointer will
6187  * provoke an error message.
6188  */
6189 void
6190 ddi_soft_state_free(void *state, int item)
6191 {
6192 	struct i_ddi_soft_state *ss;
6193 	void **array;
6194 	void *element;
6195 	static char msg[] = "ddi_soft_state_free:";
6196 
6197 	if ((ss = state) == NULL) {
6198 		cmn_err(CE_WARN, "%s null handle: %s",
6199 		    msg, mod_containing_pc(caller()));
6200 		return;
6201 	}
6202 
6203 	element = NULL;
6204 
6205 	mutex_enter(&ss->lock);
6206 
6207 	if ((array = ss->array) == NULL || ss->size == 0) {
6208 		cmn_err(CE_WARN, "%s bad handle: %s",
6209 		    msg, mod_containing_pc(caller()));
6210 	} else if (item < 0 || item >= ss->n_items) {
6211 		cmn_err(CE_WARN, "%s item %d not in range [0..%lu]: %s",
6212 		    msg, item, ss->n_items - 1, mod_containing_pc(caller()));
6213 	} else if (array[item] != NULL) {
6214 		element = array[item];
6215 		array[item] = NULL;
6216 	}
6217 
6218 	mutex_exit(&ss->lock);
6219 
6220 	if (element)
6221 		kmem_free(element, ss->size);
6222 }
6223 
6224 
6225 /*
6226  * Free the entire set of pointers, and any
6227  * soft state structures contained therein.
6228  *
6229  * Note that we don't grab the ss->lock mutex, even though
6230  * we're inspecting the various fields of the data structure.
6231  *
6232  * There is an implicit assumption that this routine will
6233  * never run concurrently with any of the above on this
6234  * particular state structure i.e. by the time the driver
6235  * calls this routine, there should be no other threads
6236  * running in the driver.
6237  */
6238 void
6239 ddi_soft_state_fini(void **state_p)
6240 {
6241 	struct i_ddi_soft_state *ss, *dirty;
6242 	int item;
6243 	static char msg[] = "ddi_soft_state_fini:";
6244 
6245 	if (state_p == NULL || (ss = *state_p) == NULL) {
6246 		cmn_err(CE_WARN, "%s null handle: %s",
6247 		    msg, mod_containing_pc(caller()));
6248 		return;
6249 	}
6250 
6251 	if (ss->size == 0) {
6252 		cmn_err(CE_WARN, "%s bad handle: %s",
6253 		    msg, mod_containing_pc(caller()));
6254 		return;
6255 	}
6256 
6257 	if (ss->n_items > 0) {
6258 		for (item = 0; item < ss->n_items; item++)
6259 			ddi_soft_state_free(ss, item);
6260 		kmem_free(ss->array, ss->n_items * sizeof (void *));
6261 	}
6262 
6263 	/*
6264 	 * Now delete any dirty arrays from previous 'grow' operations
6265 	 */
6266 	for (dirty = ss->next; dirty; dirty = ss->next) {
6267 		ss->next = dirty->next;
6268 		kmem_free(dirty->array, dirty->n_items * sizeof (void *));
6269 		kmem_free(dirty, sizeof (*dirty));
6270 	}
6271 
6272 	mutex_destroy(&ss->lock);
6273 	kmem_free(ss, sizeof (*ss));
6274 
6275 	*state_p = NULL;
6276 }
6277 
6278 /*
6279  * This sets the devi_addr entry in the dev_info structure 'dip' to 'name'.
6280  * Storage is double buffered to prevent updates during devi_addr use -
6281  * double buffering is adaquate for reliable ddi_deviname() consumption.
6282  * The double buffer is not freed until dev_info structure destruction
6283  * (by i_ddi_free_node).
6284  */
6285 void
6286 ddi_set_name_addr(dev_info_t *dip, char *name)
6287 {
6288 	char	*buf = DEVI(dip)->devi_addr_buf;
6289 	char	*newaddr;
6290 
6291 	if (buf == NULL) {
6292 		buf = kmem_zalloc(2 * MAXNAMELEN, KM_SLEEP);
6293 		DEVI(dip)->devi_addr_buf = buf;
6294 	}
6295 
6296 	if (name) {
6297 		ASSERT(strlen(name) < MAXNAMELEN);
6298 		newaddr = (DEVI(dip)->devi_addr == buf) ?
6299 		    (buf + MAXNAMELEN) : buf;
6300 		(void) strlcpy(newaddr, name, MAXNAMELEN);
6301 	} else
6302 		newaddr = NULL;
6303 
6304 	DEVI(dip)->devi_addr = newaddr;
6305 }
6306 
/*
 * Return the unit-address string set by ddi_set_name_addr()
 * (may be NULL if no address has been set).
 */
char *
ddi_get_name_addr(dev_info_t *dip)
{
	return (DEVI(dip)->devi_addr);
}
6312 
/*
 * Attach parent-private data 'pd' to the devinfo node.
 */
void
ddi_set_parent_data(dev_info_t *dip, void *pd)
{
	DEVI(dip)->devi_parent_data = pd;
}
6318 
/*
 * Return the parent-private data set by ddi_set_parent_data().
 */
void *
ddi_get_parent_data(dev_info_t *dip)
{
	return (DEVI(dip)->devi_parent_data);
}
6324 
6325 /*
6326  * ddi_name_to_major: Returns the major number of a module given its name.
6327  */
6328 major_t
6329 ddi_name_to_major(char *name)
6330 {
6331 	return (mod_name_to_major(name));
6332 }
6333 
6334 /*
6335  * ddi_major_to_name: Returns the module name bound to a major number.
6336  */
6337 char *
6338 ddi_major_to_name(major_t major)
6339 {
6340 	return (mod_major_to_name(major));
6341 }
6342 
6343 /*
6344  * Return the name of the devinfo node pointed at by 'dip' in the buffer
6345  * pointed at by 'name.'  A devinfo node is named as a result of calling
6346  * ddi_initchild().
6347  *
6348  * Note: the driver must be held before calling this function!
6349  */
6350 char *
6351 ddi_deviname(dev_info_t *dip, char *name)
6352 {
6353 	char *addrname;
6354 	char none = '\0';
6355 
6356 	if (dip == ddi_root_node()) {
6357 		*name = '\0';
6358 		return (name);
6359 	}
6360 
6361 	if (i_ddi_node_state(dip) < DS_INITIALIZED) {
6362 		addrname = &none;
6363 	} else {
6364 		addrname = ddi_get_name_addr(dip);
6365 	}
6366 
6367 	if (*addrname == '\0') {
6368 		(void) sprintf(name, "/%s", ddi_node_name(dip));
6369 	} else {
6370 		(void) sprintf(name, "/%s@%s", ddi_node_name(dip), addrname);
6371 	}
6372 
6373 	return (name);
6374 }
6375 
6376 /*
6377  * Spits out the name of device node, typically name@addr, for a given node,
6378  * using the driver name, not the nodename.
6379  *
6380  * Used by match_parent. Not to be used elsewhere.
6381  */
6382 char *
6383 i_ddi_parname(dev_info_t *dip, char *name)
6384 {
6385 	char *addrname;
6386 
6387 	if (dip == ddi_root_node()) {
6388 		*name = '\0';
6389 		return (name);
6390 	}
6391 
6392 	ASSERT(i_ddi_node_state(dip) >= DS_INITIALIZED);
6393 
6394 	if (*(addrname = ddi_get_name_addr(dip)) == '\0')
6395 		(void) sprintf(name, "%s", ddi_binding_name(dip));
6396 	else
6397 		(void) sprintf(name, "%s@%s", ddi_binding_name(dip), addrname);
6398 	return (name);
6399 }
6400 
/*
 * Recursively build the full devinfo pathname of 'dip' into 'path':
 * first emit the parent's path, then append this node's "/name@addr"
 * component via ddi_deviname().  The root node yields "".
 */
static char *
pathname_work(dev_info_t *dip, char *path)
{
	char *bp;

	if (dip == ddi_root_node()) {
		*path = '\0';
		return (path);
	}
	(void) pathname_work(ddi_get_parent(dip), path);
	/* append this node's component after the parent's path */
	bp = path + strlen(path);
	(void) ddi_deviname(dip, bp);
	return (path);
}
6415 
/*
 * Build the full devinfo pathname of 'dip' into the caller-supplied
 * buffer 'path' and return it; callers in this file pass
 * MAXPATHLEN-byte buffers.
 */
char *
ddi_pathname(dev_info_t *dip, char *path)
{
	return (pathname_work(dip, path));
}
6421 
6422 /*
6423  * Given a dev_t, return the pathname of the corresponding device in the
6424  * buffer pointed at by "path."  The buffer is assumed to be large enough
6425  * to hold the pathname of the device (MAXPATHLEN).
6426  *
6427  * The pathname of a device is the pathname of the devinfo node to which
6428  * the device "belongs," concatenated with the character ':' and the name
6429  * of the minor node corresponding to the dev_t.  If spec_type is 0 then
6430  * just the pathname of the devinfo node is returned without driving attach
6431  * of that node.  For a non-zero spec_type, an attach is performed and a
6432  * search of the minor list occurs.
6433  *
6434  * It is possible that the path associated with the dev_t is not
6435  * currently available in the devinfo tree.  In order to have a
6436  * dev_t, a device must have been discovered before, which means
6437  * that the path is always in the instance tree.  The one exception
6438  * to this is if the dev_t is associated with a pseudo driver, in
6439  * which case the device must exist on the pseudo branch of the
6440  * devinfo tree as a result of parsing .conf files.
6441  */
/*
 * See the block comment above for the full contract.  Returns
 * DDI_SUCCESS with the path in 'path', or DDI_FAILURE with *path = 0.
 */
int
ddi_dev_pathname(dev_t devt, int spec_type, char *path)
{
	major_t		major = getmajor(devt);
	int		instance;
	dev_info_t	*dip;
	char		*minorname;
	char		*drvname;

	/* reject major numbers beyond the configured device table */
	if (major >= devcnt)
		goto fail;
	if (major == clone_major) {
		/* clone has no minor nodes, manufacture the path here */
		if ((drvname = ddi_major_to_name(getminor(devt))) == NULL)
			goto fail;

		(void) snprintf(path, MAXPATHLEN, "%s:%s", CLONE_PATH, drvname);
		return (DDI_SUCCESS);
	}

	/* extract instance from devt (getinfo(9E) DDI_INFO_DEVT2INSTANCE). */
	if ((instance = dev_to_instance(devt)) == -1)
		goto fail;

	/* reconstruct the path given the major/instance */
	if (e_ddi_majorinstance_to_path(major, instance, path) != DDI_SUCCESS)
		goto fail;

	/* if spec_type given we must drive attach and search minor nodes */
	if ((spec_type == S_IFCHR) || (spec_type == S_IFBLK)) {
		/* attach the path so we can search minors */
		if ((dip = e_ddi_hold_devi_by_path(path, 0)) == NULL)
			goto fail;

		/* Add minorname to path. */
		mutex_enter(&(DEVI(dip)->devi_lock));
		minorname = i_ddi_devtspectype_to_minorname(dip,
		    devt, spec_type);
		if (minorname) {
			(void) strcat(path, ":");
			(void) strcat(path, minorname);
		}
		mutex_exit(&(DEVI(dip)->devi_lock));
		ddi_release_devi(dip);
		if (minorname == NULL)
			goto fail;
	}
	ASSERT(strlen(path) < MAXPATHLEN);
	return (DDI_SUCCESS);

fail:	*path = 0;
	return (DDI_FAILURE);
}
6495 
6496 /*
6497  * Given a major number and an instance, return the path.
6498  * This interface does NOT drive attach.
6499  */
/*
 * Build the path for major/instance into 'path' (MAXPATHLEN buffer),
 * first from the instance tree, then by searching the per-driver list
 * (pseudo branch).  Returns DDI_SUCCESS, or DDI_FAILURE with
 * *path = 0.  This interface does NOT drive attach.
 */
int
e_ddi_majorinstance_to_path(major_t major, int instance, char *path)
{
	struct devnames *dnp;
	dev_info_t	*dip;

	if ((major >= devcnt) || (instance == -1)) {
		*path = 0;
		return (DDI_FAILURE);
	}

	/* look for the major/instance in the instance tree */
	if (e_ddi_instance_majorinstance_to_path(major, instance,
	    path) == DDI_SUCCESS) {
		ASSERT(strlen(path) < MAXPATHLEN);
		return (DDI_SUCCESS);
	}

	/*
	 * Not in instance tree, find the instance on the per driver list and
	 * construct path to instance via ddi_pathname(). This is how paths
	 * down the 'pseudo' branch are constructed.
	 */
	dnp = &(devnamesp[major]);
	LOCK_DEV_OPS(&(dnp->dn_lock));
	for (dip = dnp->dn_head; dip;
	    dip = (dev_info_t *)DEVI(dip)->devi_next) {
		/* Skip if instance does not match. */
		if (DEVI(dip)->devi_instance != instance)
			continue;

		/*
		 * An ndi_hold_devi() does not prevent DS_INITIALIZED->DS_BOUND
		 * node demotion, so it is not an effective way of ensuring
		 * that the ddi_pathname result has a unit-address.  Instead,
		 * we reverify the node state after calling ddi_pathname().
		 */
		if (i_ddi_node_state(dip) >= DS_INITIALIZED) {
			(void) ddi_pathname(dip, path);
			if (i_ddi_node_state(dip) < DS_INITIALIZED)
				continue;
			UNLOCK_DEV_OPS(&(dnp->dn_lock));
			ASSERT(strlen(path) < MAXPATHLEN);
			return (DDI_SUCCESS);
		}
	}
	UNLOCK_DEV_OPS(&(dnp->dn_lock));

	/* can't reconstruct the path */
	*path = 0;
	return (DDI_FAILURE);
}
6552 
6553 #define	GLD_DRIVER_PPA "SUNW,gld_v0_ppa"
6554 
6555 /*
6556  * Given the dip for a network interface return the ppa for that interface.
6557  *
6558  * In all cases except GLD v0 drivers, the ppa == instance.
6559  * In the case of GLD v0 drivers, the ppa is equal to the attach order.
6560  * So for these drivers when the attach routine calls gld_register(),
6561  * the GLD framework creates an integer property called "gld_driver_ppa"
6562  * that can be queried here.
6563  *
6564  * The only time this function is used is when a system is booting over nfs.
6565  * In this case the system has to resolve the pathname of the boot device
6566  * to it's ppa.
6567  */
6568 int
6569 i_ddi_devi_get_ppa(dev_info_t *dip)
6570 {
6571 	return (ddi_prop_get_int(DDI_DEV_T_ANY, dip,
6572 			DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
6573 			GLD_DRIVER_PPA, ddi_get_instance(dip)));
6574 }
6575 
6576 /*
6577  * i_ddi_devi_set_ppa() should only be called from gld_register()
6578  * and only for GLD v0 drivers
6579  */
void
i_ddi_devi_set_ppa(dev_info_t *dip, int ppa)
{
	/* record the ppa as the GLD_DRIVER_PPA integer property */
	(void) e_ddi_prop_update_int(DDI_DEV_T_NONE, dip, GLD_DRIVER_PPA, ppa);
}
6585 
6586 
6587 /*
6588  * Private DDI Console bell functions.
6589  */
6590 void
6591 ddi_ring_console_bell(clock_t duration)
6592 {
6593 	if (ddi_console_bell_func != NULL)
6594 		(*ddi_console_bell_func)(duration);
6595 }
6596 
void
ddi_set_console_bell(void (*bellfunc)(clock_t duration))
{
	/* install (or clear, when bellfunc is NULL) the console bell hook */
	ddi_console_bell_func = bellfunc;
}
6602 
6603 int
6604 ddi_dma_alloc_handle(dev_info_t *dip, ddi_dma_attr_t *attr,
6605 	int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *handlep)
6606 {
6607 	int (*funcp)() = ddi_dma_allochdl;
6608 	ddi_dma_attr_t dma_attr;
6609 	struct bus_ops *bop;
6610 
6611 	if (attr == (ddi_dma_attr_t *)0)
6612 		return (DDI_DMA_BADATTR);
6613 
6614 	dma_attr = *attr;
6615 
6616 	bop = DEVI(dip)->devi_ops->devo_bus_ops;
6617 	if (bop && bop->bus_dma_allochdl)
6618 		funcp = bop->bus_dma_allochdl;
6619 
6620 	return ((*funcp)(dip, dip, &dma_attr, waitfp, arg, handlep));
6621 }
6622 
void
ddi_dma_free_handle(ddi_dma_handle_t *handlep)
{
	/* NOTE(review): the local must be named 'h' — the HD macro
	 * (defined earlier in this file) appears to reference 'h' to
	 * derive the devinfo from the handle; confirm before renaming. */
	ddi_dma_handle_t h = *handlep;
	(void) ddi_dma_freehdl(HD, HD, h);
}
6629 
/* callback list id used with ddi_set_callback()/ddi_run_callback() below */
static uintptr_t dma_mem_list_id = 0;
6631 
6632 
6633 int
6634 ddi_dma_mem_alloc(ddi_dma_handle_t handle, size_t length,
6635 	ddi_device_acc_attr_t *accattrp, uint_t flags,
6636 	int (*waitfp)(caddr_t), caddr_t arg, caddr_t *kaddrp,
6637 	size_t *real_length, ddi_acc_handle_t *handlep)
6638 {
6639 	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
6640 	dev_info_t *dip = hp->dmai_rdip;
6641 	ddi_acc_hdl_t *ap;
6642 	ddi_dma_attr_t *attrp = &hp->dmai_attr;
6643 	uint_t sleepflag, xfermodes;
6644 	int (*fp)(caddr_t);
6645 	int rval;
6646 
6647 	if (waitfp == DDI_DMA_SLEEP)
6648 		fp = (int (*)())KM_SLEEP;
6649 	else if (waitfp == DDI_DMA_DONTWAIT)
6650 		fp = (int (*)())KM_NOSLEEP;
6651 	else
6652 		fp = waitfp;
6653 	*handlep = impl_acc_hdl_alloc(fp, arg);
6654 	if (*handlep == NULL)
6655 		return (DDI_FAILURE);
6656 
6657 	/* check if the cache attributes are supported */
6658 	if (i_ddi_check_cache_attr(flags) == B_FALSE)
6659 		return (DDI_FAILURE);
6660 
6661 	/*
6662 	 * Transfer the meaningful bits to xfermodes.
6663 	 * Double-check if the 3rd party driver correctly sets the bits.
6664 	 * If not, set DDI_DMA_STREAMING to keep compatibility.
6665 	 */
6666 	xfermodes = flags & (DDI_DMA_CONSISTENT | DDI_DMA_STREAMING);
6667 	if (xfermodes == 0) {
6668 		xfermodes = DDI_DMA_STREAMING;
6669 	}
6670 
6671 	/*
6672 	 * initialize the common elements of data access handle
6673 	 */
6674 	ap = impl_acc_hdl_get(*handlep);
6675 	ap->ah_vers = VERS_ACCHDL;
6676 	ap->ah_dip = dip;
6677 	ap->ah_offset = 0;
6678 	ap->ah_len = 0;
6679 	ap->ah_xfermodes = flags;
6680 	ap->ah_acc = *accattrp;
6681 
6682 	sleepflag = ((waitfp == DDI_DMA_SLEEP) ? 1 : 0);
6683 	if (xfermodes == DDI_DMA_CONSISTENT) {
6684 		rval = i_ddi_mem_alloc(dip, attrp, length, sleepflag,
6685 		    flags, accattrp, kaddrp, NULL, ap);
6686 		*real_length = length;
6687 	} else {
6688 		rval = i_ddi_mem_alloc(dip, attrp, length, sleepflag,
6689 		    flags, accattrp, kaddrp, real_length, ap);
6690 	}
6691 	if (rval == DDI_SUCCESS) {
6692 		ap->ah_len = (off_t)(*real_length);
6693 		ap->ah_addr = *kaddrp;
6694 	} else {
6695 		impl_acc_hdl_free(*handlep);
6696 		*handlep = (ddi_acc_handle_t)NULL;
6697 		if (waitfp != DDI_DMA_SLEEP && waitfp != DDI_DMA_DONTWAIT) {
6698 			ddi_set_callback(waitfp, arg, &dma_mem_list_id);
6699 		}
6700 		rval = DDI_FAILURE;
6701 	}
6702 	return (rval);
6703 }
6704 
6705 void
6706 ddi_dma_mem_free(ddi_acc_handle_t *handlep)
6707 {
6708 	ddi_acc_hdl_t *ap;
6709 
6710 	ap = impl_acc_hdl_get(*handlep);
6711 	ASSERT(ap);
6712 
6713 	i_ddi_mem_free((caddr_t)ap->ah_addr, ap);
6714 
6715 	/*
6716 	 * free the handle
6717 	 */
6718 	impl_acc_hdl_free(*handlep);
6719 	*handlep = (ddi_acc_handle_t)NULL;
6720 
6721 	if (dma_mem_list_id != 0) {
6722 		ddi_run_callback(&dma_mem_list_id);
6723 	}
6724 }
6725 
int
ddi_dma_buf_bind_handle(ddi_dma_handle_t handle, struct buf *bp,
	uint_t flags, int (*waitfp)(caddr_t), caddr_t arg,
	ddi_dma_cookie_t *cookiep, uint_t *ccountp)
{
	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
	dev_info_t *hdip, *dip;
	struct ddi_dma_req dmareq;
	int (*funcp)();

	/* describe the transfer object from the buf(9S) structure */
	dmareq.dmar_flags = flags;
	dmareq.dmar_fp = waitfp;
	dmareq.dmar_arg = arg;
	dmareq.dmar_object.dmao_size = (uint_t)bp->b_bcount;

	if (bp->b_flags & B_PAGEIO) {
		/* paged I/O: the data is described by a page list */
		dmareq.dmar_object.dmao_type = DMA_OTYP_PAGES;
		dmareq.dmar_object.dmao_obj.pp_obj.pp_pp = bp->b_pages;
		dmareq.dmar_object.dmao_obj.pp_obj.pp_offset =
		    (uint_t)(((uintptr_t)bp->b_un.b_addr) & MMU_PAGEOFFSET);
	} else {
		dmareq.dmar_object.dmao_obj.virt_obj.v_addr = bp->b_un.b_addr;
		if (bp->b_flags & B_SHADOW) {
			/* shadow page list accompanies the virtual address */
			dmareq.dmar_object.dmao_obj.virt_obj.v_priv =
							bp->b_shadow;
			dmareq.dmar_object.dmao_type = DMA_OTYP_BUFVADDR;
		} else {
			dmareq.dmar_object.dmao_type =
				(bp->b_flags & (B_PHYS | B_REMAPPED)) ?
				DMA_OTYP_BUFVADDR : DMA_OTYP_VADDR;
			dmareq.dmar_object.dmao_obj.virt_obj.v_priv = NULL;
		}

		/*
		 * If the buffer has no proc pointer, or the proc
		 * struct has the kernel address space, or the buffer has
		 * been marked B_REMAPPED (meaning that it is now
		 * mapped into the kernel's address space), then
		 * the address space is kas (kernel address space).
		 */
		if ((bp->b_proc == NULL) || (bp->b_proc->p_as == &kas) ||
		    (bp->b_flags & B_REMAPPED)) {
			dmareq.dmar_object.dmao_obj.virt_obj.v_as = 0;
		} else {
			dmareq.dmar_object.dmao_obj.virt_obj.v_as =
			    bp->b_proc->p_as;
		}
	}

	/* hand the request to the parent nexus bind function */
	dip = hp->dmai_rdip;
	hdip = (dev_info_t *)DEVI(dip)->devi_bus_dma_bindhdl;
	funcp = DEVI(dip)->devi_bus_dma_bindfunc;
	return ((*funcp)(hdip, dip, handle, &dmareq, cookiep, ccountp));
}
6780 
6781 int
6782 ddi_dma_addr_bind_handle(ddi_dma_handle_t handle, struct as *as,
6783 	caddr_t addr, size_t len, uint_t flags, int (*waitfp)(caddr_t),
6784 	caddr_t arg, ddi_dma_cookie_t *cookiep, uint_t *ccountp)
6785 {
6786 	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
6787 	dev_info_t *hdip, *dip;
6788 	struct ddi_dma_req dmareq;
6789 	int (*funcp)();
6790 
6791 	if (len == (uint_t)0) {
6792 		return (DDI_DMA_NOMAPPING);
6793 	}
6794 	dmareq.dmar_flags = flags;
6795 	dmareq.dmar_fp = waitfp;
6796 	dmareq.dmar_arg = arg;
6797 	dmareq.dmar_object.dmao_size = len;
6798 	dmareq.dmar_object.dmao_type = DMA_OTYP_VADDR;
6799 	dmareq.dmar_object.dmao_obj.virt_obj.v_as = as;
6800 	dmareq.dmar_object.dmao_obj.virt_obj.v_addr = addr;
6801 	dmareq.dmar_object.dmao_obj.virt_obj.v_priv = NULL;
6802 
6803 	dip = hp->dmai_rdip;
6804 	hdip = (dev_info_t *)DEVI(dip)->devi_bus_dma_bindhdl;
6805 	funcp = DEVI(dip)->devi_bus_dma_bindfunc;
6806 	return ((*funcp)(hdip, dip, handle, &dmareq, cookiep, ccountp));
6807 }
6808 
6809 void
6810 ddi_dma_nextcookie(ddi_dma_handle_t handle, ddi_dma_cookie_t *cookiep)
6811 {
6812 	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
6813 	ddi_dma_cookie_t *cp;
6814 
6815 	cp = hp->dmai_cookie;
6816 	ASSERT(cp);
6817 
6818 	cookiep->dmac_notused = cp->dmac_notused;
6819 	cookiep->dmac_type = cp->dmac_type;
6820 	cookiep->dmac_address = cp->dmac_address;
6821 	cookiep->dmac_size = cp->dmac_size;
6822 	hp->dmai_cookie++;
6823 }
6824 
6825 int
6826 ddi_dma_numwin(ddi_dma_handle_t handle, uint_t *nwinp)
6827 {
6828 	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
6829 	if ((hp->dmai_rflags & DDI_DMA_PARTIAL) == 0) {
6830 		return (DDI_FAILURE);
6831 	} else {
6832 		*nwinp = hp->dmai_nwin;
6833 		return (DDI_SUCCESS);
6834 	}
6835 }
6836 
6837 int
6838 ddi_dma_getwin(ddi_dma_handle_t h, uint_t win, off_t *offp,
6839 	size_t *lenp, ddi_dma_cookie_t *cookiep, uint_t *ccountp)
6840 {
6841 	int (*funcp)() = ddi_dma_win;
6842 	struct bus_ops *bop;
6843 
6844 	bop = DEVI(HD)->devi_ops->devo_bus_ops;
6845 	if (bop && bop->bus_dma_win)
6846 		funcp = bop->bus_dma_win;
6847 
6848 	return ((*funcp)(HD, HD, h, win, offp, lenp, cookiep, ccountp));
6849 }
6850 
int
ddi_dma_set_sbus64(ddi_dma_handle_t h, ulong_t burstsizes)
{
	/* request 64-bit SBus transfers via the DMA mctl interface */
	return (ddi_dma_mctl(HD, HD, h, DDI_DMA_SET_SBUS64, 0,
		&burstsizes, 0, 0));
}
6857 
/*
 * Default fault check: returns the handle's dmai_fault flag, which is
 * nonzero once i_ddi_dma_set_fault() has run and zero after
 * i_ddi_dma_clr_fault().
 */
int
i_ddi_dma_fault_check(ddi_dma_impl_t *hp)
{
	return (hp->dmai_fault);
}
6863 
6864 int
6865 ddi_check_dma_handle(ddi_dma_handle_t handle)
6866 {
6867 	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
6868 	int (*check)(ddi_dma_impl_t *);
6869 
6870 	if ((check = hp->dmai_fault_check) == NULL)
6871 		check = i_ddi_dma_fault_check;
6872 
6873 	return (((*check)(hp) == DDI_SUCCESS) ? DDI_SUCCESS : DDI_FAILURE);
6874 }
6875 
6876 void
6877 i_ddi_dma_set_fault(ddi_dma_handle_t handle)
6878 {
6879 	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
6880 	void (*notify)(ddi_dma_impl_t *);
6881 
6882 	if (!hp->dmai_fault) {
6883 		hp->dmai_fault = 1;
6884 		if ((notify = hp->dmai_fault_notify) != NULL)
6885 			(*notify)(hp);
6886 	}
6887 }
6888 
6889 void
6890 i_ddi_dma_clr_fault(ddi_dma_handle_t handle)
6891 {
6892 	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
6893 	void (*notify)(ddi_dma_impl_t *);
6894 
6895 	if (hp->dmai_fault) {
6896 		hp->dmai_fault = 0;
6897 		if ((notify = hp->dmai_fault_notify) != NULL)
6898 			(*notify)(hp);
6899 	}
6900 }
6901 
6902 /*
6903  * register mapping routines.
6904  */
6905 int
6906 ddi_regs_map_setup(dev_info_t *dip, uint_t rnumber, caddr_t *addrp,
6907 	offset_t offset, offset_t len, ddi_device_acc_attr_t *accattrp,
6908 	ddi_acc_handle_t *handle)
6909 {
6910 	ddi_map_req_t mr;
6911 	ddi_acc_hdl_t *hp;
6912 	int result;
6913 
6914 	/*
6915 	 * Allocate and initialize the common elements of data access handle.
6916 	 */
6917 	*handle = impl_acc_hdl_alloc(KM_SLEEP, NULL);
6918 	hp = impl_acc_hdl_get(*handle);
6919 	hp->ah_vers = VERS_ACCHDL;
6920 	hp->ah_dip = dip;
6921 	hp->ah_rnumber = rnumber;
6922 	hp->ah_offset = offset;
6923 	hp->ah_len = len;
6924 	hp->ah_acc = *accattrp;
6925 
6926 	/*
6927 	 * Set up the mapping request and call to parent.
6928 	 */
6929 	mr.map_op = DDI_MO_MAP_LOCKED;
6930 	mr.map_type = DDI_MT_RNUMBER;
6931 	mr.map_obj.rnumber = rnumber;
6932 	mr.map_prot = PROT_READ | PROT_WRITE;
6933 	mr.map_flags = DDI_MF_KERNEL_MAPPING;
6934 	mr.map_handlep = hp;
6935 	mr.map_vers = DDI_MAP_VERSION;
6936 	result = ddi_map(dip, &mr, offset, len, addrp);
6937 
6938 	/*
6939 	 * check for end result
6940 	 */
6941 	if (result != DDI_SUCCESS) {
6942 		impl_acc_hdl_free(*handle);
6943 		*handle = (ddi_acc_handle_t)NULL;
6944 	} else {
6945 		hp->ah_addr = *addrp;
6946 	}
6947 
6948 	return (result);
6949 }
6950 
6951 void
6952 ddi_regs_map_free(ddi_acc_handle_t *handlep)
6953 {
6954 	ddi_map_req_t mr;
6955 	ddi_acc_hdl_t *hp;
6956 
6957 	hp = impl_acc_hdl_get(*handlep);
6958 	ASSERT(hp);
6959 
6960 	mr.map_op = DDI_MO_UNMAP;
6961 	mr.map_type = DDI_MT_RNUMBER;
6962 	mr.map_obj.rnumber = hp->ah_rnumber;
6963 	mr.map_prot = PROT_READ | PROT_WRITE;
6964 	mr.map_flags = DDI_MF_KERNEL_MAPPING;
6965 	mr.map_handlep = hp;
6966 	mr.map_vers = DDI_MAP_VERSION;
6967 
6968 	/*
6969 	 * Call my parent to unmap my regs.
6970 	 */
6971 	(void) ddi_map(hp->ah_dip, &mr, hp->ah_offset,
6972 		hp->ah_len, &hp->ah_addr);
6973 	/*
6974 	 * free the handle
6975 	 */
6976 	impl_acc_hdl_free(*handlep);
6977 	*handlep = (ddi_acc_handle_t)NULL;
6978 }
6979 
6980 int
6981 ddi_device_zero(ddi_acc_handle_t handle, caddr_t dev_addr, size_t bytecount,
6982 	ssize_t dev_advcnt, uint_t dev_datasz)
6983 {
6984 	uint8_t *b;
6985 	uint16_t *w;
6986 	uint32_t *l;
6987 	uint64_t *ll;
6988 
6989 	/* check for total byte count is multiple of data transfer size */
6990 	if (bytecount != ((bytecount / dev_datasz) * dev_datasz))
6991 		return (DDI_FAILURE);
6992 
6993 	switch (dev_datasz) {
6994 	case DDI_DATA_SZ01_ACC:
6995 		for (b = (uint8_t *)dev_addr;
6996 			bytecount != 0; bytecount -= 1, b += dev_advcnt)
6997 			ddi_put8(handle, b, 0);
6998 		break;
6999 	case DDI_DATA_SZ02_ACC:
7000 		for (w = (uint16_t *)dev_addr;
7001 			bytecount != 0; bytecount -= 2, w += dev_advcnt)
7002 			ddi_put16(handle, w, 0);
7003 		break;
7004 	case DDI_DATA_SZ04_ACC:
7005 		for (l = (uint32_t *)dev_addr;
7006 			bytecount != 0; bytecount -= 4, l += dev_advcnt)
7007 			ddi_put32(handle, l, 0);
7008 		break;
7009 	case DDI_DATA_SZ08_ACC:
7010 		for (ll = (uint64_t *)dev_addr;
7011 			bytecount != 0; bytecount -= 8, ll += dev_advcnt)
7012 			ddi_put64(handle, ll, 0x0ll);
7013 		break;
7014 	default:
7015 		return (DDI_FAILURE);
7016 	}
7017 	return (DDI_SUCCESS);
7018 }
7019 
7020 int
7021 ddi_device_copy(
7022 	ddi_acc_handle_t src_handle, caddr_t src_addr, ssize_t src_advcnt,
7023 	ddi_acc_handle_t dest_handle, caddr_t dest_addr, ssize_t dest_advcnt,
7024 	size_t bytecount, uint_t dev_datasz)
7025 {
7026 	uint8_t *b_src, *b_dst;
7027 	uint16_t *w_src, *w_dst;
7028 	uint32_t *l_src, *l_dst;
7029 	uint64_t *ll_src, *ll_dst;
7030 
7031 	/* check for total byte count is multiple of data transfer size */
7032 	if (bytecount != ((bytecount / dev_datasz) * dev_datasz))
7033 		return (DDI_FAILURE);
7034 
7035 	switch (dev_datasz) {
7036 	case DDI_DATA_SZ01_ACC:
7037 		b_src = (uint8_t *)src_addr;
7038 		b_dst = (uint8_t *)dest_addr;
7039 
7040 		for (; bytecount != 0; bytecount -= 1) {
7041 			ddi_put8(dest_handle, b_dst,
7042 				ddi_get8(src_handle, b_src));
7043 			b_dst += dest_advcnt;
7044 			b_src += src_advcnt;
7045 		}
7046 		break;
7047 	case DDI_DATA_SZ02_ACC:
7048 		w_src = (uint16_t *)src_addr;
7049 		w_dst = (uint16_t *)dest_addr;
7050 
7051 		for (; bytecount != 0; bytecount -= 2) {
7052 			ddi_put16(dest_handle, w_dst,
7053 				ddi_get16(src_handle, w_src));
7054 			w_dst += dest_advcnt;
7055 			w_src += src_advcnt;
7056 		}
7057 		break;
7058 	case DDI_DATA_SZ04_ACC:
7059 		l_src = (uint32_t *)src_addr;
7060 		l_dst = (uint32_t *)dest_addr;
7061 
7062 		for (; bytecount != 0; bytecount -= 4) {
7063 			ddi_put32(dest_handle, l_dst,
7064 				ddi_get32(src_handle, l_src));
7065 			l_dst += dest_advcnt;
7066 			l_src += src_advcnt;
7067 		}
7068 		break;
7069 	case DDI_DATA_SZ08_ACC:
7070 		ll_src = (uint64_t *)src_addr;
7071 		ll_dst = (uint64_t *)dest_addr;
7072 
7073 		for (; bytecount != 0; bytecount -= 8) {
7074 			ddi_put64(dest_handle, ll_dst,
7075 				ddi_get64(src_handle, ll_src));
7076 			ll_dst += dest_advcnt;
7077 			ll_src += src_advcnt;
7078 		}
7079 		break;
7080 	default:
7081 		return (DDI_FAILURE);
7082 	}
7083 	return (DDI_SUCCESS);
7084 }
7085 
/* byte-reversal primitives used by the ddi_swapNN() functions below */
#define	swap16(value)  \
	((((value) & 0xff) << 8) | ((value) >> 8))

#define	swap32(value)	\
	(((uint32_t)swap16((uint16_t)((value) & 0xffff)) << 16) | \
	(uint32_t)swap16((uint16_t)((value) >> 16)))

#define	swap64(value)	\
	(((uint64_t)swap32((uint32_t)((value) & 0xffffffff)) \
	    << 32) | \
	(uint64_t)swap32((uint32_t)((value) >> 32)))
7097 
uint16_t
ddi_swap16(uint16_t value)
{
	/* Reverse the two bytes of a 16-bit value. */
	return ((uint16_t)(((value & 0x00ff) << 8) | (value >> 8)));
}
7103 
uint32_t
ddi_swap32(uint32_t value)
{
	/* Reverse the four bytes of a 32-bit value. */
	return (((value & 0x000000ffU) << 24) |
	    ((value & 0x0000ff00U) << 8) |
	    ((value & 0x00ff0000U) >> 8) |
	    ((value & 0xff000000U) >> 24));
}
7109 
uint64_t
ddi_swap64(uint64_t value)
{
	/* Reverse the eight bytes of a 64-bit value. */
	return (((value & 0x00000000000000ffULL) << 56) |
	    ((value & 0x000000000000ff00ULL) << 40) |
	    ((value & 0x0000000000ff0000ULL) << 24) |
	    ((value & 0x00000000ff000000ULL) << 8) |
	    ((value & 0x000000ff00000000ULL) >> 8) |
	    ((value & 0x0000ff0000000000ULL) >> 24) |
	    ((value & 0x00ff000000000000ULL) >> 40) |
	    ((value & 0xff00000000000000ULL) >> 56));
}
7115 
7116 /*
7117  * Convert a binding name to a driver name.
7118  * A binding name is the name used to determine the driver for a
7119  * device - it may be either an alias for the driver or the name
7120  * of the driver itself.
7121  */
7122 char *
7123 i_binding_to_drv_name(char *bname)
7124 {
7125 	major_t major_no;
7126 
7127 	ASSERT(bname != NULL);
7128 
7129 	if ((major_no = ddi_name_to_major(bname)) == -1)
7130 		return (NULL);
7131 	return (ddi_major_to_name(major_no));
7132 }
7133 
7134 /*
7135  * Search for minor name that has specified dev_t and spec_type.
7136  * If spec_type is zero then any dev_t match works.  Since we
7137  * are returning a pointer to the minor name string, we require the
7138  * caller to do the locking.
7139  */
7140 char *
7141 i_ddi_devtspectype_to_minorname(dev_info_t *dip, dev_t dev, int spec_type)
7142 {
7143 	struct ddi_minor_data	*dmdp;
7144 
7145 	/*
7146 	 * The did layered driver currently intentionally returns a
7147 	 * devinfo ptr for an underlying sd instance based on a did
7148 	 * dev_t. In this case it is not an error.
7149 	 *
7150 	 * The did layered driver is associated with Sun Cluster.
7151 	 */
7152 	ASSERT((ddi_driver_major(dip) == getmajor(dev)) ||
7153 		(strcmp(ddi_major_to_name(getmajor(dev)), "did") == 0));
7154 	ASSERT(MUTEX_HELD(&(DEVI(dip)->devi_lock)));
7155 
7156 	for (dmdp = DEVI(dip)->devi_minor; dmdp; dmdp = dmdp->next) {
7157 		if (((dmdp->type == DDM_MINOR) ||
7158 		    (dmdp->type == DDM_INTERNAL_PATH) ||
7159 		    (dmdp->type == DDM_DEFAULT)) &&
7160 		    (dmdp->ddm_dev == dev) &&
7161 		    ((((spec_type & (S_IFCHR|S_IFBLK))) == 0) ||
7162 		    (dmdp->ddm_spec_type == spec_type)))
7163 			return (dmdp->ddm_name);
7164 	}
7165 
7166 	return (NULL);
7167 }
7168 
7169 /*
7170  * Find the devt and spectype of the specified minor_name.
7171  * Return DDI_FAILURE if minor_name not found. Since we are
7172  * returning everything via arguments we can do the locking.
7173  */
int
i_ddi_minorname_to_devtspectype(dev_info_t *dip, char *minor_name,
	dev_t *devtp, int *spectypep)
{
	struct ddi_minor_data	*dmdp;

	/* deal with clone minor nodes */
	if (dip == clone_dip) {
		major_t	major;
		/*
		 * Make sure minor_name is a STREAMS driver.
		 * We load the driver but don't attach to any instances.
		 */

		major = ddi_name_to_major(minor_name);
		if (major == (major_t)-1)
			return (DDI_FAILURE);

		if (ddi_hold_driver(major) == NULL)
			return (DDI_FAILURE);

		/* only STREAMS drivers can sit behind the clone driver */
		if (STREAMSTAB(major) == NULL) {
			ddi_rele_driver(major);
			return (DDI_FAILURE);
		}
		ddi_rele_driver(major);

		/* a clone dev_t encodes the target driver's major as minor */
		if (devtp)
			*devtp = makedevice(clone_major, (minor_t)major);

		if (spectypep)
			*spectypep = S_IFCHR;

		return (DDI_SUCCESS);
	}

	/* we take devi_lock ourselves, so the caller must not hold it */
	ASSERT(!MUTEX_HELD(&(DEVI(dip)->devi_lock)));
	mutex_enter(&(DEVI(dip)->devi_lock));

	/* scan the minor list for a name match on a visible node type */
	for (dmdp = DEVI(dip)->devi_minor; dmdp; dmdp = dmdp->next) {
		if (((dmdp->type != DDM_MINOR) &&
		    (dmdp->type != DDM_INTERNAL_PATH) &&
		    (dmdp->type != DDM_DEFAULT)) ||
		    strcmp(minor_name, dmdp->ddm_name))
			continue;

		if (devtp)
			*devtp = dmdp->ddm_dev;

		if (spectypep)
			*spectypep = dmdp->ddm_spec_type;

		mutex_exit(&(DEVI(dip)->devi_lock));
		return (DDI_SUCCESS);
	}

	mutex_exit(&(DEVI(dip)->devi_lock));
	return (DDI_FAILURE);
}
7233 
/* state used to fabricate DEVID_FAB device ids (see ddi_devid_init()) */
extern char	hw_serial[];
static kmutex_t devid_gen_mutex;
static short	devid_gen_number;

#ifdef DEBUG

/* test knobs: deliberately corrupt devids in i_ddi_devid_register() */
static int	devid_register_corrupt = 0;
static int	devid_register_corrupt_major = 0;
static int	devid_register_corrupt_hint = 0;
static int	devid_register_corrupt_hint_major = 0;

/* set nonzero to trace devid-layer dev_t lists via DDI_DEBUG_DEVID_DEVTS */
static int devid_lyr_debug = 0;

#define	DDI_DEBUG_DEVID_DEVTS(msg, ndevs, devs)		\
	if (devid_lyr_debug)					\
		ddi_debug_devid_devts(msg, ndevs, devs)

#else

#define	DDI_DEBUG_DEVID_DEVTS(msg, ndevs, devs)

#endif /* DEBUG */
7256 
7257 
7258 #ifdef	DEBUG
7259 
7260 static void
7261 ddi_debug_devid_devts(char *msg, int ndevs, dev_t *devs)
7262 {
7263 	int i;
7264 
7265 	cmn_err(CE_CONT, "%s:\n", msg);
7266 	for (i = 0; i < ndevs; i++) {
7267 		cmn_err(CE_CONT, "    0x%lx\n", devs[i]);
7268 	}
7269 }
7270 
7271 static void
7272 ddi_debug_devid_paths(char *msg, int npaths, char **paths)
7273 {
7274 	int i;
7275 
7276 	cmn_err(CE_CONT, "%s:\n", msg);
7277 	for (i = 0; i < npaths; i++) {
7278 		cmn_err(CE_CONT, "    %s\n", paths[i]);
7279 	}
7280 }
7281 
7282 static void
7283 ddi_debug_devid_devts_per_path(char *path, int ndevs, dev_t *devs)
7284 {
7285 	int i;
7286 
7287 	cmn_err(CE_CONT, "dev_ts per path %s\n", path);
7288 	for (i = 0; i < ndevs; i++) {
7289 		cmn_err(CE_CONT, "    0x%lx\n", devs[i]);
7290 	}
7291 }
7292 
7293 #endif	/* DEBUG */
7294 
7295 /*
7296  * Register device id into DDI framework.
7297  * Must be called when device is attached.
7298  */
static int
i_ddi_devid_register(dev_info_t *dip, ddi_devid_t devid)
{
	impl_devid_t	*i_devid = (impl_devid_t *)devid;
	size_t		driver_len;
	const char	*driver_name;
	char		*devid_str;
	major_t		major;

	/* need a node that is bound to a driver */
	if ((dip == NULL) ||
	    ((major = ddi_driver_major(dip)) == (major_t)-1))
		return (DDI_FAILURE);

	/* verify that the devid is valid */
	if (ddi_devid_valid(devid) != DDI_SUCCESS)
		return (DDI_FAILURE);

	/* Updating driver name hint in devid */
	driver_name = ddi_driver_name(dip);
	driver_len = strlen(driver_name);
	if (driver_len > DEVID_HINT_SIZE) {
		/* Pick up last four characters of driver name */
		driver_name += driver_len - DEVID_HINT_SIZE;
		driver_len = DEVID_HINT_SIZE;
	}
	bzero(i_devid->did_driver, DEVID_HINT_SIZE);
	bcopy(driver_name, i_devid->did_driver, driver_len);

#ifdef DEBUG
	/* Corrupt the devid for testing. */
	if (devid_register_corrupt)
		i_devid->did_id[0] += devid_register_corrupt;
	if (devid_register_corrupt_major &&
	    (major == devid_register_corrupt_major))
		i_devid->did_id[0] += 1;
	if (devid_register_corrupt_hint)
		i_devid->did_driver[0] += devid_register_corrupt_hint;
	if (devid_register_corrupt_hint_major &&
	    (major == devid_register_corrupt_hint_major))
		i_devid->did_driver[0] += 1;
#endif /* DEBUG */

	/* encode the devid as a string */
	if ((devid_str = ddi_devid_str_encode(devid, NULL)) == NULL)
		return (DDI_FAILURE);

	/* add string as a string property */
	if (ndi_prop_update_string(DDI_DEV_T_NONE, dip,
	    DEVID_PROP_NAME, devid_str) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s%d: devid property update failed",
			ddi_driver_name(dip), ddi_get_instance(dip));
		ddi_devid_str_free(devid_str);
		return (DDI_FAILURE);
	}

	ddi_devid_str_free(devid_str);

#ifdef	DEVID_COMPATIBILITY
	/*
	 * marker for devinfo snapshot compatibility.
	 * This code gets deleted when di_devid is gone from libdevid
	 */
	DEVI(dip)->devi_devid = DEVID_COMPATIBILITY;
#endif	/* DEVID_COMPATIBILITY */
	return (DDI_SUCCESS);
}
7365 
7366 int
7367 ddi_devid_register(dev_info_t *dip, ddi_devid_t devid)
7368 {
7369 	int rval;
7370 
7371 	rval = i_ddi_devid_register(dip, devid);
7372 	if (rval == DDI_SUCCESS) {
7373 		/*
7374 		 * Register devid in devid-to-path cache
7375 		 */
7376 		if (e_devid_cache_register(dip, devid) == DDI_SUCCESS) {
7377 			mutex_enter(&DEVI(dip)->devi_lock);
7378 			DEVI(dip)->devi_flags |= DEVI_REGISTERED_DEVID;
7379 			mutex_exit(&DEVI(dip)->devi_lock);
7380 		} else {
7381 			cmn_err(CE_WARN, "%s%d: failed to cache devid",
7382 				ddi_driver_name(dip), ddi_get_instance(dip));
7383 		}
7384 	} else {
7385 		cmn_err(CE_WARN, "%s%d: failed to register devid",
7386 			ddi_driver_name(dip), ddi_get_instance(dip));
7387 	}
7388 	return (rval);
7389 }
7390 
7391 /*
7392  * Remove (unregister) device id from DDI framework.
7393  * Must be called when device is detached.
7394  */
static void
i_ddi_devid_unregister(dev_info_t *dip)
{
	/* undo i_ddi_devid_register(): clear the marker, drop the property */
#ifdef	DEVID_COMPATIBILITY
	/*
	 * marker for micro release devinfo snapshot compatibility.
	 * This code gets deleted for the minor release.
	 */
	DEVI(dip)->devi_devid = NULL;		/* unset DEVID_PROP */
#endif	/* DEVID_COMPATIBILITY */

	/* remove the devid property */
	(void) ndi_prop_remove(DDI_DEV_T_NONE, dip, DEVID_PROP_NAME);
}
7409 
void
ddi_devid_unregister(dev_info_t *dip)
{
	/* clear the registered flag before tearing down the cache entry */
	mutex_enter(&DEVI(dip)->devi_lock);
	DEVI(dip)->devi_flags &= ~DEVI_REGISTERED_DEVID;
	mutex_exit(&DEVI(dip)->devi_lock);
	/* remove from the devid-to-path cache, then drop the property */
	e_devid_cache_unregister(dip);
	i_ddi_devid_unregister(dip);
}
7419 
7420 /*
7421  * Allocate and initialize a device id.
7422  */
int
ddi_devid_init(
	dev_info_t	*dip,
	ushort_t	devid_type,
	ushort_t	nbytes,
	void		*id,
	ddi_devid_t	*ret_devid)
{
	impl_devid_t	*i_devid;
	int		sz = sizeof (*i_devid) + nbytes - sizeof (char);
	int		driver_len;
	const char	*driver_name;

	/* validate the id/nbytes combination for each devid type */
	switch (devid_type) {
	case DEVID_SCSI3_WWN:
		/*FALLTHRU*/
	case DEVID_SCSI_SERIAL:
		/*FALLTHRU*/
	case DEVID_ATA_SERIAL:
		/*FALLTHRU*/
	case DEVID_ENCAP:
		if (nbytes == 0)
			return (DDI_FAILURE);
		if (id == NULL)
			return (DDI_FAILURE);
		break;
	case DEVID_FAB:
		/* fabricated ids carry no caller data; we synthesize it */
		if (nbytes != 0)
			return (DDI_FAILURE);
		if (id != NULL)
			return (DDI_FAILURE);
		nbytes = sizeof (int) +
		    sizeof (struct timeval32) + sizeof (short);
		sz += nbytes;
		break;
	default:
		return (DDI_FAILURE);
	}

	if ((i_devid = kmem_zalloc(sz, KM_SLEEP)) == NULL)
		return (DDI_FAILURE);

	/* fill in the fixed header: magic, revision, type and length */
	i_devid->did_magic_hi = DEVID_MAGIC_MSB;
	i_devid->did_magic_lo = DEVID_MAGIC_LSB;
	i_devid->did_rev_hi = DEVID_REV_MSB;
	i_devid->did_rev_lo = DEVID_REV_LSB;
	DEVID_FORMTYPE(i_devid, devid_type);
	DEVID_FORMLEN(i_devid, nbytes);

	/* Fill in driver name hint */
	driver_name = ddi_driver_name(dip);
	driver_len = strlen(driver_name);
	if (driver_len > DEVID_HINT_SIZE) {
		/* Pick up last four characters of driver name */
		driver_name += driver_len - DEVID_HINT_SIZE;
		driver_len = DEVID_HINT_SIZE;
	}

	bcopy(driver_name, i_devid->did_driver, driver_len);

	/* Fill in id field */
	if (devid_type == DEVID_FAB) {
		char		*cp;
		int		hostid;
		char		*hostid_cp = &hw_serial[0];
		struct timeval32 timestamp32;
		int		i;
		int		*ip;
		short		gen;

		/* increase the generation number */
		mutex_enter(&devid_gen_mutex);
		gen = devid_gen_number++;
		mutex_exit(&devid_gen_mutex);

		cp = i_devid->did_id;

		/* Fill in host id (big-endian byte ordering) */
		hostid = stoi(&hostid_cp);
		*cp++ = hibyte(hiword(hostid));
		*cp++ = lobyte(hiword(hostid));
		*cp++ = hibyte(loword(hostid));
		*cp++ = lobyte(loword(hostid));

		/*
		 * Fill in timestamp (big-endian byte ordering)
		 *
		 * (Note that the format may have to be changed
		 * before 2038 comes around, though it's arguably
		 * unique enough as it is..)
		 */
		uniqtime32(&timestamp32);
		ip = (int *)&timestamp32;
		for (i = 0;
		    i < sizeof (timestamp32) / sizeof (int); i++, ip++) {
			int	val;
			val = *ip;
			*cp++ = hibyte(hiword(val));
			*cp++ = lobyte(hiword(val));
			*cp++ = hibyte(loword(val));
			*cp++ = lobyte(loword(val));
		}

		/* fill in the generation number */
		*cp++ = hibyte(gen);
		*cp++ = lobyte(gen);
	} else
		bcopy(id, i_devid->did_id, nbytes);

	/* return device id */
	*ret_devid = (ddi_devid_t)i_devid;
	return (DDI_SUCCESS);
}
7536 
7537 int
7538 i_ddi_devi_get_devid(dev_t dev, dev_info_t *dip, ddi_devid_t *ret_devid)
7539 {
7540 	char		*devidstr;
7541 
7542 	ASSERT(dev != DDI_DEV_T_NONE);
7543 
7544 	/* look up the property, devt specific first */
7545 	if (ddi_prop_lookup_string(dev, dip, DDI_PROP_DONTPASS,
7546 	    DEVID_PROP_NAME, &devidstr) != DDI_PROP_SUCCESS) {
7547 		if ((dev == DDI_DEV_T_ANY) ||
7548 		    (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip,
7549 			DDI_PROP_DONTPASS, DEVID_PROP_NAME, &devidstr) !=
7550 			DDI_PROP_SUCCESS)) {
7551 				return (DDI_FAILURE);
7552 		}
7553 	}
7554 
7555 	/* convert to binary form */
7556 	if (ddi_devid_str_decode(devidstr, ret_devid, NULL) == -1) {
7557 		ddi_prop_free(devidstr);
7558 		return (DDI_FAILURE);
7559 	}
7560 	ddi_prop_free(devidstr);
7561 	return (DDI_SUCCESS);
7562 }
7563 
7564 /*
7565  * Return a copy of the device id for dev_t
7566  */
7567 int
7568 ddi_lyr_get_devid(dev_t dev, ddi_devid_t *ret_devid)
7569 {
7570 	dev_info_t	*dip;
7571 	int		rval;
7572 
7573 	/* get the dip */
7574 	if ((dip = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
7575 		return (DDI_FAILURE);
7576 
7577 	rval = i_ddi_devi_get_devid(dev, dip, ret_devid);
7578 
7579 	ddi_release_devi(dip);		/* e_ddi_hold_devi_by_dev() */
7580 	return (rval);
7581 }
7582 
7583 /*
7584  * Return a copy of the minor name for dev_t and spec_type
7585  */
int
ddi_lyr_get_minor_name(dev_t dev, int spec_type, char **minor_name)
{
	dev_info_t	*dip;
	char		*nm;
	size_t		alloc_sz, sz;

	if ((dip = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
		return (DDI_FAILURE);

	mutex_enter(&(DEVI(dip)->devi_lock));

	if ((nm = i_ddi_devtspectype_to_minorname(dip,
	    dev, spec_type)) == NULL) {
		mutex_exit(&(DEVI(dip)->devi_lock));
		ddi_release_devi(dip);	/* e_ddi_hold_devi_by_dev() */
		return (DDI_FAILURE);
	}

	/* make a copy */
	alloc_sz = strlen(nm) + 1;
	/* KM_SLEEP allocation can't be done under devi_lock, hence the
	 * drop / reacquire / re-verify loop below */
retry:
	/* drop lock to allocate memory */
	mutex_exit(&(DEVI(dip)->devi_lock));
	*minor_name = kmem_alloc(alloc_sz, KM_SLEEP);
	mutex_enter(&(DEVI(dip)->devi_lock));

	/* re-check things, since we dropped the lock */
	if ((nm = i_ddi_devtspectype_to_minorname(dip,
	    dev, spec_type)) == NULL) {
		mutex_exit(&(DEVI(dip)->devi_lock));
		kmem_free(*minor_name, alloc_sz);
		*minor_name = NULL;
		ddi_release_devi(dip);	/* e_ddi_hold_devi_by_dev() */
		return (DDI_FAILURE);
	}

	/* verify size is the same */
	sz = strlen(nm) + 1;
	if (alloc_sz != sz) {
		/* name changed size while unlocked; resize and try again */
		kmem_free(*minor_name, alloc_sz);
		alloc_sz = sz;
		goto retry;
	}

	/* sz == alloc_sz - make a copy */
	(void) strcpy(*minor_name, nm);

	mutex_exit(&(DEVI(dip)->devi_lock));
	ddi_release_devi(dip);	/* e_ddi_hold_devi_by_dev() */
	return (DDI_SUCCESS);
}
7638 
7639 int
7640 ddi_lyr_devid_to_devlist(
7641 	ddi_devid_t	devid,
7642 	char		*minor_name,
7643 	int		*retndevs,
7644 	dev_t		**retdevs)
7645 {
7646 	ASSERT(ddi_devid_valid(devid) == DDI_SUCCESS);
7647 
7648 	if (e_devid_cache_to_devt_list(devid, minor_name,
7649 	    retndevs, retdevs) == DDI_SUCCESS) {
7650 		ASSERT(*retndevs > 0);
7651 		DDI_DEBUG_DEVID_DEVTS("ddi_lyr_devid_to_devlist",
7652 			*retndevs, *retdevs);
7653 		return (DDI_SUCCESS);
7654 	}
7655 
7656 	if (e_ddi_devid_discovery(devid) == DDI_FAILURE) {
7657 		return (DDI_FAILURE);
7658 	}
7659 
7660 	if (e_devid_cache_to_devt_list(devid, minor_name,
7661 	    retndevs, retdevs) == DDI_SUCCESS) {
7662 		ASSERT(*retndevs > 0);
7663 		DDI_DEBUG_DEVID_DEVTS("ddi_lyr_devid_to_devlist",
7664 			*retndevs, *retdevs);
7665 		return (DDI_SUCCESS);
7666 	}
7667 
7668 	return (DDI_FAILURE);
7669 }
7670 
/*
 * Free a dev_t list returned by ddi_lyr_devid_to_devlist().  ndevs must
 * be the count that was returned along with the list.
 */
void
ddi_lyr_free_devlist(dev_t *devlist, int ndevs)
{
	kmem_free(devlist, sizeof (dev_t) * ndevs);
}
7676 
7677 /*
7678  * Note: This will need to be fixed if we ever allow processes to
7679  * have more than one data model per exec.
7680  */
/* Return the data model of the current user process. */
model_t
ddi_mmap_get_model(void)
{
	return (get_udatamodel());
}
7686 
model_t
ddi_model_convert_from(model_t model)
{
	/*
	 * Keep only the data-model bits and clear the "native" flag,
	 * yielding the canonical model value.
	 */
	return ((model & DDI_MODEL_MASK) & ~DDI_MODEL_NATIVE);
}
7692 
7693 /*
7694  * ddi interfaces managing storage and retrieval of eventcookies.
7695  */
7696 
7697 /*
7698  * Invoke bus nexus driver's implementation of the
7699  * (*bus_remove_eventcall)() interface to remove a registered
7700  * callback handler for "event".
7701  */
7702 int
7703 ddi_remove_event_handler(ddi_callback_id_t id)
7704 {
7705 	ndi_event_callbacks_t *cb = (ndi_event_callbacks_t *)id;
7706 	dev_info_t *ddip;
7707 
7708 	ASSERT(cb);
7709 	if (!cb) {
7710 		return (DDI_FAILURE);
7711 	}
7712 
7713 	ddip = NDI_EVENT_DDIP(cb->ndi_evtcb_cookie);
7714 	return (ndi_busop_remove_eventcall(ddip, id));
7715 }
7716 
7717 /*
7718  * Invoke bus nexus driver's implementation of the
7719  * (*bus_add_eventcall)() interface to register a callback handler
7720  * for "event".
7721  */
int
ddi_add_event_handler(dev_info_t *dip, ddi_eventcookie_t event,
    void (*handler)(dev_info_t *, ddi_eventcookie_t, void *, void *),
    void *arg, ddi_callback_id_t *id)
{
	/* dip serves as both the requesting and the registering node */
	return (ndi_busop_add_eventcall(dip, dip, event, handler, arg, id));
}
7729 
7730 
7731 /*
7732  * Return a handle for event "name" by calling up the device tree
7733  * hierarchy via  (*bus_get_eventcookie)() interface until claimed
7734  * by a bus nexus or top of dev_info tree is reached.
7735  */
int
ddi_get_eventcookie(dev_info_t *dip, char *name,
    ddi_eventcookie_t *event_cookiep)
{
	/* dip serves as both the requesting and the originating node */
	return (ndi_busop_get_eventcookie(dip, dip,
	    name, event_cookiep));
}
7743 
7744 /*
7745  * single thread access to dev_info node and set state
7746  */
void
i_devi_enter(dev_info_t *dip, uint_t s_mask, uint_t w_mask, int has_lock)
{
	/* caller may already hold devi_lock; only take it if not */
	if (!has_lock)
		mutex_enter(&(DEVI(dip)->devi_lock));

	ASSERT(mutex_owned(&(DEVI(dip)->devi_lock)));

	/*
	 * wait until state(s) have been changed
	 */
	while ((DEVI(dip)->devi_state & w_mask) != 0) {
		cv_wait(&(DEVI(dip)->devi_cv), &(DEVI(dip)->devi_lock));
	}
	/* all w_mask bits are now clear; claim the node by setting s_mask */
	DEVI(dip)->devi_state |= s_mask;

	if (!has_lock)
		mutex_exit(&(DEVI(dip)->devi_lock));
}
7766 
/* Release the state bits claimed by i_devi_enter() and wake up waiters. */
void
i_devi_exit(dev_info_t *dip, uint_t c_mask, int has_lock)
{
	/* caller may already hold devi_lock; only take it if not */
	if (!has_lock)
		mutex_enter(&(DEVI(dip)->devi_lock));

	ASSERT(mutex_owned(&(DEVI(dip)->devi_lock)));

	/*
	 * clear the state(s) and wakeup any threads waiting
	 * for state change
	 */
	DEVI(dip)->devi_state &= ~c_mask;
	cv_broadcast(&(DEVI(dip)->devi_cv));

	if (!has_lock)
		mutex_exit(&(DEVI(dip)->devi_lock));
}
7785 
7786 /*
7787  * This procedure is provided as the general callback function when
7788  * umem_lockmemory calls as_add_callback for long term memory locking.
7789  * When as_unmap, as_setprot, or as_free encounter segments which have
7790  * locked memory, this callback will be invoked.
7791  */
void
umem_lock_undo(struct as *as, void *arg, uint_t event)
{
	_NOTE(ARGUNUSED(as, event))
	struct ddi_umem_cookie *cp = (struct ddi_umem_cookie *)arg;

	/*
	 * Call the cleanup function.  Decrement the cookie reference
	 * count, if it goes to zero, return the memory for the cookie.
	 * The i_ddi_umem_unlock for this cookie may or may not have been
	 * called already.  It is the responsibility of the caller of
	 * umem_lockmemory to handle the case of the cleanup routine
	 * being called after a ddi_umem_unlock for the cookie
	 * was called.
	 */

	(*cp->callbacks.cbo_umem_lock_cleanup)((ddi_umem_cookie_t)cp);

	/*
	 * remove the cookie if reference goes to zero; the atomic
	 * decrement pairs with the one in i_ddi_umem_unlock()
	 */
	if (atomic_add_long_nv((ulong_t *)(&(cp->cook_refcnt)), -1) == 0) {
		kmem_free(cp, sizeof (struct ddi_umem_cookie));
	}
}
7815 
7816 /*
7817  * The following two Consolidation Private routines provide generic
7818  * interfaces to increase/decrease the amount of device-locked memory.
7819  *
7820  * To keep project_rele and project_hold consistent, i_ddi_decr_locked_memory()
7821  * must be called every time i_ddi_incr_locked_memory() is called.
7822  */
int
/* ARGSUSED */
i_ddi_incr_locked_memory(proc_t *procp, task_t *taskp,
    kproject_t *projectp, zone_t *zonep, rctl_qty_t inc)
{
	kproject_t *projp;

	/* taskp, projectp, zonep are unused; accounting uses procp's project */
	ASSERT(procp);
	ASSERT(mutex_owned(&procp->p_lock));

	projp = procp->p_task->tk_proj;
	mutex_enter(&umem_devlockmem_rctl_lock);
	/*
	 * Test if the requested memory can be locked without exceeding the
	 * limits.
	 */
	if (rctl_test(rc_project_devlockmem, projp->kpj_rctls,
	    procp, inc, RCA_SAFE) & RCT_DENY) {
		mutex_exit(&umem_devlockmem_rctl_lock);
		return (ENOMEM);
	}
	projp->kpj_data.kpd_devlockmem += inc;
	mutex_exit(&umem_devlockmem_rctl_lock);
	/*
	 * Grab a hold on the project.
	 */
	(void) project_hold(projp);

	return (0);
}
7853 
7854 /*
7855  * To keep project_rele and project_hold consistent, i_ddi_incr_locked_memory()
7856  * must be called every time i_ddi_decr_locked_memory() is called.
7857  */
/* ARGSUSED */
void
i_ddi_decr_locked_memory(proc_t *procp, task_t *taskp,
    kproject_t *projectp, zone_t *zonep, rctl_qty_t dec)
{
	/* procp, taskp, zonep are unused; the charge is on projectp */
	ASSERT(projectp);

	mutex_enter(&umem_devlockmem_rctl_lock);
	projectp->kpj_data.kpd_devlockmem -= dec;
	mutex_exit(&umem_devlockmem_rctl_lock);

	/*
	 * Release the project pointer reference acquired in
	 * i_ddi_incr_locked_memory().
	 */
	(void) project_rele(projectp);
}
7875 
7876 /*
7877  * This routine checks if the max-device-locked-memory resource ctl is
7878  * exceeded, if not increments it, grabs a hold on the project.
7879  * Returns 0 if successful otherwise returns error code
7880  */
static int
umem_incr_devlockmem(struct ddi_umem_cookie *cookie)
{
	proc_t		*procp;
	int		ret;

	ASSERT(cookie);
	procp = cookie->procp;
	ASSERT(procp);

	/* p_lock is required by i_ddi_incr_locked_memory() */
	mutex_enter(&procp->p_lock);

	if ((ret = i_ddi_incr_locked_memory(procp, NULL,
		NULL, NULL, cookie->size)) != 0) {
		mutex_exit(&procp->p_lock);
		return (ret);
	}

	/*
	 * save the project pointer in the umem cookie; the project
	 * hold was already taken in i_ddi_incr_locked_memory
	 */
	cookie->lockmem_proj = (void *)procp->p_task->tk_proj;
	mutex_exit(&procp->p_lock);

	return (0);
}
7909 
7910 /*
7911  * Decrements the max-device-locked-memory resource ctl and releases
7912  * the hold on the project that was acquired during umem_incr_devlockmem
7913  */
7914 static void
7915 umem_decr_devlockmem(struct ddi_umem_cookie *cookie)
7916 {
7917 	kproject_t	*projp;
7918 
7919 	if (!cookie->lockmem_proj)
7920 		return;
7921 
7922 	projp = (kproject_t *)cookie->lockmem_proj;
7923 	i_ddi_decr_locked_memory(NULL, NULL, projp, NULL, cookie->size);
7924 
7925 	cookie->lockmem_proj = NULL;
7926 }
7927 
7928 /*
7929  * A consolidation private function which is essentially equivalent to
7930  * ddi_umem_lock but with the addition of arguments ops_vector and procp.
7931  * A call to as_add_callback is done if DDI_UMEMLOCK_LONGTERM is set, and
7932  * the ops_vector is valid.
7933  *
7934  * Lock the virtual address range in the current process and create a
7935  * ddi_umem_cookie (of type UMEM_LOCKED). This can be used to pass to
7936  * ddi_umem_iosetup to create a buf or do devmap_umem_setup/remap to export
7937  * to user space.
7938  *
7939  * Note: The resource control accounting currently uses a full charge model
7940  * in other words attempts to lock the same/overlapping areas of memory
7941  * will deduct the full size of the buffer from the projects running
7942  * counter for the device locked memory.
7943  *
7944  * addr, size should be PAGESIZE aligned
7945  *
7946  * flags - DDI_UMEMLOCK_READ, DDI_UMEMLOCK_WRITE or both
7947  *	identifies whether the locked memory will be read or written or both
7948  *      DDI_UMEMLOCK_LONGTERM  must be set when the locking will
7949  * be maintained for an indefinitely long period (essentially permanent),
7950  * rather than for what would be required for a typical I/O completion.
7951  * When DDI_UMEMLOCK_LONGTERM is set, umem_lockmemory will return EFAULT
7952  * if the memory pertains to a regular file which is mapped MAP_SHARED.
 * This is to prevent a deadlock if a file truncation is attempted
 * after the locking is done.
7955  *
7956  * Returns 0 on success
7957  *	EINVAL - for invalid parameters
7958  *	EPERM, ENOMEM and other error codes returned by as_pagelock
7959  *	ENOMEM - is returned if the current request to lock memory exceeds
7960  *		project.max-device-locked-memory resource control value.
 *      EFAULT - memory pertains to a regular file mapped shared
 *		and DDI_UMEMLOCK_LONGTERM flag is set
7963  *	EAGAIN - could not start the ddi_umem_unlock list processing thread
7964  */
int
umem_lockmemory(caddr_t addr, size_t len, int flags, ddi_umem_cookie_t *cookie,
		struct umem_callback_ops *ops_vector,
		proc_t *procp)
{
	int	error;
	struct ddi_umem_cookie *p;
	void	(*driver_callback)() = NULL;
	struct as *as = procp->p_as;
	struct seg		*seg;
	vnode_t			*vp;

	*cookie = NULL;		/* in case of any error return */

	/* These are the only three valid flags */
	if ((flags & ~(DDI_UMEMLOCK_READ | DDI_UMEMLOCK_WRITE |
	    DDI_UMEMLOCK_LONGTERM)) != 0)
		return (EINVAL);

	/* At least one (can be both) of the two access flags must be set */
	if ((flags & (DDI_UMEMLOCK_READ | DDI_UMEMLOCK_WRITE)) == 0)
		return (EINVAL);

	/* addr and len must be page-aligned */
	if (((uintptr_t)addr & PAGEOFFSET) != 0)
		return (EINVAL);

	if ((len & PAGEOFFSET) != 0)
		return (EINVAL);

	/*
	 * For longterm locking a driver callback must be specified; if
	 * not longterm then a callback is optional.
	 */
	if (ops_vector != NULL) {
		if (ops_vector->cbo_umem_callback_version !=
		    UMEM_CALLBACK_VERSION)
			return (EINVAL);
		else
			driver_callback = ops_vector->cbo_umem_lock_cleanup;
	}
	if ((driver_callback == NULL) && (flags & DDI_UMEMLOCK_LONGTERM))
		return (EINVAL);

	/*
	 * Call i_ddi_umem_unlock_thread_start if necessary.  It will
	 * be called on first ddi_umem_lock or umem_lockmemory call.
	 */
	if (ddi_umem_unlock_thread == NULL)
		i_ddi_umem_unlock_thread_start();

	/* Allocate memory for the cookie */
	p = kmem_zalloc(sizeof (struct ddi_umem_cookie), KM_SLEEP);

	/* Convert the flags to seg_rw type */
	if (flags & DDI_UMEMLOCK_WRITE) {
		p->s_flags = S_WRITE;
	} else {
		p->s_flags = S_READ;
	}

	/* Store procp in cookie for later iosetup/unlock */
	p->procp = (void *)procp;

	/*
	 * Store the struct as pointer in cookie for later use by
	 * ddi_umem_unlock.  The proc->p_as will be stale if ddi_umem_unlock
	 * is called after relvm is called.
	 */
	p->asp = as;

	/*
	 * The size field is needed for lockmem accounting.
	 */
	p->size = len;

	if (umem_incr_devlockmem(p) != 0) {
		/*
		 * The requested memory cannot be locked
		 */
		kmem_free(p, sizeof (struct ddi_umem_cookie));
		*cookie = (ddi_umem_cookie_t)NULL;
		return (ENOMEM);
	}
	/*
	 * umem_incr_devlockmem stashes the project ptr into the
	 * cookie. This is needed during unlock since that can
	 * happen in a non-USER context
	 */
	ASSERT(p->lockmem_proj);

	/* Lock the pages corresponding to addr, len in memory */
	error = as_pagelock(as, &(p->pparray), addr, len, p->s_flags);
	if (error != 0) {
		umem_decr_devlockmem(p);
		kmem_free(p, sizeof (struct ddi_umem_cookie));
		*cookie = (ddi_umem_cookie_t)NULL;
		return (error);
	}

	/*
	 * For longterm locking the addr must pertain to a seg_vn segment
	 * or a seg_spt segment.
	 * If the segment pertains to a regular file, it cannot be
	 * mapped MAP_SHARED.
	 * This is to prevent a deadlock if a file truncation is attempted
	 * after the locking is done.
	 * Doing this after as_pagelock guarantees persistence of the as; if
	 * an unacceptable segment is found, the cleanup includes calling
	 * as_pageunlock before returning EFAULT.
	 */
	if (flags & DDI_UMEMLOCK_LONGTERM) {
		extern  struct seg_ops segspt_shmops;
		AS_LOCK_ENTER(as, &as->a_lock, RW_READER);
		/* walk every segment overlapping [addr, addr + len) */
		for (seg = as_segat(as, addr); ; seg = AS_SEGNEXT(as, seg)) {
			if (seg == NULL || seg->s_base > addr + len)
				break;
			if (((seg->s_ops != &segvn_ops) &&
			    (seg->s_ops != &segspt_shmops)) ||
			    ((SEGOP_GETVP(seg, addr, &vp) == 0 &&
			    vp != NULL && vp->v_type == VREG) &&
			    (SEGOP_GETTYPE(seg, addr) & MAP_SHARED))) {
				as_pageunlock(as, p->pparray,
						addr, len, p->s_flags);
				AS_LOCK_EXIT(as, &as->a_lock);
				umem_decr_devlockmem(p);
				kmem_free(p, sizeof (struct ddi_umem_cookie));
				*cookie = (ddi_umem_cookie_t)NULL;
				return (EFAULT);
			}
		}
		AS_LOCK_EXIT(as, &as->a_lock);
	}


	/* Initialize the fields in the ddi_umem_cookie */
	p->cvaddr = addr;
	p->type = UMEM_LOCKED;
	if (driver_callback != NULL) {
		/* i_ddi_umem_unlock and umem_lock_undo may need the cookie */
		p->cook_refcnt = 2;
		p->callbacks = *ops_vector;
	} else {
		/* only i_ddi_umem_unlock needs the cookie */
		p->cook_refcnt = 1;
	}

	*cookie = (ddi_umem_cookie_t)p;

	/*
	 * If a driver callback was specified, add an entry to the
	 * as struct callback list. The as_pagelock above guarantees
	 * the persistence of as.
	 */
	if (driver_callback) {
		error = as_add_callback(as, umem_lock_undo, p, AS_ALL_EVENT,
						addr, len, KM_SLEEP);
		if (error != 0) {
			as_pageunlock(as, p->pparray,
					addr, len, p->s_flags);
			umem_decr_devlockmem(p);
			kmem_free(p, sizeof (struct ddi_umem_cookie));
			*cookie = (ddi_umem_cookie_t)NULL;
		}
	}
	return (error);
}
8132 
8133 /*
8134  * Unlock the pages locked by ddi_umem_lock or umem_lockmemory and free
8135  * the cookie.  Called from i_ddi_umem_unlock_thread.
8136  */
8137 
static void
i_ddi_umem_unlock(struct ddi_umem_cookie *p)
{
	uint_t	rc;

	/*
	 * There is no way to determine whether a callback to
	 * umem_lock_undo was registered via as_add_callback.
	 * (i.e. umem_lockmemory was called with DDI_UMEMLOCK_LONGTERM and
	 * a valid callback function structure.)  as_delete_callback
	 * is called to delete a possible registered callback.  If the
	 * return from as_delete_callbacks is AS_CALLBACK_DELETED, it
	 * indicates that there was a callback registered, and that it was
	 * successfully deleted.  Thus, the cookie reference count
	 * will never be decremented by umem_lock_undo.  Just return the
	 * memory for the cookie, since both users of the cookie are done.
	 * A return of AS_CALLBACK_NOTFOUND indicates a callback was
	 * never registered.  A return of AS_CALLBACK_DELETE_DEFERRED
	 * indicates that callback processing is taking place and
	 * umem_lock_undo is, or will be, executing, and thus decrementing
	 * the cookie reference count when it is complete.
	 *
	 * This needs to be done before as_pageunlock so that the
	 * persistence of as is guaranteed because of the locked pages.
	 *
	 */
	rc = as_delete_callback(p->asp, p);


	/*
	 * The proc->p_as will be stale if i_ddi_umem_unlock is called
	 * after relvm is called so use p->asp.
	 */
	as_pageunlock(p->asp, p->pparray, p->cvaddr, p->size, p->s_flags);

	/*
	 * Now that we have unlocked the memory decrement the
	 * max-device-locked-memory rctl
	 */
	umem_decr_devlockmem(p);

	if (rc == AS_CALLBACK_DELETED) {
		/* umem_lock_undo will not happen, return the cookie memory */
		ASSERT(p->cook_refcnt == 2);
		kmem_free(p, sizeof (struct ddi_umem_cookie));
	} else {
		/*
		 * umem_lock_undo may happen if as_delete_callback returned
		 * AS_CALLBACK_DELETE_DEFERRED.  In that case, decrement the
		 * reference count, atomically, and return the cookie
		 * memory if the reference count goes to zero.  The only
		 * other value for rc is AS_CALLBACK_NOTFOUND.  In that
		 * case, just return the cookie memory.
		 */
		if ((rc != AS_CALLBACK_DELETE_DEFERRED) ||
		    (atomic_add_long_nv((ulong_t *)(&(p->cook_refcnt)), -1)
		    == 0)) {
			kmem_free(p, sizeof (struct ddi_umem_cookie));
		}
	}
}
8199 
8200 /*
8201  * i_ddi_umem_unlock_thread - deferred ddi_umem_unlock list handler.
8202  *
8203  * Call i_ddi_umem_unlock for entries in the ddi_umem_unlock list
8204  * until it is empty.  Then, wait for more to be added.  This thread is awoken
8205  * via calls to ddi_umem_unlock.
8206  */
8207 
static void
i_ddi_umem_unlock_thread(void)
{
	struct ddi_umem_cookie	*ret_cookie;
	callb_cpr_t	cprinfo;

	/*
	 * process the ddi_umem_unlock list; register with CPR so the
	 * thread can be safely suspended/resumed while idle
	 */
	CALLB_CPR_INIT(&cprinfo, &ddi_umem_unlock_mutex,
	    callb_generic_cpr, "unlock_thread");
	for (;;) {
		mutex_enter(&ddi_umem_unlock_mutex);
		if (ddi_umem_unlock_head != NULL) {	/* list not empty */
			ret_cookie = ddi_umem_unlock_head;
			/* take it off the list */
			if ((ddi_umem_unlock_head =
			    ddi_umem_unlock_head->unl_forw) == NULL) {
				ddi_umem_unlock_tail = NULL;
			}
			mutex_exit(&ddi_umem_unlock_mutex);
			/* unlock the pages in this cookie */
			(void) i_ddi_umem_unlock(ret_cookie);
		} else {   /* list is empty, wait for next ddi_umem_unlock */
			CALLB_CPR_SAFE_BEGIN(&cprinfo);
			cv_wait(&ddi_umem_unlock_cv, &ddi_umem_unlock_mutex);
			CALLB_CPR_SAFE_END(&cprinfo, &ddi_umem_unlock_mutex);
			mutex_exit(&ddi_umem_unlock_mutex);
		}
	}
	/* ddi_umem_unlock_thread does not exit */
	/* NOTREACHED */
}
8239 
8240 /*
8241  * Start the thread that will process the ddi_umem_unlock list if it is
8242  * not already started (i_ddi_umem_unlock_thread).
8243  */
8244 static void
8245 i_ddi_umem_unlock_thread_start(void)
8246 {
8247 	mutex_enter(&ddi_umem_unlock_mutex);
8248 	if (ddi_umem_unlock_thread == NULL) {
8249 		ddi_umem_unlock_thread = thread_create(NULL, 0,
8250 		    i_ddi_umem_unlock_thread, NULL, 0, &p0,
8251 		    TS_RUN, minclsyspri);
8252 	}
8253 	mutex_exit(&ddi_umem_unlock_mutex);
8254 }
8255 
8256 /*
8257  * Lock the virtual address range in the current process and create a
8258  * ddi_umem_cookie (of type UMEM_LOCKED). This can be used to pass to
8259  * ddi_umem_iosetup to create a buf or do devmap_umem_setup/remap to export
8260  * to user space.
8261  *
8262  * Note: The resource control accounting currently uses a full charge model
8263  * in other words attempts to lock the same/overlapping areas of memory
8264  * will deduct the full size of the buffer from the projects running
8265  * counter for the device locked memory. This applies to umem_lockmemory too.
8266  *
8267  * addr, size should be PAGESIZE aligned
8268  * flags - DDI_UMEMLOCK_READ, DDI_UMEMLOCK_WRITE or both
8269  *	identifies whether the locked memory will be read or written or both
8270  *
8271  * Returns 0 on success
8272  *	EINVAL - for invalid parameters
8273  *	EPERM, ENOMEM and other error codes returned by as_pagelock
8274  *	ENOMEM - is returned if the current request to lock memory exceeds
8275  *		project.max-device-locked-memory resource control value.
8276  *	EAGAIN - could not start the ddi_umem_unlock list processing thread
8277  */
int
ddi_umem_lock(caddr_t addr, size_t len, int flags, ddi_umem_cookie_t *cookie)
{
	int	error;
	struct ddi_umem_cookie *p;

	*cookie = NULL;		/* in case of any error return */

	/* These are the only two valid flags */
	if ((flags & ~(DDI_UMEMLOCK_READ | DDI_UMEMLOCK_WRITE)) != 0) {
		return (EINVAL);
	}

	/* At least one of the two flags (or both) must be set */
	if ((flags & (DDI_UMEMLOCK_READ | DDI_UMEMLOCK_WRITE)) == 0) {
		return (EINVAL);
	}

	/* addr and len must be page-aligned */
	if (((uintptr_t)addr & PAGEOFFSET) != 0) {
		return (EINVAL);
	}

	if ((len & PAGEOFFSET) != 0) {
		return (EINVAL);
	}

	/*
	 * Call i_ddi_umem_unlock_thread_start if necessary.  It will
	 * be called on first ddi_umem_lock or umem_lockmemory call.
	 */
	if (ddi_umem_unlock_thread == NULL)
		i_ddi_umem_unlock_thread_start();

	/* Allocate memory for the cookie */
	p = kmem_zalloc(sizeof (struct ddi_umem_cookie), KM_SLEEP);

	/* Convert the flags to seg_rw type */
	if (flags & DDI_UMEMLOCK_WRITE) {
		p->s_flags = S_WRITE;
	} else {
		p->s_flags = S_READ;
	}

	/* Store curproc in cookie for later iosetup/unlock */
	p->procp = (void *)curproc;

	/*
	 * Store the struct as pointer in cookie for later use by
	 * ddi_umem_unlock.  The proc->p_as will be stale if ddi_umem_unlock
	 * is called after relvm is called.
	 */
	p->asp = curproc->p_as;
	/*
	 * The size field is needed for lockmem accounting.
	 */
	p->size = len;

	if (umem_incr_devlockmem(p) != 0) {
		/*
		 * The requested memory cannot be locked
		 */
		kmem_free(p, sizeof (struct ddi_umem_cookie));
		*cookie = (ddi_umem_cookie_t)NULL;
		return (ENOMEM);
	}
	/*
	 * umem_incr_devlockmem stashes the project ptr into the
	 * cookie. This is needed during unlock since that can
	 * happen in a non-USER context
	 */
	ASSERT(p->lockmem_proj);

	/* Lock the pages corresponding to addr, len in memory */
	error = as_pagelock(((proc_t *)p->procp)->p_as, &(p->pparray),
	    addr, len, p->s_flags);
	if (error != 0) {
		umem_decr_devlockmem(p);
		kmem_free(p, sizeof (struct ddi_umem_cookie));
		*cookie = (ddi_umem_cookie_t)NULL;
		return (error);
	}

	/* Initialize the fields in the ddi_umem_cookie */
	p->cvaddr = addr;
	p->type = UMEM_LOCKED;
	/* no driver callback here, so only i_ddi_umem_unlock uses the cookie */
	p->cook_refcnt = 1;

	*cookie = (ddi_umem_cookie_t)p;
	return (error);
}
8369 
8370 /*
8371  * Add the cookie to the ddi_umem_unlock list.  Pages will be
8372  * unlocked by i_ddi_umem_unlock_thread.
8373  */
8374 
void
ddi_umem_unlock(ddi_umem_cookie_t cookie)
{
	struct ddi_umem_cookie	*p = (struct ddi_umem_cookie *)cookie;

	ASSERT(p->type == UMEM_LOCKED);
	ASSERT(CPU_ON_INTR(CPU) == 0); /* cannot be high level */
	ASSERT(ddi_umem_unlock_thread != NULL);

	p->unl_forw = (struct ddi_umem_cookie *)NULL;	/* end of list */
	/*
	 * Queue the unlock request and notify i_ddi_umem_unlock thread
	 * if it's called in the interrupt context. Otherwise, unlock pages
	 * immediately.
	 */
	if (servicing_interrupt()) {
		/* queue the unlock request and notify the thread */
		mutex_enter(&ddi_umem_unlock_mutex);
		if (ddi_umem_unlock_head == NULL) {
			/*
			 * only wake the worker on an empty->non-empty
			 * transition; it only sleeps when the list is empty
			 */
			ddi_umem_unlock_head = ddi_umem_unlock_tail = p;
			cv_broadcast(&ddi_umem_unlock_cv);
		} else {
			ddi_umem_unlock_tail->unl_forw = p;
			ddi_umem_unlock_tail = p;
		}
		mutex_exit(&ddi_umem_unlock_mutex);
	} else {
		/* unlock the pages right away */
		(void) i_ddi_umem_unlock(p);
	}
}
8406 
8407 /*
8408  * Create a buf structure from a ddi_umem_cookie
8409  * cookie - is a ddi_umem_cookie for from ddi_umem_lock and ddi_umem_alloc
8410  *		(only UMEM_LOCKED & KMEM_NON_PAGEABLE types supported)
8411  * off, len - identifies the portion of the memory represented by the cookie
8412  *		that the buf points to.
8413  *	NOTE: off, len need to follow the alignment/size restrictions of the
8414  *		device (dev) that this buf will be passed to. Some devices
8415  *		will accept unrestricted alignment/size, whereas others (such as
8416  *		st) require some block-size alignment/size. It is the caller's
8417  *		responsibility to ensure that the alignment/size restrictions
8418  *		are met (we cannot assert as we do not know the restrictions)
8419  *
8420  * direction - is one of B_READ or B_WRITE and needs to be compatible with
8421  *		the flags used in ddi_umem_lock
8422  *
8423  * The following three arguments are used to initialize fields in the
8424  * buf structure and are uninterpreted by this routine.
8425  *
8426  * dev
8427  * blkno
8428  * iodone
8429  *
8430  * sleepflag - is one of DDI_UMEM_SLEEP or DDI_UMEM_NOSLEEP
8431  *
8432  * Returns a buf structure pointer on success (to be freed by freerbuf)
8433  *	NULL on any parameter error or memory alloc failure
8434  *
8435  */
8436 struct buf *
8437 ddi_umem_iosetup(ddi_umem_cookie_t cookie, off_t off, size_t len,
8438 	int direction, dev_t dev, daddr_t blkno,
8439 	int (*iodone)(struct buf *), int sleepflag)
8440 {
8441 	struct ddi_umem_cookie *p = (struct ddi_umem_cookie *)cookie;
8442 	struct buf *bp;
8443 
8444 	/*
8445 	 * check for valid cookie offset, len
8446 	 */
8447 	if ((off + len) > p->size) {
8448 		return (NULL);
8449 	}
8450 
8451 	if (len > p->size) {
8452 		return (NULL);
8453 	}
8454 
8455 	/* direction has to be one of B_READ or B_WRITE */
8456 	if ((direction != B_READ) && (direction != B_WRITE)) {
8457 		return (NULL);
8458 	}
8459 
8460 	/* These are the only two valid sleepflags */
8461 	if ((sleepflag != DDI_UMEM_SLEEP) && (sleepflag != DDI_UMEM_NOSLEEP)) {
8462 		return (NULL);
8463 	}
8464 
8465 	/*
8466 	 * Only cookies of type UMEM_LOCKED and KMEM_NON_PAGEABLE are supported
8467 	 */
8468 	if ((p->type != UMEM_LOCKED) && (p->type != KMEM_NON_PAGEABLE)) {
8469 		return (NULL);
8470 	}
8471 
8472 	/* If type is KMEM_NON_PAGEABLE procp is NULL */
8473 	ASSERT((p->type == KMEM_NON_PAGEABLE) ?
8474 		(p->procp == NULL) : (p->procp != NULL));
8475 
8476 	bp = kmem_alloc(sizeof (struct buf), sleepflag);
8477 	if (bp == NULL) {
8478 		return (NULL);
8479 	}
8480 	bioinit(bp);
8481 
8482 	bp->b_flags = B_BUSY | B_PHYS | direction;
8483 	bp->b_edev = dev;
8484 	bp->b_lblkno = blkno;
8485 	bp->b_iodone = iodone;
8486 	bp->b_bcount = len;
8487 	bp->b_proc = (proc_t *)p->procp;
8488 	ASSERT(((uintptr_t)(p->cvaddr) & PAGEOFFSET) == 0);
8489 	bp->b_un.b_addr = (caddr_t)((uintptr_t)(p->cvaddr) + off);
8490 	if (p->pparray != NULL) {
8491 		bp->b_flags |= B_SHADOW;
8492 		ASSERT(((uintptr_t)(p->cvaddr) & PAGEOFFSET) == 0);
8493 		bp->b_shadow = p->pparray + btop(off);
8494 	}
8495 	return (bp);
8496 }
8497 
8498 /*
8499  * Fault-handling and related routines
8500  */
8501 
8502 ddi_devstate_t
8503 ddi_get_devstate(dev_info_t *dip)
8504 {
8505 	if (DEVI_IS_DEVICE_OFFLINE(dip))
8506 		return (DDI_DEVSTATE_OFFLINE);
8507 	else if (DEVI_IS_DEVICE_DOWN(dip) || DEVI_IS_BUS_DOWN(dip))
8508 		return (DDI_DEVSTATE_DOWN);
8509 	else if (DEVI_IS_BUS_QUIESCED(dip))
8510 		return (DDI_DEVSTATE_QUIESCED);
8511 	else if (DEVI_IS_DEVICE_DEGRADED(dip))
8512 		return (DDI_DEVSTATE_DEGRADED);
8513 	else
8514 		return (DDI_DEVSTATE_UP);
8515 }
8516 
8517 void
8518 ddi_dev_report_fault(dev_info_t *dip, ddi_fault_impact_t impact,
8519 	ddi_fault_location_t location, const char *message)
8520 {
8521 	struct ddi_fault_event_data fd;
8522 	ddi_eventcookie_t ec;
8523 
8524 	/*
8525 	 * Assemble all the information into a fault-event-data structure
8526 	 */
8527 	fd.f_dip = dip;
8528 	fd.f_impact = impact;
8529 	fd.f_location = location;
8530 	fd.f_message = message;
8531 	fd.f_oldstate = ddi_get_devstate(dip);
8532 
8533 	/*
8534 	 * Get eventcookie from defining parent.
8535 	 */
8536 	if (ddi_get_eventcookie(dip, DDI_DEVI_FAULT_EVENT, &ec) !=
8537 	    DDI_SUCCESS)
8538 		return;
8539 
8540 	(void) ndi_post_event(dip, dip, ec, &fd);
8541 }
8542 
/* Return the devinfo node's device-class string. */
char *
i_ddi_devi_class(dev_info_t *dip)
{
	return (DEVI(dip)->devi_device_class);
}
8548 
8549 int
8550 i_ddi_set_devi_class(dev_info_t *dip, char *devi_class, int flag)
8551 {
8552 	struct dev_info *devi = DEVI(dip);
8553 
8554 	mutex_enter(&devi->devi_lock);
8555 
8556 	if (devi->devi_device_class)
8557 		kmem_free(devi->devi_device_class,
8558 		    strlen(devi->devi_device_class) + 1);
8559 
8560 	if ((devi->devi_device_class = i_ddi_strdup(devi_class, flag))
8561 	    != NULL) {
8562 		mutex_exit(&devi->devi_lock);
8563 		return (DDI_SUCCESS);
8564 	}
8565 
8566 	mutex_exit(&devi->devi_lock);
8567 
8568 	return (DDI_FAILURE);
8569 }
8570 
8571 
8572 /*
8573  * Task Queues DDI interfaces.
8574  */
8575 
8576 /* ARGSUSED */
8577 ddi_taskq_t *
8578 ddi_taskq_create(dev_info_t *dip, const char *name, int nthreads,
8579     pri_t pri, uint_t cflags)
8580 {
8581 	char full_name[TASKQ_NAMELEN];
8582 	const char *tq_name;
8583 	int nodeid = 0;
8584 
8585 	if (dip == NULL)
8586 		tq_name = name;
8587 	else {
8588 		nodeid = ddi_get_instance(dip);
8589 
8590 		if (name == NULL)
8591 			name = "tq";
8592 
8593 		(void) snprintf(full_name, sizeof (full_name), "%s_%s",
8594 		    ddi_driver_name(dip), name);
8595 
8596 		tq_name = full_name;
8597 	}
8598 
8599 	return ((ddi_taskq_t *)taskq_create_instance(tq_name, nodeid, nthreads,
8600 		    pri == TASKQ_DEFAULTPRI ? minclsyspri : pri,
8601 		    nthreads, INT_MAX, TASKQ_PREPOPULATE));
8602 }
8603 
8604 void
8605 ddi_taskq_destroy(ddi_taskq_t *tq)
8606 {
8607 	taskq_destroy((taskq_t *)tq);
8608 }
8609 
8610 int
8611 ddi_taskq_dispatch(ddi_taskq_t *tq, void (* func)(void *),
8612     void *arg, uint_t dflags)
8613 {
8614 	taskqid_t id = taskq_dispatch((taskq_t *)tq, func, arg,
8615 	    dflags == DDI_SLEEP ? TQ_SLEEP : TQ_NOSLEEP);
8616 
8617 	return (id != 0 ? DDI_SUCCESS : DDI_FAILURE);
8618 }
8619 
8620 void
8621 ddi_taskq_wait(ddi_taskq_t *tq)
8622 {
8623 	taskq_wait((taskq_t *)tq);
8624 }
8625 
8626 void
8627 ddi_taskq_suspend(ddi_taskq_t *tq)
8628 {
8629 	taskq_suspend((taskq_t *)tq);
8630 }
8631 
8632 boolean_t
8633 ddi_taskq_suspended(ddi_taskq_t *tq)
8634 {
8635 	return (taskq_suspended((taskq_t *)tq));
8636 }
8637 
8638 void
8639 ddi_taskq_resume(ddi_taskq_t *tq)
8640 {
8641 	taskq_resume((taskq_t *)tq);
8642 }
8643 
8644 int
8645 ddi_parse(
8646 	const char	*ifname,
8647 	char		*alnum,
8648 	uint_t		*nump)
8649 {
8650 	const char	*p;
8651 	int		l;
8652 	ulong_t		num;
8653 	boolean_t	nonum = B_TRUE;
8654 	char		c;
8655 
8656 	l = strlen(ifname);
8657 	for (p = ifname + l; p != ifname; l--) {
8658 		c = *--p;
8659 		if (!isdigit(c)) {
8660 			(void) strlcpy(alnum, ifname, l + 1);
8661 			if (ddi_strtoul(p + 1, NULL, 10, &num) != 0)
8662 				return (DDI_FAILURE);
8663 			break;
8664 		}
8665 		nonum = B_FALSE;
8666 	}
8667 	if (l == 0 || nonum)
8668 		return (DDI_FAILURE);
8669 
8670 	*nump = num;
8671 	return (DDI_SUCCESS);
8672 }
8673