xref: /illumos-gate/usr/src/uts/common/os/sunddi.c (revision 62d1ab6e)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License, Version 1.0 only
6  * (the "License").  You may not use this file except in compliance
7  * with the License.
8  *
9  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10  * or http://www.opensolaris.org/os/licensing.
11  * See the License for the specific language governing permissions
12  * and limitations under the License.
13  *
14  * When distributing Covered Code, include this CDDL HEADER in each
15  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16  * If applicable, add the following below this CDDL HEADER, with the
17  * fields enclosed by brackets "[]" replaced with your own identifying
18  * information: Portions Copyright [yyyy] [name of copyright owner]
19  *
20  * CDDL HEADER END
21  */
22 /*
23  * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 #include <sys/note.h>
30 #include <sys/types.h>
31 #include <sys/param.h>
32 #include <sys/systm.h>
33 #include <sys/buf.h>
34 #include <sys/uio.h>
35 #include <sys/cred.h>
36 #include <sys/poll.h>
37 #include <sys/mman.h>
38 #include <sys/kmem.h>
39 #include <sys/model.h>
40 #include <sys/file.h>
41 #include <sys/proc.h>
42 #include <sys/open.h>
43 #include <sys/user.h>
44 #include <sys/t_lock.h>
45 #include <sys/vm.h>
46 #include <sys/stat.h>
47 #include <vm/hat.h>
48 #include <vm/seg.h>
49 #include <vm/seg_vn.h>
50 #include <vm/seg_dev.h>
51 #include <vm/as.h>
52 #include <sys/cmn_err.h>
53 #include <sys/cpuvar.h>
54 #include <sys/debug.h>
55 #include <sys/autoconf.h>
56 #include <sys/sunddi.h>
57 #include <sys/esunddi.h>
58 #include <sys/sunndi.h>
59 #include <sys/kstat.h>
60 #include <sys/conf.h>
61 #include <sys/ddi_impldefs.h>	/* include implementation structure defs */
62 #include <sys/ndi_impldefs.h>	/* include prototypes */
63 #include <sys/hwconf.h>
64 #include <sys/pathname.h>
65 #include <sys/modctl.h>
66 #include <sys/epm.h>
67 #include <sys/devctl.h>
68 #include <sys/callb.h>
69 #include <sys/cladm.h>
70 #include <sys/sysevent.h>
71 #include <sys/dacf_impl.h>
72 #include <sys/ddidevmap.h>
73 #include <sys/bootconf.h>
74 #include <sys/disp.h>
75 #include <sys/atomic.h>
76 #include <sys/promif.h>
77 #include <sys/instance.h>
78 #include <sys/sysevent/eventdefs.h>
79 #include <sys/task.h>
80 #include <sys/project.h>
81 #include <sys/taskq.h>
82 #include <sys/devpolicy.h>
83 #include <sys/ctype.h>
84 #include <net/if.h>
85 
86 extern	pri_t	minclsyspri;
87 
88 extern	rctl_hndl_t rc_project_devlockmem;
89 
90 #ifdef DEBUG
91 static int sunddi_debug = 0;
92 #endif /* DEBUG */
93 
94 /* ddi_umem_unlock miscellaneous */
95 
96 static	void	i_ddi_umem_unlock_thread_start(void);
97 
98 static	kmutex_t	ddi_umem_unlock_mutex; /* unlock list mutex */
99 static	kcondvar_t	ddi_umem_unlock_cv; /* unlock list block/unblock */
100 static	kthread_t	*ddi_umem_unlock_thread;
101 /*
102  * The ddi_umem_unlock FIFO list.  NULL head pointer indicates empty list.
103  */
104 static	struct	ddi_umem_cookie *ddi_umem_unlock_head = NULL;
105 static	struct	ddi_umem_cookie *ddi_umem_unlock_tail = NULL;
106 
107 /*
108  * This lock protects the project.max-device-locked-memory counter.
 * When both p_lock (proc_t) and this lock need to be acquired, p_lock
110  * should be acquired first.
111  */
112 static kmutex_t umem_devlockmem_rctl_lock;
113 
114 
115 /*
116  * DDI(Sun) Function and flag definitions:
117  */
118 
119 #if defined(__x86)
120 /*
121  * Used to indicate which entries were chosen from a range.
122  */
123 char	*chosen_reg = "chosen-reg";
124 #endif
125 
126 /*
127  * Function used to ring system console bell
128  */
129 void (*ddi_console_bell_func)(clock_t duration);
130 
131 /*
132  * Creating register mappings and handling interrupts:
133  */
134 
135 /*
136  * Generic ddi_map: Call parent to fulfill request...
137  */
138 
139 int
140 ddi_map(dev_info_t *dp, ddi_map_req_t *mp, off_t offset,
141     off_t len, caddr_t *addrp)
142 {
143 	dev_info_t *pdip;
144 
145 	ASSERT(dp);
146 	pdip = (dev_info_t *)DEVI(dp)->devi_parent;
147 	return ((DEVI(pdip)->devi_ops->devo_bus_ops->bus_map)(pdip,
148 	    dp, mp, offset, len, addrp));
149 }
150 
151 /*
152  * ddi_apply_range: (Called by nexi only.)
153  * Apply ranges in parent node dp, to child regspec rp...
154  */
155 
int
ddi_apply_range(dev_info_t *dp, dev_info_t *rdip, struct regspec *rp)
{
	/* Thin public wrapper around the implementation routine. */
	return (i_ddi_apply_range(dp, rdip, rp));
}
161 
/*
 * Map register set 'rnumber' of 'dip' into kernel virtual space,
 * returning the address through *kaddrp.  'offset' and 'len' select a
 * sub-range of the register set (len == 0 means the whole set).
 */
int
ddi_map_regs(dev_info_t *dip, uint_t rnumber, caddr_t *kaddrp, off_t offset,
    off_t len)
{
	ddi_map_req_t mr;
#if defined(__x86)
	struct {
		int	bus;
		int	addr;
		int	size;
	} reg, *reglist;
	uint_t	length;
	int	rc;

	/*
	 * get the 'registers' or the 'reg' property.
	 * We look up the reg property as an array of
	 * int's.
	 */
	rc = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS, "registers", (int **)&reglist, &length);
	if (rc != DDI_PROP_SUCCESS)
		rc = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip,
		    DDI_PROP_DONTPASS, "reg", (int **)&reglist, &length);
	if (rc == DDI_PROP_SUCCESS) {
		/*
		 * point to the required entry.
		 * NOTE(review): rnumber is not checked against
		 * length/entry count here; callers are presumed to
		 * pass a valid register number.
		 */
		reg = reglist[rnumber];
		reg.addr += offset;
		if (len != 0)
			reg.size = len;
		/*
		 * make a new property containing ONLY the required tuple.
		 * (Consumed by x86 nexus code via the "chosen-reg" name.)
		 */
		if (ddi_prop_update_int_array(DDI_DEV_T_NONE, dip,
		    chosen_reg, (int *)&reg, (sizeof (reg)/sizeof (int)))
		    != DDI_PROP_SUCCESS) {
			cmn_err(CE_WARN, "%s%d: cannot create '%s' "
			    "property", DEVI(dip)->devi_name,
			    DEVI(dip)->devi_instance, chosen_reg);
		}
		/*
		 * free the memory allocated by
		 * ddi_prop_lookup_int_array ().
		 */
		ddi_prop_free((void *)reglist);
	}
#endif
	/* Build a locked kernel-mapping request keyed by register number. */
	mr.map_op = DDI_MO_MAP_LOCKED;
	mr.map_type = DDI_MT_RNUMBER;
	mr.map_obj.rnumber = rnumber;
	mr.map_prot = PROT_READ | PROT_WRITE;
	mr.map_flags = DDI_MF_KERNEL_MAPPING;
	mr.map_handlep = NULL;
	mr.map_vers = DDI_MAP_VERSION;

	/*
	 * Call my parent to map in my regs.
	 */

	return (ddi_map(dip, &mr, offset, len, kaddrp));
}
225 
226 void
227 ddi_unmap_regs(dev_info_t *dip, uint_t rnumber, caddr_t *kaddrp, off_t offset,
228     off_t len)
229 {
230 	ddi_map_req_t mr;
231 
232 	mr.map_op = DDI_MO_UNMAP;
233 	mr.map_type = DDI_MT_RNUMBER;
234 	mr.map_flags = DDI_MF_KERNEL_MAPPING;
235 	mr.map_prot = PROT_READ | PROT_WRITE;	/* who cares? */
236 	mr.map_obj.rnumber = rnumber;
237 	mr.map_handlep = NULL;
238 	mr.map_vers = DDI_MAP_VERSION;
239 
240 	/*
241 	 * Call my parent to unmap my regs.
242 	 */
243 
244 	(void) ddi_map(dip, &mr, offset, len, kaddrp);
245 	*kaddrp = (caddr_t)0;
246 #if defined(__x86)
247 	(void) ddi_prop_remove(DDI_DEV_T_NONE, dip, chosen_reg);
248 #endif
249 }
250 
int
ddi_bus_map(dev_info_t *dip, dev_info_t *rdip, ddi_map_req_t *mp,
	off_t offset, off_t len, caddr_t *vaddrp)
{
	/* Default bus_map(9E) for nexi: defer to the implementation. */
	return (i_ddi_bus_map(dip, rdip, mp, offset, len, vaddrp));
}
257 
258 /*
 * nullbusmap:	The DDI default bus_map entry point for nexi
260  *		not conforming to the reg/range paradigm (i.e. scsi, etc.)
261  *		with no HAT/MMU layer to be programmed at this level.
262  *
263  *		If the call is to map by rnumber, return an error,
264  *		otherwise pass anything else up the tree to my parent.
265  */
266 int
267 nullbusmap(dev_info_t *dip, dev_info_t *rdip, ddi_map_req_t *mp,
268 	off_t offset, off_t len, caddr_t *vaddrp)
269 {
270 	_NOTE(ARGUNUSED(rdip))
271 	if (mp->map_type == DDI_MT_RNUMBER)
272 		return (DDI_ME_UNSUPPORTED);
273 
274 	return (ddi_map(dip, mp, offset, len, vaddrp));
275 }
276 
277 /*
278  * ddi_rnumber_to_regspec: Not for use by leaf drivers.
279  *			   Only for use by nexi using the reg/range paradigm.
280  */
struct regspec *
ddi_rnumber_to_regspec(dev_info_t *dip, int rnumber)
{
	/* Nexus-only helper: translate a register number to its regspec. */
	return (i_ddi_rnumber_to_regspec(dip, rnumber));
}
286 
287 
288 /*
289  * Note that we allow the dip to be nil because we may be called
290  * prior even to the instantiation of the devinfo tree itself - all
291  * regular leaf and nexus drivers should always use a non-nil dip!
292  *
293  * We treat peek in a somewhat cavalier fashion .. assuming that we'll
294  * simply get a synchronous fault as soon as we touch a missing address.
295  *
296  * Poke is rather more carefully handled because we might poke to a write
297  * buffer, "succeed", then only find some time later that we got an
298  * asynchronous fault that indicated that the address we were writing to
299  * was not really backed by hardware.
300  */
301 
302 static int
303 i_ddi_peekpoke(dev_info_t *devi, ddi_ctl_enum_t cmd, size_t size,
304     void *addr, void *value_p)
305 {
306 	union {
307 		uint64_t	u64;
308 		uint32_t	u32;
309 		uint16_t	u16;
310 		uint8_t		u8;
311 	} peekpoke_value;
312 
313 	peekpoke_ctlops_t peekpoke_args;
314 	uint64_t dummy_result;
315 	int rval;
316 
317 	/* Note: size is assumed to be correct;  it is not checked. */
318 	peekpoke_args.size = size;
319 	peekpoke_args.dev_addr = (uintptr_t)addr;
320 	peekpoke_args.handle = NULL;
321 	peekpoke_args.repcount = 1;
322 	peekpoke_args.flags = 0;
323 
324 	if (cmd == DDI_CTLOPS_POKE) {
325 		switch (size) {
326 		case sizeof (uint8_t):
327 			peekpoke_value.u8 = *(uint8_t *)value_p;
328 			break;
329 		case sizeof (uint16_t):
330 			peekpoke_value.u16 = *(uint16_t *)value_p;
331 			break;
332 		case sizeof (uint32_t):
333 			peekpoke_value.u32 = *(uint32_t *)value_p;
334 			break;
335 		case sizeof (uint64_t):
336 			peekpoke_value.u64 = *(uint64_t *)value_p;
337 			break;
338 		}
339 	}
340 
341 	peekpoke_args.host_addr = (uintptr_t)&peekpoke_value.u64;
342 
343 	if (devi != NULL)
344 		rval = ddi_ctlops(devi, devi, cmd, &peekpoke_args,
345 		    &dummy_result);
346 	else
347 		rval = peekpoke_mem(cmd, &peekpoke_args);
348 
349 	/*
350 	 * A NULL value_p is permitted by ddi_peek(9F); discard the result.
351 	 */
352 	if ((cmd == DDI_CTLOPS_PEEK) & (value_p != NULL)) {
353 		switch (size) {
354 		case sizeof (uint8_t):
355 			*(uint8_t *)value_p = peekpoke_value.u8;
356 			break;
357 		case sizeof (uint16_t):
358 			*(uint16_t *)value_p = peekpoke_value.u16;
359 			break;
360 		case sizeof (uint32_t):
361 			*(uint32_t *)value_p = peekpoke_value.u32;
362 			break;
363 		case sizeof (uint64_t):
364 			*(uint64_t *)value_p = peekpoke_value.u64;
365 			break;
366 		}
367 	}
368 
369 	return (rval);
370 }
371 
372 /*
373  * Keep ddi_peek() and ddi_poke() in case 3rd parties are calling this.
374  * they shouldn't be, but the 9f manpage kind of pseudo exposes it.
375  */
376 int
377 ddi_peek(dev_info_t *devi, size_t size, void *addr, void *value_p)
378 {
379 	switch (size) {
380 	case sizeof (uint8_t):
381 	case sizeof (uint16_t):
382 	case sizeof (uint32_t):
383 	case sizeof (uint64_t):
384 		break;
385 	default:
386 		return (DDI_FAILURE);
387 	}
388 
389 	return (i_ddi_peekpoke(devi, DDI_CTLOPS_PEEK, size, addr, value_p));
390 }
391 
392 int
393 ddi_poke(dev_info_t *devi, size_t size, void *addr, void *value_p)
394 {
395 	switch (size) {
396 	case sizeof (uint8_t):
397 	case sizeof (uint16_t):
398 	case sizeof (uint32_t):
399 	case sizeof (uint64_t):
400 		break;
401 	default:
402 		return (DDI_FAILURE);
403 	}
404 
405 	return (i_ddi_peekpoke(devi, DDI_CTLOPS_POKE, size, addr, value_p));
406 }
407 
/* Peek one byte; named ddi_peekc under ILP32 for compatibility. */
#ifdef _LP64
int
ddi_peek8(dev_info_t *dip, int8_t *addr, int8_t *val_p)
#else /* _ILP32 */
int
ddi_peekc(dev_info_t *dip, int8_t *addr, int8_t *val_p)
#endif
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr,
	    val_p));
}
419 
/* Peek a 16-bit quantity; named ddi_peeks under ILP32 for compatibility. */
#ifdef _LP64
int
ddi_peek16(dev_info_t *dip, int16_t *addr, int16_t *val_p)
#else /* _ILP32 */
int
ddi_peeks(dev_info_t *dip, int16_t *addr, int16_t *val_p)
#endif
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr,
	    val_p));
}
431 
/* Peek a 32-bit quantity; named ddi_peekl under ILP32 for compatibility. */
#ifdef _LP64
int
ddi_peek32(dev_info_t *dip, int32_t *addr, int32_t *val_p)
#else /* _ILP32 */
int
ddi_peekl(dev_info_t *dip, int32_t *addr, int32_t *val_p)
#endif
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr,
	    val_p));
}
443 
/* Peek a 64-bit quantity; named ddi_peekd under ILP32 for compatibility. */
#ifdef _LP64
int
ddi_peek64(dev_info_t *dip, int64_t *addr, int64_t *val_p)
#else /* _ILP32 */
int
ddi_peekd(dev_info_t *dip, int64_t *addr, int64_t *val_p)
#endif
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr,
	    val_p));
}
455 
/* Poke one byte; named ddi_pokec under ILP32 for compatibility. */
#ifdef _LP64
int
ddi_poke8(dev_info_t *dip, int8_t *addr, int8_t val)
#else /* _ILP32 */
int
ddi_pokec(dev_info_t *dip, int8_t *addr, int8_t val)
#endif
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val));
}
466 
/* Poke a 16-bit quantity; named ddi_pokes under ILP32 for compatibility. */
#ifdef _LP64
int
ddi_poke16(dev_info_t *dip, int16_t *addr, int16_t val)
#else /* _ILP32 */
int
ddi_pokes(dev_info_t *dip, int16_t *addr, int16_t val)
#endif
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val));
}
477 
/* Poke a 32-bit quantity; named ddi_pokel under ILP32 for compatibility. */
#ifdef _LP64
int
ddi_poke32(dev_info_t *dip, int32_t *addr, int32_t val)
#else /* _ILP32 */
int
ddi_pokel(dev_info_t *dip, int32_t *addr, int32_t val)
#endif
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val));
}
488 
/* Poke a 64-bit quantity; named ddi_poked under ILP32 for compatibility. */
#ifdef _LP64
int
ddi_poke64(dev_info_t *dip, int64_t *addr, int64_t val)
#else /* _ILP32 */
int
ddi_poked(dev_info_t *dip, int64_t *addr, int64_t val)
#endif
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val));
}
499 
500 /*
501  * ddi_peekpokeio() is used primarily by the mem drivers for moving
502  * data to and from uio structures via peek and poke.  Note that we
503  * use "internal" routines ddi_peek and ddi_poke to make this go
504  * slightly faster, avoiding the call overhead ..
505  */
/*
 * Move 'len' bytes between device address 'addr' and the uio, peeking
 * (UIO_READ) or poking (UIO_WRITE) in the largest naturally-aligned
 * chunks permitted by 'xfersize' and the current address/length
 * alignment.  Returns DDI_SUCCESS or DDI_FAILURE.
 */
int
ddi_peekpokeio(dev_info_t *devi, struct uio *uio, enum uio_rw rw,
    caddr_t addr, size_t len, uint_t xfersize)
{
	int64_t	ibuffer;	/* aligned staging buffer for transfers */
	int8_t w8;
	size_t sz;
	int o;

	/* Clamp the caller's chunk size to the native word size. */
	if (xfersize > sizeof (long))
		xfersize = sizeof (long);

	while (len != 0) {
		/* Odd address or odd residual length: go byte-at-a-time. */
		if ((len | (uintptr_t)addr) & 1) {
			sz = sizeof (int8_t);
			if (rw == UIO_WRITE) {
				if ((o = uwritec(uio)) == -1)
					return (DDI_FAILURE);
				if (ddi_poke8(devi, (int8_t *)addr,
				    (int8_t)o) != DDI_SUCCESS)
					return (DDI_FAILURE);
			} else {
				if (i_ddi_peekpoke(devi, DDI_CTLOPS_PEEK, sz,
				    (int8_t *)addr, &w8) != DDI_SUCCESS)
					return (DDI_FAILURE);
				if (ureadc(w8, uio))
					return (DDI_FAILURE);
			}
		} else {
			/*
			 * Pick the widest transfer for which both the
			 * address and the remaining length are aligned.
			 */
			switch (xfersize) {
			case sizeof (int64_t):
				if (((len | (uintptr_t)addr) &
				    (sizeof (int64_t) - 1)) == 0) {
					sz = xfersize;
					break;
				}
				/*FALLTHROUGH*/
			case sizeof (int32_t):
				if (((len | (uintptr_t)addr) &
				    (sizeof (int32_t) - 1)) == 0) {
					sz = xfersize;
					break;
				}
				/*FALLTHROUGH*/
			default:
				/*
				 * This still assumes that we might have an
				 * I/O bus out there that permits 16-bit
				 * transfers (and that it would be upset by
				 * 32-bit transfers from such locations).
				 */
				sz = sizeof (int16_t);
				break;
			}

			if (rw == UIO_READ) {
				if (i_ddi_peekpoke(devi, DDI_CTLOPS_PEEK, sz,
				    addr, &ibuffer) != DDI_SUCCESS)
					return (DDI_FAILURE);
			}

			if (uiomove(&ibuffer, sz, rw, uio))
				return (DDI_FAILURE);

			if (rw == UIO_WRITE) {
				if (i_ddi_peekpoke(devi, DDI_CTLOPS_POKE, sz,
				    addr, &ibuffer) != DDI_SUCCESS)
					return (DDI_FAILURE);
			}
		}
		addr += sz;
		len -= sz;
	}
	return (DDI_SUCCESS);
}
581 
582 /*
583  * These routines are used by drivers that do layered ioctls
584  * On sparc, they're implemented in assembler to avoid spilling
585  * register windows in the common (copyin) case ..
586  */
587 #if !defined(__sparc)
588 int
589 ddi_copyin(const void *buf, void *kernbuf, size_t size, int flags)
590 {
591 	if (flags & FKIOCTL)
592 		return (kcopy(buf, kernbuf, size) ? -1 : 0);
593 	return (copyin(buf, kernbuf, size));
594 }
595 
596 int
597 ddi_copyout(const void *buf, void *kernbuf, size_t size, int flags)
598 {
599 	if (flags & FKIOCTL)
600 		return (kcopy(buf, kernbuf, size) ? -1 : 0);
601 	return (copyout(buf, kernbuf, size));
602 }
603 #endif	/* !__sparc */
604 
605 /*
606  * Conversions in nexus pagesize units.  We don't duplicate the
607  * 'nil dip' semantics of peek/poke because btopr/btop/ptob are DDI/DKI
608  * routines anyway.
609  */
/* Convert bytes to pages in the parent nexus's pagesize (rounded down). */
unsigned long
ddi_btop(dev_info_t *dip, unsigned long bytes)
{
	unsigned long pages;

	/* NOTE(review): the ctlops return value is deliberately ignored. */
	(void) ddi_ctlops(dip, dip, DDI_CTLOPS_BTOP, &bytes, &pages);
	return (pages);
}
618 
/* Convert bytes to pages in the parent nexus's pagesize (rounded up). */
unsigned long
ddi_btopr(dev_info_t *dip, unsigned long bytes)
{
	unsigned long pages;

	(void) ddi_ctlops(dip, dip, DDI_CTLOPS_BTOPR, &bytes, &pages);
	return (pages);
}
627 
/* Convert pages to bytes in the parent nexus's pagesize. */
unsigned long
ddi_ptob(dev_info_t *dip, unsigned long pages)
{
	unsigned long bytes;

	(void) ddi_ctlops(dip, dip, DDI_CTLOPS_PTOB, &pages, &bytes);
	return (bytes);
}
636 
/*
 * Enter a machine critical section by raising to the highest priority
 * level; returns the previous level for ddi_exit_critical().
 */
unsigned int
ddi_enter_critical(void)
{
	return ((uint_t)spl7());
}
642 
/* Leave a critical section by restoring the level ddi_enter_critical gave. */
void
ddi_exit_critical(unsigned int spl)
{
	splx((int)spl);
}
648 
649 /*
650  * Nexus ctlops punter
651  */
652 
653 #if !defined(__sparc)
654 /*
655  * Request bus_ctl parent to handle a bus_ctl request
656  *
657  * (The sparc version is in sparc_ddi.s)
658  */
659 int
660 ddi_ctlops(dev_info_t *d, dev_info_t *r, ddi_ctl_enum_t op, void *a, void *v)
661 {
662 	int (*fp)();
663 
664 	if (!d || !r)
665 		return (DDI_FAILURE);
666 
667 	if ((d = (dev_info_t *)DEVI(d)->devi_bus_ctl) == NULL)
668 		return (DDI_FAILURE);
669 
670 	fp = DEVI(d)->devi_ops->devo_bus_ops->bus_ctl;
671 	return ((*fp)(d, r, op, a, v));
672 }
673 
674 #endif
675 
676 /*
677  * DMA/DVMA setup
678  */
679 
/*
 * Default DMA limits used when a caller passes a NULL ddi_dma_lim_t.
 * The structure layout differs per platform.
 */
#if defined(__sparc)
static ddi_dma_lim_t standard_limits = {
	(uint_t)0,	/* addr_t dlim_addr_lo */
	(uint_t)-1,	/* addr_t dlim_addr_hi */
	(uint_t)-1,	/* uint_t dlim_cntr_max */
	(uint_t)1,	/* uint_t dlim_burstsizes */
	(uint_t)1,	/* uint_t dlim_minxfer */
	0		/* uint_t dlim_dmaspeed */
};
#elif defined(__x86)
static ddi_dma_lim_t standard_limits = {
	(uint_t)0,		/* addr_t dlim_addr_lo */
	(uint_t)0xffffff,	/* addr_t dlim_addr_hi */
	(uint_t)0,		/* uint_t dlim_cntr_max */
	(uint_t)0x00000001,	/* uint_t dlim_burstsizes */
	(uint_t)DMA_UNIT_8,	/* uint_t dlim_minxfer */
	(uint_t)0,		/* uint_t dlim_dmaspeed */
	/*
	 * NOTE(review): '+' binds tighter than '<<', so this is
	 * 0x86 << (24 + 0) == 0x86 << 24; value is as intended but the
	 * expression deserves parentheses.
	 */
	(uint_t)0x86<<24+0,	/* uint_t dlim_version */
	(uint_t)0xffff,		/* uint_t dlim_adreg_max */
	(uint_t)0xffff,		/* uint_t dlim_ctreg_max */
	(uint_t)512,		/* uint_t dlim_granular */
	(int)1,			/* int dlim_sgllen */
	(uint_t)0xffffffff	/* uint_t dlim_reqsizes */
};

#endif
706 
/*
 * Old-style DMA setup: fill in defaults for missing limits (SPARC) or
 * fail on them (x86), then route the request to the appropriate
 * bus_dma_map implementation.
 */
int
ddi_dma_setup(dev_info_t *dip, struct ddi_dma_req *dmareqp,
    ddi_dma_handle_t *handlep)
{
	int (*funcp)() = ddi_dma_map;
	struct bus_ops *bop;
#if defined(__sparc)
	auto ddi_dma_lim_t dma_lim;

	/* Copy limits to the stack so we can safely substitute defaults. */
	if (dmareqp->dmar_limits == (ddi_dma_lim_t *)0) {
		dma_lim = standard_limits;
	} else {
		dma_lim = *dmareqp->dmar_limits;
	}
	dmareqp->dmar_limits = &dma_lim;
#endif
#if defined(__x86)
	/* x86 requires explicit limits from the caller. */
	if (dmareqp->dmar_limits == (ddi_dma_lim_t *)0)
		return (DDI_FAILURE);
#endif

	/*
	 * Handle the case that the requester is both a leaf
	 * and a nexus driver simultaneously by calling the
	 * requester's bus_dma_map function directly instead
	 * of ddi_dma_map.
	 */
	bop = DEVI(dip)->devi_ops->devo_bus_ops;
	if (bop && bop->bus_dma_map)
		funcp = bop->bus_dma_map;
	return ((*funcp)(dip, dip, dmareqp, handlep));
}
739 
/*
 * Old-style DMA setup for a virtual address range [addr, addr+len) in
 * address space 'as'.  Builds a ddi_dma_req and maps it; NULL limits
 * are replaced with standard_limits.  Fails with DDI_DMA_NOMAPPING for
 * a zero-length request.
 */
int
ddi_dma_addr_setup(dev_info_t *dip, struct as *as, caddr_t addr, size_t len,
    uint_t flags, int (*waitfp)(), caddr_t arg,
    ddi_dma_lim_t *limits, ddi_dma_handle_t *handlep)
{
	int (*funcp)() = ddi_dma_map;
	ddi_dma_lim_t dma_lim;
	struct ddi_dma_req dmareq;
	struct bus_ops *bop;

	if (len == 0) {
		return (DDI_DMA_NOMAPPING);
	}
	/* Stack copy: never hand the caller's limits struct downstream. */
	if (limits == (ddi_dma_lim_t *)0) {
		dma_lim = standard_limits;
	} else {
		dma_lim = *limits;
	}
	dmareq.dmar_limits = &dma_lim;
	dmareq.dmar_flags = flags;
	dmareq.dmar_fp = waitfp;
	dmareq.dmar_arg = arg;
	dmareq.dmar_object.dmao_size = len;
	dmareq.dmar_object.dmao_type = DMA_OTYP_VADDR;
	dmareq.dmar_object.dmao_obj.virt_obj.v_as = as;
	dmareq.dmar_object.dmao_obj.virt_obj.v_addr = addr;
	dmareq.dmar_object.dmao_obj.virt_obj.v_priv = NULL;

	/*
	 * Handle the case that the requester is both a leaf
	 * and a nexus driver simultaneously by calling the
	 * requester's bus_dma_map function directly instead
	 * of ddi_dma_map.
	 */
	bop = DEVI(dip)->devi_ops->devo_bus_ops;
	if (bop && bop->bus_dma_map)
		funcp = bop->bus_dma_map;

	return ((*funcp)(dip, dip, &dmareq, handlep));
}
780 
/*
 * Old-style DMA setup for a struct buf.  Classifies the buffer as a
 * page list (B_PAGEIO), shadow-paged vaddr (B_SHADOW) or plain vaddr,
 * picks the owning address space, and maps the resulting request.
 */
int
ddi_dma_buf_setup(dev_info_t *dip, struct buf *bp, uint_t flags,
    int (*waitfp)(), caddr_t arg, ddi_dma_lim_t *limits,
    ddi_dma_handle_t *handlep)
{
	int (*funcp)() = ddi_dma_map;
	ddi_dma_lim_t dma_lim;
	struct ddi_dma_req dmareq;
	struct bus_ops *bop;

	/* Stack copy of limits; substitute defaults when none given. */
	if (limits == (ddi_dma_lim_t *)0) {
		dma_lim = standard_limits;
	} else {
		dma_lim = *limits;
	}
	dmareq.dmar_limits = &dma_lim;
	dmareq.dmar_flags = flags;
	dmareq.dmar_fp = waitfp;
	dmareq.dmar_arg = arg;
	dmareq.dmar_object.dmao_size = (uint_t)bp->b_bcount;

	/* Page I/O that has not been remapped: describe it as a page list. */
	if ((bp->b_flags & (B_PAGEIO|B_REMAPPED)) == B_PAGEIO) {
		dmareq.dmar_object.dmao_type = DMA_OTYP_PAGES;
		dmareq.dmar_object.dmao_obj.pp_obj.pp_pp = bp->b_pages;
		dmareq.dmar_object.dmao_obj.pp_obj.pp_offset =
		    (uint_t)(((uintptr_t)bp->b_un.b_addr) & MMU_PAGEOFFSET);
	} else {
		dmareq.dmar_object.dmao_type = DMA_OTYP_BUFVADDR;
		dmareq.dmar_object.dmao_obj.virt_obj.v_addr = bp->b_un.b_addr;
		/* Shadow page list rides along in v_priv when present. */
		if ((bp->b_flags & (B_SHADOW|B_REMAPPED)) == B_SHADOW) {
			dmareq.dmar_object.dmao_obj.virt_obj.v_priv =
							bp->b_shadow;
		} else {
			dmareq.dmar_object.dmao_obj.virt_obj.v_priv = NULL;
		}

		/*
		 * If the buffer has no proc pointer, or the proc
		 * struct has the kernel address space, or the buffer has
		 * been marked B_REMAPPED (meaning that it is now
		 * mapped into the kernel's address space), then
		 * the address space is kas (kernel address space).
		 */
		if (bp->b_proc == NULL || bp->b_proc->p_as == &kas ||
		    (bp->b_flags & B_REMAPPED) != 0) {
			dmareq.dmar_object.dmao_obj.virt_obj.v_as = 0;
		} else {
			dmareq.dmar_object.dmao_obj.virt_obj.v_as =
			    bp->b_proc->p_as;
		}
	}

	/*
	 * Handle the case that the requester is both a leaf
	 * and a nexus driver simultaneously by calling the
	 * requester's bus_dma_map function directly instead
	 * of ddi_dma_map.
	 */
	bop = DEVI(dip)->devi_ops->devo_bus_ops;
	if (bop && bop->bus_dma_map)
		funcp = bop->bus_dma_map;

	return ((*funcp)(dip, dip, &dmareq, handlep));
}
845 
846 #if !defined(__sparc)
847 /*
848  * Request bus_dma_ctl parent to fiddle with a dma request.
849  *
850  * (The sparc version is in sparc_subr.s)
851  */
int
ddi_dma_mctl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, enum ddi_dma_ctlops request,
    off_t *offp, size_t *lenp, caddr_t *objp, uint_t flags)
{
	int (*fp)();

	/* Route the control request to the node fielding bus_dma_ctl. */
	dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_ctl;
	fp = DEVI(dip)->devi_ops->devo_bus_ops->bus_dma_ctl;
	return ((*fp) (dip, rdip, handle, request, offp, lenp, objp, flags));
}
863 #endif
864 
865 /*
866  * For all DMA control functions, call the DMA control
867  * routine and return status.
868  *
869  * Just plain assume that the parent is to be called.
870  * If a nexus driver or a thread outside the framework
871  * of a nexus driver or a leaf driver calls these functions,
872  * it is up to them to deal with the fact that the parent's
873  * bus_dma_ctl function will be the first one called.
874  */
875 
876 #define	HD	((ddi_dma_impl_t *)h)->dmai_rdip
877 
/* Get a kernel virtual address for part of a DMA object (DDI_DMA_KVADDR). */
int
ddi_dma_kvaddrp(ddi_dma_handle_t h, off_t off, size_t len, caddr_t *kp)
{
	return (ddi_dma_mctl(HD, HD, h, DDI_DMA_KVADDR, &off, &len, kp, 0));
}
883 
/* Convert a handle plus offset into a DMA cookie (DDI_DMA_HTOC). */
int
ddi_dma_htoc(ddi_dma_handle_t h, off_t o, ddi_dma_cookie_t *c)
{
	return (ddi_dma_mctl(HD, HD, h, DDI_DMA_HTOC, &o, 0, (caddr_t *)c, 0));
}
889 
/* Convert a DMA cookie back into an offset within the object. */
int
ddi_dma_coff(ddi_dma_handle_t h, ddi_dma_cookie_t *c, off_t *o)
{
	return (ddi_dma_mctl(HD, HD, h, DDI_DMA_COFF,
	    (off_t *)c, 0, (caddr_t *)o, 0));
}
896 
/* Shift a partial mapping to a new window (DDI_DMA_MOVWIN). */
int
ddi_dma_movwin(ddi_dma_handle_t h, off_t *o, size_t *l, ddi_dma_cookie_t *c)
{
	return (ddi_dma_mctl(HD, HD, h, DDI_DMA_MOVWIN, o,
	    l, (caddr_t *)c, 0));
}
903 
/*
 * Report the current DMA window (DDI_DMA_REPWIN); only meaningful for
 * handles mapped with DDI_DMA_PARTIAL.
 */
int
ddi_dma_curwin(ddi_dma_handle_t h, off_t *o, size_t *l)
{
	if ((((ddi_dma_impl_t *)h)->dmai_rflags & DDI_DMA_PARTIAL) == 0)
		return (DDI_FAILURE);
	return (ddi_dma_mctl(HD, HD, h, DDI_DMA_REPWIN, o, l, 0, 0));
}
911 
912 /*
913  * Note:  The astute might notice that in the next two routines
914  * the SPARC case passes a pointer to a ddi_dma_win_t as the 5th
915  * argument while the x86 case passes the ddi_dma_win_t directly.
916  *
917  * While it would be nice if the "correct" behavior was
918  * platform independent and specified someplace, it isn't.
919  * Until that point, what's required is that this call and
920  * the relevant bus nexus drivers agree, and in this case they
921  * do, at least for the cases I've looked at.
922  */
/*
 * Advance to the next DMA window.  Per the block comment above, SPARC
 * passes a pointer to the window while x86 passes the window value and
 * dispatches through the handle's dmai_mctl.
 */
int
ddi_dma_nextwin(ddi_dma_handle_t h, ddi_dma_win_t win,
    ddi_dma_win_t *nwin)
{
#if defined(__sparc)
	return (ddi_dma_mctl(HD, HD, h, DDI_DMA_NEXTWIN, (off_t *)&win, 0,
	    (caddr_t *)nwin, 0));
#elif defined(__x86)
	return (((ddi_dma_impl_t *)h)->dmai_mctl(HD, HD, h, DDI_DMA_NEXTWIN,
		(off_t *)win, 0, (caddr_t *)nwin, 0));
#else
	return (ddi_dma_mctl(HD, HD, h, DDI_DMA_NEXTWIN,
		(off_t *)win, 0, (caddr_t *)nwin, 0));
#endif
}
938 
/*
 * Advance to the next DMA segment within a window.  On SPARC the window
 * doubles as the handle; elsewhere the handle is recovered from the
 * segment's dmais_hndl back-pointer.
 */
int
ddi_dma_nextseg(ddi_dma_win_t win, ddi_dma_seg_t seg, ddi_dma_seg_t *nseg)
{
#if defined(__sparc)
	ddi_dma_handle_t h = (ddi_dma_handle_t)win;

	return (ddi_dma_mctl(HD, HD, h, DDI_DMA_NEXTSEG, (off_t *)&win,
	    (size_t *)&seg, (caddr_t *)nseg, 0));
#else
	ddi_dma_handle_t h = (ddi_dma_handle_t)
	    ((impl_dma_segment_t *)win)->dmais_hndl;

#if defined(__x86)
	return (((ddi_dma_impl_t *)h)->dmai_mctl(HD, HD, h, DDI_DMA_NEXTSEG,
		(off_t *)win, (size_t *)seg, (caddr_t *)nseg, 0));
#else
	return (ddi_dma_mctl(HD, HD, h, DDI_DMA_NEXTSEG,
		(off_t *)win, (size_t *)seg, (caddr_t *)nseg, 0));
#endif
#endif
}
960 
961 #if (defined(__i386) && !defined(__amd64)) || defined(__sparc)
962 /*
963  * This routine is Obsolete and should be removed from ALL architectures
964  * in a future release of Solaris.
965  *
966  * It is deliberately NOT ported to amd64; please fix the code that
967  * depends on this routine to use ddi_dma_nextcookie(9F).
968  */
/*
 * Obsolete: convert a DMA segment to a cookie (see the warning above;
 * new code should use ddi_dma_nextcookie(9F)).
 */
int
ddi_dma_segtocookie(ddi_dma_seg_t seg, off_t *o, off_t *l,
    ddi_dma_cookie_t *cookiep)
{
#if defined(__sparc)
	ddi_dma_handle_t h = (ddi_dma_handle_t)seg;

	return (ddi_dma_mctl(HD, HD, h, DDI_DMA_SEGTOC, o, (size_t *)l,
	    (caddr_t *)cookiep, 0));
#elif defined(__i386) && !defined(__amd64)
	ddi_dma_handle_t h = (ddi_dma_handle_t)
	    ((impl_dma_segment_t *)seg)->dmais_hndl;

	/*
	 * The hack used for i386 won't work here; we can't squeeze a
	 * pointer through the 'cache_flags' field.
	 */
	return (((ddi_dma_impl_t *)h)->dmai_mctl(HD, HD, h, DDI_DMA_SEGTOC,
		o, (size_t *)l, (caddr_t *)cookiep, (uint_t)seg));
#endif
}
990 #endif	/* (__i386 && !__amd64) || __sparc */
991 
992 #if !defined(__sparc)
993 
994 /*
995  * The SPARC versions of these routines are done in assembler to
996  * save register windows, so they're in sparc_subr.s.
997  */
998 
int
ddi_dma_map(dev_info_t *dip, dev_info_t *rdip,
	struct ddi_dma_req *dmareqp, ddi_dma_handle_t *handlep)
{
	dev_info_t	*hdip;
	int (*funcp)(dev_info_t *, dev_info_t *, struct ddi_dma_req *,
	    ddi_dma_handle_t *);

	/* Dispatch to the cached node that fields bus_dma_map for dip. */
	hdip = (dev_info_t *)DEVI(dip)->devi_bus_dma_map;

	funcp = DEVI(hdip)->devi_ops->devo_bus_ops->bus_dma_map;
	return ((*funcp)(hdip, rdip, dmareqp, handlep));
}
1012 
int
ddi_dma_allochdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_attr_t *attr,
    int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *handlep)
{
	dev_info_t	*hdip;
	int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_attr_t *,
	    int (*)(caddr_t), caddr_t, ddi_dma_handle_t *);

	/* Dispatch to the cached node that fields bus_dma_allochdl. */
	hdip = (dev_info_t *)DEVI(dip)->devi_bus_dma_allochdl;

	funcp = DEVI(hdip)->devi_ops->devo_bus_ops->bus_dma_allochdl;
	return ((*funcp)(hdip, rdip, attr, waitfp, arg, handlep));
}
1026 
int
ddi_dma_freehdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handlep)
{
	dev_info_t	*hdip;
	int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t);

	/*
	 * NOTE(review): this uses devi_bus_dma_allochdl (not a separate
	 * freehdl cache), so the handle is freed by the same node that
	 * allocated it.
	 */
	hdip = (dev_info_t *)DEVI(dip)->devi_bus_dma_allochdl;

	funcp = DEVI(hdip)->devi_ops->devo_bus_ops->bus_dma_freehdl;
	return ((*funcp)(hdip, rdip, handlep));
}
1038 
int
ddi_dma_bindhdl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, struct ddi_dma_req *dmareq,
    ddi_dma_cookie_t *cp, uint_t *ccountp)
{
	dev_info_t	*hdip;
	int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t,
	    struct ddi_dma_req *, ddi_dma_cookie_t *, uint_t *);

	/* Dispatch to the cached node that fields bus_dma_bindhdl. */
	hdip = (dev_info_t *)DEVI(dip)->devi_bus_dma_bindhdl;

	funcp = DEVI(hdip)->devi_ops->devo_bus_ops->bus_dma_bindhdl;
	return ((*funcp)(hdip, rdip, handle, dmareq, cp, ccountp));
}
1053 
int
ddi_dma_unbindhdl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle)
{
	dev_info_t	*hdip;
	int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t);

	/* Dispatch to the cached node that fields bus_dma_unbindhdl. */
	hdip = (dev_info_t *)DEVI(dip)->devi_bus_dma_unbindhdl;

	funcp = DEVI(hdip)->devi_ops->devo_bus_ops->bus_dma_unbindhdl;
	return ((*funcp)(hdip, rdip, handle));
}
1066 
1067 
int
ddi_dma_flush(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, off_t off, size_t len,
    uint_t cache_flags)
{
	dev_info_t	*hdip;
	int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t,
	    off_t, size_t, uint_t);

	/* Dispatch to the cached node that fields bus_dma_flush. */
	hdip = (dev_info_t *)DEVI(dip)->devi_bus_dma_flush;

	funcp = DEVI(hdip)->devi_ops->devo_bus_ops->bus_dma_flush;
	return ((*funcp)(hdip, rdip, handle, off, len, cache_flags));
}
1082 
1083 int
1084 ddi_dma_win(dev_info_t *dip, dev_info_t *rdip,
1085     ddi_dma_handle_t handle, uint_t win, off_t *offp,
1086     size_t *lenp, ddi_dma_cookie_t *cookiep, uint_t *ccountp)
1087 {
1088 	dev_info_t	*hdip;
1089 	int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t,
1090 	    uint_t, off_t *, size_t *, ddi_dma_cookie_t *, uint_t *);
1091 
1092 	hdip = (dev_info_t *)DEVI(dip)->devi_bus_dma_win;
1093 
1094 	funcp = DEVI(hdip)->devi_ops->devo_bus_ops->bus_dma_win;
1095 	return ((*funcp)(hdip, rdip, handle, win, offp, lenp,
1096 	    cookiep, ccountp));
1097 }
1098 
/*
 * ddi_dma_sync:
 *	Synchronize the CPU's and the device's views of `l' bytes at
 *	offset `o' in the DMA object, in the direction given by `whom'.
 *	Implemented via the parent nexus bus_dma_flush entry point.
 */
int
ddi_dma_sync(ddi_dma_handle_t h, off_t o, size_t l, uint_t whom)
{
	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)h;
	dev_info_t *hdip, *dip;
	int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t, off_t,
		size_t, uint_t);

	/*
	 * the DMA nexus driver will set DMP_NOSYNC if the
	 * platform does not require any sync operation. For
	 * example if the memory is uncached or consistent
	 * and without any I/O write buffers involved.
	 */
	if ((hp->dmai_rflags & DMP_NOSYNC) == DMP_NOSYNC)
		return (DDI_SUCCESS);

	/* dispatch through the nexus cached for the bus_dma_flush op */
	dip = hp->dmai_rdip;
	hdip = (dev_info_t *)DEVI(dip)->devi_bus_dma_flush;
	funcp = DEVI(hdip)->devi_ops->devo_bus_ops->bus_dma_flush;
	return ((*funcp)(hdip, dip, h, o, l, whom));
}
1121 
/*
 * ddi_dma_unbind_handle:
 *	Public unbind entry point: undo the binding established by a
 *	prior ddi_dma_*_bind_handle() call on handle `h'.
 */
int
ddi_dma_unbind_handle(ddi_dma_handle_t h)
{
	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)h;
	dev_info_t *hdip, *dip;
	int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t);

	dip = hp->dmai_rdip;
	hdip = (dev_info_t *)DEVI(dip)->devi_bus_dma_unbindhdl;
	/*
	 * NOTE(review): unlike the other DMA wrappers above, the function
	 * pointer is taken from the child's devi_bus_dma_unbindfunc cache
	 * rather than from hdip's bus_ops table -- presumably cached for
	 * this node; confirm against ddi_impldefs.h before restructuring.
	 */
	funcp = DEVI(dip)->devi_bus_dma_unbindfunc;
	return ((*funcp)(hdip, dip, h));
}
1134 
1135 #endif	/* !__sparc */
1136 
/*
 * ddi_dma_free:
 *	Release the DMA mapping resources of handle `h' via the
 *	DDI_DMA_FREE mctl operation.  On x86 the mctl entry point is
 *	taken from the handle itself; elsewhere the generic
 *	ddi_dma_mctl() wrapper is used.
 */
int
ddi_dma_free(ddi_dma_handle_t h)
{
#if !defined(__x86)
	return (ddi_dma_mctl(HD, HD, h, DDI_DMA_FREE, 0, 0, 0, 0));
#else
	return (((ddi_dma_impl_t *)h)->dmai_mctl(HD, HD, h, DDI_DMA_FREE,
		0, 0, 0, 0));
#endif
}
1147 
/*
 * ddi_iopb_alloc:
 *	Allocate `len' bytes of I/O-parameter-block memory for `dip',
 *	honoring the DMA limits in `limp' (the standard limits are
 *	substituted when limp is NULL).  The kernel address of the
 *	allocation is returned through `iopbp'.  Freed with
 *	ddi_iopb_free().
 */
int
ddi_iopb_alloc(dev_info_t *dip, ddi_dma_lim_t *limp, uint_t len, caddr_t *iopbp)
{
	ddi_dma_lim_t defalt;
	size_t size = len;

	/* fall back to the system-wide default DMA limits */
	if (!limp) {
		defalt = standard_limits;
		limp = &defalt;
	}
#if defined(__sparc)
	return (i_ddi_mem_alloc_lim(dip, limp, size, 0, 0, 0,
	    iopbp, NULL, NULL));
#else
	/* non-sparc: route through the DDI_DMA_IOPB_ALLOC mctl op */
	return (ddi_dma_mctl(dip, dip, 0, DDI_DMA_IOPB_ALLOC, (off_t *)limp,
	    &size, iopbp, 0));
#endif
}
1166 
/*
 * ddi_iopb_free:
 *	Free memory obtained from ddi_iopb_alloc().  The second argument
 *	(0) distinguishes iopb allocations from ddi_mem_alloc() memory,
 *	which is released with i_ddi_mem_free(kaddr, 1).
 */
void
ddi_iopb_free(caddr_t iopb)
{
	i_ddi_mem_free(iopb, 0);
}
1172 
/*
 * ddi_mem_alloc:
 *	Allocate `length' bytes of DMA-able memory for `dip', honoring
 *	the limits in `limits' (standard limits substituted when NULL).
 *	Only the low bit of `flags' is consumed (sleep/allocation mode
 *	passed through to the allocator).  The kernel address is
 *	returned via `kaddrp' and the rounded size via `real_length'.
 *	Freed with ddi_mem_free().
 */
int
ddi_mem_alloc(dev_info_t *dip, ddi_dma_lim_t *limits, uint_t length,
	uint_t flags, caddr_t *kaddrp, uint_t *real_length)
{
	ddi_dma_lim_t defalt;
	size_t size = length;

	/* fall back to the system-wide default DMA limits */
	if (!limits) {
		defalt = standard_limits;
		limits = &defalt;
	}
#if defined(__sparc)
	return (i_ddi_mem_alloc_lim(dip, limits, size, flags & 0x1,
	    1, 0, kaddrp, real_length, NULL));
#else
	/* non-sparc: route through the DDI_DMA_SMEM_ALLOC mctl op */
	return (ddi_dma_mctl(dip, dip, (ddi_dma_handle_t)real_length,
	    DDI_DMA_SMEM_ALLOC, (off_t *)limits, &size,
	    kaddrp, (flags & 0x1)));
#endif
}
1193 
/*
 * ddi_mem_free:
 *	Free memory obtained from ddi_mem_alloc(); the 1 flag pairs with
 *	that allocator (ddi_iopb_free() passes 0 for iopb memory).
 */
void
ddi_mem_free(caddr_t kaddr)
{
	i_ddi_mem_free(kaddr, 1);
}
1199 
1200 /*
1201  * DMA attributes, alignment, burst sizes, and transfer minimums
1202  */
1203 int
1204 ddi_dma_get_attr(ddi_dma_handle_t handle, ddi_dma_attr_t *attrp)
1205 {
1206 	ddi_dma_impl_t *dimp = (ddi_dma_impl_t *)handle;
1207 
1208 	if (attrp == NULL)
1209 		return (DDI_FAILURE);
1210 	*attrp = dimp->dmai_attr;
1211 	return (DDI_SUCCESS);
1212 }
1213 
1214 int
1215 ddi_dma_burstsizes(ddi_dma_handle_t handle)
1216 {
1217 	ddi_dma_impl_t *dimp = (ddi_dma_impl_t *)handle;
1218 
1219 	if (!dimp)
1220 		return (0);
1221 	else
1222 		return (dimp->dmai_burstsizes);
1223 }
1224 
1225 int
1226 ddi_dma_devalign(ddi_dma_handle_t handle, uint_t *alignment, uint_t *mineffect)
1227 {
1228 	ddi_dma_impl_t *dimp = (ddi_dma_impl_t *)handle;
1229 
1230 	if (!dimp || !alignment || !mineffect)
1231 		return (DDI_FAILURE);
1232 	if (!(dimp->dmai_rflags & DDI_DMA_SBUS_64BIT)) {
1233 		*alignment = 1 << ddi_ffs(dimp->dmai_burstsizes);
1234 	} else {
1235 		if (dimp->dmai_burstsizes & 0xff0000) {
1236 			*alignment = 1 << ddi_ffs(dimp->dmai_burstsizes >> 16);
1237 		} else {
1238 			*alignment = 1 << ddi_ffs(dimp->dmai_burstsizes);
1239 		}
1240 	}
1241 	*mineffect = dimp->dmai_minxfer;
1242 	return (DDI_SUCCESS);
1243 }
1244 
1245 int
1246 ddi_iomin(dev_info_t *a, int i, int stream)
1247 {
1248 	int r;
1249 
1250 	/*
1251 	 * Make sure that the initial value is sane
1252 	 */
1253 	if (i & (i - 1))
1254 		return (0);
1255 	if (i == 0)
1256 		i = (stream) ? 4 : 1;
1257 
1258 	r = ddi_ctlops(a, a,
1259 	    DDI_CTLOPS_IOMIN, (void *)(uintptr_t)stream, (void *)&i);
1260 	if (r != DDI_SUCCESS || (i & (i - 1)))
1261 		return (0);
1262 	return (i);
1263 }
1264 
1265 /*
1266  * Given two DMA attribute structures, apply the attributes
1267  * of one to the other, following the rules of attributes
1268  * and the wishes of the caller.
1269  *
1270  * The rules of DMA attribute structures are that you cannot
1271  * make things *less* restrictive as you apply one set
1272  * of attributes to another.
1273  *
1274  */
/*
 * ddi_dma_attr_merge:
 *	Fold the constraints of `mod' into `attr' in place.  Each field
 *	moves only in the more-restrictive direction: ranges shrink,
 *	alignments/granularity/minxfer grow, burst sizes intersect.
 */
void
ddi_dma_attr_merge(ddi_dma_attr_t *attr, ddi_dma_attr_t *mod)
{
	/* address window shrinks to the intersection */
	attr->dma_attr_addr_lo =
	    MAX(attr->dma_attr_addr_lo, mod->dma_attr_addr_lo);
	attr->dma_attr_addr_hi =
	    MIN(attr->dma_attr_addr_hi, mod->dma_attr_addr_hi);
	/* counts, segment size, sgl length: take the smaller */
	attr->dma_attr_count_max =
	    MIN(attr->dma_attr_count_max, mod->dma_attr_count_max);
	/* alignment and granularity: take the stricter (larger) */
	attr->dma_attr_align =
	    MAX(attr->dma_attr_align,  mod->dma_attr_align);
	/* only bursts supported by both remain */
	attr->dma_attr_burstsizes =
	    (uint_t)(attr->dma_attr_burstsizes & mod->dma_attr_burstsizes);
	attr->dma_attr_minxfer =
	    maxbit(attr->dma_attr_minxfer, mod->dma_attr_minxfer);
	attr->dma_attr_maxxfer =
	    MIN(attr->dma_attr_maxxfer, mod->dma_attr_maxxfer);
	attr->dma_attr_seg = MIN(attr->dma_attr_seg, mod->dma_attr_seg);
	attr->dma_attr_sgllen = MIN((uint_t)attr->dma_attr_sgllen,
	    (uint_t)mod->dma_attr_sgllen);
	attr->dma_attr_granular =
	    MAX(attr->dma_attr_granular, mod->dma_attr_granular);
}
1298 
1299 /*
1300  * mmap/segmap interface:
1301  */
1302 
1303 /*
1304  * ddi_segmap:		setup the default segment driver. Calls the drivers
1305  *			XXmmap routine to validate the range to be mapped.
1306  *			Return ENXIO of the range is not valid.  Create
1307  *			a seg_dev segment that contains all of the
1308  *			necessary information and will reference the
1309  *			default segment driver routines. It returns zero
1310  *			on success or non-zero on failure.
1311  */
int
ddi_segmap(dev_t dev, off_t offset, struct as *asp, caddr_t *addrp, off_t len,
    uint_t prot, uint_t maxprot, uint_t flags, cred_t *credp)
{
	extern int spec_segmap(dev_t, off_t, struct as *, caddr_t *,
	    off_t, uint_t, uint_t, uint_t, struct cred *);

	/* delegate entirely to the specfs default segmap implementation */
	return (spec_segmap(dev, offset, asp, addrp, len,
	    prot, maxprot, flags, credp));
}
1322 
1323 /*
1324  * ddi_map_fault:	Resolve mappings at fault time.  Used by segment
1325  *			drivers. Allows each successive parent to resolve
1326  *			address translations and add its mappings to the
1327  *			mapping list supplied in the page structure. It
1328  *			returns zero on success	or non-zero on failure.
1329  */
1330 
int
ddi_map_fault(dev_info_t *dip, struct hat *hat, struct seg *seg,
    caddr_t addr, struct devpage *dp, pfn_t pfn, uint_t prot, uint_t lock)
{
	/* thin wrapper: dip serves as both requesting and target node */
	return (i_ddi_map_fault(dip, dip, hat, seg, addr, dp, pfn, prot, lock));
}
1337 
1338 /*
1339  * ddi_device_mapping_check:	Called from ddi_segmap_setup.
1340  *	Invokes platform specific DDI to determine whether attributes specified
1341  *	in attr(9s) are	valid for the region of memory that will be made
1342  *	available for direct access to user process via the mmap(2) system call.
1343  */
/*
 * Returns 0 when the region is mappable with the requested access
 * attributes (with *hat_flags set from the framework), -1 otherwise.
 */
int
ddi_device_mapping_check(dev_t dev, ddi_device_acc_attr_t *accattrp,
    uint_t rnumber, uint_t *hat_flags)
{
	ddi_acc_handle_t handle;
	ddi_map_req_t mr;
	ddi_acc_hdl_t *hp;
	int result;
	dev_info_t *dip;

	/*
	 * we use e_ddi_hold_devi_by_dev to search for the devi.  We
	 * release it immediately since it should already be held by
	 * a devfs vnode.
	 */
	if ((dip =
	    e_ddi_hold_devi_by_dev(dev, E_DDI_HOLD_DEVI_NOATTACH)) == NULL)
		return (-1);
	ddi_release_devi(dip);		/* for e_ddi_hold_devi_by_dev() */

	/*
	 * Allocate and initialize the common elements of data
	 * access handle.
	 */
	handle = impl_acc_hdl_alloc(KM_SLEEP, NULL);
	if (handle == NULL)
		return (-1);

	hp = impl_acc_hdl_get(handle);
	hp->ah_vers = VERS_ACCHDL;
	hp->ah_dip = dip;
	hp->ah_rnumber = rnumber;
	hp->ah_offset = 0;
	hp->ah_len = 0;
	hp->ah_acc = *accattrp;

	/*
	 * Set up the mapping request and call to parent.
	 * DDI_MO_MAP_HANDLE only validates/describes the mapping; no
	 * user mapping is actually established here.
	 */
	mr.map_op = DDI_MO_MAP_HANDLE;
	mr.map_type = DDI_MT_RNUMBER;
	mr.map_obj.rnumber = rnumber;
	mr.map_prot = PROT_READ | PROT_WRITE;
	mr.map_flags = DDI_MF_KERNEL_MAPPING;
	mr.map_handlep = hp;
	mr.map_vers = DDI_MAP_VERSION;
	result = ddi_map(dip, &mr, 0, 0, NULL);

	/*
	 * Region must be mappable, pick up flags from the framework.
	 * NOTE(review): *hat_flags is written even when ddi_map failed;
	 * callers appear to check the return value first -- confirm.
	 */
	*hat_flags = hp->ah_hat_flags;

	/* the handle was only needed for the check; free it again */
	impl_acc_hdl_free(handle);

	/*
	 * check for end result.
	 */
	if (result != DDI_SUCCESS)
		return (-1);
	return (0);
}
1406 
1407 
1408 /*
1409  * Property functions:	 See also, ddipropdefs.h.
1410  *
1411  * These functions are the framework for the property functions,
1412  * i.e. they support software defined properties.  All implementation
1413  * specific property handling (i.e.: self-identifying devices and
1414  * PROM defined properties are handled in the implementation specific
1415  * functions (defined in ddi_implfuncs.h).
1416  */
1417 
1418 /*
1419  * nopropop:	Shouldn't be called, right?
1420  */
/*
 * nopropop:
 *	Stub prop_op entry point: unconditionally reports the property
 *	as not found, ignoring all arguments.
 */
int
nopropop(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op, int mod_flags,
    char *name, caddr_t valuep, int *lengthp)
{
	_NOTE(ARGUNUSED(dev, dip, prop_op, mod_flags, name, valuep, lengthp))
	return (DDI_PROP_NOT_FOUND);
}
1428 
#ifdef	DDI_PROP_DEBUG
int ddi_prop_debug_flag = 0;

/*
 * ddi_prop_debug:
 *	Set the property-debugging flag to `enable' and return its
 *	previous value, announcing any transition on the console.
 */
int
ddi_prop_debug(int enable)
{
	int previous = ddi_prop_debug_flag;

	/* report whenever debugging is, or was, turned on */
	if (enable != 0 || previous != 0)
		printf("ddi_prop_debug: debugging %s\n",
		    enable ? "enabled" : "disabled");
	ddi_prop_debug_flag = enable;
	return (previous);
}

#endif	/* DDI_PROP_DEBUG */
1445 
1446 /*
1447  * Search a property list for a match, if found return pointer
1448  * to matching prop struct, else return NULL.
1449  */
1450 
1451 ddi_prop_t *
1452 i_ddi_prop_search(dev_t dev, char *name, uint_t flags, ddi_prop_t **list_head)
1453 {
1454 	ddi_prop_t	*propp;
1455 
1456 	/*
1457 	 * find the property in child's devinfo:
1458 	 * Search order defined by this search function is first matching
1459 	 * property with input dev == DDI_DEV_T_ANY matching any dev or
1460 	 * dev == propp->prop_dev, name == propp->name, and the correct
1461 	 * data type as specified in the flags.  If a DDI_DEV_T_NONE dev
1462 	 * value made it this far then it implies a DDI_DEV_T_ANY search.
1463 	 */
1464 	if (dev == DDI_DEV_T_NONE)
1465 		dev = DDI_DEV_T_ANY;
1466 
1467 	for (propp = *list_head; propp != NULL; propp = propp->prop_next)  {
1468 
1469 		if (!DDI_STRSAME(propp->prop_name, name))
1470 			continue;
1471 
1472 		if ((dev != DDI_DEV_T_ANY) && (propp->prop_dev != dev))
1473 			continue;
1474 
1475 		if (((propp->prop_flags & flags) & DDI_PROP_TYPE_MASK) == 0)
1476 			continue;
1477 
1478 		return (propp);
1479 	}
1480 
1481 	return ((ddi_prop_t *)0);
1482 }
1483 
1484 /*
1485  * Search for property within devnames structures
1486  */
/*
 * i_ddi_search_global_prop:
 *	Look up `name' in the per-driver global property list hanging
 *	off the devnames entry selected by getmajor(dev).  Returns the
 *	matching ddi_prop_t or NULL.  The list is walked under the
 *	devnames dn_lock; the returned pointer is handed back after the
 *	lock is dropped.
 */
ddi_prop_t *
i_ddi_search_global_prop(dev_t dev, char *name, uint_t flags)
{
	major_t		major;
	struct devnames	*dnp;
	ddi_prop_t	*propp;

	/*
	 * Valid dev_t value is needed to index into the
	 * correct devnames entry, therefore a dev_t
	 * value of DDI_DEV_T_ANY is not appropriate.
	 */
	ASSERT(dev != DDI_DEV_T_ANY);
	if (dev == DDI_DEV_T_ANY) {
		return ((ddi_prop_t *)0);
	}

	major = getmajor(dev);
	dnp = &(devnamesp[major]);

	/* nothing to search if this driver has no global list at all */
	if (dnp->dn_global_prop_ptr == NULL)
		return ((ddi_prop_t *)0);

	LOCK_DEV_OPS(&dnp->dn_lock);

	for (propp = dnp->dn_global_prop_ptr->prop_list;
	    propp != NULL;
	    propp = (ddi_prop_t *)propp->prop_next) {

		if (!DDI_STRSAME(propp->prop_name, name))
			continue;

		/* without LDI_DEV_T_ANY in flags, require an exact dev match */
		if ((!(flags & LDI_DEV_T_ANY)) && (propp->prop_dev != dev))
			continue;

		/* requested data type must intersect the property's type */
		if (((propp->prop_flags & flags) & DDI_PROP_TYPE_MASK) == 0)
			continue;

		/* Property found, return it */
		UNLOCK_DEV_OPS(&dnp->dn_lock);
		return (propp);
	}

	UNLOCK_DEV_OPS(&dnp->dn_lock);
	return ((ddi_prop_t *)0);
}
1533 
1534 static char prop_no_mem_msg[] = "can't allocate memory for ddi property <%s>";
1535 
1536 /*
1537  * ddi_prop_search_global:
1538  *	Search the global property list within devnames
1539  *	for the named property.  Return the encoded value.
1540  */
static int
i_ddi_prop_search_global(dev_t dev, uint_t flags, char *name,
    void *valuep, uint_t *lengthp)
{
	ddi_prop_t	*propp;
	caddr_t		buffer;

	propp =  i_ddi_search_global_prop(dev, name, flags);

	/* Property NOT found, bail */
	if (propp == (ddi_prop_t *)0)
		return (DDI_PROP_NOT_FOUND);

	if (propp->prop_flags & DDI_PROP_UNDEF_IT)
		return (DDI_PROP_UNDEFINED);

	/*
	 * NOTE(review): a zero-length property would pass
	 * kmem_alloc(0, ...) here; presumably global properties always
	 * carry data -- confirm before relying on boolean globals.
	 */
	if ((buffer = kmem_alloc(propp->prop_len, KM_NOSLEEP)) == NULL) {
		cmn_err(CE_CONT, prop_no_mem_msg, name);
		return (DDI_PROP_NO_MEMORY);
	}

	/*
	 * Return the encoded data
	 */
	*(caddr_t *)valuep = buffer;
	*lengthp = propp->prop_len;
	bcopy(propp->prop_val, buffer, propp->prop_len);

	return (DDI_PROP_SUCCESS);
}
1571 
1572 /*
1573  * ddi_prop_search_common:	Lookup and return the encoded value
1574  */
/*
 * ddi_prop_search_common:
 *	Core software-property lookup.  Searches the node's own property
 *	lists under devi_lock, then iterates up the devinfo tree through
 *	each parent's bus_prop_op until the property is found, the root
 *	is reached, or DDI_PROP_DONTPASS stops the ascent.  The
 *	PROP_LEN_AND_VAL_ALLOC + DDI_PROP_CANSLEEP case pre-allocates a
 *	buffer with the mutex dropped and then re-runs the search, since
 *	the property may have changed size in the interim.
 */
int
ddi_prop_search_common(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
    uint_t flags, char *name, void *valuep, uint_t *lengthp)
{
	ddi_prop_t	*propp;
	int		i;
	caddr_t		buffer;
	caddr_t		prealloc = NULL;	/* buffer from a prior pass */
	int		plength = 0;		/* size of prealloc */
	dev_info_t	*pdip;
	int		(*bop)();

	/*CONSTANTCONDITION*/
	while (1)  {

		mutex_enter(&(DEVI(dip)->devi_lock));


		/*
		 * find the property in child's devinfo:
		 * Search order is:
		 *	1. driver defined properties
		 *	2. system defined properties
		 *	3. driver global properties
		 *	4. boot defined properties
		 */

		propp = i_ddi_prop_search(dev, name, flags,
		    &(DEVI(dip)->devi_drv_prop_ptr));
		if (propp == NULL)  {
			propp = i_ddi_prop_search(dev, name, flags,
			    &(DEVI(dip)->devi_sys_prop_ptr));
		}
		if ((propp == NULL) && DEVI(dip)->devi_global_prop_list) {
			propp = i_ddi_prop_search(dev, name, flags,
			    &DEVI(dip)->devi_global_prop_list->prop_list);
		}

		if (propp == NULL)  {
			propp = i_ddi_prop_search(dev, name, flags,
			    &(DEVI(dip)->devi_hw_prop_ptr));
		}

		/*
		 * Software property found?
		 */
		if (propp != (ddi_prop_t *)0)	{

			/*
			 * If explicit undefine, return now.
			 */
			if (propp->prop_flags & DDI_PROP_UNDEF_IT) {
				mutex_exit(&(DEVI(dip)->devi_lock));
				if (prealloc)
					kmem_free(prealloc, plength);
				return (DDI_PROP_UNDEFINED);
			}

			/*
			 * If we only want to know if it exists, return now
			 */
			if (prop_op == PROP_EXISTS) {
				mutex_exit(&(DEVI(dip)->devi_lock));
				ASSERT(prealloc == NULL);
				return (DDI_PROP_SUCCESS);
			}

			/*
			 * If length only request or prop length == 0,
			 * service request and return now.
			 */
			if ((prop_op == PROP_LEN) ||(propp->prop_len == 0)) {
				*lengthp = propp->prop_len;

				/*
				 * if prop_op is PROP_LEN_AND_VAL_ALLOC
				 * that means prop_len is 0, so set valuep
				 * also to NULL
				 */
				if (prop_op == PROP_LEN_AND_VAL_ALLOC)
					*(caddr_t *)valuep = NULL;

				mutex_exit(&(DEVI(dip)->devi_lock));
				if (prealloc)
					kmem_free(prealloc, plength);
				return (DDI_PROP_SUCCESS);
			}

			/*
			 * If LEN_AND_VAL_ALLOC and the request can sleep,
			 * drop the mutex, allocate the buffer, and go
			 * through the loop again.  If we already allocated
			 * the buffer, and the size of the property changed,
			 * keep trying...
			 */
			if ((prop_op == PROP_LEN_AND_VAL_ALLOC) &&
			    (flags & DDI_PROP_CANSLEEP))  {
				if (prealloc && (propp->prop_len != plength)) {
					kmem_free(prealloc, plength);
					prealloc = NULL;
				}
				if (prealloc == NULL)  {
					plength = propp->prop_len;
					mutex_exit(&(DEVI(dip)->devi_lock));
					/* sleep with the mutex dropped */
					prealloc = kmem_alloc(plength,
					    KM_SLEEP);
					continue;
				}
			}

			/*
			 * Allocate buffer, if required.  Either way,
			 * set `buffer' variable.
			 */
			i = *lengthp;			/* Get callers length */
			*lengthp = propp->prop_len;	/* Set callers length */

			switch (prop_op) {

			case PROP_LEN_AND_VAL_ALLOC:

				/* non-sleeping alloc unless prealloc'd above */
				if (prealloc == NULL) {
					buffer = kmem_alloc(propp->prop_len,
					    KM_NOSLEEP);
				} else {
					buffer = prealloc;
				}

				if (buffer == NULL)  {
					mutex_exit(&(DEVI(dip)->devi_lock));
					cmn_err(CE_CONT, prop_no_mem_msg, name);
					return (DDI_PROP_NO_MEMORY);
				}
				/* Set callers buf ptr */
				*(caddr_t *)valuep = buffer;
				break;

			case PROP_LEN_AND_VAL_BUF:

				if (propp->prop_len > (i)) {
					mutex_exit(&(DEVI(dip)->devi_lock));
					return (DDI_PROP_BUF_TOO_SMALL);
				}

				buffer = valuep;  /* Get callers buf ptr */
				break;

			default:
				break;
			}

			/*
			 * Do the copy.
			 */
			bcopy(propp->prop_val, buffer, propp->prop_len);
			mutex_exit(&(DEVI(dip)->devi_lock));
			return (DDI_PROP_SUCCESS);
		}

		mutex_exit(&(DEVI(dip)->devi_lock));
		if (prealloc)
			kmem_free(prealloc, plength);
		prealloc = NULL;

		/*
		 * Prop not found, call parent bus_ops to deal with possible
		 * h/w layer (possible PROM defined props, etc.) and to
		 * possibly ascend the hierarchy, if allowed by flags.
		 */
		pdip = (dev_info_t *)DEVI(dip)->devi_parent;

		/*
		 * One last call for the root driver PROM props?
		 */
		if (dip == ddi_root_node())  {
			return (ddi_bus_prop_op(dev, dip, dip, prop_op,
			    flags, name, valuep, (int *)lengthp));
		}

		/*
		 * We may have been called to check for properties
		 * within a single devinfo node that has no parent -
		 * see make_prop()
		 */
		if (pdip == NULL) {
			ASSERT((flags &
			    (DDI_PROP_DONTPASS | DDI_PROP_NOTPROM)) ==
			    (DDI_PROP_DONTPASS | DDI_PROP_NOTPROM));
			return (DDI_PROP_NOT_FOUND);
		}

		/*
		 * Instead of recursing, we do iterative calls up the tree.
		 * As a bit of optimization, skip the bus_op level if the
		 * node is a s/w node and if the parent's bus_prop_op function
		 * is `ddi_bus_prop_op', because we know that in this case,
		 * this function does nothing.
		 *
		 * 4225415: If the parent isn't attached, or the child
		 * hasn't been named by the parent yet, use the default
		 * ddi_bus_prop_op as a proxy for the parent.  This
		 * allows property lookups in any child/parent state to
		 * include 'prom' and inherited properties, even when
		 * there are no drivers attached to the child or parent.
		 */

		bop = ddi_bus_prop_op;
		if ((i_ddi_node_state(pdip) == DS_READY) &&
		    (i_ddi_node_state(dip) >= DS_INITIALIZED))
			bop = DEVI(pdip)->devi_ops->devo_bus_ops->bus_prop_op;

		i = DDI_PROP_NOT_FOUND;

		if ((bop != ddi_bus_prop_op) || ndi_dev_is_prom_node(dip)) {
			i = (*bop)(dev, pdip, dip, prop_op,
			    flags | DDI_PROP_DONTPASS,
			    name, valuep, lengthp);
		}

		if ((flags & DDI_PROP_DONTPASS) ||
		    (i != DDI_PROP_NOT_FOUND))
			return (i);

		/* ascend one level and retry the whole search */
		dip = pdip;
	}
	/*NOTREACHED*/
}
1802 
1803 
1804 /*
1805  * ddi_prop_op: The basic property operator for drivers.
1806  *
1807  * In ddi_prop_op, the type of valuep is interpreted based on prop_op:
1808  *
1809  *	prop_op			valuep
1810  *	------			------
1811  *
1812  *	PROP_LEN		<unused>
1813  *
1814  *	PROP_LEN_AND_VAL_BUF	Pointer to callers buffer
1815  *
1816  *	PROP_LEN_AND_VAL_ALLOC	Address of callers pointer (will be set to
1817  *				address of allocated buffer, if successful)
1818  */
int
ddi_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op, int mod_flags,
    char *name, caddr_t valuep, int *lengthp)
{
	int	i;

	/* callers may not pre-select a type; we add the type bits below */
	ASSERT((mod_flags & DDI_PROP_TYPE_MASK) == 0);

	/*
	 * If this was originally an LDI prop lookup then we bail here.
	 * The reason is that the LDI property lookup interfaces first call
	 * a drivers prop_op() entry point to allow it to override
	 * properties.  But if we've made it here, then the driver hasn't
	 * overridden any properties.  We don't want to continue with the
	 * property search here because we don't have any type information.
	 * When we return failure, the LDI interfaces will then proceed to
	 * call the typed property interfaces to look up the property.
	 */
	if (mod_flags & DDI_PROP_DYNAMIC)
		return (DDI_PROP_NOT_FOUND);

	/*
	 * check for pre-typed property consumer asking for typed property:
	 * see e_ddi_getprop_int64.
	 */
	if (mod_flags & DDI_PROP_CONSUMER_TYPED)
		mod_flags |= DDI_PROP_TYPE_INT64;
	mod_flags |= DDI_PROP_TYPE_ANY;

	i = ddi_prop_search_common(dev, dip, prop_op,
		mod_flags, name, valuep, (uint_t *)lengthp);
	/* normalize the 1275/PROM-found code to plain success for callers */
	if (i == DDI_PROP_FOUND_1275)
		return (DDI_PROP_SUCCESS);
	return (i);
}
1854 
1855 /*
1856  * ddi_prop_op_nblocks: The basic property operator for drivers that maintain
1857  * size in number of DEV_BSIZE blocks.  Provides a dynamic property
1858  * implementation for size oriented properties based on nblocks64 values passed
1859  * in by the driver.  Fallback to ddi_prop_op if the nblocks64 is too large.
1860  * This interface should not be used with a nblocks64 that represents the
1861  * driver's idea of how to represent unknown, if nblocks is unknown use
1862  * ddi_prop_op.
1863  */
int
ddi_prop_op_nblocks(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
    int mod_flags, char *name, caddr_t valuep, int *lengthp, uint64_t nblocks64)
{
	uint64_t size64;

	/*
	 * There is no point in supporting nblocks64 values that don't have
	 * an accurate uint64_t byte count representation.
	 */
	if (nblocks64 >= (UINT64_MAX >> DEV_BSHIFT))
		return (ddi_prop_op(dev, dip, prop_op, mod_flags,
		    name, valuep, lengthp));

	/* convert DEV_BSIZE blocks to bytes and let ddi_prop_op_size work */
	size64 = nblocks64 << DEV_BSHIFT;
	return (ddi_prop_op_size(dev, dip, prop_op, mod_flags,
	    name, valuep, lengthp, size64));
}
1882 
1883 /*
1884  * ddi_prop_op_size: The basic property operator for drivers that maintain size
1885  * in bytes. Provides a of dynamic property implementation for size oriented
1886  * properties based on size64 values passed in by the driver.  Fallback to
1887  * ddi_prop_op if the size64 is too large. This interface should not be used
1888  * with a size64 that represents the driver's idea of how to represent unknown,
1889  * if size is unknown use ddi_prop_op.
1890  *
1891  * NOTE: the legacy "nblocks"/"size" properties are treated as 32-bit unsigned
1892  * integers. While the most likely interface to request them ([bc]devi_size)
1893  * is declared int (signed) there is no enforcement of this, which means we
1894  * can't enforce limitations here without risking regression.
1895  */
int
ddi_prop_op_size(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
    int mod_flags, char *name, caddr_t valuep, int *lengthp, uint64_t size64)
{
	uint64_t nblocks64;
	int	callers_length;
	caddr_t	buffer;

	/* compute DEV_BSIZE nblocks value */
	nblocks64 = lbtodb(size64);

	/* get callers length, establish length of our dynamic properties */
	callers_length = *lengthp;

	/*
	 * "Nblocks"/"Size" are always 64-bit; the legacy lowercase
	 * "nblocks"/"size" are served only when the value fits in 32 bits.
	 */
	if (strcmp(name, "Nblocks") == 0)
		*lengthp = sizeof (uint64_t);
	else if (strcmp(name, "Size") == 0)
		*lengthp = sizeof (uint64_t);
	else if ((strcmp(name, "nblocks") == 0) && (nblocks64 < UINT_MAX))
		*lengthp = sizeof (uint32_t);
	else if ((strcmp(name, "size") == 0) && (size64 < UINT_MAX))
		*lengthp = sizeof (uint32_t);
	else {
		/* fallback to ddi_prop_op */
		return (ddi_prop_op(dev, dip, prop_op, mod_flags,
		    name, valuep, lengthp));
	}

	/* service request for the length of the property */
	if (prop_op == PROP_LEN)
		return (DDI_PROP_SUCCESS);

	/* the length of the property and the request must match */
	if (callers_length != *lengthp)
		return (DDI_PROP_INVAL_ARG);

	switch (prop_op) {
	case PROP_LEN_AND_VAL_ALLOC:
		if ((buffer = kmem_alloc(*lengthp,
		    (mod_flags & DDI_PROP_CANSLEEP) ?
		    KM_SLEEP : KM_NOSLEEP)) == NULL)
			return (DDI_PROP_NO_MEMORY);

		*(caddr_t *)valuep = buffer;	/* set callers buf ptr */
		break;

	case PROP_LEN_AND_VAL_BUF:
		buffer = valuep;		/* get callers buf ptr */
		break;

	default:
		return (DDI_PROP_INVAL_ARG);
	}

	/* transfer the value into the buffer */
	if (strcmp(name, "Nblocks") == 0)
		*((uint64_t *)buffer) = nblocks64;
	else if (strcmp(name, "Size") == 0)
		*((uint64_t *)buffer) = size64;
	else if (strcmp(name, "nblocks") == 0)
		*((uint32_t *)buffer) = (uint32_t)nblocks64;
	else if (strcmp(name, "size") == 0)
		*((uint32_t *)buffer) = (uint32_t)size64;
	return (DDI_PROP_SUCCESS);
}
1961 
1962 /*
1963  * Variable length props...
1964  */
1965 
1966 /*
1967  * ddi_getlongprop:	Get variable length property len+val into a buffer
1968  *		allocated by property provider via kmem_alloc. Requester
1969  *		is responsible for freeing returned property via kmem_free.
1970  *
1971  *	Arguments:
1972  *
1973  *	dev_t:	Input:	dev_t of property.
1974  *	dip:	Input:	dev_info_t pointer of child.
1975  *	flags:	Input:	Possible flag modifiers are:
1976  *		DDI_PROP_DONTPASS:	Don't pass to parent if prop not found.
1977  *		DDI_PROP_CANSLEEP:	Memory allocation may sleep.
1978  *	name:	Input:	name of property.
1979  *	valuep:	Output:	Addr of callers buffer pointer.
1980  *	lengthp:Output:	*lengthp will contain prop length on exit.
1981  *
1982  *	Possible Returns:
1983  *
1984  *		DDI_PROP_SUCCESS:	Prop found and returned.
1985  *		DDI_PROP_NOT_FOUND:	Prop not found
1986  *		DDI_PROP_UNDEFINED:	Prop explicitly undefined.
1987  *		DDI_PROP_NO_MEMORY:	Prop found, but unable to alloc mem.
1988  */
1989 
int
ddi_getlongprop(dev_t dev, dev_info_t *dip, int flags,
    char *name, caddr_t valuep, int *lengthp)
{
	/* alloc-variant wrapper; caller frees *valuep with kmem_free */
	return (ddi_prop_op(dev, dip, PROP_LEN_AND_VAL_ALLOC,
	    flags, name, valuep, lengthp));
}
1997 
1998 /*
1999  *
2000  * ddi_getlongprop_buf:		Get long prop into pre-allocated callers
2001  *				buffer. (no memory allocation by provider).
2002  *
2003  *	dev_t:	Input:	dev_t of property.
2004  *	dip:	Input:	dev_info_t pointer of child.
2005  *	flags:	Input:	DDI_PROP_DONTPASS or NULL
2006  *	name:	Input:	name of property
2007  *	valuep:	Input:	ptr to callers buffer.
2008  *	lengthp:I/O:	ptr to length of callers buffer on entry,
2009  *			actual length of property on exit.
2010  *
2011  *	Possible returns:
2012  *
2013  *		DDI_PROP_SUCCESS	Prop found and returned
2014  *		DDI_PROP_NOT_FOUND	Prop not found
2015  *		DDI_PROP_UNDEFINED	Prop explicitly undefined.
2016  *		DDI_PROP_BUF_TOO_SMALL	Prop found, callers buf too small,
2017  *					no value returned, but actual prop
2018  *					length returned in *lengthp
2019  *
2020  */
2021 
int
ddi_getlongprop_buf(dev_t dev, dev_info_t *dip, int flags,
    char *name, caddr_t valuep, int *lengthp)
{
	/* caller-buffer variant; *lengthp is in/out (buf size / prop len) */
	return (ddi_prop_op(dev, dip, PROP_LEN_AND_VAL_BUF,
	    flags, name, valuep, lengthp));
}
2029 
2030 /*
2031  * Integer/boolean sized props.
2032  *
2033  * Call is value only... returns found boolean or int sized prop value or
2034  * defvalue if prop not found or is wrong length or is explicitly undefined.
2035  * Only flag is DDI_PROP_DONTPASS...
2036  *
2037  * By convention, this interface returns boolean (0) sized properties
2038  * as value (int)1.
2039  *
2040  * This never returns an error, if property not found or specifically
2041  * undefined, the input `defvalue' is returned.
2042  */
2043 
2044 int
2045 ddi_getprop(dev_t dev, dev_info_t *dip, int flags, char *name, int defvalue)
2046 {
2047 	int	propvalue = defvalue;
2048 	int	proplength = sizeof (int);
2049 	int	error;
2050 
2051 	error = ddi_prop_op(dev, dip, PROP_LEN_AND_VAL_BUF,
2052 	    flags, name, (caddr_t)&propvalue, &proplength);
2053 
2054 	if ((error == DDI_PROP_SUCCESS) && (proplength == 0))
2055 		propvalue = 1;
2056 
2057 	return (propvalue);
2058 }
2059 
2060 /*
2061  * Get prop length interface: flags are 0 or DDI_PROP_DONTPASS
2062  * if returns DDI_PROP_SUCCESS, length returned in *lengthp.
2063  */
2064 
int
ddi_getproplen(dev_t dev, dev_info_t *dip, int flags, char *name, int *lengthp)
{
	/* length-only lookup; no value buffer is needed */
	return (ddi_prop_op(dev, dip, PROP_LEN, flags, name, NULL, lengthp));
}
2070 
2071 /*
2072  * Allocate a struct prop_driver_data, along with 'size' bytes
2073  * for decoded property data.  This structure is freed by
2074  * calling ddi_prop_free(9F).
2075  */
2076 static void *
2077 ddi_prop_decode_alloc(size_t size, void (*prop_free)(struct prop_driver_data *))
2078 {
2079 	struct prop_driver_data *pdd;
2080 
2081 	/*
2082 	 * Allocate a structure with enough memory to store the decoded data.
2083 	 */
2084 	pdd = kmem_zalloc(sizeof (struct prop_driver_data) + size, KM_SLEEP);
2085 	pdd->pdd_size = (sizeof (struct prop_driver_data) + size);
2086 	pdd->pdd_prop_free = prop_free;
2087 
2088 	/*
2089 	 * Return a pointer to the location to put the decoded data.
2090 	 */
2091 	return ((void *)((caddr_t)pdd + sizeof (struct prop_driver_data)));
2092 }
2093 
2094 /*
2095  * Allocated the memory needed to store the encoded data in the property
2096  * handle.
2097  */
static int
ddi_prop_encode_alloc(prop_handle_t *ph, size_t size)
{
	/*
	 * If size is zero, then set data to NULL and size to 0.  This
	 * is a boolean property.
	 */
	if (size == 0) {
		ph->ph_size = 0;
		ph->ph_data = NULL;
		ph->ph_cur_pos = NULL;
		ph->ph_save_pos = NULL;
	} else {
		/*
		 * NOTE(review): this is an equality test, not a bit
		 * test -- the no-sleep path is only taken when
		 * DDI_PROP_DONTSLEEP is the *sole* flag set in
		 * ph_flags.  Presumably callers arrange that; confirm
		 * before changing to a mask test.
		 */
		if (ph->ph_flags == DDI_PROP_DONTSLEEP) {
			ph->ph_data = kmem_zalloc(size, KM_NOSLEEP);
			if (ph->ph_data == NULL)
				return (DDI_PROP_NO_MEMORY);
		} else
			ph->ph_data = kmem_zalloc(size, KM_SLEEP);
		ph->ph_size = size;
		/* Encoding starts at the beginning of the new buffer. */
		ph->ph_cur_pos = ph->ph_data;
		ph->ph_save_pos = ph->ph_data;
	}
	return (DDI_PROP_SUCCESS);
}
2123 
2124 /*
2125  * Free the space allocated by the lookup routines.  Each lookup routine
2126  * returns a pointer to the decoded data to the driver.  The driver then
2127  * passes this pointer back to us.  This data actually lives in a struct
2128  * prop_driver_data.  We use negative indexing to find the beginning of
2129  * the structure and then free the entire structure using the size and
2130  * the free routine stored in the structure.
2131  */
2132 void
2133 ddi_prop_free(void *datap)
2134 {
2135 	struct prop_driver_data *pdd;
2136 
2137 	/*
2138 	 * Get the structure
2139 	 */
2140 	pdd = (struct prop_driver_data *)
2141 		((caddr_t)datap - sizeof (struct prop_driver_data));
2142 	/*
2143 	 * Call the free routine to free it
2144 	 */
2145 	(*pdd->pdd_prop_free)(pdd);
2146 }
2147 
2148 /*
2149  * Free the data associated with an array of ints,
2150  * allocated with ddi_prop_decode_alloc().
2151  */
2152 static void
2153 ddi_prop_free_ints(struct prop_driver_data *pdd)
2154 {
2155 	kmem_free(pdd, pdd->pdd_size);
2156 }
2157 
2158 /*
2159  * Free a single string property or a single string contained within
2160  * the argv style return value of an array of strings.
2161  */
2162 static void
2163 ddi_prop_free_string(struct prop_driver_data *pdd)
2164 {
2165 	kmem_free(pdd, pdd->pdd_size);
2166 
2167 }
2168 
2169 /*
2170  * Free an array of strings.
2171  */
2172 static void
2173 ddi_prop_free_strings(struct prop_driver_data *pdd)
2174 {
2175 	kmem_free(pdd, pdd->pdd_size);
2176 }
2177 
2178 /*
2179  * Free the data associated with an array of bytes.
2180  */
2181 static void
2182 ddi_prop_free_bytes(struct prop_driver_data *pdd)
2183 {
2184 	kmem_free(pdd, pdd->pdd_size);
2185 }
2186 
2187 /*
2188  * Reset the current location pointer in the property handle to the
2189  * beginning of the data.
2190  */
2191 void
2192 ddi_prop_reset_pos(prop_handle_t *ph)
2193 {
2194 	ph->ph_cur_pos = ph->ph_data;
2195 	ph->ph_save_pos = ph->ph_data;
2196 }
2197 
2198 /*
2199  * Restore the current location pointer in the property handle to the
2200  * saved position.
2201  */
2202 void
2203 ddi_prop_save_pos(prop_handle_t *ph)
2204 {
2205 	ph->ph_save_pos = ph->ph_cur_pos;
2206 }
2207 
2208 /*
2209  * Save the location that the current location pointer is pointing to..
2210  */
2211 void
2212 ddi_prop_restore_pos(prop_handle_t *ph)
2213 {
2214 	ph->ph_cur_pos = ph->ph_save_pos;
2215 }
2216 
2217 /*
2218  * Property encode/decode functions
2219  */
2220 
2221 /*
2222  * Decode a single integer property
2223  */
2224 static int
2225 ddi_prop_fm_decode_int(prop_handle_t *ph, void *data, uint_t *nelements)
2226 {
2227 	int	i;
2228 	int	tmp;
2229 
2230 	/*
2231 	 * If there is nothing to decode return an error
2232 	 */
2233 	if (ph->ph_size == 0)
2234 		return (DDI_PROP_END_OF_DATA);
2235 
2236 	/*
2237 	 * Decode the property as a single integer and return it
2238 	 * in data if we were able to decode it.
2239 	 */
2240 	i = DDI_PROP_INT(ph, DDI_PROP_CMD_DECODE, &tmp);
2241 	if (i < DDI_PROP_RESULT_OK) {
2242 		switch (i) {
2243 		case DDI_PROP_RESULT_EOF:
2244 			return (DDI_PROP_END_OF_DATA);
2245 
2246 		case DDI_PROP_RESULT_ERROR:
2247 			return (DDI_PROP_CANNOT_DECODE);
2248 		}
2249 	}
2250 
2251 	*(int *)data = tmp;
2252 	*nelements = 1;
2253 	return (DDI_PROP_SUCCESS);
2254 }
2255 
2256 /*
2257  * Decode a single 64 bit integer property
2258  */
2259 static int
2260 ddi_prop_fm_decode_int64(prop_handle_t *ph, void *data, uint_t *nelements)
2261 {
2262 	int	i;
2263 	int64_t	tmp;
2264 
2265 	/*
2266 	 * If there is nothing to decode return an error
2267 	 */
2268 	if (ph->ph_size == 0)
2269 		return (DDI_PROP_END_OF_DATA);
2270 
2271 	/*
2272 	 * Decode the property as a single integer and return it
2273 	 * in data if we were able to decode it.
2274 	 */
2275 	i = DDI_PROP_INT64(ph, DDI_PROP_CMD_DECODE, &tmp);
2276 	if (i < DDI_PROP_RESULT_OK) {
2277 		switch (i) {
2278 		case DDI_PROP_RESULT_EOF:
2279 			return (DDI_PROP_END_OF_DATA);
2280 
2281 		case DDI_PROP_RESULT_ERROR:
2282 			return (DDI_PROP_CANNOT_DECODE);
2283 		}
2284 	}
2285 
2286 	*(int64_t *)data = tmp;
2287 	*nelements = 1;
2288 	return (DDI_PROP_SUCCESS);
2289 }
2290 
2291 /*
2292  * Decode an array of integers property
2293  */
2294 static int
2295 ddi_prop_fm_decode_ints(prop_handle_t *ph, void *data, uint_t *nelements)
2296 {
2297 	int	i;
2298 	int	cnt = 0;
2299 	int	*tmp;
2300 	int	*intp;
2301 	int	n;
2302 
2303 	/*
2304 	 * Figure out how many array elements there are by going through the
2305 	 * data without decoding it first and counting.
2306 	 */
2307 	for (;;) {
2308 		i = DDI_PROP_INT(ph, DDI_PROP_CMD_SKIP, NULL);
2309 		if (i < 0)
2310 			break;
2311 		cnt++;
2312 	}
2313 
2314 	/*
2315 	 * If there are no elements return an error
2316 	 */
2317 	if (cnt == 0)
2318 		return (DDI_PROP_END_OF_DATA);
2319 
2320 	/*
2321 	 * If we cannot skip through the data, we cannot decode it
2322 	 */
2323 	if (i == DDI_PROP_RESULT_ERROR)
2324 		return (DDI_PROP_CANNOT_DECODE);
2325 
2326 	/*
2327 	 * Reset the data pointer to the beginning of the encoded data
2328 	 */
2329 	ddi_prop_reset_pos(ph);
2330 
2331 	/*
2332 	 * Allocated memory to store the decoded value in.
2333 	 */
2334 	intp = ddi_prop_decode_alloc((cnt * sizeof (int)),
2335 		ddi_prop_free_ints);
2336 
2337 	/*
2338 	 * Decode each element and place it in the space we just allocated
2339 	 */
2340 	tmp = intp;
2341 	for (n = 0; n < cnt; n++, tmp++) {
2342 		i = DDI_PROP_INT(ph, DDI_PROP_CMD_DECODE, tmp);
2343 		if (i < DDI_PROP_RESULT_OK) {
2344 			/*
2345 			 * Free the space we just allocated
2346 			 * and return an error.
2347 			 */
2348 			ddi_prop_free(intp);
2349 			switch (i) {
2350 			case DDI_PROP_RESULT_EOF:
2351 				return (DDI_PROP_END_OF_DATA);
2352 
2353 			case DDI_PROP_RESULT_ERROR:
2354 				return (DDI_PROP_CANNOT_DECODE);
2355 			}
2356 		}
2357 	}
2358 
2359 	*nelements = cnt;
2360 	*(int **)data = intp;
2361 
2362 	return (DDI_PROP_SUCCESS);
2363 }
2364 
2365 /*
2366  * Decode a 64 bit integer array property
2367  */
2368 static int
2369 ddi_prop_fm_decode_int64_array(prop_handle_t *ph, void *data, uint_t *nelements)
2370 {
2371 	int	i;
2372 	int	n;
2373 	int	cnt = 0;
2374 	int64_t	*tmp;
2375 	int64_t	*intp;
2376 
2377 	/*
2378 	 * Count the number of array elements by going
2379 	 * through the data without decoding it.
2380 	 */
2381 	for (;;) {
2382 		i = DDI_PROP_INT64(ph, DDI_PROP_CMD_SKIP, NULL);
2383 		if (i < 0)
2384 			break;
2385 		cnt++;
2386 	}
2387 
2388 	/*
2389 	 * If there are no elements return an error
2390 	 */
2391 	if (cnt == 0)
2392 		return (DDI_PROP_END_OF_DATA);
2393 
2394 	/*
2395 	 * If we cannot skip through the data, we cannot decode it
2396 	 */
2397 	if (i == DDI_PROP_RESULT_ERROR)
2398 		return (DDI_PROP_CANNOT_DECODE);
2399 
2400 	/*
2401 	 * Reset the data pointer to the beginning of the encoded data
2402 	 */
2403 	ddi_prop_reset_pos(ph);
2404 
2405 	/*
2406 	 * Allocate memory to store the decoded value.
2407 	 */
2408 	intp = ddi_prop_decode_alloc((cnt * sizeof (int64_t)),
2409 		ddi_prop_free_ints);
2410 
2411 	/*
2412 	 * Decode each element and place it in the space allocated
2413 	 */
2414 	tmp = intp;
2415 	for (n = 0; n < cnt; n++, tmp++) {
2416 		i = DDI_PROP_INT64(ph, DDI_PROP_CMD_DECODE, tmp);
2417 		if (i < DDI_PROP_RESULT_OK) {
2418 			/*
2419 			 * Free the space we just allocated
2420 			 * and return an error.
2421 			 */
2422 			ddi_prop_free(intp);
2423 			switch (i) {
2424 			case DDI_PROP_RESULT_EOF:
2425 				return (DDI_PROP_END_OF_DATA);
2426 
2427 			case DDI_PROP_RESULT_ERROR:
2428 				return (DDI_PROP_CANNOT_DECODE);
2429 			}
2430 		}
2431 	}
2432 
2433 	*nelements = cnt;
2434 	*(int64_t **)data = intp;
2435 
2436 	return (DDI_PROP_SUCCESS);
2437 }
2438 
2439 /*
2440  * Encode an array of integers property (Can be one element)
2441  */
2442 int
2443 ddi_prop_fm_encode_ints(prop_handle_t *ph, void *data, uint_t nelements)
2444 {
2445 	int	i;
2446 	int	*tmp;
2447 	int	cnt;
2448 	int	size;
2449 
2450 	/*
2451 	 * If there is no data, we cannot do anything
2452 	 */
2453 	if (nelements == 0)
2454 		return (DDI_PROP_CANNOT_ENCODE);
2455 
2456 	/*
2457 	 * Get the size of an encoded int.
2458 	 */
2459 	size = DDI_PROP_INT(ph, DDI_PROP_CMD_GET_ESIZE, NULL);
2460 
2461 	if (size < DDI_PROP_RESULT_OK) {
2462 		switch (size) {
2463 		case DDI_PROP_RESULT_EOF:
2464 			return (DDI_PROP_END_OF_DATA);
2465 
2466 		case DDI_PROP_RESULT_ERROR:
2467 			return (DDI_PROP_CANNOT_ENCODE);
2468 		}
2469 	}
2470 
2471 	/*
2472 	 * Allocate space in the handle to store the encoded int.
2473 	 */
2474 	if (ddi_prop_encode_alloc(ph, size * nelements) !=
2475 		DDI_PROP_SUCCESS)
2476 		return (DDI_PROP_NO_MEMORY);
2477 
2478 	/*
2479 	 * Encode the array of ints.
2480 	 */
2481 	tmp = (int *)data;
2482 	for (cnt = 0; cnt < nelements; cnt++, tmp++) {
2483 		i = DDI_PROP_INT(ph, DDI_PROP_CMD_ENCODE, tmp);
2484 		if (i < DDI_PROP_RESULT_OK) {
2485 			switch (i) {
2486 			case DDI_PROP_RESULT_EOF:
2487 				return (DDI_PROP_END_OF_DATA);
2488 
2489 			case DDI_PROP_RESULT_ERROR:
2490 				return (DDI_PROP_CANNOT_ENCODE);
2491 			}
2492 		}
2493 	}
2494 
2495 	return (DDI_PROP_SUCCESS);
2496 }
2497 
2498 
2499 /*
2500  * Encode a 64 bit integer array property
2501  */
2502 int
2503 ddi_prop_fm_encode_int64(prop_handle_t *ph, void *data, uint_t nelements)
2504 {
2505 	int i;
2506 	int cnt;
2507 	int size;
2508 	int64_t *tmp;
2509 
2510 	/*
2511 	 * If there is no data, we cannot do anything
2512 	 */
2513 	if (nelements == 0)
2514 		return (DDI_PROP_CANNOT_ENCODE);
2515 
2516 	/*
2517 	 * Get the size of an encoded 64 bit int.
2518 	 */
2519 	size = DDI_PROP_INT64(ph, DDI_PROP_CMD_GET_ESIZE, NULL);
2520 
2521 	if (size < DDI_PROP_RESULT_OK) {
2522 		switch (size) {
2523 		case DDI_PROP_RESULT_EOF:
2524 			return (DDI_PROP_END_OF_DATA);
2525 
2526 		case DDI_PROP_RESULT_ERROR:
2527 			return (DDI_PROP_CANNOT_ENCODE);
2528 		}
2529 	}
2530 
2531 	/*
2532 	 * Allocate space in the handle to store the encoded int.
2533 	 */
2534 	if (ddi_prop_encode_alloc(ph, size * nelements) !=
2535 	    DDI_PROP_SUCCESS)
2536 		return (DDI_PROP_NO_MEMORY);
2537 
2538 	/*
2539 	 * Encode the array of ints.
2540 	 */
2541 	tmp = (int64_t *)data;
2542 	for (cnt = 0; cnt < nelements; cnt++, tmp++) {
2543 		i = DDI_PROP_INT64(ph, DDI_PROP_CMD_ENCODE, tmp);
2544 		if (i < DDI_PROP_RESULT_OK) {
2545 			switch (i) {
2546 			case DDI_PROP_RESULT_EOF:
2547 				return (DDI_PROP_END_OF_DATA);
2548 
2549 			case DDI_PROP_RESULT_ERROR:
2550 				return (DDI_PROP_CANNOT_ENCODE);
2551 			}
2552 		}
2553 	}
2554 
2555 	return (DDI_PROP_SUCCESS);
2556 }
2557 
2558 /*
2559  * Decode a single string property
2560  */
2561 static int
2562 ddi_prop_fm_decode_string(prop_handle_t *ph, void *data, uint_t *nelements)
2563 {
2564 	char		*tmp;
2565 	char		*str;
2566 	int		i;
2567 	int		size;
2568 
2569 	/*
2570 	 * If there is nothing to decode return an error
2571 	 */
2572 	if (ph->ph_size == 0)
2573 		return (DDI_PROP_END_OF_DATA);
2574 
2575 	/*
2576 	 * Get the decoded size of the encoded string.
2577 	 */
2578 	size = DDI_PROP_STR(ph, DDI_PROP_CMD_GET_DSIZE, NULL);
2579 	if (size < DDI_PROP_RESULT_OK) {
2580 		switch (size) {
2581 		case DDI_PROP_RESULT_EOF:
2582 			return (DDI_PROP_END_OF_DATA);
2583 
2584 		case DDI_PROP_RESULT_ERROR:
2585 			return (DDI_PROP_CANNOT_DECODE);
2586 		}
2587 	}
2588 
2589 	/*
2590 	 * Allocated memory to store the decoded value in.
2591 	 */
2592 	str = ddi_prop_decode_alloc((size_t)size, ddi_prop_free_string);
2593 
2594 	ddi_prop_reset_pos(ph);
2595 
2596 	/*
2597 	 * Decode the str and place it in the space we just allocated
2598 	 */
2599 	tmp = str;
2600 	i = DDI_PROP_STR(ph, DDI_PROP_CMD_DECODE, tmp);
2601 	if (i < DDI_PROP_RESULT_OK) {
2602 		/*
2603 		 * Free the space we just allocated
2604 		 * and return an error.
2605 		 */
2606 		ddi_prop_free(str);
2607 		switch (i) {
2608 		case DDI_PROP_RESULT_EOF:
2609 			return (DDI_PROP_END_OF_DATA);
2610 
2611 		case DDI_PROP_RESULT_ERROR:
2612 			return (DDI_PROP_CANNOT_DECODE);
2613 		}
2614 	}
2615 
2616 	*(char **)data = str;
2617 	*nelements = 1;
2618 
2619 	return (DDI_PROP_SUCCESS);
2620 }
2621 
2622 /*
2623  * Decode an array of strings.
2624  */
2625 int
2626 ddi_prop_fm_decode_strings(prop_handle_t *ph, void *data, uint_t *nelements)
2627 {
2628 	int		cnt = 0;
2629 	char		**strs;
2630 	char		**tmp;
2631 	char		*ptr;
2632 	int		i;
2633 	int		n;
2634 	int		size;
2635 	size_t		nbytes;
2636 
2637 	/*
2638 	 * Figure out how many array elements there are by going through the
2639 	 * data without decoding it first and counting.
2640 	 */
2641 	for (;;) {
2642 		i = DDI_PROP_STR(ph, DDI_PROP_CMD_SKIP, NULL);
2643 		if (i < 0)
2644 			break;
2645 		cnt++;
2646 	}
2647 
2648 	/*
2649 	 * If there are no elements return an error
2650 	 */
2651 	if (cnt == 0)
2652 		return (DDI_PROP_END_OF_DATA);
2653 
2654 	/*
2655 	 * If we cannot skip through the data, we cannot decode it
2656 	 */
2657 	if (i == DDI_PROP_RESULT_ERROR)
2658 		return (DDI_PROP_CANNOT_DECODE);
2659 
2660 	/*
2661 	 * Reset the data pointer to the beginning of the encoded data
2662 	 */
2663 	ddi_prop_reset_pos(ph);
2664 
2665 	/*
2666 	 * Figure out how much memory we need for the sum total
2667 	 */
2668 	nbytes = (cnt + 1) * sizeof (char *);
2669 
2670 	for (n = 0; n < cnt; n++) {
2671 		/*
2672 		 * Get the decoded size of the current encoded string.
2673 		 */
2674 		size = DDI_PROP_STR(ph, DDI_PROP_CMD_GET_DSIZE, NULL);
2675 		if (size < DDI_PROP_RESULT_OK) {
2676 			switch (size) {
2677 			case DDI_PROP_RESULT_EOF:
2678 				return (DDI_PROP_END_OF_DATA);
2679 
2680 			case DDI_PROP_RESULT_ERROR:
2681 				return (DDI_PROP_CANNOT_DECODE);
2682 			}
2683 		}
2684 
2685 		nbytes += size;
2686 	}
2687 
2688 	/*
2689 	 * Allocate memory in which to store the decoded strings.
2690 	 */
2691 	strs = ddi_prop_decode_alloc(nbytes, ddi_prop_free_strings);
2692 
2693 	/*
2694 	 * Set up pointers for each string by figuring out yet
2695 	 * again how long each string is.
2696 	 */
2697 	ddi_prop_reset_pos(ph);
2698 	ptr = (caddr_t)strs + ((cnt + 1) * sizeof (char *));
2699 	for (tmp = strs, n = 0; n < cnt; n++, tmp++) {
2700 		/*
2701 		 * Get the decoded size of the current encoded string.
2702 		 */
2703 		size = DDI_PROP_STR(ph, DDI_PROP_CMD_GET_DSIZE, NULL);
2704 		if (size < DDI_PROP_RESULT_OK) {
2705 			ddi_prop_free(strs);
2706 			switch (size) {
2707 			case DDI_PROP_RESULT_EOF:
2708 				return (DDI_PROP_END_OF_DATA);
2709 
2710 			case DDI_PROP_RESULT_ERROR:
2711 				return (DDI_PROP_CANNOT_DECODE);
2712 			}
2713 		}
2714 
2715 		*tmp = ptr;
2716 		ptr += size;
2717 	}
2718 
2719 	/*
2720 	 * String array is terminated by a NULL
2721 	 */
2722 	*tmp = NULL;
2723 
2724 	/*
2725 	 * Finally, we can decode each string
2726 	 */
2727 	ddi_prop_reset_pos(ph);
2728 	for (tmp = strs, n = 0; n < cnt; n++, tmp++) {
2729 		i = DDI_PROP_STR(ph, DDI_PROP_CMD_DECODE, *tmp);
2730 		if (i < DDI_PROP_RESULT_OK) {
2731 			/*
2732 			 * Free the space we just allocated
2733 			 * and return an error
2734 			 */
2735 			ddi_prop_free(strs);
2736 			switch (i) {
2737 			case DDI_PROP_RESULT_EOF:
2738 				return (DDI_PROP_END_OF_DATA);
2739 
2740 			case DDI_PROP_RESULT_ERROR:
2741 				return (DDI_PROP_CANNOT_DECODE);
2742 			}
2743 		}
2744 	}
2745 
2746 	*(char ***)data = strs;
2747 	*nelements = cnt;
2748 
2749 	return (DDI_PROP_SUCCESS);
2750 }
2751 
2752 /*
2753  * Encode a string.
2754  */
2755 int
2756 ddi_prop_fm_encode_string(prop_handle_t *ph, void *data, uint_t nelements)
2757 {
2758 	char		**tmp;
2759 	int		size;
2760 	int		i;
2761 
2762 	/*
2763 	 * If there is no data, we cannot do anything
2764 	 */
2765 	if (nelements == 0)
2766 		return (DDI_PROP_CANNOT_ENCODE);
2767 
2768 	/*
2769 	 * Get the size of the encoded string.
2770 	 */
2771 	tmp = (char **)data;
2772 	size = DDI_PROP_STR(ph, DDI_PROP_CMD_GET_ESIZE, *tmp);
2773 	if (size < DDI_PROP_RESULT_OK) {
2774 		switch (size) {
2775 		case DDI_PROP_RESULT_EOF:
2776 			return (DDI_PROP_END_OF_DATA);
2777 
2778 		case DDI_PROP_RESULT_ERROR:
2779 			return (DDI_PROP_CANNOT_ENCODE);
2780 		}
2781 	}
2782 
2783 	/*
2784 	 * Allocate space in the handle to store the encoded string.
2785 	 */
2786 	if (ddi_prop_encode_alloc(ph, size) != DDI_PROP_SUCCESS)
2787 		return (DDI_PROP_NO_MEMORY);
2788 
2789 	ddi_prop_reset_pos(ph);
2790 
2791 	/*
2792 	 * Encode the string.
2793 	 */
2794 	tmp = (char **)data;
2795 	i = DDI_PROP_STR(ph, DDI_PROP_CMD_ENCODE, *tmp);
2796 	if (i < DDI_PROP_RESULT_OK) {
2797 		switch (i) {
2798 		case DDI_PROP_RESULT_EOF:
2799 			return (DDI_PROP_END_OF_DATA);
2800 
2801 		case DDI_PROP_RESULT_ERROR:
2802 			return (DDI_PROP_CANNOT_ENCODE);
2803 		}
2804 	}
2805 
2806 	return (DDI_PROP_SUCCESS);
2807 }
2808 
2809 
2810 /*
2811  * Encode an array of strings.
2812  */
2813 int
2814 ddi_prop_fm_encode_strings(prop_handle_t *ph, void *data, uint_t nelements)
2815 {
2816 	int		cnt = 0;
2817 	char		**tmp;
2818 	int		size;
2819 	uint_t		total_size;
2820 	int		i;
2821 
2822 	/*
2823 	 * If there is no data, we cannot do anything
2824 	 */
2825 	if (nelements == 0)
2826 		return (DDI_PROP_CANNOT_ENCODE);
2827 
2828 	/*
2829 	 * Get the total size required to encode all the strings.
2830 	 */
2831 	total_size = 0;
2832 	tmp = (char **)data;
2833 	for (cnt = 0; cnt < nelements; cnt++, tmp++) {
2834 		size = DDI_PROP_STR(ph, DDI_PROP_CMD_GET_ESIZE, *tmp);
2835 		if (size < DDI_PROP_RESULT_OK) {
2836 			switch (size) {
2837 			case DDI_PROP_RESULT_EOF:
2838 				return (DDI_PROP_END_OF_DATA);
2839 
2840 			case DDI_PROP_RESULT_ERROR:
2841 				return (DDI_PROP_CANNOT_ENCODE);
2842 			}
2843 		}
2844 		total_size += (uint_t)size;
2845 	}
2846 
2847 	/*
2848 	 * Allocate space in the handle to store the encoded strings.
2849 	 */
2850 	if (ddi_prop_encode_alloc(ph, total_size) != DDI_PROP_SUCCESS)
2851 		return (DDI_PROP_NO_MEMORY);
2852 
2853 	ddi_prop_reset_pos(ph);
2854 
2855 	/*
2856 	 * Encode the array of strings.
2857 	 */
2858 	tmp = (char **)data;
2859 	for (cnt = 0; cnt < nelements; cnt++, tmp++) {
2860 		i = DDI_PROP_STR(ph, DDI_PROP_CMD_ENCODE, *tmp);
2861 		if (i < DDI_PROP_RESULT_OK) {
2862 			switch (i) {
2863 			case DDI_PROP_RESULT_EOF:
2864 				return (DDI_PROP_END_OF_DATA);
2865 
2866 			case DDI_PROP_RESULT_ERROR:
2867 				return (DDI_PROP_CANNOT_ENCODE);
2868 			}
2869 		}
2870 	}
2871 
2872 	return (DDI_PROP_SUCCESS);
2873 }
2874 
2875 
2876 /*
2877  * Decode an array of bytes.
2878  */
2879 static int
2880 ddi_prop_fm_decode_bytes(prop_handle_t *ph, void *data, uint_t *nelements)
2881 {
2882 	uchar_t		*tmp;
2883 	int		nbytes;
2884 	int		i;
2885 
2886 	/*
2887 	 * If there are no elements return an error
2888 	 */
2889 	if (ph->ph_size == 0)
2890 		return (DDI_PROP_END_OF_DATA);
2891 
2892 	/*
2893 	 * Get the size of the encoded array of bytes.
2894 	 */
2895 	nbytes = DDI_PROP_BYTES(ph, DDI_PROP_CMD_GET_DSIZE,
2896 		data, ph->ph_size);
2897 	if (nbytes < DDI_PROP_RESULT_OK) {
2898 		switch (nbytes) {
2899 		case DDI_PROP_RESULT_EOF:
2900 			return (DDI_PROP_END_OF_DATA);
2901 
2902 		case DDI_PROP_RESULT_ERROR:
2903 			return (DDI_PROP_CANNOT_DECODE);
2904 		}
2905 	}
2906 
2907 	/*
2908 	 * Allocated memory to store the decoded value in.
2909 	 */
2910 	tmp = ddi_prop_decode_alloc(nbytes, ddi_prop_free_bytes);
2911 
2912 	/*
2913 	 * Decode each element and place it in the space we just allocated
2914 	 */
2915 	i = DDI_PROP_BYTES(ph, DDI_PROP_CMD_DECODE, tmp, nbytes);
2916 	if (i < DDI_PROP_RESULT_OK) {
2917 		/*
2918 		 * Free the space we just allocated
2919 		 * and return an error
2920 		 */
2921 		ddi_prop_free(tmp);
2922 		switch (i) {
2923 		case DDI_PROP_RESULT_EOF:
2924 			return (DDI_PROP_END_OF_DATA);
2925 
2926 		case DDI_PROP_RESULT_ERROR:
2927 			return (DDI_PROP_CANNOT_DECODE);
2928 		}
2929 	}
2930 
2931 	*(uchar_t **)data = tmp;
2932 	*nelements = nbytes;
2933 
2934 	return (DDI_PROP_SUCCESS);
2935 }
2936 
2937 /*
2938  * Encode an array of bytes.
2939  */
2940 int
2941 ddi_prop_fm_encode_bytes(prop_handle_t *ph, void *data, uint_t nelements)
2942 {
2943 	int		size;
2944 	int		i;
2945 
2946 	/*
2947 	 * If there are no elements, then this is a boolean property,
2948 	 * so just create a property handle with no data and return.
2949 	 */
2950 	if (nelements == 0) {
2951 		(void) ddi_prop_encode_alloc(ph, 0);
2952 		return (DDI_PROP_SUCCESS);
2953 	}
2954 
2955 	/*
2956 	 * Get the size of the encoded array of bytes.
2957 	 */
2958 	size = DDI_PROP_BYTES(ph, DDI_PROP_CMD_GET_ESIZE, (uchar_t *)data,
2959 		nelements);
2960 	if (size < DDI_PROP_RESULT_OK) {
2961 		switch (size) {
2962 		case DDI_PROP_RESULT_EOF:
2963 			return (DDI_PROP_END_OF_DATA);
2964 
2965 		case DDI_PROP_RESULT_ERROR:
2966 			return (DDI_PROP_CANNOT_DECODE);
2967 		}
2968 	}
2969 
2970 	/*
2971 	 * Allocate space in the handle to store the encoded bytes.
2972 	 */
2973 	if (ddi_prop_encode_alloc(ph, (uint_t)size) != DDI_PROP_SUCCESS)
2974 		return (DDI_PROP_NO_MEMORY);
2975 
2976 	/*
2977 	 * Encode the array of bytes.
2978 	 */
2979 	i = DDI_PROP_BYTES(ph, DDI_PROP_CMD_ENCODE, (uchar_t *)data,
2980 		nelements);
2981 	if (i < DDI_PROP_RESULT_OK) {
2982 		switch (i) {
2983 		case DDI_PROP_RESULT_EOF:
2984 			return (DDI_PROP_END_OF_DATA);
2985 
2986 		case DDI_PROP_RESULT_ERROR:
2987 			return (DDI_PROP_CANNOT_ENCODE);
2988 		}
2989 	}
2990 
2991 	return (DDI_PROP_SUCCESS);
2992 }
2993 
2994 /*
2995  * OBP 1275 integer, string and byte operators.
2996  *
2997  * DDI_PROP_CMD_DECODE:
2998  *
2999  *	DDI_PROP_RESULT_ERROR:		cannot decode the data
3000  *	DDI_PROP_RESULT_EOF:		end of data
 *	DDI_PROP_RESULT_OK:		data was decoded
3002  *
3003  * DDI_PROP_CMD_ENCODE:
3004  *
3005  *	DDI_PROP_RESULT_ERROR:		cannot encode the data
3006  *	DDI_PROP_RESULT_EOF:		end of data
 *	DDI_PROP_RESULT_OK:		data was encoded
3008  *
3009  * DDI_PROP_CMD_SKIP:
3010  *
3011  *	DDI_PROP_RESULT_ERROR:		cannot skip the data
3012  *	DDI_PROP_RESULT_EOF:		end of data
 *	DDI_PROP_RESULT_OK:		data was skipped
3014  *
3015  * DDI_PROP_CMD_GET_ESIZE:
3016  *
3017  *	DDI_PROP_RESULT_ERROR:		cannot get encoded size
3018  *	DDI_PROP_RESULT_EOF:		end of data
3019  *	> 0:				the encoded size
3020  *
3021  * DDI_PROP_CMD_GET_DSIZE:
3022  *
3023  *	DDI_PROP_RESULT_ERROR:		cannot get decoded size
3024  *	DDI_PROP_RESULT_EOF:		end of data
3025  *	> 0:				the decoded size
3026  */
3027 
3028 /*
3029  * OBP 1275 integer operator
3030  *
3031  * OBP properties are a byte stream of data, so integers may not be
3032  * properly aligned.  Therefore we need to copy them one byte at a time.
3033  */
int
ddi_prop_1275_int(prop_handle_t *ph, uint_t cmd, int *data)
{
	int	i;

	switch (cmd) {
	case DDI_PROP_CMD_DECODE:
		/*
		 * Check that there is encoded data
		 */
		if (ph->ph_cur_pos == NULL || ph->ph_size == 0)
			return (DDI_PROP_RESULT_ERROR);
		if (ph->ph_flags & PH_FROM_PROM) {
			/* PROM ints may be truncated to the property size. */
			i = MIN(ph->ph_size, PROP_1275_INT_SIZE);
			/*
			 * NOTE(review): ph_size is a byte count but is
			 * added to an (int *), which scales it by
			 * sizeof (int); the resulting bound is looser
			 * than a byte-exact check -- confirm intent
			 * before relying on or changing it.
			 */
			if ((int *)ph->ph_cur_pos > ((int *)ph->ph_data +
				ph->ph_size - i))
				return (DDI_PROP_RESULT_ERROR);
		} else {
			/*
			 * NOTE(review): same scaled-pointer bound as
			 * above -- confirm intent.
			 */
			if (ph->ph_size < sizeof (int) ||
			((int *)ph->ph_cur_pos > ((int *)ph->ph_data +
				ph->ph_size - sizeof (int))))
			return (DDI_PROP_RESULT_ERROR);
		}

		/*
		 * Copy the integer, using the implementation-specific
		 * copy function if the property is coming from the PROM.
		 */
		if (ph->ph_flags & PH_FROM_PROM) {
			*data = impl_ddi_prop_int_from_prom(
				(uchar_t *)ph->ph_cur_pos,
				(ph->ph_size < PROP_1275_INT_SIZE) ?
				ph->ph_size : PROP_1275_INT_SIZE);
		} else {
			bcopy(ph->ph_cur_pos, data, sizeof (int));
		}

		/*
		 * Move the current location to the start of the next
		 * bit of undecoded data.
		 */
		ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos +
			PROP_1275_INT_SIZE;
		return (DDI_PROP_RESULT_OK);

	case DDI_PROP_CMD_ENCODE:
		/*
		 * Check that there is room to encoded the data
		 */
		if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
			ph->ph_size < PROP_1275_INT_SIZE ||
			((int *)ph->ph_cur_pos > ((int *)ph->ph_data +
				ph->ph_size - sizeof (int))))
			return (DDI_PROP_RESULT_ERROR);

		/*
		 * Encode the integer into the byte stream one byte at a
		 * time.
		 */
		bcopy(data, ph->ph_cur_pos, sizeof (int));

		/*
		 * Move the current location to the start of the next bit of
		 * space where we can store encoded data.
		 */
		ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos + PROP_1275_INT_SIZE;
		return (DDI_PROP_RESULT_OK);

	case DDI_PROP_CMD_SKIP:
		/*
		 * Check that there is encoded data
		 */
		if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
				ph->ph_size < PROP_1275_INT_SIZE)
			return (DDI_PROP_RESULT_ERROR);


		/* At or past the end of the encoded data: report EOF. */
		if ((caddr_t)ph->ph_cur_pos ==
				(caddr_t)ph->ph_data + ph->ph_size) {
			return (DDI_PROP_RESULT_EOF);
		} else if ((caddr_t)ph->ph_cur_pos >
				(caddr_t)ph->ph_data + ph->ph_size) {
			return (DDI_PROP_RESULT_EOF);
		}

		/*
		 * Move the current location to the start of the next bit of
		 * undecoded data.
		 */
		ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos + PROP_1275_INT_SIZE;
		return (DDI_PROP_RESULT_OK);

	case DDI_PROP_CMD_GET_ESIZE:
		/*
		 * Return the size of an encoded integer on OBP
		 */
		return (PROP_1275_INT_SIZE);

	case DDI_PROP_CMD_GET_DSIZE:
		/*
		 * Return the size of a decoded integer on the system.
		 */
		return (sizeof (int));

	default:
#ifdef DEBUG
		panic("ddi_prop_1275_int: %x impossible", cmd);
		/*NOTREACHED*/
#else
		return (DDI_PROP_RESULT_ERROR);
#endif	/* DEBUG */
	}
}
3147 
3148 /*
3149  * 64 bit integer operator.
3150  *
3151  * This is an extension, defined by Sun, to the 1275 integer
3152  * operator.  This routine handles the encoding/decoding of
3153  * 64 bit integer properties.
3154  */
int
ddi_prop_int64_op(prop_handle_t *ph, uint_t cmd, int64_t *data)
{

	switch (cmd) {
	case DDI_PROP_CMD_DECODE:
		/*
		 * Check that there is encoded data
		 */
		if (ph->ph_cur_pos == NULL || ph->ph_size == 0)
			return (DDI_PROP_RESULT_ERROR);
		if (ph->ph_flags & PH_FROM_PROM) {
			/* 64-bit properties are never supplied by the PROM. */
			return (DDI_PROP_RESULT_ERROR);
		} else {
			/*
			 * NOTE(review): ph_size is a byte count but is
			 * added to an (int64_t *), which scales it by
			 * sizeof (int64_t); the resulting bound is
			 * looser than a byte-exact check -- confirm
			 * intent before relying on or changing it.
			 */
			if (ph->ph_size < sizeof (int64_t) ||
			    ((int64_t *)ph->ph_cur_pos >
			    ((int64_t *)ph->ph_data +
			    ph->ph_size - sizeof (int64_t))))
				return (DDI_PROP_RESULT_ERROR);
		}
		/*
		 * Copy the integer, using the implementation-specific
		 * copy function if the property is coming from the PROM.
		 * (The PROM branch below is unreachable: it was already
		 * rejected above.)
		 */
		if (ph->ph_flags & PH_FROM_PROM) {
			return (DDI_PROP_RESULT_ERROR);
		} else {
			bcopy(ph->ph_cur_pos, data, sizeof (int64_t));
		}

		/*
		 * Move the current location to the start of the next
		 * bit of undecoded data.
		 */
		ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos +
		    sizeof (int64_t);
			return (DDI_PROP_RESULT_OK);

	case DDI_PROP_CMD_ENCODE:
		/*
		 * Check that there is room to encoded the data
		 */
		if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
		    ph->ph_size < sizeof (int64_t) ||
		    ((int64_t *)ph->ph_cur_pos > ((int64_t *)ph->ph_data +
		    ph->ph_size - sizeof (int64_t))))
			return (DDI_PROP_RESULT_ERROR);

		/*
		 * Encode the integer into the byte stream one byte at a
		 * time.
		 */
		bcopy(data, ph->ph_cur_pos, sizeof (int64_t));

		/*
		 * Move the current location to the start of the next bit of
		 * space where we can store encoded data.
		 */
		ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos +
		    sizeof (int64_t);
		return (DDI_PROP_RESULT_OK);

	case DDI_PROP_CMD_SKIP:
		/*
		 * Check that there is encoded data
		 */
		if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
		    ph->ph_size < sizeof (int64_t))
			return (DDI_PROP_RESULT_ERROR);

		/* At or past the end of the encoded data: report EOF. */
		if ((caddr_t)ph->ph_cur_pos ==
		    (caddr_t)ph->ph_data + ph->ph_size) {
			return (DDI_PROP_RESULT_EOF);
		} else if ((caddr_t)ph->ph_cur_pos >
		    (caddr_t)ph->ph_data + ph->ph_size) {
			return (DDI_PROP_RESULT_EOF);
		}

		/*
		 * Move the current location to the start of
		 * the next bit of undecoded data.
		 */
		ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos +
		    sizeof (int64_t);
			return (DDI_PROP_RESULT_OK);

	case DDI_PROP_CMD_GET_ESIZE:
		/*
		 * Return the size of an encoded integer on OBP
		 */
		return (sizeof (int64_t));

	case DDI_PROP_CMD_GET_DSIZE:
		/*
		 * Return the size of a decoded integer on the system.
		 */
		return (sizeof (int64_t));

	default:
#ifdef DEBUG
		panic("ddi_prop_int64_op: %x impossible", cmd);
		/*NOTREACHED*/
#else
		return (DDI_PROP_RESULT_ERROR);
#endif  /* DEBUG */
	}
}
3262 
3263 /*
3264  * OBP 1275 string operator.
3265  *
3266  * OBP strings are NULL terminated.
3267  */
3268 int
3269 ddi_prop_1275_string(prop_handle_t *ph, uint_t cmd, char *data)
3270 {
3271 	int	n;
3272 	char	*p;
3273 	char	*end;
3274 
3275 	switch (cmd) {
3276 	case DDI_PROP_CMD_DECODE:
3277 		/*
3278 		 * Check that there is encoded data
3279 		 */
3280 		if (ph->ph_cur_pos == NULL || ph->ph_size == 0) {
3281 			return (DDI_PROP_RESULT_ERROR);
3282 		}
3283 
3284 		n = strlen((char *)ph->ph_cur_pos) + 1;
3285 		if ((char *)ph->ph_cur_pos > ((char *)ph->ph_data +
3286 				ph->ph_size - n)) {
3287 			return (DDI_PROP_RESULT_ERROR);
3288 		}
3289 
3290 		/*
3291 		 * Copy the NULL terminated string
3292 		 */
3293 		bcopy(ph->ph_cur_pos, data, n);
3294 
3295 		/*
3296 		 * Move the current location to the start of the next bit of
3297 		 * undecoded data.
3298 		 */
3299 		ph->ph_cur_pos = (char *)ph->ph_cur_pos + n;
3300 		return (DDI_PROP_RESULT_OK);
3301 
3302 	case DDI_PROP_CMD_ENCODE:
3303 		/*
3304 		 * Check that there is room to encoded the data
3305 		 */
3306 		if (ph->ph_cur_pos == NULL || ph->ph_size == 0) {
3307 			return (DDI_PROP_RESULT_ERROR);
3308 		}
3309 
3310 		n = strlen(data) + 1;
3311 		if ((char *)ph->ph_cur_pos > ((char *)ph->ph_data +
3312 				ph->ph_size - n)) {
3313 			return (DDI_PROP_RESULT_ERROR);
3314 		}
3315 
3316 		/*
3317 		 * Copy the NULL terminated string
3318 		 */
3319 		bcopy(data, ph->ph_cur_pos, n);
3320 
3321 		/*
3322 		 * Move the current location to the start of the next bit of
3323 		 * space where we can store encoded data.
3324 		 */
3325 		ph->ph_cur_pos = (char *)ph->ph_cur_pos + n;
3326 		return (DDI_PROP_RESULT_OK);
3327 
3328 	case DDI_PROP_CMD_SKIP:
3329 		/*
3330 		 * Check that there is encoded data
3331 		 */
3332 		if (ph->ph_cur_pos == NULL || ph->ph_size == 0) {
3333 			return (DDI_PROP_RESULT_ERROR);
3334 		}
3335 
3336 		/*
3337 		 * Return the string length plus one for the NULL
3338 		 * We know the size of the property, we need to
3339 		 * ensure that the string is properly formatted,
3340 		 * since we may be looking up random OBP data.
3341 		 */
3342 		p = (char *)ph->ph_cur_pos;
3343 		end = (char *)ph->ph_data + ph->ph_size;
3344 
3345 		if (p == end) {
3346 			return (DDI_PROP_RESULT_EOF);
3347 		}
3348 
3349 		for (n = 0; p < end; n++) {
3350 			if (*p++ == 0) {
3351 				ph->ph_cur_pos = p;
3352 				return (DDI_PROP_RESULT_OK);
3353 			}
3354 		}
3355 
3356 		return (DDI_PROP_RESULT_ERROR);
3357 
3358 	case DDI_PROP_CMD_GET_ESIZE:
3359 		/*
3360 		 * Return the size of the encoded string on OBP.
3361 		 */
3362 		return (strlen(data) + 1);
3363 
3364 	case DDI_PROP_CMD_GET_DSIZE:
3365 		/*
3366 		 * Return the string length plus one for the NULL
3367 		 * We know the size of the property, we need to
3368 		 * ensure that the string is properly formatted,
3369 		 * since we may be looking up random OBP data.
3370 		 */
3371 		p = (char *)ph->ph_cur_pos;
3372 		end = (char *)ph->ph_data + ph->ph_size;
3373 		for (n = 0; p < end; n++) {
3374 			if (*p++ == 0) {
3375 				ph->ph_cur_pos = p;
3376 				return (n+1);
3377 			}
3378 		}
3379 		return (DDI_PROP_RESULT_ERROR);
3380 
3381 	default:
3382 #ifdef DEBUG
3383 		panic("ddi_prop_1275_string: %x impossible", cmd);
3384 		/*NOTREACHED*/
3385 #else
3386 		return (DDI_PROP_RESULT_ERROR);
3387 #endif	/* DEBUG */
3388 	}
3389 }
3390 
3391 /*
3392  * OBP 1275 byte operator
3393  *
3394  * Caller must specify the number of bytes to get.  OBP encodes bytes
3395  * as a byte so there is a 1-to-1 translation.
3396  */
3397 int
3398 ddi_prop_1275_bytes(prop_handle_t *ph, uint_t cmd, uchar_t *data,
3399 	uint_t nelements)
3400 {
3401 	switch (cmd) {
3402 	case DDI_PROP_CMD_DECODE:
3403 		/*
3404 		 * Check that there is encoded data
3405 		 */
3406 		if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
3407 			ph->ph_size < nelements ||
3408 			((char *)ph->ph_cur_pos > ((char *)ph->ph_data +
3409 				ph->ph_size - nelements)))
3410 			return (DDI_PROP_RESULT_ERROR);
3411 
3412 		/*
3413 		 * Copy out the bytes
3414 		 */
3415 		bcopy(ph->ph_cur_pos, data, nelements);
3416 
3417 		/*
3418 		 * Move the current location
3419 		 */
3420 		ph->ph_cur_pos = (char *)ph->ph_cur_pos + nelements;
3421 		return (DDI_PROP_RESULT_OK);
3422 
3423 	case DDI_PROP_CMD_ENCODE:
3424 		/*
3425 		 * Check that there is room to encode the data
3426 		 */
3427 		if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
3428 			ph->ph_size < nelements ||
3429 			((char *)ph->ph_cur_pos > ((char *)ph->ph_data +
3430 				ph->ph_size - nelements)))
3431 			return (DDI_PROP_RESULT_ERROR);
3432 
3433 		/*
3434 		 * Copy in the bytes
3435 		 */
3436 		bcopy(data, ph->ph_cur_pos, nelements);
3437 
3438 		/*
3439 		 * Move the current location to the start of the next bit of
3440 		 * space where we can store encoded data.
3441 		 */
3442 		ph->ph_cur_pos = (char *)ph->ph_cur_pos + nelements;
3443 		return (DDI_PROP_RESULT_OK);
3444 
3445 	case DDI_PROP_CMD_SKIP:
3446 		/*
3447 		 * Check that there is encoded data
3448 		 */
3449 		if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
3450 				ph->ph_size < nelements)
3451 			return (DDI_PROP_RESULT_ERROR);
3452 
3453 		if ((char *)ph->ph_cur_pos > ((char *)ph->ph_data +
3454 				ph->ph_size - nelements))
3455 			return (DDI_PROP_RESULT_EOF);
3456 
3457 		/*
3458 		 * Move the current location
3459 		 */
3460 		ph->ph_cur_pos = (char *)ph->ph_cur_pos + nelements;
3461 		return (DDI_PROP_RESULT_OK);
3462 
3463 	case DDI_PROP_CMD_GET_ESIZE:
3464 		/*
3465 		 * The size in bytes of the encoded size is the
3466 		 * same as the decoded size provided by the caller.
3467 		 */
3468 		return (nelements);
3469 
3470 	case DDI_PROP_CMD_GET_DSIZE:
3471 		/*
3472 		 * Just return the number of bytes specified by the caller.
3473 		 */
3474 		return (nelements);
3475 
3476 	default:
3477 #ifdef DEBUG
3478 		panic("ddi_prop_1275_bytes: %x impossible", cmd);
3479 		/*NOTREACHED*/
3480 #else
3481 		return (DDI_PROP_RESULT_ERROR);
3482 #endif	/* DEBUG */
3483 	}
3484 }
3485 
3486 /*
3487  * Used for properties that come from the OBP, hardware configuration files,
3488  * or that are created by calls to ddi_prop_update(9F).
3489  */
3490 static struct prop_handle_ops prop_1275_ops = {
3491 	ddi_prop_1275_int,
3492 	ddi_prop_1275_string,
3493 	ddi_prop_1275_bytes,
3494 	ddi_prop_int64_op
3495 };
3496 
3497 
3498 /*
3499  * Interface to create/modify a managed property on child's behalf...
3500  * Flags interpreted are:
3501  *	DDI_PROP_CANSLEEP:	Allow memory allocation to sleep.
3502  *	DDI_PROP_SYSTEM_DEF:	Manipulate system list rather than driver list.
3503  *
3504  * Use same dev_t when modifying or undefining a property.
3505  * Search for properties with DDI_DEV_T_ANY to match first named
3506  * property on the list.
3507  *
3508  * Properties are stored LIFO and subsequently will match the first
3509  * `matching' instance.
3510  */
3511 
3512 /*
3513  * ddi_prop_add:	Add a software defined property
3514  */
3515 
3516 /*
3517  * define to get a new ddi_prop_t.
3518  * km_flags are KM_SLEEP or KM_NOSLEEP.
3519  */
3520 
3521 #define	DDI_NEW_PROP_T(km_flags)	\
3522 	(kmem_zalloc(sizeof (ddi_prop_t), km_flags))
3523 
3524 static int
3525 ddi_prop_add(dev_t dev, dev_info_t *dip, int flags,
3526     char *name, caddr_t value, int length)
3527 {
3528 	ddi_prop_t	*new_propp, *propp;
3529 	ddi_prop_t	**list_head = &(DEVI(dip)->devi_drv_prop_ptr);
3530 	int		km_flags = KM_NOSLEEP;
3531 	int		name_buf_len;
3532 
3533 	/*
3534 	 * If dev_t is DDI_DEV_T_ANY or name's length is zero return error.
3535 	 */
3536 
3537 	if (dev == DDI_DEV_T_ANY || name == (char *)0 || strlen(name) == 0)
3538 		return (DDI_PROP_INVAL_ARG);
3539 
3540 	if (flags & DDI_PROP_CANSLEEP)
3541 		km_flags = KM_SLEEP;
3542 
3543 	if (flags & DDI_PROP_SYSTEM_DEF)
3544 		list_head = &(DEVI(dip)->devi_sys_prop_ptr);
3545 	else if (flags & DDI_PROP_HW_DEF)
3546 		list_head = &(DEVI(dip)->devi_hw_prop_ptr);
3547 
3548 	if ((new_propp = DDI_NEW_PROP_T(km_flags)) == NULL)  {
3549 		cmn_err(CE_CONT, prop_no_mem_msg, name);
3550 		return (DDI_PROP_NO_MEMORY);
3551 	}
3552 
3553 	/*
3554 	 * If dev is major number 0, then we need to do a ddi_name_to_major
3555 	 * to get the real major number for the device.  This needs to be
3556 	 * done because some drivers need to call ddi_prop_create in their
3557 	 * attach routines but they don't have a dev.  By creating the dev
3558 	 * ourself if the major number is 0, drivers will not have to know what
3559 	 * their major number.	They can just create a dev with major number
3560 	 * 0 and pass it in.  For device 0, we will be doing a little extra
3561 	 * work by recreating the same dev that we already have, but its the
3562 	 * price you pay :-).
3563 	 *
3564 	 * This fixes bug #1098060.
3565 	 */
3566 	if (getmajor(dev) == DDI_MAJOR_T_UNKNOWN) {
3567 		new_propp->prop_dev =
3568 		    makedevice(ddi_name_to_major(DEVI(dip)->devi_binding_name),
3569 		    getminor(dev));
3570 	} else
3571 		new_propp->prop_dev = dev;
3572 
3573 	/*
3574 	 * Allocate space for property name and copy it in...
3575 	 */
3576 
3577 	name_buf_len = strlen(name) + 1;
3578 	new_propp->prop_name = kmem_alloc(name_buf_len, km_flags);
3579 	if (new_propp->prop_name == 0)	{
3580 		kmem_free(new_propp, sizeof (ddi_prop_t));
3581 		cmn_err(CE_CONT, prop_no_mem_msg, name);
3582 		return (DDI_PROP_NO_MEMORY);
3583 	}
3584 	bcopy(name, new_propp->prop_name, name_buf_len);
3585 
3586 	/*
3587 	 * Set the property type
3588 	 */
3589 	new_propp->prop_flags = flags & DDI_PROP_TYPE_MASK;
3590 
3591 	/*
3592 	 * Set length and value ONLY if not an explicit property undefine:
3593 	 * NOTE: value and length are zero for explicit undefines.
3594 	 */
3595 
3596 	if (flags & DDI_PROP_UNDEF_IT) {
3597 		new_propp->prop_flags |= DDI_PROP_UNDEF_IT;
3598 	} else {
3599 		if ((new_propp->prop_len = length) != 0) {
3600 			new_propp->prop_val = kmem_alloc(length, km_flags);
3601 			if (new_propp->prop_val == 0)  {
3602 				kmem_free(new_propp->prop_name, name_buf_len);
3603 				kmem_free(new_propp, sizeof (ddi_prop_t));
3604 				cmn_err(CE_CONT, prop_no_mem_msg, name);
3605 				return (DDI_PROP_NO_MEMORY);
3606 			}
3607 			bcopy(value, new_propp->prop_val, length);
3608 		}
3609 	}
3610 
3611 	/*
3612 	 * Link property into beginning of list. (Properties are LIFO order.)
3613 	 */
3614 
3615 	mutex_enter(&(DEVI(dip)->devi_lock));
3616 	propp = *list_head;
3617 	new_propp->prop_next = propp;
3618 	*list_head = new_propp;
3619 	mutex_exit(&(DEVI(dip)->devi_lock));
3620 	return (DDI_PROP_SUCCESS);
3621 }
3622 
3623 
3624 /*
3625  * ddi_prop_change:	Modify a software managed property value
3626  *
3627  *			Set new length and value if found.
3628  *			returns DDI_PROP_INVAL_ARG if dev is DDI_DEV_T_ANY or
3629  *			input name is the NULL string.
3630  *			returns DDI_PROP_NO_MEMORY if unable to allocate memory
3631  *
3632  *			Note: an undef can be modified to be a define,
3633  *			(you can't go the other way.)
3634  */
3635 
3636 static int
3637 ddi_prop_change(dev_t dev, dev_info_t *dip, int flags,
3638     char *name, caddr_t value, int length)
3639 {
3640 	ddi_prop_t	*propp;
3641 	ddi_prop_t	**ppropp;
3642 	caddr_t		p = NULL;
3643 
3644 	if ((dev == DDI_DEV_T_ANY) || (name == NULL) || (strlen(name) == 0))
3645 		return (DDI_PROP_INVAL_ARG);
3646 
3647 	/*
3648 	 * Preallocate buffer, even if we don't need it...
3649 	 */
3650 	if (length != 0)  {
3651 		p = kmem_alloc(length, (flags & DDI_PROP_CANSLEEP) ?
3652 		    KM_SLEEP : KM_NOSLEEP);
3653 		if (p == NULL)	{
3654 			cmn_err(CE_CONT, prop_no_mem_msg, name);
3655 			return (DDI_PROP_NO_MEMORY);
3656 		}
3657 	}
3658 
3659 	/*
3660 	 * If the dev_t value contains DDI_MAJOR_T_UNKNOWN for the major
3661 	 * number, a real dev_t value should be created based upon the dip's
3662 	 * binding driver.  See ddi_prop_add...
3663 	 */
3664 	if (getmajor(dev) == DDI_MAJOR_T_UNKNOWN)
3665 		dev = makedevice(
3666 		    ddi_name_to_major(DEVI(dip)->devi_binding_name),
3667 		    getminor(dev));
3668 
3669 	/*
3670 	 * Check to see if the property exists.  If so we modify it.
3671 	 * Else we create it by calling ddi_prop_add().
3672 	 */
3673 	mutex_enter(&(DEVI(dip)->devi_lock));
3674 	ppropp = &DEVI(dip)->devi_drv_prop_ptr;
3675 	if (flags & DDI_PROP_SYSTEM_DEF)
3676 		ppropp = &DEVI(dip)->devi_sys_prop_ptr;
3677 	else if (flags & DDI_PROP_HW_DEF)
3678 		ppropp = &DEVI(dip)->devi_hw_prop_ptr;
3679 
3680 	if ((propp = i_ddi_prop_search(dev, name, flags, ppropp)) != NULL) {
3681 		/*
3682 		 * Need to reallocate buffer?  If so, do it
3683 		 * carefully (reuse same space if new prop
3684 		 * is same size and non-NULL sized).
3685 		 */
3686 		if (length != 0)
3687 			bcopy(value, p, length);
3688 
3689 		if (propp->prop_len != 0)
3690 			kmem_free(propp->prop_val, propp->prop_len);
3691 
3692 		propp->prop_len = length;
3693 		propp->prop_val = p;
3694 		propp->prop_flags &= ~DDI_PROP_UNDEF_IT;
3695 		mutex_exit(&(DEVI(dip)->devi_lock));
3696 		return (DDI_PROP_SUCCESS);
3697 	}
3698 
3699 	mutex_exit(&(DEVI(dip)->devi_lock));
3700 	if (length != 0)
3701 		kmem_free(p, length);
3702 
3703 	return (ddi_prop_add(dev, dip, flags, name, value, length));
3704 }
3705 
3706 /*
3707  * Common update routine used to update and encode a property.	Creates
3708  * a property handle, calls the property encode routine, figures out if
3709  * the property already exists and updates if it does.	Otherwise it
3710  * creates if it does not exist.
3711  */
3712 int
3713 ddi_prop_update_common(dev_t match_dev, dev_info_t *dip, int flags,
3714     char *name, void *data, uint_t nelements,
3715     int (*prop_create)(prop_handle_t *, void *data, uint_t nelements))
3716 {
3717 	prop_handle_t	ph;
3718 	int		rval;
3719 	uint_t		ourflags;
3720 
3721 	/*
3722 	 * If dev_t is DDI_DEV_T_ANY or name's length is zero,
3723 	 * return error.
3724 	 */
3725 	if (match_dev == DDI_DEV_T_ANY || name == NULL || strlen(name) == 0)
3726 		return (DDI_PROP_INVAL_ARG);
3727 
3728 	/*
3729 	 * Create the handle
3730 	 */
3731 	ph.ph_data = NULL;
3732 	ph.ph_cur_pos = NULL;
3733 	ph.ph_save_pos = NULL;
3734 	ph.ph_size = 0;
3735 	ph.ph_ops = &prop_1275_ops;
3736 
3737 	/*
3738 	 * ourflags:
3739 	 * For compatibility with the old interfaces.  The old interfaces
3740 	 * didn't sleep by default and slept when the flag was set.  These
3741 	 * interfaces to the opposite.	So the old interfaces now set the
3742 	 * DDI_PROP_DONTSLEEP flag by default which tells us not to sleep.
3743 	 *
3744 	 * ph.ph_flags:
3745 	 * Blocked data or unblocked data allocation
3746 	 * for ph.ph_data in ddi_prop_encode_alloc()
3747 	 */
3748 	if (flags & DDI_PROP_DONTSLEEP) {
3749 		ourflags = flags;
3750 		ph.ph_flags = DDI_PROP_DONTSLEEP;
3751 	} else {
3752 		ourflags = flags | DDI_PROP_CANSLEEP;
3753 		ph.ph_flags = DDI_PROP_CANSLEEP;
3754 	}
3755 
3756 	/*
3757 	 * Encode the data and store it in the property handle by
3758 	 * calling the prop_encode routine.
3759 	 */
3760 	if ((rval = (*prop_create)(&ph, data, nelements)) !=
3761 	    DDI_PROP_SUCCESS) {
3762 		if (rval == DDI_PROP_NO_MEMORY)
3763 			cmn_err(CE_CONT, prop_no_mem_msg, name);
3764 		if (ph.ph_size != 0)
3765 			kmem_free(ph.ph_data, ph.ph_size);
3766 		return (rval);
3767 	}
3768 
3769 	/*
3770 	 * The old interfaces use a stacking approach to creating
3771 	 * properties.	If we are being called from the old interfaces,
3772 	 * the DDI_PROP_STACK_CREATE flag will be set, so we just do a
3773 	 * create without checking.
3774 	 */
3775 	if (flags & DDI_PROP_STACK_CREATE) {
3776 		rval = ddi_prop_add(match_dev, dip,
3777 		    ourflags, name, ph.ph_data, ph.ph_size);
3778 	} else {
3779 		rval = ddi_prop_change(match_dev, dip,
3780 		    ourflags, name, ph.ph_data, ph.ph_size);
3781 	}
3782 
3783 	/*
3784 	 * Free the encoded data allocated in the prop_encode routine.
3785 	 */
3786 	if (ph.ph_size != 0)
3787 		kmem_free(ph.ph_data, ph.ph_size);
3788 
3789 	return (rval);
3790 }
3791 
3792 
3793 /*
3794  * ddi_prop_create:	Define a managed property:
3795  *			See above for details.
3796  */
3797 
3798 int
3799 ddi_prop_create(dev_t dev, dev_info_t *dip, int flag,
3800     char *name, caddr_t value, int length)
3801 {
3802 	if (!(flag & DDI_PROP_CANSLEEP)) {
3803 		flag |= DDI_PROP_DONTSLEEP;
3804 #ifdef DDI_PROP_DEBUG
3805 		if (length != 0)
3806 			cmn_err(CE_NOTE, "!ddi_prop_create: interface obsolete,"
3807 			    "use ddi_prop_update (prop = %s, node = %s%d)",
3808 			    name, ddi_driver_name(dip), ddi_get_instance(dip));
3809 #endif /* DDI_PROP_DEBUG */
3810 	}
3811 	flag &= ~DDI_PROP_SYSTEM_DEF;
3812 	return (ddi_prop_update_common(dev, dip,
3813 	    (flag | DDI_PROP_STACK_CREATE | DDI_PROP_TYPE_ANY), name,
3814 	    value, length, ddi_prop_fm_encode_bytes));
3815 }
3816 
3817 int
3818 e_ddi_prop_create(dev_t dev, dev_info_t *dip, int flag,
3819     char *name, caddr_t value, int length)
3820 {
3821 	if (!(flag & DDI_PROP_CANSLEEP))
3822 		flag |= DDI_PROP_DONTSLEEP;
3823 	return (ddi_prop_update_common(dev, dip,
3824 	    (flag | DDI_PROP_SYSTEM_DEF | DDI_PROP_STACK_CREATE |
3825 	    DDI_PROP_TYPE_ANY),
3826 	    name, value, length, ddi_prop_fm_encode_bytes));
3827 }
3828 
3829 int
3830 ddi_prop_modify(dev_t dev, dev_info_t *dip, int flag,
3831     char *name, caddr_t value, int length)
3832 {
3833 	ASSERT((flag & DDI_PROP_TYPE_MASK) == 0);
3834 
3835 	/*
3836 	 * If dev_t is DDI_DEV_T_ANY or name's length is zero,
3837 	 * return error.
3838 	 */
3839 	if (dev == DDI_DEV_T_ANY || name == NULL || strlen(name) == 0)
3840 		return (DDI_PROP_INVAL_ARG);
3841 
3842 	if (!(flag & DDI_PROP_CANSLEEP))
3843 		flag |= DDI_PROP_DONTSLEEP;
3844 	flag &= ~DDI_PROP_SYSTEM_DEF;
3845 	if (ddi_prop_exists(dev, dip, (flag | DDI_PROP_NOTPROM), name) == 0)
3846 		return (DDI_PROP_NOT_FOUND);
3847 
3848 	return (ddi_prop_update_common(dev, dip,
3849 	    (flag | DDI_PROP_TYPE_BYTE), name,
3850 	    value, length, ddi_prop_fm_encode_bytes));
3851 }
3852 
3853 int
3854 e_ddi_prop_modify(dev_t dev, dev_info_t *dip, int flag,
3855     char *name, caddr_t value, int length)
3856 {
3857 	ASSERT((flag & DDI_PROP_TYPE_MASK) == 0);
3858 
3859 	/*
3860 	 * If dev_t is DDI_DEV_T_ANY or name's length is zero,
3861 	 * return error.
3862 	 */
3863 	if (dev == DDI_DEV_T_ANY || name == NULL || strlen(name) == 0)
3864 		return (DDI_PROP_INVAL_ARG);
3865 
3866 	if (ddi_prop_exists(dev, dip, (flag | DDI_PROP_SYSTEM_DEF), name) == 0)
3867 		return (DDI_PROP_NOT_FOUND);
3868 
3869 	if (!(flag & DDI_PROP_CANSLEEP))
3870 		flag |= DDI_PROP_DONTSLEEP;
3871 	return (ddi_prop_update_common(dev, dip,
3872 		(flag | DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_BYTE),
3873 		name, value, length, ddi_prop_fm_encode_bytes));
3874 }
3875 
3876 
3877 /*
3878  * Common lookup routine used to lookup and decode a property.
3879  * Creates a property handle, searches for the raw encoded data,
3880  * fills in the handle, and calls the property decode functions
3881  * passed in.
3882  *
3883  * This routine is not static because ddi_bus_prop_op() which lives in
3884  * ddi_impl.c calls it.  No driver should be calling this routine.
3885  */
3886 int
3887 ddi_prop_lookup_common(dev_t match_dev, dev_info_t *dip,
3888     uint_t flags, char *name, void *data, uint_t *nelements,
3889     int (*prop_decoder)(prop_handle_t *, void *data, uint_t *nelements))
3890 {
3891 	int		rval;
3892 	uint_t		ourflags;
3893 	prop_handle_t	ph;
3894 
3895 	if ((match_dev == DDI_DEV_T_NONE) ||
3896 	    (name == NULL) || (strlen(name) == 0))
3897 		return (DDI_PROP_INVAL_ARG);
3898 
3899 	ourflags = (flags & DDI_PROP_DONTSLEEP) ? flags :
3900 		flags | DDI_PROP_CANSLEEP;
3901 
3902 	/*
3903 	 * Get the encoded data
3904 	 */
3905 	bzero(&ph, sizeof (prop_handle_t));
3906 
3907 	if (flags & DDI_UNBND_DLPI2) {
3908 		/*
3909 		 * For unbound dlpi style-2 devices, index into
3910 		 * the devnames' array and search the global
3911 		 * property list.
3912 		 */
3913 		ourflags &= ~DDI_UNBND_DLPI2;
3914 		rval = i_ddi_prop_search_global(match_dev,
3915 		    ourflags, name, &ph.ph_data, &ph.ph_size);
3916 	} else {
3917 		rval = ddi_prop_search_common(match_dev, dip,
3918 		    PROP_LEN_AND_VAL_ALLOC, ourflags, name,
3919 		    &ph.ph_data, &ph.ph_size);
3920 
3921 	}
3922 
3923 	if (rval != DDI_PROP_SUCCESS && rval != DDI_PROP_FOUND_1275) {
3924 		ASSERT(ph.ph_data == NULL);
3925 		ASSERT(ph.ph_size == 0);
3926 		return (rval);
3927 	}
3928 
3929 	/*
3930 	 * If the encoded data came from a OBP or software
3931 	 * use the 1275 OBP decode/encode routines.
3932 	 */
3933 	ph.ph_cur_pos = ph.ph_data;
3934 	ph.ph_save_pos = ph.ph_data;
3935 	ph.ph_ops = &prop_1275_ops;
3936 	ph.ph_flags = (rval == DDI_PROP_FOUND_1275) ? PH_FROM_PROM : 0;
3937 
3938 	rval = (*prop_decoder)(&ph, data, nelements);
3939 
3940 	/*
3941 	 * Free the encoded data
3942 	 */
3943 	if (ph.ph_size != 0)
3944 		kmem_free(ph.ph_data, ph.ph_size);
3945 
3946 	return (rval);
3947 }
3948 
3949 /*
3950  * Lookup and return an array of composite properties.  The driver must
3951  * provide the decode routine.
3952  */
3953 int
3954 ddi_prop_lookup(dev_t match_dev, dev_info_t *dip,
3955     uint_t flags, char *name, void *data, uint_t *nelements,
3956     int (*prop_decoder)(prop_handle_t *, void *data, uint_t *nelements))
3957 {
3958 	return (ddi_prop_lookup_common(match_dev, dip,
3959 	    (flags | DDI_PROP_TYPE_COMPOSITE), name,
3960 	    data, nelements, prop_decoder));
3961 }
3962 
3963 /*
3964  * Return 1 if a property exists (no type checking done).
3965  * Return 0 if it does not exist.
3966  */
3967 int
3968 ddi_prop_exists(dev_t match_dev, dev_info_t *dip, uint_t flags, char *name)
3969 {
3970 	int	i;
3971 	uint_t	x = 0;
3972 
3973 	i = ddi_prop_search_common(match_dev, dip, PROP_EXISTS,
3974 		flags | DDI_PROP_TYPE_MASK, name, NULL, &x);
3975 	return (i == DDI_PROP_SUCCESS || i == DDI_PROP_FOUND_1275);
3976 }
3977 
3978 
3979 /*
3980  * Update an array of composite properties.  The driver must
3981  * provide the encode routine.
3982  */
3983 int
3984 ddi_prop_update(dev_t match_dev, dev_info_t *dip,
3985     char *name, void *data, uint_t nelements,
3986     int (*prop_create)(prop_handle_t *, void *data, uint_t nelements))
3987 {
3988 	return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_COMPOSITE,
3989 	    name, data, nelements, prop_create));
3990 }
3991 
3992 /*
3993  * Get a single integer or boolean property and return it.
3994  * If the property does not exists, or cannot be decoded,
3995  * then return the defvalue passed in.
3996  *
3997  * This routine always succeeds.
3998  */
3999 int
4000 ddi_prop_get_int(dev_t match_dev, dev_info_t *dip, uint_t flags,
4001     char *name, int defvalue)
4002 {
4003 	int	data;
4004 	uint_t	nelements;
4005 	int	rval;
4006 
4007 	if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
4008 	    LDI_DEV_T_ANY | DDI_UNBND_DLPI2)) {
4009 #ifdef DEBUG
4010 		if (dip != NULL) {
4011 			cmn_err(CE_WARN, "ddi_prop_get_int: invalid flag"
4012 			    " 0x%x (prop = %s, node = %s%d)", flags,
4013 			    name, ddi_driver_name(dip), ddi_get_instance(dip));
4014 		}
4015 #endif /* DEBUG */
4016 		flags &= DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
4017 		    LDI_DEV_T_ANY | DDI_UNBND_DLPI2;
4018 	}
4019 
4020 	if ((rval = ddi_prop_lookup_common(match_dev, dip,
4021 	    (flags | DDI_PROP_TYPE_INT), name, &data, &nelements,
4022 	    ddi_prop_fm_decode_int)) != DDI_PROP_SUCCESS) {
4023 		if (rval == DDI_PROP_END_OF_DATA)
4024 			data = 1;
4025 		else
4026 			data = defvalue;
4027 	}
4028 	return (data);
4029 }
4030 
4031 /*
4032  * Get a single 64 bit integer or boolean property and return it.
4033  * If the property does not exists, or cannot be decoded,
4034  * then return the defvalue passed in.
4035  *
4036  * This routine always succeeds.
4037  */
4038 int64_t
4039 ddi_prop_get_int64(dev_t match_dev, dev_info_t *dip, uint_t flags,
4040     char *name, int64_t defvalue)
4041 {
4042 	int64_t	data;
4043 	uint_t	nelements;
4044 	int	rval;
4045 
4046 	if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
4047 	    LDI_DEV_T_ANY | DDI_UNBND_DLPI2)) {
4048 #ifdef DEBUG
4049 		if (dip != NULL) {
4050 			cmn_err(CE_WARN, "ddi_prop_get_int64: invalid flag"
4051 			    " 0x%x (prop = %s, node = %s%d)", flags,
4052 			    name, ddi_driver_name(dip), ddi_get_instance(dip));
4053 		}
4054 #endif /* DEBUG */
4055 		return (DDI_PROP_INVAL_ARG);
4056 	}
4057 
4058 	if ((rval = ddi_prop_lookup_common(match_dev, dip,
4059 	    (flags | DDI_PROP_TYPE_INT64 | DDI_PROP_NOTPROM),
4060 	    name, &data, &nelements, ddi_prop_fm_decode_int64))
4061 	    != DDI_PROP_SUCCESS) {
4062 		if (rval == DDI_PROP_END_OF_DATA)
4063 			data = 1;
4064 		else
4065 			data = defvalue;
4066 	}
4067 	return (data);
4068 }
4069 
4070 /*
4071  * Get an array of integer property
4072  */
4073 int
4074 ddi_prop_lookup_int_array(dev_t match_dev, dev_info_t *dip, uint_t flags,
4075     char *name, int **data, uint_t *nelements)
4076 {
4077 	if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
4078 	    LDI_DEV_T_ANY | DDI_UNBND_DLPI2)) {
4079 #ifdef DEBUG
4080 		if (dip != NULL) {
4081 			cmn_err(CE_WARN, "ddi_prop_lookup_int_array: "
4082 			    "invalid flag 0x%x (prop = %s, node = %s%d)",
4083 			    flags, name, ddi_driver_name(dip),
4084 			    ddi_get_instance(dip));
4085 		}
4086 #endif /* DEBUG */
4087 		flags &= DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
4088 		LDI_DEV_T_ANY | DDI_UNBND_DLPI2;
4089 	}
4090 
4091 	return (ddi_prop_lookup_common(match_dev, dip,
4092 	    (flags | DDI_PROP_TYPE_INT), name, data,
4093 	    nelements, ddi_prop_fm_decode_ints));
4094 }
4095 
4096 /*
4097  * Get an array of 64 bit integer properties
4098  */
4099 int
4100 ddi_prop_lookup_int64_array(dev_t match_dev, dev_info_t *dip, uint_t flags,
4101     char *name, int64_t **data, uint_t *nelements)
4102 {
4103 	if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
4104 	    LDI_DEV_T_ANY | DDI_UNBND_DLPI2)) {
4105 #ifdef DEBUG
4106 		if (dip != NULL) {
4107 			cmn_err(CE_WARN, "ddi_prop_lookup_int64_array: "
4108 			    "invalid flag 0x%x (prop = %s, node = %s%d)",
4109 			    flags, name, ddi_driver_name(dip),
4110 			    ddi_get_instance(dip));
4111 		}
4112 #endif /* DEBUG */
4113 		return (DDI_PROP_INVAL_ARG);
4114 	}
4115 
4116 	return (ddi_prop_lookup_common(match_dev, dip,
4117 	    (flags | DDI_PROP_TYPE_INT64 | DDI_PROP_NOTPROM),
4118 	    name, data, nelements, ddi_prop_fm_decode_int64_array));
4119 }
4120 
4121 /*
4122  * Update a single integer property.  If the property exists on the drivers
4123  * property list it updates, else it creates it.
4124  */
4125 int
4126 ddi_prop_update_int(dev_t match_dev, dev_info_t *dip,
4127     char *name, int data)
4128 {
4129 	return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_INT,
4130 	    name, &data, 1, ddi_prop_fm_encode_ints));
4131 }
4132 
4133 /*
4134  * Update a single 64 bit integer property.
4135  * Update the driver property list if it exists, else create it.
4136  */
4137 int
4138 ddi_prop_update_int64(dev_t match_dev, dev_info_t *dip,
4139     char *name, int64_t data)
4140 {
4141 	return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_INT64,
4142 	    name, &data, 1, ddi_prop_fm_encode_int64));
4143 }
4144 
4145 int
4146 e_ddi_prop_update_int(dev_t match_dev, dev_info_t *dip,
4147     char *name, int data)
4148 {
4149 	return (ddi_prop_update_common(match_dev, dip,
4150 	    DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_INT,
4151 	    name, &data, 1, ddi_prop_fm_encode_ints));
4152 }
4153 
4154 int
4155 e_ddi_prop_update_int64(dev_t match_dev, dev_info_t *dip,
4156     char *name, int64_t data)
4157 {
4158 	return (ddi_prop_update_common(match_dev, dip,
4159 	    DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_INT64,
4160 	    name, &data, 1, ddi_prop_fm_encode_int64));
4161 }
4162 
4163 /*
4164  * Update an array of integer property.  If the property exists on the drivers
4165  * property list it updates, else it creates it.
4166  */
4167 int
4168 ddi_prop_update_int_array(dev_t match_dev, dev_info_t *dip,
4169     char *name, int *data, uint_t nelements)
4170 {
4171 	return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_INT,
4172 	    name, data, nelements, ddi_prop_fm_encode_ints));
4173 }
4174 
4175 /*
4176  * Update an array of 64 bit integer properties.
4177  * Update the driver property list if it exists, else create it.
4178  */
4179 int
4180 ddi_prop_update_int64_array(dev_t match_dev, dev_info_t *dip,
4181     char *name, int64_t *data, uint_t nelements)
4182 {
4183 	return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_INT64,
4184 	    name, data, nelements, ddi_prop_fm_encode_int64));
4185 }
4186 
4187 int
4188 e_ddi_prop_update_int64_array(dev_t match_dev, dev_info_t *dip,
4189     char *name, int64_t *data, uint_t nelements)
4190 {
4191 	return (ddi_prop_update_common(match_dev, dip,
4192 	    DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_INT64,
4193 	    name, data, nelements, ddi_prop_fm_encode_int64));
4194 }
4195 
4196 int
4197 e_ddi_prop_update_int_array(dev_t match_dev, dev_info_t *dip,
4198     char *name, int *data, uint_t nelements)
4199 {
4200 	return (ddi_prop_update_common(match_dev, dip,
4201 	    DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_INT,
4202 	    name, data, nelements, ddi_prop_fm_encode_ints));
4203 }
4204 
4205 /*
4206  * Get a single string property.
4207  */
4208 int
4209 ddi_prop_lookup_string(dev_t match_dev, dev_info_t *dip, uint_t flags,
4210     char *name, char **data)
4211 {
4212 	uint_t x;
4213 
4214 	if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
4215 	    LDI_DEV_T_ANY | DDI_UNBND_DLPI2)) {
4216 #ifdef DEBUG
4217 		if (dip != NULL) {
4218 			cmn_err(CE_WARN, "%s: invalid flag 0x%x "
4219 			    "(prop = %s, node = %s%d); invalid bits ignored",
4220 			    "ddi_prop_lookup_string", flags, name,
4221 			    ddi_driver_name(dip), ddi_get_instance(dip));
4222 		}
4223 #endif /* DEBUG */
4224 		flags &= DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
4225 		    LDI_DEV_T_ANY | DDI_UNBND_DLPI2;
4226 	}
4227 
4228 	return (ddi_prop_lookup_common(match_dev, dip,
4229 	    (flags | DDI_PROP_TYPE_STRING), name, data,
4230 	    &x, ddi_prop_fm_decode_string));
4231 }
4232 
4233 /*
4234  * Get an array of strings property.
4235  */
4236 int
4237 ddi_prop_lookup_string_array(dev_t match_dev, dev_info_t *dip, uint_t flags,
4238     char *name, char ***data, uint_t *nelements)
4239 {
4240 	if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
4241 	    LDI_DEV_T_ANY | DDI_UNBND_DLPI2)) {
4242 #ifdef DEBUG
4243 		if (dip != NULL) {
4244 			cmn_err(CE_WARN, "ddi_prop_lookup_string_array: "
4245 			    "invalid flag 0x%x (prop = %s, node = %s%d)",
4246 			    flags, name, ddi_driver_name(dip),
4247 			    ddi_get_instance(dip));
4248 		}
4249 #endif /* DEBUG */
4250 		flags &= DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
4251 		    LDI_DEV_T_ANY | DDI_UNBND_DLPI2;
4252 	}
4253 
4254 	return (ddi_prop_lookup_common(match_dev, dip,
4255 	    (flags | DDI_PROP_TYPE_STRING), name, data,
4256 	    nelements, ddi_prop_fm_decode_strings));
4257 }
4258 
4259 /*
4260  * Update a single string property.
4261  */
4262 int
4263 ddi_prop_update_string(dev_t match_dev, dev_info_t *dip,
4264     char *name, char *data)
4265 {
4266 	return (ddi_prop_update_common(match_dev, dip,
4267 	    DDI_PROP_TYPE_STRING, name, &data, 1,
4268 	    ddi_prop_fm_encode_string));
4269 }
4270 
4271 int
4272 e_ddi_prop_update_string(dev_t match_dev, dev_info_t *dip,
4273     char *name, char *data)
4274 {
4275 	return (ddi_prop_update_common(match_dev, dip,
4276 	    DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_STRING,
4277 	    name, &data, 1, ddi_prop_fm_encode_string));
4278 }
4279 
4280 
4281 /*
4282  * Update an array of strings property.
4283  */
4284 int
4285 ddi_prop_update_string_array(dev_t match_dev, dev_info_t *dip,
4286     char *name, char **data, uint_t nelements)
4287 {
4288 	return (ddi_prop_update_common(match_dev, dip,
4289 	    DDI_PROP_TYPE_STRING, name, data, nelements,
4290 	    ddi_prop_fm_encode_strings));
4291 }
4292 
4293 int
4294 e_ddi_prop_update_string_array(dev_t match_dev, dev_info_t *dip,
4295     char *name, char **data, uint_t nelements)
4296 {
4297 	return (ddi_prop_update_common(match_dev, dip,
4298 	    DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_STRING,
4299 	    name, data, nelements,
4300 	    ddi_prop_fm_encode_strings));
4301 }
4302 
4303 
4304 /*
4305  * Get an array of bytes property.
4306  */
4307 int
4308 ddi_prop_lookup_byte_array(dev_t match_dev, dev_info_t *dip, uint_t flags,
4309     char *name, uchar_t **data, uint_t *nelements)
4310 {
4311 	if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
4312 	    LDI_DEV_T_ANY | DDI_UNBND_DLPI2)) {
4313 #ifdef DEBUG
4314 		if (dip != NULL) {
4315 			cmn_err(CE_WARN, "ddi_prop_lookup_byte_array: "
4316 			    " invalid flag 0x%x (prop = %s, node = %s%d)",
4317 			    flags, name, ddi_driver_name(dip),
4318 			    ddi_get_instance(dip));
4319 		}
4320 #endif /* DEBUG */
4321 		flags &= DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
4322 		    LDI_DEV_T_ANY | DDI_UNBND_DLPI2;
4323 	}
4324 
4325 	return (ddi_prop_lookup_common(match_dev, dip,
4326 	    (flags | DDI_PROP_TYPE_BYTE), name, data,
4327 	    nelements, ddi_prop_fm_decode_bytes));
4328 }
4329 
4330 /*
4331  * Update an array of bytes property.
4332  */
4333 int
4334 ddi_prop_update_byte_array(dev_t match_dev, dev_info_t *dip,
4335     char *name, uchar_t *data, uint_t nelements)
4336 {
4337 	if (nelements == 0)
4338 		return (DDI_PROP_INVAL_ARG);
4339 
4340 	return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_BYTE,
4341 	    name, data, nelements, ddi_prop_fm_encode_bytes));
4342 }
4343 
4344 
4345 int
4346 e_ddi_prop_update_byte_array(dev_t match_dev, dev_info_t *dip,
4347     char *name, uchar_t *data, uint_t nelements)
4348 {
4349 	if (nelements == 0)
4350 		return (DDI_PROP_INVAL_ARG);
4351 
4352 	return (ddi_prop_update_common(match_dev, dip,
4353 	    DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_BYTE,
4354 	    name, data, nelements, ddi_prop_fm_encode_bytes));
4355 }
4356 
4357 
4358 /*
4359  * ddi_prop_remove_common:	Undefine a managed property:
4360  *			Input dev_t must match dev_t when defined.
4361  *			Returns DDI_PROP_NOT_FOUND, possibly.
4362  *			DDI_PROP_INVAL_ARG is also possible if dev is
4363  *			DDI_DEV_T_ANY or incoming name is the NULL string.
4364  */
4365 int
4366 ddi_prop_remove_common(dev_t dev, dev_info_t *dip, char *name, int flag)
4367 {
4368 	ddi_prop_t	**list_head = &(DEVI(dip)->devi_drv_prop_ptr);
4369 	ddi_prop_t	*propp;
4370 	ddi_prop_t	*lastpropp = NULL;
4371 
4372 	if ((dev == DDI_DEV_T_ANY) || (name == (char *)0) ||
4373 	    (strlen(name) == 0)) {
4374 		return (DDI_PROP_INVAL_ARG);
4375 	}
4376 
4377 	if (flag & DDI_PROP_SYSTEM_DEF)
4378 		list_head = &(DEVI(dip)->devi_sys_prop_ptr);
4379 	else if (flag & DDI_PROP_HW_DEF)
4380 		list_head = &(DEVI(dip)->devi_hw_prop_ptr);
4381 
4382 	mutex_enter(&(DEVI(dip)->devi_lock));
4383 
4384 	for (propp = *list_head; propp != NULL; propp = propp->prop_next)  {
4385 		if (DDI_STRSAME(propp->prop_name, name) &&
4386 		    (dev == propp->prop_dev)) {
4387 			/*
4388 			 * Unlink this propp allowing for it to
4389 			 * be first in the list:
4390 			 */
4391 
4392 			if (lastpropp == NULL)
4393 				*list_head = propp->prop_next;
4394 			else
4395 				lastpropp->prop_next = propp->prop_next;
4396 
4397 			mutex_exit(&(DEVI(dip)->devi_lock));
4398 
4399 			/*
4400 			 * Free memory and return...
4401 			 */
4402 			kmem_free(propp->prop_name,
4403 			    strlen(propp->prop_name) + 1);
4404 			if (propp->prop_len != 0)
4405 				kmem_free(propp->prop_val, propp->prop_len);
4406 			kmem_free(propp, sizeof (ddi_prop_t));
4407 			return (DDI_PROP_SUCCESS);
4408 		}
4409 		lastpropp = propp;
4410 	}
4411 	mutex_exit(&(DEVI(dip)->devi_lock));
4412 	return (DDI_PROP_NOT_FOUND);
4413 }
4414 
/*
 * ddi_prop_remove: Remove a driver-defined property; dev must match the
 *	dev_t used when the property was defined.
 */
int
ddi_prop_remove(dev_t dev, dev_info_t *dip, char *name)
{
	return (ddi_prop_remove_common(dev, dip, name, 0));
}
4420 
/*
 * e_ddi_prop_remove: Remove a system-defined property; dev must match
 *	the dev_t used when the property was defined.
 */
int
e_ddi_prop_remove(dev_t dev, dev_info_t *dip, char *name)
{
	return (ddi_prop_remove_common(dev, dip, name, DDI_PROP_SYSTEM_DEF));
}
4426 
4427 /*
4428  * e_ddi_prop_list_delete: remove a list of properties
4429  *	Note that the caller needs to provide the required protection
4430  *	(eg. devi_lock if these properties are still attached to a devi)
4431  */
4432 void
4433 e_ddi_prop_list_delete(ddi_prop_t *props)
4434 {
4435 	i_ddi_prop_list_delete(props);
4436 }
4437 
4438 /*
4439  * ddi_prop_remove_all_common:
4440  *	Used before unloading a driver to remove
4441  *	all properties. (undefines all dev_t's props.)
4442  *	Also removes `explicitly undefined' props.
4443  *	No errors possible.
4444  */
4445 void
4446 ddi_prop_remove_all_common(dev_info_t *dip, int flag)
4447 {
4448 	ddi_prop_t	**list_head;
4449 
4450 	mutex_enter(&(DEVI(dip)->devi_lock));
4451 	if (flag & DDI_PROP_SYSTEM_DEF) {
4452 		list_head = &(DEVI(dip)->devi_sys_prop_ptr);
4453 	} else if (flag & DDI_PROP_HW_DEF) {
4454 		list_head = &(DEVI(dip)->devi_hw_prop_ptr);
4455 	} else {
4456 		list_head = &(DEVI(dip)->devi_drv_prop_ptr);
4457 	}
4458 	i_ddi_prop_list_delete(*list_head);
4459 	*list_head = NULL;
4460 	mutex_exit(&(DEVI(dip)->devi_lock));
4461 }
4462 
4463 
4464 /*
4465  * ddi_prop_remove_all:		Remove all driver prop definitions.
4466  */
4467 
4468 void
4469 ddi_prop_remove_all(dev_info_t *dip)
4470 {
4471 	ddi_prop_remove_all_common(dip, 0);
4472 }
4473 
4474 /*
4475  * e_ddi_prop_remove_all:	Remove all system prop definitions.
4476  */
4477 
4478 void
4479 e_ddi_prop_remove_all(dev_info_t *dip)
4480 {
4481 	ddi_prop_remove_all_common(dip, (int)DDI_PROP_SYSTEM_DEF);
4482 }
4483 
4484 
4485 /*
4486  * ddi_prop_undefine:	Explicitly undefine a property.  Property
4487  *			searches which match this property return
4488  *			the error code DDI_PROP_UNDEFINED.
4489  *
4490  *			Use ddi_prop_remove to negate effect of
4491  *			ddi_prop_undefine
4492  *
4493  *			See above for error returns.
4494  */
4495 
4496 int
4497 ddi_prop_undefine(dev_t dev, dev_info_t *dip, int flag, char *name)
4498 {
4499 	if (!(flag & DDI_PROP_CANSLEEP))
4500 		flag |= DDI_PROP_DONTSLEEP;
4501 	return (ddi_prop_update_common(dev, dip,
4502 	    (flag | DDI_PROP_STACK_CREATE | DDI_PROP_UNDEF_IT |
4503 	    DDI_PROP_TYPE_ANY), name, NULL, 0, ddi_prop_fm_encode_bytes));
4504 }
4505 
4506 int
4507 e_ddi_prop_undefine(dev_t dev, dev_info_t *dip, int flag, char *name)
4508 {
4509 	if (!(flag & DDI_PROP_CANSLEEP))
4510 		flag |= DDI_PROP_DONTSLEEP;
4511 	return (ddi_prop_update_common(dev, dip,
4512 	    (flag | DDI_PROP_SYSTEM_DEF | DDI_PROP_STACK_CREATE |
4513 	    DDI_PROP_UNDEF_IT | DDI_PROP_TYPE_ANY),
4514 	    name, NULL, 0, ddi_prop_fm_encode_bytes));
4515 }
4516 
4517 /*
4518  * Code to search hardware layer (PROM), if it exists, on behalf of child.
4519  *
4520  * if input dip != child_dip, then call is on behalf of child
4521  * to search PROM, do it via ddi_prop_search_common() and ascend only
4522  * if allowed.
4523  *
4524  * if input dip == ch_dip (child_dip), call is on behalf of root driver,
4525  * to search for PROM defined props only.
4526  *
4527  * Note that the PROM search is done only if the requested dev
4528  * is either DDI_DEV_T_ANY or DDI_DEV_T_NONE. PROM properties
4529  * have no associated dev, thus are automatically associated with
4530  * DDI_DEV_T_NONE.
4531  *
4532  * Modifying flag DDI_PROP_NOTPROM inhibits the search in the h/w layer.
4533  *
4534  * Returns DDI_PROP_FOUND_1275 if found to indicate to framework
4535  * that the property resides in the prom.
4536  */
4537 int
4538 impl_ddi_bus_prop_op(dev_t dev, dev_info_t *dip, dev_info_t *ch_dip,
4539     ddi_prop_op_t prop_op, int mod_flags,
4540     char *name, caddr_t valuep, int *lengthp)
4541 {
4542 	int	len;
4543 	caddr_t buffer;
4544 
4545 	/*
4546 	 * If requested dev is DDI_DEV_T_NONE or DDI_DEV_T_ANY, then
4547 	 * look in caller's PROM if it's a self identifying device...
4548 	 *
4549 	 * Note that this is very similar to ddi_prop_op, but we
4550 	 * search the PROM instead of the s/w defined properties,
4551 	 * and we are called on by the parent driver to do this for
4552 	 * the child.
4553 	 */
4554 
4555 	if (((dev == DDI_DEV_T_NONE) || (dev == DDI_DEV_T_ANY)) &&
4556 	    ndi_dev_is_prom_node(ch_dip) &&
4557 	    ((mod_flags & DDI_PROP_NOTPROM) == 0)) {
4558 		len = prom_getproplen((dnode_t)DEVI(ch_dip)->devi_nodeid, name);
4559 		if (len == -1) {
4560 			return (DDI_PROP_NOT_FOUND);
4561 		}
4562 
4563 		/*
4564 		 * If exists only request, we're done
4565 		 */
4566 		if (prop_op == PROP_EXISTS) {
4567 			return (DDI_PROP_FOUND_1275);
4568 		}
4569 
4570 		/*
4571 		 * If length only request or prop length == 0, get out
4572 		 */
4573 		if ((prop_op == PROP_LEN) || (len == 0)) {
4574 			*lengthp = len;
4575 			return (DDI_PROP_FOUND_1275);
4576 		}
4577 
4578 		/*
4579 		 * Allocate buffer if required... (either way `buffer'
4580 		 * is receiving address).
4581 		 */
4582 
4583 		switch (prop_op) {
4584 
4585 		case PROP_LEN_AND_VAL_ALLOC:
4586 
4587 			buffer = kmem_alloc((size_t)len,
4588 			    mod_flags & DDI_PROP_CANSLEEP ?
4589 			    KM_SLEEP : KM_NOSLEEP);
4590 			if (buffer == NULL) {
4591 				return (DDI_PROP_NO_MEMORY);
4592 			}
4593 			*(caddr_t *)valuep = buffer;
4594 			break;
4595 
4596 		case PROP_LEN_AND_VAL_BUF:
4597 
4598 			if (len > (*lengthp)) {
4599 				*lengthp = len;
4600 				return (DDI_PROP_BUF_TOO_SMALL);
4601 			}
4602 
4603 			buffer = valuep;
4604 			break;
4605 
4606 		default:
4607 			break;
4608 		}
4609 
4610 		/*
4611 		 * Call the PROM function to do the copy.
4612 		 */
4613 		(void) prom_getprop((dnode_t)DEVI(ch_dip)->devi_nodeid,
4614 			name, buffer);
4615 
4616 		*lengthp = len; /* return the actual length to the caller */
4617 		(void) impl_fix_props(dip, ch_dip, name, len, buffer);
4618 		return (DDI_PROP_FOUND_1275);
4619 	}
4620 
4621 	return (DDI_PROP_NOT_FOUND);
4622 }
4623 
4624 /*
4625  * The ddi_bus_prop_op default bus nexus prop op function.
4626  *
4627  * Code to search hardware layer (PROM), if it exists,
4628  * on behalf of child, then, if appropriate, ascend and check
4629  * my own software defined properties...
4630  */
4631 int
4632 ddi_bus_prop_op(dev_t dev, dev_info_t *dip, dev_info_t *ch_dip,
4633     ddi_prop_op_t prop_op, int mod_flags,
4634     char *name, caddr_t valuep, int *lengthp)
4635 {
4636 	int	error;
4637 
4638 	error = impl_ddi_bus_prop_op(dev, dip, ch_dip, prop_op, mod_flags,
4639 				    name, valuep, lengthp);
4640 
4641 	if (error == DDI_PROP_SUCCESS || error == DDI_PROP_FOUND_1275 ||
4642 	    error == DDI_PROP_BUF_TOO_SMALL)
4643 		return (error);
4644 
4645 	if (error == DDI_PROP_NO_MEMORY) {
4646 		cmn_err(CE_CONT, prop_no_mem_msg, name);
4647 		return (DDI_PROP_NO_MEMORY);
4648 	}
4649 
4650 	/*
4651 	 * Check the 'options' node as a last resort
4652 	 */
4653 	if ((mod_flags & DDI_PROP_DONTPASS) != 0)
4654 		return (DDI_PROP_NOT_FOUND);
4655 
4656 	if (ch_dip == ddi_root_node())	{
4657 		/*
4658 		 * As a last resort, when we've reached
4659 		 * the top and still haven't found the
4660 		 * property, see if the desired property
4661 		 * is attached to the options node.
4662 		 *
4663 		 * The options dip is attached right after boot.
4664 		 */
4665 		ASSERT(options_dip != NULL);
4666 		/*
4667 		 * Force the "don't pass" flag to *just* see
4668 		 * what the options node has to offer.
4669 		 */
4670 		return (ddi_prop_search_common(dev, options_dip, prop_op,
4671 		    mod_flags|DDI_PROP_DONTPASS, name, valuep,
4672 		    (uint_t *)lengthp));
4673 	}
4674 
4675 	/*
4676 	 * Otherwise, continue search with parent's s/w defined properties...
4677 	 * NOTE: Using `dip' in following call increments the level.
4678 	 */
4679 
4680 	return (ddi_prop_search_common(dev, dip, prop_op, mod_flags,
4681 	    name, valuep, (uint_t *)lengthp));
4682 }
4683 
4684 /*
4685  * External property functions used by other parts of the kernel...
4686  */
4687 
4688 /*
4689  * e_ddi_getlongprop: See comments for ddi_get_longprop.
4690  */
4691 
4692 int
4693 e_ddi_getlongprop(dev_t dev, vtype_t type, char *name, int flags,
4694     caddr_t valuep, int *lengthp)
4695 {
4696 	_NOTE(ARGUNUSED(type))
4697 	dev_info_t *devi;
4698 	ddi_prop_op_t prop_op = PROP_LEN_AND_VAL_ALLOC;
4699 	int error;
4700 
4701 	if ((devi = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
4702 		return (DDI_PROP_NOT_FOUND);
4703 
4704 	error = cdev_prop_op(dev, devi, prop_op, flags, name, valuep, lengthp);
4705 	ddi_release_devi(devi);
4706 	return (error);
4707 }
4708 
4709 /*
4710  * e_ddi_getlongprop_buf:	See comments for ddi_getlongprop_buf.
4711  */
4712 
4713 int
4714 e_ddi_getlongprop_buf(dev_t dev, vtype_t type, char *name, int flags,
4715     caddr_t valuep, int *lengthp)
4716 {
4717 	_NOTE(ARGUNUSED(type))
4718 	dev_info_t *devi;
4719 	ddi_prop_op_t prop_op = PROP_LEN_AND_VAL_BUF;
4720 	int error;
4721 
4722 	if ((devi = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
4723 		return (DDI_PROP_NOT_FOUND);
4724 
4725 	error = cdev_prop_op(dev, devi, prop_op, flags, name, valuep, lengthp);
4726 	ddi_release_devi(devi);
4727 	return (error);
4728 }
4729 
4730 /*
4731  * e_ddi_getprop:	See comments for ddi_getprop.
4732  */
4733 int
4734 e_ddi_getprop(dev_t dev, vtype_t type, char *name, int flags, int defvalue)
4735 {
4736 	_NOTE(ARGUNUSED(type))
4737 	dev_info_t *devi;
4738 	ddi_prop_op_t prop_op = PROP_LEN_AND_VAL_BUF;
4739 	int	propvalue = defvalue;
4740 	int	proplength = sizeof (int);
4741 	int	error;
4742 
4743 	if ((devi = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
4744 		return (defvalue);
4745 
4746 	error = cdev_prop_op(dev, devi, prop_op,
4747 	    flags, name, (caddr_t)&propvalue, &proplength);
4748 	ddi_release_devi(devi);
4749 
4750 	if ((error == DDI_PROP_SUCCESS) && (proplength == 0))
4751 		propvalue = 1;
4752 
4753 	return (propvalue);
4754 }
4755 
4756 /*
4757  * e_ddi_getprop_int64:
4758  *
4759  * This is a typed interfaces, but predates typed properties. With the
4760  * introduction of typed properties the framework tries to ensure
4761  * consistent use of typed interfaces. This is why TYPE_INT64 is not
4762  * part of TYPE_ANY.  E_ddi_getprop_int64 is a special case where a
4763  * typed interface invokes legacy (non-typed) interfaces:
4764  * cdev_prop_op(), prop_op(9E), ddi_prop_op(9F)).  In this case the
4765  * fact that TYPE_INT64 is not part of TYPE_ANY matters.  To support
4766  * this type of lookup as a single operation we invoke the legacy
4767  * non-typed interfaces with the special CONSUMER_TYPED bit set. The
4768  * framework ddi_prop_op(9F) implementation is expected to check for
4769  * CONSUMER_TYPED and, if set, expand type bits beyond TYPE_ANY
4770  * (currently TYPE_INT64).
4771  */
4772 int64_t
4773 e_ddi_getprop_int64(dev_t dev, vtype_t type, char *name,
4774     int flags, int64_t defvalue)
4775 {
4776 	_NOTE(ARGUNUSED(type))
4777 	dev_info_t	*devi;
4778 	ddi_prop_op_t	prop_op = PROP_LEN_AND_VAL_BUF;
4779 	int64_t		propvalue = defvalue;
4780 	int		proplength = sizeof (propvalue);
4781 	int		error;
4782 
4783 	if ((devi = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
4784 		return (defvalue);
4785 
4786 	error = cdev_prop_op(dev, devi, prop_op, flags |
4787 	    DDI_PROP_CONSUMER_TYPED, name, (caddr_t)&propvalue, &proplength);
4788 	ddi_release_devi(devi);
4789 
4790 	if ((error == DDI_PROP_SUCCESS) && (proplength == 0))
4791 		propvalue = 1;
4792 
4793 	return (propvalue);
4794 }
4795 
4796 /*
4797  * e_ddi_getproplen:	See comments for ddi_getproplen.
4798  */
4799 int
4800 e_ddi_getproplen(dev_t dev, vtype_t type, char *name, int flags, int *lengthp)
4801 {
4802 	_NOTE(ARGUNUSED(type))
4803 	dev_info_t *devi;
4804 	ddi_prop_op_t prop_op = PROP_LEN;
4805 	int error;
4806 
4807 	if ((devi = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
4808 		return (DDI_PROP_NOT_FOUND);
4809 
4810 	error = cdev_prop_op(dev, devi, prop_op, flags, name, NULL, lengthp);
4811 	ddi_release_devi(devi);
4812 	return (error);
4813 }
4814 
4815 /*
4816  * Routines to get at elements of the dev_info structure
4817  */
4818 
4819 /*
4820  * ddi_binding_name: Return the driver binding name of the devinfo node
4821  *		This is the name the OS used to bind the node to a driver.
4822  */
4823 char *
4824 ddi_binding_name(dev_info_t *dip)
4825 {
4826 	return (DEVI(dip)->devi_binding_name);
4827 }
4828 
4829 /*
4830  * ddi_driver_major: Return the major number of the driver that
4831  *		the supplied devinfo is bound to (-1 if none)
4832  */
4833 major_t
4834 ddi_driver_major(dev_info_t *devi)
4835 {
4836 	return (DEVI(devi)->devi_major);
4837 }
4838 
4839 /*
4840  * ddi_driver_name: Return the normalized driver name. this is the
4841  *		actual driver name
4842  */
4843 const char *
4844 ddi_driver_name(dev_info_t *devi)
4845 {
4846 	major_t major;
4847 
4848 	if ((major = ddi_driver_major(devi)) != (major_t)-1)
4849 		return (ddi_major_to_name(major));
4850 
4851 	return (ddi_node_name(devi));
4852 }
4853 
4854 /*
4855  * i_ddi_set_binding_name:	Set binding name.
4856  *
4857  *	Set the binding name to the given name.
4858  *	This routine is for use by the ddi implementation, not by drivers.
4859  */
4860 void
4861 i_ddi_set_binding_name(dev_info_t *dip, char *name)
4862 {
4863 	DEVI(dip)->devi_binding_name = name;
4864 
4865 }
4866 
4867 /*
4868  * ddi_get_name: A synonym of ddi_binding_name() ... returns a name
4869  * the implementation has used to bind the node to a driver.
4870  */
4871 char *
4872 ddi_get_name(dev_info_t *dip)
4873 {
4874 	return (DEVI(dip)->devi_binding_name);
4875 }
4876 
4877 /*
4878  * ddi_node_name: Return the name property of the devinfo node
4879  *		This may differ from ddi_binding_name if the node name
4880  *		does not define a binding to a driver (i.e. generic names).
4881  */
4882 char *
4883 ddi_node_name(dev_info_t *dip)
4884 {
4885 	return (DEVI(dip)->devi_node_name);
4886 }
4887 
4888 
4889 /*
4890  * ddi_get_nodeid:	Get nodeid stored in dev_info structure.
4891  */
4892 int
4893 ddi_get_nodeid(dev_info_t *dip)
4894 {
4895 	return (DEVI(dip)->devi_nodeid);
4896 }
4897 
/*
 * ddi_get_instance: Return the instance number of the devinfo node.
 */
int
ddi_get_instance(dev_info_t *dip)
{
	return (DEVI(dip)->devi_instance);
}
4903 
/*
 * ddi_get_driver: Return the dev_ops vector attached to the devinfo node.
 */
struct dev_ops *
ddi_get_driver(dev_info_t *dip)
{
	return (DEVI(dip)->devi_ops);
}
4909 
/*
 * ddi_set_driver: Attach a dev_ops vector to the devinfo node.
 */
void
ddi_set_driver(dev_info_t *dip, struct dev_ops *devo)
{
	DEVI(dip)->devi_ops = devo;
}
4915 
4916 /*
4917  * ddi_set_driver_private/ddi_get_driver_private:
4918  * Get/set device driver private data in devinfo.
4919  */
4920 void
4921 ddi_set_driver_private(dev_info_t *dip, void *data)
4922 {
4923 	DEVI(dip)->devi_driver_data = data;
4924 }
4925 
/*
 * ddi_get_driver_private: Return the driver's private data pointer.
 */
void *
ddi_get_driver_private(dev_info_t *dip)
{
	return (DEVI(dip)->devi_driver_data);
}
4931 
4932 /*
4933  * ddi_get_parent, ddi_get_child, ddi_get_next_sibling
4934  */
4935 
/*
 * ddi_get_parent: Return the parent devinfo node.
 */
dev_info_t *
ddi_get_parent(dev_info_t *dip)
{
	return ((dev_info_t *)DEVI(dip)->devi_parent);
}
4941 
/*
 * ddi_get_child: Return the first child of the devinfo node.
 */
dev_info_t *
ddi_get_child(dev_info_t *dip)
{
	return ((dev_info_t *)DEVI(dip)->devi_child);
}
4947 
/*
 * ddi_get_next_sibling: Return the next sibling of the devinfo node.
 */
dev_info_t *
ddi_get_next_sibling(dev_info_t *dip)
{
	return ((dev_info_t *)DEVI(dip)->devi_sibling);
}
4953 
/*
 * ddi_get_next: Return the devi_next link of the devinfo node.
 */
dev_info_t *
ddi_get_next(dev_info_t *dip)
{
	return ((dev_info_t *)DEVI(dip)->devi_next);
}
4959 
/*
 * ddi_set_next: Set the devi_next link of the devinfo node.
 */
void
ddi_set_next(dev_info_t *dip, dev_info_t *nextdip)
{
	DEVI(dip)->devi_next = DEVI(nextdip);
}
4965 
4966 /*
4967  * ddi_root_node:		Return root node of devinfo tree
4968  */
4969 
4970 dev_info_t *
4971 ddi_root_node(void)
4972 {
4973 	extern dev_info_t *top_devinfo;
4974 
4975 	return (top_devinfo);
4976 }
4977 
4978 /*
4979  * Miscellaneous functions:
4980  */
4981 
4982 /*
4983  * Implementation specific hooks
4984  */
4985 
/*
 * ddi_report_dev: Report attachment of a device via the REPORTDEV ctlop,
 *	and log the node's full pathname for userland-visible devices.
 */
void
ddi_report_dev(dev_info_t *d)
{
	char *b;

	(void) ddi_ctlops(d, d, DDI_CTLOPS_REPORTDEV, (void *)0, (void *)0);

	/*
	 * If this devinfo node has cb_ops, it's implicitly accessible from
	 * userland, so we print its full name together with the instance
	 * number 'abbreviation' that the driver may use internally.
	 *
	 * Note: if the KM_NOSLEEP allocation fails the message is
	 * silently skipped.
	 */
	if (DEVI(d)->devi_ops->devo_cb_ops != (struct cb_ops *)0 &&
	    (b = kmem_zalloc(MAXPATHLEN, KM_NOSLEEP))) {
		cmn_err(CE_CONT, "?%s%d is %s\n",
		    ddi_driver_name(d), ddi_get_instance(d),
		    ddi_pathname(d, b));
		kmem_free(b, MAXPATHLEN);
	}
}
5006 
5007 /*
5008  * ddi_ctlops() is described in the assembler not to buy a new register
5009  * window when it's called and can reduce cost in climbing the device tree
5010  * without using the tail call optimization.
5011  */
5012 int
5013 ddi_dev_regsize(dev_info_t *dev, uint_t rnumber, off_t *result)
5014 {
5015 	int ret;
5016 
5017 	ret = ddi_ctlops(dev, dev, DDI_CTLOPS_REGSIZE,
5018 	    (void *)&rnumber, (void *)result);
5019 
5020 	return (ret == DDI_SUCCESS ? DDI_SUCCESS : DDI_FAILURE);
5021 }
5022 
/*
 * ddi_dev_nregs: Return (via *result) the number of register sets.
 */
int
ddi_dev_nregs(dev_info_t *dev, int *result)
{
	return (ddi_ctlops(dev, dev, DDI_CTLOPS_NREGS, 0, (void *)result));
}
5028 
/*
 * ddi_dev_is_sid: Ask the parent whether this is a self-identifying device.
 */
int
ddi_dev_is_sid(dev_info_t *d)
{
	return (ddi_ctlops(d, d, DDI_CTLOPS_SIDDEV, (void *)0, (void *)0));
}
5034 
/*
 * ddi_slaveonly: Ask the parent whether the device is in a slave-only slot.
 */
int
ddi_slaveonly(dev_info_t *d)
{
	return (ddi_ctlops(d, d, DDI_CTLOPS_SLAVEONLY, (void *)0, (void *)0));
}
5040 
/*
 * ddi_dev_affinity: Ask the parent whether devices a and b share
 *	hardware affinity.
 */
int
ddi_dev_affinity(dev_info_t *a, dev_info_t *b)
{
	return (ddi_ctlops(a, a, DDI_CTLOPS_AFFINITY, (void *)b, (void *)0));
}
5046 
5047 int
5048 ddi_streams_driver(dev_info_t *dip)
5049 {
5050 	if ((i_ddi_node_state(dip) >= DS_ATTACHED) &&
5051 	    (DEVI(dip)->devi_ops->devo_cb_ops != NULL) &&
5052 	    (DEVI(dip)->devi_ops->devo_cb_ops->cb_str != NULL))
5053 		return (DDI_SUCCESS);
5054 	return (DDI_FAILURE);
5055 }
5056 
5057 /*
5058  * callback free list
5059  */
5060 
5061 static int ncallbacks;
5062 static int nc_low = 170;
5063 static int nc_med = 512;
5064 static int nc_high = 2048;
5065 static struct ddi_callback *callbackq;
5066 static struct ddi_callback *callbackqfree;
5067 
5068 /*
5069  * set/run callback lists
5070  */
5071 struct	cbstats	{
5072 	kstat_named_t	cb_asked;
5073 	kstat_named_t	cb_new;
5074 	kstat_named_t	cb_run;
5075 	kstat_named_t	cb_delete;
5076 	kstat_named_t	cb_maxreq;
5077 	kstat_named_t	cb_maxlist;
5078 	kstat_named_t	cb_alloc;
5079 	kstat_named_t	cb_runouts;
5080 	kstat_named_t	cb_L2;
5081 	kstat_named_t	cb_grow;
5082 } cbstats = {
5083 	{"asked",	KSTAT_DATA_UINT32},
5084 	{"new",		KSTAT_DATA_UINT32},
5085 	{"run",		KSTAT_DATA_UINT32},
5086 	{"delete",	KSTAT_DATA_UINT32},
5087 	{"maxreq",	KSTAT_DATA_UINT32},
5088 	{"maxlist",	KSTAT_DATA_UINT32},
5089 	{"alloc",	KSTAT_DATA_UINT32},
5090 	{"runouts",	KSTAT_DATA_UINT32},
5091 	{"L2",		KSTAT_DATA_UINT32},
5092 	{"grow",	KSTAT_DATA_UINT32},
5093 };
5094 
/* shorthand accessors for the 32-bit kstat values above */
#define	nc_asked	cb_asked.value.ui32
#define	nc_new		cb_new.value.ui32
#define	nc_run		cb_run.value.ui32
#define	nc_delete	cb_delete.value.ui32
#define	nc_maxreq	cb_maxreq.value.ui32
#define	nc_maxlist	cb_maxlist.value.ui32
#define	nc_alloc	cb_alloc.value.ui32
#define	nc_runouts	cb_runouts.value.ui32
#define	nc_L2		cb_L2.value.ui32
#define	nc_grow		cb_grow.value.ui32

/* protects cbstats, the callback lists, and the static free list */
static kmutex_t ddi_callback_mutex;
5107 
5108 /*
5109  * callbacks are handled using a L1/L2 cache. The L1 cache
5110  * comes out of kmem_cache_alloc and can expand/shrink dynamically. If
5111  * we can't get callbacks from the L1 cache [because pageout is doing
5112  * I/O at the time freemem is 0], we allocate callbacks out of the
5113  * L2 cache. The L2 cache is static and depends on the memory size.
5114  * [We might also count the number of devices at probe time and
5115  * allocate one structure per device and adjust for deferred attach]
5116  */
5117 void
5118 impl_ddi_callback_init(void)
5119 {
5120 	int	i;
5121 	uint_t	physmegs;
5122 	kstat_t	*ksp;
5123 
5124 	physmegs = physmem >> (20 - PAGESHIFT);
5125 	if (physmegs < 48) {
5126 		ncallbacks = nc_low;
5127 	} else if (physmegs < 128) {
5128 		ncallbacks = nc_med;
5129 	} else {
5130 		ncallbacks = nc_high;
5131 	}
5132 
5133 	/*
5134 	 * init free list
5135 	 */
5136 	callbackq = kmem_zalloc(
5137 	    ncallbacks * sizeof (struct ddi_callback), KM_SLEEP);
5138 	for (i = 0; i < ncallbacks-1; i++)
5139 		callbackq[i].c_nfree = &callbackq[i+1];
5140 	callbackqfree = callbackq;
5141 
5142 	/* init kstats */
5143 	if (ksp = kstat_create("unix", 0, "cbstats", "misc", KSTAT_TYPE_NAMED,
5144 	    sizeof (cbstats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL)) {
5145 		ksp->ks_data = (void *) &cbstats;
5146 		kstat_install(ksp);
5147 	}
5148 
5149 }
5150 
/*
 * callback_insert: queue (or accumulate) a callback request on *listid.
 *	If an entry with the same function/argument pair exists, its count
 *	is bumped; otherwise a new entry is allocated -- from kmem first,
 *	then the static L2 pool, and finally kmem_alloc_tryhard (KM_PANIC).
 *	Callers hold ddi_callback_mutex.
 */
static void
callback_insert(int (*funcp)(caddr_t), caddr_t arg, uintptr_t *listid,
	int count)
{
	struct ddi_callback *list, *marker, *new;
	size_t size = sizeof (struct ddi_callback);

	/* look for an existing entry; marker trails as the list tail */
	list = marker = (struct ddi_callback *)*listid;
	while (list != NULL) {
		if (list->c_call == funcp && list->c_arg == arg) {
			list->c_count += count;
			return;
		}
		marker = list;
		list = list->c_nlist;
	}
	new = kmem_alloc(size, KM_NOSLEEP);
	if (new == NULL) {
		/* kmem (L1) failed: fall back to the static L2 free list */
		new = callbackqfree;
		if (new == NULL) {
			/* both exhausted: force the allocation (may panic) */
			new = kmem_alloc_tryhard(sizeof (struct ddi_callback),
			    &size, KM_NOSLEEP | KM_PANIC);
			cbstats.nc_grow++;
		} else {
			callbackqfree = new->c_nfree;
			cbstats.nc_L2++;
		}
	}
	/* append at the tail, or start the list if it was empty */
	if (marker != NULL) {
		marker->c_nlist = new;
	} else {
		*listid = (uintptr_t)new;
	}
	new->c_size = size;
	new->c_nlist = NULL;
	new->c_call = funcp;
	new->c_arg = arg;
	new->c_count = count;
	cbstats.nc_new++;
	cbstats.nc_alloc++;
	if (cbstats.nc_alloc > cbstats.nc_maxlist)
		cbstats.nc_maxlist = cbstats.nc_alloc;
}
5194 
/*
 * ddi_set_callback: register funcp(arg) to be called back when resources
 *	become available; requests are queued on the caller's *listid.
 */
void
ddi_set_callback(int (*funcp)(caddr_t), caddr_t arg, uintptr_t *listid)
{
	mutex_enter(&ddi_callback_mutex);
	cbstats.nc_asked++;
	/* track the high-water mark of not-yet-run requests */
	if ((cbstats.nc_asked - cbstats.nc_run) > cbstats.nc_maxreq)
		cbstats.nc_maxreq = (cbstats.nc_asked - cbstats.nc_run);
	(void) callback_insert(funcp, arg, listid, 1);
	mutex_exit(&ddi_callback_mutex);
}
5205 
/*
 * real_callback_run: softcall handler that drains the callback list at
 *	Queue.  Each entry is detached under ddi_callback_mutex, its
 *	storage recycled, and its function invoked up to c_count times
 *	with the mutex dropped; a 0 return from the callback re-queues
 *	the remaining invocations via callback_insert().
 */
static void
real_callback_run(void *Queue)
{
	int (*funcp)(caddr_t);
	caddr_t arg;
	int count, rval;
	uintptr_t *listid;
	struct ddi_callback *list, *marker;
	int check_pending = 1;
	int pending = 0;

	do {
		mutex_enter(&ddi_callback_mutex);
		listid = Queue;
		list = (struct ddi_callback *)*listid;
		if (list == NULL) {
			mutex_exit(&ddi_callback_mutex);
			return;
		}
		/* first pass only: total up the outstanding invocations */
		if (check_pending) {
			marker = list;
			while (marker != NULL) {
				pending += marker->c_count;
				marker = marker->c_nlist;
			}
			check_pending = 0;
		}
		ASSERT(pending > 0);
		ASSERT(list->c_count > 0);
		/* detach the head entry and recycle its storage */
		funcp = list->c_call;
		arg = list->c_arg;
		count = list->c_count;
		*(uintptr_t *)Queue = (uintptr_t)list->c_nlist;
		if (list >= &callbackq[0] &&
		    list <= &callbackq[ncallbacks-1]) {
			/* entry belongs to the static pool; push it back */
			list->c_nfree = callbackqfree;
			callbackqfree = list;
		} else
			kmem_free(list, list->c_size);

		cbstats.nc_delete++;
		cbstats.nc_alloc--;
		mutex_exit(&ddi_callback_mutex);

		/* invoke the callback without holding the mutex */
		do {
			if ((rval = (*funcp)(arg)) == 0) {
				/* not ready: re-queue the remaining count */
				pending -= count;
				mutex_enter(&ddi_callback_mutex);
				(void) callback_insert(funcp, arg, listid,
					count);
				cbstats.nc_runouts++;
			} else {
				pending--;
				mutex_enter(&ddi_callback_mutex);
				cbstats.nc_run++;
			}
			mutex_exit(&ddi_callback_mutex);
		} while (rval != 0 && (--count > 0));
	} while (pending > 0);
}
5266 
/*
 * ddi_run_callback: schedule the callbacks on *listid to run from
 *	softcall (interrupt) context.
 */
void
ddi_run_callback(uintptr_t *listid)
{
	softcall(real_callback_run, listid);
}
5272 
/*
 * nodevinfo: stub that always reports "no devinfo node" for dev.
 */
dev_info_t *
nodevinfo(dev_t dev, int otyp)
{
	_NOTE(ARGUNUSED(dev, otyp))
	return ((dev_info_t *)0);
}
5279 
5280 /*
5281  * A driver should support its own getinfo(9E) entry point. This function
5282  * is provided as a convenience for ON drivers that don't expect their
5283  * getinfo(9E) entry point to be called. A driver that uses this must not
5284  * call ddi_create_minor_node.
5285  */
5286 int
5287 ddi_no_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
5288 {
5289 	_NOTE(ARGUNUSED(dip, infocmd, arg, result))
5290 	return (DDI_FAILURE);
5291 }
5292 
5293 /*
5294  * A driver should support its own getinfo(9E) entry point. This function
5295  * is provided as a convenience for ON drivers that where the minor number
5296  * is the instance. Drivers that do not have 1:1 mapping must implement
5297  * their own getinfo(9E) function.
5298  */
5299 int
5300 ddi_getinfo_1to1(dev_info_t *dip, ddi_info_cmd_t infocmd,
5301     void *arg, void **result)
5302 {
5303 	_NOTE(ARGUNUSED(dip))
5304 	int	instance;
5305 
5306 	if (infocmd != DDI_INFO_DEVT2INSTANCE)
5307 		return (DDI_FAILURE);
5308 
5309 	instance = getminor((dev_t)(uintptr_t)arg);
5310 	*result = (void *)(uintptr_t)instance;
5311 	return (DDI_SUCCESS);
5312 }
5313 
/*
 * ddifail: stub attach/detach entry point that always fails.
 */
int
ddifail(dev_info_t *devi, ddi_attach_cmd_t cmd)
{
	_NOTE(ARGUNUSED(devi, cmd))
	return (DDI_FAILURE);
}
5320 
/*
 * ddi_no_dma_map: stub bus_dma_map; always returns DDI_DMA_NOMAPPING.
 */
int
ddi_no_dma_map(dev_info_t *dip, dev_info_t *rdip,
    struct ddi_dma_req *dmareqp, ddi_dma_handle_t *handlep)
{
	_NOTE(ARGUNUSED(dip, rdip, dmareqp, handlep))
	return (DDI_DMA_NOMAPPING);
}
5328 
/*
 * ddi_no_dma_allochdl: stub bus_dma_allochdl; always DDI_DMA_BADATTR.
 */
int
ddi_no_dma_allochdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_attr_t *attr,
    int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *handlep)
{
	_NOTE(ARGUNUSED(dip, rdip, attr, waitfp, arg, handlep))
	return (DDI_DMA_BADATTR);
}
5336 
/*
 * ddi_no_dma_freehdl: stub bus_dma_freehdl; always fails.
 */
int
ddi_no_dma_freehdl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle)
{
	_NOTE(ARGUNUSED(dip, rdip, handle))
	return (DDI_FAILURE);
}
5344 
/*
 * ddi_no_dma_bindhdl: stub bus_dma_bindhdl; always DDI_DMA_NOMAPPING.
 */
int
ddi_no_dma_bindhdl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, struct ddi_dma_req *dmareq,
    ddi_dma_cookie_t *cp, uint_t *ccountp)
{
	_NOTE(ARGUNUSED(dip, rdip, handle, dmareq, cp, ccountp))
	return (DDI_DMA_NOMAPPING);
}
5353 
/*
 * ddi_no_dma_unbindhdl: stub bus_dma_unbindhdl; always fails.
 */
int
ddi_no_dma_unbindhdl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle)
{
	_NOTE(ARGUNUSED(dip, rdip, handle))
	return (DDI_FAILURE);
}
5361 
/*
 * ddi_no_dma_flush: stub bus_dma_flush; always fails.
 */
int
ddi_no_dma_flush(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, off_t off, size_t len,
    uint_t cache_flags)
{
	_NOTE(ARGUNUSED(dip, rdip, handle, off, len, cache_flags))
	return (DDI_FAILURE);
}
5370 
/*
 * ddi_no_dma_win: stub bus_dma_win; always fails.
 */
int
ddi_no_dma_win(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, uint_t win, off_t *offp,
    size_t *lenp, ddi_dma_cookie_t *cookiep, uint_t *ccountp)
{
	_NOTE(ARGUNUSED(dip, rdip, handle, win, offp, lenp, cookiep, ccountp))
	return (DDI_FAILURE);
}
5379 
/*
 * ddi_no_dma_mctl: stub bus_dma_ctl; always fails.
 */
int
ddi_no_dma_mctl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, enum ddi_dma_ctlops request,
    off_t *offp, size_t *lenp, caddr_t *objp, uint_t flags)
{
	_NOTE(ARGUNUSED(dip, rdip, handle, request, offp, lenp, objp, flags))
	return (DDI_FAILURE);
}
5388 
/*
 * ddivoid: no-op stub entry point.
 */
void
ddivoid(void)
{}
5392 
/*
 * nochpoll: stub chpoll(9E) entry point; always returns ENXIO.
 */
int
nochpoll(dev_t dev, short events, int anyyet, short *reventsp,
    struct pollhead **pollhdrp)
{
	_NOTE(ARGUNUSED(dev, events, anyyet, reventsp, pollhdrp))
	return (ENXIO);
}
5400 
/*
 * ddi_get_cred: Return the credentials of the current thread.
 */
cred_t *
ddi_get_cred(void)
{
	return (CRED());
}
5406 
/*
 * ddi_get_lbolt: Return the current lbolt (clock ticks since boot).
 */
clock_t
ddi_get_lbolt(void)
{
	return (lbolt);
}
5412 
5413 time_t
5414 ddi_get_time(void)
5415 {
5416 	time_t	now;
5417 
5418 	if ((now = gethrestime_sec()) == 0) {
5419 		timestruc_t ts;
5420 		mutex_enter(&tod_lock);
5421 		ts = tod_get();
5422 		mutex_exit(&tod_lock);
5423 		return (ts.tv_sec);
5424 	} else {
5425 		return (now);
5426 	}
5427 }
5428 
/*
 * ddi_get_pid: Return the process id of the current thread's process.
 */
pid_t
ddi_get_pid(void)
{
	return (ttoproc(curthread)->p_pid);
}
5434 
/*
 * ddi_get_kt_did: Return the current kernel thread's t_did identifier.
 */
kt_did_t
ddi_get_kt_did(void)
{
	return (curthread->t_did);
}
5440 
5441 /*
5442  * This function returns B_TRUE if the caller can reasonably expect that a call
5443  * to cv_wait_sig(9F), cv_timedwait_sig(9F), or qwait_sig(9F) could be awakened
5444  * by user-level signal.  If it returns B_FALSE, then the caller should use
5445  * other means to make certain that the wait will not hang "forever."
5446  *
5447  * It does not check the signal mask, nor for reception of any particular
5448  * signal.
5449  *
5450  * Currently, a thread can receive a signal if it's not a kernel thread and it
5451  * is not in the middle of exit(2) tear-down.  Threads that are in that
5452  * tear-down effectively convert cv_wait_sig to cv_wait, cv_timedwait_sig to
5453  * cv_timedwait, and qwait_sig to qwait.
5454  */
5455 boolean_t
5456 ddi_can_receive_sig(void)
5457 {
5458 	proc_t *pp;
5459 
5460 	if (curthread->t_proc_flag & TP_LWPEXIT)
5461 		return (B_FALSE);
5462 	if ((pp = ttoproc(curthread)) == NULL)
5463 		return (B_FALSE);
5464 	return (pp->p_as != &kas);
5465 }
5466 
5467 /*
5468  * Swap bytes in 16-bit [half-]words
5469  */
5470 void
5471 swab(void *src, void *dst, size_t nbytes)
5472 {
5473 	uchar_t *pf = (uchar_t *)src;
5474 	uchar_t *pt = (uchar_t *)dst;
5475 	uchar_t tmp;
5476 	int nshorts;
5477 
5478 	nshorts = nbytes >> 1;
5479 
5480 	while (--nshorts >= 0) {
5481 		tmp = *pf++;
5482 		*pt++ = *pf++;
5483 		*pt++ = tmp;
5484 	}
5485 }
5486 
5487 static void
5488 ddi_append_minor_node(dev_info_t *ddip, struct ddi_minor_data *dmdp)
5489 {
5490 	struct ddi_minor_data *dp;
5491 
5492 	mutex_enter(&(DEVI(ddip)->devi_lock));
5493 	i_devi_enter(ddip, DEVI_S_MD_UPDATE, DEVI_S_MD_UPDATE, 1);
5494 
5495 	if ((dp = DEVI(ddip)->devi_minor) == (struct ddi_minor_data *)NULL) {
5496 		DEVI(ddip)->devi_minor = dmdp;
5497 	} else {
5498 		while (dp->next != (struct ddi_minor_data *)NULL)
5499 			dp = dp->next;
5500 		dp->next = dmdp;
5501 	}
5502 
5503 	i_devi_exit(ddip, DEVI_S_MD_UPDATE, 1);
5504 	mutex_exit(&(DEVI(ddip)->devi_lock));
5505 }
5506 
5507 /*
5508  * Part of the obsolete SunCluster DDI Hooks.
5509  * Keep for binary compatibility
5510  */
5511 minor_t
5512 ddi_getiminor(dev_t dev)
5513 {
5514 	return (getminor(dev));
5515 }
5516 
/*
 * Generate and log an EC_DEVFS/ESC_DEVFS_MINOR_CREATE sysevent for the
 * minor node 'minor_name' created under 'dip', so userland (syseventd/
 * devfsadm) can bring /devices and /dev up to date.
 *
 * Always returns DDI_SUCCESS: if the event cannot be built or delivered
 * we only warn that the namespaces may be stale.  Allocation behavior
 * (sleep vs. nosleep) is chosen by whether we are in interrupt context.
 */
static int
i_log_devfs_minor_create(dev_info_t *dip, char *minor_name)
{
	int se_flag;
	int kmem_flag;
	int se_err;
	char *pathname;
	sysevent_t *ev = NULL;
	sysevent_id_t eid;
	sysevent_value_t se_val;
	sysevent_attr_list_t *ev_attr_list = NULL;

	/* determine interrupt context */
	se_flag = (servicing_interrupt()) ? SE_NOSLEEP : SE_SLEEP;
	kmem_flag = (se_flag == SE_SLEEP) ? KM_SLEEP : KM_NOSLEEP;

	i_ddi_di_cache_invalidate(kmem_flag);

#ifdef DEBUG
	if ((se_flag == SE_NOSLEEP) && sunddi_debug) {
		cmn_err(CE_CONT, "ddi_create_minor_node: called from "
		    "interrupt level by driver %s",
		    ddi_driver_name(dip));
	}
#endif /* DEBUG */

	ev = sysevent_alloc(EC_DEVFS, ESC_DEVFS_MINOR_CREATE, EP_DDI, se_flag);
	if (ev == NULL) {
		goto fail;
	}

	pathname = kmem_alloc(MAXPATHLEN, kmem_flag);
	if (pathname == NULL) {
		sysevent_free(ev);
		goto fail;
	}

	/* attach the /devices path of the node as the DEVFS_PATHNAME attr */
	(void) ddi_pathname(dip, pathname);
	ASSERT(strlen(pathname));
	se_val.value_type = SE_DATA_TYPE_STRING;
	se_val.value.sv_string = pathname;
	if (sysevent_add_attr(&ev_attr_list, DEVFS_PATHNAME,
	    &se_val, se_flag) != 0) {
		kmem_free(pathname, MAXPATHLEN);
		sysevent_free(ev);
		goto fail;
	}
	/* sysevent_add_attr copied the string; the scratch buffer can go */
	kmem_free(pathname, MAXPATHLEN);

	/*
	 * allow for NULL minor names
	 */
	if (minor_name != NULL) {
		se_val.value.sv_string = minor_name;
		if (sysevent_add_attr(&ev_attr_list, DEVFS_MINOR_NAME,
		    &se_val, se_flag) != 0) {
			sysevent_free_attr(ev_attr_list);
			sysevent_free(ev);
			goto fail;
		}
	}

	/* on success the event takes ownership of the attribute list */
	if (sysevent_attach_attributes(ev, ev_attr_list) != 0) {
		sysevent_free_attr(ev_attr_list);
		sysevent_free(ev);
		goto fail;
	}

	if ((se_err = log_sysevent(ev, se_flag, &eid)) != 0) {
		if (se_err == SE_NO_TRANSPORT) {
			/* syseventd is not running; warn and fall through */
			cmn_err(CE_WARN, "/devices or /dev may not be current "
			    "for driver %s (%s). Run devfsadm -i %s",
			    ddi_driver_name(dip), "syseventd not responding",
			    ddi_driver_name(dip));
		} else {
			sysevent_free(ev);
			goto fail;
		}
	}

	sysevent_free(ev);
	return (DDI_SUCCESS);
fail:
	cmn_err(CE_WARN, "/devices or /dev may not be current "
	    "for driver %s. Run devfsadm -i %s",
	    ddi_driver_name(dip), ddi_driver_name(dip));
	return (DDI_SUCCESS);
}
5605 
5606 /*
5607  * failing to remove a minor node is not of interest
5608  * therefore we do not generate an error message
5609  */
5610 static int
5611 i_log_devfs_minor_remove(dev_info_t *dip, char *minor_name)
5612 {
5613 	char *pathname;
5614 	sysevent_t *ev;
5615 	sysevent_id_t eid;
5616 	sysevent_value_t se_val;
5617 	sysevent_attr_list_t *ev_attr_list = NULL;
5618 
5619 	/*
5620 	 * only log ddi_remove_minor_node() calls outside the scope
5621 	 * of attach/detach reconfigurations and when the dip is
5622 	 * still initialized.
5623 	 */
5624 	if (DEVI_IS_ATTACHING(dip) || DEVI_IS_DETACHING(dip) ||
5625 	    (i_ddi_node_state(dip) < DS_INITIALIZED)) {
5626 		return (DDI_SUCCESS);
5627 	}
5628 
5629 	i_ddi_di_cache_invalidate(KM_SLEEP);
5630 
5631 	ev = sysevent_alloc(EC_DEVFS, ESC_DEVFS_MINOR_REMOVE, EP_DDI, SE_SLEEP);
5632 	if (ev == NULL) {
5633 		return (DDI_SUCCESS);
5634 	}
5635 
5636 	pathname = kmem_alloc(MAXPATHLEN, KM_SLEEP);
5637 	if (pathname == NULL) {
5638 		sysevent_free(ev);
5639 		return (DDI_SUCCESS);
5640 	}
5641 
5642 	(void) ddi_pathname(dip, pathname);
5643 	ASSERT(strlen(pathname));
5644 	se_val.value_type = SE_DATA_TYPE_STRING;
5645 	se_val.value.sv_string = pathname;
5646 	if (sysevent_add_attr(&ev_attr_list, DEVFS_PATHNAME,
5647 	    &se_val, SE_SLEEP) != 0) {
5648 		kmem_free(pathname, MAXPATHLEN);
5649 		sysevent_free(ev);
5650 		return (DDI_SUCCESS);
5651 	}
5652 
5653 	kmem_free(pathname, MAXPATHLEN);
5654 
5655 	/*
5656 	 * allow for NULL minor names
5657 	 */
5658 	if (minor_name != NULL) {
5659 		se_val.value.sv_string = minor_name;
5660 		if (sysevent_add_attr(&ev_attr_list, DEVFS_MINOR_NAME,
5661 		    &se_val, SE_SLEEP) != 0) {
5662 			sysevent_free_attr(ev_attr_list);
5663 			goto fail;
5664 		}
5665 	}
5666 
5667 	if (sysevent_attach_attributes(ev, ev_attr_list) != 0) {
5668 		sysevent_free_attr(ev_attr_list);
5669 	} else {
5670 		(void) log_sysevent(ev, SE_SLEEP, &eid);
5671 	}
5672 fail:
5673 	sysevent_free(ev);
5674 	return (DDI_SUCCESS);
5675 }
5676 
5677 /*
5678  * Derive the device class of the node.
5679  * Device class names aren't defined yet. Until this is done we use
5680  * devfs event subclass names as device class names.
5681  */
5682 static int
5683 derive_devi_class(dev_info_t *dip, char *node_type, int flag)
5684 {
5685 	int rv = DDI_SUCCESS;
5686 
5687 	if (i_ddi_devi_class(dip) == NULL) {
5688 		if (strncmp(node_type, DDI_NT_BLOCK,
5689 		    sizeof (DDI_NT_BLOCK) - 1) == 0 &&
5690 		    (node_type[sizeof (DDI_NT_BLOCK) - 1] == '\0' ||
5691 		    node_type[sizeof (DDI_NT_BLOCK) - 1] == ':') &&
5692 		    strcmp(node_type, DDI_NT_FD) != 0) {
5693 
5694 			rv = i_ddi_set_devi_class(dip, ESC_DISK, flag);
5695 
5696 		} else if (strncmp(node_type, DDI_NT_NET,
5697 		    sizeof (DDI_NT_NET) - 1) == 0 &&
5698 		    (node_type[sizeof (DDI_NT_NET) - 1] == '\0' ||
5699 		    node_type[sizeof (DDI_NT_NET) - 1] == ':')) {
5700 
5701 			rv = i_ddi_set_devi_class(dip, ESC_NETWORK, flag);
5702 		}
5703 	}
5704 
5705 	return (rv);
5706 }
5707 
5708 /*
5709  * Check compliance with PSARC 2003/375:
5710  *
5711  * The name must contain only characters a-z, A-Z, 0-9 or _ and it must not
5712  * exceed IFNAMSIZ (16) characters in length.
5713  */
5714 static boolean_t
5715 verify_name(char *name)
5716 {
5717 	size_t	len = strlen(name);
5718 	char	*cp;
5719 
5720 	if (len == 0 || len > IFNAMSIZ)
5721 		return (B_FALSE);
5722 
5723 	for (cp = name; *cp != '\0'; cp++) {
5724 		if (!isalnum(*cp) && *cp != '_')
5725 			return (B_FALSE);
5726 	}
5727 
5728 	return (B_TRUE);
5729 }
5730 
5731 /*
5732  * ddi_create_minor_common:	Create a  ddi_minor_data structure and
5733  *				attach it to the given devinfo node.
5734  */
5735 
5736 int
5737 ddi_create_minor_common(dev_info_t *dip, char *name, int spec_type,
5738     minor_t minor_num, char *node_type, int flag, ddi_minor_type mtype,
5739     const char *read_priv, const char *write_priv, mode_t priv_mode)
5740 {
5741 	struct ddi_minor_data *dmdp;
5742 	major_t major;
5743 
5744 	if (spec_type != S_IFCHR && spec_type != S_IFBLK)
5745 		return (DDI_FAILURE);
5746 
5747 	if (name == NULL)
5748 		return (DDI_FAILURE);
5749 
5750 	/*
5751 	 * Log a message if the minor number the driver is creating
5752 	 * is not expressible on the on-disk filesystem (currently
5753 	 * this is limited to 18 bits both by UFS). The device can
5754 	 * be opened via devfs, but not by device special files created
5755 	 * via mknod().
5756 	 */
5757 	if (minor_num > L_MAXMIN32) {
5758 		cmn_err(CE_WARN,
5759 		    "%s%d:%s minor 0x%x too big for 32-bit applications",
5760 		    ddi_driver_name(dip), ddi_get_instance(dip),
5761 		    name, minor_num);
5762 		return (DDI_FAILURE);
5763 	}
5764 
5765 	/* dip must be bound and attached */
5766 	major = ddi_driver_major(dip);
5767 	ASSERT(major != (major_t)-1);
5768 
5769 	/*
5770 	 * Default node_type to DDI_PSEUDO and issue notice in debug mode
5771 	 */
5772 	if (node_type == NULL) {
5773 		node_type = DDI_PSEUDO;
5774 		NDI_CONFIG_DEBUG((CE_NOTE, "!illegal node_type NULL for %s%d "
5775 		    " minor node %s; default to DDI_PSEUDO",
5776 		    ddi_driver_name(dip), ddi_get_instance(dip), name));
5777 	}
5778 
5779 	/*
5780 	 * If the driver is a network driver, ensure that the name falls within
5781 	 * the interface naming constraints specified by PSARC/2003/375.
5782 	 */
5783 	if (strcmp(node_type, DDI_NT_NET) == 0) {
5784 		if (!verify_name(name))
5785 			return (DDI_FAILURE);
5786 
5787 		if (mtype == DDM_MINOR) {
5788 			struct devnames *dnp = &devnamesp[major];
5789 
5790 			/* Mark driver as a network driver */
5791 			LOCK_DEV_OPS(&dnp->dn_lock);
5792 			dnp->dn_flags |= DN_NETWORK_DRIVER;
5793 			UNLOCK_DEV_OPS(&dnp->dn_lock);
5794 		}
5795 	}
5796 
5797 	if (mtype == DDM_MINOR) {
5798 		if (derive_devi_class(dip,  node_type, KM_NOSLEEP) !=
5799 		    DDI_SUCCESS)
5800 			return (DDI_FAILURE);
5801 	}
5802 
5803 	/*
5804 	 * Take care of minor number information for the node.
5805 	 */
5806 
5807 	if ((dmdp = kmem_zalloc(sizeof (struct ddi_minor_data),
5808 	    KM_NOSLEEP)) == NULL) {
5809 		return (DDI_FAILURE);
5810 	}
5811 	if ((dmdp->ddm_name = i_ddi_strdup(name, KM_NOSLEEP)) == NULL) {
5812 		kmem_free(dmdp, sizeof (struct ddi_minor_data));
5813 		return (DDI_FAILURE);
5814 	}
5815 	dmdp->dip = dip;
5816 	dmdp->ddm_dev = makedevice(major, minor_num);
5817 	dmdp->ddm_spec_type = spec_type;
5818 	dmdp->ddm_node_type = node_type;
5819 	dmdp->type = mtype;
5820 	if (flag & CLONE_DEV) {
5821 		dmdp->type = DDM_ALIAS;
5822 		dmdp->ddm_dev = makedevice(ddi_driver_major(clone_dip), major);
5823 	}
5824 	if (flag & PRIVONLY_DEV) {
5825 		dmdp->ddm_flags |= DM_NO_FSPERM;
5826 	}
5827 	if (read_priv || write_priv) {
5828 		dmdp->ddm_node_priv =
5829 		    devpolicy_priv_by_name(read_priv, write_priv);
5830 	}
5831 	dmdp->ddm_priv_mode = priv_mode;
5832 
5833 	ddi_append_minor_node(dip, dmdp);
5834 
5835 	/*
5836 	 * only log ddi_create_minor_node() calls which occur
5837 	 * outside the scope of attach(9e)/detach(9e) reconfigurations
5838 	 */
5839 	if (!(DEVI_IS_ATTACHING(dip) || DEVI_IS_DETACHING(dip))) {
5840 		(void) i_log_devfs_minor_create(dip, name);
5841 	}
5842 
5843 	/*
5844 	 * Check if any dacf rules match the creation of this minor node
5845 	 */
5846 	dacfc_match_create_minor(name, node_type, dip, dmdp, flag);
5847 	return (DDI_SUCCESS);
5848 }
5849 
/*
 * Create a regular (DDM_MINOR) minor node with no privilege policy.
 */
int
ddi_create_minor_node(dev_info_t *dip, char *name, int spec_type,
    minor_t minor_num, char *node_type, int flag)
{
	return (ddi_create_minor_common(dip, name, spec_type, minor_num,
	    node_type, flag, DDM_MINOR, NULL, NULL, 0));
}
5857 
/*
 * Create a regular (DDM_MINOR) minor node guarded by the given read/write
 * privileges and privileged-access mode.
 */
int
ddi_create_priv_minor_node(dev_info_t *dip, char *name, int spec_type,
    minor_t minor_num, char *node_type, int flag,
    const char *rdpriv, const char *wrpriv, mode_t priv_mode)
{
	return (ddi_create_minor_common(dip, name, spec_type, minor_num,
	    node_type, flag, DDM_MINOR, rdpriv, wrpriv, priv_mode));
}
5866 
/*
 * Create a DDM_DEFAULT minor node (the node opened when no minor name
 * is specified) with no privilege policy.
 */
int
ddi_create_default_minor_node(dev_info_t *dip, char *name, int spec_type,
    minor_t minor_num, char *node_type, int flag)
{
	return (ddi_create_minor_common(dip, name, spec_type, minor_num,
	    node_type, flag, DDM_DEFAULT, NULL, NULL, 0));
}
5874 
5875 /*
5876  * Internal (non-ddi) routine for drivers to export names known
5877  * to the kernel (especially ddi_pathname_to_dev_t and friends)
5878  * but not exported externally to /dev
5879  */
5880 int
5881 ddi_create_internal_pathname(dev_info_t *dip, char *name, int spec_type,
5882     minor_t minor_num)
5883 {
5884 	return (ddi_create_minor_common(dip, name, spec_type, minor_num,
5885 	    "internal", 0, DDM_INTERNAL_PATH, NULL, NULL, 0));
5886 }
5887 
/*
 * Remove the minor node named 'name' from dip's minor list and free it;
 * a NULL 'name' removes every minor node on the list.  Runs under
 * devi_lock and the DEVI_S_MD_UPDATE busy state, mirroring
 * ddi_append_minor_node().
 */
void
ddi_remove_minor_node(dev_info_t *dip, char *name)
{
	struct ddi_minor_data *dmdp, *dmdp1;
	struct ddi_minor_data **dmdp_prev;

	mutex_enter(&(DEVI(dip)->devi_lock));
	i_devi_enter(dip, DEVI_S_MD_UPDATE, DEVI_S_MD_UPDATE, 1);

	dmdp_prev = &DEVI(dip)->devi_minor;
	dmdp = DEVI(dip)->devi_minor;
	while (dmdp != NULL) {
		/* save the successor before we potentially free dmdp */
		dmdp1 = dmdp->next;
		if ((name == NULL || (dmdp->ddm_name != NULL &&
		    strcmp(name, dmdp->ddm_name) == 0))) {
			if (dmdp->ddm_name != NULL) {
				(void) i_log_devfs_minor_remove(dip,
				    dmdp->ddm_name);
				kmem_free(dmdp->ddm_name,
				    strlen(dmdp->ddm_name) + 1);
			}
			/*
			 * Release device privilege, if any.
			 * Release dacf client data associated with this minor
			 * node by storing NULL.
			 */
			if (dmdp->ddm_node_priv)
				dpfree(dmdp->ddm_node_priv);
			dacf_store_info((dacf_infohdl_t)dmdp, NULL);
			kmem_free(dmdp, sizeof (struct ddi_minor_data));
			/* unlink the freed node from the list */
			*dmdp_prev = dmdp1;
			/*
			 * OK, we found it, so get out now -- if we drive on,
			 * we will strcmp against garbage.  See 1139209.
			 */
			if (name != NULL)
				break;
		} else {
			dmdp_prev = &dmdp->next;
		}
		dmdp = dmdp1;
	}

	i_devi_exit(dip, DEVI_S_MD_UPDATE, 1);
	mutex_exit(&(DEVI(dip)->devi_lock));
}
5934 
5935 
/*
 * Return non-zero if the system is currently panicking (panicstr set).
 */
int
ddi_in_panic()
{
	return (panicstr != NULL);
}
5941 
5942 
5943 /*
5944  * Find first bit set in a mask (returned counting from 1 up)
5945  */
5946 
5947 int
5948 ddi_ffs(long mask)
5949 {
5950 	extern int ffs(long mask);
5951 	return (ffs(mask));
5952 }
5953 
5954 /*
5955  * Find last bit set. Take mask and clear
5956  * all but the most significant bit, and
5957  * then let ffs do the rest of the work.
5958  *
5959  * Algorithm courtesy of Steve Chessin.
5960  */
5961 
5962 int
5963 ddi_fls(long mask)
5964 {
5965 	extern int ffs(long);
5966 
5967 	while (mask) {
5968 		long nx;
5969 
5970 		if ((nx = (mask & (mask - 1))) == 0)
5971 			break;
5972 		mask = nx;
5973 	}
5974 	return (ffs(mask));
5975 }
5976 
5977 /*
5978  * The next five routines comprise generic storage management utilities
5979  * for driver soft state structures (in "the old days," this was done
5980  * with a statically sized array - big systems and dynamic loading
5981  * and unloading make heap allocation more attractive)
5982  */
5983 
5984 /*
5985  * Allocate a set of pointers to 'n_items' objects of size 'size'
5986  * bytes.  Each pointer is initialized to nil.
5987  *
5988  * The 'size' and 'n_items' values are stashed in the opaque
5989  * handle returned to the caller.
5990  *
5991  * This implementation interprets 'set of pointers' to mean 'array
5992  * of pointers' but note that nothing in the interface definition
5993  * precludes an implementation that uses, for example, a linked list.
5994  * However there should be a small efficiency gain from using an array
5995  * at lookup time.
5996  *
5997  * NOTE	As an optimization, we make our growable array allocations in
5998  *	powers of two (bytes), since that's how much kmem_alloc (currently)
5999  *	gives us anyway.  It should save us some free/realloc's ..
6000  *
6001  *	As a further optimization, we make the growable array start out
6002  *	with MIN_N_ITEMS in it.
6003  */
6004 
6005 #define	MIN_N_ITEMS	8	/* 8 void *'s == 32 bytes */
6006 
/*
 * Initialize a soft-state set: allocates the i_ddi_soft_state header and
 * an initial pointer array, returning the opaque handle through *state_p.
 * Returns 0 on success or EINVAL for a bad/busy handle or zero size.
 */
int
ddi_soft_state_init(void **state_p, size_t size, size_t n_items)
{
	struct i_ddi_soft_state *ss;

	if (state_p == NULL || *state_p != NULL || size == 0)
		return (EINVAL);

	ss = kmem_zalloc(sizeof (*ss), KM_SLEEP);
	mutex_init(&ss->lock, NULL, MUTEX_DRIVER, NULL);
	ss->size = size;

	if (n_items < MIN_N_ITEMS)
		ss->n_items = MIN_N_ITEMS;
	else {
		int bitlog;

		/*
		 * Round n_items up to a power of two: fls == ffs exactly
		 * when n_items is already a power of two, in which case
		 * back off one so that 1 << bitlog == n_items.
		 */
		if ((bitlog = ddi_fls(n_items)) == ddi_ffs(n_items))
			bitlog--;
		ss->n_items = 1 << bitlog;
	}

	ASSERT(ss->n_items >= n_items);

	ss->array = kmem_zalloc(ss->n_items * sizeof (void *), KM_SLEEP);

	*state_p = ss;

	return (0);
}
6037 
6038 
6039 /*
6040  * Allocate a state structure of size 'size' to be associated
6041  * with item 'item'.
6042  *
6043  * In this implementation, the array is extended to
6044  * allow the requested offset, if needed.
6045  */
6046 int
6047 ddi_soft_state_zalloc(void *state, int item)
6048 {
6049 	struct i_ddi_soft_state *ss;
6050 	void **array;
6051 	void *new_element;
6052 
6053 	if ((ss = state) == NULL || item < 0)
6054 		return (DDI_FAILURE);
6055 
6056 	mutex_enter(&ss->lock);
6057 	if (ss->size == 0) {
6058 		mutex_exit(&ss->lock);
6059 		cmn_err(CE_WARN, "ddi_soft_state_zalloc: bad handle: %s",
6060 		    mod_containing_pc(caller()));
6061 		return (DDI_FAILURE);
6062 	}
6063 
6064 	array = ss->array;	/* NULL if ss->n_items == 0 */
6065 	ASSERT(ss->n_items != 0 && array != NULL);
6066 
6067 	/*
6068 	 * refuse to tread on an existing element
6069 	 */
6070 	if (item < ss->n_items && array[item] != NULL) {
6071 		mutex_exit(&ss->lock);
6072 		return (DDI_FAILURE);
6073 	}
6074 
6075 	/*
6076 	 * Allocate a new element to plug in
6077 	 */
6078 	new_element = kmem_zalloc(ss->size, KM_SLEEP);
6079 
6080 	/*
6081 	 * Check if the array is big enough, if not, grow it.
6082 	 */
6083 	if (item >= ss->n_items) {
6084 		void	**new_array;
6085 		size_t	new_n_items;
6086 		struct i_ddi_soft_state *dirty;
6087 
6088 		/*
6089 		 * Allocate a new array of the right length, copy
6090 		 * all the old pointers to the new array, then
6091 		 * if it exists at all, put the old array on the
6092 		 * dirty list.
6093 		 *
6094 		 * Note that we can't kmem_free() the old array.
6095 		 *
6096 		 * Why -- well the 'get' operation is 'mutex-free', so we
6097 		 * can't easily catch a suspended thread that is just about
6098 		 * to dereference the array we just grew out of.  So we
6099 		 * cons up a header and put it on a list of 'dirty'
6100 		 * pointer arrays.  (Dirty in the sense that there may
6101 		 * be suspended threads somewhere that are in the middle
6102 		 * of referencing them).  Fortunately, we -can- garbage
6103 		 * collect it all at ddi_soft_state_fini time.
6104 		 */
6105 		new_n_items = ss->n_items;
6106 		while (new_n_items < (1 + item))
6107 			new_n_items <<= 1;	/* double array size .. */
6108 
6109 		ASSERT(new_n_items >= (1 + item));	/* sanity check! */
6110 
6111 		new_array = kmem_zalloc(new_n_items * sizeof (void *),
6112 		    KM_SLEEP);
6113 		/*
6114 		 * Copy the pointers into the new array
6115 		 */
6116 		bcopy(array, new_array, ss->n_items * sizeof (void *));
6117 
6118 		/*
6119 		 * Save the old array on the dirty list
6120 		 */
6121 		dirty = kmem_zalloc(sizeof (*dirty), KM_SLEEP);
6122 		dirty->array = ss->array;
6123 		dirty->n_items = ss->n_items;
6124 		dirty->next = ss->next;
6125 		ss->next = dirty;
6126 
6127 		ss->array = (array = new_array);
6128 		ss->n_items = new_n_items;
6129 	}
6130 
6131 	ASSERT(array != NULL && item < ss->n_items && array[item] == NULL);
6132 
6133 	array[item] = new_element;
6134 
6135 	mutex_exit(&ss->lock);
6136 	return (DDI_SUCCESS);
6137 }
6138 
6139 
6140 /*
6141  * Fetch a pointer to the allocated soft state structure.
6142  *
6143  * This is designed to be cheap.
6144  *
6145  * There's an argument that there should be more checking for
6146  * nil pointers and out of bounds on the array.. but we do a lot
6147  * of that in the alloc/free routines.
6148  *
6149  * An array has the convenience that we don't need to lock read-access
6150  * to it c.f. a linked list.  However our "expanding array" strategy
6151  * means that we should hold a readers lock on the i_ddi_soft_state
6152  * structure.
6153  *
6154  * However, from a performance viewpoint, we need to do it without
6155  * any locks at all -- this also makes it a leaf routine.  The algorithm
6156  * is 'lock-free' because we only discard the pointer arrays at
6157  * ddi_soft_state_fini() time.
6158  */
6159 void *
6160 ddi_get_soft_state(void *state, int item)
6161 {
6162 	struct i_ddi_soft_state *ss = state;
6163 
6164 	ASSERT(ss != NULL && item >= 0);
6165 
6166 	if (item < ss->n_items && ss->array != NULL)
6167 		return (ss->array[item]);
6168 	return (NULL);
6169 }
6170 
6171 /*
6172  * Free the state structure corresponding to 'item.'   Freeing an
6173  * element that has either gone or was never allocated is not
6174  * considered an error.  Note that we free the state structure, but
6175  * we don't shrink our pointer array, or discard 'dirty' arrays,
6176  * since even a few pointers don't really waste too much memory.
6177  *
6178  * Passing an item number that is out of bounds, or a null pointer will
6179  * provoke an error message.
6180  */
6181 void
6182 ddi_soft_state_free(void *state, int item)
6183 {
6184 	struct i_ddi_soft_state *ss;
6185 	void **array;
6186 	void *element;
6187 	static char msg[] = "ddi_soft_state_free:";
6188 
6189 	if ((ss = state) == NULL) {
6190 		cmn_err(CE_WARN, "%s null handle: %s",
6191 		    msg, mod_containing_pc(caller()));
6192 		return;
6193 	}
6194 
6195 	element = NULL;
6196 
6197 	mutex_enter(&ss->lock);
6198 
6199 	if ((array = ss->array) == NULL || ss->size == 0) {
6200 		cmn_err(CE_WARN, "%s bad handle: %s",
6201 		    msg, mod_containing_pc(caller()));
6202 	} else if (item < 0 || item >= ss->n_items) {
6203 		cmn_err(CE_WARN, "%s item %d not in range [0..%lu]: %s",
6204 		    msg, item, ss->n_items - 1, mod_containing_pc(caller()));
6205 	} else if (array[item] != NULL) {
6206 		element = array[item];
6207 		array[item] = NULL;
6208 	}
6209 
6210 	mutex_exit(&ss->lock);
6211 
6212 	if (element)
6213 		kmem_free(element, ss->size);
6214 }
6215 
6216 
6217 /*
6218  * Free the entire set of pointers, and any
6219  * soft state structures contained therein.
6220  *
6221  * Note that we don't grab the ss->lock mutex, even though
6222  * we're inspecting the various fields of the data structure.
6223  *
6224  * There is an implicit assumption that this routine will
6225  * never run concurrently with any of the above on this
6226  * particular state structure i.e. by the time the driver
6227  * calls this routine, there should be no other threads
6228  * running in the driver.
6229  */
6230 void
6231 ddi_soft_state_fini(void **state_p)
6232 {
6233 	struct i_ddi_soft_state *ss, *dirty;
6234 	int item;
6235 	static char msg[] = "ddi_soft_state_fini:";
6236 
6237 	if (state_p == NULL || (ss = *state_p) == NULL) {
6238 		cmn_err(CE_WARN, "%s null handle: %s",
6239 		    msg, mod_containing_pc(caller()));
6240 		return;
6241 	}
6242 
6243 	if (ss->size == 0) {
6244 		cmn_err(CE_WARN, "%s bad handle: %s",
6245 		    msg, mod_containing_pc(caller()));
6246 		return;
6247 	}
6248 
6249 	if (ss->n_items > 0) {
6250 		for (item = 0; item < ss->n_items; item++)
6251 			ddi_soft_state_free(ss, item);
6252 		kmem_free(ss->array, ss->n_items * sizeof (void *));
6253 	}
6254 
6255 	/*
6256 	 * Now delete any dirty arrays from previous 'grow' operations
6257 	 */
6258 	for (dirty = ss->next; dirty; dirty = ss->next) {
6259 		ss->next = dirty->next;
6260 		kmem_free(dirty->array, dirty->n_items * sizeof (void *));
6261 		kmem_free(dirty, sizeof (*dirty));
6262 	}
6263 
6264 	mutex_destroy(&ss->lock);
6265 	kmem_free(ss, sizeof (*ss));
6266 
6267 	*state_p = NULL;
6268 }
6269 
6270 /*
6271  * This sets the devi_addr entry in the dev_info structure 'dip' to 'name'.
6272  * Storage is double buffered to prevent updates during devi_addr use -
6273  * double buffering is adaquate for reliable ddi_deviname() consumption.
6274  * The double buffer is not freed until dev_info structure destruction
6275  * (by i_ddi_free_node).
6276  */
6277 void
6278 ddi_set_name_addr(dev_info_t *dip, char *name)
6279 {
6280 	char	*buf = DEVI(dip)->devi_addr_buf;
6281 	char	*newaddr;
6282 
6283 	if (buf == NULL) {
6284 		buf = kmem_zalloc(2 * MAXNAMELEN, KM_SLEEP);
6285 		DEVI(dip)->devi_addr_buf = buf;
6286 	}
6287 
6288 	if (name) {
6289 		ASSERT(strlen(name) < MAXNAMELEN);
6290 		newaddr = (DEVI(dip)->devi_addr == buf) ?
6291 		    (buf + MAXNAMELEN) : buf;
6292 		(void) strlcpy(newaddr, name, MAXNAMELEN);
6293 	} else
6294 		newaddr = NULL;
6295 
6296 	DEVI(dip)->devi_addr = newaddr;
6297 }
6298 
/*
 * Return the unit-address string of 'dip' (NULL if none has been set).
 */
char *
ddi_get_name_addr(dev_info_t *dip)
{
	return (DEVI(dip)->devi_addr);
}
6304 
/*
 * Attach parent-private data 'pd' to devinfo node 'dip'.
 */
void
ddi_set_parent_data(dev_info_t *dip, void *pd)
{
	DEVI(dip)->devi_parent_data = pd;
}
6310 
/*
 * Return the parent-private data previously attached to 'dip'.
 */
void *
ddi_get_parent_data(dev_info_t *dip)
{
	return (DEVI(dip)->devi_parent_data);
}
6316 
6317 /*
6318  * ddi_name_to_major: Returns the major number of a module given its name.
6319  */
6320 major_t
6321 ddi_name_to_major(char *name)
6322 {
6323 	return (mod_name_to_major(name));
6324 }
6325 
6326 /*
6327  * ddi_major_to_name: Returns the module name bound to a major number.
6328  */
6329 char *
6330 ddi_major_to_name(major_t major)
6331 {
6332 	return (mod_major_to_name(major));
6333 }
6334 
6335 /*
6336  * Return the name of the devinfo node pointed at by 'dip' in the buffer
6337  * pointed at by 'name.'  A devinfo node is named as a result of calling
6338  * ddi_initchild().
6339  *
6340  * Note: the driver must be held before calling this function!
6341  */
6342 char *
6343 ddi_deviname(dev_info_t *dip, char *name)
6344 {
6345 	char *addrname;
6346 	char none = '\0';
6347 
6348 	if (dip == ddi_root_node()) {
6349 		*name = '\0';
6350 		return (name);
6351 	}
6352 
6353 	if (i_ddi_node_state(dip) < DS_INITIALIZED) {
6354 		addrname = &none;
6355 	} else {
6356 		addrname = ddi_get_name_addr(dip);
6357 	}
6358 
6359 	if (*addrname == '\0') {
6360 		(void) sprintf(name, "/%s", ddi_node_name(dip));
6361 	} else {
6362 		(void) sprintf(name, "/%s@%s", ddi_node_name(dip), addrname);
6363 	}
6364 
6365 	return (name);
6366 }
6367 
6368 /*
6369  * Spits out the name of device node, typically name@addr, for a given node,
6370  * using the driver name, not the nodename.
6371  *
6372  * Used by match_parent. Not to be used elsewhere.
6373  */
6374 char *
6375 i_ddi_parname(dev_info_t *dip, char *name)
6376 {
6377 	char *addrname;
6378 
6379 	if (dip == ddi_root_node()) {
6380 		*name = '\0';
6381 		return (name);
6382 	}
6383 
6384 	ASSERT(i_ddi_node_state(dip) >= DS_INITIALIZED);
6385 
6386 	if (*(addrname = ddi_get_name_addr(dip)) == '\0')
6387 		(void) sprintf(name, "%s", ddi_binding_name(dip));
6388 	else
6389 		(void) sprintf(name, "%s@%s", ddi_binding_name(dip), addrname);
6390 	return (name);
6391 }
6392 
6393 static char *
6394 pathname_work(dev_info_t *dip, char *path)
6395 {
6396 	char *bp;
6397 
6398 	if (dip == ddi_root_node()) {
6399 		*path = '\0';
6400 		return (path);
6401 	}
6402 	(void) pathname_work(ddi_get_parent(dip), path);
6403 	bp = path + strlen(path);
6404 	(void) ddi_deviname(dip, bp);
6405 	return (path);
6406 }
6407 
/*
 * Build the full device path of 'dip' into 'path'; the caller supplies
 * a buffer large enough for the result (MAXPATHLEN by convention).
 */
char *
ddi_pathname(dev_info_t *dip, char *path)
{
	return (pathname_work(dip, path));
}
6413 
6414 /*
6415  * Given a dev_t, return the pathname of the corresponding device in the
6416  * buffer pointed at by "path."  The buffer is assumed to be large enough
6417  * to hold the pathname of the device (MAXPATHLEN).
6418  *
6419  * The pathname of a device is the pathname of the devinfo node to which
6420  * the device "belongs," concatenated with the character ':' and the name
6421  * of the minor node corresponding to the dev_t.  If spec_type is 0 then
6422  * just the pathname of the devinfo node is returned without driving attach
6423  * of that node.  For a non-zero spec_type, an attach is performed and a
6424  * search of the minor list occurs.
6425  *
6426  * It is possible that the path associated with the dev_t is not
6427  * currently available in the devinfo tree.  In order to have a
6428  * dev_t, a device must have been discovered before, which means
6429  * that the path is always in the instance tree.  The one exception
6430  * to this is if the dev_t is associated with a pseudo driver, in
6431  * which case the device must exist on the pseudo branch of the
6432  * devinfo tree as a result of parsing .conf files.
6433  */
6434 int
6435 ddi_dev_pathname(dev_t devt, int spec_type, char *path)
6436 {
6437 	major_t		major = getmajor(devt);
6438 	int		instance;
6439 	dev_info_t	*dip;
6440 	char		*minorname;
6441 	char		*drvname;
6442 
6443 	if (major >= devcnt)
6444 		goto fail;
6445 	if (major == clone_major) {
6446 		/* clone has no minor nodes, manufacture the path here */
6447 		if ((drvname = ddi_major_to_name(getminor(devt))) == NULL)
6448 			goto fail;
6449 
6450 		(void) snprintf(path, MAXPATHLEN, "%s:%s", CLONE_PATH, drvname);
6451 		return (DDI_SUCCESS);
6452 	}
6453 
6454 	/* extract instance from devt (getinfo(9E) DDI_INFO_DEVT2INSTANCE). */
6455 	if ((instance = dev_to_instance(devt)) == -1)
6456 		goto fail;
6457 
6458 	/* reconstruct the path given the major/instance */
6459 	if (e_ddi_majorinstance_to_path(major, instance, path) != DDI_SUCCESS)
6460 		goto fail;
6461 
6462 	/* if spec_type given we must drive attach and search minor nodes */
6463 	if ((spec_type == S_IFCHR) || (spec_type == S_IFBLK)) {
6464 		/* attach the path so we can search minors */
6465 		if ((dip = e_ddi_hold_devi_by_path(path, 0)) == NULL)
6466 			goto fail;
6467 
6468 		/* Add minorname to path. */
6469 		mutex_enter(&(DEVI(dip)->devi_lock));
6470 		minorname = i_ddi_devtspectype_to_minorname(dip,
6471 		    devt, spec_type);
6472 		if (minorname) {
6473 			(void) strcat(path, ":");
6474 			(void) strcat(path, minorname);
6475 		}
6476 		mutex_exit(&(DEVI(dip)->devi_lock));
6477 		ddi_release_devi(dip);
6478 		if (minorname == NULL)
6479 			goto fail;
6480 	}
6481 	ASSERT(strlen(path) < MAXPATHLEN);
6482 	return (DDI_SUCCESS);
6483 
6484 fail:	*path = 0;
6485 	return (DDI_FAILURE);
6486 }
6487 
6488 /*
6489  * Given a major number and an instance, return the path.
6490  * This interface does NOT drive attach.
6491  */
6492 int
6493 e_ddi_majorinstance_to_path(major_t major, int instance, char *path)
6494 {
6495 	struct devnames *dnp;
6496 	dev_info_t	*dip;
6497 
6498 	if ((major >= devcnt) || (instance == -1)) {
6499 		*path = 0;
6500 		return (DDI_FAILURE);
6501 	}
6502 
6503 	/* look for the major/instance in the instance tree */
6504 	if (e_ddi_instance_majorinstance_to_path(major, instance,
6505 	    path) == DDI_SUCCESS) {
6506 		ASSERT(strlen(path) < MAXPATHLEN);
6507 		return (DDI_SUCCESS);
6508 	}
6509 
6510 	/*
6511 	 * Not in instance tree, find the instance on the per driver list and
6512 	 * construct path to instance via ddi_pathname(). This is how paths
6513 	 * down the 'pseudo' branch are constructed.
6514 	 */
6515 	dnp = &(devnamesp[major]);
6516 	LOCK_DEV_OPS(&(dnp->dn_lock));
6517 	for (dip = dnp->dn_head; dip;
6518 	    dip = (dev_info_t *)DEVI(dip)->devi_next) {
6519 		/* Skip if instance does not match. */
6520 		if (DEVI(dip)->devi_instance != instance)
6521 			continue;
6522 
6523 		/*
6524 		 * An ndi_hold_devi() does not prevent DS_INITIALIZED->DS_BOUND
6525 		 * node demotion, so it is not an effective way of ensuring
6526 		 * that the ddi_pathname result has a unit-address.  Instead,
6527 		 * we reverify the node state after calling ddi_pathname().
6528 		 */
6529 		if (i_ddi_node_state(dip) >= DS_INITIALIZED) {
6530 			(void) ddi_pathname(dip, path);
6531 			if (i_ddi_node_state(dip) < DS_INITIALIZED)
6532 				continue;
6533 			UNLOCK_DEV_OPS(&(dnp->dn_lock));
6534 			ASSERT(strlen(path) < MAXPATHLEN);
6535 			return (DDI_SUCCESS);
6536 		}
6537 	}
6538 	UNLOCK_DEV_OPS(&(dnp->dn_lock));
6539 
6540 	/* can't reconstruct the path */
6541 	*path = 0;
6542 	return (DDI_FAILURE);
6543 }
6544 
6545 #define	GLD_DRIVER_PPA "SUNW,gld_v0_ppa"
6546 
6547 /*
6548  * Given the dip for a network interface return the ppa for that interface.
6549  *
6550  * In all cases except GLD v0 drivers, the ppa == instance.
6551  * In the case of GLD v0 drivers, the ppa is equal to the attach order.
6552  * So for these drivers when the attach routine calls gld_register(),
6553  * the GLD framework creates an integer property called "gld_driver_ppa"
6554  * that can be queried here.
6555  *
6556  * The only time this function is used is when a system is booting over nfs.
6557  * In this case the system has to resolve the pathname of the boot device
 * to its ppa.
6559  */
6560 int
6561 i_ddi_devi_get_ppa(dev_info_t *dip)
6562 {
6563 	return (ddi_prop_get_int(DDI_DEV_T_ANY, dip,
6564 			DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
6565 			GLD_DRIVER_PPA, ddi_get_instance(dip)));
6566 }
6567 
6568 /*
6569  * i_ddi_devi_set_ppa() should only be called from gld_register()
6570  * and only for GLD v0 drivers
6571  */
6572 void
6573 i_ddi_devi_set_ppa(dev_info_t *dip, int ppa)
6574 {
6575 	(void) e_ddi_prop_update_int(DDI_DEV_T_NONE, dip, GLD_DRIVER_PPA, ppa);
6576 }
6577 
6578 
6579 /*
6580  * Private DDI Console bell functions.
6581  */
6582 void
6583 ddi_ring_console_bell(clock_t duration)
6584 {
6585 	if (ddi_console_bell_func != NULL)
6586 		(*ddi_console_bell_func)(duration);
6587 }
6588 
/*
 * Register (or clear, with NULL) the console bell handler used by
 * ddi_ring_console_bell().
 */
void
ddi_set_console_bell(void (*bellfunc)(clock_t duration))
{
	ddi_console_bell_func = bellfunc;
}
6594 
6595 int
6596 ddi_dma_alloc_handle(dev_info_t *dip, ddi_dma_attr_t *attr,
6597 	int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *handlep)
6598 {
6599 	int (*funcp)() = ddi_dma_allochdl;
6600 	ddi_dma_attr_t dma_attr;
6601 	struct bus_ops *bop;
6602 
6603 	if (attr == (ddi_dma_attr_t *)0)
6604 		return (DDI_DMA_BADATTR);
6605 
6606 	dma_attr = *attr;
6607 
6608 	bop = DEVI(dip)->devi_ops->devo_bus_ops;
6609 	if (bop && bop->bus_dma_allochdl)
6610 		funcp = bop->bus_dma_allochdl;
6611 
6612 	return ((*funcp)(dip, dip, &dma_attr, waitfp, arg, handlep));
6613 }
6614 
/*
 * Free a DMA handle previously obtained from ddi_dma_alloc_handle().
 *
 * NOTE(review): the local must apparently be named 'h' — the HD macro
 * (defined earlier in this file) appears to expand in terms of a
 * variable of that name; confirm against the HD definition before
 * renaming.
 */
void
ddi_dma_free_handle(ddi_dma_handle_t *handlep)
{
	ddi_dma_handle_t h = *handlep;
	(void) ddi_dma_freehdl(HD, HD, h);
}
6621 
6622 static uintptr_t dma_mem_list_id = 0;
6623 
6624 
/*
 * Allocate memory for DMA transfers on behalf of the device associated
 * with 'handle', returning the kernel virtual address in *kaddrp and a
 * data access handle in *handlep.
 *
 * xfermodes selects the allocation flavor: DDI_DMA_CONSISTENT uses the
 * requested length as-is; any other value allows the allocator to round
 * up, with the actual size returned through *real_length.
 *
 * waitfp may be DDI_DMA_SLEEP, DDI_DMA_DONTWAIT, or a real callback;
 * when a real callback is given and the allocation fails, the callback
 * is queued on dma_mem_list_id (ddi_dma_mem_free() drains that list).
 *
 * Returns DDI_SUCCESS or DDI_FAILURE.
 */
int
ddi_dma_mem_alloc(ddi_dma_handle_t handle, size_t length,
	ddi_device_acc_attr_t *accattrp, uint_t xfermodes,
	int (*waitfp)(caddr_t), caddr_t arg, caddr_t *kaddrp,
	size_t *real_length, ddi_acc_handle_t *handlep)
{
	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
	dev_info_t *dip = hp->dmai_rdip;
	ddi_acc_hdl_t *ap;
	ddi_dma_attr_t *attrp = &hp->dmai_attr;
	uint_t sleepflag;
	int (*fp)(caddr_t);
	int rval;

	/* map the DDI wait policy onto a kmem sleep flag */
	if (waitfp == DDI_DMA_SLEEP)
		fp = (int (*)())KM_SLEEP;
	else if (waitfp == DDI_DMA_DONTWAIT)
		fp = (int (*)())KM_NOSLEEP;
	else
		fp = waitfp;
	*handlep = impl_acc_hdl_alloc(fp, arg);
	if (*handlep == NULL)
		return (DDI_FAILURE);

	/*
	 * initialize the common elements of data access handle
	 */
	ap = impl_acc_hdl_get(*handlep);
	ap->ah_vers = VERS_ACCHDL;
	ap->ah_dip = dip;
	ap->ah_offset = 0;
	ap->ah_len = 0;
	ap->ah_xfermodes = xfermodes;
	ap->ah_acc = *accattrp;

	sleepflag = ((waitfp == DDI_DMA_SLEEP) ? 1 : 0);
	if (xfermodes == DDI_DMA_CONSISTENT) {
		/* consistent: exact length, flag 0 (matches the free path) */
		rval = i_ddi_mem_alloc(dip, attrp, length, sleepflag, 0,
			    accattrp, kaddrp, NULL, ap);
		*real_length = length;
	} else {
		/* streaming: flag 1, allocator reports the rounded size */
		rval = i_ddi_mem_alloc(dip, attrp, length, sleepflag, 1,
			    accattrp, kaddrp, real_length, ap);
	}
	if (rval == DDI_SUCCESS) {
		/* record what we handed out, for ddi_dma_mem_free() */
		ap->ah_len = (off_t)(*real_length);
		ap->ah_addr = *kaddrp;
	} else {
		impl_acc_hdl_free(*handlep);
		*handlep = (ddi_acc_handle_t)NULL;
		/* queue a real callback to retry when memory frees up */
		if (waitfp != DDI_DMA_SLEEP && waitfp != DDI_DMA_DONTWAIT) {
			ddi_set_callback(waitfp, arg, &dma_mem_list_id);
		}
		rval = DDI_FAILURE;
	}
	return (rval);
}
6682 
6683 void
6684 ddi_dma_mem_free(ddi_acc_handle_t *handlep)
6685 {
6686 	ddi_acc_hdl_t *ap;
6687 
6688 	ap = impl_acc_hdl_get(*handlep);
6689 	ASSERT(ap);
6690 
6691 	if (ap->ah_xfermodes == DDI_DMA_CONSISTENT) {
6692 		i_ddi_mem_free((caddr_t)ap->ah_addr, 0);
6693 	} else {
6694 		i_ddi_mem_free((caddr_t)ap->ah_addr, 1);
6695 	}
6696 
6697 	/*
6698 	 * free the handle
6699 	 */
6700 	impl_acc_hdl_free(*handlep);
6701 	*handlep = (ddi_acc_handle_t)NULL;
6702 
6703 	if (dma_mem_list_id != 0) {
6704 		ddi_run_callback(&dma_mem_list_id);
6705 	}
6706 }
6707 
/*
 * Bind the memory described by a buf(9S) structure to a DMA handle,
 * returning the first DMA cookie in *cookiep and the cookie count in
 * *ccountp.  The request is built from the buf's flags and then
 * forwarded to the parent nexus bind function.
 */
int
ddi_dma_buf_bind_handle(ddi_dma_handle_t handle, struct buf *bp,
	uint_t flags, int (*waitfp)(caddr_t), caddr_t arg,
	ddi_dma_cookie_t *cookiep, uint_t *ccountp)
{
	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
	dev_info_t *hdip, *dip;
	struct ddi_dma_req dmareq;
	int (*funcp)();

	dmareq.dmar_flags = flags;
	dmareq.dmar_fp = waitfp;
	dmareq.dmar_arg = arg;
	dmareq.dmar_object.dmao_size = (uint_t)bp->b_bcount;

	/* paged I/O that has not been remapped: describe it as pages */
	if ((bp->b_flags & (B_PAGEIO|B_REMAPPED)) == B_PAGEIO) {
		dmareq.dmar_object.dmao_type = DMA_OTYP_PAGES;
		dmareq.dmar_object.dmao_obj.pp_obj.pp_pp = bp->b_pages;
		dmareq.dmar_object.dmao_obj.pp_obj.pp_offset =
		    (uint_t)(((uintptr_t)bp->b_un.b_addr) & MMU_PAGEOFFSET);
	} else {
		/* otherwise describe it as a virtual address range */
		dmareq.dmar_object.dmao_obj.virt_obj.v_addr = bp->b_un.b_addr;
		if ((bp->b_flags & (B_SHADOW|B_REMAPPED)) == B_SHADOW) {
			/* shadow page list rides along as private data */
			dmareq.dmar_object.dmao_obj.virt_obj.v_priv =
							bp->b_shadow;
			dmareq.dmar_object.dmao_type = DMA_OTYP_BUFVADDR;
		} else {
			dmareq.dmar_object.dmao_type =
				(bp->b_flags & (B_PHYS | B_REMAPPED))?
				DMA_OTYP_BUFVADDR : DMA_OTYP_VADDR;
			dmareq.dmar_object.dmao_obj.virt_obj.v_priv = NULL;
		}

		/*
		 * If the buffer has no proc pointer, or the proc
		 * struct has the kernel address space, or the buffer has
		 * been marked B_REMAPPED (meaning that it is now
		 * mapped into the kernel's address space), then
		 * the address space is kas (kernel address space).
		 */
		if (bp->b_proc == NULL || bp->b_proc->p_as == &kas ||
		    (bp->b_flags & B_REMAPPED) != 0) {
			dmareq.dmar_object.dmao_obj.virt_obj.v_as = 0;
		} else {
			dmareq.dmar_object.dmao_obj.virt_obj.v_as =
			    bp->b_proc->p_as;
		}
	}

	/* hand the request to the parent nexus bind implementation */
	dip = hp->dmai_rdip;
	hdip = (dev_info_t *)DEVI(dip)->devi_bus_dma_bindhdl;
	funcp = DEVI(dip)->devi_bus_dma_bindfunc;
	return ((*funcp)(hdip, dip, handle, &dmareq, cookiep, ccountp));
}
6762 
6763 int
6764 ddi_dma_addr_bind_handle(ddi_dma_handle_t handle, struct as *as,
6765 	caddr_t addr, size_t len, uint_t flags, int (*waitfp)(caddr_t),
6766 	caddr_t arg, ddi_dma_cookie_t *cookiep, uint_t *ccountp)
6767 {
6768 	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
6769 	dev_info_t *hdip, *dip;
6770 	struct ddi_dma_req dmareq;
6771 	int (*funcp)();
6772 
6773 	if (len == (uint_t)0) {
6774 		return (DDI_DMA_NOMAPPING);
6775 	}
6776 	dmareq.dmar_flags = flags;
6777 	dmareq.dmar_fp = waitfp;
6778 	dmareq.dmar_arg = arg;
6779 	dmareq.dmar_object.dmao_size = len;
6780 	dmareq.dmar_object.dmao_type = DMA_OTYP_VADDR;
6781 	dmareq.dmar_object.dmao_obj.virt_obj.v_as = as;
6782 	dmareq.dmar_object.dmao_obj.virt_obj.v_addr = addr;
6783 	dmareq.dmar_object.dmao_obj.virt_obj.v_priv = NULL;
6784 
6785 	dip = hp->dmai_rdip;
6786 	hdip = (dev_info_t *)DEVI(dip)->devi_bus_dma_bindhdl;
6787 	funcp = DEVI(dip)->devi_bus_dma_bindfunc;
6788 	return ((*funcp)(hdip, dip, handle, &dmareq, cookiep, ccountp));
6789 }
6790 
6791 void
6792 ddi_dma_nextcookie(ddi_dma_handle_t handle, ddi_dma_cookie_t *cookiep)
6793 {
6794 	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
6795 	ddi_dma_cookie_t *cp;
6796 
6797 	cp = hp->dmai_cookie;
6798 	ASSERT(cp);
6799 
6800 	cookiep->dmac_notused = cp->dmac_notused;
6801 	cookiep->dmac_type = cp->dmac_type;
6802 	cookiep->dmac_address = cp->dmac_address;
6803 	cookiep->dmac_size = cp->dmac_size;
6804 	hp->dmai_cookie++;
6805 }
6806 
6807 int
6808 ddi_dma_numwin(ddi_dma_handle_t handle, uint_t *nwinp)
6809 {
6810 	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
6811 	if ((hp->dmai_rflags & DDI_DMA_PARTIAL) == 0) {
6812 		return (DDI_FAILURE);
6813 	} else {
6814 		*nwinp = hp->dmai_nwin;
6815 		return (DDI_SUCCESS);
6816 	}
6817 }
6818 
6819 int
6820 ddi_dma_getwin(ddi_dma_handle_t h, uint_t win, off_t *offp,
6821 	size_t *lenp, ddi_dma_cookie_t *cookiep, uint_t *ccountp)
6822 {
6823 	int (*funcp)() = ddi_dma_win;
6824 	struct bus_ops *bop;
6825 
6826 	bop = DEVI(HD)->devi_ops->devo_bus_ops;
6827 	if (bop && bop->bus_dma_win)
6828 		funcp = bop->bus_dma_win;
6829 
6830 	return ((*funcp)(HD, HD, h, win, offp, lenp, cookiep, ccountp));
6831 }
6832 
/*
 * Request 64-bit SBus transfers for this DMA handle via a
 * DDI_DMA_SET_SBUS64 mctl, passing the desired burst sizes.
 */
int
ddi_dma_set_sbus64(ddi_dma_handle_t h, ulong_t burstsizes)
{
	return (ddi_dma_mctl(HD, HD, h, DDI_DMA_SET_SBUS64, 0,
		&burstsizes, 0, 0));
}
6839 
/*
 * Default fault-check routine used by ddi_check_dma_handle(): report
 * the handle's recorded fault state, which is set/cleared by
 * i_ddi_dma_set_fault()/i_ddi_dma_clr_fault().
 */
int
i_ddi_dma_fault_check(ddi_dma_impl_t *hp)
{
	return (hp->dmai_fault);
}
6845 
6846 int
6847 ddi_check_dma_handle(ddi_dma_handle_t handle)
6848 {
6849 	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
6850 	int (*check)(ddi_dma_impl_t *);
6851 
6852 	if ((check = hp->dmai_fault_check) == NULL)
6853 		check = i_ddi_dma_fault_check;
6854 
6855 	return (((*check)(hp) == DDI_SUCCESS) ? DDI_SUCCESS : DDI_FAILURE);
6856 }
6857 
6858 void
6859 i_ddi_dma_set_fault(ddi_dma_handle_t handle)
6860 {
6861 	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
6862 	void (*notify)(ddi_dma_impl_t *);
6863 
6864 	if (!hp->dmai_fault) {
6865 		hp->dmai_fault = 1;
6866 		if ((notify = hp->dmai_fault_notify) != NULL)
6867 			(*notify)(hp);
6868 	}
6869 }
6870 
6871 void
6872 i_ddi_dma_clr_fault(ddi_dma_handle_t handle)
6873 {
6874 	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
6875 	void (*notify)(ddi_dma_impl_t *);
6876 
6877 	if (hp->dmai_fault) {
6878 		hp->dmai_fault = 0;
6879 		if ((notify = hp->dmai_fault_notify) != NULL)
6880 			(*notify)(hp);
6881 	}
6882 }
6883 
6884 /*
6885  * register mapping routines.
6886  */
/*
 * Map register set 'rnumber' of device 'dip' into kernel space,
 * returning the mapped address in *addrp and a data access handle in
 * *handle.  The actual mapping is performed by the parent via
 * ddi_map(); on failure the handle is freed and *handle is NULLed.
 */
int
ddi_regs_map_setup(dev_info_t *dip, uint_t rnumber, caddr_t *addrp,
	offset_t offset, offset_t len, ddi_device_acc_attr_t *accattrp,
	ddi_acc_handle_t *handle)
{
	ddi_map_req_t mr;
	ddi_acc_hdl_t *hp;
	int result;

	/*
	 * Allocate and initialize the common elements of data access handle.
	 */
	*handle = impl_acc_hdl_alloc(KM_SLEEP, NULL);
	hp = impl_acc_hdl_get(*handle);
	hp->ah_vers = VERS_ACCHDL;
	hp->ah_dip = dip;
	hp->ah_rnumber = rnumber;
	hp->ah_offset = offset;
	hp->ah_len = len;
	hp->ah_acc = *accattrp;

	/*
	 * Set up the mapping request and call to parent.
	 */
	mr.map_op = DDI_MO_MAP_LOCKED;
	mr.map_type = DDI_MT_RNUMBER;
	mr.map_obj.rnumber = rnumber;
	mr.map_prot = PROT_READ | PROT_WRITE;
	mr.map_flags = DDI_MF_KERNEL_MAPPING;
	mr.map_handlep = hp;
	mr.map_vers = DDI_MAP_VERSION;
	result = ddi_map(dip, &mr, offset, len, addrp);

	/*
	 * check for end result
	 */
	if (result != DDI_SUCCESS) {
		/* mapping failed: undo the handle allocation */
		impl_acc_hdl_free(*handle);
		*handle = (ddi_acc_handle_t)NULL;
	} else {
		/* remember the mapped address for ddi_regs_map_free() */
		hp->ah_addr = *addrp;
	}

	return (result);
}
6932 
/*
 * Undo a ddi_regs_map_setup(): ask the parent to unmap the register
 * set recorded in the access handle, then free the handle and NULL
 * out the caller's handle pointer.
 */
void
ddi_regs_map_free(ddi_acc_handle_t *handlep)
{
	ddi_map_req_t mr;
	ddi_acc_hdl_t *hp;

	hp = impl_acc_hdl_get(*handlep);
	ASSERT(hp);

	/* rebuild the map request from what setup stored in the handle */
	mr.map_op = DDI_MO_UNMAP;
	mr.map_type = DDI_MT_RNUMBER;
	mr.map_obj.rnumber = hp->ah_rnumber;
	mr.map_prot = PROT_READ | PROT_WRITE;
	mr.map_flags = DDI_MF_KERNEL_MAPPING;
	mr.map_handlep = hp;
	mr.map_vers = DDI_MAP_VERSION;

	/*
	 * Call my parent to unmap my regs.
	 */
	(void) ddi_map(hp->ah_dip, &mr, hp->ah_offset,
		hp->ah_len, &hp->ah_addr);
	/*
	 * free the handle
	 */
	impl_acc_hdl_free(*handlep);
	*handlep = (ddi_acc_handle_t)NULL;
}
6961 
6962 int
6963 ddi_device_zero(ddi_acc_handle_t handle, caddr_t dev_addr, size_t bytecount,
6964 	ssize_t dev_advcnt, uint_t dev_datasz)
6965 {
6966 	uint8_t *b;
6967 	uint16_t *w;
6968 	uint32_t *l;
6969 	uint64_t *ll;
6970 
6971 	/* check for total byte count is multiple of data transfer size */
6972 	if (bytecount != ((bytecount / dev_datasz) * dev_datasz))
6973 		return (DDI_FAILURE);
6974 
6975 	switch (dev_datasz) {
6976 	case DDI_DATA_SZ01_ACC:
6977 		for (b = (uint8_t *)dev_addr;
6978 			bytecount != 0; bytecount -= 1, b += dev_advcnt)
6979 			ddi_put8(handle, b, 0);
6980 		break;
6981 	case DDI_DATA_SZ02_ACC:
6982 		for (w = (uint16_t *)dev_addr;
6983 			bytecount != 0; bytecount -= 2, w += dev_advcnt)
6984 			ddi_put16(handle, w, 0);
6985 		break;
6986 	case DDI_DATA_SZ04_ACC:
6987 		for (l = (uint32_t *)dev_addr;
6988 			bytecount != 0; bytecount -= 4, l += dev_advcnt)
6989 			ddi_put32(handle, l, 0);
6990 		break;
6991 	case DDI_DATA_SZ08_ACC:
6992 		for (ll = (uint64_t *)dev_addr;
6993 			bytecount != 0; bytecount -= 8, ll += dev_advcnt)
6994 			ddi_put64(handle, ll, 0x0ll);
6995 		break;
6996 	default:
6997 		return (DDI_FAILURE);
6998 	}
6999 	return (DDI_SUCCESS);
7000 }
7001 
7002 int
7003 ddi_device_copy(
7004 	ddi_acc_handle_t src_handle, caddr_t src_addr, ssize_t src_advcnt,
7005 	ddi_acc_handle_t dest_handle, caddr_t dest_addr, ssize_t dest_advcnt,
7006 	size_t bytecount, uint_t dev_datasz)
7007 {
7008 	uint8_t *b_src, *b_dst;
7009 	uint16_t *w_src, *w_dst;
7010 	uint32_t *l_src, *l_dst;
7011 	uint64_t *ll_src, *ll_dst;
7012 
7013 	/* check for total byte count is multiple of data transfer size */
7014 	if (bytecount != ((bytecount / dev_datasz) * dev_datasz))
7015 		return (DDI_FAILURE);
7016 
7017 	switch (dev_datasz) {
7018 	case DDI_DATA_SZ01_ACC:
7019 		b_src = (uint8_t *)src_addr;
7020 		b_dst = (uint8_t *)dest_addr;
7021 
7022 		for (; bytecount != 0; bytecount -= 1) {
7023 			ddi_put8(dest_handle, b_dst,
7024 				ddi_get8(src_handle, b_src));
7025 			b_dst += dest_advcnt;
7026 			b_src += src_advcnt;
7027 		}
7028 		break;
7029 	case DDI_DATA_SZ02_ACC:
7030 		w_src = (uint16_t *)src_addr;
7031 		w_dst = (uint16_t *)dest_addr;
7032 
7033 		for (; bytecount != 0; bytecount -= 2) {
7034 			ddi_put16(dest_handle, w_dst,
7035 				ddi_get16(src_handle, w_src));
7036 			w_dst += dest_advcnt;
7037 			w_src += src_advcnt;
7038 		}
7039 		break;
7040 	case DDI_DATA_SZ04_ACC:
7041 		l_src = (uint32_t *)src_addr;
7042 		l_dst = (uint32_t *)dest_addr;
7043 
7044 		for (; bytecount != 0; bytecount -= 4) {
7045 			ddi_put32(dest_handle, l_dst,
7046 				ddi_get32(src_handle, l_src));
7047 			l_dst += dest_advcnt;
7048 			l_src += src_advcnt;
7049 		}
7050 		break;
7051 	case DDI_DATA_SZ08_ACC:
7052 		ll_src = (uint64_t *)src_addr;
7053 		ll_dst = (uint64_t *)dest_addr;
7054 
7055 		for (; bytecount != 0; bytecount -= 8) {
7056 			ddi_put64(dest_handle, ll_dst,
7057 				ddi_get64(src_handle, ll_src));
7058 			ll_dst += dest_advcnt;
7059 			ll_src += src_advcnt;
7060 		}
7061 		break;
7062 	default:
7063 		return (DDI_FAILURE);
7064 	}
7065 	return (DDI_SUCCESS);
7066 }
7067 
/*
 * Byte-swap helper macros for the ddi_swapN() functions below.  Each
 * reverses the byte order of its argument.  The argument is evaluated
 * more than once, so it must be free of side effects.
 */
#define	swap16(value)  \
	((((value) & 0xff) << 8) | ((value) >> 8))

/* swap 32 bits by swapping the bytes within each 16-bit half */
#define	swap32(value)	\
	(((uint32_t)swap16((uint16_t)((value) & 0xffff)) << 16) | \
	(uint32_t)swap16((uint16_t)((value) >> 16)))

/* swap 64 bits by swapping the bytes within each 32-bit half */
#define	swap64(value)	\
	(((uint64_t)swap32((uint32_t)((value) & 0xffffffff)) \
	    << 32) | \
	(uint64_t)swap32((uint32_t)((value) >> 32)))
7079 
/* Reverse the byte order of a 16-bit value. */
uint16_t
ddi_swap16(uint16_t value)
{
	return ((uint16_t)((value << 8) | (value >> 8)));
}
7085 
/* Reverse the byte order of a 32-bit value. */
uint32_t
ddi_swap32(uint32_t value)
{
	return (((value & 0x000000ff) << 24) |
	    ((value & 0x0000ff00) << 8) |
	    ((value & 0x00ff0000) >> 8) |
	    ((value & 0xff000000) >> 24));
}
7091 
/* Reverse the byte order of a 64-bit value, one byte at a time. */
uint64_t
ddi_swap64(uint64_t value)
{
	uint64_t swapped = 0;
	int byte;

	for (byte = 0; byte < 8; byte++) {
		swapped = (swapped << 8) | (value & 0xff);
		value >>= 8;
	}
	return (swapped);
}
7097 
7098 /*
7099  * Convert a binding name to a driver name.
7100  * A binding name is the name used to determine the driver for a
7101  * device - it may be either an alias for the driver or the name
7102  * of the driver itself.
7103  */
7104 char *
7105 i_binding_to_drv_name(char *bname)
7106 {
7107 	major_t major_no;
7108 
7109 	ASSERT(bname != NULL);
7110 
7111 	if ((major_no = ddi_name_to_major(bname)) == -1)
7112 		return (NULL);
7113 	return (ddi_major_to_name(major_no));
7114 }
7115 
7116 /*
7117  * Search for minor name that has specified dev_t and spec_type.
7118  * If spec_type is zero then any dev_t match works.  Since we
7119  * are returning a pointer to the minor name string, we require the
7120  * caller to do the locking.
7121  */
7122 char *
7123 i_ddi_devtspectype_to_minorname(dev_info_t *dip, dev_t dev, int spec_type)
7124 {
7125 	struct ddi_minor_data	*dmdp;
7126 
7127 	/*
7128 	 * The did layered driver currently intentionally returns a
7129 	 * devinfo ptr for an underlying sd instance based on a did
7130 	 * dev_t. In this case it is not an error.
7131 	 *
7132 	 * The did layered driver is associated with Sun Cluster.
7133 	 */
7134 	ASSERT((ddi_driver_major(dip) == getmajor(dev)) ||
7135 		(strcmp(ddi_major_to_name(getmajor(dev)), "did") == 0));
7136 	ASSERT(MUTEX_HELD(&(DEVI(dip)->devi_lock)));
7137 
7138 	for (dmdp = DEVI(dip)->devi_minor; dmdp; dmdp = dmdp->next) {
7139 		if (((dmdp->type == DDM_MINOR) ||
7140 		    (dmdp->type == DDM_INTERNAL_PATH) ||
7141 		    (dmdp->type == DDM_DEFAULT)) &&
7142 		    (dmdp->ddm_dev == dev) &&
7143 		    ((((spec_type & (S_IFCHR|S_IFBLK))) == 0) ||
7144 		    (dmdp->ddm_spec_type == spec_type)))
7145 			return (dmdp->ddm_name);
7146 	}
7147 
7148 	return (NULL);
7149 }
7150 
7151 /*
7152  * Find the devt and spectype of the specified minor_name.
7153  * Return DDI_FAILURE if minor_name not found. Since we are
7154  * returning everything via arguments we can do the locking.
7155  */
7156 int
7157 i_ddi_minorname_to_devtspectype(dev_info_t *dip, char *minor_name,
7158 	dev_t *devtp, int *spectypep)
7159 {
7160 	struct ddi_minor_data	*dmdp;
7161 
7162 	/* deal with clone minor nodes */
7163 	if (dip == clone_dip) {
7164 		major_t	major;
7165 		/*
7166 		 * Make sure minor_name is a STREAMS driver.
7167 		 * We load the driver but don't attach to any instances.
7168 		 */
7169 
7170 		major = ddi_name_to_major(minor_name);
7171 		if (major == (major_t)-1)
7172 			return (DDI_FAILURE);
7173 
7174 		if (ddi_hold_driver(major) == NULL)
7175 			return (DDI_FAILURE);
7176 
7177 		if (STREAMSTAB(major) == NULL) {
7178 			ddi_rele_driver(major);
7179 			return (DDI_FAILURE);
7180 		}
7181 		ddi_rele_driver(major);
7182 
7183 		if (devtp)
7184 			*devtp = makedevice(clone_major, (minor_t)major);
7185 
7186 		if (spectypep)
7187 			*spectypep = S_IFCHR;
7188 
7189 		return (DDI_SUCCESS);
7190 	}
7191 
7192 	ASSERT(!MUTEX_HELD(&(DEVI(dip)->devi_lock)));
7193 	mutex_enter(&(DEVI(dip)->devi_lock));
7194 
7195 	for (dmdp = DEVI(dip)->devi_minor; dmdp; dmdp = dmdp->next) {
7196 		if (((dmdp->type != DDM_MINOR) &&
7197 		    (dmdp->type != DDM_INTERNAL_PATH) &&
7198 		    (dmdp->type != DDM_DEFAULT)) ||
7199 		    strcmp(minor_name, dmdp->ddm_name))
7200 			continue;
7201 
7202 		if (devtp)
7203 			*devtp = dmdp->ddm_dev;
7204 
7205 		if (spectypep)
7206 			*spectypep = dmdp->ddm_spec_type;
7207 
7208 		mutex_exit(&(DEVI(dip)->devi_lock));
7209 		return (DDI_SUCCESS);
7210 	}
7211 
7212 	mutex_exit(&(DEVI(dip)->devi_lock));
7213 	return (DDI_FAILURE);
7214 }
7215 
7216 extern char	hw_serial[];
7217 static kmutex_t devid_gen_mutex;
7218 static short	devid_gen_number;
7219 
7220 #ifdef DEBUG
7221 
7222 static int	devid_register_corrupt = 0;
7223 static int	devid_register_corrupt_major = 0;
7224 static int	devid_register_corrupt_hint = 0;
7225 static int	devid_register_corrupt_hint_major = 0;
7226 
7227 static int devid_lyr_debug = 0;
7228 
7229 #define	DDI_DEBUG_DEVID_DEVTS(msg, ndevs, devs)		\
7230 	if (devid_lyr_debug)					\
7231 		ddi_debug_devid_devts(msg, ndevs, devs)
7232 
7233 #else
7234 
7235 #define	DDI_DEBUG_DEVID_DEVTS(msg, ndevs, devs)
7236 
7237 #endif /* DEBUG */
7238 
7239 
7240 #ifdef	DEBUG
7241 
7242 static void
7243 ddi_debug_devid_devts(char *msg, int ndevs, dev_t *devs)
7244 {
7245 	int i;
7246 
7247 	cmn_err(CE_CONT, "%s:\n", msg);
7248 	for (i = 0; i < ndevs; i++) {
7249 		cmn_err(CE_CONT, "    0x%lx\n", devs[i]);
7250 	}
7251 }
7252 
7253 static void
7254 ddi_debug_devid_paths(char *msg, int npaths, char **paths)
7255 {
7256 	int i;
7257 
7258 	cmn_err(CE_CONT, "%s:\n", msg);
7259 	for (i = 0; i < npaths; i++) {
7260 		cmn_err(CE_CONT, "    %s\n", paths[i]);
7261 	}
7262 }
7263 
7264 static void
7265 ddi_debug_devid_devts_per_path(char *path, int ndevs, dev_t *devs)
7266 {
7267 	int i;
7268 
7269 	cmn_err(CE_CONT, "dev_ts per path %s\n", path);
7270 	for (i = 0; i < ndevs; i++) {
7271 		cmn_err(CE_CONT, "    0x%lx\n", devs[i]);
7272 	}
7273 }
7274 
7275 #endif	/* DEBUG */
7276 
7277 /*
7278  * Register device id into DDI framework.
7279  * Must be called when device is attached.
7280  */
7281 static int
7282 i_ddi_devid_register(dev_info_t *dip, ddi_devid_t devid)
7283 {
7284 	impl_devid_t	*i_devid = (impl_devid_t *)devid;
7285 	size_t		driver_len;
7286 	const char	*driver_name;
7287 	char		*devid_str;
7288 	major_t		major;
7289 
7290 	if ((dip == NULL) ||
7291 	    ((major = ddi_driver_major(dip)) == (major_t)-1))
7292 		return (DDI_FAILURE);
7293 
7294 	/* verify that the devid is valid */
7295 	if (ddi_devid_valid(devid) != DDI_SUCCESS)
7296 		return (DDI_FAILURE);
7297 
7298 	/* Updating driver name hint in devid */
7299 	driver_name = ddi_driver_name(dip);
7300 	driver_len = strlen(driver_name);
7301 	if (driver_len > DEVID_HINT_SIZE) {
7302 		/* Pick up last four characters of driver name */
7303 		driver_name += driver_len - DEVID_HINT_SIZE;
7304 		driver_len = DEVID_HINT_SIZE;
7305 	}
7306 	bzero(i_devid->did_driver, DEVID_HINT_SIZE);
7307 	bcopy(driver_name, i_devid->did_driver, driver_len);
7308 
7309 #ifdef DEBUG
7310 	/* Corrupt the devid for testing. */
7311 	if (devid_register_corrupt)
7312 		i_devid->did_id[0] += devid_register_corrupt;
7313 	if (devid_register_corrupt_major &&
7314 	    (major == devid_register_corrupt_major))
7315 		i_devid->did_id[0] += 1;
7316 	if (devid_register_corrupt_hint)
7317 		i_devid->did_driver[0] += devid_register_corrupt_hint;
7318 	if (devid_register_corrupt_hint_major &&
7319 	    (major == devid_register_corrupt_hint_major))
7320 		i_devid->did_driver[0] += 1;
7321 #endif /* DEBUG */
7322 
7323 	/* encode the devid as a string */
7324 	if ((devid_str = ddi_devid_str_encode(devid, NULL)) == NULL)
7325 		return (DDI_FAILURE);
7326 
7327 	/* add string as a string property */
7328 	if (ndi_prop_update_string(DDI_DEV_T_NONE, dip,
7329 	    DEVID_PROP_NAME, devid_str) != DDI_SUCCESS) {
7330 		cmn_err(CE_WARN, "%s%d: devid property update failed",
7331 			ddi_driver_name(dip), ddi_get_instance(dip));
7332 		ddi_devid_str_free(devid_str);
7333 		return (DDI_FAILURE);
7334 	}
7335 
7336 	ddi_devid_str_free(devid_str);
7337 
7338 #ifdef	DEVID_COMPATIBILITY
7339 	/*
7340 	 * marker for devinfo snapshot compatibility.
7341 	 * This code gets deleted when di_devid is gone from libdevid
7342 	 */
7343 	DEVI(dip)->devi_devid = DEVID_COMPATIBILITY;
7344 #endif	/* DEVID_COMPATIBILITY */
7345 	return (DDI_SUCCESS);
7346 }
7347 
/*
 * Public entry point to register a device id: registers it with the
 * DDI framework and, on success, also records it in the devid-to-path
 * cache and sets DEVI_REGISTERED_DEVID on the node.  A cache failure
 * is warned about but does not fail the registration.
 */
int
ddi_devid_register(dev_info_t *dip, ddi_devid_t devid)
{
	int rval;

	rval = i_ddi_devid_register(dip, devid);
	if (rval == DDI_SUCCESS) {
		/*
		 * Register devid in devid-to-path cache
		 */
		if (e_devid_cache_register(dip, devid) == DDI_SUCCESS) {
			/* flag the node so unregister knows to clean up */
			mutex_enter(&DEVI(dip)->devi_lock);
			DEVI(dip)->devi_flags |= DEVI_REGISTERED_DEVID;
			mutex_exit(&DEVI(dip)->devi_lock);
		} else {
			cmn_err(CE_WARN, "%s%d: failed to cache devid",
				ddi_driver_name(dip), ddi_get_instance(dip));
		}
	} else {
		cmn_err(CE_WARN, "%s%d: failed to register devid",
			ddi_driver_name(dip), ddi_get_instance(dip));
	}
	return (rval);
}
7372 
7373 /*
7374  * Remove (unregister) device id from DDI framework.
7375  * Must be called when device is detached.
7376  */
7377 static void
7378 i_ddi_devid_unregister(dev_info_t *dip)
7379 {
7380 #ifdef	DEVID_COMPATIBILITY
7381 	/*
7382 	 * marker for micro release devinfo snapshot compatibility.
7383 	 * This code gets deleted for the minor release.
7384 	 */
7385 	DEVI(dip)->devi_devid = NULL;		/* unset DEVID_PROP */
7386 #endif	/* DEVID_COMPATIBILITY */
7387 
7388 	/* remove the devid property */
7389 	(void) ndi_prop_remove(DDI_DEV_T_NONE, dip, DEVID_PROP_NAME);
7390 }
7391 
/*
 * Public entry point to unregister a device id: clears the
 * DEVI_REGISTERED_DEVID flag, drops the devid-to-path cache entry,
 * then removes the devid from the DDI framework.
 */
void
ddi_devid_unregister(dev_info_t *dip)
{
	mutex_enter(&DEVI(dip)->devi_lock);
	DEVI(dip)->devi_flags &= ~DEVI_REGISTERED_DEVID;
	mutex_exit(&DEVI(dip)->devi_lock);
	e_devid_cache_unregister(dip);
	i_ddi_devid_unregister(dip);
}
7401 
7402 /*
7403  * Allocate and initialize a device id.
7404  */
7405 int
7406 ddi_devid_init(
7407 	dev_info_t	*dip,
7408 	ushort_t	devid_type,
7409 	ushort_t	nbytes,
7410 	void		*id,
7411 	ddi_devid_t	*ret_devid)
7412 {
7413 	impl_devid_t	*i_devid;
7414 	int		sz = sizeof (*i_devid) + nbytes - sizeof (char);
7415 	int		driver_len;
7416 	const char	*driver_name;
7417 
7418 	switch (devid_type) {
7419 	case DEVID_SCSI3_WWN:
7420 		/*FALLTHRU*/
7421 	case DEVID_SCSI_SERIAL:
7422 		/*FALLTHRU*/
7423 	case DEVID_ATA_SERIAL:
7424 		/*FALLTHRU*/
7425 	case DEVID_ENCAP:
7426 		if (nbytes == 0)
7427 			return (DDI_FAILURE);
7428 		if (id == NULL)
7429 			return (DDI_FAILURE);
7430 		break;
7431 	case DEVID_FAB:
7432 		if (nbytes != 0)
7433 			return (DDI_FAILURE);
7434 		if (id != NULL)
7435 			return (DDI_FAILURE);
7436 		nbytes = sizeof (int) +
7437 		    sizeof (struct timeval32) + sizeof (short);
7438 		sz += nbytes;
7439 		break;
7440 	default:
7441 		return (DDI_FAILURE);
7442 	}
7443 
7444 	if ((i_devid = kmem_zalloc(sz, KM_SLEEP)) == NULL)
7445 		return (DDI_FAILURE);
7446 
7447 	i_devid->did_magic_hi = DEVID_MAGIC_MSB;
7448 	i_devid->did_magic_lo = DEVID_MAGIC_LSB;
7449 	i_devid->did_rev_hi = DEVID_REV_MSB;
7450 	i_devid->did_rev_lo = DEVID_REV_LSB;
7451 	DEVID_FORMTYPE(i_devid, devid_type);
7452 	DEVID_FORMLEN(i_devid, nbytes);
7453 
7454 	/* Fill in driver name hint */
7455 	driver_name = ddi_driver_name(dip);
7456 	driver_len = strlen(driver_name);
7457 	if (driver_len > DEVID_HINT_SIZE) {
7458 		/* Pick up last four characters of driver name */
7459 		driver_name += driver_len - DEVID_HINT_SIZE;
7460 		driver_len = DEVID_HINT_SIZE;
7461 	}
7462 
7463 	bcopy(driver_name, i_devid->did_driver, driver_len);
7464 
7465 	/* Fill in id field */
7466 	if (devid_type == DEVID_FAB) {
7467 		char		*cp;
7468 		int		hostid;
7469 		char		*hostid_cp = &hw_serial[0];
7470 		struct timeval32 timestamp32;
7471 		int		i;
7472 		int		*ip;
7473 		short		gen;
7474 
7475 		/* increase the generation number */
7476 		mutex_enter(&devid_gen_mutex);
7477 		gen = devid_gen_number++;
7478 		mutex_exit(&devid_gen_mutex);
7479 
7480 		cp = i_devid->did_id;
7481 
7482 		/* Fill in host id (big-endian byte ordering) */
7483 		hostid = stoi(&hostid_cp);
7484 		*cp++ = hibyte(hiword(hostid));
7485 		*cp++ = lobyte(hiword(hostid));
7486 		*cp++ = hibyte(loword(hostid));
7487 		*cp++ = lobyte(loword(hostid));
7488 
7489 		/*
7490 		 * Fill in timestamp (big-endian byte ordering)
7491 		 *
7492 		 * (Note that the format may have to be changed
7493 		 * before 2038 comes around, though it's arguably
7494 		 * unique enough as it is..)
7495 		 */
7496 		uniqtime32(&timestamp32);
7497 		ip = (int *)&timestamp32;
7498 		for (i = 0;
7499 		    i < sizeof (timestamp32) / sizeof (int); i++, ip++) {
7500 			int	val;
7501 			val = *ip;
7502 			*cp++ = hibyte(hiword(val));
7503 			*cp++ = lobyte(hiword(val));
7504 			*cp++ = hibyte(loword(val));
7505 			*cp++ = lobyte(loword(val));
7506 		}
7507 
7508 		/* fill in the generation number */
7509 		*cp++ = hibyte(gen);
7510 		*cp++ = lobyte(gen);
7511 	} else
7512 		bcopy(id, i_devid->did_id, nbytes);
7513 
7514 	/* return device id */
7515 	*ret_devid = (ddi_devid_t)i_devid;
7516 	return (DDI_SUCCESS);
7517 }
7518 
/*
 * Look up the devid string property on 'dip' (preferring the
 * devt-specific value, falling back to DDI_DEV_T_ANY) and decode it
 * into binary form in *ret_devid.  The caller owns the returned devid.
 * Returns DDI_SUCCESS or DDI_FAILURE.
 */
int
i_ddi_devi_get_devid(dev_t dev, dev_info_t *dip, ddi_devid_t *ret_devid)
{
	char		*devidstr;

	ASSERT(dev != DDI_DEV_T_NONE);

	/* look up the property, devt specific first */
	if (ddi_prop_lookup_string(dev, dip, DDI_PROP_DONTPASS,
	    DEVID_PROP_NAME, &devidstr) != DDI_PROP_SUCCESS) {
		if ((dev == DDI_DEV_T_ANY) ||
		    (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip,
			DDI_PROP_DONTPASS, DEVID_PROP_NAME, &devidstr) !=
			DDI_PROP_SUCCESS)) {
				return (DDI_FAILURE);
		}
	}

	/* convert to binary form */
	if (ddi_devid_str_decode(devidstr, ret_devid, NULL) == -1) {
		ddi_prop_free(devidstr);
		return (DDI_FAILURE);
	}
	ddi_prop_free(devidstr);
	return (DDI_SUCCESS);
}
7545 
7546 /*
7547  * Return a copy of the device id for dev_t
7548  */
7549 int
7550 ddi_lyr_get_devid(dev_t dev, ddi_devid_t *ret_devid)
7551 {
7552 	dev_info_t	*dip;
7553 	int		rval;
7554 
7555 	/* get the dip */
7556 	if ((dip = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
7557 		return (DDI_FAILURE);
7558 
7559 	rval = i_ddi_devi_get_devid(dev, dip, ret_devid);
7560 
7561 	ddi_release_devi(dip);		/* e_ddi_hold_devi_by_dev() */
7562 	return (rval);
7563 }
7564 
7565 /*
7566  * Return a copy of the minor name for dev_t and spec_type
7567  */
7568 int
7569 ddi_lyr_get_minor_name(dev_t dev, int spec_type, char **minor_name)
7570 {
7571 	dev_info_t	*dip;
7572 	char		*nm;
7573 	size_t		alloc_sz, sz;
7574 
7575 	if ((dip = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
7576 		return (DDI_FAILURE);
7577 
7578 	mutex_enter(&(DEVI(dip)->devi_lock));
7579 
7580 	if ((nm = i_ddi_devtspectype_to_minorname(dip,
7581 	    dev, spec_type)) == NULL) {
7582 		mutex_exit(&(DEVI(dip)->devi_lock));
7583 		ddi_release_devi(dip);	/* e_ddi_hold_devi_by_dev() */
7584 		return (DDI_FAILURE);
7585 	}
7586 
7587 	/* make a copy */
7588 	alloc_sz = strlen(nm) + 1;
7589 retry:
7590 	/* drop lock to allocate memory */
7591 	mutex_exit(&(DEVI(dip)->devi_lock));
7592 	*minor_name = kmem_alloc(alloc_sz, KM_SLEEP);
7593 	mutex_enter(&(DEVI(dip)->devi_lock));
7594 
7595 	/* re-check things, since we dropped the lock */
7596 	if ((nm = i_ddi_devtspectype_to_minorname(dip,
7597 	    dev, spec_type)) == NULL) {
7598 		mutex_exit(&(DEVI(dip)->devi_lock));
7599 		kmem_free(*minor_name, alloc_sz);
7600 		*minor_name = NULL;
7601 		ddi_release_devi(dip);	/* e_ddi_hold_devi_by_dev() */
7602 		return (DDI_FAILURE);
7603 	}
7604 
7605 	/* verify size is the same */
7606 	sz = strlen(nm) + 1;
7607 	if (alloc_sz != sz) {
7608 		kmem_free(*minor_name, alloc_sz);
7609 		alloc_sz = sz;
7610 		goto retry;
7611 	}
7612 
7613 	/* sz == alloc_sz - make a copy */
7614 	(void) strcpy(*minor_name, nm);
7615 
7616 	mutex_exit(&(DEVI(dip)->devi_lock));
7617 	ddi_release_devi(dip);	/* e_ddi_hold_devi_by_dev() */
7618 	return (DDI_SUCCESS);
7619 }
7620 
7621 int
7622 ddi_lyr_devid_to_devlist(
7623 	ddi_devid_t	devid,
7624 	char		*minor_name,
7625 	int		*retndevs,
7626 	dev_t		**retdevs)
7627 {
7628 	ASSERT(ddi_devid_valid(devid) == DDI_SUCCESS);
7629 
7630 	if (e_devid_cache_to_devt_list(devid, minor_name,
7631 	    retndevs, retdevs) == DDI_SUCCESS) {
7632 		ASSERT(*retndevs > 0);
7633 		DDI_DEBUG_DEVID_DEVTS("ddi_lyr_devid_to_devlist",
7634 			*retndevs, *retdevs);
7635 		return (DDI_SUCCESS);
7636 	}
7637 
7638 	if (e_ddi_devid_discovery(devid) == DDI_FAILURE) {
7639 		return (DDI_FAILURE);
7640 	}
7641 
7642 	if (e_devid_cache_to_devt_list(devid, minor_name,
7643 	    retndevs, retdevs) == DDI_SUCCESS) {
7644 		ASSERT(*retndevs > 0);
7645 		DDI_DEBUG_DEVID_DEVTS("ddi_lyr_devid_to_devlist",
7646 			*retndevs, *retdevs);
7647 		return (DDI_SUCCESS);
7648 	}
7649 
7650 	return (DDI_FAILURE);
7651 }
7652 
7653 void
7654 ddi_lyr_free_devlist(dev_t *devlist, int ndevs)
7655 {
7656 	kmem_free(devlist, sizeof (dev_t) * ndevs);
7657 }
7658 
7659 /*
7660  * Note: This will need to be fixed if we ever allow processes to
7661  * have more than one data model per exec.
7662  */
7663 model_t
7664 ddi_mmap_get_model(void)
7665 {
7666 	return (get_udatamodel());
7667 }
7668 
7669 model_t
7670 ddi_model_convert_from(model_t model)
7671 {
7672 	return ((model & DDI_MODEL_MASK) & ~DDI_MODEL_NATIVE);
7673 }
7674 
7675 /*
7676  * ddi interfaces managing storage and retrieval of eventcookies.
7677  */
7678 
7679 /*
7680  * Invoke bus nexus driver's implementation of the
7681  * (*bus_remove_eventcall)() interface to remove a registered
7682  * callback handler for "event".
7683  */
7684 int
7685 ddi_remove_event_handler(ddi_callback_id_t id)
7686 {
7687 	ndi_event_callbacks_t *cb = (ndi_event_callbacks_t *)id;
7688 	dev_info_t *ddip;
7689 
7690 	ASSERT(cb);
7691 	if (!cb) {
7692 		return (DDI_FAILURE);
7693 	}
7694 
7695 	ddip = NDI_EVENT_DDIP(cb->ndi_evtcb_cookie);
7696 	return (ndi_busop_remove_eventcall(ddip, id));
7697 }
7698 
7699 /*
7700  * Invoke bus nexus driver's implementation of the
7701  * (*bus_add_eventcall)() interface to register a callback handler
7702  * for "event".
7703  */
7704 int
7705 ddi_add_event_handler(dev_info_t *dip, ddi_eventcookie_t event,
7706     void (*handler)(dev_info_t *, ddi_eventcookie_t, void *, void *),
7707     void *arg, ddi_callback_id_t *id)
7708 {
7709 	return (ndi_busop_add_eventcall(dip, dip, event, handler, arg, id));
7710 }
7711 
7712 
7713 /*
7714  * Return a handle for event "name" by calling up the device tree
7715  * hierarchy via  (*bus_get_eventcookie)() interface until claimed
7716  * by a bus nexus or top of dev_info tree is reached.
7717  */
7718 int
7719 ddi_get_eventcookie(dev_info_t *dip, char *name,
7720     ddi_eventcookie_t *event_cookiep)
7721 {
7722 	return (ndi_busop_get_eventcookie(dip, dip,
7723 	    name, event_cookiep));
7724 }
7725 
7726 /*
7727  * single thread access to dev_info node and set state
7728  */
7729 void
7730 i_devi_enter(dev_info_t *dip, uint_t s_mask, uint_t w_mask, int has_lock)
7731 {
7732 	if (!has_lock)
7733 		mutex_enter(&(DEVI(dip)->devi_lock));
7734 
7735 	ASSERT(mutex_owned(&(DEVI(dip)->devi_lock)));
7736 
7737 	/*
7738 	 * wait until state(s) have been changed
7739 	 */
7740 	while ((DEVI(dip)->devi_state & w_mask) != 0) {
7741 		cv_wait(&(DEVI(dip)->devi_cv), &(DEVI(dip)->devi_lock));
7742 	}
7743 	DEVI(dip)->devi_state |= s_mask;
7744 
7745 	if (!has_lock)
7746 		mutex_exit(&(DEVI(dip)->devi_lock));
7747 }
7748 
7749 void
7750 i_devi_exit(dev_info_t *dip, uint_t c_mask, int has_lock)
7751 {
7752 	if (!has_lock)
7753 		mutex_enter(&(DEVI(dip)->devi_lock));
7754 
7755 	ASSERT(mutex_owned(&(DEVI(dip)->devi_lock)));
7756 
7757 	/*
7758 	 * clear the state(s) and wakeup any threads waiting
7759 	 * for state change
7760 	 */
7761 	DEVI(dip)->devi_state &= ~c_mask;
7762 	cv_broadcast(&(DEVI(dip)->devi_cv));
7763 
7764 	if (!has_lock)
7765 		mutex_exit(&(DEVI(dip)->devi_lock));
7766 }
7767 
7768 /*
7769  * This procedure is provided as the general callback function when
7770  * umem_lockmemory calls as_add_callback for long term memory locking.
7771  * When as_unmap, as_setprot, or as_free encounter segments which have
7772  * locked memory, this callback will be invoked.
7773  */
7774 void
7775 umem_lock_undo(struct as *as, void *arg, uint_t event)
7776 {
7777 	_NOTE(ARGUNUSED(as, event))
7778 	struct ddi_umem_cookie *cp = (struct ddi_umem_cookie *)arg;
7779 
7780 	/*
7781 	 * Call the cleanup function.  Decrement the cookie reference
7782 	 * count, if it goes to zero, return the memory for the cookie.
7783 	 * The i_ddi_umem_unlock for this cookie may or may not have been
7784 	 * called already.  It is the responsibility of the caller of
7785 	 * umem_lockmemory to handle the case of the cleanup routine
7786 	 * being called after a ddi_umem_unlock for the cookie
7787 	 * was called.
7788 	 */
7789 
7790 	(*cp->callbacks.cbo_umem_lock_cleanup)((ddi_umem_cookie_t)cp);
7791 
7792 	/* remove the cookie if reference goes to zero */
7793 	if (atomic_add_long_nv((ulong_t *)(&(cp->cook_refcnt)), -1) == 0) {
7794 		kmem_free(cp, sizeof (struct ddi_umem_cookie));
7795 	}
7796 }
7797 
7798 /*
7799  * The following two Consolidation Private routines provide generic
7800  * interfaces to increase/decrease the amount of device-locked memory.
7801  *
7802  * To keep project_rele and project_hold consistent, i_ddi_decr_locked_memory()
7803  * must be called every time i_ddi_incr_locked_memory() is called.
7804  */
7805 int
7806 /* ARGSUSED */
7807 i_ddi_incr_locked_memory(proc_t *procp, task_t *taskp,
7808     kproject_t *projectp, zone_t *zonep, rctl_qty_t inc)
7809 {
7810 	kproject_t *projp;
7811 
7812 	ASSERT(procp);
7813 	ASSERT(mutex_owned(&procp->p_lock));
7814 
7815 	projp = procp->p_task->tk_proj;
7816 	mutex_enter(&umem_devlockmem_rctl_lock);
7817 	/*
7818 	 * Test if the requested memory can be locked without exceeding the
7819 	 * limits.
7820 	 */
7821 	if (rctl_test(rc_project_devlockmem, projp->kpj_rctls,
7822 	    procp, inc, RCA_SAFE) & RCT_DENY) {
7823 		mutex_exit(&umem_devlockmem_rctl_lock);
7824 		return (ENOMEM);
7825 	}
7826 	projp->kpj_data.kpd_devlockmem += inc;
7827 	mutex_exit(&umem_devlockmem_rctl_lock);
7828 	/*
7829 	 * Grab a hold on the project.
7830 	 */
7831 	(void) project_hold(projp);
7832 
7833 	return (0);
7834 }
7835 
7836 /*
7837  * To keep project_rele and project_hold consistent, i_ddi_incr_locked_memory()
7838  * must be called every time i_ddi_decr_locked_memory() is called.
7839  */
7840 /* ARGSUSED */
7841 void
7842 i_ddi_decr_locked_memory(proc_t *procp, task_t *taskp,
7843     kproject_t *projectp, zone_t *zonep, rctl_qty_t dec)
7844 {
7845 	ASSERT(projectp);
7846 
7847 	mutex_enter(&umem_devlockmem_rctl_lock);
7848 	projectp->kpj_data.kpd_devlockmem -= dec;
7849 	mutex_exit(&umem_devlockmem_rctl_lock);
7850 
7851 	/*
7852 	 * Release the project pointer reference accquired in
7853 	 * i_ddi_incr_locked_memory().
7854 	 */
7855 	(void) project_rele(projectp);
7856 }
7857 
7858 /*
7859  * This routine checks if the max-device-locked-memory resource ctl is
7860  * exceeded, if not increments it, grabs a hold on the project.
7861  * Returns 0 if successful otherwise returns error code
7862  */
7863 static int
7864 umem_incr_devlockmem(struct ddi_umem_cookie *cookie)
7865 {
7866 	proc_t		*procp;
7867 	int		ret;
7868 
7869 	ASSERT(cookie);
7870 	procp = cookie->procp;
7871 	ASSERT(procp);
7872 
7873 	mutex_enter(&procp->p_lock);
7874 
7875 	if ((ret = i_ddi_incr_locked_memory(procp, NULL,
7876 		NULL, NULL, cookie->size)) != 0) {
7877 		mutex_exit(&procp->p_lock);
7878 		return (ret);
7879 	}
7880 
7881 	/*
7882 	 * save the project pointer in the
7883 	 * umem cookie, project pointer already
7884 	 * hold in i_ddi_incr_locked_memory
7885 	 */
7886 	cookie->lockmem_proj = (void *)procp->p_task->tk_proj;
7887 	mutex_exit(&procp->p_lock);
7888 
7889 	return (0);
7890 }
7891 
7892 /*
7893  * Decrements the max-device-locked-memory resource ctl and releases
7894  * the hold on the project that was acquired during umem_incr_devlockmem
7895  */
7896 static void
7897 umem_decr_devlockmem(struct ddi_umem_cookie *cookie)
7898 {
7899 	kproject_t	*projp;
7900 
7901 	if (!cookie->lockmem_proj)
7902 		return;
7903 
7904 	projp = (kproject_t *)cookie->lockmem_proj;
7905 	i_ddi_decr_locked_memory(NULL, NULL, projp, NULL, cookie->size);
7906 
7907 	cookie->lockmem_proj = NULL;
7908 }
7909 
7910 /*
7911  * A consolidation private function which is essentially equivalent to
7912  * ddi_umem_lock but with the addition of arguments ops_vector and procp.
7913  * A call to as_add_callback is done if DDI_UMEMLOCK_LONGTERM is set, and
7914  * the ops_vector is valid.
7915  *
7916  * Lock the virtual address range in the current process and create a
7917  * ddi_umem_cookie (of type UMEM_LOCKED). This can be used to pass to
7918  * ddi_umem_iosetup to create a buf or do devmap_umem_setup/remap to export
7919  * to user space.
7920  *
7921  * Note: The resource control accounting currently uses a full charge model
7922  * in other words attempts to lock the same/overlapping areas of memory
7923  * will deduct the full size of the buffer from the projects running
7924  * counter for the device locked memory.
7925  *
7926  * addr, size should be PAGESIZE aligned
7927  *
7928  * flags - DDI_UMEMLOCK_READ, DDI_UMEMLOCK_WRITE or both
7929  *	identifies whether the locked memory will be read or written or both
7930  *      DDI_UMEMLOCK_LONGTERM  must be set when the locking will
7931  * be maintained for an indefinitely long period (essentially permanent),
7932  * rather than for what would be required for a typical I/O completion.
7933  * When DDI_UMEMLOCK_LONGTERM is set, umem_lockmemory will return EFAULT
7934  * if the memory pertains to a regular file which is mapped MAP_SHARED.
7935  * This is to prevent a deadlock if a file truncation is attempted after
7936  * after the locking is done.
7937  *
7938  * Returns 0 on success
7939  *	EINVAL - for invalid parameters
7940  *	EPERM, ENOMEM and other error codes returned by as_pagelock
7941  *	ENOMEM - is returned if the current request to lock memory exceeds
7942  *		project.max-device-locked-memory resource control value.
7943  *      EFAULT - memory pertains to a regular file mapped shared and
7944  *		and DDI_UMEMLOCK_LONGTERM flag is set
7945  *	EAGAIN - could not start the ddi_umem_unlock list processing thread
7946  */
7947 int
7948 umem_lockmemory(caddr_t addr, size_t len, int flags, ddi_umem_cookie_t *cookie,
7949 		struct umem_callback_ops *ops_vector,
7950 		proc_t *procp)
7951 {
7952 	int	error;
7953 	struct ddi_umem_cookie *p;
7954 	void	(*driver_callback)() = NULL;
7955 	struct as *as = procp->p_as;
7956 	struct seg		*seg;
7957 	vnode_t			*vp;
7958 
7959 	*cookie = NULL;		/* in case of any error return */
7960 
7961 	/* These are the only three valid flags */
7962 	if ((flags & ~(DDI_UMEMLOCK_READ | DDI_UMEMLOCK_WRITE |
7963 	    DDI_UMEMLOCK_LONGTERM)) != 0)
7964 		return (EINVAL);
7965 
7966 	/* At least one (can be both) of the two access flags must be set */
7967 	if ((flags & (DDI_UMEMLOCK_READ | DDI_UMEMLOCK_WRITE)) == 0)
7968 		return (EINVAL);
7969 
7970 	/* addr and len must be page-aligned */
7971 	if (((uintptr_t)addr & PAGEOFFSET) != 0)
7972 		return (EINVAL);
7973 
7974 	if ((len & PAGEOFFSET) != 0)
7975 		return (EINVAL);
7976 
7977 	/*
7978 	 * For longterm locking a driver callback must be specified; if
7979 	 * not longterm then a callback is optional.
7980 	 */
7981 	if (ops_vector != NULL) {
7982 		if (ops_vector->cbo_umem_callback_version !=
7983 		    UMEM_CALLBACK_VERSION)
7984 			return (EINVAL);
7985 		else
7986 			driver_callback = ops_vector->cbo_umem_lock_cleanup;
7987 	}
7988 	if ((driver_callback == NULL) && (flags & DDI_UMEMLOCK_LONGTERM))
7989 		return (EINVAL);
7990 
7991 	/*
7992 	 * Call i_ddi_umem_unlock_thread_start if necessary.  It will
7993 	 * be called on first ddi_umem_lock or umem_lockmemory call.
7994 	 */
7995 	if (ddi_umem_unlock_thread == NULL)
7996 		i_ddi_umem_unlock_thread_start();
7997 
7998 	/* Allocate memory for the cookie */
7999 	p = kmem_zalloc(sizeof (struct ddi_umem_cookie), KM_SLEEP);
8000 
8001 	/* Convert the flags to seg_rw type */
8002 	if (flags & DDI_UMEMLOCK_WRITE) {
8003 		p->s_flags = S_WRITE;
8004 	} else {
8005 		p->s_flags = S_READ;
8006 	}
8007 
8008 	/* Store procp in cookie for later iosetup/unlock */
8009 	p->procp = (void *)procp;
8010 
8011 	/*
8012 	 * Store the struct as pointer in cookie for later use by
8013 	 * ddi_umem_unlock.  The proc->p_as will be stale if ddi_umem_unlock
8014 	 * is called after relvm is called.
8015 	 */
8016 	p->asp = as;
8017 
8018 	/*
8019 	 * The size field is needed for lockmem accounting.
8020 	 */
8021 	p->size = len;
8022 
8023 	if (umem_incr_devlockmem(p) != 0) {
8024 		/*
8025 		 * The requested memory cannot be locked
8026 		 */
8027 		kmem_free(p, sizeof (struct ddi_umem_cookie));
8028 		*cookie = (ddi_umem_cookie_t)NULL;
8029 		return (ENOMEM);
8030 	}
8031 	/*
8032 	 * umem_incr_devlockmem stashes the project ptr into the
8033 	 * cookie. This is needed during unlock since that can
8034 	 * happen in a non-USER context
8035 	 */
8036 	ASSERT(p->lockmem_proj);
8037 
8038 	/* Lock the pages corresponding to addr, len in memory */
8039 	error = as_pagelock(as, &(p->pparray), addr, len, p->s_flags);
8040 	if (error != 0) {
8041 		umem_decr_devlockmem(p);
8042 		kmem_free(p, sizeof (struct ddi_umem_cookie));
8043 		*cookie = (ddi_umem_cookie_t)NULL;
8044 		return (error);
8045 	}
8046 
8047 	/*
8048 	 * For longterm locking the addr must pertain to a seg_vn segment or
8049 	 * or a seg_spt segment.
8050 	 * If the segment pertains to a regular file, it cannot be
8051 	 * mapped MAP_SHARED.
8052 	 * This is to prevent a deadlock if a file truncation is attempted
8053 	 * after the locking is done.
8054 	 * Doing this after as_pagelock guarantees persistence of the as; if
8055 	 * an unacceptable segment is found, the cleanup includes calling
8056 	 * as_pageunlock before returning EFAULT.
8057 	 */
8058 	if (flags & DDI_UMEMLOCK_LONGTERM) {
8059 		extern  struct seg_ops segspt_shmops;
8060 		AS_LOCK_ENTER(as, &as->a_lock, RW_READER);
8061 		for (seg = as_segat(as, addr); ; seg = AS_SEGNEXT(as, seg)) {
8062 			if (seg == NULL || seg->s_base > addr + len)
8063 				break;
8064 			if (((seg->s_ops != &segvn_ops) &&
8065 			    (seg->s_ops != &segspt_shmops)) ||
8066 			    ((SEGOP_GETVP(seg, addr, &vp) == 0 &&
8067 			    vp != NULL && vp->v_type == VREG) &&
8068 			    (SEGOP_GETTYPE(seg, addr) & MAP_SHARED))) {
8069 				as_pageunlock(as, p->pparray,
8070 						addr, len, p->s_flags);
8071 				AS_LOCK_EXIT(as, &as->a_lock);
8072 				umem_decr_devlockmem(p);
8073 				kmem_free(p, sizeof (struct ddi_umem_cookie));
8074 				*cookie = (ddi_umem_cookie_t)NULL;
8075 				return (EFAULT);
8076 			}
8077 		}
8078 		AS_LOCK_EXIT(as, &as->a_lock);
8079 	}
8080 
8081 
8082 	/* Initialize the fields in the ddi_umem_cookie */
8083 	p->cvaddr = addr;
8084 	p->type = UMEM_LOCKED;
8085 	if (driver_callback != NULL) {
8086 		/* i_ddi_umem_unlock and umem_lock_undo may need the cookie */
8087 		p->cook_refcnt = 2;
8088 		p->callbacks = *ops_vector;
8089 	} else {
8090 		/* only i_ddi_umme_unlock needs the cookie */
8091 		p->cook_refcnt = 1;
8092 	}
8093 
8094 	*cookie = (ddi_umem_cookie_t)p;
8095 
8096 	/*
8097 	 * If a driver callback was specified, add an entry to the
8098 	 * as struct callback list. The as_pagelock above guarantees
8099 	 * the persistence of as.
8100 	 */
8101 	if (driver_callback) {
8102 		error = as_add_callback(as, umem_lock_undo, p, AS_ALL_EVENT,
8103 						addr, len, KM_SLEEP);
8104 		if (error != 0) {
8105 			as_pageunlock(as, p->pparray,
8106 					addr, len, p->s_flags);
8107 			umem_decr_devlockmem(p);
8108 			kmem_free(p, sizeof (struct ddi_umem_cookie));
8109 			*cookie = (ddi_umem_cookie_t)NULL;
8110 		}
8111 	}
8112 	return (error);
8113 }
8114 
8115 /*
8116  * Unlock the pages locked by ddi_umem_lock or umem_lockmemory and free
8117  * the cookie.  Called from i_ddi_umem_unlock_thread.
8118  */
8119 
8120 static void
8121 i_ddi_umem_unlock(struct ddi_umem_cookie *p)
8122 {
8123 	uint_t	rc;
8124 
8125 	/*
8126 	 * There is no way to determine whether a callback to
8127 	 * umem_lock_undo was registered via as_add_callback.
8128 	 * (i.e. umem_lockmemory was called with DDI_MEMLOCK_LONGTERM and
8129 	 * a valid callback function structure.)  as_delete_callback
8130 	 * is called to delete a possible registered callback.  If the
8131 	 * return from as_delete_callbacks is AS_CALLBACK_DELETED, it
8132 	 * indicates that there was a callback registered, and that is was
8133 	 * successfully deleted.  Thus, the cookie reference count
8134 	 * will never be decremented by umem_lock_undo.  Just return the
8135 	 * memory for the cookie, since both users of the cookie are done.
8136 	 * A return of AS_CALLBACK_NOTFOUND indicates a callback was
8137 	 * never registered.  A return of AS_CALLBACK_DELETE_DEFERRED
8138 	 * indicates that callback processing is taking place and, and
8139 	 * umem_lock_undo is, or will be, executing, and thus decrementing
8140 	 * the cookie reference count when it is complete.
8141 	 *
8142 	 * This needs to be done before as_pageunlock so that the
8143 	 * persistence of as is guaranteed because of the locked pages.
8144 	 *
8145 	 */
8146 	rc = as_delete_callback(p->asp, p);
8147 
8148 
8149 	/*
8150 	 * The proc->p_as will be stale if i_ddi_umem_unlock is called
8151 	 * after relvm is called so use p->asp.
8152 	 */
8153 	as_pageunlock(p->asp, p->pparray, p->cvaddr, p->size, p->s_flags);
8154 
8155 	/*
8156 	 * Now that we have unlocked the memory decrement the
8157 	 * max-device-locked-memory rctl
8158 	 */
8159 	umem_decr_devlockmem(p);
8160 
8161 	if (rc == AS_CALLBACK_DELETED) {
8162 		/* umem_lock_undo will not happen, return the cookie memory */
8163 		ASSERT(p->cook_refcnt == 2);
8164 		kmem_free(p, sizeof (struct ddi_umem_cookie));
8165 	} else {
8166 		/*
8167 		 * umem_undo_lock may happen if as_delete_callback returned
8168 		 * AS_CALLBACK_DELETE_DEFERRED.  In that case, decrement the
8169 		 * reference count, atomically, and return the cookie
8170 		 * memory if the reference count goes to zero.  The only
8171 		 * other value for rc is AS_CALLBACK_NOTFOUND.  In that
8172 		 * case, just return the cookie memory.
8173 		 */
8174 		if ((rc != AS_CALLBACK_DELETE_DEFERRED) ||
8175 		    (atomic_add_long_nv((ulong_t *)(&(p->cook_refcnt)), -1)
8176 		    == 0)) {
8177 			kmem_free(p, sizeof (struct ddi_umem_cookie));
8178 		}
8179 	}
8180 }
8181 
8182 /*
8183  * i_ddi_umem_unlock_thread - deferred ddi_umem_unlock list handler.
8184  *
8185  * Call i_ddi_umem_unlock for entries in the ddi_umem_unlock list
8186  * until it is empty.  Then, wait for more to be added.  This thread is awoken
8187  * via calls to ddi_umem_unlock.
8188  */
8189 
8190 static void
8191 i_ddi_umem_unlock_thread(void)
8192 {
8193 	struct ddi_umem_cookie	*ret_cookie;
8194 	callb_cpr_t	cprinfo;
8195 
8196 	/* process the ddi_umem_unlock list */
8197 	CALLB_CPR_INIT(&cprinfo, &ddi_umem_unlock_mutex,
8198 	    callb_generic_cpr, "unlock_thread");
8199 	for (;;) {
8200 		mutex_enter(&ddi_umem_unlock_mutex);
8201 		if (ddi_umem_unlock_head != NULL) {	/* list not empty */
8202 			ret_cookie = ddi_umem_unlock_head;
8203 			/* take if off the list */
8204 			if ((ddi_umem_unlock_head =
8205 			    ddi_umem_unlock_head->unl_forw) == NULL) {
8206 				ddi_umem_unlock_tail = NULL;
8207 			}
8208 			mutex_exit(&ddi_umem_unlock_mutex);
8209 			/* unlock the pages in this cookie */
8210 			(void) i_ddi_umem_unlock(ret_cookie);
8211 		} else {   /* list is empty, wait for next ddi_umem_unlock */
8212 			CALLB_CPR_SAFE_BEGIN(&cprinfo);
8213 			cv_wait(&ddi_umem_unlock_cv, &ddi_umem_unlock_mutex);
8214 			CALLB_CPR_SAFE_END(&cprinfo, &ddi_umem_unlock_mutex);
8215 			mutex_exit(&ddi_umem_unlock_mutex);
8216 		}
8217 	}
8218 	/* ddi_umem_unlock_thread does not exit */
8219 	/* NOTREACHED */
8220 }
8221 
8222 /*
8223  * Start the thread that will process the ddi_umem_unlock list if it is
8224  * not already started (i_ddi_umem_unlock_thread).
8225  */
8226 static void
8227 i_ddi_umem_unlock_thread_start(void)
8228 {
8229 	mutex_enter(&ddi_umem_unlock_mutex);
8230 	if (ddi_umem_unlock_thread == NULL) {
8231 		ddi_umem_unlock_thread = thread_create(NULL, 0,
8232 		    i_ddi_umem_unlock_thread, NULL, 0, &p0,
8233 		    TS_RUN, minclsyspri);
8234 	}
8235 	mutex_exit(&ddi_umem_unlock_mutex);
8236 }
8237 
8238 /*
8239  * Lock the virtual address range in the current process and create a
8240  * ddi_umem_cookie (of type UMEM_LOCKED). This can be used to pass to
8241  * ddi_umem_iosetup to create a buf or do devmap_umem_setup/remap to export
8242  * to user space.
8243  *
8244  * Note: The resource control accounting currently uses a full charge model
8245  * in other words attempts to lock the same/overlapping areas of memory
8246  * will deduct the full size of the buffer from the projects running
8247  * counter for the device locked memory. This applies to umem_lockmemory too.
8248  *
8249  * addr, size should be PAGESIZE aligned
8250  * flags - DDI_UMEMLOCK_READ, DDI_UMEMLOCK_WRITE or both
8251  *	identifies whether the locked memory will be read or written or both
8252  *
8253  * Returns 0 on success
8254  *	EINVAL - for invalid parameters
8255  *	EPERM, ENOMEM and other error codes returned by as_pagelock
8256  *	ENOMEM - is returned if the current request to lock memory exceeds
8257  *		project.max-device-locked-memory resource control value.
8258  *	EAGAIN - could not start the ddi_umem_unlock list processing thread
8259  */
8260 int
8261 ddi_umem_lock(caddr_t addr, size_t len, int flags, ddi_umem_cookie_t *cookie)
8262 {
8263 	int	error;
8264 	struct ddi_umem_cookie *p;
8265 
8266 	*cookie = NULL;		/* in case of any error return */
8267 
8268 	/* These are the only two valid flags */
8269 	if ((flags & ~(DDI_UMEMLOCK_READ | DDI_UMEMLOCK_WRITE)) != 0) {
8270 		return (EINVAL);
8271 	}
8272 
8273 	/* At least one of the two flags (or both) must be set */
8274 	if ((flags & (DDI_UMEMLOCK_READ | DDI_UMEMLOCK_WRITE)) == 0) {
8275 		return (EINVAL);
8276 	}
8277 
8278 	/* addr and len must be page-aligned */
8279 	if (((uintptr_t)addr & PAGEOFFSET) != 0) {
8280 		return (EINVAL);
8281 	}
8282 
8283 	if ((len & PAGEOFFSET) != 0) {
8284 		return (EINVAL);
8285 	}
8286 
8287 	/*
8288 	 * Call i_ddi_umem_unlock_thread_start if necessary.  It will
8289 	 * be called on first ddi_umem_lock or umem_lockmemory call.
8290 	 */
8291 	if (ddi_umem_unlock_thread == NULL)
8292 		i_ddi_umem_unlock_thread_start();
8293 
8294 	/* Allocate memory for the cookie */
8295 	p = kmem_zalloc(sizeof (struct ddi_umem_cookie), KM_SLEEP);
8296 
8297 	/* Convert the flags to seg_rw type */
8298 	if (flags & DDI_UMEMLOCK_WRITE) {
8299 		p->s_flags = S_WRITE;
8300 	} else {
8301 		p->s_flags = S_READ;
8302 	}
8303 
8304 	/* Store curproc in cookie for later iosetup/unlock */
8305 	p->procp = (void *)curproc;
8306 
8307 	/*
8308 	 * Store the struct as pointer in cookie for later use by
8309 	 * ddi_umem_unlock.  The proc->p_as will be stale if ddi_umem_unlock
8310 	 * is called after relvm is called.
8311 	 */
8312 	p->asp = curproc->p_as;
8313 	/*
8314 	 * The size field is needed for lockmem accounting.
8315 	 */
8316 	p->size = len;
8317 
8318 	if (umem_incr_devlockmem(p) != 0) {
8319 		/*
8320 		 * The requested memory cannot be locked
8321 		 */
8322 		kmem_free(p, sizeof (struct ddi_umem_cookie));
8323 		*cookie = (ddi_umem_cookie_t)NULL;
8324 		return (ENOMEM);
8325 	}
8326 	/*
8327 	 * umem_incr_devlockmem stashes the project ptr into the
8328 	 * cookie. This is needed during unlock since that can
8329 	 * happen in a non-USER context
8330 	 */
8331 	ASSERT(p->lockmem_proj);
8332 
8333 	/* Lock the pages corresponding to addr, len in memory */
8334 	error = as_pagelock(((proc_t *)p->procp)->p_as, &(p->pparray),
8335 	    addr, len, p->s_flags);
8336 	if (error != 0) {
8337 		umem_decr_devlockmem(p);
8338 		kmem_free(p, sizeof (struct ddi_umem_cookie));
8339 		*cookie = (ddi_umem_cookie_t)NULL;
8340 		return (error);
8341 	}
8342 
8343 	/* Initialize the fields in the ddi_umem_cookie */
8344 	p->cvaddr = addr;
8345 	p->type = UMEM_LOCKED;
8346 	p->cook_refcnt = 1;
8347 
8348 	*cookie = (ddi_umem_cookie_t)p;
8349 	return (error);
8350 }
8351 
8352 /*
8353  * Add the cookie to the ddi_umem_unlock list.  Pages will be
8354  * unlocked by i_ddi_umem_unlock_thread.
8355  */
8356 
8357 void
8358 ddi_umem_unlock(ddi_umem_cookie_t cookie)
8359 {
8360 	struct ddi_umem_cookie	*p = (struct ddi_umem_cookie *)cookie;
8361 
8362 	ASSERT(p->type == UMEM_LOCKED);
8363 	ASSERT(CPU_ON_INTR(CPU) == 0); /* cannot be high level */
8364 	ASSERT(ddi_umem_unlock_thread != NULL);
8365 
8366 	p->unl_forw = (struct ddi_umem_cookie *)NULL;	/* end of list */
8367 	mutex_enter(&ddi_umem_unlock_mutex);
8368 	if (ddi_umem_unlock_head == NULL) {
8369 		ddi_umem_unlock_head = ddi_umem_unlock_tail = p;
8370 		cv_broadcast(&ddi_umem_unlock_cv);
8371 	} else {
8372 		ddi_umem_unlock_tail->unl_forw = p;
8373 		ddi_umem_unlock_tail = p;
8374 	}
8375 	mutex_exit(&ddi_umem_unlock_mutex);
8376 }
8377 
8378 /*
8379  * Create a buf structure from a ddi_umem_cookie
8380  * cookie - is a ddi_umem_cookie for from ddi_umem_lock and ddi_umem_alloc
8381  *		(only UMEM_LOCKED & KMEM_NON_PAGEABLE types supported)
8382  * off, len - identifies the portion of the memory represented by the cookie
8383  *		that the buf points to.
8384  *	NOTE: off, len need to follow the alignment/size restrictions of the
8385  *		device (dev) that this buf will be passed to. Some devices
8386  *		will accept unrestricted alignment/size, whereas others (such as
8387  *		st) require some block-size alignment/size. It is the caller's
8388  *		responsibility to ensure that the alignment/size restrictions
8389  *		are met (we cannot assert as we do not know the restrictions)
8390  *
8391  * direction - is one of B_READ or B_WRITE and needs to be compatible with
8392  *		the flags used in ddi_umem_lock
8393  *
8394  * The following three arguments are used to initialize fields in the
8395  * buf structure and are uninterpreted by this routine.
8396  *
8397  * dev
8398  * blkno
8399  * iodone
8400  *
8401  * sleepflag - is one of DDI_UMEM_SLEEP or DDI_UMEM_NOSLEEP
8402  *
8403  * Returns a buf structure pointer on success (to be freed by freerbuf)
8404  *	NULL on any parameter error or memory alloc failure
8405  *
8406  */
8407 struct buf *
8408 ddi_umem_iosetup(ddi_umem_cookie_t cookie, off_t off, size_t len,
8409 	int direction, dev_t dev, daddr_t blkno,
8410 	int (*iodone)(struct buf *), int sleepflag)
8411 {
8412 	struct ddi_umem_cookie *p = (struct ddi_umem_cookie *)cookie;
8413 	struct buf *bp;
8414 
8415 	/*
8416 	 * check for valid cookie offset, len
8417 	 */
8418 	if ((off + len) > p->size) {
8419 		return (NULL);
8420 	}
8421 
8422 	if (len > p->size) {
8423 		return (NULL);
8424 	}
8425 
8426 	/* direction has to be one of B_READ or B_WRITE */
8427 	if ((direction != B_READ) && (direction != B_WRITE)) {
8428 		return (NULL);
8429 	}
8430 
8431 	/* These are the only two valid sleepflags */
8432 	if ((sleepflag != DDI_UMEM_SLEEP) && (sleepflag != DDI_UMEM_NOSLEEP)) {
8433 		return (NULL);
8434 	}
8435 
8436 	/*
8437 	 * Only cookies of type UMEM_LOCKED and KMEM_NON_PAGEABLE are supported
8438 	 */
8439 	if ((p->type != UMEM_LOCKED) && (p->type != KMEM_NON_PAGEABLE)) {
8440 		return (NULL);
8441 	}
8442 
8443 	/* If type is KMEM_NON_PAGEABLE procp is NULL */
8444 	ASSERT((p->type == KMEM_NON_PAGEABLE) ?
8445 		(p->procp == NULL) : (p->procp != NULL));
8446 
8447 	bp = kmem_alloc(sizeof (struct buf), sleepflag);
8448 	if (bp == NULL) {
8449 		return (NULL);
8450 	}
8451 	bioinit(bp);
8452 
8453 	bp->b_flags = B_BUSY | B_PHYS | direction;
8454 	bp->b_edev = dev;
8455 	bp->b_lblkno = blkno;
8456 	bp->b_iodone = iodone;
8457 	bp->b_bcount = len;
8458 	bp->b_proc = (proc_t *)p->procp;
8459 	ASSERT(((uintptr_t)(p->cvaddr) & PAGEOFFSET) == 0);
8460 	bp->b_un.b_addr = (caddr_t)((uintptr_t)(p->cvaddr) + off);
8461 	if (p->pparray != NULL) {
8462 		bp->b_flags |= B_SHADOW;
8463 		ASSERT(((uintptr_t)(p->cvaddr) & PAGEOFFSET) == 0);
8464 		bp->b_shadow = p->pparray + btop(off);
8465 	}
8466 	return (bp);
8467 }
8468 
8469 /*
8470  * Fault-handling and related routines
8471  */
8472 
8473 ddi_devstate_t
8474 ddi_get_devstate(dev_info_t *dip)
8475 {
8476 	if (DEVI_IS_DEVICE_OFFLINE(dip))
8477 		return (DDI_DEVSTATE_OFFLINE);
8478 	else if (DEVI_IS_DEVICE_DOWN(dip) || DEVI_IS_BUS_DOWN(dip))
8479 		return (DDI_DEVSTATE_DOWN);
8480 	else if (DEVI_IS_BUS_QUIESCED(dip))
8481 		return (DDI_DEVSTATE_QUIESCED);
8482 	else if (DEVI_IS_DEVICE_DEGRADED(dip))
8483 		return (DDI_DEVSTATE_DEGRADED);
8484 	else
8485 		return (DDI_DEVSTATE_UP);
8486 }
8487 
/*
 * Report a device fault: package the details into a fault-event-data
 * structure and post it as a DDI_DEVI_FAULT_EVENT.  If no ancestor
 * defines the fault event cookie, the report is silently dropped.
 */
void
ddi_dev_report_fault(dev_info_t *dip, ddi_fault_impact_t impact,
	ddi_fault_location_t location, const char *message)
{
	struct ddi_fault_event_data fd;
	ddi_eventcookie_t ec;

	/*
	 * Assemble all the information into a fault-event-data structure
	 */
	fd.f_dip = dip;
	fd.f_impact = impact;
	fd.f_location = location;
	fd.f_message = message;
	fd.f_oldstate = ddi_get_devstate(dip);

	/*
	 * Get eventcookie from defining parent.
	 */
	if (ddi_get_eventcookie(dip, DDI_DEVI_FAULT_EVENT, &ec) !=
	    DDI_SUCCESS)
		return;

	(void) ndi_post_event(dip, dip, ec, &fd);
}
8513 
/*
 * Return the device-class string recorded on the devinfo node,
 * or NULL if no class has been set via i_ddi_set_devi_class().
 */
char *
i_ddi_devi_class(dev_info_t *dip)
{
	return (DEVI(dip)->devi_device_class);
}
8519 
8520 int
8521 i_ddi_set_devi_class(dev_info_t *dip, char *devi_class, int flag)
8522 {
8523 	struct dev_info *devi = DEVI(dip);
8524 
8525 	mutex_enter(&devi->devi_lock);
8526 
8527 	if (devi->devi_device_class)
8528 		kmem_free(devi->devi_device_class,
8529 		    strlen(devi->devi_device_class) + 1);
8530 
8531 	if ((devi->devi_device_class = i_ddi_strdup(devi_class, flag))
8532 	    != NULL) {
8533 		mutex_exit(&devi->devi_lock);
8534 		return (DDI_SUCCESS);
8535 	}
8536 
8537 	mutex_exit(&devi->devi_lock);
8538 
8539 	return (DDI_FAILURE);
8540 }
8541 
8542 
8543 /*
8544  * Task Queues DDI interfaces.
8545  */
8546 
8547 /* ARGSUSED */
8548 ddi_taskq_t *
8549 ddi_taskq_create(dev_info_t *dip, const char *name, int nthreads,
8550     pri_t pri, uint_t cflags)
8551 {
8552 	char full_name[TASKQ_NAMELEN];
8553 	const char *tq_name;
8554 	int nodeid = 0;
8555 
8556 	if (dip == NULL)
8557 		tq_name = name;
8558 	else {
8559 		nodeid = ddi_get_instance(dip);
8560 
8561 		if (name == NULL)
8562 			name = "tq";
8563 
8564 		(void) snprintf(full_name, sizeof (full_name), "%s_%s",
8565 		    ddi_driver_name(dip), name);
8566 
8567 		tq_name = full_name;
8568 	}
8569 
8570 	return ((ddi_taskq_t *)taskq_create_instance(tq_name, nodeid, nthreads,
8571 		    pri == TASKQ_DEFAULTPRI ? minclsyspri : pri,
8572 		    nthreads, INT_MAX, TASKQ_PREPOPULATE));
8573 }
8574 
/*
 * Destroy a task queue created by ddi_taskq_create().
 */
void
ddi_taskq_destroy(ddi_taskq_t *tq)
{
	taskq_destroy((taskq_t *)tq);
}
8580 
8581 int
8582 ddi_taskq_dispatch(ddi_taskq_t *tq, void (* func)(void *),
8583     void *arg, uint_t dflags)
8584 {
8585 	taskqid_t id = taskq_dispatch((taskq_t *)tq, func, arg,
8586 	    dflags == DDI_SLEEP ? TQ_SLEEP : TQ_NOSLEEP);
8587 
8588 	return (id != 0 ? DDI_SUCCESS : DDI_FAILURE);
8589 }
8590 
/*
 * Wait for all previously dispatched tasks on the queue to complete.
 */
void
ddi_taskq_wait(ddi_taskq_t *tq)
{
	taskq_wait((taskq_t *)tq);
}
8596 
/*
 * Suspend execution of tasks on the queue.
 */
void
ddi_taskq_suspend(ddi_taskq_t *tq)
{
	taskq_suspend((taskq_t *)tq);
}
8602 
/*
 * Return B_TRUE if the task queue is currently suspended.
 */
boolean_t
ddi_taskq_suspended(ddi_taskq_t *tq)
{
	return (taskq_suspended((taskq_t *)tq));
}
8608 
/*
 * Resume execution of tasks on a suspended queue.
 */
void
ddi_taskq_resume(ddi_taskq_t *tq)
{
	taskq_resume((taskq_t *)tq);
}
8614 
/*
 * Split an interface name of the form "<driver><unit>" (e.g. "eri0")
 * into its alphanumeric prefix (copied into alnum, which must be large
 * enough) and its trailing unit number (stored in *nump).
 * Returns DDI_FAILURE if the name is all digits, has no trailing
 * digits, or the number does not parse.
 */
int
ddi_parse(
	const char	*ifname,
	char		*alnum,
	uint_t		*nump)
{
	const char	*p;
	int		l;
	ulong_t		num;
	boolean_t	nonum = B_TRUE;	/* no trailing digits seen yet */
	char		c;

	l = strlen(ifname);
	/*
	 * Scan backwards from the end; l tracks the number of
	 * characters up to and including *p.
	 */
	for (p = ifname + l; p != ifname; l--) {
		c = *--p;
		if (!isdigit(c)) {
			/* copy the l-char prefix (strlcpy needs room for \0) */
			(void) strlcpy(alnum, ifname, l + 1);
			/* parse the digits after p; nonzero means failure */
			if (ddi_strtoul(p + 1, NULL, 10, &num) != 0)
				return (DDI_FAILURE);
			break;
		}
		nonum = B_FALSE;
	}
	/* all digits (l == 0) or no digits at all (nonum) is invalid */
	if (l == 0 || nonum)
		return (DDI_FAILURE);

	*nump = num;
	return (DDI_SUCCESS);
}
8644