/*-
 * Copyright (c) 2015-2016 Landon Fuller <landon@landonf.org>
 * Copyright (c) 2017 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by Landon Fuller
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
 *    redistribution must be conditioned upon including a substantially
 *    similar Disclaimer requirement for further binary redistribution.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTABILITY
 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGES.
 */

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/refcount.h>
#include <sys/systm.h>

#include <machine/bus.h>

#include <dev/bhnd/cores/chipc/chipc.h>
#include <dev/bhnd/cores/chipc/pwrctl/bhnd_pwrctl.h>

#include "siba_eromvar.h"

#include "sibareg.h"
#include "sibavar.h"

/* RID used when allocating EROM resources */
#define	SIBA_EROM_RID	0

static bhnd_erom_class_t *
siba_get_erom_class(driver_t *driver)
{
	return (&siba_erom_parser);
}

int
siba_probe(device_t dev)
{
	device_set_desc(dev, "SIBA BHND bus");
	return (BUS_PROBE_DEFAULT);
}

/**
 * Default siba(4) bus driver implementation of DEVICE_ATTACH().
 *
 * This implementation initializes internal siba(4) state and performs
 * bus enumeration, and must be called by subclassing drivers in
 * DEVICE_ATTACH() before any other bus methods.
 */
int
siba_attach(device_t dev)
{
	struct siba_softc	*sc;
	int			 error;

	sc = device_get_softc(dev);
	sc->dev = dev;

	SIBA_LOCK_INIT(sc);

	/* Enumerate children */
	if ((error = siba_add_children(dev))) {
		device_delete_children(dev);
		SIBA_LOCK_DESTROY(sc);
		return (error);
	}

	return (0);
}
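
/*
 * Example (hypothetical subclassing driver): a chipset-specific bus driver
 * would chain to siba_attach() from its own DEVICE_ATTACH() implementation
 * before invoking any other bus methods:
 *
 *	static int
 *	my_siba_attach(device_t dev)	// hypothetical subclass driver
 *	{
 *		int error;
 *
 *		// Initialize siba(4) state and enumerate cores first
 *		if ((error = siba_attach(dev)))
 *			return (error);
 *
 *		// ... subclass-specific setup would follow here ...
 *		return (0);
 *	}
 */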

int
siba_detach(device_t dev)
{
	struct siba_softc	*sc;
	int			 error;

	sc = device_get_softc(dev);

	if ((error = bhnd_generic_detach(dev)))
		return (error);

	SIBA_LOCK_DESTROY(sc);

	return (0);
}

int
siba_resume(device_t dev)
{
	return (bhnd_generic_resume(dev));
}

int
siba_suspend(device_t dev)
{
	return (bhnd_generic_suspend(dev));
}

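/**
 * Default implementation of BUS_READ_IVAR().
 *
 * Returns the bhnd(4) instance variables for @p child from its cached
 * core identification; the BHND_IVAR_PMU_INFO pointer is returned under
 * the bus lock.
 */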
static int
siba_read_ivar(device_t dev, device_t child, int index, uintptr_t *result)
{
	struct siba_softc		*sc;
	const struct siba_devinfo	*dinfo;
	const struct bhnd_core_info	*cfg;

	sc = device_get_softc(dev);
	dinfo = device_get_ivars(child);
	cfg = &dinfo->core_id.core_info;

	switch (index) {
	case BHND_IVAR_VENDOR:
		*result = cfg->vendor;
		return (0);
	case BHND_IVAR_DEVICE:
		*result = cfg->device;
		return (0);
	case BHND_IVAR_HWREV:
		*result = cfg->hwrev;
		return (0);
	case BHND_IVAR_DEVICE_CLASS:
		*result = bhnd_core_class(cfg);
		return (0);
	case BHND_IVAR_VENDOR_NAME:
		*result = (uintptr_t) bhnd_vendor_name(cfg->vendor);
		return (0);
	case BHND_IVAR_DEVICE_NAME:
		*result = (uintptr_t) bhnd_core_name(cfg);
		return (0);
	case BHND_IVAR_CORE_INDEX:
		*result = cfg->core_idx;
		return (0);
	case BHND_IVAR_CORE_UNIT:
		*result = cfg->unit;
		return (0);
	case BHND_IVAR_PMU_INFO:
		SIBA_LOCK(sc);
		switch (dinfo->pmu_state) {
		case SIBA_PMU_NONE:
			*result = (uintptr_t)NULL;
			SIBA_UNLOCK(sc);
			return (0);

		case SIBA_PMU_BHND:
			*result = (uintptr_t)dinfo->pmu.bhnd_info;
			SIBA_UNLOCK(sc);
			return (0);

		case SIBA_PMU_PWRCTL:
		case SIBA_PMU_FIXED:
			*result = (uintptr_t)NULL;
			SIBA_UNLOCK(sc);
			return (0);
		}

		panic("invalid PMU state: %d", dinfo->pmu_state);
		return (ENXIO);

	default:
		return (ENOENT);
	}
}

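/**
 * Default implementation of BUS_WRITE_IVAR().
 *
 * Only BHND_IVAR_PMU_INFO may be written (by the generic bhnd(4) PMU
 * support); all core identification ivars are read-only.
 */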
static int
siba_write_ivar(device_t dev, device_t child, int index, uintptr_t value)
{
	struct siba_softc	*sc;
	struct siba_devinfo	*dinfo;

	sc = device_get_softc(dev);
	dinfo = device_get_ivars(child);

	switch (index) {
	case BHND_IVAR_VENDOR:
	case BHND_IVAR_DEVICE:
	case BHND_IVAR_HWREV:
	case BHND_IVAR_DEVICE_CLASS:
	case BHND_IVAR_VENDOR_NAME:
	case BHND_IVAR_DEVICE_NAME:
	case BHND_IVAR_CORE_INDEX:
	case BHND_IVAR_CORE_UNIT:
		return (EINVAL);
	case BHND_IVAR_PMU_INFO:
		SIBA_LOCK(sc);
		switch (dinfo->pmu_state) {
		case SIBA_PMU_NONE:
		case SIBA_PMU_BHND:
			dinfo->pmu.bhnd_info = (void *)value;
			dinfo->pmu_state = SIBA_PMU_BHND;
			SIBA_UNLOCK(sc);
			return (0);

		case SIBA_PMU_PWRCTL:
		case SIBA_PMU_FIXED:
			panic("bhnd_set_pmu_info() called with siba PMU state "
			    "%d", dinfo->pmu_state);
			return (ENXIO);
		}

		panic("invalid PMU state: %d", dinfo->pmu_state);
		return (ENXIO);

	default:
		return (ENOENT);
	}
}

static struct resource_list *
siba_get_resource_list(device_t dev, device_t child)
{
	struct siba_devinfo *dinfo = device_get_ivars(child);
	return (&dinfo->resources);
}

/* BHND_BUS_ALLOC_PMU() */
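/*
 * If the chipset provides a PMU, allocation is delegated to the generic
 * bhnd(4) PMU implementation (SIBA_PMU_BHND). On legacy PWRCTL chipsets,
 * bus PMU operations are mapped onto the PWRCTL provider
 * (SIBA_PMU_PWRCTL); otherwise, the core's clocks are fixed and PMU
 * operations are satisfied as no-ops (SIBA_PMU_FIXED).
 */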
static int
siba_alloc_pmu(device_t dev, device_t child)
{
	struct siba_softc	*sc;
	struct siba_devinfo	*dinfo;
	device_t		 chipc;
	device_t		 pwrctl;
	struct chipc_caps	 ccaps;
	siba_pmu_state		 pmu_state;
	int			 error;

	if (device_get_parent(child) != dev)
		return (EINVAL);

	sc = device_get_softc(dev);
	dinfo = device_get_ivars(child);
	pwrctl = NULL;

	/* Fetch ChipCommon capability flags */
	chipc = bhnd_retain_provider(child, BHND_SERVICE_CHIPC);
	if (chipc != NULL) {
		ccaps = *BHND_CHIPC_GET_CAPS(chipc);
		bhnd_release_provider(child, chipc, BHND_SERVICE_CHIPC);
	} else {
		memset(&ccaps, 0, sizeof(ccaps));
	}

	/* Defer to bhnd(4)'s PMU implementation if ChipCommon exists and
	 * advertises PMU support */
	if (ccaps.pmu) {
		if ((error = bhnd_generic_alloc_pmu(dev, child)))
			return (error);

		KASSERT(dinfo->pmu_state == SIBA_PMU_BHND,
		    ("unexpected PMU state: %d", dinfo->pmu_state));

		return (0);
	}

	/*
	 * This is either a legacy PWRCTL chipset, or the device does not
	 * support dynamic clock control.
	 *
	 * We need to map all bhnd(4) bus PMU operations to their PWRCTL or
	 * no-op equivalents.
	 */
	if (ccaps.pwr_ctrl) {
		pmu_state = SIBA_PMU_PWRCTL;
		pwrctl = bhnd_retain_provider(child, BHND_SERVICE_PWRCTL);
		if (pwrctl == NULL) {
			device_printf(dev, "PWRCTL not found\n");
			return (ENODEV);
		}
	} else {
		pmu_state = SIBA_PMU_FIXED;
		pwrctl = NULL;
	}

	SIBA_LOCK(sc);

	/* Per-core PMU state already allocated? */
	if (dinfo->pmu_state != SIBA_PMU_NONE) {
		panic("duplicate PMU allocation for %s",
		    device_get_nameunit(child));
	}

	/* Update the child's PMU allocation state, and transfer ownership of
	 * the PWRCTL provider reference (if any) */
	dinfo->pmu_state = pmu_state;
	dinfo->pmu.pwrctl = pwrctl;

	SIBA_UNLOCK(sc);

	return (0);
}

/* BHND_BUS_RELEASE_PMU() */
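/*
 * Releases the per-core PMU state allocated by siba_alloc_pmu(), dropping
 * any outstanding PWRCTL clock reservations and provider references.
 */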
static int
siba_release_pmu(device_t dev, device_t child)
{
	struct siba_softc	*sc;
	struct siba_devinfo	*dinfo;
	device_t		 pwrctl;
	int			 error;

	if (device_get_parent(child) != dev)
		return (EINVAL);

	sc = device_get_softc(dev);
	dinfo = device_get_ivars(child);

	SIBA_LOCK(sc);
	switch (dinfo->pmu_state) {
	case SIBA_PMU_NONE:
		panic("pmu over-release for %s", device_get_nameunit(child));
		SIBA_UNLOCK(sc);
		return (ENXIO);

	case SIBA_PMU_BHND:
		SIBA_UNLOCK(sc);
		return (bhnd_generic_release_pmu(dev, child));

	case SIBA_PMU_PWRCTL:
		/* Requesting BHND_CLOCK_DYN releases any outstanding clock
		 * reservations */
		pwrctl = dinfo->pmu.pwrctl;
		error = bhnd_pwrctl_request_clock(pwrctl, child,
		    BHND_CLOCK_DYN);
		if (error) {
			SIBA_UNLOCK(sc);
			return (error);
		}

		/* Clean up the child's PMU state */
		dinfo->pmu_state = SIBA_PMU_NONE;
		dinfo->pmu.pwrctl = NULL;
		SIBA_UNLOCK(sc);

		/* Release the provider reference */
		bhnd_release_provider(child, pwrctl, BHND_SERVICE_PWRCTL);
		return (0);

	case SIBA_PMU_FIXED:
		/* Clean up the child's PMU state */
		KASSERT(dinfo->pmu.pwrctl == NULL,
		    ("PWRCTL reference with FIXED state"));

		dinfo->pmu_state = SIBA_PMU_NONE;
		dinfo->pmu.pwrctl = NULL;
		SIBA_UNLOCK(sc);

		return (0);
	}

	panic("invalid PMU state: %d", dinfo->pmu_state);
}

/* BHND_BUS_GET_CLOCK_LATENCY() */
static int
siba_get_clock_latency(device_t dev, device_t child, bhnd_clock clock,
    u_int *latency)
{
	struct siba_softc	*sc;
	struct siba_devinfo	*dinfo;
	int			 error;

	if (device_get_parent(child) != dev)
		return (EINVAL);

	sc = device_get_softc(dev);
	dinfo = device_get_ivars(child);

	SIBA_LOCK(sc);
	switch (dinfo->pmu_state) {
	case SIBA_PMU_NONE:
		panic("no active PMU request state");

		SIBA_UNLOCK(sc);
		return (ENXIO);

	case SIBA_PMU_BHND:
		SIBA_UNLOCK(sc);
		return (bhnd_generic_get_clock_latency(dev, child, clock,
		    latency));

	case SIBA_PMU_PWRCTL:
		error = bhnd_pwrctl_get_clock_latency(dinfo->pmu.pwrctl, clock,
		    latency);
		SIBA_UNLOCK(sc);

		return (error);

	case SIBA_PMU_FIXED:
		SIBA_UNLOCK(sc);

		/* HT clock is always available, and incurs no transition
		 * delay. */
		switch (clock) {
		case BHND_CLOCK_HT:
			*latency = 0;
			return (0);

		default:
			return (ENODEV);
		}

		return (ENODEV);
	}

	panic("invalid PMU state: %d", dinfo->pmu_state);
}

/* BHND_BUS_GET_CLOCK_FREQ() */
static int
siba_get_clock_freq(device_t dev, device_t child, bhnd_clock clock,
    u_int *freq)
{
	struct siba_softc	*sc;
	struct siba_devinfo	*dinfo;
	int			 error;

	if (device_get_parent(child) != dev)
		return (EINVAL);

	sc = device_get_softc(dev);
	dinfo = device_get_ivars(child);

	SIBA_LOCK(sc);
	switch (dinfo->pmu_state) {
	case SIBA_PMU_NONE:
		panic("no active PMU request state");

		SIBA_UNLOCK(sc);
		return (ENXIO);

	case SIBA_PMU_BHND:
		SIBA_UNLOCK(sc);
		return (bhnd_generic_get_clock_freq(dev, child, clock, freq));

	case SIBA_PMU_PWRCTL:
		error = bhnd_pwrctl_get_clock_freq(dinfo->pmu.pwrctl, clock,
		    freq);
		SIBA_UNLOCK(sc);

		return (error);

	case SIBA_PMU_FIXED:
		SIBA_UNLOCK(sc);

		return (ENODEV);
	}

	panic("invalid PMU state: %d", dinfo->pmu_state);
}

/* BHND_BUS_REQUEST_EXT_RSRC() */
static int
siba_request_ext_rsrc(device_t dev, device_t child, u_int rsrc)
{
	struct siba_softc	*sc;
	struct siba_devinfo	*dinfo;

	if (device_get_parent(child) != dev)
		return (EINVAL);

	sc = device_get_softc(dev);
	dinfo = device_get_ivars(child);

	SIBA_LOCK(sc);
	switch (dinfo->pmu_state) {
	case SIBA_PMU_NONE:
		panic("no active PMU request state");

		SIBA_UNLOCK(sc);
		return (ENXIO);

	case SIBA_PMU_BHND:
		SIBA_UNLOCK(sc);
		return (bhnd_generic_request_ext_rsrc(dev, child, rsrc));

	case SIBA_PMU_PWRCTL:
	case SIBA_PMU_FIXED:
		/* HW does not support per-core external resources */
		SIBA_UNLOCK(sc);
		return (ENODEV);
	}

	panic("invalid PMU state: %d", dinfo->pmu_state);
}

/* BHND_BUS_RELEASE_EXT_RSRC() */
static int
siba_release_ext_rsrc(device_t dev, device_t child, u_int rsrc)
{
	struct siba_softc	*sc;
	struct siba_devinfo	*dinfo;

	if (device_get_parent(child) != dev)
		return (EINVAL);

	sc = device_get_softc(dev);
	dinfo = device_get_ivars(child);

	SIBA_LOCK(sc);
	switch (dinfo->pmu_state) {
	case SIBA_PMU_NONE:
		panic("no active PMU request state");

		SIBA_UNLOCK(sc);
		return (ENXIO);

	case SIBA_PMU_BHND:
		SIBA_UNLOCK(sc);
		return (bhnd_generic_release_ext_rsrc(dev, child, rsrc));

	case SIBA_PMU_PWRCTL:
	case SIBA_PMU_FIXED:
		/* HW does not support per-core external resources */
		SIBA_UNLOCK(sc);
		return (ENODEV);
	}

	panic("invalid PMU state: %d", dinfo->pmu_state);
}

/* BHND_BUS_REQUEST_CLOCK() */
static int
siba_request_clock(device_t dev, device_t child, bhnd_clock clock)
{
	struct siba_softc	*sc;
	struct siba_devinfo	*dinfo;
	int			 error;

	if (device_get_parent(child) != dev)
		return (EINVAL);

	sc = device_get_softc(dev);
	dinfo = device_get_ivars(child);

	SIBA_LOCK(sc);
	switch (dinfo->pmu_state) {
	case SIBA_PMU_NONE:
		panic("no active PMU request state");

		SIBA_UNLOCK(sc);
		return (ENXIO);

	case SIBA_PMU_BHND:
		SIBA_UNLOCK(sc);
		return (bhnd_generic_request_clock(dev, child, clock));

	case SIBA_PMU_PWRCTL:
		error = bhnd_pwrctl_request_clock(dinfo->pmu.pwrctl, child,
		    clock);
		SIBA_UNLOCK(sc);

		return (error);

	case SIBA_PMU_FIXED:
		SIBA_UNLOCK(sc);

		/* HT clock is always available, and fulfills any of the
		 * following clock requests */
		switch (clock) {
		case BHND_CLOCK_DYN:
		case BHND_CLOCK_ILP:
		case BHND_CLOCK_ALP:
		case BHND_CLOCK_HT:
			return (0);

		default:
			return (ENODEV);
		}
	}

	panic("invalid PMU state: %d", dinfo->pmu_state);
}

/* BHND_BUS_ENABLE_CLOCKS() */
static int
siba_enable_clocks(device_t dev, device_t child, uint32_t clocks)
{
	struct siba_softc	*sc;
	struct siba_devinfo	*dinfo;

	if (device_get_parent(child) != dev)
		return (EINVAL);

	sc = device_get_softc(dev);
	dinfo = device_get_ivars(child);

	SIBA_LOCK(sc);
	switch (dinfo->pmu_state) {
	case SIBA_PMU_NONE:
		panic("no active PMU request state");

		SIBA_UNLOCK(sc);
		return (ENXIO);

	case SIBA_PMU_BHND:
		SIBA_UNLOCK(sc);
		return (bhnd_generic_enable_clocks(dev, child, clocks));

	case SIBA_PMU_PWRCTL:
	case SIBA_PMU_FIXED:
		SIBA_UNLOCK(sc);

		/* All (supported) clocks are already enabled by default */
		clocks &= ~(BHND_CLOCK_DYN |
			    BHND_CLOCK_ILP |
			    BHND_CLOCK_ALP |
			    BHND_CLOCK_HT);

		if (clocks != 0) {
			device_printf(dev, "%s requested unknown clocks: %#x\n",
			    device_get_nameunit(child), clocks);
			return (ENODEV);
		}

		return (0);
	}

	panic("invalid PMU state: %d", dinfo->pmu_state);
}

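/**
 * Default implementation of BHND_BUS_READ_IOST().
 *
 * The siba(4) I/O status flags are read from the SISF field of the
 * core's TMSTATEHIGH config register.
 */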
static int
siba_read_iost(device_t dev, device_t child, uint16_t *iost)
{
	uint32_t	tmhigh;
	int		error;

	error = bhnd_read_config(child, SIBA_CFG0_TMSTATEHIGH, &tmhigh, 4);
	if (error)
		return (error);

	*iost = (SIBA_REG_GET(tmhigh, TMH_SISF));
	return (0);
}

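/**
 * Default implementation of BHND_BUS_READ_IOCTL().
 *
 * The siba(4) I/O control flags are read from the SICF field of the
 * core's TMSTATELOW config register.
 */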
static int
siba_read_ioctl(device_t dev, device_t child, uint16_t *ioctl)
{
	uint32_t	ts_low;
	int		error;

	if ((error = bhnd_read_config(child, SIBA_CFG0_TMSTATELOW, &ts_low, 4)))
		return (error);

	*ioctl = (SIBA_REG_GET(ts_low, TML_SICF));
	return (0);
}

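/**
 * Default implementation of BHND_BUS_WRITE_IOCTL().
 *
 * Updates the SICF field of the core's TMSTATELOW config register,
 * writing only the bits requested in @p mask.
 */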
static int
siba_write_ioctl(device_t dev, device_t child, uint16_t value, uint16_t mask)
{
	struct siba_devinfo	*dinfo;
	struct bhnd_resource	*r;
	uint32_t		 ts_low, ts_mask;

	if (device_get_parent(child) != dev)
		return (EINVAL);

	/* Fetch CFG0 mapping */
	dinfo = device_get_ivars(child);
	if ((r = dinfo->cfg_res[0]) == NULL)
		return (ENODEV);

	/* Mask and set TMSTATELOW core flag bits */
	ts_mask = (mask << SIBA_TML_SICF_SHIFT) & SIBA_TML_SICF_MASK;
	ts_low = (value << SIBA_TML_SICF_SHIFT) & ts_mask;

	siba_write_target_state(child, dinfo, SIBA_CFG0_TMSTATELOW,
	    ts_low, ts_mask);
	return (0);
}

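/**
 * Default implementation of BHND_BUS_IS_HW_SUSPENDED().
 *
 * A core is considered suspended if it is held in RESET, target
 * transactions are being rejected (REJ), or its clock is not enabled.
 * On a config read error, the core is conservatively reported as
 * suspended.
 */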
static bool
siba_is_hw_suspended(device_t dev, device_t child)
{
	uint32_t		ts_low;
	uint16_t		ioctl;
	int			error;

	/* Fetch target state */
	error = bhnd_read_config(child, SIBA_CFG0_TMSTATELOW, &ts_low, 4);
	if (error) {
		device_printf(child, "error reading HW reset state: %d\n",
		    error);
		return (true);
	}

	/* Is core held in RESET? */
	if (ts_low & SIBA_TML_RESET)
		return (true);

	/* Is target reject enabled? */
	if (ts_low & SIBA_TML_REJ_MASK)
		return (true);

	/* Is core clocked? */
	ioctl = SIBA_REG_GET(ts_low, TML_SICF);
	if (!(ioctl & BHND_IOCTL_CLK_EN))
		return (true);

	return (false);
}

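/**
 * Default implementation of BHND_BUS_RESET_HW().
 *
 * Places the core into a known RESET state via bhnd_suspend_hw(), clears
 * any target/initiator errors, and then releases the core from RESET with
 * clocks initially forced, restoring normal clock gating once the core is
 * active.
 */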
static int
siba_reset_hw(device_t dev, device_t child, uint16_t ioctl,
    uint16_t reset_ioctl)
{
	struct siba_devinfo		*dinfo;
	struct bhnd_resource		*r;
	uint32_t			 ts_low, imstate;
	uint16_t			 clkflags;
	int				 error;

	if (device_get_parent(child) != dev)
		return (EINVAL);

	dinfo = device_get_ivars(child);

	/* Can't suspend the core without access to the CFG0 registers */
	if ((r = dinfo->cfg_res[0]) == NULL)
		return (ENODEV);

	/* We require exclusive control over BHND_IOCTL_CLK_(EN|FORCE) */
	clkflags = BHND_IOCTL_CLK_EN | BHND_IOCTL_CLK_FORCE;
	if (ioctl & clkflags)
		return (EINVAL);

	/* Place core into known RESET state */
	if ((error = bhnd_suspend_hw(child, reset_ioctl)))
		return (error);

	/* Set RESET, clear REJ, set the caller's IOCTL flags, and
	 * force clocks to ensure the signal propagates throughout the
	 * core. */
	ts_low = SIBA_TML_RESET |
		 (ioctl << SIBA_TML_SICF_SHIFT) |
		 (BHND_IOCTL_CLK_EN << SIBA_TML_SICF_SHIFT) |
		 (BHND_IOCTL_CLK_FORCE << SIBA_TML_SICF_SHIFT);

	siba_write_target_state(child, dinfo, SIBA_CFG0_TMSTATELOW,
	    ts_low, UINT32_MAX);

	/* Clear any target errors */
	if (bhnd_bus_read_4(r, SIBA_CFG0_TMSTATEHIGH) & SIBA_TMH_SERR) {
		siba_write_target_state(child, dinfo, SIBA_CFG0_TMSTATEHIGH,
		    0x0, SIBA_TMH_SERR);
	}

	/* Clear any initiator errors */
	imstate = bhnd_bus_read_4(r, SIBA_CFG0_IMSTATE);
	if (imstate & (SIBA_IM_IBE|SIBA_IM_TO)) {
		siba_write_target_state(child, dinfo, SIBA_CFG0_IMSTATE, 0x0,
		    SIBA_IM_IBE|SIBA_IM_TO);
	}

	/* Release from RESET while leaving clocks forced, ensuring the
	 * signal propagates throughout the core */
	siba_write_target_state(child, dinfo, SIBA_CFG0_TMSTATELOW, 0x0,
	    SIBA_TML_RESET);

	/* The core should now be active; we can clear the BHND_IOCTL_CLK_FORCE
	 * bit and allow the core to manage clock gating. */
	siba_write_target_state(child, dinfo, SIBA_CFG0_TMSTATELOW, 0x0,
	    (BHND_IOCTL_CLK_FORCE << SIBA_TML_SICF_SHIFT));

	return (0);
}

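/**
 * Default implementation of BHND_BUS_SUSPEND_HW().
 *
 * Rejects further target (and, if applicable, initiator) transactions,
 * waits for pending transactions to complete, and then places the core
 * into RESET with its clocks disabled. Any PWRCTL clock reservations held
 * by the core are released once it is in RESET.
 */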
static int
siba_suspend_hw(device_t dev, device_t child, uint16_t ioctl)
{
	struct siba_softc		*sc;
	struct siba_devinfo		*dinfo;
	struct bhnd_resource		*r;
	uint32_t			 idl, ts_low, ts_mask;
	uint16_t			 cflags, clkflags;
	int				 error;

	if (device_get_parent(child) != dev)
		return (EINVAL);

	sc = device_get_softc(dev);
	dinfo = device_get_ivars(child);

	/* Can't suspend the core without access to the CFG0 registers */
	if ((r = dinfo->cfg_res[0]) == NULL)
		return (ENODEV);

	/* We require exclusive control over BHND_IOCTL_CLK_(EN|FORCE) */
	clkflags = BHND_IOCTL_CLK_EN | BHND_IOCTL_CLK_FORCE;
	if (ioctl & clkflags)
		return (EINVAL);

	/* Already in RESET? */
	ts_low = bhnd_bus_read_4(r, SIBA_CFG0_TMSTATELOW);
	if (ts_low & SIBA_TML_RESET)
		return (0);

	/* If clocks are already disabled, we can place the core directly
	 * into RESET|REJ while setting the caller's IOCTL flags. */
	cflags = SIBA_REG_GET(ts_low, TML_SICF);
	if (!(cflags & BHND_IOCTL_CLK_EN)) {
		ts_low = SIBA_TML_RESET | SIBA_TML_REJ |
			 (ioctl << SIBA_TML_SICF_SHIFT);
		ts_mask = SIBA_TML_RESET | SIBA_TML_REJ | SIBA_TML_SICF_MASK;

		siba_write_target_state(child, dinfo, SIBA_CFG0_TMSTATELOW,
		    ts_low, ts_mask);
		return (0);
	}

	/* Reject further transactions reaching this core */
	siba_write_target_state(child, dinfo, SIBA_CFG0_TMSTATELOW,
	    SIBA_TML_REJ, SIBA_TML_REJ);

	/* Wait for transaction busy flag to clear for all transactions
	 * initiated by this core */
	error = siba_wait_target_state(child, dinfo, SIBA_CFG0_TMSTATEHIGH,
	    0x0, SIBA_TMH_BUSY, 100000);
	if (error)
		return (error);

	/* If this is an initiator core, we need to reject initiator
	 * transactions too. */
	idl = bhnd_bus_read_4(r, SIBA_CFG0_IDLOW);
	if (idl & SIBA_IDL_INIT) {
		/* Reject further initiator transactions */
		siba_write_target_state(child, dinfo, SIBA_CFG0_IMSTATE,
		    SIBA_IM_RJ, SIBA_IM_RJ);

		/* Wait for initiator busy flag to clear */
		error = siba_wait_target_state(child, dinfo, SIBA_CFG0_IMSTATE,
		    0x0, SIBA_IM_BY, 100000);
		if (error)
			return (error);
	}

	/* Put the core into RESET, set the caller's IOCTL flags, and
	 * force clocks to ensure the RESET signal propagates throughout the
	 * core. */
	ts_low = SIBA_TML_RESET |
		 (ioctl << SIBA_TML_SICF_SHIFT) |
		 (BHND_IOCTL_CLK_EN << SIBA_TML_SICF_SHIFT) |
		 (BHND_IOCTL_CLK_FORCE << SIBA_TML_SICF_SHIFT);
	ts_mask = SIBA_TML_RESET |
		  SIBA_TML_SICF_MASK;

	siba_write_target_state(child, dinfo, SIBA_CFG0_TMSTATELOW, ts_low,
	    ts_mask);

	/* Give RESET ample time */
	DELAY(10);

	/* Clear previously asserted initiator reject */
	if (idl & SIBA_IDL_INIT) {
		siba_write_target_state(child, dinfo, SIBA_CFG0_IMSTATE, 0x0,
		    SIBA_IM_RJ);
	}

	/* Disable all clocks, leaving RESET and REJ asserted */
	siba_write_target_state(child, dinfo, SIBA_CFG0_TMSTATELOW, 0x0,
	    (BHND_IOCTL_CLK_EN | BHND_IOCTL_CLK_FORCE) << SIBA_TML_SICF_SHIFT);

	/*
	 * Core is now in RESET.
	 *
	 * If the core holds any PWRCTL clock reservations, we need to release
	 * those now. This emulates the standard bhnd(4) PMU behavior of RESET
	 * automatically clearing clkctl.
	 */
	SIBA_LOCK(sc);
	if (dinfo->pmu_state == SIBA_PMU_PWRCTL) {
		error = bhnd_pwrctl_request_clock(dinfo->pmu.pwrctl, child,
		    BHND_CLOCK_DYN);
		SIBA_UNLOCK(sc);

		if (error) {
			device_printf(child, "failed to release clock request: "
			    "%d", error);
			return (error);
		}

		return (0);
	} else {
		SIBA_UNLOCK(sc);
		return (0);
	}
}

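/**
 * Default implementation of BHND_BUS_READ_CONFIG().
 *
 * Performs a 1, 2, or 4 byte read from @p child's CFG0 register block,
 * validating that @p offset plus @p width falls within the mapped region.
 */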
static int
siba_read_config(device_t dev, device_t child, bus_size_t offset, void *value,
    u_int width)
{
	struct siba_devinfo	*dinfo;
	rman_res_t		 r_size;

	/* Must be directly attached */
	if (device_get_parent(child) != dev)
		return (EINVAL);

	/* CFG0 registers must be available */
	dinfo = device_get_ivars(child);
	if (dinfo->cfg_res[0] == NULL)
		return (ENODEV);

	/* Offset must fall within CFG0 */
	r_size = rman_get_size(dinfo->cfg_res[0]->res);
	if (r_size < offset || r_size - offset < width)
		return (EFAULT);

	switch (width) {
	case 1:
		*((uint8_t *)value) = bhnd_bus_read_1(dinfo->cfg_res[0],
		    offset);
		return (0);
	case 2:
		*((uint16_t *)value) = bhnd_bus_read_2(dinfo->cfg_res[0],
		    offset);
		return (0);
	case 4:
		*((uint32_t *)value) = bhnd_bus_read_4(dinfo->cfg_res[0],
		    offset);
		return (0);
	default:
		return (EINVAL);
	}
}

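/**
 * Default implementation of BHND_BUS_WRITE_CONFIG().
 *
 * Performs a 1, 2, or 4 byte write to @p child's CFG0 register block,
 * validating that @p offset plus @p width falls within the mapped region.
 */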
static int
siba_write_config(device_t dev, device_t child, bus_size_t offset,
    const void *value, u_int width)
{
	struct siba_devinfo	*dinfo;
	struct bhnd_resource	*r;
	rman_res_t		 r_size;

	/* Must be directly attached */
	if (device_get_parent(child) != dev)
		return (EINVAL);

	/* CFG0 registers must be available */
	dinfo = device_get_ivars(child);
	if ((r = dinfo->cfg_res[0]) == NULL)
		return (ENODEV);

	/* Offset must fall within CFG0 */
	r_size = rman_get_size(r->res);
	if (r_size < offset || r_size - offset < width)
		return (EFAULT);

	switch (width) {
	case 1:
		bhnd_bus_write_1(r, offset, *(const uint8_t *)value);
		return (0);
	case 2:
		bhnd_bus_write_2(r, offset, *(const uint16_t *)value);
		return (0);
	case 4:
		bhnd_bus_write_4(r, offset, *(const uint32_t *)value);
		return (0);
	default:
		return (EINVAL);
	}
}

static u_int
siba_get_port_count(device_t dev, device_t child, bhnd_port_type type)
{
	struct siba_devinfo *dinfo;

	/* delegate non-bus-attached devices to our parent */
	if (device_get_parent(child) != dev)
		return (BHND_BUS_GET_PORT_COUNT(device_get_parent(dev), child,
		    type));

	dinfo = device_get_ivars(child);
	return (siba_port_count(&dinfo->core_id, type));
}

static u_int
siba_get_region_count(device_t dev, device_t child, bhnd_port_type type,
    u_int port)
{
	struct siba_devinfo	*dinfo;

	/* delegate non-bus-attached devices to our parent */
	if (device_get_parent(child) != dev)
		return (BHND_BUS_GET_REGION_COUNT(device_get_parent(dev), child,
		    type, port));

	dinfo = device_get_ivars(child);
	return (siba_port_region_count(&dinfo->core_id, type, port));
}

static int
siba_get_port_rid(device_t dev, device_t child, bhnd_port_type port_type,
    u_int port_num, u_int region_num)
{
	struct siba_devinfo	*dinfo;
	struct siba_addrspace	*addrspace;
	struct siba_cfg_block	*cfg;

	/* delegate non-bus-attached devices to our parent */
	if (device_get_parent(child) != dev)
		return (BHND_BUS_GET_PORT_RID(device_get_parent(dev), child,
		    port_type, port_num, region_num));

	dinfo = device_get_ivars(child);

	/* Look for a matching addrspace entry */
	addrspace = siba_find_addrspace(dinfo, port_type, port_num, region_num);
	if (addrspace != NULL)
		return (addrspace->sa_rid);

	/* Try the config blocks */
	cfg = siba_find_cfg_block(dinfo, port_type, port_num, region_num);
	if (cfg != NULL)
		return (cfg->cb_rid);

	/* Not found */
	return (-1);
}

static int
siba_decode_port_rid(device_t dev, device_t child, int type, int rid,
    bhnd_port_type *port_type, u_int *port_num, u_int *region_num)
{
	struct siba_devinfo	*dinfo;

	/* delegate non-bus-attached devices to our parent */
	if (device_get_parent(child) != dev)
		return (BHND_BUS_DECODE_PORT_RID(device_get_parent(dev), child,
		    type, rid, port_type, port_num, region_num));

	dinfo = device_get_ivars(child);

	/* Ports are always memory mapped */
	if (type != SYS_RES_MEMORY)
		return (EINVAL);

	/* Look for a matching addrspace entry */
	for (u_int i = 0; i < dinfo->core_id.num_admatch; i++) {
		if (dinfo->addrspace[i].sa_rid != rid)
			continue;

		*port_type = BHND_PORT_DEVICE;
		*port_num = siba_addrspace_device_port(i);
		*region_num = siba_addrspace_device_region(i);
		return (0);
	}

	/* Try the config blocks */
	for (u_int i = 0; i < dinfo->core_id.num_cfg_blocks; i++) {
		if (dinfo->cfg[i].cb_rid != rid)
			continue;

		*port_type = BHND_PORT_AGENT;
		*port_num = siba_cfg_agent_port(i);
		*region_num = siba_cfg_agent_region(i);
		return (0);
	}

	/* Not found */
	return (ENOENT);
}

static int
siba_get_region_addr(device_t dev, device_t child, bhnd_port_type port_type,
    u_int port_num, u_int region_num, bhnd_addr_t *addr, bhnd_size_t *size)
{
	struct siba_devinfo	*dinfo;
	struct siba_addrspace	*addrspace;
	struct siba_cfg_block	*cfg;

	/* delegate non-bus-attached devices to our parent */
	if (device_get_parent(child) != dev) {
		return (BHND_BUS_GET_REGION_ADDR(device_get_parent(dev), child,
		    port_type, port_num, region_num, addr, size));
	}

	dinfo = device_get_ivars(child);

	/* Look for a matching addrspace */
	addrspace = siba_find_addrspace(dinfo, port_type, port_num, region_num);
	if (addrspace != NULL) {
		*addr = addrspace->sa_base;
		*size = addrspace->sa_size - addrspace->sa_bus_reserved;
		return (0);
	}

	/* Look for a matching cfg block */
	cfg = siba_find_cfg_block(dinfo, port_type, port_num, region_num);
	if (cfg != NULL) {
		*addr = cfg->cb_base;
		*size = cfg->cb_size;
		return (0);
	}

	/* Not found */
	return (ENOENT);
}

/**
 * Default siba(4) bus driver implementation of BHND_BUS_GET_INTR_COUNT().
 */
u_int
siba_get_intr_count(device_t dev, device_t child)
{
	struct siba_devinfo	*dinfo;

	/* delegate non-bus-attached devices to our parent */
	if (device_get_parent(child) != dev)
		return (BHND_BUS_GET_INTR_COUNT(device_get_parent(dev), child));

	dinfo = device_get_ivars(child);
	if (!dinfo->core_id.intr_en) {
		/* No interrupts */
		return (0);
	} else {
		/* One assigned interrupt */
		return (1);
	}
}

/**
 * Default siba(4) bus driver implementation of BHND_BUS_GET_INTR_IVEC().
 */
int
siba_get_intr_ivec(device_t dev, device_t child, u_int intr, u_int *ivec)
{
	struct siba_devinfo	*dinfo;

	/* delegate non-bus-attached devices to our parent */
	if (device_get_parent(child) != dev)
		return (BHND_BUS_GET_INTR_IVEC(device_get_parent(dev), child,
		    intr, ivec));

	/* Must be a valid interrupt ID */
	if (intr >= siba_get_intr_count(dev, child))
		return (ENXIO);

	KASSERT(intr == 0, ("invalid ivec %u", intr));

	dinfo = device_get_ivars(child);

	KASSERT(dinfo->core_id.intr_en,
	    ("core does not have an interrupt assigned"));

	*ivec = dinfo->core_id.intr_flag;
	return (0);
}

/**
 * Map per-core configuration blocks for @p dinfo.
 *
 * @param dev The siba bus device.
 * @param dinfo The device info instance on which to map all per-core
 * configuration blocks.
 */
static int
siba_map_cfg_resources(device_t dev, struct siba_devinfo *dinfo)
{
	struct siba_addrspace	*addrspace;
	rman_res_t		 r_start, r_count, r_end;
	uint8_t			 num_cfg;
	int			 rid;

	num_cfg = dinfo->core_id.num_cfg_blocks;
	if (num_cfg > SIBA_MAX_CFG) {
		device_printf(dev, "config block count %hhu out of range\n",
		    num_cfg);
		return (ENXIO);
	}

	/* Fetch the core register address space */
	addrspace = siba_find_addrspace(dinfo, BHND_PORT_DEVICE, 0, 0);
	if (addrspace == NULL) {
		device_printf(dev, "missing device registers\n");
		return (ENXIO);
	}

	/*
	 * Map the per-core configuration blocks
	 */
	for (uint8_t i = 0; i < num_cfg; i++) {
		/* Add to child's resource list */
		r_start = addrspace->sa_base + SIBA_CFG_OFFSET(i);
		r_count = SIBA_CFG_SIZE;
		r_end = r_start + r_count - 1;

		rid = resource_list_add_next(&dinfo->resources, SYS_RES_MEMORY,
		    r_start, r_end, r_count);

		/* Initialize config block descriptor */
		dinfo->cfg[i] = ((struct siba_cfg_block) {
			.cb_base = r_start,
			.cb_size = SIBA_CFG_SIZE,
			.cb_rid = rid
		});

		/* Map the config resource for bus-level access */
		dinfo->cfg_rid[i] = SIBA_CFG_RID(dinfo, i);
		dinfo->cfg_res[i] = BHND_BUS_ALLOC_RESOURCE(dev, dev,
		    SYS_RES_MEMORY, &dinfo->cfg_rid[i], r_start, r_end,
		    r_count, RF_ACTIVE|RF_SHAREABLE);

		if (dinfo->cfg_res[i] == NULL) {
			device_printf(dev, "failed to allocate SIBA_CFG%hhu\n",
			    i);
			return (ENXIO);
		}
	}

	return (0);
}

static device_t
siba_add_child(device_t dev, u_int order, const char *name, int unit)
{
	struct siba_devinfo	*dinfo;
	device_t		 child;

	child = device_add_child_ordered(dev, order, name, unit);
	if (child == NULL)
		return (NULL);

	if ((dinfo = siba_alloc_dinfo(dev)) == NULL) {
		device_delete_child(dev, child);
		return (NULL);
	}

	device_set_ivars(child, dinfo);

	return (child);
}

static void
siba_child_deleted(device_t dev, device_t child)
{
	struct siba_devinfo	*dinfo;

	/* Call required bhnd(4) implementation */
	bhnd_generic_child_deleted(dev, child);

	/* Free siba device info */
	if ((dinfo = device_get_ivars(child)) != NULL)
		siba_free_dinfo(dev, child, dinfo);

	device_set_ivars(child, NULL);
}

/**
 * Scan the core table and add all valid discovered cores to
 * the bus.
 *
 * @param dev The siba bus device.
 */
int
siba_add_children(device_t dev)
{
	bhnd_erom_t			*erom;
	struct siba_erom		*siba_erom;
	struct bhnd_erom_io		*eio;
	const struct bhnd_chipid	*cid;
	struct siba_core_id		*cores;
	device_t			*children;
	int				 error;

	cid = BHND_BUS_GET_CHIPID(dev, dev);

	/* Allocate our EROM parser */
	eio = bhnd_erom_iores_new(dev, SIBA_EROM_RID);
	erom = bhnd_erom_alloc(&siba_erom_parser, cid, eio);
	if (erom == NULL) {
		bhnd_erom_io_fini(eio);
		return (ENODEV);
	}

	/* Allocate our temporary core and device table */
	cores = malloc(sizeof(*cores) * cid->ncores, M_BHND, M_WAITOK);
	children = malloc(sizeof(*children) * cid->ncores, M_BHND,
	    M_WAITOK | M_ZERO);

	/*
	 * Add child devices for all discovered cores.
	 *
	 * On bridged devices, we'll exhaust our available register windows if
	 * we map config blocks on unpopulated/disabled cores. To avoid this, we
	 * defer mapping of the per-core siba(4) config blocks until all cores
	 * have been enumerated and otherwise configured.
	 */
	siba_erom = (struct siba_erom *)erom;
	for (u_int i = 0; i < cid->ncores; i++) {
		struct siba_devinfo	*dinfo;
		device_t		 child;

		if ((error = siba_erom_get_core_id(siba_erom, i, &cores[i])))
			goto failed;

		/* Add the child device */
		child = BUS_ADD_CHILD(dev, 0, NULL, -1);
		if (child == NULL) {
			error = ENXIO;
			goto failed;
		}

		children[i] = child;

		/* Initialize per-device bus info */
		if ((dinfo = device_get_ivars(child)) == NULL) {
			error = ENXIO;
			goto failed;
		}

		if ((error = siba_init_dinfo(dev, child, dinfo, &cores[i])))
			goto failed;

		/* If pins are floating or the hardware is otherwise
		 * unpopulated, the device shouldn't be used. */
		if (bhnd_is_hw_disabled(child))
			device_disable(child);
	}

	/* Free EROM (and any bridge register windows it might hold) */
	bhnd_erom_free(erom);
	erom = NULL;

	/* Map all valid cores' config register blocks and perform interrupt
	 * assignment */
	for (u_int i = 0; i < cid->ncores; i++) {
		struct siba_devinfo	*dinfo;
		device_t		 child;

		child = children[i];

		/* Skip if core is disabled */
		if (bhnd_is_hw_disabled(child))
			continue;

		dinfo = device_get_ivars(child);

		/* Map the core's config blocks */
		if ((error = siba_map_cfg_resources(dev, dinfo)))
			goto failed;

		/* Issue bus callback for fully initialized child. */
		BHND_BUS_CHILD_ADDED(dev, child);
	}

	free(cores, M_BHND);
	free(children, M_BHND);

	return (0);

failed:
	for (u_int i = 0; i < cid->ncores; i++) {
		if (children[i] == NULL)
			continue;

		device_delete_child(dev, children[i]);
	}

	free(cores, M_BHND);
	free(children, M_BHND);
	if (erom != NULL)
		bhnd_erom_free(erom);

	return (error);
}

static device_method_t siba_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,			siba_probe),
	DEVMETHOD(device_attach,		siba_attach),
	DEVMETHOD(device_detach,		siba_detach),
	DEVMETHOD(device_resume,		siba_resume),
	DEVMETHOD(device_suspend,		siba_suspend),

	/* Bus interface */
	DEVMETHOD(bus_add_child,		siba_add_child),
	DEVMETHOD(bus_child_deleted,		siba_child_deleted),
	DEVMETHOD(bus_read_ivar,		siba_read_ivar),
	DEVMETHOD(bus_write_ivar,		siba_write_ivar),
	DEVMETHOD(bus_get_resource_list,	siba_get_resource_list),

	/* BHND interface */
	DEVMETHOD(bhnd_bus_get_erom_class,	siba_get_erom_class),
	DEVMETHOD(bhnd_bus_alloc_pmu,		siba_alloc_pmu),
	DEVMETHOD(bhnd_bus_release_pmu,		siba_release_pmu),
	DEVMETHOD(bhnd_bus_request_clock,	siba_request_clock),
	DEVMETHOD(bhnd_bus_enable_clocks,	siba_enable_clocks),
	DEVMETHOD(bhnd_bus_request_ext_rsrc,	siba_request_ext_rsrc),
	DEVMETHOD(bhnd_bus_release_ext_rsrc,	siba_release_ext_rsrc),
	DEVMETHOD(bhnd_bus_get_clock_freq,	siba_get_clock_freq),
	DEVMETHOD(bhnd_bus_get_clock_latency,	siba_get_clock_latency),
	DEVMETHOD(bhnd_bus_read_ioctl,		siba_read_ioctl),
	DEVMETHOD(bhnd_bus_write_ioctl,		siba_write_ioctl),
	DEVMETHOD(bhnd_bus_read_iost,		siba_read_iost),
	DEVMETHOD(bhnd_bus_is_hw_suspended,	siba_is_hw_suspended),
	DEVMETHOD(bhnd_bus_reset_hw,		siba_reset_hw),
	DEVMETHOD(bhnd_bus_suspend_hw,		siba_suspend_hw),
	DEVMETHOD(bhnd_bus_read_config,		siba_read_config),
	DEVMETHOD(bhnd_bus_write_config,	siba_write_config),
	DEVMETHOD(bhnd_bus_get_port_count,	siba_get_port_count),
	DEVMETHOD(bhnd_bus_get_region_count,	siba_get_region_count),
	DEVMETHOD(bhnd_bus_get_port_rid,	siba_get_port_rid),
	DEVMETHOD(bhnd_bus_decode_port_rid,	siba_decode_port_rid),
	DEVMETHOD(bhnd_bus_get_region_addr,	siba_get_region_addr),
	DEVMETHOD(bhnd_bus_get_intr_count,	siba_get_intr_count),
	DEVMETHOD(bhnd_bus_get_intr_ivec,	siba_get_intr_ivec),

	DEVMETHOD_END
};

DEFINE_CLASS_1(bhnd, siba_driver, siba_methods, sizeof(struct siba_softc), bhnd_driver);

MODULE_VERSION(siba, 1);
MODULE_DEPEND(siba, bhnd, 1, 1, 1);
1439