xref: /freebsd/sys/dev/bhnd/bcma/bcma_erom.c (revision 38069501)
1 /*-
2  * Copyright (c) 2015-2017 Landon Fuller <landonf@landonf.org>
3  * Copyright (c) 2017 The FreeBSD Foundation
4  * All rights reserved.
5  *
6  * Portions of this software were developed by Landon Fuller
7  * under sponsorship from the FreeBSD Foundation.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer,
14  *    without modification.
15  * 2. Redistributions in binary form must reproduce at minimum a disclaimer
16  *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
17  *    redistribution must be conditioned upon including a substantially
18  *    similar Disclaimer requirement for further binary redistribution.
19  *
20  * NO WARRANTY
21  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
24  * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
25  * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
26  * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
29  * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
31  * THE POSSIBILITY OF SUCH DAMAGES.
32  */
33 
34 #include <sys/cdefs.h>
35 __FBSDID("$FreeBSD$");
36 
37 #include <sys/param.h>
38 #include <sys/bus.h>
39 #include <sys/kernel.h>
40 #include <sys/limits.h>
41 #include <sys/systm.h>
42 
43 #include <machine/bus.h>
44 #include <machine/resource.h>
45 
46 #include <dev/bhnd/cores/chipc/chipcreg.h>
47 
48 #include "bcma_eromreg.h"
49 #include "bcma_eromvar.h"
50 
51 /*
52  * BCMA Enumeration ROM (EROM) Table
53  *
54  * Provides auto-discovery of BCMA cores on Broadcom's HND SoC.
55  *
56  * The EROM core address can be found at BCMA_CC_EROM_ADDR within the
57  * ChipCommon registers. The table itself is comprised of 32-bit
58  * type-tagged entries, organized into an array of variable-length
59  * core descriptor records.
60  *
61  * The final core descriptor is followed by a 32-bit BCMA_EROM_TABLE_EOF (0xF)
62  * marker.
63  */
64 
65 static const char	*bcma_erom_entry_type_name (uint8_t entry);
66 
67 static int		 bcma_erom_read32(struct bcma_erom *erom,
68 			     uint32_t *entry);
69 static int		 bcma_erom_skip32(struct bcma_erom *erom);
70 
71 static int		 bcma_erom_skip_core(struct bcma_erom *erom);
72 static int		 bcma_erom_skip_mport(struct bcma_erom *erom);
73 static int		 bcma_erom_skip_sport_region(struct bcma_erom *erom);
74 
75 static int		 bcma_erom_seek_next(struct bcma_erom *erom,
76 			     uint8_t etype);
77 static int		 bcma_erom_region_to_port_type(struct bcma_erom *erom,
78 			     uint8_t region_type, bhnd_port_type *port_type);
79 
80 
81 static int		 bcma_erom_peek32(struct bcma_erom *erom,
82 			     uint32_t *entry);
83 
84 static bus_size_t	 bcma_erom_tell(struct bcma_erom *erom);
85 static void		 bcma_erom_seek(struct bcma_erom *erom,
86 			     bus_size_t offset);
87 static void		 bcma_erom_reset(struct bcma_erom *erom);
88 
89 static int		 bcma_erom_seek_matching_core(struct bcma_erom *sc,
90 			     const struct bhnd_core_match *desc,
91 			     struct bhnd_core_info *core);
92 
93 static int		 bcma_erom_parse_core(struct bcma_erom *erom,
94 			     struct bcma_erom_core *core);
95 
96 static int		 bcma_erom_parse_mport(struct bcma_erom *erom,
97 			     struct bcma_erom_mport *mport);
98 
99 static int		 bcma_erom_parse_sport_region(struct bcma_erom *erom,
100 			     struct bcma_erom_sport_region *region);
101 
102 static void		 bcma_erom_to_core_info(const struct bcma_erom_core *core,
103 			     u_int core_idx, int core_unit,
104 			     struct bhnd_core_info *info);
105 
/**
 * BCMA EROM per-instance state.
 */
struct bcma_erom {
	struct bhnd_erom	 obj;		/**< common erom state; this file casts
						  *  bhnd_erom_t * to struct bcma_erom *,
						  *  so this member must remain first */
	device_t	 	 dev;		/**< parent device, or NULL if none. */
	struct bhnd_erom_io	*eio;		/**< bus I/O callbacks */
	bhnd_size_t	 	 offset;	/**< current read offset */
};

/* Log a printf-style message prefixed with the calling function's name and
 * the current EROM read offset. */
#define	EROM_LOG(erom, fmt, ...)	do {			\
	printf("%s erom[0x%llx]: " fmt, __FUNCTION__,		\
	    (unsigned long long)(erom->offset), ##__VA_ARGS__);	\
} while(0)
120 
121 /** Return the type name for an EROM entry */
122 static const char *
123 bcma_erom_entry_type_name (uint8_t entry)
124 {
125 	switch (BCMA_EROM_GET_ATTR(entry, ENTRY_TYPE)) {
126 	case BCMA_EROM_ENTRY_TYPE_CORE:
127 		return "core";
128 	case BCMA_EROM_ENTRY_TYPE_MPORT:
129 		return "mport";
130 	case BCMA_EROM_ENTRY_TYPE_REGION:
131 		return "region";
132 	default:
133 		return "unknown";
134 	}
135 }
136 
137 /* BCMA implementation of BHND_EROM_INIT() */
138 static int
139 bcma_erom_init(bhnd_erom_t *erom, const struct bhnd_chipid *cid,
140     struct bhnd_erom_io *eio)
141 {
142 	struct bcma_erom	*sc;
143 	bhnd_addr_t		 table_addr;
144 	int			 error;
145 
146 	sc = (struct bcma_erom *)erom;
147 	sc->eio = eio;
148 	sc->offset = 0;
149 
150 	/* Determine erom table address */
151 	if (BHND_ADDR_MAX - BCMA_EROM_TABLE_START < cid->enum_addr)
152 		return (ENXIO); /* would overflow */
153 
154 	table_addr = cid->enum_addr + BCMA_EROM_TABLE_START;
155 
156 	/* Try to map the erom table */
157 	error = bhnd_erom_io_map(sc->eio, table_addr, BCMA_EROM_TABLE_SIZE);
158 	if (error)
159 		return (error);
160 
161 	return (0);
162 }
163 
164 /* BCMA implementation of BHND_EROM_PROBE() */
165 static int
166 bcma_erom_probe(bhnd_erom_class_t *cls, struct bhnd_erom_io *eio,
167     const struct bhnd_chipid *hint, struct bhnd_chipid *cid)
168 {
169 	uint32_t idreg, eromptr;
170 
171 	/* Hints aren't supported; all BCMA devices have a ChipCommon
172 	 * core */
173 	if (hint != NULL)
174 		return (EINVAL);
175 
176 	/* Confirm CHIPC_EROMPTR availability */
177 	idreg = bhnd_erom_io_read(eio, CHIPC_ID, 4);
178 	if (!BHND_CHIPTYPE_HAS_EROM(CHIPC_GET_BITS(idreg, CHIPC_ID_BUS)))
179 		return (ENXIO);
180 
181 	/* Fetch EROM address */
182 	eromptr = bhnd_erom_io_read(eio, CHIPC_EROMPTR, 4);
183 
184 	/* Parse chip identifier */
185 	*cid = bhnd_parse_chipid(idreg, eromptr);
186 
187 	/* Verify chip type */
188 	switch (cid->chip_type) {
189 		case BHND_CHIPTYPE_BCMA:
190 			return (BUS_PROBE_DEFAULT);
191 
192 		case BHND_CHIPTYPE_BCMA_ALT:
193 		case BHND_CHIPTYPE_UBUS:
194 			return (BUS_PROBE_GENERIC);
195 
196 		default:
197 			return (ENXIO);
198 	}
199 }
200 
201 static void
202 bcma_erom_fini(bhnd_erom_t *erom)
203 {
204 	struct bcma_erom *sc = (struct bcma_erom *)erom;
205 
206 	bhnd_erom_io_fini(sc->eio);
207 }
208 
209 static int
210 bcma_erom_lookup_core(bhnd_erom_t *erom, const struct bhnd_core_match *desc,
211     struct bhnd_core_info *core)
212 {
213 	struct bcma_erom *sc = (struct bcma_erom *)erom;
214 
215 	/* Search for the first matching core */
216 	return (bcma_erom_seek_matching_core(sc, desc, core));
217 }
218 
219 static int
220 bcma_erom_lookup_core_addr(bhnd_erom_t *erom, const struct bhnd_core_match *desc,
221     bhnd_port_type port_type, u_int port_num, u_int region_num,
222     struct bhnd_core_info *core, bhnd_addr_t *addr, bhnd_size_t *size)
223 {
224 	struct bcma_erom	*sc;
225 	struct bcma_erom_core	 ec;
226 	uint32_t		 entry;
227 	uint8_t			 region_port, region_type;
228 	bool			 found;
229 	int			 error;
230 
231 	sc = (struct bcma_erom *)erom;
232 
233 	/* Seek to the first matching core and provide the core info
234 	 * to the caller */
235 	if ((error = bcma_erom_seek_matching_core(sc, desc, core)))
236 		return (error);
237 
238 	if ((error = bcma_erom_parse_core(sc, &ec)))
239 		return (error);
240 
241 	/* Skip master ports */
242 	for (u_long i = 0; i < ec.num_mport; i++) {
243 		if ((error = bcma_erom_skip_mport(sc)))
244 			return (error);
245 	}
246 
247 	/* Seek to the region block for the given port type */
248 	found = false;
249 	while (1) {
250 		bhnd_port_type	p_type;
251 		uint8_t		r_type;
252 
253 		if ((error = bcma_erom_peek32(sc, &entry)))
254 			return (error);
255 
256 		if (!BCMA_EROM_ENTRY_IS(entry, REGION))
257 			return (ENOENT);
258 
259 		/* Expected region type? */
260 		r_type = BCMA_EROM_GET_ATTR(entry, REGION_TYPE);
261 		error = bcma_erom_region_to_port_type(sc, r_type, &p_type);
262 		if (error)
263 			return (error);
264 
265 		if (p_type == port_type) {
266 			found = true;
267 			break;
268 		}
269 
270 		/* Skip to next entry */
271 		if ((error = bcma_erom_skip_sport_region(sc)))
272 			return (error);
273 	}
274 
275 	if (!found)
276 		return (ENOENT);
277 
278 	/* Found the appropriate port type block; now find the region records
279 	 * for the given port number */
280 	found = false;
281 	for (u_int i = 0; i <= port_num; i++) {
282 		bhnd_port_type	p_type;
283 
284 		if ((error = bcma_erom_peek32(sc, &entry)))
285 			return (error);
286 
287 		if (!BCMA_EROM_ENTRY_IS(entry, REGION))
288 			return (ENOENT);
289 
290 		/* Fetch the type/port of the first region entry */
291 		region_type = BCMA_EROM_GET_ATTR(entry, REGION_TYPE);
292 		region_port = BCMA_EROM_GET_ATTR(entry, REGION_PORT);
293 
294 		/* Have we found the region entries for the desired port? */
295 		if (i == port_num) {
296 			error = bcma_erom_region_to_port_type(sc, region_type,
297 			    &p_type);
298 			if (error)
299 				return (error);
300 
301 			if (p_type == port_type)
302 				found = true;
303 
304 			break;
305 		}
306 
307 		/* Otherwise, seek to next block of region records */
308 		while (1) {
309 			uint8_t	next_type, next_port;
310 
311 			if ((error = bcma_erom_skip_sport_region(sc)))
312 				return (error);
313 
314 			if ((error = bcma_erom_peek32(sc, &entry)))
315 				return (error);
316 
317 			if (!BCMA_EROM_ENTRY_IS(entry, REGION))
318 				return (ENOENT);
319 
320 			next_type = BCMA_EROM_GET_ATTR(entry, REGION_TYPE);
321 			next_port = BCMA_EROM_GET_ATTR(entry, REGION_PORT);
322 
323 			if (next_type != region_type ||
324 			    next_port != region_port)
325 				break;
326 		}
327 	}
328 
329 	if (!found)
330 		return (ENOENT);
331 
332 	/* Finally, search for the requested region number */
333 	for (u_int i = 0; i <= region_num; i++) {
334 		struct bcma_erom_sport_region	region;
335 		uint8_t				next_port, next_type;
336 
337 		if ((error = bcma_erom_peek32(sc, &entry)))
338 			return (error);
339 
340 		if (!BCMA_EROM_ENTRY_IS(entry, REGION))
341 			return (ENOENT);
342 
343 		/* Check for the end of the region block */
344 		next_type = BCMA_EROM_GET_ATTR(entry, REGION_TYPE);
345 		next_port = BCMA_EROM_GET_ATTR(entry, REGION_PORT);
346 
347 		if (next_type != region_type ||
348 		    next_port != region_port)
349 			break;
350 
351 		/* Parse the region */
352 		if ((error = bcma_erom_parse_sport_region(sc, &region)))
353 			return (error);
354 
355 		/* Is this our target region_num? */
356 		if (i == region_num) {
357 			/* Found */
358 			*addr = region.base_addr;
359 			*size = region.size;
360 			return (0);
361 		}
362 	}
363 
364 	/* Not found */
365 	return (ENOENT);
366 };
367 
368 static int
369 bcma_erom_get_core_table(bhnd_erom_t *erom, struct bhnd_core_info **cores,
370     u_int *num_cores)
371 {
372 	struct bcma_erom	*sc;
373 	struct bhnd_core_info	*buffer;
374 	bus_size_t		 initial_offset;
375 	u_int			 count;
376 	int			 error;
377 
378 	sc = (struct bcma_erom *)erom;
379 
380 	buffer = NULL;
381 	initial_offset = bcma_erom_tell(sc);
382 
383 	/* Determine the core count */
384 	bcma_erom_reset(sc);
385 	for (count = 0, error = 0; !error; count++) {
386 		struct bcma_erom_core core;
387 
388 		/* Seek to the first readable core entry */
389 		error = bcma_erom_seek_next(sc, BCMA_EROM_ENTRY_TYPE_CORE);
390 		if (error == ENOENT)
391 			break;
392 		else if (error)
393 			goto cleanup;
394 
395 		/* Read past the core descriptor */
396 		if ((error = bcma_erom_parse_core(sc, &core)))
397 			goto cleanup;
398 	}
399 
400 	/* Allocate our output buffer */
401 	buffer = malloc(sizeof(struct bhnd_core_info) * count, M_BHND,
402 	    M_NOWAIT);
403 	if (buffer == NULL) {
404 		error = ENOMEM;
405 		goto cleanup;
406 	}
407 
408 	/* Parse all core descriptors */
409 	bcma_erom_reset(sc);
410 	for (u_int i = 0; i < count; i++) {
411 		struct bcma_erom_core	core;
412 		int			unit;
413 
414 		/* Parse the core */
415 		error = bcma_erom_seek_next(sc, BCMA_EROM_ENTRY_TYPE_CORE);
416 		if (error)
417 			goto cleanup;
418 
419 		error = bcma_erom_parse_core(sc, &core);
420 		if (error)
421 			goto cleanup;
422 
423 		/* Determine the unit number */
424 		unit = 0;
425 		for (u_int j = 0; j < i; j++) {
426 			if (buffer[i].vendor == buffer[j].vendor &&
427 			    buffer[i].device == buffer[j].device)
428 				unit++;
429 		}
430 
431 		/* Convert to a bhnd info record */
432 		bcma_erom_to_core_info(&core, i, unit, &buffer[i]);
433 	}
434 
435 cleanup:
436 	if (!error) {
437 		*cores = buffer;
438 		*num_cores = count;
439 	} else {
440 		if (buffer != NULL)
441 			free(buffer, M_BHND);
442 	}
443 
444 	/* Restore the initial position */
445 	bcma_erom_seek(sc, initial_offset);
446 	return (error);
447 }
448 
/**
 * Free a core table previously returned by bcma_erom_get_core_table().
 *
 * @param erom EROM instance state (unused).
 * @param cores The core table to be deallocated.
 */
static void
bcma_erom_free_core_table(bhnd_erom_t *erom, struct bhnd_core_info *cores)
{
	free(cores, M_BHND);
}
454 
/**
 * Return the current read position.
 *
 * @param erom EROM read state.
 * @return The current read offset, in bytes, relative to the start of the
 * mapped EROM table.
 */
static bus_size_t
bcma_erom_tell(struct bcma_erom *erom)
{
	return (erom->offset);
}
463 
/**
 * Seek to an absolute read position.
 *
 * @param erom EROM read state.
 * @param offset New read offset, in bytes, relative to the start of the
 * mapped EROM table. No bounds checking is performed here; out-of-range
 * offsets are rejected by the next read.
 */
static void
bcma_erom_seek(struct bcma_erom *erom, bus_size_t offset)
{
	erom->offset = offset;
}
472 
/**
 * Read a 32-bit entry value from the EROM table without advancing the
 * read position.
 *
 * @param erom EROM read state.
 * @param entry Will contain the read result on success.
 * @retval 0 success
 * @retval EINVAL The read offset lies at or beyond the end of the mapped
 * EROM table; a well-formed table should have terminated with an EOF entry
 * before this point.
 */
static int
bcma_erom_peek32(struct bcma_erom *erom, uint32_t *entry)
{
	if (erom->offset >= (BCMA_EROM_TABLE_SIZE - sizeof(uint32_t))) {
		EROM_LOG(erom, "BCMA EROM table missing terminating EOF\n");
		return (EINVAL);
	}

	*entry = bhnd_erom_io_read(erom->eio, erom->offset, 4);
	return (0);
}
494 
495 /**
496  * Read a 32-bit entry value from the EROM table.
497  *
498  * @param erom EROM read state.
499  * @param entry Will contain the read result on success.
500  * @retval 0 success
501  * @retval ENOENT The end of the EROM table was reached.
502  * @retval non-zero The read could not be completed.
503  */
504 static int
505 bcma_erom_read32(struct bcma_erom *erom, uint32_t *entry)
506 {
507 	int error;
508 
509 	if ((error = bcma_erom_peek32(erom, entry)) == 0)
510 		erom->offset += 4;
511 
512 	return (error);
513 }
514 
515 /**
516  * Read and discard 32-bit entry value from the EROM table.
517  *
518  * @param erom EROM read state.
519  * @retval 0 success
520  * @retval ENOENT The end of the EROM table was reached.
521  * @retval non-zero The read could not be completed.
522  */
523 static int
524 bcma_erom_skip32(struct bcma_erom *erom)
525 {
526 	uint32_t	entry;
527 
528 	return bcma_erom_read32(erom, &entry);
529 }
530 
531 /**
532  * Read and discard a core descriptor from the EROM table.
533  *
534  * @param erom EROM read state.
535  * @retval 0 success
536  * @retval ENOENT The end of the EROM table was reached.
537  * @retval non-zero The read could not be completed.
538  */
539 static int
540 bcma_erom_skip_core(struct bcma_erom *erom)
541 {
542 	struct bcma_erom_core core;
543 	return (bcma_erom_parse_core(erom, &core));
544 }
545 
546 /**
547  * Read and discard a master port descriptor from the EROM table.
548  *
549  * @param erom EROM read state.
550  * @retval 0 success
551  * @retval ENOENT The end of the EROM table was reached.
552  * @retval non-zero The read could not be completed.
553  */
554 static int
555 bcma_erom_skip_mport(struct bcma_erom *erom)
556 {
557 	struct bcma_erom_mport mp;
558 	return (bcma_erom_parse_mport(erom, &mp));
559 }
560 
561 /**
562  * Read and discard a port region descriptor from the EROM table.
563  *
564  * @param erom EROM read state.
565  * @retval 0 success
566  * @retval ENOENT The end of the EROM table was reached.
567  * @retval non-zero The read could not be completed.
568  */
569 static int
570 bcma_erom_skip_sport_region(struct bcma_erom *erom)
571 {
572 	struct bcma_erom_sport_region r;
573 	return (bcma_erom_parse_sport_region(erom, &r));
574 }
575 
576 /**
577  * Seek to the next entry matching the given EROM entry type.
578  *
579  * @param erom EROM read state.
580  * @param etype  One of BCMA_EROM_ENTRY_TYPE_CORE,
581  * BCMA_EROM_ENTRY_TYPE_MPORT, or BCMA_EROM_ENTRY_TYPE_REGION.
582  * @retval 0 success
583  * @retval ENOENT The end of the EROM table was reached.
584  * @retval non-zero Reading or parsing the descriptor failed.
585  */
586 static int
587 bcma_erom_seek_next(struct bcma_erom *erom, uint8_t etype)
588 {
589 	uint32_t			entry;
590 	int				error;
591 
592 	/* Iterate until we hit an entry matching the requested type. */
593 	while (!(error = bcma_erom_peek32(erom, &entry))) {
594 		/* Handle EOF */
595 		if (entry == BCMA_EROM_TABLE_EOF)
596 			return (ENOENT);
597 
598 		/* Invalid entry */
599 		if (!BCMA_EROM_GET_ATTR(entry, ENTRY_ISVALID))
600 			return (EINVAL);
601 
602 		/* Entry type matches? */
603 		if (BCMA_EROM_GET_ATTR(entry, ENTRY_TYPE) == etype)
604 			return (0);
605 
606 		/* Skip non-matching entry types. */
607 		switch (BCMA_EROM_GET_ATTR(entry, ENTRY_TYPE)) {
608 		case BCMA_EROM_ENTRY_TYPE_CORE:
609 			if ((error = bcma_erom_skip_core(erom)))
610 				return (error);
611 
612 			break;
613 
614 		case BCMA_EROM_ENTRY_TYPE_MPORT:
615 			if ((error = bcma_erom_skip_mport(erom)))
616 				return (error);
617 
618 			break;
619 
620 		case BCMA_EROM_ENTRY_TYPE_REGION:
621 			if ((error = bcma_erom_skip_sport_region(erom)))
622 				return (error);
623 			break;
624 
625 		default:
626 			/* Unknown entry type! */
627 			return (EINVAL);
628 		}
629 	}
630 
631 	return (error);
632 }
633 
/**
 * Return the read position to the start of the EROM table.
 *
 * @param erom EROM read state.
 */
static void
bcma_erom_reset(struct bcma_erom *erom)
{
	erom->offset = 0;
}
644 
/**
 * Seek to the first core entry matching @p desc, leaving the read position
 * at the start of the matched core's descriptor.
 *
 * @param sc EROM read state.
 * @param desc The core match descriptor.
 * @param[out] core On success, the matching core info. If the core info
 * is not desired, a NULL pointer may be provided.
 * @retval 0 success
 * @retval ENOENT The end of the EROM table was reached before a matching
 * core was found.
 * @retval non-zero Reading or parsing failed.
 */
static int
bcma_erom_seek_matching_core(struct bcma_erom *sc,
    const struct bhnd_core_match *desc, struct bhnd_core_info *core)
{
	struct bhnd_core_match	 imatch;
	bus_size_t		 core_offset, next_offset;
	int			 error;

	/* Seek to table start. */
	bcma_erom_reset(sc);

	/* We can't determine a core's unit number during the initial scan,
	 * so first match with the unit constraint relaxed to zero. */
	imatch = *desc;
	imatch.m.match.core_unit = 0;

	/* Locate the first matching core */
	for (u_int i = 0; i < UINT_MAX; i++) {
		struct bcma_erom_core	ec;
		struct bhnd_core_info	ci;

		/* Seek to the next core */
		error = bcma_erom_seek_next(sc, BCMA_EROM_ENTRY_TYPE_CORE);
		if (error)
			return (error);

		/* Save the core offset */
		core_offset = bcma_erom_tell(sc);

		/* Parse the core */
		if ((error = bcma_erom_parse_core(sc, &ec)))
			return (error);

		bcma_erom_to_core_info(&ec, i, 0, &ci);

		/* Check for initial match */
		if (!bhnd_core_matches(&ci, &imatch))
			continue;

		/* Re-scan preceding cores to determine the unit number. */
		next_offset = bcma_erom_tell(sc);
		bcma_erom_reset(sc);
		for (u_int j = 0; j < i; j++) {
			/* Parse the core */
			error = bcma_erom_seek_next(sc,
			    BCMA_EROM_ENTRY_TYPE_CORE);
			if (error)
				return (error);

			if ((error = bcma_erom_parse_core(sc, &ec)))
				return (error);

			/* Bump the unit number? */
			if (ec.vendor == ci.vendor && ec.device == ci.device)
				ci.unit++;
		}

		/* Check for full match against now-valid unit number */
		if (!bhnd_core_matches(&ci, desc)) {
			/* Reposition to allow reading the next core */
			bcma_erom_seek(sc, next_offset);
			continue;
		}

		/* Found; seek to the core's initial offset and provide
		 * the core info to the caller */
		bcma_erom_seek(sc, core_offset);
		if (core != NULL)
			*core = ci;

		return (0);
	}

	/* Not found, or a parse error occurred */
	return (error);
}
732 
733 /**
734  * Read the next core descriptor from the EROM table.
735  *
736  * @param erom EROM read state.
737  * @param[out] core On success, will be populated with the parsed core
738  * descriptor data.
739  * @retval 0 success
740  * @retval ENOENT The end of the EROM table was reached.
741  * @retval non-zero Reading or parsing the core descriptor failed.
742  */
743 static int
744 bcma_erom_parse_core(struct bcma_erom *erom, struct bcma_erom_core *core)
745 {
746 	uint32_t	entry;
747 	int		error;
748 
749 	/* Parse CoreDescA */
750 	if ((error = bcma_erom_read32(erom, &entry)))
751 		return (error);
752 
753 	/* Handle EOF */
754 	if (entry == BCMA_EROM_TABLE_EOF)
755 		return (ENOENT);
756 
757 	if (!BCMA_EROM_ENTRY_IS(entry, CORE)) {
758 		EROM_LOG(erom, "Unexpected EROM entry 0x%x (type=%s)\n",
759                    entry, bcma_erom_entry_type_name(entry));
760 
761 		return (EINVAL);
762 	}
763 
764 	core->vendor = BCMA_EROM_GET_ATTR(entry, COREA_DESIGNER);
765 	core->device = BCMA_EROM_GET_ATTR(entry, COREA_ID);
766 
767 	/* Parse CoreDescB */
768 	if ((error = bcma_erom_read32(erom, &entry)))
769 		return (error);
770 
771 	if (!BCMA_EROM_ENTRY_IS(entry, CORE)) {
772 		return (EINVAL);
773 	}
774 
775 	core->rev = BCMA_EROM_GET_ATTR(entry, COREB_REV);
776 	core->num_mport = BCMA_EROM_GET_ATTR(entry, COREB_NUM_MP);
777 	core->num_dport = BCMA_EROM_GET_ATTR(entry, COREB_NUM_DP);
778 	core->num_mwrap = BCMA_EROM_GET_ATTR(entry, COREB_NUM_WMP);
779 	core->num_swrap = BCMA_EROM_GET_ATTR(entry, COREB_NUM_WSP);
780 
781 	return (0);
782 }
783 
784 /**
785  * Read the next master port descriptor from the EROM table.
786  *
787  * @param erom EROM read state.
788  * @param[out] mport On success, will be populated with the parsed
789  * descriptor data.
790  * @retval 0 success
791  * @retval non-zero Reading or parsing the descriptor failed.
792  */
793 static int
794 bcma_erom_parse_mport(struct bcma_erom *erom, struct bcma_erom_mport *mport)
795 {
796 	uint32_t	entry;
797 	int		error;
798 
799 	/* Parse the master port descriptor */
800 	if ((error = bcma_erom_read32(erom, &entry)))
801 		return (error);
802 
803 	if (!BCMA_EROM_ENTRY_IS(entry, MPORT))
804 		return (EINVAL);
805 
806 	mport->port_vid = BCMA_EROM_GET_ATTR(entry, MPORT_ID);
807 	mport->port_num = BCMA_EROM_GET_ATTR(entry, MPORT_NUM);
808 
809 	return (0);
810 }
811 
/**
 * Read the next slave port region descriptor from the EROM table,
 * consuming one to three 32-bit entries depending on the region's
 * address width and size encoding.
 *
 * @param erom EROM read state.
 * @param[out] region On success, will be populated with the parsed
 * descriptor data.
 * @retval 0 success
 * @retval ENOENT The end of the region descriptor table was reached.
 * @retval non-zero Reading or parsing the descriptor failed.
 */
static int
bcma_erom_parse_sport_region(struct bcma_erom *erom,
    struct bcma_erom_sport_region *region)
{
	uint32_t	entry;
	uint8_t		size_type;
	int		error;

	/* Peek at the region descriptor */
	if (bcma_erom_peek32(erom, &entry))
		return (EINVAL);

	/* A non-region entry signals the end of the region table */
	if (!BCMA_EROM_ENTRY_IS(entry, REGION)) {
		return (ENOENT);
	} else {
		/* Consume the entry we just peeked at */
		bcma_erom_skip32(erom);
	}

	region->base_addr = BCMA_EROM_GET_ATTR(entry, REGION_BASE);
	region->region_type = BCMA_EROM_GET_ATTR(entry, REGION_TYPE);
	region->region_port = BCMA_EROM_GET_ATTR(entry, REGION_PORT);
	size_type = BCMA_EROM_GET_ATTR(entry, REGION_SIZE);

	/* If region address is 64-bit, fetch the high bits. */
	if (BCMA_EROM_GET_ATTR(entry, REGION_64BIT)) {
		if ((error = bcma_erom_read32(erom, &entry)))
			return (error);

		region->base_addr |= ((bhnd_addr_t) entry << 32);
	}

	/* Parse the region size; it's either encoded as the binary logarithm
	 * of the number of 4K pages (i.e. log2 n), or it's encoded as a
	 * 32-bit/64-bit literal value directly following the current entry. */
	if (size_type == BCMA_EROM_REGION_SIZE_OTHER) {
		if ((error = bcma_erom_read32(erom, &entry)))
			return (error);

		region->size = BCMA_EROM_GET_ATTR(entry, RSIZE_VAL);

		if (BCMA_EROM_GET_ATTR(entry, RSIZE_64BIT)) {
			if ((error = bcma_erom_read32(erom, &entry)))
				return (error);
			region->size |= ((bhnd_size_t) entry << 32);
		}
	} else {
		region->size = BCMA_EROM_REGION_SIZE_BASE << size_type;
	}

	/* Verify that addr+size does not overflow. */
	if (region->size != 0 &&
	    BHND_ADDR_MAX - (region->size - 1) < region->base_addr)
	{
		/* NOTE(review): region_type is a REGION_TYPE attribute
		 * value, not a full type-tagged entry, so
		 * bcma_erom_entry_type_name() may not decode it as
		 * intended here — verify against bcma_eromreg.h. */
		EROM_LOG(erom, "%s%u: invalid address map %llx:%llx\n",
		    bcma_erom_entry_type_name(region->region_type),
		    region->region_port,
		    (unsigned long long) region->base_addr,
		    (unsigned long long) region->size);

		return (EINVAL);
	}

	return (0);
}
887 
/**
 * Convert a bcma_erom_core record to its bhnd_core_info representation.
 *
 * Only the fields listed below are written; any other members of @p info
 * are left untouched.
 *
 * @param core EROM core record to convert.
 * @param core_idx The core index of @p core.
 * @param core_unit The core unit of @p core.
 * @param[out] info The populated bhnd_core_info representation.
 */
static void
bcma_erom_to_core_info(const struct bcma_erom_core *core, u_int core_idx,
    int core_unit, struct bhnd_core_info *info)
{
	info->vendor = core->vendor;
	info->device = core->device;
	info->hwrev = core->rev;
	info->core_idx = core_idx;
	info->unit = core_unit;
}
906 
907 /**
908  * Map an EROM region type to its corresponding port type.
909  *
910  * @param region_type Region type value.
911  * @param[out] port_type On success, the corresponding port type.
912  */
913 static int
914 bcma_erom_region_to_port_type(struct bcma_erom *erom, uint8_t region_type,
915     bhnd_port_type *port_type)
916 {
917 	switch (region_type) {
918 	case BCMA_EROM_REGION_TYPE_DEVICE:
919 		*port_type = BHND_PORT_DEVICE;
920 		return (0);
921 	case BCMA_EROM_REGION_TYPE_BRIDGE:
922 		*port_type = BHND_PORT_BRIDGE;
923 		return (0);
924 	case BCMA_EROM_REGION_TYPE_MWRAP:
925 	case BCMA_EROM_REGION_TYPE_SWRAP:
926 		*port_type = BHND_PORT_AGENT;
927 		return (0);
928 	default:
929 		EROM_LOG(erom, "unsupported region type %hhx\n",
930 			region_type);
931 		return (EINVAL);
932 	}
933 }
934 
/**
 * Register all MMIO region descriptors for the given slave port.
 *
 * On success, a new port descriptor containing the parsed region maps is
 * appended to @p corecfg's port list for the mapped port type. The EROM
 * read position is advanced past all consumed region entries; the first
 * non-matching entry is not consumed.
 *
 * @param erom EROM read state.
 * @param corecfg Core info to be populated with the scanned port regions.
 * @param port_num Port index for which regions will be parsed.
 * @param region_type The region type to be parsed.
 * @retval 0 success
 * @retval ENOMEM Allocation of a port or region map descriptor failed.
 * @retval non-zero Reading or parsing a region descriptor failed.
 */
static int
bcma_erom_corecfg_fill_port_regions(struct bcma_erom *erom,
    struct bcma_corecfg *corecfg, bcma_pid_t port_num,
    uint8_t region_type)
{
	struct bcma_sport	*sport;
	struct bcma_sport_list	*sports;
	bus_size_t		 entry_offset;
	int			 error;
	bhnd_port_type		 port_type;

	error = 0;

	/* Determine the port type for this region type. */
	error = bcma_erom_region_to_port_type(erom, region_type, &port_type);
	if (error)
		return (error);

	/* Fetch the list to be populated */
	sports = bcma_corecfg_get_port_list(corecfg, port_type);

	/* Allocate a new port descriptor */
	sport = bcma_alloc_sport(port_num, port_type);
	if (sport == NULL)
		return (ENOMEM);

	/* Read all address regions defined for this port */
	for (bcma_rmid_t region_num = 0;; region_num++) {
		struct bcma_map			*map;
		struct bcma_erom_sport_region	 spr;

		/* No valid port definition should come anywhere near
		 * BCMA_RMID_MAX. */
		if (region_num == BCMA_RMID_MAX) {
			EROM_LOG(erom, "core%u %s%u: region count reached "
			    "upper limit of %u\n",
			    corecfg->core_info.core_idx,
			    bhnd_port_type_name(port_type),
			    port_num, BCMA_RMID_MAX);

			error = EINVAL;
			goto cleanup;
		}

		/* Parse the next region entry. */
		entry_offset = bcma_erom_tell(erom);
		error = bcma_erom_parse_sport_region(erom, &spr);
		if (error && error != ENOENT) {
			EROM_LOG(erom, "core%u %s%u.%u: invalid slave port "
			    "address region\n",
			    corecfg->core_info.core_idx,
			    bhnd_port_type_name(port_type),
			    port_num, region_num);
			goto cleanup;
		}

		/* ENOENT signals no further region entries */
		if (error == ENOENT) {
			/* No further entries */
			error = 0;
			break;
		}

		/* A region or type mismatch also signals no further region
		 * entries */
		if (spr.region_port != port_num ||
		    spr.region_type != region_type)
		{
			/* We don't want to consume this entry; rewind to
			 * where it begins */
			bcma_erom_seek(erom, entry_offset);

			error = 0;
			goto cleanup;
		}

		/*
		 * Create the map entry.
		 */
		map = malloc(sizeof(struct bcma_map), M_BHND, M_NOWAIT);
		if (map == NULL) {
			error = ENOMEM;
			goto cleanup;
		}

		map->m_region_num = region_num;
		map->m_base = spr.base_addr;
		map->m_size = spr.size;
		map->m_rid = -1;

		/* Add the region map to the port */
		STAILQ_INSERT_TAIL(&sport->sp_maps, map, m_link);
		sport->sp_num_maps++;
	}

cleanup:
	/* Append the new port descriptor on success, or deallocate the
	 * partially parsed descriptor on failure. */
	if (error == 0) {
		STAILQ_INSERT_TAIL(sports, sport, sp_link);
	} else if (sport != NULL) {
		bcma_free_sport(sport);
	}

	return error;
}
1050 
1051 /**
1052  * Parse the next core entry from the EROM table and produce a bcma_corecfg
1053  * to be owned by the caller.
1054  *
1055  * @param erom A bcma EROM instance.
1056  * @param[out] result On success, the core's device info. The caller inherits
1057  * ownership of this allocation.
1058  *
1059  * @return If successful, returns 0. If the end of the EROM table is hit,
1060  * ENOENT will be returned. On error, returns a non-zero error value.
1061  */
1062 int
1063 bcma_erom_next_corecfg(struct bcma_erom *erom, struct bcma_corecfg **result)
1064 {
1065 	struct bcma_corecfg	*cfg;
1066 	struct bcma_erom_core	 core;
1067 	uint8_t			 first_region_type;
1068 	bus_size_t		 initial_offset;
1069 	u_int			 core_index;
1070 	int			 core_unit;
1071 	int			 error;
1072 
1073 	cfg = NULL;
1074 	initial_offset = bcma_erom_tell(erom);
1075 
1076 	/* Parse the next core entry */
1077 	if ((error = bcma_erom_parse_core(erom, &core)))
1078 		return (error);
1079 
1080 	/* Determine the core's index and unit numbers */
1081 	bcma_erom_reset(erom);
1082 	core_unit = 0;
1083 	core_index = 0;
1084 	for (; bcma_erom_tell(erom) != initial_offset; core_index++) {
1085 		struct bcma_erom_core prev_core;
1086 
1087 		/* Parse next core */
1088 		error = bcma_erom_seek_next(erom, BCMA_EROM_ENTRY_TYPE_CORE);
1089 		if (error)
1090 			return (error);
1091 
1092 		if ((error = bcma_erom_parse_core(erom, &prev_core)))
1093 			return (error);
1094 
1095 		/* Is earlier unit? */
1096 		if (core.vendor == prev_core.vendor &&
1097 		    core.device == prev_core.device)
1098 		{
1099 			core_unit++;
1100 		}
1101 
1102 		/* Seek to next core */
1103 		error = bcma_erom_seek_next(erom, BCMA_EROM_ENTRY_TYPE_CORE);
1104 		if (error)
1105 			return (error);
1106 	}
1107 
1108 	/* We already parsed the core descriptor */
1109 	if ((error = bcma_erom_skip_core(erom)))
1110 		return (error);
1111 
1112 	/* Allocate our corecfg */
1113 	cfg = bcma_alloc_corecfg(core_index, core_unit, core.vendor,
1114 	    core.device, core.rev);
1115 	if (cfg == NULL)
1116 		return (ENOMEM);
1117 
1118 	/* These are 5-bit values in the EROM table, and should never be able
1119 	 * to overflow BCMA_PID_MAX. */
1120 	KASSERT(core.num_mport <= BCMA_PID_MAX, ("unsupported mport count"));
1121 	KASSERT(core.num_dport <= BCMA_PID_MAX, ("unsupported dport count"));
1122 	KASSERT(core.num_mwrap + core.num_swrap <= BCMA_PID_MAX,
1123 	    ("unsupported wport count"));
1124 
1125 	if (bootverbose) {
1126 		EROM_LOG(erom,
1127 		    "core%u: %s %s (cid=%hx, rev=%hu, unit=%d)\n",
1128 		    core_index,
1129 		    bhnd_vendor_name(core.vendor),
1130 		    bhnd_find_core_name(core.vendor, core.device),
1131 		    core.device, core.rev, core_unit);
1132 	}
1133 
1134 	cfg->num_master_ports = core.num_mport;
1135 	cfg->num_dev_ports = 0;		/* determined below */
1136 	cfg->num_bridge_ports = 0;	/* determined blow */
1137 	cfg->num_wrapper_ports = core.num_mwrap + core.num_swrap;
1138 
1139 	/* Parse Master Port Descriptors */
1140 	for (uint8_t i = 0; i < core.num_mport; i++) {
1141 		struct bcma_mport	*mport;
1142 		struct bcma_erom_mport	 mpd;
1143 
1144 		/* Parse the master port descriptor */
1145 		error = bcma_erom_parse_mport(erom, &mpd);
1146 		if (error)
1147 			goto failed;
1148 
1149 		/* Initialize a new bus mport structure */
1150 		mport = malloc(sizeof(struct bcma_mport), M_BHND, M_NOWAIT);
1151 		if (mport == NULL) {
1152 			error = ENOMEM;
1153 			goto failed;
1154 		}
1155 
1156 		mport->mp_vid = mpd.port_vid;
1157 		mport->mp_num = mpd.port_num;
1158 
1159 		/* Update dinfo */
1160 		STAILQ_INSERT_TAIL(&cfg->master_ports, mport, mp_link);
1161 	}
1162 
1163 
1164 	/*
1165 	 * Determine whether this is a bridge device; if so, we can
1166 	 * expect the first sequence of address region descriptors to
1167 	 * be of EROM_REGION_TYPE_BRIDGE instead of
1168 	 * BCMA_EROM_REGION_TYPE_DEVICE.
1169 	 *
1170 	 * It's unclear whether this is the correct mechanism by which we
1171 	 * should detect/handle bridge devices, but this approach matches
1172 	 * that of (some of) Broadcom's published drivers.
1173 	 */
1174 	if (core.num_dport > 0) {
1175 		uint32_t entry;
1176 
1177 		if ((error = bcma_erom_peek32(erom, &entry)))
1178 			goto failed;
1179 
1180 		if (BCMA_EROM_ENTRY_IS(entry, REGION) &&
1181 		    BCMA_EROM_GET_ATTR(entry, REGION_TYPE) == BCMA_EROM_REGION_TYPE_BRIDGE)
1182 		{
1183 			first_region_type = BCMA_EROM_REGION_TYPE_BRIDGE;
1184 			cfg->num_dev_ports = 0;
1185 			cfg->num_bridge_ports = core.num_dport;
1186 		} else {
1187 			first_region_type = BCMA_EROM_REGION_TYPE_DEVICE;
1188 			cfg->num_dev_ports = core.num_dport;
1189 			cfg->num_bridge_ports = 0;
1190 		}
1191 	}
1192 
1193 	/* Device/bridge port descriptors */
1194 	for (uint8_t sp_num = 0; sp_num < core.num_dport; sp_num++) {
1195 		error = bcma_erom_corecfg_fill_port_regions(erom, cfg, sp_num,
1196 		    first_region_type);
1197 
1198 		if (error)
1199 			goto failed;
1200 	}
1201 
1202 	/* Wrapper (aka device management) descriptors (for master ports). */
1203 	for (uint8_t sp_num = 0; sp_num < core.num_mwrap; sp_num++) {
1204 		error = bcma_erom_corecfg_fill_port_regions(erom, cfg, sp_num,
1205 		    BCMA_EROM_REGION_TYPE_MWRAP);
1206 
1207 		if (error)
1208 			goto failed;
1209 	}
1210 
1211 
1212 	/* Wrapper (aka device management) descriptors (for slave ports). */
1213 	for (uint8_t i = 0; i < core.num_swrap; i++) {
1214 		/* Slave wrapper ports are not numbered distinctly from master
1215 		 * wrapper ports. */
1216 
1217 		/*
1218 		 * Broadcom DDR1/DDR2 Memory Controller
1219 		 * (cid=82e, rev=1, unit=0, d/mw/sw = 2/0/1 ) ->
1220 		 * bhnd0: erom[0xdc]: core6 agent0.0: mismatch got: 0x1 (0x2)
1221 		 *
1222 		 * ARM BP135 AMBA3 AXI to APB Bridge
1223 		 * (cid=135, rev=0, unit=0, d/mw/sw = 1/0/1 ) ->
1224 		 * bhnd0: erom[0x124]: core9 agent1.0: mismatch got: 0x0 (0x2)
1225 		 *
1226 		 * core.num_mwrap
1227 		 * ===>
1228 		 * (core.num_mwrap > 0) ?
1229 		 *           core.num_mwrap :
1230 		 *           ((core.vendor == BHND_MFGID_BCM) ? 1 : 0)
1231 		 */
1232 		uint8_t sp_num;
1233 		sp_num = (core.num_mwrap > 0) ?
1234 				core.num_mwrap :
1235 				((core.vendor == BHND_MFGID_BCM) ? 1 : 0) + i;
1236 		error = bcma_erom_corecfg_fill_port_regions(erom, cfg, sp_num,
1237 		    BCMA_EROM_REGION_TYPE_SWRAP);
1238 
1239 		if (error)
1240 			goto failed;
1241 	}
1242 
1243 	/*
1244 	 * Seek to the next core entry (if any), skipping any dangling/invalid
1245 	 * region entries.
1246 	 *
1247 	 * On the BCM4706, the EROM entry for the memory controller core
1248 	 * (0x4bf/0x52E) contains a dangling/unused slave wrapper port region
1249 	 * descriptor.
1250 	 */
1251 	if ((error = bcma_erom_seek_next(erom, BCMA_EROM_ENTRY_TYPE_CORE))) {
1252 		if (error != ENOENT)
1253 			goto failed;
1254 	}
1255 
1256 	*result = cfg;
1257 	return (0);
1258 
1259 failed:
1260 	if (cfg != NULL)
1261 		bcma_free_corecfg(cfg);
1262 
1263 	return error;
1264 }
1265 
1266 static int
1267 bcma_erom_dump(bhnd_erom_t *erom)
1268 {
1269 	struct bcma_erom	*sc;
1270 	uint32_t		entry;
1271 	int			error;
1272 
1273 	sc = (struct bcma_erom *)erom;
1274 
1275 	bcma_erom_reset(sc);
1276 
1277 	while (!(error = bcma_erom_read32(sc, &entry))) {
1278 		/* Handle EOF */
1279 		if (entry == BCMA_EROM_TABLE_EOF) {
1280 			EROM_LOG(sc, "EOF\n");
1281 			return (0);
1282 		}
1283 
1284 		/* Invalid entry */
1285 		if (!BCMA_EROM_GET_ATTR(entry, ENTRY_ISVALID)) {
1286 			EROM_LOG(sc, "invalid EROM entry %#x\n", entry);
1287 			return (EINVAL);
1288 		}
1289 
1290 		switch (BCMA_EROM_GET_ATTR(entry, ENTRY_TYPE)) {
1291 		case BCMA_EROM_ENTRY_TYPE_CORE: {
1292 			/* CoreDescA */
1293 			EROM_LOG(sc, "coreA (0x%x)\n", entry);
1294 			EROM_LOG(sc, "\tdesigner:\t0x%x\n",
1295 			    BCMA_EROM_GET_ATTR(entry, COREA_DESIGNER));
1296 			EROM_LOG(sc, "\tid:\t\t0x%x\n",
1297 			    BCMA_EROM_GET_ATTR(entry, COREA_ID));
1298 			EROM_LOG(sc, "\tclass:\t\t0x%x\n",
1299 			    BCMA_EROM_GET_ATTR(entry, COREA_CLASS));
1300 
1301 			/* CoreDescB */
1302 			if ((error = bcma_erom_read32(sc, &entry))) {
1303 				EROM_LOG(sc, "error reading CoreDescB: %d\n",
1304 				    error);
1305 				return (error);
1306 			}
1307 
1308 			if (!BCMA_EROM_ENTRY_IS(entry, CORE)) {
1309 				EROM_LOG(sc, "invalid core descriptor; found "
1310 				    "unexpected entry %#x (type=%s)\n",
1311 				    entry, bcma_erom_entry_type_name(entry));
1312 				return (EINVAL);
1313 			}
1314 
1315 			EROM_LOG(sc, "coreB (0x%x)\n", entry);
1316 			EROM_LOG(sc, "\trev:\t0x%x\n",
1317 			    BCMA_EROM_GET_ATTR(entry, COREB_REV));
1318 			EROM_LOG(sc, "\tnummp:\t0x%x\n",
1319 			    BCMA_EROM_GET_ATTR(entry, COREB_NUM_MP));
1320 			EROM_LOG(sc, "\tnumdp:\t0x%x\n",
1321 			    BCMA_EROM_GET_ATTR(entry, COREB_NUM_DP));
1322 			EROM_LOG(sc, "\tnumwmp:\t0x%x\n",
1323 			    BCMA_EROM_GET_ATTR(entry, COREB_NUM_WMP));
1324 			EROM_LOG(sc, "\tnumwsp:\t0x%x\n",
1325 			    BCMA_EROM_GET_ATTR(entry, COREB_NUM_WMP));
1326 
1327 			break;
1328 		}
1329 		case BCMA_EROM_ENTRY_TYPE_MPORT:
1330 			EROM_LOG(sc, "\tmport 0x%x\n", entry);
1331 			EROM_LOG(sc, "\t\tport:\t0x%x\n",
1332 			    BCMA_EROM_GET_ATTR(entry, MPORT_NUM));
1333 			EROM_LOG(sc, "\t\tid:\t\t0x%x\n",
1334 			    BCMA_EROM_GET_ATTR(entry, MPORT_ID));
1335 			break;
1336 
1337 		case BCMA_EROM_ENTRY_TYPE_REGION: {
1338 			bool	addr64;
1339 			uint8_t	size_type;
1340 
1341 			addr64 = (BCMA_EROM_GET_ATTR(entry, REGION_64BIT) != 0);
1342 			size_type = BCMA_EROM_GET_ATTR(entry, REGION_SIZE);
1343 
1344 			EROM_LOG(sc, "\tregion 0x%x:\n", entry);
1345 			EROM_LOG(sc, "\t\t%s:\t0x%x\n",
1346 			    addr64 ? "baselo" : "base",
1347 			    BCMA_EROM_GET_ATTR(entry, REGION_BASE));
1348 			EROM_LOG(sc, "\t\tport:\t0x%x\n",
1349 			    BCMA_EROM_GET_ATTR(entry, REGION_PORT));
1350 			EROM_LOG(sc, "\t\ttype:\t0x%x\n",
1351 			    BCMA_EROM_GET_ATTR(entry, REGION_TYPE));
1352 			EROM_LOG(sc, "\t\tsztype:\t0x%hhx\n", size_type);
1353 
1354 			/* Read the base address high bits */
1355 			if (addr64) {
1356 				if ((error = bcma_erom_read32(sc, &entry))) {
1357 					EROM_LOG(sc, "error reading region "
1358 					    "base address high bits %d\n",
1359 					    error);
1360 					return (error);
1361 				}
1362 
1363 				EROM_LOG(sc, "\t\tbasehi:\t0x%x\n", entry);
1364 			}
1365 
1366 			/* Read extended size descriptor */
1367 			if (size_type == BCMA_EROM_REGION_SIZE_OTHER) {
1368 				bool size64;
1369 
1370 				if ((error = bcma_erom_read32(sc, &entry))) {
1371 					EROM_LOG(sc, "error reading region "
1372 					    "size descriptor %d\n",
1373 					    error);
1374 					return (error);
1375 				}
1376 
1377 				if (BCMA_EROM_GET_ATTR(entry, RSIZE_64BIT))
1378 					size64 = true;
1379 				else
1380 					size64 = false;
1381 
1382 				EROM_LOG(sc, "\t\t%s:\t0x%x\n",
1383 				    size64 ? "sizelo" : "size",
1384 				    BCMA_EROM_GET_ATTR(entry, RSIZE_VAL));
1385 
1386 				if (size64) {
1387 					error = bcma_erom_read32(sc, &entry);
1388 					if (error) {
1389 						EROM_LOG(sc, "error reading "
1390 						    "region size high bits: "
1391 						    "%d\n", error);
1392 						return (error);
1393 					}
1394 
1395 					EROM_LOG(sc, "\t\tsizehi:\t0x%x\n",
1396 					    entry);
1397 				}
1398 			}
1399 			break;
1400 		}
1401 
1402 		default:
1403 			EROM_LOG(sc, "unknown EROM entry 0x%x (type=%s)\n",
1404 			    entry, bcma_erom_entry_type_name(entry));
1405 			return (EINVAL);
1406 		}
1407 	}
1408 
1409 	if (error == ENOENT)
1410 		EROM_LOG(sc, "BCMA EROM table missing terminating EOF\n");
1411 	else if (error)
1412 		EROM_LOG(sc, "EROM read failed: %d\n", error);
1413 
1414 	return (error);
1415 }
1416 
/* bhnd_erom_if method dispatch table for the BCMA EROM parser class */
static kobj_method_t bcma_erom_methods[] = {
	KOBJMETHOD(bhnd_erom_probe,		bcma_erom_probe),
	KOBJMETHOD(bhnd_erom_init,		bcma_erom_init),
	KOBJMETHOD(bhnd_erom_fini,		bcma_erom_fini),
	KOBJMETHOD(bhnd_erom_get_core_table,	bcma_erom_get_core_table),
	KOBJMETHOD(bhnd_erom_free_core_table,	bcma_erom_free_core_table),
	KOBJMETHOD(bhnd_erom_lookup_core,	bcma_erom_lookup_core),
	KOBJMETHOD(bhnd_erom_lookup_core_addr,	bcma_erom_lookup_core_addr),
	KOBJMETHOD(bhnd_erom_dump,		bcma_erom_dump),

	KOBJMETHOD_END
};

/* Register the parser class; instances are sized to hold a struct bcma_erom */
BHND_EROM_DEFINE_CLASS(bcma_erom, bcma_erom_parser, bcma_erom_methods, sizeof(struct bcma_erom));
1431