xref: /freebsd/sys/dev/bhnd/bcma/bcma_erom.c (revision 315ee00f)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Copyright (c) 2015-2017 Landon Fuller <landonf@landonf.org>
5  * Copyright (c) 2017 The FreeBSD Foundation
6  * All rights reserved.
7  *
8  * Portions of this software were developed by Landon Fuller
9  * under sponsorship from the FreeBSD Foundation.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer,
16  *    without modification.
17  * 2. Redistributions in binary form must reproduce at minimum a disclaimer
18  *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
19  *    redistribution must be conditioned upon including a substantially
20  *    similar Disclaimer requirement for further binary redistribution.
21  *
22  * NO WARRANTY
23  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
24  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
25  * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
26  * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
27  * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
28  * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
29  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
30  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
31  * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
32  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
33  * THE POSSIBILITY OF SUCH DAMAGES.
34  */
35 
36 #include <sys/cdefs.h>
37 #include <sys/param.h>
38 #include <sys/bus.h>
39 #include <sys/kernel.h>
40 #include <sys/limits.h>
41 #include <sys/systm.h>
42 
43 #include <machine/bus.h>
44 #include <machine/resource.h>
45 
46 #include <dev/bhnd/bhnd_eromvar.h>
47 
48 #include "bcma_eromreg.h"
49 #include "bcma_eromvar.h"
50 
51 /*
52  * BCMA Enumeration ROM (EROM) Table
53  *
54  * Provides auto-discovery of BCMA cores on Broadcom's HND SoC.
55  *
56  * The EROM core address can be found at BCMA_CC_EROM_ADDR within the
57  * ChipCommon registers. The table itself is composed of 32-bit
58  * type-tagged entries, organized into an array of variable-length
59  * core descriptor records.
60  *
61  * The final core descriptor is followed by a 32-bit BCMA_EROM_TABLE_EOF (0xF)
62  * marker.
63  */
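
/*
 * As an illustrative sketch (layout only; the entry values themselves are
 * parsed by the routines below), a single core descriptor record consists of:
 *
 *	CoreDescA	core designer and part number (COREA_* attributes)
 *	CoreDescB	revision and master/slave/wrapper port counts
 *			(COREB_* attributes)
 *	MPORT x N	one descriptor per master port
 *	REGION x M	address region descriptors for the slave and
 *			wrapper ports
 *
 * The record is followed either by the next core's CoreDescA entry, or by
 * the table's terminating BCMA_EROM_TABLE_EOF marker.
 */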
64 
65 static const char	*bcma_erom_entry_type_name(uint8_t entry);
66 
67 static int		 bcma_erom_read32(struct bcma_erom *erom,
68 			     uint32_t *entry);
69 static int		 bcma_erom_skip32(struct bcma_erom *erom);
70 
71 static int		 bcma_erom_skip_core(struct bcma_erom *erom);
72 static int		 bcma_erom_skip_mport(struct bcma_erom *erom);
73 static int		 bcma_erom_skip_sport_region(struct bcma_erom *erom);
74 
75 static int		 bcma_erom_seek_next(struct bcma_erom *erom,
76 			     uint8_t etype);
77 static int		 bcma_erom_region_to_port_type(struct bcma_erom *erom,
78 			     uint8_t region_type, bhnd_port_type *port_type);
79 
80 static int		 bcma_erom_peek32(struct bcma_erom *erom,
81 			     uint32_t *entry);
82 
83 static bus_size_t	 bcma_erom_tell(struct bcma_erom *erom);
84 static void		 bcma_erom_seek(struct bcma_erom *erom,
85 			     bus_size_t offset);
86 static void		 bcma_erom_reset(struct bcma_erom *erom);
87 
88 static int		 bcma_erom_seek_matching_core(struct bcma_erom *sc,
89 			     const struct bhnd_core_match *desc,
90 			     struct bhnd_core_info *core);
91 
92 static int		 bcma_erom_parse_core(struct bcma_erom *erom,
93 			     struct bcma_erom_core *core);
94 
95 static int		 bcma_erom_parse_mport(struct bcma_erom *erom,
96 			     struct bcma_erom_mport *mport);
97 
98 static int		 bcma_erom_parse_sport_region(struct bcma_erom *erom,
99 			     struct bcma_erom_sport_region *region);
100 
101 static void		 bcma_erom_to_core_info(const struct bcma_erom_core *core,
102 			     u_int core_idx, int core_unit,
103 			     struct bhnd_core_info *info);
104 
105 /**
106  * BCMA EROM per-instance state.
107  */
108 struct bcma_erom {
109 	struct bhnd_erom	 obj;
110 	device_t	 	 dev;		/**< parent device, or NULL if none. */
111 	struct bhnd_erom_io	*eio;		/**< bus I/O callbacks */
112 	bhnd_size_t	 	 offset;	/**< current read offset */
113 };
114 
115 #define	EROM_LOG(erom, fmt, ...)	do {			\
116 	printf("%s erom[0x%llx]: " fmt, __FUNCTION__,		\
117 	    (unsigned long long)(erom->offset), ##__VA_ARGS__);	\
118 } while(0)
119 
120 /** Return the type name for an EROM entry */
121 static const char *
122 bcma_erom_entry_type_name(uint8_t entry)
123 {
124 	switch (BCMA_EROM_GET_ATTR(entry, ENTRY_TYPE)) {
125 	case BCMA_EROM_ENTRY_TYPE_CORE:
126 		return "core";
127 	case BCMA_EROM_ENTRY_TYPE_MPORT:
128 		return "mport";
129 	case BCMA_EROM_ENTRY_TYPE_REGION:
130 		return "region";
131 	default:
132 		return "unknown";
133 	}
134 }
135 
136 /* BCMA implementation of BHND_EROM_INIT() */
137 static int
138 bcma_erom_init(bhnd_erom_t *erom, const struct bhnd_chipid *cid,
139     struct bhnd_erom_io *eio)
140 {
141 	struct bcma_erom	*sc;
142 	bhnd_addr_t		 table_addr;
143 	int			 error;
144 
145 	sc = (struct bcma_erom *)erom;
146 	sc->eio = eio;
147 	sc->offset = 0;
148 
149 	/* Determine erom table address */
150 	if (BHND_ADDR_MAX - BCMA_EROM_TABLE_START < cid->enum_addr)
151 		return (ENXIO); /* would overflow */
152 
153 	table_addr = cid->enum_addr + BCMA_EROM_TABLE_START;
154 
155 	/* Try to map the erom table */
156 	error = bhnd_erom_io_map(sc->eio, table_addr, BCMA_EROM_TABLE_SIZE);
157 	if (error)
158 		return (error);
159 
160 	return (0);
161 }
162 
163 /* BCMA implementation of BHND_EROM_PROBE() */
164 static int
165 bcma_erom_probe(bhnd_erom_class_t *cls, struct bhnd_erom_io *eio,
166     const struct bhnd_chipid *hint, struct bhnd_chipid *cid)
167 {
168 	int error;
169 
170 	/* Hints aren't supported; all BCMA devices have a ChipCommon
171 	 * core */
172 	if (hint != NULL)
173 		return (EINVAL);
174 
175 	/* Read and parse chip identification */
176 	if ((error = bhnd_erom_read_chipid(eio, cid)))
177 		return (error);
178 
179 	/* Verify chip type */
180 	switch (cid->chip_type) {
181 		case BHND_CHIPTYPE_BCMA:
182 			return (BUS_PROBE_DEFAULT);
183 
184 		case BHND_CHIPTYPE_BCMA_ALT:
185 		case BHND_CHIPTYPE_UBUS:
186 			return (BUS_PROBE_GENERIC);
187 
188 		default:
189 			return (ENXIO);
190 	}
191 }
192 
193 static void
194 bcma_erom_fini(bhnd_erom_t *erom)
195 {
196 	struct bcma_erom *sc = (struct bcma_erom *)erom;
197 
198 	bhnd_erom_io_fini(sc->eio);
199 }
200 
201 static int
202 bcma_erom_lookup_core(bhnd_erom_t *erom, const struct bhnd_core_match *desc,
203     struct bhnd_core_info *core)
204 {
205 	struct bcma_erom *sc = (struct bcma_erom *)erom;
206 
207 	/* Search for the first matching core */
208 	return (bcma_erom_seek_matching_core(sc, desc, core));
209 }
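
/*
 * Usage sketch (illustrative only, not part of this driver): a caller holding
 * a bhnd_erom_t could locate the ChipCommon core roughly as follows, assuming
 * the BHND_MATCH_CORE() match-descriptor helper and the generic
 * bhnd_erom_lookup_core() wrapper provided by the bhnd headers:
 *
 *	struct bhnd_core_match	md = {
 *		BHND_MATCH_CORE(BHND_MFGID_BCM, BHND_COREID_CC)
 *	};
 *	struct bhnd_core_info	ci;
 *	int			error;
 *
 *	error = bhnd_erom_lookup_core(erom, &md, &ci);
 */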
210 
211 static int
212 bcma_erom_lookup_core_addr(bhnd_erom_t *erom, const struct bhnd_core_match *desc,
213     bhnd_port_type port_type, u_int port_num, u_int region_num,
214     struct bhnd_core_info *core, bhnd_addr_t *addr, bhnd_size_t *size)
215 {
216 	struct bcma_erom	*sc;
217 	struct bcma_erom_core	 ec;
218 	uint32_t		 entry;
219 	uint8_t			 region_port, region_type;
220 	bool			 found;
221 	int			 error;
222 
223 	sc = (struct bcma_erom *)erom;
224 
225 	/* Seek to the first matching core and provide the core info
226 	 * to the caller */
227 	if ((error = bcma_erom_seek_matching_core(sc, desc, core)))
228 		return (error);
229 
230 	if ((error = bcma_erom_parse_core(sc, &ec)))
231 		return (error);
232 
233 	/* Skip master ports */
234 	for (u_long i = 0; i < ec.num_mport; i++) {
235 		if ((error = bcma_erom_skip_mport(sc)))
236 			return (error);
237 	}
238 
239 	/* Seek to the region block for the given port type */
240 	found = false;
241 	while (1) {
242 		bhnd_port_type	p_type;
243 		uint8_t		r_type;
244 
245 		if ((error = bcma_erom_peek32(sc, &entry)))
246 			return (error);
247 
248 		if (!BCMA_EROM_ENTRY_IS(entry, REGION))
249 			return (ENOENT);
250 
251 		/* Expected region type? */
252 		r_type = BCMA_EROM_GET_ATTR(entry, REGION_TYPE);
253 		error = bcma_erom_region_to_port_type(sc, r_type, &p_type);
254 		if (error)
255 			return (error);
256 
257 		if (p_type == port_type) {
258 			found = true;
259 			break;
260 		}
261 
262 		/* Skip to next entry */
263 		if ((error = bcma_erom_skip_sport_region(sc)))
264 			return (error);
265 	}
266 
267 	if (!found)
268 		return (ENOENT);
269 
270 	/* Found the appropriate port type block; now find the region records
271 	 * for the given port number */
272 	found = false;
273 	for (u_int i = 0; i <= port_num; i++) {
274 		bhnd_port_type	p_type;
275 
276 		if ((error = bcma_erom_peek32(sc, &entry)))
277 			return (error);
278 
279 		if (!BCMA_EROM_ENTRY_IS(entry, REGION))
280 			return (ENOENT);
281 
282 		/* Fetch the type/port of the first region entry */
283 		region_type = BCMA_EROM_GET_ATTR(entry, REGION_TYPE);
284 		region_port = BCMA_EROM_GET_ATTR(entry, REGION_PORT);
285 
286 		/* Have we found the region entries for the desired port? */
287 		if (i == port_num) {
288 			error = bcma_erom_region_to_port_type(sc, region_type,
289 			    &p_type);
290 			if (error)
291 				return (error);
292 
293 			if (p_type == port_type)
294 				found = true;
295 
296 			break;
297 		}
298 
299 		/* Otherwise, seek to next block of region records */
300 		while (1) {
301 			uint8_t	next_type, next_port;
302 
303 			if ((error = bcma_erom_skip_sport_region(sc)))
304 				return (error);
305 
306 			if ((error = bcma_erom_peek32(sc, &entry)))
307 				return (error);
308 
309 			if (!BCMA_EROM_ENTRY_IS(entry, REGION))
310 				return (ENOENT);
311 
312 			next_type = BCMA_EROM_GET_ATTR(entry, REGION_TYPE);
313 			next_port = BCMA_EROM_GET_ATTR(entry, REGION_PORT);
314 
315 			if (next_type != region_type ||
316 			    next_port != region_port)
317 				break;
318 		}
319 	}
320 
321 	if (!found)
322 		return (ENOENT);
323 
324 	/* Finally, search for the requested region number */
325 	for (u_int i = 0; i <= region_num; i++) {
326 		struct bcma_erom_sport_region	region;
327 		uint8_t				next_port, next_type;
328 
329 		if ((error = bcma_erom_peek32(sc, &entry)))
330 			return (error);
331 
332 		if (!BCMA_EROM_ENTRY_IS(entry, REGION))
333 			return (ENOENT);
334 
335 		/* Check for the end of the region block */
336 		next_type = BCMA_EROM_GET_ATTR(entry, REGION_TYPE);
337 		next_port = BCMA_EROM_GET_ATTR(entry, REGION_PORT);
338 
339 		if (next_type != region_type ||
340 		    next_port != region_port)
341 			break;
342 
343 		/* Parse the region */
344 		if ((error = bcma_erom_parse_sport_region(sc, &region)))
345 			return (error);
346 
347 		/* Is this our target region_num? */
348 		if (i == region_num) {
349 			/* Found */
350 			*addr = region.base_addr;
351 			*size = region.size;
352 			return (0);
353 		}
354 	}
355 
356 	/* Not found */
357 	return (ENOENT);
358 }
359 
360 static int
361 bcma_erom_get_core_table(bhnd_erom_t *erom, struct bhnd_core_info **cores,
362     u_int *num_cores)
363 {
364 	struct bcma_erom	*sc;
365 	struct bhnd_core_info	*buffer;
366 	bus_size_t		 initial_offset;
367 	u_int			 count;
368 	int			 error;
369 
370 	sc = (struct bcma_erom *)erom;
371 
372 	buffer = NULL;
373 	initial_offset = bcma_erom_tell(sc);
374 
375 	/* Determine the core count */
376 	bcma_erom_reset(sc);
377 	for (count = 0, error = 0; !error; count++) {
378 		struct bcma_erom_core core;
379 
380 		/* Seek to the first readable core entry */
381 		error = bcma_erom_seek_next(sc, BCMA_EROM_ENTRY_TYPE_CORE);
382 		if (error == ENOENT)
383 			break;
384 		else if (error)
385 			goto cleanup;
386 
387 		/* Read past the core descriptor */
388 		if ((error = bcma_erom_parse_core(sc, &core)))
389 			goto cleanup;
390 	}
391 
392 	/* Allocate our output buffer */
393 	buffer = mallocarray(count, sizeof(struct bhnd_core_info), M_BHND,
394 	    M_NOWAIT);
395 	if (buffer == NULL) {
396 		error = ENOMEM;
397 		goto cleanup;
398 	}
399 
400 	/* Parse all core descriptors */
401 	bcma_erom_reset(sc);
402 	for (u_int i = 0; i < count; i++) {
403 		struct bcma_erom_core	core;
404 		int			unit;
405 
406 		/* Parse the core */
407 		error = bcma_erom_seek_next(sc, BCMA_EROM_ENTRY_TYPE_CORE);
408 		if (error)
409 			goto cleanup;
410 
411 		error = bcma_erom_parse_core(sc, &core);
412 		if (error)
413 			goto cleanup;
414 
415 		/* Determine the unit number */
416 		unit = 0;
417 		for (u_int j = 0; j < i; j++) {
418 			if (core.vendor == buffer[j].vendor &&
419 			    core.device == buffer[j].device)
420 				unit++;
421 		}
422 
423 		/* Convert to a bhnd info record */
424 		bcma_erom_to_core_info(&core, i, unit, &buffer[i]);
425 	}
426 
427 cleanup:
428 	if (!error) {
429 		*cores = buffer;
430 		*num_cores = count;
431 	} else {
432 		if (buffer != NULL)
433 			free(buffer, M_BHND);
434 	}
435 
436 	/* Restore the initial position */
437 	bcma_erom_seek(sc, initial_offset);
438 	return (error);
439 }
440 
441 static void
442 bcma_erom_free_core_table(bhnd_erom_t *erom, struct bhnd_core_info *cores)
443 {
444 	free(cores, M_BHND);
445 }
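
/*
 * Usage sketch (illustrative only): enumerating and releasing the full core
 * table, assuming the generic bhnd_erom_get_core_table() and
 * bhnd_erom_free_core_table() wrappers provided by the bhnd headers:
 *
 *	struct bhnd_core_info	*cores;
 *	u_int			 num_cores;
 *	int			 error;
 *
 *	if ((error = bhnd_erom_get_core_table(erom, &cores, &num_cores)))
 *		return (error);
 *
 *	... use cores[0 .. num_cores-1] ...
 *
 *	bhnd_erom_free_core_table(erom, cores);
 */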
446 
447 /**
448  * Return the current read position.
449  */
450 static bus_size_t
451 bcma_erom_tell(struct bcma_erom *erom)
452 {
453 	return (erom->offset);
454 }
455 
456 /**
457  * Seek to an absolute read position.
458  */
459 static void
460 bcma_erom_seek(struct bcma_erom *erom, bus_size_t offset)
461 {
462 	erom->offset = offset;
463 }
464 
465 /**
466  * Read a 32-bit entry value from the EROM table without advancing the
467  * read position.
468  *
469  * @param erom EROM read state.
470  * @param entry Will contain the read result on success.
471  * @retval 0 success
472  * @retval ENOENT The end of the EROM table was reached.
473  * @retval non-zero The read could not be completed.
474  */
475 static int
476 bcma_erom_peek32(struct bcma_erom *erom, uint32_t *entry)
477 {
478 	if (erom->offset >= (BCMA_EROM_TABLE_SIZE - sizeof(uint32_t))) {
479 		EROM_LOG(erom, "BCMA EROM table missing terminating EOF\n");
480 		return (EINVAL);
481 	}
482 
483 	*entry = bhnd_erom_io_read(erom->eio, erom->offset, 4);
484 	return (0);
485 }
486 
487 /**
488  * Read a 32-bit entry value from the EROM table.
489  *
490  * @param erom EROM read state.
491  * @param entry Will contain the read result on success.
492  * @retval 0 success
493  * @retval ENOENT The end of the EROM table was reached.
494  * @retval non-zero The read could not be completed.
495  */
496 static int
497 bcma_erom_read32(struct bcma_erom *erom, uint32_t *entry)
498 {
499 	int error;
500 
501 	if ((error = bcma_erom_peek32(erom, entry)) == 0)
502 		erom->offset += 4;
503 
504 	return (error);
505 }
506 
507 /**
508  * Read and discard a 32-bit entry value from the EROM table.
509  *
510  * @param erom EROM read state.
511  * @retval 0 success
512  * @retval ENOENT The end of the EROM table was reached.
513  * @retval non-zero The read could not be completed.
514  */
515 static int
516 bcma_erom_skip32(struct bcma_erom *erom)
517 {
518 	uint32_t	entry;
519 
520 	return (bcma_erom_read32(erom, &entry));
521 }
522 
523 /**
524  * Read and discard a core descriptor from the EROM table.
525  *
526  * @param erom EROM read state.
527  * @retval 0 success
528  * @retval ENOENT The end of the EROM table was reached.
529  * @retval non-zero The read could not be completed.
530  */
531 static int
532 bcma_erom_skip_core(struct bcma_erom *erom)
533 {
534 	struct bcma_erom_core core;
535 	return (bcma_erom_parse_core(erom, &core));
536 }
537 
538 /**
539  * Read and discard a master port descriptor from the EROM table.
540  *
541  * @param erom EROM read state.
542  * @retval 0 success
543  * @retval ENOENT The end of the EROM table was reached.
544  * @retval non-zero The read could not be completed.
545  */
546 static int
547 bcma_erom_skip_mport(struct bcma_erom *erom)
548 {
549 	struct bcma_erom_mport mp;
550 	return (bcma_erom_parse_mport(erom, &mp));
551 }
552 
553 /**
554  * Read and discard a port region descriptor from the EROM table.
555  *
556  * @param erom EROM read state.
557  * @retval 0 success
558  * @retval ENOENT The end of the EROM table was reached.
559  * @retval non-zero The read could not be completed.
560  */
561 static int
562 bcma_erom_skip_sport_region(struct bcma_erom *erom)
563 {
564 	struct bcma_erom_sport_region r;
565 	return (bcma_erom_parse_sport_region(erom, &r));
566 }
567 
568 /**
569  * Seek to the next entry matching the given EROM entry type.
570  *
571  * @param erom EROM read state.
572  * @param etype  One of BCMA_EROM_ENTRY_TYPE_CORE,
573  * BCMA_EROM_ENTRY_TYPE_MPORT, or BCMA_EROM_ENTRY_TYPE_REGION.
574  * @retval 0 success
575  * @retval ENOENT The end of the EROM table was reached.
576  * @retval non-zero Reading or parsing the descriptor failed.
577  */
578 static int
579 bcma_erom_seek_next(struct bcma_erom *erom, uint8_t etype)
580 {
581 	uint32_t			entry;
582 	int				error;
583 
584 	/* Iterate until we hit an entry matching the requested type. */
585 	while (!(error = bcma_erom_peek32(erom, &entry))) {
586 		/* Handle EOF */
587 		if (entry == BCMA_EROM_TABLE_EOF)
588 			return (ENOENT);
589 
590 		/* Invalid entry */
591 		if (!BCMA_EROM_GET_ATTR(entry, ENTRY_ISVALID))
592 			return (EINVAL);
593 
594 		/* Entry type matches? */
595 		if (BCMA_EROM_GET_ATTR(entry, ENTRY_TYPE) == etype)
596 			return (0);
597 
598 		/* Skip non-matching entry types. */
599 		switch (BCMA_EROM_GET_ATTR(entry, ENTRY_TYPE)) {
600 		case BCMA_EROM_ENTRY_TYPE_CORE:
601 			if ((error = bcma_erom_skip_core(erom)))
602 				return (error);
603 
604 			break;
605 
606 		case BCMA_EROM_ENTRY_TYPE_MPORT:
607 			if ((error = bcma_erom_skip_mport(erom)))
608 				return (error);
609 
610 			break;
611 
612 		case BCMA_EROM_ENTRY_TYPE_REGION:
613 			if ((error = bcma_erom_skip_sport_region(erom)))
614 				return (error);
615 			break;
616 
617 		default:
618 			/* Unknown entry type! */
619 			return (EINVAL);
620 		}
621 	}
622 
623 	return (error);
624 }
625 
626 /**
627  * Return the read position to the start of the EROM table.
628  *
629  * @param erom EROM read state.
630  */
631 static void
632 bcma_erom_reset(struct bcma_erom *erom)
633 {
634 	erom->offset = 0;
635 }
636 
637 /**
638  * Seek to the first core entry matching @p desc.
639  *
640  * @param erom EROM read state.
641  * @param desc The core match descriptor.
642  * @param[out] core On success, the matching core info. If the core info
643  * is not desired, a NULL pointer may be provided.
644  * @retval 0 success
645  * @retval ENOENT The end of the EROM table was reached before a core
646  * matching @p desc was found.
647  * @retval non-zero Reading or parsing failed.
648  */
649 static int
650 bcma_erom_seek_matching_core(struct bcma_erom *sc,
651     const struct bhnd_core_match *desc, struct bhnd_core_info *core)
652 {
653 	struct bhnd_core_match	 imatch;
654 	bus_size_t		 core_offset, next_offset;
655 	int			 error;
656 
657 	/* Seek to table start. */
658 	bcma_erom_reset(sc);
659 
660 	/* We can't determine a core's unit number during the initial scan. */
661 	imatch = *desc;
662 	imatch.m.match.core_unit = 0;
663 
664 	/* Locate the first matching core */
665 	for (u_int i = 0; i < UINT_MAX; i++) {
666 		struct bcma_erom_core	ec;
667 		struct bhnd_core_info	ci;
668 
669 		/* Seek to the next core */
670 		error = bcma_erom_seek_next(sc, BCMA_EROM_ENTRY_TYPE_CORE);
671 		if (error)
672 			return (error);
673 
674 		/* Save the core offset */
675 		core_offset = bcma_erom_tell(sc);
676 
677 		/* Parse the core */
678 		if ((error = bcma_erom_parse_core(sc, &ec)))
679 			return (error);
680 
681 		bcma_erom_to_core_info(&ec, i, 0, &ci);
682 
683 		/* Check for initial match */
684 		if (!bhnd_core_matches(&ci, &imatch))
685 			continue;
686 
687 		/* Re-scan preceding cores to determine the unit number. */
688 		next_offset = bcma_erom_tell(sc);
689 		bcma_erom_reset(sc);
690 		for (u_int j = 0; j < i; j++) {
691 			/* Parse the core */
692 			error = bcma_erom_seek_next(sc,
693 			    BCMA_EROM_ENTRY_TYPE_CORE);
694 			if (error)
695 				return (error);
696 
697 			if ((error = bcma_erom_parse_core(sc, &ec)))
698 				return (error);
699 
700 			/* Bump the unit number? */
701 			if (ec.vendor == ci.vendor && ec.device == ci.device)
702 				ci.unit++;
703 		}
704 
705 		/* Check for full match against now-valid unit number */
706 		if (!bhnd_core_matches(&ci, desc)) {
707 			/* Reposition to allow reading the next core */
708 			bcma_erom_seek(sc, next_offset);
709 			continue;
710 		}
711 
712 		/* Found; seek to the core's initial offset and provide
713 		 * the core info to the caller */
714 		bcma_erom_seek(sc, core_offset);
715 		if (core != NULL)
716 			*core = ci;
717 
718 		return (0);
719 	}
720 
721 	/* Not found, or a parse error occurred */
722 	return (error);
723 }
724 
725 /**
726  * Read the next core descriptor from the EROM table.
727  *
728  * @param erom EROM read state.
729  * @param[out] core On success, will be populated with the parsed core
730  * descriptor data.
731  * @retval 0 success
732  * @retval ENOENT The end of the EROM table was reached.
733  * @retval non-zero Reading or parsing the core descriptor failed.
734  */
735 static int
736 bcma_erom_parse_core(struct bcma_erom *erom, struct bcma_erom_core *core)
737 {
738 	uint32_t	entry;
739 	int		error;
740 
741 	/* Parse CoreDescA */
742 	if ((error = bcma_erom_read32(erom, &entry)))
743 		return (error);
744 
745 	/* Handle EOF */
746 	if (entry == BCMA_EROM_TABLE_EOF)
747 		return (ENOENT);
748 
749 	if (!BCMA_EROM_ENTRY_IS(entry, CORE)) {
750 		EROM_LOG(erom, "Unexpected EROM entry 0x%x (type=%s)\n",
751 		    entry, bcma_erom_entry_type_name(entry));
752 
753 		return (EINVAL);
754 	}
755 
756 	core->vendor = BCMA_EROM_GET_ATTR(entry, COREA_DESIGNER);
757 	core->device = BCMA_EROM_GET_ATTR(entry, COREA_ID);
758 
759 	/* Parse CoreDescB */
760 	if ((error = bcma_erom_read32(erom, &entry)))
761 		return (error);
762 
763 	if (!BCMA_EROM_ENTRY_IS(entry, CORE)) {
764 		return (EINVAL);
765 	}
766 
767 	core->rev = BCMA_EROM_GET_ATTR(entry, COREB_REV);
768 	core->num_mport = BCMA_EROM_GET_ATTR(entry, COREB_NUM_MP);
769 	core->num_dport = BCMA_EROM_GET_ATTR(entry, COREB_NUM_DP);
770 	core->num_mwrap = BCMA_EROM_GET_ATTR(entry, COREB_NUM_WMP);
771 	core->num_swrap = BCMA_EROM_GET_ATTR(entry, COREB_NUM_WSP);
772 
773 	return (0);
774 }
775 
776 /**
777  * Read the next master port descriptor from the EROM table.
778  *
779  * @param erom EROM read state.
780  * @param[out] mport On success, will be populated with the parsed
781  * descriptor data.
782  * @retval 0 success
783  * @retval non-zero Reading or parsing the descriptor failed.
784  */
785 static int
786 bcma_erom_parse_mport(struct bcma_erom *erom, struct bcma_erom_mport *mport)
787 {
788 	uint32_t	entry;
789 	int		error;
790 
791 	/* Parse the master port descriptor */
792 	if ((error = bcma_erom_read32(erom, &entry)))
793 		return (error);
794 
795 	if (!BCMA_EROM_ENTRY_IS(entry, MPORT))
796 		return (EINVAL);
797 
798 	mport->port_vid = BCMA_EROM_GET_ATTR(entry, MPORT_ID);
799 	mport->port_num = BCMA_EROM_GET_ATTR(entry, MPORT_NUM);
800 
801 	return (0);
802 }
803 
804 /**
805  * Read the next slave port region descriptor from the EROM table.
806  *
807  * @param erom EROM read state.
808  * @param[out] region On success, will be populated with the parsed
809  * descriptor data.
810  * @retval 0 success
811  * @retval ENOENT The end of the region descriptor table was reached.
812  * @retval non-zero Reading or parsing the descriptor failed.
813  */
814 static int
815 bcma_erom_parse_sport_region(struct bcma_erom *erom,
816     struct bcma_erom_sport_region *region)
817 {
818 	uint32_t	entry;
819 	uint8_t		size_type;
820 	int		error;
821 
822 	/* Peek at the region descriptor */
823 	if (bcma_erom_peek32(erom, &entry))
824 		return (EINVAL);
825 
826 	/* A non-region entry signals the end of the region table */
827 	if (!BCMA_EROM_ENTRY_IS(entry, REGION)) {
828 		return (ENOENT);
829 	} else {
830 		bcma_erom_skip32(erom);
831 	}
832 
833 	region->base_addr = BCMA_EROM_GET_ATTR(entry, REGION_BASE);
834 	region->region_type = BCMA_EROM_GET_ATTR(entry, REGION_TYPE);
835 	region->region_port = BCMA_EROM_GET_ATTR(entry, REGION_PORT);
836 	size_type = BCMA_EROM_GET_ATTR(entry, REGION_SIZE);
837 
838 	/* If region address is 64-bit, fetch the high bits. */
839 	if (BCMA_EROM_GET_ATTR(entry, REGION_64BIT)) {
840 		if ((error = bcma_erom_read32(erom, &entry)))
841 			return (error);
842 
843 		region->base_addr |= ((bhnd_addr_t) entry << 32);
844 	}
845 
846 	/* Parse the region size; it's either encoded as the binary logarithm
847 	 * of the number of 4K pages (i.e. log2 n), or it's encoded as a
848 	 * 32-bit/64-bit literal value directly following the current entry. */
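	/*
	 * Worked example (illustrative, assuming BCMA_EROM_REGION_SIZE_BASE
	 * is the 4K page size implied above): size_type 0 encodes a 4KB
	 * region and size_type 3 encodes a 32KB region, while
	 * BCMA_EROM_REGION_SIZE_OTHER pulls a literal 32-bit size from the
	 * following entry (plus a second entry for the upper 32 bits when
	 * RSIZE_64BIT is set).
	 */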
849 	if (size_type == BCMA_EROM_REGION_SIZE_OTHER) {
850 		if ((error = bcma_erom_read32(erom, &entry)))
851 			return (error);
852 
853 		region->size = BCMA_EROM_GET_ATTR(entry, RSIZE_VAL);
854 
855 		if (BCMA_EROM_GET_ATTR(entry, RSIZE_64BIT)) {
856 			if ((error = bcma_erom_read32(erom, &entry)))
857 				return (error);
858 			region->size |= ((bhnd_size_t) entry << 32);
859 		}
860 	} else {
861 		region->size = BCMA_EROM_REGION_SIZE_BASE << size_type;
862 	}
863 
864 	/* Verify that addr+size does not overflow. */
865 	if (region->size != 0 &&
866 	    BHND_ADDR_MAX - (region->size - 1) < region->base_addr)
867 	{
868 		EROM_LOG(erom, "%s%u: invalid address map %llx:%llx\n",
869 		    bcma_erom_entry_type_name(region->region_type),
870 		    region->region_port,
871 		    (unsigned long long) region->base_addr,
872 		    (unsigned long long) region->size);
873 
874 		return (EINVAL);
875 	}
876 
877 	return (0);
878 }
879 
880 /**
881  * Convert a bcma_erom_core record to its bhnd_core_info representation.
882  *
883  * @param core EROM core record to convert.
884  * @param core_idx The core index of @p core.
885  * @param core_unit The core unit of @p core.
886  * @param[out] info The populated bhnd_core_info representation.
887  */
888 static void
889 bcma_erom_to_core_info(const struct bcma_erom_core *core, u_int core_idx,
890     int core_unit, struct bhnd_core_info *info)
891 {
892 	info->vendor = core->vendor;
893 	info->device = core->device;
894 	info->hwrev = core->rev;
895 	info->core_idx = core_idx;
896 	info->unit = core_unit;
897 }
898 
899 /**
900  * Map an EROM region type to its corresponding port type.
901  *
902  * @param region_type Region type value.
903  * @param[out] port_type On success, the corresponding port type.
904  */
905 static int
906 bcma_erom_region_to_port_type(struct bcma_erom *erom, uint8_t region_type,
907     bhnd_port_type *port_type)
908 {
909 	switch (region_type) {
910 	case BCMA_EROM_REGION_TYPE_DEVICE:
911 		*port_type = BHND_PORT_DEVICE;
912 		return (0);
913 	case BCMA_EROM_REGION_TYPE_BRIDGE:
914 		*port_type = BHND_PORT_BRIDGE;
915 		return (0);
916 	case BCMA_EROM_REGION_TYPE_MWRAP:
917 	case BCMA_EROM_REGION_TYPE_SWRAP:
918 		*port_type = BHND_PORT_AGENT;
919 		return (0);
920 	default:
921 		EROM_LOG(erom, "unsupported region type %hhx\n",
922 			region_type);
923 		return (EINVAL);
924 	}
925 }
926 
927 /**
928  * Register all MMIO region descriptors for the given slave port.
929  *
930  * @param erom EROM read state.
931  * @param corecfg Core info to be populated with the scanned port regions.
932  * @param port_num Port index for which regions will be parsed.
933  * @param region_type The region type to be parsed.
934  * @retval 0 success
935  * @retval non-zero Reading or parsing a region descriptor failed.
936  */
937 static int
938 bcma_erom_corecfg_fill_port_regions(struct bcma_erom *erom,
939     struct bcma_corecfg *corecfg, bcma_pid_t port_num,
940     uint8_t region_type)
941 {
942 	struct bcma_sport	*sport;
943 	struct bcma_sport_list	*sports;
944 	bus_size_t		 entry_offset;
945 	int			 error;
946 	bhnd_port_type		 port_type;
947 
948 	error = 0;
949 
950 	/* Determine the port type for this region type. */
951 	error = bcma_erom_region_to_port_type(erom, region_type, &port_type);
952 	if (error)
953 		return (error);
954 
955 	/* Fetch the list to be populated */
956 	sports = bcma_corecfg_get_port_list(corecfg, port_type);
957 
958 	/* Allocate a new port descriptor */
959 	sport = bcma_alloc_sport(port_num, port_type);
960 	if (sport == NULL)
961 		return (ENOMEM);
962 
963 	/* Read all address regions defined for this port */
964 	for (bcma_rmid_t region_num = 0;; region_num++) {
965 		struct bcma_map			*map;
966 		struct bcma_erom_sport_region	 spr;
967 
968 		/* No valid port definition should come anywhere near
969 		 * BCMA_RMID_MAX. */
970 		if (region_num == BCMA_RMID_MAX) {
971 			EROM_LOG(erom, "core%u %s%u: region count reached "
972 			    "upper limit of %u\n",
973 			    corecfg->core_info.core_idx,
974 			    bhnd_port_type_name(port_type),
975 			    port_num, BCMA_RMID_MAX);
976 
977 			error = EINVAL;
978 			goto cleanup;
979 		}
980 
981 		/* Parse the next region entry. */
982 		entry_offset = bcma_erom_tell(erom);
983 		error = bcma_erom_parse_sport_region(erom, &spr);
984 		if (error && error != ENOENT) {
985 			EROM_LOG(erom, "core%u %s%u.%u: invalid slave port "
986 			    "address region\n",
987 			    corecfg->core_info.core_idx,
988 			    bhnd_port_type_name(port_type),
989 			    port_num, region_num);
990 			goto cleanup;
991 		}
992 
993 		/* ENOENT signals no further region entries */
994 		if (error == ENOENT) {
995 			/* No further entries */
996 			error = 0;
997 			break;
998 		}
999 
1000 		/* A region or type mismatch also signals no further region
1001 		 * entries */
1002 		if (spr.region_port != port_num ||
1003 		    spr.region_type != region_type)
1004 		{
1005 			/* We don't want to consume this entry */
1006 			bcma_erom_seek(erom, entry_offset);
1007 
1008 			error = 0;
1009 			goto cleanup;
1010 		}
1011 
1012 		/*
1013 		 * Create the map entry.
1014 		 */
1015 		map = malloc(sizeof(struct bcma_map), M_BHND, M_NOWAIT);
1016 		if (map == NULL) {
1017 			error = ENOMEM;
1018 			goto cleanup;
1019 		}
1020 
1021 		map->m_region_num = region_num;
1022 		map->m_base = spr.base_addr;
1023 		map->m_size = spr.size;
1024 		map->m_rid = -1;
1025 
1026 		/* Add the region map to the port */
1027 		STAILQ_INSERT_TAIL(&sport->sp_maps, map, m_link);
1028 		sport->sp_num_maps++;
1029 	}
1030 
1031 cleanup:
1032 	/* Append the new port descriptor on success, or deallocate the
1033 	 * partially parsed descriptor on failure. */
1034 	if (error == 0) {
1035 		STAILQ_INSERT_TAIL(sports, sport, sp_link);
1036 	} else if (sport != NULL) {
1037 		bcma_free_sport(sport);
1038 	}
1039 
1040 	return (error);
1041 }
1042 
1043 /**
1044  * Parse the next core entry from the EROM table and produce a bcma_corecfg
1045  * to be owned by the caller.
1046  *
1047  * @param erom A bcma EROM instance.
1048  * @param[out] result On success, the core's device info. The caller inherits
1049  * ownership of this allocation.
1050  *
1051  * @return If successful, returns 0. If the end of the EROM table is hit,
1052  * ENOENT will be returned. On error, returns a non-zero error value.
1053  */
1054 int
1055 bcma_erom_next_corecfg(struct bcma_erom *erom, struct bcma_corecfg **result)
1056 {
1057 	struct bcma_corecfg	*cfg;
1058 	struct bcma_erom_core	 core;
1059 	uint8_t			 first_region_type;
1060 	bus_size_t		 initial_offset;
1061 	u_int			 core_index;
1062 	int			 core_unit;
1063 	int			 error;
1064 
1065 	cfg = NULL;
1066 	initial_offset = bcma_erom_tell(erom);
1067 
1068 	/* Parse the next core entry */
1069 	if ((error = bcma_erom_parse_core(erom, &core)))
1070 		return (error);
1071 
1072 	/* Determine the core's index and unit numbers */
1073 	bcma_erom_reset(erom);
1074 	core_unit = 0;
1075 	core_index = 0;
1076 	for (; bcma_erom_tell(erom) != initial_offset; core_index++) {
1077 		struct bcma_erom_core prev_core;
1078 
1079 		/* Parse next core */
1080 		error = bcma_erom_seek_next(erom, BCMA_EROM_ENTRY_TYPE_CORE);
1081 		if (error)
1082 			return (error);
1083 
1084 		if ((error = bcma_erom_parse_core(erom, &prev_core)))
1085 			return (error);
1086 
1087 		/* Earlier unit of the same core type? */
1088 		if (core.vendor == prev_core.vendor &&
1089 		    core.device == prev_core.device)
1090 		{
1091 			core_unit++;
1092 		}
1093 
1094 		/* Seek to next core */
1095 		error = bcma_erom_seek_next(erom, BCMA_EROM_ENTRY_TYPE_CORE);
1096 		if (error)
1097 			return (error);
1098 	}
1099 
1100 	/* We already parsed the core descriptor */
1101 	if ((error = bcma_erom_skip_core(erom)))
1102 		return (error);
1103 
1104 	/* Allocate our corecfg */
1105 	cfg = bcma_alloc_corecfg(core_index, core_unit, core.vendor,
1106 	    core.device, core.rev);
1107 	if (cfg == NULL)
1108 		return (ENOMEM);
1109 
1110 	/* These are 5-bit values in the EROM table, and should never be able
1111 	 * to overflow BCMA_PID_MAX. */
1112 	KASSERT(core.num_mport <= BCMA_PID_MAX, ("unsupported mport count"));
1113 	KASSERT(core.num_dport <= BCMA_PID_MAX, ("unsupported dport count"));
1114 	KASSERT(core.num_mwrap + core.num_swrap <= BCMA_PID_MAX,
1115 	    ("unsupported wport count"));
1116 
1117 	if (bootverbose) {
1118 		EROM_LOG(erom,
1119 		    "core%u: %s %s (cid=%hx, rev=%hu, unit=%d)\n",
1120 		    core_index,
1121 		    bhnd_vendor_name(core.vendor),
1122 		    bhnd_find_core_name(core.vendor, core.device),
1123 		    core.device, core.rev, core_unit);
1124 	}
1125 
1126 	cfg->num_master_ports = core.num_mport;
1127 	cfg->num_dev_ports = 0;		/* determined below */
1128 	cfg->num_bridge_ports = 0;	/* determined below */
1129 	cfg->num_wrapper_ports = core.num_mwrap + core.num_swrap;
1130 
1131 	/* Parse Master Port Descriptors */
1132 	for (uint8_t i = 0; i < core.num_mport; i++) {
1133 		struct bcma_mport	*mport;
1134 		struct bcma_erom_mport	 mpd;
1135 
1136 		/* Parse the master port descriptor */
1137 		error = bcma_erom_parse_mport(erom, &mpd);
1138 		if (error)
1139 			goto failed;
1140 
1141 		/* Initialize a new bus mport structure */
1142 		mport = malloc(sizeof(struct bcma_mport), M_BHND, M_NOWAIT);
1143 		if (mport == NULL) {
1144 			error = ENOMEM;
1145 			goto failed;
1146 		}
1147 
1148 		mport->mp_vid = mpd.port_vid;
1149 		mport->mp_num = mpd.port_num;
1150 
1151 		/* Update dinfo */
1152 		STAILQ_INSERT_TAIL(&cfg->master_ports, mport, mp_link);
1153 	}
1154 
1155 	/*
1156 	 * Determine whether this is a bridge device; if so, we can
1157 	 * expect the first sequence of address region descriptors to
1158 	 * be of BCMA_EROM_REGION_TYPE_BRIDGE instead of
1159 	 * BCMA_EROM_REGION_TYPE_DEVICE.
1160 	 *
1161 	 * It's unclear whether this is the correct mechanism by which we
1162 	 * should detect/handle bridge devices, but this approach matches
1163 	 * that of (some of) Broadcom's published drivers.
1164 	 */
1165 	if (core.num_dport > 0) {
1166 		uint32_t entry;
1167 
1168 		if ((error = bcma_erom_peek32(erom, &entry)))
1169 			goto failed;
1170 
1171 		if (BCMA_EROM_ENTRY_IS(entry, REGION) &&
1172 		    BCMA_EROM_GET_ATTR(entry, REGION_TYPE) == BCMA_EROM_REGION_TYPE_BRIDGE)
1173 		{
1174 			first_region_type = BCMA_EROM_REGION_TYPE_BRIDGE;
1175 			cfg->num_dev_ports = 0;
1176 			cfg->num_bridge_ports = core.num_dport;
1177 		} else {
1178 			first_region_type = BCMA_EROM_REGION_TYPE_DEVICE;
1179 			cfg->num_dev_ports = core.num_dport;
1180 			cfg->num_bridge_ports = 0;
1181 		}
1182 	}
1183 
1184 	/* Device/bridge port descriptors */
1185 	for (uint8_t sp_num = 0; sp_num < core.num_dport; sp_num++) {
1186 		error = bcma_erom_corecfg_fill_port_regions(erom, cfg, sp_num,
1187 		    first_region_type);
1188 
1189 		if (error)
1190 			goto failed;
1191 	}
1192 
1193 	/* Wrapper (aka device management) descriptors (for master ports). */
1194 	for (uint8_t sp_num = 0; sp_num < core.num_mwrap; sp_num++) {
1195 		error = bcma_erom_corecfg_fill_port_regions(erom, cfg, sp_num,
1196 		    BCMA_EROM_REGION_TYPE_MWRAP);
1197 
1198 		if (error)
1199 			goto failed;
1200 	}
1201 
1202 	/* Wrapper (aka device management) descriptors (for slave ports). */
1203 	for (uint8_t i = 0; i < core.num_swrap; i++) {
1204 		/* Slave wrapper ports are not numbered distinctly from master
1205 		 * wrapper ports. */
1206 
1207 		/*
1208 		 * Broadcom DDR1/DDR2 Memory Controller
1209 		 * (cid=82e, rev=1, unit=0, d/mw/sw = 2/0/1 ) ->
1210 		 * bhnd0: erom[0xdc]: core6 agent0.0: mismatch got: 0x1 (0x2)
1211 		 *
1212 		 * ARM BP135 AMBA3 AXI to APB Bridge
1213 		 * (cid=135, rev=0, unit=0, d/mw/sw = 1/0/1 ) ->
1214 		 * bhnd0: erom[0x124]: core9 agent1.0: mismatch got: 0x0 (0x2)
1215 		 *
1216 		 * core.num_mwrap
1217 		 * ===>
1218 		 * (core.num_mwrap > 0) ?
1219 		 *           core.num_mwrap :
1220 		 *           ((core.vendor == BHND_MFGID_BCM) ? 1 : 0)
1221 		 */
1222 		uint8_t sp_num;
1223 		sp_num = ((core.num_mwrap > 0) ?
1224 				core.num_mwrap :
1225 				((core.vendor == BHND_MFGID_BCM) ? 1 : 0)) + i;
1226 		error = bcma_erom_corecfg_fill_port_regions(erom, cfg, sp_num,
1227 		    BCMA_EROM_REGION_TYPE_SWRAP);
1228 
1229 		if (error)
1230 			goto failed;
1231 	}
1232 
1233 	/*
1234 	 * Seek to the next core entry (if any), skipping any dangling/invalid
1235 	 * region entries.
1236 	 *
1237 	 * On the BCM4706, the EROM entry for the memory controller core
1238 	 * (0x4bf/0x52E) contains a dangling/unused slave wrapper port region
1239 	 * descriptor.
1240 	 */
1241 	if ((error = bcma_erom_seek_next(erom, BCMA_EROM_ENTRY_TYPE_CORE))) {
1242 		if (error != ENOENT)
1243 			goto failed;
1244 	}
1245 
1246 	*result = cfg;
1247 	return (0);
1248 
1249 failed:
1250 	if (cfg != NULL)
1251 		bcma_free_corecfg(cfg);
1252 
1253 	return (error);
1254 }
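
/*
 * Usage sketch (illustrative only): the bcma bus enumeration code is expected
 * to call bcma_erom_next_corecfg() in a loop until ENOENT, taking ownership
 * of each returned corecfg:
 *
 *	struct bcma_corecfg	*cfg;
 *	int			 error;
 *
 *	while ((error = bcma_erom_next_corecfg(erom, &cfg)) == 0) {
 *		... hand cfg to the bus, or release it with
 *		    bcma_free_corecfg(cfg) ...
 *	}
 *
 *	if (error != ENOENT)
 *		return (error);
 */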
1255 
1256 static int
1257 bcma_erom_dump(bhnd_erom_t *erom)
1258 {
1259 	struct bcma_erom	*sc;
1260 	uint32_t		entry;
1261 	int			error;
1262 
1263 	sc = (struct bcma_erom *)erom;
1264 
1265 	bcma_erom_reset(sc);
1266 
1267 	while (!(error = bcma_erom_read32(sc, &entry))) {
1268 		/* Handle EOF */
1269 		if (entry == BCMA_EROM_TABLE_EOF) {
1270 			EROM_LOG(sc, "EOF\n");
1271 			return (0);
1272 		}
1273 
1274 		/* Invalid entry */
1275 		if (!BCMA_EROM_GET_ATTR(entry, ENTRY_ISVALID)) {
1276 			EROM_LOG(sc, "invalid EROM entry %#x\n", entry);
1277 			return (EINVAL);
1278 		}
1279 
1280 		switch (BCMA_EROM_GET_ATTR(entry, ENTRY_TYPE)) {
1281 		case BCMA_EROM_ENTRY_TYPE_CORE: {
1282 			/* CoreDescA */
1283 			EROM_LOG(sc, "coreA (0x%x)\n", entry);
1284 			EROM_LOG(sc, "\tdesigner:\t0x%x\n",
1285 			    BCMA_EROM_GET_ATTR(entry, COREA_DESIGNER));
1286 			EROM_LOG(sc, "\tid:\t\t0x%x\n",
1287 			    BCMA_EROM_GET_ATTR(entry, COREA_ID));
1288 			EROM_LOG(sc, "\tclass:\t\t0x%x\n",
1289 			    BCMA_EROM_GET_ATTR(entry, COREA_CLASS));
1290 
1291 			/* CoreDescB */
1292 			if ((error = bcma_erom_read32(sc, &entry))) {
1293 				EROM_LOG(sc, "error reading CoreDescB: %d\n",
1294 				    error);
1295 				return (error);
1296 			}
1297 
1298 			if (!BCMA_EROM_ENTRY_IS(entry, CORE)) {
1299 				EROM_LOG(sc, "invalid core descriptor; found "
1300 				    "unexpected entry %#x (type=%s)\n",
1301 				    entry, bcma_erom_entry_type_name(entry));
1302 				return (EINVAL);
1303 			}
1304 
1305 			EROM_LOG(sc, "coreB (0x%x)\n", entry);
1306 			EROM_LOG(sc, "\trev:\t0x%x\n",
1307 			    BCMA_EROM_GET_ATTR(entry, COREB_REV));
1308 			EROM_LOG(sc, "\tnummp:\t0x%x\n",
1309 			    BCMA_EROM_GET_ATTR(entry, COREB_NUM_MP));
1310 			EROM_LOG(sc, "\tnumdp:\t0x%x\n",
1311 			    BCMA_EROM_GET_ATTR(entry, COREB_NUM_DP));
1312 			EROM_LOG(sc, "\tnumwmp:\t0x%x\n",
1313 			    BCMA_EROM_GET_ATTR(entry, COREB_NUM_WMP));
1314 			EROM_LOG(sc, "\tnumwsp:\t0x%x\n",
1315 			    BCMA_EROM_GET_ATTR(entry, COREB_NUM_WSP));
1316 
1317 			break;
1318 		}
1319 		case BCMA_EROM_ENTRY_TYPE_MPORT:
1320 			EROM_LOG(sc, "\tmport 0x%x\n", entry);
1321 			EROM_LOG(sc, "\t\tport:\t0x%x\n",
1322 			    BCMA_EROM_GET_ATTR(entry, MPORT_NUM));
1323 			EROM_LOG(sc, "\t\tid:\t\t0x%x\n",
1324 			    BCMA_EROM_GET_ATTR(entry, MPORT_ID));
1325 			break;
1326 
1327 		case BCMA_EROM_ENTRY_TYPE_REGION: {
1328 			bool	addr64;
1329 			uint8_t	size_type;
1330 
1331 			addr64 = (BCMA_EROM_GET_ATTR(entry, REGION_64BIT) != 0);
1332 			size_type = BCMA_EROM_GET_ATTR(entry, REGION_SIZE);
1333 
1334 			EROM_LOG(sc, "\tregion 0x%x:\n", entry);
1335 			EROM_LOG(sc, "\t\t%s:\t0x%x\n",
1336 			    addr64 ? "baselo" : "base",
1337 			    BCMA_EROM_GET_ATTR(entry, REGION_BASE));
1338 			EROM_LOG(sc, "\t\tport:\t0x%x\n",
1339 			    BCMA_EROM_GET_ATTR(entry, REGION_PORT));
1340 			EROM_LOG(sc, "\t\ttype:\t0x%x\n",
1341 			    BCMA_EROM_GET_ATTR(entry, REGION_TYPE));
1342 			EROM_LOG(sc, "\t\tsztype:\t0x%hhx\n", size_type);
1343 
1344 			/* Read the base address high bits */
1345 			if (addr64) {
1346 				if ((error = bcma_erom_read32(sc, &entry))) {
1347 					EROM_LOG(sc, "error reading region "
1348 					    "base address high bits %d\n",
1349 					    error);
1350 					return (error);
1351 				}
1352 
1353 				EROM_LOG(sc, "\t\tbasehi:\t0x%x\n", entry);
1354 			}
1355 
1356 			/* Read extended size descriptor */
1357 			if (size_type == BCMA_EROM_REGION_SIZE_OTHER) {
1358 				bool size64;
1359 
1360 				if ((error = bcma_erom_read32(sc, &entry))) {
1361 					EROM_LOG(sc, "error reading region "
1362 					    "size descriptor %d\n",
1363 					    error);
1364 					return (error);
1365 				}
1366 
1367 				if (BCMA_EROM_GET_ATTR(entry, RSIZE_64BIT))
1368 					size64 = true;
1369 				else
1370 					size64 = false;
1371 
1372 				EROM_LOG(sc, "\t\t%s:\t0x%x\n",
1373 				    size64 ? "sizelo" : "size",
1374 				    BCMA_EROM_GET_ATTR(entry, RSIZE_VAL));
1375 
1376 				if (size64) {
1377 					error = bcma_erom_read32(sc, &entry);
1378 					if (error) {
1379 						EROM_LOG(sc, "error reading "
1380 						    "region size high bits: "
1381 						    "%d\n", error);
1382 						return (error);
1383 					}
1384 
1385 					EROM_LOG(sc, "\t\tsizehi:\t0x%x\n",
1386 					    entry);
1387 				}
1388 			}
1389 			break;
1390 		}
1391 
1392 		default:
1393 			EROM_LOG(sc, "unknown EROM entry 0x%x (type=%s)\n",
1394 			    entry, bcma_erom_entry_type_name(entry));
1395 			return (EINVAL);
1396 		}
1397 	}
1398 
1399 	if (error == ENOENT)
1400 		EROM_LOG(sc, "BCMA EROM table missing terminating EOF\n");
1401 	else if (error)
1402 		EROM_LOG(sc, "EROM read failed: %d\n", error);
1403 
1404 	return (error);
1405 }
1406 
1407 static kobj_method_t bcma_erom_methods[] = {
1408 	KOBJMETHOD(bhnd_erom_probe,		bcma_erom_probe),
1409 	KOBJMETHOD(bhnd_erom_init,		bcma_erom_init),
1410 	KOBJMETHOD(bhnd_erom_fini,		bcma_erom_fini),
1411 	KOBJMETHOD(bhnd_erom_get_core_table,	bcma_erom_get_core_table),
1412 	KOBJMETHOD(bhnd_erom_free_core_table,	bcma_erom_free_core_table),
1413 	KOBJMETHOD(bhnd_erom_lookup_core,	bcma_erom_lookup_core),
1414 	KOBJMETHOD(bhnd_erom_lookup_core_addr,	bcma_erom_lookup_core_addr),
1415 	KOBJMETHOD(bhnd_erom_dump,		bcma_erom_dump),
1416 
1417 	KOBJMETHOD_END
1418 };
1419 
1420 BHND_EROM_DEFINE_CLASS(bcma_erom, bcma_erom_parser, bcma_erom_methods, sizeof(struct bcma_erom));
1421