1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
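/*
 * Error logging support for the Intel 5000/5400/7300 series north bridge
 * (memory controller hub).  The routines here capture the chipset error
 * registers when an error is detected, decode them against the Intel
 * chipset error lists and convert the results into FMA ereports.
 */
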
27 #include <sys/types.h>
28 #include <sys/cmn_err.h>
29 #include <sys/errno.h>
30 #include <sys/log.h>
31 #include <sys/systm.h>
32 #include <sys/modctl.h>
33 #include <sys/errorq.h>
34 #include <sys/controlregs.h>
35 #include <sys/fm/util.h>
36 #include <sys/fm/protocol.h>
37 #include <sys/sysevent.h>
38 #include <sys/pghw.h>
39 #include <sys/cyclic.h>
40 #include <sys/pci_cfgspace.h>
41 #include <sys/mc_intel.h>
42 #include <sys/smbios.h>
43 #include "nb5000.h"
44 #include "nb_log.h"
45 #include "dimm_phys.h"
46 
47 static uint32_t uerrcnt[2];
48 static uint32_t cerrcnta[2][2];
49 static uint32_t cerrcntb[2][2];
50 static uint32_t cerrcntc[2][2];
51 static uint32_t cerrcntd[2][2];
52 static nb_logout_t nb_log;
53 
54 struct mch_error_code {
55 	int intel_error_list;	/* error number in Chipset Error List */
56 	uint32_t emask;		/* mask for machine check */
57 	uint32_t error_bit;	/* error bit in fault register */
58 };
59 
60 static struct mch_error_code fat_fbd_error_code[] = {
61 	{ 23, EMASK_FBD_M23, ERR_FAT_FBD_M23 },
62 	{ 3, EMASK_FBD_M3, ERR_FAT_FBD_M3 },
63 	{ 2, EMASK_FBD_M2, ERR_FAT_FBD_M2 },
64 	{ 1, EMASK_FBD_M1, ERR_FAT_FBD_M1 }
65 };
66 
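/*
 * Look up the Intel error list entry (Mxx) for the bits set in a fatal
 * FBD error register and mask further machine checks for the matched
 * errors.  Returns -1 if no bit, or more than one bit, is set.
 */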
67 static int
68 intel_fat_fbd_err(uint32_t fat_fbd)
69 {
70 	int rt = -1;
71 	int nerr = 0;
72 	uint32_t emask_fbd = 0;
73 	int i;
74 	int sz;
75 
76 	sz = sizeof (fat_fbd_error_code) / sizeof (struct mch_error_code);
77 
78 	for (i = 0; i < sz; i++) {
79 		if (fat_fbd & fat_fbd_error_code[i].error_bit) {
80 			rt = fat_fbd_error_code[i].intel_error_list;
81 			emask_fbd |= fat_fbd_error_code[i].emask;
82 			nerr++;
83 		}
84 	}
85 
86 	if (emask_fbd)
87 		nb_fbd_mask_mc(emask_fbd);
88 	if (nerr > 1)
89 		rt = -1;
90 	return (rt);
91 }
92 
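/*
 * Decode the fatal FBD error registers into the memory scratchpad.
 * The branch and channel come from FERR_FAT_FBD; for M1 (alert) and
 * M2 (CRC) errors the rank, dimm, bank, ras/cas, physical address and
 * offset are recovered from NRECMEMA/NRECMEMB.  Returns the ereport
 * class suffix for the error.
 */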
93 static char *
94 fat_memory_error(const nb_regs_t *rp, void *data)
95 {
96 	int channel;
97 	uint32_t ferr_fat_fbd, nrecmemb;
98 	uint32_t nrecmema;
99 	char *intr = "nb.unknown";
100 	nb_mem_scatchpad_t *sp = &((nb_scatchpad_t *)data)->ms;
101 
102 	ferr_fat_fbd = rp->nb.fat_fbd_regs.ferr_fat_fbd;
103 	if ((ferr_fat_fbd & ERR_FAT_FBD_MASK) == 0) {
104 		sp->intel_error_list =
105 		    intel_fat_fbd_err(rp->nb.fat_fbd_regs.nerr_fat_fbd);
106 		sp->branch = -1;
107 		sp->channel = -1;
108 		sp->rank = -1;
109 		sp->dimm = -1;
110 		sp->bank = -1;
111 		sp->cas = -1;
112 		sp->ras = -1;
113 		sp->pa = -1LL;
114 		sp->offset = -1;
115 		return (intr);
116 	}
117 	sp->intel_error_list = intel_fat_fbd_err(ferr_fat_fbd);
118 	channel = (ferr_fat_fbd >> 28) & 3;
119 	sp->branch = channel >> 1;
120 	sp->channel = channel;
121 	if ((ferr_fat_fbd & (ERR_FAT_FBD_M2|ERR_FAT_FBD_M1)) != 0) {
122 		if ((ferr_fat_fbd & ERR_FAT_FBD_M1) != 0)
123 			intr = "nb.fbd.alert";	/* Alert on FB-DIMM M1 */
124 		else
			intr = "nb.fbd.crc";	/* CRC error FB-DIMM M2 */
126 		nrecmema = rp->nb.fat_fbd_regs.nrecmema;
127 		nrecmemb = rp->nb.fat_fbd_regs.nrecmemb;
128 		sp->rank = (nrecmema >> 8) & RANK_MASK;
129 		sp->dimm = sp->rank >> 1;
130 		sp->bank = (nrecmema >> 12) & BANK_MASK;
131 		sp->cas = (nrecmemb >> 16) & CAS_MASK;
132 		sp->ras = nrecmemb & RAS_MASK;
133 		sp->pa = dimm_getphys(sp->branch, sp->rank, sp->bank, sp->ras,
134 		    sp->cas);
135 		sp->offset = dimm_getoffset(sp->branch, sp->rank, sp->bank,
136 		    sp->ras, sp->cas);
137 	} else {
138 		if ((ferr_fat_fbd & ERR_FAT_FBD_M3) != 0)
139 			intr = "nb.fbd.otf";	/* thermal temp > Tmid M3 */
140 		else if ((ferr_fat_fbd & ERR_FAT_FBD_M23) != 0) {
141 			intr = "nb.fbd.reset_timeout";
142 			sp->channel = -1;
143 		}
144 		sp->rank = -1;
145 		sp->dimm = -1;
146 		sp->bank = -1;
147 		sp->cas = -1;
148 		sp->ras = -1;
149 		sp->pa = -1LL;
150 		sp->offset = -1;
151 	}
152 	return (intr);
153 }
154 
155 
156 static struct mch_error_code nf_fbd_error_code[] = {
157 	{ 29, EMASK_FBD_M29, ERR_NF_FBD_M29 },
158 	{ 28, EMASK_FBD_M28, ERR_NF_FBD_M28 },
159 	{ 27, EMASK_FBD_M27, ERR_NF_FBD_M27 },
160 	{ 26, EMASK_FBD_M26, ERR_NF_FBD_M26 },
161 	{ 25, EMASK_FBD_M25, ERR_NF_FBD_M25 },
162 	{ 24, EMASK_FBD_M24, ERR_NF_FBD_M24 },
163 	{ 22, EMASK_FBD_M22, ERR_NF_FBD_M22 },
164 	{ 21, EMASK_FBD_M21, ERR_NF_FBD_M21 },
165 	{ 20, EMASK_FBD_M20, ERR_NF_FBD_M20 },
166 	{ 19, EMASK_FBD_M19, ERR_NF_FBD_M19 },
167 	{ 18, EMASK_FBD_M18, ERR_NF_FBD_M18 },
168 	{ 17, EMASK_FBD_M17, ERR_NF_FBD_M17 },
169 	{ 16, EMASK_FBD_M16, ERR_NF_FBD_M16 },
170 	{ 15, EMASK_FBD_M15, ERR_NF_FBD_M15 },
171 	{ 14, EMASK_FBD_M14, ERR_NF_FBD_M14 },
172 	{ 13, EMASK_FBD_M13, ERR_NF_FBD_M13 },
173 	{ 12, EMASK_FBD_M12, ERR_NF_FBD_M12 },
174 	{ 11, EMASK_FBD_M11, ERR_NF_FBD_M11 },
175 	{ 10, EMASK_FBD_M10, ERR_NF_FBD_M10 },
176 	{ 9, EMASK_FBD_M9, ERR_NF_FBD_M9 },
177 	{ 8, EMASK_FBD_M8, ERR_NF_FBD_M8 },
178 	{ 7, EMASK_FBD_M7, ERR_NF_FBD_M7 },
179 	{ 6, EMASK_FBD_M6, ERR_NF_FBD_M6 },
180 	{ 5, EMASK_FBD_M5, ERR_NF_FBD_M5 },
181 	{ 4, EMASK_FBD_M4, ERR_NF_FBD_M4 }
182 };
183 
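/*
 * As intel_fat_fbd_err(), but for the non-fatal FBD error registers.
 */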
184 static int
185 intel_nf_fbd_err(uint32_t nf_fbd)
186 {
187 	int rt = -1;
188 	int nerr = 0;
189 	uint32_t emask_fbd = 0;
190 	int i;
191 	int sz;
192 
193 	sz = sizeof (nf_fbd_error_code) / sizeof (struct mch_error_code);
194 
195 	for (i = 0; i < sz; i++) {
196 		if (nf_fbd & nf_fbd_error_code[i].error_bit) {
197 			rt = nf_fbd_error_code[i].intel_error_list;
198 			emask_fbd |= nf_fbd_error_code[i].emask;
199 			nerr++;
200 		}
201 	}
202 	if (emask_fbd)
203 		nb_fbd_mask_mc(emask_fbd);
204 	if (nerr > 1)
205 		rt = -1;
206 	return (rt);
207 }
208 
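/*
 * Decode the non-fatal FBD error registers into the memory scratchpad.
 * Depending on the error type (uncorrectable ECC, write error, memory
 * alert, correctable ECC, spare copy or SPD) fill in as much location
 * information as can be isolated and return the ereport class suffix.
 */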
209 static char *
210 nf_memory_error(const nb_regs_t *rp, void *data)
211 {
212 	uint32_t ferr_nf_fbd, recmemb, redmemb;
213 	uint32_t recmema;
214 	int branch, channel, ecc_locator;
215 	char *intr = "nb.unknown";
216 	nb_mem_scatchpad_t *sp = &((nb_scatchpad_t *)data)->ms;
217 
218 	sp->rank = -1;
219 	sp->dimm = -1;
220 	sp->bank = -1;
221 	sp->cas = -1;
222 	sp->ras = -1LL;
223 	sp->pa = -1LL;
224 	sp->offset = -1;
225 	ferr_nf_fbd = rp->nb.nf_fbd_regs.ferr_nf_fbd;
226 	if ((ferr_nf_fbd & ERR_NF_FBD_MASK) == 0) {
227 		sp->branch = -1;
228 		sp->channel = -1;
229 		sp->intel_error_list =
230 		    intel_nf_fbd_err(rp->nb.nf_fbd_regs.nerr_nf_fbd);
231 		return (intr);
232 	}
233 	sp->intel_error_list = intel_nf_fbd_err(ferr_nf_fbd);
234 	channel = (ferr_nf_fbd >> ERR_FBD_CH_SHIFT) & 3;
235 	branch = channel >> 1;
236 	sp->branch = branch;
237 	sp->channel = channel;
238 	if (ferr_nf_fbd & ERR_NF_FBD_MASK) {
239 		if (ferr_nf_fbd & ERR_NF_FBD_ECC_UE) {
			/*
			 * uncorrectable ECC M4 - M12
			 * we can only isolate to a pair of dimms;
			 * for a single dimm configuration let eversholt
			 * sort it out without needing a special rule
			 */
246 			sp->channel = -1;
247 			recmema = rp->nb.nf_fbd_regs.recmema;
248 			recmemb = rp->nb.nf_fbd_regs.recmemb;
249 			sp->rank = (recmema >> 8) & RANK_MASK;
250 			sp->bank = (recmema >> 12) & BANK_MASK;
251 			sp->cas = (recmemb >> 16) & CAS_MASK;
252 			sp->ras = recmemb & RAS_MASK;
253 			intr = "nb.mem_ue";
		} else if (ferr_nf_fbd & ERR_NF_FBD_M13) {
			/*
			 * write error M13
			 * we can only isolate to a pair of dimms
			 */
			sp->channel = -1;
			if (nb_mode != NB_MEMORY_MIRROR) {
				recmema = rp->nb.nf_fbd_regs.recmema;
				recmemb = rp->nb.nf_fbd_regs.recmemb;
				sp->rank = (recmema >> 8) & RANK_MASK;
				sp->bank = (recmema >> 12) & BANK_MASK;
				sp->cas = (recmemb >> 16) & CAS_MASK;
				sp->ras = recmemb & RAS_MASK;
			}
267 			intr = "nb.fbd.ma"; /* memory alert */
268 		} else if (ferr_nf_fbd & ERR_NF_FBD_MA) { /* M14, M15 and M21 */
269 			intr = "nb.fbd.ch"; /* FBD on channel */
270 		} else if ((ferr_nf_fbd & ERR_NF_FBD_ECC_CE) != 0) {
271 			/* correctable ECC M17-M20 */
272 			recmema = rp->nb.nf_fbd_regs.recmema;
273 			recmemb = rp->nb.nf_fbd_regs.recmemb;
274 			sp->rank = (recmema >> 8) & RANK_MASK;
275 			redmemb = rp->nb.nf_fbd_regs.redmemb;
276 			ecc_locator = redmemb & 0x3ffff;
277 			if (ecc_locator & 0x1ff)
278 				sp->channel = branch << 1;
279 			else if (ecc_locator & 0x3fe00)
280 				sp->channel = (branch << 1) + 1;
281 			sp->dimm = sp->rank >> 1;
282 			sp->bank = (recmema >> 12) & BANK_MASK;
283 			sp->cas = (recmemb >> 16) & CAS_MASK;
284 			sp->ras = recmemb & RAS_MASK;
285 			intr = "nb.mem_ce";
286 		} else if ((ferr_nf_fbd & ERR_NF_FBD_SPARE) != 0) {
287 			/* spare dimm M27, M28 */
288 			intr = "nb.mem_ds";
289 			sp->channel = -1;
290 			if (rp->nb.nf_fbd_regs.spcps & SPCPS_SPARE_DEPLOYED) {
291 				sp->rank =
292 				    SPCPS_FAILED_RANK(rp->nb.nf_fbd_regs.spcps);
293 				nb_used_spare_rank(sp->branch, sp->rank);
294 				nb_config_gen++;
295 			}
296 		} else if ((ferr_nf_fbd & ERR_NF_FBD_M22) != 0) {
297 			intr = "nb.spd";	/* SPD protocol */
298 		}
299 	}
300 	if (sp->ras != -1) {
301 		sp->pa = dimm_getphys(sp->branch, sp->rank, sp->bank, sp->ras,
302 		    sp->cas);
303 		sp->offset = dimm_getoffset(sp->branch, sp->rank, sp->bank,
304 		    sp->ras, sp->cas);
305 	}
306 	return (intr);
307 }
308 
309 static struct mch_error_code fat_int_error_code[] = {
310 	{ 14, EMASK_INT_B14, ERR_FAT_INT_B14 },
311 	{ 12, EMASK_INT_B12, ERR_FAT_INT_B12 },
312 	{ 25, EMASK_INT_B25, ERR_FAT_INT_B25 },
313 	{ 23, EMASK_INT_B23, ERR_FAT_INT_B23 },
314 	{ 21, EMASK_INT_B21, ERR_FAT_INT_B21 },
315 	{ 7, EMASK_INT_B7, ERR_FAT_INT_B7 },
316 	{ 4, EMASK_INT_B4, ERR_FAT_INT_B4 },
317 	{ 3, EMASK_INT_B3, ERR_FAT_INT_B3 },
318 	{ 2, EMASK_INT_B2, ERR_FAT_INT_B2 },
319 	{ 1, EMASK_INT_B1, ERR_FAT_INT_B1 }
320 };
321 
322 static struct mch_error_code nf_int_error_code[] = {
323 	{ 27, 0, ERR_NF_INT_B27 },
324 	{ 24, 0, ERR_NF_INT_B24 },
325 	{ 22, EMASK_INT_B22, ERR_NF_INT_B22 },
326 	{ 20, EMASK_INT_B20, ERR_NF_INT_B20 },
327 	{ 19, EMASK_INT_B19, ERR_NF_INT_B19 },
328 	{ 18, 0, ERR_NF_INT_B18 },
329 	{ 17, 0, ERR_NF_INT_B17 },
330 	{ 16, 0, ERR_NF_INT_B16 },
331 	{ 11, EMASK_INT_B11, ERR_NF_INT_B11 },
332 	{ 10, EMASK_INT_B10, ERR_NF_INT_B10 },
333 	{ 9, EMASK_INT_B9, ERR_NF_INT_B9 },
334 	{ 8, EMASK_INT_B8, ERR_NF_INT_B8 },
335 	{ 6, EMASK_INT_B6, ERR_NF_INT_B6 },
336 	{ 5, EMASK_INT_B5, ERR_NF_INT_B5 }
337 };
338 
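/*
 * Look up the Intel error list entry (Bxx) for the fatal and non-fatal
 * internal error registers and mask further machine checks for the
 * matched errors.  Returns -1 for multiple or unknown errors.
 */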
339 static int
340 intel_int_err(uint16_t err_fat_int, uint16_t err_nf_int)
341 {
342 	int rt = -1;
343 	int nerr = 0;
344 	uint32_t emask_int = 0;
345 	int i;
346 	int sz;
347 
348 	sz = sizeof (fat_int_error_code) / sizeof (struct mch_error_code);
349 
350 	for (i = 0; i < sz; i++) {
351 		if (err_fat_int & fat_int_error_code[i].error_bit) {
352 			rt = fat_int_error_code[i].intel_error_list;
353 			emask_int |= fat_int_error_code[i].emask;
354 			nerr++;
355 		}
356 	}
357 
358 	if (nb_chipset == INTEL_NB_5400 &&
359 	    (err_nf_int & NERR_NF_5400_INT_B26) != 0) {
360 		err_nf_int &= ~NERR_NF_5400_INT_B26;
361 		rt = 26;
362 		nerr++;
363 	}
364 
365 	sz = sizeof (nf_int_error_code) / sizeof (struct mch_error_code);
366 
367 	for (i = 0; i < sz; i++) {
368 		if (err_nf_int & nf_int_error_code[i].error_bit) {
369 			rt = nf_int_error_code[i].intel_error_list;
370 			emask_int |= nf_int_error_code[i].emask;
371 			nerr++;
372 		}
373 	}
374 
375 	if (emask_int)
376 		nb_int_mask_mc(emask_int);
377 	if (nerr > 1)
378 		rt = -1;
379 	return (rt);
380 }
381 
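/*
 * Capture the internal error registers and clear the FERR/NERR bits
 * that were set.  For injected (interpose) errors the read-only detail
 * registers are also rewritten to clear them from the pcii cache.
 */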
382 static void
383 log_int_err(nb_regs_t *rp, int *interpose)
384 {
385 	int t = 0;
386 
387 	rp->flag = NB_REG_LOG_INT;
388 	rp->nb.int_regs.ferr_fat_int = FERR_FAT_INT_RD(interpose);
389 	rp->nb.int_regs.ferr_nf_int = FERR_NF_INT_RD(&t);
390 	*interpose |= t;
391 	rp->nb.int_regs.nerr_fat_int = NERR_FAT_INT_RD(&t);
392 	*interpose |= t;
393 	rp->nb.int_regs.nerr_nf_int = NERR_NF_INT_RD(&t);
394 	*interpose |= t;
395 	rp->nb.int_regs.nrecint = NRECINT_RD();
396 	rp->nb.int_regs.recint = RECINT_RD();
397 	rp->nb.int_regs.nrecsf = NRECSF_RD();
398 	rp->nb.int_regs.recsf = RECSF_RD();
399 
400 	if (rp->nb.int_regs.ferr_fat_int || *interpose)
401 		FERR_FAT_INT_WR(rp->nb.int_regs.ferr_fat_int);
402 	if (rp->nb.int_regs.ferr_nf_int || *interpose)
403 		FERR_NF_INT_WR(rp->nb.int_regs.ferr_nf_int);
404 	if (rp->nb.int_regs.nerr_fat_int)
405 		NERR_FAT_INT_WR(rp->nb.int_regs.nerr_fat_int);
406 	if (rp->nb.int_regs.nerr_nf_int)
407 		NERR_NF_INT_WR(rp->nb.int_regs.nerr_nf_int);
408 	/* if interpose write read-only registers to clear from pcii cache */
409 	if (*interpose) {
410 		NRECINT_WR();
411 		RECINT_WR();
412 		NRECSF_WR();
413 		RECSF_WR();
414 	}
415 }
416 
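/*
 * Capture and clear the thermal error registers (used only for the
 * 5400 chipset).
 */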
417 static void
418 log_thermal_err(nb_regs_t *rp, int *interpose)
419 {
420 	int t = 0;
421 
422 	rp->flag = NB_REG_LOG_THR;
423 	rp->nb.thr_regs.ferr_fat_thr = FERR_FAT_THR_RD(interpose);
424 	rp->nb.thr_regs.nerr_fat_thr = NERR_FAT_THR_RD(&t);
425 	*interpose |= t;
426 	rp->nb.thr_regs.ferr_nf_thr = FERR_NF_THR_RD(&t);
427 	*interpose |= t;
428 	rp->nb.thr_regs.nerr_nf_thr = NERR_NF_THR_RD(&t);
429 	*interpose |= t;
430 	rp->nb.thr_regs.ctsts = CTSTS_RD();
431 	rp->nb.thr_regs.thrtsts = THRTSTS_RD();
432 
433 	if (rp->nb.thr_regs.ferr_fat_thr || *interpose)
434 		FERR_FAT_THR_WR(rp->nb.thr_regs.ferr_fat_thr);
435 	if (rp->nb.thr_regs.nerr_fat_thr || *interpose)
436 		NERR_FAT_THR_WR(rp->nb.thr_regs.nerr_fat_thr);
437 	if (rp->nb.thr_regs.ferr_nf_thr || *interpose)
438 		FERR_NF_THR_WR(rp->nb.thr_regs.ferr_nf_thr);
439 	if (rp->nb.thr_regs.nerr_nf_thr || *interpose)
440 		NERR_NF_THR_WR(rp->nb.thr_regs.nerr_nf_thr);
441 
442 	if (*interpose) {
443 		CTSTS_WR(rp->nb.thr_regs.ctsts);
444 		THRTSTS_WR(rp->nb.thr_regs.thrtsts);
445 	}
446 }
447 
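/*
 * Capture the PCI and PCI Express device status registers for a DMA
 * error.
 */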
448 static void
449 log_dma_err(nb_regs_t *rp, int *interpose)
450 {
451 	rp->flag = NB_REG_LOG_DMA;
452 
453 	rp->nb.dma_regs.pcists = PCISTS_RD(interpose);
454 	rp->nb.dma_regs.pexdevsts = PCIDEVSTS_RD();
455 }
456 
457 static struct mch_error_code fat_fsb_error_code[] = {
458 	{ 9, EMASK_FSB_F9, ERR_FAT_FSB_F9 },
459 	{ 2, EMASK_FSB_F2, ERR_FAT_FSB_F2 },
460 	{ 1, EMASK_FSB_F1, ERR_FAT_FSB_F1 }
461 };
462 
463 static struct mch_error_code nf_fsb_error_code[] = {
464 	{ 8, EMASK_FSB_F8, ERR_NF_FSB_F8 },
465 	{ 7, EMASK_FSB_F7, ERR_NF_FSB_F7 },
466 	{ 6, EMASK_FSB_F6, ERR_NF_FSB_F6 }
467 };
468 
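/*
 * Look up the Intel error list entry (Fxx) for the fatal and non-fatal
 * error registers of the given front side bus and mask further machine
 * checks for the matched errors.  Returns -1 for multiple or unknown
 * errors.
 */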
469 static int
470 intel_fsb_err(int fsb, uint8_t err_fat_fsb, uint8_t err_nf_fsb)
471 {
472 	int rt = -1;
473 	int nerr = 0;
474 	uint16_t emask_fsb = 0;
475 	int i;
476 	int sz;
477 
478 	sz = sizeof (fat_fsb_error_code) / sizeof (struct mch_error_code);
479 
480 	for (i = 0; i < sz; i++) {
481 		if (err_fat_fsb & fat_fsb_error_code[i].error_bit) {
482 			rt = fat_fsb_error_code[i].intel_error_list;
483 			emask_fsb |= fat_fsb_error_code[i].emask;
484 			nerr++;
485 		}
486 	}
487 
488 	sz = sizeof (nf_fsb_error_code) / sizeof (struct mch_error_code);
489 
490 	for (i = 0; i < sz; i++) {
491 		if (err_nf_fsb & nf_fsb_error_code[i].error_bit) {
492 			rt = nf_fsb_error_code[i].intel_error_list;
493 			emask_fsb |= nf_fsb_error_code[i].emask;
494 			nerr++;
495 		}
496 	}
497 
498 	if (emask_fsb)
499 		nb_fsb_mask_mc(fsb, emask_fsb);
500 	if (nerr > 1)
501 		rt = -1;
502 	return (rt);
503 }
504 
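/*
 * Capture and clear the error registers of the front side bus implicated
 * by the global FERR register.
 */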
505 static void
506 log_fsb_err(uint64_t ferr, nb_regs_t *rp, int *interpose)
507 {
508 	uint8_t fsb;
509 	int t = 0;
510 
511 	fsb = GE_FERR_FSB(ferr);
512 	rp->flag = NB_REG_LOG_FSB;
513 
514 	rp->nb.fsb_regs.fsb = fsb;
515 	rp->nb.fsb_regs.ferr_fat_fsb = FERR_FAT_FSB_RD(fsb, interpose);
516 	rp->nb.fsb_regs.ferr_nf_fsb = FERR_NF_FSB_RD(fsb, &t);
517 	*interpose |= t;
518 	rp->nb.fsb_regs.nerr_fat_fsb = NERR_FAT_FSB_RD(fsb, &t);
519 	*interpose |= t;
520 	rp->nb.fsb_regs.nerr_nf_fsb = NERR_NF_FSB_RD(fsb, &t);
521 	*interpose |= t;
522 	rp->nb.fsb_regs.nrecfsb = NRECFSB_RD(fsb);
523 	rp->nb.fsb_regs.nrecfsb_addr = NRECADDR_RD(fsb);
524 	rp->nb.fsb_regs.recfsb = RECFSB_RD(fsb);
525 	if (rp->nb.fsb_regs.ferr_fat_fsb || *interpose)
526 		FERR_FAT_FSB_WR(fsb, rp->nb.fsb_regs.ferr_fat_fsb);
527 	if (rp->nb.fsb_regs.ferr_nf_fsb || *interpose)
528 		FERR_NF_FSB_WR(fsb, rp->nb.fsb_regs.ferr_nf_fsb);
529 	/* if interpose write read-only registers to clear from pcii cache */
530 	if (*interpose) {
531 		NRECFSB_WR(fsb);
532 		NRECADDR_WR(fsb);
533 		RECFSB_WR(fsb);
534 	}
535 }
536 
537 static struct mch_error_code fat_pex_error_code[] = {
538 	{ 19, EMASK_UNCOR_PEX_IO19, PEX_FAT_IO19 },
539 	{ 18, EMASK_UNCOR_PEX_IO18, PEX_FAT_IO18 },
540 	{ 10, EMASK_UNCOR_PEX_IO10, PEX_FAT_IO10 },
541 	{ 9, EMASK_UNCOR_PEX_IO9, PEX_FAT_IO9 },
542 	{ 8, EMASK_UNCOR_PEX_IO8, PEX_FAT_IO8 },
543 	{ 7, EMASK_UNCOR_PEX_IO7, PEX_FAT_IO7 },
544 	{ 6, EMASK_UNCOR_PEX_IO6, PEX_FAT_IO6 },
545 	{ 5, EMASK_UNCOR_PEX_IO5, PEX_FAT_IO5 },
546 	{ 4, EMASK_UNCOR_PEX_IO4, PEX_FAT_IO4 },
547 	{ 3, EMASK_UNCOR_PEX_IO3, PEX_FAT_IO3 },
548 	{ 2, EMASK_UNCOR_PEX_IO2, PEX_FAT_IO2 },
549 	{ 0, EMASK_UNCOR_PEX_IO0, PEX_FAT_IO0 }
550 };
551 
552 static struct mch_error_code fat_unit_pex_5400_error_code[] = {
553 	{ 32, EMASK_UNIT_PEX_IO32, PEX_5400_FAT_IO32 },
554 	{ 31, EMASK_UNIT_PEX_IO31, PEX_5400_FAT_IO31 },
555 	{ 30, EMASK_UNIT_PEX_IO30, PEX_5400_FAT_IO30 },
556 	{ 29, EMASK_UNIT_PEX_IO29, PEX_5400_FAT_IO29 },
557 	{ 27, EMASK_UNIT_PEX_IO27, PEX_5400_FAT_IO27 },
558 	{ 26, EMASK_UNIT_PEX_IO26, PEX_5400_FAT_IO26 },
559 	{ 25, EMASK_UNIT_PEX_IO25, PEX_5400_FAT_IO25 },
560 	{ 24, EMASK_UNIT_PEX_IO24, PEX_5400_FAT_IO24 },
561 	{ 23, EMASK_UNIT_PEX_IO23, PEX_5400_FAT_IO23 },
562 	{ 22, EMASK_UNIT_PEX_IO22, PEX_5400_FAT_IO22 },
563 };
564 
565 static struct mch_error_code fat_pex_5400_error_code[] = {
566 	{ 19, EMASK_UNCOR_PEX_IO19, PEX_5400_FAT_IO19 },
567 	{ 18, EMASK_UNCOR_PEX_IO18, PEX_5400_FAT_IO18 },
568 	{ 10, EMASK_UNCOR_PEX_IO10, PEX_5400_FAT_IO10 },
569 	{ 9, EMASK_UNCOR_PEX_IO9, PEX_5400_FAT_IO9 },
570 	{ 8, EMASK_UNCOR_PEX_IO8, PEX_5400_FAT_IO8 },
571 	{ 7, EMASK_UNCOR_PEX_IO7, PEX_5400_FAT_IO7 },
572 	{ 6, EMASK_UNCOR_PEX_IO6, PEX_5400_FAT_IO6 },
573 	{ 5, EMASK_UNCOR_PEX_IO5, PEX_5400_FAT_IO5 },
574 	{ 4, EMASK_UNCOR_PEX_IO4, PEX_5400_FAT_IO4 },
575 	{ 2, EMASK_UNCOR_PEX_IO2, PEX_5400_FAT_IO2 },
576 	{ 0, EMASK_UNCOR_PEX_IO0, PEX_5400_FAT_IO0 }
577 };
578 
579 static struct mch_error_code fat_rp_5400_error_code[] = {
580 	{ 1, EMASK_RP_PEX_IO1, PEX_5400_FAT_IO1 }
581 };
582 
583 static struct mch_error_code fat_rp_error_code[] = {
584 	{ 1, EMASK_RP_PEX_IO1, PEX_FAT_IO1 }
585 };
586 
587 static struct mch_error_code uncor_pex_error_code[] = {
588 	{ 19, EMASK_UNCOR_PEX_IO19, PEX_NF_IO19 },
589 	{ 9, EMASK_UNCOR_PEX_IO9, PEX_NF_IO9 },
590 	{ 8, EMASK_UNCOR_PEX_IO8, PEX_NF_IO8 },
591 	{ 7, EMASK_UNCOR_PEX_IO7, PEX_NF_IO7 },
592 	{ 6, EMASK_UNCOR_PEX_IO6, PEX_NF_IO6 },
593 	{ 5, EMASK_UNCOR_PEX_IO5, PEX_NF_IO5 },
594 	{ 4, EMASK_UNCOR_PEX_IO4, PEX_NF_IO4 },
595 	{ 3, EMASK_UNCOR_PEX_IO3, PEX_NF_IO3 },
596 	{ 0, EMASK_UNCOR_PEX_IO0, PEX_NF_IO0 }
597 };
598 
599 static struct mch_error_code uncor_pex_5400_error_code[] = {
600 	{ 33, EMASK_UNIT_PEX_IO33, PEX_5400_NF_IO33 },
601 	{ 32, EMASK_UNIT_PEX_IO32, PEX_5400_NF_IO32 },
602 	{ 31, EMASK_UNIT_PEX_IO31, PEX_5400_NF_IO31 },
603 	{ 30, EMASK_UNIT_PEX_IO30, PEX_5400_NF_IO30 },
604 	{ 29, EMASK_UNIT_PEX_IO29, PEX_5400_NF_IO29 },
605 	{ 28, EMASK_UNIT_PEX_IO28, PEX_5400_NF_IO28 },
606 	{ 27, EMASK_UNIT_PEX_IO27, PEX_5400_NF_IO27 },
607 	{ 26, EMASK_UNIT_PEX_IO26, PEX_5400_NF_IO26 },
608 	{ 25, EMASK_UNIT_PEX_IO25, PEX_5400_NF_IO25 },
609 	{ 24, EMASK_UNIT_PEX_IO24, PEX_5400_NF_IO24 },
610 	{ 23, EMASK_UNIT_PEX_IO23, PEX_5400_NF_IO23 },
611 };
612 
613 static struct mch_error_code cor_pex_error_code[] = {
614 	{ 20, EMASK_COR_PEX_IO20, PEX_5400_NF_IO20 },
615 	{ 16, EMASK_COR_PEX_IO16, PEX_NF_IO16 },
616 	{ 15, EMASK_COR_PEX_IO15, PEX_NF_IO15 },
617 	{ 14, EMASK_COR_PEX_IO14, PEX_NF_IO14 },
618 	{ 13, EMASK_COR_PEX_IO13, PEX_NF_IO13 },
619 	{ 12, EMASK_COR_PEX_IO12, PEX_NF_IO12 },
620 	{ 10, 0, PEX_NF_IO10 },
621 	{ 2, 0, PEX_NF_IO2 }
622 };
623 
624 static struct mch_error_code rp_pex_5400_error_code[] = {
625 	{ 17, EMASK_RP_PEX_IO17, PEX_5400_NF_IO17 },
626 	{ 11, EMASK_RP_PEX_IO11, PEX_5400_NF_IO11 }
627 };
628 
629 static struct mch_error_code cor_pex_5400_error_code1[] = {
630 	{ 19, EMASK_UNCOR_PEX_IO19, PEX_5400_NF_IO19 },
631 	{ 10, EMASK_UNCOR_PEX_IO10, PEX_5400_NF_IO10 },
632 	{ 9, EMASK_UNCOR_PEX_IO9, PEX_5400_NF_IO9 },
633 	{ 8, EMASK_UNCOR_PEX_IO8, PEX_5400_NF_IO8 },
634 	{ 7, EMASK_UNCOR_PEX_IO7, PEX_5400_NF_IO7 },
635 	{ 6, EMASK_UNCOR_PEX_IO6, PEX_5400_NF_IO6 },
636 	{ 5, EMASK_UNCOR_PEX_IO5, PEX_5400_NF_IO5 },
637 	{ 4, EMASK_UNCOR_PEX_IO4, PEX_5400_NF_IO4 },
638 	{ 2, EMASK_UNCOR_PEX_IO2, PEX_5400_NF_IO2 },
639 	{ 0, EMASK_UNCOR_PEX_IO0, PEX_5400_NF_IO0 }
640 };
641 
642 static struct mch_error_code cor_pex_5400_error_code2[] = {
643 	{ 20, EMASK_COR_PEX_IO20, PEX_5400_NF_IO20 },
644 	{ 16, EMASK_COR_PEX_IO16, PEX_5400_NF_IO16 },
645 	{ 15, EMASK_COR_PEX_IO15, PEX_5400_NF_IO15 },
646 	{ 14, EMASK_COR_PEX_IO14, PEX_5400_NF_IO14 },
647 	{ 13, EMASK_COR_PEX_IO13, PEX_5400_NF_IO13 },
648 	{ 12, EMASK_COR_PEX_IO12, PEX_5400_NF_IO12 }
649 };
650 
651 static struct mch_error_code cor_pex_5400_error_code3[] = {
652 	{ 33, EMASK_UNIT_PEX_IO33, PEX_5400_NF_IO33 },
653 	{ 32, EMASK_UNIT_PEX_IO32, PEX_5400_NF_IO32 },
654 	{ 31, EMASK_UNIT_PEX_IO31, PEX_5400_NF_IO31 },
655 	{ 30, EMASK_UNIT_PEX_IO30, PEX_5400_NF_IO30 },
656 	{ 29, EMASK_UNIT_PEX_IO29, PEX_5400_NF_IO29 },
657 	{ 28, EMASK_UNIT_PEX_IO28, PEX_5400_NF_IO28 },
658 	{ 27, EMASK_UNIT_PEX_IO27, PEX_5400_NF_IO27 },
659 	{ 26, EMASK_UNIT_PEX_IO26, PEX_5400_NF_IO26 },
660 	{ 25, EMASK_UNIT_PEX_IO25, PEX_5400_NF_IO25 },
661 	{ 24, EMASK_UNIT_PEX_IO24, PEX_5400_NF_IO24 },
662 	{ 23, EMASK_UNIT_PEX_IO23, PEX_5400_NF_IO23 }
663 };
664 
665 static struct mch_error_code rp_pex_error_code[] = {
666 	{ 17, EMASK_RP_PEX_IO17, PEX_NF_IO17 },
667 	{ 11, EMASK_RP_PEX_IO11, PEX_NF_IO11 },
668 };
669 
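/*
 * Look up the Intel error list entry (IOxx) for the PCI Express fatal
 * and non-fatal/correctable error registers.  Used for chipsets other
 * than the 5400; see intel_pex_5400_err() below.  Returns -1 for
 * multiple or unknown errors.
 */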
670 static int
671 intel_pex_err(uint32_t pex_fat, uint32_t pex_nf_cor)
672 {
673 	int rt = -1;
674 	int nerr = 0;
675 	int i;
676 	int sz;
677 
678 	sz = sizeof (fat_pex_error_code) / sizeof (struct mch_error_code);
679 
680 	for (i = 0; i < sz; i++) {
681 		if (pex_fat & fat_pex_error_code[i].error_bit) {
682 			rt = fat_pex_error_code[i].intel_error_list;
683 			nerr++;
684 		}
685 	}
686 	sz = sizeof (fat_rp_error_code) / sizeof (struct mch_error_code);
687 
688 	for (i = 0; i < sz; i++) {
689 		if (pex_fat & fat_rp_error_code[i].error_bit) {
690 			rt = fat_rp_error_code[i].intel_error_list;
691 			nerr++;
692 		}
693 	}
694 	sz = sizeof (uncor_pex_error_code) / sizeof (struct mch_error_code);
695 
696 	for (i = 0; i < sz; i++) {
697 		if (pex_nf_cor & uncor_pex_error_code[i].error_bit) {
698 			rt = uncor_pex_error_code[i].intel_error_list;
699 			nerr++;
700 		}
701 	}
702 
703 	sz = sizeof (cor_pex_error_code) / sizeof (struct mch_error_code);
704 
705 	for (i = 0; i < sz; i++) {
706 		if (pex_nf_cor & cor_pex_error_code[i].error_bit) {
707 			rt = cor_pex_error_code[i].intel_error_list;
708 			nerr++;
709 		}
710 	}
711 	sz = sizeof (rp_pex_error_code) / sizeof (struct mch_error_code);
712 
713 	for (i = 0; i < sz; i++) {
714 		if (pex_nf_cor & rp_pex_error_code[i].error_bit) {
715 			rt = rp_pex_error_code[i].intel_error_list;
716 			nerr++;
717 		}
718 	}
719 
720 	if (nerr > 1)
721 		rt = -1;
722 	return (rt);
723 }
724 
725 static struct mch_error_code fat_thr_error_code[] = {
726 	{ 2, EMASK_THR_F2, ERR_FAT_THR_F2 },
727 	{ 1, EMASK_THR_F1, ERR_FAT_THR_F1 }
728 };
729 
730 static struct mch_error_code nf_thr_error_code[] = {
731 	{ 5, EMASK_THR_F5, ERR_NF_THR_F5 },
732 	{ 4, EMASK_THR_F4, ERR_NF_THR_F4 },
733 	{ 3, EMASK_THR_F3, ERR_NF_THR_F3 }
734 };
735 
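/*
 * Look up the Intel error list entry (THxx) for the fatal and non-fatal
 * thermal error registers and mask further machine checks for the
 * matched errors.  Returns -1 for multiple or unknown errors.
 */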
736 static int
737 intel_thr_err(uint8_t err_fat_thr, uint8_t err_nf_thr)
738 {
739 	int rt = -1;
740 	int nerr = 0;
741 	uint16_t emask_thr = 0;
742 	int i;
743 	int sz;
744 
745 	sz = sizeof (fat_thr_error_code) / sizeof (struct mch_error_code);
746 
747 	for (i = 0; i < sz; i++) {
748 		if (err_fat_thr & fat_thr_error_code[i].error_bit) {
749 			rt = fat_thr_error_code[i].intel_error_list;
750 			emask_thr |= fat_thr_error_code[i].emask;
751 			nerr++;
752 		}
753 	}
754 
755 	sz = sizeof (nf_thr_error_code) / sizeof (struct mch_error_code);
756 
757 	for (i = 0; i < sz; i++) {
758 		if (err_nf_thr & nf_thr_error_code[i].error_bit) {
759 			rt = nf_thr_error_code[i].intel_error_list;
760 			emask_thr |= nf_thr_error_code[i].emask;
761 			nerr++;
762 		}
763 	}
764 
765 	if (emask_thr)
766 		nb_thr_mask_mc(emask_thr);
767 	if (nerr > 1)
768 		rt = -1;
769 	return (rt);
770 }
771 
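/*
 * 5400 variant of intel_pex_err(), covering the additional unit and
 * root port error bits defined for that chipset.
 */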
772 static int
773 intel_pex_5400_err(uint32_t pex_fat, uint32_t pex_nf_cor)
774 {
775 	int rt = -1;
776 	int nerr = 0;
777 	int i;
778 	int sz;
779 
780 	sz = sizeof (fat_pex_5400_error_code) / sizeof (struct mch_error_code);
781 
782 	for (i = 0; i < sz; i++) {
783 		if (pex_fat & fat_pex_5400_error_code[i].error_bit) {
784 			rt = fat_pex_5400_error_code[i].intel_error_list;
785 			nerr++;
786 		}
787 	}
788 	sz = sizeof (fat_rp_5400_error_code) / sizeof (struct mch_error_code);
789 
790 	for (i = 0; i < sz; i++) {
791 		if (pex_fat & fat_rp_5400_error_code[i].error_bit) {
792 			rt = fat_rp_5400_error_code[i].intel_error_list;
793 			nerr++;
794 		}
795 	}
796 	sz = sizeof (fat_unit_pex_5400_error_code) /
797 	    sizeof (struct mch_error_code);
798 
799 	for (i = 0; i < sz; i++) {
800 		if (pex_fat &
801 		    fat_unit_pex_5400_error_code[i].error_bit) {
802 			rt = fat_unit_pex_5400_error_code[i].intel_error_list;
803 			nerr++;
804 		}
805 	}
806 	sz = sizeof (uncor_pex_5400_error_code) /
807 	    sizeof (struct mch_error_code);
808 
809 	for (i = 0; i < sz; i++) {
810 		if (pex_fat & uncor_pex_5400_error_code[i].error_bit) {
811 			rt = uncor_pex_5400_error_code[i].intel_error_list;
812 			nerr++;
813 		}
814 	}
815 
816 	sz = sizeof (rp_pex_5400_error_code) / sizeof (struct mch_error_code);
817 
818 	for (i = 0; i < sz; i++) {
819 		if (pex_nf_cor & rp_pex_5400_error_code[i].error_bit) {
820 			rt = rp_pex_5400_error_code[i].intel_error_list;
821 			nerr++;
822 		}
823 	}
824 
825 	sz = sizeof (cor_pex_5400_error_code1) / sizeof (struct mch_error_code);
826 
827 	for (i = 0; i < sz; i++) {
828 		if (pex_nf_cor & cor_pex_5400_error_code1[i].error_bit) {
829 			rt = cor_pex_5400_error_code1[i].intel_error_list;
830 			nerr++;
831 		}
832 	}
833 
834 	sz = sizeof (cor_pex_5400_error_code2) / sizeof (struct mch_error_code);
835 
836 	for (i = 0; i < sz; i++) {
837 		if (pex_nf_cor & cor_pex_5400_error_code2[i].error_bit) {
838 			rt = cor_pex_5400_error_code2[i].intel_error_list;
839 			nerr++;
840 		}
841 	}
842 
843 	sz = sizeof (cor_pex_5400_error_code3) / sizeof (struct mch_error_code);
844 
845 	for (i = 0; i < sz; i++) {
846 		if (pex_nf_cor & cor_pex_5400_error_code3[i].error_bit) {
847 			rt = cor_pex_5400_error_code3[i].intel_error_list;
848 			nerr++;
849 		}
850 	}
851 
852 	if (nerr > 1)
853 		rt = -1;
854 	return (rt);
855 }
856 
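/*
 * Capture and clear the error registers of the PCI Express port (or ESI
 * port 0) implicated by the global FERR register.
 */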
857 static void
858 log_pex_err(uint64_t ferr, nb_regs_t *rp, int *interpose)
859 {
	uint8_t pex;
861 	int t = 0;
862 
863 	rp->flag = NB_REG_LOG_PEX;
864 	pex = GE_ERR_PEX(ferr);
865 
866 	rp->nb.pex_regs.pex = pex;
867 	rp->nb.pex_regs.pex_fat_ferr =  PEX_FAT_FERR_RD(pex, interpose);
868 	rp->nb.pex_regs.pex_fat_nerr = PEX_FAT_NERR_RD(pex, &t);
869 	*interpose |= t;
870 	rp->nb.pex_regs.pex_nf_corr_ferr = PEX_NF_FERR_RD(pex, &t);
871 	*interpose |= t;
872 	rp->nb.pex_regs.pex_nf_corr_nerr = PEX_NF_NERR_RD(pex, &t);
873 	*interpose |= t;
874 	rp->nb.pex_regs.uncerrsev = UNCERRSEV_RD(pex);
875 	rp->nb.pex_regs.rperrsts = RPERRSTS_RD(pex);
876 	rp->nb.pex_regs.rperrsid = RPERRSID_RD(pex);
	rp->nb.pex_regs.uncerrsts = UNCERRSTS_RD(pex);
881 	rp->nb.pex_regs.aerrcapctrl = AERRCAPCTRL_RD(pex);
882 	rp->nb.pex_regs.corerrsts = CORERRSTS_RD(pex);
883 	rp->nb.pex_regs.pexdevsts = PEXDEVSTS_RD(pex);
884 
885 	if (rp->nb.pex_regs.pex_fat_ferr || *interpose)
886 		PEX_FAT_FERR_WR(pex, rp->nb.pex_regs.pex_fat_ferr);
887 	if (rp->nb.pex_regs.pex_fat_nerr)
888 		PEX_FAT_NERR_WR(pex, rp->nb.pex_regs.pex_fat_nerr);
889 	if (rp->nb.pex_regs.pex_nf_corr_ferr || *interpose)
890 		PEX_NF_FERR_WR(pex, rp->nb.pex_regs.pex_nf_corr_ferr);
891 	if (rp->nb.pex_regs.pex_nf_corr_nerr)
892 		PEX_NF_NERR_WR(pex, rp->nb.pex_regs.pex_nf_corr_nerr);
	if (*interpose) {
		UNCERRSTS_WR(pex, rp->nb.pex_regs.uncerrsts);
		RPERRSTS_WR(pex, rp->nb.pex_regs.rperrsts);
		PEXDEVSTS_WR(pex, 0);
	}
899 }
900 
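/*
 * Capture and clear the fatal FBD error registers for the implicated
 * branch.  The previous uncorrectable error count is saved so that the
 * ereport carries both the current and last values.
 */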
901 static void
902 log_fat_fbd_err(nb_regs_t *rp, int *interpose)
903 {
904 	int channel, branch;
905 	int t = 0;
906 
907 	rp->flag = NB_REG_LOG_FAT_FBD;
908 	rp->nb.fat_fbd_regs.ferr_fat_fbd = FERR_FAT_FBD_RD(interpose);
909 	channel = (rp->nb.fat_fbd_regs.ferr_fat_fbd >> 28) & 3;
910 	branch = channel >> 1;
911 	rp->nb.fat_fbd_regs.nerr_fat_fbd = NERR_FAT_FBD_RD(&t);
912 	*interpose |= t;
913 	rp->nb.fat_fbd_regs.nrecmema = NRECMEMA_RD(branch);
914 	rp->nb.fat_fbd_regs.nrecmemb = NRECMEMB_RD(branch);
915 	rp->nb.fat_fbd_regs.nrecfglog = NRECFGLOG_RD(branch);
916 	rp->nb.fat_fbd_regs.nrecfbda = NRECFBDA_RD(branch);
917 	rp->nb.fat_fbd_regs.nrecfbdb = NRECFBDB_RD(branch);
918 	rp->nb.fat_fbd_regs.nrecfbdc = NRECFBDC_RD(branch);
919 	rp->nb.fat_fbd_regs.nrecfbdd = NRECFBDD_RD(branch);
920 	rp->nb.fat_fbd_regs.nrecfbde = NRECFBDE_RD(branch);
921 	rp->nb.fat_fbd_regs.nrecfbdf = NRECFBDF_RD(branch);
922 	rp->nb.fat_fbd_regs.spcps = SPCPS_RD(branch);
923 	rp->nb.fat_fbd_regs.spcpc = SPCPC_RD(branch);
924 	rp->nb.fat_fbd_regs.uerrcnt = UERRCNT_RD(branch);
925 	rp->nb.fat_fbd_regs.uerrcnt_last = uerrcnt[branch];
926 	uerrcnt[branch] = rp->nb.fat_fbd_regs.uerrcnt;
927 	rp->nb.fat_fbd_regs.badrama = BADRAMA_RD(branch);
928 	rp->nb.fat_fbd_regs.badramb = BADRAMB_RD(branch);
929 	rp->nb.fat_fbd_regs.badcnt = BADCNT_RD(branch);
930 	if (rp->nb.fat_fbd_regs.ferr_fat_fbd || *interpose)
931 		FERR_FAT_FBD_WR(rp->nb.fat_fbd_regs.ferr_fat_fbd);
932 	if (rp->nb.fat_fbd_regs.nerr_fat_fbd)
933 		NERR_FAT_FBD_WR(rp->nb.fat_fbd_regs.nerr_fat_fbd);
934 	/* if interpose write read-only registers to clear from pcii cache */
935 	if (*interpose) {
936 		NRECMEMA_WR(branch);
937 		NRECMEMB_WR(branch);
938 		NRECFGLOG_WR(branch);
939 		NRECFBDA_WR(branch);
940 		NRECFBDB_WR(branch);
941 		NRECFBDC_WR(branch);
942 		NRECFBDD_WR(branch);
943 		NRECFBDE_WR(branch);
944 		NRECFBDF_WR(branch);
945 	}
946 }
947 
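/*
 * Capture and clear the non-fatal FBD error registers for the implicated
 * branch and channel, saving the previous correctable error counts so
 * that the ereport carries both the current and last values.
 */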
948 static void
949 log_nf_fbd_err(nb_regs_t *rp, int *interpose)
950 {
951 	int channel, branch;
952 	int t = 0;
953 
954 	rp->flag = NB_REG_LOG_NF_FBD;
955 	rp->nb.nf_fbd_regs.ferr_nf_fbd = FERR_NF_FBD_RD(interpose);
956 	channel = (rp->nb.nf_fbd_regs.ferr_nf_fbd >> 28) & 3;
957 	branch = channel >> 1;
958 	rp->nb.nf_fbd_regs.nerr_nf_fbd = NERR_NF_FBD_RD(&t);
959 	*interpose |= t;
960 	rp->nb.nf_fbd_regs.redmemb = REDMEMB_RD();
961 	rp->nb.nf_fbd_regs.recmema = RECMEMA_RD(branch);
962 	rp->nb.nf_fbd_regs.recmemb = RECMEMB_RD(branch);
963 	rp->nb.nf_fbd_regs.recfglog = RECFGLOG_RD(branch);
964 	rp->nb.nf_fbd_regs.recfbda = RECFBDA_RD(branch);
965 	rp->nb.nf_fbd_regs.recfbdb = RECFBDB_RD(branch);
966 	rp->nb.nf_fbd_regs.recfbdc = RECFBDC_RD(branch);
967 	rp->nb.nf_fbd_regs.recfbdd = RECFBDD_RD(branch);
968 	rp->nb.nf_fbd_regs.recfbde = RECFBDE_RD(branch);
969 	rp->nb.nf_fbd_regs.recfbdf = RECFBDF_RD(branch);
970 	rp->nb.nf_fbd_regs.spcps = SPCPS_RD(branch);
971 	rp->nb.nf_fbd_regs.spcpc = SPCPC_RD(branch);
972 	if (nb_chipset == INTEL_NB_7300 || nb_chipset == INTEL_NB_5400) {
973 		rp->nb.nf_fbd_regs.cerrcnta = CERRCNTA_RD(branch, channel);
974 		rp->nb.nf_fbd_regs.cerrcntb = CERRCNTB_RD(branch, channel);
975 		rp->nb.nf_fbd_regs.cerrcntc = CERRCNTC_RD(branch, channel);
976 		rp->nb.nf_fbd_regs.cerrcntd = CERRCNTD_RD(branch, channel);
977 	} else {
978 		rp->nb.nf_fbd_regs.cerrcnta = CERRCNT_RD(branch);
979 		rp->nb.nf_fbd_regs.cerrcntb = 0;
980 		rp->nb.nf_fbd_regs.cerrcntc = 0;
981 		rp->nb.nf_fbd_regs.cerrcntd = 0;
982 	}
983 	rp->nb.nf_fbd_regs.cerrcnta_last = cerrcnta[branch][channel & 1];
984 	rp->nb.nf_fbd_regs.cerrcntb_last = cerrcntb[branch][channel & 1];
985 	rp->nb.nf_fbd_regs.cerrcntc_last = cerrcntc[branch][channel & 1];
986 	rp->nb.nf_fbd_regs.cerrcntd_last = cerrcntd[branch][channel & 1];
987 	cerrcnta[branch][channel & 1] = rp->nb.nf_fbd_regs.cerrcnta;
988 	cerrcntb[branch][channel & 1] = rp->nb.nf_fbd_regs.cerrcntb;
989 	cerrcntc[branch][channel & 1] = rp->nb.nf_fbd_regs.cerrcntc;
990 	cerrcntd[branch][channel & 1] = rp->nb.nf_fbd_regs.cerrcntd;
991 	rp->nb.nf_fbd_regs.badrama = BADRAMA_RD(branch);
992 	rp->nb.nf_fbd_regs.badramb = BADRAMB_RD(branch);
993 	rp->nb.nf_fbd_regs.badcnt = BADCNT_RD(branch);
994 	if (rp->nb.nf_fbd_regs.ferr_nf_fbd || *interpose)
995 		FERR_NF_FBD_WR(rp->nb.nf_fbd_regs.ferr_nf_fbd);
996 	if (rp->nb.nf_fbd_regs.nerr_nf_fbd)
997 		NERR_NF_FBD_WR(rp->nb.nf_fbd_regs.nerr_nf_fbd);
998 	/* if interpose write read-only registers to clear from pcii cache */
999 	if (*interpose) {
1000 		RECMEMA_WR(branch);
1001 		RECMEMB_WR(branch);
1002 		RECFGLOG_WR(branch);
1003 		RECFBDA_WR(branch);
1004 		RECFBDB_WR(branch);
1005 		RECFBDC_WR(branch);
1006 		RECFBDD_WR(branch);
1007 		RECFBDE_WR(branch);
1008 		RECFBDF_WR(branch);
1009 		SPCPS_WR(branch);
1010 	}
1011 }
1012 
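/*
 * Log the first error reported in the global FERR register: capture the
 * detail registers for the error type, clear the corresponding bits in
 * the caller's copy of NERR and dispatch the logout to the error queue,
 * synchronously if a panic is imminent.
 */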
1013 static void
1014 log_ferr(uint64_t ferr, uint32_t *nerrp, nb_logout_t *log, int willpanic)
1015 {
1016 	nb_regs_t *rp = &log->nb_regs;
1017 	uint32_t nerr = *nerrp;
1018 	int interpose = 0;
1019 
1020 	log->acl_timestamp = gethrtime_waitfree();
1021 	if ((ferr & (GE_PCIEX_FATAL | GE_PCIEX_NF)) != 0) {
1022 		log_pex_err(ferr, rp, &interpose);
1023 		*nerrp = nerr & ~(GE_PCIEX_FATAL | GE_PCIEX_NF);
1024 	} else if ((ferr & GE_FBD_FATAL) != 0) {
1025 		log_fat_fbd_err(rp, &interpose);
1026 		*nerrp = nerr & ~GE_NERR_FBD_FATAL;
1027 	} else if ((ferr & GE_FBD_NF) != 0) {
1028 		log_nf_fbd_err(rp, &interpose);
1029 		*nerrp = nerr & ~GE_NERR_FBD_NF;
1030 	} else if ((ferr & (GE_FERR_FSB_FATAL | GE_FERR_FSB_NF)) != 0) {
1031 		log_fsb_err(ferr, rp, &interpose);
1032 		*nerrp = nerr & ~(GE_NERR_FSB_FATAL | GE_NERR_FSB_NF);
1033 	} else if ((ferr & (GE_DMA_FATAL | GE_DMA_NF)) != 0) {
1034 		log_dma_err(rp, &interpose);
1035 		*nerrp = nerr & ~(GE_DMA_FATAL | GE_DMA_NF);
1036 	} else if ((ferr & (GE_INT_FATAL | GE_INT_NF)) != 0) {
1037 		log_int_err(rp, &interpose);
1038 		*nerrp = nerr & ~(GE_INT_FATAL | GE_INT_NF);
1039 	} else if (nb_chipset == INTEL_NB_5400 &&
1040 	    (ferr & (GE_FERR_THERMAL_FATAL | GE_FERR_THERMAL_NF)) != 0) {
1041 		log_thermal_err(rp, &interpose);
1042 		*nerrp = nerr & ~(GE_FERR_THERMAL_FATAL | GE_FERR_THERMAL_NF);
1043 	}
1044 	if (interpose)
1045 		log->type = "inject";
1046 	else
1047 		log->type = "error";
1048 	errorq_dispatch(nb_queue, log, sizeof (nb_logout_t),
1049 	    willpanic ? ERRORQ_SYNC : ERRORQ_ASYNC);
1050 }
1051 
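/*
 * As log_ferr(), but driven by the global NERR register for errors that
 * were not the first detected.
 */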
1052 static void
1053 log_nerr(uint32_t *errp, nb_logout_t *log, int willpanic)
1054 {
1055 	uint32_t err;
1056 	nb_regs_t *rp = &log->nb_regs;
1057 	int interpose = 0;
1058 
1059 	err = *errp;
1060 	log->acl_timestamp = gethrtime_waitfree();
1061 	if ((err & (GE_PCIEX_FATAL | GE_PCIEX_NF)) != 0) {
1062 		log_pex_err(err, rp, &interpose);
1063 		*errp = err & ~(GE_PCIEX_FATAL | GE_PCIEX_NF);
1064 	} else if ((err & GE_NERR_FBD_FATAL) != 0) {
1065 		log_fat_fbd_err(rp, &interpose);
1066 		*errp = err & ~GE_NERR_FBD_FATAL;
1067 	} else if ((err & GE_NERR_FBD_NF) != 0) {
1068 		log_nf_fbd_err(rp, &interpose);
1069 		*errp = err & ~GE_NERR_FBD_NF;
1070 	} else if ((err & (GE_NERR_FSB_FATAL | GE_NERR_FSB_NF)) != 0) {
1071 		log_fsb_err(GE_NERR_TO_FERR_FSB(err), rp, &interpose);
1072 		*errp = err & ~(GE_NERR_FSB_FATAL | GE_NERR_FSB_NF);
1073 	} else if ((err & (GE_DMA_FATAL | GE_DMA_NF)) != 0) {
1074 		log_dma_err(rp, &interpose);
1075 		*errp = err & ~(GE_DMA_FATAL | GE_DMA_NF);
1076 	} else if ((err & (GE_INT_FATAL | GE_INT_NF)) != 0) {
1077 		log_int_err(rp, &interpose);
1078 		*errp = err & ~(GE_INT_FATAL | GE_INT_NF);
1079 	}
1080 	if (interpose)
1081 		log->type = "inject";
1082 	else
1083 		log->type = "error";
1084 	errorq_dispatch(nb_queue, log, sizeof (nb_logout_t),
1085 	    willpanic ? ERRORQ_SYNC : ERRORQ_ASYNC);
1086 }
1087 
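/*
 * Error trap entry point.  Skipped if another CPU already holds
 * nb_mutex; otherwise makes NB_MAX_ERRORS passes over the global
 * FERR/NERR registers, logging and clearing whatever error is found
 * on each pass.  If nothing was logged and machine check masks had
 * been set, the masks are restored.
 */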
1088 /*ARGSUSED*/
1089 void
1090 nb_error_trap(cmi_hdl_t hdl, boolean_t ismc, boolean_t willpanic)
1091 {
1092 	uint64_t ferr;
1093 	uint32_t nerr, err;
1094 	int nmc = 0;
1095 	int i;
1096 
1097 	if (mutex_tryenter(&nb_mutex) == 0)
1098 		return;
1099 
1100 	nerr = NERR_GLOBAL_RD();
1101 	err = nerr;
1102 	for (i = 0; i < NB_MAX_ERRORS; i++) {
1103 		ferr = FERR_GLOBAL_RD();
1104 		nb_log.nb_regs.chipset = nb_chipset;
1105 		nb_log.nb_regs.ferr = ferr;
1106 		nb_log.nb_regs.nerr = nerr;
1107 		if (ferr) {
1108 			log_ferr(ferr, &err, &nb_log, willpanic);
1109 			FERR_GLOBAL_WR(ferr);
1110 			nmc++;
1111 		} else if (err) {
1112 			log_nerr(&err, &nb_log, willpanic);
1113 			nmc++;
1114 		}
1115 	}
1116 	if (nerr) {
1117 		NERR_GLOBAL_WR(nerr);
1118 	}
1119 	if (nmc == 0 && nb_mask_mc_set)
1120 		nb_mask_mc_reset();
1121 	mutex_exit(&nb_mutex);
1122 }
1123 
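/*
 * The nb_*_payload() routines below convert the captured register set
 * for each error type into FMA ereport payload members.
 */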
1124 static void
1125 nb_fsb_err_payload(const nb_regs_t *nb_regs, nvlist_t *payload,
1126     nb_scatchpad_t *data)
1127 {
1128 	int intel_error_list;
1129 	char buf[32];
1130 
1131 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_FSB,
1132 	    DATA_TYPE_UINT8, nb_regs->nb.fsb_regs.fsb, NULL);
1133 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_FERR_FAT_FSB,
1134 	    DATA_TYPE_UINT8, nb_regs->nb.fsb_regs.ferr_fat_fsb, NULL);
1135 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NERR_FAT_FSB,
1136 	    DATA_TYPE_UINT8, nb_regs->nb.fsb_regs.nerr_fat_fsb, NULL);
1137 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_FERR_NF_FSB,
1138 	    DATA_TYPE_UINT8, nb_regs->nb.fsb_regs.ferr_nf_fsb, NULL);
1139 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NERR_NF_FSB,
1140 	    DATA_TYPE_UINT8, nb_regs->nb.fsb_regs.nerr_nf_fsb, NULL);
1141 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NRECFSB,
1142 	    DATA_TYPE_UINT32, nb_regs->nb.fsb_regs.nrecfsb, NULL);
1143 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NRECFSB_ADDR,
1144 	    DATA_TYPE_UINT64, nb_regs->nb.fsb_regs.nrecfsb_addr, NULL);
1145 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_RECFSB,
1146 	    DATA_TYPE_UINT32, nb_regs->nb.fsb_regs.recfsb, NULL);
1147 	intel_error_list = data->intel_error_list;
1148 	if (intel_error_list >= 0)
1149 		(void) snprintf(buf, sizeof (buf), "F%d", intel_error_list);
1150 	else
1151 		(void) snprintf(buf, sizeof (buf), "Multiple or unknown error");
1152 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_ERROR_NO,
1153 	    DATA_TYPE_STRING, buf, NULL);
1154 }
1155 
1156 static void
1157 nb_pex_err_payload(const nb_regs_t *nb_regs, nvlist_t *payload,
1158     nb_scatchpad_t *data)
1159 {
1160 	int intel_error_list;
1161 	char buf[32];
1162 
1163 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_PEX,
1164 	    DATA_TYPE_UINT8, nb_regs->nb.pex_regs.pex, NULL);
1165 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_PEX_FAT_FERR,
1166 	    DATA_TYPE_UINT32, nb_regs->nb.pex_regs.pex_fat_ferr, NULL);
1167 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_PEX_FAT_NERR,
1168 	    DATA_TYPE_UINT32, nb_regs->nb.pex_regs.pex_fat_nerr, NULL);
1169 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_PEX_NF_CORR_FERR,
1170 	    DATA_TYPE_UINT32, nb_regs->nb.pex_regs.pex_nf_corr_ferr, NULL);
1171 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_PEX_NF_CORR_NERR,
1172 	    DATA_TYPE_UINT32, nb_regs->nb.pex_regs.pex_nf_corr_nerr, NULL);
1173 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_UNCERRSEV,
1174 	    DATA_TYPE_UINT32, nb_regs->nb.pex_regs.uncerrsev, NULL);
1175 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_RPERRSTS,
1176 	    DATA_TYPE_UINT32, nb_regs->nb.pex_regs.rperrsts, NULL);
1177 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_RPERRSID,
1178 	    DATA_TYPE_UINT32, nb_regs->nb.pex_regs.rperrsid, NULL);
1179 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_UNCERRSTS,
1180 	    DATA_TYPE_UINT32, nb_regs->nb.pex_regs.uncerrsts, NULL);
1181 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_AERRCAPCTRL,
1182 	    DATA_TYPE_UINT32, nb_regs->nb.pex_regs.aerrcapctrl, NULL);
1183 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_CORERRSTS,
1184 	    DATA_TYPE_UINT32, nb_regs->nb.pex_regs.corerrsts, NULL);
1185 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_PEXDEVSTS,
1186 	    DATA_TYPE_UINT16, nb_regs->nb.pex_regs.pexdevsts, NULL);
1187 	intel_error_list = data->intel_error_list;
1188 	if (intel_error_list >= 0)
1189 		(void) snprintf(buf, sizeof (buf), "IO%d", intel_error_list);
1190 	else
1191 		(void) snprintf(buf, sizeof (buf), "Multiple or unknown error");
1192 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_ERROR_NO,
1193 	    DATA_TYPE_STRING, buf, NULL);
1194 }
1195 
1196 static void
1197 nb_int_err_payload(const nb_regs_t *nb_regs, nvlist_t *payload,
1198     nb_scatchpad_t *data)
1199 {
1200 	int intel_error_list;
1201 	char buf[32];
1202 
1203 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_FERR_FAT_INT,
1204 	    DATA_TYPE_UINT16, nb_regs->nb.int_regs.ferr_fat_int, NULL);
1205 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_FERR_NF_INT,
1206 	    DATA_TYPE_UINT16, nb_regs->nb.int_regs.ferr_nf_int, NULL);
1207 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NERR_FAT_INT,
1208 	    DATA_TYPE_UINT16, nb_regs->nb.int_regs.nerr_fat_int, NULL);
1209 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NERR_NF_INT,
1210 	    DATA_TYPE_UINT16, nb_regs->nb.int_regs.nerr_nf_int, NULL);
1211 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NRECINT,
1212 	    DATA_TYPE_UINT32, nb_regs->nb.int_regs.nrecint, NULL);
1213 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_RECINT,
1214 	    DATA_TYPE_UINT32, nb_regs->nb.int_regs.recint, NULL);
1215 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NRECSF,
1216 	    DATA_TYPE_UINT64, nb_regs->nb.int_regs.nrecsf, NULL);
1217 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_RECSF,
1218 	    DATA_TYPE_UINT64, nb_regs->nb.int_regs.recsf, NULL);
1219 	intel_error_list = data->intel_error_list;
1220 	if (intel_error_list >= 0)
1221 		(void) snprintf(buf, sizeof (buf), "B%d", intel_error_list);
1222 	else
1223 		(void) snprintf(buf, sizeof (buf), "Multiple or unknown error");
1224 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_ERROR_NO,
1225 	    DATA_TYPE_STRING, buf, NULL);
1226 }
1227 
1228 static void
1229 nb_fat_fbd_err_payload(const nb_regs_t *nb_regs, nvlist_t *payload,
1230     nb_scatchpad_t *data)
1231 {
1232 	nb_mem_scatchpad_t *sp;
1233 	char buf[32];
1234 
1235 	sp = &((nb_scatchpad_t *)data)->ms;
1236 
1237 	if (sp->ras != -1) {
1238 		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_BANK,
1239 		    DATA_TYPE_INT32, sp->bank, NULL);
1240 		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_CAS,
1241 		    DATA_TYPE_INT32, sp->cas, NULL);
1242 		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_RAS,
1243 		    DATA_TYPE_INT32, sp->ras, NULL);
1244 		if (sp->offset != -1LL) {
1245 			fm_payload_set(payload, FM_FMRI_MEM_OFFSET,
1246 			    DATA_TYPE_UINT64, sp->offset, NULL);
1247 		}
1248 		if (sp->pa != -1LL) {
1249 			fm_payload_set(payload, FM_FMRI_MEM_PHYSADDR,
1250 			    DATA_TYPE_UINT64, sp->pa, NULL);
1251 		}
1252 	}
1253 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_FERR_FAT_FBD,
1254 	    DATA_TYPE_UINT32, nb_regs->nb.fat_fbd_regs.ferr_fat_fbd, NULL);
1255 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NERR_FAT_FBD,
1256 	    DATA_TYPE_UINT32, nb_regs->nb.fat_fbd_regs.nerr_fat_fbd, NULL);
1257 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NRECMEMA,
1258 	    DATA_TYPE_UINT32, nb_regs->nb.fat_fbd_regs.nrecmema, NULL);
1259 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NRECMEMB,
1260 	    DATA_TYPE_UINT32, nb_regs->nb.fat_fbd_regs.nrecmemb, NULL);
1261 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NRECFGLOG,
1262 	    DATA_TYPE_UINT32, nb_regs->nb.fat_fbd_regs.nrecfglog, NULL);
1263 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NRECFBDA,
1264 	    DATA_TYPE_UINT32, nb_regs->nb.fat_fbd_regs.nrecfbda, NULL);
1265 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NRECFBDB,
1266 	    DATA_TYPE_UINT32, nb_regs->nb.fat_fbd_regs.nrecfbdb, NULL);
1267 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NRECFBDC,
1268 	    DATA_TYPE_UINT32, nb_regs->nb.fat_fbd_regs.nrecfbdc, NULL);
1269 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NRECFBDD,
1270 	    DATA_TYPE_UINT32, nb_regs->nb.fat_fbd_regs.nrecfbdd, NULL);
1271 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NRECFBDE,
1272 	    DATA_TYPE_UINT32, nb_regs->nb.fat_fbd_regs.nrecfbde, NULL);
1273 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NRECFBDF,
1274 	    DATA_TYPE_UINT32, nb_regs->nb.fat_fbd_regs.nrecfbdf, NULL);
1275 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_SPCPS,
1276 	    DATA_TYPE_UINT8, nb_regs->nb.fat_fbd_regs.spcps, NULL);
1277 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_SPCPC,
1278 	    DATA_TYPE_UINT32, nb_regs->nb.fat_fbd_regs.spcpc, NULL);
1279 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_UERRCNT,
1280 	    DATA_TYPE_UINT32, nb_regs->nb.fat_fbd_regs.uerrcnt, NULL);
1281 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_UERRCNT_LAST,
1282 	    DATA_TYPE_UINT32, nb_regs->nb.fat_fbd_regs.uerrcnt_last, NULL);
1283 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_BADRAMA,
1284 	    DATA_TYPE_UINT32, nb_regs->nb.fat_fbd_regs.badrama, NULL);
1285 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_BADRAMB,
1286 	    DATA_TYPE_UINT16, nb_regs->nb.fat_fbd_regs.badramb, NULL);
1287 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_BADCNT,
1288 	    DATA_TYPE_UINT32, nb_regs->nb.fat_fbd_regs.badcnt, NULL);
1289 
1290 	if (sp->intel_error_list >= 0)
1291 		(void) snprintf(buf, sizeof (buf), "M%d", sp->intel_error_list);
1292 	else
1293 		(void) snprintf(buf, sizeof (buf), "Multiple or unknown error");
1294 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_ERROR_NO,
1295 	    DATA_TYPE_STRING, buf, NULL);
1296 }
1297 
1298 static void
1299 nb_nf_fbd_err_payload(const nb_regs_t *nb_regs, nvlist_t *payload,
1300     nb_scatchpad_t *data)
1301 {
1302 	nb_mem_scatchpad_t *sp;
1303 	char buf[32];
1304 
1305 	sp = &((nb_scatchpad_t *)data)->ms;
1306 
1307 	if (sp->dimm == -1 && sp->rank != -1) {
1308 		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_RANK,
1309 		    DATA_TYPE_INT32, sp->rank, NULL);
1310 	}
1311 	if (sp->ras != -1) {
1312 		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_BANK,
1313 		    DATA_TYPE_INT32, sp->bank, NULL);
1314 		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_CAS,
1315 		    DATA_TYPE_INT32, sp->cas, NULL);
1316 		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_RAS,
1317 		    DATA_TYPE_INT32, sp->ras, NULL);
1318 		if (sp->offset != -1LL) {
1319 			fm_payload_set(payload, FM_FMRI_MEM_OFFSET,
1320 			    DATA_TYPE_UINT64, sp->offset, NULL);
1321 		}
1322 		if (sp->pa != -1LL) {
1323 			fm_payload_set(payload, FM_FMRI_MEM_PHYSADDR,
1324 			    DATA_TYPE_UINT64, sp->pa, NULL);
1325 		}
1326 	}
1327 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_FERR_NF_FBD,
1328 	    DATA_TYPE_UINT32, nb_regs->nb.nf_fbd_regs.ferr_nf_fbd, NULL);
1329 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NERR_NF_FBD,
1330 	    DATA_TYPE_UINT32, nb_regs->nb.nf_fbd_regs.nerr_nf_fbd, NULL);
1331 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_RECMEMA,
1332 	    DATA_TYPE_UINT32, nb_regs->nb.nf_fbd_regs.recmema, NULL);
1333 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_RECMEMB,
1334 	    DATA_TYPE_UINT32, nb_regs->nb.nf_fbd_regs.recmemb, NULL);
1335 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_RECFGLOG,
1336 	    DATA_TYPE_UINT32, nb_regs->nb.nf_fbd_regs.recfglog, NULL);
1337 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_RECFBDA,
1338 	    DATA_TYPE_UINT32, nb_regs->nb.nf_fbd_regs.recfbda, NULL);
1339 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_RECFBDB,
1340 	    DATA_TYPE_UINT32, nb_regs->nb.nf_fbd_regs.recfbdb, NULL);
1341 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_RECFBDC,
1342 	    DATA_TYPE_UINT32, nb_regs->nb.nf_fbd_regs.recfbdc, NULL);
1343 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_RECFBDD,
1344 	    DATA_TYPE_UINT32, nb_regs->nb.nf_fbd_regs.recfbdd, NULL);
1345 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_RECFBDE,
1346 	    DATA_TYPE_UINT32, nb_regs->nb.nf_fbd_regs.recfbde, NULL);
1347 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_RECFBDF,
1348 	    DATA_TYPE_UINT32, nb_regs->nb.nf_fbd_regs.recfbdf, NULL);
1349 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_SPCPS,
1350 	    DATA_TYPE_UINT8, nb_regs->nb.nf_fbd_regs.spcps, NULL);
1351 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_SPCPC,
1352 	    DATA_TYPE_UINT32, nb_regs->nb.nf_fbd_regs.spcpc, NULL);
1353 	if (nb_chipset == INTEL_NB_7300 || nb_chipset == INTEL_NB_5400) {
1354 		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_CERRCNTA,
1355 		    DATA_TYPE_UINT32, nb_regs->nb.nf_fbd_regs.cerrcnta, NULL);
1356 		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_CERRCNTB,
1357 		    DATA_TYPE_UINT32, nb_regs->nb.nf_fbd_regs.cerrcntb, NULL);
1358 		if (nb_chipset == INTEL_NB_7300) {
1359 			fm_payload_set(payload,
1360 			    FM_EREPORT_PAYLOAD_NAME_CERRCNTC,
1361 			    DATA_TYPE_UINT32, nb_regs->nb.nf_fbd_regs.cerrcntc,
1362 			    NULL);
1363 			fm_payload_set(payload,
1364 			    FM_EREPORT_PAYLOAD_NAME_CERRCNTD,
1365 			    DATA_TYPE_UINT32, nb_regs->nb.nf_fbd_regs.cerrcntd,
1366 			    NULL);
1367 		}
1368 		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_CERRCNTA_LAST,
1369 		    DATA_TYPE_UINT32, nb_regs->nb.nf_fbd_regs.cerrcnta_last,
1370 		    NULL);
1371 		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_CERRCNTB_LAST,
1372 		    DATA_TYPE_UINT32, nb_regs->nb.nf_fbd_regs.cerrcntb_last,
1373 		    NULL);
1374 		if (nb_chipset == INTEL_NB_7300) {
1375 			fm_payload_set(payload,
1376 			    FM_EREPORT_PAYLOAD_NAME_CERRCNTC_LAST,
1377 			    DATA_TYPE_UINT32,
1378 			    nb_regs->nb.nf_fbd_regs.cerrcntc_last, NULL);
1379 			fm_payload_set(payload,
1380 			    FM_EREPORT_PAYLOAD_NAME_CERRCNTD_LAST,
1381 			    DATA_TYPE_UINT32,
1382 			    nb_regs->nb.nf_fbd_regs.cerrcntd_last, NULL);
1383 		}
1384 	} else {
1385 		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_CERRCNT,
1386 		    DATA_TYPE_UINT32, nb_regs->nb.nf_fbd_regs.cerrcnta, NULL);
1387 		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_CERRCNT_LAST,
1388 		    DATA_TYPE_UINT32, nb_regs->nb.nf_fbd_regs.cerrcnta_last,
1389 		    NULL);
1390 	}
1391 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_BADRAMA,
1392 	    DATA_TYPE_UINT32, nb_regs->nb.nf_fbd_regs.badrama, NULL);
1393 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_BADRAMB,
1394 	    DATA_TYPE_UINT16, nb_regs->nb.nf_fbd_regs.badramb, NULL);
1395 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_BADCNT,
1396 	    DATA_TYPE_UINT32, nb_regs->nb.nf_fbd_regs.badcnt, NULL);
1397 
1398 	if (sp->intel_error_list >= 0)
1399 		(void) snprintf(buf, sizeof (buf), "M%d", sp->intel_error_list);
1400 	else
1401 		(void) snprintf(buf, sizeof (buf), "Multiple or unknown error");
1402 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_ERROR_NO,
1403 	    DATA_TYPE_STRING, buf, NULL);
1404 }
1405 
1406 static void
1407 nb_dma_err_payload(const nb_regs_t *nb_regs, nvlist_t *payload)
1408 {
1409 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_PCISTS,
1410 	    DATA_TYPE_UINT16, nb_regs->nb.dma_regs.pcists, NULL);
1411 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_PEXDEVSTS,
1412 	    DATA_TYPE_UINT16, nb_regs->nb.dma_regs.pexdevsts, NULL);
1413 }
1414 
1415 static void
1416 nb_thr_err_payload(const nb_regs_t *nb_regs, nvlist_t *payload,
1417     nb_scatchpad_t *data)
1418 {
1419 	char buf[32];
1420 
1421 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_FERR_FAT_THR,
1422 	    DATA_TYPE_UINT8, nb_regs->nb.thr_regs.ferr_fat_thr, NULL);
1423 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NERR_FAT_THR,
1424 	    DATA_TYPE_UINT8, nb_regs->nb.thr_regs.nerr_fat_thr, NULL);
1425 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_FERR_NF_THR,
1426 	    DATA_TYPE_UINT8, nb_regs->nb.thr_regs.ferr_nf_thr, NULL);
1427 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NERR_NF_THR,
1428 	    DATA_TYPE_UINT8, nb_regs->nb.thr_regs.nerr_nf_thr, NULL);
1429 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_CTSTS,
1430 	    DATA_TYPE_UINT8, nb_regs->nb.thr_regs.ctsts, NULL);
1431 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_THRTSTS,
1432 	    DATA_TYPE_UINT16, nb_regs->nb.thr_regs.thrtsts, NULL);
1433 	if (data->intel_error_list >= 0) {
1434 		(void) snprintf(buf, sizeof (buf), "TH%d",
1435 		    data->intel_error_list);
1436 	} else {
1437 		(void) snprintf(buf, sizeof (buf), "Multiple or unknown error");
1438 	}
1439 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_ERROR_NO,
1440 	    DATA_TYPE_STRING, buf, NULL);
1441 }
1442 
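/*
 * Add the logout captured at trap time to the ereport payload, selecting
 * the payload routine from the register set that was logged.
 */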
1443 static void
1444 nb_ereport_add_logout(nvlist_t *payload, const nb_logout_t *acl,
1445     nb_scatchpad_t *data)
1446 {
1447 	const nb_regs_t *nb_regs = &acl->nb_regs;
1448 
1449 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_MC_TYPE,
1450 	    DATA_TYPE_STRING, acl->type, NULL);
1451 	switch (nb_regs->flag) {
1452 	case NB_REG_LOG_FSB:
1453 		nb_fsb_err_payload(nb_regs, payload, data);
1454 		break;
1455 	case NB_REG_LOG_PEX:
1456 		nb_pex_err_payload(nb_regs, payload, data);
1457 		break;
1458 	case NB_REG_LOG_INT:
1459 		nb_int_err_payload(nb_regs, payload, data);
1460 		break;
1461 	case NB_REG_LOG_FAT_FBD:
1462 		nb_fat_fbd_err_payload(nb_regs, payload, data);
1463 		break;
1464 	case NB_REG_LOG_NF_FBD:
1465 		nb_nf_fbd_err_payload(nb_regs, payload, data);
1466 		break;
1467 	case NB_REG_LOG_DMA:
1468 		nb_dma_err_payload(nb_regs, payload);
1469 		break;
1470 	case NB_REG_LOG_THR:
1471 		nb_thr_err_payload(nb_regs, payload, data);
1472 		break;
1473 	default:
1474 		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_FERR_GLOBAL,
1475 		    DATA_TYPE_UINT64, nb_regs->ferr, NULL);
1476 		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NERR_GLOBAL,
1477 		    DATA_TYPE_UINT32, nb_regs->nerr, NULL);
1478 		break;
1479 	}
1480 }
1481 
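/*
 * The nb_*_report() routines below construct the detector FMRI and
 * ereport class for each error type and leave the decoded error
 * information in the scratchpad for the payload routines.
 */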
1482 void
1483 nb_fsb_report(const nb_regs_t *nb_regs, char *class, nvlist_t *detector,
1484     nb_scatchpad_t *data)
1485 {
1486 	int chip;
1487 
1488 	if (nb_chipset == INTEL_NB_7300)
1489 		chip = nb_regs->nb.fsb_regs.fsb * 2;
1490 	else
1491 		chip = nb_regs->nb.fsb_regs.fsb;
1492 	fm_fmri_hc_set(detector, FM_HC_SCHEME_VERSION, NULL, NULL, 2,
1493 	    "motherboard", 0, "chip", chip);
1494 
1495 	if (nb_regs->nb.fsb_regs.ferr_fat_fsb == 0 &&
1496 	    nb_regs->nb.fsb_regs.ferr_nf_fsb == 0) {
1497 		data->intel_error_list = intel_fsb_err(nb_regs->nb.fsb_regs.fsb,
1498 		    nb_regs->nb.fsb_regs.nerr_fat_fsb,
1499 		    nb_regs->nb.fsb_regs.nerr_nf_fsb);
1500 	} else {
1501 		data->intel_error_list = intel_fsb_err(nb_regs->nb.fsb_regs.fsb,
1502 		    nb_regs->nb.fsb_regs.ferr_fat_fsb,
1503 		    nb_regs->nb.fsb_regs.ferr_nf_fsb);
1504 	}
1505 	(void) snprintf(class, FM_MAX_CLASS, "%s.%s.%s.%s",
1506 	    FM_ERROR_CPU, FM_EREPORT_CPU_INTEL, "nb", "fsb");
1507 }
1508 
void
nb_pex_report(const nb_regs_t *nb_regs, char *class, nvlist_t *detector,
    nb_scatchpad_t *data)
{
	int hostbridge;

	if (nb_regs->nb.pex_regs.pex == 0) {
		fm_fmri_hc_set(detector, FM_HC_SCHEME_VERSION, NULL, NULL, 1,
		    "motherboard", 0);
	} else {
		hostbridge = nb_regs->nb.pex_regs.pex - 1;
		fm_fmri_hc_set(detector, FM_HC_SCHEME_VERSION, NULL, NULL, 2,
		    "motherboard", 0,
		    "hostbridge", hostbridge);
	}

	if (nb_regs->nb.pex_regs.pex_fat_ferr == 0 &&
	    nb_regs->nb.pex_regs.pex_nf_corr_ferr == 0) {
		if (nb_chipset == INTEL_NB_5400) {
			data->intel_error_list =
			    intel_pex_5400_err(
			    nb_regs->nb.pex_regs.pex_fat_nerr,
			    nb_regs->nb.pex_regs.pex_nf_corr_nerr);
		} else {
			data->intel_error_list =
			    intel_pex_err(nb_regs->nb.pex_regs.pex_fat_nerr,
			    nb_regs->nb.pex_regs.pex_nf_corr_nerr);
		}
	} else {
		if (nb_chipset == INTEL_NB_5400) {
			data->intel_error_list =
			    intel_pex_5400_err(
			    nb_regs->nb.pex_regs.pex_fat_ferr,
			    nb_regs->nb.pex_regs.pex_nf_corr_ferr);
		} else {
			data->intel_error_list =
			    intel_pex_err(nb_regs->nb.pex_regs.pex_fat_ferr,
			    nb_regs->nb.pex_regs.pex_nf_corr_ferr);
		}
	}

	if (nb_regs->nb.pex_regs.pex == 0) {
		(void) snprintf(class, FM_MAX_CLASS, "%s.%s.%s.%s",
		    FM_ERROR_CPU, FM_EREPORT_CPU_INTEL, "nb", "esi");
	} else {
		(void) snprintf(class, FM_MAX_CLASS, "%s.%s.%s.%s",
		    FM_ERROR_CPU, FM_EREPORT_CPU_INTEL, "nb", "pex");
	}
}

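/*
 * Internal north-bridge error: the detector is just the motherboard and
 * the class is suffixed with "nb.ie".
 */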
void
nb_int_report(const nb_regs_t *nb_regs, char *class, nvlist_t *detector,
    void *data)
{
	fm_fmri_hc_set(detector, FM_HC_SCHEME_VERSION, NULL, NULL, 1,
	    "motherboard", 0);

	if (nb_regs->nb.int_regs.ferr_fat_int == 0 &&
	    nb_regs->nb.int_regs.ferr_nf_int == 0) {
		((nb_scatchpad_t *)data)->intel_error_list =
		    intel_int_err(nb_regs->nb.int_regs.nerr_fat_int,
		    nb_regs->nb.int_regs.nerr_nf_int);
	} else {
		((nb_scatchpad_t *)data)->intel_error_list =
		    intel_int_err(nb_regs->nb.int_regs.ferr_fat_int,
		    nb_regs->nb.int_regs.ferr_nf_int);
	}
	(void) snprintf(class, FM_MAX_CLASS, "%s.%s.%s.%s",
	    FM_ERROR_CPU, FM_EREPORT_CPU_INTEL, "nb", "ie");
}

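/*
 * Fatal FB-DIMM memory error: fat_memory_error() decodes the registers
 * into the memory scratchpad and returns the class suffix.  The detector
 * is built to the finest resolution decoded, e.g.
 * motherboard/memory-controller/dram-channel/dimm/rank, falling back to
 * channel, branch or plain motherboard when the finer members are
 * unknown (-1).
 */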
void
nb_fat_fbd_report(const nb_regs_t *nb_regs, char *class, nvlist_t *detector,
    void *data)
{
	char *intr;
	nb_mem_scatchpad_t *sp;

	intr = fat_memory_error(nb_regs, data);
	sp = &((nb_scatchpad_t *)data)->ms;

	if (sp->dimm != -1) {
		fm_fmri_hc_set(detector, FM_HC_SCHEME_VERSION, NULL, NULL, 5,
		    "motherboard", 0,
		    "memory-controller", sp->branch,
		    "dram-channel", sp->channel,
		    "dimm", sp->dimm,
		    "rank", sp->rank);
	} else if (sp->channel != -1) {
		fm_fmri_hc_set(detector, FM_HC_SCHEME_VERSION, NULL, NULL, 3,
		    "motherboard", 0,
		    "memory-controller", sp->branch,
		    "dram-channel", sp->channel);
	} else if (sp->branch != -1) {
		fm_fmri_hc_set(detector, FM_HC_SCHEME_VERSION, NULL, NULL, 2,
		    "motherboard", 0,
		    "memory-controller", sp->branch);
	} else {
		fm_fmri_hc_set(detector, FM_HC_SCHEME_VERSION, NULL, NULL, 1,
		    "motherboard", 0);
	}

	(void) snprintf(class, FM_MAX_CLASS, "%s.%s.%s",
	    FM_ERROR_CPU, FM_EREPORT_CPU_INTEL, intr);
}

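/*
 * Non-fatal FB-DIMM memory error: identical in structure to
 * nb_fat_fbd_report(), but the registers are decoded by
 * nf_memory_error().
 */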
void
nb_nf_fbd_report(const nb_regs_t *nb_regs, char *class, nvlist_t *detector,
    void *data)
{
	char *intr;
	nb_mem_scatchpad_t *sp;

	intr = nf_memory_error(nb_regs, data);
	sp = &((nb_scatchpad_t *)data)->ms;

	if (sp->dimm != -1) {
		fm_fmri_hc_set(detector, FM_HC_SCHEME_VERSION, NULL, NULL, 5,
		    "motherboard", 0,
		    "memory-controller", sp->branch,
		    "dram-channel", sp->channel,
		    "dimm", sp->dimm,
		    "rank", sp->rank);
	} else if (sp->channel != -1) {
		fm_fmri_hc_set(detector, FM_HC_SCHEME_VERSION, NULL, NULL, 3,
		    "motherboard", 0,
		    "memory-controller", sp->branch,
		    "dram-channel", sp->channel);
	} else if (sp->branch != -1) {
		fm_fmri_hc_set(detector, FM_HC_SCHEME_VERSION, NULL, NULL, 2,
		    "motherboard", 0,
		    "memory-controller", sp->branch);
	} else {
		fm_fmri_hc_set(detector, FM_HC_SCHEME_VERSION, NULL, NULL, 1,
		    "motherboard", 0);
	}

	(void) snprintf(class, FM_MAX_CLASS, "%s.%s.%s",
	    FM_ERROR_CPU, FM_EREPORT_CPU_INTEL, intr);
}

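/*
 * DMA engine error: no per-error decode is done; report "nb.dma" against
 * the motherboard.
 */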
void
nb_dma_report(char *class, nvlist_t *detector)
{
	fm_fmri_hc_set(detector, FM_HC_SCHEME_VERSION, NULL, NULL, 1,
	    "motherboard", 0);

	(void) snprintf(class, FM_MAX_CLASS, "%s.%s.%s.%s",
	    FM_ERROR_CPU, FM_EREPORT_CPU_INTEL, "nb", "dma");
}

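/*
 * THR (thermal) error: decode the first-error registers into the Intel
 * error list and report "nb.otf" against the motherboard.
 */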
void
nb_thr_report(const nb_regs_t *nb_regs, char *class, nvlist_t *detector,
    void *data)
{
	((nb_scatchpad_t *)data)->intel_error_list =
	    intel_thr_err(nb_regs->nb.thr_regs.ferr_fat_thr,
	    nb_regs->nb.thr_regs.ferr_nf_thr);
	fm_fmri_hc_set(detector, FM_HC_SCHEME_VERSION, NULL, NULL, 1,
	    "motherboard", 0);

	(void) snprintf(class, FM_MAX_CLASS, "%s.%s.%s.%s",
	    FM_ERROR_CPU, FM_EREPORT_CPU_INTEL, "nb", "otf");
}

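/*
 * Build the detector FMRI and ereport class for a captured error by
 * dispatching on the register-set flag to the per-unit routines above.
 * Unrecognized flags are reported as "nb.unknown" against the
 * motherboard.  The returned detector nvlist is destroyed by the caller
 * once the ereport has been assembled.
 */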
nvlist_t *
nb_report(const nb_regs_t *nb_regs, char *class, nv_alloc_t *nva, void *scratch)
{
	nvlist_t *detector = fm_nvlist_create(nva);

	switch (nb_regs->flag) {
	case NB_REG_LOG_FSB:
		nb_fsb_report(nb_regs, class, detector, scratch);
		break;
	case NB_REG_LOG_PEX:
		nb_pex_report(nb_regs, class, detector, scratch);
		break;
	case NB_REG_LOG_INT:
		nb_int_report(nb_regs, class, detector, scratch);
		break;
	case NB_REG_LOG_FAT_FBD:
		nb_fat_fbd_report(nb_regs, class, detector, scratch);
		break;
	case NB_REG_LOG_NF_FBD:
		nb_nf_fbd_report(nb_regs, class, detector, scratch);
		break;
	case NB_REG_LOG_DMA:
		nb_dma_report(class, detector);
		break;
	case NB_REG_LOG_THR:
		nb_thr_report(nb_regs, class, detector, scratch);
		break;
	default:
		fm_fmri_hc_set(detector, FM_HC_SCHEME_VERSION, NULL, NULL, 1,
		    "motherboard", 0);

		(void) snprintf(class, FM_MAX_CLASS, "%s.%s.%s.%s",
		    FM_ERROR_CPU, FM_EREPORT_CPU_INTEL, "nb", "unknown");
	}
	return (detector);
}

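/*
 * Errorq drain callback: convert one logout entry into an ereport and
 * post it.  At panic time the ereport and its scratch space are carved
 * out of reserved ereport_errorq elements and committed synchronously;
 * otherwise the nvlists are allocated normally and the ereport is posted
 * with fm_ereport_post().
 */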
/*ARGSUSED*/
void
nb_drain(void *ignored, const void *data, const errorq_elem_t *eqe)
{
	nb_logout_t *acl = (nb_logout_t *)data;
	errorq_elem_t *eqep, *scr_eqep;
	nvlist_t *ereport, *detector;
	nv_alloc_t *nva = NULL;
	char buf[FM_MAX_CLASS];
	nb_scatchpad_t nb_scatchpad;

	if (panicstr) {
		if ((eqep = errorq_reserve(ereport_errorq)) == NULL)
			return;
		ereport = errorq_elem_nvl(ereport_errorq, eqep);
		/*
		 * Now try to reserve a second errorq element and use its
		 * allocator for scratch space (e.g., for constructing
		 * nvlists to add to the main ereport).  If we can't reserve
		 * a scratch element, fall back to working within the
		 * element we already have and hope for the best.  All this
		 * is necessary because the fixed-buffer nv allocator does
		 * not reclaim freed space and nvlist construction is
		 * expensive.
		 */
		if ((scr_eqep = errorq_reserve(ereport_errorq)) != NULL)
			nva = errorq_elem_nva(ereport_errorq, scr_eqep);
		else
			nva = errorq_elem_nva(ereport_errorq, eqep);
	} else {
		ereport = fm_nvlist_create(NULL);
	}
	detector = nb_report(&acl->nb_regs, buf, nva, &nb_scatchpad);
	if (detector == NULL)
		return;
	fm_ereport_set(ereport, FM_EREPORT_VERSION, buf,
	    fm_ena_generate(acl->acl_timestamp, FM_ENA_FMT1), detector, NULL);
	/*
	 * We're done with 'detector' so reclaim the scratch space.
	 */
	if (panicstr) {
		fm_nvlist_destroy(detector, FM_NVA_RETAIN);
		nv_alloc_reset(nva);
	} else {
		fm_nvlist_destroy(detector, FM_NVA_FREE);
	}

	/*
	 * Encode the error-specific data that was saved in the logout area.
	 */
	nb_ereport_add_logout(ereport, acl, &nb_scatchpad);

	if (panicstr) {
		errorq_commit(ereport_errorq, eqep, ERRORQ_SYNC);
		if (scr_eqep)
			errorq_cancel(ereport_errorq, scr_eqep);
	} else {
		(void) fm_ereport_post(ereport, EVCH_TRYHARD);
		fm_nvlist_destroy(ereport, FM_NVA_FREE);
	}
}