xref: /illumos-gate/usr/src/uts/common/os/pcifm.c (revision 06e1a714)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 #include <sys/types.h>
30 #include <sys/sunndi.h>
31 #include <sys/sysmacros.h>
32 #include <sys/ddifm_impl.h>
33 #include <sys/fm/util.h>
34 #include <sys/fm/protocol.h>
35 #include <sys/fm/io/pci.h>
36 #include <sys/fm/io/ddi.h>
37 #include <sys/pci.h>
38 #include <sys/pcie.h>
39 #include <sys/pci_impl.h>
40 #include <sys/epm.h>
41 #include <sys/pcifm.h>
42 
/*
 * True when a PCI-X capability version implements the ECC registers
 * (both version 1 and version 2 of the PCI-X capability do).
 */
#define	PCIX_ECC_VER_CHECK(x)	(((x) == PCI_PCIX_VER_1) ||\
				((x) == PCI_PCIX_VER_2))

/*
 * Expected PCI Express error mask values; setup code below reprograms a
 * device's AER mask registers to these values when they differ.
 */
uint32_t pcie_expected_ce_mask = PCIE_AER_CE_AD_NFE;
uint32_t pcie_expected_ue_mask = 0x0;
uint32_t pcie_expected_sue_mask = 0x0;

/* Error queue for PCI target ereports; NULL until initialized elsewhere. */
errorq_t *pci_target_queue = NULL;
54 
/*
 * Generic PCI error table.  Each row pairs a status-register bit
 * (PCI_STAT_*) with its ereport class suffix, an optional target-ereport
 * class, and the DDI FM status assigned when the bit is seen set.
 * An all-NULL row terminates the table.
 */
pci_fm_err_t pci_err_tbl[] = {
	PCI_DET_PERR,	PCI_STAT_PERROR,	NULL,		DDI_FM_UNKNOWN,
	PCI_MDPE,	PCI_STAT_S_PERROR,	PCI_TARG_MDPE,	DDI_FM_UNKNOWN,
	PCI_SIG_SERR,	PCI_STAT_S_SYSERR,	NULL,		DDI_FM_FATAL,
	PCI_MA,		PCI_STAT_R_MAST_AB,	PCI_TARG_MA,	DDI_FM_UNKNOWN,
	PCI_REC_TA,	PCI_STAT_R_TARG_AB,	PCI_TARG_REC_TA, DDI_FM_UNKNOWN,
	PCI_SIG_TA,	PCI_STAT_S_TARG_AB,	NULL,		DDI_FM_UNKNOWN,
	NULL, NULL, NULL, NULL,
};
64 
/*
 * PCI bridge (secondary-side) error table; same row layout as
 * pci_err_tbl.  Differs from the generic table in that a bridge reports
 * a received SERR (PCI_REC_SERR) rather than a signaled one.
 */
pci_fm_err_t pci_bdg_err_tbl[] = {
	PCI_DET_PERR,	PCI_STAT_PERROR,	NULL,		DDI_FM_UNKNOWN,
	PCI_MDPE,	PCI_STAT_S_PERROR,	PCI_TARG_MDPE,	DDI_FM_UNKNOWN,
	PCI_REC_SERR,	PCI_STAT_S_SYSERR,	NULL,		DDI_FM_UNKNOWN,
	PCI_MA,		PCI_STAT_R_MAST_AB,	PCI_TARG_MA,	DDI_FM_UNKNOWN,
	PCI_REC_TA,	PCI_STAT_R_TARG_AB,	PCI_TARG_REC_TA, DDI_FM_UNKNOWN,
	PCI_SIG_TA,	PCI_STAT_S_TARG_AB,	NULL,		DDI_FM_UNKNOWN,
	NULL, NULL, NULL, NULL,
};
74 
/*
 * PCI Express correctable (AER CE) error table.  All entries carry
 * DDI_FM_OK status: correctable errors do not require a service action.
 */
static pci_fm_err_t pciex_ce_err_tbl[] = {
	PCIEX_RE,	PCIE_AER_CE_RECEIVER_ERR,	NULL,	DDI_FM_OK,
	PCIEX_RNR,	PCIE_AER_CE_REPLAY_ROLLOVER,	NULL,	DDI_FM_OK,
	PCIEX_RTO,	PCIE_AER_CE_REPLAY_TO,		NULL,	DDI_FM_OK,
	PCIEX_BDP,	PCIE_AER_CE_BAD_DLLP,		NULL,	DDI_FM_OK,
	PCIEX_BTP,	PCIE_AER_CE_BAD_TLP,		NULL,	DDI_FM_OK,
	PCIEX_ANFE,	PCIE_AER_CE_AD_NFE,		NULL,	DDI_FM_OK,
	NULL, NULL, NULL, NULL,
};
84 
/*
 * PCI Express uncorrectable (AER UCE) error table.  Link-level failures
 * (training, DLLP, surprise-down, overflow, flow control, malformed TLP)
 * are fatal; transaction-level errors are left DDI_FM_UNKNOWN so later
 * analysis can refine their impact.
 */
static pci_fm_err_t pciex_ue_err_tbl[] = {
	PCIEX_TE,	PCIE_AER_UCE_TRAINING,		NULL,	DDI_FM_FATAL,
	PCIEX_DLP,	PCIE_AER_UCE_DLP,		NULL,	DDI_FM_FATAL,
	PCIEX_SD,	PCIE_AER_UCE_SD,		NULL,   DDI_FM_FATAL,
	PCIEX_ROF,	PCIE_AER_UCE_RO,		NULL,	DDI_FM_FATAL,
	PCIEX_FCP,	PCIE_AER_UCE_FCP,		NULL,	DDI_FM_FATAL,
	PCIEX_MFP,	PCIE_AER_UCE_MTLP,		NULL,	DDI_FM_FATAL,
	PCIEX_CTO,	PCIE_AER_UCE_TO,		NULL,	DDI_FM_UNKNOWN,
	PCIEX_UC,	PCIE_AER_UCE_UC,		NULL,	DDI_FM_OK,
	PCIEX_ECRC,	PCIE_AER_UCE_ECRC,		NULL,	DDI_FM_UNKNOWN,
	PCIEX_CA,	PCIE_AER_UCE_CA,		NULL,	DDI_FM_UNKNOWN,
	PCIEX_UR,	PCIE_AER_UCE_UR,		NULL,	DDI_FM_UNKNOWN,
	PCIEX_POIS,	PCIE_AER_UCE_PTLP,		NULL,	DDI_FM_UNKNOWN,
	NULL, NULL, NULL, NULL,
};
100 
/*
 * PCI Express secondary uncorrectable (AER SUCE) error table, used for
 * the secondary (PCI/PCI-X) side of PCI Express-to-PCI bridges.
 */
static pci_fm_err_t pcie_sue_err_tbl[] = {
	PCIEX_S_TA_SC,	PCIE_AER_SUCE_TA_ON_SC,		NULL,	DDI_FM_UNKNOWN,
	PCIEX_S_MA_SC,	PCIE_AER_SUCE_MA_ON_SC,		NULL,	DDI_FM_UNKNOWN,
	PCIEX_S_RTA,	PCIE_AER_SUCE_RCVD_TA,		NULL,	DDI_FM_UNKNOWN,
	PCIEX_S_RMA,	PCIE_AER_SUCE_RCVD_MA,		NULL,	DDI_FM_UNKNOWN,
	PCIEX_S_USC,	PCIE_AER_SUCE_USC_ERR,		NULL,	DDI_FM_UNKNOWN,
	PCIEX_S_USCMD,	PCIE_AER_SUCE_USC_MSG_DATA_ERR,	NULL,	DDI_FM_FATAL,
	PCIEX_S_UDE,	PCIE_AER_SUCE_UC_DATA_ERR,	NULL,	DDI_FM_UNKNOWN,
	PCIEX_S_UAT,	PCIE_AER_SUCE_UC_ATTR_ERR,	NULL,	DDI_FM_FATAL,
	PCIEX_S_UADR,	PCIE_AER_SUCE_UC_ADDR_ERR,	NULL,	DDI_FM_FATAL,
	PCIEX_S_TEX,	PCIE_AER_SUCE_TIMER_EXPIRED,	NULL,	DDI_FM_FATAL,
	PCIEX_S_PERR,	PCIE_AER_SUCE_PERR_ASSERT,	NULL,	DDI_FM_UNKNOWN,
	PCIEX_S_SERR,	PCIE_AER_SUCE_SERR_ASSERT,	NULL,	DDI_FM_FATAL,
	PCIEX_INTERR,	PCIE_AER_SUCE_INTERNAL_ERR,	NULL,	DDI_FM_FATAL,
	NULL, NULL, NULL, NULL,
};
117 
/*
 * PCI-X split-transaction error table (primary/device status bits).
 */
static pci_fm_err_t pcix_err_tbl[] = {
	PCIX_SPL_DIS,		PCI_PCIX_SPL_DSCD,	NULL,	DDI_FM_UNKNOWN,
	PCIX_UNEX_SPL,		PCI_PCIX_UNEX_SPL,	NULL,	DDI_FM_UNKNOWN,
	PCIX_RX_SPL_MSG,	PCI_PCIX_RX_SPL_MSG,	NULL,   DDI_FM_UNKNOWN,
	NULL, NULL, NULL, NULL,
};
124 
/*
 * PCI-X bridge secondary status (BSS) error table.  Overrun/delayed
 * split completions are benign (DDI_FM_OK).
 */
static pci_fm_err_t pcix_sec_err_tbl[] = {
	PCIX_SPL_DIS,		PCI_PCIX_BSS_SPL_DSCD,	NULL,	DDI_FM_UNKNOWN,
	PCIX_UNEX_SPL,		PCI_PCIX_BSS_UNEX_SPL,	NULL,	DDI_FM_UNKNOWN,
	PCIX_BSS_SPL_OR,	PCI_PCIX_BSS_SPL_OR,	NULL,	DDI_FM_OK,
	PCIX_BSS_SPL_DLY,	PCI_PCIX_BSS_SPL_DLY,	NULL,	DDI_FM_OK,
	NULL, NULL, NULL, NULL,
};
132 
/*
 * Error table for PCI Express devices without Advanced Error Reporting:
 * only the coarse device-status detected bits are available.
 */
static pci_fm_err_t pciex_nadv_err_tbl[] = {
	PCIEX_UR,	PCIE_DEVSTS_UR_DETECTED,	NULL,	DDI_FM_UNKNOWN,
	PCIEX_FAT,	PCIE_DEVSTS_FE_DETECTED,	NULL,	DDI_FM_FATAL,
	PCIEX_NONFAT,	PCIE_DEVSTS_NFE_DETECTED,	NULL,	DDI_FM_UNKNOWN,
	PCIEX_CORR,	PCIE_DEVSTS_CE_DETECTED,	NULL,	DDI_FM_OK,
	NULL, NULL, NULL, NULL,
};
140 
141 static int
142 pci_config_check(ddi_acc_handle_t handle)
143 {
144 	ddi_acc_hdl_t *hp = impl_acc_hdl_get(handle);
145 	ddi_fm_error_t de;
146 
147 	if (!(DDI_FM_ACC_ERR_CAP(ddi_fm_capable(hp->ah_dip))))
148 		return (DDI_FM_OK);
149 
150 	de.fme_version = DDI_FME_VERSION;
151 
152 	ddi_fm_acc_err_get(handle, &de, de.fme_version);
153 	if (de.fme_status != DDI_FM_OK) {
154 		char buf[FM_MAX_CLASS];
155 
156 		(void) snprintf(buf, FM_MAX_CLASS, "%s.%s", PCI_ERROR_SUBCLASS,
157 		    PCI_NR);
158 		ddi_fm_ereport_post(hp->ah_dip, buf, de.fme_ena, DDI_NOSLEEP,
159 		    FM_VERSION, DATA_TYPE_UINT8, 0, NULL);
160 		ddi_fm_acc_err_clear(handle, de.fme_version);
161 	}
162 	return (de.fme_status);
163 }
164 
165 static void
166 pcix_ecc_regs_gather(pci_erpt_t *erpt_p, pcix_ecc_regs_t *pcix_ecc_regs,
167     uint8_t pcix_cap_ptr)
168 {
169 	int bdg = erpt_p->pe_dflags & PCI_BRIDGE_DEV;
170 
171 	pcix_ecc_regs->pcix_ecc_ctlstat = pci_config_get32(erpt_p->pe_hdl,
172 	    (pcix_cap_ptr + (bdg ? PCI_PCIX_BDG_ECC_STATUS :
173 	    PCI_PCIX_ECC_STATUS)));
174 	if (pci_config_check(erpt_p->pe_hdl) == DDI_FM_OK)
175 		pcix_ecc_regs->pcix_ecc_vflags |= PCIX_ERR_ECC_STS_VALID;
176 	else
177 		return;
178 	pcix_ecc_regs->pcix_ecc_fstaddr = pci_config_get32(erpt_p->pe_hdl,
179 	    (pcix_cap_ptr + (bdg ? PCI_PCIX_BDG_ECC_FST_AD :
180 	    PCI_PCIX_ECC_FST_AD)));
181 	pcix_ecc_regs->pcix_ecc_secaddr = pci_config_get32(erpt_p->pe_hdl,
182 	    (pcix_cap_ptr + (bdg ? PCI_PCIX_BDG_ECC_SEC_AD :
183 	    PCI_PCIX_ECC_SEC_AD)));
184 	pcix_ecc_regs->pcix_ecc_attr = pci_config_get32((
185 	    ddi_acc_handle_t)erpt_p->pe_hdl,
186 	    (pcix_cap_ptr + (bdg ? PCI_PCIX_BDG_ECC_ATTR : PCI_PCIX_ECC_ATTR)));
187 }
188 
/*
 * Gather the PCI-X error registers for the device described by erpt_p.
 * pe_regs points at either a pcix_bdg_error_regs_t (bridge) or a
 * pcix_error_regs_t (non-bridge); which one is determined by
 * PCI_BRIDGE_DEV in pe_dflags.  Each register capture is followed by a
 * pci_config_check() and the matching *_VALID flag is only set when the
 * config access succeeded; a failed access aborts further gathering.
 */
static void
pcix_regs_gather(pci_erpt_t *erpt_p, void *pe_regs)
{
	if (erpt_p->pe_dflags & PCI_BRIDGE_DEV) {
		pcix_bdg_error_regs_t *pcix_bdg_regs =
		    (pcix_bdg_error_regs_t *)pe_regs;
		uint8_t pcix_bdg_cap_ptr;
		int i;

		pcix_bdg_cap_ptr = pcix_bdg_regs->pcix_bdg_cap_ptr;
		pcix_bdg_regs->pcix_bdg_sec_stat = pci_config_get16(
		    erpt_p->pe_hdl, (pcix_bdg_cap_ptr + PCI_PCIX_SEC_STATUS));
		if (pci_config_check(erpt_p->pe_hdl) == DDI_FM_OK)
			pcix_bdg_regs->pcix_bdg_vflags |=
			    PCIX_BDG_SEC_STATUS_VALID;
		else
			return;
		pcix_bdg_regs->pcix_bdg_stat = pci_config_get32(erpt_p->pe_hdl,
		    (pcix_bdg_cap_ptr + PCI_PCIX_BDG_STATUS));
		if (pci_config_check(erpt_p->pe_hdl) == DDI_FM_OK)
			pcix_bdg_regs->pcix_bdg_vflags |= PCIX_BDG_STATUS_VALID;
		else
			return;
		/* ECC registers exist only for PCI-X capability v1/v2. */
		if (PCIX_ECC_VER_CHECK(pcix_bdg_regs->pcix_bdg_ver)) {
			pcix_ecc_regs_t *pcix_bdg_ecc_regs;
			/*
			 * PCI Express to PCI-X bridges only implement the
			 * secondary side of the PCI-X ECC registers, bit one is
			 * read-only so we make sure we do not write to it.
			 */
			if (erpt_p->pe_dflags & PCIEX_2PCI_DEV) {
				pcix_bdg_ecc_regs =
				    pcix_bdg_regs->pcix_bdg_ecc_regs[1];
				pcix_ecc_regs_gather(erpt_p, pcix_bdg_ecc_regs,
				    pcix_bdg_cap_ptr);
			} else {
				for (i = 0; i < 2; i++) {
					pcix_bdg_ecc_regs =
					    pcix_bdg_regs->pcix_bdg_ecc_regs[i];
					/*
					 * Select ECC register bank i before
					 * gathering its contents.
					 */
					pci_config_put32(erpt_p->pe_hdl,
					    (pcix_bdg_cap_ptr +
					    PCI_PCIX_BDG_ECC_STATUS), i);
					pcix_ecc_regs_gather(erpt_p,
					    pcix_bdg_ecc_regs,
					    pcix_bdg_cap_ptr);
				}
			}
		}
	} else {
		pcix_error_regs_t *pcix_regs = (pcix_error_regs_t *)pe_regs;
		uint8_t pcix_cap_ptr;

		pcix_cap_ptr = pcix_regs->pcix_cap_ptr;

		/*
		 * Note: only the status read below is validity-checked;
		 * the command read shares its fate.
		 */
		pcix_regs->pcix_command = pci_config_get16(erpt_p->pe_hdl,
		    (pcix_cap_ptr + PCI_PCIX_COMMAND));
		pcix_regs->pcix_status = pci_config_get32(erpt_p->pe_hdl,
		    (pcix_cap_ptr + PCI_PCIX_STATUS));
		if (pci_config_check(erpt_p->pe_hdl) == DDI_FM_OK)
			pcix_regs->pcix_vflags |= PCIX_ERR_STATUS_VALID;
		else
			return;
		if (PCIX_ECC_VER_CHECK(pcix_regs->pcix_ver)) {
			pcix_ecc_regs_t *pcix_ecc_regs =
			    pcix_regs->pcix_ecc_regs;

			pcix_ecc_regs_gather(erpt_p, pcix_ecc_regs,
			    pcix_cap_ptr);
		}
	}
}
260 
/*
 * Gather the PCI Express error registers for the device described by
 * erpt_p (pe_regs is a pcie_error_regs_t).  Captures the basic device
 * status, then — as indicated by pe_dflags — the PCI-X bridge registers,
 * root-port registers, and the AER (advanced) register block including
 * header logs, bridge secondary-UE registers and root-complex error
 * registers.  *_VALID flags are only set when the corresponding config
 * access passed pci_config_check().
 */
static void
pcie_regs_gather(pci_erpt_t *erpt_p)
{
	pcie_error_regs_t *pcie_regs = (pcie_error_regs_t *)erpt_p->pe_regs;
	uint8_t pcie_cap_ptr;
	pcie_adv_error_regs_t *pcie_adv_regs;
	uint16_t pcie_ecap_ptr;

	pcie_cap_ptr = pcie_regs->pcie_cap_ptr;

	pcie_regs->pcie_err_status = pci_config_get16(erpt_p->pe_hdl,
	    pcie_cap_ptr + PCIE_DEVSTS);
	if (pci_config_check(erpt_p->pe_hdl) == DDI_FM_OK)
		pcie_regs->pcie_vflags |= PCIE_ERR_STATUS_VALID;
	else
		return;

	pcie_regs->pcie_err_ctl = pci_config_get16(erpt_p->pe_hdl,
	    (pcie_cap_ptr + PCIE_DEVCTL));

	/* PCIe bridge with a PCI-X secondary side: gather those too. */
	if ((erpt_p->pe_dflags & PCI_BRIDGE_DEV) && (erpt_p->pe_dflags &
	    PCIX_DEV))
		pcix_regs_gather(erpt_p, pcie_regs->pcix_bdg_regs);

	if (erpt_p->pe_dflags & PCIEX_RC_DEV) {
		pcie_rc_error_regs_t *pcie_rc_regs = pcie_regs->pcie_rc_regs;

		pcie_rc_regs->pcie_rc_status = pci_config_get32(erpt_p->pe_hdl,
		    (pcie_cap_ptr + PCIE_ROOTSTS));
		pcie_rc_regs->pcie_rc_ctl = pci_config_get16(erpt_p->pe_hdl,
		    (pcie_cap_ptr + PCIE_ROOTCTL));
	}

	/* Done unless the device has the AER extended capability. */
	if (!(erpt_p->pe_dflags & PCIEX_ADV_DEV))
		return;

	pcie_adv_regs = pcie_regs->pcie_adv_regs;

	pcie_ecap_ptr = pcie_adv_regs->pcie_adv_cap_ptr;

	pcie_adv_regs->pcie_ue_status = pci_config_get32(erpt_p->pe_hdl,
	    pcie_ecap_ptr + PCIE_AER_UCE_STS);
	if (pci_config_check(erpt_p->pe_hdl) == DDI_FM_OK)
		pcie_adv_regs->pcie_adv_vflags |= PCIE_UE_STATUS_VALID;

	pcie_adv_regs->pcie_ue_mask = pci_config_get32(erpt_p->pe_hdl,
	    pcie_ecap_ptr + PCIE_AER_UCE_MASK);
	pcie_adv_regs->pcie_ue_sev = pci_config_get32(erpt_p->pe_hdl,
	    pcie_ecap_ptr + PCIE_AER_UCE_SERV);
	pcie_adv_regs->pcie_adv_ctl = pci_config_get32(erpt_p->pe_hdl,
	    pcie_ecap_ptr + PCIE_AER_CTL);
	pcie_adv_regs->pcie_ue_hdr0 = pci_config_get32(erpt_p->pe_hdl,
	    pcie_ecap_ptr + PCIE_AER_HDR_LOG);
	if (pci_config_check(erpt_p->pe_hdl) == DDI_FM_OK) {
		int i;
		pcie_adv_regs->pcie_adv_vflags |= PCIE_UE_HDR_VALID;

		/* Remaining three dwords of the 16-byte TLP header log. */
		for (i = 0; i < 3; i++) {
			pcie_adv_regs->pcie_ue_hdr[i] = pci_config_get32(
			    erpt_p->pe_hdl, pcie_ecap_ptr + PCIE_AER_HDR_LOG +
			    (4 * (i + 1)));
		}
	}

	pcie_adv_regs->pcie_ce_status = pci_config_get32(erpt_p->pe_hdl,
	    pcie_ecap_ptr + PCIE_AER_CE_STS);
	if (pci_config_check(erpt_p->pe_hdl) == DDI_FM_OK)
		pcie_adv_regs->pcie_adv_vflags |= PCIE_CE_STATUS_VALID;

	pcie_adv_regs->pcie_ce_mask = pci_config_get32(erpt_p->pe_hdl,
	    pcie_ecap_ptr + PCIE_AER_CE_MASK);

	/*
	 * If pci express to pci bridge then grab the bridge
	 * error registers.
	 */
	if (erpt_p->pe_dflags & PCIEX_2PCI_DEV) {
		pcie_adv_bdg_error_regs_t *pcie_bdg_regs =
		    pcie_adv_regs->pcie_adv_bdg_regs;

		pcie_bdg_regs->pcie_sue_status =
		    pci_config_get32(erpt_p->pe_hdl,
		    pcie_ecap_ptr + PCIE_AER_SUCE_STS);
		if (pci_config_check(erpt_p->pe_hdl) == DDI_FM_OK)
			pcie_adv_regs->pcie_adv_vflags |= PCIE_SUE_STATUS_VALID;
		pcie_bdg_regs->pcie_sue_hdr0 = pci_config_get32(erpt_p->pe_hdl,
		    (pcie_ecap_ptr + PCIE_AER_SHDR_LOG));

		if (pci_config_check(erpt_p->pe_hdl) == DDI_FM_OK) {
			int i;

			pcie_adv_regs->pcie_adv_vflags |= PCIE_SUE_HDR_VALID;

			/* Remaining dwords of the secondary header log. */
			for (i = 0; i < 3; i++) {
				pcie_bdg_regs->pcie_sue_hdr[i] =
				    pci_config_get32(erpt_p->pe_hdl,
				    pcie_ecap_ptr + PCIE_AER_SHDR_LOG +
				    (4 * (i + 1)));
			}
		}
	}
	/*
	 * If PCI Express root complex then grab the root complex
	 * error registers.
	 */
	if (erpt_p->pe_dflags & PCIEX_RC_DEV) {
		pcie_adv_rc_error_regs_t *pcie_rc_regs =
		    pcie_adv_regs->pcie_adv_rc_regs;

		pcie_rc_regs->pcie_rc_err_cmd = pci_config_get32(erpt_p->pe_hdl,
		    (pcie_ecap_ptr + PCIE_AER_RE_CMD));
		pcie_rc_regs->pcie_rc_err_status =
		    pci_config_get32(erpt_p->pe_hdl,
		    (pcie_ecap_ptr + PCIE_AER_RE_STS));
		if (pci_config_check(erpt_p->pe_hdl) == DDI_FM_OK)
			pcie_adv_regs->pcie_adv_vflags |=
			    PCIE_RC_ERR_STATUS_VALID;
		pcie_rc_regs->pcie_rc_ce_src_id =
		    pci_config_get16(erpt_p->pe_hdl,
		    (pcie_ecap_ptr + PCIE_AER_CE_SRC_ID));
		pcie_rc_regs->pcie_rc_ue_src_id =
		    pci_config_get16(erpt_p->pe_hdl,
		    (pcie_ecap_ptr + PCIE_AER_ERR_SRC_ID));
		if (pci_config_check(erpt_p->pe_hdl) == DDI_FM_OK)
			pcie_adv_regs->pcie_adv_vflags |= PCIE_SRC_ID_VALID;
	}
}
388 
/*
 * Gather all error registers for the device described by erpt_p: the
 * generic PCI status/command registers first, then bridge secondary
 * registers, and finally the PCI Express or PCI-X specific registers as
 * indicated by pe_dflags.  A failed read of the basic status/command
 * registers aborts the whole gather.  `dip` is unused (ARGSUSED).
 */
/*ARGSUSED*/
static void
pci_regs_gather(dev_info_t *dip, pci_erpt_t *erpt_p)
{
	pci_error_regs_t *pci_regs = erpt_p->pe_pci_regs;

	/*
	 * Start by reading all the error registers that are available for
	 * pci and pci express and for leaf devices and bridges/switches
	 */
	pci_regs->pci_err_status = pci_config_get16(erpt_p->pe_hdl,
	    PCI_CONF_STAT);
	if (pci_config_check(erpt_p->pe_hdl) != DDI_FM_OK)
		return;
	pci_regs->pci_vflags |= PCI_ERR_STATUS_VALID;
	pci_regs->pci_cfg_comm = pci_config_get16(erpt_p->pe_hdl,
	    PCI_CONF_COMM);
	if (pci_config_check(erpt_p->pe_hdl) != DDI_FM_OK)
		return;

	/*
	 * If pci-pci bridge grab PCI bridge specific error registers.
	 */
	if (erpt_p->pe_dflags & PCI_BRIDGE_DEV) {
		pci_regs->pci_bdg_regs->pci_bdg_sec_stat =
		    pci_config_get16(erpt_p->pe_hdl, PCI_BCNF_SEC_STATUS);
		if (pci_config_check(erpt_p->pe_hdl) == DDI_FM_OK)
			pci_regs->pci_bdg_regs->pci_bdg_vflags |=
			    PCI_BDG_SEC_STAT_VALID;
		pci_regs->pci_bdg_regs->pci_bdg_ctrl =
		    pci_config_get16(erpt_p->pe_hdl, PCI_BCNF_BCNTRL);
		if (pci_config_check(erpt_p->pe_hdl) == DDI_FM_OK)
			pci_regs->pci_bdg_regs->pci_bdg_vflags |=
			    PCI_BDG_CTRL_VALID;
	}

	/*
	 * If pci express device grab pci express error registers and
	 * check for advanced error reporting features and grab them if
	 * available.
	 */
	if (erpt_p->pe_dflags & PCIEX_DEV)
		pcie_regs_gather(erpt_p);
	else if (erpt_p->pe_dflags & PCIX_DEV)
		pcix_regs_gather(erpt_p, erpt_p->pe_regs);

}
436 
/*
 * Clear the PCI-X error state previously gathered by pcix_regs_gather()
 * by writing the captured values back to the (write-1-to-clear) status
 * registers.  Only registers whose *_VALID flag is set are written, and
 * every vflags word is reset to 0 afterwards so stale captures are not
 * re-cleared or re-reported.
 */
static void
pcix_regs_clear(pci_erpt_t *erpt_p, void *pe_regs)
{
	if (erpt_p->pe_dflags & PCI_BRIDGE_DEV) {
		pcix_bdg_error_regs_t *pcix_bdg_regs =
		    (pcix_bdg_error_regs_t *)pe_regs;
		uint8_t pcix_bdg_cap_ptr;
		int i;

		pcix_bdg_cap_ptr = pcix_bdg_regs->pcix_bdg_cap_ptr;

		if (pcix_bdg_regs->pcix_bdg_vflags & PCIX_BDG_SEC_STATUS_VALID)
			pci_config_put16(erpt_p->pe_hdl,
			    (pcix_bdg_cap_ptr + PCI_PCIX_SEC_STATUS),
			    pcix_bdg_regs->pcix_bdg_sec_stat);

		if (pcix_bdg_regs->pcix_bdg_vflags & PCIX_BDG_STATUS_VALID)
			pci_config_put32(erpt_p->pe_hdl,
			    (pcix_bdg_cap_ptr + PCI_PCIX_BDG_STATUS),
			    pcix_bdg_regs->pcix_bdg_stat);

		pcix_bdg_regs->pcix_bdg_vflags = 0x0;

		if (PCIX_ECC_VER_CHECK(pcix_bdg_regs->pcix_bdg_ver)) {
			pcix_ecc_regs_t *pcix_bdg_ecc_regs;
			/*
			 * PCI Express to PCI-X bridges only implement the
			 * secondary side of the PCI-X ECC registers, bit one is
			 * read-only so we make sure we do not write to it.
			 */
			if (erpt_p->pe_dflags & PCIEX_2PCI_DEV) {
				pcix_bdg_ecc_regs =
				    pcix_bdg_regs->pcix_bdg_ecc_regs[1];

				if (pcix_bdg_ecc_regs->pcix_ecc_vflags &
				    PCIX_ERR_ECC_STS_VALID) {

					pci_config_put32(erpt_p->pe_hdl,
					    (pcix_bdg_cap_ptr +
					    PCI_PCIX_BDG_ECC_STATUS),
					    pcix_bdg_ecc_regs->
					    pcix_ecc_ctlstat);
				}
				pcix_bdg_ecc_regs->pcix_ecc_vflags = 0x0;
			} else {
				for (i = 0; i < 2; i++) {
					pcix_bdg_ecc_regs =
					    pcix_bdg_regs->pcix_bdg_ecc_regs[i];


					if (pcix_bdg_ecc_regs->pcix_ecc_vflags &
					    PCIX_ERR_ECC_STS_VALID) {
						/*
						 * First select ECC bank i,
						 * then write back the captured
						 * control/status to clear it.
						 */
						pci_config_put32(erpt_p->pe_hdl,
						    (pcix_bdg_cap_ptr +
						    PCI_PCIX_BDG_ECC_STATUS),
						    i);

						pci_config_put32(erpt_p->pe_hdl,
						    (pcix_bdg_cap_ptr +
						    PCI_PCIX_BDG_ECC_STATUS),
						    pcix_bdg_ecc_regs->
						    pcix_ecc_ctlstat);
					}
					pcix_bdg_ecc_regs->pcix_ecc_vflags =
					    0x0;
				}
			}
		}
	} else {
		pcix_error_regs_t *pcix_regs = (pcix_error_regs_t *)pe_regs;
		uint8_t pcix_cap_ptr;

		pcix_cap_ptr = pcix_regs->pcix_cap_ptr;

		if (pcix_regs->pcix_vflags & PCIX_ERR_STATUS_VALID)
			pci_config_put32(erpt_p->pe_hdl,
			    (pcix_cap_ptr + PCI_PCIX_STATUS),
			    pcix_regs->pcix_status);

		pcix_regs->pcix_vflags = 0x0;

		if (PCIX_ECC_VER_CHECK(pcix_regs->pcix_ver)) {
			pcix_ecc_regs_t *pcix_ecc_regs =
			    pcix_regs->pcix_ecc_regs;

			if (pcix_ecc_regs->pcix_ecc_vflags &
			    PCIX_ERR_ECC_STS_VALID)
				pci_config_put32(erpt_p->pe_hdl,
				    (pcix_cap_ptr + PCI_PCIX_ECC_STATUS),
				    pcix_ecc_regs->pcix_ecc_ctlstat);

			pcix_ecc_regs->pcix_ecc_vflags = 0x0;
		}
	}
}
532 
/*
 * Clear the PCI Express error state previously gathered by
 * pcie_regs_gather() by writing the captured status values back to the
 * (write-1-to-clear) registers.  Only statuses marked valid are written;
 * all validity flags are reset afterwards.
 */
static void
pcie_regs_clear(pci_erpt_t *erpt_p)
{
	pcie_error_regs_t *pcie_regs = (pcie_error_regs_t *)erpt_p->pe_regs;
	uint8_t pcie_cap_ptr;
	pcie_adv_error_regs_t *pcie_adv_regs;
	uint16_t pcie_ecap_ptr;

	pcie_cap_ptr = pcie_regs->pcie_cap_ptr;

	if (pcie_regs->pcie_vflags & PCIE_ERR_STATUS_VALID)
		pci_config_put16(erpt_p->pe_hdl, pcie_cap_ptr + PCIE_DEVSTS,
		    pcie_regs->pcie_err_status);

	pcie_regs->pcie_vflags = 0x0;

	/* PCIe bridge with a PCI-X secondary side: clear those too. */
	if ((erpt_p->pe_dflags & PCI_BRIDGE_DEV) &&
	    (erpt_p->pe_dflags & PCIX_DEV))
		pcix_regs_clear(erpt_p, pcie_regs->pcix_bdg_regs);

	if (!(erpt_p->pe_dflags & PCIEX_ADV_DEV))
		return;

	pcie_adv_regs = pcie_regs->pcie_adv_regs;

	pcie_ecap_ptr = pcie_adv_regs->pcie_adv_cap_ptr;

	if (pcie_adv_regs->pcie_adv_vflags & PCIE_UE_STATUS_VALID)
		pci_config_put32(erpt_p->pe_hdl,
		    pcie_ecap_ptr + PCIE_AER_UCE_STS,
		    pcie_adv_regs->pcie_ue_status);

	if (pcie_adv_regs->pcie_adv_vflags & PCIE_CE_STATUS_VALID)
		pci_config_put32(erpt_p->pe_hdl,
		    pcie_ecap_ptr + PCIE_AER_CE_STS,
		    pcie_adv_regs->pcie_ce_status);


	if (erpt_p->pe_dflags & PCIEX_2PCI_DEV) {
		pcie_adv_bdg_error_regs_t *pcie_bdg_regs =
		    pcie_adv_regs->pcie_adv_bdg_regs;


		if (pcie_adv_regs->pcie_adv_vflags & PCIE_SUE_STATUS_VALID)
			pci_config_put32(erpt_p->pe_hdl,
			    pcie_ecap_ptr + PCIE_AER_SUCE_STS,
			    pcie_bdg_regs->pcie_sue_status);
	}
	/*
	 * If PCI Express root complex then clear the root complex
	 * error registers.
	 */
	if (erpt_p->pe_dflags & PCIEX_RC_DEV) {
		pcie_adv_rc_error_regs_t *pcie_rc_regs =
		    pcie_adv_regs->pcie_adv_rc_regs;


		if (pcie_adv_regs->pcie_adv_vflags & PCIE_RC_ERR_STATUS_VALID)
			pci_config_put32(erpt_p->pe_hdl,
			    (pcie_ecap_ptr + PCIE_AER_RE_STS),
			    pcie_rc_regs->pcie_rc_err_status);
	}
	pcie_adv_regs->pcie_adv_vflags = 0x0;
}
597 
/*
 * Clear all error state gathered by pci_regs_gather(): PCI Express or
 * PCI-X specific registers first, then the generic PCI status and
 * bridge secondary registers.  Writes each captured value back to its
 * (write-1-to-clear) register when the matching *_VALID flag is set,
 * then resets the validity flags.
 */
static void
pci_regs_clear(pci_erpt_t *erpt_p)
{
	/*
	 * Finally clear the error bits
	 */
	if (erpt_p->pe_dflags & PCIEX_DEV)
		pcie_regs_clear(erpt_p);
	else if (erpt_p->pe_dflags & PCIX_DEV)
		pcix_regs_clear(erpt_p, erpt_p->pe_regs);

	if (erpt_p->pe_pci_regs->pci_vflags & PCI_ERR_STATUS_VALID)
		pci_config_put16(erpt_p->pe_hdl, PCI_CONF_STAT,
		    erpt_p->pe_pci_regs->pci_err_status);

	erpt_p->pe_pci_regs->pci_vflags = 0x0;

	if (erpt_p->pe_dflags & PCI_BRIDGE_DEV) {
		if (erpt_p->pe_pci_regs->pci_bdg_regs->pci_bdg_vflags &
		    PCI_BDG_SEC_STAT_VALID)
			pci_config_put16(erpt_p->pe_hdl, PCI_BCNF_SEC_STATUS,
			    erpt_p->pe_pci_regs->pci_bdg_regs->
			    pci_bdg_sec_stat);
		if (erpt_p->pe_pci_regs->pci_bdg_regs->pci_bdg_vflags &
		    PCI_BDG_CTRL_VALID)
			pci_config_put16(erpt_p->pe_hdl, PCI_BCNF_BCNTRL,
			    erpt_p->pe_pci_regs->pci_bdg_regs->pci_bdg_ctrl);

		erpt_p->pe_pci_regs->pci_bdg_regs->pci_bdg_vflags = 0x0;
	}
}
629 
/*
 * pcix_ereport_setup: Allocate structures for PCI-X error handling and ereport
 * generation.
 *
 * Looks up the "pcix-capid-pointer" property; if absent the device is
 * not PCI-X and nothing is allocated.  Otherwise sets PCIX_DEV in
 * pe_dflags and allocates either the bridge or non-bridge register
 * structure (plus ECC register structures when the capability version
 * implements them).  `dip` is otherwise unused (ARGSUSED).
 */
/* ARGSUSED */
static void
pcix_ereport_setup(dev_info_t *dip, pci_erpt_t *erpt_p)
{
	uint8_t pcix_cap_ptr;
	int i;

	pcix_cap_ptr = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "pcix-capid-pointer", PCI_CAP_NEXT_PTR_NULL);

	if (pcix_cap_ptr != PCI_CAP_NEXT_PTR_NULL)
		erpt_p->pe_dflags |= PCIX_DEV;
	else
		return;

	if (erpt_p->pe_dflags & PCI_BRIDGE_DEV) {
		pcix_bdg_error_regs_t *pcix_bdg_regs;

		erpt_p->pe_regs = kmem_zalloc(sizeof (pcix_bdg_error_regs_t),
		    KM_SLEEP);
		pcix_bdg_regs = (pcix_bdg_error_regs_t *)erpt_p->pe_regs;
		pcix_bdg_regs->pcix_bdg_cap_ptr = pcix_cap_ptr;
		/* Bridges carry the PCI-X version in the secondary status. */
		pcix_bdg_regs->pcix_bdg_ver = pci_config_get16(erpt_p->pe_hdl,
		    pcix_cap_ptr + PCI_PCIX_SEC_STATUS) & PCI_PCIX_VER_MASK;
		if (PCIX_ECC_VER_CHECK(pcix_bdg_regs->pcix_bdg_ver)) {
			/* One ECC register set per bank (primary/secondary). */
			for (i = 0; i < 2; i++) {
				pcix_bdg_regs->pcix_bdg_ecc_regs[i] =
				    kmem_zalloc(sizeof (pcix_ecc_regs_t),
				    KM_SLEEP);
			}
		}
	} else {
		pcix_error_regs_t *pcix_regs;

		erpt_p->pe_regs = kmem_zalloc(sizeof (pcix_error_regs_t),
		    KM_SLEEP);
		pcix_regs = (pcix_error_regs_t *)erpt_p->pe_regs;
		pcix_regs->pcix_cap_ptr = pcix_cap_ptr;
		/* Non-bridges carry the version in the PCI-X command. */
		pcix_regs->pcix_ver = pci_config_get16(erpt_p->pe_hdl,
		    pcix_cap_ptr + PCI_PCIX_COMMAND) & PCI_PCIX_VER_MASK;
		if (PCIX_ECC_VER_CHECK(pcix_regs->pcix_ver)) {
			pcix_regs->pcix_ecc_regs = kmem_zalloc(
			    sizeof (pcix_ecc_regs_t), KM_SLEEP);
		}
	}
}
680 
/*
 * pcie_ereport_setup: Detect PCI Express (and PCI Express/PCI-X bridge)
 * capabilities and allocate the register-capture structures used for
 * ereport generation.
 *
 * Relies on the "pcix-capid-pointer", "pcie-capid-pointer" and
 * "pcie-aer-pointer" properties; on sparc these are synthesized here by
 * walking the capability lists directly (temporary code, see comments).
 * Devices without the AER extended capability get a PCIEX_NADV ereport
 * posted and no advanced structures.  Finally, the AER CE/UE/SUE mask
 * registers are reprogrammed to the expected values when they differ.
 */
static void
pcie_ereport_setup(dev_info_t *dip, pci_erpt_t *erpt_p)
{
	pcie_error_regs_t *pcie_regs;
	pcie_adv_error_regs_t *pcie_adv_regs;
	char buf[FM_MAX_CLASS];
	uint8_t pcix_cap_ptr;
	uint8_t pcie_cap_ptr;
	uint16_t pcie_ecap_ptr;
	uint16_t dev_type = 0;
	uint32_t mask = pcie_expected_ue_mask;

	/*
	 * The following sparc specific code should be removed once the pci_cap
	 * interfaces create the necessary properties for us.
	 */
#if defined(__sparc)
	ushort_t status;
	uint32_t slot_cap;
	uint8_t cap_ptr = 0;
	uint8_t cap_id = 0;
	uint32_t hdr, hdr_next_ptr, hdr_cap_id;
	uint16_t offset = P2ALIGN(PCIE_EXT_CAP, 4);
	uint16_t aer_ptr = 0;

	/*
	 * Walk the standard capability list, publishing the PCI-X and
	 * PCI Express capability pointers (and slot capabilities for
	 * slot-implementing ports) as devinfo properties.
	 */
	cap_ptr = pci_config_get8(erpt_p->pe_hdl, PCI_CONF_CAP_PTR);
	if (pci_config_check(erpt_p->pe_hdl) == DDI_FM_OK) {
		while ((cap_id = pci_config_get8(erpt_p->pe_hdl, cap_ptr)) !=
		    0xff) {
			if (cap_id == PCI_CAP_ID_PCIX) {
				(void) ndi_prop_update_int(DDI_DEV_T_NONE, dip,
				    "pcix-capid-pointer", cap_ptr);
			}
			if (cap_id == PCI_CAP_ID_PCI_E) {
				status = pci_config_get16(erpt_p->pe_hdl,
				    cap_ptr + 2);
				if (status & PCIE_PCIECAP_SLOT_IMPL) {
					/* offset 14h is Slot Cap Register */
					slot_cap = pci_config_get32(
					    erpt_p->pe_hdl,
					    cap_ptr + PCIE_SLOTCAP);
					(void) ndi_prop_update_int(
					    DDI_DEV_T_NONE, dip,
					    "pcie-slotcap-reg", slot_cap);
				}
				(void) ndi_prop_update_int(DDI_DEV_T_NONE, dip,
				    "pcie-capid-reg", pci_config_get16(
				    erpt_p->pe_hdl, cap_ptr + PCIE_PCIECAP));
				(void) ndi_prop_update_int(DDI_DEV_T_NONE, dip,
				    "pcie-capid-pointer", cap_ptr);

			}
			if ((cap_ptr = pci_config_get8(erpt_p->pe_hdl,
			    cap_ptr + 1)) == 0xff || cap_ptr == 0 ||
			    (pci_config_check(erpt_p->pe_hdl) != DDI_FM_OK))
				break;
		}
	}

#endif

	pcix_cap_ptr = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "pcix-capid-pointer", PCI_CAP_NEXT_PTR_NULL);

	if (pcix_cap_ptr != PCI_CAP_NEXT_PTR_NULL)
		erpt_p->pe_dflags |= PCIX_DEV;

	pcie_cap_ptr = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS, "pcie-capid-pointer", PCI_CAP_NEXT_PTR_NULL);

	if (pcie_cap_ptr != PCI_CAP_NEXT_PTR_NULL) {
		erpt_p->pe_dflags |= PCIEX_DEV;
		erpt_p->pe_regs = kmem_zalloc(sizeof (pcie_error_regs_t),
		    KM_SLEEP);
		pcie_regs = (pcie_error_regs_t *)erpt_p->pe_regs;
		pcie_regs->pcie_cap_ptr = pcie_cap_ptr;
	}

	if (!(erpt_p->pe_dflags & PCIEX_DEV))
		return;

	/*
	 * Don't currently need to check for version here because we are
	 * compliant with PCIE 1.0a which is version 0 and is guaranteed
	 * software compatibility with future versions.  We will need to
	 * add errors for new detectors/features which are added in newer
	 * revisions [sec 7.8.2].
	 */
	pcie_regs->pcie_cap = pci_config_get16(erpt_p->pe_hdl,
	    pcie_regs->pcie_cap_ptr + PCIE_PCIECAP);

	dev_type = pcie_regs->pcie_cap & PCIE_PCIECAP_DEV_TYPE_MASK;

	/* PCIe bridge with a PCI-X secondary side: allocate for it too. */
	if ((erpt_p->pe_dflags & PCI_BRIDGE_DEV) &&
	    (erpt_p->pe_dflags & PCIX_DEV)) {
		int i;

		pcie_regs->pcix_bdg_regs =
		    kmem_zalloc(sizeof (pcix_bdg_error_regs_t), KM_SLEEP);

		pcie_regs->pcix_bdg_regs->pcix_bdg_cap_ptr = pcix_cap_ptr;
		pcie_regs->pcix_bdg_regs->pcix_bdg_ver =
		    pci_config_get16(erpt_p->pe_hdl,
		    pcix_cap_ptr + PCI_PCIX_SEC_STATUS) & PCI_PCIX_VER_MASK;

		if (PCIX_ECC_VER_CHECK(pcie_regs->pcix_bdg_regs->pcix_bdg_ver))
			for (i = 0; i < 2; i++)
				pcie_regs->pcix_bdg_regs->pcix_bdg_ecc_regs[i] =
				    kmem_zalloc(sizeof (pcix_ecc_regs_t),
				    KM_SLEEP);
	}

	if (dev_type == PCIE_PCIECAP_DEV_TYPE_ROOT) {
		erpt_p->pe_dflags |= PCIEX_RC_DEV;
		pcie_regs->pcie_rc_regs = kmem_zalloc(
		    sizeof (pcie_rc_error_regs_t), KM_SLEEP);
	}
	/*
	 * The following sparc specific code should be removed once the pci_cap
	 * interfaces create the necessary properties for us.
	 */
#if defined(__sparc)

	/*
	 * Walk the extended capability list looking for the AER
	 * capability and publish its offset as "pcie-aer-pointer".
	 */
	hdr = pci_config_get32(erpt_p->pe_hdl, offset);
	hdr_next_ptr = (hdr >> PCIE_EXT_CAP_NEXT_PTR_SHIFT) &
	    PCIE_EXT_CAP_NEXT_PTR_MASK;
	hdr_cap_id = (hdr >> PCIE_EXT_CAP_ID_SHIFT) & PCIE_EXT_CAP_ID_MASK;

	while ((hdr_next_ptr != PCIE_EXT_CAP_NEXT_PTR_NULL) &&
	    (hdr_cap_id != PCIE_EXT_CAP_ID_AER)) {
		offset = P2ALIGN(hdr_next_ptr, 4);
		hdr = pci_config_get32(erpt_p->pe_hdl, offset);
		hdr_next_ptr = (hdr >> PCIE_EXT_CAP_NEXT_PTR_SHIFT) &
		    PCIE_EXT_CAP_NEXT_PTR_MASK;
		hdr_cap_id = (hdr >> PCIE_EXT_CAP_ID_SHIFT) &
		    PCIE_EXT_CAP_ID_MASK;
	}

	if (hdr_cap_id == PCIE_EXT_CAP_ID_AER)
		aer_ptr = P2ALIGN(offset, 4);
	if (aer_ptr != PCI_CAP_NEXT_PTR_NULL)
		(void) ndi_prop_update_int(DDI_DEV_T_NONE, dip,
		    "pcie-aer-pointer", aer_ptr);
#endif

	/*
	 * Find and store if this device is capable of pci express
	 * advanced errors, if not report an error against the device.
	 */
	pcie_ecap_ptr = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "pcie-aer-pointer", PCI_CAP_NEXT_PTR_NULL);
	if (pcie_ecap_ptr != PCI_CAP_NEXT_PTR_NULL) {
		erpt_p->pe_dflags |= PCIEX_ADV_DEV;
		pcie_regs->pcie_adv_regs = kmem_zalloc(
		    sizeof (pcie_adv_error_regs_t), KM_SLEEP);
		pcie_regs->pcie_adv_regs->pcie_adv_cap_ptr = pcie_ecap_ptr;
	}

	if (!(erpt_p->pe_dflags & PCIEX_ADV_DEV)) {
		(void) snprintf(buf, FM_MAX_CLASS, "%s.%s",
		    PCIEX_ERROR_SUBCLASS, PCIEX_NADV);
		ddi_fm_ereport_post(dip, buf, NULL, DDI_NOSLEEP,
		    FM_VERSION, DATA_TYPE_UINT8, 0, NULL);
		return;
	}

	pcie_adv_regs = pcie_regs->pcie_adv_regs;

	/* Defensive: PCIEX_ADV_DEV implies this was just allocated. */
	if (pcie_adv_regs == NULL)
		return;
	/*
	 * Initialize structures for advanced PCI Express devices.
	 */

	/*
	 * Advanced error registers exist for PCI Express to PCI(X) Bridges and
	 * may also exist for PCI(X) to PCI Express Bridges, the latter is not
	 * well explained in the PCI Express to PCI/PCI-X Bridge Specification
	 * 1.0 and will be left out of the current gathering of these registers.
	 */
	if (dev_type == PCIE_PCIECAP_DEV_TYPE_PCIE2PCI) {
		erpt_p->pe_dflags |= PCIEX_2PCI_DEV;
		pcie_adv_regs->pcie_adv_bdg_regs = kmem_zalloc(
		    sizeof (pcie_adv_bdg_error_regs_t), KM_SLEEP);
	}

	if (erpt_p->pe_dflags & PCIEX_RC_DEV)
		pcie_adv_regs->pcie_adv_rc_regs = kmem_zalloc(
		    sizeof (pcie_adv_rc_error_regs_t), KM_SLEEP);

	/*
	 * Check that mask values are as expected, if not
	 * change them to what we desire.
	 */
	pci_regs_gather(dip, erpt_p);
	pcie_regs = (pcie_error_regs_t *)erpt_p->pe_regs;
	if (pcie_regs->pcie_adv_regs->pcie_ce_mask != pcie_expected_ce_mask) {
		pci_config_put32(erpt_p->pe_hdl,
		    pcie_ecap_ptr + PCIE_AER_CE_MASK, pcie_expected_ce_mask);
	}

	/* Disable PTLP/ECRC (or mask these two) for Switches */
	if (dev_type == PCIE_PCIECAP_DEV_TYPE_UP ||
	    dev_type == PCIE_PCIECAP_DEV_TYPE_DOWN)
		mask |= PCIE_AER_UCE_PTLP | PCIE_AER_UCE_ECRC;

	if (pcie_regs->pcie_adv_regs->pcie_ue_mask != mask) {
		pci_config_put32(erpt_p->pe_hdl,
		    pcie_ecap_ptr + PCIE_AER_UCE_MASK, mask);
	}
	if (erpt_p->pe_dflags & PCIEX_2PCI_DEV) {
		if (pcie_regs->pcie_adv_regs->pcie_adv_bdg_regs->pcie_sue_mask
		    != pcie_expected_sue_mask) {
			pci_config_put32(erpt_p->pe_hdl,
			    pcie_ecap_ptr + PCIE_AER_SUCE_MASK,
			    pcie_expected_sue_mask);
		}
	}
}
897 
898 /*
899  * pci_ereport_setup: Detect PCI device type and initialize structures to be
900  * used to generate ereports based on detected generic device errors.
901  */
902 void
903 pci_ereport_setup(dev_info_t *dip)
904 {
905 	struct dev_info *devi = DEVI(dip);
906 	struct i_ddi_fmhdl *fmhdl = devi->devi_fmhdl;
907 	pci_erpt_t *erpt_p;
908 	uint8_t pci_hdr_type;
909 	uint16_t pci_status;
910 	pci_regspec_t *pci_rp;
911 	int32_t len;
912 	uint32_t phys_hi;
913 
914 	/*
915 	 * If device is not ereport capbable then report an error against the
916 	 * driver for using this interface,
917 	 */
918 	if (!DDI_FM_EREPORT_CAP(ddi_fm_capable(dip)) &&
919 	    !DDI_FM_ERRCB_CAP(ddi_fm_capable(dip))) {
920 		i_ddi_drv_ereport_post(dip, DVR_EFMCAP, NULL, DDI_SLEEP);
921 		return;
922 	}
923 
924 	/*
925 	 * ASSERT fmhdl exists and fh_bus_specific is NULL.
926 	 */
927 	ASSERT(fmhdl && (fmhdl->fh_bus_specific == NULL));
928 
929 	erpt_p = kmem_zalloc(sizeof (pci_erpt_t), KM_SLEEP);
930 
931 	if (pci_config_setup(dip, &erpt_p->pe_hdl) != DDI_SUCCESS)
932 		goto error;
933 
934 	erpt_p->pe_pci_regs = kmem_zalloc(sizeof (pci_error_regs_t), KM_SLEEP);
935 
936 	pci_status = pci_config_get16(erpt_p->pe_hdl, PCI_CONF_STAT);
937 	if (pci_config_check(erpt_p->pe_hdl) != DDI_FM_OK)
938 		goto error;
939 
940 	/*
941 	 * Get header type and record if device is a bridge.
942 	 */
943 	pci_hdr_type = pci_config_get8(erpt_p->pe_hdl, PCI_CONF_HEADER);
944 	if (pci_config_check(erpt_p->pe_hdl) != DDI_FM_OK)
945 		goto error;
946 
947 	/*
948 	 * Check to see if PCI device is a bridge, if so allocate pci bridge
949 	 * error register structure.
950 	 */
951 	if ((pci_hdr_type & PCI_HEADER_TYPE_M) == PCI_HEADER_PPB) {
952 		erpt_p->pe_dflags |= PCI_BRIDGE_DEV;
953 		erpt_p->pe_pci_regs->pci_bdg_regs = kmem_zalloc(
954 		    sizeof (pci_bdg_error_regs_t), KM_SLEEP);
955 	}
956 
957 	if (ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, "reg",
958 	    (caddr_t)&pci_rp, &len) == DDI_SUCCESS) {
959 		phys_hi = pci_rp->pci_phys_hi;
960 		kmem_free(pci_rp, len);
961 
962 		erpt_p->pe_bdf = (uint16_t)(PCI_REG_BDFR_G(phys_hi) >>
963 		    PCI_REG_FUNC_SHIFT);
964 	}
965 
966 
967 	if (!(pci_status & PCI_STAT_CAP)) {
968 		goto done;
969 	}
970 
971 	/*
972 	 * Initialize structures for PCI Express and PCI-X devices.
973 	 * Order matters below and pcie_ereport_setup should preceed
974 	 * pcix_ereport_setup.
975 	 */
976 	pcie_ereport_setup(dip, erpt_p);
977 
978 	if (!(erpt_p->pe_dflags & PCIEX_DEV)) {
979 		pcix_ereport_setup(dip, erpt_p);
980 	}
981 
982 done:
983 	pci_regs_gather(dip, erpt_p);
984 	pci_regs_clear(erpt_p);
985 
986 	/*
987 	 * Before returning set fh_bus_specific to completed pci_erpt_t
988 	 * structure
989 	 */
990 	fmhdl->fh_bus_specific = (void *)erpt_p;
991 
992 	return;
993 error:
994 	if (erpt_p->pe_pci_regs)
995 		kmem_free(erpt_p->pe_pci_regs, sizeof (pci_error_regs_t));
996 	kmem_free(erpt_p, sizeof (pci_erpt_t));
997 	erpt_p = NULL;
998 }
999 
1000 static void
1001 pcix_ereport_teardown(pci_erpt_t *erpt_p)
1002 {
1003 	if (erpt_p->pe_dflags & PCI_BRIDGE_DEV) {
1004 		pcix_bdg_error_regs_t *pcix_bdg_regs;
1005 		uint16_t pcix_ver;
1006 
1007 		pcix_bdg_regs = (pcix_bdg_error_regs_t *)erpt_p->pe_regs;
1008 		pcix_ver = pcix_bdg_regs->pcix_bdg_ver;
1009 		if (PCIX_ECC_VER_CHECK(pcix_ver)) {
1010 			int i;
1011 			for (i = 0; i < 2; i++)
1012 				kmem_free(pcix_bdg_regs->pcix_bdg_ecc_regs[i],
1013 				    sizeof (pcix_ecc_regs_t));
1014 		}
1015 		kmem_free(erpt_p->pe_regs, sizeof (pcix_bdg_error_regs_t));
1016 	} else {
1017 		pcix_error_regs_t *pcix_regs;
1018 		uint16_t pcix_ver;
1019 
1020 		pcix_regs = (pcix_error_regs_t *)erpt_p->pe_regs;
1021 		pcix_ver = pcix_regs->pcix_ver;
1022 		if (PCIX_ECC_VER_CHECK(pcix_ver)) {
1023 			kmem_free(pcix_regs->pcix_ecc_regs,
1024 			    sizeof (pcix_ecc_regs_t));
1025 		}
1026 		kmem_free(erpt_p->pe_regs, sizeof (pcix_error_regs_t));
1027 	}
1028 }
1029 
1030 static void
1031 pcie_ereport_teardown(pci_erpt_t *erpt_p)
1032 {
1033 	pcie_error_regs_t *pcie_regs = (pcie_error_regs_t *)erpt_p->pe_regs;
1034 
1035 	if (erpt_p->pe_dflags & PCIEX_ADV_DEV) {
1036 		pcie_adv_error_regs_t *pcie_adv = pcie_regs->pcie_adv_regs;
1037 
1038 		if (erpt_p->pe_dflags & PCIEX_2PCI_DEV)
1039 			kmem_free(pcie_adv->pcie_adv_bdg_regs,
1040 			    sizeof (pcie_adv_bdg_error_regs_t));
1041 		if (erpt_p->pe_dflags & PCIEX_RC_DEV)
1042 			kmem_free(pcie_adv->pcie_adv_rc_regs,
1043 			    sizeof (pcie_adv_rc_error_regs_t));
1044 		kmem_free(pcie_adv, sizeof (pcie_adv_error_regs_t));
1045 	}
1046 
1047 	if (erpt_p->pe_dflags & PCIEX_RC_DEV)
1048 		kmem_free(pcie_regs->pcie_rc_regs,
1049 		    sizeof (pcie_rc_error_regs_t));
1050 
1051 	if (erpt_p->pe_dflags & PCI_BRIDGE_DEV) {
1052 		if (erpt_p->pe_dflags & PCIX_DEV) {
1053 			uint16_t pcix_ver = pcie_regs->pcix_bdg_regs->
1054 			    pcix_bdg_ver;
1055 
1056 			if (PCIX_ECC_VER_CHECK(pcix_ver)) {
1057 				int i;
1058 				for (i = 0; i < 2; i++)
1059 					kmem_free(pcie_regs->pcix_bdg_regs->
1060 					    pcix_bdg_ecc_regs[i],
1061 					    sizeof (pcix_ecc_regs_t));
1062 			}
1063 			kmem_free(pcie_regs->pcix_bdg_regs,
1064 			    sizeof (pcix_bdg_error_regs_t));
1065 		}
1066 	}
1067 	kmem_free(erpt_p->pe_regs, sizeof (pcie_error_regs_t));
1068 }
1069 
1070 void
1071 pci_ereport_teardown(dev_info_t *dip)
1072 {
1073 	struct i_ddi_fmhdl *fmhdl = DEVI(dip)->devi_fmhdl;
1074 	pci_erpt_t *erpt_p;
1075 
1076 	if (!DDI_FM_EREPORT_CAP(ddi_fm_capable(dip)) &&
1077 	    !DDI_FM_ERRCB_CAP(ddi_fm_capable(dip))) {
1078 		i_ddi_drv_ereport_post(dip, DVR_EFMCAP, NULL, DDI_SLEEP);
1079 	}
1080 
1081 	ASSERT(fmhdl);
1082 
1083 	erpt_p = (pci_erpt_t *)fmhdl->fh_bus_specific;
1084 	if (erpt_p == NULL)
1085 		return;
1086 
1087 	if (erpt_p->pe_dflags & PCIEX_DEV)
1088 		pcie_ereport_teardown(erpt_p);
1089 	else if (erpt_p->pe_dflags & PCIX_DEV)
1090 		pcix_ereport_teardown(erpt_p);
1091 	pci_config_teardown((ddi_acc_handle_t *)&erpt_p->pe_hdl);
1092 	if (erpt_p->pe_dflags & PCI_BRIDGE_DEV)
1093 		kmem_free(erpt_p->pe_pci_regs->pci_bdg_regs,
1094 		    sizeof (pci_bdg_error_regs_t));
1095 	kmem_free(erpt_p->pe_pci_regs, sizeof (pci_error_regs_t));
1096 	kmem_free(erpt_p, sizeof (pci_erpt_t));
1097 	fmhdl->fh_bus_specific = NULL;
1098 	/*
1099 	 * The following sparc specific code should be removed once the pci_cap
1100 	 * interfaces create the necessary properties for us.
1101 	 */
1102 #if defined(__sparc)
1103 	(void) ndi_prop_remove(DDI_DEV_T_NONE, dip, "pcix-capid-pointer");
1104 	(void) ndi_prop_remove(DDI_DEV_T_NONE, dip, "pcie-slotcap-reg");
1105 	(void) ndi_prop_remove(DDI_DEV_T_NONE, dip, "pcie-capid-reg");
1106 	(void) ndi_prop_remove(DDI_DEV_T_NONE, dip, "pcie-capid-pointer");
1107 	(void) ndi_prop_remove(DDI_DEV_T_NONE, dip, "pcie-aer-pointer");
1108 #endif
1109 }
1110 
1111 /*
1112  * Function used by PCI device and nexus error handlers to check if a
1113  * captured address resides in their DMA or ACC handle caches or the caches of
1114  * their children devices, respectively.
1115  */
1116 static int
1117 pci_dev_hdl_lookup(dev_info_t *dip, int type, ddi_fm_error_t *derr,
1118     void *addr)
1119 {
1120 	struct i_ddi_fmhdl *fmhdl = DEVI(dip)->devi_fmhdl;
1121 	pci_erpt_t *erpt_p = (pci_erpt_t *)fmhdl->fh_bus_specific;
1122 
1123 	if (erpt_p->pe_dflags & PCI_BRIDGE_DEV)
1124 		return (ndi_fmc_error(dip, NULL, type, derr->fme_ena, addr));
1125 	else
1126 		return (ndi_fmc_entry_error(dip, type, derr, addr));
1127 }
1128 
/*
 * pcie_ereport_post: post a PCI Express ereport of class "buf" using the
 * register snapshot already gathered into erpt_p.  The payload depends on
 * errtype:
 *  - PCIEX_TYPE_CE:          device status + AER correctable error status.
 *  - PCIEX_TYPE_UE:          device status + AER uncorrectable status,
 *                            severity, control, and captured source id
 *                            (plus the four captured TLP header words on
 *                            DEBUG kernels).
 *  - PCIEX_TYPE_GEN:         device status only (device without AER).
 *  - PCIEX_TYPE_RC_UE_MSG /
 *    PCIEX_TYPE_RC_CE_MSG:   root complex error status + source id of the
 *                            agent whose error message was received.
 *  - PCIEX_TYPE_RC_MULT_MSG: root complex error status only (multiple
 *                            messages received, no single source).
 * Unknown errtypes are silently ignored.
 */
static void
pcie_ereport_post(dev_info_t *dip, ddi_fm_error_t *derr, pci_erpt_t *erpt_p,
    char *buf, int errtype)
{
	pcie_error_regs_t *pcie_regs = (pcie_error_regs_t *)erpt_p->pe_regs;
	pcie_adv_error_regs_t *pcie_adv_regs = pcie_regs->pcie_adv_regs;
	pcie_adv_rc_error_regs_t *pcie_adv_rc_regs;

	switch (errtype) {
	    case PCIEX_TYPE_CE:
		ddi_fm_ereport_post(dip, buf, derr->fme_ena,
		    DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8, 0,
		    PCIEX_DEVSTS_REG, DATA_TYPE_UINT16,
		    pcie_regs->pcie_err_status,
		    PCIEX_CE_STATUS_REG, DATA_TYPE_UINT32,
		    pcie_adv_regs->pcie_ce_status, NULL);
		break;
	    case PCIEX_TYPE_UE:
		/*
		 * NOTE(review): source-id validity is encoded as "bdf
		 * nonzero"; the `1 : NULL` vararg relies on NULL being a
		 * plain integer 0 constant here - confirm against sys/null.h
		 * if this is ever built with a pointer-typed NULL.
		 */
		ddi_fm_ereport_post(dip, buf, derr->fme_ena,
		    DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8, 0,
		    PCIEX_DEVSTS_REG, DATA_TYPE_UINT16,
		    pcie_regs->pcie_err_status,
		    PCIEX_UE_STATUS_REG, DATA_TYPE_UINT32,
		    pcie_adv_regs->pcie_ue_status, PCIEX_UE_SEV_REG,
		    DATA_TYPE_UINT32, pcie_adv_regs->pcie_ue_sev,
		    PCIEX_ADV_CTL, DATA_TYPE_UINT32,
		    pcie_adv_regs->pcie_adv_ctl,
		    PCIEX_SRC_ID, DATA_TYPE_UINT16,
		    pcie_adv_regs->pcie_adv_bdf,
		    PCIEX_SRC_VALID, DATA_TYPE_BOOLEAN_VALUE,
		    (pcie_adv_regs->pcie_adv_bdf != NULL) ?
		    1 : NULL,
#ifdef DEBUG
		    PCIEX_UE_HDR0, DATA_TYPE_UINT32,
		    pcie_adv_regs->pcie_ue_hdr0,
		    PCIEX_UE_HDR1, DATA_TYPE_UINT32,
		    pcie_adv_regs->pcie_ue_hdr[0],
		    PCIEX_UE_HDR2, DATA_TYPE_UINT32,
		    pcie_adv_regs->pcie_ue_hdr[1],
		    PCIEX_UE_HDR3, DATA_TYPE_UINT32,
		    pcie_adv_regs->pcie_ue_hdr[2],
#endif
		    NULL);
		break;
	    case PCIEX_TYPE_GEN:
		ddi_fm_ereport_post(dip, buf, derr->fme_ena,
		    DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8,
		    0, PCIEX_DEVSTS_REG, DATA_TYPE_UINT16,
		    pcie_regs->pcie_err_status, NULL);
		break;
	    case PCIEX_TYPE_RC_UE_MSG:
	    case PCIEX_TYPE_RC_CE_MSG:
		pcie_adv_rc_regs = pcie_adv_regs->pcie_adv_rc_regs;

		/* pick the UE or CE source id to match the message type */
		ddi_fm_ereport_post(dip, buf, derr->fme_ena,
		    DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8, 0,
		    PCIEX_ROOT_ERRSTS_REG, DATA_TYPE_UINT32,
		    pcie_adv_rc_regs->pcie_rc_err_status,
		    PCIEX_SRC_ID, DATA_TYPE_UINT16,
		    (errtype == PCIEX_TYPE_RC_UE_MSG) ?
		    pcie_adv_rc_regs->pcie_rc_ue_src_id :
		    pcie_adv_rc_regs->pcie_rc_ce_src_id,
		    PCIEX_SRC_VALID, DATA_TYPE_BOOLEAN_VALUE,
		    (errtype == PCIEX_TYPE_RC_UE_MSG) ?
		    (pcie_adv_regs->pcie_adv_vflags & PCIE_SRC_ID_VALID &&
		    pcie_adv_rc_regs->pcie_rc_ue_src_id != 0) :
		    (pcie_adv_regs->pcie_adv_vflags & PCIE_SRC_ID_VALID &&
		    pcie_adv_rc_regs->pcie_rc_ce_src_id != 0), NULL);
		break;
	    case PCIEX_TYPE_RC_MULT_MSG:
		pcie_adv_rc_regs = pcie_adv_regs->pcie_adv_rc_regs;

		ddi_fm_ereport_post(dip, buf, derr->fme_ena,
		    DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8, 0,
		    PCIEX_ROOT_ERRSTS_REG, DATA_TYPE_UINT32,
		    pcie_adv_rc_regs->pcie_rc_err_status, NULL);
		break;
	    default:
		break;
	}
}
1210 
1211 static void
1212 pcie_check_addr(dev_info_t *dip, ddi_fm_error_t *derr, pci_erpt_t *eprt_p)
1213 {
1214 	pcie_error_regs_t *pcie_regs = (pcie_error_regs_t *)eprt_p->pe_regs;
1215 	pcie_adv_error_regs_t *pcie_adv_regs = pcie_regs->pcie_adv_regs;
1216 	pcie_tlp_hdr_t *ue_hdr0;
1217 	uint32_t *ue_hdr;
1218 	uint64_t addr = NULL;
1219 
1220 	if (!(pcie_adv_regs->pcie_adv_vflags & PCIE_UE_HDR_VALID)) {
1221 		derr->fme_status = DDI_FM_UNKNOWN;
1222 		return;
1223 	}
1224 	ue_hdr0 = (pcie_tlp_hdr_t *)&pcie_adv_regs->pcie_ue_hdr0;
1225 	ue_hdr = pcie_adv_regs->pcie_ue_hdr;
1226 
1227 	switch (ue_hdr0->type) {
1228 	    case PCIE_TLP_TYPE_MEM:
1229 	    case PCIE_TLP_TYPE_MEMLK:
1230 		if ((ue_hdr0->fmt & 0x1) == 0x1) {
1231 			pcie_mem64_t *mem64_tlp = (pcie_mem64_t *)ue_hdr;
1232 
1233 			addr = (uint64_t)mem64_tlp->addr1 << 32 |
1234 			    (uint32_t)mem64_tlp->addr0 << 2;
1235 			pcie_adv_regs->pcie_adv_bdf = mem64_tlp->rid;
1236 		} else {
1237 			pcie_memio32_t *memio32_tlp = (pcie_memio32_t *)ue_hdr;
1238 
1239 			addr = (uint32_t)memio32_tlp->addr0 << 2;
1240 			pcie_adv_regs->pcie_adv_bdf = memio32_tlp->rid;
1241 		}
1242 
1243 		derr->fme_status = pci_dev_hdl_lookup(dip, DMA_HANDLE, derr,
1244 		    (void *) &addr);
1245 		/*
1246 		 * If DMA handle is not found error could have been a memory
1247 		 * mapped IO address so check in the access cache
1248 		 */
1249 		if (derr->fme_status == DDI_FM_UNKNOWN)
1250 			derr->fme_status = pci_dev_hdl_lookup(dip, ACC_HANDLE,
1251 			    derr, (void *) &addr);
1252 		break;
1253 
1254 	    case PCIE_TLP_TYPE_IO:
1255 		{
1256 			pcie_memio32_t *memio32_tlp = (pcie_memio32_t *)ue_hdr;
1257 
1258 			addr = (uint32_t)memio32_tlp->addr0 << 2;
1259 			pcie_adv_regs->pcie_adv_bdf = memio32_tlp->rid;
1260 			derr->fme_status = pci_dev_hdl_lookup(dip, ACC_HANDLE,
1261 			    derr, (void *) &addr);
1262 			break;
1263 		}
1264 	    case PCIE_TLP_TYPE_CFG0:
1265 	    case PCIE_TLP_TYPE_CFG1:
1266 		{
1267 			pcie_cfg_t *cfg_tlp = (pcie_cfg_t *)ue_hdr;
1268 
1269 			pcie_adv_regs->pcie_adv_bdf = cfg_tlp->rid;
1270 			derr->fme_status = DDI_FM_UNKNOWN;
1271 			break;
1272 		}
1273 	    case PCIE_TLP_TYPE_MSG:
1274 		{
1275 			pcie_msg_t *msg_tlp = (pcie_msg_t *)ue_hdr;
1276 
1277 			pcie_adv_regs->pcie_adv_bdf = msg_tlp->rid;
1278 			derr->fme_status = DDI_FM_UNKNOWN;
1279 			break;
1280 		}
1281 	    case PCIE_TLP_TYPE_CPL:
1282 	    case PCIE_TLP_TYPE_CPLLK:
1283 		{
1284 			pcie_cpl_t *cpl_tlp = (pcie_cpl_t *)ue_hdr;
1285 
1286 			pcie_adv_regs->pcie_adv_bdf = cpl_tlp->cid;
1287 			derr->fme_status = DDI_FM_UNKNOWN;
1288 			break;
1289 		}
1290 	    case PCIE_TLP_TYPE_MSI:
1291 	    default:
1292 		derr->fme_status = DDI_FM_UNKNOWN;
1293 	}
1294 
1295 	/*
1296 	 * If no handle was found in the children caches and their is no
1297 	 * address infomation already stored and we have a captured address
1298 	 * then we need to store it away so that intermediate bridges can
1299 	 * check if the address exists in their handle caches.
1300 	 */
1301 	if (derr->fme_status == DDI_FM_UNKNOWN &&
1302 	    derr->fme_bus_specific == NULL &&
1303 	    addr != NULL)
1304 		derr->fme_bus_specific = (void *)(uintptr_t)addr;
1305 }
1306 
1307 static void
1308 pcie_pci_check_addr(dev_info_t *dip, ddi_fm_error_t *derr, pci_erpt_t *eprt_p)
1309 {
1310 	pcie_error_regs_t *pcie_regs = (pcie_error_regs_t *)eprt_p->pe_regs;
1311 	pcie_adv_error_regs_t *pcie_adv_regs = pcie_regs->pcie_adv_regs;
1312 	pcie_adv_bdg_error_regs_t *pcie_bdg_regs =
1313 	    pcie_adv_regs->pcie_adv_bdg_regs;
1314 	uint64_t addr = NULL;
1315 	pcix_attr_t *pcie_pci_sue_attr;
1316 	int cmd;
1317 	int dual_addr = 0;
1318 
1319 	if (!(pcie_adv_regs->pcie_adv_vflags & PCIE_SUE_HDR_VALID)) {
1320 		derr->fme_status = DDI_FM_UNKNOWN;
1321 		return;
1322 	}
1323 
1324 	pcie_pci_sue_attr = (pcix_attr_t *)&pcie_bdg_regs->pcie_sue_hdr0;
1325 	cmd = (pcie_bdg_regs->pcie_sue_hdr[0] >>
1326 	    PCIE_AER_SUCE_HDR_CMD_LWR_SHIFT) & PCIE_AER_SUCE_HDR_CMD_LWR_MASK;
1327 cmd_switch:
1328 	switch (cmd) {
1329 	    case PCI_PCIX_CMD_IORD:
1330 	    case PCI_PCIX_CMD_IOWR:
1331 		pcie_adv_regs->pcie_adv_bdf = pcie_pci_sue_attr->rid;
1332 
1333 		addr = pcie_bdg_regs->pcie_sue_hdr[2];
1334 		addr = (addr << PCIE_AER_SUCE_HDR_ADDR_SHIFT) |
1335 		    pcie_bdg_regs->pcie_sue_hdr[1];
1336 
1337 		derr->fme_status = pci_dev_hdl_lookup(dip, ACC_HANDLE,
1338 		    derr, (void *) &addr);
1339 		break;
1340 	    case PCI_PCIX_CMD_MEMRD_DW:
1341 	    case PCI_PCIX_CMD_MEMWR:
1342 	    case PCI_PCIX_CMD_MEMRD_BL:
1343 	    case PCI_PCIX_CMD_MEMWR_BL:
1344 	    case PCI_PCIX_CMD_MEMRDBL:
1345 	    case PCI_PCIX_CMD_MEMWRBL:
1346 		pcie_adv_regs->pcie_adv_bdf = pcie_pci_sue_attr->rid;
1347 
1348 		addr = pcie_bdg_regs->pcie_sue_hdr[2];
1349 		addr = (addr << PCIE_AER_SUCE_HDR_ADDR_SHIFT) |
1350 		    pcie_bdg_regs->pcie_sue_hdr[1];
1351 
1352 		derr->fme_status = pci_dev_hdl_lookup(dip, DMA_HANDLE,
1353 		    derr, (void *) &addr);
1354 		if (derr->fme_status == DDI_FM_UNKNOWN)
1355 			derr->fme_status = pci_dev_hdl_lookup(dip, ACC_HANDLE,
1356 			    derr, (void *) &addr);
1357 		break;
1358 	    case PCI_PCIX_CMD_CFRD:
1359 	    case PCI_PCIX_CMD_CFWR:
1360 		pcie_adv_regs->pcie_adv_bdf = pcie_pci_sue_attr->rid;
1361 
1362 		derr->fme_status = DDI_FM_UNKNOWN;
1363 		break;
1364 	    case PCI_PCIX_CMD_DADR:
1365 		cmd = (pcie_bdg_regs->pcie_sue_hdr[0] >>
1366 		    PCIE_AER_SUCE_HDR_CMD_UP_SHIFT) &
1367 		    PCIE_AER_SUCE_HDR_CMD_UP_MASK;
1368 		if (dual_addr)
1369 			break;
1370 		++dual_addr;
1371 		goto cmd_switch;
1372 	    default:
1373 		derr->fme_status = DDI_FM_UNKNOWN;
1374 	}
1375 
1376 	/*
1377 	 * If no handle was found in the children caches and their is no
1378 	 * address infomation already stored and we have a captured address
1379 	 * then we need to store it away so that intermediate bridges can
1380 	 * check if the address exists in their handle caches.
1381 	 */
1382 	if (derr->fme_status == DDI_FM_UNKNOWN &&
1383 	    derr->fme_bus_specific == NULL &&
1384 	    addr != NULL)
1385 		derr->fme_bus_specific = (void *)(uintptr_t)addr;
1386 }
1387 
1388 static int
1389 pcix_check_addr(dev_info_t *dip, ddi_fm_error_t *derr,
1390     pcix_ecc_regs_t *pcix_ecc_regs)
1391 {
1392 	int cmd = (pcix_ecc_regs->pcix_ecc_ctlstat >> 16) & 0xf;
1393 	uint64_t addr;
1394 
1395 	addr = pcix_ecc_regs->pcix_ecc_secaddr;
1396 	addr = addr << 32;
1397 	addr |= pcix_ecc_regs->pcix_ecc_fstaddr;
1398 
1399 	switch (cmd) {
1400 	    case PCI_PCIX_CMD_INTR:
1401 	    case PCI_PCIX_CMD_SPEC:
1402 		return (DDI_FM_FATAL);
1403 	    case PCI_PCIX_CMD_IORD:
1404 	    case PCI_PCIX_CMD_IOWR:
1405 		return (pci_dev_hdl_lookup(dip, ACC_HANDLE, derr,
1406 		    (void *) &addr));
1407 	    case PCI_PCIX_CMD_DEVID:
1408 		return (DDI_FM_FATAL);
1409 	    case PCI_PCIX_CMD_MEMRD_DW:
1410 	    case PCI_PCIX_CMD_MEMWR:
1411 	    case PCI_PCIX_CMD_MEMRD_BL:
1412 	    case PCI_PCIX_CMD_MEMWR_BL:
1413 		return (pci_dev_hdl_lookup(dip, DMA_HANDLE, derr,
1414 		    (void *) &addr));
1415 	    case PCI_PCIX_CMD_CFRD:
1416 	    case PCI_PCIX_CMD_CFWR:
1417 		return (pci_dev_hdl_lookup(dip, ACC_HANDLE, derr,
1418 		    (void *) &addr));
1419 	    case PCI_PCIX_CMD_SPL:
1420 	    case PCI_PCIX_CMD_DADR:
1421 		return (DDI_FM_FATAL);
1422 	    case PCI_PCIX_CMD_MEMRDBL:
1423 	    case PCI_PCIX_CMD_MEMWRBL:
1424 		return (pci_dev_hdl_lookup(dip, DMA_HANDLE, derr,
1425 		    (void *) &addr));
1426 	    default:
1427 		return (DDI_FM_FATAL);
1428 	}
1429 }
1430 
/*
 * pci_bdg_error_report: generate ereports for errors latched in a PCI
 * bridge's secondary status and bridge control registers, then dispatch to
 * children and/or search handle caches as appropriate for the access type
 * recorded in derr->fme_flag.  Severity is accumulated into the local
 * fatal/nonfatal/unknown/ok counters via PCI_FM_SEV_INC() and collapsed to
 * a single DDI_FM_* value on return (fatal takes precedence, then
 * nonfatal, then unknown).
 */
/*ARGSUSED*/
static int
pci_bdg_error_report(dev_info_t *dip, ddi_fm_error_t *derr, pci_erpt_t *erpt_p)
{
	pci_bdg_error_regs_t *pci_bdg_regs = erpt_p->pe_pci_regs->pci_bdg_regs;
	int fatal = 0;
	int nonfatal = 0;
	int unknown = 0;
	int ok = 0;
	int ret = DDI_FM_OK;
	char buf[FM_MAX_CLASS];
	int i;

	/* expected (trap-protected) accesses skip ereport generation */
	if (derr->fme_flag != DDI_FM_ERR_UNEXPECTED)
		goto done;

	/* secondary discard timer expired: severity not yet determinable */
	if ((pci_bdg_regs->pci_bdg_vflags & PCI_BDG_CTRL_VALID) &&
	    (pci_bdg_regs->pci_bdg_ctrl & PCI_BCNF_BCNTRL_DTO_STAT)) {
		(void) snprintf(buf, FM_MAX_CLASS, "%s.%s",
		    PCI_ERROR_SUBCLASS, PCI_DTO);
		ddi_fm_ereport_post(dip, buf, derr->fme_ena,
		    DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8, 0,
		    PCI_SEC_CONFIG_STATUS, DATA_TYPE_UINT16,
		    pci_bdg_regs->pci_bdg_sec_stat, PCI_BCNTRL,
		    DATA_TYPE_UINT16, pci_bdg_regs->pci_bdg_ctrl, NULL);
		unknown++;
	}

	/*
	 * Post one ereport per secondary-status bit set, and queue a
	 * target-specific deferred ereport when a captured address exists
	 * and the table entry defines a target class.
	 */
	if (pci_bdg_regs->pci_bdg_vflags & PCI_BDG_SEC_STAT_VALID) {
		for (i = 0; pci_bdg_err_tbl[i].err_class != NULL; i++) {
			if (pci_bdg_regs->pci_bdg_sec_stat &
			    pci_bdg_err_tbl[i].reg_bit) {
				(void) snprintf(buf, FM_MAX_CLASS, "%s.%s-%s",
				    PCI_ERROR_SUBCLASS, PCI_SEC_ERROR_SUBCLASS,
				    pci_bdg_err_tbl[i].err_class);
				ddi_fm_ereport_post(dip, buf, derr->fme_ena,
				    DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8, 0,
				    PCI_SEC_CONFIG_STATUS, DATA_TYPE_UINT16,
				    pci_bdg_regs->pci_bdg_sec_stat, PCI_BCNTRL,
				    DATA_TYPE_UINT16,
				    pci_bdg_regs->pci_bdg_ctrl, NULL);
				PCI_FM_SEV_INC(pci_bdg_err_tbl[i].flags);
				if (derr->fme_bus_specific &&
				    pci_bdg_err_tbl[i].terr_class)
					pci_target_enqueue(derr->fme_ena,
					    pci_bdg_err_tbl[i].terr_class,
					    PCI_ERROR_SUBCLASS,
					    (uintptr_t)derr->fme_bus_specific);
			}
		}
#if !defined(__sparc)
		/*
		 * For x86, many drivers and even user-level code currently get
		 * away with accessing bad addresses, getting a UR and getting
		 * -1 returned. Unfortunately, we have no control over this, so
		 * we will have to treat all URs as nonfatal. Moreover, if the
		 * leaf driver is non-hardened, then we don't actually see the
		 * UR directly. All we see is a secondary bus master abort at
		 * the root complex - so it's this condition that we actually
		 * need to treat as nonfatal (providing no other unrelated nfe
		 * conditions have also been seen by the root complex).
		 */
		if ((erpt_p->pe_dflags & PCIEX_RC_DEV) &&
		    (pci_bdg_regs->pci_bdg_sec_stat & PCI_STAT_R_MAST_AB) &&
		    !(pci_bdg_regs->pci_bdg_sec_stat & PCI_STAT_S_PERROR)) {
			pcie_error_regs_t *pcie_regs =
			    (pcie_error_regs_t *)erpt_p->pe_regs;
			if ((pcie_regs->pcie_vflags & PCIE_ERR_STATUS_VALID) &&
			    !(pcie_regs->pcie_err_status &
			    PCIE_DEVSTS_NFE_DETECTED))
				nonfatal++;
		}
#endif
	}

done:

	/*
	 * Need to check for poke and cautious put. We already know peek
	 * and cautious get errors occurred (as we got a trap) and we know
	 * they are nonfatal.
	 */
	if (derr->fme_flag == DDI_FM_ERR_EXPECTED) {
		/*
		 * for cautious puts we treat all errors as nonfatal. Actually
		 * we set nonfatal for cautious gets as well - doesn't do any
		 * harm
		 */
		if (pci_bdg_regs->pci_bdg_sec_stat & (PCI_STAT_R_TARG_AB |
		    PCI_STAT_R_MAST_AB | PCI_STAT_S_PERROR | PCI_STAT_S_SYSERR))
			nonfatal++;

		/*
		 * for cautious accesses we already have the acc_handle. Just
		 * need to call children to clear their error bits
		 */
		ret = ndi_fm_handler_dispatch(dip, NULL, derr);
		PCI_FM_SEV_INC(ret);
		return (fatal ? DDI_FM_FATAL : (nonfatal ? DDI_FM_NONFATAL :
		    (unknown ? DDI_FM_UNKNOWN : DDI_FM_OK)));
	}
	if (derr->fme_flag == DDI_FM_ERR_POKE) {
		/*
		 * special case for pokes - we only consider master abort
		 * and target abort as nonfatal. Sserr with no master abort is
		 * fatal, but master/target abort can come in on separate
		 * instance, so return unknown and parent will determine if
		 * nonfatal (if another child returned nonfatal - ie master
		 * or target abort) or fatal otherwise
		 */
		if (pci_bdg_regs->pci_bdg_sec_stat & (PCI_STAT_R_TARG_AB |
		    PCI_STAT_R_MAST_AB))
			nonfatal++;
		if (erpt_p->pe_pci_regs->pci_err_status & PCI_STAT_S_SYSERR)
			unknown++;
	}

	/*
	 * If errant address is passed in then attempt to find
	 * ACC/DMA handle in caches.
	 */
	if (derr->fme_bus_specific) {
		int i;

		/* i == 0 searches the DMA cache, i == 1 the access cache */
		for (i = 0; i < 2; i++) {
			ret = ndi_fmc_error(dip, NULL, i ? ACC_HANDLE :
			    DMA_HANDLE, derr->fme_ena,
			    (void *)&derr->fme_bus_specific);
			PCI_FM_SEV_INC(ret);
		}
	}

	/*
	 * now check children below the bridge, only if errant handle was not
	 * found
	 */
	if (!derr->fme_acc_handle && !derr->fme_dma_handle) {
		ret = ndi_fm_handler_dispatch(dip, NULL, derr);
		PCI_FM_SEV_INC(ret);
	}

	return (fatal ? DDI_FM_FATAL : (nonfatal ? DDI_FM_NONFATAL :
	    (unknown ? DDI_FM_UNKNOWN : DDI_FM_OK)));
}
1575 
/*
 * pcix_ecc_error_report: decode and post ereports for PCI-X ECC errors.
 * pe_regs is either a pcix_bdg_error_regs_t (bridge: two ECC register
 * sets, i == 0 primary / i == 1 secondary) or a pcix_error_regs_t (leaf:
 * one set), selected by PCI_BRIDGE_DEV in pe_dflags.  For each valid ECC
 * status block the error phase (address/attribute/data) selects the
 * ereport class; data-phase uncorrectable errors additionally attempt an
 * address-to-handle match via pcix_check_addr().  Secondary CE/UE status
 * bits generate their own ereports.  Returns the accumulated DDI_FM_*
 * severity (fatal > nonfatal > unknown > ok).
 */
static int
pcix_ecc_error_report(dev_info_t *dip, ddi_fm_error_t *derr, pci_erpt_t *erpt_p,
    void *pe_regs)
{
	pcix_error_regs_t *pcix_regs;
	pcix_bdg_error_regs_t *pcix_bdg_regs;
	pcix_ecc_regs_t *pcix_ecc_regs;
	int bridge;
	int i;
	int ecc_phase;
	int ecc_corr;
	int sec_ue;
	int sec_ce;
	int fatal = 0;
	int nonfatal = 0;
	int unknown = 0;
	int ok = 0;
	char buf[FM_MAX_CLASS];

	if (erpt_p->pe_dflags & PCI_BRIDGE_DEV) {
		pcix_bdg_regs = (pcix_bdg_error_regs_t *)pe_regs;
		bridge = 1;
	} else {
		pcix_regs = (pcix_error_regs_t *)pe_regs;
		bridge = 0;
	}

	for (i = 0; i < (bridge ? 2 : 1); i++) {
		int ret = DDI_FM_OK;
		pcix_ecc_regs = bridge ? pcix_bdg_regs->pcix_bdg_ecc_regs[i] :
		    pcix_regs->pcix_ecc_regs;
		if (pcix_ecc_regs->pcix_ecc_vflags & PCIX_ERR_ECC_STS_VALID) {
			/* unpack phase, correctability and secondary bits */
			ecc_phase = (pcix_ecc_regs->pcix_ecc_ctlstat &
			    PCI_PCIX_ECC_PHASE) >> 0x4;
			ecc_corr = (pcix_ecc_regs->pcix_ecc_ctlstat &
			    PCI_PCIX_ECC_CORR);
			sec_ue = (pcix_ecc_regs->pcix_ecc_ctlstat &
			    PCI_PCIX_ECC_S_UE);
			sec_ce = (pcix_ecc_regs->pcix_ecc_ctlstat &
			    PCI_PCIX_ECC_S_CE);

			/*
			 * Build the ereport class in buf and accumulate
			 * severity: address/attribute-phase UEs are fatal,
			 * data-phase UEs defer to the handle-cache lookup,
			 * and all CEs are ok.
			 */
			switch (ecc_phase) {
			    case PCI_PCIX_ECC_PHASE_NOERR:
				break;
			    case PCI_PCIX_ECC_PHASE_FADDR:
			    case PCI_PCIX_ECC_PHASE_SADDR:
				PCI_FM_SEV_INC(ecc_corr ?  DDI_FM_OK :
				    DDI_FM_FATAL);
				(void) snprintf(buf, FM_MAX_CLASS,
				    "%s.%s%s", PCIX_ERROR_SUBCLASS,
				    i ? PCIX_SEC_ERROR_SUBCLASS : "",
				    ecc_corr ? PCIX_ECC_CE_ADDR :
				    PCIX_ECC_UE_ADDR);
				break;
			    case PCI_PCIX_ECC_PHASE_ATTR:
				PCI_FM_SEV_INC(ecc_corr ?
				    DDI_FM_OK : DDI_FM_FATAL);
				(void) snprintf(buf, FM_MAX_CLASS,
				    "%s.%s%s", PCIX_ERROR_SUBCLASS,
				    i ? PCIX_SEC_ERROR_SUBCLASS : "",
				    ecc_corr ? PCIX_ECC_CE_ATTR :
				    PCIX_ECC_UE_ATTR);
				break;
			    case PCI_PCIX_ECC_PHASE_DATA32:
			    case PCI_PCIX_ECC_PHASE_DATA64:
				if (ecc_corr)
					ret = DDI_FM_OK;
				else
					ret = pcix_check_addr(dip, derr,
					    pcix_ecc_regs);
				PCI_FM_SEV_INC(ret);

				(void) snprintf(buf, FM_MAX_CLASS,
				    "%s.%s%s", PCIX_ERROR_SUBCLASS,
				    i ? PCIX_SEC_ERROR_SUBCLASS : "",
				    ecc_corr ? PCIX_ECC_CE_DATA :
				    PCIX_ECC_UE_DATA);
				break;
			}
			/*
			 * Post the phase ereport built above; note the else
			 * below binds to "if (bridge)" - the outer
			 * "if (ecc_phase)" guards both arms.
			 */
			if (ecc_phase)
				if (bridge)
					ddi_fm_ereport_post(dip, buf,
					    derr->fme_ena,
					    DDI_NOSLEEP, FM_VERSION,
					    DATA_TYPE_UINT8, 0,
					    PCIX_SEC_STATUS, DATA_TYPE_UINT16,
					    pcix_bdg_regs->pcix_bdg_sec_stat,
					    PCIX_BDG_STAT, DATA_TYPE_UINT32,
					    pcix_bdg_regs->pcix_bdg_stat,
					    PCIX_ECC_CTLSTAT, DATA_TYPE_UINT32,
					    pcix_ecc_regs->pcix_ecc_ctlstat,
					    PCIX_ECC_ATTR, DATA_TYPE_UINT32,
					    pcix_ecc_regs->pcix_ecc_attr, NULL);
				else
					ddi_fm_ereport_post(dip, buf,
					    derr->fme_ena,
					    DDI_NOSLEEP, FM_VERSION,
					    DATA_TYPE_UINT8, 0,
					    PCIX_COMMAND, DATA_TYPE_UINT16,
					    pcix_regs->pcix_command,
					    PCIX_STATUS, DATA_TYPE_UINT32,
					    pcix_regs->pcix_status,
					    PCIX_ECC_CTLSTAT, DATA_TYPE_UINT32,
					    pcix_ecc_regs->pcix_ecc_ctlstat,
					    PCIX_ECC_ATTR, DATA_TYPE_UINT32,
					    pcix_ecc_regs->pcix_ecc_attr, NULL);
			/* secondary CE/UE get a distinct ereport class */
			if (sec_ce || sec_ue) {
				(void) snprintf(buf, FM_MAX_CLASS,
				    "%s.%s%s", PCIX_ERROR_SUBCLASS,
				    i ? PCIX_SEC_ERROR_SUBCLASS : "",
				    sec_ce ? PCIX_ECC_S_CE : PCIX_ECC_S_UE);
				if (bridge)
					ddi_fm_ereport_post(dip, buf,
					    derr->fme_ena,
					    DDI_NOSLEEP, FM_VERSION,
					    DATA_TYPE_UINT8, 0,
					    PCIX_SEC_STATUS, DATA_TYPE_UINT16,
					    pcix_bdg_regs->pcix_bdg_sec_stat,
					    PCIX_BDG_STAT, DATA_TYPE_UINT32,
					    pcix_bdg_regs->pcix_bdg_stat,
					    PCIX_ECC_CTLSTAT, DATA_TYPE_UINT32,
					    pcix_ecc_regs->pcix_ecc_ctlstat,
					    PCIX_ECC_ATTR, DATA_TYPE_UINT32,
					    pcix_ecc_regs->pcix_ecc_attr, NULL);
				else
					ddi_fm_ereport_post(dip, buf,
					    derr->fme_ena,
					    DDI_NOSLEEP, FM_VERSION,
					    DATA_TYPE_UINT8, 0,
					    PCIX_COMMAND, DATA_TYPE_UINT16,
					    pcix_regs->pcix_command,
					    PCIX_STATUS, DATA_TYPE_UINT32,
					    pcix_regs->pcix_status,
					    PCIX_ECC_CTLSTAT, DATA_TYPE_UINT32,
					    pcix_ecc_regs->pcix_ecc_ctlstat,
					    PCIX_ECC_ATTR, DATA_TYPE_UINT32,
					    pcix_ecc_regs->pcix_ecc_attr, NULL);
				PCI_FM_SEV_INC(sec_ue ? DDI_FM_FATAL :
				    DDI_FM_OK);
			}
		}
	}
	return (fatal ? DDI_FM_FATAL : (nonfatal ? DDI_FM_NONFATAL :
	    (unknown ? DDI_FM_UNKNOWN : DDI_FM_OK)));
}
1721 
/*
 * pcix_bdg_error_report: post ereports for errors latched in a PCI-X
 * bridge's bridge-status and secondary-status registers (one ereport per
 * set bit matched in pcix_err_tbl / pcix_sec_err_tbl), then handle ECC
 * state when the PCI-X capability version supports it.  Severity is
 * accumulated via PCI_FM_SEV_INC() into the fatal/nonfatal/unknown/ok
 * counters and collapsed to a single DDI_FM_* value on return.
 */
static int
pcix_bdg_error_report(dev_info_t *dip, ddi_fm_error_t *derr, pci_erpt_t *erpt_p,
    void *pe_regs)
{
	pcix_bdg_error_regs_t *pcix_bdg_regs = (pcix_bdg_error_regs_t *)pe_regs;
	int fatal = 0;
	int nonfatal = 0;
	int unknown = 0;
	int ok = 0;
	char buf[FM_MAX_CLASS];
	int i;

	/* primary (bridge status) errors */
	if (pcix_bdg_regs->pcix_bdg_vflags & PCIX_BDG_STATUS_VALID) {
		for (i = 0; pcix_err_tbl[i].err_class != NULL; i++) {
			if ((pcix_bdg_regs->pcix_bdg_stat &
			    pcix_err_tbl[i].reg_bit)) {
				(void) snprintf(buf, FM_MAX_CLASS, "%s.%s",
				    PCIX_ERROR_SUBCLASS,
				    pcix_err_tbl[i].err_class);
				ddi_fm_ereport_post(dip, buf, derr->fme_ena,
				    DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8, 0,
				    PCIX_SEC_STATUS, DATA_TYPE_UINT16,
				    pcix_bdg_regs->pcix_bdg_sec_stat,
				    PCIX_BDG_STAT, DATA_TYPE_UINT32,
				    pcix_bdg_regs->pcix_bdg_stat, NULL);
				PCI_FM_SEV_INC(pcix_err_tbl[i].flags);
			}
		}
	}

	/* secondary-side errors */
	if (pcix_bdg_regs->pcix_bdg_vflags & PCIX_BDG_SEC_STATUS_VALID) {
		for (i = 0; pcix_sec_err_tbl[i].err_class != NULL; i++) {
			if ((pcix_bdg_regs->pcix_bdg_sec_stat &
			    pcix_sec_err_tbl[i].reg_bit)) {
				(void) snprintf(buf, FM_MAX_CLASS, "%s.%s%s",
				    PCIX_ERROR_SUBCLASS,
				    PCIX_SEC_ERROR_SUBCLASS,
				    pcix_sec_err_tbl[i].err_class);
				ddi_fm_ereport_post(dip, buf, derr->fme_ena,
				    DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8, 0,
				    PCIX_SEC_STATUS, DATA_TYPE_UINT16,
				    pcix_bdg_regs->pcix_bdg_sec_stat,
				    PCIX_BDG_STAT, DATA_TYPE_UINT32,
				    pcix_bdg_regs->pcix_bdg_stat, NULL);
				PCI_FM_SEV_INC(pcix_sec_err_tbl[i].flags);
			}
		}
	}

	/* Log/Handle ECC errors */
	if (PCIX_ECC_VER_CHECK(pcix_bdg_regs->pcix_bdg_ver)) {
		int ret;

		ret = pcix_ecc_error_report(dip, derr, erpt_p,
		    (void *)pcix_bdg_regs);
		PCI_FM_SEV_INC(ret);
	}
	return (fatal ? DDI_FM_FATAL : (nonfatal ? DDI_FM_NONFATAL :
	    (unknown ? DDI_FM_UNKNOWN : DDI_FM_OK)));
}
1782 
1783 static int
1784 pcix_error_report(dev_info_t *dip, ddi_fm_error_t *derr, pci_erpt_t *erpt_p)
1785 {
1786 	pcix_error_regs_t *pcix_regs = (pcix_error_regs_t *)erpt_p->pe_regs;
1787 	int fatal = 0;
1788 	int nonfatal = 0;
1789 	int unknown = 0;
1790 	int ok = 0;
1791 	char buf[FM_MAX_CLASS];
1792 	int i;
1793 
1794 	if (pcix_regs->pcix_vflags & PCIX_ERR_STATUS_VALID) {
1795 		for (i = 0; pcix_err_tbl[i].err_class != NULL; i++) {
1796 			if (!(pcix_regs->pcix_status & pcix_err_tbl[i].reg_bit))
1797 				continue;
1798 
1799 			(void) snprintf(buf, FM_MAX_CLASS, "%s.%s",
1800 			    PCIX_ERROR_SUBCLASS, pcix_err_tbl[i].err_class);
1801 			ddi_fm_ereport_post(dip, buf, derr->fme_ena,
1802 			    DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8, 0,
1803 			    PCIX_COMMAND, DATA_TYPE_UINT16,
1804 			    pcix_regs->pcix_command, PCIX_STATUS,
1805 			    DATA_TYPE_UINT32, pcix_regs->pcix_status,
1806 			    NULL);
1807 			PCI_FM_SEV_INC(pcix_err_tbl[i].flags);
1808 		}
1809 	}
1810 	/* Log/Handle ECC errors */
1811 	if (PCIX_ECC_VER_CHECK(pcix_regs->pcix_ver)) {
1812 		int ret = pcix_ecc_error_report(dip, derr, erpt_p,
1813 		    (void *)pcix_regs);
1814 		PCI_FM_SEV_INC(ret);
1815 	}
1816 
1817 	return (fatal ? DDI_FM_FATAL : (nonfatal ? DDI_FM_NONFATAL :
1818 	    (unknown ? DDI_FM_UNKNOWN : DDI_FM_OK)));
1819 }
1820 
/*
 * Generate ereports for the fatal, non-fatal and correctable error
 * messages recorded in a PCI Express root complex's AER root error
 * status register.  Returns the aggregate severity observed.
 */
static int
pcie_rc_error_report(dev_info_t *dip, ddi_fm_error_t *derr, pci_erpt_t *erpt_p,
    void *pe_regs)
{
	pcie_adv_error_regs_t *pcie_adv_regs = (pcie_adv_error_regs_t *)pe_regs;
	int fatal = 0;
	int nonfatal = 0;
	int unknown = 0;
	char buf[FM_MAX_CLASS];

	if (pcie_adv_regs->pcie_adv_vflags & PCIE_RC_ERR_STATUS_VALID) {
		pcie_adv_rc_error_regs_t *pcie_rc_regs =
		    pcie_adv_regs->pcie_adv_rc_regs;
		int ce, ue, mult_ce, mult_ue, first_ue_fatal, nfe, fe;

		/* Decompose the root error status register into its bits. */
		ce = pcie_rc_regs->pcie_rc_err_status &
		    PCIE_AER_RE_STS_CE_RCVD;
		ue = pcie_rc_regs->pcie_rc_err_status &
		    PCIE_AER_RE_STS_FE_NFE_RCVD;
		mult_ce = pcie_rc_regs->pcie_rc_err_status &
		    PCIE_AER_RE_STS_MUL_CE_RCVD;
		mult_ue = pcie_rc_regs->pcie_rc_err_status &
		    PCIE_AER_RE_STS_MUL_FE_NFE_RCVD;
		first_ue_fatal = pcie_rc_regs->pcie_rc_err_status &
		    PCIE_AER_RE_STS_FIRST_UC_FATAL;
		nfe = pcie_rc_regs->pcie_rc_err_status &
		    PCIE_AER_RE_STS_NFE_MSGS_RCVD;
		fe = pcie_rc_regs->pcie_rc_err_status &
		    PCIE_AER_RE_STS_FE_MSGS_RCVD;
		/*
		 * log fatal/nonfatal/corrected messages
		 * received by root complex
		 */
		if (ue && fe)
			fatal++;

		if (fe && first_ue_fatal) {
			(void) snprintf(buf, FM_MAX_CLASS,
			    "%s.%s", PCIEX_ERROR_SUBCLASS, PCIEX_RC_FE_MSG);
			pcie_ereport_post(dip, derr, erpt_p, buf,
			    PCIEX_TYPE_RC_UE_MSG);
		}
		if (nfe && !first_ue_fatal) {
			(void) snprintf(buf, FM_MAX_CLASS,
			    "%s.%s", PCIEX_ERROR_SUBCLASS, PCIEX_RC_NFE_MSG);
			pcie_ereport_post(dip, derr, erpt_p, buf,
			    PCIEX_TYPE_RC_UE_MSG);
		}
		if (ce) {
			(void) snprintf(buf, FM_MAX_CLASS,
			    "%s.%s", PCIEX_ERROR_SUBCLASS, PCIEX_RC_CE_MSG);
			pcie_ereport_post(dip, derr, erpt_p, buf,
			    PCIEX_TYPE_RC_CE_MSG);
		}
		if (mult_ce) {
			(void) snprintf(buf, FM_MAX_CLASS,
			    "%s.%s", PCIEX_ERROR_SUBCLASS, PCIEX_RC_MCE_MSG);
			pcie_ereport_post(dip, derr, erpt_p, buf,
			    PCIEX_TYPE_RC_MULT_MSG);
		}
		if (mult_ue) {
			(void) snprintf(buf, FM_MAX_CLASS,
			    "%s.%s", PCIEX_ERROR_SUBCLASS, PCIEX_RC_MUE_MSG);
			pcie_ereport_post(dip, derr, erpt_p, buf,
			    PCIEX_TYPE_RC_MULT_MSG);
		}
	}
	return (fatal ? DDI_FM_FATAL : (nonfatal ? DDI_FM_NONFATAL :
	    (unknown ? DDI_FM_UNKNOWN : DDI_FM_OK)));
}
1891 
/*
 * Generate ereports for the errors captured from a PCI Express device.
 * Handles, in order: the PCI-X bridge registers of a PCIe-to-PCI-X
 * bridge, the baseline (non-AER) PCIe error status, the AER
 * uncorrectable and correctable status, root complex messages, and the
 * secondary uncorrectable status of a PCIe-to-PCI bridge.  Returns the
 * aggregate severity of everything reported.
 */
static int
pcie_error_report(dev_info_t *dip, ddi_fm_error_t *derr, pci_erpt_t *erpt_p)
{
	int fatal = 0;
	int nonfatal = 0;
	int unknown = 0;
	int ok = 0;
	char buf[FM_MAX_CLASS];
	int i;
	pcie_error_regs_t *pcie_regs = (pcie_error_regs_t *)erpt_p->pe_regs;
	pcie_adv_error_regs_t *pcie_adv_regs;
	pcie_adv_bdg_error_regs_t *pcie_bdg_regs;

	/* PCIe-to-PCI-X bridge: report the PCI-X bridge registers first. */
	if ((erpt_p->pe_dflags & PCI_BRIDGE_DEV) &&
	    (erpt_p->pe_dflags & PCIX_DEV)) {
		int ret = pcix_bdg_error_report(dip, derr, erpt_p,
		    (void *)pcie_regs->pcix_bdg_regs);
		PCI_FM_SEV_INC(ret);
	}

	/* Device without the AER capability: use the baseline PCIe status. */
	if (!(erpt_p->pe_dflags & PCIEX_ADV_DEV)) {
		if (!(pcie_regs->pcie_vflags & PCIE_ERR_STATUS_VALID))
			goto done;
		for (i = 0; pciex_nadv_err_tbl[i].err_class != NULL; i++) {
			if (!(pcie_regs->pcie_err_status &
			    pciex_nadv_err_tbl[i].reg_bit))
				continue;

			(void) snprintf(buf, FM_MAX_CLASS, "%s.%s",
			    PCIEX_ERROR_SUBCLASS,
			    pciex_nadv_err_tbl[i].err_class);
			pcie_ereport_post(dip, derr, erpt_p, buf,
			    PCIEX_TYPE_GEN);
			PCI_FM_SEV_INC(pciex_nadv_err_tbl[i].flags);
		}
		goto done;
	}

	pcie_adv_regs = pcie_regs->pcie_adv_regs;

	/*
	 * Log PCI Express uncorrectable errors
	 */
	if (pcie_adv_regs->pcie_adv_vflags & PCIE_UE_STATUS_VALID) {
		for (i = 0; pciex_ue_err_tbl[i].err_class != NULL; i++) {
			if (!(pcie_adv_regs->pcie_ue_status &
			    pciex_ue_err_tbl[i].reg_bit))
				continue;

			(void) snprintf(buf, FM_MAX_CLASS,
			    "%s.%s", PCIEX_ERROR_SUBCLASS,
			    pciex_ue_err_tbl[i].err_class);

			/* Reset the captured source id before any lookup. */
			pcie_adv_regs->pcie_adv_bdf = 0;
			/*
			 * NOTE(review): when this bit is the only
			 * header-logged UE bit set, pcie_check_addr() is
			 * used (presumably to decode the logged header into
			 * a target address); otherwise the ereport is
			 * posted without an address lookup.
			 */
			if ((pcie_adv_regs->pcie_ue_status &
			    PCIE_AER_UCE_LOG_BITS) !=
			    pciex_ue_err_tbl[i].reg_bit) {
				PCI_FM_SEV_INC(pciex_ue_err_tbl[i].flags);
				pcie_ereport_post(dip, derr, erpt_p, buf,
				    PCIEX_TYPE_UE);
			} else {
				pcie_check_addr(dip, derr, erpt_p);
				/*
				 * fatal/ok errors are fatal/ok
				 * regardless of if we find a handle
				 */
				if (pciex_ue_err_tbl[i].flags == DDI_FM_FATAL)
					derr->fme_status = DDI_FM_FATAL;
				else if (pciex_ue_err_tbl[i].flags == DDI_FM_OK)
					derr->fme_status = DDI_FM_OK;
				pcie_ereport_post(dip, derr, erpt_p, buf,
				    PCIEX_TYPE_UE);
				PCI_FM_SEV_INC(derr->fme_status);
			}
		}
	}

	/*
	 * Log PCI Express correctable errors
	 */
	if (pcie_adv_regs->pcie_adv_vflags & PCIE_CE_STATUS_VALID) {
		for (i = 0; pciex_ce_err_tbl[i].err_class != NULL; i++) {
			if (!(pcie_adv_regs->pcie_ce_status &
			    pciex_ce_err_tbl[i].reg_bit))
				continue;

			(void) snprintf(buf, FM_MAX_CLASS,
			    "%s.%s", PCIEX_ERROR_SUBCLASS,
			    pciex_ce_err_tbl[i].err_class);
			pcie_ereport_post(dip, derr, erpt_p, buf,
			    PCIEX_TYPE_CE);
		}
	}

	if (!(erpt_p->pe_dflags & PCI_BRIDGE_DEV))
		goto done;

	/* Root complex: report received error messages. */
	if (erpt_p->pe_dflags & PCIEX_RC_DEV) {
		int ret = pcie_rc_error_report(dip, derr, erpt_p,
		    (void *)pcie_adv_regs);
		PCI_FM_SEV_INC(ret);
	}

	if (!((erpt_p->pe_dflags & PCIEX_2PCI_DEV) &&
	    (pcie_adv_regs->pcie_adv_vflags & PCIE_SUE_STATUS_VALID)))
		goto done;

	/* PCIe-to-PCI bridge: log secondary uncorrectable errors. */
	pcie_bdg_regs = pcie_adv_regs->pcie_adv_bdg_regs;

	for (i = 0; pcie_sue_err_tbl[i].err_class != NULL; i++) {
		if ((pcie_bdg_regs->pcie_sue_status &
		    pcie_sue_err_tbl[i].reg_bit)) {
			(void) snprintf(buf, FM_MAX_CLASS, "%s.%s",
			    PCIEX_ERROR_SUBCLASS,
			    pcie_sue_err_tbl[i].err_class);

			if ((pcie_bdg_regs->pcie_sue_status &
			    PCIE_AER_SUCE_LOG_BITS) !=
			    pcie_sue_err_tbl[i].reg_bit) {
				PCI_FM_SEV_INC(pcie_sue_err_tbl[i].flags);
				ddi_fm_ereport_post(dip, buf, derr->fme_ena,
				    DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8, 0,
				    PCIEX_SEC_UE_STATUS, DATA_TYPE_UINT32,
				    pcie_bdg_regs->pcie_sue_status,
#ifdef DEBUG
				    PCIEX_SUE_HDR0, DATA_TYPE_UINT32,
				    pcie_bdg_regs->pcie_sue_hdr0,
				    PCIEX_SUE_HDR1, DATA_TYPE_UINT32,
				    pcie_bdg_regs->pcie_sue_hdr[0],
				    PCIEX_SUE_HDR2, DATA_TYPE_UINT32,
				    pcie_bdg_regs->pcie_sue_hdr[1],
				    PCIEX_SUE_HDR3, DATA_TYPE_UINT32,
				    pcie_bdg_regs->pcie_sue_hdr[2],
#endif
				    NULL);
			} else {
				pcie_adv_regs->pcie_adv_bdf = 0;
				pcie_pci_check_addr(dip, derr, erpt_p);
				/*
				 * fatal/nonfatal errors are fatal/nonfatal
				 * regardless of if we find a handle
				 */
				if (pcie_sue_err_tbl[i].flags == DDI_FM_FATAL)
					derr->fme_status = DDI_FM_FATAL;
				else if (pcie_sue_err_tbl[i].flags ==
				    DDI_FM_NONFATAL)
					derr->fme_status = DDI_FM_NONFATAL;

				ddi_fm_ereport_post(dip, buf, derr->fme_ena,
				    DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8, 0,
				    PCIEX_SEC_UE_STATUS, DATA_TYPE_UINT32,
				    pcie_bdg_regs->pcie_sue_status,
				    PCIEX_SRC_ID, DATA_TYPE_UINT16,
				    pcie_adv_regs->pcie_adv_bdf,
				    PCIEX_SRC_VALID, DATA_TYPE_BOOLEAN_VALUE,
				    (pcie_adv_regs->pcie_adv_bdf != NULL) ?
				    1 : NULL,
#ifdef DEBUG
				    PCIEX_SUE_HDR0, DATA_TYPE_UINT32,
				    pcie_bdg_regs->pcie_sue_hdr0,
				    PCIEX_SUE_HDR1, DATA_TYPE_UINT32,
				    pcie_bdg_regs->pcie_sue_hdr[0],
				    PCIEX_SUE_HDR2, DATA_TYPE_UINT32,
				    pcie_bdg_regs->pcie_sue_hdr[1],
				    PCIEX_SUE_HDR3, DATA_TYPE_UINT32,
				    pcie_bdg_regs->pcie_sue_hdr[2],
#endif
				    NULL);
				PCI_FM_SEV_INC(derr->fme_status);
			}
		}
	}
done:
	return (fatal ? DDI_FM_FATAL : (nonfatal ? DDI_FM_NONFATAL :
	    (unknown ? DDI_FM_UNKNOWN : DDI_FM_OK)));
}
2068 
/*
 * Generate ereports for all errors captured for a device and set
 * derr->fme_status to the aggregate severity of what was found.
 * Generic PCI status errors are reported here directly; PCI Express,
 * PCI-X bridge and PCI-X device errors are delegated to their specific
 * report routines, and bridge errors to pci_bdg_error_report().
 */
static void
pci_error_report(dev_info_t *dip, ddi_fm_error_t *derr, pci_erpt_t *erpt_p)
{
	int fatal = 0;
	int nonfatal = 0;
	int unknown = 0;
	int ok = 0;
	char buf[FM_MAX_CLASS];
	int i;

	if (derr->fme_flag == DDI_FM_ERR_UNEXPECTED) {
		/*
		 * Log generic PCI errors.
		 */
		for (i = 0; pci_err_tbl[i].err_class != NULL; i++) {
			if (!(erpt_p->pe_pci_regs->pci_err_status &
			    pci_err_tbl[i].reg_bit) ||
			    !(erpt_p->pe_pci_regs->pci_vflags &
			    PCI_ERR_STATUS_VALID))
				continue;
			/*
			 * Generate an ereport for this error bit.
			 */
			(void) snprintf(buf, FM_MAX_CLASS, "%s.%s",
			    PCI_ERROR_SUBCLASS, pci_err_tbl[i].err_class);
			ddi_fm_ereport_post(dip, buf, derr->fme_ena,
			    DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8, 0,
			    PCI_CONFIG_STATUS, DATA_TYPE_UINT16,
			    erpt_p->pe_pci_regs->pci_err_status,
			    PCI_CONFIG_COMMAND, DATA_TYPE_UINT16,
			    erpt_p->pe_pci_regs->pci_cfg_comm, NULL);

			/*
			 * The meaning of SERR is different for PCIEX (just
			 * implies a message has been sent) so we don't want to
			 * treat that one as fatal.
			 */
			if ((erpt_p->pe_dflags & PCIEX_DEV) &&
			    pci_err_tbl[i].reg_bit == PCI_STAT_S_SYSERR) {
				unknown++;
			} else {
				PCI_FM_SEV_INC(pci_err_tbl[i].flags);
			}
		}
		/* Dispatch to the capability-specific report routine. */
		if (erpt_p->pe_dflags & PCIEX_DEV) {
			int ret = pcie_error_report(dip, derr, erpt_p);
			PCI_FM_SEV_INC(ret);
		} else if (erpt_p->pe_dflags & PCIX_DEV) {
			if (erpt_p->pe_dflags & PCI_BRIDGE_DEV) {
				int ret = pcix_bdg_error_report(dip, derr,
				    erpt_p, erpt_p->pe_regs);
				PCI_FM_SEV_INC(ret);
			} else {
				int ret = pcix_error_report(dip, derr, erpt_p);
				PCI_FM_SEV_INC(ret);
			}
		}
	}

	/* Bridge devices: also run pci_bdg_error_report(). */
	if ((erpt_p->pe_dflags & PCI_BRIDGE_DEV)) {
		int ret = pci_bdg_error_report(dip, derr, erpt_p);
		PCI_FM_SEV_INC(ret);
	}

	derr->fme_status = (fatal ? DDI_FM_FATAL : (nonfatal ? DDI_FM_NONFATAL :
	    (unknown ? DDI_FM_UNKNOWN : DDI_FM_OK)));
}
2136 
2137 void
2138 pci_ereport_post(dev_info_t *dip, ddi_fm_error_t *derr, uint16_t *xx_status)
2139 {
2140 	struct i_ddi_fmhdl *fmhdl;
2141 	pci_erpt_t *erpt_p;
2142 
2143 	fmhdl = DEVI(dip)->devi_fmhdl;
2144 	if (!DDI_FM_EREPORT_CAP(ddi_fm_capable(dip)) &&
2145 	    !DDI_FM_ERRCB_CAP(ddi_fm_capable(dip))) {
2146 		i_ddi_drv_ereport_post(dip, DVR_EFMCAP, NULL, DDI_NOSLEEP);
2147 		return;
2148 	}
2149 
2150 	ASSERT(fmhdl);
2151 
2152 	if (derr->fme_ena == NULL)
2153 		derr->fme_ena = fm_ena_generate(0, FM_ENA_FMT1);
2154 
2155 	erpt_p = (pci_erpt_t *)fmhdl->fh_bus_specific;
2156 	if (erpt_p == NULL)
2157 		return;
2158 
2159 	pci_regs_gather(dip, erpt_p);
2160 	pci_error_report(dip, derr, erpt_p);
2161 	pci_regs_clear(erpt_p);
2162 
2163 	if (xx_status != NULL)
2164 		*xx_status = erpt_p->pe_pci_regs->pci_err_status;
2165 }
2166 
2167 /*
2168  * private version of walk_devs() that can be used during panic. No
2169  * sleeping or locking required.
2170  */
2171 static int
2172 pci_fm_walk_devs(dev_info_t *dip, int (*f)(dev_info_t *, void *), void *arg)
2173 {
2174 	while (dip) {
2175 		switch ((*f)(dip, arg)) {
2176 		case DDI_WALK_TERMINATE:
2177 			return (DDI_WALK_TERMINATE);
2178 		case DDI_WALK_CONTINUE:
2179 			if (pci_fm_walk_devs(ddi_get_child(dip), f,
2180 			    arg) == DDI_WALK_TERMINATE)
2181 				return (DDI_WALK_TERMINATE);
2182 			break;
2183 		case DDI_WALK_PRUNECHILD:
2184 			break;
2185 		}
2186 		dip = ddi_get_next_sibling(dip);
2187 	}
2188 	return (DDI_WALK_CONTINUE);
2189 }
2190 
2191 /*
2192  * need special version of ddi_fm_ereport_post() as the leaf driver may
2193  * not be hardened.
2194  */
/*
 * Construct and post a single ereport on behalf of a target device.
 * At panic time the ereport is built inside a reserved errorq element
 * (no allocation) and committed synchronously; otherwise nvlists are
 * allocated, the ereport is posted to the event channel, and the lists
 * are freed.  The variadic tail is a (name, type, value, ...) payload
 * list terminated by NULL, as for ddi_fm_ereport_post().
 */
static void
pci_fm_ereport_post(dev_info_t *dip, const char *error_class, uint64_t ena,
    uint8_t version, ...)
{
	char *name;
	char device_path[MAXPATHLEN];
	char ddi_error_class[FM_MAX_CLASS];
	nvlist_t *ereport, *detector;
	nv_alloc_t *nva;
	errorq_elem_t *eqep;
	va_list ap;

	if (panicstr) {
		/* No allocation at panic time: use a reserved errorq slot. */
		eqep = errorq_reserve(ereport_errorq);
		if (eqep == NULL)
			return;
		ereport = errorq_elem_nvl(ereport_errorq, eqep);
		nva = errorq_elem_nva(ereport_errorq, eqep);
		detector = fm_nvlist_create(nva);
	} else {
		ereport = fm_nvlist_create(NULL);
		detector = fm_nvlist_create(NULL);
	}

	/* Identify the detector by its device path (dev-scheme FMRI). */
	(void) ddi_pathname(dip, device_path);
	fm_fmri_dev_set(detector, FM_DEV_SCHEME_VERSION, NULL,
	    device_path, NULL);
	(void) snprintf(ddi_error_class, FM_MAX_CLASS, "%s.%s",
	    DDI_IO_CLASS, error_class);
	fm_ereport_set(ereport, version, ddi_error_class, ena, detector, NULL);

	/* Append the caller's payload name/value pairs. */
	va_start(ap, version);
	name = va_arg(ap, char *);
	(void) i_fm_payload_set(ereport, name, ap);
	va_end(ap);

	if (panicstr) {
		errorq_commit(ereport_errorq, eqep, ERRORQ_SYNC);
	} else {
		(void) fm_ereport_post(ereport, EVCH_TRYHARD);
		fm_nvlist_destroy(ereport, FM_NVA_FREE);
		fm_nvlist_destroy(detector, FM_NVA_FREE);
	}
}
2239 
2240 static int
2241 pci_check_regs(dev_info_t *dip, void *arg)
2242 {
2243 	int reglen;
2244 	int rn;
2245 	int totreg;
2246 	pci_regspec_t *drv_regp;
2247 	pci_target_err_t *tgt_err = (pci_target_err_t *)arg;
2248 
2249 	if (tgt_err->tgt_pci_space == PCI_REG_ADDR_G(PCI_ADDR_CONFIG)) {
2250 		/*
2251 		 * for config space, we need to check if the given address
2252 		 * is a valid config space address for this device - based
2253 		 * on pci_phys_hi of the config space entry in reg property.
2254 		 */
2255 		if (ddi_getlongprop(DDI_DEV_T_NONE, dip, DDI_PROP_DONTPASS,
2256 		    "reg", (caddr_t)&drv_regp, &reglen) != DDI_SUCCESS)
2257 			return (DDI_WALK_CONTINUE);
2258 
2259 		totreg = reglen / sizeof (pci_regspec_t);
2260 		for (rn = 0; rn < totreg; rn++) {
2261 			if (tgt_err->tgt_pci_space ==
2262 			    PCI_REG_ADDR_G(drv_regp[rn].pci_phys_hi) &&
2263 			    (tgt_err->tgt_pci_addr & (PCI_REG_BUS_M |
2264 			    PCI_REG_DEV_M | PCI_REG_FUNC_M)) ==
2265 			    (drv_regp[rn].pci_phys_hi & (PCI_REG_BUS_M |
2266 			    PCI_REG_DEV_M | PCI_REG_FUNC_M))) {
2267 				tgt_err->tgt_dip = dip;
2268 				kmem_free(drv_regp, reglen);
2269 				return (DDI_WALK_TERMINATE);
2270 			}
2271 		}
2272 		kmem_free(drv_regp, reglen);
2273 	} else {
2274 		/*
2275 		 * for non config space, need to check reg to look
2276 		 * for any non-relocable mapping, otherwise check
2277 		 * assigned-addresses.
2278 		 */
2279 		if (ddi_getlongprop(DDI_DEV_T_NONE, dip, DDI_PROP_DONTPASS,
2280 		    "reg", (caddr_t)&drv_regp, &reglen) != DDI_SUCCESS)
2281 			return (DDI_WALK_CONTINUE);
2282 
2283 		totreg = reglen / sizeof (pci_regspec_t);
2284 		for (rn = 0; rn < totreg; rn++) {
2285 			if ((drv_regp[rn].pci_phys_hi & PCI_RELOCAT_B) &&
2286 			    (tgt_err->tgt_pci_space == TGT_PCI_SPACE_UNKNOWN ||
2287 			    tgt_err->tgt_pci_space ==
2288 			    PCI_REG_ADDR_G(drv_regp[rn].pci_phys_hi)) &&
2289 			    (tgt_err->tgt_pci_addr >=
2290 			    (uint64_t)drv_regp[rn].pci_phys_low +
2291 			    ((uint64_t)drv_regp[rn].pci_phys_mid << 32)) &&
2292 			    (tgt_err->tgt_pci_addr <
2293 			    (uint64_t)drv_regp[rn].pci_phys_low +
2294 			    ((uint64_t)drv_regp[rn].pci_phys_mid << 32) +
2295 			    (uint64_t)drv_regp[rn].pci_size_low +
2296 			    ((uint64_t)drv_regp[rn].pci_size_hi << 32))) {
2297 				tgt_err->tgt_dip = dip;
2298 				kmem_free(drv_regp, reglen);
2299 				return (DDI_WALK_TERMINATE);
2300 			}
2301 		}
2302 		kmem_free(drv_regp, reglen);
2303 
2304 		if (ddi_getlongprop(DDI_DEV_T_NONE, dip, DDI_PROP_DONTPASS,
2305 		    "assigned-addresses", (caddr_t)&drv_regp, &reglen) !=
2306 		    DDI_SUCCESS)
2307 			return (DDI_WALK_CONTINUE);
2308 
2309 		totreg = reglen / sizeof (pci_regspec_t);
2310 		for (rn = 0; rn < totreg; rn++) {
2311 			if ((tgt_err->tgt_pci_space == TGT_PCI_SPACE_UNKNOWN ||
2312 			    tgt_err->tgt_pci_space ==
2313 			    PCI_REG_ADDR_G(drv_regp[rn].pci_phys_hi)) &&
2314 			    (tgt_err->tgt_pci_addr >=
2315 			    (uint64_t)drv_regp[rn].pci_phys_low +
2316 			    ((uint64_t)drv_regp[rn].pci_phys_mid << 32)) &&
2317 			    (tgt_err->tgt_pci_addr <
2318 			    (uint64_t)drv_regp[rn].pci_phys_low +
2319 			    ((uint64_t)drv_regp[rn].pci_phys_mid << 32) +
2320 			    (uint64_t)drv_regp[rn].pci_size_low +
2321 			    ((uint64_t)drv_regp[rn].pci_size_hi << 32))) {
2322 				tgt_err->tgt_dip = dip;
2323 				kmem_free(drv_regp, reglen);
2324 				return (DDI_WALK_TERMINATE);
2325 			}
2326 		}
2327 		kmem_free(drv_regp, reglen);
2328 	}
2329 	return (DDI_WALK_CONTINUE);
2330 }
2331 
2332 /*
2333  * impl_fix_ranges - fixes the config space entry of the "ranges"
2334  * property on psycho+ platforms.  (if changing this function please make sure
2335  * to change the pci_fix_ranges function in pcipsy.c)
2336  */
2337 /*ARGSUSED*/
2338 static void
2339 pci_fix_ranges(dev_info_t *dip, pci_ranges_t *pci_ranges, int nrange)
2340 {
2341 #if defined(__sparc)
2342 	char *name = ddi_binding_name(dip);
2343 
2344 	if ((strcmp(name, "pci108e,8000") == 0) ||
2345 	    (strcmp(name, "pci108e,a000") == 0) ||
2346 	    (strcmp(name, "pci108e,a001") == 0)) {
2347 		int i;
2348 		for (i = 0; i < nrange; i++, pci_ranges++)
2349 			if ((pci_ranges->child_high & PCI_REG_ADDR_M) ==
2350 			    PCI_ADDR_CONFIG)
2351 				pci_ranges->parent_low |=
2352 				    pci_ranges->child_high;
2353 	}
2354 #endif
2355 }
2356 
/*
 * Walk callback (started at the devinfo root) that looks for a
 * top-level pci/pciex nexus whose "ranges" property covers the captured
 * error address tgt_err_addr.  On a hit the address is translated into
 * a pci address and the nexus's children are searched with
 * pci_check_regs() for the device that decodes it.
 */
static int
pci_check_ranges(dev_info_t *dip, void *arg)
{
	uint64_t range_parent_begin;
	uint64_t range_parent_size;
	uint64_t range_parent_end;
	uint32_t space_type;
	uint32_t bus_num;
	uint32_t range_offset;
	pci_ranges_t *pci_ranges, *rangep;
	pci_bus_range_t *pci_bus_rangep;
	int pci_ranges_length;
	int nrange;
	pci_target_err_t *tgt_err = (pci_target_err_t *)arg;
	int i, size;
	/* Only consider pci/pciex nexus nodes. */
	if (strcmp(ddi_node_name(dip), "pci") != 0 &&
	    strcmp(ddi_node_name(dip), "pciex") != 0)
		return (DDI_WALK_CONTINUE);

	/*
	 * Get the ranges property. Note we only look at the top level pci
	 * node (hostbridge) which has a ranges property of type pci_ranges_t
	 * not at pci-pci bridges.
	 */
	if (ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, "ranges",
	    (caddr_t)&pci_ranges, &pci_ranges_length) != DDI_SUCCESS) {
		/*
		 * no ranges property - no translation needed
		 */
		tgt_err->tgt_pci_addr = tgt_err->tgt_err_addr;
		tgt_err->tgt_pci_space = TGT_PCI_SPACE_UNKNOWN;
		/* Use the lock-free walker at panic time. */
		if (panicstr)
			(void) pci_fm_walk_devs(ddi_get_child(dip),
			    pci_check_regs, (void *)tgt_err);
		else {
			int circ = 0;
			ndi_devi_enter(dip, &circ);
			ddi_walk_devs(ddi_get_child(dip), pci_check_regs,
			    (void *)tgt_err);
			ndi_devi_exit(dip, circ);
		}
		if (tgt_err->tgt_dip != NULL)
			return (DDI_WALK_TERMINATE);
		return (DDI_WALK_PRUNECHILD);
	}
	nrange = pci_ranges_length / sizeof (pci_ranges_t);
	rangep = pci_ranges;

	/* Need to fix the pci ranges property for psycho based systems */
	pci_fix_ranges(dip, pci_ranges, nrange);

	for (i = 0; i < nrange; i++, rangep++) {
		range_parent_begin = ((uint64_t)rangep->parent_high << 32) +
		    rangep->parent_low;
		range_parent_size = ((uint64_t)rangep->size_high << 32) +
		    rangep->size_low;
		range_parent_end = range_parent_begin + range_parent_size - 1;

		if ((tgt_err->tgt_err_addr < range_parent_begin) ||
		    (tgt_err->tgt_err_addr > range_parent_end)) {
			/* Not in range */
			continue;
		}
		space_type = PCI_REG_ADDR_G(rangep->child_high);
		if (space_type == PCI_REG_ADDR_G(PCI_ADDR_CONFIG)) {
			/* Config space address - check bus range */
			range_offset = tgt_err->tgt_err_addr -
			    range_parent_begin;
			bus_num = PCI_REG_BUS_G(range_offset);
			if (ddi_getlongprop(DDI_DEV_T_ANY, dip,
			    DDI_PROP_DONTPASS, "bus-range",
			    (caddr_t)&pci_bus_rangep, &size) != DDI_SUCCESS) {
				continue;
			}
			if ((bus_num < pci_bus_rangep->lo) ||
			    (bus_num > pci_bus_rangep->hi)) {
				/*
				 * Bus number not appropriate for this
				 * pci nexus.
				 */
				kmem_free(pci_bus_rangep, size);
				continue;
			}
			kmem_free(pci_bus_rangep, size);
		}

		/* We have a match if we get here - compute pci address */
		tgt_err->tgt_pci_addr = tgt_err->tgt_err_addr -
		    range_parent_begin;
		tgt_err->tgt_pci_addr += (((uint64_t)rangep->child_mid << 32) +
		    rangep->child_low);
		tgt_err->tgt_pci_space = space_type;
		/* Walk this nexus's children for the target device. */
		if (panicstr)
			(void) pci_fm_walk_devs(ddi_get_child(dip),
			    pci_check_regs, (void *)tgt_err);
		else {
			int circ = 0;
			ndi_devi_enter(dip, &circ);
			ddi_walk_devs(ddi_get_child(dip), pci_check_regs,
			    (void *)tgt_err);
			ndi_devi_exit(dip, circ);
		}
		if (tgt_err->tgt_dip != NULL) {
			kmem_free(pci_ranges, pci_ranges_length);
			return (DDI_WALK_TERMINATE);
		}
	}
	kmem_free(pci_ranges, pci_ranges_length);
	return (DDI_WALK_PRUNECHILD);
}
2467 
2468 /*
2469  * Function used to drain pci_target_queue, either during panic or after softint
2470  * is generated, to generate target device ereports based on captured physical
2471  * addresses
2472  */
2473 /*ARGSUSED*/
2474 static void
2475 pci_target_drain(void *private_p, pci_target_err_t *tgt_err)
2476 {
2477 	char buf[FM_MAX_CLASS];
2478 
2479 	/*
2480 	 * The following assumes that all pci_pci bridge devices
2481 	 * are configured as transparant. Find the top-level pci
2482 	 * nexus which has tgt_err_addr in one of its ranges, converting this
2483 	 * to a pci address in the process. Then starting at this node do
2484 	 * another tree walk to find a device with the pci address we've
2485 	 * found within range of one of it's assigned-addresses properties.
2486 	 */
2487 	tgt_err->tgt_dip = NULL;
2488 	if (panicstr)
2489 		(void) pci_fm_walk_devs(ddi_root_node(), pci_check_ranges,
2490 		    (void *)tgt_err);
2491 	else
2492 		ddi_walk_devs(ddi_root_node(), pci_check_ranges,
2493 		    (void *)tgt_err);
2494 	if (tgt_err->tgt_dip == NULL)
2495 		return;
2496 
2497 	(void) snprintf(buf, FM_MAX_CLASS, "%s.%s", tgt_err->tgt_bridge_type,
2498 	    tgt_err->tgt_err_class);
2499 	pci_fm_ereport_post(tgt_err->tgt_dip, buf, tgt_err->tgt_err_ena, 0,
2500 	    PCI_PA, DATA_TYPE_UINT64, tgt_err->tgt_err_addr, NULL);
2501 }
2502 
2503 void
2504 pci_target_enqueue(uint64_t ena, char *class, char *bridge_type, uint64_t addr)
2505 {
2506 	pci_target_err_t tgt_err;
2507 
2508 	tgt_err.tgt_err_ena = ena;
2509 	tgt_err.tgt_err_class = class;
2510 	tgt_err.tgt_bridge_type = bridge_type;
2511 	tgt_err.tgt_err_addr = addr;
2512 	errorq_dispatch(pci_target_queue, (void *)&tgt_err,
2513 	    sizeof (pci_target_err_t), ERRORQ_ASYNC);
2514 }
2515 
2516 void
2517 pci_targetq_init(void)
2518 {
2519 	/*
2520 	 * PCI target errorq, to schedule async handling of generation of
2521 	 * target device ereports based on captured physical address.
2522 	 * The errorq is created here but destroyed when _fini is called
2523 	 * for the pci module.
2524 	 */
2525 	if (pci_target_queue == NULL) {
2526 		pci_target_queue = errorq_create("pci_target_queue",
2527 		    (errorq_func_t)pci_target_drain, (void *)NULL,
2528 		    TARGET_MAX_ERRS, sizeof (pci_target_err_t), FM_ERR_PIL,
2529 		    ERRORQ_VITAL);
2530 		if (pci_target_queue == NULL)
2531 			panic("failed to create required system error queue");
2532 	}
2533 }
2534