/* Copyright 2013-2014 IBM Corp.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * 	http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
 * implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <skiboot.h>
#include <p7ioc.h>
#include <p7ioc-regs.h>
#include <io.h>
#include <timebase.h>
#include <affinity.h>
#include <pci-cfg.h>
#include <pci.h>
#include <pci-slot.h>
#include <interrupts.h>
#include <opal.h>
#include <ccan/str/str.h>

#define PHBDBG(p, fmt, a...)	prlog(PR_DEBUG, "PHB#%04x: " fmt, \
				      (p)->phb.opal_id, ## a)
#define PHBERR(p, fmt, a...)	prlog(PR_ERR, "PHB#%04x: " fmt, \
				      (p)->phb.opal_id, ## a)

/* Helper to select an IODA table entry */
static inline void p7ioc_phb_ioda_sel(struct p7ioc_phb *p, uint32_t table,
				      uint32_t addr, bool autoinc)
{
	out_be64(p->regs + PHB_IODA_ADDR,
		 (autoinc ? PHB_IODA_AD_AUTOINC : 0)	|
		 SETFIELD(PHB_IODA_AD_TSEL, 0ul, table)	|
		 SETFIELD(PHB_IODA_AD_TADR, 0ul, addr));
}
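/*
 * Typical usage, as seen throughout this file: select a table entry,
 * then access it through PHB_IODA_DATA0. With autoinc set, each
 * access to the data register advances the table address, so a
 * whole table can be streamed:
 *
 *	p7ioc_phb_ioda_sel(p, IODA_TBL_PEEV, 0, true);
 *	peev0 = in_be64(p->regs + PHB_IODA_DATA0);
 *	peev1 = in_be64(p->regs + PHB_IODA_DATA0);
 */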

static bool p7ioc_phb_fenced(struct p7ioc_phb *p)
{
	struct p7ioc *ioc = p->ioc;
	uint64_t fence, fbits;

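	/*
	 * Each PHB appears to own a nibble in the chip fence shadow
	 * register, of which the two top bits (0x3) flag a fence;
	 * p->index selects the nibble. This layout is inferred from
	 * the constant below, not from documentation.
	 */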
	fbits = 0x0003000000000000UL >> (p->index * 4);
	fence = in_be64(ioc->regs + P7IOC_CHIP_FENCE_SHADOW);

	return (fence & fbits) != 0;
}

/*
 * Configuration space access
 *
 * The PHB lock is assumed to be already held
 */
static int64_t p7ioc_pcicfg_check(struct p7ioc_phb *p, uint32_t bdfn,
				  uint32_t offset, uint32_t size)
{
	uint32_t sm = size - 1;

	if (offset > 0xfff || bdfn > 0xffff)
		return OPAL_PARAMETER;
	if (offset & sm)
		return OPAL_PARAMETER;

	/* The root bus only has a device at 0 and we get into an
	 * error state if we try to probe beyond that, so let's
	 * avoid that and just return an error to Linux
	 */
	if ((bdfn >> 8) == 0 && (bdfn & 0xff))
		return OPAL_HARDWARE;

	/* Check PHB state */
	if (p->state == P7IOC_PHB_STATE_BROKEN)
		return OPAL_HARDWARE;

	return OPAL_SUCCESS;
}

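/*
 * The generated accessors below implement the standard indirect
 * config cycle: write PHB_CA_ENABLE together with the BDFN and
 * register number to PHB_CONFIG_ADDRESS, then transfer the data
 * through PHB_CONFIG_DATA. The "(offset & (4 - sizeof(type)))"
 * term selects the byte lane within the 4-byte data window; for
 * example a 16-bit read at offset 0x3e goes through
 * PHB_CONFIG_DATA + 2.
 */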
#define P7IOC_PCI_CFG_READ(size, type)	\
static int64_t p7ioc_pcicfg_read##size(struct phb *phb, uint32_t bdfn,	\
				       uint32_t offset, type *data)	\
{									\
	struct p7ioc_phb *p = phb_to_p7ioc_phb(phb);			\
	uint64_t addr;							\
	void *base = p->regs;						\
	int64_t rc;							\
									\
	/* Initialize data in case of error */				\
	*data = (type)0xffffffff;					\
									\
	rc = p7ioc_pcicfg_check(p, bdfn, offset, sizeof(type));		\
	if (rc)								\
		return rc;						\
									\
	if (p7ioc_phb_fenced(p)) {					\
		if (!(p->flags & P7IOC_PHB_CFG_USE_ASB))		\
			return OPAL_HARDWARE;				\
									\
		base = p->regs_asb;					\
	} else if ((p->flags & P7IOC_PHB_CFG_BLOCKED) && bdfn != 0) {	\
		return OPAL_HARDWARE;					\
	}								\
									\
	addr = PHB_CA_ENABLE;						\
	addr = SETFIELD(PHB_CA_BDFN, addr, bdfn);			\
	addr = SETFIELD(PHB_CA_REG, addr, offset);			\
	out_be64(base + PHB_CONFIG_ADDRESS, addr);			\
	*data = in_le##size(base + PHB_CONFIG_DATA +			\
		     (offset & (4 - sizeof(type))));			\
									\
	return OPAL_SUCCESS;						\
}

#define P7IOC_PCI_CFG_WRITE(size, type)	\
static int64_t p7ioc_pcicfg_write##size(struct phb *phb, uint32_t bdfn,	\
					uint32_t offset, type data)	\
{									\
	struct p7ioc_phb *p = phb_to_p7ioc_phb(phb);			\
	void *base = p->regs;						\
	uint64_t addr;							\
	int64_t rc;							\
									\
	rc = p7ioc_pcicfg_check(p, bdfn, offset, sizeof(type));		\
	if (rc)								\
		return rc;						\
									\
	if (p7ioc_phb_fenced(p)) {					\
		if (!(p->flags & P7IOC_PHB_CFG_USE_ASB))		\
			return OPAL_HARDWARE;				\
									\
		base = p->regs_asb;					\
	} else if ((p->flags & P7IOC_PHB_CFG_BLOCKED) && bdfn != 0) {	\
		return OPAL_HARDWARE;					\
	}								\
									\
	addr = PHB_CA_ENABLE;						\
	addr = SETFIELD(PHB_CA_BDFN, addr, bdfn);			\
	addr = SETFIELD(PHB_CA_REG, addr, offset);			\
	out_be64(base + PHB_CONFIG_ADDRESS, addr);			\
	out_le##size(base + PHB_CONFIG_DATA +				\
		     (offset & (4 - sizeof(type))), data);		\
									\
	return OPAL_SUCCESS;						\
}

P7IOC_PCI_CFG_READ(8, uint8_t)
P7IOC_PCI_CFG_READ(16, uint16_t)
P7IOC_PCI_CFG_READ(32, uint32_t)
P7IOC_PCI_CFG_WRITE(8, uint8_t)
P7IOC_PCI_CFG_WRITE(16, uint16_t)
P7IOC_PCI_CFG_WRITE(32, uint32_t)

static void p7ioc_eeh_read_phb_status(struct p7ioc_phb *p,
				      struct OpalIoP7IOCPhbErrorData *stat)
{
	uint16_t tmp16;
	unsigned int i;

	memset(stat, 0, sizeof(struct OpalIoP7IOCPhbErrorData));

	/* Error data common part */
	stat->common.version = OPAL_PHB_ERROR_DATA_VERSION_1;
	stat->common.ioType  = OPAL_PHB_ERROR_DATA_TYPE_P7IOC;
	stat->common.len     = sizeof(struct OpalIoP7IOCPhbErrorData);

	/*
	 * We read some registers using config space through AIB.
	 *
	 * Other registers are accessed via ASB whenever possible, so
	 * that we can still reach them through a fence if one is
	 * present.
	 *
	 * Note that OpalIoP7IOCPhbErrorData has oddities, such as the
	 * bridge control being 32-bit and the UTL registers being 32-bit
	 * (which they really are, but they use the top 32 bits of a 64-bit
	 * register, so we need to be a bit careful).
	 */

	/* Use ASB to access PCICFG if the PHB has been fenced */
	p->flags |= P7IOC_PHB_CFG_USE_ASB;

	/* Grab RC bridge control, make it 32-bit */
	p7ioc_pcicfg_read16(&p->phb, 0, PCI_CFG_BRCTL, &tmp16);
	stat->brdgCtl = tmp16;

	/* Grab UTL status registers */
	stat->portStatusReg = hi32(in_be64(p->regs_asb
					   + UTL_PCIE_PORT_STATUS));
	stat->rootCmplxStatus = hi32(in_be64(p->regs_asb
					   + UTL_RC_STATUS));
	stat->busAgentStatus = hi32(in_be64(p->regs_asb
					   + UTL_SYS_BUS_AGENT_STATUS));

	/*
	 * Grab various RC PCIe capability registers. All device, slot
	 * and link status are 16-bit, so we grab the pair control+status
	 * for each of them
	 */
	p7ioc_pcicfg_read32(&p->phb, 0, p->ecap + PCICAP_EXP_DEVCTL,
			    &stat->deviceStatus);
	p7ioc_pcicfg_read32(&p->phb, 0, p->ecap + PCICAP_EXP_SLOTCTL,
			    &stat->slotStatus);
	p7ioc_pcicfg_read32(&p->phb, 0, p->ecap + PCICAP_EXP_LCTL,
			    &stat->linkStatus);

	/*
	 * I assume those come from the standard config space header:
	 * cmd & status together make 32 bits. Secondary status is
	 * 16-bit, so I'll clear the top half on that one.
	 */
	p7ioc_pcicfg_read32(&p->phb, 0, PCI_CFG_CMD, &stat->devCmdStatus);
	p7ioc_pcicfg_read16(&p->phb, 0, PCI_CFG_SECONDARY_STATUS, &tmp16);
	stat->devSecStatus = tmp16;

	/* Grab a bunch of AER regs */
	p7ioc_pcicfg_read32(&p->phb, 0, p->aercap + PCIECAP_AER_RERR_STA,
			    &stat->rootErrorStatus);
	p7ioc_pcicfg_read32(&p->phb, 0, p->aercap + PCIECAP_AER_UE_STATUS,
			    &stat->uncorrErrorStatus);
	p7ioc_pcicfg_read32(&p->phb, 0, p->aercap + PCIECAP_AER_CE_STATUS,
			    &stat->corrErrorStatus);
	p7ioc_pcicfg_read32(&p->phb, 0, p->aercap + PCIECAP_AER_HDR_LOG0,
			    &stat->tlpHdr1);
	p7ioc_pcicfg_read32(&p->phb, 0, p->aercap + PCIECAP_AER_HDR_LOG1,
			    &stat->tlpHdr2);
	p7ioc_pcicfg_read32(&p->phb, 0, p->aercap + PCIECAP_AER_HDR_LOG2,
			    &stat->tlpHdr3);
	p7ioc_pcicfg_read32(&p->phb, 0, p->aercap + PCIECAP_AER_HDR_LOG3,
			    &stat->tlpHdr4);
	p7ioc_pcicfg_read32(&p->phb, 0, p->aercap + PCIECAP_AER_SRCID,
			    &stat->sourceId);

	/* Restore to AIB */
	p->flags &= ~P7IOC_PHB_CFG_USE_ASB;

	/*
	 * No idea what that is supposed to be, opal.h says
	 * "Record data about the call to allocate a buffer."
	 *
	 * Let's leave them alone for now...
	 *
	 * uint64_t errorClass;
	 * uint64_t correlator;
	 */

	/* P7IOC MMIO Error Regs */
	stat->p7iocPlssr = in_be64(p->regs_asb + PHB_CPU_LOADSTORE_STATUS);
	stat->p7iocCsr = in_be64(p->regs_asb + PHB_DMA_CHAN_STATUS);
	stat->lemFir = in_be64(p->regs_asb + PHB_LEM_FIR_ACCUM);
	stat->lemErrorMask = in_be64(p->regs_asb + PHB_LEM_ERROR_MASK);
	stat->lemWOF = in_be64(p->regs_asb + PHB_LEM_WOF);
	stat->phbErrorStatus = in_be64(p->regs_asb + PHB_ERR_STATUS);
	stat->phbFirstErrorStatus = in_be64(p->regs_asb + PHB_ERR1_STATUS);
	stat->phbErrorLog0 = in_be64(p->regs_asb + PHB_ERR_LOG_0);
	stat->phbErrorLog1 = in_be64(p->regs_asb + PHB_ERR_LOG_1);
	stat->mmioErrorStatus = in_be64(p->regs_asb + PHB_OUT_ERR_STATUS);
	stat->mmioFirstErrorStatus = in_be64(p->regs_asb + PHB_OUT_ERR1_STATUS);
	stat->mmioErrorLog0 = in_be64(p->regs_asb + PHB_OUT_ERR_LOG_0);
	stat->mmioErrorLog1 = in_be64(p->regs_asb + PHB_OUT_ERR_LOG_1);
	stat->dma0ErrorStatus = in_be64(p->regs_asb + PHB_INA_ERR_STATUS);
	stat->dma0FirstErrorStatus = in_be64(p->regs_asb + PHB_INA_ERR1_STATUS);
	stat->dma0ErrorLog0 = in_be64(p->regs_asb + PHB_INA_ERR_LOG_0);
	stat->dma0ErrorLog1 = in_be64(p->regs_asb + PHB_INA_ERR_LOG_1);
	stat->dma1ErrorStatus = in_be64(p->regs_asb + PHB_INB_ERR_STATUS);
	stat->dma1FirstErrorStatus = in_be64(p->regs_asb + PHB_INB_ERR1_STATUS);
	stat->dma1ErrorLog0 = in_be64(p->regs_asb + PHB_INB_ERR_LOG_0);
	stat->dma1ErrorLog1 = in_be64(p->regs_asb + PHB_INB_ERR_LOG_1);

	/* Grab PESTA & B content */
	p7ioc_phb_ioda_sel(p, IODA_TBL_PESTA, 0, true);
	for (i = 0; i < OPAL_P7IOC_NUM_PEST_REGS; i++)
		stat->pestA[i] = in_be64(p->regs_asb + PHB_IODA_DATA0);
	p7ioc_phb_ioda_sel(p, IODA_TBL_PESTB, 0, true);
	for (i = 0; i < OPAL_P7IOC_NUM_PEST_REGS; i++)
		stat->pestB[i] = in_be64(p->regs_asb + PHB_IODA_DATA0);
}

static int64_t p7ioc_eeh_freeze_status(struct phb *phb, uint64_t pe_number,
				       uint8_t *freeze_state,
				       uint16_t *pci_error_type,
				       uint16_t *severity,
				       uint64_t *phb_status)
{
	struct p7ioc_phb *p = phb_to_p7ioc_phb(phb);
	uint64_t peev_bit = PPC_BIT(pe_number & 0x3f);
	uint64_t peev, pesta, pestb;

	/* Defaults: not frozen */
	*freeze_state = OPAL_EEH_STOPPED_NOT_FROZEN;
	*pci_error_type = OPAL_EEH_NO_ERROR;

	/* Check dead */
	if (p->state == P7IOC_PHB_STATE_BROKEN) {
		*freeze_state = OPAL_EEH_STOPPED_MMIO_DMA_FREEZE;
		*pci_error_type = OPAL_EEH_PHB_ERROR;
		if (severity)
			*severity = OPAL_EEH_SEV_PHB_DEAD;
		goto bail;
	}

	/* Check fence */
	if (p7ioc_phb_fenced(p)) {
		/* Should be OPAL_EEH_STOPPED_TEMP_UNAVAIL ? */
		*freeze_state = OPAL_EEH_STOPPED_MMIO_DMA_FREEZE;
		*pci_error_type = OPAL_EEH_PHB_ERROR;
		if (severity)
			*severity = OPAL_EEH_SEV_PHB_FENCED;
		p->state = P7IOC_PHB_STATE_FENCED;
		goto bail;
	}

	/* Check the PEEV */
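	/*
	 * With auto-increment selected, the first read of
	 * PHB_IODA_DATA0 returns the PEEV word for PEs 0..63 and a
	 * second read returns the word for PEs 64..127, hence the
	 * conditional double read below.
	 */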
	p7ioc_phb_ioda_sel(p, IODA_TBL_PEEV, 0, true);
	peev = in_be64(p->regs + PHB_IODA_DATA0);
	if (pe_number > 63)
		peev = in_be64(p->regs + PHB_IODA_DATA0);
	if (!(peev & peev_bit))
		return OPAL_SUCCESS;

	/* Indicate that we have an ER pending */
	p7ioc_phb_set_err_pending(p, true);
	if (severity)
		*severity = OPAL_EEH_SEV_PE_ER;

	/* Read the PESTA & PESTB */
	p7ioc_phb_ioda_sel(p, IODA_TBL_PESTA, pe_number, false);
	pesta = in_be64(p->regs + PHB_IODA_DATA0);
	p7ioc_phb_ioda_sel(p, IODA_TBL_PESTB, pe_number, false);
	pestb = in_be64(p->regs + PHB_IODA_DATA0);

	/* Convert them */
	if (pesta & IODA_PESTA_MMIO_FROZEN)
		*freeze_state |= OPAL_EEH_STOPPED_MMIO_FREEZE;
	if (pestb & IODA_PESTB_DMA_STOPPED)
		*freeze_state |= OPAL_EEH_STOPPED_DMA_FREEZE;

	/* XXX Handle more causes */
	if (pesta & IODA_PESTA_MMIO_CAUSE)
		*pci_error_type = OPAL_EEH_PE_MMIO_ERROR;
	else
		*pci_error_type = OPAL_EEH_PE_DMA_ERROR;

 bail:
	if (phb_status)
		p7ioc_eeh_read_phb_status(p, (struct OpalIoP7IOCPhbErrorData *)
					  phb_status);
	return OPAL_SUCCESS;
}

static int64_t p7ioc_eeh_next_error(struct phb *phb, uint64_t *first_frozen_pe,
				    uint16_t *pci_error_type, uint16_t *severity)
{
	struct p7ioc_phb *p = phb_to_p7ioc_phb(phb);
	struct p7ioc *ioc = p->ioc;
	uint64_t fir, peev0, peev1;
	uint32_t cfg32, i;

	/* Check if there're pending errors on the IOC. */
	if (p7ioc_err_pending(ioc) &&
	    p7ioc_check_LEM(ioc, pci_error_type, severity))
		return OPAL_SUCCESS;

	/* Clear result */
	*pci_error_type	= OPAL_EEH_NO_ERROR;
	*severity	= OPAL_EEH_SEV_NO_ERROR;
	*first_frozen_pe = (uint64_t)-1;

	/* Check dead */
	if (p->state == P7IOC_PHB_STATE_BROKEN) {
		*pci_error_type = OPAL_EEH_PHB_ERROR;
		*severity = OPAL_EEH_SEV_PHB_DEAD;
		return OPAL_SUCCESS;
	}

	/* Check fence */
	if (p7ioc_phb_fenced(p)) {
		/* Should be OPAL_EEH_STOPPED_TEMP_UNAVAIL ? */
		*pci_error_type = OPAL_EEH_PHB_ERROR;
		*severity = OPAL_EEH_SEV_PHB_FENCED;
		p->state = P7IOC_PHB_STATE_FENCED;
		p7ioc_phb_set_err_pending(p, false);
		return OPAL_SUCCESS;
	}

	/*
	 * If we don't have pending errors, which might have been
	 * moved from the IOC to the PHB, then check whether there
	 * are any frozen PEs.
	 */
	if (!p7ioc_phb_err_pending(p)) {
		p7ioc_phb_ioda_sel(p, IODA_TBL_PEEV, 0, true);
		peev0 = in_be64(p->regs + PHB_IODA_DATA0);
		peev1 = in_be64(p->regs + PHB_IODA_DATA0);
		if (peev0 || peev1) {
			p->err.err_src   = P7IOC_ERR_SRC_PHB0 + p->index;
			p->err.err_class = P7IOC_ERR_CLASS_ER;
			p->err.err_bit   = 0;
			p7ioc_phb_set_err_pending(p, true);
		}
	}

	/* Check the pending errors, which might come from IOC */
	if (p7ioc_phb_err_pending(p)) {
		/*
		 * If the frozen PE was caused by a malformed TLP, we
		 * need to reset the PHB, so convert the ER to a
		 * PHB-fatal error in that case.
		 */
		if (p->err.err_class == P7IOC_ERR_CLASS_ER) {
			fir = in_be64(p->regs_asb + PHB_LEM_FIR_ACCUM);
			if (fir & PPC_BIT(60)) {
				p7ioc_pcicfg_read32(&p->phb, 0,
					p->aercap + PCIECAP_AER_UE_STATUS, &cfg32);
				if (cfg32 & PCIECAP_AER_UE_MALFORMED_TLP)
					p->err.err_class = P7IOC_ERR_CLASS_PHB;
			}
		}

		/*
		 * Map the P7IOC internal error class to one the OS can
		 * handle. For P7IOC_ERR_CLASS_ER, we also need to figure
		 * out the frozen PE.
		 */
		switch (p->err.err_class) {
		case P7IOC_ERR_CLASS_PHB:
			*pci_error_type = OPAL_EEH_PHB_ERROR;
			*severity = OPAL_EEH_SEV_PHB_FENCED;
			p7ioc_phb_set_err_pending(p, false);
			break;
		case P7IOC_ERR_CLASS_MAL:
		case P7IOC_ERR_CLASS_INF:
			*pci_error_type = OPAL_EEH_PHB_ERROR;
			*severity = OPAL_EEH_SEV_INF;
			p7ioc_phb_set_err_pending(p, false);
			break;
		case P7IOC_ERR_CLASS_ER:
			*pci_error_type = OPAL_EEH_PE_ERROR;
			*severity = OPAL_EEH_SEV_PE_ER;
			p7ioc_phb_ioda_sel(p, IODA_TBL_PEEV, 0, true);
			peev0 = in_be64(p->regs + PHB_IODA_DATA0);
			peev1 = in_be64(p->regs + PHB_IODA_DATA0);

			for (i = 0; i < 64; i++) {
				if (PPC_BIT(i) & peev1) {
					*first_frozen_pe = i + 64;
					break;
				}
			}
			for (i = 0;
			     *first_frozen_pe == (uint64_t)-1 && i < 64;
			     i++) {
				if (PPC_BIT(i) & peev0) {
					*first_frozen_pe = i;
					break;
				}
			}

			/* No frozen PE? */
			if (*first_frozen_pe == (uint64_t)-1) {
				*pci_error_type = OPAL_EEH_NO_ERROR;
				*severity = OPAL_EEH_SEV_NO_ERROR;
				p7ioc_phb_set_err_pending(p, false);
			}

			break;
		default:
			*pci_error_type = OPAL_EEH_NO_ERROR;
			*severity = OPAL_EEH_SEV_NO_ERROR;
			p7ioc_phb_set_err_pending(p, false);
		}
	}

	return OPAL_SUCCESS;
}

static void p7ioc_ER_err_clear(struct p7ioc_phb *p)
{
	u64 err, lem;
	u32 val;

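	/*
	 * The "Rec N" comments below track the step numbers of the
	 * P7IOC error recovery procedure this sequence was derived
	 * from (see the note in p7ioc_eeh_freeze_clear). Most status
	 * registers are cleared by writing back the value just read,
	 * i.e. presumably the usual write-one-to-clear convention.
	 */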
	/* Rec 1,2 */
	lem = in_be64(p->regs + PHB_LEM_FIR_ACCUM);

	/* Rec 3,4,5 AER registers (could use cfg space accessors) */
	out_be64(p->regs + PHB_CONFIG_ADDRESS, 0x8000001c00000000ull);
	out_be32(p->regs + PHB_CONFIG_DATA, 0x10000000);

	/* Rec 6,7,8 XXX DOC whacks payload & req size ... we don't */
	out_be64(p->regs + PHB_CONFIG_ADDRESS, 0x8000005000000000ull);
	val = in_be32(p->regs + PHB_CONFIG_DATA);
	out_be32(p->regs + PHB_CONFIG_DATA, (val & 0xe0700000) | 0x0f000f00);

	/* Rec 9,10,11 */
	out_be64(p->regs + PHB_CONFIG_ADDRESS, 0x8000010400000000ull);
	out_be32(p->regs + PHB_CONFIG_DATA, 0xffffffff);

	/* Rec 12,13,14 */
	out_be64(p->regs + PHB_CONFIG_ADDRESS, 0x8000011000000000ull);
	out_be32(p->regs + PHB_CONFIG_DATA, 0xffffffff);

	/* Rec 23,24,25 */
	out_be64(p->regs + PHB_CONFIG_ADDRESS, 0x8000013000000000ull);
	out_be32(p->regs + PHB_CONFIG_DATA, 0xffffffff);

	/* Rec 26,27,28 */
	out_be64(p->regs + PHB_CONFIG_ADDRESS, 0x8000004000000000ull);
	out_be32(p->regs + PHB_CONFIG_DATA, 0x470100f8);

	/* Rec 29..34 UTL registers */
	err = in_be64(p->regs + UTL_SYS_BUS_AGENT_STATUS);
	out_be64(p->regs + UTL_SYS_BUS_AGENT_STATUS, err);
	err = in_be64(p->regs + UTL_PCIE_PORT_STATUS);
	out_be64(p->regs + UTL_PCIE_PORT_STATUS, err);
	err = in_be64(p->regs + UTL_RC_STATUS);
	out_be64(p->regs + UTL_RC_STATUS, err);

	/* PHB error traps registers */
	err = in_be64(p->regs + PHB_ERR_STATUS);
	out_be64(p->regs + PHB_ERR_STATUS, err);
	out_be64(p->regs + PHB_ERR1_STATUS, 0);
	out_be64(p->regs + PHB_ERR_LOG_0, 0);
	out_be64(p->regs + PHB_ERR_LOG_1, 0);

	err = in_be64(p->regs + PHB_OUT_ERR_STATUS);
	out_be64(p->regs + PHB_OUT_ERR_STATUS, err);
	out_be64(p->regs + PHB_OUT_ERR1_STATUS, 0);
	out_be64(p->regs + PHB_OUT_ERR_LOG_0, 0);
	out_be64(p->regs + PHB_OUT_ERR_LOG_1, 0);

	err = in_be64(p->regs + PHB_INA_ERR_STATUS);
	out_be64(p->regs + PHB_INA_ERR_STATUS, err);
	out_be64(p->regs + PHB_INA_ERR1_STATUS, 0);
	out_be64(p->regs + PHB_INA_ERR_LOG_0, 0);
	out_be64(p->regs + PHB_INA_ERR_LOG_1, 0);

	err = in_be64(p->regs + PHB_INB_ERR_STATUS);
	out_be64(p->regs + PHB_INB_ERR_STATUS, err);
	out_be64(p->regs + PHB_INB_ERR1_STATUS, 0);
	out_be64(p->regs + PHB_INB_ERR_LOG_0, 0);
	out_be64(p->regs + PHB_INB_ERR_LOG_1, 0);

	/* Rec 67, 68 LEM */
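	/*
	 * FIR "AND mask" registers clear the bits that are 0 in the
	 * written value (assuming the usual FIR convention), so
	 * writing ~lem clears exactly the bits we sampled at entry
	 * while leaving any newly latched bits intact.
	 */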
	out_be64(p->regs + PHB_LEM_FIR_AND_MASK, ~lem);
	out_be64(p->regs + PHB_LEM_WOF, 0);
}

static int64_t p7ioc_eeh_freeze_clear(struct phb *phb, uint64_t pe_number,
				      uint64_t eeh_action_token)
{
	struct p7ioc_phb *p = phb_to_p7ioc_phb(phb);
	uint64_t peev0, peev1;

	/* XXX Now this is a heavy hammer, coming roughly from the P7IOC doc
	 * and my old "pseudopal" code. It will need to be refined. In general
	 * error handling will have to be reviewed and probably done properly
	 * "from scratch" based on the description in the p7IOC spec.
	 *
	 * XXX Additionally, when handling interrupts, we might want to consider
	 * masking while processing and/or ack'ing interrupt bits etc...
	 */
	u64 err;

	/* Summary. If nothing, move to clearing the PESTs which can
	 * contain a freeze state from a previous error or simply set
	 * explicitly by the user
	 */
	err = in_be64(p->regs + PHB_ETU_ERR_SUMMARY);
	if (err == 0)
		goto clear_pest;

	p7ioc_ER_err_clear(p);

 clear_pest:
	/* XXX We just clear the whole PESTA for MMIO clear and PESTB
	 * for DMA clear. We might want to only clear the frozen bit
	 * as to not clobber the rest of the state. However, we expect
	 * the state to have been harvested before the clear operations
	 * so this might not be an issue
	 */
	if (eeh_action_token & OPAL_EEH_ACTION_CLEAR_FREEZE_MMIO) {
		p7ioc_phb_ioda_sel(p, IODA_TBL_PESTA, pe_number, false);
		out_be64(p->regs + PHB_IODA_DATA0, 0);
	}
	if (eeh_action_token & OPAL_EEH_ACTION_CLEAR_FREEZE_DMA) {
		p7ioc_phb_ioda_sel(p, IODA_TBL_PESTB, pe_number, false);
		out_be64(p->regs + PHB_IODA_DATA0, 0);
	}

	/* Update ER pending indication */
	p7ioc_phb_ioda_sel(p, IODA_TBL_PEEV, 0, true);
	peev0 = in_be64(p->regs + PHB_IODA_DATA0);
	peev1 = in_be64(p->regs + PHB_IODA_DATA0);
	if (peev0 || peev1) {
		p->err.err_src   = P7IOC_ERR_SRC_PHB0 + p->index;
		p->err.err_class = P7IOC_ERR_CLASS_ER;
		p->err.err_bit   = 0;
		p7ioc_phb_set_err_pending(p, true);
	} else
		p7ioc_phb_set_err_pending(p, false);

	return OPAL_SUCCESS;
}

static int64_t p7ioc_eeh_freeze_set(struct phb *phb, uint64_t pe_number,
				    uint64_t eeh_action_token)
{
	struct p7ioc_phb *p = phb_to_p7ioc_phb(phb);
	uint64_t data;

	if (pe_number > 127)
		return OPAL_PARAMETER;

	if (eeh_action_token != OPAL_EEH_ACTION_SET_FREEZE_MMIO &&
	    eeh_action_token != OPAL_EEH_ACTION_SET_FREEZE_DMA &&
	    eeh_action_token != OPAL_EEH_ACTION_SET_FREEZE_ALL)
		return OPAL_PARAMETER;

	if (eeh_action_token & OPAL_EEH_ACTION_SET_FREEZE_MMIO) {
		p7ioc_phb_ioda_sel(p, IODA_TBL_PESTA, pe_number, false);
		data = in_be64(p->regs + PHB_IODA_DATA0);
		data |= IODA_PESTA_MMIO_FROZEN;
		out_be64(p->regs + PHB_IODA_DATA0, data);
	}

	if (eeh_action_token & OPAL_EEH_ACTION_SET_FREEZE_DMA) {
		p7ioc_phb_ioda_sel(p, IODA_TBL_PESTB, pe_number, false);
		data = in_be64(p->regs + PHB_IODA_DATA0);
		data |= IODA_PESTB_DMA_STOPPED;
		out_be64(p->regs + PHB_IODA_DATA0, data);
	}

	return OPAL_SUCCESS;
}

static int64_t p7ioc_err_inject_finalize(struct p7ioc_phb *p, uint64_t addr,
					 uint64_t mask, uint64_t ctrl,
					 bool is_write)
{
	if (is_write)
		ctrl |= PHB_PAPR_ERR_INJ_CTL_WR;
	else
		ctrl |= PHB_PAPR_ERR_INJ_CTL_RD;

	/* HW100549: Take read and write for outbound errors
	 * on DD10 chip
	 */
	if (p->rev == P7IOC_REV_DD10)
		ctrl |= (PHB_PAPR_ERR_INJ_CTL_RD | PHB_PAPR_ERR_INJ_CTL_WR);

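	/*
	 * Set up the address and mask before arming the control
	 * register, so injection starts with a consistent
	 * configuration (ordering assumed rather than documented
	 * here).
	 */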
	out_be64(p->regs + PHB_PAPR_ERR_INJ_ADDR, addr);
	out_be64(p->regs + PHB_PAPR_ERR_INJ_MASK, mask);
	out_be64(p->regs + PHB_PAPR_ERR_INJ_CTL, ctrl);

	return OPAL_SUCCESS;
}

static int64_t p7ioc_err_inject_mem32(struct p7ioc_phb *p, uint64_t pe_number,
				      uint64_t addr, uint64_t mask,
				      bool is_write)
{
	uint64_t a, m, prefer, base;
	uint64_t ctrl = PHB_PAPR_ERR_INJ_CTL_OUTB;
	int32_t index;

	a = 0x0ull;
	prefer = 0x0ull;
	for (index = 0; index < 128; index++) {
		if (GETFIELD(IODA_XXDT_PE, p->m32d_cache[index]) != pe_number)
			continue;

		base = p->m32_base + M32_PCI_START +
		       (M32_PCI_SIZE / 128) * index;

		/* Update preferred address */
		if (!prefer) {
			prefer = GETFIELD(PHB_PAPR_ERR_INJ_MASK_MMIO, base);
			prefer = SETFIELD(PHB_PAPR_ERR_INJ_MASK_MMIO,
					  0x0ull, prefer);
		}

		/* Does the input address match? */
		if (addr >= base &&
		    addr < base + (M32_PCI_SIZE / 128)) {
			a = addr;
			break;
		}
	}

	/* Invalid PE number */
	if (!prefer)
		return OPAL_PARAMETER;

	/* Specified address is out of range */
	if (!a) {
		a = prefer;
		m = PHB_PAPR_ERR_INJ_MASK_MMIO;
	} else {
		m = mask;
	}

	return p7ioc_err_inject_finalize(p, a, m, ctrl, is_write);
}

static int64_t p7ioc_err_inject_io32(struct p7ioc_phb *p, uint64_t pe_number,
				     uint64_t addr, uint64_t mask,
				     bool is_write)
{
	uint64_t a, m, prefer, base;
	uint64_t ctrl = PHB_PAPR_ERR_INJ_CTL_OUTB;
	int32_t index;

	a = 0x0ull;
	prefer = 0x0ull;
	for (index = 0; index < 128; index++) {
		if (GETFIELD(IODA_XXDT_PE, p->iod_cache[index]) != pe_number)
			continue;

		base = p->io_base + (PHB_IO_SIZE / 128) * index;

		/* Update preferred address */
		if (!prefer) {
			prefer = GETFIELD(PHB_PAPR_ERR_INJ_MASK_IO, base);
			prefer = SETFIELD(PHB_PAPR_ERR_INJ_MASK_IO, 0x0ull, prefer);
		}

		/* Does the input address match? */
		if (addr >= base &&
		    addr < base + (PHB_IO_SIZE / 128)) {
			a = addr;
			break;
		}
	}

	/* Invalid PE number */
	if (!prefer)
		return OPAL_PARAMETER;

	/* Specified address is out of range */
	if (!a) {
		a = prefer;
		m = PHB_PAPR_ERR_INJ_MASK_IO;
	} else {
		m = mask;
	}

	return p7ioc_err_inject_finalize(p, a, m, ctrl, is_write);
}

static int64_t p7ioc_err_inject_cfg(struct p7ioc_phb *p, uint64_t pe_number,
				    uint64_t addr, uint64_t mask,
				    bool is_write)
{
	uint64_t a, m;
	uint64_t ctrl = PHB_PAPR_ERR_INJ_CTL_CFG;
	uint8_t v_bits, base, bus_no;

	/* Looking into PELTM to see if the PCI bus# is owned
	 * by the PE#. Otherwise, we have to figure one out.
	 */
	base = GETFIELD(IODA_PELTM_BUS, p->peltm_cache[pe_number]);
	v_bits = GETFIELD(IODA_PELTM_BUS_VALID, p->peltm_cache[pe_number]);
	switch (v_bits) {
	case IODA_BUS_VALID_3_BITS:
	case IODA_BUS_VALID_4_BITS:
	case IODA_BUS_VALID_5_BITS:
	case IODA_BUS_VALID_6_BITS:
	case IODA_BUS_VALID_7_BITS:
	case IODA_BUS_VALID_ALL:
		base = GETFIELD(IODA_PELTM_BUS, p->peltm_cache[pe_number]);
		base &= (0xff - (((1 << (7 - v_bits)) - 1)));
		a = SETFIELD(PHB_PAPR_ERR_INJ_MASK_CFG, 0x0ul, base);
		m = PHB_PAPR_ERR_INJ_MASK_CFG;

		bus_no = GETFIELD(PHB_PAPR_ERR_INJ_MASK_CFG, addr);
		bus_no &= (0xff - (((1 << (7 - v_bits)) - 1)));
		if (base == bus_no) {
			a = addr;
			m = mask;
		}

		break;
	case IODA_BUS_VALID_ANY:
	default:
		return OPAL_PARAMETER;
	}

	return p7ioc_err_inject_finalize(p, a, m, ctrl, is_write);
}

static int64_t p7ioc_err_inject_dma(struct p7ioc_phb *p, uint64_t pe_number,
				    uint64_t addr, uint64_t mask,
				    bool is_write)
{
	uint64_t ctrl = PHB_PAPR_ERR_INJ_CTL_INB;
	int32_t index;

	/* For DMA, we just pick address from TVT */
	for (index = 0; index < 128; index++) {
		if (GETFIELD(IODA_TVT1_PE_NUM, p->tve_hi_cache[index]) !=
		    pe_number)
			continue;

		addr = SETFIELD(PHB_PAPR_ERR_INJ_MASK_DMA, 0ul, index);
		mask = PHB_PAPR_ERR_INJ_MASK_DMA;
		break;
	}

	/* Some PE might not have DMA capability */
	if (index >= 128)
		return OPAL_PARAMETER;

	return p7ioc_err_inject_finalize(p, addr, mask, ctrl, is_write);
}

static int64_t p7ioc_err_inject(struct phb *phb, uint64_t pe_number,
				uint32_t type, uint32_t func,
				uint64_t addr, uint64_t mask)
{
	struct p7ioc_phb *p = phb_to_p7ioc_phb(phb);
	int64_t (*handler)(struct p7ioc_phb *p, uint64_t pe_number,
			   uint64_t addr, uint64_t mask, bool is_write);
	bool is_write;

	/* To support 64-bits error later */
	if (type == OPAL_ERR_INJECT_TYPE_IOA_BUS_ERR64)
		return OPAL_UNSUPPORTED;

	/* We can't inject error to the reserved PE#127 */
	if (pe_number > 126)
		return OPAL_PARAMETER;

	/* Clear the leftover from last time */
	out_be64(p->regs + PHB_PAPR_ERR_INJ_CTL, 0x0ul);

	/* Check if PE number is valid one in PELTM cache */
	if (p->peltm_cache[pe_number] == 0x0001f80000000000ull)
		return OPAL_PARAMETER;

	switch (func) {
	case OPAL_ERR_INJECT_FUNC_IOA_LD_MEM_ADDR:
	case OPAL_ERR_INJECT_FUNC_IOA_LD_MEM_DATA:
		is_write = false;
		handler = p7ioc_err_inject_mem32;
		break;
	case OPAL_ERR_INJECT_FUNC_IOA_ST_MEM_ADDR:
	case OPAL_ERR_INJECT_FUNC_IOA_ST_MEM_DATA:
		is_write = true;
		handler = p7ioc_err_inject_mem32;
		break;
	case OPAL_ERR_INJECT_FUNC_IOA_LD_IO_ADDR:
	case OPAL_ERR_INJECT_FUNC_IOA_LD_IO_DATA:
		is_write = false;
		handler = p7ioc_err_inject_io32;
		break;
	case OPAL_ERR_INJECT_FUNC_IOA_ST_IO_ADDR:
	case OPAL_ERR_INJECT_FUNC_IOA_ST_IO_DATA:
		is_write = true;
		handler = p7ioc_err_inject_io32;
		break;
	case OPAL_ERR_INJECT_FUNC_IOA_LD_CFG_ADDR:
	case OPAL_ERR_INJECT_FUNC_IOA_LD_CFG_DATA:
		is_write = false;
		handler = p7ioc_err_inject_cfg;
		break;
	case OPAL_ERR_INJECT_FUNC_IOA_ST_CFG_ADDR:
	case OPAL_ERR_INJECT_FUNC_IOA_ST_CFG_DATA:
		is_write = true;
		handler = p7ioc_err_inject_cfg;
		break;
	case OPAL_ERR_INJECT_FUNC_IOA_DMA_RD_ADDR:
	case OPAL_ERR_INJECT_FUNC_IOA_DMA_RD_DATA:
	case OPAL_ERR_INJECT_FUNC_IOA_DMA_RD_MASTER:
	case OPAL_ERR_INJECT_FUNC_IOA_DMA_RD_TARGET:
		is_write = false;
		handler = p7ioc_err_inject_dma;
		break;
	case OPAL_ERR_INJECT_FUNC_IOA_DMA_WR_ADDR:
	case OPAL_ERR_INJECT_FUNC_IOA_DMA_WR_DATA:
	case OPAL_ERR_INJECT_FUNC_IOA_DMA_WR_MASTER:
	case OPAL_ERR_INJECT_FUNC_IOA_DMA_WR_TARGET:
		is_write = true;
		handler = p7ioc_err_inject_dma;
		break;
	default:
		return OPAL_PARAMETER;
	}

	return handler(p, pe_number, addr, mask, is_write);
}

static int64_t p7ioc_get_diag_data(struct phb *phb, void *diag_buffer,
				   uint64_t diag_buffer_len)
{
	struct p7ioc_phb *p = phb_to_p7ioc_phb(phb);
	struct OpalIoP7IOCPhbErrorData *diag = diag_buffer;

	if (diag_buffer_len < sizeof(struct OpalIoP7IOCPhbErrorData))
		return OPAL_PARAMETER;

	/* Specific error data */
	p7ioc_eeh_read_phb_status(p, diag);

	/*
	 * We probably got here because of errors (MAL or INF class)
	 * from the IOC. In that case, we need to clear the pending
	 * errors and mask the error bit for MAL class errors.
	 * Fortunately, we shouldn't get MAL class errors from the
	 * IOC on P7IOC.
	 */
	if (p7ioc_phb_err_pending(p)			&&
	    p->err.err_class == P7IOC_ERR_CLASS_INF	&&
	    p->err.err_src >= P7IOC_ERR_SRC_PHB0	&&
	    p->err.err_src <= P7IOC_ERR_SRC_PHB5) {
		p7ioc_ER_err_clear(p);
		p7ioc_phb_set_err_pending(p, false);
	}

	return OPAL_SUCCESS;
}

/*
 * We don't support address remapping for now since all M64
 * BARs share one remapping base address. We might introduce
 * a flag in the PHB to track that; the flag would only be
 * allowed to change once. It's something to do in the future.
 */
static int64_t p7ioc_set_phb_mem_window(struct phb *phb,
					uint16_t window_type,
					uint16_t window_num,
					uint64_t base,
					uint64_t __unused pci_base,
					uint64_t size)
{
	struct p7ioc_phb *p = phb_to_p7ioc_phb(phb);
	uint64_t data64;

	switch (window_type) {
	case OPAL_IO_WINDOW_TYPE:
	case OPAL_M32_WINDOW_TYPE:
		return OPAL_UNSUPPORTED;
	case OPAL_M64_WINDOW_TYPE:
		if (window_num >= 16)
			return OPAL_PARAMETER;
		/* The base and size should be 16MB aligned */
		if (base & 0xFFFFFF || size & 0xFFFFFF)
			return OPAL_PARAMETER;
		data64 = p->m64b_cache[window_num];
		data64 = SETFIELD(IODA_M64BT_BASE, data64, base >> 24);
		size = (size >> 24);
		data64 = SETFIELD(IODA_M64BT_MASK, data64, 0x1000000 - size);
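		/*
		 * e.g. a 256MB window: size >> 24 = 0x10, so the mask
		 * is 0x1000000 - 0x10 = 0xfffff0.
		 */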
		break;
	default:
		return OPAL_PARAMETER;
	}

	/*
	 * If the M64 BAR hasn't been enabled yet, we needn't flush
	 * the setting to hardware; just keep it in the cache.
	 */
	p->m64b_cache[window_num] = data64;
	if (!(data64 & IODA_M64BT_ENABLE))
		return OPAL_SUCCESS;
	p7ioc_phb_ioda_sel(p, IODA_TBL_M64BT, window_num, false);
	out_be64(p->regs + PHB_IODA_DATA0, data64);

	return OPAL_SUCCESS;
}

/*
 * We can't enable or disable the I/O and M32 windows dynamically,
 * nor is it necessary. So this function only supports M64 BARs.
 */
static int64_t p7ioc_phb_mmio_enable(struct phb *phb,
				     uint16_t window_type,
				     uint16_t window_num,
				     uint16_t enable)
{
	struct p7ioc_phb *p = phb_to_p7ioc_phb(phb);
	uint64_t data64, base, mask;

	switch (window_type) {
	case OPAL_IO_WINDOW_TYPE:
	case OPAL_M32_WINDOW_TYPE:
		return OPAL_UNSUPPORTED;
	case OPAL_M64_WINDOW_TYPE:
		if (window_num >= 16 ||
		    enable >= OPAL_ENABLE_M64_NON_SPLIT)
			return OPAL_PARAMETER;

		break;
	default:
		return OPAL_PARAMETER;
	}

	/*
	 * When enabling a specific M64 BAR, its base/size should
	 * already be configured correctly. Otherwise, it will
	 * probably incur a fenced AIB.
	 */
	data64 = p->m64b_cache[window_num];
	if (enable == OPAL_ENABLE_M64_SPLIT) {
		base = GETFIELD(IODA_M64BT_BASE, data64);
		base = (base << 24);
		mask = GETFIELD(IODA_M64BT_MASK, data64);
		if (base < p->m64_base || mask == 0x0ul)
			return OPAL_PARTIAL;

		data64 |= IODA_M64BT_ENABLE;
	} else if (enable == OPAL_DISABLE_M64) {
		data64 &= ~IODA_M64BT_ENABLE;
	}

	p7ioc_phb_ioda_sel(p, IODA_TBL_M64BT, window_num, false);
	out_be64(p->regs + PHB_IODA_DATA0, data64);
	p->m64b_cache[window_num] = data64;

	return OPAL_SUCCESS;
}

static int64_t p7ioc_map_pe_mmio_window(struct phb *phb, uint64_t pe_number,
					uint16_t window_type,
					uint16_t window_num,
					uint16_t segment_num)
{
	struct p7ioc_phb *p = phb_to_p7ioc_phb(phb);
	uint64_t tbl, index;
	uint64_t *cache;

	if (pe_number > 127)
		return OPAL_PARAMETER;

	switch (window_type) {
	case OPAL_IO_WINDOW_TYPE:
		if (window_num != 0 || segment_num > 127)
			return OPAL_PARAMETER;
		tbl = IODA_TBL_IODT;
		index = segment_num;
		cache = &p->iod_cache[index];
		break;
	case OPAL_M32_WINDOW_TYPE:
		if (window_num != 0 || segment_num > 127)
			return OPAL_PARAMETER;
		tbl = IODA_TBL_M32DT;
		index = segment_num;
		cache = &p->m32d_cache[index];
		break;
	case OPAL_M64_WINDOW_TYPE:
		if (window_num > 15 || segment_num > 7)
			return OPAL_PARAMETER;

		tbl = IODA_TBL_M64DT;
		index = window_num << 3 | segment_num;
		cache = &p->m64d_cache[index];
		break;
	default:
		return OPAL_PARAMETER;
	}

	p7ioc_phb_ioda_sel(p, tbl, index, false);
	out_be64(p->regs + PHB_IODA_DATA0,
		 SETFIELD(IODA_XXDT_PE, 0ull, pe_number));

	/* Update cache */
	*cache = SETFIELD(IODA_XXDT_PE, 0ull, pe_number);

	return OPAL_SUCCESS;
}

static int64_t p7ioc_set_pe(struct phb *phb, uint64_t pe_number,
			    uint64_t bdfn, uint8_t bus_compare,
			    uint8_t dev_compare, uint8_t func_compare,
			    uint8_t pe_action)
{
	struct p7ioc_phb *p = phb_to_p7ioc_phb(phb);
	uint64_t pelt;
	uint64_t *cache = &p->peltm_cache[pe_number];

	if (pe_number > 127 || bdfn > 0xffff)
		return OPAL_PARAMETER;
	if (pe_action != OPAL_MAP_PE && pe_action != OPAL_UNMAP_PE)
		return OPAL_PARAMETER;
	if (bus_compare > 7)
		return OPAL_PARAMETER;

	if (pe_action == OPAL_MAP_PE) {
		pelt  = SETFIELD(IODA_PELTM_BUS, 0ul, bdfn >> 8);
		pelt |= SETFIELD(IODA_PELTM_DEV, 0ul, (bdfn >> 3) & 0x1f);
		pelt |= SETFIELD(IODA_PELTM_FUNC, 0ul, bdfn & 0x7);
		pelt |= SETFIELD(IODA_PELTM_BUS_VALID, 0ul, bus_compare);
		if (dev_compare)
			pelt |= IODA_PELTM_DEV_VALID;
		if (func_compare)
			pelt |= IODA_PELTM_FUNC_VALID;
	} else
		pelt = 0;

	p7ioc_phb_ioda_sel(p, IODA_TBL_PELTM, pe_number, false);
	out_be64(p->regs + PHB_IODA_DATA0, pelt);

	/* Update cache */
	*cache = pelt;

	return OPAL_SUCCESS;
}

static int64_t p7ioc_set_peltv(struct phb *phb, uint32_t parent_pe,
			       uint32_t child_pe, uint8_t state)
{
	struct p7ioc_phb *p = phb_to_p7ioc_phb(phb);
	uint32_t reg;
	uint64_t mask, peltv;
	uint64_t *cache;

	if (parent_pe > 127 || child_pe > 127)
		return OPAL_PARAMETER;

	cache = (child_pe >> 6) ? &p->peltv_hi_cache[parent_pe] :
		&p->peltv_lo_cache[parent_pe];
	reg = (child_pe >> 6) ? PHB_IODA_DATA1 : PHB_IODA_DATA0;
	child_pe &= 0x3f;
	mask = 1ull << (63 - child_pe);

	p7ioc_phb_ioda_sel(p, IODA_TBL_PELTV, parent_pe, false);
	peltv = in_be64(p->regs + reg);
	if (state)
		peltv |= mask;
	else
		peltv &= ~mask;
	out_be64(p->regs + reg, peltv);

	/* Update cache */
	*cache = peltv;

	return OPAL_SUCCESS;
}

static int64_t p7ioc_map_pe_dma_window(struct phb *phb, uint64_t pe_number,
				       uint16_t window_id, uint16_t tce_levels,
				       uint64_t tce_table_addr,
				       uint64_t tce_table_size,
				       uint64_t tce_page_size)
{
	struct p7ioc_phb *p = phb_to_p7ioc_phb(phb);
	uint64_t tvt0, tvt1, t, pelt;
	uint64_t dma_window_size;
	uint64_t *cache_lo, *cache_hi;

	if (pe_number > 127 || window_id > 127 || tce_levels != 1)
		return OPAL_PARAMETER;
	cache_lo = &p->tve_lo_cache[window_id];
	cache_hi = &p->tve_hi_cache[window_id];

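	/*
	 * Each TCE entry is 8 bytes, so a table of tce_table_size
	 * bytes holds (tce_table_size >> 3) entries and maps that
	 * many pages of tce_page_size each.
	 */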
	/* Encode table size */
	dma_window_size = tce_page_size * (tce_table_size >> 3);
	t = ilog2(dma_window_size);
	if (t < 27)
		return OPAL_PARAMETER;
	tvt0 = SETFIELD(IODA_TVT0_TCE_TABLE_SIZE, 0ul, (t - 26));

	/* Encode TCE page size */
	switch (tce_page_size) {
	case 0x1000:		/* 4K */
		tvt1 = SETFIELD(IODA_TVT1_IO_PSIZE, 0ul, 1ul);
		break;
	case 0x10000:		/* 64K */
		tvt1 = SETFIELD(IODA_TVT1_IO_PSIZE, 0ul, 5ul);
		break;
	case 0x1000000:		/* 16M */
		tvt1 = SETFIELD(IODA_TVT1_IO_PSIZE, 0ul, 13ul);
		break;
	case 0x400000000UL:	/* 16G */
		tvt1 = SETFIELD(IODA_TVT1_IO_PSIZE, 0ul, 23ul);
		break;
	default:
		return OPAL_PARAMETER;
	}
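	/*
	 * The IO_PSIZE encoding above appears to follow
	 * log2(tce_page_size) - 11: 4K -> 1, 64K -> 5, 16M -> 13,
	 * 16G -> 23.
	 */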

	/* XXX Hub number ... leave 0 for now */

	/* Shift in the address. The table address is "off by 4 bits"
	 * but since the field is itself shifted by 16, we need to
	 * write the address >> 12, which boils down to writing a 4k
	 * page address.
	 */
	tvt0 = SETFIELD(IODA_TVT0_TABLE_ADDR, tvt0, tce_table_addr >> 12);

	/* Read the PE filter info from the PELT-M */
	p7ioc_phb_ioda_sel(p, IODA_TBL_PELTM, pe_number, false);
	pelt = in_be64(p->regs + PHB_IODA_DATA0);

	/* Copy in filter bits from PELT */
	tvt0 = SETFIELD(IODA_TVT0_BUS_VALID, tvt0,
			GETFIELD(IODA_PELTM_BUS_VALID, pelt));
	tvt0 = SETFIELD(IODA_TVT0_BUS_NUM, tvt0,
			GETFIELD(IODA_PELTM_BUS, pelt));
	tvt1 = SETFIELD(IODA_TVT1_DEV_NUM, tvt1,
			GETFIELD(IODA_PELTM_DEV, pelt));
	tvt1 = SETFIELD(IODA_TVT1_FUNC_NUM, tvt1,
			GETFIELD(IODA_PELTM_FUNC, pelt));
	if (pelt & IODA_PELTM_DEV_VALID)
		tvt1 |= IODA_TVT1_DEV_VALID;
	if (pelt & IODA_PELTM_FUNC_VALID)
		tvt1 |= IODA_TVT1_FUNC_VALID;
	tvt1 = SETFIELD(IODA_TVT1_PE_NUM, tvt1, pe_number);

	/* Write the TVE */
	p7ioc_phb_ioda_sel(p, IODA_TBL_TVT, window_id, false);
	out_be64(p->regs + PHB_IODA_DATA1, tvt1);
	out_be64(p->regs + PHB_IODA_DATA0, tvt0);

	/* Update cache */
	*cache_lo = tvt0;
	*cache_hi = tvt1;

	return OPAL_SUCCESS;
}

static int64_t p7ioc_map_pe_dma_window_real(struct phb *phb __unused,
					    uint64_t pe_number __unused,
					    uint16_t dma_window_num __unused,
					    uint64_t pci_start_addr __unused,
					    uint64_t pci_mem_size __unused)
{
	/* XXX Not yet implemented (not yet used by Linux) */
	return OPAL_UNSUPPORTED;
}

static int64_t p7ioc_set_mve(struct phb *phb, uint32_t mve_number,
			     uint64_t pe_number)
{
	struct p7ioc_phb *p = phb_to_p7ioc_phb(phb);
	uint64_t pelt, mve = 0;
	uint64_t *cache = &p->mve_cache[mve_number];

	if (pe_number > 127 || mve_number > 255)
		return OPAL_PARAMETER;

	/* Read the PE filter info from the PELT-M */
	p7ioc_phb_ioda_sel(p, IODA_TBL_PELTM, pe_number, false);
	pelt = in_be64(p->regs + PHB_IODA_DATA0);

	mve = SETFIELD(IODA_MVT_BUS_VALID, mve,
		       GETFIELD(IODA_PELTM_BUS_VALID, pelt));
	mve = SETFIELD(IODA_MVT_BUS_NUM, mve,
		       GETFIELD(IODA_PELTM_BUS, pelt));
	mve = SETFIELD(IODA_MVT_DEV_NUM, mve,
		       GETFIELD(IODA_PELTM_DEV, pelt));
	mve = SETFIELD(IODA_MVT_FUNC_NUM, mve,
		       GETFIELD(IODA_PELTM_FUNC, pelt));
	if (pelt & IODA_PELTM_DEV_VALID)
		mve |= IODA_MVT_DEV_VALID;
	if (pelt & IODA_PELTM_FUNC_VALID)
		mve |= IODA_MVT_FUNC_VALID;
	mve = SETFIELD(IODA_MVT_PE_NUM, mve, pe_number);

	p7ioc_phb_ioda_sel(p, IODA_TBL_MVT, mve_number, false);
	out_be64(p->regs + PHB_IODA_DATA0, mve);

	/* Update cache */
	*cache = mve;

	return OPAL_SUCCESS;
}

static int64_t p7ioc_set_mve_enable(struct phb *phb, uint32_t mve_number,
				    uint32_t state)
{
	struct p7ioc_phb *p = phb_to_p7ioc_phb(phb);
	uint64_t mve;
	uint64_t *cache = &p->mve_cache[mve_number];

	if (mve_number > 255)
		return OPAL_PARAMETER;

	p7ioc_phb_ioda_sel(p, IODA_TBL_MVT, mve_number, false);
	mve = in_be64(p->regs + PHB_IODA_DATA0);
	if (state)
		mve |= IODA_MVT_VALID;
	else
		mve &= ~IODA_MVT_VALID;
	out_be64(p->regs + PHB_IODA_DATA0, mve);

	/* Update cache */
	*cache = mve;

	return OPAL_SUCCESS;
}

static int64_t p7ioc_set_xive_pe(struct phb *phb, uint64_t pe_number,
				 uint32_t xive_num)
{
	struct p7ioc_phb *p = phb_to_p7ioc_phb(phb);
	uint64_t xive;

	if (pe_number > 127 || xive_num > 255)
		return OPAL_PARAMETER;

	/* Update MXIVE cache */
	xive = p->mxive_cache[xive_num];
	xive = SETFIELD(IODA_XIVT_PENUM, xive, pe_number);
	p->mxive_cache[xive_num] = xive;

	/* Update HW */
	p7ioc_phb_ioda_sel(p, IODA_TBL_MXIVT, xive_num, false);
	xive = in_be64(p->regs + PHB_IODA_DATA0);
	xive = SETFIELD(IODA_XIVT_PENUM, xive, pe_number);
	out_be64(p->regs + PHB_IODA_DATA0, xive);

	return OPAL_SUCCESS;
}

static int64_t p7ioc_get_xive_source(struct phb *phb, uint32_t xive_num,
				     int32_t *interrupt_source_number)
{
	struct p7ioc_phb *p = phb_to_p7ioc_phb(phb);

	if (xive_num > 255 || !interrupt_source_number)
		return OPAL_PARAMETER;

	*interrupt_source_number = (p->buid_msi << 4) | xive_num;

	return OPAL_SUCCESS;
}

static int64_t p7ioc_get_msi_32(struct phb *phb __unused, uint64_t mve_number,
				uint32_t xive_num, uint8_t msi_range,
				uint32_t *msi_address, uint32_t *message_data)
{
	if (mve_number > 255 || xive_num > 255 || msi_range != 1)
		return OPAL_PARAMETER;

	*msi_address = 0xffff0000 | (mve_number << 4);
	*message_data = xive_num;

	return OPAL_SUCCESS;
}

static int64_t p7ioc_get_msi_64(struct phb *phb __unused, uint64_t mve_number,
				uint32_t xive_num, uint8_t msi_range,
				uint64_t *msi_address, uint32_t *message_data)
{
	if (mve_number > 255 || xive_num > 255 || msi_range != 1)
		return OPAL_PARAMETER;

	*msi_address = (9ul << 60) | (((u64)mve_number) << 48);
	*message_data = xive_num;

	return OPAL_SUCCESS;
}

static void p7ioc_root_port_init(struct phb *phb, struct pci_device *dev,
				 int ecap, int aercap)
{
	uint16_t bdfn = dev->bdfn;
	uint16_t val16;
	uint32_t val32;

	/* Enable SERR and parity checking */
	pci_cfg_read16(phb, bdfn, PCI_CFG_CMD, &val16);
	val16 |= (PCI_CFG_CMD_SERR_EN | PCI_CFG_CMD_PERR_RESP);
	pci_cfg_write16(phb, bdfn, PCI_CFG_CMD, val16);

	/* Enable reporting various errors */
	if (!ecap) return;
	pci_cfg_read16(phb, bdfn, ecap + PCICAP_EXP_DEVCTL, &val16);
	val16 |= (PCICAP_EXP_DEVCTL_CE_REPORT |
		  PCICAP_EXP_DEVCTL_NFE_REPORT |
		  PCICAP_EXP_DEVCTL_FE_REPORT |
		  PCICAP_EXP_DEVCTL_UR_REPORT);
	pci_cfg_write16(phb, bdfn, ecap + PCICAP_EXP_DEVCTL, val16);

	/* Mask various unrecoverable errors */
	if (!aercap) return;
	pci_cfg_read32(phb, bdfn, aercap + PCIECAP_AER_UE_MASK, &val32);
	val32 |= (PCIECAP_AER_UE_MASK_POISON_TLP |
		  PCIECAP_AER_UE_MASK_COMPL_TIMEOUT |
		  PCIECAP_AER_UE_MASK_COMPL_ABORT |
		  PCIECAP_AER_UE_MASK_ECRC);
	pci_cfg_write32(phb, bdfn, aercap + PCIECAP_AER_UE_MASK, val32);

	/* Report various unrecoverable errors as fatal errors */
	pci_cfg_read32(phb, bdfn, aercap + PCIECAP_AER_UE_SEVERITY, &val32);
	val32 |= (PCIECAP_AER_UE_SEVERITY_DLLP |
		  PCIECAP_AER_UE_SEVERITY_SURPRISE_DOWN |
		  PCIECAP_AER_UE_SEVERITY_FLOW_CTL_PROT |
		  PCIECAP_AER_UE_SEVERITY_UNEXP_COMPL |
		  PCIECAP_AER_UE_SEVERITY_RECV_OVFLOW |
		  PCIECAP_AER_UE_SEVERITY_MALFORMED_TLP);
	pci_cfg_write32(phb, bdfn, aercap + PCIECAP_AER_UE_SEVERITY, val32);

	/* Mask various recoverable errors */
	pci_cfg_read32(phb, bdfn, aercap + PCIECAP_AER_CE_MASK, &val32);
	val32 |= PCIECAP_AER_CE_MASK_ADV_NONFATAL;
	pci_cfg_write32(phb, bdfn, aercap + PCIECAP_AER_CE_MASK, val32);

	/* Enable ECRC check */
	pci_cfg_read32(phb, bdfn, aercap + PCIECAP_AER_CAPCTL, &val32);
	val32 |= (PCIECAP_AER_CAPCTL_ECRCG_EN |
		  PCIECAP_AER_CAPCTL_ECRCC_EN);
	pci_cfg_write32(phb, bdfn, aercap + PCIECAP_AER_CAPCTL, val32);

	/* Enable all error reporting */
	pci_cfg_read32(phb, bdfn, aercap + PCIECAP_AER_RERR_CMD, &val32);
	val32 |= (PCIECAP_AER_RERR_CMD_FE |
		  PCIECAP_AER_RERR_CMD_NFE |
		  PCIECAP_AER_RERR_CMD_CE);
	pci_cfg_write32(phb, bdfn, aercap + PCIECAP_AER_RERR_CMD, val32);
}

static void p7ioc_switch_port_init(struct phb *phb,
				   struct pci_device *dev,
				   int ecap, int aercap)
{
	uint16_t bdfn = dev->bdfn;
	uint16_t val16;
	uint32_t val32;

	/* Enable SERR and parity checking and disable INTx */
	pci_cfg_read16(phb, bdfn, PCI_CFG_CMD, &val16);
	val16 |= (PCI_CFG_CMD_PERR_RESP |
		  PCI_CFG_CMD_SERR_EN |
		  PCI_CFG_CMD_INTx_DIS);
	pci_cfg_write16(phb, bdfn, PCI_CFG_CMD, val16);

	/* Disable parity error response and enable system error */
	pci_cfg_read16(phb, bdfn, PCI_CFG_BRCTL, &val16);
	val16 &= ~PCI_CFG_BRCTL_PERR_RESP_EN;
	val16 |= PCI_CFG_BRCTL_SERR_EN;
	pci_cfg_write16(phb, bdfn, PCI_CFG_BRCTL, val16);

	/* Enable reporting various errors */
	if (!ecap) return;
	pci_cfg_read16(phb, bdfn, ecap + PCICAP_EXP_DEVCTL, &val16);
	val16 |= (PCICAP_EXP_DEVCTL_CE_REPORT |
		  PCICAP_EXP_DEVCTL_NFE_REPORT |
		  PCICAP_EXP_DEVCTL_FE_REPORT);
	pci_cfg_write16(phb, bdfn, ecap + PCICAP_EXP_DEVCTL, val16);

	/* Unmask all unrecoverable errors */
	if (!aercap) return;
	pci_cfg_write32(phb, bdfn, aercap + PCIECAP_AER_UE_MASK, 0x0);

	/* Severity of unrecoverable errors */
	if (dev->dev_type == PCIE_TYPE_SWITCH_UPPORT)
		val32 = (PCIECAP_AER_UE_SEVERITY_DLLP |
			 PCIECAP_AER_UE_SEVERITY_SURPRISE_DOWN |
			 PCIECAP_AER_UE_SEVERITY_FLOW_CTL_PROT |
			 PCIECAP_AER_UE_SEVERITY_RECV_OVFLOW |
			 PCIECAP_AER_UE_SEVERITY_MALFORMED_TLP |
			 PCIECAP_AER_UE_SEVERITY_INTERNAL);
	else
		val32 = (PCIECAP_AER_UE_SEVERITY_FLOW_CTL_PROT |
			 PCIECAP_AER_UE_SEVERITY_INTERNAL);
	pci_cfg_write32(phb, bdfn, aercap + PCIECAP_AER_UE_SEVERITY, val32);

	/* Mask various correctable errors */
	val32 = PCIECAP_AER_CE_MASK_ADV_NONFATAL;
	pci_cfg_write32(phb, bdfn, aercap + PCIECAP_AER_CE_MASK, val32);

	/* Enable ECRC generation and disable ECRC check */
	pci_cfg_read32(phb, bdfn, aercap + PCIECAP_AER_CAPCTL, &val32);
	val32 |= PCIECAP_AER_CAPCTL_ECRCG_EN;
	val32 &= ~PCIECAP_AER_CAPCTL_ECRCC_EN;
	pci_cfg_write32(phb, bdfn, aercap + PCIECAP_AER_CAPCTL, val32);
}

static void p7ioc_endpoint_init(struct phb *phb,
				struct pci_device *dev,
				int ecap, int aercap)
{
	uint16_t bdfn = dev->bdfn;
	uint16_t val16;
	uint32_t val32;

	/* Enable SERR and parity checking */
	pci_cfg_read16(phb, bdfn, PCI_CFG_CMD, &val16);
	val16 |= (PCI_CFG_CMD_PERR_RESP |
		  PCI_CFG_CMD_SERR_EN);
	pci_cfg_write16(phb, bdfn, PCI_CFG_CMD, val16);

	/* Enable reporting various errors */
	if (!ecap) return;
	pci_cfg_read16(phb, bdfn, ecap + PCICAP_EXP_DEVCTL, &val16);
	val16 &= ~PCICAP_EXP_DEVCTL_CE_REPORT;
	val16 |= (PCICAP_EXP_DEVCTL_NFE_REPORT |
		  PCICAP_EXP_DEVCTL_FE_REPORT |
		  PCICAP_EXP_DEVCTL_UR_REPORT);
	pci_cfg_write16(phb, bdfn, ecap + PCICAP_EXP_DEVCTL, val16);

	/* Enable ECRC generation and check */
	if (!aercap) return;
	pci_cfg_read32(phb, bdfn, aercap + PCIECAP_AER_CAPCTL, &val32);
	val32 |= (PCIECAP_AER_CAPCTL_ECRCG_EN |
		  PCIECAP_AER_CAPCTL_ECRCC_EN);
	pci_cfg_write32(phb, bdfn, aercap + PCIECAP_AER_CAPCTL, val32);
}

static int p7ioc_device_init(struct phb *phb,
			     struct pci_device *dev,
			     void *data __unused)
{
	int ecap, aercap;

	/* Common initialization for the device */
	pci_device_init(phb, dev);

	ecap = pci_cap(dev, PCI_CFG_CAP_ID_EXP, false);
	aercap = pci_cap(dev, PCIECAP_ID_AER, true);
	if (dev->dev_type == PCIE_TYPE_ROOT_PORT)
		p7ioc_root_port_init(phb, dev, ecap, aercap);
	else if (dev->dev_type == PCIE_TYPE_SWITCH_UPPORT ||
		 dev->dev_type == PCIE_TYPE_SWITCH_DNPORT)
		p7ioc_switch_port_init(phb, dev, ecap, aercap);
	else
		p7ioc_endpoint_init(phb, dev, ecap, aercap);

	return 0;
}

static int64_t p7ioc_pci_reinit(struct phb *phb,
				uint64_t scope, uint64_t data)
{
	struct pci_device *pd;
	uint16_t bdfn = data;
	int ret;

	if (scope != OPAL_REINIT_PCI_DEV)
		return OPAL_PARAMETER;

	pd = pci_find_dev(phb, bdfn);
	if (!pd)
		return OPAL_PARAMETER;

	ret = p7ioc_device_init(phb, pd, NULL);
	if (ret)
		return OPAL_HARDWARE;

	return OPAL_SUCCESS;
}

static uint8_t p7ioc_choose_bus(struct phb *phb __unused,
				struct pci_device *bridge,
				uint8_t candidate, uint8_t *max_bus,
				bool *use_max)
{
	uint8_t m, al;
	int i;

	/* Bus number selection is nasty on P7IOC. Our EEH HW can only cope
	 * with bus ranges that are naturally aligned powers of two. It also
	 * has "issues" with dealing with more than 32 bus numbers.
	 *
	 * On the other hand we can deal with overlaps to some extent as
	 * the PELT-M entries are ordered.
	 *
	 * We also don't need to bother with the busses between the upstream
	 * and downstream ports of switches.
	 *
	 * For now we apply this simple mechanism which matches what OFW
	 * does under OPAL:
	 *
	 * - Top level bus (PHB to RC) is 0
	 * - RC to first device is 1..ff
	 * - Then going down, a switch gets (N = parent bus, M = parent max)
	 *       * Upstream bridge is N+1, M, use_max = false
	 *       * Downstream bridge is the closest power of two from 32
	 *         down, with use_max = true
1577 	 *
1578 	 * XXX NOTE: If we have access to HW VPDs, we could know whether
1579 	 * this is a bridge with a single device on it such as IPR and
1580 	 * limit ourselves to a single bus number.
1581 	 */
1582 
1583 	/* Default use_max is false (legacy) */
1584 	*use_max = false;
1585 
1586 	/* If we are the root complex or we are not in PCIe land anymore, just
1587 	 * use legacy algorithm
1588 	 */
1589 	if (!bridge || !pci_has_cap(bridge, PCI_CFG_CAP_ID_EXP, false))
1590 		return candidate;
1591 
1592 	/* Figure out the bridge type */
1593 	switch(bridge->dev_type) {
1594 	case PCIE_TYPE_PCIX_TO_PCIE:
1595 		/* PCI-X to PCIE ... hrm, let's not bother too much with that */
1596 		return candidate;
1597 	case PCIE_TYPE_SWITCH_UPPORT:
1598 	case PCIE_TYPE_ROOT_PORT:
1599 		/* Upstream port, we use legacy handling as well */
1600 		return candidate;
1601 	case PCIE_TYPE_SWITCH_DNPORT:
1602 	case PCIE_TYPE_PCIE_TO_PCIX:
1603 		/* That leaves us with the interesting cases that we handle */
1604 		break;
1605 	default:
1606 		/* Should not happen, treat as legacy */
1607 		prerror("PCI: Device %04x has unsupported type %d in choose_bus\n",
1608 			bridge->bdfn, bridge->dev_type);
1609 		return candidate;
1610 	}
1611 
1612 	/* Ok, let's find a power of two that fits, fallback to 1 */
1613 	for (i = 5; i >= 0; i--) {
1614 		m = (1 << i) - 1;
1615 		al = (candidate + m) & ~m;
1616 		if (al <= *max_bus && (al + m) <= *max_bus)
1617 			break;
1618 	}
1619 	if (i < 0)
1620 		return 0;
1621 	*use_max = true;
1622 	*max_bus = al + m;
1623 	return al;
1624 }
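
/*
 * Worked example for the alignment loop above (illustrative): with
 * candidate = 5 and *max_bus = 0xff, the first iteration (i = 5) gives
 * m = 31 and al = (5 + 31) & ~31 = 32; since 32 + 31 <= 0xff we stop
 * there, return bus 32 and shrink *max_bus to 63, i.e. a naturally
 * aligned 32-bus range as the EEH HW requires.
 */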

static int64_t p7ioc_get_reserved_pe_number(struct phb *phb __unused)
{
	return 127;
}

/* p7ioc_phb_init_ioda_cache - Reset the IODA cache values
 */
static void p7ioc_phb_init_ioda_cache(struct p7ioc_phb *p)
{
	unsigned int i;

	for (i = 0; i < 8; i++)
		p->lxive_cache[i] = SETFIELD(IODA_XIVT_PRIORITY, 0ull, 0xff);
	for (i = 0; i < 256; i++) {
		p->mxive_cache[i] = SETFIELD(IODA_XIVT_PRIORITY, 0ull, 0xff);
		p->mve_cache[i]   = 0;
	}
	for (i = 0; i < 16; i++)
		p->m64b_cache[i] = 0;

	/*
	 * Since there is only one root port under the PHB,
	 * we make all PELTM entries except the last one
	 * invalid by setting their RID to 00:00.1. The
	 * last entry is left clear to encompass all RIDs.
	 */
	for (i = 0; i < 127; i++)
		p->peltm_cache[i] = 0x0001f80000000000UL;
	p->peltm_cache[127] = 0x0ul;

	for (i = 0; i < 128; i++) {
		p->peltv_lo_cache[i]	= 0;
		p->peltv_hi_cache[i]	= 0;
		p->tve_lo_cache[i]	= 0;
		p->tve_hi_cache[i]	= 0;
		p->iod_cache[i]		= 0;
		p->m32d_cache[i]	= 0;
		p->m64d_cache[i]	= 0;
	}
}
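
/*
 * Decoding the 0x0001f80000000000 PELTM value above (an inference from
 * this file, not from the P7IOC spec): the top 16 bits hold the RID,
 * 0x0001 = bus 00, device 00, function 1, and the 0xf8 that follows is
 * presumably the RID compare mask. An all-zero entry compares nothing,
 * which is why the last entry matches every RID.
 */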

/* p7ioc_phb_ioda_reset - Reset the IODA tables
 *
 * @purge: If true, the cache is cleared and the cleared values
 *         are applied to HW. If false, the cached values are
 *         applied to HW
 *
 * This resets the IODA tables in the PHB. It is called at
 * initialization time, on PHB reset, and can be called
 * explicitly from OPAL
 */
static int64_t p7ioc_ioda_reset(struct phb *phb, bool purge)
{
	struct p7ioc_phb *p = phb_to_p7ioc_phb(phb);
	unsigned int i;
	uint64_t reg64;
	uint64_t data64, data64_hi;
	uint8_t prio;
	uint16_t server;
	uint64_t m_server, m_prio;

	/* If the "purge" argument is set, we clear the table cache */
	if (purge)
		p7ioc_phb_init_ioda_cache(p);

	/* Init_18..19: Setup the HRT
	 *
	 * XXX NOTE: I still don't completely get that HRT business so
	 * I'll just mimic BML and put the PHB number + 1 in there
	 */
	p7ioc_phb_ioda_sel(p, IODA_TBL_HRT, 0, true);
	out_be64(p->regs + PHB_IODA_DATA0, p->index + 1);
	out_be64(p->regs + PHB_IODA_DATA0, p->index + 1);
	out_be64(p->regs + PHB_IODA_DATA0, p->index + 1);
	out_be64(p->regs + PHB_IODA_DATA0, p->index + 1);

	/* Init_20..21: Cleanup the LXIVT
	 *
	 * We set the priority to FF (masked) and clear everything
	 * else. That means we leave the HRT index to 0 which is
	 * going to remain unmodified... for now.
	 */
	p7ioc_phb_ioda_sel(p, IODA_TBL_LXIVT, 0, true);
	for (i = 0; i < 8; i++) {
		data64 = p->lxive_cache[i];
		server = GETFIELD(IODA_XIVT_SERVER, data64);
		prio = GETFIELD(IODA_XIVT_PRIORITY, data64);

		/* Now we mangle the server and priority */
		if (prio == 0xff) {
			m_server = 0;
			m_prio = 0xff;
		} else {
			m_server = server >> 3;
			m_prio = (prio >> 3) | ((server & 7) << 5);
		}

		data64 = SETFIELD(IODA_XIVT_SERVER,   data64, m_server);
		data64 = SETFIELD(IODA_XIVT_PRIORITY, data64, m_prio);
		out_be64(p->regs + PHB_IODA_DATA0, data64);
	}

	/* Init_22..23: Cleanup the MXIVT
	 *
	 * We set the priority to FF (masked) and clear everything
	 * else. That means we leave the HRT index to 0 which is
	 * going to remain unmodified... for now.
	 */
	p7ioc_phb_ioda_sel(p, IODA_TBL_MXIVT, 0, true);
	for (i = 0; i < 256; i++) {
		data64 = p->mxive_cache[i];
		server = GETFIELD(IODA_XIVT_SERVER, data64);
		prio = GETFIELD(IODA_XIVT_PRIORITY, data64);

		/* Now we mangle the server and priority */
		if (prio == 0xff) {
			m_server = 0;
			m_prio = 0xff;
		} else {
			m_server = server >> 3;
			m_prio = (prio >> 3) | ((server & 7) << 5);
		}

		data64 = SETFIELD(IODA_XIVT_SERVER,   data64, m_server);
		data64 = SETFIELD(IODA_XIVT_PRIORITY, data64, m_prio);
		out_be64(p->regs + PHB_IODA_DATA0, data64);
	}

	/* Init_24..25: Cleanup the MVT */
	p7ioc_phb_ioda_sel(p, IODA_TBL_MVT, 0, true);
	for (i = 0; i < 256; i++) {
		data64 = p->mve_cache[i];
		out_be64(p->regs + PHB_IODA_DATA0, data64);
	}

	/* Init_26..27: Cleanup the PELTM
	 *
	 * A completely clear PELTM should make everything match PE 0
	 */
	p7ioc_phb_ioda_sel(p, IODA_TBL_PELTM, 0, true);
	for (i = 0; i < 127; i++) {
		data64 = p->peltm_cache[i];
		out_be64(p->regs + PHB_IODA_DATA0, data64);
	}

	/* Init_28..30: Cleanup the PELTV */
	p7ioc_phb_ioda_sel(p, IODA_TBL_PELTV, 0, true);
	for (i = 0; i < 127; i++) {
		data64 = p->peltv_lo_cache[i];
		data64_hi = p->peltv_hi_cache[i];
		out_be64(p->regs + PHB_IODA_DATA1, data64_hi);
		out_be64(p->regs + PHB_IODA_DATA0, data64);
	}

	/* Init_31..33: Cleanup the TVT */
	p7ioc_phb_ioda_sel(p, IODA_TBL_TVT, 0, true);
	for (i = 0; i < 127; i++) {
		data64 = p->tve_lo_cache[i];
		data64_hi = p->tve_hi_cache[i];
		out_be64(p->regs + PHB_IODA_DATA1, data64_hi);
		out_be64(p->regs + PHB_IODA_DATA0, data64);
	}

	/* Init_34..35: Cleanup the M64BT
	 *
	 * We don't enable M64 BARs by default. However,
	 * we shouldn't purge the HW and cache for it in
	 * the future.
	 */
	p7ioc_phb_ioda_sel(p, IODA_TBL_M64BT, 0, true);
	for (i = 0; i < 16; i++)
		out_be64(p->regs + PHB_IODA_DATA0, 0);

	/* Init_36..37: Cleanup the IODT */
	p7ioc_phb_ioda_sel(p, IODA_TBL_IODT, 0, true);
	for (i = 0; i < 127; i++) {
		data64 = p->iod_cache[i];
		out_be64(p->regs + PHB_IODA_DATA0, data64);
	}

	/* Init_38..39: Cleanup the M32DT */
	p7ioc_phb_ioda_sel(p, IODA_TBL_M32DT, 0, true);
	for (i = 0; i < 127; i++) {
		data64 = p->m32d_cache[i];
		out_be64(p->regs + PHB_IODA_DATA0, data64);
	}

	/* Init_40..41: Restore the M64BT from cache and cleanup the M64DT */
	p7ioc_phb_ioda_sel(p, IODA_TBL_M64BT, 0, true);
	for (i = 0; i < 16; i++) {
		data64 = p->m64b_cache[i];
		out_be64(p->regs + PHB_IODA_DATA0, data64);
	}

	p7ioc_phb_ioda_sel(p, IODA_TBL_M64DT, 0, true);
	for (i = 0; i < 127; i++) {
		data64 = p->m64d_cache[i];
		out_be64(p->regs + PHB_IODA_DATA0, data64);
	}

	/* Clear up the TCE cache */
	reg64 = in_be64(p->regs + PHB_PHB2_CONFIG);
	reg64 &= ~PHB_PHB2C_64B_TCE_EN;
	out_be64(p->regs + PHB_PHB2_CONFIG, reg64);
	reg64 |= PHB_PHB2C_64B_TCE_EN;
	out_be64(p->regs + PHB_PHB2_CONFIG, reg64);
	in_be64(p->regs + PHB_PHB2_CONFIG);

	/* Clear PEST & PEEV */
	for (i = 0; i < OPAL_P7IOC_NUM_PEST_REGS; i++) {
		uint64_t pesta, pestb;

		p7ioc_phb_ioda_sel(p, IODA_TBL_PESTA, i, false);
		pesta = in_be64(p->regs + PHB_IODA_DATA0);
		out_be64(p->regs + PHB_IODA_DATA0, 0);
		p7ioc_phb_ioda_sel(p, IODA_TBL_PESTB, i, false);
		pestb = in_be64(p->regs + PHB_IODA_DATA0);
		out_be64(p->regs + PHB_IODA_DATA0, 0);

		if ((pesta & IODA_PESTA_MMIO_FROZEN) ||
		    (pestb & IODA_PESTB_DMA_STOPPED))
			PHBDBG(p, "Frozen PE#%x (%s - %s)\n",
			       i, (pestb & IODA_PESTB_DMA_STOPPED) ? "DMA" : "",
			       (pesta & IODA_PESTA_MMIO_FROZEN) ? "MMIO" : "");
	}

	p7ioc_phb_ioda_sel(p, IODA_TBL_PEEV, 0, true);
	for (i = 0; i < 2; i++)
		out_be64(p->regs + PHB_IODA_DATA0, 0);

	return OPAL_SUCCESS;
}

/*
 * Clear anything we have in the PAPR Error Injection registers. The
 * spec says PAPR error injection should be one-shot, without a
 * "sticky" bit. However, that proved false in our experiments, so the
 * kernel has to clear the registers (via this call) at the appropriate
 * point to avoid an endless stream of frozen PEs.
 */
static int64_t p7ioc_papr_errinjct_reset(struct phb *phb)
{
	struct p7ioc_phb *p = phb_to_p7ioc_phb(phb);

	out_be64(p->regs + PHB_PAPR_ERR_INJ_CTL, 0x0ul);
	out_be64(p->regs + PHB_PAPR_ERR_INJ_ADDR, 0x0ul);
	out_be64(p->regs + PHB_PAPR_ERR_INJ_MASK, 0x0ul);

	return OPAL_SUCCESS;
}

static int64_t p7ioc_get_presence_state(struct pci_slot *slot, uint8_t *val)
{
	struct p7ioc_phb *p = phb_to_p7ioc_phb(slot->phb);
	uint64_t reg;

	reg = in_be64(p->regs + PHB_PCIE_SLOTCTL2);
	if (reg & PHB_PCIE_SLOTCTL2_PRSTN_STAT)
		*val = OPAL_PCI_SLOT_PRESENT;
	else
		*val = OPAL_PCI_SLOT_EMPTY;

	return OPAL_SUCCESS;
}

static int64_t p7ioc_get_link_state(struct pci_slot *slot, uint8_t *val)
{
	struct p7ioc_phb *p = phb_to_p7ioc_phb(slot->phb);
	uint64_t reg64;
	uint16_t state;
	int64_t rc;

	/* Check if the link training is completed */
	reg64 = in_be64(p->regs + PHB_PCIE_DLP_TRAIN_CTL);
	if (!(reg64 & PHB_PCIE_DLP_TC_DL_LINKACT)) {
		*val = 0;
		return OPAL_SUCCESS;
	}

	/* Grab link width from PCIe capability */
	rc = p7ioc_pcicfg_read16(&p->phb, 0, p->ecap + PCICAP_EXP_LSTAT,
				 &state);
	if (rc < 0) {
		PHBERR(p, "%s: Error %lld reading link status\n",
		       __func__, rc);
		return OPAL_HARDWARE;
	}

	if (state & PCICAP_EXP_LSTAT_DLLL_ACT)
		*val = ((state & PCICAP_EXP_LSTAT_WIDTH) >> 4);
	else
		*val = 0;

	return OPAL_SUCCESS;
}

static int64_t p7ioc_get_power_state(struct pci_slot *slot, uint8_t *val)
{
	struct p7ioc_phb *p = phb_to_p7ioc_phb(slot->phb);
	uint64_t reg64;

	reg64 = in_be64(p->regs + PHB_PCIE_SLOTCTL2);
	if (reg64 & PHB_PCIE_SLOTCTL2_PWR_EN_STAT)
		*val = PCI_SLOT_POWER_ON;
	else
		*val = PCI_SLOT_POWER_OFF;

	return OPAL_SUCCESS;
}

static int64_t p7ioc_set_power_state(struct pci_slot *slot, uint8_t val)
{
	struct p7ioc_phb *p = phb_to_p7ioc_phb(slot->phb);
	uint64_t reg64;
	uint8_t state = PCI_SLOT_POWER_OFF;

	if (val != PCI_SLOT_POWER_OFF && val != PCI_SLOT_POWER_ON)
		return OPAL_PARAMETER;

	/* Check whether the slot is already in the requested power state */
	reg64 = in_be64(p->regs + PHB_PCIE_SLOTCTL2);
	if (reg64 & PHB_PCIE_SLOTCTL2_PWR_EN_STAT)
		state = PCI_SLOT_POWER_ON;
	if (state == val)
		return OPAL_SUCCESS;

	/* Power on/off */
	if (val == PCI_SLOT_POWER_ON) {
		reg64 &= ~(0x8c00000000000000ul);
		out_be64(p->regs + PHB_HOTPLUG_OVERRIDE, reg64);
		reg64 |= 0x8400000000000000ul;
		out_be64(p->regs + PHB_HOTPLUG_OVERRIDE, reg64);
	} else {
		reg64 &= ~(0x8c00000000000000ul);
		reg64 |= 0x8400000000000000ul;
		out_be64(p->regs + PHB_HOTPLUG_OVERRIDE, reg64);
		reg64 &= ~(0x8c00000000000000ul);
		reg64 |= 0x0c00000000000000ul;
		out_be64(p->regs + PHB_HOTPLUG_OVERRIDE, reg64);
	}

	return OPAL_SUCCESS;
}
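
/*
 * On the magic constants used against PHB_HOTPLUG_OVERRIDE above: in
 * big-endian bit numbering, 0x8c00000000000000 is bits 0, 4 and 5,
 * 0x8400000000000000 is bits 0 and 5, and 0x0c00000000000000 is bits
 * 4 and 5. The p7ioc-regs.h names for those bits aren't used here, so
 * treat their exact meaning (override enables plus the power enable
 * level) as an educated guess rather than documented fact.
 */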

static void p7ioc_prepare_link_change(struct pci_slot *slot, bool up)
{
	struct p7ioc_phb *p = phb_to_p7ioc_phb(slot->phb);
	uint64_t ci_idx = p->index + 2;
	uint32_t cfg32;

	if (!up) {
		/* Mask PCIE port interrupts and AER receiver error */
		out_be64(p->regs + UTL_PCIE_PORT_IRQ_EN, 0x7E00000000000000UL);
		p7ioc_pcicfg_read32(&p->phb, 0,
				    p->aercap + PCIECAP_AER_CE_MASK, &cfg32);
		cfg32 |= PCIECAP_AER_CE_RECVR_ERR;
		p7ioc_pcicfg_write32(&p->phb, 0,
				     p->aercap + PCIECAP_AER_CE_MASK, cfg32);

		/* Mask CI port error and clear it */
		out_be64(p->ioc->regs + P7IOC_CIn_LEM_ERR_MASK(ci_idx),
			 0xa4f4000000000000ul);
		out_be64(p->regs + PHB_LEM_ERROR_MASK,
			 0xadb650c9808dd051ul);
		out_be64(p->ioc->regs + P7IOC_CIn_LEM_FIR(ci_idx),
			 0x0ul);

		/* Block access to PCI-CFG space */
		p->flags |= P7IOC_PHB_CFG_BLOCKED;
	} else {
		/* Clear spurious errors and enable PCIE port interrupts */
		out_be64(p->regs + UTL_PCIE_PORT_STATUS, 0x00E0000000000000UL);
		out_be64(p->regs + UTL_PCIE_PORT_IRQ_EN, 0xFE65000000000000UL);

		/* Clear AER receiver error status */
		p7ioc_pcicfg_write32(&p->phb, 0,
				     p->aercap + PCIECAP_AER_CE_STATUS,
				     PCIECAP_AER_CE_RECVR_ERR);
		/* Unmask receiver error status in AER */
		p7ioc_pcicfg_read32(&p->phb, 0,
				    p->aercap + PCIECAP_AER_CE_MASK, &cfg32);
		cfg32 &= ~PCIECAP_AER_CE_RECVR_ERR;
		p7ioc_pcicfg_write32(&p->phb, 0,
				     p->aercap + PCIECAP_AER_CE_MASK, cfg32);
		/* Clear and unmask CI port and PHB errors */
		out_be64(p->ioc->regs + P7IOC_CIn_LEM_FIR(ci_idx), 0x0ul);
		out_be64(p->regs + PHB_LEM_FIR_ACCUM, 0x0ul);
		out_be64(p->ioc->regs + P7IOC_CIn_LEM_ERR_MASK_AND(ci_idx),
			 0x0ul);
		out_be64(p->regs + PHB_LEM_ERROR_MASK, 0x1249a1147f500f2cul);

		/* Don't block access to PCI-CFG space */
		p->flags &= ~P7IOC_PHB_CFG_BLOCKED;

		/* Restore slot's state */
		pci_slot_set_state(slot, P7IOC_SLOT_NORMAL);

		/*
		 * We might lose the bus numbers over the reset and we need
		 * to restore them; otherwise some adapters (e.g. IPR) can't
		 * be probed properly by the kernel. Not every kind of reset
		 * requires this, but restoring the bus numbers is harmless
		 * in all cases and keeps the logic simple.
		 */
		pci_restore_bridge_buses(slot->phb, slot->pd);
		if (slot->phb->ops->device_init)
			pci_walk_dev(slot->phb, slot->pd,
				     slot->phb->ops->device_init, NULL);
	}
}

static int64_t p7ioc_poll_link(struct pci_slot *slot)
{
	struct p7ioc_phb *p = phb_to_p7ioc_phb(slot->phb);
	uint64_t reg64;

	switch (slot->state) {
	case P7IOC_SLOT_NORMAL:
	case P7IOC_SLOT_LINK_START:
		PHBDBG(p, "LINK: Start polling\n");
		reg64 = in_be64(p->regs + PHB_PCIE_DLP_TRAIN_CTL);
		reg64 &= ~PHB_PCIE_DLP_TCTX_DISABLE;
		out_be64(p->regs + PHB_PCIE_DLP_TRAIN_CTL, reg64);
		slot->retries = 100;
		pci_slot_set_state(slot, P7IOC_SLOT_LINK_WAIT);
		return pci_slot_set_sm_timeout(slot, msecs_to_tb(10));
	case P7IOC_SLOT_LINK_WAIT:
		reg64 = in_be64(p->regs + PHB_PCIE_DLP_TRAIN_CTL);
		if (reg64 & PHB_PCIE_DLP_TC_DL_LINKACT) {
			PHBDBG(p, "LINK: Up\n");
			slot->ops.prepare_link_change(slot, true);
			return OPAL_SUCCESS;
		}

		if (slot->retries-- == 0) {
			PHBERR(p, "LINK: Timeout waiting for link up\n");
			goto out;
		}
		return pci_slot_set_sm_timeout(slot, msecs_to_tb(10));
	default:
		PHBERR(p, "LINK: Unexpected slot state %08x\n",
		       slot->state);
	}

out:
	pci_slot_set_state(slot, P7IOC_SLOT_NORMAL);
	return OPAL_HARDWARE;
}

static int64_t p7ioc_hreset(struct pci_slot *slot)
{
	struct p7ioc_phb *p = phb_to_p7ioc_phb(slot->phb);
	uint8_t presence = 1;
	uint16_t brctl;
	uint64_t reg64;

	switch (slot->state) {
	case P7IOC_SLOT_NORMAL:
		PHBDBG(p, "HRESET: Starts\n");
		if (slot->ops.get_presence_state)
			slot->ops.get_presence_state(slot, &presence);
		if (!presence) {
			PHBDBG(p, "HRESET: No device\n");
			return OPAL_SUCCESS;
		}

		PHBDBG(p, "HRESET: Prepare for link down\n");
		slot->ops.prepare_link_change(slot, false);

		/* Disable link to avoid training issues */
		PHBDBG(p, "HRESET: Disable link training\n");
		reg64 = in_be64(p->regs + PHB_PCIE_DLP_TRAIN_CTL);
		reg64 |= PHB_PCIE_DLP_TCTX_DISABLE;
		out_be64(p->regs + PHB_PCIE_DLP_TRAIN_CTL, reg64);
		pci_slot_set_state(slot, P7IOC_SLOT_HRESET_TRAINING);
		slot->retries = 15;
		/* fall through */
	case P7IOC_SLOT_HRESET_TRAINING:
		reg64 = in_be64(p->regs + PHB_PCIE_DLP_TRAIN_CTL);
		if (!(reg64 & PHB_PCIE_DLP_TCRX_DISABLED)) {
			if (slot->retries-- == 0) {
				PHBERR(p, "HRESET: Timeout disabling link training\n");
				goto out;
			}

			return pci_slot_set_sm_timeout(slot, msecs_to_tb(10));
		}
		/* fall through */
	case P7IOC_SLOT_HRESET_START:
		PHBDBG(p, "HRESET: Assert\n");
		p7ioc_pcicfg_read16(&p->phb, 0, PCI_CFG_BRCTL, &brctl);
		brctl |= PCI_CFG_BRCTL_SECONDARY_RESET;
		p7ioc_pcicfg_write16(&p->phb, 0, PCI_CFG_BRCTL, brctl);

		pci_slot_set_state(slot, P7IOC_SLOT_HRESET_DELAY);
		return pci_slot_set_sm_timeout(slot, secs_to_tb(1));
	case P7IOC_SLOT_HRESET_DELAY:
		PHBDBG(p, "HRESET: Deassert\n");
		p7ioc_pcicfg_read16(&p->phb, 0, PCI_CFG_BRCTL, &brctl);
		brctl &= ~PCI_CFG_BRCTL_SECONDARY_RESET;
		p7ioc_pcicfg_write16(&p->phb, 0, PCI_CFG_BRCTL, brctl);
		pci_slot_set_state(slot, P7IOC_SLOT_HRESET_DELAY2);
		return pci_slot_set_sm_timeout(slot, msecs_to_tb(200));
	case P7IOC_SLOT_HRESET_DELAY2:
		pci_slot_set_state(slot, P7IOC_SLOT_LINK_START);
		return slot->ops.poll_link(slot);
	default:
		PHBERR(p, "HRESET: Unexpected slot state %08x\n",
		       slot->state);
	}

out:
	pci_slot_set_state(slot, P7IOC_SLOT_NORMAL);
	return OPAL_HARDWARE;
}

static int64_t p7ioc_freset(struct pci_slot *slot)
{
	struct p7ioc_phb *p = phb_to_p7ioc_phb(slot->phb);
	uint8_t presence = 1;
	uint64_t reg64;

	switch (slot->state) {
	case P7IOC_SLOT_NORMAL:
	case P7IOC_SLOT_FRESET_START:
		PHBDBG(p, "FRESET: Starts\n");
		if (slot->ops.get_presence_state)
			slot->ops.get_presence_state(slot, &presence);
		if (!presence) {
			PHBDBG(p, "FRESET: No device\n");
			pci_slot_set_state(slot, P7IOC_SLOT_NORMAL);
			return OPAL_SUCCESS;
		}

		PHBDBG(p, "FRESET: Prepare for link down\n");
		slot->ops.prepare_link_change(slot, false);

		/* Check power state */
		reg64 = in_be64(p->regs + PHB_PCIE_SLOTCTL2);
		if (reg64 & PHB_PCIE_SLOTCTL2_PWR_EN_STAT) {
			PHBDBG(p, "FRESET: Power on, turn off\n");
			reg64 = in_be64(p->regs + PHB_HOTPLUG_OVERRIDE);
			reg64 &= ~(0x8c00000000000000ul);
			reg64 |= 0x8400000000000000ul;
			out_be64(p->regs + PHB_HOTPLUG_OVERRIDE, reg64);
			reg64 &= ~(0x8c00000000000000ul);
			reg64 |= 0x0c00000000000000ul;
			out_be64(p->regs + PHB_HOTPLUG_OVERRIDE, reg64);
			pci_slot_set_state(slot, P7IOC_SLOT_FRESET_POWER_OFF);
			return pci_slot_set_sm_timeout(slot, secs_to_tb(2));
		}
		/* fall through */
	case P7IOC_SLOT_FRESET_POWER_OFF:
		PHBDBG(p, "FRESET: Power off, turn on\n");
		reg64 = in_be64(p->regs + PHB_HOTPLUG_OVERRIDE);
		reg64 &= ~(0x8c00000000000000ul);
		out_be64(p->regs + PHB_HOTPLUG_OVERRIDE, reg64);
		reg64 |= 0x8400000000000000ul;
		out_be64(p->regs + PHB_HOTPLUG_OVERRIDE, reg64);
		pci_slot_set_state(slot, P7IOC_SLOT_FRESET_POWER_ON);
		return pci_slot_set_sm_timeout(slot, secs_to_tb(2));
	case P7IOC_SLOT_FRESET_POWER_ON:
		PHBDBG(p, "FRESET: Disable link training\n");
		reg64 = in_be64(p->regs + PHB_PCIE_DLP_TRAIN_CTL);
		reg64 |= PHB_PCIE_DLP_TCTX_DISABLE;
		out_be64(p->regs + PHB_PCIE_DLP_TRAIN_CTL, reg64);
		pci_slot_set_state(slot, P7IOC_SLOT_HRESET_TRAINING);
		slot->retries = 200;
		/* fall through */
	case P7IOC_SLOT_HRESET_TRAINING:
		reg64 = in_be64(p->regs + PHB_PCIE_DLP_TRAIN_CTL);
		if (!(reg64 & PHB_PCIE_DLP_TCRX_DISABLED)) {
			if (slot->retries-- == 0) {
				PHBERR(p, "HRESET: Timeout disabling link training\n");
				goto out;
			}

			return pci_slot_set_sm_timeout(slot, msecs_to_tb(10));
		}

		PHBDBG(p, "FRESET: Assert\n");
		reg64 = in_be64(p->regs + PHB_RESET);
		reg64 &= ~0x2000000000000000ul;
		out_be64(p->regs + PHB_RESET, reg64);
		pci_slot_set_state(slot, P7IOC_SLOT_FRESET_ASSERT);
		return pci_slot_set_sm_timeout(slot, secs_to_tb(1));
	case P7IOC_SLOT_FRESET_ASSERT:
		PHBDBG(p, "FRESET: Deassert\n");
		reg64 = in_be64(p->regs + PHB_RESET);
		reg64 |= 0x2000000000000000ul;
		out_be64(p->regs + PHB_RESET, reg64);

		pci_slot_set_state(slot, P7IOC_SLOT_LINK_START);
		return slot->ops.poll_link(slot);
	default:
		PHBERR(p, "FRESET: Unexpected slot state %08x\n",
		       slot->state);
	}

out:
	pci_slot_set_state(slot, P7IOC_SLOT_NORMAL);
	return OPAL_HARDWARE;
}
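
/*
 * Summary of the fundamental reset sequence above: prepare for link
 * down, power the slot off and back on if it was on (with a 2s settle
 * time each way), disable link training and wait for the receiver to
 * be reported disabled, assert the reset by clearing bit 2 of
 * PHB_RESET for one second (the PHBDBG labels suggest it is active
 * low), set the bit again to deassert, then hand off to poll_link()
 * to wait for the link to retrain.
 */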

static int64_t p7ioc_creset(struct pci_slot *slot)
{
	struct p7ioc_phb *p = phb_to_p7ioc_phb(slot->phb);
	struct p7ioc *ioc = p->ioc;
	uint64_t reg64;

	switch (slot->state) {
	case P7IOC_SLOT_NORMAL:
		PHBDBG(p, "CRESET: Starts\n");
		p->flags |= P7IOC_PHB_CFG_BLOCKED;
		p7ioc_phb_reset(slot->phb);

		/*
		 * Experiments show that we probably still have the fenced
		 * state for the corresponding PHB in the Fence WOF and we
		 * need to clear that explicitly. Besides, the RGC might
		 * already have an informational error and we should clear
		 * that explicitly as well. Otherwise, RGC XIVE#0 won't
		 * issue interrupts any more.
		 */
		reg64 = in_be64(ioc->regs + P7IOC_CHIP_FENCE_WOF);
		reg64 &= ~PPC_BIT(15 + p->index * 4);
		out_be64(ioc->regs + P7IOC_CHIP_FENCE_WOF, reg64);

		/* Clear informational error from RGC */
		reg64 = in_be64(ioc->regs + P7IOC_RGC_LEM_BASE +
				P7IOC_LEM_WOF_OFFSET);
		reg64 &= ~PPC_BIT(18);
		out_be64(ioc->regs + P7IOC_RGC_LEM_BASE +
			 P7IOC_LEM_WOF_OFFSET, reg64);
		reg64 = in_be64(ioc->regs + P7IOC_RGC_LEM_BASE +
				P7IOC_LEM_FIR_OFFSET);
		reg64 &= ~PPC_BIT(18);
		out_be64(ioc->regs + P7IOC_RGC_LEM_BASE +
			 P7IOC_LEM_FIR_OFFSET, reg64);

		/* Switch to fundamental reset */
		pci_slot_set_state(slot, P7IOC_SLOT_FRESET_START);
		return slot->ops.freset(slot);
	default:
		PHBERR(p, "CRESET: Unexpected slot state %08x\n",
		       slot->state);
	}

	pci_slot_set_state(slot, P7IOC_SLOT_NORMAL);
	return OPAL_HARDWARE;
}

static struct pci_slot *p7ioc_phb_slot_create(struct phb *phb)
{
	struct pci_slot *slot;

	slot = pci_slot_alloc(phb, NULL);
	if (!slot)
		return NULL;

	/* Elementary functions */
	slot->ops.get_presence_state   = p7ioc_get_presence_state;
	slot->ops.get_link_state       = p7ioc_get_link_state;
	slot->ops.get_power_state      = p7ioc_get_power_state;
	slot->ops.get_attention_state  = NULL;
	slot->ops.get_latch_state      = NULL;
	slot->ops.set_power_state      = p7ioc_set_power_state;
	slot->ops.set_attention_state  = NULL;

	/*
	 * For PHB slots, we have to split the fundamental reset
	 * into 2 steps. We might not have the first step which
	 * is to power off/on the slot, or it's controlled by
	 * individual platforms.
	 */
	slot->ops.prepare_link_change  = p7ioc_prepare_link_change;
	slot->ops.poll_link            = p7ioc_poll_link;
	slot->ops.hreset               = p7ioc_hreset;
	slot->ops.freset               = p7ioc_freset;
	slot->ops.creset               = p7ioc_creset;

	return slot;
}

static const struct phb_ops p7ioc_phb_ops = {
	.cfg_read8		= p7ioc_pcicfg_read8,
	.cfg_read16		= p7ioc_pcicfg_read16,
	.cfg_read32		= p7ioc_pcicfg_read32,
	.cfg_write8		= p7ioc_pcicfg_write8,
	.cfg_write16		= p7ioc_pcicfg_write16,
	.cfg_write32		= p7ioc_pcicfg_write32,
	.choose_bus		= p7ioc_choose_bus,
	.get_reserved_pe_number	= p7ioc_get_reserved_pe_number,
	.device_init		= p7ioc_device_init,
	.device_remove		= NULL,
	.pci_reinit		= p7ioc_pci_reinit,
	.eeh_freeze_status	= p7ioc_eeh_freeze_status,
	.eeh_freeze_clear	= p7ioc_eeh_freeze_clear,
	.eeh_freeze_set		= p7ioc_eeh_freeze_set,
	.err_inject		= p7ioc_err_inject,
	.get_diag_data		= NULL,
	.get_diag_data2		= p7ioc_get_diag_data,
	.next_error		= p7ioc_eeh_next_error,
	.phb_mmio_enable	= p7ioc_phb_mmio_enable,
	.set_phb_mem_window	= p7ioc_set_phb_mem_window,
	.map_pe_mmio_window	= p7ioc_map_pe_mmio_window,
	.set_pe			= p7ioc_set_pe,
	.set_peltv		= p7ioc_set_peltv,
	.map_pe_dma_window	= p7ioc_map_pe_dma_window,
	.map_pe_dma_window_real	= p7ioc_map_pe_dma_window_real,
	.set_mve		= p7ioc_set_mve,
	.set_mve_enable		= p7ioc_set_mve_enable,
	.set_xive_pe		= p7ioc_set_xive_pe,
	.get_xive_source	= p7ioc_get_xive_source,
	.get_msi_32		= p7ioc_get_msi_32,
	.get_msi_64		= p7ioc_get_msi_64,
	.ioda_reset		= p7ioc_ioda_reset,
	.papr_errinjct_reset	= p7ioc_papr_errinjct_reset,
};

/* p7ioc_msi_get_xive - Interrupt control from OPAL */
static int64_t p7ioc_msi_get_xive(struct irq_source *is, uint32_t isn,
				  uint16_t *server, uint8_t *prio)
{
	struct p7ioc_phb *p = is->data;
	uint32_t irq, fbuid = P7_IRQ_FBUID(isn);
	uint64_t xive;

	if (fbuid < p->buid_msi || fbuid >= (p->buid_msi + 0x10))
		return OPAL_PARAMETER;

	irq = isn & 0xff;
	xive = p->mxive_cache[irq];

	*server = GETFIELD(IODA_XIVT_SERVER, xive);
	*prio = GETFIELD(IODA_XIVT_PRIORITY, xive);

	return OPAL_SUCCESS;
}
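
/*
 * On the ISN layout assumed above (inferred from the
 * register_irq_source() calls in p7ioc_phb_setup() below, not stated
 * explicitly anywhere in this file): MSI sources start at
 * buid_msi << 4 and span 256 interrupts, so the low 8 bits of the ISN
 * select the MXIVT entry while P7_IRQ_FBUID() recovers the fabric
 * BUID, which must land in [buid_msi, buid_msi + 0x10).
 */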

/* p7ioc_msi_set_xive - Interrupt control from OPAL */
static int64_t p7ioc_msi_set_xive(struct irq_source *is, uint32_t isn,
				  uint16_t server, uint8_t prio)
{
	struct p7ioc_phb *p = is->data;
	uint32_t irq, fbuid = P7_IRQ_FBUID(isn);
	uint64_t xive, m_server, m_prio;

	if (fbuid < p->buid_msi || fbuid >= (p->buid_msi + 0x10))
		return OPAL_PARAMETER;

	/* We cache the arguments because we have to mangle
	 * them in order to hijack 3 bits of priority to extend
	 * the server number
	 */
	irq = isn & 0xff;
	xive = p->mxive_cache[irq];
	xive = SETFIELD(IODA_XIVT_SERVER, xive, server);
	xive = SETFIELD(IODA_XIVT_PRIORITY, xive, prio);
	p->mxive_cache[irq] = xive;

	/* Now we mangle the server and priority */
	if (prio == 0xff) {
		m_server = 0;
		m_prio = 0xff;
	} else {
		m_server = server >> 3;
		m_prio = (prio >> 3) | ((server & 7) << 5);
	}

	/* We use HRT entry 0 always for now */
	p7ioc_phb_ioda_sel(p, IODA_TBL_MXIVT, irq, false);
	xive = in_be64(p->regs + PHB_IODA_DATA0);
	xive = SETFIELD(IODA_XIVT_SERVER, xive, m_server);
	xive = SETFIELD(IODA_XIVT_PRIORITY, xive, m_prio);
	out_be64(p->regs + PHB_IODA_DATA0, xive);

	return OPAL_SUCCESS;
}
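
/*
 * Worked example of the server/priority mangling above: with
 * server = 5 and prio = 0x40, we get m_server = 5 >> 3 = 0 and
 * m_prio = (0x40 >> 3) | ((5 & 7) << 5) = 0x08 | 0xa0 = 0xa8, i.e.
 * the bottom 3 bits of the server number are hidden in the top 3
 * bits of the priority field, and the cache keeps the unmangled
 * values so get_xive can return what the OS originally asked for.
 */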

/* p7ioc_lsi_get_xive - Interrupt control from OPAL */
static int64_t p7ioc_lsi_get_xive(struct irq_source *is, uint32_t isn,
				  uint16_t *server, uint8_t *prio)
{
	struct p7ioc_phb *p = is->data;
	uint32_t irq = (isn & 0x7);
	uint32_t fbuid = P7_IRQ_FBUID(isn);
	uint64_t xive;

	if (fbuid != p->buid_lsi)
		return OPAL_PARAMETER;

	xive = p->lxive_cache[irq];
	*server = GETFIELD(IODA_XIVT_SERVER, xive);
	*prio = GETFIELD(IODA_XIVT_PRIORITY, xive);

	return OPAL_SUCCESS;
}

/* p7ioc_lsi_set_xive - Interrupt control from OPAL */
static int64_t p7ioc_lsi_set_xive(struct irq_source *is, uint32_t isn,
				  uint16_t server, uint8_t prio)
{
	struct p7ioc_phb *p = is->data;
	uint32_t irq = (isn & 0x7);
	uint32_t fbuid = P7_IRQ_FBUID(isn);
	uint64_t xive, m_server, m_prio;

	if (fbuid != p->buid_lsi)
		return OPAL_PARAMETER;

	xive = SETFIELD(IODA_XIVT_SERVER, 0ull, server);
	xive = SETFIELD(IODA_XIVT_PRIORITY, xive, prio);

	/*
	 * We cache the arguments because we have to mangle
	 * them in order to hijack 3 bits of priority to extend
	 * the server number
	 */
	p->lxive_cache[irq] = xive;

	/* Now we mangle the server and priority */
	if (prio == 0xff) {
		m_server = 0;
		m_prio = 0xff;
	} else {
		m_server = server >> 3;
		m_prio = (prio >> 3) | ((server & 7) << 5);
	}

	/* We use HRT entry 0 always for now */
	p7ioc_phb_ioda_sel(p, IODA_TBL_LXIVT, irq, false);
	xive = in_be64(p->regs + PHB_IODA_DATA0);
	xive = SETFIELD(IODA_XIVT_SERVER, xive, m_server);
	xive = SETFIELD(IODA_XIVT_PRIORITY, xive, m_prio);
	out_be64(p->regs + PHB_IODA_DATA0, xive);

	return OPAL_SUCCESS;
}

static void p7ioc_phb_err_interrupt(struct irq_source *is, uint32_t isn)
{
	struct p7ioc_phb *p = is->data;
	uint64_t peev0, peev1;

	PHBDBG(p, "Got interrupt 0x%04x\n", isn);

	opal_pci_eeh_set_evt(p->phb.opal_id);

	/* If the PHB is broken, go away */
	if (p->state == P7IOC_PHB_STATE_BROKEN)
		return;

	/*
	 * Check if the PHB is fenced; if so, update the PHB fence
	 * state and return, the ER error is discarded at this point
	 */
	phb_lock(&p->phb);
	if (p7ioc_phb_fenced(p)) {
		p->state = P7IOC_PHB_STATE_FENCED;
		PHBERR(p, "ER error ignored, PHB fenced\n");
		phb_unlock(&p->phb);
		return;
	}

	/*
	 * If we already had pending errors, which might have been
	 * moved from the IOC, then we needn't check PEEV, so that
	 * we avoid overwriting the errors from the IOC.
	 */
	if (p7ioc_phb_err_pending(p)) {
		phb_unlock(&p->phb);
		return;
	}

	/*
	 * We don't have pending errors from the IOC, so it's safe
	 * to check PEEV for frozen PEs.
	 */
	p7ioc_phb_ioda_sel(p, IODA_TBL_PEEV, 0, true);
	peev0 = in_be64(p->regs + PHB_IODA_DATA0);
	peev1 = in_be64(p->regs + PHB_IODA_DATA0);
	if (peev0 || peev1) {
		p->err.err_src   = P7IOC_ERR_SRC_PHB0 + p->index;
		p->err.err_class = P7IOC_ERR_CLASS_ER;
		p->err.err_bit   = 0;
		p7ioc_phb_set_err_pending(p, true);
	}
	phb_unlock(&p->phb);
}

static uint64_t p7ioc_lsi_attributes(struct irq_source *is __unused,
				     uint32_t isn)
{
	uint32_t irq = (isn & 0x7);

	if (irq == PHB_LSI_PCIE_ERROR)
		return IRQ_ATTR_TARGET_OPAL | IRQ_ATTR_TARGET_RARE;
	return IRQ_ATTR_TARGET_LINUX;
}

/* MSIs (OS owned) */
static const struct irq_source_ops p7ioc_msi_irq_ops = {
	.get_xive = p7ioc_msi_get_xive,
	.set_xive = p7ioc_msi_set_xive,
};

/* LSIs (OS owned) */
static const struct irq_source_ops p7ioc_lsi_irq_ops = {
	.get_xive = p7ioc_lsi_get_xive,
	.set_xive = p7ioc_lsi_set_xive,
	.attributes = p7ioc_lsi_attributes,
	.interrupt = p7ioc_phb_err_interrupt,
};

static void p7ioc_pcie_add_node(struct p7ioc_phb *p)
{
	uint64_t reg[2], iob, m32b, m64b, tkill;
	uint32_t lsibase, icsp = get_ics_phandle();
	struct dt_node *np;

	reg[0] = cleanup_addr((uint64_t)p->regs);
	reg[1] = 0x100000;

	np = dt_new_addr(p->ioc->dt_node, "pciex", reg[0]);
	if (!np)
		return;

	p->phb.dt_node = np;
	dt_add_property_strings(np, "compatible", "ibm,p7ioc-pciex",
				"ibm,ioda-phb");
	dt_add_property_strings(np, "device_type", "pciex");
	dt_add_property(np, "reg", reg, sizeof(reg));
	dt_add_property_cells(np, "#address-cells", 3);
	dt_add_property_cells(np, "#size-cells", 2);
	dt_add_property_cells(np, "#interrupt-cells", 1);
	dt_add_property_cells(np, "bus-range", 0, 0xff);
	dt_add_property_cells(np, "clock-frequency", 0x200, 0); /* ??? */
	dt_add_property_cells(np, "interrupt-parent", icsp);
	/* XXX FIXME: add slot-name */
	//dt_property_cell("bus-width", 8); /* Figure it out from VPD ? */

	/* "ranges", we only expose IO and M32
	 *
	 * Note: The kernel expects us to have chopped off 64k from the
	 * M32 size (for the 32-bit MSIs). If we don't do that, it will
	 * get confused (OPAL does it)
	 */
	iob = cleanup_addr(p->io_base);
	m32b = cleanup_addr(p->m32_base + M32_PCI_START);
	dt_add_property_cells(np, "ranges",
			      /* IO space */
			      0x01000000, 0x00000000, 0x00000000,
			      hi32(iob), lo32(iob), 0, PHB_IO_SIZE,
			      /* M32 space */
			      0x02000000, 0x00000000, M32_PCI_START,
			      hi32(m32b), lo32(m32b), 0, M32_PCI_SIZE - 0x10000);
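
	/*
	 * Layout reminder for the "ranges" entries above: each row is
	 * 7 cells, a 3-cell PCI address (phys.hi, phys.mid, phys.lo),
	 * a 2-cell parent address, then a 2-cell size. The space code
	 * in phys.hi follows the standard OF PCI bus binding:
	 * 0x01000000 marks IO space and 0x02000000 marks 32-bit
	 * non-prefetchable memory space.
	 */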

	/* XXX FIXME: add opal-memwin32, dmawins, etc... */
	m64b = cleanup_addr(p->m64_base);
	dt_add_property_u64s(np, "ibm,opal-m64-window",
			     m64b, m64b, PHB_M64_SIZE);
	dt_add_property_cells(np, "ibm,opal-msi-ports", 256);
	dt_add_property_cells(np, "ibm,opal-num-pes", 128);
	dt_add_property_cells(np, "ibm,opal-reserved-pe", 127);
	dt_add_property_cells(np, "ibm,opal-msi-ranges",
			      p->buid_msi << 4, 0x100);
	tkill = reg[0] + PHB_TCE_KILL;
	dt_add_property_cells(np, "ibm,opal-tce-kill",
			      hi32(tkill), lo32(tkill));

	/*
	 * Linux may use this property to allocate the diag data buffer, which
	 * can be used for either of these structs.  Pass the largest to ensure
	 * they can both fit in this buffer.
	 */
	dt_add_property_cells(np, "ibm,phb-diag-data-size",
			      MAX(sizeof(struct OpalIoP7IOCPhbErrorData),
				  sizeof(struct OpalIoP7IOCErrorData)));

	/* Add associativity properties */
	add_chip_dev_associativity(np);

	/* The interrupt maps will be generated in the RC node by the
	 * PCI code based on the content of this structure:
	 */
	lsibase = p->buid_lsi << 4;
	p->phb.lstate.int_size = 2;
	p->phb.lstate.int_val[0][0] = lsibase + PHB_LSI_PCIE_INTA;
	p->phb.lstate.int_val[0][1] = 1;
	p->phb.lstate.int_val[1][0] = lsibase + PHB_LSI_PCIE_INTB;
	p->phb.lstate.int_val[1][1] = 1;
	p->phb.lstate.int_val[2][0] = lsibase + PHB_LSI_PCIE_INTC;
	p->phb.lstate.int_val[2][1] = 1;
	p->phb.lstate.int_val[3][0] = lsibase + PHB_LSI_PCIE_INTD;
	p->phb.lstate.int_val[3][1] = 1;
	p->phb.lstate.int_parent[0] = icsp;
	p->phb.lstate.int_parent[1] = icsp;
	p->phb.lstate.int_parent[2] = icsp;
	p->phb.lstate.int_parent[3] = icsp;
}

/* p7ioc_phb_setup - Setup a p7ioc_phb data structure
 *
 * WARNING: This is called before the AIB register routing is
 * established. If this wants to access PHB registers, it must
 * use the ASB hard coded variant (slower)
 */
void p7ioc_phb_setup(struct p7ioc *ioc, uint8_t index)
{
	struct p7ioc_phb *p = &ioc->phbs[index];
	unsigned int buid_base = ioc->buid_base + PHBn_BUID_BASE(index);
	struct pci_slot *slot;

	p->index = index;
	p->ioc = ioc;
	p->gen = 2;	/* Operate in Gen2 mode by default */
	p->phb.ops = &p7ioc_phb_ops;
	p->phb.phb_type = phb_type_pcie_v2;
	p->regs_asb = ioc->regs + PHBn_ASB_BASE(index);
	p->regs = ioc->regs + PHBn_AIB_BASE(index);
	p->buid_lsi = buid_base + PHB_BUID_LSI_OFFSET;
	p->buid_msi = buid_base + PHB_BUID_MSI_OFFSET;
	p->io_base = ioc->mmio1_win_start + PHBn_IO_BASE(index);
	p->m32_base = ioc->mmio2_win_start + PHBn_M32_BASE(index);
	p->m64_base = ioc->mmio2_win_start + PHBn_M64_BASE(index);
	p->state = P7IOC_PHB_STATE_UNINITIALIZED;
	p->phb.scan_map = 0x1; /* Only device 0 to scan */

	/* Find P7IOC base location code in IOC */
	p->phb.base_loc_code = dt_prop_get_def(ioc->dt_node,
					       "ibm,io-base-loc-code", NULL);
	if (!p->phb.base_loc_code)
		prerror("P7IOC: Base location code not found !\n");

	/* Create device node for PHB */
	p7ioc_pcie_add_node(p);

	/* Register OS interrupt sources */
	register_irq_source(&p7ioc_msi_irq_ops, p, p->buid_msi << 4, 256);
	register_irq_source(&p7ioc_lsi_irq_ops, p, p->buid_lsi << 4, 8);

	/* Initialize IODA table caches */
	p7ioc_phb_init_ioda_cache(p);

	/* We register the PHB before we initialize it so we
	 * get a useful OPAL ID for it
	 */
	pci_register_phb(&p->phb, OPAL_DYNAMIC_PHB_ID);
	slot = p7ioc_phb_slot_create(&p->phb);
	if (!slot)
		prlog(PR_NOTICE, "P7IOC: Cannot create PHB#%x slot\n",
		      p->phb.opal_id);

	/* Platform additional setup */
	if (platform.pci_setup_phb)
		platform.pci_setup_phb(&p->phb, p->index);
}

static bool p7ioc_phb_wait_dlp_reset(struct p7ioc_phb *p)
{
	unsigned int i;
	uint64_t val;

	/*
	 * Firmware cannot access the UTL core regs or PCI config space
	 * until the cores are out of DL_PGRESET.
	 * DL_PGRESET should be polled until it is inactive with a value
	 * of '0'. The recommended polling frequency is once every 1ms.
	 * Firmware should poll at least 200 attempts before giving up.
	 * MMIO Stores to the link are silently dropped by the UTL core if
	 * the link is down.
	 * MMIO Loads to the link will be dropped by the UTL core and will
	 * eventually time-out and will return an all ones response if the
	 * link is down.
	 */
#define DLP_RESET_ATTEMPTS	400
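	/*
	 * Polling budget check: 400 attempts at 1ms apiece gives us a
	 * 400ms ceiling, comfortably above the minimum of 200 attempts
	 * the spec asks for.
	 */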

	printf("P7IOC: Waiting for DLP PG reset to complete...\n");
	for (i = 0; i < DLP_RESET_ATTEMPTS; i++) {
		val = in_be64(p->regs + PHB_PCIE_DLP_TRAIN_CTL);
		if (!(val & PHB_PCIE_DLP_TC_DL_PGRESET))
			break;
		time_wait_ms(1);
	}
	if (val & PHB_PCIE_DLP_TC_DL_PGRESET) {
		PHBERR(p, "Timeout waiting for DLP PG reset !\n");
		return false;
	}
	return true;
}

/* p7ioc_phb_init_rc_cfg - Initialize the Root Complex config space
 */
static bool p7ioc_phb_init_rc_cfg(struct p7ioc_phb *p)
{
	int64_t ecap, aercap;

	/* XXX Handle errors ? */

	/* Init_51..51:
	 *
	 * Set primary bus to 0, secondary to 1 and subordinate to 0xff
	 */
	p7ioc_pcicfg_write32(&p->phb, 0, PCI_CFG_PRIMARY_BUS, 0x00ff0100);

	/* Init_52..57
	 *
	 * IO and Memory base & limits are set to base > limit, which
	 * allows all inbounds.
	 *
	 * XXX This has the potential of confusing the OS which might
	 * think that nothing is forwarded downstream. We probably need
	 * to fix this to match the IO and M32 PHB windows
	 */
	p7ioc_pcicfg_write16(&p->phb, 0, PCI_CFG_IO_BASE, 0x0010);
	p7ioc_pcicfg_write32(&p->phb, 0, PCI_CFG_MEM_BASE, 0x00000010);
	p7ioc_pcicfg_write32(&p->phb, 0, PCI_CFG_PREF_MEM_BASE, 0x00000010);

	/* Init_58..: Setup bridge control to enable forwarding of CORR,
	 * FATAL and NONFATAL errors
	 */
	p7ioc_pcicfg_write16(&p->phb, 0, PCI_CFG_BRCTL, PCI_CFG_BRCTL_SERR_EN);

	/* Init_60..61
	 *
	 * PCIE Device control/status, enable error reporting, disable relaxed
	 * ordering, set MPS to 128 (see note), clear errors.
	 *
	 * Note: The doc recommends setting MPS to 4K. This has proved to have
	 * some issues as it requires specific clamping of MRSS on devices and
	 * we've found devices in the field that misbehave when doing that.
	 *
	 * We currently leave it all to 128 bytes (minimum setting) at init
	 * time. The generic PCIe probing later on might apply a different
	 * value, or the kernel will, but we play it safe at early init
	 */
	if (p->ecap <= 0) {
		ecap = pci_find_cap(&p->phb, 0, PCI_CFG_CAP_ID_EXP);
		if (ecap < 0) {
			PHBERR(p, "Can't locate PCI-E capability\n");
			return false;
		}
		p->ecap = ecap;
	} else {
		ecap = p->ecap;
	}

	p7ioc_pcicfg_write16(&p->phb, 0, ecap + PCICAP_EXP_DEVSTAT,
			     PCICAP_EXP_DEVSTAT_CE	|
			     PCICAP_EXP_DEVSTAT_NFE	|
			     PCICAP_EXP_DEVSTAT_FE	|
			     PCICAP_EXP_DEVSTAT_UE);

	p7ioc_pcicfg_write16(&p->phb, 0, ecap + PCICAP_EXP_DEVCTL,
			     PCICAP_EXP_DEVCTL_CE_REPORT	|
			     PCICAP_EXP_DEVCTL_NFE_REPORT	|
			     PCICAP_EXP_DEVCTL_FE_REPORT	|
			     PCICAP_EXP_DEVCTL_UR_REPORT	|
			     SETFIELD(PCICAP_EXP_DEVCTL_MPS, 0, PCIE_MPS_128B));

	/* Init_62..63
	 *
	 * Root Control Register. Enable error reporting
	 *
	 * Note: Added CRS visibility.
	 */
	p7ioc_pcicfg_write16(&p->phb, 0, ecap + PCICAP_EXP_RC,
			     PCICAP_EXP_RC_SYSERR_ON_CE		|
			     PCICAP_EXP_RC_SYSERR_ON_NFE	|
			     PCICAP_EXP_RC_SYSERR_ON_FE		|
			     PCICAP_EXP_RC_CRS_VISIBLE);

	/* Init_64..65
	 *
	 * Device Control 2. Enable ARI fwd, set timer
	 */
	p7ioc_pcicfg_write16(&p->phb, 0, ecap + PCICAP_EXP_DCTL2,
			     SETFIELD(PCICAP_EXP_DCTL2_CMPTOUT, 0, 2) |
			     PCICAP_EXP_DCTL2_ARI_FWD);

	/* Init_66..81
	 *
	 * AER inits
	 */
	aercap = pci_find_ecap(&p->phb, 0, PCIECAP_ID_AER, NULL);
	if (aercap < 0) {
		/* Shouldn't happen */
		PHBERR(p, "Failed to locate AER capability in bridge\n");
		return false;
	}
	p->aercap = aercap;

	/* Clear all UE status */
	p7ioc_pcicfg_write32(&p->phb, 0, aercap + PCIECAP_AER_UE_STATUS,
			     0xffffffff);
	/* Disable some error reporting as per the P7IOC spec */
	p7ioc_pcicfg_write32(&p->phb, 0, aercap + PCIECAP_AER_UE_MASK,
			     PCIECAP_AER_UE_POISON_TLP		|
			     PCIECAP_AER_UE_COMPL_TIMEOUT	|
			     PCIECAP_AER_UE_COMPL_ABORT		|
			     PCIECAP_AER_UE_ECRC);
	/* Report some errors as fatal */
	p7ioc_pcicfg_write32(&p->phb, 0, aercap + PCIECAP_AER_UE_SEVERITY,
			     PCIECAP_AER_UE_DLP 		|
			     PCIECAP_AER_UE_SURPRISE_DOWN	|
			     PCIECAP_AER_UE_FLOW_CTL_PROT	|
			     PCIECAP_AER_UE_UNEXP_COMPL		|
			     PCIECAP_AER_UE_RECV_OVFLOW		|
			     PCIECAP_AER_UE_MALFORMED_TLP);
	/* Clear all CE status */
	p7ioc_pcicfg_write32(&p->phb, 0, aercap + PCIECAP_AER_CE_STATUS,
			     0xffffffff);
	/* Disable some error reporting as per the P7IOC spec */
	p7ioc_pcicfg_write32(&p->phb, 0, aercap + PCIECAP_AER_CE_MASK,
			     PCIECAP_AER_CE_ADV_NONFATAL);
	/* Enable ECRC generation & checking */
	p7ioc_pcicfg_write32(&p->phb, 0, aercap + PCIECAP_AER_CAPCTL,
			     PCIECAP_AER_CAPCTL_ECRCG_EN	|
			     PCIECAP_AER_CAPCTL_ECRCC_EN);
	/* Enable reporting in root error control */
	p7ioc_pcicfg_write32(&p->phb, 0, aercap + PCIECAP_AER_RERR_CMD,
			     PCIECAP_AER_RERR_CMD_FE		|
			     PCIECAP_AER_RERR_CMD_NFE		|
			     PCIECAP_AER_RERR_CMD_CE);
	/* Clear root error status */
	p7ioc_pcicfg_write32(&p->phb, 0, aercap + PCIECAP_AER_RERR_STA,
			     0xffffffff);

	return true;
}

static void p7ioc_phb_init_utl(struct p7ioc_phb *p)
{
	/* Init_82..84: Clear spurious errors and assign errors to the
	 * right "interrupt" signal
	 */
	out_be64(p->regs + UTL_SYS_BUS_AGENT_STATUS,       0xffffffffffffffffUL);
	out_be64(p->regs + UTL_SYS_BUS_AGENT_ERR_SEVERITY, 0x0000000000000000UL);
	out_be64(p->regs + UTL_SYS_BUS_AGENT_IRQ_EN,       0xac80000000000000UL);

	/* Init_85..89: Setup buffer allocations */
	out_be64(p->regs + UTL_OUT_POST_DAT_BUF_ALLOC,     0x0400000000000000UL);
	out_be64(p->regs + UTL_IN_POST_HDR_BUF_ALLOC,      0x1000000000000000UL);
	out_be64(p->regs + UTL_IN_POST_DAT_BUF_ALLOC,      0x4000000000000000UL);
	out_be64(p->regs + UTL_PCIE_TAGS_ALLOC,            0x0800000000000000UL);
	out_be64(p->regs + UTL_GBIF_READ_TAGS_ALLOC,       0x0800000000000000UL);

	/* Init_90: PCI Express port control */
	out_be64(p->regs + UTL_PCIE_PORT_CONTROL,          0x8480000000000000UL);

	/* Init_91..93: Clean & setup port errors */
	out_be64(p->regs + UTL_PCIE_PORT_STATUS,           0xff7fffffffffffffUL);
	out_be64(p->regs + UTL_PCIE_PORT_ERROR_SEV,        0x00e0000000000000UL);
	out_be64(p->regs + UTL_PCIE_PORT_IRQ_EN,           0x7e65000000000000UL);

	/* Init_94: Cleanup RC errors */
	out_be64(p->regs + UTL_RC_STATUS,                  0xffffffffffffffffUL);
}

static void p7ioc_phb_init_errors(struct p7ioc_phb *p)
{
	/* Init_98: LEM Error Mask : Temporarily disable error interrupts */
	out_be64(p->regs + PHB_LEM_ERROR_MASK,		   0xffffffffffffffffUL);

	/* Init_99..107: Configure main error traps & clear old state */
	out_be64(p->regs + PHB_ERR_STATUS,		   0xffffffffffffffffUL);
	out_be64(p->regs + PHB_ERR1_STATUS,		   0x0000000000000000UL);
	out_be64(p->regs + PHB_ERR_LEM_ENABLE,		   0xffffffffefffffffUL);
	out_be64(p->regs + PHB_ERR_FREEZE_ENABLE,	   0x0000000061c00000UL);
	out_be64(p->regs + PHB_ERR_AIB_FENCE_ENABLE,	   0xffffffc58c000000UL);
	out_be64(p->regs + PHB_ERR_LOG_0,		   0x0000000000000000UL);
	out_be64(p->regs + PHB_ERR_LOG_1,		   0x0000000000000000UL);
	out_be64(p->regs + PHB_ERR_STATUS_MASK,		   0x0000000000000000UL);
	out_be64(p->regs + PHB_ERR1_STATUS_MASK,	   0x0000000000000000UL);

	/* Init_108..116: Configure MMIO error traps & clear old state */
	out_be64(p->regs + PHB_OUT_ERR_STATUS,		   0xffffffffffffffffUL);
	out_be64(p->regs + PHB_OUT_ERR1_STATUS,		   0x0000000000000000UL);
	out_be64(p->regs + PHB_OUT_ERR_LEM_ENABLE,	   0xffffffffffffffffUL);
	out_be64(p->regs + PHB_OUT_ERR_FREEZE_ENABLE,	   0x0000430803000000UL);
	out_be64(p->regs + PHB_OUT_ERR_AIB_FENCE_ENABLE,   0x9df3bc00f0f0700fUL);
	out_be64(p->regs + PHB_OUT_ERR_LOG_0,		   0x0000000000000000UL);
	out_be64(p->regs + PHB_OUT_ERR_LOG_1,		   0x0000000000000000UL);
	out_be64(p->regs + PHB_OUT_ERR_STATUS_MASK,	   0x0000000000000000UL);
	out_be64(p->regs + PHB_OUT_ERR1_STATUS_MASK,	   0x0000000000000000UL);

	/* Init_117..125: Configure DMA_A error traps & clear old state */
	out_be64(p->regs + PHB_INA_ERR_STATUS,		   0xffffffffffffffffUL);
	out_be64(p->regs + PHB_INA_ERR1_STATUS,		   0x0000000000000000UL);
	out_be64(p->regs + PHB_INA_ERR_LEM_ENABLE,	   0xffffffffffffffffUL);
	out_be64(p->regs + PHB_INA_ERR_FREEZE_ENABLE,	   0xc00003ff01006000UL);
	out_be64(p->regs + PHB_INA_ERR_AIB_FENCE_ENABLE,   0x3fff50007e559fd8UL);
	out_be64(p->regs + PHB_INA_ERR_LOG_0,		   0x0000000000000000UL);
	out_be64(p->regs + PHB_INA_ERR_LOG_1,		   0x0000000000000000UL);
	out_be64(p->regs + PHB_INA_ERR_STATUS_MASK,	   0x0000000000000000UL);
	out_be64(p->regs + PHB_INA_ERR1_STATUS_MASK,	   0x0000000000000000UL);

	/* Init_126..134: Configure DMA_B error traps & clear old state */
	out_be64(p->regs + PHB_INB_ERR_STATUS,		   0xffffffffffffffffUL);
	out_be64(p->regs + PHB_INB_ERR1_STATUS,		   0x0000000000000000UL);
	out_be64(p->regs + PHB_INB_ERR_LEM_ENABLE,	   0xffffffffffffffffUL);
	out_be64(p->regs + PHB_INB_ERR_FREEZE_ENABLE,	   0x0000000000000000UL);
	out_be64(p->regs + PHB_INB_ERR_AIB_FENCE_ENABLE,   0x18ff80ffff7f0000UL);
	out_be64(p->regs + PHB_INB_ERR_LOG_0,		   0x0000000000000000UL);
	out_be64(p->regs + PHB_INB_ERR_LOG_1,		   0x0000000000000000UL);
	out_be64(p->regs + PHB_INB_ERR_STATUS_MASK,	   0x0000000000000000UL);
	out_be64(p->regs + PHB_INB_ERR1_STATUS_MASK,	   0x0000000000000000UL);

	/* Init_135..138: Cleanup & configure LEM */
	out_be64(p->regs + PHB_LEM_FIR_ACCUM,		   0x0000000000000000UL);
	out_be64(p->regs + PHB_LEM_ACTION0,		   0xffffffffffffffffUL);
	out_be64(p->regs + PHB_LEM_ACTION1,		   0x0000000000000000UL);
	out_be64(p->regs + PHB_LEM_WOF,			   0x0000000000000000UL);
}

/* p7ioc_phb_init - Initialize the PHB hardware
 *
 * This is currently only called at boot time. It will eventually
 * be called at runtime, for example in some cases of error recovery
 * after a PHB reset in which case we might need locks etc...
 */
int64_t p7ioc_phb_init(struct p7ioc_phb *p)
{
	uint64_t val;

	PHBDBG(p, "Initializing PHB %x...\n", p->index);

	p->state = P7IOC_PHB_STATE_INITIALIZING;

	/* For some reason, the doc wants us to read the version
	 * register, so let's do it. We should probably check that
	 * the value makes sense...
	 */
	val = in_be64(p->regs_asb + PHB_VERSION);
	p->rev = ((val >> 16) & 0xffff) | (val & 0xffff);
	PHBDBG(p, "PHB version: %08x\n", p->rev);

	/*
	 * Configure AIB operations
	 *
	 * This register maps upbound commands to AIB channels.
	 * DMA Write=0, DMA Read=2, MMIO Load Response=1,
	 * Interrupt Request=1, TCE Read=3.
	 */
	/* Init_1: AIB TX Channel Mapping */
	out_be64(p->regs_asb + PHB_AIB_TX_CHAN_MAPPING,    0x0211300000000000UL);

	/*
	 * This group of steps initializes the AIB RX credits for
	 * the CI block's port that is attached to this PHB.
	 *
	 * Channel 0 (Dkill): 32 command credits, 0 data credits
	 *                    (effectively infinite command credits)
	 * Channel 1 (DMA/TCE Read Responses): 32 command credits, 32 data
	 *                                     credits (effectively infinite
	 *                                     command and data credits)
	 * Channel 2 (Interrupt Reissue/Return): 32 command, 0 data credits
	 *                                       (effectively infinite
	 *                                       command credits)
	 * Channel 3 (MMIO Load/Stores, EOIs): 1 command, 1 data credit
	 */

	/* Init_2: AIB RX Command Credit */
	out_be64(p->regs_asb + PHB_AIB_RX_CMD_CRED,        0x0020002000200001UL);
	/* Init_3: AIB RX Data Credit */
	out_be64(p->regs_asb + PHB_AIB_RX_DATA_CRED,       0x0000002000000001UL);
	/* Init_4: AIB RX Credit Init Timer */
	out_be64(p->regs_asb + PHB_AIB_RX_CRED_INIT_TIMER, 0xFF00000000000000UL);
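
	/*
	 * Decode of the two credit register values above (the field
	 * packing is inferred from how the numbers line up with the
	 * channel list in the preceding comment): reading 16-bit fields
	 * from the MSB down as channels 0..3, command credits come out
	 * as 32/32/32/1 and data credits as 0/32/0/1.
	 */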
2998 
2999 	/*
3000 	 * Enable all 32 AIB and TCE tags.
3001 	 *
3002 	 * AIB tags are used for DMA read requests.
3003 	 * TCE tags are used for every internal transaction as well as TCE
3004 	 * read requests.
3005 	 */
3006 
3007 	/* Init_5:  PHB - AIB Tag Enable Register */
3008 	out_be64(p->regs_asb + PHB_AIB_TAG_ENABLE,         0xFFFFFFFF00000000UL);
3009 	/* Init_6: PHB – TCE Tag Enable Register */
3010 	out_be64(p->regs_asb + PHB_TCE_TAG_ENABLE,         0xFFFFFFFF00000000UL);
3011 
3012 	/* Init_7: PCIE - System Configuration Register
3013 	 *
3014 	 * This is the default value out of reset. This register can be
3015 	 * modified to change the following fields if needed:
3016 	 *
3017 	 *  bits 04:09 - SYS_EC0C_MAXLINKWIDTH[5:0]
3018 	 *               The default link width is x8. This can be reduced
3019 	 *               to x1 or x4, if needed.
3020 	 *
3021 	 *  bits 10:12 - SYS_EC04_MAX_PAYLOAD[2:0]
3022 	 *
3023 	 *               The default max payload size is 4KB. This can be
3024 	 *               reduced to the allowed ranges from 128B
3025 	 *               to 2KB if needed.
3026 	 */
3027 	out_be64(p->regs + PHB_PCIE_SYSTEM_CONFIG,         0x422800FC20000000UL);
3028 
3029 	/* Init_8: PHB - PCI-E Reset Register
3030 	 *
3031 	 * This will deassert reset for the PCI-E cores, including the
3032 	 * PHY and HSS macros. The TLDLP core will begin link training
3033 	 * shortly after this register is written.
3034 	 * This will also assert reset for the internal scan-only error
3035 	 * report macros. The error report macro reset will be deasserted
3036 	 * in a later step.
3037 	 * Firmware will verify in a later step whether the PCI-E link
3038 	 * has been established.
3039 	 *
3040 	 * NOTE: We perform a PERST at the end of the init sequence so
3041 	 * we could probably skip that link training.
3042 	 */
3043 	out_be64(p->regs + PHB_RESET,                      0xE800000000000000UL);

	/* Init_9: BUID
	 *
	 * Only the top 5 bits of the MSI field are implemented; the
	 * bottom bits are always 0. Our buid_msi value should also be a
	 * multiple of 16, so it should all fit well.
	 */
	val  = SETFIELD(PHB_BUID_LSI, 0ul, P7_BUID_BASE(p->buid_lsi));
	val |= SETFIELD(PHB_BUID_MSI, 0ul, P7_BUID_BASE(p->buid_msi));
	out_be64(p->regs + PHB_BUID, val);
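	/*
	 * Worked example (hedged): with only the top 5 bits of the MSI
	 * field implemented, a buid_msi that is a multiple of 16 has its
	 * low 4 bits clear anyway, so nothing is lost when the hardware
	 * ignores the unimplemented low bits.
	 */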

	/* Init_10..12: IO Space */
	out_be64(p->regs + PHB_IO_BASE_ADDR, p->io_base);
	out_be64(p->regs + PHB_IO_BASE_MASK, ~(PHB_IO_SIZE - 1));
	out_be64(p->regs + PHB_IO_START_ADDR, 0);

	/* Init_13..15: M32 Space */
	out_be64(p->regs + PHB_M32_BASE_ADDR, p->m32_base + M32_PCI_START);
	out_be64(p->regs + PHB_M32_BASE_MASK, ~(M32_PCI_SIZE - 1));
	out_be64(p->regs + PHB_M32_START_ADDR, M32_PCI_START);
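	/*
	 * Note (hedged): for a power-of-two window size S, ~(S - 1) is a
	 * mask with the low log2(S) bits clear, e.g. S = 64KB would give
	 * 0xFFFFFFFFFFFF0000UL. The M32 window's PCI-side addresses start
	 * at M32_PCI_START rather than 0, so the base address is offset
	 * by the same amount, presumably keeping the CPU-to-PCI
	 * translation a fixed offset.
	 */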

	/* Init_16: PCI-E Outbound Request Upper Address */
	out_be64(p->regs + PHB_M64_UPPER_BITS, 0);

	/* Init_17: PCI-E PHB2 Configuration
	 *
	 * We enable IO, M32, 32-bit and 64-bit MSIs, and 64-bit TCEs.
	 */
	out_be64(p->regs + PHB_PHB2_CONFIG,
		 PHB_PHB2C_32BIT_MSI_EN	|
		 PHB_PHB2C_IO_EN	|
		 PHB_PHB2C_64BIT_MSI_EN	|
		 PHB_PHB2C_M32_EN	|
		 PHB_PHB2C_64B_TCE_EN);

	/* Init_18..41: Reset all IODA tables */
	p7ioc_ioda_reset(&p->phb, false);

	/* Init_42..47: Clear UTL & DLP error log regs */
	out_be64(p->regs + PHB_PCIE_UTL_ERRLOG1,	   0xffffffffffffffffUL);
	out_be64(p->regs + PHB_PCIE_UTL_ERRLOG2,	   0xffffffffffffffffUL);
	out_be64(p->regs + PHB_PCIE_UTL_ERRLOG3,	   0xffffffffffffffffUL);
	out_be64(p->regs + PHB_PCIE_UTL_ERRLOG4,	   0xffffffffffffffffUL);
	out_be64(p->regs + PHB_PCIE_DLP_ERRLOG1,	   0xffffffffffffffffUL);
	out_be64(p->regs + PHB_PCIE_DLP_ERRLOG2,	   0xffffffffffffffffUL);

	/* Init_48: Wait for DLP core to be out of reset */
	if (!p7ioc_phb_wait_dlp_reset(p))
		goto failed;

	/* Init_49: Clear port status */
	out_be64(p->regs + UTL_PCIE_PORT_STATUS,	   0xffffffffffffffffUL);

	/* Init_50..81: Init root complex config space */
	if (!p7ioc_phb_init_rc_cfg(p))
		goto failed;

	/* Init_82..94: Init UTL */
	p7ioc_phb_init_utl(p);

	/* Init_95: PCI-E Reset, deassert reset for internal error macros */
	out_be64(p->regs + PHB_RESET,			   0xe000000000000000UL);

	/* Init_96: PHB Control register. Various PHB settings:
	 *
	 * - Enable ECC for various internal RAMs
	 * - Enable all TCAM entries
	 * - Set failed DMA read requests to return Completer Abort on error
	 */
	out_be64(p->regs + PHB_CONTROL,			   0x7f38000000000000UL);

	/* Init_97: Legacy Control register
	 *
	 * The spec sets bit 0 to enable DKill to flush the TCEs. We do not
	 * use that mechanism; instead, we require the OS to access the TCE
	 * Kill register directly, so we leave that bit set to 0.
	 */
	out_be64(p->regs + PHB_LEGACY_CTRL,		   0x0000000000000000UL);

	/* Init_98..138: Setup error registers */
	p7ioc_phb_init_errors(p);

	/* Init_139: Read error summary */
	val = in_be64(p->regs + PHB_ETU_ERR_SUMMARY);
	if (val) {
		PHBERR(p, "Errors detected during PHB init: 0x%016llx\n", val);
		goto failed;
	}

	/* Steps Init_140..142 have been removed from the spec. */

	/* Init_143..144: Clear status bits (they are write-1-to-clear)
	 * and enable IO, MMIO, Bus master, etc.
	 */
	p7ioc_pcicfg_write16(&p->phb, 0, PCI_CFG_STAT,
			     PCI_CFG_STAT_SENT_TABORT	|
			     PCI_CFG_STAT_RECV_TABORT	|
			     PCI_CFG_STAT_RECV_MABORT	|
			     PCI_CFG_STAT_SENT_SERR	|
			     PCI_CFG_STAT_RECV_PERR);
	p7ioc_pcicfg_write16(&p->phb, 0, PCI_CFG_CMD,
			     PCI_CFG_CMD_SERR_EN	|
			     PCI_CFG_CMD_PERR_RESP	|
			     PCI_CFG_CMD_BUS_MASTER_EN	|
			     PCI_CFG_CMD_MEM_EN		|
			     PCI_CFG_CMD_IO_EN);

	/* At this point, the spec suggests doing a bus walk. However, we
	 * haven't powered up the slots via the SHPC controller yet. We'll
	 * deal with that, and with link training issues, later; for now,
	 * let's enable the full range of error detection.
	 */

	/* Init_145..149: Enable error interrupts and LEM */
	out_be64(p->regs + PHB_ERR_IRQ_ENABLE,		   0x0000000061c00000UL);
	out_be64(p->regs + PHB_OUT_ERR_IRQ_ENABLE,	   0x0000430803000000UL);
	out_be64(p->regs + PHB_INA_ERR_IRQ_ENABLE,	   0xc00003ff01006000UL);
	out_be64(p->regs + PHB_INB_ERR_IRQ_ENABLE,	   0x0000000000000000UL);
	out_be64(p->regs + PHB_LEM_ERROR_MASK,		   0x1249a1147f500f2cUL);

	/* Init_150: Enable DMA read/write TLP address speculation */
	out_be64(p->regs + PHB_TCE_PREFETCH,		   0x0000c00000000000UL);

	/* Init_151..152: Set various timeouts */
	out_be64(p->regs + PHB_TIMEOUT_CTRL1,		   0x1611112010200000UL);
	out_be64(p->regs + PHB_TIMEOUT_CTRL2,		   0x0000561300000000UL);

	/* Mark the PHB as functional, which enables all the various sequences */
	p->state = P7IOC_PHB_STATE_FUNCTIONAL;

	return OPAL_SUCCESS;

 failed:
	PHBERR(p, "Initialization failed\n");
	p->state = P7IOC_PHB_STATE_BROKEN;

	return OPAL_HARDWARE;
}

void p7ioc_phb_reset(struct phb *phb)
{
	struct p7ioc_phb *p = phb_to_p7ioc_phb(phb);
	struct p7ioc *ioc = p->ioc;
	uint64_t ci_idx, rreg;
	unsigned int i;
	bool fenced;

	/* Check our fence status. The fence bits we care about are
	 * two bits per PHB, at IBM bit locations 14 and 15 + 4*phb
	 * (e.g. bits 14/15 for PHB#0, bits 18/19 for PHB#1).
	 */
	fenced = p7ioc_phb_fenced(p);

	PHBDBG(p, "PHB reset... (fenced: %d)\n", (int)fenced);

	/*
	 * If not fenced and already functional, let's do an IODA reset
	 * to clear pending DMAs and wait a bit for things to settle. Note
	 * that the IODA table cache won't be emptied, so we can restore
	 * the tables during error recovery.
	 */
	if (p->state == P7IOC_PHB_STATE_FUNCTIONAL && !fenced) {
		PHBDBG(p, "  ioda reset ...\n");
		p7ioc_ioda_reset(&p->phb, false);
		time_wait_ms(100);
	}

	/* CI port index */
	ci_idx = p->index + 2;

	/* Reset register bits for this PHB */
	rreg  = 0;	/* CI port config reset, PPC_BIT(8 + ci_idx * 2), intentionally not set */
	rreg |= PPC_BIT(9 + ci_idx * 2);	/* CI port func reset */
	rreg |= PPC_BIT(32 + p->index);		/* PHBn config reset */
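	/*
	 * Worked example: for PHB#0, ci_idx = 2, so rreg ends up as
	 * PPC_BIT(13) | PPC_BIT(32); for PHB#1, ci_idx = 3 and rreg is
	 * PPC_BIT(15) | PPC_BIT(33).
	 */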

	/* Mask various errors during reset and clear pending errors */
	out_be64(ioc->regs + P7IOC_CIn_LEM_ERR_MASK(ci_idx),
		 0xa4f4000000000000ul);
	out_be64(p->regs_asb + PHB_LEM_ERROR_MASK, 0xadb650c9808dd051ul);
	out_be64(ioc->regs + P7IOC_CIn_LEM_FIR(ci_idx), 0);

	/* We need to retry in case the fence doesn't lift, due to a
	 * problem with lost credits (per the hardware folks). It's not
	 * clear how many retries are needed.
	 */
#define MAX_PHB_RESET_RETRIES	5
	for (i = 0; i < MAX_PHB_RESET_RETRIES; i++) {
		PHBDBG(p, "  reset try %d...\n", i);
		/* Apply reset */
		out_be64(ioc->regs + P7IOC_CCRR, rreg);
		time_wait_ms(1);
		out_be64(ioc->regs + P7IOC_CCRR, 0);

		/* Check if the fence lifted */
		fenced = p7ioc_phb_fenced(p);
		PHBDBG(p, "  fenced: %d...\n", (int)fenced);
		if (!fenced)
			break;
	}

	/* Reset failed; not much to do. Maybe add an error return. */
	if (fenced) {
		PHBERR(p, "Reset failed, fence still set!\n");
		p->state = P7IOC_PHB_STATE_BROKEN;
		return;
	}

	/* Wait a bit */
	time_wait_ms(100);

	/* Re-initialize the PHB */
	p7ioc_phb_init(p);

	/* Restore the CI error mask */
	out_be64(ioc->regs + P7IOC_CIn_LEM_ERR_MASK_AND(ci_idx), 0);
}