1 // SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
2 /*
3  * Low Pin Count (LPC) Bus.
4  *
5  * Copyright 2013-2019 IBM Corp.
6  */
7 
8 #define pr_fmt(fmt)	"LPC: " fmt
9 
10 #include <skiboot.h>
11 #include <xscom.h>
12 #include <io.h>
13 #include <lock.h>
14 #include <chip.h>
15 #include <lpc.h>
16 #include <timebase.h>
17 #include <errorlog.h>
18 #include <opal-api.h>
19 #include <platform.h>
20 #include <psi.h>
21 #include <interrupts.h>
22 
23 //#define DBG_IRQ(fmt...) prerror(fmt)
24 #define DBG_IRQ(fmt...) do { } while(0)
25 
/* Error-log templates for the LPC failure classes reported below */
DEFINE_LOG_ENTRY(OPAL_RC_LPC_READ, OPAL_PLATFORM_ERR_EVT, OPAL_LPC,
		 OPAL_MISC_SUBSYSTEM, OPAL_PREDICTIVE_ERR_GENERAL,
		 OPAL_NA);

DEFINE_LOG_ENTRY(OPAL_RC_LPC_WRITE, OPAL_PLATFORM_ERR_EVT, OPAL_LPC,
		 OPAL_MISC_SUBSYSTEM, OPAL_PREDICTIVE_ERR_GENERAL,
		 OPAL_NA);

DEFINE_LOG_ENTRY(OPAL_RC_LPC_SYNC, OPAL_PLATFORM_ERR_EVT, OPAL_LPC,
		 OPAL_MISC_SUBSYSTEM, OPAL_PREDICTIVE_ERR_GENERAL,
		 OPAL_NA);

/* Used exclusively in manufacturing mode */
DEFINE_LOG_ENTRY(OPAL_RC_LPC_SYNC_PERF, OPAL_PLATFORM_ERR_EVT, OPAL_LPC,
		 OPAL_MISC_SUBSYSTEM, OPAL_UNRECOVERABLE_ERR_DEGRADE_PERF,
		 OPAL_NA);
42 
43 #define ECCB_CTL	0 /* b0020 -> b00200 */
44 #define ECCB_STAT	2 /* b0022 -> b00210 */
45 #define ECCB_DATA	3 /* b0023 -> b00218 */
46 
47 #define ECCB_CTL_MAGIC		0xd000000000000000ul
48 #define ECCB_CTL_DATASZ		PPC_BITMASK(4,7)
49 #define ECCB_CTL_READ		PPC_BIT(15)
50 #define ECCB_CTL_ADDRLEN	PPC_BITMASK(23,25)
51 #define 	ECCB_ADDRLEN_4B	0x4
52 #define ECCB_CTL_ADDR		PPC_BITMASK(32,63)
53 
54 #define ECCB_STAT_PIB_ERR	PPC_BITMASK(0,5)
55 #define ECCB_STAT_RD_DATA	PPC_BITMASK(6,37)
56 #define ECCB_STAT_BUSY		PPC_BIT(44)
57 #define ECCB_STAT_ERRORS1	PPC_BITMASK(45,51)
58 #define ECCB_STAT_OP_DONE	PPC_BIT(52)
59 #define ECCB_STAT_ERRORS2	PPC_BITMASK(53,55)
60 
61 #define ECCB_STAT_ERR_MASK	(ECCB_STAT_PIB_ERR | \
62 				 ECCB_STAT_ERRORS1 | \
63 				 ECCB_STAT_ERRORS2)
64 
65 #define ECCB_TIMEOUT	1000000
66 
67 /* OPB Master LS registers */
68 #define OPB_MASTER_LS_IRQ_STAT	0x50
69 #define OPB_MASTER_LS_IRQ_MASK	0x54
70 #define OPB_MASTER_LS_IRQ_POL	0x58
71 #define   OPB_MASTER_IRQ_LPC	       	0x00000800
72 
73 /* LPC HC registers */
74 #define LPC_HC_FW_SEG_IDSEL	0x24
75 #define LPC_HC_FW_RD_ACC_SIZE	0x28
76 #define   LPC_HC_FW_RD_1B		0x00000000
77 #define   LPC_HC_FW_RD_2B		0x01000000
78 #define   LPC_HC_FW_RD_4B		0x02000000
79 #define   LPC_HC_FW_RD_16B		0x04000000
80 #define   LPC_HC_FW_RD_128B		0x07000000
81 #define LPC_HC_IRQSER_CTRL	0x30
82 #define   LPC_HC_IRQSER_EN		0x80000000
83 #define   LPC_HC_IRQSER_QMODE		0x40000000
84 #define   LPC_HC_IRQSER_START_MASK	0x03000000
85 #define   LPC_HC_IRQSER_START_4CLK	0x00000000
86 #define   LPC_HC_IRQSER_START_6CLK	0x01000000
87 #define   LPC_HC_IRQSER_START_8CLK	0x02000000
88 #define   LPC_HC_IRQSER_AUTO_CLEAR	0x00800000
89 #define LPC_HC_IRQMASK		0x34	/* same bit defs as LPC_HC_IRQSTAT */
90 #define LPC_HC_IRQSTAT		0x38
91 #define   LPC_HC_IRQ_SERIRQ0		0x80000000u /* all bits down to ... */
92 #define   LPC_HC_IRQ_SERIRQ16		0x00008000 /* IRQ16=IOCHK#, IRQ2=SMI# */
93 #define   LPC_HC_IRQ_SERIRQ_ALL		0xffff8000
94 #define   LPC_HC_IRQ_LRESET		0x00000400
95 #define   LPC_HC_IRQ_SYNC_ABNORM_ERR	0x00000080
96 #define   LPC_HC_IRQ_SYNC_NORESP_ERR	0x00000040
97 #define   LPC_HC_IRQ_SYNC_NORM_ERR	0x00000020
98 #define   LPC_HC_IRQ_SYNC_TIMEOUT_ERR	0x00000010
99 #define   LPC_HC_IRQ_TARG_TAR_ERR	0x00000008
100 #define   LPC_HC_IRQ_BM_TAR_ERR		0x00000004
101 #define   LPC_HC_IRQ_BM0_REQ		0x00000002
102 #define   LPC_HC_IRQ_BM1_REQ		0x00000001
103 #define   LPC_HC_IRQ_BASE_IRQS		(		     \
104 	LPC_HC_IRQ_LRESET |				     \
105 	LPC_HC_IRQ_SYNC_ABNORM_ERR |			     \
106 	LPC_HC_IRQ_SYNC_NORESP_ERR |			     \
107 	LPC_HC_IRQ_SYNC_NORM_ERR |			     \
108 	LPC_HC_IRQ_SYNC_TIMEOUT_ERR |			     \
109 	LPC_HC_IRQ_TARG_TAR_ERR |			     \
110 	LPC_HC_IRQ_BM_TAR_ERR)
111 #define LPC_HC_ERROR_ADDRESS	0x40
112 
113 #define LPC_NUM_SERIRQ		17
114 
/* Allocation state/policy of the 4 SerIRQ->PSI routes */
enum {
	LPC_ROUTE_FREE = 0,	/* route not yet allocated */
	LPC_ROUTE_OPAL,		/* route targets OPAL */
	LPC_ROUTE_LINUX		/* route targets the OS */
};
120 
/* One row of lpc_error_table: OPAL return code plus log text for an
 * LPC HC error status bit.
 */
struct lpc_error_entry {
	int64_t rc;
	const char *description;
};
125 
/*
 * Per-chip LPC master state. "xbase" is the XSCOM base of the ECCB
 * indirect bridge; "mbase", when non-NULL, is a direct MMIO mapping of
 * the OPB space and takes precedence over ECCB in opb_read/opb_write.
 */
struct lpcm {
	uint32_t		chip_id;	/* owning chip */
	uint32_t		xbase;		/* ECCB XSCOM base */
	void			*mbase;		/* OPB MMIO base or NULL */
	struct lock		lock;		/* guards HW access + state */
	uint8_t			fw_idsel;	/* cached FW segment select */
	uint8_t			fw_rdsz;	/* cached FW read access size */
	struct list_head	clients;	/* registered LPC clients */
	bool			has_serirq;	/* SerIRQ processing enabled */
	uint8_t			sirq_routes[LPC_NUM_SERIRQ]; /* SerIRQ -> PSI route */
	bool			sirq_routed[LPC_NUM_SERIRQ]; /* route assigned ? */
	uint32_t		sirq_rmasks[4];	/* per-route SerIRQ bit masks */
	uint8_t			sirq_ralloc[4];	/* route policy (LPC_ROUTE_*) */
	struct dt_node		*node;		/* LPC bus device-tree node */
};
141 
142 
/* Sync-error count past which, in manufacturing mode, we escalate to
 * the performance-degradation log entry (see lpc_dispatch_err_irqs).
 */
#define	LPC_BUS_DEGRADED_PERF_THRESHOLD		5

/* Registration record for an LPC client driver */
struct lpc_client_entry {
	struct list_node node;		/* linkage in lpcm->clients */
	const struct lpc_client *clt;	/* client callbacks and IRQ mask */
	uint32_t policy;		/* presumably the IRQ target policy — verify against caller */
};
150 
/* Default LPC bus (chip id), -1 until a bus is registered */
static int32_t lpc_default_chip_id = -1;

/* Set once the interrupt infrastructure is up; gates SerIRQ setup */
static bool lpc_irqs_ready;

/*
 * These are expected to be the same on all chips and should probably
 * be read (or configured) dynamically. This is how things are configured
 * today on Tuletta.
 */
static uint32_t lpc_io_opb_base		= 0xd0010000;	/* LPC IO window */
static uint32_t lpc_mem_opb_base	= 0xe0000000;	/* LPC MEM window */
static uint32_t lpc_fw_opb_base		= 0xf0000000;	/* LPC FW window */
static uint32_t lpc_reg_opb_base	= 0xc0012000;	/* LPC HC registers */
static uint32_t opb_master_reg_base	= 0xc0010000;	/* OPB master regs */
165 
opb_mmio_write(struct lpcm * lpc,uint32_t addr,uint32_t data,uint32_t sz)166 static int64_t opb_mmio_write(struct lpcm *lpc, uint32_t addr, uint32_t data,
167 			      uint32_t sz)
168 {
169 	switch (sz) {
170 	case 1:
171 		out_8(lpc->mbase + addr, data);
172 		return OPAL_SUCCESS;
173 	case 2:
174 		out_be16(lpc->mbase + addr, data);
175 		return OPAL_SUCCESS;
176 	case 4:
177 		out_be32(lpc->mbase + addr, data);
178 		return OPAL_SUCCESS;
179 	}
180 	prerror("Invalid data size %d\n", sz);
181 	return OPAL_PARAMETER;
182 }
183 
/*
 * Write "data" (sz = 1, 2 or 4 bytes) to OPB address "addr" through the
 * ECCB indirect bridge, driven over XSCOM. Falls back to the direct
 * MMIO path when one exists. The ECCB engine is programmed and then
 * polled until completion or ECCB_TIMEOUT iterations elapse.
 *
 * Returns OPAL_SUCCESS, OPAL_PARAMETER for a bad size, OPAL_HARDWARE
 * on ECCB error or timeout, or a propagated XSCOM error code.
 */
static int64_t opb_write(struct lpcm *lpc, uint32_t addr, uint32_t data,
			 uint32_t sz)
{
	uint64_t ctl = ECCB_CTL_MAGIC, stat;
	int64_t rc, tout;
	uint64_t data_reg;

	/* Prefer the direct MMIO path when available */
	if (lpc->mbase)
		return opb_mmio_write(lpc, addr, data, sz);

	/* Data is left-justified in the 64-bit ECCB DATA register */
	switch(sz) {
	case 1:
		data_reg = ((uint64_t)data) << 56;
		break;
	case 2:
		data_reg = ((uint64_t)data) << 48;
		break;
	case 4:
		data_reg = ((uint64_t)data) << 32;
		break;
	default:
		prerror("Invalid data size %d\n", sz);
		return OPAL_PARAMETER;
	}

	rc = xscom_write(lpc->chip_id, lpc->xbase + ECCB_DATA, data_reg);
	if (rc) {
		log_simple_error(&e_info(OPAL_RC_LPC_WRITE),
			"LPC: XSCOM write to ECCB DATA error %lld\n", rc);
		return rc;
	}

	/* Program size/addrlen/address, which kicks off the write */
	ctl = SETFIELD(ECCB_CTL_DATASZ, ctl, sz);
	ctl = SETFIELD(ECCB_CTL_ADDRLEN, ctl, ECCB_ADDRLEN_4B);
	ctl = SETFIELD(ECCB_CTL_ADDR, ctl, addr);
	rc = xscom_write(lpc->chip_id, lpc->xbase + ECCB_CTL, ctl);
	if (rc) {
		log_simple_error(&e_info(OPAL_RC_LPC_WRITE),
			"LPC: XSCOM write to ECCB CTL error %lld\n", rc);
		return rc;
	}

	/* Poll the status register for completion or error */
	for (tout = 0; tout < ECCB_TIMEOUT; tout++) {
		rc = xscom_read(lpc->chip_id, lpc->xbase + ECCB_STAT,
				&stat);
		if (rc) {
			log_simple_error(&e_info(OPAL_RC_LPC_WRITE),
				"LPC: XSCOM read from ECCB STAT err %lld\n",
									rc);
			return rc;
		}
		if (stat & ECCB_STAT_OP_DONE) {
			if (stat & ECCB_STAT_ERR_MASK) {
				log_simple_error(&e_info(OPAL_RC_LPC_WRITE),
					"LPC: Error status: 0x%llx\n", stat);
				return OPAL_HARDWARE;
			}
			return OPAL_SUCCESS;
		}
		time_wait_nopoll(100);
	}
	log_simple_error(&e_info(OPAL_RC_LPC_WRITE), "LPC: Write timeout !\n");
	return OPAL_HARDWARE;
}
248 
opb_mmio_read(struct lpcm * lpc,uint32_t addr,uint32_t * data,uint32_t sz)249 static int64_t opb_mmio_read(struct lpcm *lpc, uint32_t addr, uint32_t *data,
250 			     uint32_t sz)
251 {
252 	switch (sz) {
253 	case 1:
254 		*data = in_8(lpc->mbase + addr);
255 		return OPAL_SUCCESS;
256 	case 2:
257 		*data = in_be16(lpc->mbase + addr);
258 		return OPAL_SUCCESS;
259 	case 4:
260 		*data = in_be32(lpc->mbase + addr);
261 		return OPAL_SUCCESS;
262 	}
263 	prerror("Invalid data size %d\n", sz);
264 	return OPAL_PARAMETER;
265 }
266 
/*
 * Read sz bytes (1, 2 or 4) from OPB address "addr" into *data through
 * the ECCB indirect bridge, driven over XSCOM. Falls back to the
 * direct MMIO path when one exists. Read data comes back left-justified
 * in the ECCB status register.
 *
 * Returns 0/OPAL_SUCCESS, OPAL_PARAMETER for a bad size, OPAL_HARDWARE
 * on ECCB error or timeout, or a propagated XSCOM error code.
 */
static int64_t opb_read(struct lpcm *lpc, uint32_t addr, uint32_t *data,
		        uint32_t sz)
{
	uint64_t ctl = ECCB_CTL_MAGIC | ECCB_CTL_READ, stat;
	int64_t rc, tout;

	/* Prefer the direct MMIO path when available */
	if (lpc->mbase)
		return opb_mmio_read(lpc, addr, data, sz);

	if (sz != 1 && sz != 2 && sz != 4) {
		prerror("Invalid data size %d\n", sz);
		return OPAL_PARAMETER;
	}

	/* Program size/addrlen/address, which kicks off the read */
	ctl = SETFIELD(ECCB_CTL_DATASZ, ctl, sz);
	ctl = SETFIELD(ECCB_CTL_ADDRLEN, ctl, ECCB_ADDRLEN_4B);
	ctl = SETFIELD(ECCB_CTL_ADDR, ctl, addr);
	rc = xscom_write(lpc->chip_id, lpc->xbase + ECCB_CTL, ctl);
	if (rc) {
		log_simple_error(&e_info(OPAL_RC_LPC_READ),
			"LPC: XSCOM write to ECCB CTL error %lld\n", rc);
		return rc;
	}

	/* Poll the status register for completion or error */
	for (tout = 0; tout < ECCB_TIMEOUT; tout++) {
		rc = xscom_read(lpc->chip_id, lpc->xbase + ECCB_STAT,
				&stat);
		if (rc) {
			log_simple_error(&e_info(OPAL_RC_LPC_READ),
				"LPC: XSCOM read from ECCB STAT err %lld\n",
									rc);
			return rc;
		}
		if (stat & ECCB_STAT_OP_DONE) {
			uint32_t rdata = GETFIELD(ECCB_STAT_RD_DATA, stat);
			if (stat & ECCB_STAT_ERR_MASK) {
				log_simple_error(&e_info(OPAL_RC_LPC_READ),
					"LPC: Error status: 0x%llx\n", stat);
				return OPAL_HARDWARE;
			}
			/* Right-justify the data for the caller */
			switch(sz) {
			case 1:
				*data = rdata >> 24;
				break;
			case 2:
				*data = rdata >> 16;
				break;
			default:
				*data = rdata;
				break;
			}
			return 0;
		}
		time_wait_nopoll(100);
	}
	log_simple_error(&e_info(OPAL_RC_LPC_READ), "LPC: Read timeout !\n");
	return OPAL_HARDWARE;
}
325 
/*
 * Select the active FW-space segment (IDSEL, 0..0xf). The cached
 * lpc->fw_idsel value avoids a read-modify-write of LPC_HC_FW_SEG_IDSEL
 * when the segment is already current.
 *
 * Returns OPAL_PARAMETER for idsel > 0xf, otherwise OPAL_SUCCESS or a
 * propagated OPB access error.
 */
static int64_t lpc_set_fw_idsel(struct lpcm *lpc, uint8_t idsel)
{
	uint32_t val;
	int64_t rc;

	if (idsel == lpc->fw_idsel)
		return OPAL_SUCCESS;
	if (idsel > 0xf)
		return OPAL_PARAMETER;

	rc = opb_read(lpc, lpc_reg_opb_base + LPC_HC_FW_SEG_IDSEL,
		      &val, 4);
	if (rc) {
		prerror("Failed to read HC_FW_SEG_IDSEL register !\n");
		return rc;
	}
	/* IDSEL occupies the low nibble of the register */
	val = (val & 0xfffffff0) | idsel;
	rc = opb_write(lpc, lpc_reg_opb_base + LPC_HC_FW_SEG_IDSEL,
		       val, 4);
	if (rc) {
		prerror("Failed to write HC_FW_SEG_IDSEL register !\n");
		return rc;
	}
	lpc->fw_idsel = idsel;
	return OPAL_SUCCESS;
}
352 
lpc_set_fw_rdsz(struct lpcm * lpc,uint8_t rdsz)353 static int64_t lpc_set_fw_rdsz(struct lpcm *lpc, uint8_t rdsz)
354 {
355 	uint32_t val;
356 	int64_t rc;
357 
358 	if (rdsz == lpc->fw_rdsz)
359 		return OPAL_SUCCESS;
360 	switch(rdsz) {
361 	case 1:
362 		val = LPC_HC_FW_RD_1B;
363 		break;
364 	case 2:
365 		val = LPC_HC_FW_RD_2B;
366 		break;
367 	case 4:
368 		val = LPC_HC_FW_RD_4B;
369 		break;
370 	default:
371 		/*
372 		 * The HW supports 16 and 128 via a buffer/cache
373 		 * but I have never exprimented with it and am not
374 		 * sure it works the way we expect so let's leave it
375 		 * at that for now
376 		 */
377 		return OPAL_PARAMETER;
378 	}
379 	rc = opb_write(lpc, lpc_reg_opb_base + LPC_HC_FW_RD_ACC_SIZE,
380 		       val, 4);
381 	if (rc) {
382 		prerror("Failed to write LPC_HC_FW_RD_ACC_SIZE !\n");
383 		return rc;
384 	}
385 	lpc->fw_rdsz = rdsz;
386 	return OPAL_SUCCESS;
387 }
388 
/*
 * Validate an LPC access and return the OPB window base for its
 * address space. For FW space, also select the IDSEL segment and,
 * for reads, program the read access size.
 *
 * Returns OPAL_PARAMETER on address wraparound, out-of-bounds access,
 * non-byte access to IO/MEM space, segment-crossing FW access, or an
 * unknown address type; otherwise OPAL_SUCCESS or a propagated error
 * from the FW-space configuration helpers.
 */
static int64_t lpc_opb_prepare(struct lpcm *lpc,
			       enum OpalLPCAddressType addr_type,
			       uint32_t addr, uint32_t sz,
			       uint32_t *opb_base, bool is_write)
{
	uint32_t top = addr + sz;
	uint8_t fw_idsel;
	int64_t rc;

	/* Address wraparound */
	if (top < addr)
		return OPAL_PARAMETER;

	/*
	 * Bound check access and get the OPB base address for
	 * the window corresponding to the access type
	 */
	switch(addr_type) {
	case OPAL_LPC_IO:
		/* IO space is 64K */
		if (top > 0x10000)
			return OPAL_PARAMETER;
		/* And only supports byte accesses */
		if (sz != 1)
			return OPAL_PARAMETER;
		*opb_base = lpc_io_opb_base;
		break;
	case OPAL_LPC_MEM:
		/* MEM space is 256M */
		if (top > 0x10000000)
			return OPAL_PARAMETER;
		/* And only supports byte accesses */
		if (sz != 1)
			return OPAL_PARAMETER;
		*opb_base = lpc_mem_opb_base;
		break;
	case OPAL_LPC_FW:
		/*
		 * FW space is in segments of 256M controlled
		 * by IDSEL, make sure we don't cross segments
		 */
		*opb_base = lpc_fw_opb_base;
		fw_idsel = (addr >> 28);
		if (((top - 1) >> 28) != fw_idsel)
			return OPAL_PARAMETER;

		/* Set segment */
		rc = lpc_set_fw_idsel(lpc, fw_idsel);
		if (rc)
			return rc;
		/* Set read access size */
		if (!is_write) {
			rc = lpc_set_fw_rdsz(lpc, sz);
			if (rc)
				return rc;
		}
		break;
	default:
		return OPAL_PARAMETER;
	}
	return OPAL_SUCCESS;
}
451 
/*
 * Translate an LPC HC error status bit into a table index: ffs() - 1
 * gives the bit number, and the extra - 2 skips the two lowest status
 * bits (the BM0/BM1 request bits, which are not errors).
 */
#define LPC_ERROR_IDX(x) (__builtin_ffs(x) - 1 - 2)
#define LPC_ERROR(_sts, _rc, _description) \
	[LPC_ERROR_IDX(_sts)] = { _rc, _description }
/* OPAL return code and log text for each LPC HC error status bit */
static const struct lpc_error_entry lpc_error_table[] = {
	LPC_ERROR(LPC_HC_IRQ_BM_TAR_ERR, OPAL_WRONG_STATE, "Got bus master TAR error."),
	LPC_ERROR(LPC_HC_IRQ_TARG_TAR_ERR, OPAL_WRONG_STATE, "Got abnormal TAR error."),
	LPC_ERROR(LPC_HC_IRQ_SYNC_TIMEOUT_ERR, OPAL_TIMEOUT, "Got SYNC timeout error."),
	LPC_ERROR(LPC_HC_IRQ_SYNC_NORM_ERR, OPAL_WRONG_STATE, "Got SYNC normal error."),
	LPC_ERROR(LPC_HC_IRQ_SYNC_NORESP_ERR, OPAL_HARDWARE, "Got SYNC no-response error."),
	LPC_ERROR(LPC_HC_IRQ_SYNC_ABNORM_ERR, OPAL_WRONG_STATE, "Got SYNC abnormal error."),
};
463 
lpc_probe_prepare(struct lpcm * lpc)464 static int64_t lpc_probe_prepare(struct lpcm *lpc)
465 {
466 	const uint32_t irqmask_addr = lpc_reg_opb_base + LPC_HC_IRQMASK;
467 	const uint32_t irqstat_addr = lpc_reg_opb_base + LPC_HC_IRQSTAT;
468 	uint32_t irqmask;
469 	int rc;
470 
471 	rc = opb_read(lpc, irqmask_addr, &irqmask, 4);
472 	if (rc)
473 		return rc;
474 
475 	irqmask &= ~LPC_HC_IRQ_SYNC_NORESP_ERR;
476 	rc = opb_write(lpc, irqmask_addr, irqmask, 4);
477 	if (rc)
478 		return rc;
479 
480 	return opb_write(lpc, irqstat_addr, LPC_HC_IRQ_SYNC_NORESP_ERR, 4);
481 }
482 
/*
 * Second half of an error probe: read the latched IRQ status, clear
 * the no-response latch, restore the mask changed by
 * lpc_probe_prepare(), then translate any latched error bit into an
 * OPAL return code through lpc_error_table.
 *
 * Returns OPAL_SUCCESS when the probed access saw no LPC error.
 */
static int64_t lpc_probe_test(struct lpcm *lpc)
{
	const uint32_t irqmask_addr = lpc_reg_opb_base + LPC_HC_IRQMASK;
	const uint32_t irqstat_addr = lpc_reg_opb_base + LPC_HC_IRQSTAT;
	uint32_t irqmask, irqstat;
	int64_t idx;
	int rc;

	rc = opb_read(lpc, irqstat_addr, &irqstat, 4);
	if (rc)
		return rc;

	/* Clear the no-response latch we unmasked for the probe */
	rc = opb_write(lpc, irqstat_addr, LPC_HC_IRQ_SYNC_NORESP_ERR, 4);
	if (rc)
		return rc;

	rc = opb_read(lpc, irqmask_addr, &irqmask, 4);
	if (rc)
		return rc;

	/* Re-mask the no-response error interrupt */
	irqmask |= LPC_HC_IRQ_SYNC_NORESP_ERR;
	rc = opb_write(lpc, irqmask_addr, irqmask, 4);
	if (rc)
		return rc;

	if (!(irqstat & LPC_HC_IRQ_BASE_IRQS))
		return OPAL_SUCCESS;

	/* Ensure we can perform a valid lookup in the error table */
	idx = LPC_ERROR_IDX(irqstat);
	if (idx < 0 || idx >= ARRAY_SIZE(lpc_error_table)) {
		prerror("LPC bus error translation failed with status 0x%x\n",
			irqstat);
		return OPAL_PARAMETER;
	}

	rc = lpc_error_table[idx].rc;
	return rc;
}
522 
/*
 * Core LPC write: takes the bus lock, optionally arms the error-probe
 * machinery, translates (addr_type, addr) to an OPB address, performs
 * the access and, when probing, checks for a latched LPC error.
 */
static int64_t __lpc_write(struct lpcm *lpc, enum OpalLPCAddressType addr_type,
			   uint32_t addr, uint32_t data, uint32_t sz,
			   bool probe)
{
	uint32_t opb_base;
	int64_t rc;

	lock(&lpc->lock);
	if (probe) {
		rc = lpc_probe_prepare(lpc);
		if (rc)
			goto bail;
	}

	/*
	 * Convert to an OPB access and handle LPC HC configuration
	 * for FW accesses (IDSEL)
	 */
	rc = lpc_opb_prepare(lpc, addr_type, addr, sz, &opb_base, true);
	if (rc)
		goto bail;

	/* Perform OPB access */
	rc = opb_write(lpc, opb_base + addr, data, sz);
	if (rc)
		goto bail;

	/* When probing, translate a latched LPC error into a rc */
	if (probe)
		rc = lpc_probe_test(lpc);
 bail:
	unlock(&lpc->lock);
	return rc;
}
556 
__lpc_write_sanity(enum OpalLPCAddressType addr_type,uint32_t addr,uint32_t data,uint32_t sz,bool probe)557 static int64_t __lpc_write_sanity(enum OpalLPCAddressType addr_type,
558 				  uint32_t addr, uint32_t data, uint32_t sz,
559 				  bool probe)
560 {
561 	struct proc_chip *chip;
562 
563 	if (lpc_default_chip_id < 0)
564 		return OPAL_PARAMETER;
565 	chip = get_chip(lpc_default_chip_id);
566 	if (!chip || !chip->lpc)
567 		return OPAL_PARAMETER;
568 	return __lpc_write(chip->lpc, addr_type, addr, data, sz, probe);
569 }
570 
/* Non-probing write on the default LPC bus */
int64_t lpc_write(enum OpalLPCAddressType addr_type, uint32_t addr,
		  uint32_t data, uint32_t sz)
{
	const bool probe = false;

	return __lpc_write_sanity(addr_type, addr, data, sz, probe);
}
576 
/* Probing write on the default LPC bus: reports latched LPC errors */
int64_t lpc_probe_write(enum OpalLPCAddressType addr_type, uint32_t addr,
			uint32_t data, uint32_t sz)
{
	const bool probe = true;

	return __lpc_write_sanity(addr_type, addr, data, sz, probe);
}
582 
/*
 * The "OPAL" variant add the emulation of 2 and 4 byte accesses using
 * byte accesses for IO and MEM space in order to be compatible with
 * existing Linux expectations
 *
 * NOTE(review): the emulation loop below emits the least-significant
 * byte at the lowest address, while opal_lpc_read() treats the byte at
 * the lowest address as most significant. This asymmetry is existing
 * behavior — confirm against the Linux opal-lpc consumer before
 * changing either side.
 */
static int64_t opal_lpc_write(uint32_t chip_id, enum OpalLPCAddressType addr_type,
			      uint32_t addr, uint32_t data, uint32_t sz)
{
	struct proc_chip *chip;
	int64_t rc;

	chip = get_chip(chip_id);
	if (!chip || !chip->lpc)
		return OPAL_PARAMETER;

	/* FW space supports native wide accesses; bytes always pass through */
	if (addr_type == OPAL_LPC_FW || sz == 1)
		return __lpc_write(chip->lpc, addr_type, addr, data, sz, false);
	while(sz--) {
		rc = __lpc_write(chip->lpc, addr_type, addr, data & 0xff, 1, false);
		if (rc)
			return rc;
		addr++;
		data >>= 8;
	}
	return OPAL_SUCCESS;
}
609 
/*
 * Core LPC read: takes the bus lock, optionally arms the error-probe
 * machinery, translates (addr_type, addr) to an OPB address, performs
 * the access and, when probing, checks for a latched LPC error.
 */
static int64_t __lpc_read(struct lpcm *lpc, enum OpalLPCAddressType addr_type,
			  uint32_t addr, uint32_t *data, uint32_t sz,
			  bool probe)
{
	uint32_t opb_base;
	int64_t rc;

	lock(&lpc->lock);
	if (probe) {
		rc = lpc_probe_prepare(lpc);
		if (rc)
			goto bail;
	}

	/*
	 * Convert to an OPB access and handle LPC HC configuration
	 * for FW accesses (IDSEL and read size)
	 */
	rc = lpc_opb_prepare(lpc, addr_type, addr, sz, &opb_base, false);
	if (rc)
		goto bail;

	/* Perform OPB access */
	rc = opb_read(lpc, opb_base + addr, data, sz);
	if (rc)
		goto bail;

	/* When probing, translate a latched LPC error into a rc */
	if (probe)
		rc = lpc_probe_test(lpc);
 bail:
	unlock(&lpc->lock);
	return rc;
}
643 
__lpc_read_sanity(enum OpalLPCAddressType addr_type,uint32_t addr,uint32_t * data,uint32_t sz,bool probe)644 static int64_t __lpc_read_sanity(enum OpalLPCAddressType addr_type,
645 				 uint32_t addr, uint32_t *data, uint32_t sz,
646 				 bool probe)
647 {
648 	struct proc_chip *chip;
649 
650 	if (lpc_default_chip_id < 0)
651 		return OPAL_PARAMETER;
652 	chip = get_chip(lpc_default_chip_id);
653 	if (!chip || !chip->lpc)
654 		return OPAL_PARAMETER;
655 	return __lpc_read(chip->lpc, addr_type, addr, data, sz, probe);
656 }
657 
/* Non-probing read on the default LPC bus */
int64_t lpc_read(enum OpalLPCAddressType addr_type, uint32_t addr,
		 uint32_t *data, uint32_t sz)
{
	const bool probe = false;

	return __lpc_read_sanity(addr_type, addr, data, sz, probe);
}
663 
/* Probing read on the default LPC bus: reports latched LPC errors */
int64_t lpc_probe_read(enum OpalLPCAddressType addr_type, uint32_t addr,
		       uint32_t *data, uint32_t sz)
{
	const bool probe = true;

	return __lpc_read_sanity(addr_type, addr, data, sz, probe);
}
669 
/*
 * The "OPAL" variant add the emulation of 2 and 4 byte accesses using
 * byte accesses for IO and MEM space in order to be compatible with
 * existing Linux expectations
 *
 * The result is returned big-endian in *data; multi-byte IO/MEM reads
 * place the byte at the lowest LPC address in the most significant
 * position of the accumulated value.
 */
static int64_t opal_lpc_read(uint32_t chip_id, enum OpalLPCAddressType addr_type,
			     uint32_t addr, __be32 *data, uint32_t sz)
{
	struct proc_chip *chip;
	int64_t rc;
	uint32_t tmp;

	chip = get_chip(chip_id);
	if (!chip || !chip->lpc)
		return OPAL_PARAMETER;

	if (addr_type == OPAL_LPC_FW) {
		/* FW space supports native wide reads */
		rc = __lpc_read(chip->lpc, addr_type, addr, &tmp, sz, false);
		if (rc)
			return rc;

	} else {
		/* Emulate wide IO/MEM reads with byte accesses */
		tmp = 0;
		while (sz--) {
			uint32_t byte;

			rc = __lpc_read(chip->lpc, addr_type, addr, &byte, 1, false);
			if (rc)
				return rc;
			tmp = tmp | (byte << (8 * sz));
			addr++;
		}
	}

	*data = cpu_to_be32(tmp);

	return OPAL_SUCCESS;
}
708 
lpc_present(void)709 bool lpc_present(void)
710 {
711 	return lpc_default_chip_id >= 0;
712 }
713 
/*
 * (Re)program the LPC HC interrupt mask and SerIRQ controller from the
 * registered clients' interrupt masks, and enable the LPC interrupt in
 * the OPB master. Called with LPC lock held.
 */
static void lpc_setup_serirq(struct lpcm *lpc)
{
	struct lpc_client_entry *ent;
	uint32_t mask = LPC_HC_IRQ_BASE_IRQS;
	int rc;

	/* Don't touch HW until the interrupt infrastructure is up */
	if (!lpc_irqs_ready)
		return;

	/* Collect serirq enable bits */
	list_for_each(&lpc->clients, ent, node)
		mask |= ent->clt->interrupts & LPC_HC_IRQ_SERIRQ_ALL;

	rc = opb_write(lpc, lpc_reg_opb_base + LPC_HC_IRQMASK, mask, 4);
	if (rc) {
		prerror("Failed to update irq mask\n");
		return;
	}
	DBG_IRQ("IRQ mask set to 0x%08x\n", mask);

	/* Enable the LPC interrupt in the OPB Master */
	opb_write(lpc, opb_master_reg_base + OPB_MASTER_LS_IRQ_POL, 0, 4);
	rc = opb_write(lpc, opb_master_reg_base + OPB_MASTER_LS_IRQ_MASK,
		       OPB_MASTER_IRQ_LPC, 4);
	if (rc)
		prerror("Failed to enable IRQs in OPB\n");

	/* Check whether we should enable serirq */
	if (mask & LPC_HC_IRQ_SERIRQ_ALL) {
		rc = opb_write(lpc, lpc_reg_opb_base + LPC_HC_IRQSER_CTRL,
			       LPC_HC_IRQSER_EN |
			       LPC_HC_IRQSER_START_4CLK |
			       /*
				* New mode bit for P9N DD2.0 (ignored otherwise)
				* when set we no longer have to manually clear
				* the SerIRQs on EOI.
				*/
			       LPC_HC_IRQSER_AUTO_CLEAR, 4);
		DBG_IRQ("SerIRQ enabled\n");
	} else {
		rc = opb_write(lpc, lpc_reg_opb_base + LPC_HC_IRQSER_CTRL,
			       0, 4);
		DBG_IRQ("SerIRQ disabled\n");
	}
	if (rc)
		prerror("Failed to configure SerIRQ\n");
	/* Debug readback of the registers we just programmed */
	{
		u32 val;
		rc = opb_read(lpc, lpc_reg_opb_base + LPC_HC_IRQMASK, &val, 4);
		if (rc)
			prerror("Failed to readback mask");
		else
			DBG_IRQ("MASK READBACK=%x\n", val);

		rc = opb_read(lpc, lpc_reg_opb_base + LPC_HC_IRQSER_CTRL,
			      &val, 4);
		if (rc)
			prerror("Failed to readback ctrl");
		else
			DBG_IRQ("CTRL READBACK=%x\n", val);
	}
}
777 
/*
 * Route SerIRQ "sirq" to PSI interrupt route "psi_idx" (0..3).
 *
 * Updates the per-route dispatch masks and bookkeeping, then programs
 * the 2-bit route field in the OPB master routing registers: register
 * 0xc carries SerIRQs 0-13 and register 0x8 carries SerIRQs 14-16.
 */
static void lpc_route_serirq(struct lpcm *lpc, uint32_t sirq,
			     uint32_t psi_idx)
{
	uint32_t reg, shift, val, psi_old;
	int64_t rc;

	/* Move the SerIRQ bit from its old route's mask to the new one */
	psi_old = lpc->sirq_routes[sirq];
	lpc->sirq_rmasks[psi_old] &= ~(LPC_HC_IRQ_SERIRQ0 >> sirq);
	lpc->sirq_rmasks[psi_idx] |=  (LPC_HC_IRQ_SERIRQ0 >> sirq);
	lpc->sirq_routes[sirq] = psi_idx;
	lpc->sirq_routed[sirq] = true;

	/* We may not be ready yet ... */
	if (!lpc->has_serirq)
		return;

	/* Locate the 2-bit route field for this SerIRQ */
	if (sirq < 14) {
		reg = 0xc;
		shift = 4 + (sirq << 1);
	} else {
		reg = 0x8;
		shift = 8 + ((sirq - 14) << 1);
	}
	shift = 30-shift;
	rc = opb_read(lpc, opb_master_reg_base + reg, &val, 4);
	if (rc)
		return;
	val = val & ~(3 << shift);
	val |= (psi_idx & 3) << shift;
	opb_write(lpc, opb_master_reg_base + reg, val, 4);
}
809 
/*
 * Allocate one of the 4 PSI routes for SerIRQ "irq" according to the
 * requested target policy (OPAL vs. Linux): prefer a free route, fall
 * back to an existing route with a matching policy, then program it.
 */
static void lpc_alloc_route(struct lpcm *lpc, unsigned int irq,
			    unsigned int policy)
{
	unsigned int i, r, c;
	int route = -1;

	if (policy == IRQ_ATTR_TARGET_OPAL)
		r = LPC_ROUTE_OPAL;
	else
		r = LPC_ROUTE_LINUX;

	prlog(PR_DEBUG, "Routing irq %d, policy: %d (r=%d)\n",
	      irq, policy, r);

	/* Are we already routed ? */
	if (lpc->sirq_routed[irq] &&
	    r != lpc->sirq_ralloc[lpc->sirq_routes[irq]]) {
		prerror("irq %d has conflicting policies\n", irq);
		return;
	}

	/* First try to find a free route. Leave one for another
	 * policy though
	 */
	for (i = 0, c = 0; i < 4; i++) {
		/* Count routes with identical policy */
		if (lpc->sirq_ralloc[i] == r)
			c++;

		/* Use the route if it's free and there is no more
		 * than 3 existing routes with that policy
		 */
		if (lpc->sirq_ralloc[i] == LPC_ROUTE_FREE && c < 4) {
			lpc->sirq_ralloc[i] = r;
			route = i;
			break;
		}
	}

	/* If we couldn't get a free one, try to find an existing one
	 * with a matching policy
	 */
	for (i = 0; route < 0 && i < 4; i++) {
		if (lpc->sirq_ralloc[i] == r)
			route = i;
	}

	/* Still no route ? bail. That should never happen */
	if (route < 0) {
		prerror("Can't find a route for irq %d\n", irq);
		return;
	}

	/* Program route */
	lpc_route_serirq(lpc, irq, route);

	prlog(PR_DEBUG, "SerIRQ %d using route %d targetted at %s\n",
	      irq, route, r == LPC_ROUTE_LINUX ? "OS" : "OPAL");
}
869 
lpc_get_irq_policy(uint32_t chip_id,uint32_t psi_idx)870 unsigned int lpc_get_irq_policy(uint32_t chip_id, uint32_t psi_idx)
871 {
872 	struct proc_chip *c = get_chip(chip_id);
873 
874 	if (!c || !c->lpc)
875 		return IRQ_ATTR_TARGET_LINUX;
876 
877 	if (c->lpc->sirq_ralloc[psi_idx] == LPC_ROUTE_LINUX)
878 		return IRQ_ATTR_TARGET_LINUX;
879 	else
880 		return IRQ_ATTR_TARGET_OPAL | IRQ_ATTR_TYPE_LSI;
881 }
882 
/*
 * Emit the device-tree "interrupt-map" property on the LPC node,
 * translating each routed SerIRQ to its PSI interrupt. Each entry is
 * 5 cells: a 2-cell unit address (0 0), the SerIRQ number, the PSI
 * node phandle and the PSI interrupt number for the assigned route.
 */
static void lpc_create_int_map(struct lpcm *lpc, struct dt_node *psi_node)
{
	__be32 map[LPC_NUM_SERIRQ * 5], *pmap;
	uint32_t i;

	if (!psi_node)
		return;
	pmap = map;
	for (i = 0; i < LPC_NUM_SERIRQ; i++) {
		if (!lpc->sirq_routed[i])
			continue;
		*(pmap++) = 0;
		*(pmap++) = 0;
		*(pmap++) = cpu_to_be32(i);
		*(pmap++) = cpu_to_be32(psi_node->phandle);
		*(pmap++) = cpu_to_be32(lpc->sirq_routes[i] + P9_PSI_IRQ_LPC_SIRQ0);
	}
	/* Nothing routed: no property at all */
	if (pmap == map)
		return;
	dt_add_property(lpc->node, "interrupt-map", map,
			(pmap - map) * sizeof(uint32_t));
	dt_add_property_cells(lpc->node, "interrupt-map-mask", 0, 0, 0xff);
	dt_add_property_cells(lpc->node, "#interrupt-cells", 1);
}
907 
lpc_finalize_interrupts(void)908 void lpc_finalize_interrupts(void)
909 {
910 	struct proc_chip *chip;
911 
912 	lpc_irqs_ready = true;
913 
914 	for_each_chip(chip) {
915 		if (chip->lpc && chip->psi &&
916 		    (chip->type == PROC_CHIP_P9_NIMBUS ||
917 		     chip->type == PROC_CHIP_P9_CUMULUS ||
918 		     chip->type == PROC_CHIP_P9P ||
919 		     chip->type == PROC_CHIP_P10))
920 			lpc_create_int_map(chip->lpc, chip->psi->node);
921 	}
922 }
923 
lpc_init_interrupts_one(struct proc_chip * chip)924 static void lpc_init_interrupts_one(struct proc_chip *chip)
925 {
926 	struct lpcm *lpc = chip->lpc;
927 	int i, rc;
928 
929 	lock(&lpc->lock);
930 
931 	/* First mask them all */
932 	rc = opb_write(lpc, lpc_reg_opb_base + LPC_HC_IRQMASK, 0, 4);
933 	if (rc) {
934 		prerror("Failed to init interrutps\n");
935 		goto bail;
936 	}
937 
938 	switch(chip->type) {
939 	case PROC_CHIP_P8_MURANO:
940 	case PROC_CHIP_P8_VENICE:
941 		/* On Murano/Venice, there is no SerIRQ, only enable error
942 		 * interrupts
943 		 */
944 		rc = opb_write(lpc, lpc_reg_opb_base + LPC_HC_IRQMASK,
945 			       LPC_HC_IRQ_BASE_IRQS, 4);
946 		if (rc) {
947 			prerror("Failed to set interrupt mask\n");
948 			goto bail;
949 		}
950 		opb_write(lpc, lpc_reg_opb_base + LPC_HC_IRQSER_CTRL, 0, 4);
951 		break;
952 	case PROC_CHIP_P8_NAPLES:
953 		/* On Naples, we support LPC interrupts, enable them based
954 		 * on what clients requests. This will setup the mask and
955 		 * enable processing
956 		 */
957 		lpc->has_serirq = true;
958 		lpc_setup_serirq(lpc);
959 		break;
960 	case PROC_CHIP_P9_NIMBUS:
961 	case PROC_CHIP_P9_CUMULUS:
962 	case PROC_CHIP_P9P:
963 	case PROC_CHIP_P10:
964 		/* On P9, we additionally setup the routing. */
965 		lpc->has_serirq = true;
966 		for (i = 0; i < LPC_NUM_SERIRQ; i++) {
967 			if (lpc->sirq_routed[i])
968 				lpc_route_serirq(lpc, i, lpc->sirq_routes[i]);
969 		}
970 		lpc_setup_serirq(lpc);
971 		break;
972 	default:
973 		;
974 	}
975  bail:
976 	unlock(&lpc->lock);
977 }
978 
lpc_init_interrupts(void)979 void lpc_init_interrupts(void)
980 {
981 	struct proc_chip *chip;
982 
983 	lpc_irqs_ready = true;
984 
985 	for_each_chip(chip) {
986 		if (chip->lpc)
987 			lpc_init_interrupts_one(chip);
988 	}
989 }
990 
/*
 * Handle an LPC bus reset (LRESET): notify every client that has a
 * reset callback (dropping the LPC lock around each call), then
 * reprogram the SerIRQ setup which the reset may have clobbered.
 * Called with the LPC lock held.
 */
static void lpc_dispatch_reset(struct lpcm *lpc)
{
	struct lpc_client_entry *ent;

	/* XXX We are going to hit this repeatedly while reset is
	 * asserted which might be sub-optimal. We should instead
	 * detect assertion and start a poller that will wait for
	 * de-assertion. We could notify clients of LPC being
	 * on/off rather than just reset
	 */

	prerror("Got LPC reset on chip 0x%x !\n", lpc->chip_id);

	/* Notify clients that registered a reset callback */
	list_for_each(&lpc->clients, ent, node) {
		if (!ent->clt->reset)
			continue;
		/* Drop the lock across the client callback */
		unlock(&lpc->lock);
		ent->clt->reset(lpc->chip_id);
		lock(&lpc->lock);
	}

	/* Reconfigure serial interrupts */
	if (lpc->has_serirq)
		lpc_setup_serirq(lpc);
}
1017 
/*
 * Handle LPC error interrupts: clear the latched error bits, dispatch
 * a bus reset if LRESET is pending, otherwise translate the error bit
 * into a log entry that includes the LPC HC error address register.
 */
static void lpc_dispatch_err_irqs(struct lpcm *lpc, uint32_t irqs)
{
	const struct lpc_error_entry *err;
	static int lpc_bus_err_count;
	struct opal_err_info *info;
	uint32_t addr;
	int64_t idx;
	int rc;

	/* Write back to clear error interrupts, we clear SerIRQ later
	 * as they are handled as level interrupts
	 */
	rc = opb_write(lpc, lpc_reg_opb_base + LPC_HC_IRQSTAT,
		       LPC_HC_IRQ_BASE_IRQS, 4);
	if (rc)
		prerror("Failed to clear IRQ error latches !\n");

	if (irqs & LPC_HC_IRQ_LRESET) {
		lpc_dispatch_reset(lpc);
		return;
	}

	/* Ensure we can perform a valid lookup in the error table */
	idx = LPC_ERROR_IDX(irqs);
	if (idx < 0 || idx >= ARRAY_SIZE(lpc_error_table)) {
		prerror("LPC bus error translation failed with status 0x%x\n",
			irqs);
		return;
	}

	/* Find and report the error. In manufacturing mode, escalate
	 * once the error count exceeds the degradation threshold.
	 */
	err = &lpc_error_table[idx];
	lpc_bus_err_count++;
	if (manufacturing_mode && (lpc_bus_err_count > LPC_BUS_DEGRADED_PERF_THRESHOLD))
		info = &e_info(OPAL_RC_LPC_SYNC_PERF);
	else
		info = &e_info(OPAL_RC_LPC_SYNC);

	rc = opb_read(lpc, lpc_reg_opb_base + LPC_HC_ERROR_ADDRESS, &addr, 4);
	if (rc)
		log_simple_error(info, "LPC[%03x]: %s "
				 "Error reading error address register\n",
				 lpc->chip_id, err->description);
	else
		log_simple_error(info, "LPC[%03x]: %s Error address reg: "
				 "0x%08x\n",
				 lpc->chip_id, err->description, addr);
}
1066 
/*
 * Dispatch pending SerIRQs to every client interested in them and,
 * when requested, clear the corresponding latches afterwards.
 * Called with lpc->lock held.
 */
static void lpc_dispatch_ser_irqs(struct lpcm *lpc, uint32_t irqs,
				  bool clear_latch)
{
	struct lpc_client_entry *ent;

	/* Only the SerIRQ bits are of interest here */
	irqs &= LPC_HC_IRQ_SERIRQ_ALL;

	/* Hand the pending SerIRQs to each client that subscribed */
	list_for_each(&lpc->clients, ent, node) {
		uint32_t pending;

		if (!ent->clt->interrupt)
			continue;
		pending = ent->clt->interrupts & irqs;
		if (!pending)
			continue;
		/* Drop the lock across the callback so the handler
		 * can use the LPC bus itself.
		 */
		unlock(&lpc->lock);
		ent->clt->interrupt(lpc->chip_id, pending);
		lock(&lpc->lock);
	}

	/* SerIRQs are level sensitive, so the latch is only cleared
	 * after the handlers have run, and only if asked to.
	 */
	if (clear_latch) {
		if (opb_write(lpc, lpc_reg_opb_base + LPC_HC_IRQSTAT,
			      irqs, 4))
			prerror("Failed to clear SerIRQ latches !\n");
	}
}
1098 
/*
 * Top-level LPC interrupt handler for a chip: reads the OPB master
 * interrupt status, dispatches LPC error and SerIRQ interrupts, and
 * acknowledges the interrupt at the OPB level on the way out.
 */
void lpc_interrupt(uint32_t chip_id)
{
	struct proc_chip *chip = get_chip(chip_id);
	struct lpcm *lpc;
	uint32_t irqs, opb_irqs;
	int rc;

	/* No initialized LPC controller on that chip */
	if (!chip || !chip->lpc)
		return;
	lpc = chip->lpc;

	lock(&lpc->lock);

	/* Grab OPB Master LS interrupt status */
	rc = opb_read(lpc, opb_master_reg_base + OPB_MASTER_LS_IRQ_STAT,
		      &opb_irqs, 4);
	if (rc) {
		prerror("Failed to read OPB IRQ state\n");
		unlock(&lpc->lock);
		return;
	}

	DBG_IRQ("OPB IRQ on chip 0x%x, oirqs=0x%08x\n", chip_id, opb_irqs);

	/* Check if it's an LPC interrupt */
	if (!(opb_irqs & OPB_MASTER_IRQ_LPC)) {
		/* Something we don't support ? Ack it anyway... */
		goto bail;
	}

	/* Handle the lpc interrupt source (errors etc...) */
	rc = opb_read(lpc, lpc_reg_opb_base + LPC_HC_IRQSTAT, &irqs, 4);
	if (rc) {
		prerror("Failed to read LPC IRQ state\n");
		goto bail;
	}

	DBG_IRQ("LPC IRQ on chip 0x%x, irqs=0x%08x\n", chip_id, irqs);

	/* Handle error interrupts (this also clears their latches) */
	if (irqs & LPC_HC_IRQ_BASE_IRQS)
		lpc_dispatch_err_irqs(lpc, irqs);

	/* Handle SerIRQ interrupts, clearing their latches afterwards */
	if (irqs & LPC_HC_IRQ_SERIRQ_ALL)
		lpc_dispatch_ser_irqs(lpc, irqs, true);
 bail:
	/* Ack it at the OPB level */
	opb_write(lpc, opb_master_reg_base + OPB_MASTER_LS_IRQ_STAT,
		  opb_irqs, 4);
	unlock(&lpc->lock);
}
1152 
/*
 * Handle a SerIRQ routed to PSI interrupt 'index' on the given chip:
 * read the LPC controller status, keep only the SerIRQs routed to
 * that index, and dispatch them to the registered clients.
 */
void lpc_serirq(uint32_t chip_id, uint32_t index)
{
	struct proc_chip *chip = get_chip(chip_id);
	struct lpcm *lpc;
	uint32_t stat, route_mask;

	/* Nothing to do without an initialized LPC controller */
	if (!chip || !chip->lpc)
		return;
	lpc = chip->lpc;

	lock(&lpc->lock);

	/* Fetch the LPC host controller interrupt status */
	if (opb_read(lpc, lpc_reg_opb_base + LPC_HC_IRQSTAT, &stat, 4)) {
		prerror("Failed to read LPC IRQ state\n");
		goto out;
	}
	route_mask = lpc->sirq_rmasks[index];

	DBG_IRQ("IRQ on chip 0x%x, irqs=0x%08x rmask=0x%08x\n",
		chip_id, stat, route_mask);

	/*
	 * Handle SerIRQ interrupts. Don't clear the latch,
	 * it will be done in our special EOI callback if
	 * necessary on DD1
	 */
	stat &= route_mask;
	if (stat)
		lpc_dispatch_ser_irqs(lpc, stat, false);

 out:
	unlock(&lpc->lock);
}
1190 
/*
 * Dispatch every possible SerIRQ on a chip to the registered clients
 * without touching the interrupt latches.
 */
void lpc_all_interrupts(uint32_t chip_id)
{
	struct proc_chip *chip = get_chip(chip_id);

	/* Bail out when the chip has no initialized LPC controller */
	if (!chip || !chip->lpc)
		return;

	lock(&chip->lpc->lock);
	lpc_dispatch_ser_irqs(chip->lpc, LPC_HC_IRQ_SERIRQ_ALL, false);
	unlock(&chip->lpc->lock);
}
1206 
lpc_init_chip_p8(struct dt_node * xn)1207 static void lpc_init_chip_p8(struct dt_node *xn)
1208  {
1209 	uint32_t gcid = dt_get_chip_id(xn);
1210 	struct proc_chip *chip;
1211 	struct lpcm *lpc;
1212 
1213 	chip = get_chip(gcid);
1214 	assert(chip);
1215 
1216 	lpc = zalloc(sizeof(struct lpcm));
1217 	assert(lpc);
1218 	lpc->chip_id = gcid;
1219 	lpc->xbase = dt_get_address(xn, 0, NULL);
1220 	lpc->fw_idsel = 0xff;
1221 	lpc->fw_rdsz = 0xff;
1222 	lpc->node = xn;
1223 	list_head_init(&lpc->clients);
1224 	init_lock(&lpc->lock);
1225 
1226 	if (lpc_default_chip_id < 0 ||
1227 	    dt_has_node_property(xn, "primary", NULL)) {
1228 		lpc_default_chip_id = gcid;
1229 	}
1230 
1231 	/* Mask all interrupts for now */
1232 	opb_write(lpc, lpc_reg_opb_base + LPC_HC_IRQMASK, 0, 4);
1233 
1234 	printf("LPC[%03x]: Initialized, access via XSCOM @0x%x\n",
1235 	       gcid, lpc->xbase);
1236 
1237 	dt_add_property(xn, "interrupt-controller", NULL, 0);
1238 	dt_add_property_cells(xn, "#interrupt-cells", 1);
1239 	assert(dt_prop_get_u32(xn, "#address-cells") == 2);
1240 
1241 	chip->lpc = lpc;
1242 }
1243 
/*
 * Initialize the LPC controller of a P9 (or later) chip from its
 * "lpcm-opb" device-tree node. Access on P9 is memory-mapped; the
 * MMIO base comes from the node's "ranges" property.
 */
static void lpc_init_chip_p9(struct dt_node *opb_node)
{
	uint32_t gcid = dt_get_chip_id(opb_node);
	struct dt_node *lpc_node;
	struct proc_chip *chip;
	struct lpcm *lpc;
	u64 addr;
	u32 val;

	chip = get_chip(gcid);
	assert(chip);

	/* Grab OPB base address (cells 1 and 2 of "ranges") */
	addr = dt_prop_get_cell(opb_node, "ranges", 1);
	addr <<= 32;
	addr |= dt_prop_get_cell(opb_node, "ranges", 2);

	/* Find the "lpc" child node */
	lpc_node = dt_find_compatible_node(opb_node, NULL, "ibm,power9-lpc");
	if (!lpc_node)
		return;

	lpc = zalloc(sizeof(struct lpcm));
	assert(lpc);
	lpc->chip_id = gcid;
	lpc->mbase = (void *)addr;
	lpc->fw_idsel = 0xff;
	lpc->fw_rdsz = 0xff;
	lpc->node = lpc_node;
	list_head_init(&lpc->clients);
	init_lock(&lpc->lock);

	/* First bus found, or one marked "primary", becomes the default */
	if (lpc_default_chip_id < 0 ||
	    dt_has_node_property(opb_node, "primary", NULL)) {
		lpc_default_chip_id = gcid;
	}

	/* Mask all interrupts for now */
	opb_write(lpc, lpc_reg_opb_base + LPC_HC_IRQMASK, 0, 4);

	/* Clear any stale LPC bus errors */
	opb_write(lpc, lpc_reg_opb_base + LPC_HC_IRQSTAT,
		       LPC_HC_IRQ_BASE_IRQS, 4);

	/* Default with routing to PSI SerIRQ 0, this will be updated
	 * later when interrupts are initialized.
	 * NOTE(review): offsets 8/0xc and masks 0xff03ffff/0xf0000000
	 * are presumably the OPB master SerIRQ routing registers —
	 * confirm against the P9 LPC master register documentation.
	 */
	opb_read(lpc, opb_master_reg_base + 8, &val, 4);
	val &= 0xff03ffff;
	opb_write(lpc, opb_master_reg_base + 8, val, 4);
	opb_read(lpc, opb_master_reg_base + 0xc, &val, 4);
	val &= 0xf0000000;
	opb_write(lpc, opb_master_reg_base + 0xc, val, 4);

	prlog(PR_INFO, "LPC[%03x]: Initialized\n", gcid);
	prlog(PR_DEBUG,"access via MMIO @%p\n", lpc->mbase);

	chip->lpc = lpc;
}
1303 
lpc_init(void)1304 void lpc_init(void)
1305 {
1306 	struct dt_node *xn;
1307 	bool has_lpc = false;
1308 
1309 	/* Look for P9 first as the DT is compatile for both 8 and 9 */
1310 	dt_for_each_compatible(dt_root, xn, "ibm,power9-lpcm-opb") {
1311 		lpc_init_chip_p9(xn);
1312 		has_lpc = true;
1313 	}
1314 
1315 	if (!has_lpc) {
1316 		dt_for_each_compatible(dt_root, xn, "ibm,power8-lpc") {
1317 			lpc_init_chip_p8(xn);
1318 			has_lpc = true;
1319 		}
1320 	}
1321 	if (lpc_default_chip_id >= 0)
1322 		prlog(PR_DEBUG, "Default bus on chip 0x%x\n",
1323 		      lpc_default_chip_id);
1324 
1325 	if (has_lpc) {
1326 		opal_register(OPAL_LPC_WRITE, opal_lpc_write, 5);
1327 		opal_register(OPAL_LPC_READ, opal_lpc_read, 5);
1328 	}
1329 }
1330 
lpc_used_by_console(void)1331 void lpc_used_by_console(void)
1332 {
1333 	struct proc_chip *chip;
1334 
1335 	xscom_used_by_console();
1336 
1337 	for_each_chip(chip) {
1338 		struct lpcm *lpc = chip->lpc;
1339 		if (lpc) {
1340 			lpc->lock.in_con_path = true;
1341 			lock(&lpc->lock);
1342 			unlock(&lpc->lock);
1343 		}
1344 	}
1345 }
1346 
lpc_ok(void)1347 bool lpc_ok(void)
1348 {
1349 	struct proc_chip *chip;
1350 
1351 	if (lpc_default_chip_id < 0)
1352 		return false;
1353 	if (!xscom_ok())
1354 		return false;
1355 	chip = get_chip(lpc_default_chip_id);
1356 	if (!chip->lpc)
1357 		return false;
1358 	return !lock_held_by_me(&chip->lpc->lock);
1359 }
1360 
/*
 * Register an LPC client on a chip. The client's interrupt mask
 * selects which SerIRQs are delivered to it; 'policy' selects the
 * interrupt target (OPAL or the OS), the latter only being available
 * on controllers with per-interrupt routing (P9 and later).
 */
void lpc_register_client(uint32_t chip_id,
			 const struct lpc_client *clt,
			 uint32_t policy)
{
	struct lpc_client_entry *ent;
	struct proc_chip *chip;
	struct lpcm *lpc;
	bool has_routes;

	chip = get_chip(chip_id);
	assert(chip);
	lpc = chip->lpc;
	if (!lpc) {
		prerror("Attempt to register client on bad chip 0x%x\n",
			chip_id);
		return;
	}

	/* Only P9 and later controllers support SerIRQ routing */
	switch (chip->type) {
	case PROC_CHIP_P9_NIMBUS:
	case PROC_CHIP_P9_CUMULUS:
	case PROC_CHIP_P9P:
	case PROC_CHIP_P10:
		has_routes = true;
		break;
	default:
		has_routes = false;
	}

	if (policy != IRQ_ATTR_TARGET_OPAL && !has_routes) {
		prerror("Chip doesn't support OS interrupt policy\n");
		return;
	}

	ent = malloc(sizeof(*ent));
	assert(ent);
	ent->clt = clt;
	ent->policy = policy;

	lock(&lpc->lock);
	list_add(&lpc->clients, &ent->node);

	/* Allocate a route for every SerIRQ the client wants */
	if (has_routes) {
		unsigned int irq;

		for (irq = 0; irq < LPC_NUM_SERIRQ; irq++) {
			if (clt->interrupts & LPC_IRQ(irq))
				lpc_alloc_route(lpc, irq, policy);
		}
	}

	if (lpc->has_serirq)
		lpc_setup_serirq(lpc);
	unlock(&lpc->lock);
}
1408