1 // SPDX-License-Identifier: Apache-2.0
2 /*
3  * Directly control CPU cores/threads. SRESET, special wakeup, etc
4  *
5  * Copyright 2017-2019 IBM Corp.
6  */
7 
8 #include <direct-controls.h>
9 #include <skiboot.h>
10 #include <opal.h>
11 #include <cpu.h>
12 #include <xscom.h>
13 #include <xscom-p8-regs.h>
14 #include <xscom-p9-regs.h>
15 #include <xscom-p10-regs.h>
16 #include <timebase.h>
17 #include <chip.h>
18 
19 
20 /**************** mambo direct controls ****************/
21 
22 extern unsigned long callthru_tcl(const char *str, int len);
23 
mambo_sreset_cpu(struct cpu_thread * cpu)24 static void mambo_sreset_cpu(struct cpu_thread *cpu)
25 {
26 	uint32_t chip_id = pir_to_chip_id(cpu->pir);
27 	uint32_t core_id = pir_to_core_id(cpu->pir);
28 	uint32_t thread_id = pir_to_thread_id(cpu->pir);
29 	char tcl_cmd[50];
30 
31 	snprintf(tcl_cmd, sizeof(tcl_cmd),
32 			"mysim cpu %i:%i:%i start_thread 0x100",
33 			chip_id, core_id, thread_id);
34 	callthru_tcl(tcl_cmd, strlen(tcl_cmd));
35 }
36 
mambo_stop_cpu(struct cpu_thread * cpu)37 static void mambo_stop_cpu(struct cpu_thread *cpu)
38 {
39 	uint32_t chip_id = pir_to_chip_id(cpu->pir);
40 	uint32_t core_id = pir_to_core_id(cpu->pir);
41 	uint32_t thread_id = pir_to_thread_id(cpu->pir);
42 	char tcl_cmd[50];
43 
44 	snprintf(tcl_cmd, sizeof(tcl_cmd),
45 			"mysim cpu %i:%i:%i stop_thread",
46 			chip_id, core_id, thread_id);
47 	callthru_tcl(tcl_cmd, strlen(tcl_cmd));
48 }
49 
50 /**************** POWER8 direct controls ****************/
51 
/*
 * Assert the pHyp special wakeup on a P8 core (EX_PM_SPECIAL_WAKEUP_PHYP)
 * and poll EX_PM_GP0 until SPECIAL_WAKEUP_DONE is set, holding the core
 * awake so direct controls can be used on its threads.
 *
 * Returns 0 on success, an xscom rc on scom failure, or OPAL_HARDWARE on
 * timeout (200ms, polled at 1us intervals).
 */
static int p8_core_set_special_wakeup(struct cpu_thread *cpu)
{
	uint64_t val, poll_target, stamp;
	uint32_t core_id;
	int rc;

	/*
	 * Note: HWP checks for checkstops, but I assume we don't need to
	 * as we wouldn't be running if one was present
	 */

	/* Grab core ID once */
	core_id = pir_to_core_id(cpu->pir);

	prlog(PR_DEBUG, "RESET Waking up core 0x%x\n", core_id);

	/*
	 * The original HWp reads the XSCOM first but ignores the result
	 * and error, let's do the same until I know for sure that is
	 * not necessary
	 */
	xscom_read(cpu->chip_id,
		   XSCOM_ADDR_P8_EX_SLAVE(core_id, EX_PM_SPECIAL_WAKEUP_PHYP),
		   &val);

	/* Then we write special wakeup */
	rc = xscom_write(cpu->chip_id,
			 XSCOM_ADDR_P8_EX_SLAVE(core_id,
						EX_PM_SPECIAL_WAKEUP_PHYP),
			 PPC_BIT(0));
	if (rc) {
		prerror("RESET: XSCOM error %d asserting special"
			" wakeup on 0x%x\n", rc, cpu->pir);
		return rc;
	}

	/*
	 * HWP uses the history for Perf register here, dunno why it uses
	 * that one instead of the pHyp one, maybe to avoid clobbering it...
	 *
	 * In any case, it does that to check for run/nap vs.sleep/winkle/other
	 * to decide whether to poll on checkstop or not. Since we don't deal
	 * with checkstop conditions here, we ignore that part.
	 */

	/*
	 * Now poll for completion of special wakeup. The HWP is nasty here,
	 * it will poll at 5ms intervals for up to 200ms. This is not quite
	 * acceptable for us at runtime, at least not until we have the
	 * ability to "context switch" HBRT. In practice, because we don't
	 * winkle, it will never take that long, so we increase the polling
	 * frequency to 1us per poll. However we do have to keep the same
	 * timeout.
	 *
	 * We don't use time_wait_ms() either for now as we don't want to
	 * poll the FSP here.
	 */
	stamp = mftb();
	poll_target = stamp + msecs_to_tb(200);
	val = 0;
	while (!(val & EX_PM_GP0_SPECIAL_WAKEUP_DONE)) {
		/* Wait 1 us */
		time_wait_us(1);

		/* Read PM state */
		rc = xscom_read(cpu->chip_id,
				XSCOM_ADDR_P8_EX_SLAVE(core_id, EX_PM_GP0),
				&val);
		if (rc) {
			prerror("RESET: XSCOM error %d reading PM state on"
				" 0x%x\n", rc, cpu->pir);
			return rc;
		}
		/* Check timeout */
		if (mftb() > poll_target)
			break;
	}

	/* Success ? */
	if (val & EX_PM_GP0_SPECIAL_WAKEUP_DONE) {
		uint64_t now = mftb();
		prlog(PR_TRACE, "RESET: Special wakeup complete after %ld us\n",
		      tb_to_usecs(now - stamp));
		return 0;
	}

	/*
	 * We timed out ...
	 *
	 * HWP has a complex workaround for HW255321 which affects
	 * Murano DD1 and Venice DD1. Ignore that for now
	 *
	 * Instead we just dump some XSCOMs for error logging
	 */
	prerror("RESET: Timeout on special wakeup of 0x%0x\n", cpu->pir);
	prerror("RESET:      PM0 = 0x%016llx\n", val);
	/* Pre-set to -1 so a failed read is visible in the log dump */
	val = -1;
	xscom_read(cpu->chip_id,
		   XSCOM_ADDR_P8_EX_SLAVE(core_id, EX_PM_SPECIAL_WAKEUP_PHYP),
		   &val);
	prerror("RESET: SPC_WKUP = 0x%016llx\n", val);
	val = -1;
	xscom_read(cpu->chip_id,
		   XSCOM_ADDR_P8_EX_SLAVE(core_id,
					  EX_PM_IDLE_STATE_HISTORY_PHYP),
		   &val);
	prerror("RESET:  HISTORY = 0x%016llx\n", val);

	return OPAL_HARDWARE;
}
162 
/*
 * De-assert the pHyp special wakeup on a P8 core, allowing it to enter
 * idle states again.
 *
 * Returns 0 on success or an xscom rc if the de-assert write fails.
 */
static int p8_core_clear_special_wakeup(struct cpu_thread *cpu)
{
	uint64_t val;
	uint32_t core_id;
	int rc;

	/*
	 * Note: HWP checks for checkstops, but I assume we don't need to
	 * as we wouldn't be running if one was present
	 */

	/* Grab core ID once */
	core_id = pir_to_core_id(cpu->pir);

	prlog(PR_DEBUG, "RESET: Releasing core 0x%x wakeup\n", core_id);

	/*
	 * The original HWp reads the XSCOM first but ignores the result
	 * and error, let's do the same until I know for sure that is
	 * not necessary
	 */
	xscom_read(cpu->chip_id,
		   XSCOM_ADDR_P8_EX_SLAVE(core_id, EX_PM_SPECIAL_WAKEUP_PHYP),
		   &val);

	/* Then we write special wakeup */
	rc = xscom_write(cpu->chip_id,
			 XSCOM_ADDR_P8_EX_SLAVE(core_id,
						EX_PM_SPECIAL_WAKEUP_PHYP), 0);
	if (rc) {
		prerror("RESET: XSCOM error %d deasserting"
			" special wakeup on 0x%x\n", rc, cpu->pir);
		return rc;
	}

	/*
	 * The original HWp reads the XSCOM again with the comment
	 * "This puts an inherent delay in the propagation of the reset
	 * transition"
	 */
	xscom_read(cpu->chip_id,
		   XSCOM_ADDR_P8_EX_SLAVE(core_id, EX_PM_SPECIAL_WAKEUP_PHYP),
		   &val);

	return 0;
}
209 
p8_stop_thread(struct cpu_thread * cpu)210 static int p8_stop_thread(struct cpu_thread *cpu)
211 {
212 	uint32_t core_id = pir_to_core_id(cpu->pir);
213 	uint32_t chip_id = pir_to_chip_id(cpu->pir);
214 	uint32_t thread_id = pir_to_thread_id(cpu->pir);
215 	uint32_t xscom_addr;
216 
217 	xscom_addr = XSCOM_ADDR_P8_EX(core_id,
218 				      P8_EX_TCTL_DIRECT_CONTROLS(thread_id));
219 
220 	if (xscom_write(chip_id, xscom_addr, P8_DIRECT_CTL_STOP)) {
221 		prlog(PR_ERR, "Could not stop thread %u:%u:%u:"
222 				" Unable to write EX_TCTL_DIRECT_CONTROLS.\n",
223 				chip_id, core_id, thread_id);
224 		return OPAL_HARDWARE;
225 	}
226 
227 	return OPAL_SUCCESS;
228 }
229 
p8_sreset_thread(struct cpu_thread * cpu)230 static int p8_sreset_thread(struct cpu_thread *cpu)
231 {
232 	uint32_t core_id = pir_to_core_id(cpu->pir);
233 	uint32_t chip_id = pir_to_chip_id(cpu->pir);
234 	uint32_t thread_id = pir_to_thread_id(cpu->pir);
235 	uint32_t xscom_addr;
236 
237 	xscom_addr = XSCOM_ADDR_P8_EX(core_id,
238 				      P8_EX_TCTL_DIRECT_CONTROLS(thread_id));
239 
240 	if (xscom_write(chip_id, xscom_addr, P8_DIRECT_CTL_PRENAP)) {
241 		prlog(PR_ERR, "Could not prenap thread %u:%u:%u:"
242 				" Unable to write EX_TCTL_DIRECT_CONTROLS.\n",
243 				chip_id, core_id, thread_id);
244 		return OPAL_HARDWARE;
245 	}
246 	if (xscom_write(chip_id, xscom_addr, P8_DIRECT_CTL_SRESET)) {
247 		prlog(PR_ERR, "Could not sreset thread %u:%u:%u:"
248 				" Unable to write EX_TCTL_DIRECT_CONTROLS.\n",
249 				chip_id, core_id, thread_id);
250 		return OPAL_HARDWARE;
251 	}
252 
253 	return OPAL_SUCCESS;
254 }
255 
256 
257 /**************** POWER9 direct controls ****************/
258 
259 /* Long running instructions may take time to complete. Timeout 100ms */
260 #define P9_QUIESCE_POLL_INTERVAL	100
261 #define P9_QUIESCE_TIMEOUT		100000
262 
263 /* Waking may take up to 5ms for deepest sleep states. Set timeout to 100ms */
264 #define P9_SPWKUP_POLL_INTERVAL		100
265 #define P9_SPWKUP_TIMEOUT		100000
266 
267 /*
268  * This implements direct control facilities of processor cores and threads
269  * using scom registers.
270  */
271 
p9_core_is_gated(struct cpu_thread * cpu)272 static int p9_core_is_gated(struct cpu_thread *cpu)
273 {
274 	uint32_t chip_id = pir_to_chip_id(cpu->pir);
275 	uint32_t core_id = pir_to_core_id(cpu->pir);
276 	uint32_t sshhyp_addr;
277 	uint64_t val;
278 
279 	sshhyp_addr = XSCOM_ADDR_P9_EC_SLAVE(core_id, P9_EC_PPM_SSHHYP);
280 
281 	if (xscom_read(chip_id, sshhyp_addr, &val)) {
282 		prlog(PR_ERR, "Could not query core gated on %u:%u:"
283 				" Unable to read PPM_SSHHYP.\n",
284 				chip_id, core_id);
285 		return OPAL_HARDWARE;
286 	}
287 
288 	return !!(val & P9_CORE_GATED);
289 }
290 
/*
 * Assert the hypervisor special wakeup on a P9 core and poll PPM_SSHHYP
 * until SPECIAL_WKUP_DONE is set, holding the core out of stop states so
 * direct controls can be used on its threads.
 *
 * Returns 0 on success, OPAL_HARDWARE on scom failure, CORE_GATED error
 * or timeout. On failure the wakeup request is deliberately left asserted
 * (see comment at out_fail).
 */
static int p9_core_set_special_wakeup(struct cpu_thread *cpu)
{
	uint32_t chip_id = pir_to_chip_id(cpu->pir);
	uint32_t core_id = pir_to_core_id(cpu->pir);
	uint32_t swake_addr;
	uint32_t sshhyp_addr;
	uint64_t val;
	int i;

	swake_addr = XSCOM_ADDR_P9_EC_SLAVE(core_id, EC_PPM_SPECIAL_WKUP_HYP);
	sshhyp_addr = XSCOM_ADDR_P9_EC_SLAVE(core_id, P9_EC_PPM_SSHHYP);

	/* Request the special wakeup */
	if (xscom_write(chip_id, swake_addr, P9_SPWKUP_SET)) {
		prlog(PR_ERR, "Could not set special wakeup on %u:%u:"
				" Unable to write PPM_SPECIAL_WKUP_HYP.\n",
				chip_id, core_id);
		goto out_fail;
	}

	/* Poll for completion, P9_SPWKUP_POLL_INTERVAL us per iteration */
	for (i = 0; i < P9_SPWKUP_TIMEOUT / P9_SPWKUP_POLL_INTERVAL; i++) {
		if (xscom_read(chip_id, sshhyp_addr, &val)) {
			prlog(PR_ERR, "Could not set special wakeup on %u:%u:"
					" Unable to read PPM_SSHHYP.\n",
					chip_id, core_id);
			goto out_fail;
		}
		if (val & P9_SPECIAL_WKUP_DONE) {
			/*
			 * CORE_GATED will be unset on a successful special
			 * wakeup of the core which indicates that the core is
			 * out of stop state. If CORE_GATED is still set then
			 * raise error.
			 */
			if (p9_core_is_gated(cpu)) {
				/* Deassert spwu for this strange error */
				xscom_write(chip_id, swake_addr, 0);
				prlog(PR_ERR, "Failed special wakeup on %u:%u"
						" as CORE_GATED is set\n",
						chip_id, core_id);
				goto out_fail;
			} else {
				return 0;
			}
		}
		time_wait_us(P9_SPWKUP_POLL_INTERVAL);
	}

	prlog(PR_ERR, "Could not set special wakeup on %u:%u:"
			" timeout waiting for SPECIAL_WKUP_DONE.\n",
			chip_id, core_id);

out_fail:
	/*
	 * As per the special wakeup protocol we should not de-assert
	 * the special wakeup on the core until WAKEUP_DONE is set.
	 * So even on error do not de-assert.
	 */
	return OPAL_HARDWARE;
}
350 
p9_core_clear_special_wakeup(struct cpu_thread * cpu)351 static int p9_core_clear_special_wakeup(struct cpu_thread *cpu)
352 {
353 	uint32_t chip_id = pir_to_chip_id(cpu->pir);
354 	uint32_t core_id = pir_to_core_id(cpu->pir);
355 	uint32_t swake_addr;
356 
357 	swake_addr = XSCOM_ADDR_P9_EC_SLAVE(core_id, EC_PPM_SPECIAL_WKUP_HYP);
358 
359 	/*
360 	 * De-assert special wakeup after a small delay.
361 	 * The delay may help avoid problems setting and clearing special
362 	 * wakeup back-to-back. This should be confirmed.
363 	 */
364 	time_wait_us(1);
365 	if (xscom_write(chip_id, swake_addr, 0)) {
366 		prlog(PR_ERR, "Could not clear special wakeup on %u:%u:"
367 				" Unable to write PPM_SPECIAL_WKUP_HYP.\n",
368 				chip_id, core_id);
369 		return OPAL_HARDWARE;
370 	}
371 
372 	/*
373 	 * Don't wait for de-assert to complete as other components
374 	 * could have requested for special wkeup. Wait for 10ms to
375 	 * avoid back-to-back asserts
376 	 */
377 	time_wait_us(10000);
378 	return 0;
379 }
380 
p9_thread_quiesced(struct cpu_thread * cpu)381 static int p9_thread_quiesced(struct cpu_thread *cpu)
382 {
383 	uint32_t chip_id = pir_to_chip_id(cpu->pir);
384 	uint32_t core_id = pir_to_core_id(cpu->pir);
385 	uint32_t thread_id = pir_to_thread_id(cpu->pir);
386 	uint32_t ras_addr;
387 	uint64_t ras_status;
388 
389 	ras_addr = XSCOM_ADDR_P9_EC(core_id, P9_RAS_STATUS);
390 	if (xscom_read(chip_id, ras_addr, &ras_status)) {
391 		prlog(PR_ERR, "Could not check thread state on %u:%u:"
392 				" Unable to read RAS_STATUS.\n",
393 				chip_id, core_id);
394 		return OPAL_HARDWARE;
395 	}
396 
397 	/*
398 	 * This returns true when the thread is quiesced and all
399 	 * instructions completed. For sreset this may not be necessary,
400 	 * but we may want to use instruction ramming or stepping
401 	 * direct controls where it is important.
402 	 */
403 	if ((ras_status & P9_THREAD_QUIESCED(thread_id))
404 			== P9_THREAD_QUIESCED(thread_id))
405 		return 1;
406 
407 	return 0;
408 }
409 
/*
 * Resume a previously stopped P9 thread. The thread must be quiesced.
 * Depending on the sampled thread state, either clear maintenance mode
 * (inactive or stopped thread) or issue a "cont" command.
 *
 * Returns 0 on success, OPAL_BUSY if the thread is not quiesced,
 * OPAL_HARDWARE on scom read failure.
 */
static int p9_cont_thread(struct cpu_thread *cpu)
{
	uint32_t chip_id = pir_to_chip_id(cpu->pir);
	uint32_t core_id = pir_to_core_id(cpu->pir);
	uint32_t thread_id = pir_to_thread_id(cpu->pir);
	uint32_t cts_addr;
	uint32_t ti_addr;
	uint32_t dctl_addr;
	uint64_t core_thread_state;
	uint64_t thread_info;
	bool active, stop;
	int rc;

	/* Only a quiesced thread can be resumed this way */
	rc = p9_thread_quiesced(cpu);
	if (rc < 0)
		return rc;
	if (!rc) {
		prlog(PR_ERR, "Could not cont thread %u:%u:%u:"
				" Thread is not quiesced.\n",
				chip_id, core_id, thread_id);
		return OPAL_BUSY;
	}

	cts_addr = XSCOM_ADDR_P9_EC(core_id, P9_CORE_THREAD_STATE);
	ti_addr = XSCOM_ADDR_P9_EC(core_id, P9_THREAD_INFO);
	dctl_addr = XSCOM_ADDR_P9_EC(core_id, P9_EC_DIRECT_CONTROLS);

	if (xscom_read(chip_id, cts_addr, &core_thread_state)) {
		prlog(PR_ERR, "Could not resume thread %u:%u:%u:"
				" Unable to read CORE_THREAD_STATE.\n",
				chip_id, core_id, thread_id);
		return OPAL_HARDWARE;
	}
	/* Bit 56+t of CORE_THREAD_STATE indicates thread t is stopped */
	if (core_thread_state & PPC_BIT(56 + thread_id))
		stop = true;
	else
		stop = false;

	if (xscom_read(chip_id, ti_addr, &thread_info)) {
		prlog(PR_ERR, "Could not resume thread %u:%u:%u:"
				" Unable to read THREAD_INFO.\n",
				chip_id, core_id, thread_id);
		return OPAL_HARDWARE;
	}
	/* Bit t of THREAD_INFO indicates thread t is active */
	if (thread_info & PPC_BIT(thread_id))
		active = true;
	else
		active = false;

	/*
	 * NOTE(review): failed direct-control writes below are logged but
	 * still return 0 to the caller — confirm this is intentional.
	 */
	if (!active || stop) {
		if (xscom_write(chip_id, dctl_addr, P9_THREAD_CLEAR_MAINT(thread_id))) {
			prlog(PR_ERR, "Could not resume thread %u:%u:%u:"
				      " Unable to write EC_DIRECT_CONTROLS.\n",
				      chip_id, core_id, thread_id);
		}
	} else {
		if (xscom_write(chip_id, dctl_addr, P9_THREAD_CONT(thread_id))) {
			prlog(PR_ERR, "Could not resume thread %u:%u:%u:"
				      " Unable to write EC_DIRECT_CONTROLS.\n",
				      chip_id, core_id, thread_id);
		}
	}

	return 0;
}
475 
p9_stop_thread(struct cpu_thread * cpu)476 static int p9_stop_thread(struct cpu_thread *cpu)
477 {
478 	uint32_t chip_id = pir_to_chip_id(cpu->pir);
479 	uint32_t core_id = pir_to_core_id(cpu->pir);
480 	uint32_t thread_id = pir_to_thread_id(cpu->pir);
481 	uint32_t dctl_addr;
482 	int rc;
483 	int i;
484 
485 	dctl_addr = XSCOM_ADDR_P9_EC(core_id, P9_EC_DIRECT_CONTROLS);
486 
487 	rc = p9_thread_quiesced(cpu);
488 	if (rc < 0)
489 		return rc;
490 	if (rc) {
491 		prlog(PR_ERR, "Could not stop thread %u:%u:%u:"
492 				" Thread is quiesced already.\n",
493 				chip_id, core_id, thread_id);
494 		return OPAL_BUSY;
495 	}
496 
497 	if (xscom_write(chip_id, dctl_addr, P9_THREAD_STOP(thread_id))) {
498 		prlog(PR_ERR, "Could not stop thread %u:%u:%u:"
499 				" Unable to write EC_DIRECT_CONTROLS.\n",
500 				chip_id, core_id, thread_id);
501 		return OPAL_HARDWARE;
502 	}
503 
504 	for (i = 0; i < P9_QUIESCE_TIMEOUT / P9_QUIESCE_POLL_INTERVAL; i++) {
505 		int rc = p9_thread_quiesced(cpu);
506 		if (rc < 0)
507 			break;
508 		if (rc)
509 			return 0;
510 
511 		time_wait_us(P9_QUIESCE_POLL_INTERVAL);
512 	}
513 
514 	prlog(PR_ERR, "Could not stop thread %u:%u:%u:"
515 			" Unable to quiesce thread.\n",
516 			chip_id, core_id, thread_id);
517 
518 	return OPAL_HARDWARE;
519 }
520 
p9_sreset_thread(struct cpu_thread * cpu)521 static int p9_sreset_thread(struct cpu_thread *cpu)
522 {
523 	uint32_t chip_id = pir_to_chip_id(cpu->pir);
524 	uint32_t core_id = pir_to_core_id(cpu->pir);
525 	uint32_t thread_id = pir_to_thread_id(cpu->pir);
526 	uint32_t dctl_addr;
527 
528 	dctl_addr = XSCOM_ADDR_P9_EC(core_id, P9_EC_DIRECT_CONTROLS);
529 
530 	if (xscom_write(chip_id, dctl_addr, P9_THREAD_SRESET(thread_id))) {
531 		prlog(PR_ERR, "Could not sreset thread %u:%u:%u:"
532 				" Unable to write EC_DIRECT_CONTROLS.\n",
533 				chip_id, core_id, thread_id);
534 		return OPAL_HARDWARE;
535 	}
536 
537 	return 0;
538 }
539 
540 /**************** POWER10 direct controls ****************/
541 
542 /* Long running instructions may take time to complete. Timeout 100ms */
543 #define P10_QUIESCE_POLL_INTERVAL	100
544 #define P10_QUIESCE_TIMEOUT		100000
545 
546 /* Waking may take up to 5ms for deepest sleep states. Set timeout to 100ms */
547 #define P10_SPWU_POLL_INTERVAL		100
548 #define P10_SPWU_TIMEOUT		100000
549 
550 /*
551  * This implements direct control facilities of processor cores and threads
552  * using scom registers.
553  */
p10_core_is_gated(struct cpu_thread * cpu)554 static int p10_core_is_gated(struct cpu_thread *cpu)
555 {
556 	uint32_t chip_id = pir_to_chip_id(cpu->pir);
557 	uint32_t core_id = pir_to_core_id(cpu->pir);
558 	uint32_t ssh_addr;
559 	uint64_t val;
560 
561 	ssh_addr = XSCOM_ADDR_P10_QME_CORE(core_id, P10_QME_SSH_HYP);
562 
563 	if (xscom_read(chip_id, ssh_addr, &val)) {
564 		prlog(PR_ERR, "Could not query core gated on %u:%u:"
565 				" Unable to read QME_SSH_HYP.\n",
566 				chip_id, core_id);
567 		return OPAL_HARDWARE;
568 	}
569 
570 	return !!(val & P10_SSH_CORE_GATED);
571 }
572 
573 
/*
 * Assert the hypervisor special wakeup (SPWU) on a P10 core and poll the
 * QME SSH_HYP register until SPWU_DONE is set, holding the core out of
 * stop states so direct controls can be used on its threads.
 *
 * Returns 0 on success, OPAL_HARDWARE on scom failure or timeout. On
 * timeout the wakeup request is deliberately left asserted (see comment
 * at the end).
 */
static int p10_core_set_special_wakeup(struct cpu_thread *cpu)
{
	uint32_t chip_id = pir_to_chip_id(cpu->pir);
	uint32_t core_id = pir_to_core_id(cpu->pir);
	uint32_t spwu_addr, ssh_addr;
	uint64_t val;
	int i;

	/* P10 could use SPWU_HYP done bit instead of SSH? */
	spwu_addr = XSCOM_ADDR_P10_QME_CORE(core_id, P10_QME_SPWU_HYP);
	ssh_addr = XSCOM_ADDR_P10_QME_CORE(core_id, P10_QME_SSH_HYP);

	if (xscom_write(chip_id, spwu_addr, P10_SPWU_REQ)) {
		prlog(PR_ERR, "Could not set special wakeup on %u:%u:"
				" Unable to write QME_SPWU_HYP.\n",
				chip_id, core_id);
		return OPAL_HARDWARE;
	}

	/* Poll for completion, P10_SPWU_POLL_INTERVAL us per iteration */
	for (i = 0; i < P10_SPWU_TIMEOUT / P10_SPWU_POLL_INTERVAL; i++) {
		if (xscom_read(chip_id, ssh_addr, &val)) {
			prlog(PR_ERR, "Could not set special wakeup on %u:%u:"
					" Unable to read QME_SSH_HYP.\n",
					chip_id, core_id);
			return OPAL_HARDWARE;
		}
		if (val & P10_SSH_SPWU_DONE) {
			/*
			 * CORE_GATED will be unset on a successful special
			 * wakeup of the core which indicates that the core is
			 * out of stop state. If CORE_GATED is still set then
			 * check SPWU register and raise error only if SPWU_DONE
			 * is not set, else print a warning and consider SPWU
			 * operation as successful.
			 * This is in conjunction with a microcode bug, which
			 * calls out the fact that SPW can succeed in the case
			 * the core is gated but SPWU_HYP bit is set.
			 */
			if (p10_core_is_gated(cpu)) {
				if(xscom_read(chip_id, spwu_addr, &val)) {
					prlog(PR_ERR, "Core %u:%u:"
					      " unable to read QME_SPWU_HYP\n",
					      chip_id, core_id);
					return OPAL_HARDWARE;
				}
				if (val & P10_SPWU_DONE) {
					/*
					 * If SPWU DONE bit is set then
					 * SPWU operation is complete
					 */
					prlog(PR_DEBUG, "Special wakeup on "
					      "%u:%u: core remains gated while"
					      " SPWU_HYP DONE set\n",
					      chip_id, core_id);
					return 0;
				}
				/* Deassert spwu for this strange error */
				xscom_write(chip_id, spwu_addr, 0);
				prlog(PR_ERR,
				      "Failed special wakeup on %u:%u"
				      " core remains gated.\n",
				      chip_id, core_id);
				return OPAL_HARDWARE;
			} else {
				return 0;
			}
		}
		time_wait_us(P10_SPWU_POLL_INTERVAL);
	}

	prlog(PR_ERR, "Could not set special wakeup on %u:%u:"
			" operation timeout.\n",
			chip_id, core_id);
	/*
	 * As per the special wakeup protocol we should not de-assert
	 * the special wakeup on the core until WAKEUP_DONE is set.
	 * So even on error do not de-assert.
	 */

	return OPAL_HARDWARE;
}
655 
p10_core_clear_special_wakeup(struct cpu_thread * cpu)656 static int p10_core_clear_special_wakeup(struct cpu_thread *cpu)
657 {
658 	uint32_t chip_id = pir_to_chip_id(cpu->pir);
659 	uint32_t core_id = pir_to_core_id(cpu->pir);
660 	uint32_t spwu_addr;
661 
662 	spwu_addr = XSCOM_ADDR_P10_QME_CORE(core_id, P10_QME_SPWU_HYP);
663 
664 	/* Add a small delay here if spwu problems time_wait_us(1); */
665 	if (xscom_write(chip_id, spwu_addr, 0)) {
666 		prlog(PR_ERR, "Could not clear special wakeup on %u:%u:"
667 				" Unable to write QME_SPWU_HYP.\n",
668 				chip_id, core_id);
669 		return OPAL_HARDWARE;
670 	}
671 
672 	return 0;
673 }
674 
p10_thread_quiesced(struct cpu_thread * cpu)675 static int p10_thread_quiesced(struct cpu_thread *cpu)
676 {
677 	uint32_t chip_id = pir_to_chip_id(cpu->pir);
678 	uint32_t core_id = pir_to_core_id(cpu->pir);
679 	uint32_t thread_id = pir_to_thread_id(cpu->pir);
680 	uint32_t ras_addr;
681 	uint64_t ras_status;
682 
683 	ras_addr = XSCOM_ADDR_P10_EC(core_id, P10_EC_RAS_STATUS);
684 	if (xscom_read(chip_id, ras_addr, &ras_status)) {
685 		prlog(PR_ERR, "Could not check thread state on %u:%u:"
686 				" Unable to read EC_RAS_STATUS.\n",
687 				chip_id, core_id);
688 		return OPAL_HARDWARE;
689 	}
690 
691 	/*
692 	 * p10_thread_stop for the purpose of sreset wants QUIESCED
693 	 * and MAINT bits set. Step, RAM, etc. need more, but we don't
694 	 * use those in skiboot.
695 	 *
696 	 * P10 could try wait for more here in case of errors.
697 	 */
698 	if (!(ras_status & P10_THREAD_QUIESCED(thread_id)))
699 		return 0;
700 
701 	if (!(ras_status & P10_THREAD_MAINT(thread_id)))
702 		return 0;
703 
704 	return 1;
705 }
706 
/*
 * Resume a previously stopped P10 thread. The thread must be quiesced.
 * Depending on the sampled thread state, either clear maintenance mode
 * (inactive or stopped thread) or issue a start command, then poll
 * until the thread leaves the quiesced state.
 *
 * Returns 0 on success, OPAL_BUSY if the thread is not quiesced,
 * OPAL_HARDWARE on scom failure or if the thread never restarts.
 */
static int p10_cont_thread(struct cpu_thread *cpu)
{
	uint32_t chip_id = pir_to_chip_id(cpu->pir);
	uint32_t core_id = pir_to_core_id(cpu->pir);
	uint32_t thread_id = pir_to_thread_id(cpu->pir);
	uint32_t cts_addr;
	uint32_t ti_addr;
	uint32_t dctl_addr;
	uint64_t core_thread_state;
	uint64_t thread_info;
	bool active, stop;
	int rc;
	int i;

	/* Only a quiesced thread can be resumed this way */
	rc = p10_thread_quiesced(cpu);
	if (rc < 0)
		return rc;
	if (!rc) {
		prlog(PR_ERR, "Could not cont thread %u:%u:%u:"
				" Thread is not quiesced.\n",
				chip_id, core_id, thread_id);
		return OPAL_BUSY;
	}

	cts_addr = XSCOM_ADDR_P10_EC(core_id, P10_EC_CORE_THREAD_STATE);
	ti_addr = XSCOM_ADDR_P10_EC(core_id, P10_EC_THREAD_INFO);
	dctl_addr = XSCOM_ADDR_P10_EC(core_id, P10_EC_DIRECT_CONTROLS);

	if (xscom_read(chip_id, cts_addr, &core_thread_state)) {
		prlog(PR_ERR, "Could not resume thread %u:%u:%u:"
				" Unable to read EC_CORE_THREAD_STATE.\n",
				chip_id, core_id, thread_id);
		return OPAL_HARDWARE;
	}
	if (core_thread_state & P10_THREAD_STOPPED(thread_id))
		stop = true;
	else
		stop = false;

	if (xscom_read(chip_id, ti_addr, &thread_info)) {
		prlog(PR_ERR, "Could not resume thread %u:%u:%u:"
				" Unable to read EC_THREAD_INFO.\n",
				chip_id, core_id, thread_id);
		return OPAL_HARDWARE;
	}
	if (thread_info & P10_THREAD_ACTIVE(thread_id))
		active = true;
	else
		active = false;

	/*
	 * NOTE(review): failed direct-control writes below are logged but
	 * not returned; the quiesce poll is relied on to catch failures.
	 */
	if (!active || stop) {
		if (xscom_write(chip_id, dctl_addr, P10_THREAD_CLEAR_MAINT(thread_id))) {
			prlog(PR_ERR, "Could not resume thread %u:%u:%u:"
				      " Unable to write EC_DIRECT_CONTROLS.\n",
				      chip_id, core_id, thread_id);
		}
	} else {
		if (xscom_write(chip_id, dctl_addr, P10_THREAD_START(thread_id))) {
			prlog(PR_ERR, "Could not resume thread %u:%u:%u:"
				      " Unable to write EC_DIRECT_CONTROLS.\n",
				      chip_id, core_id, thread_id);
		}
	}

	/* Wait for the thread to drop out of the quiesced state */
	for (i = 0; i < P10_QUIESCE_TIMEOUT / P10_QUIESCE_POLL_INTERVAL; i++) {
		int rc = p10_thread_quiesced(cpu);
		if (rc < 0)
			break;
		if (!rc)
			return 0;

		time_wait_us(P10_QUIESCE_POLL_INTERVAL);
	}

	prlog(PR_ERR, "Could not start thread %u:%u:%u:"
			" Unable to start thread.\n",
			chip_id, core_id, thread_id);

	return OPAL_HARDWARE;
}
787 
p10_stop_thread(struct cpu_thread * cpu)788 static int p10_stop_thread(struct cpu_thread *cpu)
789 {
790 	uint32_t chip_id = pir_to_chip_id(cpu->pir);
791 	uint32_t core_id = pir_to_core_id(cpu->pir);
792 	uint32_t thread_id = pir_to_thread_id(cpu->pir);
793 	uint32_t dctl_addr;
794 	int rc;
795 	int i;
796 
797 	dctl_addr = XSCOM_ADDR_P10_EC(core_id, P10_EC_DIRECT_CONTROLS);
798 
799 	rc = p10_thread_quiesced(cpu);
800 	if (rc < 0)
801 		return rc;
802 	if (rc) {
803 		prlog(PR_ERR, "Could not stop thread %u:%u:%u:"
804 				" Thread is quiesced already.\n",
805 				chip_id, core_id, thread_id);
806 		return OPAL_BUSY;
807 	}
808 
809 	if (xscom_write(chip_id, dctl_addr, P10_THREAD_STOP(thread_id))) {
810 		prlog(PR_ERR, "Could not stop thread %u:%u:%u:"
811 				" Unable to write EC_DIRECT_CONTROLS.\n",
812 				chip_id, core_id, thread_id);
813 		return OPAL_HARDWARE;
814 	}
815 
816 	for (i = 0; i < P10_QUIESCE_TIMEOUT / P10_QUIESCE_POLL_INTERVAL; i++) {
817 		int rc = p10_thread_quiesced(cpu);
818 		if (rc < 0)
819 			break;
820 		if (rc)
821 			return 0;
822 
823 		time_wait_us(P10_QUIESCE_POLL_INTERVAL);
824 	}
825 
826 	prlog(PR_ERR, "Could not stop thread %u:%u:%u:"
827 			" Unable to quiesce thread.\n",
828 			chip_id, core_id, thread_id);
829 
830 	return OPAL_HARDWARE;
831 }
832 
p10_sreset_thread(struct cpu_thread * cpu)833 static int p10_sreset_thread(struct cpu_thread *cpu)
834 {
835 	uint32_t chip_id = pir_to_chip_id(cpu->pir);
836 	uint32_t core_id = pir_to_core_id(cpu->pir);
837 	uint32_t thread_id = pir_to_thread_id(cpu->pir);
838 	uint32_t dctl_addr;
839 
840 	dctl_addr = XSCOM_ADDR_P10_EC(core_id, P10_EC_DIRECT_CONTROLS);
841 
842 	if (xscom_write(chip_id, dctl_addr, P10_THREAD_SRESET(thread_id))) {
843 		prlog(PR_ERR, "Could not sreset thread %u:%u:%u:"
844 				" Unable to write EC_DIRECT_CONTROLS.\n",
845 				chip_id, core_id, thread_id);
846 		return OPAL_HARDWARE;
847 	}
848 
849 	return 0;
850 }
851 
852 /**************** generic direct controls ****************/
853 
dctl_set_special_wakeup(struct cpu_thread * t)854 int dctl_set_special_wakeup(struct cpu_thread *t)
855 {
856 	struct cpu_thread *c = t->ec_primary;
857 	int rc = OPAL_SUCCESS;
858 
859 	if (proc_gen == proc_gen_unknown)
860 		return OPAL_UNSUPPORTED;
861 
862 	lock(&c->dctl_lock);
863 	if (c->special_wakeup_count == 0) {
864 		if (proc_gen == proc_gen_p10)
865 			rc = p10_core_set_special_wakeup(c);
866 		else if (proc_gen == proc_gen_p9)
867 			rc = p9_core_set_special_wakeup(c);
868 		else /* (proc_gen == proc_gen_p8) */
869 			rc = p8_core_set_special_wakeup(c);
870 	}
871 	if (!rc)
872 		c->special_wakeup_count++;
873 	unlock(&c->dctl_lock);
874 
875 	return rc;
876 }
877 
dctl_clear_special_wakeup(struct cpu_thread * t)878 int dctl_clear_special_wakeup(struct cpu_thread *t)
879 {
880 	struct cpu_thread *c = t->ec_primary;
881 	int rc = OPAL_SUCCESS;
882 
883 	if (proc_gen == proc_gen_unknown)
884 		return OPAL_UNSUPPORTED;
885 
886 	lock(&c->dctl_lock);
887 	if (!c->special_wakeup_count)
888 		goto out;
889 	if (c->special_wakeup_count == 1) {
890 		if (proc_gen == proc_gen_p10)
891 			rc = p10_core_clear_special_wakeup(c);
892 		else if (proc_gen == proc_gen_p9)
893 			rc = p9_core_clear_special_wakeup(c);
894 		else /* (proc_gen == proc_gen_p8) */
895 			rc = p8_core_clear_special_wakeup(c);
896 	}
897 	if (!rc)
898 		c->special_wakeup_count--;
899 out:
900 	unlock(&c->dctl_lock);
901 
902 	return rc;
903 }
904 
dctl_core_is_gated(struct cpu_thread * t)905 int dctl_core_is_gated(struct cpu_thread *t)
906 {
907 	struct cpu_thread *c = t->primary;
908 
909 	if (proc_gen == proc_gen_p10)
910 		return p10_core_is_gated(c);
911 	else if (proc_gen == proc_gen_p9)
912 		return p9_core_is_gated(c);
913 	else
914 		return OPAL_UNSUPPORTED;
915 }
916 
dctl_stop(struct cpu_thread * t)917 static int dctl_stop(struct cpu_thread *t)
918 {
919 	struct cpu_thread *c = t->ec_primary;
920 	int rc;
921 
922 	lock(&c->dctl_lock);
923 	if (t->dctl_stopped) {
924 		unlock(&c->dctl_lock);
925 		return OPAL_BUSY;
926 	}
927 	if (proc_gen == proc_gen_p10)
928 		rc = p10_stop_thread(t);
929 	else if (proc_gen == proc_gen_p9)
930 		rc = p9_stop_thread(t);
931 	else /* (proc_gen == proc_gen_p8) */
932 		rc = p8_stop_thread(t);
933 	if (!rc)
934 		t->dctl_stopped = true;
935 	unlock(&c->dctl_lock);
936 
937 	return rc;
938 }
939 
dctl_cont(struct cpu_thread * t)940 static int dctl_cont(struct cpu_thread *t)
941 {
942 	struct cpu_thread *c = t->primary;
943 	int rc;
944 
945 	if (proc_gen != proc_gen_p10 && proc_gen != proc_gen_p9)
946 		return OPAL_UNSUPPORTED;
947 
948 	lock(&c->dctl_lock);
949 	if (!t->dctl_stopped) {
950 		unlock(&c->dctl_lock);
951 		return OPAL_BUSY;
952 	}
953 	if (proc_gen == proc_gen_p10)
954 		rc = p10_cont_thread(t);
955 	else /* (proc_gen == proc_gen_p9) */
956 		rc = p9_cont_thread(t);
957 	if (!rc)
958 		t->dctl_stopped = false;
959 	unlock(&c->dctl_lock);
960 
961 	return rc;
962 }
963 
964 /*
965  * NOTE:
966  * The POWER8 sreset does not provide SRR registers, so it can be used
967  * for fast reboot, but not OPAL_SIGNAL_SYSTEM_RESET or anywhere that is
968  * expected to return. For now, callers beware.
969  */
dctl_sreset(struct cpu_thread * t)970 static int dctl_sreset(struct cpu_thread *t)
971 {
972 	struct cpu_thread *c = t->ec_primary;
973 	int rc;
974 
975 	lock(&c->dctl_lock);
976 	if (!t->dctl_stopped) {
977 		unlock(&c->dctl_lock);
978 		return OPAL_BUSY;
979 	}
980 	if (proc_gen == proc_gen_p10)
981 		rc = p10_sreset_thread(t);
982 	else if (proc_gen == proc_gen_p9)
983 		rc = p9_sreset_thread(t);
984 	else /* (proc_gen == proc_gen_p8) */
985 		rc = p8_sreset_thread(t);
986 	if (!rc)
987 		t->dctl_stopped = false;
988 	unlock(&c->dctl_lock);
989 
990 	return rc;
991 }
992 
993 
994 /**************** fast reboot API ****************/
995 
sreset_all_prepare(void)996 int sreset_all_prepare(void)
997 {
998 	struct cpu_thread *cpu;
999 
1000 	if (proc_gen == proc_gen_unknown)
1001 		return OPAL_UNSUPPORTED;
1002 
1003 	prlog(PR_DEBUG, "RESET: Resetting from cpu: 0x%x (core 0x%x)\n",
1004 	      this_cpu()->pir, pir_to_core_id(this_cpu()->pir));
1005 
1006 	if (chip_quirk(QUIRK_MAMBO_CALLOUTS)) {
1007 		for_each_ungarded_cpu(cpu) {
1008 			if (cpu == this_cpu())
1009 				continue;
1010 			mambo_stop_cpu(cpu);
1011 		}
1012 		return OPAL_SUCCESS;
1013 	}
1014 
1015 	/* Assert special wakup on all cores. Only on operational cores. */
1016 	for_each_ungarded_primary(cpu) {
1017 		if (dctl_set_special_wakeup(cpu) != OPAL_SUCCESS)
1018 			return OPAL_HARDWARE;
1019 	}
1020 
1021 	prlog(PR_DEBUG, "RESET: Stopping the world...\n");
1022 
1023 	/* Put everybody in stop except myself */
1024 	for_each_ungarded_cpu(cpu) {
1025 		if (cpu == this_cpu())
1026 			continue;
1027 		if (dctl_stop(cpu) != OPAL_SUCCESS)
1028 			return OPAL_HARDWARE;
1029 
1030 	}
1031 
1032 	return OPAL_SUCCESS;
1033 }
1034 
/*
 * Finish a fast reboot sequence: release the special wakeups asserted by
 * sreset_all_prepare(). No-op under mambo, which never asserted any.
 */
void sreset_all_finish(void)
{
	struct cpu_thread *cpu;

	if (chip_quirk(QUIRK_MAMBO_CALLOUTS))
		return;

	for_each_ungarded_primary(cpu)
		dctl_clear_special_wakeup(cpu);
}
1045 
sreset_all_others(void)1046 int sreset_all_others(void)
1047 {
1048 	struct cpu_thread *cpu;
1049 
1050 	prlog(PR_DEBUG, "RESET: Resetting all threads but self...\n");
1051 
1052 	/*
1053 	 * mambo should actually implement stop as well, and implement
1054 	 * the dctl_ helpers properly. Currently it's racy just sresetting.
1055 	 */
1056 	if (chip_quirk(QUIRK_MAMBO_CALLOUTS)) {
1057 		for_each_ungarded_cpu(cpu) {
1058 			if (cpu == this_cpu())
1059 				continue;
1060 			mambo_sreset_cpu(cpu);
1061 		}
1062 		return OPAL_SUCCESS;
1063 	}
1064 
1065 	for_each_ungarded_cpu(cpu) {
1066 		if (cpu == this_cpu())
1067 			continue;
1068 		if (dctl_sreset(cpu) != OPAL_SUCCESS)
1069 			return OPAL_HARDWARE;
1070 	}
1071 
1072 	return OPAL_SUCCESS;
1073 }
1074 
1075 
1076 /**************** OPAL_SIGNAL_SYSTEM_RESET API ****************/
1077 
1078 /*
1079  * This provides a way for the host to raise system reset exceptions
1080  * on other threads using direct control scoms on POWER9.
1081  *
1082  * We assert special wakeup on the core first.
1083  * Then stop target thread and wait for it to quiesce.
1084  * Then sreset the target thread, which resumes execution on that thread.
1085  * Then de-assert special wakeup on the core.
1086  */
/*
 * Raise a system reset on one remote thread: assert special wakeup on
 * its core, stop the thread, sreset it (which resumes execution), then
 * release the special wakeup. Errors unwind via goto cleanup.
 *
 * Returns 0 on success, OPAL_PARAMETER for self-reset, or the error
 * from the failing step.
 */
static int64_t do_sreset_cpu(struct cpu_thread *cpu)
{
	int rc;

	/* A thread cannot use direct controls to reset itself */
	if (this_cpu() == cpu) {
		prlog(PR_ERR, "SRESET: Unable to reset self\n");
		return OPAL_PARAMETER;
	}

	rc = dctl_set_special_wakeup(cpu);
	if (rc)
		return rc;

	rc = dctl_stop(cpu);
	if (rc)
		goto out_spwk;

	rc = dctl_sreset(cpu);
	if (rc)
		goto out_cont;

	dctl_clear_special_wakeup(cpu);

	return 0;

out_cont:
	/* Sreset failed: resume the thread we stopped, then release spwu */
	dctl_cont(cpu);
out_spwk:
	dctl_clear_special_wakeup(cpu);

	return rc;
}
1119 
1120 static struct lock sreset_lock = LOCK_UNLOCKED;
1121 
opal_signal_system_reset(int cpu_nr)1122 int64_t opal_signal_system_reset(int cpu_nr)
1123 {
1124 	struct cpu_thread *cpu;
1125 	int64_t ret;
1126 
1127 	if (proc_gen != proc_gen_p9 && proc_gen != proc_gen_p10)
1128 		return OPAL_UNSUPPORTED;
1129 
1130 	/*
1131 	 * Broadcasts unsupported. Not clear what threads should be
1132 	 * signaled, so it's better for the OS to perform one-at-a-time
1133 	 * for now.
1134 	 */
1135 	if (cpu_nr < 0)
1136 		return OPAL_CONSTRAINED;
1137 
1138 	/* Reset a single CPU */
1139 	cpu = find_cpu_by_server(cpu_nr);
1140 	if (!cpu) {
1141 		prlog(PR_ERR, "SRESET: could not find cpu by server %d\n", cpu_nr);
1142 		return OPAL_PARAMETER;
1143 	}
1144 
1145 	lock(&sreset_lock);
1146 	ret = do_sreset_cpu(cpu);
1147 	unlock(&sreset_lock);
1148 
1149 	return ret;
1150 }
1151 
direct_controls_init(void)1152 void direct_controls_init(void)
1153 {
1154 	if (chip_quirk(QUIRK_MAMBO_CALLOUTS))
1155 		return;
1156 
1157 	if (proc_gen != proc_gen_p9 && proc_gen != proc_gen_p10)
1158 		return;
1159 
1160 	opal_register(OPAL_SIGNAL_SYSTEM_RESET, opal_signal_system_reset, 1);
1161 }
1162