/* $NetBSD: acpi_cpu_tstate.c,v 1.34 2020/12/07 10:57:41 jmcneill Exp $ */

/*-
 * Copyright (c) 2010 Jukka Ruohonen <jruohonen@iki.fi>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: acpi_cpu_tstate.c,v 1.34 2020/12/07 10:57:41 jmcneill Exp $");

#include <sys/param.h>
#include <sys/kmem.h>
#include <sys/xcall.h>
#include <sys/cpu.h>

#include <dev/acpi/acpireg.h>
#include <dev/acpi/acpivar.h>
#include <dev/acpi/acpi_cpu.h>

#define _COMPONENT	 ACPI_BUS_COMPONENT
ACPI_MODULE_NAME	 ("acpi_cpu_tstate")

static ACPI_STATUS	 acpicpu_tstate_tss(struct acpicpu_softc *);
static ACPI_STATUS	 acpicpu_tstate_tss_add(struct acpicpu_tstate *,
						ACPI_OBJECT *);
static ACPI_STATUS	 acpicpu_tstate_ptc(struct acpicpu_softc *);
static ACPI_STATUS	 acpicpu_tstate_dep(struct acpicpu_softc *);
static ACPI_STATUS	 acpicpu_tstate_fadt(struct acpicpu_softc *);
static ACPI_STATUS	 acpicpu_tstate_change(struct acpicpu_softc *);
static void		 acpicpu_tstate_reset(struct acpicpu_softc *);
static void		 acpicpu_tstate_set_xcall(void *, void *);

extern struct acpicpu_softc **acpicpu_sc;

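/*
 * Evaluate the ACPI objects that describe T-states (throttling):
 * the mandatory _TSS and _PTC, and the optional _TSD and _TPC.
 * If the mandatory objects are absent or invalid, fall back to
 * the FADT-based duty cycle interface.
 */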
void
acpicpu_tstate_attach(device_t self)
{
	struct acpicpu_softc *sc = device_private(self);
	const char *str;
	ACPI_HANDLE tmp;
	ACPI_STATUS rv;

	/*
	 * Disable T-states for PIIX4.
	 */
	if ((sc->sc_flags & ACPICPU_FLAG_PIIX4) != 0)
		return;

	rv  = acpicpu_tstate_tss(sc);

	if (ACPI_FAILURE(rv)) {
		str = "_TSS";
		goto out;
	}

	rv = acpicpu_tstate_ptc(sc);

	if (ACPI_FAILURE(rv)) {
		str = "_PTC";
		goto out;
	}

	/*
	 * Query the optional _TSD.
	 */
	rv = acpicpu_tstate_dep(sc);

	if (ACPI_SUCCESS(rv))
		sc->sc_flags |= ACPICPU_FLAG_T_DEP;

	/*
	 * Comparable to P-states, the _TPC object may
	 * be absent in some systems, even though it is
	 * required by ACPI 3.0 along with _TSS and _PTC.
	 */
	rv = AcpiGetHandle(sc->sc_node->ad_handle, "_TPC", &tmp);

	if (ACPI_FAILURE(rv)) {
		aprint_debug_dev(self, "_TPC missing\n");
		rv = AE_OK;
	}

out:
	if (ACPI_FAILURE(rv)) {

		if (rv != AE_NOT_FOUND)
			aprint_error_dev(sc->sc_dev, "failed to evaluate "
			    "%s: %s\n", str, AcpiFormatException(rv));

		rv = acpicpu_tstate_fadt(sc);

		if (ACPI_FAILURE(rv))
			return;

		sc->sc_flags |= ACPICPU_FLAG_T_FADT;
	}

	sc->sc_flags |= ACPICPU_FLAG_T;

	acpicpu_tstate_reset(sc);
}

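/*
 * Free the T-state array and clear the T-state capability flag.
 */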
void
acpicpu_tstate_detach(device_t self)
{
	struct acpicpu_softc *sc = device_private(self);
	size_t size;

	if ((sc->sc_flags & ACPICPU_FLAG_T) == 0)
		return;

	size = sc->sc_tstate_count * sizeof(*sc->sc_tstate);

	if (sc->sc_tstate != NULL)
		kmem_free(sc->sc_tstate, size);

	sc->sc_flags &= ~ACPICPU_FLAG_T;
}

void
acpicpu_tstate_start(device_t self)
{
	/* Nothing. */
}

void
acpicpu_tstate_suspend(void *aux)
{
	struct acpicpu_softc *sc;
	device_t self = aux;

	sc = device_private(self);

	mutex_enter(&sc->sc_mtx);
	acpicpu_tstate_reset(sc);
	mutex_exit(&sc->sc_mtx);
}

void
acpicpu_tstate_resume(void *aux)
{
	/* Nothing. */
}

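/*
 * A notify handler for T-state events. The throttling window is
 * re-evaluated via _TPC and _TDL, unless the states were derived
 * from the FADT or the CPU is not in the highest-performance P-state.
 */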
void
acpicpu_tstate_callback(void *aux)
{
	struct acpicpu_softc *sc;
	device_t self = aux;
	uint32_t omax, omin;
	int i;

	sc = device_private(self);

	if ((sc->sc_flags & ACPICPU_FLAG_T_FADT) != 0)
		return;

	mutex_enter(&sc->sc_mtx);

	/*
	 * If P-states are in use, we should ignore
	 * the interrupt unless we are in the highest
	 * P-state (see ACPI 4.0, section 8.4.3.3).
	 */
	if ((sc->sc_flags & ACPICPU_FLAG_P) != 0) {

		for (i = sc->sc_pstate_count - 1; i >= 0; i--) {

			if (sc->sc_pstate[i].ps_freq != 0)
				break;
		}

		if (sc->sc_pstate_current != sc->sc_pstate[i].ps_freq) {
			mutex_exit(&sc->sc_mtx);
			return;
		}
	}

	omax = sc->sc_tstate_max;
	omin = sc->sc_tstate_min;

	(void)acpicpu_tstate_change(sc);

	if (omax != sc->sc_tstate_max || omin != sc->sc_tstate_min) {

		aprint_debug_dev(sc->sc_dev, "throttling window "
		    "changed from %u-%u %% to %u-%u %%\n",
		    sc->sc_tstate[omax].ts_percent,
		    sc->sc_tstate[omin].ts_percent,
		    sc->sc_tstate[sc->sc_tstate_max].ts_percent,
		    sc->sc_tstate[sc->sc_tstate_min].ts_percent);
	}

	mutex_exit(&sc->sc_mtx);
}

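/*
 * Evaluate _TSS and fill the array of T-states. Entries that fail
 * to parse, or that are not in strictly descending order of the
 * percentage field, are invalidated by zeroing their percentage.
 */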
static ACPI_STATUS
acpicpu_tstate_tss(struct acpicpu_softc *sc)
{
	struct acpicpu_tstate *ts;
	ACPI_OBJECT *obj;
	ACPI_BUFFER buf;
	ACPI_STATUS rv;
	uint32_t count;
	uint32_t i, j;

	rv = acpi_eval_struct(sc->sc_node->ad_handle, "_TSS", &buf);

	if (ACPI_FAILURE(rv))
		return rv;

	obj = buf.Pointer;

	if (obj->Type != ACPI_TYPE_PACKAGE) {
		rv = AE_TYPE;
		goto out;
	}

	sc->sc_tstate_count = obj->Package.Count;

	if (sc->sc_tstate_count == 0) {
		rv = AE_NOT_EXIST;
		goto out;
	}

	sc->sc_tstate = kmem_zalloc(sc->sc_tstate_count *
	    sizeof(struct acpicpu_tstate), KM_SLEEP);

	if (sc->sc_tstate == NULL) {
		rv = AE_NO_MEMORY;
		goto out;
	}

	for (count = i = 0; i < sc->sc_tstate_count; i++) {

		ts = &sc->sc_tstate[i];
		rv = acpicpu_tstate_tss_add(ts, &obj->Package.Elements[i]);

		if (ACPI_FAILURE(rv)) {
			ts->ts_percent = 0;
			continue;
		}

		for (j = 0; j < i; j++) {

			if (ts->ts_percent >= sc->sc_tstate[j].ts_percent) {
				ts->ts_percent = 0;
				break;
			}
		}

		if (ts->ts_percent != 0)
			count++;
	}

	if (count == 0) {
		rv = AE_NOT_EXIST;
		goto out;
	}

	/*
	 * The first entry must have a percent field
	 * of 100, i.e. no throttling. If this is not
	 * the case, invalidate the use of T-states
	 * via _TSS.
	 */
	if (sc->sc_tstate[0].ts_percent != 100) {
		rv = AE_BAD_DECIMAL_CONSTANT;
		goto out;
	}

out:
	if (buf.Pointer != NULL)
		ACPI_FREE(buf.Pointer);

	return rv;
}

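/*
 * Parse a single _TSS entry, a package of five integers: percent,
 * power, latency, control, and status. The copy below relies on
 * the corresponding members of struct acpicpu_tstate being laid
 * out contiguously in the same order, starting from ts_percent.
 */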
static ACPI_STATUS
acpicpu_tstate_tss_add(struct acpicpu_tstate *ts, ACPI_OBJECT *obj)
{
	ACPI_OBJECT *elm;
	uint32_t val[5];
	uint32_t *p;
	int i;

	if (obj->Type != ACPI_TYPE_PACKAGE)
		return AE_TYPE;

	if (obj->Package.Count != 5)
		return AE_BAD_DATA;

	elm = obj->Package.Elements;

	for (i = 0; i < 5; i++) {

		if (elm[i].Type != ACPI_TYPE_INTEGER)
			return AE_TYPE;

		if (elm[i].Integer.Value > UINT32_MAX)
			return AE_AML_NUMERIC_OVERFLOW;

		val[i] = elm[i].Integer.Value;
	}

	p = &ts->ts_percent;

	for (i = 0; i < 5; i++, p++)
		*p = val[i];

	/*
	 * The minimum should be either 12.5 % or 6.25 %,
	 * the latter 4-bit dynamic range being available
	 * in some newer models; see Section 14.5.3.1 in
	 *
	 *	Intel 64 and IA-32 Architectures Software
	 *	Developer's Manual. Volume 3B, Part 2. 2013.
	 */
	if (ts->ts_percent < 6 || ts->ts_percent > 100)
		return AE_BAD_DECIMAL_CONSTANT;

	if (ts->ts_latency == 0 || ts->ts_latency > 1000)
		ts->ts_latency = 1;

	return AE_OK;
}

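/*
 * Evaluate _PTC, which returns the descriptors for the processor
 * throttling control and status registers. Both registers must
 * reside in the same address space.
 */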
ACPI_STATUS
acpicpu_tstate_ptc(struct acpicpu_softc *sc)
{
	static const size_t size = sizeof(struct acpicpu_reg);
	struct acpicpu_reg *reg[2];
	ACPI_OBJECT *elm, *obj;
	ACPI_BUFFER buf;
	ACPI_STATUS rv;
	int i;

	rv = acpi_eval_struct(sc->sc_node->ad_handle, "_PTC", &buf);

	if (ACPI_FAILURE(rv))
		return rv;

	obj = buf.Pointer;

	if (obj->Type != ACPI_TYPE_PACKAGE) {
		rv = AE_TYPE;
		goto out;
	}

	if (obj->Package.Count != 2) {
		rv = AE_LIMIT;
		goto out;
	}

	for (i = 0; i < 2; i++) {

		elm = &obj->Package.Elements[i];

		if (elm->Type != ACPI_TYPE_BUFFER) {
			rv = AE_TYPE;
			goto out;
		}

		if (size > elm->Buffer.Length) {
			rv = AE_AML_BAD_RESOURCE_LENGTH;
			goto out;
		}

		reg[i] = (struct acpicpu_reg *)elm->Buffer.Pointer;

		switch (reg[i]->reg_spaceid) {

		case ACPI_ADR_SPACE_SYSTEM_MEMORY:

			if (reg[i]->reg_addr == 0) {
				rv = AE_AML_ILLEGAL_ADDRESS;
				goto out;
			}

			break;

		case ACPI_ADR_SPACE_SYSTEM_IO:

			if (reg[i]->reg_addr == 0) {
				rv = AE_AML_ILLEGAL_ADDRESS;
				goto out;
			}

#if defined(__i386__) || defined(__x86_64__)
			/*
			 * Check that the values match the IA32 clock
			 * modulation MSR, where the bit 0 is reserved,
			 * bits 1 through 3 define the duty cycle, and
			 * the fourth bit enables the modulation.
			 */
			if (reg[i]->reg_bitwidth != 4) {
				rv = AE_AML_BAD_RESOURCE_VALUE;
				goto out;
			}

			if (reg[i]->reg_bitoffset != 1) {
				rv = AE_AML_BAD_RESOURCE_VALUE;
				goto out;
			}
#endif

			break;

		case ACPI_ADR_SPACE_FIXED_HARDWARE:

			if ((sc->sc_flags & ACPICPU_FLAG_T_FFH) == 0) {
				rv = AE_SUPPORT;
				goto out;
			}

			break;

		default:
			rv = AE_AML_INVALID_SPACE_ID;
			goto out;
		}
	}

	if (reg[0]->reg_spaceid != reg[1]->reg_spaceid) {
		rv = AE_AML_INVALID_SPACE_ID;
		goto out;
	}

	(void)memcpy(&sc->sc_tstate_control, reg[0], size);
	(void)memcpy(&sc->sc_tstate_status,  reg[1], size);

out:
	if (buf.Pointer != NULL)
		ACPI_FREE(buf.Pointer);

	return rv;
}

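/*
 * Evaluate the optional _TSD, which describes how T-states are
 * coordinated between processors in the same dependency domain.
 */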
static ACPI_STATUS
acpicpu_tstate_dep(struct acpicpu_softc *sc)
{
	ACPI_OBJECT *elm, *obj;
	ACPI_BUFFER buf;
	ACPI_STATUS rv;
	uint32_t val;
	uint8_t i, n;

	rv = acpi_eval_struct(sc->sc_node->ad_handle, "_TSD", &buf);

	if (ACPI_FAILURE(rv))
		goto out;

	obj = buf.Pointer;

	if (obj->Type != ACPI_TYPE_PACKAGE) {
		rv = AE_TYPE;
		goto out;
	}

	if (obj->Package.Count != 1) {
		rv = AE_LIMIT;
		goto out;
	}

	elm = &obj->Package.Elements[0];

	if (elm->Type != ACPI_TYPE_PACKAGE) {
		rv = AE_TYPE;
		goto out;
	}

	n = elm->Package.Count;

	if (n != 5) {
		rv = AE_LIMIT;
		goto out;
	}

	elm = elm->Package.Elements;

	for (i = 0; i < n; i++) {

		if (elm[i].Type != ACPI_TYPE_INTEGER) {
			rv = AE_TYPE;
			goto out;
		}

		if (elm[i].Integer.Value > UINT32_MAX) {
			rv = AE_AML_NUMERIC_OVERFLOW;
			goto out;
		}
	}

	val = elm[1].Integer.Value;

	if (val != 0)
		aprint_debug_dev(sc->sc_dev, "invalid revision in _TSD\n");

	val = elm[3].Integer.Value;

	if (val < ACPICPU_DEP_SW_ALL || val > ACPICPU_DEP_HW_ALL) {
		rv = AE_AML_BAD_RESOURCE_VALUE;
		goto out;
	}

	val = elm[4].Integer.Value;

	if (val > sc->sc_ncpus) {
		rv = AE_BAD_VALUE;
		goto out;
	}

	sc->sc_tstate_dep.dep_domain = elm[2].Integer.Value;
	sc->sc_tstate_dep.dep_type   = elm[3].Integer.Value;
	sc->sc_tstate_dep.dep_ncpus  = elm[4].Integer.Value;

out:
	if (ACPI_FAILURE(rv) && rv != AE_NOT_FOUND)
		aprint_debug_dev(sc->sc_dev, "failed to evaluate "
		    "_TSD: %s\n", AcpiFormatException(rv));

	if (buf.Pointer != NULL)
		ACPI_FREE(buf.Pointer);

	return rv;
}

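/*
 * Derive the T-states from the FADT: the duty cycle field in the
 * P_CNT register, located at the processor's P_BLK, is used when
 * the _TSS and _PTC objects are unavailable.
 */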
static ACPI_STATUS
acpicpu_tstate_fadt(struct acpicpu_softc *sc)
{
	static const size_t size = sizeof(struct acpicpu_tstate);
	const uint8_t offset = AcpiGbl_FADT.DutyOffset;
	const uint8_t width = AcpiGbl_FADT.DutyWidth;
	uint8_t beta, count, i;

	if (sc->sc_object.ao_pblkaddr == 0)
		return AE_AML_ILLEGAL_ADDRESS;

	/*
	 * A zero DUTY_WIDTH may be used to announce
	 * that T-states are not available via FADT
	 * (ACPI 4.0, p. 121). See also (section 9.3):
	 *
	 *	Advanced Micro Devices: BIOS and Kernel
	 *	Developer's Guide for AMD Athlon 64 and
	 *	AMD Opteron Processors. Revision 3.30,
	 *	February 2006.
	 */
	if (width == 0 || width + offset > 4)
		return AE_AML_BAD_RESOURCE_VALUE;

	count = 1 << width;

	if (sc->sc_tstate != NULL)
		kmem_free(sc->sc_tstate, sc->sc_tstate_count * size);

	sc->sc_tstate = kmem_zalloc(count * size, KM_SLEEP);
	sc->sc_tstate_count = count;

	/*
	 * Approximate duty cycles and set the MSR values.
	 */
	for (beta = 100 / count, i = 0; i < count; i++) {
		sc->sc_tstate[i].ts_percent = 100 - beta * i;
		sc->sc_tstate[i].ts_latency = 1;
	}

	for (i = 1; i < count; i++)
		sc->sc_tstate[i].ts_control = (count - i) | __BIT(3);

	/*
	 * Fake values for throttling registers.
	 */
	(void)memset(&sc->sc_tstate_status, 0, sizeof(struct acpicpu_reg));
	(void)memset(&sc->sc_tstate_control, 0, sizeof(struct acpicpu_reg));

	sc->sc_tstate_status.reg_bitwidth = width;
	sc->sc_tstate_status.reg_bitoffset = offset;
	sc->sc_tstate_status.reg_addr = sc->sc_object.ao_pblkaddr;
	sc->sc_tstate_status.reg_spaceid = ACPI_ADR_SPACE_SYSTEM_IO;

	sc->sc_tstate_control.reg_bitwidth = width;
	sc->sc_tstate_control.reg_bitoffset = offset;
	sc->sc_tstate_control.reg_addr = sc->sc_object.ao_pblkaddr;
	sc->sc_tstate_control.reg_spaceid = ACPI_ADR_SPACE_SYSTEM_IO;

	return AE_OK;
}

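/*
 * Re-evaluate the bounds of the T-state window from _TPC and _TDL.
 */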
static ACPI_STATUS
acpicpu_tstate_change(struct acpicpu_softc *sc)
{
	ACPI_INTEGER val;
	ACPI_STATUS rv;

	acpicpu_tstate_reset(sc);

	/*
	 * Evaluate the available T-state window:
	 *
	 *   _TPC : either this maximum or any lower power
	 *          (i.e. higher numbered) state may be used.
	 *
	 *   _TDL : either this minimum or any higher power
	 *	    (i.e. lower numbered) state may be used.
	 *
	 *   _TDL >= _TPC || _TDL >= _TSS[last entry].
	 */
	rv = acpi_eval_integer(sc->sc_node->ad_handle, "_TPC", &val);

	if (ACPI_SUCCESS(rv) && val < sc->sc_tstate_count) {

		if (sc->sc_tstate[val].ts_percent != 0)
			sc->sc_tstate_max = val;
	}

	rv = acpi_eval_integer(sc->sc_node->ad_handle, "_TDL", &val);

	if (ACPI_SUCCESS(rv) && val < sc->sc_tstate_count) {

		if (val >= sc->sc_tstate_max &&
		    sc->sc_tstate[val].ts_percent != 0)
			sc->sc_tstate_min = val;
	}

	return AE_OK;
}

static void
acpicpu_tstate_reset(struct acpicpu_softc *sc)
{

	sc->sc_tstate_max = 0;
	sc->sc_tstate_min = sc->sc_tstate_count - 1;
}

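/*
 * Get the current throttling percentage, either via the MSR-based
 * ("fixed hardware") method or by reading the status register
 * described by _PTC. The result is cached in sc_tstate_current.
 */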
int
acpicpu_tstate_get(struct cpu_info *ci, uint32_t *percent)
{
	struct acpicpu_tstate *ts = NULL;
	struct acpicpu_softc *sc;
	uint32_t i, val = 0;
	int rv;

	sc = acpicpu_sc[ci->ci_acpiid];

	if (__predict_false(sc == NULL)) {
		rv = ENXIO;
		goto fail;
	}

	if (__predict_false(sc->sc_cold != false)) {
		rv = EBUSY;
		goto fail;
	}

	if (__predict_false((sc->sc_flags & ACPICPU_FLAG_T) == 0)) {
		rv = ENODEV;
		goto fail;
	}

	mutex_enter(&sc->sc_mtx);

	if (sc->sc_tstate_current != ACPICPU_T_STATE_UNKNOWN) {
		*percent = sc->sc_tstate_current;
		mutex_exit(&sc->sc_mtx);
		return 0;
	}

	mutex_exit(&sc->sc_mtx);

	switch (sc->sc_tstate_status.reg_spaceid) {

	case ACPI_ADR_SPACE_FIXED_HARDWARE:

		rv = acpicpu_md_tstate_get(sc, percent);

		if (__predict_false(rv != 0))
			goto fail;

		break;

	case ACPI_ADR_SPACE_SYSTEM_IO:
	case ACPI_ADR_SPACE_SYSTEM_MEMORY:

		val = acpicpu_readreg(&sc->sc_tstate_status);

		for (i = 0; i < sc->sc_tstate_count; i++) {

			if (sc->sc_tstate[i].ts_percent == 0)
				continue;

			if (val == sc->sc_tstate[i].ts_status) {
				ts = &sc->sc_tstate[i];
				break;
			}
		}

		if (ts == NULL) {
			rv = EIO;
			goto fail;
		}

		*percent = ts->ts_percent;
		break;

	default:
		rv = ENOTTY;
		goto fail;
	}

	mutex_enter(&sc->sc_mtx);
	sc->sc_tstate_current = *percent;
	mutex_exit(&sc->sc_mtx);

	return 0;

fail:
	aprint_error_dev(sc->sc_dev, "failed "
	    "to get T-state (err %d)\n", rv);

	mutex_enter(&sc->sc_mtx);
	*percent = sc->sc_tstate_current = ACPICPU_T_STATE_UNKNOWN;
	mutex_exit(&sc->sc_mtx);

	return rv;
}

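/*
 * Set the throttling percentage on all CPUs via a broadcast
 * cross-call.
 */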
void
acpicpu_tstate_set(struct cpu_info *ci, uint32_t percent)
{
	uint64_t xc;

	xc = xc_broadcast(0, acpicpu_tstate_set_xcall, &percent, NULL);
	xc_wait(xc);
}

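/*
 * The cross-call handler: locate the requested T-state within the
 * current throttling window, write its control value, and, unless
 * the transition is asynchronous, poll the status register until
 * the new state is observed.
 */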
static void
acpicpu_tstate_set_xcall(void *arg1, void *arg2)
{
	struct acpicpu_tstate *ts = NULL;
	struct cpu_info *ci = curcpu();
	struct acpicpu_softc *sc;
	uint32_t i, percent, val;
	int rv;

	percent = *(uint32_t *)arg1;
	sc = acpicpu_sc[ci->ci_acpiid];

	if (__predict_false(sc == NULL)) {
		rv = ENXIO;
		goto fail;
	}

	if (__predict_false(sc->sc_cold != false)) {
		rv = EBUSY;
		goto fail;
	}

	if (__predict_false((sc->sc_flags & ACPICPU_FLAG_T) == 0)) {
		rv = ENODEV;
		goto fail;
	}

	mutex_enter(&sc->sc_mtx);

	if (sc->sc_tstate_current == percent) {
		mutex_exit(&sc->sc_mtx);
		return;
	}

	for (i = sc->sc_tstate_max; i <= sc->sc_tstate_min; i++) {

		if (__predict_false(sc->sc_tstate[i].ts_percent == 0))
			continue;

		if (sc->sc_tstate[i].ts_percent == percent) {
			ts = &sc->sc_tstate[i];
			break;
		}
	}

	mutex_exit(&sc->sc_mtx);

	if (__predict_false(ts == NULL)) {
		rv = EINVAL;
		goto fail;
	}

	switch (sc->sc_tstate_control.reg_spaceid) {

	case ACPI_ADR_SPACE_FIXED_HARDWARE:

		rv = acpicpu_md_tstate_set(ts);

		if (__predict_false(rv != 0))
			goto fail;

		break;

	case ACPI_ADR_SPACE_SYSTEM_IO:
	case ACPI_ADR_SPACE_SYSTEM_MEMORY:

		acpicpu_writereg(&sc->sc_tstate_control, ts->ts_control);

		/*
		 * If the status field is zero, the transition is
		 * specified to be "asynchronous" and there is no
		 * need to check the status (ACPI 4.0, 8.4.3.2).
		 */
		if (ts->ts_status == 0)
			break;

		for (i = 0; i < ACPICPU_T_STATE_RETRY; i++) {

			val = acpicpu_readreg(&sc->sc_tstate_status);

			if (val == ts->ts_status)
				break;

			DELAY(ts->ts_latency);
		}

		if (i == ACPICPU_T_STATE_RETRY) {
			rv = EAGAIN;
			goto fail;
		}

		break;

	default:
		rv = ENOTTY;
		goto fail;
	}

	mutex_enter(&sc->sc_mtx);
	ts->ts_evcnt.ev_count++;
	sc->sc_tstate_current = percent;
	mutex_exit(&sc->sc_mtx);

	return;

fail:
	if (rv != EINVAL)
		aprint_error_dev(sc->sc_dev, "failed to "
		    "throttle to %u %% (err %d)\n", percent, rv);

	mutex_enter(&sc->sc_mtx);
	sc->sc_tstate_current = ACPICPU_T_STATE_UNKNOWN;
	mutex_exit(&sc->sc_mtx);
}