/* $NetBSD: acpi_cpu_pstate.c,v 1.53 2011/11/15 07:43:37 jruoho Exp $ */

/*-
 * Copyright (c) 2010, 2011 Jukka Ruohonen <jruohonen@iki.fi>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: acpi_cpu_pstate.c,v 1.53 2011/11/15 07:43:37 jruoho Exp $");

#include <sys/param.h>
#include <sys/cpufreq.h>
#include <sys/kmem.h>

#include <dev/acpi/acpireg.h>
#include <dev/acpi/acpivar.h>
#include <dev/acpi/acpi_cpu.h>

#define _COMPONENT	 ACPI_BUS_COMPONENT
ACPI_MODULE_NAME	 ("acpi_cpu_pstate")

static ACPI_STATUS	 acpicpu_pstate_pss(struct acpicpu_softc *);
static ACPI_STATUS	 acpicpu_pstate_pss_add(struct acpicpu_pstate *,
						ACPI_OBJECT *);
static ACPI_STATUS	 acpicpu_pstate_xpss(struct acpicpu_softc *);
static ACPI_STATUS	 acpicpu_pstate_xpss_add(struct acpicpu_pstate *,
						 ACPI_OBJECT *);
static ACPI_STATUS	 acpicpu_pstate_pct(struct acpicpu_softc *);
static ACPI_STATUS	 acpicpu_pstate_dep(struct acpicpu_softc *);
static int		 acpicpu_pstate_max(struct acpicpu_softc *);
static int		 acpicpu_pstate_min(struct acpicpu_softc *);
static void		 acpicpu_pstate_change(struct acpicpu_softc *);
static void		 acpicpu_pstate_reset(struct acpicpu_softc *);
static void		 acpicpu_pstate_bios(void);

extern struct acpicpu_softc **acpicpu_sc;

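/*
 * Attach P-state support: evaluate _PSS (and the extended XPSS, when
 * no _PDC/_OSC capabilities were negotiated), the mandatory _PCT, and
 * the optional _PSD, and carry out the MD initialization.
 */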
void
acpicpu_pstate_attach(device_t self)
{
	struct acpicpu_softc *sc = device_private(self);
	const char *str;
	ACPI_HANDLE tmp;
	ACPI_STATUS rv;

	rv = acpicpu_pstate_pss(sc);

	if (ACPI_FAILURE(rv)) {
		str = "_PSS";
		goto fail;
	}

	/*
	 * Append additional information from the extended _PSS,
	 * if available. Note that XPSS can not be used on Intel
	 * systems that use either _PDC or _OSC. From the XPSS
	 * method specification:
	 *
	 *   "The platform must not require the use of the
	 *    optional _PDC or _OSC methods to coordinate
	 *    between the operating system and firmware for
	 *    the purposes of enabling specific processor
	 *    power management features or implementations."
	 */
	if (sc->sc_cap == 0) {

		rv = acpicpu_pstate_xpss(sc);

		if (ACPI_SUCCESS(rv))
			sc->sc_flags |= ACPICPU_FLAG_P_XPSS;
	}

	rv = acpicpu_pstate_pct(sc);

	if (ACPI_FAILURE(rv)) {
		str = "_PCT";
		goto fail;
	}

	/*
	 * The ACPI 3.0 and 4.0 specifications mandate three
	 * objects for P-states: _PSS, _PCT, and _PPC. A less
	 * strict wording is however used in the earlier 2.0
	 * standard, and some systems conforming to ACPI 2.0
	 * do not have _PPC, the method for dynamic maximum.
	 */
	rv = AcpiGetHandle(sc->sc_node->ad_handle, "_PPC", &tmp);

	if (ACPI_FAILURE(rv))
		aprint_debug_dev(self, "_PPC missing\n");

	/*
	 * Carry out MD initialization.
	 */
	rv = acpicpu_md_pstate_init(sc);

	if (rv != 0) {
		rv = AE_SUPPORT;
		goto fail;
	}

	/*
	 * Query the optional _PSD.
	 */
	rv = acpicpu_pstate_dep(sc);

	if (ACPI_SUCCESS(rv))
		sc->sc_flags |= ACPICPU_FLAG_P_DEP;

	sc->sc_pstate_current = 0;
	sc->sc_flags |= ACPICPU_FLAG_P;

	acpicpu_pstate_bios();
	acpicpu_pstate_reset(sc);

	return;

fail:
	switch (rv) {

	case AE_NOT_FOUND:
		return;

	case AE_SUPPORT:
		aprint_verbose_dev(self, "P-states not supported\n");
		return;

	default:
		aprint_error_dev(self, "failed to evaluate "
		    "%s: %s\n", str, AcpiFormatException(rv));
	}
}

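/*
 * Detach: stop the MD backend and release the array of P-states.
 */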
void
acpicpu_pstate_detach(device_t self)
{
	struct acpicpu_softc *sc = device_private(self);
	size_t size;

	if ((sc->sc_flags & ACPICPU_FLAG_P) == 0)
		return;

	(void)acpicpu_md_pstate_stop();

	size = sc->sc_pstate_count * sizeof(*sc->sc_pstate);

	if (sc->sc_pstate != NULL)
		kmem_free(sc->sc_pstate, size);

	sc->sc_flags &= ~ACPICPU_FLAG_P;
}

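/*
 * Start the MD frequency management; on failure, P-state
 * support is disabled for the device.
 */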
void
acpicpu_pstate_start(device_t self)
{
	struct acpicpu_softc *sc = device_private(self);

	if (acpicpu_md_pstate_start(sc) == 0)
		return;

	sc->sc_flags &= ~ACPICPU_FLAG_P;
	aprint_error_dev(self, "failed to start P-states\n");
}

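/*
 * Suspend hook: drop the dynamic limits so that the
 * static defaults are in place after resume.
 */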
void
acpicpu_pstate_suspend(void *aux)
{
	struct acpicpu_softc *sc;
	device_t self = aux;

	/*
	 * Reset any dynamic limits.
	 */
	sc = device_private(self);
	mutex_enter(&sc->sc_mtx);
	acpicpu_pstate_reset(sc);
	mutex_exit(&sc->sc_mtx);
}

void
acpicpu_pstate_resume(void *aux)
{
	/* Nothing. */
}

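/*
 * Callback for an ACPI notification: re-evaluate the dynamic limits
 * and switch to the frequency of the new maximum state, restoring a
 * previously saved frequency if it is still within the limit.
 */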
void
acpicpu_pstate_callback(void *aux)
{
	struct acpicpu_softc *sc;
	device_t self = aux;
	uint32_t freq;

	sc = device_private(self);
	mutex_enter(&sc->sc_mtx);
	acpicpu_pstate_change(sc);

	freq = sc->sc_pstate[sc->sc_pstate_max].ps_freq;

	if (sc->sc_pstate_saved == 0)
		sc->sc_pstate_saved = sc->sc_pstate_current;

	if (sc->sc_pstate_saved <= freq) {
		freq = sc->sc_pstate_saved;
		sc->sc_pstate_saved = 0;
	}

	mutex_exit(&sc->sc_mtx);
	cpufreq_set(sc->sc_ci, freq);
}

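/*
 * Evaluate _PSS and fill the array of P-states. The entries must
 * appear in the order of decreasing frequency; duplicate and
 * malformed entries are invalidated by zeroing ps_freq.
 */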
static ACPI_STATUS
acpicpu_pstate_pss(struct acpicpu_softc *sc)
{
	struct acpicpu_pstate *ps;
	ACPI_OBJECT *obj;
	ACPI_BUFFER buf;
	ACPI_STATUS rv;
	uint32_t count;
	uint32_t i, j;

	rv = acpi_eval_struct(sc->sc_node->ad_handle, "_PSS", &buf);

	if (ACPI_FAILURE(rv))
		return rv;

	obj = buf.Pointer;

	if (obj->Type != ACPI_TYPE_PACKAGE) {
		rv = AE_TYPE;
		goto out;
	}

	sc->sc_pstate_count = obj->Package.Count;

	if (sc->sc_pstate_count == 0) {
		rv = AE_NOT_EXIST;
		goto out;
	}

	if (sc->sc_pstate_count > ACPICPU_P_STATE_MAX) {
		rv = AE_LIMIT;
		goto out;
	}

	sc->sc_pstate = kmem_zalloc(sc->sc_pstate_count *
	    sizeof(struct acpicpu_pstate), KM_SLEEP);

	if (sc->sc_pstate == NULL) {
		rv = AE_NO_MEMORY;
		goto out;
	}

	for (count = i = 0; i < sc->sc_pstate_count; i++) {

		ps = &sc->sc_pstate[i];
		rv = acpicpu_pstate_pss_add(ps, &obj->Package.Elements[i]);

		if (ACPI_FAILURE(rv)) {
			aprint_error_dev(sc->sc_dev, "failed to add "
			    "P-state: %s\n", AcpiFormatException(rv));
			ps->ps_freq = 0;
			continue;
		}

		for (j = 0; j < i; j++) {

			if (ps->ps_freq >= sc->sc_pstate[j].ps_freq) {
				ps->ps_freq = 0;
				break;
			}
		}

		if (ps->ps_freq != 0)
			count++;
	}

	rv = (count != 0) ? AE_OK : AE_NOT_EXIST;

out:
	if (buf.Pointer != NULL)
		ACPI_FREE(buf.Pointer);

	return rv;
}

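/*
 * Parse a single six-element _PSS package: frequency, power,
 * transition latency, bus master latency, control, and status.
 */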
static ACPI_STATUS
acpicpu_pstate_pss_add(struct acpicpu_pstate *ps, ACPI_OBJECT *obj)
{
	ACPI_OBJECT *elm;
	int i;

	if (obj->Type != ACPI_TYPE_PACKAGE)
		return AE_TYPE;

	if (obj->Package.Count != 6)
		return AE_BAD_DATA;

	elm = obj->Package.Elements;

	for (i = 0; i < 6; i++) {

		if (elm[i].Type != ACPI_TYPE_INTEGER)
			return AE_TYPE;

		if (elm[i].Integer.Value > UINT32_MAX)
			return AE_AML_NUMERIC_OVERFLOW;
	}

	ps->ps_freq       = elm[0].Integer.Value;
	ps->ps_power      = elm[1].Integer.Value;
	ps->ps_latency    = elm[2].Integer.Value;
	ps->ps_latency_bm = elm[3].Integer.Value;
	ps->ps_control    = elm[4].Integer.Value;
	ps->ps_status     = elm[5].Integer.Value;

	if (ps->ps_freq == 0 || ps->ps_freq > 9999)
		return AE_BAD_DECIMAL_CONSTANT;

	/*
	 * Sanity check also the latency levels. Some systems may
	 * report a value zero, but we keep one microsecond as the
	 * lower bound; see for instance AMD family 12h,
	 *
	 *	Advanced Micro Devices: BIOS and Kernel Developer's
	 *	Guide (BKDG) for AMD Family 12h Processors. Section
	 *	2.5.3.1.9.2, Revision 3.02, October, 2011.
	 */
	if (ps->ps_latency == 0 || ps->ps_latency > 1000)
		ps->ps_latency = 1;

	return AE_OK;
}

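/*
 * Evaluate the extended _PSS ("XPSS") and merge the additional
 * fields into the P-states gathered from the conventional _PSS.
 */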
static ACPI_STATUS
acpicpu_pstate_xpss(struct acpicpu_softc *sc)
{
	struct acpicpu_pstate *ps;
	ACPI_OBJECT *obj;
	ACPI_BUFFER buf;
	ACPI_STATUS rv;
	uint32_t i = 0;

	rv = acpi_eval_struct(sc->sc_node->ad_handle, "XPSS", &buf);

	if (ACPI_FAILURE(rv))
		goto out;

	obj = buf.Pointer;

	if (obj->Type != ACPI_TYPE_PACKAGE) {
		rv = AE_TYPE;
		goto out;
	}

	if (obj->Package.Count != sc->sc_pstate_count) {
		rv = AE_LIMIT;
		goto out;
	}

	while (i < sc->sc_pstate_count) {

		ps = &sc->sc_pstate[i];
		acpicpu_pstate_xpss_add(ps, &obj->Package.Elements[i]);

		i++;
	}

out:
	if (ACPI_FAILURE(rv) && rv != AE_NOT_FOUND)
		aprint_error_dev(sc->sc_dev, "failed to evaluate "
		    "XPSS: %s\n", AcpiFormatException(rv));

	if (buf.Pointer != NULL)
		ACPI_FREE(buf.Pointer);

	return rv;
}

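/*
 * Parse a single eight-element XPSS package; the last four
 * elements are 64-bit buffers for the control and status
 * values and their masks.
 */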
static ACPI_STATUS
acpicpu_pstate_xpss_add(struct acpicpu_pstate *ps, ACPI_OBJECT *obj)
{
	ACPI_OBJECT *elm;
	int i;

	if (obj->Type != ACPI_TYPE_PACKAGE)
		return AE_TYPE;

	if (obj->Package.Count != 8)
		return AE_BAD_DATA;

	elm = obj->Package.Elements;

	for (i = 0; i < 4; i++) {

		if (elm[i].Type != ACPI_TYPE_INTEGER)
			return AE_TYPE;

		if (elm[i].Integer.Value > UINT32_MAX)
			return AE_AML_NUMERIC_OVERFLOW;
	}

	for (; i < 8; i++) {

		if (elm[i].Type != ACPI_TYPE_BUFFER)
			return AE_TYPE;

		if (elm[i].Buffer.Length != 8)
			return AE_LIMIT;
	}

	/*
	 * Only overwrite the elements that were
	 * not available from the conventional _PSS.
	 */
	if (ps->ps_freq == 0)
		ps->ps_freq = elm[0].Integer.Value;

	if (ps->ps_power == 0)
		ps->ps_power = elm[1].Integer.Value;

	if (ps->ps_latency == 0)
		ps->ps_latency = elm[2].Integer.Value;

	if (ps->ps_latency_bm == 0)
		ps->ps_latency_bm = elm[3].Integer.Value;

	if (ps->ps_control == 0)
		ps->ps_control = ACPI_GET64(elm[4].Buffer.Pointer);

	if (ps->ps_status == 0)
		ps->ps_status = ACPI_GET64(elm[5].Buffer.Pointer);

	if (ps->ps_control_mask == 0)
		ps->ps_control_mask = ACPI_GET64(elm[6].Buffer.Pointer);

	if (ps->ps_status_mask == 0)
		ps->ps_status_mask = ACPI_GET64(elm[7].Buffer.Pointer);

	ps->ps_flags |= ACPICPU_FLAG_P_XPSS;

	if (ps->ps_freq == 0 || ps->ps_freq > 9999)
		return AE_BAD_DECIMAL_CONSTANT;

	if (ps->ps_latency == 0 || ps->ps_latency > 1000)
		ps->ps_latency = 1;

	return AE_OK;
}

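/*
 * Evaluate _PCT, the performance control and status registers.
 * Only the I/O and "fixed hardware" (FFH) address spaces are
 * accepted, and both registers must use the same address space.
 */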
static ACPI_STATUS
acpicpu_pstate_pct(struct acpicpu_softc *sc)
{
	static const size_t size = sizeof(struct acpicpu_reg);
	struct acpicpu_reg *reg[2];
	struct acpicpu_pstate *ps;
	ACPI_OBJECT *elm, *obj;
	ACPI_BUFFER buf;
	ACPI_STATUS rv;
	uint8_t width;
	uint32_t i;

	rv = acpi_eval_struct(sc->sc_node->ad_handle, "_PCT", &buf);

	if (ACPI_FAILURE(rv))
		return rv;

	obj = buf.Pointer;

	if (obj->Type != ACPI_TYPE_PACKAGE) {
		rv = AE_TYPE;
		goto out;
	}

	if (obj->Package.Count != 2) {
		rv = AE_LIMIT;
		goto out;
	}

	for (i = 0; i < 2; i++) {

		elm = &obj->Package.Elements[i];

		if (elm->Type != ACPI_TYPE_BUFFER) {
			rv = AE_TYPE;
			goto out;
		}

		if (size > elm->Buffer.Length) {
			rv = AE_AML_BAD_RESOURCE_LENGTH;
			goto out;
		}

		reg[i] = (struct acpicpu_reg *)elm->Buffer.Pointer;

		switch (reg[i]->reg_spaceid) {

		case ACPI_ADR_SPACE_SYSTEM_IO:

			if (reg[i]->reg_addr == 0) {
				rv = AE_AML_ILLEGAL_ADDRESS;
				goto out;
			}

			width = reg[i]->reg_bitwidth;

			if (width + reg[i]->reg_bitoffset > 32) {
				rv = AE_AML_BAD_RESOURCE_VALUE;
				goto out;
			}

			if (width != 8 && width != 16 && width != 32) {
				rv = AE_AML_BAD_RESOURCE_VALUE;
				goto out;
			}

			break;

		case ACPI_ADR_SPACE_FIXED_HARDWARE:

			if ((sc->sc_flags & ACPICPU_FLAG_P_XPSS) != 0) {

				if (reg[i]->reg_bitwidth != 64) {
					rv = AE_AML_BAD_RESOURCE_VALUE;
					goto out;
				}

				if (reg[i]->reg_bitoffset != 0) {
					rv = AE_AML_BAD_RESOURCE_VALUE;
					goto out;
				}

				break;
			}

			if ((sc->sc_flags & ACPICPU_FLAG_P_FFH) == 0) {
				rv = AE_SUPPORT;
				goto out;
			}

			break;

		default:
			rv = AE_AML_INVALID_SPACE_ID;
			goto out;
		}
	}

	if (reg[0]->reg_spaceid != reg[1]->reg_spaceid) {
		rv = AE_AML_INVALID_SPACE_ID;
		goto out;
	}

	(void)memcpy(&sc->sc_pstate_control, reg[0], size);
	(void)memcpy(&sc->sc_pstate_status,  reg[1], size);

	if ((sc->sc_flags & ACPICPU_FLAG_P_XPSS) != 0) {

		/*
		 * At the very least, mandate that
		 * XPSS supplies the control address.
		 */
		if (sc->sc_pstate_control.reg_addr == 0) {
			rv = AE_AML_BAD_RESOURCE_LENGTH;
			goto out;
		}

		/*
		 * If XPSS is present, copy the supplied
		 * MSR addresses to the P-state structures.
		 */
		for (i = 0; i < sc->sc_pstate_count; i++) {

			ps = &sc->sc_pstate[i];

			if (ps->ps_freq == 0)
				continue;

			ps->ps_status_addr  = sc->sc_pstate_status.reg_addr;
			ps->ps_control_addr = sc->sc_pstate_control.reg_addr;
		}
	}

out:
	if (buf.Pointer != NULL)
		ACPI_FREE(buf.Pointer);

	return rv;
}

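/*
 * Evaluate the optional _PSD for the coordination of P-states
 * between processors: the dependency domain, the coordination
 * type, and the number of processors in the domain.
 */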
static ACPI_STATUS
acpicpu_pstate_dep(struct acpicpu_softc *sc)
{
	ACPI_OBJECT *elm, *obj;
	ACPI_BUFFER buf;
	ACPI_STATUS rv;
	uint32_t val;
	uint8_t i, n;

	rv = acpi_eval_struct(sc->sc_node->ad_handle, "_PSD", &buf);

	if (ACPI_FAILURE(rv))
		goto out;

	obj = buf.Pointer;

	if (obj->Type != ACPI_TYPE_PACKAGE) {
		rv = AE_TYPE;
		goto out;
	}

	if (obj->Package.Count != 1) {
		rv = AE_LIMIT;
		goto out;
	}

	elm = &obj->Package.Elements[0];

	if (elm->Type != ACPI_TYPE_PACKAGE) {
		rv = AE_TYPE;
		goto out;
	}

	n = elm->Package.Count;

	if (n != 5) {
		rv = AE_LIMIT;
		goto out;
	}

	elm = elm->Package.Elements;

	for (i = 0; i < n; i++) {

		if (elm[i].Type != ACPI_TYPE_INTEGER) {
			rv = AE_TYPE;
			goto out;
		}

		if (elm[i].Integer.Value > UINT32_MAX) {
			rv = AE_AML_NUMERIC_OVERFLOW;
			goto out;
		}
	}

	val = elm[1].Integer.Value;

	if (val != 0)
		aprint_debug_dev(sc->sc_dev, "invalid revision in _PSD\n");

	val = elm[3].Integer.Value;

	if (val < ACPICPU_DEP_SW_ALL || val > ACPICPU_DEP_HW_ALL) {
		rv = AE_AML_BAD_RESOURCE_VALUE;
		goto out;
	}

	val = elm[4].Integer.Value;

	if (val > sc->sc_ncpus) {
		rv = AE_BAD_VALUE;
		goto out;
	}

	sc->sc_pstate_dep.dep_domain = elm[2].Integer.Value;
	sc->sc_pstate_dep.dep_type   = elm[3].Integer.Value;
	sc->sc_pstate_dep.dep_ncpus  = elm[4].Integer.Value;

out:
	if (ACPI_FAILURE(rv) && rv != AE_NOT_FOUND)
		aprint_debug_dev(sc->sc_dev, "failed to evaluate "
		    "_PSD: %s\n", AcpiFormatException(rv));

	if (buf.Pointer != NULL)
		ACPI_FREE(buf.Pointer);

	return rv;
}

static int
acpicpu_pstate_max(struct acpicpu_softc *sc)
{
	ACPI_INTEGER val;
	ACPI_STATUS rv;

	/*
	 * Evaluate the currently highest P-state that can be used.
	 * If available, we can use either this state or any lower
	 * power (i.e. higher numbered) state from the _PSS object.
	 * Note that the return value must match the _OST parameter.
	 */
	rv = acpi_eval_integer(sc->sc_node->ad_handle, "_PPC", &val);

	if (ACPI_SUCCESS(rv) && val < sc->sc_pstate_count) {

		if (sc->sc_pstate[val].ps_freq != 0) {
			sc->sc_pstate_max = val;
			return 0;
		}
	}

	return 1;
}

static int
acpicpu_pstate_min(struct acpicpu_softc *sc)
{
	ACPI_INTEGER val;
	ACPI_STATUS rv;

	/*
	 * The _PDL object defines the minimum when passive cooling
	 * is being performed. If available, we can use the returned
	 * state or any higher power (i.e. lower numbered) state.
	 */
	rv = acpi_eval_integer(sc->sc_node->ad_handle, "_PDL", &val);

	if (ACPI_SUCCESS(rv) && val < sc->sc_pstate_count) {

		if (sc->sc_pstate[val].ps_freq == 0)
			return 1;

		if (val >= sc->sc_pstate_max) {
			sc->sc_pstate_min = val;
			return 0;
		}
	}

	return 1;
}

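/*
 * Called when the dynamic limits may have changed: reset the
 * limits, re-evaluate the optional _PDL and _PPC, and report
 * the outcome back to the firmware via the optional _OST.
 */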
static void
acpicpu_pstate_change(struct acpicpu_softc *sc)
{
	static ACPI_STATUS rv = AE_OK;
	ACPI_OBJECT_LIST arg;
	ACPI_OBJECT obj[2];
	static int val = 0;

	acpicpu_pstate_reset(sc);

	/*
	 * Cache the checks as the optional
	 * _PDL and _OST are rarely present.
	 */
	if (val == 0)
		val = acpicpu_pstate_min(sc);

	arg.Count = 2;
	arg.Pointer = obj;

	obj[0].Type = ACPI_TYPE_INTEGER;
	obj[1].Type = ACPI_TYPE_INTEGER;

	obj[0].Integer.Value = ACPICPU_P_NOTIFY;
	obj[1].Integer.Value = acpicpu_pstate_max(sc);

	if (ACPI_FAILURE(rv))
		return;

	rv = AcpiEvaluateObject(sc->sc_node->ad_handle, "_OST", &arg, NULL);
}

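/*
 * Reset the dynamic limits to cover all P-states from _PSS.
 */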
static void
acpicpu_pstate_reset(struct acpicpu_softc *sc)
{

	sc->sc_pstate_max = 0;
	sc->sc_pstate_min = sc->sc_pstate_count - 1;

}

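/*
 * Claim P-state control from the firmware by writing the
 * P-state control value from the FADT to the SMI command
 * port, if both are present.
 */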
static void
acpicpu_pstate_bios(void)
{
	const uint8_t val = AcpiGbl_FADT.PstateControl;
	const uint32_t addr = AcpiGbl_FADT.SmiCommand;

	if (addr == 0 || val == 0)
		return;

	(void)AcpiOsWritePort(addr, val, 8);
}

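/*
 * Get the current frequency. The cached value is used when
 * available; otherwise the status is read either through the
 * MD backend (fixed hardware) or from the I/O port given by _PCT.
 */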
void
acpicpu_pstate_get(void *aux, void *cpu_freq)
{
	struct acpicpu_pstate *ps = NULL;
	struct cpu_info *ci = curcpu();
	struct acpicpu_softc *sc;
	uint32_t freq, i, val = 0;
	uint64_t addr;
	uint8_t width;
	int rv;

	sc = acpicpu_sc[ci->ci_acpiid];

	if (__predict_false(sc == NULL)) {
		rv = ENXIO;
		goto fail;
	}

	if (__predict_false((sc->sc_flags & ACPICPU_FLAG_P) == 0)) {
		rv = ENODEV;
		goto fail;
	}

	mutex_enter(&sc->sc_mtx);

	/*
	 * Use the cached value, if available.
	 */
	if (sc->sc_pstate_current != 0) {
		*(uint32_t *)cpu_freq = sc->sc_pstate_current;
		mutex_exit(&sc->sc_mtx);
		return;
	}

	mutex_exit(&sc->sc_mtx);

	switch (sc->sc_pstate_status.reg_spaceid) {

	case ACPI_ADR_SPACE_FIXED_HARDWARE:

		rv = acpicpu_md_pstate_get(sc, &freq);

		if (__predict_false(rv != 0))
			goto fail;

		break;

	case ACPI_ADR_SPACE_SYSTEM_IO:

		addr  = sc->sc_pstate_status.reg_addr;
		width = sc->sc_pstate_status.reg_bitwidth;

		(void)AcpiOsReadPort(addr, &val, width);

		if (val == 0) {
			rv = EIO;
			goto fail;
		}

		for (i = 0; i < sc->sc_pstate_count; i++) {

			if (sc->sc_pstate[i].ps_freq == 0)
				continue;

			if (val == sc->sc_pstate[i].ps_status) {
				ps = &sc->sc_pstate[i];
				break;
			}
		}

		if (ps == NULL) {
			rv = EIO;
			goto fail;
		}

		freq = ps->ps_freq;
		break;

	default:
		rv = ENOTTY;
		goto fail;
	}

	mutex_enter(&sc->sc_mtx);
	sc->sc_pstate_current = freq;
	*(uint32_t *)cpu_freq = freq;
	mutex_exit(&sc->sc_mtx);

	return;

fail:
	aprint_error_dev(sc->sc_dev, "failed "
	    "to get frequency (err %d)\n", rv);

	mutex_enter(&sc->sc_mtx);
	sc->sc_pstate_current = 0;
	*(uint32_t *)cpu_freq = 0;
	mutex_exit(&sc->sc_mtx);
}

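/*
 * Set the frequency. The request is validated against the dynamic
 * limits and carried out either through the MD backend (fixed
 * hardware) or by writing the control value to the I/O port given
 * by _PCT, polling the status register until the change sticks.
 */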
void
acpicpu_pstate_set(void *aux, void *cpu_freq)
{
	struct acpicpu_pstate *ps = NULL;
	struct cpu_info *ci = curcpu();
	struct acpicpu_softc *sc;
	uint32_t freq, i, val;
	uint64_t addr;
	uint8_t width;
	int rv;

	freq = *(uint32_t *)cpu_freq;
	sc = acpicpu_sc[ci->ci_acpiid];

	if (__predict_false(sc == NULL)) {
		rv = ENXIO;
		goto fail;
	}

	if (__predict_false((sc->sc_flags & ACPICPU_FLAG_P) == 0)) {
		rv = ENODEV;
		goto fail;
	}

	mutex_enter(&sc->sc_mtx);

	if (sc->sc_pstate_current == freq) {
		mutex_exit(&sc->sc_mtx);
		return;
	}

	/*
	 * Verify that the requested frequency is available.
	 *
	 * The access needs to be protected since the currently
	 * available maximum and minimum may change dynamically.
	 */
	for (i = sc->sc_pstate_max; i <= sc->sc_pstate_min; i++) {

		if (__predict_false(sc->sc_pstate[i].ps_freq == 0))
			continue;

		if (sc->sc_pstate[i].ps_freq == freq) {
			ps = &sc->sc_pstate[i];
			break;
		}
	}

	mutex_exit(&sc->sc_mtx);

	if (__predict_false(ps == NULL)) {
		rv = EINVAL;
		goto fail;
	}

	switch (sc->sc_pstate_control.reg_spaceid) {

	case ACPI_ADR_SPACE_FIXED_HARDWARE:

		rv = acpicpu_md_pstate_set(ps);

		if (__predict_false(rv != 0))
			goto fail;

		break;

	case ACPI_ADR_SPACE_SYSTEM_IO:

		addr  = sc->sc_pstate_control.reg_addr;
		width = sc->sc_pstate_control.reg_bitwidth;

		(void)AcpiOsWritePort(addr, ps->ps_control, width);

		addr  = sc->sc_pstate_status.reg_addr;
		width = sc->sc_pstate_status.reg_bitwidth;

		/*
		 * Some systems take longer to respond
		 * than the reported worst-case latency.
		 */
		for (i = val = 0; i < ACPICPU_P_STATE_RETRY; i++) {

			(void)AcpiOsReadPort(addr, &val, width);

			if (val == ps->ps_status)
				break;

			DELAY(ps->ps_latency);
		}

		if (i == ACPICPU_P_STATE_RETRY) {
			rv = EAGAIN;
			goto fail;
		}

		break;

	default:
		rv = ENOTTY;
		goto fail;
	}

	mutex_enter(&sc->sc_mtx);
	ps->ps_evcnt.ev_count++;
	sc->sc_pstate_current = freq;
	mutex_exit(&sc->sc_mtx);

	return;

fail:
	if (rv != EINVAL)
		aprint_error_dev(sc->sc_dev, "failed to set "
		    "frequency to %u (err %d)\n", freq, rv);

	mutex_enter(&sc->sc_mtx);
	sc->sc_pstate_current = 0;
	mutex_exit(&sc->sc_mtx);
}