xref: /freebsd/sys/dev/acpica/acpi_perf.c (revision f05cddf9)
/*-
 * Copyright (c) 2003-2005 Nate Lawson (SDG)
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_acpi.h"
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/bus.h>
#include <sys/cpu.h>
#include <sys/power.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/sbuf.h>
#include <sys/pcpu.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/rman.h>

#include <contrib/dev/acpica/include/acpi.h>

#include <dev/acpica/acpivar.h>

#include "cpufreq_if.h"

/*
 * Support for ACPI processor performance states (Px) according to
 * section 8.3.3 of the ACPI 2.0c specification.
 */

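/*
 * One entry per _PSS package element.  Per the ACPI specification, the
 * core frequency is in MHz, power in milliwatts, the transition and
 * bus-master latencies in microseconds, and the control/status values
 * are those written to and read back via the _PCT registers.
 */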
struct acpi_px {
	uint32_t	 core_freq;
	uint32_t	 power;
	uint32_t	 trans_lat;
	uint32_t	 bm_lat;
	uint32_t	 ctrl_val;
	uint32_t	 sts_val;
};

/* Offsets in struct cf_setting array for storing driver-specific values. */
#define PX_SPEC_CONTROL	0
#define PX_SPEC_STATUS	1

#define MAX_PX_STATES	16

struct acpi_perf_softc {
	device_t	 dev;
	ACPI_HANDLE	 handle;
	struct resource	*perf_ctrl;	/* Set new performance state. */
	int		 perf_ctrl_type; /* Resource type for perf_ctrl. */
	struct resource	*perf_status;	/* Check that transition succeeded. */
	int		 perf_sts_type;	/* Resource type for perf_status. */
	struct acpi_px	*px_states;	/* ACPI perf states. */
	uint32_t	 px_count;	/* Total number of perf states. */
	uint32_t	 px_max_avail;	/* Lowest index state available. */
	int		 px_curr_state;	/* Active state index. */
	int		 px_rid;
	int		 info_only;	/* Can we set new states? */
};

#define PX_GET_REG(reg) 				\
	(bus_space_read_4(rman_get_bustag((reg)), 	\
	    rman_get_bushandle((reg)), 0))
#define PX_SET_REG(reg, val)				\
	(bus_space_write_4(rman_get_bustag((reg)), 	\
	    rman_get_bushandle((reg)), 0, (val)))

#define ACPI_NOTIFY_PERF_STATES		0x80	/* Available Px states (_PPC) changed. */

static void	acpi_perf_identify(driver_t *driver, device_t parent);
static int	acpi_perf_probe(device_t dev);
static int	acpi_perf_attach(device_t dev);
static int	acpi_perf_detach(device_t dev);
static int	acpi_perf_evaluate(device_t dev);
static int	acpi_px_to_set(device_t dev, struct acpi_px *px,
		    struct cf_setting *set);
static void	acpi_px_available(struct acpi_perf_softc *sc);
static void	acpi_px_startup(void *arg);
static void	acpi_px_notify(ACPI_HANDLE h, UINT32 notify, void *context);
static int	acpi_px_settings(device_t dev, struct cf_setting *sets,
		    int *count);
static int	acpi_px_set(device_t dev, const struct cf_setting *set);
static int	acpi_px_get(device_t dev, struct cf_setting *set);
static int	acpi_px_type(device_t dev, int *type);

static device_method_t acpi_perf_methods[] = {
	/* Device interface */
	DEVMETHOD(device_identify,	acpi_perf_identify),
	DEVMETHOD(device_probe,		acpi_perf_probe),
	DEVMETHOD(device_attach,	acpi_perf_attach),
	DEVMETHOD(device_detach,	acpi_perf_detach),

	/* cpufreq interface */
	DEVMETHOD(cpufreq_drv_set,	acpi_px_set),
	DEVMETHOD(cpufreq_drv_get,	acpi_px_get),
	DEVMETHOD(cpufreq_drv_type,	acpi_px_type),
	DEVMETHOD(cpufreq_drv_settings,	acpi_px_settings),

	DEVMETHOD_END
};

static driver_t acpi_perf_driver = {
	"acpi_perf",
	acpi_perf_methods,
	sizeof(struct acpi_perf_softc),
};

static devclass_t acpi_perf_devclass;
DRIVER_MODULE(acpi_perf, cpu, acpi_perf_driver, acpi_perf_devclass, 0, 0);
MODULE_DEPEND(acpi_perf, acpi, 1, 1, 1);

static MALLOC_DEFINE(M_ACPIPERF, "acpi_perf", "ACPI Performance states");

static void
acpi_perf_identify(driver_t *driver, device_t parent)
{
	ACPI_HANDLE handle;
	device_t dev;

	/* Make sure we're not being doubly invoked. */
	if (device_find_child(parent, "acpi_perf", -1) != NULL)
		return;

	/* Get the handle for the Processor object and check for perf states. */
	handle = acpi_get_handle(parent);
	if (handle == NULL)
		return;
	if (ACPI_FAILURE(AcpiEvaluateObject(handle, "_PSS", NULL, NULL)))
		return;

	/*
	 * Add a child to every CPU that has the right methods.  In future
	 * versions of the ACPI spec, CPUs can have different settings.
	 * We probe this child now so that other devices that depend
	 * on it (i.e., for info about supported states) will see it.
	 */
	if ((dev = BUS_ADD_CHILD(parent, 0, "acpi_perf", -1)) != NULL)
		device_probe_and_attach(dev);
	else
		device_printf(parent, "add acpi_perf child failed\n");
}

static int
acpi_perf_probe(device_t dev)
{
	ACPI_HANDLE handle;
	ACPI_OBJECT *pkg;
	struct resource *res;
	ACPI_BUFFER buf;
	int error, rid, type;

	if (resource_disabled("acpi_perf", 0))
		return (ENXIO);

	/*
	 * Check the performance state registers.  If they are of type
	 * "functional fixed hardware", we attach quietly since we will
	 * only be providing information on settings to other drivers.
	 */
	error = ENXIO;
	handle = acpi_get_handle(dev);
	buf.Pointer = NULL;
	buf.Length = ACPI_ALLOCATE_BUFFER;
	if (ACPI_FAILURE(AcpiEvaluateObject(handle, "_PCT", NULL, &buf)))
		return (error);
	pkg = (ACPI_OBJECT *)buf.Pointer;
	if (ACPI_PKG_VALID(pkg, 2)) {
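		/*
		 * Allocate the first _PCT register only to verify that it is
		 * a real I/O or memory register, then release it; attach
		 * will allocate it again for actual use.
		 */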
		rid = 0;
		error = acpi_PkgGas(dev, pkg, 0, &type, &rid, &res, 0);
		switch (error) {
		case 0:
			bus_release_resource(dev, type, rid, res);
			bus_delete_resource(dev, type, rid);
			device_set_desc(dev, "ACPI CPU Frequency Control");
			break;
		case EOPNOTSUPP:
			device_quiet(dev);
			error = 0;
			break;
		}
	}
	AcpiOsFree(buf.Pointer);

	return (error);
}

static int
acpi_perf_attach(device_t dev)
{
	struct acpi_perf_softc *sc;

	sc = device_get_softc(dev);
	sc->dev = dev;
	sc->handle = acpi_get_handle(dev);
	sc->px_max_avail = 0;
	sc->px_curr_state = CPUFREQ_VAL_UNKNOWN;
	if (acpi_perf_evaluate(dev) != 0)
		return (ENXIO);
	AcpiOsExecute(OSL_NOTIFY_HANDLER, acpi_px_startup, NULL);
	if (!sc->info_only)
		cpufreq_register(dev);

	return (0);
}

static int
acpi_perf_detach(device_t dev)
{
	/* TODO: teardown registers, remove notify handler. */
	return (ENXIO);
}

/* Probe and setup any valid performance states (Px). */
static int
acpi_perf_evaluate(device_t dev)
{
	struct acpi_perf_softc *sc;
	ACPI_BUFFER buf;
	ACPI_OBJECT *pkg, *res;
	ACPI_STATUS status;
	int count, error, i, j;
	static int once = 1;
	uint32_t *p;

	/* Get the control values and parameters for each state. */
	error = ENXIO;
	sc = device_get_softc(dev);
	buf.Pointer = NULL;
	buf.Length = ACPI_ALLOCATE_BUFFER;
	status = AcpiEvaluateObject(sc->handle, "_PSS", NULL, &buf);
	if (ACPI_FAILURE(status))
		return (ENXIO);

	pkg = (ACPI_OBJECT *)buf.Pointer;
	if (!ACPI_PKG_VALID(pkg, 1)) {
		device_printf(dev, "invalid top level _PSS package\n");
		goto out;
	}
	sc->px_count = pkg->Package.Count;

	sc->px_states = malloc(sc->px_count * sizeof(struct acpi_px),
	    M_ACPIPERF, M_WAITOK | M_ZERO);
	if (sc->px_states == NULL)
		goto out;

	/*
	 * Each state is a package of {CoreFreq, Power, TransitionLatency,
	 * BusMasterLatency, ControlVal, StatusVal}, sorted from highest
	 * performance to lowest.
	 */
	count = 0;
	for (i = 0; i < sc->px_count; i++) {
		res = &pkg->Package.Elements[i];
		if (!ACPI_PKG_VALID(res, 6)) {
			if (once) {
				once = 0;
				device_printf(dev, "invalid _PSS package\n");
			}
			continue;
		}

		/* Parse the rest of the package into the struct. */
		p = &sc->px_states[count].core_freq;
		for (j = 0; j < 6; j++, p++)
			acpi_PkgInt32(res, j, p);

		/*
		 * Check for some impossible frequencies that some systems
		 * use to indicate they don't actually support this Px state.
		 */
		if (sc->px_states[count].core_freq == 0 ||
		    sc->px_states[count].core_freq == 9999 ||
		    sc->px_states[count].core_freq == 0x9999 ||
		    sc->px_states[count].core_freq >= 0xffff)
			continue;

		/* Check for duplicate entries */
		if (count > 0 &&
		    sc->px_states[count - 1].core_freq ==
			sc->px_states[count].core_freq)
			continue;

		count++;
	}
	sc->px_count = count;

	/* No valid Px state found so give up. */
	if (count == 0)
		goto out;
	AcpiOsFree(buf.Pointer);

	/* Get the control and status registers (one of each). */
	buf.Pointer = NULL;
	buf.Length = ACPI_ALLOCATE_BUFFER;
	status = AcpiEvaluateObject(sc->handle, "_PCT", NULL, &buf);
	if (ACPI_FAILURE(status))
		goto out;

	/* Check the package of two registers, each a Buffer in GAS format. */
	pkg = (ACPI_OBJECT *)buf.Pointer;
	if (!ACPI_PKG_VALID(pkg, 2)) {
		device_printf(dev, "invalid perf register package\n");
		goto out;
	}

	error = acpi_PkgGas(sc->dev, pkg, 0, &sc->perf_ctrl_type, &sc->px_rid,
	    &sc->perf_ctrl, 0);
	if (error) {
		/*
		 * If the register is of type FFixedHW, we can only return
		 * info, we can't get or set new settings.
		 */
		if (error == EOPNOTSUPP) {
			sc->info_only = TRUE;
			error = 0;
		} else
			device_printf(dev, "failed in PERF_CTL attach\n");
		goto out;
	}
	sc->px_rid++;

	error = acpi_PkgGas(sc->dev, pkg, 1, &sc->perf_sts_type, &sc->px_rid,
	    &sc->perf_status, 0);
	if (error) {
		if (error == EOPNOTSUPP) {
			sc->info_only = TRUE;
			error = 0;
		} else
			device_printf(dev, "failed in PERF_STATUS attach\n");
		goto out;
	}
	sc->px_rid++;

	/* Get our current limit and register for notifies. */
	acpi_px_available(sc);
	AcpiInstallNotifyHandler(sc->handle, ACPI_DEVICE_NOTIFY,
	    acpi_px_notify, sc);
	error = 0;

out:
	if (error) {
		if (sc->px_states) {
			free(sc->px_states, M_ACPIPERF);
			sc->px_states = NULL;
		}
		if (sc->perf_ctrl) {
			bus_release_resource(sc->dev, sc->perf_ctrl_type, 0,
			    sc->perf_ctrl);
			bus_delete_resource(sc->dev, sc->perf_ctrl_type, 0);
			sc->perf_ctrl = NULL;
		}
		if (sc->perf_status) {
			bus_release_resource(sc->dev, sc->perf_sts_type, 1,
			    sc->perf_status);
			bus_delete_resource(sc->dev, sc->perf_sts_type, 1);
			sc->perf_status = NULL;
		}
		sc->px_rid = 0;
		sc->px_count = 0;
	}
	if (buf.Pointer)
		AcpiOsFree(buf.Pointer);
	return (error);
}

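/*
 * Hand-off of P-state control: if the FADT supplies a PSTATE_CNT value,
 * writing it to the SMI command port tells the firmware that the OS is
 * assuming responsibility for performance state transitions.
 */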
static void
acpi_px_startup(void *arg)
{

	/* Signal to the platform that we are taking over CPU control. */
	if (AcpiGbl_FADT.PstateControl == 0)
		return;
	ACPI_LOCK(acpi);
	AcpiOsWritePort(AcpiGbl_FADT.SmiCommand, AcpiGbl_FADT.PstateControl, 8);
	ACPI_UNLOCK(acpi);
}

static void
acpi_px_notify(ACPI_HANDLE h, UINT32 notify, void *context)
{
	struct acpi_perf_softc *sc;

	sc = context;
	if (notify != ACPI_NOTIFY_PERF_STATES)
		return;

	acpi_px_available(sc);

	/* TODO: Implement notification when frequency changes. */
}

/*
 * Find the highest currently-supported performance state.
 * This can be called at runtime (e.g., due to a docking event) at
 * the request of a Notify on the processor object.
 */
static void
acpi_px_available(struct acpi_perf_softc *sc)
{
	ACPI_STATUS status;
	struct cf_setting set;

	status = acpi_GetInteger(sc->handle, "_PPC", &sc->px_max_avail);

	/* If the old state is too high, set current state to the new max. */
	if (ACPI_SUCCESS(status)) {
		if (sc->px_curr_state != CPUFREQ_VAL_UNKNOWN &&
		    sc->px_curr_state > sc->px_max_avail) {
			acpi_px_to_set(sc->dev,
			    &sc->px_states[sc->px_max_avail], &set);
			acpi_px_set(sc->dev, &set);
		}
	} else
		sc->px_max_avail = 0;
}

static int
acpi_px_to_set(device_t dev, struct acpi_px *px, struct cf_setting *set)
{

	if (px == NULL || set == NULL)
		return (EINVAL);

	set->freq = px->core_freq;
	set->power = px->power;
	/* XXX Include BM latency too? */
	set->lat = px->trans_lat;
	set->volts = CPUFREQ_VAL_UNKNOWN;
	set->dev = dev;
	set->spec[PX_SPEC_CONTROL] = px->ctrl_val;
	set->spec[PX_SPEC_STATUS] = px->sts_val;

	return (0);
}

static int
acpi_px_settings(device_t dev, struct cf_setting *sets, int *count)
{
	struct acpi_perf_softc *sc;
	int x, y;

	sc = device_get_softc(dev);
	if (sets == NULL || count == NULL)
		return (EINVAL);
	if (*count < sc->px_count - sc->px_max_avail)
		return (E2BIG);

	/* Return a list of settings that are currently valid. */
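	/* _PSS is sorted fastest first, so indices below px_max_avail are disallowed by _PPC. */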
	y = 0;
	for (x = sc->px_max_avail; x < sc->px_count; x++, y++)
		acpi_px_to_set(dev, &sc->px_states[x], &sets[y]);
	*count = sc->px_count - sc->px_max_avail;

	return (0);
}

static int
acpi_px_set(device_t dev, const struct cf_setting *set)
{
	struct acpi_perf_softc *sc;
	int i, status, sts_val, tries;

	if (set == NULL)
		return (EINVAL);
	sc = device_get_softc(dev);

	/* If we can't set new states, return immediately. */
	if (sc->info_only)
		return (ENXIO);

	/* Look up appropriate state, based on frequency. */
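	/* CPUFREQ_CMP() (sys/cpu.h) tolerates small rounding differences in the frequency. */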
	for (i = sc->px_max_avail; i < sc->px_count; i++) {
		if (CPUFREQ_CMP(set->freq, sc->px_states[i].core_freq))
			break;
	}
	if (i == sc->px_count)
		return (EINVAL);

	/* Write the appropriate value to the register. */
	PX_SET_REG(sc->perf_ctrl, sc->px_states[i].ctrl_val);

	/*
	 * Try for up to 10 ms to verify the desired state was selected.
	 * This is longer than the standard says (1 ms) but in some modes,
	 * systems may take longer to respond.
	 */
	sts_val = sc->px_states[i].sts_val;
	for (tries = 0; tries < 1000; tries++) {
		status = PX_GET_REG(sc->perf_status);

		/*
		 * If we match the status or the desired status is 8 bits
		 * and matches the relevant bits, assume we succeeded.  It
		 * appears some systems (IBM R32) expect byte-wide access
		 * even though the standard says the register is 32-bit.
		 */
		if (status == sts_val ||
		    ((sts_val & ~0xff) == 0 && (status & 0xff) == sts_val))
			break;
		DELAY(10);
	}
	if (tries == 1000) {
		device_printf(dev, "Px transition to %d failed\n",
		    sc->px_states[i].core_freq);
		return (ENXIO);
	}
	sc->px_curr_state = i;

	return (0);
}

static int
acpi_px_get(device_t dev, struct cf_setting *set)
{
	struct acpi_perf_softc *sc;
	uint64_t rate;
	int i;
	struct pcpu *pc;

	if (set == NULL)
		return (EINVAL);
	sc = device_get_softc(dev);

	/* If we can't get new states, return immediately. */
	if (sc->info_only)
		return (ENXIO);

	/* If we've set the rate before, use the cached value. */
	if (sc->px_curr_state != CPUFREQ_VAL_UNKNOWN) {
		acpi_px_to_set(dev, &sc->px_states[sc->px_curr_state], set);
		return (0);
	}

	/* Otherwise, estimate and try to match against our settings. */
	pc = cpu_get_pcpu(dev);
	if (pc == NULL)
		return (ENXIO);
	cpu_est_clockrate(pc->pc_cpuid, &rate);
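	/* cpu_est_clockrate() reports Hz; convert to MHz to match _PSS CoreFreq. */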
	rate /= 1000000;
	for (i = 0; i < sc->px_count; i++) {
		if (CPUFREQ_CMP(sc->px_states[i].core_freq, rate)) {
			sc->px_curr_state = i;
			acpi_px_to_set(dev, &sc->px_states[i], set);
			break;
		}
	}

	/* No match, give up. */
	if (i == sc->px_count) {
		sc->px_curr_state = CPUFREQ_VAL_UNKNOWN;
		set->freq = CPUFREQ_VAL_UNKNOWN;
	}

	return (0);
}

static int
acpi_px_type(device_t dev, int *type)
{
	struct acpi_perf_softc *sc;

	if (type == NULL)
		return (EINVAL);
	sc = device_get_softc(dev);

	*type = CPUFREQ_TYPE_ABSOLUTE;
	if (sc->info_only)
		*type |= CPUFREQ_FLAG_INFO_ONLY;
	return (0);
}
598