xref: /freebsd/sys/dev/acpica/acpi_perf.c (revision e0c4386e)
/*-
 * Copyright (c) 2003-2005 Nate Lawson (SDG)
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include "opt_acpi.h"
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/bus.h>
#include <sys/cpu.h>
#include <sys/power.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/sbuf.h>
#include <sys/pcpu.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/rman.h>

#include <contrib/dev/acpica/include/acpi.h>

#include <dev/acpica/acpivar.h>

#include "cpufreq_if.h"

/*
 * Support for ACPI processor performance states (Px) according to
 * section 8.3.3 of the ACPI 2.0c specification.
 */

struct acpi_px {
	uint32_t	 core_freq;
	uint32_t	 power;
	uint32_t	 trans_lat;
	uint32_t	 bm_lat;
	uint32_t	 ctrl_val;
	uint32_t	 sts_val;
};
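
/*
 * Each _PSS package entry maps directly onto this struct.  For
 * illustration only (the values below are made up and not taken from
 * any particular machine), an entry such as
 *
 *	Package () { 1800, 25000, 10, 10, 0x02, 0x02 }
 *
 * would be parsed as core_freq = 1800 MHz, power = 25000 mW,
 * trans_lat = bm_lat = 10 us, ctrl_val = sts_val = 0x02.
 */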

/* Offsets in struct cf_setting array for storing driver-specific values. */
#define PX_SPEC_CONTROL	0
#define PX_SPEC_STATUS	1

#define MAX_PX_STATES	16

struct acpi_perf_softc {
	device_t	 dev;
	ACPI_HANDLE	 handle;
	struct resource	*perf_ctrl;	/* Set new performance state. */
	int		 perf_ctrl_type; /* Resource type for perf_ctrl. */
	struct resource	*perf_status;	/* Check that transition succeeded. */
	int		 perf_sts_type;	/* Resource type for perf_status. */
	struct acpi_px	*px_states;	/* ACPI perf states. */
	uint32_t	 px_count;	/* Total number of perf states. */
	uint32_t	 px_max_avail;	/* Lowest index state available. */
	int		 px_curr_state;	/* Active state index. */
	int		 px_rid;
	int		 info_only;	/* Can we set new states? */
};

#define PX_GET_REG(reg) 				\
	(bus_space_read_4(rman_get_bustag((reg)), 	\
	    rman_get_bushandle((reg)), 0))
#define PX_SET_REG(reg, val)				\
	(bus_space_write_4(rman_get_bustag((reg)), 	\
	    rman_get_bushandle((reg)), 0, (val)))
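
/*
 * These accessors assume the _PCT control and status registers are no
 * wider than 32 bits and are mapped at offset 0 of the resources that
 * acpi_PkgGas() allocates for them below.
 */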

#define ACPI_NOTIFY_PERF_STATES		0x80	/* _PPC changed. */

static void	acpi_perf_identify(driver_t *driver, device_t parent);
static int	acpi_perf_probe(device_t dev);
static int	acpi_perf_attach(device_t dev);
static int	acpi_perf_detach(device_t dev);
static int	acpi_perf_evaluate(device_t dev);
static int	acpi_px_to_set(device_t dev, struct acpi_px *px,
		    struct cf_setting *set);
static void	acpi_px_available(struct acpi_perf_softc *sc);
static void	acpi_px_startup(void *arg);
static void	acpi_px_notify(ACPI_HANDLE h, UINT32 notify, void *context);
static int	acpi_px_settings(device_t dev, struct cf_setting *sets,
		    int *count);
static int	acpi_px_set(device_t dev, const struct cf_setting *set);
static int	acpi_px_get(device_t dev, struct cf_setting *set);
static int	acpi_px_type(device_t dev, int *type);

static device_method_t acpi_perf_methods[] = {
	/* Device interface */
	DEVMETHOD(device_identify,	acpi_perf_identify),
	DEVMETHOD(device_probe,		acpi_perf_probe),
	DEVMETHOD(device_attach,	acpi_perf_attach),
	DEVMETHOD(device_detach,	acpi_perf_detach),

	/* cpufreq interface */
	DEVMETHOD(cpufreq_drv_set,	acpi_px_set),
	DEVMETHOD(cpufreq_drv_get,	acpi_px_get),
	DEVMETHOD(cpufreq_drv_type,	acpi_px_type),
	DEVMETHOD(cpufreq_drv_settings,	acpi_px_settings),

	DEVMETHOD_END
};

static driver_t acpi_perf_driver = {
	"acpi_perf",
	acpi_perf_methods,
	sizeof(struct acpi_perf_softc),
};

DRIVER_MODULE(acpi_perf, cpu, acpi_perf_driver, 0, 0);
MODULE_DEPEND(acpi_perf, acpi, 1, 1, 1);

static MALLOC_DEFINE(M_ACPIPERF, "acpi_perf", "ACPI Performance states");

static void
acpi_perf_identify(driver_t *driver, device_t parent)
{
	ACPI_HANDLE handle;
	device_t dev;

	/* Make sure we're not being doubly invoked. */
	if (device_find_child(parent, "acpi_perf", -1) != NULL)
		return;

	/* Get the handle for the Processor object and check for perf states. */
	handle = acpi_get_handle(parent);
	if (handle == NULL)
		return;
	if (ACPI_FAILURE(AcpiEvaluateObject(handle, "_PSS", NULL, NULL)))
		return;

	/*
	 * Add a child to every CPU that has the right methods.  In future
	 * versions of the ACPI spec, CPUs can have different settings.
	 * We probe this child now so that other devices that depend
	 * on it (i.e., for info about supported states) will see it.
	 */
	if ((dev = BUS_ADD_CHILD(parent, 0, "acpi_perf",
	    device_get_unit(parent))) != NULL)
		device_probe_and_attach(dev);
	else
		device_printf(parent, "add acpi_perf child failed\n");
}

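/*
 * Probe by evaluating _PCT.  We allocate the control register just long
 * enough to verify that it is usable and then release it again; if
 * acpi_PkgGas() returns EOPNOTSUPP the register is functional fixed
 * hardware, so we attach quietly and only provide settings information.
 */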
static int
acpi_perf_probe(device_t dev)
{
	ACPI_HANDLE handle;
	ACPI_OBJECT *pkg;
	struct resource *res;
	ACPI_BUFFER buf;
	int error, rid, type;

	if (resource_disabled("acpi_perf", 0))
		return (ENXIO);

	/*
	 * Check the performance state registers.  If they are of type
	 * "functional fixed hardware", we attach quietly since we will
	 * only be providing information on settings to other drivers.
	 */
	error = ENXIO;
	handle = acpi_get_handle(dev);
	buf.Pointer = NULL;
	buf.Length = ACPI_ALLOCATE_BUFFER;
	if (ACPI_FAILURE(AcpiEvaluateObject(handle, "_PCT", NULL, &buf)))
		return (error);
	pkg = (ACPI_OBJECT *)buf.Pointer;
	if (ACPI_PKG_VALID(pkg, 2)) {
		rid = 0;
		error = acpi_PkgGas(dev, pkg, 0, &type, &rid, &res, 0);
		switch (error) {
		case 0:
			bus_release_resource(dev, type, rid, res);
			bus_delete_resource(dev, type, rid);
			device_set_desc(dev, "ACPI CPU Frequency Control");
			break;
		case EOPNOTSUPP:
			device_quiet(dev);
			error = 0;
			break;
		}
	}
	AcpiOsFree(buf.Pointer);

	return (error);
}

static int
acpi_perf_attach(device_t dev)
{
	struct acpi_perf_softc *sc;

	sc = device_get_softc(dev);
	sc->dev = dev;
	sc->handle = acpi_get_handle(dev);
	sc->px_max_avail = 0;
	sc->px_curr_state = CPUFREQ_VAL_UNKNOWN;
	if (acpi_perf_evaluate(dev) != 0)
		return (ENXIO);
	AcpiOsExecute(OSL_NOTIFY_HANDLER, acpi_px_startup, NULL);
	if (!sc->info_only)
		cpufreq_register(dev);

	return (0);
}

static int
acpi_perf_detach(device_t dev)
{
	/* TODO: teardown registers, remove notify handler. */
	return (ENXIO);
}

/* Probe and setup any valid performance states (Px). */
static int
acpi_perf_evaluate(device_t dev)
{
	struct acpi_perf_softc *sc;
	ACPI_BUFFER buf;
	ACPI_OBJECT *pkg, *res;
	ACPI_STATUS status;
	int count, error, i, j;
	static int once = 1;
	uint32_t *p;

	/* Get the control values and parameters for each state. */
	error = ENXIO;
	sc = device_get_softc(dev);
	buf.Pointer = NULL;
	buf.Length = ACPI_ALLOCATE_BUFFER;
	status = AcpiEvaluateObject(sc->handle, "_PSS", NULL, &buf);
	if (ACPI_FAILURE(status))
		return (ENXIO);

	pkg = (ACPI_OBJECT *)buf.Pointer;
	if (!ACPI_PKG_VALID(pkg, 1)) {
		device_printf(dev, "invalid top level _PSS package\n");
		goto out;
	}
	sc->px_count = pkg->Package.Count;

	sc->px_states = malloc(sc->px_count * sizeof(struct acpi_px),
	    M_ACPIPERF, M_WAITOK | M_ZERO);

	/*
	 * Each state is a package of {CoreFreq, Power, TransitionLatency,
	 * BusMasterLatency, ControlVal, StatusVal}, sorted from highest
	 * performance to lowest.
	 */
	count = 0;
	for (i = 0; i < sc->px_count; i++) {
		res = &pkg->Package.Elements[i];
		if (!ACPI_PKG_VALID(res, 6)) {
			if (once) {
				once = 0;
				device_printf(dev, "invalid _PSS package\n");
			}
			continue;
		}

		/* Parse the rest of the package into the struct. */
		p = &sc->px_states[count].core_freq;
		for (j = 0; j < 6; j++, p++)
			acpi_PkgInt32(res, j, p);

		/*
		 * Check for some impossible frequencies that some systems
		 * use to indicate they don't actually support this Px state.
		 */
		if (sc->px_states[count].core_freq == 0 ||
		    sc->px_states[count].core_freq == 9999 ||
		    sc->px_states[count].core_freq == 0x9999 ||
		    sc->px_states[count].core_freq >= 0xffff)
			continue;

		/* Check for duplicate entries */
		if (count > 0 &&
		    sc->px_states[count - 1].core_freq ==
			sc->px_states[count].core_freq)
			continue;

		count++;
	}
	sc->px_count = count;

	/* No valid Px state found so give up. */
	if (count == 0)
		goto out;
	AcpiOsFree(buf.Pointer);

	/* Get the control and status registers (one of each). */
	buf.Pointer = NULL;
	buf.Length = ACPI_ALLOCATE_BUFFER;
	status = AcpiEvaluateObject(sc->handle, "_PCT", NULL, &buf);
	if (ACPI_FAILURE(status))
		goto out;

	/* Check the package of two registers, each a Buffer in GAS format. */
	pkg = (ACPI_OBJECT *)buf.Pointer;
	if (!ACPI_PKG_VALID(pkg, 2)) {
		device_printf(dev, "invalid perf register package\n");
		goto out;
	}

	error = acpi_PkgGas(sc->dev, pkg, 0, &sc->perf_ctrl_type, &sc->px_rid,
	    &sc->perf_ctrl, 0);
	if (error) {
		/*
		 * If the register is of type FFixedHW, we can only report
		 * information; we cannot get or set new settings.
		 */
		if (error == EOPNOTSUPP) {
			sc->info_only = TRUE;
			error = 0;
		} else
			device_printf(dev, "failed in PERF_CTL attach\n");
		goto out;
	}
	sc->px_rid++;

	error = acpi_PkgGas(sc->dev, pkg, 1, &sc->perf_sts_type, &sc->px_rid,
	    &sc->perf_status, 0);
	if (error) {
		if (error == EOPNOTSUPP) {
			sc->info_only = TRUE;
			error = 0;
		} else
			device_printf(dev, "failed in PERF_STATUS attach\n");
		goto out;
	}
	sc->px_rid++;

	/* Get our current limit and register for notifies. */
	acpi_px_available(sc);
	AcpiInstallNotifyHandler(sc->handle, ACPI_DEVICE_NOTIFY,
	    acpi_px_notify, sc);
	error = 0;

out:
	if (error) {
		if (sc->px_states) {
			free(sc->px_states, M_ACPIPERF);
			sc->px_states = NULL;
		}
		if (sc->perf_ctrl) {
			bus_release_resource(sc->dev, sc->perf_ctrl_type, 0,
			    sc->perf_ctrl);
			bus_delete_resource(sc->dev, sc->perf_ctrl_type, 0);
			sc->perf_ctrl = NULL;
		}
		if (sc->perf_status) {
			bus_release_resource(sc->dev, sc->perf_sts_type, 1,
			    sc->perf_status);
			bus_delete_resource(sc->dev, sc->perf_sts_type, 1);
			sc->perf_status = NULL;
		}
		sc->px_rid = 0;
		sc->px_count = 0;
	}
	if (buf.Pointer)
		AcpiOsFree(buf.Pointer);
	return (error);
}

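/*
 * Hand-off of P-state control from the platform to the OS.  This is
 * queued from attach via AcpiOsExecute().  Per the FADT, writing the
 * PSTATE_CNT value to the SMI command port tells the firmware that the
 * OS will manage performance states from now on; a PSTATE_CNT of zero
 * means no hand-off is needed.
 */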
static void
acpi_px_startup(void *arg)
{

	/* Signal to the platform that we are taking over CPU control. */
	if (AcpiGbl_FADT.PstateControl == 0)
		return;
	ACPI_LOCK(acpi);
	AcpiOsWritePort(AcpiGbl_FADT.SmiCommand, AcpiGbl_FADT.PstateControl, 8);
	ACPI_UNLOCK(acpi);
}

static void
acpi_px_notify(ACPI_HANDLE h, UINT32 notify, void *context)
{
	struct acpi_perf_softc *sc;

	sc = context;
	if (notify != ACPI_NOTIFY_PERF_STATES)
		return;

	acpi_px_available(sc);

	/* TODO: Implement notification when frequency changes. */
}

/*
 * Find the highest currently-supported performance state.
 * This can be called at runtime (e.g., due to a docking event) at
 * the request of a Notify on the processor object.
 */
static void
acpi_px_available(struct acpi_perf_softc *sc)
{
	ACPI_STATUS status;
	struct cf_setting set;

	status = acpi_GetInteger(sc->handle, "_PPC", &sc->px_max_avail);

	/* If the old state is too high, set current state to the new max. */
	if (ACPI_SUCCESS(status)) {
		if (sc->px_curr_state != CPUFREQ_VAL_UNKNOWN &&
		    sc->px_curr_state > sc->px_max_avail) {
			acpi_px_to_set(sc->dev,
			    &sc->px_states[sc->px_max_avail], &set);
			acpi_px_set(sc->dev, &set);
		}
	} else
		sc->px_max_avail = 0;
}

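/*
 * Convert one ACPI Px entry into the generic cf_setting format used by
 * the cpufreq framework.  The raw _PSS control and status values are
 * carried in the driver-specific spec[] slots so that other cpufreq
 * drivers can make use of them.
 */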
static int
acpi_px_to_set(device_t dev, struct acpi_px *px, struct cf_setting *set)
{

	if (px == NULL || set == NULL)
		return (EINVAL);

	set->freq = px->core_freq;
	set->power = px->power;
	/* XXX Include BM latency too? */
	set->lat = px->trans_lat;
	set->volts = CPUFREQ_VAL_UNKNOWN;
	set->dev = dev;
	set->spec[PX_SPEC_CONTROL] = px->ctrl_val;
	set->spec[PX_SPEC_STATUS] = px->sts_val;

	return (0);
}

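/*
 * Report the set of performance states that may currently be used.
 * Entries with an index below px_max_avail are omitted because the
 * platform's _PPC limit does not allow them at the moment.
 */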
static int
acpi_px_settings(device_t dev, struct cf_setting *sets, int *count)
{
	struct acpi_perf_softc *sc;
	int x, y;

	sc = device_get_softc(dev);
	if (sets == NULL || count == NULL)
		return (EINVAL);
	if (*count < sc->px_count - sc->px_max_avail)
		return (E2BIG);

	/* Return a list of settings that are currently valid. */
	y = 0;
	for (x = sc->px_max_avail; x < sc->px_count; x++, y++)
		acpi_px_to_set(dev, &sc->px_states[x], &sets[y]);
	*count = sc->px_count - sc->px_max_avail;

	return (0);
}

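/*
 * Transition to the Px state whose frequency matches the requested
 * setting: write the control value to the PERF_CTL register and then
 * poll the PERF_STATUS register until it reflects the expected status
 * value or we give up after roughly 10 ms.
 */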
static int
acpi_px_set(device_t dev, const struct cf_setting *set)
{
	struct acpi_perf_softc *sc;
	int i, status, sts_val, tries;

	if (set == NULL)
		return (EINVAL);
	sc = device_get_softc(dev);

	/* If we can't set new states, return immediately. */
	if (sc->info_only)
		return (ENXIO);

	/* Look up appropriate state, based on frequency. */
	for (i = sc->px_max_avail; i < sc->px_count; i++) {
		if (CPUFREQ_CMP(set->freq, sc->px_states[i].core_freq))
			break;
	}
	if (i == sc->px_count)
		return (EINVAL);

	/* Write the appropriate value to the register. */
	PX_SET_REG(sc->perf_ctrl, sc->px_states[i].ctrl_val);

	/*
	 * Try for up to 10 ms to verify the desired state was selected.
	 * This is longer than the standard says (1 ms) but in some modes,
	 * systems may take longer to respond.
	 */
	sts_val = sc->px_states[i].sts_val;
	for (tries = 0; tries < 1000; tries++) {
		status = PX_GET_REG(sc->perf_status);

		/*
		 * If we match the status or the desired status is 8 bits
		 * and matches the relevant bits, assume we succeeded.  It
		 * appears some systems (IBM R32) expect byte-wide access
		 * even though the standard says the register is 32-bit.
		 */
		if (status == sts_val ||
		    ((sts_val & ~0xff) == 0 && (status & 0xff) == sts_val))
			break;
		DELAY(10);
	}
	if (tries == 1000) {
		device_printf(dev, "Px transition to %d failed\n",
		    sc->px_states[i].core_freq);
		return (ENXIO);
	}
	sc->px_curr_state = i;

	return (0);
}

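/*
 * Report the setting that is currently in effect.  If we have
 * programmed a state ourselves, the cached index is authoritative;
 * otherwise we estimate the clock rate with cpu_est_clockrate() and
 * try to match it against the _PSS table, which may fail if the
 * hardware is running at a frequency we do not advertise.
 */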
static int
acpi_px_get(device_t dev, struct cf_setting *set)
{
	struct acpi_perf_softc *sc;
	uint64_t rate;
	int i;
	struct pcpu *pc;

	if (set == NULL)
		return (EINVAL);
	sc = device_get_softc(dev);

	/* If we can't get new states, return immediately. */
	if (sc->info_only)
		return (ENXIO);

	/* If we've set the rate before, use the cached value. */
	if (sc->px_curr_state != CPUFREQ_VAL_UNKNOWN) {
		acpi_px_to_set(dev, &sc->px_states[sc->px_curr_state], set);
		return (0);
	}

	/* Otherwise, estimate and try to match against our settings. */
	pc = cpu_get_pcpu(dev);
	if (pc == NULL)
		return (ENXIO);
	cpu_est_clockrate(pc->pc_cpuid, &rate);
	rate /= 1000000;
	for (i = 0; i < sc->px_count; i++) {
		if (CPUFREQ_CMP(sc->px_states[i].core_freq, rate)) {
			sc->px_curr_state = i;
			acpi_px_to_set(dev, &sc->px_states[i], set);
			break;
		}
	}

	/* No match, give up. */
	if (i == sc->px_count) {
		sc->px_curr_state = CPUFREQ_VAL_UNKNOWN;
		set->freq = CPUFREQ_VAL_UNKNOWN;
	}

	return (0);
}

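/*
 * Describe our settings to the cpufreq framework: the frequencies are
 * absolute (in MHz), and the INFO_ONLY flag is set when the registers
 * are functional fixed hardware and we cannot perform transitions
 * ourselves.
 */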
static int
acpi_px_type(device_t dev, int *type)
{
	struct acpi_perf_softc *sc;

	if (type == NULL)
		return (EINVAL);
	sc = device_get_softc(dev);

	*type = CPUFREQ_TYPE_ABSOLUTE;
	if (sc->info_only)
		*type |= CPUFREQ_FLAG_INFO_ONLY;
	return (0);
}
594