/* xref: /freebsd/sys/kern/kern_cpu.c (revision e28a4053) */
/*-
 * Copyright (c) 2004-2007 Nate Lawson (SDG)
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/cpu.h>
#include <sys/eventhandler.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/sbuf.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/sx.h>
#include <sys/timetc.h>
#include <sys/taskqueue.h>

#include "cpufreq_if.h"

/*
 * Common CPU frequency glue code.  Drivers for specific hardware can
 * attach this interface to allow users to get/set the CPU frequency.
 */
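
/*
 * In outline: hardware back ends implement the driver-side methods used
 * below (CPUFREQ_DRV_SETTINGS, CPUFREQ_DRV_TYPE, CPUFREQ_DRV_SET and
 * CPUFREQ_DRV_GET from cpufreq_if) and announce themselves with
 * cpufreq_register().  Consumers then use the CPUFREQ_SET, CPUFREQ_GET
 * and CPUFREQ_LEVELS methods on the "cpufreq" child device that this
 * file attaches under each cpu device.
 */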

/*
 * Number of levels we can handle.  Levels are synthesized from settings,
 * so for M settings and N drivers, there may be M*N levels.
 */
#define CF_MAX_LEVELS	64
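
/*
 * For a rough sense of scale (illustrative numbers only): one absolute
 * driver exporting 8 settings combined with one relative driver exporting
 * 4 settings could synthesize up to 8 * 4 = 32 levels, comfortably within
 * CF_MAX_LEVELS.
 */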

struct cf_saved_freq {
	struct cf_level			level;
	int				priority;
	SLIST_ENTRY(cf_saved_freq)	link;
};

struct cpufreq_softc {
	struct sx			lock;
	struct cf_level			curr_level;
	int				curr_priority;
	SLIST_HEAD(, cf_saved_freq)	saved_freq;
	struct cf_level_lst		all_levels;
	int				all_count;
	int				max_mhz;
	device_t			dev;
	struct sysctl_ctx_list		sysctl_ctx;
	struct task			startup_task;
	struct cf_level			*levels_buf;
};

struct cf_setting_array {
	struct cf_setting		sets[MAX_SETTINGS];
	int				count;
	TAILQ_ENTRY(cf_setting_array)	link;
};

TAILQ_HEAD(cf_setting_lst, cf_setting_array);

#define CF_MTX_INIT(x)		sx_init((x), "cpufreq lock")
#define CF_MTX_LOCK(x)		sx_xlock((x))
#define CF_MTX_UNLOCK(x)	sx_xunlock((x))
#define CF_MTX_ASSERT(x)	sx_assert((x), SX_XLOCKED)

#define CF_DEBUG(msg...)	do {		\
	if (cf_verbose)				\
		printf("cpufreq: " msg);	\
	} while (0)

static int	cpufreq_attach(device_t dev);
static void	cpufreq_startup_task(void *ctx, int pending);
static int	cpufreq_detach(device_t dev);
static int	cf_set_method(device_t dev, const struct cf_level *level,
		    int priority);
static int	cf_get_method(device_t dev, struct cf_level *level);
static int	cf_levels_method(device_t dev, struct cf_level *levels,
		    int *count);
static int	cpufreq_insert_abs(struct cpufreq_softc *sc,
		    struct cf_setting *sets, int count);
static int	cpufreq_expand_set(struct cpufreq_softc *sc,
		    struct cf_setting_array *set_arr);
static struct cf_level *cpufreq_dup_set(struct cpufreq_softc *sc,
		    struct cf_level *dup, struct cf_setting *set);
static int	cpufreq_curr_sysctl(SYSCTL_HANDLER_ARGS);
static int	cpufreq_levels_sysctl(SYSCTL_HANDLER_ARGS);
static int	cpufreq_settings_sysctl(SYSCTL_HANDLER_ARGS);

static device_method_t cpufreq_methods[] = {
	DEVMETHOD(device_probe,		bus_generic_probe),
	DEVMETHOD(device_attach,	cpufreq_attach),
	DEVMETHOD(device_detach,	cpufreq_detach),

	DEVMETHOD(cpufreq_set,		cf_set_method),
	DEVMETHOD(cpufreq_get,		cf_get_method),
	DEVMETHOD(cpufreq_levels,	cf_levels_method),
	{0, 0}
};
static driver_t cpufreq_driver = {
	"cpufreq", cpufreq_methods, sizeof(struct cpufreq_softc)
};
static devclass_t cpufreq_dc;
DRIVER_MODULE(cpufreq, cpu, cpufreq_driver, cpufreq_dc, 0, 0);

static int		cf_lowest_freq;
static int		cf_verbose;
TUNABLE_INT("debug.cpufreq.lowest", &cf_lowest_freq);
TUNABLE_INT("debug.cpufreq.verbose", &cf_verbose);
SYSCTL_NODE(_debug, OID_AUTO, cpufreq, CTLFLAG_RD, NULL, "cpufreq debugging");
SYSCTL_INT(_debug_cpufreq, OID_AUTO, lowest, CTLFLAG_RW, &cf_lowest_freq, 1,
    "Don't provide levels below this frequency.");
SYSCTL_INT(_debug_cpufreq, OID_AUTO, verbose, CTLFLAG_RW, &cf_verbose, 1,
    "Print verbose debugging messages");
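
/*
 * Both knobs are loader tunables as well as writable sysctls, so they can
 * be set in loader.conf(5) or changed at runtime, e.g. (illustrative):
 * "sysctl debug.cpufreq.lowest=500" hides levels below 500 MHz and
 * "sysctl debug.cpufreq.verbose=1" enables the CF_DEBUG output above.
 */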

static int
cpufreq_attach(device_t dev)
{
	struct cpufreq_softc *sc;
	struct pcpu *pc;
	device_t parent;
	uint64_t rate;
	int numdevs;

	CF_DEBUG("initializing %s\n", device_get_nameunit(dev));
	sc = device_get_softc(dev);
	parent = device_get_parent(dev);
	sc->dev = dev;
	sysctl_ctx_init(&sc->sysctl_ctx);
	TAILQ_INIT(&sc->all_levels);
	CF_MTX_INIT(&sc->lock);
	sc->curr_level.total_set.freq = CPUFREQ_VAL_UNKNOWN;
	SLIST_INIT(&sc->saved_freq);
	/* Try to get nominal CPU freq to use it as maximum later if needed */
	sc->max_mhz = cpu_get_nominal_mhz(dev);
	/* If that fails, try to measure the current rate */
	if (sc->max_mhz <= 0) {
		pc = cpu_get_pcpu(dev);
		if (cpu_est_clockrate(pc->pc_cpuid, &rate) == 0)
			sc->max_mhz = rate / 1000000;
		else
			sc->max_mhz = CPUFREQ_VAL_UNKNOWN;
	}

	/*
	 * Only initialize one set of sysctls for all CPUs.  In the future,
	 * if multiple CPUs can have different settings, we can move these
	 * sysctls to be under every CPU instead of just the first one.
	 */
	numdevs = devclass_get_count(cpufreq_dc);
	if (numdevs > 1)
		return (0);

	CF_DEBUG("initializing one-time data for %s\n",
	    device_get_nameunit(dev));
	sc->levels_buf = malloc(CF_MAX_LEVELS * sizeof(*sc->levels_buf),
	    M_DEVBUF, M_WAITOK);
	SYSCTL_ADD_PROC(&sc->sysctl_ctx,
	    SYSCTL_CHILDREN(device_get_sysctl_tree(parent)),
	    OID_AUTO, "freq", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
	    cpufreq_curr_sysctl, "I", "Current CPU frequency");
	SYSCTL_ADD_PROC(&sc->sysctl_ctx,
	    SYSCTL_CHILDREN(device_get_sysctl_tree(parent)),
	    OID_AUTO, "freq_levels", CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
	    cpufreq_levels_sysctl, "A", "CPU frequency levels");
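
	/*
	 * Because these OIDs hang off the parent cpu device, they typically
	 * show up as dev.cpu.0.freq (current frequency in MHz, writable)
	 * and dev.cpu.0.freq_levels (a read-only list of "freq/power"
	 * pairs); the exact unit number here is illustrative.
	 */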

	/*
	 * Queue a one-shot broadcast that levels have changed.
	 * It will run once the system has completed booting.
	 */
	TASK_INIT(&sc->startup_task, 0, cpufreq_startup_task, dev);
	taskqueue_enqueue(taskqueue_thread, &sc->startup_task);

	return (0);
}

/* Handle any work to be done for all drivers that attached during boot. */
static void
cpufreq_startup_task(void *ctx, int pending)
{

	cpufreq_settings_changed((device_t)ctx);
}

static int
cpufreq_detach(device_t dev)
{
	struct cpufreq_softc *sc;
	struct cf_saved_freq *saved_freq;
	int numdevs;

	CF_DEBUG("shutdown %s\n", device_get_nameunit(dev));
	sc = device_get_softc(dev);
	sysctl_ctx_free(&sc->sysctl_ctx);

	while ((saved_freq = SLIST_FIRST(&sc->saved_freq)) != NULL) {
		SLIST_REMOVE_HEAD(&sc->saved_freq, link);
		free(saved_freq, M_TEMP);
	}

	/* Only clean up these resources when the last device is detaching. */
	numdevs = devclass_get_count(cpufreq_dc);
	if (numdevs == 1) {
		CF_DEBUG("final shutdown for %s\n", device_get_nameunit(dev));
		free(sc->levels_buf, M_DEVBUF);
	}

	return (0);
}

static int
cf_set_method(device_t dev, const struct cf_level *level, int priority)
{
	struct cpufreq_softc *sc;
	const struct cf_setting *set;
	struct cf_saved_freq *saved_freq, *curr_freq;
	struct pcpu *pc;
	int error, i;

	sc = device_get_softc(dev);
	error = 0;
	set = NULL;
	saved_freq = NULL;

	/* We are going to change levels so notify the pre-change handler. */
	EVENTHANDLER_INVOKE(cpufreq_pre_change, level, &error);
	if (error != 0) {
		EVENTHANDLER_INVOKE(cpufreq_post_change, level, error);
		return (error);
	}

	CF_MTX_LOCK(&sc->lock);

#ifdef SMP
	/*
	 * If still booting and secondary CPUs not started yet, don't allow
	 * changing the frequency until they're online.  This is because we
	 * can't switch to them using sched_bind() and thus we'd only be
	 * switching the main CPU.  XXXTODO: Need to think more about how to
	 * handle having different CPUs at different frequencies.
	 */
	if (mp_ncpus > 1 && !smp_active) {
		device_printf(dev, "rejecting change, SMP not started yet\n");
		error = ENXIO;
		goto out;
	}
#endif /* SMP */

	/*
	 * If the requested level has a lower priority, don't allow
	 * the new level right now.
	 */
	if (priority < sc->curr_priority) {
		CF_DEBUG("ignoring, new prio %d less than curr prio %d\n",
		    priority, sc->curr_priority);
		error = EPERM;
		goto out;
	}

	/*
	 * If the caller didn't specify a level and one is saved, prepare to
	 * restore the saved level.  If none has been saved, return an error.
	 */
	if (level == NULL) {
		saved_freq = SLIST_FIRST(&sc->saved_freq);
		if (saved_freq == NULL) {
			CF_DEBUG("NULL level, no saved level\n");
			error = ENXIO;
			goto out;
		}
		level = &saved_freq->level;
		priority = saved_freq->priority;
		CF_DEBUG("restoring saved level, freq %d prio %d\n",
		    level->total_set.freq, priority);
	}

	/* Reject levels that are below our specified threshold. */
	if (level->total_set.freq < cf_lowest_freq) {
		CF_DEBUG("rejecting freq %d, less than %d limit\n",
		    level->total_set.freq, cf_lowest_freq);
		error = EINVAL;
		goto out;
	}

	/* If already at this level, just return. */
	if (CPUFREQ_CMP(sc->curr_level.total_set.freq, level->total_set.freq)) {
		CF_DEBUG("skipping freq %d, same as current level %d\n",
		    level->total_set.freq, sc->curr_level.total_set.freq);
		goto skip;
	}

	/* First, set the absolute frequency via its driver. */
	set = &level->abs_set;
	if (set->dev) {
		if (!device_is_attached(set->dev)) {
			error = ENXIO;
			goto out;
		}

		/* Bind to the target CPU before switching. */
		pc = cpu_get_pcpu(set->dev);
		thread_lock(curthread);
		sched_bind(curthread, pc->pc_cpuid);
		thread_unlock(curthread);
		CF_DEBUG("setting abs freq %d on %s (cpu %d)\n", set->freq,
		    device_get_nameunit(set->dev), PCPU_GET(cpuid));
		error = CPUFREQ_DRV_SET(set->dev, set);
		thread_lock(curthread);
		sched_unbind(curthread);
		thread_unlock(curthread);
		if (error) {
			goto out;
		}
	}

	/* Next, set any/all relative frequencies via their drivers. */
	for (i = 0; i < level->rel_count; i++) {
		set = &level->rel_set[i];
		if (!device_is_attached(set->dev)) {
			error = ENXIO;
			goto out;
		}

		/* Bind to the target CPU before switching. */
		pc = cpu_get_pcpu(set->dev);
		thread_lock(curthread);
		sched_bind(curthread, pc->pc_cpuid);
		thread_unlock(curthread);
		CF_DEBUG("setting rel freq %d on %s (cpu %d)\n", set->freq,
		    device_get_nameunit(set->dev), PCPU_GET(cpuid));
		error = CPUFREQ_DRV_SET(set->dev, set);
		thread_lock(curthread);
		sched_unbind(curthread);
		thread_unlock(curthread);
		if (error) {
			/* XXX Back out any successful setting? */
			goto out;
		}
	}

skip:
	/*
	 * Before recording the current level, check if we're going to a
	 * higher priority.  If so, save the previous level and priority.
	 */
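	/*
	 * Illustrative example: if a user request at CPUFREQ_PRIO_USER set
	 * 1500 MHz and a kernel caller then sets a level at the higher
	 * CPUFREQ_PRIO_KERN, the 1500 MHz level is pushed onto saved_freq;
	 * it is restored when that caller later passes a NULL level.
	 */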
	if (sc->curr_level.total_set.freq != CPUFREQ_VAL_UNKNOWN &&
	    priority > sc->curr_priority) {
		CF_DEBUG("saving level, freq %d prio %d\n",
		    sc->curr_level.total_set.freq, sc->curr_priority);
		curr_freq = malloc(sizeof(*curr_freq), M_TEMP, M_NOWAIT);
		if (curr_freq == NULL) {
			error = ENOMEM;
			goto out;
		}
		curr_freq->level = sc->curr_level;
		curr_freq->priority = sc->curr_priority;
		SLIST_INSERT_HEAD(&sc->saved_freq, curr_freq, link);
	}
	sc->curr_level = *level;
	sc->curr_priority = priority;

	/* If we were restoring a saved state, reset it to "unused". */
	if (saved_freq != NULL) {
		CF_DEBUG("resetting saved level\n");
		sc->curr_level.total_set.freq = CPUFREQ_VAL_UNKNOWN;
		SLIST_REMOVE_HEAD(&sc->saved_freq, link);
		free(saved_freq, M_TEMP);
	}

out:
	CF_MTX_UNLOCK(&sc->lock);

	/*
	 * We changed levels (or attempted to) so notify the post-change
	 * handler of new frequency or error.
	 */
	EVENTHANDLER_INVOKE(cpufreq_post_change, level, error);
	if (error && set)
		device_printf(set->dev, "set freq failed, err %d\n", error);

	return (error);
}
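
/*
 * Illustrative note: this method is normally reached via CPUFREQ_SET(),
 * either from the dev.cpu.N.freq sysctl handler below (at
 * CPUFREQ_PRIO_USER, e.g. on behalf of powerd(8)) or from in-kernel
 * callers, which may pass a higher priority and thereby trigger the
 * save/restore logic above.
 */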

static int
cf_get_method(device_t dev, struct cf_level *level)
{
	struct cpufreq_softc *sc;
	struct cf_level *levels;
	struct cf_setting *curr_set, set;
	struct pcpu *pc;
	device_t *devs;
	int count, error, i, n, numdevs;
	uint64_t rate;

	sc = device_get_softc(dev);
	error = 0;
	levels = NULL;

	/* If we already know the current frequency, we're done. */
	CF_MTX_LOCK(&sc->lock);
	curr_set = &sc->curr_level.total_set;
	if (curr_set->freq != CPUFREQ_VAL_UNKNOWN) {
		CF_DEBUG("get returning known freq %d\n", curr_set->freq);
		goto out;
	}
	CF_MTX_UNLOCK(&sc->lock);

	/*
	 * We need to figure out the current level.  Loop through every
	 * driver, getting the current setting.  Then, attempt to get a best
	 * match of settings against each level.
	 */
	count = CF_MAX_LEVELS;
	levels = malloc(count * sizeof(*levels), M_TEMP, M_NOWAIT);
	if (levels == NULL)
		return (ENOMEM);
	error = CPUFREQ_LEVELS(sc->dev, levels, &count);
	if (error) {
		if (error == E2BIG)
			printf("cpufreq: need to increase CF_MAX_LEVELS\n");
		free(levels, M_TEMP);
		return (error);
	}
	error = device_get_children(device_get_parent(dev), &devs, &numdevs);
	if (error) {
		free(levels, M_TEMP);
		return (error);
	}

	/*
	 * Reacquire the lock and search for the given level.
	 *
	 * XXX Note: this is not quite right since we really need to go
	 * through each level and compare both absolute and relative
	 * settings for each driver in the system before making a match.
	 * The estimation code below catches this case though.
	 */
	CF_MTX_LOCK(&sc->lock);
	for (n = 0; n < numdevs && curr_set->freq == CPUFREQ_VAL_UNKNOWN; n++) {
		if (!device_is_attached(devs[n]))
			continue;
		if (CPUFREQ_DRV_GET(devs[n], &set) != 0)
			continue;
		for (i = 0; i < count; i++) {
			if (CPUFREQ_CMP(set.freq, levels[i].total_set.freq)) {
				sc->curr_level = levels[i];
				break;
			}
		}
	}
	free(devs, M_TEMP);
	if (curr_set->freq != CPUFREQ_VAL_UNKNOWN) {
		CF_DEBUG("get matched freq %d from drivers\n", curr_set->freq);
		goto out;
	}

	/*
	 * We couldn't find an exact match, so attempt to estimate and then
	 * match against a level.
	 */
	pc = cpu_get_pcpu(dev);
	if (pc == NULL) {
		error = ENXIO;
		goto out;
	}
	cpu_est_clockrate(pc->pc_cpuid, &rate);
	rate /= 1000000;
	for (i = 0; i < count; i++) {
		if (CPUFREQ_CMP(rate, levels[i].total_set.freq)) {
			sc->curr_level = levels[i];
			CF_DEBUG("get estimated freq %d\n", curr_set->freq);
			goto out;
		}
	}
	error = ENXIO;

out:
	if (error == 0)
		*level = sc->curr_level;

	CF_MTX_UNLOCK(&sc->lock);
	if (levels)
		free(levels, M_TEMP);
	return (error);
}

static int
cf_levels_method(device_t dev, struct cf_level *levels, int *count)
{
	struct cf_setting_array *set_arr;
	struct cf_setting_lst rel_sets;
	struct cpufreq_softc *sc;
	struct cf_level *lev;
	struct cf_setting *sets;
	struct pcpu *pc;
	device_t *devs;
	int error, i, numdevs, set_count, type;
	uint64_t rate;

	if (levels == NULL || count == NULL)
		return (EINVAL);

	TAILQ_INIT(&rel_sets);
	sc = device_get_softc(dev);
	error = device_get_children(device_get_parent(dev), &devs, &numdevs);
	if (error)
		return (error);
	sets = malloc(MAX_SETTINGS * sizeof(*sets), M_TEMP, M_NOWAIT);
	if (sets == NULL) {
		free(devs, M_TEMP);
		return (ENOMEM);
	}

	/* Get settings from all cpufreq drivers. */
	CF_MTX_LOCK(&sc->lock);
	for (i = 0; i < numdevs; i++) {
		/* Skip devices that aren't ready. */
		if (!device_is_attached(devs[i]))
			continue;

		/*
		 * Get settings, skipping drivers that offer no settings or
		 * provide settings for informational purposes only.
		 */
		error = CPUFREQ_DRV_TYPE(devs[i], &type);
		if (error || (type & CPUFREQ_FLAG_INFO_ONLY)) {
			if (error == 0) {
				CF_DEBUG("skipping info-only driver %s\n",
				    device_get_nameunit(devs[i]));
			}
			continue;
		}
		set_count = MAX_SETTINGS;
		error = CPUFREQ_DRV_SETTINGS(devs[i], sets, &set_count);
		if (error || set_count == 0)
			continue;

		/* Add the settings to our absolute/relative lists. */
		switch (type & CPUFREQ_TYPE_MASK) {
		case CPUFREQ_TYPE_ABSOLUTE:
			error = cpufreq_insert_abs(sc, sets, set_count);
			break;
		case CPUFREQ_TYPE_RELATIVE:
			CF_DEBUG("adding %d relative settings\n", set_count);
			set_arr = malloc(sizeof(*set_arr), M_TEMP, M_NOWAIT);
			if (set_arr == NULL) {
				error = ENOMEM;
				goto out;
			}
			bcopy(sets, set_arr->sets, set_count * sizeof(*sets));
			set_arr->count = set_count;
			TAILQ_INSERT_TAIL(&rel_sets, set_arr, link);
			break;
		default:
			error = EINVAL;
		}
		if (error)
			goto out;
	}

	/*
	 * If there are no absolute levels, create a fake one at 100%.  We
	 * then cache the clockrate for later use as our base frequency.
	 */
	if (TAILQ_EMPTY(&sc->all_levels)) {
		if (sc->max_mhz == CPUFREQ_VAL_UNKNOWN) {
			sc->max_mhz = cpu_get_nominal_mhz(dev);
			/*
			 * If the CPU can't report its nominal rate, assume
			 * it is currently running at full speed and use the
			 * estimated clock rate instead.
			 */
			if (sc->max_mhz <= 0) {
				pc = cpu_get_pcpu(dev);
				cpu_est_clockrate(pc->pc_cpuid, &rate);
				sc->max_mhz = rate / 1000000;
			}
		}
		memset(&sets[0], CPUFREQ_VAL_UNKNOWN, sizeof(*sets));
		sets[0].freq = sc->max_mhz;
		sets[0].dev = NULL;
		error = cpufreq_insert_abs(sc, sets, 1);
		if (error)
			goto out;
	}

	/* Create a combined list of absolute + relative levels. */
	TAILQ_FOREACH(set_arr, &rel_sets, link)
		cpufreq_expand_set(sc, set_arr);

	/* If the caller doesn't have enough space, return the actual count. */
	if (sc->all_count > *count) {
		*count = sc->all_count;
		error = E2BIG;
		goto out;
	}

	/* Finally, output the list of levels. */
	i = 0;
	TAILQ_FOREACH(lev, &sc->all_levels, link) {
		/*
		 * Skip levels that are too close in frequency to the
		 * previous levels.  Some systems report bogus duplicate
		 * settings (e.g., from acpi_perf).
		 */
		if (i > 0 && CPUFREQ_CMP(lev->total_set.freq,
		    levels[i - 1].total_set.freq)) {
			sc->all_count--;
			continue;
		}

		/* Skip levels that have a frequency that is too low. */
		if (lev->total_set.freq < cf_lowest_freq) {
			sc->all_count--;
			continue;
		}

		levels[i] = *lev;
		i++;
	}
	*count = sc->all_count;
	error = 0;

out:
	/* Clear all levels since we regenerate them each time. */
	while ((lev = TAILQ_FIRST(&sc->all_levels)) != NULL) {
		TAILQ_REMOVE(&sc->all_levels, lev, link);
		free(lev, M_TEMP);
	}
	sc->all_count = 0;

	CF_MTX_UNLOCK(&sc->lock);
	while ((set_arr = TAILQ_FIRST(&rel_sets)) != NULL) {
		TAILQ_REMOVE(&rel_sets, set_arr, link);
		free(set_arr, M_TEMP);
	}
	free(devs, M_TEMP);
	free(sets, M_TEMP);
	return (error);
}

/*
 * Create levels for an array of absolute settings and insert them in
 * sorted order in the specified list.
 */
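
/*
 * Note: the list is kept sorted in descending frequency order, so the head
 * of all_levels is the fastest level; cpufreq_expand_set() below relies on
 * this when it walks the list in reverse (lowest frequency first).
 */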
static int
cpufreq_insert_abs(struct cpufreq_softc *sc, struct cf_setting *sets,
    int count)
{
	struct cf_level_lst *list;
	struct cf_level *level, *search;
	int i;

	CF_MTX_ASSERT(&sc->lock);

	list = &sc->all_levels;
	for (i = 0; i < count; i++) {
		level = malloc(sizeof(*level), M_TEMP, M_NOWAIT | M_ZERO);
		if (level == NULL)
			return (ENOMEM);
		level->abs_set = sets[i];
		level->total_set = sets[i];
		level->total_set.dev = NULL;
		sc->all_count++;

		if (TAILQ_EMPTY(list)) {
			CF_DEBUG("adding abs setting %d at head\n",
			    sets[i].freq);
			TAILQ_INSERT_HEAD(list, level, link);
			continue;
		}

		TAILQ_FOREACH_REVERSE(search, list, cf_level_lst, link) {
			if (sets[i].freq <= search->total_set.freq) {
				CF_DEBUG("adding abs setting %d after %d\n",
				    sets[i].freq, search->total_set.freq);
				TAILQ_INSERT_AFTER(list, search, level, link);
				break;
			}
		}
	}
	return (0);
}

/*
 * Expand a group of relative settings, creating derived levels from them.
 */
static int
cpufreq_expand_set(struct cpufreq_softc *sc, struct cf_setting_array *set_arr)
{
	struct cf_level *fill, *search;
	struct cf_setting *set;
	int i;

	CF_MTX_ASSERT(&sc->lock);

	/*
	 * Walk the set of all existing levels in reverse.  This is so we
	 * create derived states from the lowest absolute settings first
	 * and discard duplicates created from higher absolute settings.
	 * For instance, a level of 50 MHz derived from 100 MHz + 50% is
	 * preferable to 200 MHz + 25% because absolute settings are more
	 * efficient since they often change the voltage as well.
	 */
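	/*
	 * Worked example (illustrative): with absolute levels of 2000 and
	 * 1000 MHz and relative settings of 75% and 50%, the 1000 MHz level
	 * is expanded first into 750 and 500 MHz derived levels; when the
	 * 2000 MHz level is expanded later, its 50% candidate (1000 MHz) is
	 * discarded by cpufreq_dup_set() as a duplicate of the existing
	 * absolute level.
	 */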
	TAILQ_FOREACH_REVERSE(search, &sc->all_levels, cf_level_lst, link) {
		/* Add each setting to the level, duplicating if necessary. */
		for (i = 0; i < set_arr->count; i++) {
			set = &set_arr->sets[i];

			/*
			 * If this setting is less than 100%, split the level
			 * into two and add this setting to the new level.
			 */
			fill = search;
			if (set->freq < 10000) {
				fill = cpufreq_dup_set(sc, search, set);

				/*
				 * The new level was a duplicate of an existing
				 * level or its absolute setting is too high
				 * so we freed it.  For example, we discard a
				 * derived level of 1000 MHz/25% if a level
				 * of 500 MHz/100% already exists.
				 */
				if (fill == NULL)
					break;
			}

			/* Add this setting to the existing or new level. */
			KASSERT(fill->rel_count < MAX_SETTINGS,
			    ("cpufreq: too many relative drivers (%d)",
			    MAX_SETTINGS));
			fill->rel_set[fill->rel_count] = *set;
			fill->rel_count++;
			CF_DEBUG(
			"expand set added rel setting %d%% to %d level\n",
			    set->freq / 100, fill->total_set.freq);
		}
	}

	return (0);
}

static struct cf_level *
cpufreq_dup_set(struct cpufreq_softc *sc, struct cf_level *dup,
    struct cf_setting *set)
{
	struct cf_level_lst *list;
	struct cf_level *fill, *itr;
	struct cf_setting *fill_set, *itr_set;
	int i;

	CF_MTX_ASSERT(&sc->lock);

	/*
	 * Create a new level, copy it from the old one, and update the
	 * total frequency and power by the percentage specified in the
	 * relative setting.
	 */
	fill = malloc(sizeof(*fill), M_TEMP, M_NOWAIT);
	if (fill == NULL)
		return (NULL);
	*fill = *dup;
	fill_set = &fill->total_set;
	fill_set->freq =
	    ((uint64_t)fill_set->freq * set->freq) / 10000;
	if (fill_set->power != CPUFREQ_VAL_UNKNOWN) {
		fill_set->power = ((uint64_t)fill_set->power * set->freq)
		    / 10000;
	}
	if (set->lat != CPUFREQ_VAL_UNKNOWN) {
		if (fill_set->lat != CPUFREQ_VAL_UNKNOWN)
			fill_set->lat += set->lat;
		else
			fill_set->lat = set->lat;
	}
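	/*
	 * Example of the math above (illustrative): relative settings are in
	 * hundredths of a percent, so duplicating a 2000 MHz level with a 50%
	 * setting (set->freq == 5000) gives 2000 * 5000 / 10000 = 1000 MHz;
	 * power is scaled the same way, while any transition latency is added.
	 */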
	CF_DEBUG("dup set considering derived setting %d\n", fill_set->freq);

	/*
	 * If we copied an old level that we already modified (say, at 100%),
	 * we need to remove that setting before adding this one.  Since we
	 * process each setting array in order, we know any settings for this
	 * driver will be found at the end.
	 */
	for (i = fill->rel_count; i != 0; i--) {
		if (fill->rel_set[i - 1].dev != set->dev)
			break;
		CF_DEBUG("removed last relative driver: %s\n",
		    device_get_nameunit(set->dev));
		fill->rel_count--;
	}

	/*
	 * Insert the new level in sorted order.  If it is a duplicate of an
	 * existing level (1) or has an absolute setting higher than the
	 * existing level (2), do not add it.  We can do this since any such
	 * level is guaranteed to use less power.  For example (1), a level
	 * with one absolute setting of 800 MHz uses less power than one
	 * composed of an absolute setting of 1600 MHz and a relative setting
	 * at 50%.  Also for example (2), a level of 800 MHz/75% is preferable
	 * to 1600 MHz/25% even though the latter has a lower total frequency.
	 */
	list = &sc->all_levels;
	KASSERT(!TAILQ_EMPTY(list), ("all levels list empty in dup set"));
	TAILQ_FOREACH_REVERSE(itr, list, cf_level_lst, link) {
		itr_set = &itr->total_set;
		if (CPUFREQ_CMP(fill_set->freq, itr_set->freq)) {
			CF_DEBUG("dup set rejecting %d (dupe)\n",
			    fill_set->freq);
			itr = NULL;
			break;
		} else if (fill_set->freq < itr_set->freq) {
			if (fill->abs_set.freq <= itr->abs_set.freq) {
				CF_DEBUG(
			"dup done, inserting new level %d after %d\n",
				    fill_set->freq, itr_set->freq);
				TAILQ_INSERT_AFTER(list, itr, fill, link);
				sc->all_count++;
			} else {
				CF_DEBUG("dup set rejecting %d (abs too big)\n",
				    fill_set->freq);
				itr = NULL;
			}
			break;
		}
	}

	/* We didn't find a good place for this new level so free it. */
	if (itr == NULL) {
		CF_DEBUG("dup set freeing new level %d (not optimal)\n",
		    fill_set->freq);
		free(fill, M_TEMP);
		fill = NULL;
	}

	return (fill);
}

static int
cpufreq_curr_sysctl(SYSCTL_HANDLER_ARGS)
{
	struct cpufreq_softc *sc;
	struct cf_level *levels;
	int count, devcount, error, freq, i, n;
	device_t *devs;

	devs = NULL;
	sc = oidp->oid_arg1;
	levels = sc->levels_buf;

	error = CPUFREQ_GET(sc->dev, &levels[0]);
	if (error)
		goto out;
	freq = levels[0].total_set.freq;
	error = sysctl_handle_int(oidp, &freq, 0, req);
	if (error != 0 || req->newptr == NULL)
		goto out;

	/*
	 * While we only call cpufreq_get() on one device (assuming all
	 * CPUs have equal levels), we call cpufreq_set() on all CPUs.
	 * This is needed for some MP systems.
	 */
	error = devclass_get_devices(cpufreq_dc, &devs, &devcount);
	if (error)
		goto out;
	for (n = 0; n < devcount; n++) {
		count = CF_MAX_LEVELS;
		error = CPUFREQ_LEVELS(devs[n], levels, &count);
		if (error) {
			if (error == E2BIG)
				printf(
			"cpufreq: need to increase CF_MAX_LEVELS\n");
			break;
		}
		for (i = 0; i < count; i++) {
			if (CPUFREQ_CMP(levels[i].total_set.freq, freq)) {
				error = CPUFREQ_SET(devs[n], &levels[i],
				    CPUFREQ_PRIO_USER);
				break;
			}
		}
		if (i == count) {
			error = EINVAL;
			break;
		}
	}

out:
	if (devs)
		free(devs, M_TEMP);
	return (error);
}

static int
cpufreq_levels_sysctl(SYSCTL_HANDLER_ARGS)
{
	struct cpufreq_softc *sc;
	struct cf_level *levels;
	struct cf_setting *set;
	struct sbuf sb;
	int count, error, i;

	sc = oidp->oid_arg1;
	sbuf_new(&sb, NULL, 128, SBUF_AUTOEXTEND);

	/* Get settings from the device and generate the output string. */
	count = CF_MAX_LEVELS;
	levels = sc->levels_buf;
	if (levels == NULL) {
		sbuf_delete(&sb);
		return (ENOMEM);
	}
	error = CPUFREQ_LEVELS(sc->dev, levels, &count);
	if (error) {
		if (error == E2BIG)
			printf("cpufreq: need to increase CF_MAX_LEVELS\n");
		goto out;
	}
	if (count) {
		for (i = 0; i < count; i++) {
			set = &levels[i].total_set;
			sbuf_printf(&sb, "%d/%d ", set->freq, set->power);
		}
	} else
		sbuf_cpy(&sb, "0");
	sbuf_trim(&sb);
	sbuf_finish(&sb);
	error = sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req);

out:
	sbuf_delete(&sb);
	return (error);
}

static int
cpufreq_settings_sysctl(SYSCTL_HANDLER_ARGS)
{
	device_t dev;
	struct cf_setting *sets;
	struct sbuf sb;
	int error, i, set_count;

	dev = oidp->oid_arg1;
	sbuf_new(&sb, NULL, 128, SBUF_AUTOEXTEND);

	/* Get settings from the device and generate the output string. */
	set_count = MAX_SETTINGS;
	sets = malloc(set_count * sizeof(*sets), M_TEMP, M_NOWAIT);
	if (sets == NULL) {
		sbuf_delete(&sb);
		return (ENOMEM);
	}
	error = CPUFREQ_DRV_SETTINGS(dev, sets, &set_count);
	if (error)
		goto out;
	if (set_count) {
		for (i = 0; i < set_count; i++)
			sbuf_printf(&sb, "%d/%d ", sets[i].freq, sets[i].power);
	} else
		sbuf_cpy(&sb, "0");
	sbuf_trim(&sb);
	sbuf_finish(&sb);
	error = sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req);

out:
	free(sets, M_TEMP);
	sbuf_delete(&sb);
	return (error);
}

int
cpufreq_register(device_t dev)
{
	struct cpufreq_softc *sc;
	device_t cf_dev, cpu_dev;

	/* Add a sysctl to get each driver's settings separately. */
	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "freq_settings", CTLTYPE_STRING | CTLFLAG_RD, dev, 0,
	    cpufreq_settings_sysctl, "A", "CPU frequency driver settings");

	/*
	 * Add only one cpufreq device to each CPU.  Currently, all CPUs
	 * must offer the same levels and be switched at the same time.
	 */
	cpu_dev = device_get_parent(dev);
	if ((cf_dev = device_find_child(cpu_dev, "cpufreq", -1))) {
		sc = device_get_softc(cf_dev);
		sc->max_mhz = CPUFREQ_VAL_UNKNOWN;
		return (0);
	}

	/* Add the child device and possibly sysctls. */
	cf_dev = BUS_ADD_CHILD(cpu_dev, 0, "cpufreq", -1);
	if (cf_dev == NULL)
		return (ENOMEM);
	device_quiet(cf_dev);

	return (device_probe_and_attach(cf_dev));
}
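
/*
 * Typical usage (illustrative): a hardware back end such as the Enhanced
 * SpeedStep (est) or AMD PowerNow! (powernow) driver calls
 * cpufreq_register() from its attach routine, which creates the shared
 * "cpufreq" child under the corresponding cpu device (or just resets its
 * cached max_mhz if one already exists), and calls cpufreq_unregister()
 * when it detaches.
 */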

int
cpufreq_unregister(device_t dev)
{
	device_t cf_dev, *devs;
	int cfcount, devcount, error, i, type;

	/*
	 * If this is the last cpufreq child device, remove the control
	 * device as well.  We identify cpufreq children by calling a method
	 * they support.
	 */
	error = device_get_children(device_get_parent(dev), &devs, &devcount);
	if (error)
		return (error);
	cf_dev = device_find_child(device_get_parent(dev), "cpufreq", -1);
	if (cf_dev == NULL) {
		device_printf(dev,
	"warning: cpufreq_unregister called with no cpufreq device active\n");
		return (0);
	}
	cfcount = 0;
	for (i = 0; i < devcount; i++) {
		if (!device_is_attached(devs[i]))
			continue;
		if (CPUFREQ_DRV_TYPE(devs[i], &type) == 0)
			cfcount++;
	}
	if (cfcount <= 1)
		device_delete_child(device_get_parent(cf_dev), cf_dev);
	free(devs, M_TEMP);

	return (0);
}

int
cpufreq_settings_changed(device_t dev)
{

	EVENTHANDLER_INVOKE(cpufreq_levels_changed,
	    device_get_unit(device_get_parent(dev)));
	return (0);
}