/*-
 * Copyright (c) 2003-2005 Nate Lawson (SDG)
 * Copyright (c) 2001 Michael Smith
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include "opt_acpi.h"
#include <sys/param.h>
#include <sys/bus.h>
#include <sys/cpu.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/pcpu.h>
#include <sys/power.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/sbuf.h>
#include <sys/smp.h>

#include <dev/pci/pcivar.h>
#include <machine/atomic.h>
#include <machine/bus.h>
#if defined(__amd64__) || defined(__i386__)
#include <machine/clock.h>
#include <machine/specialreg.h>
#include <machine/md_var.h>
#endif
#include <sys/rman.h>

#include <contrib/dev/acpica/include/acpi.h>
#include <contrib/dev/acpica/include/accommon.h>

#include <dev/acpica/acpivar.h>

/*
 * Support for ACPI Processor devices, including C[1-3] sleep states.
 */

/* Hooks for the ACPI CA debugging infrastructure */
#define _COMPONENT  ACPI_PROCESSOR
ACPI_MODULE_NAME("PROCESSOR")

struct acpi_cx {
    struct resource *p_lvlx;    /* Register to read to enter state. */
    uint32_t    type;           /* C1-3 (C4 and up treated as C3). */
    uint32_t    trans_lat;      /* Transition latency (usec). */
    uint32_t    power;          /* Power consumed (mW). */
    int         res_type;       /* Resource type for p_lvlx. */
    int         res_rid;        /* Resource ID for p_lvlx. */
    bool        do_mwait;
    uint32_t    mwait_hint;
    bool        mwait_hw_coord;
    bool        mwait_bm_avoidance;
};
#define MAX_CX_STATES   8

struct acpi_cpu_softc {
    device_t        cpu_dev;
    ACPI_HANDLE     cpu_handle;
    struct pcpu     *cpu_pcpu;
    uint32_t        cpu_acpi_id;    /* ACPI processor id */
    uint32_t        cpu_p_blk;      /* ACPI P_BLK location */
    uint32_t        cpu_p_blk_len;  /* P_BLK length (must be 6). */
    struct acpi_cx  cpu_cx_states[MAX_CX_STATES];
    int             cpu_cx_count;   /* Number of valid Cx states. */
    int             cpu_prev_sleep; /* Last idle sleep duration. */
    int             cpu_features;   /* Child driver supported features. */
    /* Runtime state. */
    int             cpu_non_c2;     /* Index of lowest non-C2 state. */
    int             cpu_non_c3;     /* Index of lowest non-C3 state. */
    u_int           cpu_cx_stats[MAX_CX_STATES]; /* Cx usage history. */
    /* Values for sysctl. */
    struct sysctl_ctx_list cpu_sysctl_ctx;
    struct sysctl_oid *cpu_sysctl_tree;
    int             cpu_cx_lowest;
    int             cpu_cx_lowest_lim;
    int             cpu_disable_idle; /* Disable entry to idle function. */
    char            cpu_cx_supported[64];
};

struct acpi_cpu_device {
    struct resource_list ad_rl;
};

#define CPU_GET_REG(reg, width)                                         \
    (bus_space_read_ ## width(rman_get_bustag((reg)),                   \
        rman_get_bushandle((reg)), 0))
#define CPU_SET_REG(reg, width, val)                                    \
    (bus_space_write_ ## width(rman_get_bustag((reg)),                  \
        rman_get_bushandle((reg)), 0, (val)))
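
/*
 * These wrap bus_space accesses to the single-register p_lvlx resource;
 * e.g. CPU_GET_REG(cx_next->p_lvlx, 1) in acpi_cpu_idle() issues the
 * one-byte read of a P_LVLx register that requests entry to the
 * corresponding Cx state.
 */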

#define ACPI_NOTIFY_CX_STATES   0x81    /* _CST changed. */

#define CPU_QUIRK_NO_C3         (1<<0)  /* C3-type states are not usable. */
#define CPU_QUIRK_NO_BM_CTRL    (1<<2)  /* No bus mastering control. */

#define PCI_VENDOR_INTEL        0x8086
#define PCI_DEVICE_82371AB_3    0x7113  /* PIIX4 chipset for quirks. */
#define PCI_REVISION_A_STEP     0
#define PCI_REVISION_B_STEP     1
#define PCI_REVISION_4E         2
#define PCI_REVISION_4M         3
#define PIIX4_DEVACTB_REG       0x58
#define PIIX4_BRLD_EN_IRQ0      (1<<0)
#define PIIX4_BRLD_EN_IRQ       (1<<1)
#define PIIX4_BRLD_EN_IRQ8      (1<<5)
#define PIIX4_STOP_BREAK_MASK   (PIIX4_BRLD_EN_IRQ0 | PIIX4_BRLD_EN_IRQ | \
                                 PIIX4_BRLD_EN_IRQ8)
#define PIIX4_PCNTRL_BST_EN     (1<<10)

#define CST_FFH_VENDOR_INTEL    1
#define CST_FFH_INTEL_CL_C1IO   1
#define CST_FFH_INTEL_CL_MWAIT  2
#define CST_FFH_MWAIT_HW_COORD  0x0001
#define CST_FFH_MWAIT_BM_AVOID  0x0002
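
/*
 * Note: for a _CST entry whose register lives in functional fixed
 * hardware (FFixedHW), the Intel vendor-specific encoding places the
 * vendor and class codes above in the register descriptor, the MWAIT
 * hint in its address field, and the HW_COORD/BM_AVOID flags in its
 * access-size field; acpi_PkgFFH_IntelCpu() below unpacks them.
 */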

#define CPUDEV_DEVICE_ID    "ACPI0007"

/* Knob to disable acpi_cpu devices */
bool acpi_cpu_disabled = false;

/* Platform hardware resource information. */
static uint32_t  cpu_smi_cmd;   /* Value to write to SMI_CMD. */
static uint8_t   cpu_cst_cnt;   /* Indicate we are _CST aware. */
static int       cpu_quirks;    /* Indicate any hardware bugs. */

/* Values for sysctl. */
static struct sysctl_ctx_list cpu_sysctl_ctx;
static struct sysctl_oid *cpu_sysctl_tree;
static int       cpu_cx_generic;
static int       cpu_cx_lowest_lim;
#if defined(__i386__) || defined(__amd64__)
static bool      cppc_notify;
#endif

static struct acpi_cpu_softc **cpu_softc;
ACPI_SERIAL_DECL(cpu, "ACPI CPU");

static int   acpi_cpu_probe(device_t dev);
static int   acpi_cpu_attach(device_t dev);
static int   acpi_cpu_suspend(device_t dev);
static int   acpi_cpu_resume(device_t dev);
static int   acpi_pcpu_get_id(device_t dev, uint32_t acpi_id,
                 u_int *cpu_id);
static struct resource_list *acpi_cpu_get_rlist(device_t dev, device_t child);
static device_t acpi_cpu_add_child(device_t dev, u_int order, const char *name,
                 int unit);
static int   acpi_cpu_read_ivar(device_t dev, device_t child, int index,
                 uintptr_t *result);
static int   acpi_cpu_shutdown(device_t dev);
static void  acpi_cpu_cx_probe(struct acpi_cpu_softc *sc);
static void  acpi_cpu_generic_cx_probe(struct acpi_cpu_softc *sc);
static int   acpi_cpu_cx_cst(struct acpi_cpu_softc *sc);
static void  acpi_cpu_startup(void *arg);
static void  acpi_cpu_startup_cx(struct acpi_cpu_softc *sc);
static void  acpi_cpu_cx_list(struct acpi_cpu_softc *sc);
#if defined(__i386__) || defined(__amd64__)
static void  acpi_cpu_idle(sbintime_t sbt);
#endif
static void  acpi_cpu_notify(ACPI_HANDLE h, UINT32 notify, void *context);
static void  acpi_cpu_quirks(void);
static void  acpi_cpu_quirks_piix4(void);
static int   acpi_cpu_usage_sysctl(SYSCTL_HANDLER_ARGS);
static int   acpi_cpu_usage_counters_sysctl(SYSCTL_HANDLER_ARGS);
static int   acpi_cpu_set_cx_lowest(struct acpi_cpu_softc *sc);
static int   acpi_cpu_cx_lowest_sysctl(SYSCTL_HANDLER_ARGS);
static int   acpi_cpu_global_cx_lowest_sysctl(SYSCTL_HANDLER_ARGS);
#if defined(__i386__) || defined(__amd64__)
static int   acpi_cpu_method_sysctl(SYSCTL_HANDLER_ARGS);
#endif

static device_method_t acpi_cpu_methods[] = {
    /* Device interface */
    DEVMETHOD(device_probe,     acpi_cpu_probe),
    DEVMETHOD(device_attach,    acpi_cpu_attach),
    DEVMETHOD(device_detach,    bus_generic_detach),
    DEVMETHOD(device_shutdown,  acpi_cpu_shutdown),
    DEVMETHOD(device_suspend,   acpi_cpu_suspend),
    DEVMETHOD(device_resume,    acpi_cpu_resume),

    /* Bus interface */
    DEVMETHOD(bus_add_child,    acpi_cpu_add_child),
    DEVMETHOD(bus_read_ivar,    acpi_cpu_read_ivar),
    DEVMETHOD(bus_get_resource_list, acpi_cpu_get_rlist),
    DEVMETHOD(bus_get_resource, bus_generic_rl_get_resource),
    DEVMETHOD(bus_set_resource, bus_generic_rl_set_resource),
    DEVMETHOD(bus_alloc_resource, bus_generic_rl_alloc_resource),
    DEVMETHOD(bus_release_resource, bus_generic_rl_release_resource),
    DEVMETHOD(bus_activate_resource, bus_generic_activate_resource),
    DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource),
    DEVMETHOD(bus_setup_intr,   bus_generic_setup_intr),
    DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr),

    DEVMETHOD_END
};

static driver_t acpi_cpu_driver = {
    "cpu",
    acpi_cpu_methods,
    sizeof(struct acpi_cpu_softc),
};

DRIVER_MODULE(cpu, acpi, acpi_cpu_driver, 0, 0);
MODULE_DEPEND(cpu, acpi, 1, 1, 1);

static int
acpi_cpu_probe(device_t dev)
{
    static char *cpudev_ids[] = { CPUDEV_DEVICE_ID, NULL };
    int acpi_id, cpu_id;
    ACPI_BUFFER buf;
    ACPI_HANDLE handle;
    ACPI_OBJECT *obj;
    ACPI_STATUS status;
    ACPI_OBJECT_TYPE type;

    if (acpi_disabled("cpu") || acpi_cpu_disabled)
        return (ENXIO);
    type = acpi_get_type(dev);
    if (type != ACPI_TYPE_PROCESSOR && type != ACPI_TYPE_DEVICE)
        return (ENXIO);
    if (type == ACPI_TYPE_DEVICE &&
        ACPI_ID_PROBE(device_get_parent(dev), dev, cpudev_ids, NULL) >= 0)
        return (ENXIO);

    handle = acpi_get_handle(dev);
    if (cpu_softc == NULL)
        cpu_softc = malloc(sizeof(struct acpi_cpu_softc *) *
            (mp_maxid + 1), M_TEMP /* XXX */, M_WAITOK | M_ZERO);

    if (type == ACPI_TYPE_PROCESSOR) {
        /* Get our Processor object. */
        buf.Pointer = NULL;
        buf.Length = ACPI_ALLOCATE_BUFFER;
        status = AcpiEvaluateObject(handle, NULL, NULL, &buf);
        if (ACPI_FAILURE(status)) {
            device_printf(dev, "probe failed to get Processor obj - %s\n",
                AcpiFormatException(status));
            return (ENXIO);
        }
        obj = (ACPI_OBJECT *)buf.Pointer;
        if (obj->Type != ACPI_TYPE_PROCESSOR) {
            device_printf(dev, "Processor object has bad type %d\n",
                obj->Type);
            AcpiOsFree(obj);
            return (ENXIO);
        }

        /*
         * Find the processor associated with our unit.  We could use the
         * ProcId as a key; however, some boxes do not have the same values
         * in their Processor object as the ProcId values in the MADT.
         */
        acpi_id = obj->Processor.ProcId;
        AcpiOsFree(obj);
    } else {
        status = acpi_GetInteger(handle, "_UID", &acpi_id);
        if (ACPI_FAILURE(status)) {
            device_printf(dev, "Device object has bad value - %s\n",
                AcpiFormatException(status));
            return (ENXIO);
        }
    }
    if (acpi_pcpu_get_id(dev, acpi_id, &cpu_id) != 0) {
        if (bootverbose && (type != ACPI_TYPE_PROCESSOR || acpi_id != 255))
            printf("ACPI: Processor %s (ACPI ID %u) ignored\n",
                acpi_name(acpi_get_handle(dev)), acpi_id);
        return (ENXIO);
    }

    if (device_set_unit(dev, cpu_id) != 0)
        return (ENXIO);

    device_set_desc(dev, "ACPI CPU");

    if (!bootverbose && device_get_unit(dev) != 0) {
        device_quiet(dev);
        device_quiet_children(dev);
    }

    return (BUS_PROBE_DEFAULT);
}

static int
acpi_cpu_attach(device_t dev)
{
    ACPI_BUFFER buf;
    ACPI_OBJECT arg, *obj;
    ACPI_OBJECT_LIST arglist;
    struct pcpu *pcpu_data;
    struct acpi_cpu_softc *sc;
    struct acpi_softc *acpi_sc;
    ACPI_STATUS status;
    u_int features;
    int cpu_id, drv_count, i;
    driver_t **drivers;
    uint32_t cap_set[3];

    /* UUID needed by _OSC evaluation */
    static uint8_t cpu_oscuuid[16] = { 0x16, 0xA6, 0x77, 0x40, 0x0C, 0x29,
                                       0xBE, 0x47, 0x9E, 0xBD, 0xD8, 0x70,
                                       0x58, 0x71, 0x39, 0x53 };
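    /* The bytes above are 4077A616-290C-47BE-9EBD-D87058713953 in GUID byte order. */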

    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    sc = device_get_softc(dev);
    sc->cpu_dev = dev;
    sc->cpu_handle = acpi_get_handle(dev);
    cpu_id = device_get_unit(dev);
    cpu_softc[cpu_id] = sc;
    pcpu_data = pcpu_find(cpu_id);
    pcpu_data->pc_device = dev;
    sc->cpu_pcpu = pcpu_data;
    cpu_smi_cmd = AcpiGbl_FADT.SmiCommand;
    cpu_cst_cnt = AcpiGbl_FADT.CstControl;

    if (acpi_get_type(dev) == ACPI_TYPE_PROCESSOR) {
        buf.Pointer = NULL;
        buf.Length = ACPI_ALLOCATE_BUFFER;
        status = AcpiEvaluateObject(sc->cpu_handle, NULL, NULL, &buf);
        if (ACPI_FAILURE(status)) {
            device_printf(dev, "attach failed to get Processor obj - %s\n",
                AcpiFormatException(status));
            return (ENXIO);
        }
        obj = (ACPI_OBJECT *)buf.Pointer;
        sc->cpu_p_blk = obj->Processor.PblkAddress;
        sc->cpu_p_blk_len = obj->Processor.PblkLength;
        sc->cpu_acpi_id = obj->Processor.ProcId;
        AcpiOsFree(obj);
    } else {
        KASSERT(acpi_get_type(dev) == ACPI_TYPE_DEVICE,
            ("Unexpected ACPI object"));
        status = acpi_GetInteger(sc->cpu_handle, "_UID", &sc->cpu_acpi_id);
        if (ACPI_FAILURE(status)) {
            device_printf(dev, "Device object has bad value - %s\n",
                AcpiFormatException(status));
            return (ENXIO);
        }
        sc->cpu_p_blk = 0;
        sc->cpu_p_blk_len = 0;
    }
    ACPI_DEBUG_PRINT((ACPI_DB_INFO, "acpi_cpu%d: P_BLK at %#x/%d\n",
        device_get_unit(dev), sc->cpu_p_blk, sc->cpu_p_blk_len));

    /*
     * If this is the first cpu we attach, create and initialize the generic
     * resources that will be used by all acpi cpu devices.
     */
    if (device_get_unit(dev) == 0) {
        /* Assume we won't be using generic Cx mode by default. */
        cpu_cx_generic = FALSE;

        /* Install the hw.acpi.cpu sysctl tree. */
        acpi_sc = acpi_device_get_parent_softc(dev);
        sysctl_ctx_init(&cpu_sysctl_ctx);
        cpu_sysctl_tree = SYSCTL_ADD_NODE(&cpu_sysctl_ctx,
            SYSCTL_CHILDREN(acpi_sc->acpi_sysctl_tree), OID_AUTO, "cpu",
            CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "node for CPU children");

#if defined(__i386__) || defined(__amd64__)
        /* Add a sysctl handler to control registering for CPPC notifications. */
        cppc_notify = 1;
        SYSCTL_ADD_BOOL(&cpu_sysctl_ctx, SYSCTL_CHILDREN(cpu_sysctl_tree),
            OID_AUTO, "cppc_notify", CTLFLAG_RDTUN | CTLFLAG_MPSAFE,
            &cppc_notify, 0, "Register for CPPC Notifications");
#endif
    }

    /*
     * Before calling any CPU methods, collect child driver feature hints
     * and notify ACPI of them.  We support unified SMP power control,
     * so advertise this ourselves.  Note this is not the same as independent
     * SMP control, where each CPU can have different settings.
     */
    sc->cpu_features = ACPI_CAP_SMP_SAME | ACPI_CAP_SMP_SAME_C3 |
        ACPI_CAP_C1_IO_HALT;

#if defined(__i386__) || defined(__amd64__)
    /*
     * Ask for MWAIT modes if not disabled and interrupts work
     * reasonably with MWAIT.
     */
    if (!acpi_disabled("mwait") && cpu_mwait_usable())
        sc->cpu_features |= ACPI_CAP_SMP_C1_NATIVE | ACPI_CAP_SMP_C3_NATIVE;

    /*
     * Work around a lingering SMM bug which leads to freezes when handling
     * CPPC notifications.  Tell the SMM we will handle any CPPC notifications.
     */
    if ((cpu_power_eax & CPUTPM1_HWP_NOTIFICATION) && cppc_notify)
        sc->cpu_features |= ACPI_CAP_INTR_CPPC;
#endif

    if (devclass_get_drivers(device_get_devclass(dev), &drivers,
        &drv_count) == 0) {
        for (i = 0; i < drv_count; i++) {
            if (ACPI_GET_FEATURES(drivers[i], &features) == 0)
                sc->cpu_features |= features;
        }
        free(drivers, M_TEMP);
    }

    /*
     * CPU capabilities are specified in
     * Intel Processor Vendor-Specific ACPI Interface Specification.
     */
    if (sc->cpu_features) {
        cap_set[1] = sc->cpu_features;
        status = acpi_EvaluateOSC(sc->cpu_handle, cpu_oscuuid, 1, 2, cap_set,
            cap_set, false);
        if (ACPI_SUCCESS(status)) {
            if (cap_set[0] != 0)
                device_printf(dev, "_OSC returned status %#x\n", cap_set[0]);
        } else {
            arglist.Pointer = &arg;
            arglist.Count = 1;
            arg.Type = ACPI_TYPE_BUFFER;
            arg.Buffer.Length = sizeof(cap_set);
            arg.Buffer.Pointer = (uint8_t *)cap_set;
            cap_set[0] = 1; /* revision */
            cap_set[1] = 1; /* number of capabilities integers */
            cap_set[2] = sc->cpu_features;
            AcpiEvaluateObject(sc->cpu_handle, "_PDC", &arglist, NULL);
        }
    }

    /* Probe for Cx state support. */
    acpi_cpu_cx_probe(sc);

    return (0);
}

static void
acpi_cpu_postattach(void *unused __unused)
{
    struct acpi_cpu_softc *sc;
    int attached = 0, i;

    if (cpu_softc == NULL)
        return;

    bus_topo_lock();
    CPU_FOREACH(i) {
        if ((sc = cpu_softc[i]) != NULL)
            bus_generic_probe(sc->cpu_dev);
    }
    CPU_FOREACH(i) {
        if ((sc = cpu_softc[i]) != NULL) {
            bus_generic_attach(sc->cpu_dev);
            attached = 1;
        }
    }
    bus_topo_unlock();

    if (attached) {
#ifdef EARLY_AP_STARTUP
        acpi_cpu_startup(NULL);
#else
        /* Queue the post cpu-probing task handler. */
        AcpiOsExecute(OSL_NOTIFY_HANDLER, acpi_cpu_startup, NULL);
#endif
    }
}

SYSINIT(acpi_cpu, SI_SUB_CONFIGURE, SI_ORDER_MIDDLE,
    acpi_cpu_postattach, NULL);

static void
disable_idle(struct acpi_cpu_softc *sc)
{
    cpuset_t cpuset;

    CPU_SETOF(sc->cpu_pcpu->pc_cpuid, &cpuset);
    sc->cpu_disable_idle = TRUE;

    /*
     * Ensure that the CPU is not in an idle state or in acpi_cpu_idle().
     * Note that this code depends on the fact that the rendezvous IPI
     * cannot be delivered while interrupts are disabled; acpi_cpu_idle()
     * runs in such a context and re-enables interrupts only right before
     * it returns.
     */
    smp_rendezvous_cpus(cpuset, smp_no_rendezvous_barrier, NULL,
        smp_no_rendezvous_barrier, NULL);
}

static void
enable_idle(struct acpi_cpu_softc *sc)
{

    if (sc->cpu_cx_count > sc->cpu_non_c3 + 1 &&
        (cpu_quirks & CPU_QUIRK_NO_BM_CTRL) == 0)
        AcpiWriteBitRegister(ACPI_BITREG_BUS_MASTER_RLD, 1);
    sc->cpu_disable_idle = FALSE;
}

#if defined(__i386__) || defined(__amd64__)
static int
is_idle_disabled(struct acpi_cpu_softc *sc)
{

    return (sc->cpu_disable_idle);
}
#endif

/*
 * Disable any entry to the idle function during suspend and re-enable it
 * during resume.
 */
static int
acpi_cpu_suspend(device_t dev)
{
    int error;

    error = bus_generic_suspend(dev);
    if (error)
        return (error);
    disable_idle(device_get_softc(dev));
    return (0);
}

static int
acpi_cpu_resume(device_t dev)
{

    enable_idle(device_get_softc(dev));
    return (bus_generic_resume(dev));
}

/*
 * Find the processor associated with a given ACPI ID.
 */
static int
acpi_pcpu_get_id(device_t dev, uint32_t acpi_id, u_int *cpu_id)
{
    struct pcpu *pc;
    u_int i;

    CPU_FOREACH(i) {
        pc = pcpu_find(i);
        if (pc->pc_acpi_id == acpi_id) {
            *cpu_id = pc->pc_cpuid;
            return (0);
        }
    }

    /*
     * If pc_acpi_id for CPU 0 is not initialized (e.g. on a non-APIC
     * UP box), use the ACPI ID from the first processor we find.
     */
    if (mp_ncpus == 1) {
        pc = pcpu_find(0);
        if (pc->pc_acpi_id == 0xffffffff)
            pc->pc_acpi_id = acpi_id;
        *cpu_id = 0;
        return (0);
    }

    return (ESRCH);
}

static struct resource_list *
acpi_cpu_get_rlist(device_t dev, device_t child)
{
    struct acpi_cpu_device *ad;

    ad = device_get_ivars(child);
    if (ad == NULL)
        return (NULL);
    return (&ad->ad_rl);
}

static device_t
acpi_cpu_add_child(device_t dev, u_int order, const char *name, int unit)
{
    struct acpi_cpu_device *ad;
    device_t child;

    if ((ad = malloc(sizeof(*ad), M_TEMP, M_NOWAIT | M_ZERO)) == NULL)
        return (NULL);

    resource_list_init(&ad->ad_rl);

    child = device_add_child_ordered(dev, order, name, unit);
    if (child != NULL)
        device_set_ivars(child, ad);
    else
        free(ad, M_TEMP);
    return (child);
}

static int
acpi_cpu_read_ivar(device_t dev, device_t child, int index, uintptr_t *result)
{
    struct acpi_cpu_softc *sc;

    sc = device_get_softc(dev);
    switch (index) {
    case ACPI_IVAR_HANDLE:
        *result = (uintptr_t)sc->cpu_handle;
        break;
    case CPU_IVAR_PCPU:
        *result = (uintptr_t)sc->cpu_pcpu;
        break;
#if defined(__amd64__) || defined(__i386__)
    case CPU_IVAR_NOMINAL_MHZ:
        if (tsc_is_invariant) {
            *result = (uintptr_t)(atomic_load_acq_64(&tsc_freq) / 1000000);
            break;
        }
        /* FALLTHROUGH */
#endif
    default:
        return (ENOENT);
    }
    return (0);
}

static int
acpi_cpu_shutdown(device_t dev)
{
    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    /* Allow children to shutdown first. */
    bus_generic_shutdown(dev);

    /*
     * Disable any entry to the idle function.
     */
    disable_idle(device_get_softc(dev));

    /*
     * CPU devices are not truly detached and remain referenced,
     * so their resources are not freed.
     */

    return_VALUE (0);
}

static void
acpi_cpu_cx_probe(struct acpi_cpu_softc *sc)
{
    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    /* Use an initial sleep value of 1 sec. to start with the lowest idle state. */
    sc->cpu_prev_sleep = 1000000;
    sc->cpu_cx_lowest = 0;
    sc->cpu_cx_lowest_lim = 0;

    /*
     * Check for the ACPI 2.0 _CST sleep states object.  If we can't find
     * any, we'll revert to the generic FADT/P_BLK Cx control method, which
     * is handled by acpi_cpu_startup.  Probing for generic Cx states must
     * be deferred until after all CPUs in the system have been probed,
     * since we may already have found CPUs with valid _CST packages.
     */
    if (!cpu_cx_generic && acpi_cpu_cx_cst(sc) != 0) {
        /*
         * We were unable to find a _CST package for this cpu or there
         * was an error parsing it.  Switch back to generic mode.
         */
        cpu_cx_generic = TRUE;
        if (bootverbose)
            device_printf(sc->cpu_dev, "switching to generic Cx mode\n");
    }

    /*
     * TODO: the _CSD package should be checked here.
     */
}

static void
acpi_cpu_generic_cx_probe(struct acpi_cpu_softc *sc)
{
    ACPI_GENERIC_ADDRESS gas;
    struct acpi_cx *cx_ptr;

    sc->cpu_cx_count = 0;
    cx_ptr = sc->cpu_cx_states;

    /* Use an initial sleep value of 1 sec. to start with the lowest idle state. */
    sc->cpu_prev_sleep = 1000000;

    /* C1 has been required since just after ACPI 1.0. */
    cx_ptr->type = ACPI_STATE_C1;
    cx_ptr->trans_lat = 0;
    cx_ptr++;
    sc->cpu_non_c2 = sc->cpu_cx_count;
    sc->cpu_non_c3 = sc->cpu_cx_count;
    sc->cpu_cx_count++;

    /*
     * The spec says P_BLK must be 6 bytes long.  However, some systems
     * use it to indicate a fractional set of features present, so we
     * treat a length of 5 as meaning C2 is supported.  Some may also
     * have a value of 7 to indicate another C3, but most use _CST for
     * this (as required) and having "only" C1-C3 is not a hardship.
     */
    if (sc->cpu_p_blk_len < 5)
        return;
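
    /*
     * P_BLK layout: the 4-byte P_CNT register at offset 0, followed by
     * the one-byte P_LVL2 and P_LVL3 registers at offsets 4 and 5.
     * Reading a P_LVLx register enters the corresponding Cx state,
     * hence the addresses computed below.
     */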

    /* Validate and allocate resources for C2 (P_LVL2). */
    gas.SpaceId = ACPI_ADR_SPACE_SYSTEM_IO;
    gas.BitWidth = 8;
    if (AcpiGbl_FADT.C2Latency <= 100) {
        gas.Address = sc->cpu_p_blk + 4;
        cx_ptr->res_rid = 0;
        acpi_bus_alloc_gas(sc->cpu_dev, &cx_ptr->res_type, &cx_ptr->res_rid,
            &gas, &cx_ptr->p_lvlx, RF_SHAREABLE);
        if (cx_ptr->p_lvlx != NULL) {
            cx_ptr->type = ACPI_STATE_C2;
            cx_ptr->trans_lat = AcpiGbl_FADT.C2Latency;
            cx_ptr++;
            sc->cpu_non_c3 = sc->cpu_cx_count;
            sc->cpu_cx_count++;
        }
    }
    if (sc->cpu_p_blk_len < 6)
        return;

    /* Validate and allocate resources for C3 (P_LVL3). */
    if (AcpiGbl_FADT.C3Latency <= 1000 && !(cpu_quirks & CPU_QUIRK_NO_C3)) {
        gas.Address = sc->cpu_p_blk + 5;
        cx_ptr->res_rid = 1;
        acpi_bus_alloc_gas(sc->cpu_dev, &cx_ptr->res_type, &cx_ptr->res_rid,
            &gas, &cx_ptr->p_lvlx, RF_SHAREABLE);
        if (cx_ptr->p_lvlx != NULL) {
            cx_ptr->type = ACPI_STATE_C3;
            cx_ptr->trans_lat = AcpiGbl_FADT.C3Latency;
            cx_ptr++;
            sc->cpu_cx_count++;
        }
    }
}

#if defined(__i386__) || defined(__amd64__)
static void
acpi_cpu_cx_cst_mwait(struct acpi_cx *cx_ptr, uint64_t address, int accsize)
{

    cx_ptr->do_mwait = true;
    cx_ptr->mwait_hint = address & 0xffffffff;
    cx_ptr->mwait_hw_coord = (accsize & CST_FFH_MWAIT_HW_COORD) != 0;
    cx_ptr->mwait_bm_avoidance = (accsize & CST_FFH_MWAIT_BM_AVOID) != 0;
}
#endif

static void
acpi_cpu_cx_cst_free_plvlx(device_t cpu_dev, struct acpi_cx *cx_ptr)
{

    if (cx_ptr->p_lvlx == NULL)
        return;
    bus_release_resource(cpu_dev, cx_ptr->res_type, cx_ptr->res_rid,
        cx_ptr->p_lvlx);
    cx_ptr->p_lvlx = NULL;
}

/*
 * Parse a _CST package and set up its Cx states.  Since the _CST object
 * can change dynamically, our notify handler may call this function
 * to clean up and probe the new _CST package.
 */
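/*
 * A valid _CST has the form (per the ACPI specification):
 *     Package { Count, Package { Register, Type, Latency, Power }, ... }
 * where each Register is a buffer holding a Generic Address Structure.
 */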
static int
acpi_cpu_cx_cst(struct acpi_cpu_softc *sc)
{
    struct acpi_cx *cx_ptr;
    ACPI_STATUS status;
    ACPI_BUFFER buf;
    ACPI_OBJECT *top;
    ACPI_OBJECT *pkg;
    uint32_t count;
    int i;
#if defined(__i386__) || defined(__amd64__)
    uint64_t address;
    int vendor, class, accsize;
#endif

    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    buf.Pointer = NULL;
    buf.Length = ACPI_ALLOCATE_BUFFER;
    status = AcpiEvaluateObject(sc->cpu_handle, "_CST", NULL, &buf);
    if (ACPI_FAILURE(status))
        return (ENXIO);

    /* _CST is a package with a count and at least one Cx package. */
    top = (ACPI_OBJECT *)buf.Pointer;
    if (!ACPI_PKG_VALID(top, 2) || acpi_PkgInt32(top, 0, &count) != 0) {
        device_printf(sc->cpu_dev, "invalid _CST package\n");
        AcpiOsFree(buf.Pointer);
        return (ENXIO);
    }
    if (count != top->Package.Count - 1) {
        device_printf(sc->cpu_dev, "invalid _CST state count (%d != %d)\n",
            count, top->Package.Count - 1);
        count = top->Package.Count - 1;
    }
    if (count > MAX_CX_STATES) {
        device_printf(sc->cpu_dev, "_CST has too many states (%d)\n", count);
        count = MAX_CX_STATES;
    }

    sc->cpu_non_c2 = 0;
    sc->cpu_non_c3 = 0;
    sc->cpu_cx_count = 0;
    cx_ptr = sc->cpu_cx_states;

    /*
     * C1 has been required since just after ACPI 1.0.
     * Reserve the first slot for it.
     */
    cx_ptr->type = ACPI_STATE_C0;
    cx_ptr++;
    sc->cpu_cx_count++;

    /* Set up all valid states. */
    for (i = 0; i < count; i++) {
        pkg = &top->Package.Elements[i + 1];
        if (!ACPI_PKG_VALID(pkg, 4) ||
            acpi_PkgInt32(pkg, 1, &cx_ptr->type) != 0 ||
            acpi_PkgInt32(pkg, 2, &cx_ptr->trans_lat) != 0 ||
            acpi_PkgInt32(pkg, 3, &cx_ptr->power) != 0) {
            device_printf(sc->cpu_dev, "skipping invalid Cx state package\n");
            continue;
        }

        /* Validate the state to see if we should use it. */
        switch (cx_ptr->type) {
        case ACPI_STATE_C1:
            acpi_cpu_cx_cst_free_plvlx(sc->cpu_dev, cx_ptr);
#if defined(__i386__) || defined(__amd64__)
            if (acpi_PkgFFH_IntelCpu(pkg, 0, &vendor, &class, &address,
                &accsize) == 0 && vendor == CST_FFH_VENDOR_INTEL) {
                if (class == CST_FFH_INTEL_CL_C1IO) {
                    /* C1 I/O then Halt */
                    cx_ptr->res_rid = sc->cpu_cx_count;
                    bus_set_resource(sc->cpu_dev, SYS_RES_IOPORT,
                        cx_ptr->res_rid, address, 1);
                    cx_ptr->p_lvlx = bus_alloc_resource_any(sc->cpu_dev,
                        SYS_RES_IOPORT, &cx_ptr->res_rid, RF_ACTIVE |
                        RF_SHAREABLE);
                    if (cx_ptr->p_lvlx == NULL) {
                        bus_delete_resource(sc->cpu_dev, SYS_RES_IOPORT,
                            cx_ptr->res_rid);
                        device_printf(sc->cpu_dev,
                            "C1 I/O failed to allocate port %d, "
                            "degrading to C1 Halt\n", (int)address);
                    }
                } else if (class == CST_FFH_INTEL_CL_MWAIT) {
                    acpi_cpu_cx_cst_mwait(cx_ptr, address, accsize);
                }
            }
#endif
            if (sc->cpu_cx_states[0].type == ACPI_STATE_C0) {
                /* This is the first C1 state.  Use the reserved slot. */
                sc->cpu_cx_states[0] = *cx_ptr;
            } else {
                sc->cpu_non_c2 = sc->cpu_cx_count;
                sc->cpu_non_c3 = sc->cpu_cx_count;
                cx_ptr++;
                sc->cpu_cx_count++;
            }
            continue;
        case ACPI_STATE_C2:
            sc->cpu_non_c3 = sc->cpu_cx_count;
            break;
        case ACPI_STATE_C3:
        default:
            if ((cpu_quirks & CPU_QUIRK_NO_C3) != 0) {
                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                    "acpi_cpu%d: C3[%d] not available.\n",
                    device_get_unit(sc->cpu_dev), i));
                continue;
            }
            break;
        }

        /* Free up any previous register. */
        acpi_cpu_cx_cst_free_plvlx(sc->cpu_dev, cx_ptr);

        /* Allocate the control register for C2 or C3. */
#if defined(__i386__) || defined(__amd64__)
        if (acpi_PkgFFH_IntelCpu(pkg, 0, &vendor, &class, &address,
            &accsize) == 0 && vendor == CST_FFH_VENDOR_INTEL &&
            class == CST_FFH_INTEL_CL_MWAIT) {
            /* Native C State Instruction use (mwait) */
            acpi_cpu_cx_cst_mwait(cx_ptr, address, accsize);
            ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                "acpi_cpu%d: Got C%d/mwait - %d latency\n",
                device_get_unit(sc->cpu_dev), cx_ptr->type,
                cx_ptr->trans_lat));
            cx_ptr++;
            sc->cpu_cx_count++;
        } else
#endif
        {
            cx_ptr->res_rid = sc->cpu_cx_count;
            acpi_PkgGas(sc->cpu_dev, pkg, 0, &cx_ptr->res_type,
                &cx_ptr->res_rid, &cx_ptr->p_lvlx, RF_SHAREABLE);
            if (cx_ptr->p_lvlx) {
                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                    "acpi_cpu%d: Got C%d - %d latency\n",
                    device_get_unit(sc->cpu_dev), cx_ptr->type,
                    cx_ptr->trans_lat));
                cx_ptr++;
                sc->cpu_cx_count++;
            }
        }
    }
    AcpiOsFree(buf.Pointer);

    /* If a C1 state was not found, we need one now. */
    cx_ptr = sc->cpu_cx_states;
    if (cx_ptr->type == ACPI_STATE_C0) {
        cx_ptr->type = ACPI_STATE_C1;
        cx_ptr->trans_lat = 0;
    }

    return (0);
}

/*
 * Call this *after* all CPUs have been attached.
 */
static void
acpi_cpu_startup(void *arg)
{
    struct acpi_cpu_softc *sc;
    int i;

    /*
     * Set up any quirks that might be necessary now that we have probed
     * all the CPUs.
     */
    acpi_cpu_quirks();

    if (cpu_cx_generic) {
        /*
         * We are using generic Cx mode, so probe for available Cx states
         * for all processors.
         */
        CPU_FOREACH(i) {
            if ((sc = cpu_softc[i]) != NULL)
                acpi_cpu_generic_cx_probe(sc);
        }
    } else {
        /*
         * We are using _CST mode, so remove C3 states if necessary.
         * Since we now know for sure that we will be using _CST mode,
         * install our notify handler.
         */
        CPU_FOREACH(i) {
            if ((sc = cpu_softc[i]) == NULL)
                continue;
            if (cpu_quirks & CPU_QUIRK_NO_C3) {
                sc->cpu_cx_count = min(sc->cpu_cx_count,
                    sc->cpu_non_c3 + 1);
            }
            AcpiInstallNotifyHandler(sc->cpu_handle, ACPI_DEVICE_NOTIFY,
                acpi_cpu_notify, sc);
        }
    }

    /* Perform Cx final initialization. */
    CPU_FOREACH(i) {
        if ((sc = cpu_softc[i]) != NULL)
            acpi_cpu_startup_cx(sc);
    }

    /* Add a sysctl handler for the global lowest Cx setting. */
    SYSCTL_ADD_PROC(&cpu_sysctl_ctx, SYSCTL_CHILDREN(cpu_sysctl_tree),
        OID_AUTO, "cx_lowest", CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE,
        NULL, 0, acpi_cpu_global_cx_lowest_sysctl, "A",
        "Global lowest Cx sleep state to use");

    /* Take over idling from cpu_idle_default(). */
    cpu_cx_lowest_lim = 0;
    CPU_FOREACH(i) {
        if ((sc = cpu_softc[i]) != NULL)
            enable_idle(sc);
    }
#if defined(__i386__) || defined(__amd64__)
    cpu_idle_hook = acpi_cpu_idle;
#endif
}

static void
acpi_cpu_cx_list(struct acpi_cpu_softc *sc)
{
    struct sbuf sb;
    int i;

    /*
     * Set up the list of Cx states.
     */
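    /* Entries are formatted as "C<n>/<ACPI type>/<latency in usec>". */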
    sbuf_new(&sb, sc->cpu_cx_supported, sizeof(sc->cpu_cx_supported),
        SBUF_FIXEDLEN);
    for (i = 0; i < sc->cpu_cx_count; i++)
        sbuf_printf(&sb, "C%d/%d/%d ", i + 1, sc->cpu_cx_states[i].type,
            sc->cpu_cx_states[i].trans_lat);
    sbuf_trim(&sb);
    sbuf_finish(&sb);
}

static void
acpi_cpu_startup_cx(struct acpi_cpu_softc *sc)
{
    acpi_cpu_cx_list(sc);

    SYSCTL_ADD_STRING(&sc->cpu_sysctl_ctx,
        SYSCTL_CHILDREN(device_get_sysctl_tree(sc->cpu_dev)),
        OID_AUTO, "cx_supported", CTLFLAG_RD,
        sc->cpu_cx_supported, 0,
        "Cx/microsecond values for supported Cx states");
    SYSCTL_ADD_PROC(&sc->cpu_sysctl_ctx,
        SYSCTL_CHILDREN(device_get_sysctl_tree(sc->cpu_dev)), OID_AUTO,
        "cx_lowest", CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE,
        (void *)sc, 0, acpi_cpu_cx_lowest_sysctl, "A",
        "lowest Cx sleep state to use");
    SYSCTL_ADD_PROC(&sc->cpu_sysctl_ctx,
        SYSCTL_CHILDREN(device_get_sysctl_tree(sc->cpu_dev)), OID_AUTO,
        "cx_usage", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE,
        (void *)sc, 0, acpi_cpu_usage_sysctl, "A",
        "percent usage for each Cx state");
    SYSCTL_ADD_PROC(&sc->cpu_sysctl_ctx,
        SYSCTL_CHILDREN(device_get_sysctl_tree(sc->cpu_dev)), OID_AUTO,
        "cx_usage_counters", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE,
        (void *)sc, 0, acpi_cpu_usage_counters_sysctl, "A",
        "Cx sleep state counters");
#if defined(__i386__) || defined(__amd64__)
    SYSCTL_ADD_PROC(&sc->cpu_sysctl_ctx,
        SYSCTL_CHILDREN(device_get_sysctl_tree(sc->cpu_dev)), OID_AUTO,
        "cx_method", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE,
        (void *)sc, 0, acpi_cpu_method_sysctl, "A", "Cx entrance methods");
#endif

    /* Signal the platform that we can handle _CST notifications. */
    if (!cpu_cx_generic && cpu_cst_cnt != 0) {
        ACPI_LOCK(acpi);
        AcpiOsWritePort(cpu_smi_cmd, cpu_cst_cnt, 8);
        ACPI_UNLOCK(acpi);
    }
}

#if defined(__i386__) || defined(__amd64__)
/*
 * Idle the CPU in the lowest state possible.  This function is called with
 * interrupts disabled.  Note that once it re-enables interrupts, a task
 * switch can occur so do not access shared data (i.e. the softc) after
 * interrupts are re-enabled.
 */
static void
acpi_cpu_idle(sbintime_t sbt)
{
    struct acpi_cpu_softc *sc;
    struct acpi_cx *cx_next;
    uint64_t start_ticks, end_ticks;
    uint32_t start_time, end_time;
    ACPI_STATUS status;
    int bm_active, cx_next_idx, i, us;

    /*
     * Look up our CPU id to get our softc.  If it's NULL, we'll use C1
     * since there is no ACPI processor object for this CPU.  This occurs
     * for logical CPUs in the HTT case.
     */
    sc = cpu_softc[PCPU_GET(cpuid)];
    if (sc == NULL) {
        acpi_cpu_c1();
        return;
    }

    /* If disabled, take the safe path. */
    if (is_idle_disabled(sc)) {
        acpi_cpu_c1();
        return;
    }

    /* Find the lowest state that has a small enough latency. */
    us = sc->cpu_prev_sleep;
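    /*
     * sbintime_t is a fixed-point (32.32) count of seconds, so sbt >> 12
     * is roughly microseconds (2^32 / 2^12 = 2^20 ~= 10^6).
     */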
    if (sbt >= 0 && us > (sbt >> 12))
        us = (sbt >> 12);
    cx_next_idx = 0;
    if (cpu_disable_c2_sleep)
        i = min(sc->cpu_cx_lowest, sc->cpu_non_c2);
    else if (cpu_disable_c3_sleep)
        i = min(sc->cpu_cx_lowest, sc->cpu_non_c3);
    else
        i = sc->cpu_cx_lowest;
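    /*
     * Scan downward for a state whose worst-case transition latency is
     * no more than a third of the expected sleep time.
     */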
    for (; i >= 0; i--) {
        if (sc->cpu_cx_states[i].trans_lat * 3 <= us) {
            cx_next_idx = i;
            break;
        }
    }

    /*
     * Check for bus master activity.  If there was activity, clear
     * the bit and use the lowest non-C3 state.  Note that the USB
     * driver polling for new devices keeps this bit set all the
     * time if USB is loaded.
     */
    cx_next = &sc->cpu_cx_states[cx_next_idx];
    if ((cpu_quirks & CPU_QUIRK_NO_BM_CTRL) == 0 &&
        cx_next_idx > sc->cpu_non_c3 &&
        (!cx_next->do_mwait || cx_next->mwait_bm_avoidance)) {
        status = AcpiReadBitRegister(ACPI_BITREG_BUS_MASTER_STATUS,
            &bm_active);
        if (ACPI_SUCCESS(status) && bm_active != 0) {
            AcpiWriteBitRegister(ACPI_BITREG_BUS_MASTER_STATUS, 1);
            cx_next_idx = sc->cpu_non_c3;
            cx_next = &sc->cpu_cx_states[cx_next_idx];
        }
    }

    /* Select the next state and update statistics. */
    sc->cpu_cx_stats[cx_next_idx]++;
    KASSERT(cx_next->type != ACPI_STATE_C0, ("acpi_cpu_idle: C0 sleep"));

    /*
     * Execute HLT (or equivalent) and wait for an interrupt.  We can't
     * precisely calculate the time spent in C1 since the place we wake up
     * is an ISR.  Assume we slept for no more than half of a quantum,
     * unless we are called inside a critical section, delaying the
     * context switch.
     */
    if (cx_next->type == ACPI_STATE_C1) {
        start_ticks = cpu_ticks();
        if (cx_next->p_lvlx != NULL) {
            /* C1 I/O then Halt */
            CPU_GET_REG(cx_next->p_lvlx, 1);
        }
        if (cx_next->do_mwait)
            acpi_cpu_idle_mwait(cx_next->mwait_hint);
        else
            acpi_cpu_c1();
        end_ticks = cpu_ticks();
        /* acpi_cpu_c1() returns with interrupts enabled. */
        if (cx_next->do_mwait)
            ACPI_ENABLE_IRQS();
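        /*
         * Convert the cpu_ticks() delta to microseconds: scaling by
         * 2^20 (~10^6) before dividing by the tick rate yields usec.
         */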
        end_time = ((end_ticks - start_ticks) << 20) / cpu_tickrate();
        if (!cx_next->do_mwait && curthread->td_critnest == 0)
            end_time = min(end_time, 500000 / hz);
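        /* Fold into the running average: 3/4 old value, 1/4 new sample. */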
        sc->cpu_prev_sleep = (sc->cpu_prev_sleep * 3 + end_time) / 4;
        return;
    }

    /*
     * For C3, disable bus master arbitration if BM control is available,
     * since the CPU may have to wake up to handle it.  Otherwise, flush
     * the CPU cache.
     */
    if (cx_next->type == ACPI_STATE_C3) {
        if ((cpu_quirks & CPU_QUIRK_NO_BM_CTRL) == 0)
            AcpiWriteBitRegister(ACPI_BITREG_ARB_DISABLE, 1);
        else
            ACPI_FLUSH_CPU_CACHE();
    }

    /*
     * Read from P_LVLx to enter C2(+), checking time spent asleep.
     * Use the ACPI timer for measuring sleep time.  Since we need to
     * get the time very close to the CPU start/stop clock logic, this
     * is the only reliable time source.
     */
    if (cx_next->type == ACPI_STATE_C3) {
        AcpiGetTimer(&start_time);
        start_ticks = 0;
    } else {
        start_time = 0;
        start_ticks = cpu_ticks();
    }
    if (cx_next->do_mwait) {
        acpi_cpu_idle_mwait(cx_next->mwait_hint);
    } else {
        CPU_GET_REG(cx_next->p_lvlx, 1);
        /*
         * Read the end time twice.  Since it may take an arbitrary time
         * to enter the idle state, the first read may be executed before
         * the processor has stopped.  Doing it again provides enough
         * margin that we are certain to have a correct value.
         */
        AcpiGetTimer(&end_time);
    }

    if (cx_next->type == ACPI_STATE_C3)
        AcpiGetTimer(&end_time);
    else
        end_ticks = cpu_ticks();

    /* Enable bus master arbitration. */
    if (cx_next->type == ACPI_STATE_C3 &&
        (cpu_quirks & CPU_QUIRK_NO_BM_CTRL) == 0)
        AcpiWriteBitRegister(ACPI_BITREG_ARB_DISABLE, 0);
    ACPI_ENABLE_IRQS();

    if (cx_next->type == ACPI_STATE_C3)
        AcpiGetTimerDuration(start_time, end_time, &end_time);
    else
        end_time = ((end_ticks - start_ticks) << 20) / cpu_tickrate();
    sc->cpu_prev_sleep = (sc->cpu_prev_sleep * 3 + end_time) / 4;
}
#endif

/*
 * Re-evaluate the _CST object when we are notified that it changed.
 */
static void
acpi_cpu_notify(ACPI_HANDLE h, UINT32 notify, void *context)
{
    struct acpi_cpu_softc *sc = (struct acpi_cpu_softc *)context;

    if (notify != ACPI_NOTIFY_CX_STATES)
        return;

    /*
     * The C-state data for the target CPU is going to be in flux while
     * we execute acpi_cpu_cx_cst, so disable entering acpi_cpu_idle.
     * Also, multiple ACPI task queues may concurrently execute
     * notifications for the same CPU; ACPI_SERIAL is used to protect
     * against that.
     */
    ACPI_SERIAL_BEGIN(cpu);
    disable_idle(sc);

    /* Update the list of Cx states. */
    acpi_cpu_cx_cst(sc);
    acpi_cpu_cx_list(sc);
    acpi_cpu_set_cx_lowest(sc);

    enable_idle(sc);
    ACPI_SERIAL_END(cpu);

    acpi_UserNotify("PROCESSOR", sc->cpu_handle, notify);
}

static void
acpi_cpu_quirks(void)
{
    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    /*
     * Bus mastering arbitration control is needed to keep caches coherent
     * while sleeping in C3.  If it's not present but a working flush cache
     * instruction is present, flush the caches before entering C3 instead.
     * Otherwise, just disable C3 completely.
     */
    if (AcpiGbl_FADT.Pm2ControlBlock == 0 ||
        AcpiGbl_FADT.Pm2ControlLength == 0) {
        if ((AcpiGbl_FADT.Flags & ACPI_FADT_WBINVD) &&
            (AcpiGbl_FADT.Flags & ACPI_FADT_WBINVD_FLUSH) == 0) {
            cpu_quirks |= CPU_QUIRK_NO_BM_CTRL;
            ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                "acpi_cpu: no BM control, using flush cache method\n"));
        } else {
            cpu_quirks |= CPU_QUIRK_NO_C3;
            ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                "acpi_cpu: no BM control, C3 not available\n"));
        }
    }

    /*
     * If we are using generic Cx mode, C3 on multiple CPUs requires using
     * the expensive flush cache instruction.
     */
    if (cpu_cx_generic && mp_ncpus > 1) {
        cpu_quirks |= CPU_QUIRK_NO_BM_CTRL;
        ACPI_DEBUG_PRINT((ACPI_DB_INFO,
            "acpi_cpu: SMP, using flush cache mode for C3\n"));
    }

    /* Look for various quirks of the PIIX4 part. */
    acpi_cpu_quirks_piix4();
}

static void
acpi_cpu_quirks_piix4(void)
{
#ifdef __i386__
    device_t acpi_dev;
    uint32_t val;
    ACPI_STATUS status;

    acpi_dev = pci_find_device(PCI_VENDOR_INTEL, PCI_DEVICE_82371AB_3);
    if (acpi_dev != NULL) {
        switch (pci_get_revid(acpi_dev)) {
        /*
         * Disable C3 support for all PIIX4 chipsets.  Some of these parts
         * do not report the BMIDE status to the BM status register and
         * others have a livelock bug if Type-F DMA is enabled.  Linux
         * works around the BMIDE bug by reading the BM status directly,
         * but we take the simpler approach of disabling C3 for these
         * parts.
         *
         * See erratum #18 ("C3 Power State/BMIDE and Type-F DMA
         * Livelock") from the January 2002 PIIX4 specification update.
         * Applies to all PIIX4 models.
         *
         * Also, make sure that all interrupts cause a "Stop Break"
         * event to exit from the C2 state.
         * Also, BRLD_EN_BM (ACPI_BITREG_BUS_MASTER_RLD in ACPI-speak)
         * should be set to zero, otherwise it causes C2 to short-sleep.
         * PIIX4 doesn't properly support C3 and bus master activity
         * need not break out of C2.
         */
        case PCI_REVISION_A_STEP:
        case PCI_REVISION_B_STEP:
        case PCI_REVISION_4E:
        case PCI_REVISION_4M:
            cpu_quirks |= CPU_QUIRK_NO_C3;
            ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                "acpi_cpu: working around PIIX4 bug, disabling C3\n"));

            val = pci_read_config(acpi_dev, PIIX4_DEVACTB_REG, 4);
            if ((val & PIIX4_STOP_BREAK_MASK) != PIIX4_STOP_BREAK_MASK) {
                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                    "acpi_cpu: PIIX4: enabling IRQs to generate Stop Break\n"));
                val |= PIIX4_STOP_BREAK_MASK;
                pci_write_config(acpi_dev, PIIX4_DEVACTB_REG, val, 4);
            }
            status = AcpiReadBitRegister(ACPI_BITREG_BUS_MASTER_RLD, &val);
            if (ACPI_SUCCESS(status) && val != 0) {
                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                    "acpi_cpu: PIIX4: reset BRLD_EN_BM\n"));
                AcpiWriteBitRegister(ACPI_BITREG_BUS_MASTER_RLD, 0);
            }
            break;
        default:
            break;
        }
    }
#endif
}

static int
acpi_cpu_usage_sysctl(SYSCTL_HANDLER_ARGS)
{
    struct acpi_cpu_softc *sc = (struct acpi_cpu_softc *)arg1;
    struct sbuf sb;
    char buf[128];
    int error, i;
    uintmax_t fract, sum, whole;

    sbuf_new_for_sysctl(&sb, buf, sizeof(buf), req);
    sum = 0;
    for (i = 0; i < sc->cpu_cx_count; i++)
        sum += sc->cpu_cx_stats[i];
    for (i = 0; i < sc->cpu_cx_count; i++) {
        if (sum > 0) {
            whole = (uintmax_t)sc->cpu_cx_stats[i] * 100;
            fract = (whole % sum) * 100;
            sbuf_printf(&sb, "%u.%02u%% ", (u_int)(whole / sum),
                (u_int)(fract / sum));
        } else
            sbuf_printf(&sb, "0.00%% ");
    }
    sbuf_printf(&sb, "last %dus", sc->cpu_prev_sleep);
    error = sbuf_finish(&sb);
    sbuf_delete(&sb);
    return (error);
}

/*
 * XXX TODO: actually add support to count each entry/exit
 * from the Cx states.
 */
static int
acpi_cpu_usage_counters_sysctl(SYSCTL_HANDLER_ARGS)
{
    struct acpi_cpu_softc *sc = (struct acpi_cpu_softc *)arg1;
    struct sbuf sb;
    char buf[128];
    int error, i;

    sbuf_new_for_sysctl(&sb, buf, sizeof(buf), req);
    for (i = 0; i < sc->cpu_cx_count; i++) {
        if (i > 0)
            sbuf_putc(&sb, ' ');
        sbuf_printf(&sb, "%u", sc->cpu_cx_stats[i]);
    }
    error = sbuf_finish(&sb);
    sbuf_delete(&sb);
    return (error);
}

#if defined(__i386__) || defined(__amd64__)
static int
acpi_cpu_method_sysctl(SYSCTL_HANDLER_ARGS)
{
    struct acpi_cpu_softc *sc = (struct acpi_cpu_softc *)arg1;
    struct acpi_cx *cx;
    struct sbuf sb;
    char buf[128];
    int error, i;

    sbuf_new_for_sysctl(&sb, buf, sizeof(buf), req);
    for (i = 0; i < sc->cpu_cx_count; i++) {
        cx = &sc->cpu_cx_states[i];
        if (i > 0)
            sbuf_putc(&sb, ' ');
        sbuf_printf(&sb, "C%d/", i + 1);
        if (cx->do_mwait) {
            sbuf_cat(&sb, "mwait");
            if (cx->mwait_hw_coord)
                sbuf_cat(&sb, "/hwc");
            if (cx->mwait_bm_avoidance)
                sbuf_cat(&sb, "/bma");
        } else if (cx->type == ACPI_STATE_C1) {
            sbuf_cat(&sb, "hlt");
        } else {
            sbuf_cat(&sb, "io");
        }
        if (cx->type == ACPI_STATE_C1 && cx->p_lvlx != NULL)
            sbuf_cat(&sb, "/iohlt");
    }
    error = sbuf_finish(&sb);
    sbuf_delete(&sb);
    return (error);
}
#endif

static int
acpi_cpu_set_cx_lowest(struct acpi_cpu_softc *sc)
{
    int i;

    ACPI_SERIAL_ASSERT(cpu);
    sc->cpu_cx_lowest = min(sc->cpu_cx_lowest_lim, sc->cpu_cx_count - 1);

    /* If not disabling, cache the new lowest non-C3 state. */
    sc->cpu_non_c3 = 0;
    for (i = sc->cpu_cx_lowest; i >= 0; i--) {
        if (sc->cpu_cx_states[i].type < ACPI_STATE_C3) {
            sc->cpu_non_c3 = i;
            break;
        }
    }

    /* Reset the statistics counters. */
    bzero(sc->cpu_cx_stats, sizeof(sc->cpu_cx_stats));
    return (0);
}

static int
acpi_cpu_cx_lowest_sysctl(SYSCTL_HANDLER_ARGS)
{
    struct acpi_cpu_softc *sc;
    char state[8];
    int val, error;

    sc = (struct acpi_cpu_softc *)arg1;
    snprintf(state, sizeof(state), "C%d", sc->cpu_cx_lowest_lim + 1);
    error = sysctl_handle_string(oidp, state, sizeof(state), req);
    if (error != 0 || req->newptr == NULL)
        return (error);
    if (strlen(state) < 2 || toupper(state[0]) != 'C')
        return (EINVAL);
    if (strcasecmp(state, "Cmax") == 0)
        val = MAX_CX_STATES;
    else {
        val = (int)strtol(state + 1, NULL, 10);
        if (val < 1 || val > MAX_CX_STATES)
            return (EINVAL);
    }

    ACPI_SERIAL_BEGIN(cpu);
    sc->cpu_cx_lowest_lim = val - 1;
    acpi_cpu_set_cx_lowest(sc);
    ACPI_SERIAL_END(cpu);

    return (0);
}

static int
acpi_cpu_global_cx_lowest_sysctl(SYSCTL_HANDLER_ARGS)
{
    struct acpi_cpu_softc *sc;
    char state[8];
    int val, error, i;

    snprintf(state, sizeof(state), "C%d", cpu_cx_lowest_lim + 1);
    error = sysctl_handle_string(oidp, state, sizeof(state), req);
    if (error != 0 || req->newptr == NULL)
        return (error);
    if (strlen(state) < 2 || toupper(state[0]) != 'C')
        return (EINVAL);
    if (strcasecmp(state, "Cmax") == 0)
        val = MAX_CX_STATES;
    else {
        val = (int)strtol(state + 1, NULL, 10);
        if (val < 1 || val > MAX_CX_STATES)
            return (EINVAL);
    }

    /* Update the new lowest usable Cx state for all CPUs. */
    ACPI_SERIAL_BEGIN(cpu);
    cpu_cx_lowest_lim = val - 1;
    CPU_FOREACH(i) {
        if ((sc = cpu_softc[i]) == NULL)
            continue;
        sc->cpu_cx_lowest_lim = cpu_cx_lowest_lim;
        acpi_cpu_set_cx_lowest(sc);
    }
    ACPI_SERIAL_END(cpu);

    return (0);
}