1 /*- 2 * Copyright (c) 2003-2005 Nate Lawson (SDG) 3 * Copyright (c) 2001 Michael Smith 4 * All rights reserved. 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 25 * SUCH DAMAGE. 
 *
 * $FreeBSD: src/sys/dev/acpica/acpi_cpu.c,v 1.72 2008/04/12 12:06:00 rpaulo Exp $
 */

#include "opt_acpi.h"
#include <sys/param.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/globaldata.h>
#include <sys/power.h>
#include <sys/proc.h>
#include <sys/sbuf.h>
#include <sys/thread2.h>
#include <sys/serialize.h>
#include <sys/msgport2.h>
#include <sys/microtime_pcpu.h>
#include <sys/cpu_topology.h>

#include <bus/pci/pcivar.h>
#include <machine/atomic.h>
#include <machine/globaldata.h>
#include <machine/md_var.h>
#include <machine/smp.h>
#include <sys/rman.h>

#include <net/netisr2.h>
#include <net/netmsg2.h>
#include <net/if_var.h>

#include "acpi.h"
#include "acpivar.h"
#include "acpi_cpu.h"
#include "acpi_cpu_cstate.h"

/*
 * Support for ACPI Processor devices, including C[1-3+] sleep states.
 */

/* Hooks for the ACPICA debugging infrastructure */
#define _COMPONENT	ACPI_PROCESSOR
ACPI_MODULE_NAME("PROCESSOR")

/*
 * Message used to run per-cpu C-state operations (e.g. _CST reprobe,
 * BM_RLD setup) on the target cpu's netisr message port.
 */
struct netmsg_acpi_cst {
    struct netmsg_base	base;
    struct acpi_cst_softc *sc;
    int			val;
};

#define MAX_CX_STATES	 8

/* Per-cpu C-state software context. */
struct acpi_cst_softc {
    device_t		cst_dev;
    struct acpi_cpu_softc *cst_parent;
    ACPI_HANDLE		cst_handle;
    int			cst_cpuid;
    uint32_t		cst_flags;	/* ACPI_CST_FLAG_ */
    uint32_t		cst_p_blk;	/* ACPI P_BLK location */
    uint32_t		cst_p_blk_len;	/* P_BLK length (must be 6). */
    struct acpi_cst_cx	cst_cx_states[MAX_CX_STATES];
    int			cst_cx_count;	/* Number of valid Cx states. */
    int			cst_prev_sleep;	/* Last idle sleep duration. */
    /* Runtime state. */
    int			cst_non_c3;	/* Index of lowest non-C3 state. */
    u_long		cst_cx_stats[MAX_CX_STATES];/* Cx usage history. */
    /* Values for sysctl. */
    int			cst_cx_lowest;	/* Current Cx lowest */
    int			cst_cx_lowest_req; /* Requested Cx lowest */
    char		cst_cx_supported[64];
};

#define ACPI_CST_FLAG_PROBING	0x1
#define ACPI_CST_FLAG_ATTACHED	0x2
/* Match C-states of other hyperthreads on the same core */
#define ACPI_CST_FLAG_MATCH_HT	0x4

#define PCI_VENDOR_INTEL	0x8086
#define PCI_DEVICE_82371AB_3	0x7113	/* PIIX4 chipset for quirks. */
#define PCI_REVISION_A_STEP	0
#define PCI_REVISION_B_STEP	1
#define PCI_REVISION_4E		2
#define PCI_REVISION_4M		3
#define PIIX4_DEVACTB_REG	0x58
#define PIIX4_BRLD_EN_IRQ0	(1<<0)
#define PIIX4_BRLD_EN_IRQ	(1<<1)
#define PIIX4_BRLD_EN_IRQ8	(1<<5)
#define PIIX4_STOP_BREAK_MASK	(PIIX4_BRLD_EN_IRQ0 | \
				 PIIX4_BRLD_EN_IRQ | \
				 PIIX4_BRLD_EN_IRQ8)
#define PIIX4_PCNTRL_BST_EN	(1<<10)

/* Platform hardware resource information. */
static uint32_t		 acpi_cst_smi_cmd; /* Value to write to SMI_CMD. */
static uint8_t		 acpi_cst_ctrl;	/* Indicate we are _CST aware. */
int			 acpi_cst_quirks; /* Indicate any hardware bugs. */
static boolean_t	 acpi_cst_use_fadt;

/* Runtime state. */
static boolean_t	 acpi_cst_disable_idle; /* Disable entry to idle function */
static int		 acpi_cst_cx_count; /* Number of valid Cx states */

/* Values for sysctl. */
static int		 acpi_cst_cx_lowest; /* Current Cx lowest */
static int		 acpi_cst_cx_lowest_req; /* Requested Cx lowest */

static device_t		*acpi_cst_devices;
static int		 acpi_cst_ndevices;
static struct acpi_cst_softc **acpi_cst_softc;
static struct lwkt_serialize acpi_cst_slize = LWKT_SERIALIZE_INITIALIZER;

static int	acpi_cst_probe(device_t);
static int	acpi_cst_attach(device_t);
static int	acpi_cst_suspend(device_t);
static int	acpi_cst_resume(device_t);
static int	acpi_cst_shutdown(device_t);

static void	acpi_cst_notify(device_t);
static void	acpi_cst_postattach(void *);
static void	acpi_cst_idle(void);
static void	acpi_cst_copy(struct acpi_cst_softc *,
		    const struct acpi_cst_softc *);

static void	acpi_cst_cx_probe(struct acpi_cst_softc *);
static void	acpi_cst_cx_probe_fadt(struct acpi_cst_softc *);
static int	acpi_cst_cx_probe_cst(struct acpi_cst_softc *, int);
static int	acpi_cst_cx_reprobe_cst(struct acpi_cst_softc *);

static void	acpi_cst_startup(struct acpi_cst_softc *);
static void	acpi_cst_support_list(struct acpi_cst_softc *);
static int	acpi_cst_set_lowest(struct acpi_cst_softc *, int);
static int	acpi_cst_set_lowest_oncpu(struct acpi_cst_softc *, int);
static void	acpi_cst_non_c3(struct acpi_cst_softc *);
static void	acpi_cst_global_cx_count(void);
static int	acpi_cst_set_quirks(void);
static void	acpi_cst_c3_bm_rld(struct acpi_cst_softc *);
static void	acpi_cst_free_resource(struct acpi_cst_softc *, int);
static void	acpi_cst_c1_halt(void);

static int	acpi_cst_usage_sysctl(SYSCTL_HANDLER_ARGS);
static int	acpi_cst_lowest_sysctl(SYSCTL_HANDLER_ARGS);
static int	acpi_cst_lowest_use_sysctl(SYSCTL_HANDLER_ARGS);
static int	acpi_cst_global_lowest_sysctl(SYSCTL_HANDLER_ARGS);
static int	acpi_cst_global_lowest_use_sysctl(SYSCTL_HANDLER_ARGS);

static int	acpi_cst_cx_setup(struct acpi_cst_cx *cx);
static void	acpi_cst_c1_halt_enter(const struct acpi_cst_cx *);
static void	acpi_cst_cx_io_enter(const struct acpi_cst_cx *);

static device_method_t acpi_cst_methods[] = {
    /* Device interface */
    DEVMETHOD(device_probe,	acpi_cst_probe),
    DEVMETHOD(device_attach,	acpi_cst_attach),
    DEVMETHOD(device_detach,	bus_generic_detach),
    DEVMETHOD(device_shutdown,	acpi_cst_shutdown),
    DEVMETHOD(device_suspend,	acpi_cst_suspend),
    DEVMETHOD(device_resume,	acpi_cst_resume),

    /* Bus interface */
    DEVMETHOD(bus_add_child,	bus_generic_add_child),
    DEVMETHOD(bus_read_ivar,	bus_generic_read_ivar),
    DEVMETHOD(bus_get_resource_list, bus_generic_get_resource_list),
    DEVMETHOD(bus_get_resource,	bus_generic_rl_get_resource),
    DEVMETHOD(bus_set_resource,	bus_generic_rl_set_resource),
    DEVMETHOD(bus_alloc_resource, bus_generic_rl_alloc_resource),
    DEVMETHOD(bus_release_resource, bus_generic_rl_release_resource),
    DEVMETHOD(bus_driver_added,	bus_generic_driver_added),
    DEVMETHOD(bus_activate_resource, bus_generic_activate_resource),
    DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource),
    DEVMETHOD(bus_setup_intr,	bus_generic_setup_intr),
    DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr),
    DEVMETHOD_END
};

static driver_t acpi_cst_driver = {
    "cpu_cst",
    acpi_cst_methods,
    sizeof(struct acpi_cst_softc),
};

static devclass_t acpi_cst_devclass;
DRIVER_MODULE(cpu_cst, cpu, acpi_cst_driver, acpi_cst_devclass, NULL, NULL);
MODULE_DEPEND(cpu_cst, acpi, 1, 1, 1);

/*
 * Match ACPI Processor objects and reserve this cpu's slot in the global
 * softc pointer array.  Returns ENXIO for non-Processor handles or when
 * the cpu has already been probed (the bus is scanned twice).
 */
static int
acpi_cst_probe(device_t dev)
{
    int cpu_id;

    if (acpi_disabled("cpu_cst") || acpi_get_type(dev) != ACPI_TYPE_PROCESSOR)
	return (ENXIO);

    cpu_id = acpi_get_magic(dev);

    /* Lazily allocate the global per-cpu softc pointer array. */
    if (acpi_cst_softc == NULL)
	acpi_cst_softc = kmalloc(sizeof(struct acpi_cst_softc *) *
	    SMP_MAXCPU, M_TEMP /* XXX */, M_INTWAIT | M_ZERO);

    /*
     * Check if we already probed this processor.  We scan the bus twice
     * so it's possible we've already seen this one.
     */
    if (acpi_cst_softc[cpu_id] != NULL) {
	device_printf(dev, "CPU%d cstate already exist\n", cpu_id);
	return (ENXIO);
    }

    /* Mark this processor as in-use and save our derived id for attach. */
    acpi_cst_softc[cpu_id] = device_get_softc(dev);
    device_set_desc(dev, "ACPI CPU C-State");

    return (0);
}

/*
 * Attach: record P_BLK location/length from the Processor object and
 * kick off Cx probing.  The first unit also queues the one-shot
 * post-attach handler which finalizes global state.
 */
static int
acpi_cst_attach(device_t dev)
{
    ACPI_BUFFER		   buf;
    ACPI_OBJECT		   *obj;
    struct acpi_cst_softc *sc;
    ACPI_STATUS		   status;

    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    sc = device_get_softc(dev);
    sc->cst_dev = dev;
    sc->cst_parent = device_get_softc(device_get_parent(dev));
    sc->cst_handle = acpi_get_handle(dev);
    sc->cst_cpuid = acpi_get_magic(dev);
    acpi_cst_softc[sc->cst_cpuid] = sc;
    acpi_cst_smi_cmd = AcpiGbl_FADT.SmiCommand;
    acpi_cst_ctrl = AcpiGbl_FADT.CstControl;

    /* Evaluate the Processor object itself to obtain P_BLK information. */
    buf.Pointer = NULL;
    buf.Length = ACPI_ALLOCATE_BUFFER;
    status = AcpiEvaluateObject(sc->cst_handle, NULL, NULL, &buf);
    if (ACPI_FAILURE(status)) {
	device_printf(dev, "attach failed to get Processor obj - %s\n",
		      AcpiFormatException(status));
	acpi_cst_softc[sc->cst_cpuid] = NULL;
	return (ENXIO);
    }
    obj = (ACPI_OBJECT *)buf.Pointer;
    sc->cst_p_blk = obj->Processor.PblkAddress;
    sc->cst_p_blk_len = obj->Processor.PblkLength;
    AcpiOsFree(obj);
    ACPI_DEBUG_PRINT((ACPI_DB_INFO, "cpu_cst%d: P_BLK at %#x/%d\n",
		     device_get_unit(dev), sc->cst_p_blk, sc->cst_p_blk_len));

    /*
     * If this is the first cpu we attach, create and initialize the generic
     * resources that will be used by all acpi cpu devices.
     */
    if (device_get_unit(dev) == 0) {
	/* Assume we won't be using FADT for Cx states by default */
	acpi_cst_use_fadt = FALSE;

	/* Queue post cpu-probing task handler */
	AcpiOsExecute(OSL_NOTIFY_HANDLER, acpi_cst_postattach, NULL);
    }

    /* Probe for Cx state support. */
    acpi_cst_cx_probe(sc);

    sc->cst_flags |= ACPI_CST_FLAG_ATTACHED;

    return (0);
}

/*
 * Disable any entry to the idle function during suspend and re-enable it
 * during resume.
 */
static int
acpi_cst_suspend(device_t dev)
{
    int error;

    error = bus_generic_suspend(dev);
    if (error)
	return (error);
    acpi_cst_disable_idle = TRUE;
    return (0);
}

static int
acpi_cst_resume(device_t dev)
{
    acpi_cst_disable_idle = FALSE;
    return (bus_generic_resume(dev));
}

static int
acpi_cst_shutdown(device_t dev)
{
    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    /* Allow children to shutdown first. */
    bus_generic_shutdown(dev);

    /*
     * Disable any entry to the idle function.  There is a small race where
     * an idle thread have passed this check but not gone to sleep.  This
     * is ok since device_shutdown() does not free the softc, otherwise
     * we'd have to be sure all threads were evicted before returning.
     */
    acpi_cst_disable_idle = TRUE;

    return_VALUE (0);
}

/*
 * Probe this cpu's Cx states, preferring the ACPI 2.0 _CST object and
 * falling back to FADT/P_BLK mode (handled later by the post-attach
 * handler) when _CST is unavailable or malformed.
 */
static void
acpi_cst_cx_probe(struct acpi_cst_softc *sc)
{
    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    /* Use initial sleep value of 1 sec. to start with lowest idle state. */
    sc->cst_prev_sleep = 1000000;
    sc->cst_cx_lowest = 0;
    sc->cst_cx_lowest_req = 0;

    /*
     * Check for the ACPI 2.0 _CST sleep states object. If we can't find
     * any, we'll revert to FADT/P_BLK Cx control method which will be
     * handled by acpi_cst_postattach.
We need to defer to after having
     * probed all the cpus in the system before probing for Cx states from
     * FADT as we may already have found cpus with valid _CST packages.
     */
    if (!acpi_cst_use_fadt && acpi_cst_cx_probe_cst(sc, 0) != 0) {
	/*
	 * We were unable to find a _CST package for this cpu or there
	 * was an error parsing it. Switch back to generic mode.
	 */
	acpi_cst_use_fadt = TRUE;
	if (bootverbose)
	    device_printf(sc->cst_dev, "switching to FADT Cx mode\n");
    }

    /*
     * TODO: _CSD Package should be checked here.
     */
}

/*
 * Probe Cx states from the legacy FADT/P_BLK information: C1 is always
 * provided (HLT); C2 and C3 are added only when the advertised latencies
 * and the P_BLK length permit, using P_LVL2/P_LVL3 I/O registers.
 */
static void
acpi_cst_cx_probe_fadt(struct acpi_cst_softc *sc)
{
    struct acpi_cst_cx *cx_ptr;
    int error;

    /*
     * Free all previously allocated resources.
     *
     * NOTE:
     * It is needed, since we could enter here because of other
     * cpu's _CST probing failure.
     */
    acpi_cst_free_resource(sc, 0);

    sc->cst_cx_count = 0;
    cx_ptr = sc->cst_cx_states;

    /* Use initial sleep value of 1 sec. to start with lowest idle state. */
    sc->cst_prev_sleep = 1000000;

    /* C1 has been required since just after ACPI 1.0 */
    cx_ptr->gas.SpaceId = ACPI_ADR_SPACE_FIXED_HARDWARE;
    cx_ptr->type = ACPI_STATE_C1;
    cx_ptr->trans_lat = 0;
    cx_ptr->enter = acpi_cst_c1_halt_enter;
    error = acpi_cst_cx_setup(cx_ptr);
    if (error)
	panic("C1 FADT HALT setup failed: %d", error);
    cx_ptr++;
    sc->cst_cx_count++;

    /* C2(+) is not supported on MP system */
    if (ncpus > 1 && (AcpiGbl_FADT.Flags & ACPI_FADT_C2_MP_SUPPORTED) == 0)
	return;

    /*
     * The spec says P_BLK must be 6 bytes long.  However, some systems
     * use it to indicate a fractional set of features present so we
     * take 5 as C2.  Some may also have a value of 7 to indicate
     * another C3 but most use _CST for this (as required) and having
     * "only" C1-C3 is not a hardship.
     */
    if (sc->cst_p_blk_len < 5)
	return;

    /* Validate and allocate resources for C2 (P_LVL2). */
    if (AcpiGbl_FADT.C2Latency <= 100) {
	cx_ptr->gas.SpaceId = ACPI_ADR_SPACE_SYSTEM_IO;
	cx_ptr->gas.BitWidth = 8;
	cx_ptr->gas.Address = sc->cst_p_blk + 4;

	cx_ptr->rid = sc->cst_parent->cpu_next_rid;
	acpi_bus_alloc_gas(sc->cst_dev, &cx_ptr->res_type, &cx_ptr->rid,
	    &cx_ptr->gas, &cx_ptr->res, RF_SHAREABLE);
	if (cx_ptr->res != NULL) {
	    sc->cst_parent->cpu_next_rid++;
	    cx_ptr->type = ACPI_STATE_C2;
	    cx_ptr->trans_lat = AcpiGbl_FADT.C2Latency;
	    cx_ptr->enter = acpi_cst_cx_io_enter;
	    cx_ptr->btag = rman_get_bustag(cx_ptr->res);
	    cx_ptr->bhand = rman_get_bushandle(cx_ptr->res);
	    error = acpi_cst_cx_setup(cx_ptr);
	    if (error)
		panic("C2 FADT I/O setup failed: %d", error);
	    cx_ptr++;
	    sc->cst_cx_count++;
	    sc->cst_non_c3 = 1;
	}
    }
    if (sc->cst_p_blk_len < 6)
	return;

    /* Validate and allocate resources for C3 (P_LVL3). */
    if (AcpiGbl_FADT.C3Latency <= 1000 &&
	!(acpi_cst_quirks & ACPI_CST_QUIRK_NO_C3)) {
	cx_ptr->gas.SpaceId = ACPI_ADR_SPACE_SYSTEM_IO;
	cx_ptr->gas.BitWidth = 8;
	cx_ptr->gas.Address = sc->cst_p_blk + 5;

	cx_ptr->rid = sc->cst_parent->cpu_next_rid;
	acpi_bus_alloc_gas(sc->cst_dev, &cx_ptr->res_type, &cx_ptr->rid,
	    &cx_ptr->gas, &cx_ptr->res, RF_SHAREABLE);
	if (cx_ptr->res != NULL) {
	    sc->cst_parent->cpu_next_rid++;
	    cx_ptr->type = ACPI_STATE_C3;
	    cx_ptr->trans_lat = AcpiGbl_FADT.C3Latency;
	    cx_ptr->enter = acpi_cst_cx_io_enter;
	    cx_ptr->btag = rman_get_bustag(cx_ptr->res);
	    cx_ptr->bhand = rman_get_bushandle(cx_ptr->res);
	    error = acpi_cst_cx_setup(cx_ptr);
	    if (error)
		panic("C3 FADT I/O setup failed: %d", error);
	    cx_ptr++;
	    sc->cst_cx_count++;
	}
    }
}

/* Copy the probed Cx state set (count, non-C3 index, states) to dst_sc. */
static void
acpi_cst_copy(struct acpi_cst_softc *dst_sc,
    const struct acpi_cst_softc *src_sc)
{
    dst_sc->cst_non_c3 = src_sc->cst_non_c3;
    dst_sc->cst_cx_count = src_sc->cst_cx_count;
    memcpy(dst_sc->cst_cx_states, src_sc->cst_cx_states,
	sizeof(dst_sc->cst_cx_states));
}

/*
 * Parse a _CST package and set up its Cx states.  Since the _CST object
 * can change dynamically, our notify handler may call this function
 * to clean up and probe the new _CST package.
482 */ 483 static int 484 acpi_cst_cx_probe_cst(struct acpi_cst_softc *sc, int reprobe) 485 { 486 struct acpi_cst_cx *cx_ptr; 487 ACPI_STATUS status; 488 ACPI_BUFFER buf; 489 ACPI_OBJECT *top; 490 ACPI_OBJECT *pkg; 491 uint32_t count; 492 int i; 493 494 ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); 495 496 #ifdef INVARIANTS 497 if (reprobe) 498 KKASSERT(&curthread->td_msgport == netisr_cpuport(sc->cst_cpuid)); 499 #endif 500 501 buf.Pointer = NULL; 502 buf.Length = ACPI_ALLOCATE_BUFFER; 503 status = AcpiEvaluateObject(sc->cst_handle, "_CST", NULL, &buf); 504 if (ACPI_FAILURE(status)) 505 return (ENXIO); 506 507 /* _CST is a package with a count and at least one Cx package. */ 508 top = (ACPI_OBJECT *)buf.Pointer; 509 if (!ACPI_PKG_VALID(top, 2) || acpi_PkgInt32(top, 0, &count) != 0) { 510 device_printf(sc->cst_dev, "invalid _CST package\n"); 511 AcpiOsFree(buf.Pointer); 512 return (ENXIO); 513 } 514 if (count != top->Package.Count - 1) { 515 device_printf(sc->cst_dev, "invalid _CST state count (%d != %d)\n", 516 count, top->Package.Count - 1); 517 count = top->Package.Count - 1; 518 } 519 if (count > MAX_CX_STATES) { 520 device_printf(sc->cst_dev, "_CST has too many states (%d)\n", count); 521 count = MAX_CX_STATES; 522 } 523 524 sc->cst_flags |= ACPI_CST_FLAG_PROBING | ACPI_CST_FLAG_MATCH_HT; 525 cpu_sfence(); 526 527 /* 528 * Free all previously allocated resources 529 * 530 * NOTE: It is needed for _CST reprobing. 531 */ 532 acpi_cst_free_resource(sc, 0); 533 534 /* Set up all valid states. 
*/ 535 sc->cst_cx_count = 0; 536 cx_ptr = sc->cst_cx_states; 537 for (i = 0; i < count; i++) { 538 int error; 539 540 pkg = &top->Package.Elements[i + 1]; 541 if (!ACPI_PKG_VALID(pkg, 4) || 542 acpi_PkgInt32(pkg, 1, &cx_ptr->type) != 0 || 543 acpi_PkgInt32(pkg, 2, &cx_ptr->trans_lat) != 0 || 544 acpi_PkgInt32(pkg, 3, &cx_ptr->power) != 0) { 545 546 device_printf(sc->cst_dev, "skipping invalid Cx state package\n"); 547 continue; 548 } 549 550 /* Validate the state to see if we should use it. */ 551 switch (cx_ptr->type) { 552 case ACPI_STATE_C1: 553 sc->cst_non_c3 = i; 554 cx_ptr->enter = acpi_cst_c1_halt_enter; 555 error = acpi_cst_cx_setup(cx_ptr); 556 if (error) 557 panic("C1 CST HALT setup failed: %d", error); 558 if (sc->cst_cx_count != 0) { 559 /* 560 * C1 is not the first C-state; something really stupid 561 * is going on ... 562 */ 563 sc->cst_flags &= ~ACPI_CST_FLAG_MATCH_HT; 564 } 565 cx_ptr++; 566 sc->cst_cx_count++; 567 continue; 568 case ACPI_STATE_C2: 569 sc->cst_non_c3 = i; 570 break; 571 case ACPI_STATE_C3: 572 default: 573 if ((acpi_cst_quirks & ACPI_CST_QUIRK_NO_C3) != 0) { 574 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 575 "cpu_cst%d: C3[%d] not available.\n", 576 device_get_unit(sc->cst_dev), i)); 577 continue; 578 } 579 break; 580 } 581 582 /* 583 * Allocate the control register for C2 or C3(+). 584 */ 585 KASSERT(cx_ptr->res == NULL, ("still has res")); 586 acpi_PkgRawGas(pkg, 0, &cx_ptr->gas); 587 588 /* 589 * We match number of C2/C3 for hyperthreads, only if the 590 * register is "Fixed Hardware", e.g. on most of the Intel 591 * CPUs. We don't have much to do for the rest of the 592 * register types. 
593 */ 594 if (cx_ptr->gas.SpaceId != ACPI_ADR_SPACE_FIXED_HARDWARE) 595 sc->cst_flags &= ~ACPI_CST_FLAG_MATCH_HT; 596 597 cx_ptr->rid = sc->cst_parent->cpu_next_rid; 598 acpi_bus_alloc_gas(sc->cst_dev, &cx_ptr->res_type, &cx_ptr->rid, 599 &cx_ptr->gas, &cx_ptr->res, RF_SHAREABLE); 600 if (cx_ptr->res != NULL) { 601 sc->cst_parent->cpu_next_rid++; 602 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 603 "cpu_cst%d: Got C%d - %d latency\n", 604 device_get_unit(sc->cst_dev), cx_ptr->type, 605 cx_ptr->trans_lat)); 606 cx_ptr->enter = acpi_cst_cx_io_enter; 607 cx_ptr->btag = rman_get_bustag(cx_ptr->res); 608 cx_ptr->bhand = rman_get_bushandle(cx_ptr->res); 609 error = acpi_cst_cx_setup(cx_ptr); 610 if (error) 611 panic("C%d CST I/O setup failed: %d", cx_ptr->type, error); 612 cx_ptr++; 613 sc->cst_cx_count++; 614 } else { 615 error = acpi_cst_cx_setup(cx_ptr); 616 if (!error) { 617 KASSERT(cx_ptr->enter != NULL, 618 ("C%d enter is not set", cx_ptr->type)); 619 cx_ptr++; 620 sc->cst_cx_count++; 621 } 622 } 623 } 624 AcpiOsFree(buf.Pointer); 625 626 if (sc->cst_flags & ACPI_CST_FLAG_MATCH_HT) { 627 cpumask_t mask; 628 629 mask = get_cpumask_from_level(sc->cst_cpuid, CORE_LEVEL); 630 if (CPUMASK_TESTNZERO(mask)) { 631 int cpu; 632 633 for (cpu = 0; cpu < ncpus; ++cpu) { 634 struct acpi_cst_softc *sc1 = acpi_cst_softc[cpu]; 635 636 if (sc1 == NULL || sc1 == sc || 637 (sc1->cst_flags & ACPI_CST_FLAG_ATTACHED) == 0 || 638 (sc1->cst_flags & ACPI_CST_FLAG_MATCH_HT) == 0) 639 continue; 640 if (!CPUMASK_TESTBIT(mask, sc1->cst_cpuid)) 641 continue; 642 643 if (sc1->cst_cx_count != sc->cst_cx_count) { 644 struct acpi_cst_softc *src_sc, *dst_sc; 645 646 if (bootverbose) { 647 device_printf(sc->cst_dev, 648 "inconstent C-state count: %d, %s has %d\n", 649 sc->cst_cx_count, 650 device_get_nameunit(sc1->cst_dev), 651 sc1->cst_cx_count); 652 } 653 if (sc1->cst_cx_count > sc->cst_cx_count) { 654 src_sc = sc1; 655 dst_sc = sc; 656 } else { 657 src_sc = sc; 658 dst_sc = sc1; 659 } 660 
		    acpi_cst_copy(dst_sc, src_sc);
		}
	    }
	}
    }

    if (reprobe) {
	/* If there are C3(+) states, always enable bus master wakeup */
	if ((acpi_cst_quirks & ACPI_CST_QUIRK_NO_BM) == 0) {
	    for (i = 0; i < sc->cst_cx_count; ++i) {
		struct acpi_cst_cx *cx = &sc->cst_cx_states[i];

		if (cx->type >= ACPI_STATE_C3) {
		    AcpiWriteBitRegister(ACPI_BITREG_BUS_MASTER_RLD, 1);
		    break;
		}
	    }
	}

	/* Fix up the lowest Cx being used */
	acpi_cst_set_lowest_oncpu(sc, sc->cst_cx_lowest_req);
    }

    /*
     * Cache the lowest non-C3 state.
     * NOTE: must after cst_cx_lowest is set.
     */
    acpi_cst_non_c3(sc);

    /* Clear "probing" after all state above is globally visible. */
    cpu_sfence();
    sc->cst_flags &= ~ACPI_CST_FLAG_PROBING;

    return (0);
}

/* Netisr handler: reprobe _CST on the owning cpu, reply with the result. */
static void
acpi_cst_cx_reprobe_cst_handler(netmsg_t msg)
{
    struct netmsg_acpi_cst *rmsg = (struct netmsg_acpi_cst *)msg;
    int error;

    error = acpi_cst_cx_probe_cst(rmsg->sc, 1);
    lwkt_replymsg(&rmsg->base.lmsg, error);
}

/* Synchronously run a _CST reprobe on the cpu owning 'sc'. */
static int
acpi_cst_cx_reprobe_cst(struct acpi_cst_softc *sc)
{
    struct netmsg_acpi_cst msg;

    netmsg_init(&msg.base, NULL, &curthread->td_msgport, MSGF_PRIORITY,
	acpi_cst_cx_reprobe_cst_handler);
    msg.sc = sc;

    return lwkt_domsg(netisr_cpuport(sc->cst_cpuid), &msg.base.lmsg, 0);
}

/*
 * Call this *after* all CPUs Cx states have been attached.
 */
static void
acpi_cst_postattach(void *arg)
{
    struct acpi_cst_softc *sc;
    int i;

    /* Get set of Cx state devices */
    devclass_get_devices(acpi_cst_devclass, &acpi_cst_devices,
	&acpi_cst_ndevices);

    /*
     * Setup any quirks that might necessary now that we have probed
     * all the CPUs' Cx states.
     */
    acpi_cst_set_quirks();

    if (acpi_cst_use_fadt) {
	/*
	 * We are using Cx mode from FADT, probe for available Cx states
	 * for all processors.
	 */
	for (i = 0; i < acpi_cst_ndevices; i++) {
	    sc = device_get_softc(acpi_cst_devices[i]);
	    acpi_cst_cx_probe_fadt(sc);
	}
    } else {
	/*
	 * We are using _CST mode, remove C3 state if necessary.
	 *
	 * As we now know for sure that we will be using _CST mode
	 * install our notify handler.
	 */
	for (i = 0; i < acpi_cst_ndevices; i++) {
	    sc = device_get_softc(acpi_cst_devices[i]);
	    if (acpi_cst_quirks & ACPI_CST_QUIRK_NO_C3) {
		/* Free part of unused resources */
		acpi_cst_free_resource(sc, sc->cst_non_c3 + 1);
		sc->cst_cx_count = sc->cst_non_c3 + 1;
	    }
	    sc->cst_parent->cpu_cst_notify = acpi_cst_notify;
	}
    }
    acpi_cst_global_cx_count();

    /* Perform Cx final initialization. */
    for (i = 0; i < acpi_cst_ndevices; i++) {
	sc = device_get_softc(acpi_cst_devices[i]);
	acpi_cst_startup(sc);

	if (sc->cst_parent->glob_sysctl_tree != NULL) {
	    struct acpi_cpu_softc *cpu = sc->cst_parent;

	    /* Add a sysctl handler to handle global Cx lowest setting */
	    SYSCTL_ADD_PROC(&cpu->glob_sysctl_ctx,
			    SYSCTL_CHILDREN(cpu->glob_sysctl_tree),
			    OID_AUTO, "cx_lowest",
			    CTLTYPE_STRING | CTLFLAG_RW, NULL, 0,
			    acpi_cst_global_lowest_sysctl, "A",
			    "Requested global lowest Cx sleep state");
	    SYSCTL_ADD_PROC(&cpu->glob_sysctl_ctx,
			    SYSCTL_CHILDREN(cpu->glob_sysctl_tree),
			    OID_AUTO, "cx_lowest_use",
			    CTLTYPE_STRING | CTLFLAG_RD, NULL, 0,
			    acpi_cst_global_lowest_use_sysctl, "A",
			    "Global lowest Cx sleep state to use");
	}
    }

    /* Take over idling from cpu_idle_default(). */
    acpi_cst_cx_lowest = 0;
    acpi_cst_cx_lowest_req = 0;
    acpi_cst_disable_idle = FALSE;

    /* Make the state above visible before installing the idle hook. */
    cpu_sfence();
    cpu_idle_hook = acpi_cst_idle;
}

/* Format "C1/lat C2/lat ..." into sc->cst_cx_supported for sysctl. */
static void
acpi_cst_support_list(struct acpi_cst_softc *sc)
{
    struct sbuf sb;
    int i;

    /*
     * Set up the list of Cx states
     */
    sbuf_new(&sb, sc->cst_cx_supported, sizeof(sc->cst_cx_supported),
	SBUF_FIXEDLEN);
    for (i = 0; i < sc->cst_cx_count; i++)
	sbuf_printf(&sb, "C%d/%d ", i + 1, sc->cst_cx_states[i].trans_lat);
    sbuf_trim(&sb);
    sbuf_finish(&sb);
}

/* Netisr handler: enable bus-master wakeup (BM_RLD) on the owning cpu. */
static void
acpi_cst_c3_bm_rld_handler(netmsg_t msg)
{
    struct netmsg_acpi_cst *rmsg = (struct netmsg_acpi_cst *)msg;

    AcpiWriteBitRegister(ACPI_BITREG_BUS_MASTER_RLD, 1);
    lwkt_replymsg(&rmsg->base.lmsg, 0);
}

/* Synchronously set BM_RLD on the cpu owning 'sc'. */
static void
acpi_cst_c3_bm_rld(struct acpi_cst_softc *sc)
{
    struct netmsg_acpi_cst msg;

    netmsg_init(&msg.base, NULL, &curthread->td_msgport, MSGF_PRIORITY,
	acpi_cst_c3_bm_rld_handler);
    msg.sc = sc;

    lwkt_domsg(netisr_cpuport(sc->cst_cpuid), &msg.base.lmsg, 0);
}

/* Final per-cpu Cx initialization: redo setup with quirks, add sysctls. */
static void
acpi_cst_startup(struct acpi_cst_softc *sc)
{
    struct acpi_cpu_softc *cpu = sc->cst_parent;
    int i, bm_rld_done = 0;

    for (i = 0; i < sc->cst_cx_count; ++i) {
	struct acpi_cst_cx *cx = &sc->cst_cx_states[i];
	int error;

	/* If there are C3(+) states, always enable bus master wakeup */
	if (cx->type >= ACPI_STATE_C3 && !bm_rld_done &&
	    (acpi_cst_quirks & ACPI_CST_QUIRK_NO_BM) == 0) {
	    acpi_cst_c3_bm_rld(sc);
	    bm_rld_done = 1;
	}

	/* Redo the Cx setup, since quirks have been changed */
	error = acpi_cst_cx_setup(cx);
	if (error)
	    panic("C%d startup setup failed: %d", i + 1, error);
    }

    acpi_cst_support_list(sc);

    SYSCTL_ADD_STRING(&cpu->pcpu_sysctl_ctx,
		      SYSCTL_CHILDREN(cpu->pcpu_sysctl_tree),
		      OID_AUTO, "cx_supported", CTLFLAG_RD,
		      sc->cst_cx_supported, 0,
		      "Cx/microsecond values for supported Cx states");
    SYSCTL_ADD_PROC(&cpu->pcpu_sysctl_ctx,
		    SYSCTL_CHILDREN(cpu->pcpu_sysctl_tree),
		    OID_AUTO, "cx_lowest", CTLTYPE_STRING | CTLFLAG_RW,
		    (void *)sc, 0, acpi_cst_lowest_sysctl, "A",
		    "requested lowest Cx sleep state");
    SYSCTL_ADD_PROC(&cpu->pcpu_sysctl_ctx,
		    SYSCTL_CHILDREN(cpu->pcpu_sysctl_tree),
		    OID_AUTO, "cx_lowest_use", CTLTYPE_STRING | CTLFLAG_RD,
		    (void *)sc, 0, acpi_cst_lowest_use_sysctl, "A",
		    "lowest Cx sleep state to use");
    SYSCTL_ADD_PROC(&cpu->pcpu_sysctl_ctx,
		    SYSCTL_CHILDREN(cpu->pcpu_sysctl_tree),
		    OID_AUTO, "cx_usage", CTLTYPE_STRING | CTLFLAG_RD,
		    (void *)sc, 0, acpi_cst_usage_sysctl, "A",
		    "percent usage for each Cx state");

#ifdef notyet
    /* Signal platform that we can handle _CST notification. */
    if (!acpi_cst_use_fadt && acpi_cst_ctrl != 0) {
	ACPI_LOCK(acpi);
	AcpiOsWritePort(acpi_cst_smi_cmd, acpi_cst_ctrl, 8);
	ACPI_UNLOCK(acpi);
    }
#endif
}

/*
 * Idle the CPU in the lowest state possible.  This function is called with
 * interrupts disabled.  Note that once it re-enables interrupts, a task
 * switch can occur so do not access shared data (i.e. the softc) after
 * interrupts are re-enabled.
 */
static void
acpi_cst_idle(void)
{
    struct acpi_cst_softc *sc;
    struct acpi_cst_cx *cx_next;
    union microtime_pcpu start, end;
    int cx_next_idx, i, tdiff, bm_arb_disabled = 0;

    /* If disabled, return immediately. */
    if (acpi_cst_disable_idle) {
	ACPI_ENABLE_IRQS();
	return;
    }

    /*
     * Look up our CPU id to get our softc.  If it's NULL, we'll use C1
     * since there is no Cx state for this processor.
     */
    sc = acpi_cst_softc[mdcpu->mi.gd_cpuid];
    if (sc == NULL) {
	acpi_cst_c1_halt();
	return;
    }

    /* Still probing; use C1 */
    if (sc->cst_flags & ACPI_CST_FLAG_PROBING) {
	acpi_cst_c1_halt();
	return;
    }

    /* Find the lowest state that has small enough latency. */
    cx_next_idx = 0;
    for (i = sc->cst_cx_lowest; i >= 0; i--) {
	if (sc->cst_cx_states[i].trans_lat * 3 <= sc->cst_prev_sleep) {
	    cx_next_idx = i;
	    break;
	}
    }

    /*
     * Check for bus master activity if needed for the selected state.
     * If there was activity, clear the bit and use the lowest non-C3 state.
     */
    cx_next = &sc->cst_cx_states[cx_next_idx];
    if (cx_next->flags & ACPI_CST_CX_FLAG_BM_STS) {
	int bm_active;

	AcpiReadBitRegister(ACPI_BITREG_BUS_MASTER_STATUS, &bm_active);
	if (bm_active != 0) {
	    AcpiWriteBitRegister(ACPI_BITREG_BUS_MASTER_STATUS, 1);
	    cx_next_idx = sc->cst_non_c3;
	}
    }

    /* Select the next state and update statistics. */
    cx_next = &sc->cst_cx_states[cx_next_idx];
    sc->cst_cx_stats[cx_next_idx]++;
    KASSERT(cx_next->type != ACPI_STATE_C0, ("C0 sleep"));

    /*
     * Execute HLT (or equivalent) and wait for an interrupt.  We can't
     * calculate the time spent in C1 since the place we wake up is an
     * ISR.  Assume we slept half of quantum and return.
     */
    if (cx_next->type == ACPI_STATE_C1) {
	sc->cst_prev_sleep = (sc->cst_prev_sleep * 3 + 500000 / hz) / 4;
	cx_next->enter(cx_next);
	return;
    }

    /* Execute the proper preamble before enter the selected state. */
    if (cx_next->preamble == ACPI_CST_CX_PREAMBLE_BM_ARB) {
	AcpiWriteBitRegister(ACPI_BITREG_ARB_DISABLE, 1);
	bm_arb_disabled = 1;
    } else if (cx_next->preamble == ACPI_CST_CX_PREAMBLE_WBINVD) {
	ACPI_FLUSH_CPU_CACHE();
    }

    /*
     * Enter the selected state and check time spent asleep.
 */
    microtime_pcpu_get(&start);
    /*
     * NOTE(review): the mfence pair appears intended to keep the
     * timestamp samples from being reordered across the C-state
     * entry -- confirm against the MD enter() implementations.
     */
    cpu_mfence();

    cx_next->enter(cx_next);

    cpu_mfence();
    microtime_pcpu_get(&end);

    /* Enable bus master arbitration, if it was disabled. */
    if (bm_arb_disabled)
	AcpiWriteBitRegister(ACPI_BITREG_ARB_DISABLE, 0);

    ACPI_ENABLE_IRQS();

    /* Find the actual time asleep in microseconds. */
    tdiff = microtime_pcpu_diff(&start, &end);
    /*
     * Fold the measured sleep into a 3/4-weighted moving average;
     * cst_prev_sleep drives the latency heuristic on the next idle
     * entry.
     */
    sc->cst_prev_sleep = (sc->cst_prev_sleep * 3 + tdiff) / 4;
}

/*
 * Re-evaluate the _CST object when we are notified that it changed.
 */
static void
acpi_cst_notify(device_t dev)
{
    struct acpi_cst_softc *sc = device_get_softc(dev);

    /* _CST notifies must not arrive in netisr context. */
    KASSERT(curthread->td_type != TD_TYPE_NETISR,
	("notify in netisr%d", mycpuid));

    lwkt_serialize_enter(&acpi_cst_slize);

    /* Update the list of Cx states. */
    acpi_cst_cx_reprobe_cst(sc);
    acpi_cst_support_list(sc);

    /* Update the new lowest useable Cx state for all CPUs. */
    acpi_cst_global_cx_count();

    /*
     * Fix up the lowest Cx being used: honor the original request if
     * it is now satisfiable, and clamp to the deepest common state.
     */
    if (acpi_cst_cx_lowest_req < acpi_cst_cx_count)
	acpi_cst_cx_lowest = acpi_cst_cx_lowest_req;
    if (acpi_cst_cx_lowest > acpi_cst_cx_count - 1)
	acpi_cst_cx_lowest = acpi_cst_cx_count - 1;

    lwkt_serialize_exit(&acpi_cst_slize);
}

/*
 * Probe for FADT and chipset quirks that restrict C3 usage or bus
 * master handling.  Sets bits in the global acpi_cst_quirks.
 * Always returns 0.
 */
static int
acpi_cst_set_quirks(void)
{
    device_t acpi_dev;
    uint32_t val;

    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    /*
     * Bus mastering arbitration control is needed to keep caches coherent
     * while sleeping in C3.  If it's not present but a working flush cache
     * instruction is present, flush the caches before entering C3 instead.
     * Otherwise, just disable C3 completely.
     */
    if (AcpiGbl_FADT.Pm2ControlBlock == 0 ||
	AcpiGbl_FADT.Pm2ControlLength == 0) {
	if ((AcpiGbl_FADT.Flags & ACPI_FADT_WBINVD) &&
	    (AcpiGbl_FADT.Flags & ACPI_FADT_WBINVD_FLUSH) == 0) {
	    acpi_cst_quirks |= ACPI_CST_QUIRK_NO_BM;
	    ACPI_DEBUG_PRINT((ACPI_DB_INFO,
		"cpu_cst: no BM control, using flush cache method\n"));
	} else {
	    acpi_cst_quirks |= ACPI_CST_QUIRK_NO_C3;
	    ACPI_DEBUG_PRINT((ACPI_DB_INFO,
		"cpu_cst: no BM control, C3 not available\n"));
	}
    }

    /* Look for various quirks of the PIIX4 part. */
    acpi_dev = pci_find_device(PCI_VENDOR_INTEL, PCI_DEVICE_82371AB_3);
    if (acpi_dev != NULL) {
	switch (pci_get_revid(acpi_dev)) {
	/*
	 * Disable C3 support for all PIIX4 chipsets.  Some of these parts
	 * do not report the BMIDE status to the BM status register and
	 * others have a livelock bug if Type-F DMA is enabled.  Linux
	 * works around the BMIDE bug by reading the BM status directly
	 * but we take the simpler approach of disabling C3 for these
	 * parts.
	 *
	 * See erratum #18 ("C3 Power State/BMIDE and Type-F DMA
	 * Livelock") from the January 2002 PIIX4 specification update.
	 * Applies to all PIIX4 models.
	 *
	 * Also, make sure that all interrupts cause a "Stop Break"
	 * event to exit from C2 state.
	 * Also, BRLD_EN_BM (ACPI_BITREG_BUS_MASTER_RLD in ACPI-speak)
	 * should be set to zero, otherwise it causes C2 to short-sleep.
	 * PIIX4 doesn't properly support C3 and bus master activity
	 * need not break out of C2.
	 */
	case PCI_REVISION_A_STEP:
	case PCI_REVISION_B_STEP:
	case PCI_REVISION_4E:
	case PCI_REVISION_4M:
	    acpi_cst_quirks |= ACPI_CST_QUIRK_NO_C3;
	    ACPI_DEBUG_PRINT((ACPI_DB_INFO,
		"cpu_cst: working around PIIX4 bug, disabling C3\n"));

	    /* Ensure all IRQs generate a Stop Break event (C2 exit). */
	    val = pci_read_config(acpi_dev, PIIX4_DEVACTB_REG, 4);
	    if ((val & PIIX4_STOP_BREAK_MASK) != PIIX4_STOP_BREAK_MASK) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
		    "cpu_cst: PIIX4: enabling IRQs to generate Stop Break\n"));
		val |= PIIX4_STOP_BREAK_MASK;
		pci_write_config(acpi_dev, PIIX4_DEVACTB_REG, val, 4);
	    }
	    /* Clear BRLD_EN_BM to avoid C2 short-sleeps (see above). */
	    AcpiReadBitRegister(ACPI_BITREG_BUS_MASTER_RLD, &val);
	    if (val) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
		    "cpu_cst: PIIX4: reset BRLD_EN_BM\n"));
		AcpiWriteBitRegister(ACPI_BITREG_BUS_MASTER_RLD, 0);
	    }
	    break;
	default:
	    break;
	}
    }

    return (0);
}

/*
 * sysctl handler: report per-Cx residency as percentages of total
 * entries, plus the most recent sleep duration ("last NNNus").
 */
static int
acpi_cst_usage_sysctl(SYSCTL_HANDLER_ARGS)
{
    struct acpi_cst_softc *sc;
    struct sbuf sb;
    char buf[128];
    int i;
    uintmax_t fract, sum, whole;

    sc = (struct acpi_cst_softc *) arg1;
    sum = 0;
    for (i = 0; i < sc->cst_cx_count; i++)
	sum += sc->cst_cx_stats[i];
    sbuf_new(&sb, buf, sizeof(buf), SBUF_FIXEDLEN);
    for (i = 0; i < sc->cst_cx_count; i++) {
	if (sum > 0) {
	    /* Integer math: whole percent, then two fractional digits. */
	    whole = (uintmax_t)sc->cst_cx_stats[i] * 100;
	    fract = (whole % sum) * 100;
	    sbuf_printf(&sb, "%u.%02u%% ", (u_int)(whole / sum),
		(u_int)(fract / sum));
	} else
	    sbuf_printf(&sb, "0.00%% ");
    }
    sbuf_printf(&sb, "last %dus", sc->cst_prev_sleep);
    sbuf_trim(&sb);
    sbuf_finish(&sb);
    sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req);
    sbuf_delete(&sb);

    return (0);
}

/*
 * Apply a new lowest-Cx request for 'sc'; must execute on the CPU
 * that owns the softc since per-cpu state is modified.
 */
static int
acpi_cst_set_lowest_oncpu(struct acpi_cst_softc *sc, int val)
{
    int old_lowest, error = 0, old_lowest_req;
    uint32_t old_type, type;

    KKASSERT(mycpuid == sc->cst_cpuid);

    /* Remember the raw request; it survives the clamping below. */
    old_lowest_req = sc->cst_cx_lowest_req;
    sc->cst_cx_lowest_req = val;

    /* Clamp to the deepest Cx state this CPU actually supports. */
    if (val > sc->cst_cx_count - 1)
	val = sc->cst_cx_count - 1;
    old_lowest = atomic_swap_int(&sc->cst_cx_lowest, val);

    /*
     * Register/deregister a cputimer powersave requirement when the
     * lowest state crosses the C3 boundary in either direction; on
     * failure roll back both the request and the active setting.
     */
    old_type = sc->cst_cx_states[old_lowest].type;
    type = sc->cst_cx_states[val].type;
    if (old_type >= ACPI_STATE_C3 && type < ACPI_STATE_C3) {
	cputimer_intr_powersave_remreq();
    } else if (type >= ACPI_STATE_C3 && old_type < ACPI_STATE_C3) {
	error = cputimer_intr_powersave_addreq();
	if (error) {
	    /* Restore */
	    sc->cst_cx_lowest_req = old_lowest_req;
	    sc->cst_cx_lowest = old_lowest;
	}
    }

    if (error)
	return error;

    /* Cache the new lowest non-C3 state. */
    acpi_cst_non_c3(sc);

    /* Reset the statistics counters. */
    bzero(sc->cst_cx_stats, sizeof(sc->cst_cx_stats));
    return (0);
}

/*
 * netisr message handler: run acpi_cst_set_lowest_oncpu() on the
 * target CPU and reply with its error code.
 */
static void
acpi_cst_set_lowest_handler(netmsg_t msg)
{
    struct netmsg_acpi_cst *rmsg = (struct netmsg_acpi_cst *)msg;
    int error;

    error = acpi_cst_set_lowest_oncpu(rmsg->sc, rmsg->val);
    lwkt_replymsg(&rmsg->base.lmsg, error);
}

/*
 * Dispatch a lowest-Cx change to the CPU owning 'sc' via its netisr
 * port and wait synchronously for the result.
 */
static int
acpi_cst_set_lowest(struct acpi_cst_softc *sc, int val)
{
    struct netmsg_acpi_cst msg;

    netmsg_init(&msg.base, NULL, &curthread->td_msgport, MSGF_PRIORITY,
	acpi_cst_set_lowest_handler);
    msg.sc = sc;
    msg.val = val;

    return lwkt_domsg(netisr_cpuport(sc->cst_cpuid), &msg.base.lmsg, 0);
}

/*
 * sysctl handler: get/set this CPU's lowest Cx state as a "Cn"
 * string (1-based for userland, 0-based internally).
 */
static int
acpi_cst_lowest_sysctl(SYSCTL_HANDLER_ARGS)
{
    struct acpi_cst_softc *sc;
    char state[8];
    int val, error;

    sc = (struct acpi_cst_softc *)arg1;
    /* Report the requested (unclamped) state. */
    ksnprintf(state, sizeof(state), "C%d", sc->cst_cx_lowest_req + 1);
    error = sysctl_handle_string(oidp, state, sizeof(state), req);
    if (error != 0 || req->newptr == NULL)
	return (error);
    if (strlen(state) < 2 || toupper(state[0]) != 'C')
	return (EINVAL);
    val = (int) strtol(state + 1, NULL, 10) - 1;
    if (val < 0)
	return (EINVAL);

    lwkt_serialize_enter(&acpi_cst_slize);
    error = acpi_cst_set_lowest(sc, val);
    lwkt_serialize_exit(&acpi_cst_slize);

    return error;
}

/*
 * sysctl handler: report the clamped lowest Cx state actually in use
 * on this CPU (read-only view of cst_cx_lowest).
 */
static int
acpi_cst_lowest_use_sysctl(SYSCTL_HANDLER_ARGS)
{
    struct acpi_cst_softc *sc;
    char state[8];

    sc = (struct acpi_cst_softc *)arg1;
    ksnprintf(state, sizeof(state), "C%d", sc->cst_cx_lowest + 1);
    return sysctl_handle_string(oidp, state, sizeof(state), req);
}

/*
 * sysctl handler: set the machine-wide lowest Cx state, then push the
 * new value down to every CPU.
 */
static int
acpi_cst_global_lowest_sysctl(SYSCTL_HANDLER_ARGS)
{
    struct acpi_cst_softc *sc;
    char state[8];
    int val, error, i;

    ksnprintf(state, sizeof(state), "C%d", acpi_cst_cx_lowest_req + 1);
    error = sysctl_handle_string(oidp, state, sizeof(state), req);
    if (error != 0 || req->newptr == NULL)
	return (error);
    if (strlen(state) < 2 || toupper(state[0]) != 'C')
	return (EINVAL);
    val = (int) strtol(state + 1, NULL, 10) - 1;
    if (val < 0)
	return (EINVAL);

    lwkt_serialize_enter(&acpi_cst_slize);

    acpi_cst_cx_lowest_req = val;
    acpi_cst_cx_lowest = val;
    if (acpi_cst_cx_lowest > acpi_cst_cx_count - 1)
	acpi_cst_cx_lowest = acpi_cst_cx_count - 1;

    /* Update the new lowest useable Cx state for all CPUs.
     */
    for (i = 0; i < acpi_cst_ndevices; i++) {
	sc = device_get_softc(acpi_cst_devices[i]);
	error = acpi_cst_set_lowest(sc, val);
	if (error) {
	    /* NOTE(review): a failure is only expected on the first CPU. */
	    KKASSERT(i == 0);
	    break;
	}
    }

    lwkt_serialize_exit(&acpi_cst_slize);

    return error;
}

/*
 * sysctl handler: report the clamped machine-wide lowest Cx state.
 */
static int
acpi_cst_global_lowest_use_sysctl(SYSCTL_HANDLER_ARGS)
{
    char state[8];

    ksnprintf(state, sizeof(state), "C%d", acpi_cst_cx_lowest + 1);
    return sysctl_handle_string(oidp, state, sizeof(state), req);
}

/*
 * Put the CPU in C1 in a machine-dependant way.
 * XXX: shouldn't be here!
 */
static void
acpi_cst_c1_halt(void)
{
    splz();
    /*
     * If no work is pending, re-enable interrupts and halt; x86
     * executes the instruction after "sti" before taking interrupts,
     * so a wakeup cannot slip in between.  Otherwise just pause.
     */
    if ((mycpu->gd_reqflags & RQF_IDLECHECK_WK_MASK) == 0)
	__asm __volatile("sti; hlt");
    else
	__asm __volatile("sti; pause");
}

/*
 * Cache the index of the deepest non-C3 state at or below the current
 * lowest setting; used as the fallback when bus-master activity makes
 * C3 undesirable.
 */
static void
acpi_cst_non_c3(struct acpi_cst_softc *sc)
{
    int i;

    sc->cst_non_c3 = 0;
    for (i = sc->cst_cx_lowest; i >= 0; i--) {
	if (sc->cst_cx_states[i].type < ACPI_STATE_C3) {
	    sc->cst_non_c3 = i;
	    break;
	}
    }
    if (bootverbose)
	device_printf(sc->cst_dev, "non-C3 %d\n", sc->cst_non_c3);
}

/*
 * Update the largest Cx state supported in the global acpi_cst_cx_count.
 * It will be used in the global Cx sysctl handler.
 */
static void
acpi_cst_global_cx_count(void)
{
    struct acpi_cst_softc *sc;
    int i;

    if (acpi_cst_ndevices == 0) {
	acpi_cst_cx_count = 0;
	return;
    }

    sc = device_get_softc(acpi_cst_devices[0]);
    acpi_cst_cx_count = sc->cst_cx_count;

    /* The global count is the minimum across all CPUs. */
    for (i = 1; i < acpi_cst_ndevices; i++) {
	struct acpi_cst_softc *sc = device_get_softc(acpi_cst_devices[i]);

	if (sc->cst_cx_count < acpi_cst_cx_count)
	    acpi_cst_cx_count = sc->cst_cx_count;
    }
    if (bootverbose)
	kprintf("cpu_cst: global Cx count %d\n", acpi_cst_cx_count);
}

/* C1 entry method: plain HLT; the cx argument is unused. */
static void
acpi_cst_c1_halt_enter(const struct acpi_cst_cx *cx __unused)
{
    acpi_cst_c1_halt();
}

/*
 * I/O-port Cx entry method: reading the state's I/O register puts the
 * CPU into the idle state.
 */
static void
acpi_cst_cx_io_enter(const struct acpi_cst_cx *cx)
{
    uint64_t dummy;

    /*
     * Read I/O to enter this Cx state
     */
    bus_space_read_1(cx->btag, cx->bhand, 0);
    /*
     * Perform a dummy I/O read.  Since it may take an arbitrary time
     * to enter the idle state, this read makes sure that we are frozen.
     */
    AcpiRead(&dummy, &AcpiGbl_FADT.XPmTimerBlock);
}

/*
 * Fill in the flags/preamble a Cx state needs before it can be
 * entered, then let the machine-dependent code finalize the setup.
 */
static int
acpi_cst_cx_setup(struct acpi_cst_cx *cx)
{
    cx->flags &= ~ACPI_CST_CX_FLAG_BM_STS;
    cx->preamble = ACPI_CST_CX_PREAMBLE_NONE;

    if (cx->type >= ACPI_STATE_C3) {
	/*
	 * Set the required operations for entering C3(+) state.
	 * Later acpi_cst_md_cx_setup() may fix them up.
	 */

	/*
	 * Always check BM_STS.
	 */
	if ((acpi_cst_quirks & ACPI_CST_QUIRK_NO_BM) == 0)
	    cx->flags |= ACPI_CST_CX_FLAG_BM_STS;

	/*
	 * According to the ACPI specification, bus master arbitration
	 * is only available on UP system.  For MP system, cache flushing
	 * is required.
	 */
	if (ncpus == 1 && (acpi_cst_quirks & ACPI_CST_QUIRK_NO_BM) == 0)
	    cx->preamble = ACPI_CST_CX_PREAMBLE_BM_ARB;
	else
	    cx->preamble = ACPI_CST_CX_PREAMBLE_WBINVD;
    }
    return acpi_cst_md_cx_setup(cx);
}

/*
 * Release bus resources held by Cx states [start, MAX_CX_STATES) and
 * clear their descriptors.
 */
static void
acpi_cst_free_resource(struct acpi_cst_softc *sc, int start)
{
    int i;

    for (i = start; i < MAX_CX_STATES; ++i) {
	struct acpi_cst_cx *cx = &sc->cst_cx_states[i];

	if (cx->res != NULL)
	    bus_release_resource(sc->cst_dev, cx->res_type, cx->rid, cx->res);
	memset(cx, 0, sizeof(*cx));
    }
}