/*-
 * Copyright (c) 2003-2005 Nate Lawson (SDG)
 * Copyright (c) 2001 Michael Smith
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/dev/acpica/acpi_cpu.c,v 1.72 2008/04/12 12:06:00 rpaulo Exp $
 */

#include "opt_acpi.h"
#include <sys/param.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/globaldata.h>
#include <sys/power.h>
#include <sys/proc.h>
#include <sys/sbuf.h>
#include <sys/thread2.h>
#include <sys/serialize.h>
#include <sys/msgport2.h>

#include <bus/pci/pcivar.h>
#include <machine/atomic.h>
#include <machine/globaldata.h>
#include <machine/md_var.h>
#include <machine/smp.h>
#include <sys/rman.h>

#include <net/netisr2.h>
#include <net/netmsg2.h>
#include <net/if_var.h>

#include "acpi.h"
#include "acpivar.h"
#include "acpi_cpu.h"

/*
 * Support for ACPI Processor devices, including C[1-3] sleep states.
 */

/* Hooks for the ACPI CA debugging infrastructure */
#define _COMPONENT	ACPI_PROCESSOR
ACPI_MODULE_NAME("PROCESSOR")

struct netmsg_acpi_cst {
    struct netmsg_base	base;
    struct acpi_cpu_softc *sc;
    int			val;
};

struct acpi_cx {
    struct resource	*p_lvlx;	/* Register to read to enter state. */
    int			rid;		/* rid of p_lvlx */
    uint32_t		type;		/* C1-3 (C4 and up treated as C3). */
    uint32_t		trans_lat;	/* Transition latency (usec). */
    uint32_t		power;		/* Power consumed (mW). */
    int			res_type;	/* Resource type for p_lvlx. */
};
#define MAX_CX_STATES	8

struct acpi_cpu_softc {
    device_t		cpu_dev;
    struct acpi_cpux_softc *cpu_parent;
    ACPI_HANDLE		cpu_handle;
    int			cpu_id;
    uint32_t		cst_flags;	/* ACPI_CST_FLAG_ */
    uint32_t		cpu_p_blk;	/* ACPI P_BLK location */
    uint32_t		cpu_p_blk_len;	/* P_BLK length (must be 6). */
    struct acpi_cx	cpu_cx_states[MAX_CX_STATES];
    int			cpu_cx_count;	/* Number of valid Cx states. */
    int			cpu_prev_sleep;	/* Last idle sleep duration. */
    /* Runtime state. */
    int			cpu_non_c3;	/* Index of lowest non-C3 state. */
    u_long		cpu_cx_stats[MAX_CX_STATES]; /* Cx usage history. */
    /* Values for sysctl. */
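    /*
     * cpu_cx_lowest is the clamped, currently-effective limit;
     * cpu_cx_lowest_req preserves the user's request so it can be
     * re-applied when a _CST change alters the number of valid states.
     */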
    int			cpu_cx_lowest;		/* Current Cx lowest */
    int			cpu_cx_lowest_req;	/* Requested Cx lowest */
    char		cpu_cx_supported[64];
};

#define ACPI_CST_FLAG_PROBING	0x1

struct acpi_cpu_device {
    struct resource_list	ad_rl;
};

#define CPU_GET_REG(reg, width)					\
    (bus_space_read_ ## width(rman_get_bustag((reg)),		\
			      rman_get_bushandle((reg)), 0))
#define CPU_SET_REG(reg, width, val)				\
    (bus_space_write_ ## width(rman_get_bustag((reg)),		\
			       rman_get_bushandle((reg)), 0, (val)))

#define PM_USEC(x)	((x) >> 2)	/* ~4 clocks per usec (3.57955 MHz) */

#define ACPI_NOTIFY_CX_STATES	0x81	/* _CST changed. */

#define CPU_QUIRK_NO_C3		(1<<0)	/* C3-type states are not usable. */
#define CPU_QUIRK_NO_BM_CTRL	(1<<2)	/* No bus mastering control. */

#define PCI_VENDOR_INTEL	0x8086
#define PCI_DEVICE_82371AB_3	0x7113	/* PIIX4 chipset for quirks. */
#define PCI_REVISION_A_STEP	0
#define PCI_REVISION_B_STEP	1
#define PCI_REVISION_4E		2
#define PCI_REVISION_4M		3
#define PIIX4_DEVACTB_REG	0x58
#define PIIX4_BRLD_EN_IRQ0	(1<<0)
#define PIIX4_BRLD_EN_IRQ	(1<<1)
#define PIIX4_BRLD_EN_IRQ8	(1<<5)
#define PIIX4_STOP_BREAK_MASK	(PIIX4_BRLD_EN_IRQ0 | PIIX4_BRLD_EN_IRQ | PIIX4_BRLD_EN_IRQ8)
#define PIIX4_PCNTRL_BST_EN	(1<<10)

/* Platform hardware resource information. */
static uint32_t		cpu_smi_cmd;	/* Value to write to SMI_CMD. */
static uint8_t		cpu_cst_cnt;	/* Indicate we are _CST aware. */
static int		cpu_quirks;	/* Indicate any hardware bugs. */

/* Runtime state. */
static int		cpu_disable_idle; /* Disable entry to idle function */
static int		cpu_cx_count;	/* Number of valid Cx states */

/* Values for sysctl. */
static int		cpu_cx_generic;
static int		cpu_cx_lowest;		/* Current Cx lowest */
static int		cpu_cx_lowest_req;	/* Requested Cx lowest */
static struct lwkt_serialize cpu_cx_slize = LWKT_SERIALIZE_INITIALIZER;

/* C3 state transition */
static int		cpu_c3_ncpus;

static device_t		*cpu_devices;
static int		cpu_ndevices;
static struct acpi_cpu_softc **cpu_softc;

static int	acpi_cpu_cst_probe(device_t dev);
static int	acpi_cpu_cst_attach(device_t dev);
static int	acpi_cpu_cst_suspend(device_t dev);
static int	acpi_cpu_cst_resume(device_t dev);
static struct resource_list *acpi_cpu_cst_get_rlist(device_t dev,
		    device_t child);
static device_t	acpi_cpu_cst_add_child(device_t bus, device_t parent,
		    int order, const char *name, int unit);
static int	acpi_cpu_cst_read_ivar(device_t dev, device_t child,
		    int index, uintptr_t *result);
static int	acpi_cpu_cst_shutdown(device_t dev);
static void	acpi_cpu_cx_probe(struct acpi_cpu_softc *sc);
static void	acpi_cpu_generic_cx_probe(struct acpi_cpu_softc *sc);
static int	acpi_cpu_cx_cst(struct acpi_cpu_softc *sc);
static int	acpi_cpu_cx_cst_dispatch(struct acpi_cpu_softc *sc);
static void	acpi_cpu_startup(void *arg);
static void	acpi_cpu_startup_cx(struct acpi_cpu_softc *sc);
static void	acpi_cpu_cx_list(struct acpi_cpu_softc *sc);
static void	acpi_cpu_idle(void);
static void	acpi_cpu_cst_notify(device_t);
static int	acpi_cpu_quirks(void);
static int	acpi_cpu_usage_sysctl(SYSCTL_HANDLER_ARGS);
static int	acpi_cpu_set_cx_lowest(struct acpi_cpu_softc *, int);
static int	acpi_cpu_set_cx_lowest_oncpu(struct acpi_cpu_softc *, int);
static int	acpi_cpu_cx_lowest_sysctl(SYSCTL_HANDLER_ARGS);
static int	acpi_cpu_cx_lowest_use_sysctl(SYSCTL_HANDLER_ARGS);
static int	acpi_cpu_global_cx_lowest_sysctl(SYSCTL_HANDLER_ARGS);
static int	acpi_cpu_global_cx_lowest_use_sysctl(SYSCTL_HANDLER_ARGS);
static void	acpi_cpu_cx_non_c3(struct acpi_cpu_softc *sc);
static void	acpi_cpu_global_cx_count(void);

static void	acpi_cpu_c1(void);	/* XXX */

static device_method_t acpi_cpu_cst_methods[] = {
    /* Device interface */
    DEVMETHOD(device_probe,	acpi_cpu_cst_probe),
    DEVMETHOD(device_attach,	acpi_cpu_cst_attach),
    DEVMETHOD(device_detach,	bus_generic_detach),
    DEVMETHOD(device_shutdown,	acpi_cpu_cst_shutdown),
    DEVMETHOD(device_suspend,	acpi_cpu_cst_suspend),
    DEVMETHOD(device_resume,	acpi_cpu_cst_resume),

    /* Bus interface */
    DEVMETHOD(bus_add_child,	acpi_cpu_cst_add_child),
    DEVMETHOD(bus_read_ivar,	acpi_cpu_cst_read_ivar),
    DEVMETHOD(bus_get_resource_list, acpi_cpu_cst_get_rlist),
    DEVMETHOD(bus_get_resource,	bus_generic_rl_get_resource),
    DEVMETHOD(bus_set_resource,	bus_generic_rl_set_resource),
    DEVMETHOD(bus_alloc_resource, bus_generic_rl_alloc_resource),
    DEVMETHOD(bus_release_resource, bus_generic_rl_release_resource),
    DEVMETHOD(bus_driver_added,	bus_generic_driver_added),
    DEVMETHOD(bus_activate_resource, bus_generic_activate_resource),
    DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource),
    DEVMETHOD(bus_setup_intr,	bus_generic_setup_intr),
    DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr),
    DEVMETHOD_END
};

static driver_t acpi_cpu_cst_driver = {
    "cpu_cst",
    acpi_cpu_cst_methods,
    sizeof(struct acpi_cpu_softc),
};

static devclass_t acpi_cpu_cst_devclass;
DRIVER_MODULE(cpu_cst, cpu, acpi_cpu_cst_driver, acpi_cpu_cst_devclass, NULL, NULL);
MODULE_DEPEND(cpu_cst, acpi, 1, 1, 1);

static int
acpi_cpu_cst_probe(device_t dev)
{
    int cpu_id;

    if (acpi_disabled("cpu_cst") || acpi_get_type(dev) != ACPI_TYPE_PROCESSOR)
	return (ENXIO);

    cpu_id = acpi_get_magic(dev);

    if (cpu_softc == NULL)
	cpu_softc = kmalloc(sizeof(struct acpi_cpu_softc *) *
	    SMP_MAXCPU, M_TEMP /* XXX */, M_INTWAIT | M_ZERO);

    /*
     * Check if we already probed this processor.  We scan the bus twice
     * so it's possible we've already seen this one.
     */
    if (cpu_softc[cpu_id] != NULL) {
	device_printf(dev, "CPU%d cstate already exists\n", cpu_id);
	return (ENXIO);
    }

    /* Mark this processor as in-use and save our derived id for attach. */
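    /*
     * A (void *)1 sentinel claims the slot here; attach replaces it
     * with the real softc pointer.
     */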
    cpu_softc[cpu_id] = (void *)1;
    device_set_desc(dev, "ACPI CPU C-State");

    return (0);
}

static int
acpi_cpu_cst_attach(device_t dev)
{
    ACPI_BUFFER		  buf;
    ACPI_OBJECT		  *obj;
    struct acpi_cpu_softc *sc;
    ACPI_STATUS		  status;

    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    sc = device_get_softc(dev);
    sc->cpu_dev = dev;
    sc->cpu_parent = device_get_softc(device_get_parent(dev));
    sc->cpu_handle = acpi_get_handle(dev);
    sc->cpu_id = acpi_get_magic(dev);
    cpu_softc[sc->cpu_id] = sc;
    cpu_smi_cmd = AcpiGbl_FADT.SmiCommand;
    cpu_cst_cnt = AcpiGbl_FADT.CstControl;

    buf.Pointer = NULL;
    buf.Length = ACPI_ALLOCATE_BUFFER;
    status = AcpiEvaluateObject(sc->cpu_handle, NULL, NULL, &buf);
    if (ACPI_FAILURE(status)) {
	device_printf(dev, "attach failed to get Processor obj - %s\n",
		      AcpiFormatException(status));
	return (ENXIO);
    }
    obj = (ACPI_OBJECT *)buf.Pointer;
    sc->cpu_p_blk = obj->Processor.PblkAddress;
    sc->cpu_p_blk_len = obj->Processor.PblkLength;
    AcpiOsFree(obj);
    ACPI_DEBUG_PRINT((ACPI_DB_INFO, "acpi_cpu%d: P_BLK at %#x/%d\n",
		     device_get_unit(dev), sc->cpu_p_blk, sc->cpu_p_blk_len));

    /*
     * If this is the first cpu we attach, create and initialize the generic
     * resources that will be used by all acpi cpu devices.
     */
    if (device_get_unit(dev) == 0) {
	/* Assume we won't be using generic Cx mode by default */
	cpu_cx_generic = FALSE;

	/* Queue post cpu-probing task handler */
	AcpiOsExecute(OSL_NOTIFY_HANDLER, acpi_cpu_startup, NULL);
    }

    /* Probe for Cx state support. */
    acpi_cpu_cx_probe(sc);

    /* Finally, call identify and probe/attach for child devices. */
    bus_generic_probe(dev);
    bus_generic_attach(dev);

    return (0);
}

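/*
 * cpu_disable_idle is checked at the top of acpi_cpu_idle(), so flipping
 * it is enough to keep every CPU out of the ACPI Cx paths; presumably the
 * Cx sleep registers cannot be trusted while the platform is suspending.
 */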
/*
 * Disable any entry to the idle function during suspend and re-enable it
 * during resume.
 */
static int
acpi_cpu_cst_suspend(device_t dev)
{
    int error;

    error = bus_generic_suspend(dev);
    if (error)
	return (error);
    cpu_disable_idle = TRUE;
    return (0);
}

static int
acpi_cpu_cst_resume(device_t dev)
{
    cpu_disable_idle = FALSE;
    return (bus_generic_resume(dev));
}

static struct resource_list *
acpi_cpu_cst_get_rlist(device_t dev, device_t child)
{
    struct acpi_cpu_device *ad;

    ad = device_get_ivars(child);
    if (ad == NULL)
	return (NULL);
    return (&ad->ad_rl);
}

static device_t
acpi_cpu_cst_add_child(device_t bus, device_t parent, int order,
    const char *name, int unit)
{
    struct acpi_cpu_device *ad;
    device_t child;

    if ((ad = kmalloc(sizeof(*ad), M_TEMP, M_NOWAIT | M_ZERO)) == NULL)
	return (NULL);

    resource_list_init(&ad->ad_rl);

    child = device_add_child_ordered(parent, order, name, unit);
    if (child != NULL)
	device_set_ivars(child, ad);
    else
	kfree(ad, M_TEMP);
    return (child);
}

static int
acpi_cpu_cst_read_ivar(device_t dev, device_t child, int index,
    uintptr_t *result)
{
    struct acpi_cpu_softc *sc;

    sc = device_get_softc(dev);
    switch (index) {
    case ACPI_IVAR_HANDLE:
	*result = (uintptr_t)sc->cpu_handle;
	break;
#if 0
    case CPU_IVAR_PCPU:
	*result = (uintptr_t)sc->cpu_pcpu;
	break;
#endif
    default:
	return (ENOENT);
    }
    return (0);
}

static int
acpi_cpu_cst_shutdown(device_t dev)
{
    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    /* Allow children to shutdown first. */
    bus_generic_shutdown(dev);

    /*
     * Disable any entry to the idle function.  There is a small race where
     * an idle thread may have passed this check but not yet gone to sleep.
     * This is ok since device_shutdown() does not free the softc, otherwise
     * we'd have to be sure all threads were evicted before returning.
     */
    cpu_disable_idle = TRUE;

    return_VALUE (0);
}

static void
acpi_cpu_cx_probe(struct acpi_cpu_softc *sc)
{
    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    /* Use initial sleep value of 1 sec. to start with lowest idle state. */
    sc->cpu_prev_sleep = 1000000;
    sc->cpu_cx_lowest = 0;
    sc->cpu_cx_lowest_req = 0;

    /*
     * Check for the ACPI 2.0 _CST sleep states object.  If we can't find
     * any, we'll revert to the generic FADT/P_BLK Cx control method, which
     * is handled by acpi_cpu_startup.  Generic Cx probing must be deferred
     * until all CPUs in the system have been probed, since we may already
     * have found CPUs with valid _CST packages.
     */
    if (!cpu_cx_generic && acpi_cpu_cx_cst(sc) != 0) {
	/*
	 * We were unable to find a _CST package for this cpu or there
	 * was an error parsing it.  Switch back to generic mode.
	 */
	cpu_cx_generic = TRUE;
	if (bootverbose)
	    device_printf(sc->cpu_dev, "switching to generic Cx mode\n");
    }

    /*
     * TODO: _CSD Package should be checked here.
     */
}

static void
acpi_cpu_generic_cx_probe(struct acpi_cpu_softc *sc)
{
    ACPI_GENERIC_ADDRESS gas;
    struct acpi_cx	*cx_ptr;

    sc->cpu_cx_count = 0;
    cx_ptr = sc->cpu_cx_states;

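    /*
     * Seeding cpu_prev_sleep high makes the "trans_lat * 3 <= cpu_prev_sleep"
     * test in acpi_cpu_idle() pass for every state at first, so the deepest
     * permitted state is used until real sleep measurements accumulate.
     */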
    /* Use initial sleep value of 1 sec. to start with lowest idle state. */
    sc->cpu_prev_sleep = 1000000;

    /* C1 has been required since just after ACPI 1.0 */
    cx_ptr->type = ACPI_STATE_C1;
    cx_ptr->trans_lat = 0;
    cx_ptr++;
    sc->cpu_cx_count++;

    /*
     * The spec says P_BLK must be 6 bytes long.  However, some systems
     * use it to indicate a fractional set of features present so we
     * take 5 as C2.  Some may also have a value of 7 to indicate
     * another C3 but most use _CST for this (as required) and having
     * "only" C1-C3 is not a hardship.
     */
    if (sc->cpu_p_blk_len < 5)
	return;

    /* Validate and allocate resources for C2 (P_LVL2). */
    gas.SpaceId = ACPI_ADR_SPACE_SYSTEM_IO;
    gas.BitWidth = 8;
    if (AcpiGbl_FADT.C2Latency <= 100) {
	gas.Address = sc->cpu_p_blk + 4;

	cx_ptr->rid = sc->cpu_parent->cpux_next_rid;
	acpi_bus_alloc_gas(sc->cpu_dev, &cx_ptr->type, &cx_ptr->rid, &gas,
			   &cx_ptr->p_lvlx, RF_SHAREABLE);
	if (cx_ptr->p_lvlx != NULL) {
	    sc->cpu_parent->cpux_next_rid++;
	    cx_ptr->type = ACPI_STATE_C2;
	    cx_ptr->trans_lat = AcpiGbl_FADT.C2Latency;
	    cx_ptr++;
	    sc->cpu_cx_count++;
	    sc->cpu_non_c3 = 1;
	}
    }
    if (sc->cpu_p_blk_len < 6)
	return;

    /* Validate and allocate resources for C3 (P_LVL3). */
    if (AcpiGbl_FADT.C3Latency <= 1000 && !(cpu_quirks & CPU_QUIRK_NO_C3)) {
	gas.Address = sc->cpu_p_blk + 5;

	cx_ptr->rid = sc->cpu_parent->cpux_next_rid;
	acpi_bus_alloc_gas(sc->cpu_dev, &cx_ptr->type, &cx_ptr->rid, &gas,
			   &cx_ptr->p_lvlx, RF_SHAREABLE);
	if (cx_ptr->p_lvlx != NULL) {
	    sc->cpu_parent->cpux_next_rid++;
	    cx_ptr->type = ACPI_STATE_C3;
	    cx_ptr->trans_lat = AcpiGbl_FADT.C3Latency;
	    cx_ptr++;
	    sc->cpu_cx_count++;
	}
    }
}

/*
 * Parse a _CST package and set up its Cx states.  Since the _CST object
 * can change dynamically, our notify handler may call this function
 * to clean up and probe the new _CST package.
 */
static int
acpi_cpu_cx_cst(struct acpi_cpu_softc *sc)
{
    struct acpi_cx	*cx_ptr;
    ACPI_STATUS		status;
    ACPI_BUFFER		buf;
    ACPI_OBJECT		*top;
    ACPI_OBJECT		*pkg;
    uint32_t		count;
    int			i;

    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    buf.Pointer = NULL;
    buf.Length = ACPI_ALLOCATE_BUFFER;
    status = AcpiEvaluateObject(sc->cpu_handle, "_CST", NULL, &buf);
    if (ACPI_FAILURE(status))
	return (ENXIO);

    /* _CST is a package with a count and at least one Cx package. */
    top = (ACPI_OBJECT *)buf.Pointer;
    if (!ACPI_PKG_VALID(top, 2) || acpi_PkgInt32(top, 0, &count) != 0) {
	device_printf(sc->cpu_dev, "invalid _CST package\n");
	AcpiOsFree(buf.Pointer);
	return (ENXIO);
    }
    if (count != top->Package.Count - 1) {
	device_printf(sc->cpu_dev, "invalid _CST state count (%d != %d)\n",
		      count, top->Package.Count - 1);
	count = top->Package.Count - 1;
    }
    if (count > MAX_CX_STATES) {
	device_printf(sc->cpu_dev, "_CST has too many states (%d)\n", count);
	count = MAX_CX_STATES;
    }

    sc->cst_flags |= ACPI_CST_FLAG_PROBING;
    cpu_sfence();

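    /*
     * Each _CST entry is a 4-element package: {register (GAS), type,
     * latency, power}, matching the acpi_PkgGas/acpi_PkgInt32 indices
     * used below.  The PROBING flag set above (with a store fence) makes
     * acpi_cpu_idle() fall back to C1 while this table is rebuilt.
     */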
    /* Set up all valid states. */
    sc->cpu_cx_count = 0;
    cx_ptr = sc->cpu_cx_states;
    for (i = 0; i < count; i++) {
	pkg = &top->Package.Elements[i + 1];
	if (!ACPI_PKG_VALID(pkg, 4) ||
	    acpi_PkgInt32(pkg, 1, &cx_ptr->type) != 0 ||
	    acpi_PkgInt32(pkg, 2, &cx_ptr->trans_lat) != 0 ||
	    acpi_PkgInt32(pkg, 3, &cx_ptr->power) != 0) {
	    device_printf(sc->cpu_dev, "skipping invalid Cx state package\n");
	    continue;
	}

	/* Validate the state to see if we should use it. */
	switch (cx_ptr->type) {
	case ACPI_STATE_C1:
	    sc->cpu_non_c3 = i;
	    cx_ptr++;
	    sc->cpu_cx_count++;
	    continue;
	case ACPI_STATE_C2:
	    sc->cpu_non_c3 = i;
	    break;
	case ACPI_STATE_C3:
	default:
	    if ((cpu_quirks & CPU_QUIRK_NO_C3) != 0) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				 "acpi_cpu%d: C3[%d] not available.\n",
				 device_get_unit(sc->cpu_dev), i));
		continue;
	    }
	    break;
	}

#ifdef notyet
	/* Free up any previous register. */
	if (cx_ptr->p_lvlx != NULL) {
	    bus_release_resource(sc->cpu_dev, 0, 0, cx_ptr->p_lvlx);
	    cx_ptr->p_lvlx = NULL;
	}
#endif

	/* Allocate the control register for C2 or C3. */
	cx_ptr->rid = sc->cpu_parent->cpux_next_rid;
	acpi_PkgGas(sc->cpu_dev, pkg, 0, &cx_ptr->res_type, &cx_ptr->rid,
		    &cx_ptr->p_lvlx, RF_SHAREABLE);
	if (cx_ptr->p_lvlx) {
	    sc->cpu_parent->cpux_next_rid++;
	    ACPI_DEBUG_PRINT((ACPI_DB_INFO,
			     "acpi_cpu%d: Got C%d - %d latency\n",
			     device_get_unit(sc->cpu_dev), cx_ptr->type,
			     cx_ptr->trans_lat));
	    cx_ptr++;
	    sc->cpu_cx_count++;
	}
    }
    AcpiOsFree(buf.Pointer);

    /*
     * Fix up the lowest Cx being used
     */
    if (sc->cpu_cx_lowest_req < sc->cpu_cx_count)
	sc->cpu_cx_lowest = sc->cpu_cx_lowest_req;
    if (sc->cpu_cx_lowest > sc->cpu_cx_count - 1)
	sc->cpu_cx_lowest = sc->cpu_cx_count - 1;

    /*
     * Cache the lowest non-C3 state.
     * NOTE: this must be done after cpu_cx_lowest is set.
     */
    acpi_cpu_cx_non_c3(sc);

    cpu_sfence();
    sc->cst_flags &= ~ACPI_CST_FLAG_PROBING;

    return (0);
}

static void
acpi_cst_probe_handler(netmsg_t msg)
{
    struct netmsg_acpi_cst *rmsg = (struct netmsg_acpi_cst *)msg;
    int error;

    error = acpi_cpu_cx_cst(rmsg->sc);
    lwkt_replymsg(&rmsg->base.lmsg, error);
}

static int
acpi_cpu_cx_cst_dispatch(struct acpi_cpu_softc *sc)
{
    struct netmsg_acpi_cst msg;

    netmsg_init(&msg.base, NULL, &curthread->td_msgport, MSGF_PRIORITY,
	acpi_cst_probe_handler);
    msg.sc = sc;

    return lwkt_domsg(netisr_cpuport(sc->cpu_id), &msg.base.lmsg, 0);
}

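/*
 * acpi_cpu_cx_cst() rewrites per-CPU state that acpi_cpu_idle() reads, so
 * the dispatch above runs it synchronously on the target CPU via a netisr
 * message rather than touching another CPU's softc directly.
 */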
/*
 * Call this *after* all CPUs have been attached.
 */
static void
acpi_cpu_startup(void *arg)
{
    struct acpi_cpu_softc *sc;
    int i;

    /* Get set of CPU devices */
    devclass_get_devices(acpi_cpu_cst_devclass, &cpu_devices, &cpu_ndevices);

    /*
     * Setup any quirks that might be necessary now that we have probed
     * all the CPUs
     */
    acpi_cpu_quirks();

    if (cpu_cx_generic) {
	/*
	 * We are using generic Cx mode, probe for available Cx states
	 * for all processors.
	 */
	for (i = 0; i < cpu_ndevices; i++) {
	    sc = device_get_softc(cpu_devices[i]);
	    acpi_cpu_generic_cx_probe(sc);
	}
    } else {
	/*
	 * We are using _CST mode, remove C3 state if necessary.
	 *
	 * As we now know for sure that we will be using _CST mode
	 * install our notify handler.
	 */
	for (i = 0; i < cpu_ndevices; i++) {
	    sc = device_get_softc(cpu_devices[i]);
	    if (cpu_quirks & CPU_QUIRK_NO_C3)
		sc->cpu_cx_count = sc->cpu_non_c3 + 1;
	    sc->cpu_parent->cpux_cst_notify = acpi_cpu_cst_notify;
	}
    }
    acpi_cpu_global_cx_count();

    /* Perform Cx final initialization. */
    for (i = 0; i < cpu_ndevices; i++) {
	sc = device_get_softc(cpu_devices[i]);
	acpi_cpu_startup_cx(sc);

	if (sc->cpu_parent->glob_sysctl_tree != NULL) {
	    struct acpi_cpux_softc *cpux = sc->cpu_parent;

	    /* Add a sysctl handler to handle global Cx lowest setting */
	    SYSCTL_ADD_PROC(&cpux->glob_sysctl_ctx,
			    SYSCTL_CHILDREN(cpux->glob_sysctl_tree),
			    OID_AUTO, "cx_lowest",
			    CTLTYPE_STRING | CTLFLAG_RW, NULL, 0,
			    acpi_cpu_global_cx_lowest_sysctl, "A",
			    "Requested global lowest Cx sleep state");
	    SYSCTL_ADD_PROC(&cpux->glob_sysctl_ctx,
			    SYSCTL_CHILDREN(cpux->glob_sysctl_tree),
			    OID_AUTO, "cx_lowest_use",
			    CTLTYPE_STRING | CTLFLAG_RD, NULL, 0,
			    acpi_cpu_global_cx_lowest_use_sysctl, "A",
			    "Global lowest Cx sleep state to use");
	}
    }

    /* Take over idling from cpu_idle_default(). */
    cpu_cx_lowest = 0;
    cpu_cx_lowest_req = 0;
    cpu_disable_idle = FALSE;
    cpu_idle_hook = acpi_cpu_idle;
}

static void
acpi_cpu_cx_list(struct acpi_cpu_softc *sc)
{
    struct sbuf sb;
    int i;

    /*
     * Set up the list of Cx states
     */
    sbuf_new(&sb, sc->cpu_cx_supported, sizeof(sc->cpu_cx_supported),
	     SBUF_FIXEDLEN);
    for (i = 0; i < sc->cpu_cx_count; i++)
	sbuf_printf(&sb, "C%d/%d ", i + 1, sc->cpu_cx_states[i].trans_lat);
    sbuf_trim(&sb);
    sbuf_finish(&sb);
}

static void
acpi_cpu_startup_cx(struct acpi_cpu_softc *sc)
{
    struct acpi_cpux_softc *cpux = sc->cpu_parent;

    acpi_cpu_cx_list(sc);

    SYSCTL_ADD_STRING(&cpux->pcpu_sysctl_ctx,
		      SYSCTL_CHILDREN(cpux->pcpu_sysctl_tree),
		      OID_AUTO, "cx_supported", CTLFLAG_RD,
		      sc->cpu_cx_supported, 0,
		      "Cx/microsecond values for supported Cx states");
    SYSCTL_ADD_PROC(&cpux->pcpu_sysctl_ctx,
		    SYSCTL_CHILDREN(cpux->pcpu_sysctl_tree),
		    OID_AUTO, "cx_lowest", CTLTYPE_STRING | CTLFLAG_RW,
		    (void *)sc, 0, acpi_cpu_cx_lowest_sysctl, "A",
		    "requested lowest Cx sleep state");
    SYSCTL_ADD_PROC(&cpux->pcpu_sysctl_ctx,
		    SYSCTL_CHILDREN(cpux->pcpu_sysctl_tree),
		    OID_AUTO, "cx_lowest_use", CTLTYPE_STRING | CTLFLAG_RD,
		    (void *)sc, 0, acpi_cpu_cx_lowest_use_sysctl, "A",
		    "lowest Cx sleep state to use");
    SYSCTL_ADD_PROC(&cpux->pcpu_sysctl_ctx,
		    SYSCTL_CHILDREN(cpux->pcpu_sysctl_tree),
		    OID_AUTO, "cx_usage", CTLTYPE_STRING | CTLFLAG_RD,
		    (void *)sc, 0, acpi_cpu_usage_sysctl, "A",
		    "percent usage for each Cx state");

#ifdef notyet
    /* Signal platform that we can handle _CST notification. */
    if (!cpu_cx_generic && cpu_cst_cnt != 0) {
	ACPI_LOCK(acpi);
	AcpiOsWritePort(cpu_smi_cmd, cpu_cst_cnt, 8);
	ACPI_UNLOCK(acpi);
    }
#endif
}

/*
 * Idle the CPU in the lowest state possible.  This function is called with
 * interrupts disabled.  Note that once it re-enables interrupts, a task
 * switch can occur so do not access shared data (i.e. the softc) after
 * interrupts are re-enabled.
 */
static void
acpi_cpu_idle(void)
{
    struct acpi_cpu_softc *sc;
    struct acpi_cx *cx_next;
    uint64_t	start_time, end_time;
    int		bm_active, cx_next_idx, i;

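    /*
     * Every return path below must re-enable interrupts, either explicitly
     * via ACPI_ENABLE_IRQS() or implicitly via the "sti" in acpi_cpu_c1().
     */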
    /* If disabled, return immediately. */
    if (cpu_disable_idle) {
	ACPI_ENABLE_IRQS();
	return;
    }

    /*
     * Look up our CPU id to get our softc.  If it's NULL, we'll use C1
     * since there is no ACPI processor object for this CPU.  This occurs
     * for logical CPUs in the HTT case.
     */
    sc = cpu_softc[mdcpu->mi.gd_cpuid];
    if (sc == NULL) {
	acpi_cpu_c1();
	return;
    }

    /* Still probing; use C1 */
    if (sc->cst_flags & ACPI_CST_FLAG_PROBING) {
	acpi_cpu_c1();
	return;
    }

    /* Find the lowest state that has small enough latency. */
    cx_next_idx = 0;
    for (i = sc->cpu_cx_lowest; i >= 0; i--) {
	if (sc->cpu_cx_states[i].trans_lat * 3 <= sc->cpu_prev_sleep) {
	    cx_next_idx = i;
	    break;
	}
    }

    /*
     * Check for bus master activity.  If there was activity, clear
     * the bit and use the lowest non-C3 state.  Note that the USB
     * driver polling for new devices keeps this bit set all the
     * time if USB is loaded.
     */
    if ((cpu_quirks & CPU_QUIRK_NO_BM_CTRL) == 0) {
	AcpiReadBitRegister(ACPI_BITREG_BUS_MASTER_STATUS, &bm_active);
	if (bm_active != 0) {
	    AcpiWriteBitRegister(ACPI_BITREG_BUS_MASTER_STATUS, 1);
	    cx_next_idx = min(cx_next_idx, sc->cpu_non_c3);
	}
    }

    /* Select the next state and update statistics. */
    cx_next = &sc->cpu_cx_states[cx_next_idx];
    sc->cpu_cx_stats[cx_next_idx]++;
    KASSERT(cx_next->type != ACPI_STATE_C0, ("acpi_cpu_idle: C0 sleep"));

    /*
     * Execute HLT (or equivalent) and wait for an interrupt.  We can't
     * calculate the time spent in C1 since the place we wake up is an
     * ISR.  Assume we slept half of quantum and return.
     */
    if (cx_next->type == ACPI_STATE_C1) {
	sc->cpu_prev_sleep = (sc->cpu_prev_sleep * 3 + 500000 / hz) / 4;
	acpi_cpu_c1();
	return;
    }

    /*
     * For C3(+), disable bus master arbitration and enable bus master wake
     * if BM control is available, otherwise flush the CPU cache.
     */
    if (cx_next->type >= ACPI_STATE_C3) {
	if ((cpu_quirks & CPU_QUIRK_NO_BM_CTRL) == 0) {
	    AcpiWriteBitRegister(ACPI_BITREG_ARB_DISABLE, 1);
	    AcpiWriteBitRegister(ACPI_BITREG_BUS_MASTER_RLD, 1);
	} else
	    ACPI_FLUSH_CPU_CACHE();
    }

    /*
     * Read from P_LVLx to enter C2(+), checking time spent asleep.
     * Use the ACPI timer for measuring sleep time.  Since we need to
     * get the time very close to the CPU start/stop clock logic, this
     * is the only reliable time source.
     */
    AcpiRead(&start_time, &AcpiGbl_FADT.XPmTimerBlock);
    CPU_GET_REG(cx_next->p_lvlx, 1);

    /*
     * Read the end time twice.  Since it may take an arbitrary time
     * to enter the idle state, the first read may be executed before
     * the processor has stopped.  Doing it again provides enough
     * margin that we are certain to have a correct value.
     */
    AcpiRead(&end_time, &AcpiGbl_FADT.XPmTimerBlock);
    AcpiRead(&end_time, &AcpiGbl_FADT.XPmTimerBlock);

    /* Enable bus master arbitration and disable bus master wakeup. */
    if (cx_next->type >= ACPI_STATE_C3) {
	if ((cpu_quirks & CPU_QUIRK_NO_BM_CTRL) == 0) {
	    AcpiWriteBitRegister(ACPI_BITREG_ARB_DISABLE, 0);
	    AcpiWriteBitRegister(ACPI_BITREG_BUS_MASTER_RLD, 0);
	}
    }
    ACPI_ENABLE_IRQS();

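    /*
     * cpu_prev_sleep below is an exponential moving average: each update
     * keeps 3/4 of the old estimate and adds 1/4 of the new sample, so the
     * C-state choice adapts smoothly to recent idle behavior.
     */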
    /* Find the actual time asleep in microseconds. */
    end_time = acpi_TimerDelta(end_time, start_time);
    sc->cpu_prev_sleep = (sc->cpu_prev_sleep * 3 + PM_USEC(end_time)) / 4;
}

/*
 * Re-evaluate the _CST object when we are notified that it changed.
 */
static void
acpi_cpu_cst_notify(device_t dev)
{
    struct acpi_cpu_softc *sc = device_get_softc(dev);

    KASSERT(curthread->td_type != TD_TYPE_NETISR,
	("notify in netisr%d", mycpuid));

    lwkt_serialize_enter(&cpu_cx_slize);

    /* Update the list of Cx states. */
    acpi_cpu_cx_cst_dispatch(sc);
    acpi_cpu_cx_list(sc);

    /* Update the new lowest useable Cx state for all CPUs. */
    acpi_cpu_global_cx_count();

    /*
     * Fix up the lowest Cx being used
     */
    if (cpu_cx_lowest_req < cpu_cx_count)
	cpu_cx_lowest = cpu_cx_lowest_req;
    if (cpu_cx_lowest > cpu_cx_count - 1)
	cpu_cx_lowest = cpu_cx_count - 1;

    lwkt_serialize_exit(&cpu_cx_slize);
}

static int
acpi_cpu_quirks(void)
{
    device_t acpi_dev;
    uint32_t val;

    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    /*
     * Bus mastering arbitration control is needed to keep caches coherent
     * while sleeping in C3.  If it's not present but a working flush cache
     * instruction is present, flush the caches before entering C3 instead.
     * Otherwise, just disable C3 completely.
     */
    if (AcpiGbl_FADT.Pm2ControlBlock == 0 ||
	AcpiGbl_FADT.Pm2ControlLength == 0) {
	if ((AcpiGbl_FADT.Flags & ACPI_FADT_WBINVD) &&
	    (AcpiGbl_FADT.Flags & ACPI_FADT_WBINVD_FLUSH) == 0) {
	    cpu_quirks |= CPU_QUIRK_NO_BM_CTRL;
	    ACPI_DEBUG_PRINT((ACPI_DB_INFO,
		"acpi_cpu: no BM control, using flush cache method\n"));
	} else {
	    cpu_quirks |= CPU_QUIRK_NO_C3;
	    ACPI_DEBUG_PRINT((ACPI_DB_INFO,
		"acpi_cpu: no BM control, C3 not available\n"));
	}
    }

    /*
     * If we are using generic Cx mode, C3 on multiple CPUs requires using
     * the expensive flush cache instruction.
     */
    if (cpu_cx_generic && ncpus > 1) {
	cpu_quirks |= CPU_QUIRK_NO_BM_CTRL;
	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
	    "acpi_cpu: SMP, using flush cache mode for C3\n"));
    }

    /* Look for various quirks of the PIIX4 part. */
    acpi_dev = pci_find_device(PCI_VENDOR_INTEL, PCI_DEVICE_82371AB_3);
    if (acpi_dev != NULL) {
	switch (pci_get_revid(acpi_dev)) {
	/*
	 * Disable C3 support for all PIIX4 chipsets.  Some of these parts
	 * do not report the BMIDE status to the BM status register and
	 * others have a livelock bug if Type-F DMA is enabled.  Linux
	 * works around the BMIDE bug by reading the BM status directly
	 * but we take the simpler approach of disabling C3 for these
	 * parts.
	 *
	 * See erratum #18 ("C3 Power State/BMIDE and Type-F DMA
	 * Livelock") from the January 2002 PIIX4 specification update.
	 * Applies to all PIIX4 models.
	 *
	 * Also, make sure that all interrupts cause a "Stop Break"
	 * event to exit from C2 state.
	 * Also, BRLD_EN_BM (ACPI_BITREG_BUS_MASTER_RLD in ACPI-speak)
	 * should be set to zero, otherwise it causes C2 to short-sleep.
	 * PIIX4 doesn't properly support C3 and bus master activity
	 * need not break out of C2.
	 */
	case PCI_REVISION_A_STEP:
	case PCI_REVISION_B_STEP:
	case PCI_REVISION_4E:
	case PCI_REVISION_4M:
	    cpu_quirks |= CPU_QUIRK_NO_C3;
	    ACPI_DEBUG_PRINT((ACPI_DB_INFO,
		"acpi_cpu: working around PIIX4 bug, disabling C3\n"));

	    val = pci_read_config(acpi_dev, PIIX4_DEVACTB_REG, 4);
	    if ((val & PIIX4_STOP_BREAK_MASK) != PIIX4_STOP_BREAK_MASK) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
		    "acpi_cpu: PIIX4: enabling IRQs to generate Stop Break\n"));
		val |= PIIX4_STOP_BREAK_MASK;
		pci_write_config(acpi_dev, PIIX4_DEVACTB_REG, val, 4);
	    }
	    AcpiReadBitRegister(ACPI_BITREG_BUS_MASTER_RLD, &val);
	    if (val) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
		    "acpi_cpu: PIIX4: reset BRLD_EN_BM\n"));
		AcpiWriteBitRegister(ACPI_BITREG_BUS_MASTER_RLD, 0);
	    }
	    break;
	default:
	    break;
	}
    }

    return (0);
}

static int
acpi_cpu_usage_sysctl(SYSCTL_HANDLER_ARGS)
{
    struct acpi_cpu_softc *sc;
    struct sbuf	sb;
    char	buf[128];
    int		i;
    uintmax_t	fract, sum, whole;

    sc = (struct acpi_cpu_softc *) arg1;
    sum = 0;
    for (i = 0; i < sc->cpu_cx_count; i++)
	sum += sc->cpu_cx_stats[i];
    sbuf_new(&sb, buf, sizeof(buf), SBUF_FIXEDLEN);
    for (i = 0; i < sc->cpu_cx_count; i++) {
	if (sum > 0) {
	    whole = (uintmax_t)sc->cpu_cx_stats[i] * 100;
	    fract = (whole % sum) * 100;
	    sbuf_printf(&sb, "%u.%02u%% ", (u_int)(whole / sum),
		(u_int)(fract / sum));
	} else
	    sbuf_printf(&sb, "0.00%% ");
    }
    sbuf_printf(&sb, "last %dus", sc->cpu_prev_sleep);
    sbuf_trim(&sb);
    sbuf_finish(&sb);
    sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req);
    sbuf_delete(&sb);

    return (0);
}

static int
acpi_cpu_set_cx_lowest_oncpu(struct acpi_cpu_softc *sc, int val)
{
    int old_lowest, error = 0;
    uint32_t old_type, type;

    KKASSERT(mycpuid == sc->cpu_id);

    sc->cpu_cx_lowest_req = val;
    if (val > sc->cpu_cx_count - 1)
	val = sc->cpu_cx_count - 1;
    old_lowest = atomic_swap_int(&sc->cpu_cx_lowest, val);

    old_type = sc->cpu_cx_states[old_lowest].type;
    type = sc->cpu_cx_states[val].type;
    if (old_type >= ACPI_STATE_C3 && type < ACPI_STATE_C3) {
	KKASSERT(cpu_c3_ncpus > 0);
	if (atomic_fetchadd_int(&cpu_c3_ncpus, -1) == 1) {
	    /*
	     * All of the CPUs have exited C3 state; switch back to
	     * a better one-shot timer.
	     */
	    error = cputimer_intr_select_caps(CPUTIMER_INTR_CAP_NONE);
	    KKASSERT(!error || error == ERESTART);
	    if (error == ERESTART) {
		if (bootverbose)
		    kprintf("exit C3, restart intr cputimer\n");
		cputimer_intr_restart();
	    }
	}
    } else if (type >= ACPI_STATE_C3 && old_type < ACPI_STATE_C3) {
	if (atomic_fetchadd_int(&cpu_c3_ncpus, 1) == 0) {
	    /*
	     * When the first CPU enters C3(+) state, switch to a
	     * one-shot timer that can handle C3(+), i.e. a timer
	     * that will not hang.
	     */
	    error = cputimer_intr_select_caps(CPUTIMER_INTR_CAP_PS);
	    if (error == ERESTART) {
		if (bootverbose)
		    kprintf("enter C3, restart intr cputimer\n");
		cputimer_intr_restart();
	    } else if (error) {
		kprintf("no suitable intr cputimer found\n");

		/* Restore */
		sc->cpu_cx_lowest = old_lowest;
		atomic_fetchadd_int(&cpu_c3_ncpus, -1);
	    }
	}
    }

    if (error)
	return error;

    /* Cache the new lowest non-C3 state. */
    acpi_cpu_cx_non_c3(sc);

    /* Reset the statistics counters. */
    bzero(sc->cpu_cx_stats, sizeof(sc->cpu_cx_stats));
    return (0);
}

static void
acpi_cst_set_lowest_handler(netmsg_t msg)
{
    struct netmsg_acpi_cst *rmsg = (struct netmsg_acpi_cst *)msg;
    int error;

    error = acpi_cpu_set_cx_lowest_oncpu(rmsg->sc, rmsg->val);
    lwkt_replymsg(&rmsg->base.lmsg, error);
}

static int
acpi_cpu_set_cx_lowest(struct acpi_cpu_softc *sc, int val)
{
    struct netmsg_acpi_cst msg;

    netmsg_init(&msg.base, NULL, &curthread->td_msgport, MSGF_PRIORITY,
	acpi_cst_set_lowest_handler);
    msg.sc = sc;
    msg.val = val;

    return lwkt_domsg(netisr_cpuport(sc->cpu_id), &msg.base.lmsg, 0);
}

static int
acpi_cpu_cx_lowest_sysctl(SYSCTL_HANDLER_ARGS)
{
    struct acpi_cpu_softc *sc;
    char	state[8];
    int		val, error;

    sc = (struct acpi_cpu_softc *)arg1;
    ksnprintf(state, sizeof(state), "C%d", sc->cpu_cx_lowest_req + 1);
    error = sysctl_handle_string(oidp, state, sizeof(state), req);
    if (error != 0 || req->newptr == NULL)
	return (error);
    if (strlen(state) < 2 || toupper(state[0]) != 'C')
	return (EINVAL);
    val = (int) strtol(state + 1, NULL, 10) - 1;
    if (val < 0)
	return (EINVAL);

    lwkt_serialize_enter(&cpu_cx_slize);
    error = acpi_cpu_set_cx_lowest(sc, val);
    lwkt_serialize_exit(&cpu_cx_slize);

    return error;
}

static int
acpi_cpu_cx_lowest_use_sysctl(SYSCTL_HANDLER_ARGS)
{
    struct acpi_cpu_softc *sc;
    char state[8];

    sc = (struct acpi_cpu_softc *)arg1;
    ksnprintf(state, sizeof(state), "C%d", sc->cpu_cx_lowest + 1);
    return sysctl_handle_string(oidp, state, sizeof(state), req);
}

static int
acpi_cpu_global_cx_lowest_sysctl(SYSCTL_HANDLER_ARGS)
{
    struct acpi_cpu_softc *sc;
    char	state[8];
    int		val, error, i;

    ksnprintf(state, sizeof(state), "C%d", cpu_cx_lowest_req + 1);
    error = sysctl_handle_string(oidp, state, sizeof(state), req);
    if (error != 0 || req->newptr == NULL)
	return (error);
    if (strlen(state) < 2 || toupper(state[0]) != 'C')
	return (EINVAL);
    val = (int) strtol(state + 1, NULL, 10) - 1;
    if (val < 0)
	return (EINVAL);

    lwkt_serialize_enter(&cpu_cx_slize);

    cpu_cx_lowest_req = val;
    cpu_cx_lowest = val;
    if (cpu_cx_lowest > cpu_cx_count - 1)
	cpu_cx_lowest = cpu_cx_count - 1;

    /* Update the new lowest useable Cx state for all CPUs. */
    for (i = 0; i < cpu_ndevices; i++) {
	sc = device_get_softc(cpu_devices[i]);
	error = acpi_cpu_set_cx_lowest(sc, val);
	if (error) {
	    KKASSERT(i == 0);
	    break;
	}
    }

    lwkt_serialize_exit(&cpu_cx_slize);

    return error;
}

static int
acpi_cpu_global_cx_lowest_use_sysctl(SYSCTL_HANDLER_ARGS)
{
    char state[8];

    ksnprintf(state, sizeof(state), "C%d", cpu_cx_lowest + 1);
    return sysctl_handle_string(oidp, state, sizeof(state), req);
}

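/*
 * On x86, the "sti; hlt" below is safe against a lost-wakeup race: sti
 * defers interrupt recognition until after the next instruction, so an
 * interrupt cannot slip in between enabling interrupts and halting.
 */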
/*
 * Put the CPU in C1 in a machine-dependent way.
 * XXX: shouldn't be here!
 */
static void
acpi_cpu_c1(void)
{
#ifdef __ia64__
    ia64_call_pal_static(PAL_HALT_LIGHT, 0, 0, 0);
#else
    splz();
    if ((mycpu->gd_reqflags & RQF_IDLECHECK_WK_MASK) == 0)
	__asm __volatile("sti; hlt");
    else
	__asm __volatile("sti; pause");
#endif /* !__ia64__ */
}

static void
acpi_cpu_cx_non_c3(struct acpi_cpu_softc *sc)
{
    int i;

    sc->cpu_non_c3 = 0;
    for (i = sc->cpu_cx_lowest; i >= 0; i--) {
	if (sc->cpu_cx_states[i].type < ACPI_STATE_C3) {
	    sc->cpu_non_c3 = i;
	    break;
	}
    }
    if (bootverbose)
	device_printf(sc->cpu_dev, "non-C3 %d\n", sc->cpu_non_c3);
}

/*
 * Update the global cpu_cx_count: the largest Cx count supported by
 * every CPU, i.e. the minimum of the per-CPU counts.  It is used by
 * the global Cx sysctl handler.
 */
static void
acpi_cpu_global_cx_count(void)
{
    struct acpi_cpu_softc *sc;
    int i;

    if (cpu_ndevices == 0) {
	cpu_cx_count = 0;
	return;
    }

    sc = device_get_softc(cpu_devices[0]);
    cpu_cx_count = sc->cpu_cx_count;

    for (i = 1; i < cpu_ndevices; i++) {
	struct acpi_cpu_softc *sc = device_get_softc(cpu_devices[i]);

	if (sc->cpu_cx_count < cpu_cx_count)
	    cpu_cx_count = sc->cpu_cx_count;
    }
    if (bootverbose)
	kprintf("cpu_cst: global Cx count %d\n", cpu_cx_count);
}