1 /*- 2 * Copyright (c) 2003-2005 Nate Lawson (SDG) 3 * Copyright (c) 2001 Michael Smith 4 * All rights reserved. 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 25 * SUCH DAMAGE. 
 *
 * $FreeBSD: src/sys/dev/acpica/acpi_cpu.c,v 1.72 2008/04/12 12:06:00 rpaulo Exp $
 */

#include "opt_acpi.h"
#include <sys/param.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/globaldata.h>
#include <sys/power.h>
#include <sys/proc.h>
#include <sys/sbuf.h>
#include <sys/thread2.h>
#include <sys/serialize.h>
#include <sys/msgport2.h>
#include <sys/microtime_pcpu.h>
#include <sys/cpu_topology.h>

#include <bus/pci/pcivar.h>
#include <machine/atomic.h>
#include <machine/globaldata.h>
#include <machine/md_var.h>
#include <machine/smp.h>
#include <sys/rman.h>

#include <net/netisr2.h>
#include <net/netmsg2.h>
#include <net/if_var.h>

#include "acpi.h"
#include "acpivar.h"
#include "acpi_cpu.h"
#include "acpi_cpu_cstate.h"

/*
 * Support for ACPI Processor devices, including C[1-3+] sleep states.
 */

/* Hooks for the ACPICA debugging infrastructure */
#define _COMPONENT	ACPI_PROCESSOR
ACPI_MODULE_NAME("PROCESSOR")

/*
 * Message used to run per-CPU operations on the target CPU's netisr
 * thread (C-state changes must be made from the CPU that owns them).
 */
struct netmsg_acpi_cst {
    struct netmsg_base base;
    struct acpi_cst_softc *sc;		/* target CPU's C-state softc */
    int		val;			/* operation-specific value */
};

/* Maximum number of Cx states we track per CPU. */
#define MAX_CX_STATES	 8

/*
 * Per-CPU C-state software state, created by acpi_cst_probe()/attach().
 */
struct acpi_cst_softc {
    device_t		cst_dev;
    struct acpi_cpu_softc *cst_parent;	/* enclosing acpi cpu device */
    ACPI_HANDLE		cst_handle;	/* ACPI Processor object handle */
    int			cst_cpuid;	/* mi cpu id this softc belongs to */
    uint32_t		cst_flags;	/* ACPI_CST_FLAG_ */
    uint32_t		cst_p_blk;	/* ACPI P_BLK location */
    uint32_t		cst_p_blk_len;	/* P_BLK length (must be 6). */
    struct acpi_cst_cx	cst_cx_states[MAX_CX_STATES];
    int			cst_cx_count;	/* Number of valid Cx states. */
    int			cst_prev_sleep;	/* Last idle sleep duration. */
    /* Runtime state. */
    int			cst_non_c3;	/* Index of lowest non-C3 state. */
    u_long		cst_cx_stats[MAX_CX_STATES];/* Cx usage history. */
    /* Values for sysctl. */
    int			cst_cx_lowest;	/* Current Cx lowest */
    int			cst_cx_lowest_req; /* Requested Cx lowest */
    char		cst_cx_supported[64];
};

#define ACPI_CST_FLAG_PROBING	0x1	/* _CST reprobe in progress */
#define ACPI_CST_FLAG_ATTACHED	0x2	/* attach completed */
/* Match C-states of other hyperthreads on the same core */
#define ACPI_CST_FLAG_MATCH_HT	0x4

/* PIIX4 identification and registers, used for chipset quirks below. */
#define PCI_VENDOR_INTEL	0x8086
#define PCI_DEVICE_82371AB_3	0x7113	/* PIIX4 chipset for quirks. */
#define PCI_REVISION_A_STEP	0
#define PCI_REVISION_B_STEP	1
#define PCI_REVISION_4E		2
#define PCI_REVISION_4M		3
#define PIIX4_DEVACTB_REG	0x58
#define PIIX4_BRLD_EN_IRQ0	(1<<0)
#define PIIX4_BRLD_EN_IRQ	(1<<1)
#define PIIX4_BRLD_EN_IRQ8	(1<<5)
#define PIIX4_STOP_BREAK_MASK	(PIIX4_BRLD_EN_IRQ0 | \
				 PIIX4_BRLD_EN_IRQ | \
				 PIIX4_BRLD_EN_IRQ8)
#define PIIX4_PCNTRL_BST_EN	(1<<10)

/* Platform hardware resource information. */
static uint32_t		acpi_cst_smi_cmd; /* Value to write to SMI_CMD. */
static uint8_t		acpi_cst_ctrl;	/* Indicate we are _CST aware. */
int			acpi_cst_quirks; /* Indicate any hardware bugs. */
static boolean_t	acpi_cst_use_fadt; /* Use FADT/P_BLK instead of _CST */

/* Runtime state. */
static boolean_t	acpi_cst_disable_idle;
					/* Disable entry to idle function */
static int		acpi_cst_cx_count; /* Number of valid Cx states */

/* Values for sysctl. */
static int		acpi_cst_cx_lowest; /* Current Cx lowest */
static int		acpi_cst_cx_lowest_req; /* Requested Cx lowest */

static device_t		*acpi_cst_devices;
static int		 acpi_cst_ndevices;
static struct acpi_cst_softc **acpi_cst_softc;	/* indexed by cpuid */
/* Serializes notify-driven reprobe against sysctl updates. */
static struct lwkt_serialize acpi_cst_slize = LWKT_SERIALIZE_INITIALIZER;

static int	acpi_cst_probe(device_t);
static int	acpi_cst_attach(device_t);
static int	acpi_cst_suspend(device_t);
static int	acpi_cst_resume(device_t);
static int	acpi_cst_shutdown(device_t);

static void	acpi_cst_notify(device_t);
static void	acpi_cst_postattach(void *);
static void	acpi_cst_idle(void);
static void	acpi_cst_copy(struct acpi_cst_softc *,
		    const struct acpi_cst_softc *);

static void	acpi_cst_cx_probe(struct acpi_cst_softc *);
static void	acpi_cst_cx_probe_fadt(struct acpi_cst_softc *);
static int	acpi_cst_cx_probe_cst(struct acpi_cst_softc *, int);
static int	acpi_cst_cx_reprobe_cst(struct acpi_cst_softc *);

static void	acpi_cst_startup(struct acpi_cst_softc *);
static void	acpi_cst_support_list(struct acpi_cst_softc *);
static int	acpi_cst_set_lowest(struct acpi_cst_softc *, int);
static int	acpi_cst_set_lowest_oncpu(struct acpi_cst_softc *, int);
static void	acpi_cst_non_c3(struct acpi_cst_softc *);
static void	acpi_cst_global_cx_count(void);
static int	acpi_cst_set_quirks(void);
static void	acpi_cst_c3_bm_rld(struct acpi_cst_softc *);
static void	acpi_cst_free_resource(struct acpi_cst_softc *, int);
static void	acpi_cst_c1_halt(void);

static int	acpi_cst_usage_sysctl(SYSCTL_HANDLER_ARGS);
static int	acpi_cst_lowest_sysctl(SYSCTL_HANDLER_ARGS);
static int	acpi_cst_lowest_use_sysctl(SYSCTL_HANDLER_ARGS);
static int	acpi_cst_global_lowest_sysctl(SYSCTL_HANDLER_ARGS);
static int	acpi_cst_global_lowest_use_sysctl(SYSCTL_HANDLER_ARGS);

static int	acpi_cst_cx_setup(struct acpi_cst_cx
*cx);
static void	acpi_cst_c1_halt_enter(const struct acpi_cst_cx *);
static void	acpi_cst_cx_io_enter(const struct acpi_cst_cx *);

/* Tunable: force bus-master arbitration handling (see acpi_cst_cx_setup). */
int		acpi_cst_force_bmarb;
TUNABLE_INT("hw.acpi.cpu.cst.force_bmarb", &acpi_cst_force_bmarb);

/* Tunable: force bus-master status checking. */
int		acpi_cst_force_bmsts;
TUNABLE_INT("hw.acpi.cpu.cst.force_bmsts", &acpi_cst_force_bmsts);

static device_method_t acpi_cst_methods[] = {
    /* Device interface */
    DEVMETHOD(device_probe,	acpi_cst_probe),
    DEVMETHOD(device_attach,	acpi_cst_attach),
    DEVMETHOD(device_detach,	bus_generic_detach),
    DEVMETHOD(device_shutdown,	acpi_cst_shutdown),
    DEVMETHOD(device_suspend,	acpi_cst_suspend),
    DEVMETHOD(device_resume,	acpi_cst_resume),

    /* Bus interface */
    DEVMETHOD(bus_add_child,	bus_generic_add_child),
    DEVMETHOD(bus_read_ivar,	bus_generic_read_ivar),
    DEVMETHOD(bus_get_resource_list, bus_generic_get_resource_list),
    DEVMETHOD(bus_get_resource,	bus_generic_rl_get_resource),
    DEVMETHOD(bus_set_resource,	bus_generic_rl_set_resource),
    DEVMETHOD(bus_alloc_resource, bus_generic_rl_alloc_resource),
    DEVMETHOD(bus_release_resource, bus_generic_rl_release_resource),
    DEVMETHOD(bus_driver_added,	bus_generic_driver_added),
    DEVMETHOD(bus_activate_resource, bus_generic_activate_resource),
    DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource),
    DEVMETHOD(bus_setup_intr,	bus_generic_setup_intr),
    DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr),
    DEVMETHOD_END
};

static driver_t acpi_cst_driver = {
    "cpu_cst",
    acpi_cst_methods,
    sizeof(struct acpi_cst_softc),
};

static devclass_t acpi_cst_devclass;
DRIVER_MODULE(cpu_cst, cpu, acpi_cst_driver, acpi_cst_devclass, NULL, NULL);
MODULE_DEPEND(cpu_cst, acpi, 1, 1, 1);

/*
 * Probe handler: claim ACPI Processor objects and reserve the per-cpu
 * softc slot.  Also lazily allocates the global softc pointer array on
 * first invocation.
 */
static int
acpi_cst_probe(device_t dev)
{
    int cpu_id;

    if (acpi_disabled("cpu_cst") || acpi_get_type(dev) != ACPI_TYPE_PROCESSOR)
	return (ENXIO);

    cpu_id = acpi_get_magic(dev);

    if (acpi_cst_softc == NULL)
	acpi_cst_softc = kmalloc(sizeof(struct acpi_cst_softc *) *
	    SMP_MAXCPU, M_TEMP /* XXX */, M_INTWAIT | M_ZERO);

    /*
     * Check if we already probed this processor.  We scan the bus twice
     * so it's possible we've already seen this one.
     */
    if (acpi_cst_softc[cpu_id] != NULL) {
	device_printf(dev, "CPU%d cstate already exist\n", cpu_id);
	return (ENXIO);
    }

    /* Mark this processor as in-use and save our derived id for attach. */
    acpi_cst_softc[cpu_id] = device_get_softc(dev);
    device_set_desc(dev, "ACPI CPU C-State");

    return (0);
}

/*
 * Attach handler: evaluate the Processor object to locate P_BLK, record
 * FADT SMI/_CST-control values, and kick off Cx probing.  The first unit
 * additionally queues acpi_cst_postattach() to run once all cpus have
 * been probed.
 */
static int
acpi_cst_attach(device_t dev)
{
    ACPI_BUFFER		buf;
    ACPI_OBJECT		*obj;
    struct acpi_cst_softc *sc;
    ACPI_STATUS		status;

    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    sc = device_get_softc(dev);
    sc->cst_dev = dev;
    sc->cst_parent = device_get_softc(device_get_parent(dev));
    sc->cst_handle = acpi_get_handle(dev);
    sc->cst_cpuid = acpi_get_magic(dev);
    acpi_cst_softc[sc->cst_cpuid] = sc;
    acpi_cst_smi_cmd = AcpiGbl_FADT.SmiCommand;
    acpi_cst_ctrl = AcpiGbl_FADT.CstControl;

    buf.Pointer = NULL;
    buf.Length = ACPI_ALLOCATE_BUFFER;
    status = AcpiEvaluateObject(sc->cst_handle, NULL, NULL, &buf);
    if (ACPI_FAILURE(status)) {
	device_printf(dev, "attach failed to get Processor obj - %s\n",
	    AcpiFormatException(status));
	/* Release the probe-time slot so the cpu is not marked in-use. */
	acpi_cst_softc[sc->cst_cpuid] = NULL;
	return (ENXIO);
    }
    obj = (ACPI_OBJECT *)buf.Pointer;
    sc->cst_p_blk = obj->Processor.PblkAddress;
    sc->cst_p_blk_len = obj->Processor.PblkLength;
    AcpiOsFree(obj);
    ACPI_DEBUG_PRINT((ACPI_DB_INFO, "cpu_cst%d: P_BLK at %#x/%d\n",
	device_get_unit(dev), sc->cst_p_blk, sc->cst_p_blk_len));

    /*
     * If this is the first cpu we attach, create and initialize the generic
     * resources that will be used by all acpi cpu devices.
     */
    if (device_get_unit(dev) == 0) {
	/* Assume we won't be using FADT for Cx states by default */
	acpi_cst_use_fadt = FALSE;

	/* Queue post cpu-probing task handler */
	AcpiOsExecute(OSL_NOTIFY_HANDLER, acpi_cst_postattach, NULL);
    }

    /* Probe for Cx state support. */
    acpi_cst_cx_probe(sc);

    sc->cst_flags |= ACPI_CST_FLAG_ATTACHED;

    return (0);
}

/*
 * Disable any entry to the idle function during suspend and re-enable it
 * during resume.
 */
static int
acpi_cst_suspend(device_t dev)
{
    int error;

    error = bus_generic_suspend(dev);
    if (error)
	return (error);
    acpi_cst_disable_idle = TRUE;
    return (0);
}

static int
acpi_cst_resume(device_t dev)
{
    acpi_cst_disable_idle = FALSE;
    return (bus_generic_resume(dev));
}

static int
acpi_cst_shutdown(device_t dev)
{
    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    /* Allow children to shutdown first. */
    bus_generic_shutdown(dev);

    /*
     * Disable any entry to the idle function.  There is a small race where
     * an idle thread have passed this check but not gone to sleep.  This
     * is ok since device_shutdown() does not free the softc, otherwise
     * we'd have to be sure all threads were evicted before returning.
     */
    acpi_cst_disable_idle = TRUE;

    return_VALUE (0);
}

/*
 * Probe this cpu's Cx states via the ACPI 2.0 _CST object, falling back
 * to FADT/P_BLK mode (handled later in acpi_cst_postattach()) on failure.
 */
static void
acpi_cst_cx_probe(struct acpi_cst_softc *sc)
{
    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    /* Use initial sleep value of 1 sec. to start with lowest idle state. */
    sc->cst_prev_sleep = 1000000;
    sc->cst_cx_lowest = 0;
    sc->cst_cx_lowest_req = 0;

    /*
     * Check for the ACPI 2.0 _CST sleep states object.  If we can't find
     * any, we'll revert to FADT/P_BLK Cx control method which will be
     * handled by acpi_cst_postattach.
We need to defer to after having
     * probed all the cpus in the system before probing for Cx states from
     * FADT as we may already have found cpus with valid _CST packages.
     */
    if (!acpi_cst_use_fadt && acpi_cst_cx_probe_cst(sc, 0) != 0) {
	/*
	 * We were unable to find a _CST package for this cpu or there
	 * was an error parsing it.  Switch back to generic mode.
	 */
	acpi_cst_use_fadt = TRUE;
	if (bootverbose)
	    device_printf(sc->cst_dev, "switching to FADT Cx mode\n");
    }

    /*
     * TODO: _CSD Package should be checked here.
     */
}

/*
 * Probe Cx states from the FADT/P_BLK legacy mechanism: C1 (HALT) is
 * always installed; C2 (P_LVL2) and C3 (P_LVL3) are added when the P_BLK
 * is long enough and the FADT latencies are within the limits the ACPI
 * spec allows (C2 <= 100us, C3 <= 1000us).
 */
static void
acpi_cst_cx_probe_fadt(struct acpi_cst_softc *sc)
{
    struct acpi_cst_cx *cx_ptr;
    int error;

    /*
     * Free all previously allocated resources.
     *
     * NOTE:
     * It is needed, since we could enter here because of other
     * cpu's _CST probing failure.
     */
    acpi_cst_free_resource(sc, 0);

    sc->cst_cx_count = 0;
    cx_ptr = sc->cst_cx_states;

    /* Use initial sleep value of 1 sec. to start with lowest idle state. */
    sc->cst_prev_sleep = 1000000;

    /* C1 has been required since just after ACPI 1.0 */
    cx_ptr->gas.SpaceId = ACPI_ADR_SPACE_FIXED_HARDWARE;
    cx_ptr->type = ACPI_STATE_C1;
    cx_ptr->trans_lat = 0;
    cx_ptr->enter = acpi_cst_c1_halt_enter;
    error = acpi_cst_cx_setup(cx_ptr);
    if (error)
	panic("C1 FADT HALT setup failed: %d", error);
    cx_ptr++;
    sc->cst_cx_count++;

    /* C2(+) is not supported on MP system */
    if (ncpus > 1 && (AcpiGbl_FADT.Flags & ACPI_FADT_C2_MP_SUPPORTED) == 0)
	return;

    /*
     * The spec says P_BLK must be 6 bytes long.  However, some systems
     * use it to indicate a fractional set of features present so we
     * take 5 as C2.  Some may also have a value of 7 to indicate
     * another C3 but most use _CST for this (as required) and having
     * "only" C1-C3 is not a hardship.
     */
    if (sc->cst_p_blk_len < 5)
	return;

    /* Validate and allocate resources for C2 (P_LVL2). */
    if (AcpiGbl_FADT.C2Latency <= 100) {
	cx_ptr->gas.SpaceId = ACPI_ADR_SPACE_SYSTEM_IO;
	cx_ptr->gas.BitWidth = 8;
	cx_ptr->gas.Address = sc->cst_p_blk + 4;

	cx_ptr->rid = sc->cst_parent->cpu_next_rid;
	acpi_bus_alloc_gas(sc->cst_dev, &cx_ptr->res_type, &cx_ptr->rid,
	    &cx_ptr->gas, &cx_ptr->res, RF_SHAREABLE);
	if (cx_ptr->res != NULL) {
	    sc->cst_parent->cpu_next_rid++;
	    cx_ptr->type = ACPI_STATE_C2;
	    cx_ptr->trans_lat = AcpiGbl_FADT.C2Latency;
	    cx_ptr->enter = acpi_cst_cx_io_enter;
	    cx_ptr->btag = rman_get_bustag(cx_ptr->res);
	    cx_ptr->bhand = rman_get_bushandle(cx_ptr->res);
	    error = acpi_cst_cx_setup(cx_ptr);
	    if (error)
		panic("C2 FADT I/O setup failed: %d", error);
	    cx_ptr++;
	    sc->cst_cx_count++;
	    /* C2 is the lowest non-C3 state in FADT mode. */
	    sc->cst_non_c3 = 1;
	}
    }
    if (sc->cst_p_blk_len < 6)
	return;

    /* Validate and allocate resources for C3 (P_LVL3). */
    if (AcpiGbl_FADT.C3Latency <= 1000 &&
	!(acpi_cst_quirks & ACPI_CST_QUIRK_NO_C3)) {
	cx_ptr->gas.SpaceId = ACPI_ADR_SPACE_SYSTEM_IO;
	cx_ptr->gas.BitWidth = 8;
	cx_ptr->gas.Address = sc->cst_p_blk + 5;

	cx_ptr->rid = sc->cst_parent->cpu_next_rid;
	acpi_bus_alloc_gas(sc->cst_dev, &cx_ptr->res_type, &cx_ptr->rid,
	    &cx_ptr->gas, &cx_ptr->res, RF_SHAREABLE);
	if (cx_ptr->res != NULL) {
	    sc->cst_parent->cpu_next_rid++;
	    cx_ptr->type = ACPI_STATE_C3;
	    cx_ptr->trans_lat = AcpiGbl_FADT.C3Latency;
	    cx_ptr->enter = acpi_cst_cx_io_enter;
	    cx_ptr->btag = rman_get_bustag(cx_ptr->res);
	    cx_ptr->bhand = rman_get_bushandle(cx_ptr->res);
	    error = acpi_cst_cx_setup(cx_ptr);
	    if (error)
		panic("C3 FADT I/O setup failed: %d", error);
	    cx_ptr++;
	    sc->cst_cx_count++;
	}
    }
}

/*
 * Copy the Cx state configuration from src_sc to dst_sc, used to make
 * hyperthread siblings on the same core agree on their C-state set.
 */
static void
acpi_cst_copy(struct acpi_cst_softc *dst_sc,
    const struct acpi_cst_softc *src_sc)
{
    dst_sc->cst_non_c3 = src_sc->cst_non_c3;
    dst_sc->cst_cx_count = src_sc->cst_cx_count;
    memcpy(dst_sc->cst_cx_states, src_sc->cst_cx_states,
	sizeof(dst_sc->cst_cx_states));
}

/*
 * Parse a _CST package and set up its Cx states.  Since the _CST object
 * can change dynamically, our notify handler may call this function
 * to clean up and probe the new _CST package.
488 */ 489 static int 490 acpi_cst_cx_probe_cst(struct acpi_cst_softc *sc, int reprobe) 491 { 492 struct acpi_cst_cx *cx_ptr; 493 ACPI_STATUS status; 494 ACPI_BUFFER buf; 495 ACPI_OBJECT *top; 496 ACPI_OBJECT *pkg; 497 uint32_t count; 498 int i; 499 500 ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); 501 502 #ifdef INVARIANTS 503 if (reprobe) 504 KKASSERT(&curthread->td_msgport == netisr_cpuport(sc->cst_cpuid)); 505 #endif 506 507 buf.Pointer = NULL; 508 buf.Length = ACPI_ALLOCATE_BUFFER; 509 status = AcpiEvaluateObject(sc->cst_handle, "_CST", NULL, &buf); 510 if (ACPI_FAILURE(status)) 511 return (ENXIO); 512 513 /* _CST is a package with a count and at least one Cx package. */ 514 top = (ACPI_OBJECT *)buf.Pointer; 515 if (!ACPI_PKG_VALID(top, 2) || acpi_PkgInt32(top, 0, &count) != 0) { 516 device_printf(sc->cst_dev, "invalid _CST package\n"); 517 AcpiOsFree(buf.Pointer); 518 return (ENXIO); 519 } 520 if (count != top->Package.Count - 1) { 521 device_printf(sc->cst_dev, "invalid _CST state count (%d != %d)\n", 522 count, top->Package.Count - 1); 523 count = top->Package.Count - 1; 524 } 525 if (count > MAX_CX_STATES) { 526 device_printf(sc->cst_dev, "_CST has too many states (%d)\n", count); 527 count = MAX_CX_STATES; 528 } 529 530 sc->cst_flags |= ACPI_CST_FLAG_PROBING | ACPI_CST_FLAG_MATCH_HT; 531 cpu_sfence(); 532 533 /* 534 * Free all previously allocated resources 535 * 536 * NOTE: It is needed for _CST reprobing. 537 */ 538 acpi_cst_free_resource(sc, 0); 539 540 /* Set up all valid states. 
*/ 541 sc->cst_cx_count = 0; 542 cx_ptr = sc->cst_cx_states; 543 for (i = 0; i < count; i++) { 544 int error; 545 546 pkg = &top->Package.Elements[i + 1]; 547 if (!ACPI_PKG_VALID(pkg, 4) || 548 acpi_PkgInt32(pkg, 1, &cx_ptr->type) != 0 || 549 acpi_PkgInt32(pkg, 2, &cx_ptr->trans_lat) != 0 || 550 acpi_PkgInt32(pkg, 3, &cx_ptr->power) != 0) { 551 552 device_printf(sc->cst_dev, "skipping invalid Cx state package\n"); 553 continue; 554 } 555 556 /* Validate the state to see if we should use it. */ 557 switch (cx_ptr->type) { 558 case ACPI_STATE_C1: 559 sc->cst_non_c3 = i; 560 cx_ptr->enter = acpi_cst_c1_halt_enter; 561 error = acpi_cst_cx_setup(cx_ptr); 562 if (error) 563 panic("C1 CST HALT setup failed: %d", error); 564 if (sc->cst_cx_count != 0) { 565 /* 566 * C1 is not the first C-state; something really stupid 567 * is going on ... 568 */ 569 sc->cst_flags &= ~ACPI_CST_FLAG_MATCH_HT; 570 } 571 cx_ptr++; 572 sc->cst_cx_count++; 573 continue; 574 case ACPI_STATE_C2: 575 sc->cst_non_c3 = i; 576 break; 577 case ACPI_STATE_C3: 578 default: 579 if ((acpi_cst_quirks & ACPI_CST_QUIRK_NO_C3) != 0) { 580 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 581 "cpu_cst%d: C3[%d] not available.\n", 582 device_get_unit(sc->cst_dev), i)); 583 continue; 584 } 585 break; 586 } 587 588 /* 589 * Allocate the control register for C2 or C3(+). 590 */ 591 KASSERT(cx_ptr->res == NULL, ("still has res")); 592 acpi_PkgRawGas(pkg, 0, &cx_ptr->gas); 593 594 /* 595 * We match number of C2/C3 for hyperthreads, only if the 596 * register is "Fixed Hardware", e.g. on most of the Intel 597 * CPUs. We don't have much to do for the rest of the 598 * register types. 
599 */ 600 if (cx_ptr->gas.SpaceId != ACPI_ADR_SPACE_FIXED_HARDWARE) 601 sc->cst_flags &= ~ACPI_CST_FLAG_MATCH_HT; 602 603 cx_ptr->rid = sc->cst_parent->cpu_next_rid; 604 acpi_bus_alloc_gas(sc->cst_dev, &cx_ptr->res_type, &cx_ptr->rid, 605 &cx_ptr->gas, &cx_ptr->res, RF_SHAREABLE); 606 if (cx_ptr->res != NULL) { 607 sc->cst_parent->cpu_next_rid++; 608 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 609 "cpu_cst%d: Got C%d - %d latency\n", 610 device_get_unit(sc->cst_dev), cx_ptr->type, 611 cx_ptr->trans_lat)); 612 cx_ptr->enter = acpi_cst_cx_io_enter; 613 cx_ptr->btag = rman_get_bustag(cx_ptr->res); 614 cx_ptr->bhand = rman_get_bushandle(cx_ptr->res); 615 error = acpi_cst_cx_setup(cx_ptr); 616 if (error) 617 panic("C%d CST I/O setup failed: %d", cx_ptr->type, error); 618 cx_ptr++; 619 sc->cst_cx_count++; 620 } else { 621 error = acpi_cst_cx_setup(cx_ptr); 622 if (!error) { 623 KASSERT(cx_ptr->enter != NULL, 624 ("C%d enter is not set", cx_ptr->type)); 625 cx_ptr++; 626 sc->cst_cx_count++; 627 } 628 } 629 } 630 AcpiOsFree(buf.Pointer); 631 632 if (sc->cst_flags & ACPI_CST_FLAG_MATCH_HT) { 633 cpumask_t mask; 634 635 mask = get_cpumask_from_level(sc->cst_cpuid, CORE_LEVEL); 636 if (CPUMASK_TESTNZERO(mask)) { 637 int cpu; 638 639 for (cpu = 0; cpu < ncpus; ++cpu) { 640 struct acpi_cst_softc *sc1 = acpi_cst_softc[cpu]; 641 642 if (sc1 == NULL || sc1 == sc || 643 (sc1->cst_flags & ACPI_CST_FLAG_ATTACHED) == 0 || 644 (sc1->cst_flags & ACPI_CST_FLAG_MATCH_HT) == 0) 645 continue; 646 if (!CPUMASK_TESTBIT(mask, sc1->cst_cpuid)) 647 continue; 648 649 if (sc1->cst_cx_count != sc->cst_cx_count) { 650 struct acpi_cst_softc *src_sc, *dst_sc; 651 652 if (bootverbose) { 653 device_printf(sc->cst_dev, 654 "inconstent C-state count: %d, %s has %d\n", 655 sc->cst_cx_count, 656 device_get_nameunit(sc1->cst_dev), 657 sc1->cst_cx_count); 658 } 659 if (sc1->cst_cx_count > sc->cst_cx_count) { 660 src_sc = sc1; 661 dst_sc = sc; 662 } else { 663 src_sc = sc; 664 dst_sc = sc1; 665 } 666 
acpi_cst_copy(dst_sc, src_sc); 667 } 668 } 669 } 670 } 671 672 if (reprobe) { 673 /* If there are C3(+) states, always enable bus master wakeup */ 674 if ((acpi_cst_quirks & ACPI_CST_QUIRK_NO_BM) == 0) { 675 for (i = 0; i < sc->cst_cx_count; ++i) { 676 struct acpi_cst_cx *cx = &sc->cst_cx_states[i]; 677 678 if (cx->type >= ACPI_STATE_C3) { 679 AcpiWriteBitRegister(ACPI_BITREG_BUS_MASTER_RLD, 1); 680 break; 681 } 682 } 683 } 684 685 /* Fix up the lowest Cx being used */ 686 acpi_cst_set_lowest_oncpu(sc, sc->cst_cx_lowest_req); 687 } 688 689 /* 690 * Cache the lowest non-C3 state. 691 * NOTE: must after cst_cx_lowest is set. 692 */ 693 acpi_cst_non_c3(sc); 694 695 cpu_sfence(); 696 sc->cst_flags &= ~ACPI_CST_FLAG_PROBING; 697 698 return (0); 699 } 700 701 static void 702 acpi_cst_cx_reprobe_cst_handler(netmsg_t msg) 703 { 704 struct netmsg_acpi_cst *rmsg = (struct netmsg_acpi_cst *)msg; 705 int error; 706 707 error = acpi_cst_cx_probe_cst(rmsg->sc, 1); 708 lwkt_replymsg(&rmsg->base.lmsg, error); 709 } 710 711 static int 712 acpi_cst_cx_reprobe_cst(struct acpi_cst_softc *sc) 713 { 714 struct netmsg_acpi_cst msg; 715 716 netmsg_init(&msg.base, NULL, &curthread->td_msgport, MSGF_PRIORITY, 717 acpi_cst_cx_reprobe_cst_handler); 718 msg.sc = sc; 719 720 return lwkt_domsg(netisr_cpuport(sc->cst_cpuid), &msg.base.lmsg, 0); 721 } 722 723 /* 724 * Call this *after* all CPUs Cx states have been attached. 725 */ 726 static void 727 acpi_cst_postattach(void *arg) 728 { 729 struct acpi_cst_softc *sc; 730 int i; 731 732 /* Get set of Cx state devices */ 733 devclass_get_devices(acpi_cst_devclass, &acpi_cst_devices, 734 &acpi_cst_ndevices); 735 736 /* 737 * Setup any quirks that might necessary now that we have probed 738 * all the CPUs' Cx states. 739 */ 740 acpi_cst_set_quirks(); 741 742 if (acpi_cst_use_fadt) { 743 /* 744 * We are using Cx mode from FADT, probe for available Cx states 745 * for all processors. 
*/
	for (i = 0; i < acpi_cst_ndevices; i++) {
	    sc = device_get_softc(acpi_cst_devices[i]);
	    acpi_cst_cx_probe_fadt(sc);
	}
    } else {
	/*
	 * We are using _CST mode, remove C3 state if necessary.
	 *
	 * As we now know for sure that we will be using _CST mode
	 * install our notify handler.
	 */
	for (i = 0; i < acpi_cst_ndevices; i++) {
	    sc = device_get_softc(acpi_cst_devices[i]);
	    if (acpi_cst_quirks & ACPI_CST_QUIRK_NO_C3) {
		/* Free part of unused resources */
		acpi_cst_free_resource(sc, sc->cst_non_c3 + 1);
		sc->cst_cx_count = sc->cst_non_c3 + 1;
	    }
	    sc->cst_parent->cpu_cst_notify = acpi_cst_notify;
	}
    }
    acpi_cst_global_cx_count();

    /* Perform Cx final initialization. */
    for (i = 0; i < acpi_cst_ndevices; i++) {
	sc = device_get_softc(acpi_cst_devices[i]);
	acpi_cst_startup(sc);

	if (sc->cst_parent->glob_sysctl_tree != NULL) {
	    struct acpi_cpu_softc *cpu = sc->cst_parent;

	    /* Add a sysctl handler to handle global Cx lowest setting */
	    SYSCTL_ADD_PROC(&cpu->glob_sysctl_ctx,
			    SYSCTL_CHILDREN(cpu->glob_sysctl_tree),
			    OID_AUTO, "cx_lowest",
			    CTLTYPE_STRING | CTLFLAG_RW, NULL, 0,
			    acpi_cst_global_lowest_sysctl, "A",
			    "Requested global lowest Cx sleep state");
	    SYSCTL_ADD_PROC(&cpu->glob_sysctl_ctx,
			    SYSCTL_CHILDREN(cpu->glob_sysctl_tree),
			    OID_AUTO, "cx_lowest_use",
			    CTLTYPE_STRING | CTLFLAG_RD, NULL, 0,
			    acpi_cst_global_lowest_use_sysctl, "A",
			    "Global lowest Cx sleep state to use");
	}
    }

    /* Take over idling from cpu_idle_default(). */
    acpi_cst_cx_lowest = 0;
    acpi_cst_cx_lowest_req = 0;
    acpi_cst_disable_idle = FALSE;

    /* Make the state above visible before installing the idle hook. */
    cpu_sfence();
    cpu_idle_hook = acpi_cst_idle;
}

/*
 * Format the human-readable "C<n>/<latency>" support list for this cpu
 * into sc->cst_cx_supported, exported via the cx_supported sysctl.
 */
static void
acpi_cst_support_list(struct acpi_cst_softc *sc)
{
    struct sbuf sb;
    int i;

    /*
     * Set up the list of Cx states
     */
    sbuf_new(&sb, sc->cst_cx_supported, sizeof(sc->cst_cx_supported),
	SBUF_FIXEDLEN);
    for (i = 0; i < sc->cst_cx_count; i++)
	sbuf_printf(&sb, "C%d/%d ", i + 1, sc->cst_cx_states[i].trans_lat);
    sbuf_trim(&sb);
    sbuf_finish(&sb);
}

/*
 * Netisr handler: set BUS_MASTER_RLD (bus master wakeup) on the owning cpu.
 */
static void
acpi_cst_c3_bm_rld_handler(netmsg_t msg)
{
    struct netmsg_acpi_cst *rmsg = (struct netmsg_acpi_cst *)msg;

    AcpiWriteBitRegister(ACPI_BITREG_BUS_MASTER_RLD, 1);
    lwkt_replymsg(&rmsg->base.lmsg, 0);
}

/*
 * Dispatch the BUS_MASTER_RLD enable to the cpu owning 'sc'.
 */
static void
acpi_cst_c3_bm_rld(struct acpi_cst_softc *sc)
{
    struct netmsg_acpi_cst msg;

    netmsg_init(&msg.base, NULL, &curthread->td_msgport, MSGF_PRIORITY,
	acpi_cst_c3_bm_rld_handler);
    msg.sc = sc;

    lwkt_domsg(netisr_cpuport(sc->cst_cpuid), &msg.base.lmsg, 0);
}

/*
 * Final per-cpu Cx initialization: re-run state setup now that global
 * quirks are known, enable bus master wakeup if C3(+) exists, and create
 * the per-cpu sysctl nodes.
 */
static void
acpi_cst_startup(struct acpi_cst_softc *sc)
{
    struct acpi_cpu_softc *cpu = sc->cst_parent;
    int i, bm_rld_done = 0;

    for (i = 0; i < sc->cst_cx_count; ++i) {
	struct acpi_cst_cx *cx = &sc->cst_cx_states[i];
	int error;

	/* If there are C3(+) states, always enable bus master wakeup */
	if (cx->type >= ACPI_STATE_C3 && !bm_rld_done &&
	    (acpi_cst_quirks & ACPI_CST_QUIRK_NO_BM) == 0) {
	    acpi_cst_c3_bm_rld(sc);
	    bm_rld_done = 1;
	}

	/* Redo the Cx setup, since quirks have been changed */
	error = acpi_cst_cx_setup(cx);
	if (error)
	    panic("C%d startup setup failed: %d", i + 1, error);
    }

    acpi_cst_support_list(sc);

    SYSCTL_ADD_STRING(&cpu->pcpu_sysctl_ctx,
		      SYSCTL_CHILDREN(cpu->pcpu_sysctl_tree),
		      OID_AUTO, "cx_supported", CTLFLAG_RD,
sc->cst_cx_supported, 0,
		      "Cx/microsecond values for supported Cx states");
    SYSCTL_ADD_PROC(&cpu->pcpu_sysctl_ctx,
		    SYSCTL_CHILDREN(cpu->pcpu_sysctl_tree),
		    OID_AUTO, "cx_lowest", CTLTYPE_STRING | CTLFLAG_RW,
		    (void *)sc, 0, acpi_cst_lowest_sysctl, "A",
		    "requested lowest Cx sleep state");
    SYSCTL_ADD_PROC(&cpu->pcpu_sysctl_ctx,
		    SYSCTL_CHILDREN(cpu->pcpu_sysctl_tree),
		    OID_AUTO, "cx_lowest_use", CTLTYPE_STRING | CTLFLAG_RD,
		    (void *)sc, 0, acpi_cst_lowest_use_sysctl, "A",
		    "lowest Cx sleep state to use");
    SYSCTL_ADD_PROC(&cpu->pcpu_sysctl_ctx,
		    SYSCTL_CHILDREN(cpu->pcpu_sysctl_tree),
		    OID_AUTO, "cx_usage", CTLTYPE_STRING | CTLFLAG_RD,
		    (void *)sc, 0, acpi_cst_usage_sysctl, "A",
		    "percent usage for each Cx state");

#ifdef notyet
    /* Signal platform that we can handle _CST notification. */
    if (!acpi_cst_use_fadt && acpi_cst_ctrl != 0) {
	ACPI_LOCK(acpi);
	AcpiOsWritePort(acpi_cst_smi_cmd, acpi_cst_ctrl, 8);
	ACPI_UNLOCK(acpi);
    }
#endif
}

/*
 * Idle the CPU in the lowest state possible.  This function is called with
 * interrupts disabled.  Note that once it re-enables interrupts, a task
 * switch can occur so do not access shared data (i.e. the softc) after
 * interrupts are re-enabled.
 */
static void
acpi_cst_idle(void)
{
    struct acpi_cst_softc *sc;
    struct acpi_cst_cx *cx_next;
    union microtime_pcpu start, end;
    int cx_next_idx, i, tdiff, bm_arb_disabled = 0;

    /* If disabled, return immediately. */
    if (acpi_cst_disable_idle) {
	ACPI_ENABLE_IRQS();
	return;
    }

    /*
     * Look up our CPU id to get our softc.  If it's NULL, we'll use C1
     * since there is no Cx state for this processor.
     */
    sc = acpi_cst_softc[mdcpu->mi.gd_cpuid];
    if (sc == NULL) {
	acpi_cst_c1_halt();
	return;
    }

    /* Still probing; use C1 */
    if (sc->cst_flags & ACPI_CST_FLAG_PROBING) {
	acpi_cst_c1_halt();
	return;
    }

    /*
     * Find the lowest state that has small enough latency.  A state is
     * usable if its transition latency is at most 1/3 of the previous
     * observed sleep duration.
     */
    cx_next_idx = 0;
    for (i = sc->cst_cx_lowest; i >= 0; i--) {
	if (sc->cst_cx_states[i].trans_lat * 3 <= sc->cst_prev_sleep) {
	    cx_next_idx = i;
	    break;
	}
    }

    /*
     * Check for bus master activity if needed for the selected state.
     * If there was activity, clear the bit and use the lowest non-C3 state.
     */
    cx_next = &sc->cst_cx_states[cx_next_idx];
    if (cx_next->flags & ACPI_CST_CX_FLAG_BM_STS) {
	int bm_active;

	AcpiReadBitRegister(ACPI_BITREG_BUS_MASTER_STATUS, &bm_active);
	if (bm_active != 0) {
	    /* Writing 1 clears the (write-1-to-clear) status bit. */
	    AcpiWriteBitRegister(ACPI_BITREG_BUS_MASTER_STATUS, 1);
	    cx_next_idx = sc->cst_non_c3;
	}
    }

    /* Select the next state and update statistics. */
    cx_next = &sc->cst_cx_states[cx_next_idx];
    sc->cst_cx_stats[cx_next_idx]++;
    KASSERT(cx_next->type != ACPI_STATE_C0, ("C0 sleep"));

    /*
     * Execute HLT (or equivalent) and wait for an interrupt.  We can't
     * calculate the time spent in C1 since the place we wake up is an
     * ISR.  Assume we slept half of quantum and return.
     */
    if (cx_next->type == ACPI_STATE_C1) {
	sc->cst_prev_sleep = (sc->cst_prev_sleep * 3 + 500000 / hz) / 4;
	cx_next->enter(cx_next);
	return;
    }

    /* Execute the proper preamble before enter the selected state. */
    if (cx_next->preamble == ACPI_CST_CX_PREAMBLE_BM_ARB) {
	AcpiWriteBitRegister(ACPI_BITREG_ARB_DISABLE, 1);
	bm_arb_disabled = 1;
    } else if (cx_next->preamble == ACPI_CST_CX_PREAMBLE_WBINVD) {
	ACPI_FLUSH_CPU_CACHE();
    }

    /*
     * Enter the selected state and check time spent asleep.
     */
    microtime_pcpu_get(&start);
    cpu_mfence();

    cx_next->enter(cx_next);

    cpu_mfence();
    microtime_pcpu_get(&end);

    /* Enable bus master arbitration, if it was disabled. */
    if (bm_arb_disabled)
	AcpiWriteBitRegister(ACPI_BITREG_ARB_DISABLE, 0);

    ACPI_ENABLE_IRQS();

    /* Find the actual time asleep in microseconds. */
    tdiff = microtime_pcpu_diff(&start, &end);
    /* Exponential moving average: 3/4 old estimate + 1/4 new sample. */
    sc->cst_prev_sleep = (sc->cst_prev_sleep * 3 + tdiff) / 4;
}

/*
 * Re-evaluate the _CST object when we are notified that it changed.
 */
static void
acpi_cst_notify(device_t dev)
{
    struct acpi_cst_softc *sc = device_get_softc(dev);

    /* Reprobe is dispatched to a netisr; must not already be in one. */
    KASSERT(curthread->td_type != TD_TYPE_NETISR,
	("notify in netisr%d", mycpuid));

    lwkt_serialize_enter(&acpi_cst_slize);

    /* Update the list of Cx states. */
    acpi_cst_cx_reprobe_cst(sc);
    acpi_cst_support_list(sc);

    /* Update the new lowest useable Cx state for all CPUs. */
    acpi_cst_global_cx_count();

    /*
     * Fix up the lowest Cx being used
     */
    if (acpi_cst_cx_lowest_req < acpi_cst_cx_count)
	acpi_cst_cx_lowest = acpi_cst_cx_lowest_req;
    if (acpi_cst_cx_lowest > acpi_cst_cx_count - 1)
	acpi_cst_cx_lowest = acpi_cst_cx_count - 1;

    lwkt_serialize_exit(&acpi_cst_slize);
}

/*
 * Detect platform/chipset C-state bugs and record them in
 * acpi_cst_quirks.  Called once from acpi_cst_postattach() after all
 * cpus have been probed.
 */
static int
acpi_cst_set_quirks(void)
{
    device_t acpi_dev;
    uint32_t val;

    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    /*
     * Bus mastering arbitration control is needed to keep caches coherent
     * while sleeping in C3.  If it's not present but a working flush cache
     * instruction is present, flush the caches before entering C3 instead.
     * Otherwise, just disable C3 completely.
1047 */ 1048 if (AcpiGbl_FADT.Pm2ControlBlock == 0 || 1049 AcpiGbl_FADT.Pm2ControlLength == 0) { 1050 if ((AcpiGbl_FADT.Flags & ACPI_FADT_WBINVD) && 1051 (AcpiGbl_FADT.Flags & ACPI_FADT_WBINVD_FLUSH) == 0) { 1052 acpi_cst_quirks |= ACPI_CST_QUIRK_NO_BM; 1053 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 1054 "cpu_cst: no BM control, using flush cache method\n")); 1055 } else { 1056 acpi_cst_quirks |= ACPI_CST_QUIRK_NO_C3; 1057 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 1058 "cpu_cst: no BM control, C3 not available\n")); 1059 } 1060 } 1061 1062 /* Look for various quirks of the PIIX4 part. */ 1063 acpi_dev = pci_find_device(PCI_VENDOR_INTEL, PCI_DEVICE_82371AB_3); 1064 if (acpi_dev != NULL) { 1065 switch (pci_get_revid(acpi_dev)) { 1066 /* 1067 * Disable C3 support for all PIIX4 chipsets. Some of these parts 1068 * do not report the BMIDE status to the BM status register and 1069 * others have a livelock bug if Type-F DMA is enabled. Linux 1070 * works around the BMIDE bug by reading the BM status directly 1071 * but we take the simpler approach of disabling C3 for these 1072 * parts. 1073 * 1074 * See erratum #18 ("C3 Power State/BMIDE and Type-F DMA 1075 * Livelock") from the January 2002 PIIX4 specification update. 1076 * Applies to all PIIX4 models. 1077 * 1078 * Also, make sure that all interrupts cause a "Stop Break" 1079 * event to exit from C2 state. 1080 * Also, BRLD_EN_BM (ACPI_BITREG_BUS_MASTER_RLD in ACPI-speak) 1081 * should be set to zero, otherwise it causes C2 to short-sleep. 1082 * PIIX4 doesn't properly support C3 and bus master activity 1083 * need not break out of C2. 
1084 */ 1085 case PCI_REVISION_A_STEP: 1086 case PCI_REVISION_B_STEP: 1087 case PCI_REVISION_4E: 1088 case PCI_REVISION_4M: 1089 acpi_cst_quirks |= ACPI_CST_QUIRK_NO_C3; 1090 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 1091 "cpu_cst: working around PIIX4 bug, disabling C3\n")); 1092 1093 val = pci_read_config(acpi_dev, PIIX4_DEVACTB_REG, 4); 1094 if ((val & PIIX4_STOP_BREAK_MASK) != PIIX4_STOP_BREAK_MASK) { 1095 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 1096 "cpu_cst: PIIX4: enabling IRQs to generate Stop Break\n")); 1097 val |= PIIX4_STOP_BREAK_MASK; 1098 pci_write_config(acpi_dev, PIIX4_DEVACTB_REG, val, 4); 1099 } 1100 AcpiReadBitRegister(ACPI_BITREG_BUS_MASTER_RLD, &val); 1101 if (val) { 1102 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 1103 "cpu_cst: PIIX4: reset BRLD_EN_BM\n")); 1104 AcpiWriteBitRegister(ACPI_BITREG_BUS_MASTER_RLD, 0); 1105 } 1106 break; 1107 default: 1108 break; 1109 } 1110 } 1111 1112 return (0); 1113 } 1114 1115 static int 1116 acpi_cst_usage_sysctl(SYSCTL_HANDLER_ARGS) 1117 { 1118 struct acpi_cst_softc *sc; 1119 struct sbuf sb; 1120 char buf[128]; 1121 int i; 1122 uintmax_t fract, sum, whole; 1123 1124 sc = (struct acpi_cst_softc *) arg1; 1125 sum = 0; 1126 for (i = 0; i < sc->cst_cx_count; i++) 1127 sum += sc->cst_cx_stats[i]; 1128 sbuf_new(&sb, buf, sizeof(buf), SBUF_FIXEDLEN); 1129 for (i = 0; i < sc->cst_cx_count; i++) { 1130 if (sum > 0) { 1131 whole = (uintmax_t)sc->cst_cx_stats[i] * 100; 1132 fract = (whole % sum) * 100; 1133 sbuf_printf(&sb, "%u.%02u%% ", (u_int)(whole / sum), 1134 (u_int)(fract / sum)); 1135 } else 1136 sbuf_printf(&sb, "0.00%% "); 1137 } 1138 sbuf_printf(&sb, "last %dus", sc->cst_prev_sleep); 1139 sbuf_trim(&sb); 1140 sbuf_finish(&sb); 1141 sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req); 1142 sbuf_delete(&sb); 1143 1144 return (0); 1145 } 1146 1147 static int 1148 acpi_cst_set_lowest_oncpu(struct acpi_cst_softc *sc, int val) 1149 { 1150 int old_lowest, error = 0, old_lowest_req; 1151 uint32_t old_type, type; 1152 1153 
KKASSERT(mycpuid == sc->cst_cpuid); 1154 1155 old_lowest_req = sc->cst_cx_lowest_req; 1156 sc->cst_cx_lowest_req = val; 1157 1158 if (val > sc->cst_cx_count - 1) 1159 val = sc->cst_cx_count - 1; 1160 old_lowest = atomic_swap_int(&sc->cst_cx_lowest, val); 1161 1162 old_type = sc->cst_cx_states[old_lowest].type; 1163 type = sc->cst_cx_states[val].type; 1164 if (old_type >= ACPI_STATE_C3 && type < ACPI_STATE_C3) { 1165 cputimer_intr_powersave_remreq(); 1166 } else if (type >= ACPI_STATE_C3 && old_type < ACPI_STATE_C3) { 1167 error = cputimer_intr_powersave_addreq(); 1168 if (error) { 1169 /* Restore */ 1170 sc->cst_cx_lowest_req = old_lowest_req; 1171 sc->cst_cx_lowest = old_lowest; 1172 } 1173 } 1174 1175 if (error) 1176 return error; 1177 1178 /* Cache the new lowest non-C3 state. */ 1179 acpi_cst_non_c3(sc); 1180 1181 /* Reset the statistics counters. */ 1182 bzero(sc->cst_cx_stats, sizeof(sc->cst_cx_stats)); 1183 return (0); 1184 } 1185 1186 static void 1187 acpi_cst_set_lowest_handler(netmsg_t msg) 1188 { 1189 struct netmsg_acpi_cst *rmsg = (struct netmsg_acpi_cst *)msg; 1190 int error; 1191 1192 error = acpi_cst_set_lowest_oncpu(rmsg->sc, rmsg->val); 1193 lwkt_replymsg(&rmsg->base.lmsg, error); 1194 } 1195 1196 static int 1197 acpi_cst_set_lowest(struct acpi_cst_softc *sc, int val) 1198 { 1199 struct netmsg_acpi_cst msg; 1200 1201 netmsg_init(&msg.base, NULL, &curthread->td_msgport, MSGF_PRIORITY, 1202 acpi_cst_set_lowest_handler); 1203 msg.sc = sc; 1204 msg.val = val; 1205 1206 return lwkt_domsg(netisr_cpuport(sc->cst_cpuid), &msg.base.lmsg, 0); 1207 } 1208 1209 static int 1210 acpi_cst_lowest_sysctl(SYSCTL_HANDLER_ARGS) 1211 { 1212 struct acpi_cst_softc *sc; 1213 char state[8]; 1214 int val, error; 1215 1216 sc = (struct acpi_cst_softc *)arg1; 1217 ksnprintf(state, sizeof(state), "C%d", sc->cst_cx_lowest_req + 1); 1218 error = sysctl_handle_string(oidp, state, sizeof(state), req); 1219 if (error != 0 || req->newptr == NULL) 1220 return (error); 1221 if 
(strlen(state) < 2 || toupper(state[0]) != 'C') 1222 return (EINVAL); 1223 val = (int) strtol(state + 1, NULL, 10) - 1; 1224 if (val < 0) 1225 return (EINVAL); 1226 1227 lwkt_serialize_enter(&acpi_cst_slize); 1228 error = acpi_cst_set_lowest(sc, val); 1229 lwkt_serialize_exit(&acpi_cst_slize); 1230 1231 return error; 1232 } 1233 1234 static int 1235 acpi_cst_lowest_use_sysctl(SYSCTL_HANDLER_ARGS) 1236 { 1237 struct acpi_cst_softc *sc; 1238 char state[8]; 1239 1240 sc = (struct acpi_cst_softc *)arg1; 1241 ksnprintf(state, sizeof(state), "C%d", sc->cst_cx_lowest + 1); 1242 return sysctl_handle_string(oidp, state, sizeof(state), req); 1243 } 1244 1245 static int 1246 acpi_cst_global_lowest_sysctl(SYSCTL_HANDLER_ARGS) 1247 { 1248 struct acpi_cst_softc *sc; 1249 char state[8]; 1250 int val, error, i; 1251 1252 ksnprintf(state, sizeof(state), "C%d", acpi_cst_cx_lowest_req + 1); 1253 error = sysctl_handle_string(oidp, state, sizeof(state), req); 1254 if (error != 0 || req->newptr == NULL) 1255 return (error); 1256 if (strlen(state) < 2 || toupper(state[0]) != 'C') 1257 return (EINVAL); 1258 val = (int) strtol(state + 1, NULL, 10) - 1; 1259 if (val < 0) 1260 return (EINVAL); 1261 1262 lwkt_serialize_enter(&acpi_cst_slize); 1263 1264 acpi_cst_cx_lowest_req = val; 1265 acpi_cst_cx_lowest = val; 1266 if (acpi_cst_cx_lowest > acpi_cst_cx_count - 1) 1267 acpi_cst_cx_lowest = acpi_cst_cx_count - 1; 1268 1269 /* Update the new lowest useable Cx state for all CPUs. 
*/ 1270 for (i = 0; i < acpi_cst_ndevices; i++) { 1271 sc = device_get_softc(acpi_cst_devices[i]); 1272 error = acpi_cst_set_lowest(sc, val); 1273 if (error) { 1274 KKASSERT(i == 0); 1275 break; 1276 } 1277 } 1278 1279 lwkt_serialize_exit(&acpi_cst_slize); 1280 1281 return error; 1282 } 1283 1284 static int 1285 acpi_cst_global_lowest_use_sysctl(SYSCTL_HANDLER_ARGS) 1286 { 1287 char state[8]; 1288 1289 ksnprintf(state, sizeof(state), "C%d", acpi_cst_cx_lowest + 1); 1290 return sysctl_handle_string(oidp, state, sizeof(state), req); 1291 } 1292 1293 /* 1294 * Put the CPU in C1 in a machine-dependant way. 1295 * XXX: shouldn't be here! 1296 */ 1297 static void 1298 acpi_cst_c1_halt(void) 1299 { 1300 splz(); 1301 if ((mycpu->gd_reqflags & RQF_IDLECHECK_WK_MASK) == 0) 1302 __asm __volatile("sti; hlt"); 1303 else 1304 __asm __volatile("sti; pause"); 1305 } 1306 1307 static void 1308 acpi_cst_non_c3(struct acpi_cst_softc *sc) 1309 { 1310 int i; 1311 1312 sc->cst_non_c3 = 0; 1313 for (i = sc->cst_cx_lowest; i >= 0; i--) { 1314 if (sc->cst_cx_states[i].type < ACPI_STATE_C3) { 1315 sc->cst_non_c3 = i; 1316 break; 1317 } 1318 } 1319 if (bootverbose) 1320 device_printf(sc->cst_dev, "non-C3 %d\n", sc->cst_non_c3); 1321 } 1322 1323 /* 1324 * Update the largest Cx state supported in the global acpi_cst_cx_count. 1325 * It will be used in the global Cx sysctl handler. 
1326 */ 1327 static void 1328 acpi_cst_global_cx_count(void) 1329 { 1330 struct acpi_cst_softc *sc; 1331 int i; 1332 1333 if (acpi_cst_ndevices == 0) { 1334 acpi_cst_cx_count = 0; 1335 return; 1336 } 1337 1338 sc = device_get_softc(acpi_cst_devices[0]); 1339 acpi_cst_cx_count = sc->cst_cx_count; 1340 1341 for (i = 1; i < acpi_cst_ndevices; i++) { 1342 struct acpi_cst_softc *sc = device_get_softc(acpi_cst_devices[i]); 1343 1344 if (sc->cst_cx_count < acpi_cst_cx_count) 1345 acpi_cst_cx_count = sc->cst_cx_count; 1346 } 1347 if (bootverbose) 1348 kprintf("cpu_cst: global Cx count %d\n", acpi_cst_cx_count); 1349 } 1350 1351 static void 1352 acpi_cst_c1_halt_enter(const struct acpi_cst_cx *cx __unused) 1353 { 1354 acpi_cst_c1_halt(); 1355 } 1356 1357 static void 1358 acpi_cst_cx_io_enter(const struct acpi_cst_cx *cx) 1359 { 1360 uint64_t dummy; 1361 1362 /* 1363 * Read I/O to enter this Cx state 1364 */ 1365 bus_space_read_1(cx->btag, cx->bhand, 0); 1366 /* 1367 * Perform a dummy I/O read. Since it may take an arbitrary time 1368 * to enter the idle state, this read makes sure that we are frozen. 1369 */ 1370 AcpiRead(&dummy, &AcpiGbl_FADT.XPmTimerBlock); 1371 } 1372 1373 static int 1374 acpi_cst_cx_setup(struct acpi_cst_cx *cx) 1375 { 1376 cx->flags &= ~ACPI_CST_CX_FLAG_BM_STS; 1377 cx->preamble = ACPI_CST_CX_PREAMBLE_NONE; 1378 1379 if (cx->type >= ACPI_STATE_C3) { 1380 /* 1381 * Set the required operations for entering C3(+) state. 1382 * Later acpi_cst_md_cx_setup() may fix them up. 1383 */ 1384 1385 /* 1386 * Always check BM_STS. 1387 */ 1388 if ((acpi_cst_quirks & ACPI_CST_QUIRK_NO_BM) == 0) 1389 cx->flags |= ACPI_CST_CX_FLAG_BM_STS; 1390 1391 /* 1392 * According to the ACPI specification, bus master arbitration 1393 * is only available on UP system. For MP system, cache flushing 1394 * is required. 
1395 */ 1396 if (ncpus == 1 && (acpi_cst_quirks & ACPI_CST_QUIRK_NO_BM) == 0) 1397 cx->preamble = ACPI_CST_CX_PREAMBLE_BM_ARB; 1398 else 1399 cx->preamble = ACPI_CST_CX_PREAMBLE_WBINVD; 1400 } 1401 return acpi_cst_md_cx_setup(cx); 1402 } 1403 1404 static void 1405 acpi_cst_free_resource(struct acpi_cst_softc *sc, int start) 1406 { 1407 int i; 1408 1409 for (i = start; i < MAX_CX_STATES; ++i) { 1410 struct acpi_cst_cx *cx = &sc->cst_cx_states[i]; 1411 1412 if (cx->res != NULL) 1413 bus_release_resource(sc->cst_dev, cx->res_type, cx->rid, cx->res); 1414 memset(cx, 0, sizeof(*cx)); 1415 } 1416 } 1417