/*-
 * Copyright (c) 2003-2005 Nate Lawson (SDG)
 * Copyright (c) 2001 Michael Smith
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/dev/acpica/acpi_cpu.c,v 1.72 2008/04/12 12:06:00 rpaulo Exp $
 */

#include "opt_acpi.h"
#include <sys/param.h>
#include <sys/bus.h>
#include <sys/cpuhelper.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/globaldata.h>
#include <sys/power.h>
#include <sys/proc.h>
#include <sys/sbuf.h>
#include <sys/thread2.h>
#include <sys/serialize.h>
#include <sys/msgport2.h>
#include <sys/microtime_pcpu.h>
#include <sys/cpu_topology.h>

#include <bus/pci/pcivar.h>
#include <machine/atomic.h>
#include <machine/globaldata.h>
#include <machine/md_var.h>
#include <machine/smp.h>
#include <sys/rman.h>

#include "acpi.h"
#include "acpivar.h"
#include "acpi_cpu.h"
#include "acpi_cpu_cstate.h"

/*
 * Support for ACPI Processor devices, including C[1-3+] sleep states.
 */

/* Hooks for the ACPICA debugging infrastructure */
#define _COMPONENT	ACPI_PROCESSOR
ACPI_MODULE_NAME("PROCESSOR")

#define MAX_CX_STATES	8

struct acpi_cst_softc {
    device_t		cst_dev;
    struct acpi_cpu_softc *cst_parent;
    ACPI_HANDLE		cst_handle;
    int			cst_cpuid;
    uint32_t		cst_flags;	/* ACPI_CST_FLAG_ */
    uint32_t		cst_p_blk;	/* ACPI P_BLK location */
    uint32_t		cst_p_blk_len;	/* P_BLK length (must be 6). */
    struct acpi_cst_cx	cst_cx_states[MAX_CX_STATES];
    int			cst_cx_count;	/* Number of valid Cx states. */
    int			cst_prev_sleep;	/* Last idle sleep duration. */
    /* Runtime state. */
    int			cst_non_c3;	/* Index of lowest non-C3 state. */
    u_long		cst_cx_stats[MAX_CX_STATES]; /* Cx usage history. */
    /* Values for sysctl. */
    int			cst_cx_lowest;	/* Current Cx lowest */
    int			cst_cx_lowest_req; /* Requested Cx lowest */
    char		cst_cx_supported[64];
};

#define ACPI_CST_FLAG_PROBING	0x1
#define ACPI_CST_FLAG_ATTACHED	0x2
/* Match C-states of other hyperthreads on the same core */
#define ACPI_CST_FLAG_MATCH_HT	0x4

#define PCI_VENDOR_INTEL	0x8086
#define PCI_DEVICE_82371AB_3	0x7113	/* PIIX4 chipset for quirks. */
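/*
 * PIIX4 stepping revision IDs and register bits below are consumed by
 * the quirk handling in acpi_cst_set_quirks().
 */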
#define PCI_REVISION_A_STEP	0
#define PCI_REVISION_B_STEP	1
#define PCI_REVISION_4E		2
#define PCI_REVISION_4M		3
#define PIIX4_DEVACTB_REG	0x58
#define PIIX4_BRLD_EN_IRQ0	(1<<0)
#define PIIX4_BRLD_EN_IRQ	(1<<1)
#define PIIX4_BRLD_EN_IRQ8	(1<<5)
#define PIIX4_STOP_BREAK_MASK	(PIIX4_BRLD_EN_IRQ0 | \
				 PIIX4_BRLD_EN_IRQ | \
				 PIIX4_BRLD_EN_IRQ8)
#define PIIX4_PCNTRL_BST_EN	(1<<10)

/* Platform hardware resource information. */
static uint32_t	acpi_cst_smi_cmd;	/* Value to write to SMI_CMD. */
static uint8_t	acpi_cst_ctrl;		/* Indicate we are _CST aware. */
int		acpi_cst_quirks;	/* Indicate any hardware bugs. */
static boolean_t acpi_cst_use_fadt;

/* Runtime state. */
static boolean_t acpi_cst_disable_idle;	/* Disable entry to idle function */
static int	acpi_cst_cx_count;	/* Number of valid Cx states */

/* Values for sysctl. */
static int	acpi_cst_cx_lowest;	/* Current Cx lowest */
static int	acpi_cst_cx_lowest_req;	/* Requested Cx lowest */

static device_t	*acpi_cst_devices;
static int	acpi_cst_ndevices;
static struct acpi_cst_softc **acpi_cst_softc;
static struct lwkt_serialize acpi_cst_slize = LWKT_SERIALIZE_INITIALIZER;

static int	acpi_cst_probe(device_t);
static int	acpi_cst_attach(device_t);
static int	acpi_cst_suspend(device_t);
static int	acpi_cst_resume(device_t);
static int	acpi_cst_shutdown(device_t);

static void	acpi_cst_notify(device_t);
static void	acpi_cst_postattach(void *);
static void	acpi_cst_idle(void);
static void	acpi_cst_copy(struct acpi_cst_softc *,
		    const struct acpi_cst_softc *);

static void	acpi_cst_cx_probe(struct acpi_cst_softc *);
static void	acpi_cst_cx_probe_fadt(struct acpi_cst_softc *);
static int	acpi_cst_cx_probe_cst(struct acpi_cst_softc *, int);
static int	acpi_cst_cx_reprobe_cst(struct acpi_cst_softc *);

static void	acpi_cst_startup(struct acpi_cst_softc *);
static void	acpi_cst_support_list(struct acpi_cst_softc *);
static int	acpi_cst_set_lowest(struct acpi_cst_softc *, int);
static int	acpi_cst_set_lowest_oncpu(struct acpi_cst_softc *, int);
static void	acpi_cst_non_c3(struct acpi_cst_softc *);
static void	acpi_cst_global_cx_count(void);
static int	acpi_cst_set_quirks(void);
static void	acpi_cst_c3_bm_rld(struct acpi_cst_softc *);
static void	acpi_cst_free_resource(struct acpi_cst_softc *, int);
static void	acpi_cst_c1_halt(void);

static int	acpi_cst_usage_sysctl(SYSCTL_HANDLER_ARGS);
static int	acpi_cst_lowest_sysctl(SYSCTL_HANDLER_ARGS);
static int	acpi_cst_lowest_use_sysctl(SYSCTL_HANDLER_ARGS);
static int	acpi_cst_global_lowest_sysctl(SYSCTL_HANDLER_ARGS);
static int	acpi_cst_global_lowest_use_sysctl(SYSCTL_HANDLER_ARGS);

static int	acpi_cst_cx_setup(struct acpi_cst_cx *cx);
static void	acpi_cst_c1_halt_enter(const struct acpi_cst_cx *);
static void	acpi_cst_cx_io_enter(const struct acpi_cst_cx *);

int acpi_cst_force_bmarb;
TUNABLE_INT("hw.acpi.cpu.cst.force_bmarb", &acpi_cst_force_bmarb);

int acpi_cst_force_bmsts;
TUNABLE_INT("hw.acpi.cpu.cst.force_bmsts", &acpi_cst_force_bmsts);

static device_method_t acpi_cst_methods[] = {
    /* Device interface */
    DEVMETHOD(device_probe,	acpi_cst_probe),
    DEVMETHOD(device_attach,	acpi_cst_attach),
    DEVMETHOD(device_detach,	bus_generic_detach),
    DEVMETHOD(device_shutdown,	acpi_cst_shutdown),
    DEVMETHOD(device_suspend,	acpi_cst_suspend),
    DEVMETHOD(device_resume,	acpi_cst_resume),

    /* Bus interface */
    DEVMETHOD(bus_add_child,		bus_generic_add_child),
    DEVMETHOD(bus_read_ivar,		bus_generic_read_ivar),
    DEVMETHOD(bus_get_resource_list,	bus_generic_get_resource_list),
    DEVMETHOD(bus_get_resource,		bus_generic_rl_get_resource),
    DEVMETHOD(bus_set_resource,		bus_generic_rl_set_resource),
    DEVMETHOD(bus_alloc_resource,	bus_generic_rl_alloc_resource),
    DEVMETHOD(bus_release_resource,	bus_generic_rl_release_resource),
    DEVMETHOD(bus_driver_added,		bus_generic_driver_added),
    DEVMETHOD(bus_activate_resource,	bus_generic_activate_resource),
    DEVMETHOD(bus_deactivate_resource,	bus_generic_deactivate_resource),
    DEVMETHOD(bus_setup_intr,		bus_generic_setup_intr),
    DEVMETHOD(bus_teardown_intr,	bus_generic_teardown_intr),
    DEVMETHOD_END
};

static driver_t acpi_cst_driver = {
    "cpu_cst",
    acpi_cst_methods,
    sizeof(struct acpi_cst_softc),
    .gpri = KOBJ_GPRI_ACPI+2
};

static devclass_t acpi_cst_devclass;
DRIVER_MODULE(cpu_cst, cpu, acpi_cst_driver, acpi_cst_devclass, NULL, NULL);
MODULE_DEPEND(cpu_cst, acpi, 1, 1, 1);

static int
acpi_cst_probe(device_t dev)
{
    int cpu_id;

    if (acpi_disabled("cpu_cst") || acpi_get_type(dev) != ACPI_TYPE_PROCESSOR)
	return (ENXIO);

    cpu_id = acpi_get_magic(dev);

    if (acpi_cst_softc == NULL)
	acpi_cst_softc = kmalloc(sizeof(struct acpi_cst_softc *) *
	    SMP_MAXCPU, M_TEMP /* XXX */, M_INTWAIT | M_ZERO);

    /*
     * Check if we already probed this processor.  We scan the bus twice
     * so it's possible we've already seen this one.
     */
    if (acpi_cst_softc[cpu_id] != NULL) {
	device_printf(dev, "CPU%d cstate already exists\n", cpu_id);
	return (ENXIO);
    }

    /* Mark this processor as in-use and save our derived id for attach. */
    acpi_cst_softc[cpu_id] = device_get_softc(dev);
    device_set_desc(dev, "ACPI CPU C-State");

    return (0);
}

static int
acpi_cst_attach(device_t dev)
{
    ACPI_BUFFER		buf;
    ACPI_OBJECT		*obj;
    struct acpi_cst_softc *sc;
    ACPI_STATUS		status;

    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    sc = device_get_softc(dev);
    sc->cst_dev = dev;
    sc->cst_parent = device_get_softc(device_get_parent(dev));
    sc->cst_handle = acpi_get_handle(dev);
    sc->cst_cpuid = acpi_get_magic(dev);
    acpi_cst_softc[sc->cst_cpuid] = sc;
    acpi_cst_smi_cmd = AcpiGbl_FADT.SmiCommand;
    acpi_cst_ctrl = AcpiGbl_FADT.CstControl;

    buf.Pointer = NULL;
    buf.Length = ACPI_ALLOCATE_BUFFER;
    status = AcpiEvaluateObject(sc->cst_handle, NULL, NULL, &buf);
    if (ACPI_FAILURE(status)) {
	device_printf(dev, "attach failed to get Processor obj - %s\n",
		      AcpiFormatException(status));
	acpi_cst_softc[sc->cst_cpuid] = NULL;
	return (ENXIO);
    }
    obj = (ACPI_OBJECT *)buf.Pointer;
    sc->cst_p_blk = obj->Processor.PblkAddress;
    sc->cst_p_blk_len = obj->Processor.PblkLength;
    AcpiOsFree(obj);
    ACPI_DEBUG_PRINT((ACPI_DB_INFO, "cpu_cst%d: P_BLK at %#x/%d\n",
		     device_get_unit(dev), sc->cst_p_blk, sc->cst_p_blk_len));

    /*
     * If this is the first cpu we attach, create and initialize the generic
     * resources that will be used by all acpi cpu devices.
     */
    if (device_get_unit(dev) == 0) {
	/* Assume we won't be using FADT for Cx states by default */
	acpi_cst_use_fadt = FALSE;

	/* Queue post cpu-probing task handler */
	AcpiOsExecute(OSL_NOTIFY_HANDLER, acpi_cst_postattach, NULL);
    }

    /* Probe for Cx state support. */
    acpi_cst_cx_probe(sc);

    sc->cst_flags |= ACPI_CST_FLAG_ATTACHED;

    return (0);
}

/*
 * Disable any entry to the idle function during suspend and re-enable it
 * during resume.
 */
static int
acpi_cst_suspend(device_t dev)
{
    int error;

    error = bus_generic_suspend(dev);
    if (error)
	return (error);
    acpi_cst_disable_idle = TRUE;
    return (0);
}

static int
acpi_cst_resume(device_t dev)
{
    acpi_cst_disable_idle = FALSE;
    return (bus_generic_resume(dev));
}

static int
acpi_cst_shutdown(device_t dev)
{
    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    /* Allow children to shutdown first. */
    bus_generic_shutdown(dev);

    /*
     * Disable any entry to the idle function.  There is a small race where
     * an idle thread may have passed this check but not yet gone to sleep.
     * This is ok since device_shutdown() does not free the softc, otherwise
     * we'd have to be sure all threads were evicted before returning.
     */
    acpi_cst_disable_idle = TRUE;

    return_VALUE (0);
}

static void
acpi_cst_cx_probe(struct acpi_cst_softc *sc)
{
    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    /* Use initial sleep value of 1 sec. to start with lowest idle state. */
    sc->cst_prev_sleep = 1000000;
    sc->cst_cx_lowest = 0;
    sc->cst_cx_lowest_req = 0;

    /*
     * Check for the ACPI 2.0 _CST sleep states object.  If we can't find
     * any, we'll revert to the FADT/P_BLK Cx control method, which will
     * be handled by acpi_cst_postattach.  Probing Cx states from the
     * FADT must be deferred until all CPUs in the system have been
     * probed, as we may already have found CPUs with valid _CST packages.
     */
    if (!acpi_cst_use_fadt && acpi_cst_cx_probe_cst(sc, 0) != 0) {
	/*
	 * We were unable to find a _CST package for this cpu or there
	 * was an error parsing it.  Switch back to generic mode.
	 */
	acpi_cst_use_fadt = TRUE;
	if (bootverbose)
	    device_printf(sc->cst_dev, "switching to FADT Cx mode\n");
    }

    /*
     * TODO: _CSD Package should be checked here.
     */
}

static void
acpi_cst_cx_probe_fadt(struct acpi_cst_softc *sc)
{
    struct acpi_cst_cx *cx_ptr;
    int error;

    /*
     * Free all previously allocated resources.
     *
     * NOTE:
     * This is needed, since we could enter here because of another
     * cpu's _CST probing failure.
     */
    acpi_cst_free_resource(sc, 0);

    sc->cst_cx_count = 0;
    cx_ptr = sc->cst_cx_states;

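    /*
     * The FADT fallback path synthesizes at most three states from the
     * fixed hardware description: C1 via HALT, C2 via the P_LVL2 I/O
     * port and C3 via P_LVL3.
     */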
    /* Use initial sleep value of 1 sec. to start with lowest idle state. */
    sc->cst_prev_sleep = 1000000;

    /* C1 has been required since just after ACPI 1.0 */
    cx_ptr->gas.SpaceId = ACPI_ADR_SPACE_FIXED_HARDWARE;
    cx_ptr->type = ACPI_STATE_C1;
    cx_ptr->trans_lat = 0;
    cx_ptr->enter = acpi_cst_c1_halt_enter;
    error = acpi_cst_cx_setup(cx_ptr);
    if (error)
	panic("C1 FADT HALT setup failed: %d", error);
    cx_ptr++;
    sc->cst_cx_count++;

    /* C2(+) is not supported on MP systems */
    if (ncpus > 1 && (AcpiGbl_FADT.Flags & ACPI_FADT_C2_MP_SUPPORTED) == 0)
	return;

    /*
     * The spec says P_BLK must be 6 bytes long.  However, some systems
     * use it to indicate a fractional set of features present so we
     * take 5 as C2.  Some may also have a value of 7 to indicate
     * another C3 but most use _CST for this (as required) and having
     * "only" C1-C3 is not a hardship.
     */
    if (sc->cst_p_blk_len < 5)
	return;

    /* Validate and allocate resources for C2 (P_LVL2). */
    if (AcpiGbl_FADT.C2Latency <= 100) {
	cx_ptr->gas.SpaceId = ACPI_ADR_SPACE_SYSTEM_IO;
	cx_ptr->gas.BitWidth = 8;
	cx_ptr->gas.Address = sc->cst_p_blk + 4;

	cx_ptr->rid = sc->cst_parent->cpu_next_rid;
	acpi_bus_alloc_gas(sc->cst_dev, &cx_ptr->res_type, &cx_ptr->rid,
	    &cx_ptr->gas, &cx_ptr->res, RF_SHAREABLE);
	if (cx_ptr->res != NULL) {
	    sc->cst_parent->cpu_next_rid++;
	    cx_ptr->type = ACPI_STATE_C2;
	    cx_ptr->trans_lat = AcpiGbl_FADT.C2Latency;
	    cx_ptr->enter = acpi_cst_cx_io_enter;
	    cx_ptr->btag = rman_get_bustag(cx_ptr->res);
	    cx_ptr->bhand = rman_get_bushandle(cx_ptr->res);
	    error = acpi_cst_cx_setup(cx_ptr);
	    if (error)
		panic("C2 FADT I/O setup failed: %d", error);
	    cx_ptr++;
	    sc->cst_cx_count++;
	    sc->cst_non_c3 = 1;
	}
    }
    if (sc->cst_p_blk_len < 6)
	return;

    /* Validate and allocate resources for C3 (P_LVL3). */
    if (AcpiGbl_FADT.C3Latency <= 1000 &&
	!(acpi_cst_quirks & ACPI_CST_QUIRK_NO_C3)) {
	cx_ptr->gas.SpaceId = ACPI_ADR_SPACE_SYSTEM_IO;
	cx_ptr->gas.BitWidth = 8;
	cx_ptr->gas.Address = sc->cst_p_blk + 5;

	cx_ptr->rid = sc->cst_parent->cpu_next_rid;
	acpi_bus_alloc_gas(sc->cst_dev, &cx_ptr->res_type, &cx_ptr->rid,
	    &cx_ptr->gas, &cx_ptr->res, RF_SHAREABLE);
	if (cx_ptr->res != NULL) {
	    sc->cst_parent->cpu_next_rid++;
	    cx_ptr->type = ACPI_STATE_C3;
	    cx_ptr->trans_lat = AcpiGbl_FADT.C3Latency;
	    cx_ptr->enter = acpi_cst_cx_io_enter;
	    cx_ptr->btag = rman_get_bustag(cx_ptr->res);
	    cx_ptr->bhand = rman_get_bushandle(cx_ptr->res);
	    error = acpi_cst_cx_setup(cx_ptr);
	    if (error)
		panic("C3 FADT I/O setup failed: %d", error);
	    cx_ptr++;
	    sc->cst_cx_count++;
	}
    }
}

static void
acpi_cst_copy(struct acpi_cst_softc *dst_sc,
    const struct acpi_cst_softc *src_sc)
{
    dst_sc->cst_non_c3 = src_sc->cst_non_c3;
    dst_sc->cst_cx_count = src_sc->cst_cx_count;
    memcpy(dst_sc->cst_cx_states, src_sc->cst_cx_states,
	sizeof(dst_sc->cst_cx_states));
}

/*
 * Parse a _CST package and set up its Cx states.  Since the _CST object
 * can change dynamically, our notify handler may call this function
 * to clean up and probe the new _CST package.
 */
static int
acpi_cst_cx_probe_cst(struct acpi_cst_softc *sc, int reprobe)
{
    struct acpi_cst_cx *cx_ptr;
    ACPI_STATUS status;
    ACPI_BUFFER buf;
    ACPI_OBJECT *top;
    ACPI_OBJECT *pkg;
    uint32_t count;
    int i;

    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    if (reprobe)
	cpuhelper_assert(sc->cst_cpuid, true);

    buf.Pointer = NULL;
    buf.Length = ACPI_ALLOCATE_BUFFER;
    status = AcpiEvaluateObject(sc->cst_handle, "_CST", NULL, &buf);
    if (ACPI_FAILURE(status))
	return (ENXIO);

    /* _CST is a package with a count and at least one Cx package. */
    top = (ACPI_OBJECT *)buf.Pointer;
    if (!ACPI_PKG_VALID(top, 2) || acpi_PkgInt32(top, 0, &count) != 0) {
	device_printf(sc->cst_dev, "invalid _CST package\n");
	AcpiOsFree(buf.Pointer);
	return (ENXIO);
    }
    if (count != top->Package.Count - 1) {
	device_printf(sc->cst_dev, "invalid _CST state count (%d != %d)\n",
		      count, top->Package.Count - 1);
	count = top->Package.Count - 1;
    }
    if (count > MAX_CX_STATES) {
	device_printf(sc->cst_dev, "_CST has too many states (%d)\n", count);
	count = MAX_CX_STATES;
    }

    sc->cst_flags |= ACPI_CST_FLAG_PROBING | ACPI_CST_FLAG_MATCH_HT;
    cpu_sfence();

    /*
     * Free all previously allocated resources.
     *
     * NOTE: This is needed for _CST reprobing.
     */
    acpi_cst_free_resource(sc, 0);

    /* Set up all valid states. */
    sc->cst_cx_count = 0;
    cx_ptr = sc->cst_cx_states;
    for (i = 0; i < count; i++) {
	int error;

	pkg = &top->Package.Elements[i + 1];
	if (!ACPI_PKG_VALID(pkg, 4) ||
	    acpi_PkgInt32(pkg, 1, &cx_ptr->type) != 0 ||
	    acpi_PkgInt32(pkg, 2, &cx_ptr->trans_lat) != 0 ||
	    acpi_PkgInt32(pkg, 3, &cx_ptr->power) != 0) {
	    device_printf(sc->cst_dev, "skipping invalid Cx state package\n");
	    continue;
	}

	/* Validate the state to see if we should use it. */
	switch (cx_ptr->type) {
	case ACPI_STATE_C1:
	    sc->cst_non_c3 = i;
	    cx_ptr->enter = acpi_cst_c1_halt_enter;
	    error = acpi_cst_cx_setup(cx_ptr);
	    if (error)
		panic("C1 CST HALT setup failed: %d", error);
	    if (sc->cst_cx_count != 0) {
		/*
		 * C1 is not the first C-state; something really stupid
		 * is going on ...
		 */
		sc->cst_flags &= ~ACPI_CST_FLAG_MATCH_HT;
	    }
	    cx_ptr++;
	    sc->cst_cx_count++;
	    continue;
	case ACPI_STATE_C2:
	    sc->cst_non_c3 = i;
	    break;
	case ACPI_STATE_C3:
	default:
	    if ((acpi_cst_quirks & ACPI_CST_QUIRK_NO_C3) != 0) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
		    "cpu_cst%d: C3[%d] not available.\n",
		    device_get_unit(sc->cst_dev), i));
		continue;
	    }
	    break;
	}

	/*
	 * Allocate the control register for C2 or C3(+).
	 */
	KASSERT(cx_ptr->res == NULL, ("still has res"));
	acpi_PkgRawGas(pkg, 0, &cx_ptr->gas);

	/*
	 * We match the number of C2/C3 states across hyperthreads only
	 * if the register is "Fixed Hardware", e.g. on most of the
	 * Intel CPUs.  We don't have much to do for the rest of the
	 * register types.
	 */
	if (cx_ptr->gas.SpaceId != ACPI_ADR_SPACE_FIXED_HARDWARE)
	    sc->cst_flags &= ~ACPI_CST_FLAG_MATCH_HT;

	cx_ptr->rid = sc->cst_parent->cpu_next_rid;
	acpi_bus_alloc_gas(sc->cst_dev, &cx_ptr->res_type, &cx_ptr->rid,
	    &cx_ptr->gas, &cx_ptr->res, RF_SHAREABLE);
	if (cx_ptr->res != NULL) {
	    sc->cst_parent->cpu_next_rid++;
	    ACPI_DEBUG_PRINT((ACPI_DB_INFO,
		"cpu_cst%d: Got C%d - %d latency\n",
		device_get_unit(sc->cst_dev), cx_ptr->type,
		cx_ptr->trans_lat));
	    cx_ptr->enter = acpi_cst_cx_io_enter;
	    cx_ptr->btag = rman_get_bustag(cx_ptr->res);
	    cx_ptr->bhand = rman_get_bushandle(cx_ptr->res);
	    error = acpi_cst_cx_setup(cx_ptr);
	    if (error)
		panic("C%d CST I/O setup failed: %d", cx_ptr->type, error);
	    cx_ptr++;
	    sc->cst_cx_count++;
	} else {
	    error = acpi_cst_cx_setup(cx_ptr);
	    if (!error) {
		KASSERT(cx_ptr->enter != NULL,
		    ("C%d enter is not set", cx_ptr->type));
		cx_ptr++;
		sc->cst_cx_count++;
	    }
	}
    }
    AcpiOsFree(buf.Pointer);

    if (sc->cst_flags & ACPI_CST_FLAG_MATCH_HT) {
	cpumask_t mask;

	mask = get_cpumask_from_level(sc->cst_cpuid, CORE_LEVEL);
	if (CPUMASK_TESTNZERO(mask)) {
	    int cpu;

	    for (cpu = 0; cpu < ncpus; ++cpu) {
		struct acpi_cst_softc *sc1 = acpi_cst_softc[cpu];

		if (sc1 == NULL || sc1 == sc ||
		    (sc1->cst_flags & ACPI_CST_FLAG_ATTACHED) == 0 ||
		    (sc1->cst_flags & ACPI_CST_FLAG_MATCH_HT) == 0)
		    continue;
		if (!CPUMASK_TESTBIT(mask, sc1->cst_cpuid))
		    continue;

		if (sc1->cst_cx_count != sc->cst_cx_count) {
		    struct acpi_cst_softc *src_sc, *dst_sc;

		    if (bootverbose) {
			device_printf(sc->cst_dev,
			    "inconsistent C-state count: %d, %s has %d\n",
			    sc->cst_cx_count,
			    device_get_nameunit(sc1->cst_dev),
			    sc1->cst_cx_count);
		    }
		    if (sc1->cst_cx_count > sc->cst_cx_count) {
			src_sc = sc1;
			dst_sc = sc;
		    } else {
			src_sc = sc;
			dst_sc = sc1;
		    }
		    acpi_cst_copy(dst_sc, src_sc);
		}
	    }
	}
    }

    if (reprobe) {
	/* If there are C3(+) states, always enable bus master wakeup */
	if ((acpi_cst_quirks & ACPI_CST_QUIRK_NO_BM) == 0) {
	    for (i = 0; i < sc->cst_cx_count; ++i) {
		struct acpi_cst_cx *cx = &sc->cst_cx_states[i];

		if (cx->type >= ACPI_STATE_C3) {
		    AcpiWriteBitRegister(ACPI_BITREG_BUS_MASTER_RLD, 1);
		    break;
		}
	    }
	}

	/* Fix up the lowest Cx being used */
	acpi_cst_set_lowest_oncpu(sc, sc->cst_cx_lowest_req);
    }

    /*
     * Cache the lowest non-C3 state.
     * NOTE: this must happen after cst_cx_lowest is set.
     */
    acpi_cst_non_c3(sc);

    cpu_sfence();
    sc->cst_flags &= ~ACPI_CST_FLAG_PROBING;

    return (0);
}

static void
acpi_cst_cx_reprobe_cst_handler(struct cpuhelper_msg *msg)
{
    int error;

    error = acpi_cst_cx_probe_cst(msg->ch_cbarg, 1);
    cpuhelper_replymsg(msg, error);
}

static int
acpi_cst_cx_reprobe_cst(struct acpi_cst_softc *sc)
{
    struct cpuhelper_msg msg;

    cpuhelper_initmsg(&msg, &curthread->td_msgport,
	acpi_cst_cx_reprobe_cst_handler, sc, MSGF_PRIORITY);
    return (cpuhelper_domsg(&msg, sc->cst_cpuid));
}

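/*
 * NOTE: _CST (re)probing must run on the CPU being probed.  The
 * cpuhelper message above synchronously dispatches the handler to the
 * target CPU; cpuhelper_assert() in acpi_cst_cx_probe_cst() verifies
 * this for the reprobe path.
 */
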
/*
 * Call this *after* all CPUs' Cx states have been attached.
 */
static void
acpi_cst_postattach(void *arg)
{
    struct acpi_cst_softc *sc;
    int i;

    /* Get set of Cx state devices */
    devclass_get_devices(acpi_cst_devclass, &acpi_cst_devices,
	&acpi_cst_ndevices);

    /*
     * Set up any quirks that might be necessary now that we have probed
     * all the CPUs' Cx states.
     */
    acpi_cst_set_quirks();

    if (acpi_cst_use_fadt) {
	/*
	 * We are using Cx mode from FADT, probe for available Cx states
	 * for all processors.
	 */
	for (i = 0; i < acpi_cst_ndevices; i++) {
	    sc = device_get_softc(acpi_cst_devices[i]);
	    acpi_cst_cx_probe_fadt(sc);
	}
    } else {
	/*
	 * We are using _CST mode, remove C3 states if necessary.
	 *
	 * As we now know for sure that we will be using _CST mode,
	 * install our notify handler.
	 */
	for (i = 0; i < acpi_cst_ndevices; i++) {
	    sc = device_get_softc(acpi_cst_devices[i]);
	    if (acpi_cst_quirks & ACPI_CST_QUIRK_NO_C3) {
		/* Free part of the unused resources */
		acpi_cst_free_resource(sc, sc->cst_non_c3 + 1);
		sc->cst_cx_count = sc->cst_non_c3 + 1;
	    }
	    sc->cst_parent->cpu_cst_notify = acpi_cst_notify;
	}
    }
    acpi_cst_global_cx_count();

    /* Perform Cx final initialization. */
    for (i = 0; i < acpi_cst_ndevices; i++) {
	sc = device_get_softc(acpi_cst_devices[i]);
	acpi_cst_startup(sc);

	if (sc->cst_parent->glob_sysctl_tree != NULL) {
	    struct acpi_cpu_softc *cpu = sc->cst_parent;

	    /* Add a sysctl handler to handle global Cx lowest setting */
	    SYSCTL_ADD_PROC(&cpu->glob_sysctl_ctx,
			    SYSCTL_CHILDREN(cpu->glob_sysctl_tree),
			    OID_AUTO, "cx_lowest",
			    CTLTYPE_STRING | CTLFLAG_RW, NULL, 0,
			    acpi_cst_global_lowest_sysctl, "A",
			    "Requested global lowest Cx sleep state");
	    SYSCTL_ADD_PROC(&cpu->glob_sysctl_ctx,
			    SYSCTL_CHILDREN(cpu->glob_sysctl_tree),
			    OID_AUTO, "cx_lowest_use",
			    CTLTYPE_STRING | CTLFLAG_RD, NULL, 0,
			    acpi_cst_global_lowest_use_sysctl, "A",
			    "Global lowest Cx sleep state to use");
	}
    }

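    /*
     * Only now, with every CPU's Cx data initialized, do we take over
     * the idle loop.  The cpu_sfence() below makes the state above
     * globally visible before cpu_idle_hook is published.
     */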
    /* Take over idling from cpu_idle_default(). */
    acpi_cst_cx_lowest = 0;
    acpi_cst_cx_lowest_req = 0;
    acpi_cst_disable_idle = FALSE;

    cpu_sfence();
    cpu_idle_hook = acpi_cst_idle;
}

static void
acpi_cst_support_list(struct acpi_cst_softc *sc)
{
    struct sbuf sb;
    int i;

    /*
     * Set up the list of Cx states
     */
    sbuf_new(&sb, sc->cst_cx_supported, sizeof(sc->cst_cx_supported),
	SBUF_FIXEDLEN);
    for (i = 0; i < sc->cst_cx_count; i++)
	sbuf_printf(&sb, "C%d/%d ", i + 1, sc->cst_cx_states[i].trans_lat);
    sbuf_trim(&sb);
    sbuf_finish(&sb);
}

static void
acpi_cst_c3_bm_rld_handler(struct cpuhelper_msg *msg)
{
    AcpiWriteBitRegister(ACPI_BITREG_BUS_MASTER_RLD, 1);
    cpuhelper_replymsg(msg, 0);
}

static void
acpi_cst_c3_bm_rld(struct acpi_cst_softc *sc)
{
    struct cpuhelper_msg msg;

    cpuhelper_initmsg(&msg, &curthread->td_msgport,
	acpi_cst_c3_bm_rld_handler, sc, MSGF_PRIORITY);
    cpuhelper_domsg(&msg, sc->cst_cpuid);
}

static void
acpi_cst_startup(struct acpi_cst_softc *sc)
{
    struct acpi_cpu_softc *cpu = sc->cst_parent;
    int i, bm_rld_done = 0;

    for (i = 0; i < sc->cst_cx_count; ++i) {
	struct acpi_cst_cx *cx = &sc->cst_cx_states[i];
	int error;

	/* If there are C3(+) states, always enable bus master wakeup */
	if (cx->type >= ACPI_STATE_C3 && !bm_rld_done &&
	    (acpi_cst_quirks & ACPI_CST_QUIRK_NO_BM) == 0) {
	    acpi_cst_c3_bm_rld(sc);
	    bm_rld_done = 1;
	}

	/* Redo the Cx setup, since the quirks have been updated */
	error = acpi_cst_cx_setup(cx);
	if (error)
	    panic("C%d startup setup failed: %d", i + 1, error);
    }

    acpi_cst_support_list(sc);

    SYSCTL_ADD_STRING(&cpu->pcpu_sysctl_ctx,
		      SYSCTL_CHILDREN(cpu->pcpu_sysctl_tree),
		      OID_AUTO, "cx_supported", CTLFLAG_RD,
		      sc->cst_cx_supported, 0,
		      "Cx/microsecond values for supported Cx states");
    SYSCTL_ADD_PROC(&cpu->pcpu_sysctl_ctx,
		    SYSCTL_CHILDREN(cpu->pcpu_sysctl_tree),
		    OID_AUTO, "cx_lowest", CTLTYPE_STRING | CTLFLAG_RW,
		    (void *)sc, 0, acpi_cst_lowest_sysctl, "A",
		    "requested lowest Cx sleep state");
    SYSCTL_ADD_PROC(&cpu->pcpu_sysctl_ctx,
		    SYSCTL_CHILDREN(cpu->pcpu_sysctl_tree),
		    OID_AUTO, "cx_lowest_use", CTLTYPE_STRING | CTLFLAG_RD,
		    (void *)sc, 0, acpi_cst_lowest_use_sysctl, "A",
		    "lowest Cx sleep state to use");
    SYSCTL_ADD_PROC(&cpu->pcpu_sysctl_ctx,
		    SYSCTL_CHILDREN(cpu->pcpu_sysctl_tree),
		    OID_AUTO, "cx_usage", CTLTYPE_STRING | CTLFLAG_RD,
		    (void *)sc, 0, acpi_cst_usage_sysctl, "A",
		    "percent usage for each Cx state");

#ifdef notyet
    /* Signal platform that we can handle _CST notification. */
    if (!acpi_cst_use_fadt && acpi_cst_ctrl != 0) {
	ACPI_LOCK(acpi);
	AcpiOsWritePort(acpi_cst_smi_cmd, acpi_cst_ctrl, 8);
	ACPI_UNLOCK(acpi);
    }
#endif
}

/*
 * Idle the CPU in the lowest state possible.  This function is called with
 * interrupts disabled.  Note that once it re-enables interrupts, a task
 * switch can occur so do not access shared data (i.e. the softc) after
 * interrupts are re-enabled.
 */
static void
acpi_cst_idle(void)
{
    struct acpi_cst_softc *sc;
    struct acpi_cst_cx *cx_next;
    union microtime_pcpu start, end;
    int cx_next_idx, i, tdiff, bm_arb_disabled = 0;

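    /*
     * NOTE: cst_prev_sleep is a weighted moving average of recent idle
     * durations in microseconds.  The selection loop below only picks a
     * state whose transition latency is less than 1/3 of the expected
     * idle period.
     */
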
    /* If disabled, return immediately. */
    if (acpi_cst_disable_idle) {
	ACPI_ENABLE_IRQS();
	return;
    }

    /*
     * Look up our CPU id to get our softc.  If it's NULL, we'll use C1
     * since there is no Cx state for this processor.
     */
    sc = acpi_cst_softc[mdcpu->mi.gd_cpuid];
    if (sc == NULL) {
	acpi_cst_c1_halt();
	return;
    }

    /* Still probing; use C1 */
    if (sc->cst_flags & ACPI_CST_FLAG_PROBING) {
	acpi_cst_c1_halt();
	return;
    }

    /* Find the lowest state that has a small enough latency. */
    cx_next_idx = 0;
    for (i = sc->cst_cx_lowest; i >= 0; i--) {
	if (sc->cst_cx_states[i].trans_lat * 3 <= sc->cst_prev_sleep) {
	    cx_next_idx = i;
	    break;
	}
    }

    /*
     * Check for bus master activity if needed for the selected state.
     * If there was activity, clear the bit and use the lowest non-C3 state.
     */
    cx_next = &sc->cst_cx_states[cx_next_idx];
    if (cx_next->flags & ACPI_CST_CX_FLAG_BM_STS) {
	int bm_active;

	AcpiReadBitRegister(ACPI_BITREG_BUS_MASTER_STATUS, &bm_active);
	if (bm_active != 0) {
	    AcpiWriteBitRegister(ACPI_BITREG_BUS_MASTER_STATUS, 1);
	    cx_next_idx = sc->cst_non_c3;
	}
    }

    /* Select the next state and update statistics. */
    cx_next = &sc->cst_cx_states[cx_next_idx];
    sc->cst_cx_stats[cx_next_idx]++;
    KASSERT(cx_next->type != ACPI_STATE_C0, ("C0 sleep"));

    /*
     * Execute HLT (or equivalent) and wait for an interrupt.  We can't
     * calculate the time spent in C1 since the place we wake up is an
     * ISR.  Assume we slept half of a quantum and return.
     */
    if (cx_next->type == ACPI_STATE_C1) {
	sc->cst_prev_sleep = (sc->cst_prev_sleep * 3 + 500000 / hz) / 4;
	cx_next->enter(cx_next);
	return;
    }

    /* Execute the proper preamble before entering the selected state. */
    if (cx_next->preamble == ACPI_CST_CX_PREAMBLE_BM_ARB) {
	AcpiWriteBitRegister(ACPI_BITREG_ARB_DISABLE, 1);
	bm_arb_disabled = 1;
    } else if (cx_next->preamble == ACPI_CST_CX_PREAMBLE_WBINVD) {
	ACPI_FLUSH_CPU_CACHE();
    }

    /*
     * Enter the selected state and check time spent asleep.
     */
    microtime_pcpu_get(&start);
    cpu_mfence();

    cx_next->enter(cx_next);

    cpu_mfence();
    microtime_pcpu_get(&end);

    /* Enable bus master arbitration, if it was disabled. */
    if (bm_arb_disabled)
	AcpiWriteBitRegister(ACPI_BITREG_ARB_DISABLE, 0);

    ACPI_ENABLE_IRQS();

    /* Find the actual time asleep in microseconds. */
    tdiff = microtime_pcpu_diff(&start, &end);
    sc->cst_prev_sleep = (sc->cst_prev_sleep * 3 + tdiff) / 4;
}

/*
 * Re-evaluate the _CST object when we are notified that it changed.
 */
static void
acpi_cst_notify(device_t dev)
{
    struct acpi_cst_softc *sc = device_get_softc(dev);

    cpuhelper_assert(mycpuid, false);

    lwkt_serialize_enter(&acpi_cst_slize);

    /* Update the list of Cx states. */
    acpi_cst_cx_reprobe_cst(sc);
    acpi_cst_support_list(sc);

    /* Update the new lowest usable Cx state for all CPUs. */
    acpi_cst_global_cx_count();

    /*
     * Fix up the lowest Cx being used
     */
    if (acpi_cst_cx_lowest_req < acpi_cst_cx_count)
	acpi_cst_cx_lowest = acpi_cst_cx_lowest_req;
    if (acpi_cst_cx_lowest > acpi_cst_cx_count - 1)
	acpi_cst_cx_lowest = acpi_cst_cx_count - 1;

    lwkt_serialize_exit(&acpi_cst_slize);
}

static int
acpi_cst_set_quirks(void)
{
    device_t acpi_dev;
    uint32_t val;

    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    /*
     * Bus mastering arbitration control is needed to keep caches coherent
     * while sleeping in C3.  If it's not present but a working flush cache
     * instruction is present, flush the caches before entering C3 instead.
     * Otherwise, just disable C3 completely.
     */
    if (AcpiGbl_FADT.Pm2ControlBlock == 0 ||
	AcpiGbl_FADT.Pm2ControlLength == 0) {
	if ((AcpiGbl_FADT.Flags & ACPI_FADT_WBINVD) &&
	    (AcpiGbl_FADT.Flags & ACPI_FADT_WBINVD_FLUSH) == 0) {
	    acpi_cst_quirks |= ACPI_CST_QUIRK_NO_BM;
	    ACPI_DEBUG_PRINT((ACPI_DB_INFO,
		"cpu_cst: no BM control, using flush cache method\n"));
	} else {
	    acpi_cst_quirks |= ACPI_CST_QUIRK_NO_C3;
	    ACPI_DEBUG_PRINT((ACPI_DB_INFO,
		"cpu_cst: no BM control, C3 not available\n"));
	}
    }

    /* Look for various quirks of the PIIX4 part. */
    acpi_dev = pci_find_device(PCI_VENDOR_INTEL, PCI_DEVICE_82371AB_3);
    if (acpi_dev != NULL) {
	switch (pci_get_revid(acpi_dev)) {
	/*
	 * Disable C3 support for all PIIX4 chipsets.  Some of these parts
	 * do not report the BMIDE status to the BM status register and
	 * others have a livelock bug if Type-F DMA is enabled.  Linux
	 * works around the BMIDE bug by reading the BM status directly
	 * but we take the simpler approach of disabling C3 for these
	 * parts.
	 *
	 * See erratum #18 ("C3 Power State/BMIDE and Type-F DMA
	 * Livelock") from the January 2002 PIIX4 specification update.
	 * Applies to all PIIX4 models.
	 *
	 * Also, make sure that all interrupts cause a "Stop Break"
	 * event to exit from C2 state.
	 * Also, BRLD_EN_BM (ACPI_BITREG_BUS_MASTER_RLD in ACPI-speak)
	 * should be set to zero, otherwise it causes C2 to short-sleep.
	 * PIIX4 doesn't properly support C3 and bus master activity
	 * need not break out of C2.
	 */
	case PCI_REVISION_A_STEP:
	case PCI_REVISION_B_STEP:
	case PCI_REVISION_4E:
	case PCI_REVISION_4M:
	    acpi_cst_quirks |= ACPI_CST_QUIRK_NO_C3;
	    ACPI_DEBUG_PRINT((ACPI_DB_INFO,
		"cpu_cst: working around PIIX4 bug, disabling C3\n"));

	    val = pci_read_config(acpi_dev, PIIX4_DEVACTB_REG, 4);
	    if ((val & PIIX4_STOP_BREAK_MASK) != PIIX4_STOP_BREAK_MASK) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
		    "cpu_cst: PIIX4: enabling IRQs to generate Stop Break\n"));
		val |= PIIX4_STOP_BREAK_MASK;
		pci_write_config(acpi_dev, PIIX4_DEVACTB_REG, val, 4);
	    }
	    AcpiReadBitRegister(ACPI_BITREG_BUS_MASTER_RLD, &val);
	    if (val) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
		    "cpu_cst: PIIX4: reset BRLD_EN_BM\n"));
		AcpiWriteBitRegister(ACPI_BITREG_BUS_MASTER_RLD, 0);
	    }
	    break;
	default:
	    break;
	}
    }

    return (0);
}

static int
acpi_cst_usage_sysctl(SYSCTL_HANDLER_ARGS)
{
    struct acpi_cst_softc *sc;
    struct sbuf sb;
    char buf[128];
    int i;
    uintmax_t fract, sum, whole;

    sc = (struct acpi_cst_softc *)arg1;
    sum = 0;
    for (i = 0; i < sc->cst_cx_count; i++)
	sum += sc->cst_cx_stats[i];
    sbuf_new(&sb, buf, sizeof(buf), SBUF_FIXEDLEN);
    for (i = 0; i < sc->cst_cx_count; i++) {
	if (sum > 0) {
	    whole = (uintmax_t)sc->cst_cx_stats[i] * 100;
	    fract = (whole % sum) * 100;
	    sbuf_printf(&sb, "%u.%02u%% ", (u_int)(whole / sum),
		(u_int)(fract / sum));
	} else
	    sbuf_printf(&sb, "0.00%% ");
    }
    sbuf_printf(&sb, "last %dus", sc->cst_prev_sleep);
    sbuf_trim(&sb);
    sbuf_finish(&sb);
    sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req);
    sbuf_delete(&sb);

    return (0);
}

static int
acpi_cst_set_lowest_oncpu(struct acpi_cst_softc *sc, int val)
{
    int old_lowest, error = 0, old_lowest_req;
    uint32_t old_type, type;

    KKASSERT(mycpuid == sc->cst_cpuid);

    old_lowest_req = sc->cst_cx_lowest_req;
    sc->cst_cx_lowest_req = val;

    if (val > sc->cst_cx_count - 1)
	val = sc->cst_cx_count - 1;
    old_lowest = atomic_swap_int(&sc->cst_cx_lowest, val);

    old_type = sc->cst_cx_states[old_lowest].type;
    type = sc->cst_cx_states[val].type;
    if (old_type >= ACPI_STATE_C3 && type < ACPI_STATE_C3) {
	cputimer_intr_powersave_remreq();
    } else if (type >= ACPI_STATE_C3 && old_type < ACPI_STATE_C3) {
	error = cputimer_intr_powersave_addreq();
	if (error) {
	    /* Restore */
	    sc->cst_cx_lowest_req = old_lowest_req;
	    sc->cst_cx_lowest = old_lowest;
	}
    }

    if (error)
	return error;

    /* Cache the new lowest non-C3 state. */
    acpi_cst_non_c3(sc);

    /* Reset the statistics counters. */
    bzero(sc->cst_cx_stats, sizeof(sc->cst_cx_stats));
    return (0);
}

static void
acpi_cst_set_lowest_handler(struct cpuhelper_msg *msg)
{
    int error;

    error = acpi_cst_set_lowest_oncpu(msg->ch_cbarg, msg->ch_cbarg1);
    cpuhelper_replymsg(msg, error);
}

static int
acpi_cst_set_lowest(struct acpi_cst_softc *sc, int val)
{
    struct cpuhelper_msg msg;

    cpuhelper_initmsg(&msg, &curthread->td_msgport,
	acpi_cst_set_lowest_handler, sc, MSGF_PRIORITY);
    msg.ch_cbarg1 = val;

    return (cpuhelper_domsg(&msg, sc->cst_cpuid));
}

static int
acpi_cst_lowest_sysctl(SYSCTL_HANDLER_ARGS)
{
    struct acpi_cst_softc *sc;
    char state[8];
    int val, error;

    sc = (struct acpi_cst_softc *)arg1;
    ksnprintf(state, sizeof(state), "C%d", sc->cst_cx_lowest_req + 1);
    error = sysctl_handle_string(oidp, state, sizeof(state), req);
    if (error != 0 || req->newptr == NULL)
	return (error);
    if (strlen(state) < 2 || toupper(state[0]) != 'C')
	return (EINVAL);
    val = (int)strtol(state + 1, NULL, 10) - 1;
    if (val < 0)
	return (EINVAL);

    lwkt_serialize_enter(&acpi_cst_slize);
    error = acpi_cst_set_lowest(sc, val);
    lwkt_serialize_exit(&acpi_cst_slize);

    return error;
}

static int
acpi_cst_lowest_use_sysctl(SYSCTL_HANDLER_ARGS)
{
    struct acpi_cst_softc *sc;
    char state[8];

    sc = (struct acpi_cst_softc *)arg1;
    ksnprintf(state, sizeof(state), "C%d", sc->cst_cx_lowest + 1);
    return sysctl_handle_string(oidp, state, sizeof(state), req);
}

static int
acpi_cst_global_lowest_sysctl(SYSCTL_HANDLER_ARGS)
{
    struct acpi_cst_softc *sc;
    char state[8];
    int val, error, i;

    ksnprintf(state, sizeof(state), "C%d", acpi_cst_cx_lowest_req + 1);
    error = sysctl_handle_string(oidp, state, sizeof(state), req);
    if (error != 0 || req->newptr == NULL)
	return (error);
    if (strlen(state) < 2 || toupper(state[0]) != 'C')
	return (EINVAL);
    val = (int)strtol(state + 1, NULL, 10) - 1;
    if (val < 0)
	return (EINVAL);

    lwkt_serialize_enter(&acpi_cst_slize);

    acpi_cst_cx_lowest_req = val;
    acpi_cst_cx_lowest = val;
    if (acpi_cst_cx_lowest > acpi_cst_cx_count - 1)
	acpi_cst_cx_lowest = acpi_cst_cx_count - 1;

    /* Update the new lowest usable Cx state for all CPUs. */
    for (i = 0; i < acpi_cst_ndevices; i++) {
	sc = device_get_softc(acpi_cst_devices[i]);
	error = acpi_cst_set_lowest(sc, val);
	if (error) {
	    KKASSERT(i == 0);
	    break;
	}
    }

    lwkt_serialize_exit(&acpi_cst_slize);

    return error;
}

static int
acpi_cst_global_lowest_use_sysctl(SYSCTL_HANDLER_ARGS)
{
    char state[8];

    ksnprintf(state, sizeof(state), "C%d", acpi_cst_cx_lowest + 1);
    return sysctl_handle_string(oidp, state, sizeof(state), req);
}

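/*
 * NOTE: the "sti; hlt" sequence in acpi_cst_c1_halt() below must stay
 * back-to-back: STI only enables interrupts after the instruction that
 * follows it, so no wakeup interrupt can be lost between the reqflags
 * check and the HLT itself.
 */
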
/*
 * Put the CPU in C1 in a machine-dependent way.
 * XXX: shouldn't be here!
 */
static void
acpi_cst_c1_halt(void)
{
    splz();
    if ((mycpu->gd_reqflags & RQF_IDLECHECK_WK_MASK) == 0)
	__asm __volatile("sti; hlt");
    else
	__asm __volatile("sti; pause");
}

static void
acpi_cst_non_c3(struct acpi_cst_softc *sc)
{
    int i;

    sc->cst_non_c3 = 0;
    for (i = sc->cst_cx_lowest; i >= 0; i--) {
	if (sc->cst_cx_states[i].type < ACPI_STATE_C3) {
	    sc->cst_non_c3 = i;
	    break;
	}
    }
    if (bootverbose)
	device_printf(sc->cst_dev, "non-C3 %d\n", sc->cst_non_c3);
}

/*
 * Update the largest Cx state supported in the global acpi_cst_cx_count.
 * It will be used in the global Cx sysctl handler.
 */
static void
acpi_cst_global_cx_count(void)
{
    struct acpi_cst_softc *sc;
    int i;

    if (acpi_cst_ndevices == 0) {
	acpi_cst_cx_count = 0;
	return;
    }

    sc = device_get_softc(acpi_cst_devices[0]);
    acpi_cst_cx_count = sc->cst_cx_count;

    for (i = 1; i < acpi_cst_ndevices; i++) {
	struct acpi_cst_softc *sc = device_get_softc(acpi_cst_devices[i]);

	if (sc->cst_cx_count < acpi_cst_cx_count)
	    acpi_cst_cx_count = sc->cst_cx_count;
    }
    if (bootverbose)
	kprintf("cpu_cst: global Cx count %d\n", acpi_cst_cx_count);
}

static void
acpi_cst_c1_halt_enter(const struct acpi_cst_cx *cx __unused)
{
    acpi_cst_c1_halt();
}

static void
acpi_cst_cx_io_enter(const struct acpi_cst_cx *cx)
{
    uint64_t dummy;

    /*
     * Read I/O to enter this Cx state.
     */
    bus_space_read_1(cx->btag, cx->bhand, 0);
    /*
     * Perform a dummy I/O read.  Since it may take an arbitrary time
     * to enter the idle state, this read makes sure that we are frozen.
     */
    AcpiRead(&dummy, &AcpiGbl_FADT.XPmTimerBlock);
}

static int
acpi_cst_cx_setup(struct acpi_cst_cx *cx)
{
    cx->flags &= ~ACPI_CST_CX_FLAG_BM_STS;
    cx->preamble = ACPI_CST_CX_PREAMBLE_NONE;

    if (cx->type >= ACPI_STATE_C3) {
	/*
	 * Set the required operations for entering C3(+) states.
	 * acpi_cst_md_cx_setup() may fix them up later.
	 */

	/*
	 * Always check BM_STS.
	 */
	if ((acpi_cst_quirks & ACPI_CST_QUIRK_NO_BM) == 0)
	    cx->flags |= ACPI_CST_CX_FLAG_BM_STS;

	/*
	 * According to the ACPI specification, bus master arbitration
	 * is only available on UP systems.  On MP systems, cache
	 * flushing is required instead.
	 */
	if (ncpus == 1 && (acpi_cst_quirks & ACPI_CST_QUIRK_NO_BM) == 0)
	    cx->preamble = ACPI_CST_CX_PREAMBLE_BM_ARB;
	else
	    cx->preamble = ACPI_CST_CX_PREAMBLE_WBINVD;
    }
    return acpi_cst_md_cx_setup(cx);
}

static void
acpi_cst_free_resource(struct acpi_cst_softc *sc, int start)
{
    int i;

    for (i = start; i < MAX_CX_STATES; ++i) {
	struct acpi_cst_cx *cx = &sc->cst_cx_states[i];

	if (cx->res != NULL)
	    bus_release_resource(sc->cst_dev, cx->res_type, cx->rid, cx->res);
	memset(cx, 0, sizeof(*cx));
    }
}