1 /*- 2 * Copyright (c) 2003-2005 Nate Lawson (SDG) 3 * Copyright (c) 2001 Michael Smith 4 * All rights reserved. 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 25 * SUCH DAMAGE. 26 * 27 * $FreeBSD: src/sys/dev/acpica/acpi_cpu.c,v 1.72 2008/04/12 12:06:00 rpaulo Exp $ 28 */ 29 30 #include "opt_acpi.h" 31 #include <sys/param.h> 32 #include <sys/bus.h> 33 #include <sys/cpuhelper.h> 34 #include <sys/kernel.h> 35 #include <sys/malloc.h> 36 #include <sys/globaldata.h> 37 #include <sys/power.h> 38 #include <sys/proc.h> 39 #include <sys/sbuf.h> 40 #include <sys/thread2.h> 41 #include <sys/serialize.h> 42 #include <sys/msgport2.h> 43 #include <sys/microtime_pcpu.h> 44 #include <sys/cpu_topology.h> 45 46 #include <bus/pci/pcivar.h> 47 #include <machine/atomic.h> 48 #include <machine/globaldata.h> 49 #include <machine/md_var.h> 50 #include <machine/smp.h> 51 #include <sys/rman.h> 52 53 #include "acpi.h" 54 #include "acpivar.h" 55 #include "acpi_cpu.h" 56 #include "acpi_cpu_cstate.h" 57 58 /* 59 * Support for ACPI Processor devices, including C[1-3+] sleep states. 60 */ 61 62 /* Hooks for the ACPICA debugging infrastructure */ 63 #define _COMPONENT ACPI_PROCESSOR 64 ACPI_MODULE_NAME("PROCESSOR") 65 66 #define MAX_CX_STATES 8 67 68 struct acpi_cst_softc { 69 device_t cst_dev; 70 struct acpi_cpu_softc *cst_parent; 71 ACPI_HANDLE cst_handle; 72 int cst_cpuid; 73 uint32_t cst_flags; /* ACPI_CST_FLAG_ */ 74 uint32_t cst_p_blk; /* ACPI P_BLK location */ 75 uint32_t cst_p_blk_len; /* P_BLK length (must be 6). */ 76 struct acpi_cst_cx cst_cx_states[MAX_CX_STATES]; 77 int cst_cx_count; /* Number of valid Cx states. */ 78 int cst_prev_sleep; /* Last idle sleep duration. */ 79 /* Runtime state. */ 80 int cst_non_c3; /* Index of lowest non-C3 state. */ 81 u_long cst_cx_stats[MAX_CX_STATES];/* Cx usage history. */ 82 /* Values for sysctl. */ 83 int cst_cx_lowest; /* Current Cx lowest */ 84 int cst_cx_lowest_req; /* Requested Cx lowest */ 85 char cst_cx_supported[64]; 86 }; 87 88 #define ACPI_CST_FLAG_PROBING 0x1 89 #define ACPI_CST_FLAG_ATTACHED 0x2 90 /* Match C-states of other hyperthreads on the same core */ 91 #define ACPI_CST_FLAG_MATCH_HT 0x4 92 93 #define PCI_VENDOR_INTEL 0x8086 94 #define PCI_DEVICE_82371AB_3 0x7113 /* PIIX4 chipset for quirks. */ 95 #define PCI_REVISION_A_STEP 0 96 #define PCI_REVISION_B_STEP 1 97 #define PCI_REVISION_4E 2 98 #define PCI_REVISION_4M 3 99 #define PIIX4_DEVACTB_REG 0x58 100 #define PIIX4_BRLD_EN_IRQ0 (1<<0) 101 #define PIIX4_BRLD_EN_IRQ (1<<1) 102 #define PIIX4_BRLD_EN_IRQ8 (1<<5) 103 #define PIIX4_STOP_BREAK_MASK (PIIX4_BRLD_EN_IRQ0 | \ 104 PIIX4_BRLD_EN_IRQ | \ 105 PIIX4_BRLD_EN_IRQ8) 106 #define PIIX4_PCNTRL_BST_EN (1<<10) 107 108 /* Platform hardware resource information. */ 109 static uint32_t acpi_cst_smi_cmd; /* Value to write to SMI_CMD. */ 110 static uint8_t acpi_cst_ctrl; /* Indicate we are _CST aware. */ 111 int acpi_cst_quirks; /* Indicate any hardware bugs. */ 112 static boolean_t acpi_cst_use_fadt; 113 114 /* Runtime state. */ 115 static boolean_t acpi_cst_disable_idle; 116 /* Disable entry to idle function */ 117 static int acpi_cst_cx_count; /* Number of valid Cx states */ 118 119 /* Values for sysctl. */ 120 static int acpi_cst_cx_lowest; /* Current Cx lowest */ 121 static int acpi_cst_cx_lowest_req; /* Requested Cx lowest */ 122 123 static device_t *acpi_cst_devices; 124 static int acpi_cst_ndevices; 125 static struct acpi_cst_softc **acpi_cst_softc; 126 static struct lwkt_serialize acpi_cst_slize = LWKT_SERIALIZE_INITIALIZER; 127 128 static int acpi_cst_probe(device_t); 129 static int acpi_cst_attach(device_t); 130 static int acpi_cst_suspend(device_t); 131 static int acpi_cst_resume(device_t); 132 static int acpi_cst_shutdown(device_t); 133 134 static void acpi_cst_notify(device_t); 135 static void acpi_cst_postattach(void *); 136 static void acpi_cst_idle(void); 137 static void acpi_cst_copy(struct acpi_cst_softc *, 138 const struct acpi_cst_softc *); 139 140 static void acpi_cst_cx_probe(struct acpi_cst_softc *); 141 static void acpi_cst_cx_probe_fadt(struct acpi_cst_softc *); 142 static int acpi_cst_cx_probe_cst(struct acpi_cst_softc *, int); 143 static int acpi_cst_cx_reprobe_cst(struct acpi_cst_softc *); 144 145 static void acpi_cst_startup(struct acpi_cst_softc *); 146 static void acpi_cst_support_list(struct acpi_cst_softc *); 147 static int acpi_cst_set_lowest(struct acpi_cst_softc *, int); 148 static int acpi_cst_set_lowest_oncpu(struct acpi_cst_softc *, int); 149 static void acpi_cst_non_c3(struct acpi_cst_softc *); 150 static void acpi_cst_global_cx_count(void); 151 static int acpi_cst_set_quirks(void); 152 static void acpi_cst_c3_bm_rld(struct acpi_cst_softc *); 153 static void acpi_cst_free_resource(struct acpi_cst_softc *, int); 154 static void acpi_cst_c1_halt(void); 155 156 static int acpi_cst_usage_sysctl(SYSCTL_HANDLER_ARGS); 157 static int acpi_cst_lowest_sysctl(SYSCTL_HANDLER_ARGS); 158 static int acpi_cst_lowest_use_sysctl(SYSCTL_HANDLER_ARGS); 159 static int acpi_cst_global_lowest_sysctl(SYSCTL_HANDLER_ARGS); 160 static int acpi_cst_global_lowest_use_sysctl(SYSCTL_HANDLER_ARGS); 161 162 static int acpi_cst_cx_setup(struct acpi_cst_cx *cx); 163 static void acpi_cst_c1_halt_enter(const struct acpi_cst_cx *); 164 static void acpi_cst_cx_io_enter(const struct acpi_cst_cx *); 165 166 int acpi_cst_force_bmarb; 167 TUNABLE_INT("hw.acpi.cpu.cst.force_bmarb", &acpi_cst_force_bmarb); 168 169 int acpi_cst_force_bmsts; 170 TUNABLE_INT("hw.acpi.cpu.cst.force_bmsts", &acpi_cst_force_bmsts); 171 172 static device_method_t acpi_cst_methods[] = { 173 /* Device interface */ 174 DEVMETHOD(device_probe, acpi_cst_probe), 175 DEVMETHOD(device_attach, acpi_cst_attach), 176 DEVMETHOD(device_detach, bus_generic_detach), 177 DEVMETHOD(device_shutdown, acpi_cst_shutdown), 178 DEVMETHOD(device_suspend, acpi_cst_suspend), 179 DEVMETHOD(device_resume, acpi_cst_resume), 180 181 /* Bus interface */ 182 DEVMETHOD(bus_add_child, bus_generic_add_child), 183 DEVMETHOD(bus_read_ivar, bus_generic_read_ivar), 184 DEVMETHOD(bus_get_resource_list, bus_generic_get_resource_list), 185 DEVMETHOD(bus_get_resource, bus_generic_rl_get_resource), 186 DEVMETHOD(bus_set_resource, bus_generic_rl_set_resource), 187 DEVMETHOD(bus_alloc_resource, bus_generic_rl_alloc_resource), 188 DEVMETHOD(bus_release_resource, bus_generic_rl_release_resource), 189 DEVMETHOD(bus_driver_added, bus_generic_driver_added), 190 DEVMETHOD(bus_activate_resource, bus_generic_activate_resource), 191 DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource), 192 DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), 193 DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr), 194 DEVMETHOD_END 195 }; 196 197 static driver_t acpi_cst_driver = { 198 "cpu_cst", 199 acpi_cst_methods, 200 sizeof(struct acpi_cst_softc), 201 }; 202 203 static devclass_t acpi_cst_devclass; 204 DRIVER_MODULE(cpu_cst, cpu, acpi_cst_driver, acpi_cst_devclass, NULL, NULL); 205 MODULE_DEPEND(cpu_cst, acpi, 1, 1, 1); 206 207 static int 208 acpi_cst_probe(device_t dev) 209 { 210 int cpu_id; 211 212 if (acpi_disabled("cpu_cst") || acpi_get_type(dev) != ACPI_TYPE_PROCESSOR) 213 return (ENXIO); 214 215 cpu_id = acpi_get_magic(dev); 216 217 if (acpi_cst_softc == NULL) 218 acpi_cst_softc = kmalloc(sizeof(struct acpi_cst_softc *) * 219 SMP_MAXCPU, M_TEMP /* XXX */, M_INTWAIT | M_ZERO); 220 221 /* 222 * Check if we already probed this processor. We scan the bus twice 223 * so it's possible we've already seen this one. 224 */ 225 if (acpi_cst_softc[cpu_id] != NULL) { 226 device_printf(dev, "CPU%d cstate already exist\n", cpu_id); 227 return (ENXIO); 228 } 229 230 /* Mark this processor as in-use and save our derived id for attach. */ 231 acpi_cst_softc[cpu_id] = device_get_softc(dev); 232 device_set_desc(dev, "ACPI CPU C-State"); 233 234 return (0); 235 } 236 237 static int 238 acpi_cst_attach(device_t dev) 239 { 240 ACPI_BUFFER buf; 241 ACPI_OBJECT *obj; 242 struct acpi_cst_softc *sc; 243 ACPI_STATUS status; 244 245 ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); 246 247 sc = device_get_softc(dev); 248 sc->cst_dev = dev; 249 sc->cst_parent = device_get_softc(device_get_parent(dev)); 250 sc->cst_handle = acpi_get_handle(dev); 251 sc->cst_cpuid = acpi_get_magic(dev); 252 acpi_cst_softc[sc->cst_cpuid] = sc; 253 acpi_cst_smi_cmd = AcpiGbl_FADT.SmiCommand; 254 acpi_cst_ctrl = AcpiGbl_FADT.CstControl; 255 256 buf.Pointer = NULL; 257 buf.Length = ACPI_ALLOCATE_BUFFER; 258 status = AcpiEvaluateObject(sc->cst_handle, NULL, NULL, &buf); 259 if (ACPI_FAILURE(status)) { 260 device_printf(dev, "attach failed to get Processor obj - %s\n", 261 AcpiFormatException(status)); 262 acpi_cst_softc[sc->cst_cpuid] = NULL; 263 return (ENXIO); 264 } 265 obj = (ACPI_OBJECT *)buf.Pointer; 266 sc->cst_p_blk = obj->Processor.PblkAddress; 267 sc->cst_p_blk_len = obj->Processor.PblkLength; 268 AcpiOsFree(obj); 269 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "cpu_cst%d: P_BLK at %#x/%d\n", 270 device_get_unit(dev), sc->cst_p_blk, sc->cst_p_blk_len)); 271 272 /* 273 * If this is the first cpu we attach, create and initialize the generic 274 * resources that will be used by all acpi cpu devices. 275 */ 276 if (device_get_unit(dev) == 0) { 277 /* Assume we won't be using FADT for Cx states by default */ 278 acpi_cst_use_fadt = FALSE; 279 280 /* Queue post cpu-probing task handler */ 281 AcpiOsExecute(OSL_NOTIFY_HANDLER, acpi_cst_postattach, NULL); 282 } 283 284 /* Probe for Cx state support. */ 285 acpi_cst_cx_probe(sc); 286 287 sc->cst_flags |= ACPI_CST_FLAG_ATTACHED; 288 289 return (0); 290 } 291 292 /* 293 * Disable any entry to the idle function during suspend and re-enable it 294 * during resume. 295 */ 296 static int 297 acpi_cst_suspend(device_t dev) 298 { 299 int error; 300 301 error = bus_generic_suspend(dev); 302 if (error) 303 return (error); 304 acpi_cst_disable_idle = TRUE; 305 return (0); 306 } 307 308 static int 309 acpi_cst_resume(device_t dev) 310 { 311 acpi_cst_disable_idle = FALSE; 312 return (bus_generic_resume(dev)); 313 } 314 315 static int 316 acpi_cst_shutdown(device_t dev) 317 { 318 ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); 319 320 /* Allow children to shutdown first. */ 321 bus_generic_shutdown(dev); 322 323 /* 324 * Disable any entry to the idle function. There is a small race where 325 * an idle thread have passed this check but not gone to sleep. This 326 * is ok since device_shutdown() does not free the softc, otherwise 327 * we'd have to be sure all threads were evicted before returning. 328 */ 329 acpi_cst_disable_idle = TRUE; 330 331 return_VALUE (0); 332 } 333 334 static void 335 acpi_cst_cx_probe(struct acpi_cst_softc *sc) 336 { 337 ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); 338 339 /* Use initial sleep value of 1 sec. to start with lowest idle state. */ 340 sc->cst_prev_sleep = 1000000; 341 sc->cst_cx_lowest = 0; 342 sc->cst_cx_lowest_req = 0; 343 344 /* 345 * Check for the ACPI 2.0 _CST sleep states object. If we can't find 346 * any, we'll revert to FADT/P_BLK Cx control method which will be 347 * handled by acpi_cst_postattach. We need to defer to after having 348 * probed all the cpus in the system before probing for Cx states from 349 * FADT as we may already have found cpus with valid _CST packages. 350 */ 351 if (!acpi_cst_use_fadt && acpi_cst_cx_probe_cst(sc, 0) != 0) { 352 /* 353 * We were unable to find a _CST package for this cpu or there 354 * was an error parsing it. Switch back to generic mode. 355 */ 356 acpi_cst_use_fadt = TRUE; 357 if (bootverbose) 358 device_printf(sc->cst_dev, "switching to FADT Cx mode\n"); 359 } 360 361 /* 362 * TODO: _CSD Package should be checked here. 363 */ 364 } 365 366 static void 367 acpi_cst_cx_probe_fadt(struct acpi_cst_softc *sc) 368 { 369 struct acpi_cst_cx *cx_ptr; 370 int error; 371 372 /* 373 * Free all previously allocated resources. 374 * 375 * NITE: 376 * It is needed, since we could enter here because of other 377 * cpu's _CST probing failure. 378 */ 379 acpi_cst_free_resource(sc, 0); 380 381 sc->cst_cx_count = 0; 382 cx_ptr = sc->cst_cx_states; 383 384 /* Use initial sleep value of 1 sec. to start with lowest idle state. */ 385 sc->cst_prev_sleep = 1000000; 386 387 /* C1 has been required since just after ACPI 1.0 */ 388 cx_ptr->gas.SpaceId = ACPI_ADR_SPACE_FIXED_HARDWARE; 389 cx_ptr->type = ACPI_STATE_C1; 390 cx_ptr->trans_lat = 0; 391 cx_ptr->enter = acpi_cst_c1_halt_enter; 392 error = acpi_cst_cx_setup(cx_ptr); 393 if (error) 394 panic("C1 FADT HALT setup failed: %d", error); 395 cx_ptr++; 396 sc->cst_cx_count++; 397 398 /* C2(+) is not supported on MP system */ 399 if (ncpus > 1 && (AcpiGbl_FADT.Flags & ACPI_FADT_C2_MP_SUPPORTED) == 0) 400 return; 401 402 /* 403 * The spec says P_BLK must be 6 bytes long. However, some systems 404 * use it to indicate a fractional set of features present so we 405 * take 5 as C2. Some may also have a value of 7 to indicate 406 * another C3 but most use _CST for this (as required) and having 407 * "only" C1-C3 is not a hardship. 408 */ 409 if (sc->cst_p_blk_len < 5) 410 return; 411 412 /* Validate and allocate resources for C2 (P_LVL2). */ 413 if (AcpiGbl_FADT.C2Latency <= 100) { 414 cx_ptr->gas.SpaceId = ACPI_ADR_SPACE_SYSTEM_IO; 415 cx_ptr->gas.BitWidth = 8; 416 cx_ptr->gas.Address = sc->cst_p_blk + 4; 417 418 cx_ptr->rid = sc->cst_parent->cpu_next_rid; 419 acpi_bus_alloc_gas(sc->cst_dev, &cx_ptr->res_type, &cx_ptr->rid, 420 &cx_ptr->gas, &cx_ptr->res, RF_SHAREABLE); 421 if (cx_ptr->res != NULL) { 422 sc->cst_parent->cpu_next_rid++; 423 cx_ptr->type = ACPI_STATE_C2; 424 cx_ptr->trans_lat = AcpiGbl_FADT.C2Latency; 425 cx_ptr->enter = acpi_cst_cx_io_enter; 426 cx_ptr->btag = rman_get_bustag(cx_ptr->res); 427 cx_ptr->bhand = rman_get_bushandle(cx_ptr->res); 428 error = acpi_cst_cx_setup(cx_ptr); 429 if (error) 430 panic("C2 FADT I/O setup failed: %d", error); 431 cx_ptr++; 432 sc->cst_cx_count++; 433 sc->cst_non_c3 = 1; 434 } 435 } 436 if (sc->cst_p_blk_len < 6) 437 return; 438 439 /* Validate and allocate resources for C3 (P_LVL3). */ 440 if (AcpiGbl_FADT.C3Latency <= 1000 && 441 !(acpi_cst_quirks & ACPI_CST_QUIRK_NO_C3)) { 442 cx_ptr->gas.SpaceId = ACPI_ADR_SPACE_SYSTEM_IO; 443 cx_ptr->gas.BitWidth = 8; 444 cx_ptr->gas.Address = sc->cst_p_blk + 5; 445 446 cx_ptr->rid = sc->cst_parent->cpu_next_rid; 447 acpi_bus_alloc_gas(sc->cst_dev, &cx_ptr->res_type, &cx_ptr->rid, 448 &cx_ptr->gas, &cx_ptr->res, RF_SHAREABLE); 449 if (cx_ptr->res != NULL) { 450 sc->cst_parent->cpu_next_rid++; 451 cx_ptr->type = ACPI_STATE_C3; 452 cx_ptr->trans_lat = AcpiGbl_FADT.C3Latency; 453 cx_ptr->enter = acpi_cst_cx_io_enter; 454 cx_ptr->btag = rman_get_bustag(cx_ptr->res); 455 cx_ptr->bhand = rman_get_bushandle(cx_ptr->res); 456 error = acpi_cst_cx_setup(cx_ptr); 457 if (error) 458 panic("C3 FADT I/O setup failed: %d", error); 459 cx_ptr++; 460 sc->cst_cx_count++; 461 } 462 } 463 } 464 465 static void 466 acpi_cst_copy(struct acpi_cst_softc *dst_sc, 467 const struct acpi_cst_softc *src_sc) 468 { 469 dst_sc->cst_non_c3 = src_sc->cst_non_c3; 470 dst_sc->cst_cx_count = src_sc->cst_cx_count; 471 memcpy(dst_sc->cst_cx_states, src_sc->cst_cx_states, 472 sizeof(dst_sc->cst_cx_states)); 473 } 474 475 /* 476 * Parse a _CST package and set up its Cx states. Since the _CST object 477 * can change dynamically, our notify handler may call this function 478 * to clean up and probe the new _CST package. 479 */ 480 static int 481 acpi_cst_cx_probe_cst(struct acpi_cst_softc *sc, int reprobe) 482 { 483 struct acpi_cst_cx *cx_ptr; 484 ACPI_STATUS status; 485 ACPI_BUFFER buf; 486 ACPI_OBJECT *top; 487 ACPI_OBJECT *pkg; 488 uint32_t count; 489 int i; 490 491 ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); 492 493 if (reprobe) 494 cpuhelper_assert(sc->cst_cpuid, true); 495 496 buf.Pointer = NULL; 497 buf.Length = ACPI_ALLOCATE_BUFFER; 498 status = AcpiEvaluateObject(sc->cst_handle, "_CST", NULL, &buf); 499 if (ACPI_FAILURE(status)) 500 return (ENXIO); 501 502 /* _CST is a package with a count and at least one Cx package. */ 503 top = (ACPI_OBJECT *)buf.Pointer; 504 if (!ACPI_PKG_VALID(top, 2) || acpi_PkgInt32(top, 0, &count) != 0) { 505 device_printf(sc->cst_dev, "invalid _CST package\n"); 506 AcpiOsFree(buf.Pointer); 507 return (ENXIO); 508 } 509 if (count != top->Package.Count - 1) { 510 device_printf(sc->cst_dev, "invalid _CST state count (%d != %d)\n", 511 count, top->Package.Count - 1); 512 count = top->Package.Count - 1; 513 } 514 if (count > MAX_CX_STATES) { 515 device_printf(sc->cst_dev, "_CST has too many states (%d)\n", count); 516 count = MAX_CX_STATES; 517 } 518 519 sc->cst_flags |= ACPI_CST_FLAG_PROBING | ACPI_CST_FLAG_MATCH_HT; 520 cpu_sfence(); 521 522 /* 523 * Free all previously allocated resources 524 * 525 * NOTE: It is needed for _CST reprobing. 526 */ 527 acpi_cst_free_resource(sc, 0); 528 529 /* Set up all valid states. */ 530 sc->cst_cx_count = 0; 531 cx_ptr = sc->cst_cx_states; 532 for (i = 0; i < count; i++) { 533 int error; 534 535 pkg = &top->Package.Elements[i + 1]; 536 if (!ACPI_PKG_VALID(pkg, 4) || 537 acpi_PkgInt32(pkg, 1, &cx_ptr->type) != 0 || 538 acpi_PkgInt32(pkg, 2, &cx_ptr->trans_lat) != 0 || 539 acpi_PkgInt32(pkg, 3, &cx_ptr->power) != 0) { 540 541 device_printf(sc->cst_dev, "skipping invalid Cx state package\n"); 542 continue; 543 } 544 545 /* Validate the state to see if we should use it. */ 546 switch (cx_ptr->type) { 547 case ACPI_STATE_C1: 548 sc->cst_non_c3 = i; 549 cx_ptr->enter = acpi_cst_c1_halt_enter; 550 error = acpi_cst_cx_setup(cx_ptr); 551 if (error) 552 panic("C1 CST HALT setup failed: %d", error); 553 if (sc->cst_cx_count != 0) { 554 /* 555 * C1 is not the first C-state; something really stupid 556 * is going on ... 557 */ 558 sc->cst_flags &= ~ACPI_CST_FLAG_MATCH_HT; 559 } 560 cx_ptr++; 561 sc->cst_cx_count++; 562 continue; 563 case ACPI_STATE_C2: 564 sc->cst_non_c3 = i; 565 break; 566 case ACPI_STATE_C3: 567 default: 568 if ((acpi_cst_quirks & ACPI_CST_QUIRK_NO_C3) != 0) { 569 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 570 "cpu_cst%d: C3[%d] not available.\n", 571 device_get_unit(sc->cst_dev), i)); 572 continue; 573 } 574 break; 575 } 576 577 /* 578 * Allocate the control register for C2 or C3(+). 579 */ 580 KASSERT(cx_ptr->res == NULL, ("still has res")); 581 acpi_PkgRawGas(pkg, 0, &cx_ptr->gas); 582 583 /* 584 * We match number of C2/C3 for hyperthreads, only if the 585 * register is "Fixed Hardware", e.g. on most of the Intel 586 * CPUs. We don't have much to do for the rest of the 587 * register types. 588 */ 589 if (cx_ptr->gas.SpaceId != ACPI_ADR_SPACE_FIXED_HARDWARE) 590 sc->cst_flags &= ~ACPI_CST_FLAG_MATCH_HT; 591 592 cx_ptr->rid = sc->cst_parent->cpu_next_rid; 593 acpi_bus_alloc_gas(sc->cst_dev, &cx_ptr->res_type, &cx_ptr->rid, 594 &cx_ptr->gas, &cx_ptr->res, RF_SHAREABLE); 595 if (cx_ptr->res != NULL) { 596 sc->cst_parent->cpu_next_rid++; 597 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 598 "cpu_cst%d: Got C%d - %d latency\n", 599 device_get_unit(sc->cst_dev), cx_ptr->type, 600 cx_ptr->trans_lat)); 601 cx_ptr->enter = acpi_cst_cx_io_enter; 602 cx_ptr->btag = rman_get_bustag(cx_ptr->res); 603 cx_ptr->bhand = rman_get_bushandle(cx_ptr->res); 604 error = acpi_cst_cx_setup(cx_ptr); 605 if (error) 606 panic("C%d CST I/O setup failed: %d", cx_ptr->type, error); 607 cx_ptr++; 608 sc->cst_cx_count++; 609 } else { 610 error = acpi_cst_cx_setup(cx_ptr); 611 if (!error) { 612 KASSERT(cx_ptr->enter != NULL, 613 ("C%d enter is not set", cx_ptr->type)); 614 cx_ptr++; 615 sc->cst_cx_count++; 616 } 617 } 618 } 619 AcpiOsFree(buf.Pointer); 620 621 if (sc->cst_flags & ACPI_CST_FLAG_MATCH_HT) { 622 cpumask_t mask; 623 624 mask = get_cpumask_from_level(sc->cst_cpuid, CORE_LEVEL); 625 if (CPUMASK_TESTNZERO(mask)) { 626 int cpu; 627 628 for (cpu = 0; cpu < ncpus; ++cpu) { 629 struct acpi_cst_softc *sc1 = acpi_cst_softc[cpu]; 630 631 if (sc1 == NULL || sc1 == sc || 632 (sc1->cst_flags & ACPI_CST_FLAG_ATTACHED) == 0 || 633 (sc1->cst_flags & ACPI_CST_FLAG_MATCH_HT) == 0) 634 continue; 635 if (!CPUMASK_TESTBIT(mask, sc1->cst_cpuid)) 636 continue; 637 638 if (sc1->cst_cx_count != sc->cst_cx_count) { 639 struct acpi_cst_softc *src_sc, *dst_sc; 640 641 if (bootverbose) { 642 device_printf(sc->cst_dev, 643 "inconstent C-state count: %d, %s has %d\n", 644 sc->cst_cx_count, 645 device_get_nameunit(sc1->cst_dev), 646 sc1->cst_cx_count); 647 } 648 if (sc1->cst_cx_count > sc->cst_cx_count) { 649 src_sc = sc1; 650 dst_sc = sc; 651 } else { 652 src_sc = sc; 653 dst_sc = sc1; 654 } 655 acpi_cst_copy(dst_sc, src_sc); 656 } 657 } 658 } 659 } 660 661 if (reprobe) { 662 /* If there are C3(+) states, always enable bus master wakeup */ 663 if ((acpi_cst_quirks & ACPI_CST_QUIRK_NO_BM) == 0) { 664 for (i = 0; i < sc->cst_cx_count; ++i) { 665 struct acpi_cst_cx *cx = &sc->cst_cx_states[i]; 666 667 if (cx->type >= ACPI_STATE_C3) { 668 AcpiWriteBitRegister(ACPI_BITREG_BUS_MASTER_RLD, 1); 669 break; 670 } 671 } 672 } 673 674 /* Fix up the lowest Cx being used */ 675 acpi_cst_set_lowest_oncpu(sc, sc->cst_cx_lowest_req); 676 } 677 678 /* 679 * Cache the lowest non-C3 state. 680 * NOTE: must after cst_cx_lowest is set. 681 */ 682 acpi_cst_non_c3(sc); 683 684 cpu_sfence(); 685 sc->cst_flags &= ~ACPI_CST_FLAG_PROBING; 686 687 return (0); 688 } 689 690 static void 691 acpi_cst_cx_reprobe_cst_handler(struct cpuhelper_msg *msg) 692 { 693 int error; 694 695 error = acpi_cst_cx_probe_cst(msg->ch_cbarg, 1); 696 cpuhelper_replymsg(msg, error); 697 } 698 699 static int 700 acpi_cst_cx_reprobe_cst(struct acpi_cst_softc *sc) 701 { 702 struct cpuhelper_msg msg; 703 704 cpuhelper_initmsg(&msg, &curthread->td_msgport, 705 acpi_cst_cx_reprobe_cst_handler, sc, MSGF_PRIORITY); 706 return (cpuhelper_domsg(&msg, sc->cst_cpuid)); 707 } 708 709 /* 710 * Call this *after* all CPUs Cx states have been attached. 711 */ 712 static void 713 acpi_cst_postattach(void *arg) 714 { 715 struct acpi_cst_softc *sc; 716 int i; 717 718 /* Get set of Cx state devices */ 719 devclass_get_devices(acpi_cst_devclass, &acpi_cst_devices, 720 &acpi_cst_ndevices); 721 722 /* 723 * Setup any quirks that might necessary now that we have probed 724 * all the CPUs' Cx states. 725 */ 726 acpi_cst_set_quirks(); 727 728 if (acpi_cst_use_fadt) { 729 /* 730 * We are using Cx mode from FADT, probe for available Cx states 731 * for all processors. 732 */ 733 for (i = 0; i < acpi_cst_ndevices; i++) { 734 sc = device_get_softc(acpi_cst_devices[i]); 735 acpi_cst_cx_probe_fadt(sc); 736 } 737 } else { 738 /* 739 * We are using _CST mode, remove C3 state if necessary. 740 * 741 * As we now know for sure that we will be using _CST mode 742 * install our notify handler. 743 */ 744 for (i = 0; i < acpi_cst_ndevices; i++) { 745 sc = device_get_softc(acpi_cst_devices[i]); 746 if (acpi_cst_quirks & ACPI_CST_QUIRK_NO_C3) { 747 /* Free part of unused resources */ 748 acpi_cst_free_resource(sc, sc->cst_non_c3 + 1); 749 sc->cst_cx_count = sc->cst_non_c3 + 1; 750 } 751 sc->cst_parent->cpu_cst_notify = acpi_cst_notify; 752 } 753 } 754 acpi_cst_global_cx_count(); 755 756 /* Perform Cx final initialization. */ 757 for (i = 0; i < acpi_cst_ndevices; i++) { 758 sc = device_get_softc(acpi_cst_devices[i]); 759 acpi_cst_startup(sc); 760 761 if (sc->cst_parent->glob_sysctl_tree != NULL) { 762 struct acpi_cpu_softc *cpu = sc->cst_parent; 763 764 /* Add a sysctl handler to handle global Cx lowest setting */ 765 SYSCTL_ADD_PROC(&cpu->glob_sysctl_ctx, 766 SYSCTL_CHILDREN(cpu->glob_sysctl_tree), 767 OID_AUTO, "cx_lowest", 768 CTLTYPE_STRING | CTLFLAG_RW, NULL, 0, 769 acpi_cst_global_lowest_sysctl, "A", 770 "Requested global lowest Cx sleep state"); 771 SYSCTL_ADD_PROC(&cpu->glob_sysctl_ctx, 772 SYSCTL_CHILDREN(cpu->glob_sysctl_tree), 773 OID_AUTO, "cx_lowest_use", 774 CTLTYPE_STRING | CTLFLAG_RD, NULL, 0, 775 acpi_cst_global_lowest_use_sysctl, "A", 776 "Global lowest Cx sleep state to use"); 777 } 778 } 779 780 /* Take over idling from cpu_idle_default(). */ 781 acpi_cst_cx_lowest = 0; 782 acpi_cst_cx_lowest_req = 0; 783 acpi_cst_disable_idle = FALSE; 784 785 cpu_sfence(); 786 cpu_idle_hook = acpi_cst_idle; 787 } 788 789 static void 790 acpi_cst_support_list(struct acpi_cst_softc *sc) 791 { 792 struct sbuf sb; 793 int i; 794 795 /* 796 * Set up the list of Cx states 797 */ 798 sbuf_new(&sb, sc->cst_cx_supported, sizeof(sc->cst_cx_supported), 799 SBUF_FIXEDLEN); 800 for (i = 0; i < sc->cst_cx_count; i++) 801 sbuf_printf(&sb, "C%d/%d ", i + 1, sc->cst_cx_states[i].trans_lat); 802 sbuf_trim(&sb); 803 sbuf_finish(&sb); 804 } 805 806 static void 807 acpi_cst_c3_bm_rld_handler(struct cpuhelper_msg *msg) 808 { 809 810 AcpiWriteBitRegister(ACPI_BITREG_BUS_MASTER_RLD, 1); 811 cpuhelper_replymsg(msg, 0); 812 } 813 814 static void 815 acpi_cst_c3_bm_rld(struct acpi_cst_softc *sc) 816 { 817 struct cpuhelper_msg msg; 818 819 cpuhelper_initmsg(&msg, &curthread->td_msgport, 820 acpi_cst_c3_bm_rld_handler, sc, MSGF_PRIORITY); 821 cpuhelper_domsg(&msg, sc->cst_cpuid); 822 } 823 824 static void 825 acpi_cst_startup(struct acpi_cst_softc *sc) 826 { 827 struct acpi_cpu_softc *cpu = sc->cst_parent; 828 int i, bm_rld_done = 0; 829 830 for (i = 0; i < sc->cst_cx_count; ++i) { 831 struct acpi_cst_cx *cx = &sc->cst_cx_states[i]; 832 int error; 833 834 /* If there are C3(+) states, always enable bus master wakeup */ 835 if (cx->type >= ACPI_STATE_C3 && !bm_rld_done && 836 (acpi_cst_quirks & ACPI_CST_QUIRK_NO_BM) == 0) { 837 acpi_cst_c3_bm_rld(sc); 838 bm_rld_done = 1; 839 } 840 841 /* Redo the Cx setup, since quirks have been changed */ 842 error = acpi_cst_cx_setup(cx); 843 if (error) 844 panic("C%d startup setup failed: %d", i + 1, error); 845 } 846 847 acpi_cst_support_list(sc); 848 849 SYSCTL_ADD_STRING(&cpu->pcpu_sysctl_ctx, 850 SYSCTL_CHILDREN(cpu->pcpu_sysctl_tree), 851 OID_AUTO, "cx_supported", CTLFLAG_RD, 852 sc->cst_cx_supported, 0, 853 "Cx/microsecond values for supported Cx states"); 854 SYSCTL_ADD_PROC(&cpu->pcpu_sysctl_ctx, 855 SYSCTL_CHILDREN(cpu->pcpu_sysctl_tree), 856 OID_AUTO, "cx_lowest", CTLTYPE_STRING | CTLFLAG_RW, 857 (void *)sc, 0, acpi_cst_lowest_sysctl, "A", 858 "requested lowest Cx sleep state"); 859 SYSCTL_ADD_PROC(&cpu->pcpu_sysctl_ctx, 860 SYSCTL_CHILDREN(cpu->pcpu_sysctl_tree), 861 OID_AUTO, "cx_lowest_use", CTLTYPE_STRING | CTLFLAG_RD, 862 (void *)sc, 0, acpi_cst_lowest_use_sysctl, "A", 863 "lowest Cx sleep state to use"); 864 SYSCTL_ADD_PROC(&cpu->pcpu_sysctl_ctx, 865 SYSCTL_CHILDREN(cpu->pcpu_sysctl_tree), 866 OID_AUTO, "cx_usage", CTLTYPE_STRING | CTLFLAG_RD, 867 (void *)sc, 0, acpi_cst_usage_sysctl, "A", 868 "percent usage for each Cx state"); 869 870 #ifdef notyet 871 /* Signal platform that we can handle _CST notification. */ 872 if (!acpi_cst_use_fadt && acpi_cst_ctrl != 0) { 873 ACPI_LOCK(acpi); 874 AcpiOsWritePort(acpi_cst_smi_cmd, acpi_cst_ctrl, 8); 875 ACPI_UNLOCK(acpi); 876 } 877 #endif 878 } 879 880 /* 881 * Idle the CPU in the lowest state possible. This function is called with 882 * interrupts disabled. Note that once it re-enables interrupts, a task 883 * switch can occur so do not access shared data (i.e. the softc) after 884 * interrupts are re-enabled. 885 */ 886 static void 887 acpi_cst_idle(void) 888 { 889 struct acpi_cst_softc *sc; 890 struct acpi_cst_cx *cx_next; 891 union microtime_pcpu start, end; 892 int cx_next_idx, i, tdiff, bm_arb_disabled = 0; 893 894 /* If disabled, return immediately. */ 895 if (acpi_cst_disable_idle) { 896 ACPI_ENABLE_IRQS(); 897 return; 898 } 899 900 /* 901 * Look up our CPU id to get our softc. If it's NULL, we'll use C1 902 * since there is no Cx state for this processor. 903 */ 904 sc = acpi_cst_softc[mdcpu->mi.gd_cpuid]; 905 if (sc == NULL) { 906 acpi_cst_c1_halt(); 907 return; 908 } 909 910 /* Still probing; use C1 */ 911 if (sc->cst_flags & ACPI_CST_FLAG_PROBING) { 912 acpi_cst_c1_halt(); 913 return; 914 } 915 916 /* Find the lowest state that has small enough latency. */ 917 cx_next_idx = 0; 918 for (i = sc->cst_cx_lowest; i >= 0; i--) { 919 if (sc->cst_cx_states[i].trans_lat * 3 <= sc->cst_prev_sleep) { 920 cx_next_idx = i; 921 break; 922 } 923 } 924 925 /* 926 * Check for bus master activity if needed for the selected state. 927 * If there was activity, clear the bit and use the lowest non-C3 state. 928 */ 929 cx_next = &sc->cst_cx_states[cx_next_idx]; 930 if (cx_next->flags & ACPI_CST_CX_FLAG_BM_STS) { 931 int bm_active; 932 933 AcpiReadBitRegister(ACPI_BITREG_BUS_MASTER_STATUS, &bm_active); 934 if (bm_active != 0) { 935 AcpiWriteBitRegister(ACPI_BITREG_BUS_MASTER_STATUS, 1); 936 cx_next_idx = sc->cst_non_c3; 937 } 938 } 939 940 /* Select the next state and update statistics. */ 941 cx_next = &sc->cst_cx_states[cx_next_idx]; 942 sc->cst_cx_stats[cx_next_idx]++; 943 KASSERT(cx_next->type != ACPI_STATE_C0, ("C0 sleep")); 944 945 /* 946 * Execute HLT (or equivalent) and wait for an interrupt. We can't 947 * calculate the time spent in C1 since the place we wake up is an 948 * ISR. Assume we slept half of quantum and return. 949 */ 950 if (cx_next->type == ACPI_STATE_C1) { 951 sc->cst_prev_sleep = (sc->cst_prev_sleep * 3 + 500000 / hz) / 4; 952 cx_next->enter(cx_next); 953 return; 954 } 955 956 /* Execute the proper preamble before enter the selected state. */ 957 if (cx_next->preamble == ACPI_CST_CX_PREAMBLE_BM_ARB) { 958 AcpiWriteBitRegister(ACPI_BITREG_ARB_DISABLE, 1); 959 bm_arb_disabled = 1; 960 } else if (cx_next->preamble == ACPI_CST_CX_PREAMBLE_WBINVD) { 961 ACPI_FLUSH_CPU_CACHE(); 962 } 963 964 /* 965 * Enter the selected state and check time spent asleep. 966 */ 967 microtime_pcpu_get(&start); 968 cpu_mfence(); 969 970 cx_next->enter(cx_next); 971 972 cpu_mfence(); 973 microtime_pcpu_get(&end); 974 975 /* Enable bus master arbitration, if it was disabled. */ 976 if (bm_arb_disabled) 977 AcpiWriteBitRegister(ACPI_BITREG_ARB_DISABLE, 0); 978 979 ACPI_ENABLE_IRQS(); 980 981 /* Find the actual time asleep in microseconds. */ 982 tdiff = microtime_pcpu_diff(&start, &end); 983 sc->cst_prev_sleep = (sc->cst_prev_sleep * 3 + tdiff) / 4; 984 } 985 986 /* 987 * Re-evaluate the _CST object when we are notified that it changed. 988 */ 989 static void 990 acpi_cst_notify(device_t dev) 991 { 992 struct acpi_cst_softc *sc = device_get_softc(dev); 993 994 cpuhelper_assert(mycpuid, false); 995 996 lwkt_serialize_enter(&acpi_cst_slize); 997 998 /* Update the list of Cx states. */ 999 acpi_cst_cx_reprobe_cst(sc); 1000 acpi_cst_support_list(sc); 1001 1002 /* Update the new lowest useable Cx state for all CPUs. */ 1003 acpi_cst_global_cx_count(); 1004 1005 /* 1006 * Fix up the lowest Cx being used 1007 */ 1008 if (acpi_cst_cx_lowest_req < acpi_cst_cx_count) 1009 acpi_cst_cx_lowest = acpi_cst_cx_lowest_req; 1010 if (acpi_cst_cx_lowest > acpi_cst_cx_count - 1) 1011 acpi_cst_cx_lowest = acpi_cst_cx_count - 1; 1012 1013 lwkt_serialize_exit(&acpi_cst_slize); 1014 } 1015 1016 static int 1017 acpi_cst_set_quirks(void) 1018 { 1019 device_t acpi_dev; 1020 uint32_t val; 1021 1022 ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); 1023 1024 /* 1025 * Bus mastering arbitration control is needed to keep caches coherent 1026 * while sleeping in C3. If it's not present but a working flush cache 1027 * instruction is present, flush the caches before entering C3 instead. 1028 * Otherwise, just disable C3 completely. 1029 */ 1030 if (AcpiGbl_FADT.Pm2ControlBlock == 0 || 1031 AcpiGbl_FADT.Pm2ControlLength == 0) { 1032 if ((AcpiGbl_FADT.Flags & ACPI_FADT_WBINVD) && 1033 (AcpiGbl_FADT.Flags & ACPI_FADT_WBINVD_FLUSH) == 0) { 1034 acpi_cst_quirks |= ACPI_CST_QUIRK_NO_BM; 1035 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 1036 "cpu_cst: no BM control, using flush cache method\n")); 1037 } else { 1038 acpi_cst_quirks |= ACPI_CST_QUIRK_NO_C3; 1039 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 1040 "cpu_cst: no BM control, C3 not available\n")); 1041 } 1042 } 1043 1044 /* Look for various quirks of the PIIX4 part. */ 1045 acpi_dev = pci_find_device(PCI_VENDOR_INTEL, PCI_DEVICE_82371AB_3); 1046 if (acpi_dev != NULL) { 1047 switch (pci_get_revid(acpi_dev)) { 1048 /* 1049 * Disable C3 support for all PIIX4 chipsets. Some of these parts 1050 * do not report the BMIDE status to the BM status register and 1051 * others have a livelock bug if Type-F DMA is enabled. Linux 1052 * works around the BMIDE bug by reading the BM status directly 1053 * but we take the simpler approach of disabling C3 for these 1054 * parts. 1055 * 1056 * See erratum #18 ("C3 Power State/BMIDE and Type-F DMA 1057 * Livelock") from the January 2002 PIIX4 specification update. 1058 * Applies to all PIIX4 models. 1059 * 1060 * Also, make sure that all interrupts cause a "Stop Break" 1061 * event to exit from C2 state. 1062 * Also, BRLD_EN_BM (ACPI_BITREG_BUS_MASTER_RLD in ACPI-speak) 1063 * should be set to zero, otherwise it causes C2 to short-sleep. 1064 * PIIX4 doesn't properly support C3 and bus master activity 1065 * need not break out of C2. 1066 */ 1067 case PCI_REVISION_A_STEP: 1068 case PCI_REVISION_B_STEP: 1069 case PCI_REVISION_4E: 1070 case PCI_REVISION_4M: 1071 acpi_cst_quirks |= ACPI_CST_QUIRK_NO_C3; 1072 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 1073 "cpu_cst: working around PIIX4 bug, disabling C3\n")); 1074 1075 val = pci_read_config(acpi_dev, PIIX4_DEVACTB_REG, 4); 1076 if ((val & PIIX4_STOP_BREAK_MASK) != PIIX4_STOP_BREAK_MASK) { 1077 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 1078 "cpu_cst: PIIX4: enabling IRQs to generate Stop Break\n")); 1079 val |= PIIX4_STOP_BREAK_MASK; 1080 pci_write_config(acpi_dev, PIIX4_DEVACTB_REG, val, 4); 1081 } 1082 AcpiReadBitRegister(ACPI_BITREG_BUS_MASTER_RLD, &val); 1083 if (val) { 1084 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 1085 "cpu_cst: PIIX4: reset BRLD_EN_BM\n")); 1086 AcpiWriteBitRegister(ACPI_BITREG_BUS_MASTER_RLD, 0); 1087 } 1088 break; 1089 default: 1090 break; 1091 } 1092 } 1093 1094 return (0); 1095 } 1096 1097 static int 1098 acpi_cst_usage_sysctl(SYSCTL_HANDLER_ARGS) 1099 { 1100 struct acpi_cst_softc *sc; 1101 struct sbuf sb; 1102 char buf[128]; 1103 int i; 1104 uintmax_t fract, sum, whole; 1105 1106 sc = (struct acpi_cst_softc *) arg1; 1107 sum = 0; 1108 for (i = 0; i < sc->cst_cx_count; i++) 1109 sum += sc->cst_cx_stats[i]; 1110 sbuf_new(&sb, buf, sizeof(buf), SBUF_FIXEDLEN); 1111 for (i = 0; i < sc->cst_cx_count; i++) { 1112 if (sum > 0) { 1113 whole = (uintmax_t)sc->cst_cx_stats[i] * 100; 1114 fract = (whole % sum) * 100; 1115 sbuf_printf(&sb, "%u.%02u%% ", (u_int)(whole / sum), 1116 (u_int)(fract / sum)); 1117 } else 1118 sbuf_printf(&sb, "0.00%% "); 1119 } 1120 sbuf_printf(&sb, "last %dus", sc->cst_prev_sleep); 1121 sbuf_trim(&sb); 1122 sbuf_finish(&sb); 1123 sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req); 1124 sbuf_delete(&sb); 1125 1126 return (0); 1127 } 1128 1129 static int 1130 acpi_cst_set_lowest_oncpu(struct acpi_cst_softc *sc, int val) 1131 { 1132 int old_lowest, error = 0, old_lowest_req; 1133 uint32_t old_type, type; 1134 1135 KKASSERT(mycpuid == sc->cst_cpuid); 1136 1137 old_lowest_req = sc->cst_cx_lowest_req; 1138 sc->cst_cx_lowest_req = val; 1139 1140 if (val > sc->cst_cx_count - 1) 1141 val = sc->cst_cx_count - 1; 1142 old_lowest = atomic_swap_int(&sc->cst_cx_lowest, val); 1143 1144 old_type = sc->cst_cx_states[old_lowest].type; 1145 type = sc->cst_cx_states[val].type; 1146 if (old_type >= ACPI_STATE_C3 && type < ACPI_STATE_C3) { 1147 cputimer_intr_powersave_remreq(); 1148 } else if (type >= ACPI_STATE_C3 && old_type < ACPI_STATE_C3) { 1149 error = cputimer_intr_powersave_addreq(); 1150 if (error) { 1151 /* Restore */ 1152 sc->cst_cx_lowest_req = old_lowest_req; 1153 sc->cst_cx_lowest = old_lowest; 1154 } 1155 } 1156 1157 if (error) 1158 return error; 1159 1160 /* Cache the new lowest non-C3 state. */ 1161 acpi_cst_non_c3(sc); 1162 1163 /* Reset the statistics counters. */ 1164 bzero(sc->cst_cx_stats, sizeof(sc->cst_cx_stats)); 1165 return (0); 1166 } 1167 1168 static void 1169 acpi_cst_set_lowest_handler(struct cpuhelper_msg *msg) 1170 { 1171 int error; 1172 1173 error = acpi_cst_set_lowest_oncpu(msg->ch_cbarg, msg->ch_cbarg1); 1174 cpuhelper_replymsg(msg, error); 1175 } 1176 1177 static int 1178 acpi_cst_set_lowest(struct acpi_cst_softc *sc, int val) 1179 { 1180 struct cpuhelper_msg msg; 1181 1182 cpuhelper_initmsg(&msg, &curthread->td_msgport, 1183 acpi_cst_set_lowest_handler, sc, MSGF_PRIORITY); 1184 msg.ch_cbarg1 = val; 1185 1186 return (cpuhelper_domsg(&msg, sc->cst_cpuid)); 1187 } 1188 1189 static int 1190 acpi_cst_lowest_sysctl(SYSCTL_HANDLER_ARGS) 1191 { 1192 struct acpi_cst_softc *sc; 1193 char state[8]; 1194 int val, error; 1195 1196 sc = (struct acpi_cst_softc *)arg1; 1197 ksnprintf(state, sizeof(state), "C%d", sc->cst_cx_lowest_req + 1); 1198 error = sysctl_handle_string(oidp, state, sizeof(state), req); 1199 if (error != 0 || req->newptr == NULL) 1200 return (error); 1201 if (strlen(state) < 2 || toupper(state[0]) != 'C') 1202 return (EINVAL); 1203 val = (int) strtol(state + 1, NULL, 10) - 1; 1204 if (val < 0) 1205 return (EINVAL); 1206 1207 lwkt_serialize_enter(&acpi_cst_slize); 1208 error = acpi_cst_set_lowest(sc, val); 1209 lwkt_serialize_exit(&acpi_cst_slize); 1210 1211 return error; 1212 } 1213 1214 static int 1215 acpi_cst_lowest_use_sysctl(SYSCTL_HANDLER_ARGS) 1216 { 1217 struct acpi_cst_softc *sc; 1218 char state[8]; 1219 1220 sc = (struct acpi_cst_softc *)arg1; 1221 ksnprintf(state, sizeof(state), "C%d", sc->cst_cx_lowest + 1); 1222 return sysctl_handle_string(oidp, state, sizeof(state), req); 1223 } 1224 1225 static int 1226 acpi_cst_global_lowest_sysctl(SYSCTL_HANDLER_ARGS) 1227 { 1228 struct acpi_cst_softc *sc; 1229 char state[8]; 1230 int val, error, i; 1231 1232 ksnprintf(state, sizeof(state), "C%d", acpi_cst_cx_lowest_req + 1); 1233 error = sysctl_handle_string(oidp, state, sizeof(state), req); 1234 if (error != 0 || req->newptr == NULL) 1235 return (error); 1236 if (strlen(state) < 2 || toupper(state[0]) != 'C') 1237 return (EINVAL); 1238 val = (int) strtol(state + 1, NULL, 10) - 1; 1239 if (val < 0) 1240 return (EINVAL); 1241 1242 lwkt_serialize_enter(&acpi_cst_slize); 1243 1244 acpi_cst_cx_lowest_req = val; 1245 acpi_cst_cx_lowest = val; 1246 if (acpi_cst_cx_lowest > acpi_cst_cx_count - 1) 1247 acpi_cst_cx_lowest = acpi_cst_cx_count - 1; 1248 1249 /* Update the new lowest useable Cx state for all CPUs. */ 1250 for (i = 0; i < acpi_cst_ndevices; i++) { 1251 sc = device_get_softc(acpi_cst_devices[i]); 1252 error = acpi_cst_set_lowest(sc, val); 1253 if (error) { 1254 KKASSERT(i == 0); 1255 break; 1256 } 1257 } 1258 1259 lwkt_serialize_exit(&acpi_cst_slize); 1260 1261 return error; 1262 } 1263 1264 static int 1265 acpi_cst_global_lowest_use_sysctl(SYSCTL_HANDLER_ARGS) 1266 { 1267 char state[8]; 1268 1269 ksnprintf(state, sizeof(state), "C%d", acpi_cst_cx_lowest + 1); 1270 return sysctl_handle_string(oidp, state, sizeof(state), req); 1271 } 1272 1273 /* 1274 * Put the CPU in C1 in a machine-dependant way. 1275 * XXX: shouldn't be here! 1276 */ 1277 static void 1278 acpi_cst_c1_halt(void) 1279 { 1280 splz(); 1281 if ((mycpu->gd_reqflags & RQF_IDLECHECK_WK_MASK) == 0) 1282 __asm __volatile("sti; hlt"); 1283 else 1284 __asm __volatile("sti; pause"); 1285 } 1286 1287 static void 1288 acpi_cst_non_c3(struct acpi_cst_softc *sc) 1289 { 1290 int i; 1291 1292 sc->cst_non_c3 = 0; 1293 for (i = sc->cst_cx_lowest; i >= 0; i--) { 1294 if (sc->cst_cx_states[i].type < ACPI_STATE_C3) { 1295 sc->cst_non_c3 = i; 1296 break; 1297 } 1298 } 1299 if (bootverbose) 1300 device_printf(sc->cst_dev, "non-C3 %d\n", sc->cst_non_c3); 1301 } 1302 1303 /* 1304 * Update the largest Cx state supported in the global acpi_cst_cx_count. 1305 * It will be used in the global Cx sysctl handler. 1306 */ 1307 static void 1308 acpi_cst_global_cx_count(void) 1309 { 1310 struct acpi_cst_softc *sc; 1311 int i; 1312 1313 if (acpi_cst_ndevices == 0) { 1314 acpi_cst_cx_count = 0; 1315 return; 1316 } 1317 1318 sc = device_get_softc(acpi_cst_devices[0]); 1319 acpi_cst_cx_count = sc->cst_cx_count; 1320 1321 for (i = 1; i < acpi_cst_ndevices; i++) { 1322 struct acpi_cst_softc *sc = device_get_softc(acpi_cst_devices[i]); 1323 1324 if (sc->cst_cx_count < acpi_cst_cx_count) 1325 acpi_cst_cx_count = sc->cst_cx_count; 1326 } 1327 if (bootverbose) 1328 kprintf("cpu_cst: global Cx count %d\n", acpi_cst_cx_count); 1329 } 1330 1331 static void 1332 acpi_cst_c1_halt_enter(const struct acpi_cst_cx *cx __unused) 1333 { 1334 acpi_cst_c1_halt(); 1335 } 1336 1337 static void 1338 acpi_cst_cx_io_enter(const struct acpi_cst_cx *cx) 1339 { 1340 uint64_t dummy; 1341 1342 /* 1343 * Read I/O to enter this Cx state 1344 */ 1345 bus_space_read_1(cx->btag, cx->bhand, 0); 1346 /* 1347 * Perform a dummy I/O read. Since it may take an arbitrary time 1348 * to enter the idle state, this read makes sure that we are frozen. 1349 */ 1350 AcpiRead(&dummy, &AcpiGbl_FADT.XPmTimerBlock); 1351 } 1352 1353 static int 1354 acpi_cst_cx_setup(struct acpi_cst_cx *cx) 1355 { 1356 cx->flags &= ~ACPI_CST_CX_FLAG_BM_STS; 1357 cx->preamble = ACPI_CST_CX_PREAMBLE_NONE; 1358 1359 if (cx->type >= ACPI_STATE_C3) { 1360 /* 1361 * Set the required operations for entering C3(+) state. 1362 * Later acpi_cst_md_cx_setup() may fix them up. 1363 */ 1364 1365 /* 1366 * Always check BM_STS. 1367 */ 1368 if ((acpi_cst_quirks & ACPI_CST_QUIRK_NO_BM) == 0) 1369 cx->flags |= ACPI_CST_CX_FLAG_BM_STS; 1370 1371 /* 1372 * According to the ACPI specification, bus master arbitration 1373 * is only available on UP system. For MP system, cache flushing 1374 * is required. 1375 */ 1376 if (ncpus == 1 && (acpi_cst_quirks & ACPI_CST_QUIRK_NO_BM) == 0) 1377 cx->preamble = ACPI_CST_CX_PREAMBLE_BM_ARB; 1378 else 1379 cx->preamble = ACPI_CST_CX_PREAMBLE_WBINVD; 1380 } 1381 return acpi_cst_md_cx_setup(cx); 1382 } 1383 1384 static void 1385 acpi_cst_free_resource(struct acpi_cst_softc *sc, int start) 1386 { 1387 int i; 1388 1389 for (i = start; i < MAX_CX_STATES; ++i) { 1390 struct acpi_cst_cx *cx = &sc->cst_cx_states[i]; 1391 1392 if (cx->res != NULL) 1393 bus_release_resource(sc->cst_dev, cx->res_type, cx->rid, cx->res); 1394 memset(cx, 0, sizeof(*cx)); 1395 } 1396 } 1397