/*	$OpenBSD: acpicpu.c,v 1.90 2021/08/01 19:04:37 kettenis Exp $ */
/*
 * Copyright (c) 2005 Marco Peereboom <marco@openbsd.org>
 * Copyright (c) 2015 Philip Guenther <guenther@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/param.h>
#include <sys/kernel.h>		/* for tick */
#include <sys/signalvar.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <sys/atomic.h>

#include <machine/bus.h>
#include <machine/cpu.h>
#include <machine/cpufunc.h>
#include <machine/specialreg.h>

#include <dev/acpi/acpireg.h>
#include <dev/acpi/acpivar.h>
#include <dev/acpi/acpidev.h>
#include <dev/acpi/amltypes.h>
#include <dev/acpi/dsdt.h>

#include <sys/sensors.h>

int	acpicpu_match(struct device *, void *, void *);
void	acpicpu_attach(struct device *, struct device *, void *);
int	acpicpu_notify(struct aml_node *, int, void *);
void	acpicpu_setperf(int);
void	acpicpu_setperf_ppc_change(struct acpicpu_pss *, int);

#define ACPI_STATE_C0		0x00
#define ACPI_STATE_C1		0x01
#define ACPI_STATE_C2		0x02
#define ACPI_STATE_C3		0x03

#define ACPI_PDC_REVID		0x1
#define ACPI_PDC_SMP		0xa
#define ACPI_PDC_MSR		0x1

/* _PDC/_OSC Intel capabilities flags */
#define ACPI_PDC_P_FFH		0x0001
#define ACPI_PDC_C_C1_HALT	0x0002
#define ACPI_PDC_T_FFH		0x0004
#define ACPI_PDC_SMP_C1PT	0x0008
#define ACPI_PDC_SMP_C2C3	0x0010
#define ACPI_PDC_SMP_P_SWCOORD	0x0020
#define ACPI_PDC_SMP_C_SWCOORD	0x0040
#define ACPI_PDC_SMP_T_SWCOORD	0x0080
#define ACPI_PDC_C_C1_FFH	0x0100
#define ACPI_PDC_C_C2C3_FFH	0x0200
/* reserved			0x0400 */
#define ACPI_PDC_P_HWCOORD	0x0800
#define ACPI_PDC_PPC_NOTIFY	0x1000

#define CST_METH_HALT		0
#define CST_METH_IO_HALT	1
#define CST_METH_MWAIT		2
#define CST_METH_GAS_IO		3

/* flags on Intel's FFH mwait method */
#define CST_FLAG_MWAIT_HW_COORD		0x1
#define CST_FLAG_MWAIT_BM_AVOIDANCE	0x2
#define CST_FLAG_FALLBACK		0x4000	/* fallback for broken _CST */
#define CST_FLAG_SKIP			0x8000	/* state is worse choice */

#define FLAGS_MWAIT_ONLY	0x02
#define FLAGS_BMCHECK		0x04
#define FLAGS_NOTHROTTLE	0x08
#define FLAGS_NOPSS		0x10
#define FLAGS_NOPCT		0x20

#define CPU_THT_EN		(1L << 4)
#define CPU_MAXSTATE(sc)	(1L << (sc)->sc_duty_wid)
#define CPU_STATE(sc,pct)	((pct * CPU_MAXSTATE(sc) / 100) << (sc)->sc_duty_off)
#define CPU_STATEMASK(sc)	((CPU_MAXSTATE(sc) - 1) << (sc)->sc_duty_off)

#define ACPI_MAX_C2_LATENCY	100
#define ACPI_MAX_C3_LATENCY	1000

#define CSD_COORD_SW_ALL	0xFC
#define CSD_COORD_SW_ANY	0xFD
#define CSD_COORD_HW_ALL	0xFE

/* Make sure throttling bits are valid, a=addr, o=offset, w=width */
#define valid_throttle(o,w,a)	(a && w && (o+w)<=31 && (o>4 || (o+w)<=4))

struct acpi_cstate
{
	SLIST_ENTRY(acpi_cstate) link;

	u_short		state;
	short		method;		/* CST_METH_* */
	u_short		flags;		/* CST_FLAG_* */
	u_short		latency;
	int		power;
	uint64_t	address;	/* or mwait hint */
};

unsigned long cst_stats[4] = { 0 };

struct acpicpu_softc {
	struct device		sc_dev;
	int			sc_cpu;

	int			sc_duty_wid;
	int			sc_duty_off;
	uint32_t		sc_pblk_addr;
	int			sc_pblk_len;
	int			sc_flags;
	unsigned long		sc_prev_sleep;
	unsigned long		sc_last_itime;

	struct cpu_info		*sc_ci;
	SLIST_HEAD(,acpi_cstate) sc_cstates;

	bus_space_tag_t		sc_iot;
	bus_space_handle_t	sc_ioh;

	struct acpi_softc	*sc_acpi;
	struct aml_node		*sc_devnode;

	int			sc_pss_len;	/* XXX */
	int			sc_ppc;
	int			sc_level;
	struct acpicpu_pss	*sc_pss;
	size_t			sc_pssfulllen;

	struct acpicpu_pct	sc_pct;
	/* save compensation for pct access for lying bios' */
	uint32_t		sc_pct_stat_as;
	uint32_t		sc_pct_ctrl_as;
	uint32_t		sc_pct_stat_len;
	uint32_t		sc_pct_ctrl_len;
	/*
	 * XXX: _PPC change listener
	 * PPC changes can occur when, for example, a machine is disconnected
	 * from AC power and can no longer support the highest frequency or
	 * voltage when driven from the battery.
	 * Should probably be reimplemented as a list; for now we assume only
	 * one listener.
	 */
	void			(*sc_notify)(struct acpicpu_pss *, int);
};

void	acpicpu_add_cstatepkg(struct aml_value *, void *);
void	acpicpu_add_cdeppkg(struct aml_value *, void *);
int	acpicpu_getppc(struct acpicpu_softc *);
int	acpicpu_getpct(struct acpicpu_softc *);
int	acpicpu_getpss(struct acpicpu_softc *);
int	acpicpu_getcst(struct acpicpu_softc *);
void	acpicpu_getcst_from_fadt(struct acpicpu_softc *);
void	acpicpu_print_one_cst(struct acpi_cstate *_cx);
void	acpicpu_print_cst(struct acpicpu_softc *_sc);
void	acpicpu_add_cstate(struct acpicpu_softc *_sc, int _state, int _method,
	    int _flags, int _latency, int _power, uint64_t _address);
void	acpicpu_set_pdc(struct acpicpu_softc *);
void	acpicpu_idle(void);

#if 0
void	acpicpu_set_throttle(struct acpicpu_softc *, int);
struct acpi_cstate *acpicpu_find_cstate(struct acpicpu_softc *, int);
#endif

struct cfattach acpicpu_ca = {
	sizeof(struct acpicpu_softc), acpicpu_match, acpicpu_attach
};

struct cfdriver acpicpu_cd = {
	NULL, "acpicpu", DV_DULL
};

const char *acpicpu_hids[] = {
	"ACPI0007",
	NULL
};

extern int setperf_prio;

#if 0
void
acpicpu_set_throttle(struct acpicpu_softc *sc, int level)
{
	uint32_t pbval;

	if (sc->sc_flags & FLAGS_NOTHROTTLE)
		return;

	/* Disable throttling control */
	pbval = inl(sc->sc_pblk_addr);
	outl(sc->sc_pblk_addr, pbval & ~CPU_THT_EN);
	if (level < 100) {
		pbval &= ~CPU_STATEMASK(sc);
		pbval |= CPU_STATE(sc, level);
		outl(sc->sc_pblk_addr, pbval & ~CPU_THT_EN);
		outl(sc->sc_pblk_addr, pbval | CPU_THT_EN);
	}
}

struct acpi_cstate *
acpicpu_find_cstate(struct acpicpu_softc *sc, int state)
{
	struct acpi_cstate *cx;

	SLIST_FOREACH(cx, &sc->sc_cstates, link)
		if (cx->state == state)
			return cx;
	return (NULL);
}
#endif

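/*
 * Capability negotiation: tell the firmware which processor power-
 * management features this kernel can drive (C1 via halt, FFH/mwait
 * C- and P-states, SMP coordination).  _OSC is used when the node
 * provides it; otherwise the older _PDC method is evaluated.  What the
 * firmware then exposes in _CST/_PSS may depend on these bits.
 */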
void
acpicpu_set_pdc(struct acpicpu_softc *sc)
{
	struct aml_value cmd, osc_cmd[4];
	struct aml_value res;
	uint32_t cap;
	uint32_t buf[3];

	/* 4077A616-290C-47BE-9EBD-D87058713953 */
	static uint8_t cpu_oscuuid[16] = { 0x16, 0xA6, 0x77, 0x40, 0x0C, 0x29,
					   0xBE, 0x47, 0x9E, 0xBD, 0xD8, 0x70,
					   0x58, 0x71, 0x39, 0x53 };
	cap = ACPI_PDC_C_C1_HALT | ACPI_PDC_P_FFH | ACPI_PDC_C_C1_FFH
	    | ACPI_PDC_C_C2C3_FFH | ACPI_PDC_SMP_P_SWCOORD | ACPI_PDC_SMP_C2C3
	    | ACPI_PDC_SMP_C1PT;

	if (aml_searchname(sc->sc_devnode, "_OSC")) {
		/* Query _OSC */
		memset(&osc_cmd, 0, sizeof(osc_cmd));
		osc_cmd[0].type = AML_OBJTYPE_BUFFER;
		osc_cmd[0].v_buffer = (uint8_t *)&cpu_oscuuid;
		osc_cmd[0].length = sizeof(cpu_oscuuid);

		osc_cmd[1].type = AML_OBJTYPE_INTEGER;
		osc_cmd[1].v_integer = 1;
		osc_cmd[1].length = 1;

		osc_cmd[2].type = AML_OBJTYPE_INTEGER;
		osc_cmd[2].v_integer = 2;
		osc_cmd[2].length = 1;

		buf[0] = 1;
		buf[1] = cap;
		osc_cmd[3].type = AML_OBJTYPE_BUFFER;
		osc_cmd[3].v_buffer = (int8_t *)&buf;
		osc_cmd[3].length = sizeof(buf);

		aml_evalname(sc->sc_acpi, sc->sc_devnode, "_OSC",
		    4, osc_cmd, &res);

		if (res.type != AML_OBJTYPE_BUFFER || res.length < 8) {
			printf(": unable to query capabilities\n");
			aml_freevalue(&res);
			return;
		}

		/* Evaluate _OSC */
		memset(&osc_cmd, 0, sizeof(osc_cmd));
		osc_cmd[0].type = AML_OBJTYPE_BUFFER;
		osc_cmd[0].v_buffer = (uint8_t *)&cpu_oscuuid;
		osc_cmd[0].length = sizeof(cpu_oscuuid);

		osc_cmd[1].type = AML_OBJTYPE_INTEGER;
		osc_cmd[1].v_integer = 1;
		osc_cmd[1].length = 1;

		osc_cmd[2].type = AML_OBJTYPE_INTEGER;
		osc_cmd[2].v_integer = 2;
		osc_cmd[2].length = 1;

		buf[0] = 0;
		buf[1] = (*(uint32_t *)&res.v_buffer[4]) & cap;
		osc_cmd[3].type = AML_OBJTYPE_BUFFER;
		osc_cmd[3].v_buffer = (int8_t *)&buf;
		osc_cmd[3].length = sizeof(buf);

		aml_freevalue(&res);

		aml_evalname(sc->sc_acpi, sc->sc_devnode, "_OSC",
		    4, osc_cmd, NULL);
	} else {
		/* Evaluate _PDC */
		memset(&cmd, 0, sizeof(cmd));
		cmd.type = AML_OBJTYPE_BUFFER;
		cmd.v_buffer = (uint8_t *)&buf;
		cmd.length = sizeof(buf);

		buf[0] = ACPI_PDC_REVID;
		buf[1] = 1;
		buf[2] = cap;

		aml_evalname(sc->sc_acpi, sc->sc_devnode, "_PDC",
		    1, &cmd, NULL);
	}
}

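/*
 * The mwait hint that later ends up in cx->address follows Intel's
 * MWAIT extension layout: bits 7:4 select the target C-state (value is
 * the C-state number minus one, 0xf meaning C0) and bits 3:0 select a
 * sub-state.  check_mwait_hints() only validates the C-state field
 * against the substate counts CPUID advertised.
 */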
/*
 * sanity check mwait hints against what cpuid told us
 * ...but because intel screwed up, just check whether cpuid says
 * the given state has _any_ substates.
 */
static int
check_mwait_hints(int state, int hints)
{
	int cstate;
	int num_substates;

	if (cpu_mwait_size == 0)
		return (0);
	cstate = ((hints >> 4) & 0xf) + 1;
	if (cstate == 16)
		cstate = 0;
	else if (cstate > 7) {
		/* out of range of test against CPUID; just trust'em */
		return (1);
	}
	num_substates = (cpu_mwait_states >> (4 * cstate)) & 0xf;
	if (num_substates == 0) {
		printf(": C%d bad (state %d has no substates)", state, cstate);
		return (0);
	}
	return (1);
}

void
acpicpu_add_cstate(struct acpicpu_softc *sc, int state, int method,
    int flags, int latency, int power, uint64_t address)
{
	struct acpi_cstate *cx;

	dnprintf(10, " C%d: latency:%.4x power:%.4x addr:%.16llx\n",
	    state, latency, power, address);

	/* add a new state, or overwrite the fallback C1 state? */
	if (state != ACPI_STATE_C1 ||
	    (cx = SLIST_FIRST(&sc->sc_cstates)) == NULL ||
	    (cx->flags & CST_FLAG_FALLBACK) == 0) {
		cx = malloc(sizeof(*cx), M_DEVBUF, M_WAITOK);
		SLIST_INSERT_HEAD(&sc->sc_cstates, cx, link);
	}

	cx->state = state;
	cx->method = method;
	cx->flags = flags;
	cx->latency = latency;
	cx->power = power;
	cx->address = address;
}

/* Found a _CST object, add new cstate for each entry */
void
acpicpu_add_cstatepkg(struct aml_value *val, void *arg)
{
	struct acpicpu_softc *sc = arg;
	uint64_t addr;
	struct acpi_grd *grd;
	int state, method, flags;

#if defined(ACPI_DEBUG) && !defined(SMALL_KERNEL)
	aml_showvalue(val);
#endif
	if (val->type != AML_OBJTYPE_PACKAGE || val->length != 4)
		return;

	/* range and sanity checks */
	state = val->v_package[1]->v_integer;
	if (state < 0 || state > 4)
		return;
	if (val->v_package[0]->type != AML_OBJTYPE_BUFFER) {
		printf(": C%d (unexpected ACPI object type %d)",
		    state, val->v_package[0]->type);
		return;
	}
	grd = (struct acpi_grd *)val->v_package[0]->v_buffer;
	if (val->v_package[0]->length != sizeof(*grd) + 2 ||
	    grd->grd_descriptor != LR_GENREGISTER ||
	    grd->grd_length != sizeof(grd->grd_gas) ||
	    val->v_package[0]->v_buffer[sizeof(*grd)] != SRT_ENDTAG) {
		printf(": C%d (bogo buffer)", state);
		return;
	}

	flags = 0;
	switch (grd->grd_gas.address_space_id) {
	case GAS_FUNCTIONAL_FIXED:
		if (grd->grd_gas.register_bit_width == 0) {
			method = CST_METH_HALT;
			addr = 0;
		} else {
			/*
			 * In theory we should only do this for
			 * vendor 1 == Intel but other values crop up,
			 * presumably due to the normal ACPI spec confusion.
			 */
			switch (grd->grd_gas.register_bit_offset) {
			case 0x1:
				method = CST_METH_IO_HALT;
				addr = grd->grd_gas.address;

				/* i386 and amd64 I/O space is 16bits */
				if (addr > 0xffff) {
					printf(": C%d (bogo I/O addr %llx)",
					    state, addr);
					return;
				}
				break;
			case 0x2:
				addr = grd->grd_gas.address;
				if (!check_mwait_hints(state, addr))
					return;
				method = CST_METH_MWAIT;
				flags = grd->grd_gas.access_size;
				break;
			default:
				printf(": C%d (unknown FFH class %d)",
				    state, grd->grd_gas.register_bit_offset);
				return;
			}
		}
		break;

	case GAS_SYSTEM_IOSPACE:
		addr = grd->grd_gas.address;
		if (grd->grd_gas.register_bit_width != 8 ||
		    grd->grd_gas.register_bit_offset != 0) {
			printf(": C%d (unhandled %s spec: %d/%d)", state,
			    "I/O", grd->grd_gas.register_bit_width,
			    grd->grd_gas.register_bit_offset);
			return;
		}
		method = CST_METH_GAS_IO;
		break;

	default:
		/* dump the GAS for analysis */
		{
			int i;
			printf(": C%d (unhandled GAS:", state);
			for (i = 0; i < sizeof(grd->grd_gas); i++)
				printf(" %#x", ((u_char *)&grd->grd_gas)[i]);
			printf(")");

		}
		return;
	}

	acpicpu_add_cstate(sc, state, method, flags,
	    val->v_package[2]->v_integer, val->v_package[3]->v_integer, addr);
}


/* Found a _CSD object, print the dependency */
void
acpicpu_add_cdeppkg(struct aml_value *val, void *arg)
{
	int64_t num_proc, coord_type, domain, cindex;

	/*
	 * errors: unexpected object type, bad length, mismatched length,
	 * and bad CSD revision
	 */
	if (val->type != AML_OBJTYPE_PACKAGE || val->length < 6 ||
	    val->length != val->v_package[0]->v_integer ||
	    val->v_package[1]->v_integer != 0) {
#if 1 || defined(ACPI_DEBUG) && !defined(SMALL_KERNEL)
		aml_showvalue(val);
#endif
		printf("bogus CSD\n");
		return;
	}

	/* coordinating 'among' one CPU is trivial, ignore */
	num_proc = val->v_package[4]->v_integer;
	if (num_proc == 1)
		return;

	/* we practically assume the hardware will coordinate, so ignore */
	coord_type = val->v_package[3]->v_integer;
	if (coord_type == CSD_COORD_HW_ALL)
		return;

	domain = val->v_package[2]->v_integer;
	cindex = val->v_package[5]->v_integer;
	printf(": CSD (c=%#llx d=%lld n=%lld i=%lli)",
	    coord_type, domain, num_proc, cindex);
}

int
acpicpu_getcst(struct acpicpu_softc *sc)
{
	struct aml_value res;
	struct acpi_cstate *cx, *next_cx;
	int use_nonmwait;

	/* delete the existing list */
	while ((cx = SLIST_FIRST(&sc->sc_cstates)) != NULL) {
		SLIST_REMOVE_HEAD(&sc->sc_cstates, link);
		free(cx, M_DEVBUF, sizeof(*cx));
	}

	/* provide a fallback C1-via-halt in case _CST's C1 is bogus */
	acpicpu_add_cstate(sc, ACPI_STATE_C1, CST_METH_HALT,
	    CST_FLAG_FALLBACK, 1, -1, 0);

	if (aml_evalname(sc->sc_acpi, sc->sc_devnode, "_CST", 0, NULL, &res))
		return (1);

	aml_foreachpkg(&res, 1, acpicpu_add_cstatepkg, sc);
	aml_freevalue(&res);

	/* only have fallback state?  then no _CST objects were understood */
	cx = SLIST_FIRST(&sc->sc_cstates);
	if (cx->flags & CST_FLAG_FALLBACK)
		return (1);

	/*
	 * Skip states >= C2 if the CPU's LAPIC timer stops in deep
	 * states (i.e., it doesn't have the 'ARAT' bit set).
	 * Also keep track if all the states we'll use use mwait.
	 */
	use_nonmwait = 0;
	while ((next_cx = SLIST_NEXT(cx, link)) != NULL) {
		if (cx->state > 1 &&
		    (sc->sc_ci->ci_feature_tpmflags & TPM_ARAT) == 0)
			cx->flags |= CST_FLAG_SKIP;
		else if (cx->method != CST_METH_MWAIT)
			use_nonmwait = 1;
		cx = next_cx;
	}
	if (use_nonmwait)
		sc->sc_flags &= ~FLAGS_MWAIT_ONLY;
	else
		sc->sc_flags |= FLAGS_MWAIT_ONLY;

	if (!aml_evalname(sc->sc_acpi, sc->sc_devnode, "_CSD", 0, NULL, &res)) {
		aml_foreachpkg(&res, 1, acpicpu_add_cdeppkg, sc);
		aml_freevalue(&res);
	}

	return (0);
}

/*
 * old-style fixed C-state info in the FADT.
 * Note that this has extra restrictions on values and flags.
 */
void
acpicpu_getcst_from_fadt(struct acpicpu_softc *sc)
{
	struct acpi_fadt *fadt = sc->sc_acpi->sc_fadt;
	int flags;

	/* FADT has to set flag to do C2 and higher on MP */
	if ((fadt->flags & FADT_P_LVL2_UP) == 0 && ncpus > 1)
		return;

	/* skip these C2 and C3 states if the CPU doesn't have ARAT */
	flags = (sc->sc_ci->ci_feature_tpmflags & TPM_ARAT)
	    ? 0 : CST_FLAG_SKIP;

	/* Some systems don't export a full PBLK; reduce functionality */
	if (sc->sc_pblk_len >= 5 && fadt->p_lvl2_lat <= ACPI_MAX_C2_LATENCY) {
		acpicpu_add_cstate(sc, ACPI_STATE_C2, CST_METH_GAS_IO, flags,
		    fadt->p_lvl2_lat, -1, sc->sc_pblk_addr + 4);
	}
	if (sc->sc_pblk_len >= 6 && fadt->p_lvl3_lat <= ACPI_MAX_C3_LATENCY)
		acpicpu_add_cstate(sc, ACPI_STATE_C3, CST_METH_GAS_IO, flags,
		    fadt->p_lvl3_lat, -1, sc->sc_pblk_addr + 5);
}


void
acpicpu_print_one_cst(struct acpi_cstate *cx)
{
	const char *meth = "";
	int show_addr = 0;

	switch (cx->method) {
	case CST_METH_IO_HALT:
		show_addr = 1;
		/* fallthrough */
	case CST_METH_HALT:
		meth = " halt";
		break;

	case CST_METH_MWAIT:
		meth = " mwait";
		show_addr = cx->address != 0;
		break;

	case CST_METH_GAS_IO:
		meth = " io";
		show_addr = 1;
		break;

	}

	printf(" %sC%d(", (cx->flags & CST_FLAG_SKIP ? "!" : ""), cx->state);
	if (cx->power != -1)
		printf("%d", cx->power);
	printf("@%d%s", cx->latency, meth);
	if (cx->flags & ~CST_FLAG_SKIP) {
		if (cx->flags & CST_FLAG_FALLBACK)
			printf("!");
		else
			printf(".%x", (cx->flags & ~CST_FLAG_SKIP));
	}
	if (show_addr)
		printf("@0x%llx", cx->address);
	printf(")");
}

void
acpicpu_print_cst(struct acpicpu_softc *sc)
{
	struct acpi_cstate *cx;
	int i;

	if (!SLIST_EMPTY(&sc->sc_cstates)) {
		printf(":");

		i = 0;
		SLIST_FOREACH(cx, &sc->sc_cstates, link) {
			if (i++)
				printf(",");
			acpicpu_print_one_cst(cx);
		}
	}
}


int
acpicpu_match(struct device *parent, void *match, void *aux)
{
	struct acpi_attach_args	*aa = aux;
	struct cfdata		*cf = match;
	struct acpi_softc	*acpi = (struct acpi_softc *)parent;

	if (acpi_matchhids(aa, acpicpu_hids, cf->cf_driver->cd_name) &&
	    aa->aaa_node && aa->aaa_node->value &&
	    aa->aaa_node->value->type == AML_OBJTYPE_DEVICE) {
		/*
		 * Record that we've seen a Device() CPU object,
		 * so we won't attach any Processor() nodes.
		 */
		acpi->sc_skip_processor = 1;
		return (1);
	}

	/* sanity */
	if (aa->aaa_name == NULL ||
	    strcmp(aa->aaa_name, cf->cf_driver->cd_name) != 0 ||
	    aa->aaa_table != NULL)
		return (0);

	return (1);
}

void
acpicpu_attach(struct device *parent, struct device *self, void *aux)
{
	struct acpicpu_softc	*sc = (struct acpicpu_softc *)self;
	struct acpi_attach_args *aa = aux;
	struct aml_value	res;
	int64_t			uid;
	int			i;
	uint32_t		status = 0;
	CPU_INFO_ITERATOR	cii;
	struct cpu_info		*ci;

	sc->sc_acpi = (struct acpi_softc *)parent;
	sc->sc_devnode = aa->aaa_node;

	SLIST_INIT(&sc->sc_cstates);

	if (aml_evalinteger(sc->sc_acpi, sc->sc_devnode,
	    "_UID", 0, NULL, &uid) == 0)
		sc->sc_cpu = uid;

	if (aml_evalnode(sc->sc_acpi, sc->sc_devnode, 0, NULL, &res) == 0) {
		if (res.type == AML_OBJTYPE_PROCESSOR) {
			sc->sc_cpu = res.v_processor.proc_id;
			sc->sc_pblk_addr = res.v_processor.proc_addr;
			sc->sc_pblk_len = res.v_processor.proc_len;
		}
		aml_freevalue(&res);
	}
	sc->sc_duty_off = sc->sc_acpi->sc_fadt->duty_offset;
	sc->sc_duty_wid = sc->sc_acpi->sc_fadt->duty_width;

	/* link in the matching cpu_info */
	CPU_INFO_FOREACH(cii, ci)
		if (ci->ci_acpi_proc_id == sc->sc_cpu) {
			ci->ci_acpicpudev = self;
			sc->sc_ci = ci;
			break;
		}
	if (ci == NULL) {
		printf(": no cpu matching ACPI ID %d\n", sc->sc_cpu);
		return;
	}

	sc->sc_prev_sleep = 1000000;

	acpicpu_set_pdc(sc);

	if (!valid_throttle(sc->sc_duty_off, sc->sc_duty_wid, sc->sc_pblk_addr))
		sc->sc_flags |= FLAGS_NOTHROTTLE;
#ifdef ACPI_DEBUG
	printf(": %s: ", sc->sc_devnode->name);
	printf("\n: hdr:%x pblk:%x,%x duty:%x,%x pstate:%x "
	    "(%ld throttling states)\n", sc->sc_acpi->sc_fadt->hdr_revision,
	    sc->sc_pblk_addr, sc->sc_pblk_len, sc->sc_duty_off,
	    sc->sc_duty_wid, sc->sc_acpi->sc_fadt->pstate_cnt,
	    CPU_MAXSTATE(sc));
#endif

	/* Get C-States from _CST or FADT */
	if (acpicpu_getcst(sc) || SLIST_EMPTY(&sc->sc_cstates))
		acpicpu_getcst_from_fadt(sc);
	else {
		/* Notify BIOS we use _CST objects */
		if (sc->sc_acpi->sc_fadt->cst_cnt) {
			acpi_write_pmreg(sc->sc_acpi, ACPIREG_SMICMD, 0,
			    sc->sc_acpi->sc_fadt->cst_cnt);
		}
	}
	if (!SLIST_EMPTY(&sc->sc_cstates)) {
		extern uint32_t acpi_force_bm;

		cpu_idle_cycle_fcn = &acpicpu_idle;

		/*
		 * C3 (and maybe C2?) needs BM_RLD to be set to
		 * wake the system
		 */
		if (SLIST_FIRST(&sc->sc_cstates)->state > 1 &&
		    acpi_force_bm == 0) {
			uint16_t en = acpi_read_pmreg(sc->sc_acpi,
			    ACPIREG_PM1_CNT, 0);
			if ((en & ACPI_PM1_BM_RLD) == 0) {
				acpi_write_pmreg(sc->sc_acpi, ACPIREG_PM1_CNT,
				    0, en | ACPI_PM1_BM_RLD);
				acpi_force_bm = ACPI_PM1_BM_RLD;
			}
		}
	}

	if (acpicpu_getpss(sc)) {
		sc->sc_flags |= FLAGS_NOPSS;
	} else {
#ifdef ACPI_DEBUG
		for (i = 0; i < sc->sc_pss_len; i++) {
			dnprintf(20, "%d %d %d %d %d %d\n",
			    sc->sc_pss[i].pss_core_freq,
			    sc->sc_pss[i].pss_power,
			    sc->sc_pss[i].pss_trans_latency,
			    sc->sc_pss[i].pss_bus_latency,
			    sc->sc_pss[i].pss_ctrl,
			    sc->sc_pss[i].pss_status);
		}
		dnprintf(20, "\n");
#endif
		if (sc->sc_pss_len == 0) {
			/* this should never happen */
			printf("%s: invalid _PSS length\n", DEVNAME(sc));
			sc->sc_flags |= FLAGS_NOPSS;
		}

		acpicpu_getppc(sc);
		if (acpicpu_getpct(sc))
			sc->sc_flags |= FLAGS_NOPCT;
		else if (sc->sc_pss_len > 0) {
			/* Notify BIOS we are handling p-states */
			if (sc->sc_acpi->sc_fadt->pstate_cnt) {
				acpi_write_pmreg(sc->sc_acpi, ACPIREG_SMICMD,
				    0, sc->sc_acpi->sc_fadt->pstate_cnt);
			}

			aml_register_notify(sc->sc_devnode, NULL,
			    acpicpu_notify, sc, ACPIDEV_NOPOLL);

			acpi_gasio(sc->sc_acpi, ACPI_IOREAD,
			    sc->sc_pct.pct_status.grd_gas.address_space_id,
			    sc->sc_pct.pct_status.grd_gas.address,
			    sc->sc_pct_stat_as, sc->sc_pct_stat_as, &status);
			sc->sc_level = (100 / sc->sc_pss_len) *
			    (sc->sc_pss_len - status);
			dnprintf(20, "%s: cpu index %d, percentage %d\n",
			    DEVNAME(sc), status, sc->sc_level);
			if (setperf_prio < 30) {
				cpu_setperf = acpicpu_setperf;
				acpicpu_set_notify(acpicpu_setperf_ppc_change);
				setperf_prio = 30;
				acpi_hasprocfvs = 1;
			}
		}
	}

	/*
	 * Nicely enumerate what power management capabilities
	 * ACPI CPU provides.
	 */
	acpicpu_print_cst(sc);
	if (!(sc->sc_flags & (FLAGS_NOPSS | FLAGS_NOPCT)) ||
	    !(sc->sc_flags & FLAGS_NOPSS)) {
		printf("%c ", SLIST_EMPTY(&sc->sc_cstates) ? ':' : ',');

		/*
		 * If acpicpu is itself providing the capability to transition
		 * states, enumerate them in the fashion that est and powernow
		 * would.
		 */
		if (!(sc->sc_flags & (FLAGS_NOPSS | FLAGS_NOPCT))) {
			printf("FVS, ");
			for (i = 0; i < sc->sc_pss_len - 1; i++)
				printf("%d, ", sc->sc_pss[i].pss_core_freq);
			printf("%d MHz", sc->sc_pss[i].pss_core_freq);
		} else
			printf("PSS");
	}

	printf("\n");
}

int
acpicpu_getppc(struct acpicpu_softc *sc)
{
	struct aml_value	res;

	sc->sc_ppc = 0;

	if (aml_evalname(sc->sc_acpi, sc->sc_devnode, "_PPC", 0, NULL, &res)) {
		dnprintf(10, "%s: no _PPC\n", DEVNAME(sc));
		return (1);
	}

	sc->sc_ppc = aml_val2int(&res);
	dnprintf(10, "%s: _PPC: %d\n", DEVNAME(sc), sc->sc_ppc);
	aml_freevalue(&res);

	return (0);
}

int
acpicpu_getpct(struct acpicpu_softc *sc)
{
	struct aml_value	res;
	int			rv = 1;

	if (aml_evalname(sc->sc_acpi, sc->sc_devnode, "_PCT", 0, NULL, &res)) {
		dnprintf(20, "%s: no _PCT\n", DEVNAME(sc));
		return (1);
	}

	if (res.length != 2) {
		dnprintf(20, "%s: %s: invalid _PCT length\n", DEVNAME(sc),
		    sc->sc_devnode->name);
		return (1);
	}

	memcpy(&sc->sc_pct.pct_ctrl, res.v_package[0]->v_buffer,
	    sizeof sc->sc_pct.pct_ctrl);
	if (sc->sc_pct.pct_ctrl.grd_gas.address_space_id ==
	    GAS_FUNCTIONAL_FIXED) {
		dnprintf(20, "CTRL GASIO is functional fixed hardware.\n");
		goto ffh;
	}

	memcpy(&sc->sc_pct.pct_status, res.v_package[1]->v_buffer,
	    sizeof sc->sc_pct.pct_status);
	if (sc->sc_pct.pct_status.grd_gas.address_space_id ==
	    GAS_FUNCTIONAL_FIXED) {
		dnprintf(20, "STATUS GASIO is functional fixed hardware.\n");
		goto ffh;
	}

	dnprintf(10, "_PCT(ctrl) : %02x %04x %02x %02x %02x %02x %016llx\n",
	    sc->sc_pct.pct_ctrl.grd_descriptor,
	    sc->sc_pct.pct_ctrl.grd_length,
	    sc->sc_pct.pct_ctrl.grd_gas.address_space_id,
	    sc->sc_pct.pct_ctrl.grd_gas.register_bit_width,
	    sc->sc_pct.pct_ctrl.grd_gas.register_bit_offset,
	    sc->sc_pct.pct_ctrl.grd_gas.access_size,
	    sc->sc_pct.pct_ctrl.grd_gas.address);

	dnprintf(10, "_PCT(status): %02x %04x %02x %02x %02x %02x %016llx\n",
	    sc->sc_pct.pct_status.grd_descriptor,
	    sc->sc_pct.pct_status.grd_length,
	    sc->sc_pct.pct_status.grd_gas.address_space_id,
	    sc->sc_pct.pct_status.grd_gas.register_bit_width,
	    sc->sc_pct.pct_status.grd_gas.register_bit_offset,
	    sc->sc_pct.pct_status.grd_gas.access_size,
	    sc->sc_pct.pct_status.grd_gas.address);

	/* if not set, assume single 32 bit access */
	sc->sc_pct_stat_as = sc->sc_pct.pct_status.grd_gas.register_bit_width
	    / 8;
	if (sc->sc_pct_stat_as == 0)
		sc->sc_pct_stat_as = 4;
	sc->sc_pct_ctrl_as = sc->sc_pct.pct_ctrl.grd_gas.register_bit_width / 8;
	if (sc->sc_pct_ctrl_as == 0)
		sc->sc_pct_ctrl_as = 4;
	sc->sc_pct_stat_len = sc->sc_pct.pct_status.grd_gas.access_size;
	if (sc->sc_pct_stat_len == 0)
		sc->sc_pct_stat_len = sc->sc_pct_stat_as;
	sc->sc_pct_ctrl_len = sc->sc_pct.pct_ctrl.grd_gas.access_size;
	if (sc->sc_pct_ctrl_len == 0)
		sc->sc_pct_ctrl_len = sc->sc_pct_ctrl_as;

	rv = 0;
ffh:
	aml_freevalue(&res);
	return (rv);
}

int
acpicpu_getpss(struct acpicpu_softc *sc)
{
	struct aml_value	res;
	int			i, c, cf;

	if (aml_evalname(sc->sc_acpi, sc->sc_devnode, "_PSS", 0, NULL, &res)) {
		dprintf("%s: no _PSS\n", DEVNAME(sc));
		return (1);
	}

	free(sc->sc_pss, M_DEVBUF, sc->sc_pssfulllen);

	sc->sc_pss = mallocarray(res.length, sizeof(*sc->sc_pss), M_DEVBUF,
	    M_WAITOK | M_ZERO);
	sc->sc_pssfulllen = res.length * sizeof(*sc->sc_pss);

	c = 0;
	for (i = 0; i < res.length; i++) {
		cf = aml_val2int(res.v_package[i]->v_package[0]);

		/* This heuristic comes from FreeBSD's
		 * dev/acpica/acpi_perf.c to weed out invalid PSS entries.
		 */
		if (cf == sc->sc_pss[c].pss_core_freq) {
			printf("%s: struck PSS entry, core frequency equals "
			    "last\n", sc->sc_dev.dv_xname);
			continue;
		}

		if (cf == 0xFFFF || cf == 0x9999 || cf == 99999 || cf == 0) {
			printf("%s: struck PSS entry, inappropriate core "
			    "frequency value\n", sc->sc_dev.dv_xname);
			continue;
		}

		sc->sc_pss[c].pss_core_freq = cf;
		sc->sc_pss[c].pss_power = aml_val2int(
		    res.v_package[i]->v_package[1]);
		sc->sc_pss[c].pss_trans_latency = aml_val2int(
		    res.v_package[i]->v_package[2]);
		sc->sc_pss[c].pss_bus_latency = aml_val2int(
		    res.v_package[i]->v_package[3]);
		sc->sc_pss[c].pss_ctrl = aml_val2int(
		    res.v_package[i]->v_package[4]);
		sc->sc_pss[c].pss_status = aml_val2int(
		    res.v_package[i]->v_package[5]);
		c++;
	}
	sc->sc_pss_len = c;

	aml_freevalue(&res);

	return (0);
}

int
acpicpu_fetch_pss(struct acpicpu_pss **pss)
{
	struct acpicpu_softc	*sc;

	/*
	 * XXX: According to the ACPI spec, in an SMP system all processors
	 * are supposed to support the same states.  For now we pray
	 * the bios ensures this...
	 */

	sc = (struct acpicpu_softc *)cpu_info_primary.ci_acpicpudev;
	if (!sc)
		return 0;
	*pss = sc->sc_pss;

	return (sc->sc_pss_len);
}

int
acpicpu_notify(struct aml_node *node, int notify_type, void *arg)
{
	struct acpicpu_softc	*sc = arg;

	dnprintf(10, "acpicpu_notify: %.2x %s\n", notify_type,
	    sc->sc_devnode->name);

	switch (notify_type) {
	case 0x80:	/* _PPC changed, retrieve new values */
		acpicpu_getppc(sc);
		acpicpu_getpss(sc);
		if (sc->sc_notify)
			sc->sc_notify(sc->sc_pss, sc->sc_pss_len);
		break;

	case 0x81:	/* _CST changed, retrieve new values */
		acpicpu_getcst(sc);
		printf("%s: notify", DEVNAME(sc));
		acpicpu_print_cst(sc);
		printf("\n");
		break;

	default:
		printf("%s: unhandled cpu event %x\n", DEVNAME(sc),
		    notify_type);
		break;
	}

	return (0);
}

void
acpicpu_set_notify(void (*func)(struct acpicpu_pss *, int))
{
	struct acpicpu_softc	*sc;

	sc = (struct acpicpu_softc *)cpu_info_primary.ci_acpicpudev;
	if (sc != NULL)
		sc->sc_notify = func;
}

void
acpicpu_setperf_ppc_change(struct acpicpu_pss *pss, int npss)
{
	struct acpicpu_softc	*sc;

	sc = (struct acpicpu_softc *)cpu_info_primary.ci_acpicpudev;

	if (sc != NULL)
		cpu_setperf(sc->sc_level);
}

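/*
 * acpicpu_setperf() maps a performance level in percent (0-100) onto an
 * index into the _PSS table, biased by _PPC.  Worked example with assumed
 * numbers (not from any particular machine): with sc_pss_len == 8 and
 * sc_ppc == 6, len = 6, so level 100 gives idx = 5 - 6 = -1, clamped to 0,
 * then shifted by 8 - 6 to index 2 (the fastest entry this code will pick
 * at that _PPC value, assuming _PSS is ordered fastest-first per ACPI);
 * level 0 gives idx = 5 + 2 = 7, the slowest entry.
 */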
void
acpicpu_setperf(int level)
{
	struct acpicpu_softc	*sc;
	struct acpicpu_pss	*pss = NULL;
	int			idx, len;
	uint32_t		status = 0;

	sc = (struct acpicpu_softc *)curcpu()->ci_acpicpudev;

	dnprintf(10, "%s: acpicpu setperf level %d\n",
	    sc->sc_devnode->name, level);

	if (level < 0 || level > 100) {
		dnprintf(10, "%s: acpicpu setperf illegal percentage\n",
		    sc->sc_devnode->name);
		return;
	}

	/*
	 * XXX this should be handled more gracefully and it needs to also do
	 * the duty cycle method instead of pss exclusively
	 */
	if (sc->sc_flags & FLAGS_NOPSS || sc->sc_flags & FLAGS_NOPCT) {
		dnprintf(10, "%s: acpicpu no _PSS or _PCT\n",
		    sc->sc_devnode->name);
		return;
	}

	if (sc->sc_ppc)
		len = sc->sc_ppc;
	else
		len = sc->sc_pss_len;
	idx = (len - 1) - (level / (100 / len));
	if (idx < 0)
		idx = 0;

	if (sc->sc_ppc)
		idx += sc->sc_pss_len - sc->sc_ppc;

	if (idx > sc->sc_pss_len)
		idx = sc->sc_pss_len - 1;

	dnprintf(10, "%s: acpicpu setperf index %d pss_len %d ppc %d\n",
	    sc->sc_devnode->name, idx, sc->sc_pss_len, sc->sc_ppc);

	pss = &sc->sc_pss[idx];

#ifdef ACPI_DEBUG
	/* keep this for now since we will need this for debug in the field */
	printf("0 status: %x %llx %u %u ctrl: %x %llx %u %u\n",
	    sc->sc_pct.pct_status.grd_gas.address_space_id,
	    sc->sc_pct.pct_status.grd_gas.address,
	    sc->sc_pct_stat_as, sc->sc_pct_stat_len,
	    sc->sc_pct.pct_ctrl.grd_gas.address_space_id,
	    sc->sc_pct.pct_ctrl.grd_gas.address,
	    sc->sc_pct_ctrl_as, sc->sc_pct_ctrl_len);
#endif
	acpi_gasio(sc->sc_acpi, ACPI_IOREAD,
	    sc->sc_pct.pct_status.grd_gas.address_space_id,
	    sc->sc_pct.pct_status.grd_gas.address, sc->sc_pct_stat_as,
	    sc->sc_pct_stat_len, &status);
	dnprintf(20, "1 status: %u <- %u\n", status, pss->pss_status);

	/* Are we already at the requested frequency? */
	if (status == pss->pss_status)
		return;

	acpi_gasio(sc->sc_acpi, ACPI_IOWRITE,
	    sc->sc_pct.pct_ctrl.grd_gas.address_space_id,
	    sc->sc_pct.pct_ctrl.grd_gas.address, sc->sc_pct_ctrl_as,
	    sc->sc_pct_ctrl_len, &pss->pss_ctrl);
	dnprintf(20, "pss_ctrl: %x\n", pss->pss_ctrl);

	acpi_gasio(sc->sc_acpi, ACPI_IOREAD,
	    sc->sc_pct.pct_status.grd_gas.address_space_id,
	    sc->sc_pct.pct_status.grd_gas.address, sc->sc_pct_stat_as,
	    sc->sc_pct_stat_as, &status);
	dnprintf(20, "2 status: %d\n", status);

	/* Did the transition succeed? */
	if (status == pss->pss_status) {
		cpuspeed = pss->pss_core_freq;
		sc->sc_level = level;
	} else
		printf("%s: acpicpu setperf failed to alter frequency\n",
		    sc->sc_devnode->name);
}

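/*
 * Idle-loop C-state selection, installed as cpu_idle_cycle_fcn at attach
 * time: start from the first (deepest) state on the list and fall back
 * to shallower ones while a state is marked to be skipped or its wakeup
 * latency (times 3) exceeds the smoothed estimate of recent idle-period
 * length kept in sc_prev_sleep; additionally back off from C3-class
 * mwait states when PM1_STS reports recent bus-master activity.
 */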
void
acpicpu_idle(void)
{
	struct cpu_info *ci = curcpu();
	struct acpicpu_softc *sc = (struct acpicpu_softc *)ci->ci_acpicpudev;
	struct acpi_cstate *best, *cx;
	unsigned long itime;

	if (sc == NULL) {
		__asm volatile("sti");
		panic("null acpicpu");
	}

	/* possibly update the MWAIT_ONLY flag in cpu_info */
	if (sc->sc_flags & FLAGS_MWAIT_ONLY) {
		if ((ci->ci_mwait & MWAIT_ONLY) == 0)
			atomic_setbits_int(&ci->ci_mwait, MWAIT_ONLY);
	} else if (ci->ci_mwait & MWAIT_ONLY)
		atomic_clearbits_int(&ci->ci_mwait, MWAIT_ONLY);

	/*
	 * Find the first state with a latency we'll accept, ignoring
	 * states marked skippable
	 */
	best = cx = SLIST_FIRST(&sc->sc_cstates);
	while ((cx->flags & CST_FLAG_SKIP) ||
	    cx->latency * 3 > sc->sc_prev_sleep) {
		if ((cx = SLIST_NEXT(cx, link)) == NULL)
			break;
		best = cx;
	}

	if (best->state >= 3 &&
	    (best->flags & CST_FLAG_MWAIT_BM_AVOIDANCE) &&
	    acpi_read_pmreg(acpi_softc, ACPIREG_PM1_STS, 0) & ACPI_PM1_BM_STS) {
		/* clear it and back off */
		acpi_write_pmreg(acpi_softc, ACPIREG_PM1_STS, 0,
		    ACPI_PM1_BM_STS);
		while ((cx = SLIST_NEXT(cx, link)) != NULL) {
			if (cx->flags & CST_FLAG_SKIP)
				continue;
			if (cx->state < 3 ||
			    (cx->flags & CST_FLAG_MWAIT_BM_AVOIDANCE) == 0)
				break;
		}
		best = cx;
	}


	atomic_inc_long(&cst_stats[best->state]);

	itime = tick / 2;
	switch (best->method) {
	default:
	case CST_METH_HALT:
		__asm volatile("sti; hlt");
		break;

	case CST_METH_IO_HALT:
		inb((u_short)best->address);
		__asm volatile("sti; hlt");
		break;

	case CST_METH_MWAIT:
	    {
		struct timeval start, stop;
		unsigned int hints;

#ifdef __LP64__
		if ((read_rflags() & PSL_I) == 0)
			panic("idle with interrupts blocked!");
#else
		if ((read_eflags() & PSL_I) == 0)
			panic("idle with interrupts blocked!");
#endif

		/* something already queued? */
		if (!cpu_is_idle(ci))
			return;

		/*
		 * About to idle; setting the MWAIT_IN_IDLE bit tells
		 * cpu_unidle() that it can't be a no-op and tells cpu_kick()
		 * that it doesn't need to use an IPI.  We also set the
		 * MWAIT_KEEP_IDLING bit: those routines clear it to stop
		 * the mwait.  Once they're set, we do a final check of the
		 * queue, in case another cpu called setrunqueue() and added
		 * something to the queue and called cpu_unidle() between
		 * the check in sched_idle() and here.
		 */
		hints = (unsigned)best->address;
		microuptime(&start);
		atomic_setbits_int(&ci->ci_mwait, MWAIT_IDLING);
		if (cpu_is_idle(ci)) {
			/* intel errata AAI65: cflush before monitor */
			if (ci->ci_cflushsz != 0 &&
			    strcmp(cpu_vendor, "GenuineIntel") == 0) {
				membar_sync();
				clflush((unsigned long)&ci->ci_mwait);
				membar_sync();
			}

			monitor(&ci->ci_mwait, 0, 0);
			if ((ci->ci_mwait & MWAIT_IDLING) == MWAIT_IDLING)
				mwait(0, hints);
		}

		microuptime(&stop);
		timersub(&stop, &start, &stop);
		itime = stop.tv_sec * 1000000 + stop.tv_usec;

		/* done idling; let cpu_kick() know that an IPI is required */
		atomic_clearbits_int(&ci->ci_mwait, MWAIT_IDLING);
		break;
	    }

	case CST_METH_GAS_IO:
		inb((u_short)best->address);
		/* something harmless to give system time to change state */
		acpi_read_pmreg(acpi_softc, ACPIREG_PM1_STS, 0);
		break;

	}

	sc->sc_last_itime = itime;
	itime >>= 1;
	sc->sc_prev_sleep = (sc->sc_prev_sleep + (sc->sc_prev_sleep >> 1)
	    + itime) >> 1;
}