1 /* $OpenBSD: acpi.c,v 1.141 2009/07/23 01:38:16 cnst Exp $ */ 2 /* 3 * Copyright (c) 2005 Thorsten Lockert <tholo@sigmasoft.com> 4 * Copyright (c) 2005 Jordan Hargrave <jordan@openbsd.org> 5 * 6 * Permission to use, copy, modify, and distribute this software for any 7 * purpose with or without fee is hereby granted, provided that the above 8 * copyright notice and this permission notice appear in all copies. 9 * 10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 11 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 12 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 13 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 14 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 15 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 16 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 17 */ 18 19 #include <sys/param.h> 20 #include <sys/systm.h> 21 #include <sys/device.h> 22 #include <sys/malloc.h> 23 #include <sys/fcntl.h> 24 #include <sys/ioccom.h> 25 #include <sys/event.h> 26 #include <sys/signalvar.h> 27 #include <sys/proc.h> 28 #include <sys/kthread.h> 29 #include <sys/workq.h> 30 31 #include <machine/conf.h> 32 #include <machine/cpufunc.h> 33 #include <machine/bus.h> 34 35 #include <dev/pci/pcivar.h> 36 #include <dev/acpi/acpireg.h> 37 #include <dev/acpi/acpivar.h> 38 #include <dev/acpi/amltypes.h> 39 #include <dev/acpi/acpidev.h> 40 #include <dev/acpi/dsdt.h> 41 42 #include <dev/pci/pciidereg.h> 43 #include <dev/pci/pciidevar.h> 44 45 #include <machine/apmvar.h> 46 #define APMUNIT(dev) (minor(dev)&0xf0) 47 #define APMDEV(dev) (minor(dev)&0x0f) 48 #define APMDEV_NORMAL 0 49 #define APMDEV_CTL 8 50 51 #ifdef ACPI_DEBUG 52 int acpi_debug = 16; 53 #endif 54 int acpi_enabled; 55 int acpi_poll_enabled; 56 int acpi_hasprocfvs; 57 int acpi_thinkpad_enabled; 58 59 #define ACPIEN_RETRIES 15 60 61 void acpi_isr_thread(void *); 62 
void acpi_create_thread(void *); 63 64 int acpi_match(struct device *, void *, void *); 65 void acpi_attach(struct device *, struct device *, void *); 66 int acpi_submatch(struct device *, void *, void *); 67 int acpi_print(void *, const char *); 68 69 void acpi_map_pmregs(struct acpi_softc *); 70 71 int acpi_founddock(struct aml_node *, void *); 72 int acpi_foundpss(struct aml_node *, void *); 73 int acpi_foundhid(struct aml_node *, void *); 74 int acpi_foundec(struct aml_node *, void *); 75 int acpi_foundtmp(struct aml_node *, void *); 76 int acpi_foundprt(struct aml_node *, void *); 77 int acpi_foundprw(struct aml_node *, void *); 78 int acpi_foundvideo(struct aml_node *, void *); 79 int acpi_inidev(struct aml_node *, void *); 80 81 int acpi_loadtables(struct acpi_softc *, struct acpi_rsdp *); 82 void acpi_load_table(paddr_t, size_t, acpi_qhead_t *); 83 void acpi_load_dsdt(paddr_t, struct acpi_q **); 84 85 void acpi_init_states(struct acpi_softc *); 86 void acpi_init_gpes(struct acpi_softc *); 87 void acpi_init_pm(struct acpi_softc *); 88 89 void acpi_dev_sort(void); 90 void acpi_dev_free(void); 91 92 int acpi_foundide(struct aml_node *node, void *arg); 93 int acpiide_notify(struct aml_node *, int, void *); 94 95 void wdcattach(struct channel_softc *); 96 int wdcdetach(struct channel_softc *, int); 97 98 struct idechnl 99 { 100 struct acpi_softc *sc; 101 int64_t addr; 102 int64_t chnl; 103 int64_t sta; 104 }; 105 106 int is_ejectable_bay(struct aml_node *node); 107 int is_ata(struct aml_node *node); 108 int is_ejectable(struct aml_node *node); 109 110 #ifdef ACPI_SLEEP_ENABLED 111 void acpi_sleep_walk(struct acpi_softc *, int); 112 #endif /* ACPI_SLEEP_ENABLED */ 113 114 #ifndef SMALL_KERNEL 115 int acpi_add_device(struct aml_node *node, void *arg); 116 #endif /* SMALL_KERNEL */ 117 118 void acpi_enable_onegpe(struct acpi_softc *, int, int); 119 int acpi_gpe_level(struct acpi_softc *, int, void *); 120 int acpi_gpe_edge(struct acpi_softc *, int, void *); 121 122 
struct gpe_block *acpi_find_gpe(struct acpi_softc *, int); 123 124 #define ACPI_LOCK(sc) 125 #define ACPI_UNLOCK(sc) 126 127 /* XXX move this into dsdt softc at some point */ 128 extern struct aml_node aml_root; 129 130 /* XXX do we need this? */ 131 void acpi_filtdetach(struct knote *); 132 int acpi_filtread(struct knote *, long); 133 134 struct filterops acpiread_filtops = { 135 1, NULL, acpi_filtdetach, acpi_filtread 136 }; 137 138 struct cfattach acpi_ca = { 139 sizeof(struct acpi_softc), acpi_match, acpi_attach 140 }; 141 142 struct cfdriver acpi_cd = { 143 NULL, "acpi", DV_DULL 144 }; 145 146 struct acpi_softc *acpi_softc; 147 int acpi_evindex; 148 149 #define acpi_bus_space_map _bus_space_map 150 #define acpi_bus_space_unmap _bus_space_unmap 151 152 #define pch(x) (((x)>=' ' && (x)<='z') ? (x) : ' ') 153 154 #if 0 155 void 156 acpi_delay(struct acpi_softc *sc, int64_t uSecs) 157 { 158 /* XXX this needs to become a tsleep later */ 159 delay(uSecs); 160 } 161 #endif 162 163 int 164 acpi_gasio(struct acpi_softc *sc, int iodir, int iospace, uint64_t address, 165 int access_size, int len, void *buffer) 166 { 167 u_int8_t *pb; 168 bus_space_handle_t ioh; 169 struct acpi_mem_map mh; 170 pci_chipset_tag_t pc; 171 pcitag_t tag; 172 bus_addr_t ioaddr; 173 int reg, idx, ival, sval; 174 175 dnprintf(50, "gasio: %.2x 0x%.8llx %s\n", 176 iospace, address, (iodir == ACPI_IOWRITE) ? 
"write" : "read"); 177 178 pb = (u_int8_t *)buffer; 179 switch (iospace) { 180 case GAS_SYSTEM_MEMORY: 181 /* copy to/from system memory */ 182 acpi_map(address, len, &mh); 183 if (iodir == ACPI_IOREAD) 184 memcpy(buffer, mh.va, len); 185 else 186 memcpy(mh.va, buffer, len); 187 acpi_unmap(&mh); 188 break; 189 190 case GAS_SYSTEM_IOSPACE: 191 /* read/write from I/O registers */ 192 ioaddr = address; 193 if (acpi_bus_space_map(sc->sc_iot, ioaddr, len, 0, &ioh) != 0) { 194 printf("unable to map iospace\n"); 195 return (-1); 196 } 197 for (reg = 0; reg < len; reg += access_size) { 198 if (iodir == ACPI_IOREAD) { 199 switch (access_size) { 200 case 1: 201 *(uint8_t *)(pb+reg) = bus_space_read_1( 202 sc->sc_iot, ioh, reg); 203 dnprintf(80, "os_in8(%llx) = %x\n", 204 reg+address, *(uint8_t *)(pb+reg)); 205 break; 206 case 2: 207 *(uint16_t *)(pb+reg) = bus_space_read_2( 208 sc->sc_iot, ioh, reg); 209 dnprintf(80, "os_in16(%llx) = %x\n", 210 reg+address, *(uint16_t *)(pb+reg)); 211 break; 212 case 4: 213 *(uint32_t *)(pb+reg) = bus_space_read_4( 214 sc->sc_iot, ioh, reg); 215 break; 216 default: 217 printf("rdio: invalid size %d\n", access_size); 218 break; 219 } 220 } else { 221 switch (access_size) { 222 case 1: 223 bus_space_write_1(sc->sc_iot, ioh, reg, 224 *(uint8_t *)(pb+reg)); 225 dnprintf(80, "os_out8(%llx,%x)\n", 226 reg+address, *(uint8_t *)(pb+reg)); 227 break; 228 case 2: 229 bus_space_write_2(sc->sc_iot, ioh, reg, 230 *(uint16_t *)(pb+reg)); 231 dnprintf(80, "os_out16(%llx,%x)\n", 232 reg+address, *(uint16_t *)(pb+reg)); 233 break; 234 case 4: 235 bus_space_write_4(sc->sc_iot, ioh, reg, 236 *(uint32_t *)(pb+reg)); 237 break; 238 default: 239 printf("wrio: invalid size %d\n", access_size); 240 break; 241 } 242 } 243 244 /* During autoconf some devices are still gathering 245 * information. Delay here to give them an opportunity 246 * to finish. During runtime we simply need to ignore 247 * transient values. 
248 */ 249 if (cold) 250 delay(10000); 251 } 252 acpi_bus_space_unmap(sc->sc_iot, ioh, len, &ioaddr); 253 break; 254 255 case GAS_PCI_CFG_SPACE: 256 /* format of address: 257 * bits 00..15 = register 258 * bits 16..31 = function 259 * bits 32..47 = device 260 * bits 48..63 = bus 261 */ 262 pc = NULL; 263 tag = pci_make_tag(pc, 264 ACPI_PCI_BUS(address), ACPI_PCI_DEV(address), 265 ACPI_PCI_FN(address)); 266 267 /* XXX: This is ugly. read-modify-write does a byte at a time */ 268 reg = ACPI_PCI_REG(address); 269 for (idx = reg; idx < reg+len; idx++) { 270 ival = pci_conf_read(pc, tag, idx & ~0x3); 271 if (iodir == ACPI_IOREAD) { 272 *pb = ival >> (8 * (idx & 0x3)); 273 } else { 274 sval = *pb; 275 ival &= ~(0xFF << (8* (idx & 0x3))); 276 ival |= sval << (8* (idx & 0x3)); 277 pci_conf_write(pc, tag, idx & ~0x3, ival); 278 } 279 pb++; 280 } 281 break; 282 case GAS_EMBEDDED: 283 if (sc->sc_ec == NULL) 284 break; 285 #ifndef SMALL_KERNEL 286 if (iodir == ACPI_IOREAD) 287 acpiec_read(sc->sc_ec, (u_int8_t)address, len, buffer); 288 else 289 acpiec_write(sc->sc_ec, (u_int8_t)address, len, buffer); 290 #endif 291 break; 292 } 293 return (0); 294 } 295 296 int 297 acpi_inidev(struct aml_node *node, void *arg) 298 { 299 struct acpi_softc *sc = (struct acpi_softc *)arg; 300 int64_t st; 301 302 /* 303 * Per the ACPI spec 6.5.1, only run _INI when device is there or 304 * when there is no _STA. We terminate the tree walk (with return 1) 305 * early if necessary. 
306 */ 307 308 /* Evaluate _STA to decide _INI fate and walk fate */ 309 if (aml_evalinteger(sc, node->parent, "_STA", 0, NULL, &st)) 310 st = STA_PRESENT | STA_ENABLED | STA_DEV_OK | 0x1000; 311 312 /* Evaluate _INI if we are present */ 313 if (st & STA_PRESENT) 314 aml_evalnode(sc, node, 0, NULL, NULL); 315 316 /* If we are functioning, we walk/search our children */ 317 if(st & STA_DEV_OK) 318 return 0; 319 320 /* If we are not enabled, or not present, terminate search */ 321 if (!(st & (STA_PRESENT|STA_ENABLED))) 322 return 1; 323 324 /* Default just continue search */ 325 return 0; 326 } 327 328 int 329 acpi_foundprt(struct aml_node *node, void *arg) 330 { 331 struct acpi_softc *sc = (struct acpi_softc *)arg; 332 struct device *self = (struct device *)arg; 333 struct acpi_attach_args aaa; 334 int64_t st = 0; 335 336 dnprintf(10, "found prt entry: %s\n", node->parent->name); 337 338 /* Evaluate _STA to decide _PRT fate and walk fate */ 339 if (aml_evalinteger(sc, node->parent, "_STA", 0, NULL, &st)) 340 st = STA_PRESENT | STA_ENABLED | STA_DEV_OK | 0x1000; 341 342 if (st & STA_PRESENT) { 343 memset(&aaa, 0, sizeof(aaa)); 344 aaa.aaa_iot = sc->sc_iot; 345 aaa.aaa_memt = sc->sc_memt; 346 aaa.aaa_node = node; 347 aaa.aaa_name = "acpiprt"; 348 349 config_found(self, &aaa, acpi_print); 350 } 351 352 /* If we are functioning, we walk/search our children */ 353 if(st & STA_DEV_OK) 354 return 0; 355 356 /* If we are not enabled, or not present, terminate search */ 357 if (!(st & (STA_PRESENT|STA_ENABLED))) 358 return 1; 359 360 /* Default just continue search */ 361 return 0; 362 } 363 364 int 365 is_ata(struct aml_node *node) 366 { 367 return (aml_searchname(node, "_GTM") != NULL || 368 aml_searchname(node, "_GTF") != NULL || 369 aml_searchname(node, "_STM") != NULL || 370 aml_searchname(node, "_SDD") != NULL); 371 } 372 373 int 374 is_ejectable(struct aml_node *node) 375 { 376 return (aml_searchname(node, "_EJ0") != NULL); 377 } 378 379 int 380 
is_ejectable_bay(struct aml_node *node) 381 { 382 return ((is_ata(node) || is_ata(node->parent)) && is_ejectable(node)); 383 } 384 385 int 386 acpiide_notify(struct aml_node *node, int ntype, void *arg) 387 { 388 struct idechnl *ide = arg; 389 struct acpi_softc *sc = ide->sc; 390 struct pciide_softc *wsc; 391 struct device *dev; 392 int b,d,f; 393 int64_t sta; 394 395 if (aml_evalinteger(sc, node, "_STA", 0, NULL, &sta) != 0) 396 return (0); 397 398 dnprintf(10, "IDE notify! %s %d status:%llx\n", aml_nodename(node), 399 ntype, sta); 400 401 /* Walk device list looking for IDE device match */ 402 TAILQ_FOREACH(dev, &alldevs, dv_list) { 403 if (strncmp(dev->dv_xname, "pciide", 6)) 404 continue; 405 406 wsc = (struct pciide_softc *)dev; 407 pci_decompose_tag(NULL, wsc->sc_tag, &b, &d, &f); 408 if (b != ACPI_PCI_BUS(ide->addr) || 409 d != ACPI_PCI_DEV(ide->addr) || 410 f != ACPI_PCI_FN(ide->addr)) 411 continue; 412 dnprintf(10, "Found pciide: %s %x.%x.%x channel:%llx\n", 413 dev->dv_xname, b,d,f, ide->chnl); 414 415 if (sta == 0 && ide->sta) 416 wdcdetach( 417 &wsc->pciide_channels[ide->chnl].wdc_channel, 0); 418 else if (sta && !ide->sta) 419 wdcattach( 420 &wsc->pciide_channels[ide->chnl].wdc_channel); 421 ide->sta = sta; 422 } 423 return (0); 424 } 425 426 int 427 acpi_foundide(struct aml_node *node, void *arg) 428 { 429 struct acpi_softc *sc = arg; 430 struct aml_node *pp; 431 struct idechnl *ide; 432 union amlpci_t pi; 433 int lvl; 434 435 /* Check if this is an ejectable bay */ 436 if (!is_ejectable_bay(node)) 437 return (0); 438 439 ide = malloc(sizeof(struct idechnl), M_DEVBUF, M_NOWAIT | M_ZERO); 440 ide->sc = sc; 441 442 /* GTM/GTF can be at 2/3 levels: pciX.ideX.channelX[.driveX] */ 443 lvl = 0; 444 for (pp=node->parent; pp; pp=pp->parent) { 445 lvl++; 446 if (aml_searchname(pp, "_HID")) 447 break; 448 } 449 450 /* Get PCI address and channel */ 451 if (lvl == 3) { 452 aml_evalinteger(sc, node->parent, "_ADR", 0, NULL, 453 &ide->chnl); 454 
aml_rdpciaddr(node->parent->parent, &pi); 455 ide->addr = pi.addr; 456 } else if (lvl == 4) { 457 aml_evalinteger(sc, node->parent->parent, "_ADR", 0, NULL, 458 &ide->chnl); 459 aml_rdpciaddr(node->parent->parent->parent, &pi); 460 ide->addr = pi.addr; 461 } 462 dnprintf(10, "%s %llx channel:%llx\n", 463 aml_nodename(node), ide->addr, ide->chnl); 464 465 aml_evalinteger(sc, node, "_STA", 0, NULL, &ide->sta); 466 dnprintf(10, "Got Initial STA: %llx\n", ide->sta); 467 468 aml_register_notify(node, "acpiide", acpiide_notify, ide, 0); 469 return (0); 470 } 471 472 int 473 acpi_match(struct device *parent, void *match, void *aux) 474 { 475 struct bios_attach_args *ba = aux; 476 struct cfdata *cf = match; 477 478 /* sanity */ 479 if (strcmp(ba->ba_name, cf->cf_driver->cd_name)) 480 return (0); 481 482 if (!acpi_probe(parent, cf, ba)) 483 return (0); 484 485 return (1); 486 } 487 488 void 489 acpi_attach(struct device *parent, struct device *self, void *aux) 490 { 491 struct bios_attach_args *ba = aux; 492 struct acpi_softc *sc = (struct acpi_softc *)self; 493 struct acpi_mem_map handle; 494 struct acpi_rsdp *rsdp; 495 struct acpi_q *entry; 496 struct acpi_dsdt *p_dsdt; 497 int idx; 498 #ifndef SMALL_KERNEL 499 struct acpi_wakeq *wentry; 500 struct device *dev; 501 struct acpi_ac *ac; 502 struct acpi_bat *bat; 503 #endif /* SMALL_KERNEL */ 504 paddr_t facspa; 505 506 sc->sc_iot = ba->ba_iot; 507 sc->sc_memt = ba->ba_memt; 508 509 if (acpi_map(ba->ba_acpipbase, sizeof(struct acpi_rsdp), &handle)) { 510 printf(": can't map memory\n"); 511 return; 512 } 513 514 rsdp = (struct acpi_rsdp *)handle.va; 515 sc->sc_revision = (int)rsdp->rsdp_revision; 516 printf(": rev %d", sc->sc_revision); 517 518 SIMPLEQ_INIT(&sc->sc_tables); 519 SIMPLEQ_INIT(&sc->sc_wakedevs); 520 521 #ifndef SMALL_KERNEL 522 sc->sc_note = malloc(sizeof(struct klist), M_DEVBUF, M_NOWAIT | M_ZERO); 523 if (sc->sc_note == NULL) { 524 printf(", can't allocate memory\n"); 525 acpi_unmap(&handle); 526 return; 527 } 
528 #endif /* SMALL_KERNEL */ 529 530 if (acpi_loadtables(sc, rsdp)) { 531 printf(", can't load tables\n"); 532 acpi_unmap(&handle); 533 return; 534 } 535 536 acpi_unmap(&handle); 537 538 /* 539 * Find the FADT 540 */ 541 SIMPLEQ_FOREACH(entry, &sc->sc_tables, q_next) { 542 if (memcmp(entry->q_table, FADT_SIG, 543 sizeof(FADT_SIG) - 1) == 0) { 544 sc->sc_fadt = entry->q_table; 545 break; 546 } 547 } 548 if (sc->sc_fadt == NULL) { 549 printf(", no FADT\n"); 550 return; 551 } 552 553 /* 554 * Check if we are able to enable ACPI control 555 */ 556 if (!sc->sc_fadt->smi_cmd || 557 (!sc->sc_fadt->acpi_enable && !sc->sc_fadt->acpi_disable)) { 558 printf(", ACPI control unavailable\n"); 559 return; 560 } 561 562 /* 563 * Set up a pointer to the firmware control structure 564 */ 565 if (sc->sc_fadt->hdr_revision < 3 || sc->sc_fadt->x_firmware_ctl == 0) 566 facspa = sc->sc_fadt->firmware_ctl; 567 else 568 facspa = sc->sc_fadt->x_firmware_ctl; 569 570 if (acpi_map(facspa, sizeof(struct acpi_facs), &handle)) 571 printf(" !FACS"); 572 else 573 sc->sc_facs = (struct acpi_facs *)handle.va; 574 575 acpi_enabled = 1; 576 577 /* Create opcode hashtable */ 578 aml_hashopcodes(); 579 580 /* Create Default AML objects */ 581 aml_create_defaultobjects(); 582 583 /* 584 * Load the DSDT from the FADT pointer -- use the 585 * extended (64-bit) pointer if it exists 586 */ 587 if (sc->sc_fadt->hdr_revision < 3 || sc->sc_fadt->x_dsdt == 0) 588 acpi_load_dsdt(sc->sc_fadt->dsdt, &entry); 589 else 590 acpi_load_dsdt(sc->sc_fadt->x_dsdt, &entry); 591 592 if (entry == NULL) 593 printf(" !DSDT"); 594 SIMPLEQ_INSERT_HEAD(&sc->sc_tables, entry, q_next); 595 596 p_dsdt = entry->q_table; 597 acpi_parse_aml(sc, p_dsdt->aml, p_dsdt->hdr_length - 598 sizeof(p_dsdt->hdr)); 599 600 /* Load SSDT's */ 601 SIMPLEQ_FOREACH(entry, &sc->sc_tables, q_next) { 602 if (memcmp(entry->q_table, SSDT_SIG, 603 sizeof(SSDT_SIG) - 1) == 0) { 604 p_dsdt = entry->q_table; 605 acpi_parse_aml(sc, p_dsdt->aml, 
p_dsdt->hdr_length - 606 sizeof(p_dsdt->hdr)); 607 } 608 } 609 610 /* Perform post-parsing fixups */ 611 aml_postparse(); 612 613 #ifndef SMALL_KERNEL 614 /* Find available sleeping states */ 615 acpi_init_states(sc); 616 617 /* Find available sleep/resume related methods. */ 618 acpi_init_pm(sc); 619 #endif /* SMALL_KERNEL */ 620 621 /* Map Power Management registers */ 622 acpi_map_pmregs(sc); 623 624 #ifndef SMALL_KERNEL 625 /* Initialize GPE handlers */ 626 acpi_init_gpes(sc); 627 628 /* some devices require periodic polling */ 629 timeout_set(&sc->sc_dev_timeout, acpi_poll, sc); 630 #endif /* SMALL_KERNEL */ 631 632 /* 633 * Take over ACPI control. Note that once we do this, we 634 * effectively tell the system that we have ownership of 635 * the ACPI hardware registers, and that SMI should leave 636 * them alone 637 * 638 * This may prevent thermal control on some systems where 639 * that actually does work 640 */ 641 acpi_write_pmreg(sc, ACPIREG_SMICMD, 0, sc->sc_fadt->acpi_enable); 642 idx = 0; 643 do { 644 if (idx++ > ACPIEN_RETRIES) { 645 printf(", can't enable ACPI\n"); 646 return; 647 } 648 } while (!(acpi_read_pmreg(sc, ACPIREG_PM1_CNT, 0) & ACPI_PM1_SCI_EN)); 649 650 printf("\n%s: tables", DEVNAME(sc)); 651 SIMPLEQ_FOREACH(entry, &sc->sc_tables, q_next) { 652 printf(" %.4s", entry->q_table); 653 } 654 printf("\n"); 655 656 #ifndef SMALL_KERNEL 657 /* Display wakeup devices and lowest S-state */ 658 printf("%s: wakeup devices", DEVNAME(sc)); 659 SIMPLEQ_FOREACH(wentry, &sc->sc_wakedevs, q_next) { 660 printf(" %.4s(S%d)", wentry->q_node->name, 661 wentry->q_state); 662 } 663 printf("\n"); 664 665 666 /* 667 * ACPI is enabled now -- attach timer 668 */ 669 { 670 struct acpi_attach_args aaa; 671 672 memset(&aaa, 0, sizeof(aaa)); 673 aaa.aaa_name = "acpitimer"; 674 aaa.aaa_iot = sc->sc_iot; 675 aaa.aaa_memt = sc->sc_memt; 676 #if 0 677 aaa.aaa_pcit = sc->sc_pcit; 678 aaa.aaa_smbust = sc->sc_smbust; 679 #endif 680 config_found(self, &aaa, acpi_print); 681 } 
682 #endif /* SMALL_KERNEL */ 683 684 /* 685 * Attach table-defined devices 686 */ 687 SIMPLEQ_FOREACH(entry, &sc->sc_tables, q_next) { 688 struct acpi_attach_args aaa; 689 690 memset(&aaa, 0, sizeof(aaa)); 691 aaa.aaa_iot = sc->sc_iot; 692 aaa.aaa_memt = sc->sc_memt; 693 #if 0 694 aaa.aaa_pcit = sc->sc_pcit; 695 aaa.aaa_smbust = sc->sc_smbust; 696 #endif 697 aaa.aaa_table = entry->q_table; 698 config_found_sm(self, &aaa, acpi_print, acpi_submatch); 699 } 700 701 acpi_softc = sc; 702 703 /* initialize runtime environment */ 704 aml_find_node(&aml_root, "_INI", acpi_inidev, sc); 705 706 /* attach pci interrupt routing tables */ 707 aml_find_node(&aml_root, "_PRT", acpi_foundprt, sc); 708 709 #ifndef SMALL_KERNEL 710 /* XXX EC needs to be attached first on some systems */ 711 aml_find_node(&aml_root, "_HID", acpi_foundec, sc); 712 713 aml_walknodes(&aml_root, AML_WALK_PRE, acpi_add_device, sc); 714 715 /* attach battery, power supply and button devices */ 716 aml_find_node(&aml_root, "_HID", acpi_foundhid, sc); 717 718 /* Attach IDE bay */ 719 aml_walknodes(&aml_root, AML_WALK_PRE, acpi_foundide, sc); 720 721 /* attach docks */ 722 aml_find_node(&aml_root, "_DCK", acpi_founddock, sc); 723 724 /* attach video only if this is not a stinkpad */ 725 if (!acpi_thinkpad_enabled) 726 aml_find_node(&aml_root, "_DOS", acpi_foundvideo, sc); 727 728 /* create list of devices we want to query when APM come in */ 729 SLIST_INIT(&sc->sc_ac); 730 SLIST_INIT(&sc->sc_bat); 731 TAILQ_FOREACH(dev, &alldevs, dv_list) { 732 if (!strncmp(dev->dv_xname, "acpiac", strlen("acpiac"))) { 733 ac = malloc(sizeof(*ac), M_DEVBUF, M_WAITOK | M_ZERO); 734 ac->aac_softc = (struct acpiac_softc *)dev; 735 SLIST_INSERT_HEAD(&sc->sc_ac, ac, aac_link); 736 } 737 if (!strncmp(dev->dv_xname, "acpibat", strlen("acpibat"))) { 738 bat = malloc(sizeof(*bat), M_DEVBUF, M_WAITOK | M_ZERO); 739 bat->aba_softc = (struct acpibat_softc *)dev; 740 SLIST_INSERT_HEAD(&sc->sc_bat, bat, aba_link); 741 } 742 } 743 744 /* 
Setup threads */ 745 sc->sc_thread = malloc(sizeof(struct acpi_thread), M_DEVBUF, M_WAITOK); 746 sc->sc_thread->sc = sc; 747 sc->sc_thread->running = 1; 748 749 acpi_attach_machdep(sc); 750 751 kthread_create_deferred(acpi_create_thread, sc); 752 #endif /* SMALL_KERNEL */ 753 } 754 755 int 756 acpi_submatch(struct device *parent, void *match, void *aux) 757 { 758 struct acpi_attach_args *aaa = (struct acpi_attach_args *)aux; 759 struct cfdata *cf = match; 760 761 if (aaa->aaa_table == NULL) 762 return (0); 763 return ((*cf->cf_attach->ca_match)(parent, match, aux)); 764 } 765 766 int 767 acpi_print(void *aux, const char *pnp) 768 { 769 struct acpi_attach_args *aa = aux; 770 771 if (pnp) { 772 if (aa->aaa_name) 773 printf("%s at %s", aa->aaa_name, pnp); 774 else 775 return (QUIET); 776 } 777 778 return (UNCONF); 779 } 780 781 int 782 acpi_loadtables(struct acpi_softc *sc, struct acpi_rsdp *rsdp) 783 { 784 struct acpi_mem_map hrsdt, handle; 785 struct acpi_table_header *hdr; 786 int i, ntables; 787 size_t len; 788 789 if (rsdp->rsdp_revision == 2 && rsdp->rsdp_xsdt) { 790 struct acpi_xsdt *xsdt; 791 792 if (acpi_map(rsdp->rsdp_xsdt, sizeof(*hdr), &handle)) { 793 printf("couldn't map rsdt\n"); 794 return (ENOMEM); 795 } 796 797 hdr = (struct acpi_table_header *)handle.va; 798 len = hdr->length; 799 acpi_unmap(&handle); 800 hdr = NULL; 801 802 acpi_map(rsdp->rsdp_xsdt, len, &hrsdt); 803 xsdt = (struct acpi_xsdt *)hrsdt.va; 804 805 ntables = (len - sizeof(struct acpi_table_header)) / 806 sizeof(xsdt->table_offsets[0]); 807 808 for (i = 0; i < ntables; i++) { 809 acpi_map(xsdt->table_offsets[i], sizeof(*hdr), &handle); 810 hdr = (struct acpi_table_header *)handle.va; 811 acpi_load_table(xsdt->table_offsets[i], hdr->length, 812 &sc->sc_tables); 813 acpi_unmap(&handle); 814 } 815 acpi_unmap(&hrsdt); 816 } else { 817 struct acpi_rsdt *rsdt; 818 819 if (acpi_map(rsdp->rsdp_rsdt, sizeof(*hdr), &handle)) { 820 printf("couldn't map rsdt\n"); 821 return (ENOMEM); 822 } 823 824 
hdr = (struct acpi_table_header *)handle.va; 825 len = hdr->length; 826 acpi_unmap(&handle); 827 hdr = NULL; 828 829 acpi_map(rsdp->rsdp_rsdt, len, &hrsdt); 830 rsdt = (struct acpi_rsdt *)hrsdt.va; 831 832 ntables = (len - sizeof(struct acpi_table_header)) / 833 sizeof(rsdt->table_offsets[0]); 834 835 for (i = 0; i < ntables; i++) { 836 acpi_map(rsdt->table_offsets[i], sizeof(*hdr), &handle); 837 hdr = (struct acpi_table_header *)handle.va; 838 acpi_load_table(rsdt->table_offsets[i], hdr->length, 839 &sc->sc_tables); 840 acpi_unmap(&handle); 841 } 842 acpi_unmap(&hrsdt); 843 } 844 845 return (0); 846 } 847 848 void 849 acpi_load_table(paddr_t pa, size_t len, acpi_qhead_t *queue) 850 { 851 struct acpi_mem_map handle; 852 struct acpi_q *entry; 853 854 entry = malloc(len + sizeof(struct acpi_q), M_DEVBUF, M_NOWAIT); 855 856 if (entry != NULL) { 857 if (acpi_map(pa, len, &handle)) { 858 free(entry, M_DEVBUF); 859 return; 860 } 861 memcpy(entry->q_data, handle.va, len); 862 entry->q_table = entry->q_data; 863 acpi_unmap(&handle); 864 SIMPLEQ_INSERT_TAIL(queue, entry, q_next); 865 } 866 } 867 868 void 869 acpi_load_dsdt(paddr_t pa, struct acpi_q **dsdt) 870 { 871 struct acpi_mem_map handle; 872 struct acpi_table_header *hdr; 873 size_t len; 874 875 if (acpi_map(pa, sizeof(*hdr), &handle)) 876 return; 877 hdr = (struct acpi_table_header *)handle.va; 878 len = hdr->length; 879 acpi_unmap(&handle); 880 881 *dsdt = malloc(len + sizeof(struct acpi_q), M_DEVBUF, M_NOWAIT); 882 883 if (*dsdt != NULL) { 884 if (acpi_map(pa, len, &handle)) { 885 free(*dsdt, M_DEVBUF); 886 *dsdt = NULL; 887 return; 888 } 889 memcpy((*dsdt)->q_data, handle.va, len); 890 (*dsdt)->q_table = (*dsdt)->q_data; 891 acpi_unmap(&handle); 892 } 893 } 894 895 int 896 acpiopen(dev_t dev, int flag, int mode, struct proc *p) 897 { 898 int error = 0; 899 #ifndef SMALL_KERNEL 900 struct acpi_softc *sc; 901 902 if (!acpi_cd.cd_ndevs || APMUNIT(dev) != 0 || 903 !(sc = acpi_cd.cd_devs[APMUNIT(dev)])) 904 return 
(ENXIO); 905 906 switch (APMDEV(dev)) { 907 case APMDEV_CTL: 908 if (!(flag & FWRITE)) { 909 error = EINVAL; 910 break; 911 } 912 break; 913 case APMDEV_NORMAL: 914 if (!(flag & FREAD) || (flag & FWRITE)) { 915 error = EINVAL; 916 break; 917 } 918 break; 919 default: 920 error = ENXIO; 921 break; 922 } 923 #else 924 error = ENXIO; 925 #endif 926 return (error); 927 } 928 929 int 930 acpiclose(dev_t dev, int flag, int mode, struct proc *p) 931 { 932 int error = 0; 933 #ifndef SMALL_KERNEL 934 struct acpi_softc *sc; 935 936 if (!acpi_cd.cd_ndevs || APMUNIT(dev) != 0 || 937 !(sc = acpi_cd.cd_devs[APMUNIT(dev)])) 938 return (ENXIO); 939 940 switch (APMDEV(dev)) { 941 case APMDEV_CTL: 942 case APMDEV_NORMAL: 943 break; 944 default: 945 error = ENXIO; 946 break; 947 } 948 #else 949 error = ENXIO; 950 #endif 951 return (error); 952 } 953 954 int 955 acpiioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p) 956 { 957 int error = 0; 958 #ifndef SMALL_KERNEL 959 struct acpi_softc *sc; 960 struct acpi_ac *ac; 961 struct acpi_bat *bat; 962 struct apm_power_info *pi = (struct apm_power_info *)data; 963 int bats; 964 unsigned int remaining, rem, minutes, rate; 965 966 if (!acpi_cd.cd_ndevs || APMUNIT(dev) != 0 || 967 !(sc = acpi_cd.cd_devs[APMUNIT(dev)])) 968 return (ENXIO); 969 970 ACPI_LOCK(sc); 971 /* fake APM */ 972 switch (cmd) { 973 #ifdef ACPI_SLEEP_ENABLED 974 case APM_IOC_STANDBY_REQ: 975 case APM_IOC_SUSPEND_REQ: 976 case APM_IOC_SUSPEND: 977 case APM_IOC_STANDBY: 978 workq_add_task(NULL, 0, (workq_fn)acpi_sleep_state, 979 acpi_softc, (void *)ACPI_STATE_S3); 980 break; 981 #endif /* ACPI_SLEEP_ENABLED */ 982 case APM_IOC_GETPOWER: 983 /* A/C */ 984 pi->ac_state = APM_AC_UNKNOWN; 985 SLIST_FOREACH(ac, &sc->sc_ac, aac_link) { 986 if (ac->aac_softc->sc_ac_stat == PSR_ONLINE) 987 pi->ac_state = APM_AC_ON; 988 else if (ac->aac_softc->sc_ac_stat == PSR_OFFLINE) 989 if (pi->ac_state == APM_AC_UNKNOWN) 990 pi->ac_state = APM_AC_OFF; 991 } 992 993 /* battery */ 
994 pi->battery_state = APM_BATT_UNKNOWN; 995 pi->battery_life = 0; 996 pi->minutes_left = 0; 997 bats = 0; 998 remaining = rem = 0; 999 minutes = 0; 1000 rate = 0; 1001 SLIST_FOREACH(bat, &sc->sc_bat, aba_link) { 1002 if (bat->aba_softc->sc_bat_present == 0) 1003 continue; 1004 1005 if (bat->aba_softc->sc_bif.bif_last_capacity == 0) 1006 continue; 1007 1008 bats++; 1009 rem = (bat->aba_softc->sc_bst.bst_capacity * 100) / 1010 bat->aba_softc->sc_bif.bif_last_capacity; 1011 if (rem > 100) 1012 rem = 100; 1013 remaining += rem; 1014 1015 if (bat->aba_softc->sc_bst.bst_rate == BST_UNKNOWN) 1016 continue; 1017 else if (bat->aba_softc->sc_bst.bst_rate > 1) 1018 rate = bat->aba_softc->sc_bst.bst_rate; 1019 1020 minutes += bat->aba_softc->sc_bst.bst_capacity; 1021 } 1022 1023 if (bats == 0) { 1024 pi->battery_state = APM_BATTERY_ABSENT; 1025 pi->battery_life = 0; 1026 pi->minutes_left = (unsigned int)-1; 1027 break; 1028 } 1029 1030 if (pi->ac_state == APM_AC_ON || rate == 0) 1031 pi->minutes_left = (unsigned int)-1; 1032 else 1033 pi->minutes_left = 100 * minutes / rate; 1034 1035 /* running on battery */ 1036 pi->battery_life = remaining / bats; 1037 if (pi->battery_life > 50) 1038 pi->battery_state = APM_BATT_HIGH; 1039 else if (pi->battery_life > 25) 1040 pi->battery_state = APM_BATT_LOW; 1041 else 1042 pi->battery_state = APM_BATT_CRITICAL; 1043 1044 break; 1045 1046 default: 1047 error = ENOTTY; 1048 } 1049 1050 ACPI_UNLOCK(sc); 1051 #else 1052 error = ENXIO; 1053 #endif /* SMALL_KERNEL */ 1054 return (error); 1055 } 1056 1057 void 1058 acpi_filtdetach(struct knote *kn) 1059 { 1060 #ifndef SMALL_KERNEL 1061 struct acpi_softc *sc = kn->kn_hook; 1062 1063 ACPI_LOCK(sc); 1064 SLIST_REMOVE(sc->sc_note, kn, knote, kn_selnext); 1065 ACPI_UNLOCK(sc); 1066 #endif 1067 } 1068 1069 int 1070 acpi_filtread(struct knote *kn, long hint) 1071 { 1072 #ifndef SMALL_KERNEL 1073 /* XXX weird kqueue_scan() semantics */ 1074 if (hint & !kn->kn_data) 1075 kn->kn_data = hint; 1076 #endif 
1077 return (1); 1078 } 1079 1080 int 1081 acpikqfilter(dev_t dev, struct knote *kn) 1082 { 1083 #ifndef SMALL_KERNEL 1084 struct acpi_softc *sc; 1085 1086 if (!acpi_cd.cd_ndevs || APMUNIT(dev) != 0 || 1087 !(sc = acpi_cd.cd_devs[APMUNIT(dev)])) 1088 return (ENXIO); 1089 1090 switch (kn->kn_filter) { 1091 case EVFILT_READ: 1092 kn->kn_fop = &acpiread_filtops; 1093 break; 1094 default: 1095 return (1); 1096 } 1097 1098 kn->kn_hook = sc; 1099 1100 ACPI_LOCK(sc); 1101 SLIST_INSERT_HEAD(sc->sc_note, kn, kn_selnext); 1102 ACPI_UNLOCK(sc); 1103 1104 return (0); 1105 #else 1106 return (1); 1107 #endif 1108 } 1109 1110 /* Read from power management register */ 1111 int 1112 acpi_read_pmreg(struct acpi_softc *sc, int reg, int offset) 1113 { 1114 bus_space_handle_t ioh; 1115 bus_size_t size, __size; 1116 int regval; 1117 1118 __size = 0; 1119 /* Special cases: 1A/1B blocks can be OR'ed together */ 1120 switch (reg) { 1121 case ACPIREG_PM1_EN: 1122 return (acpi_read_pmreg(sc, ACPIREG_PM1A_EN, offset) | 1123 acpi_read_pmreg(sc, ACPIREG_PM1B_EN, offset)); 1124 case ACPIREG_PM1_STS: 1125 return (acpi_read_pmreg(sc, ACPIREG_PM1A_STS, offset) | 1126 acpi_read_pmreg(sc, ACPIREG_PM1B_STS, offset)); 1127 case ACPIREG_PM1_CNT: 1128 return (acpi_read_pmreg(sc, ACPIREG_PM1A_CNT, offset) | 1129 acpi_read_pmreg(sc, ACPIREG_PM1B_CNT, offset)); 1130 case ACPIREG_GPE_STS: 1131 __size = 1; 1132 dnprintf(50, "read GPE_STS offset: %.2x %.2x %.2x\n", offset, 1133 sc->sc_fadt->gpe0_blk_len>>1, sc->sc_fadt->gpe1_blk_len>>1); 1134 if (offset < (sc->sc_fadt->gpe0_blk_len >> 1)) { 1135 reg = ACPIREG_GPE0_STS; 1136 } 1137 break; 1138 case ACPIREG_GPE_EN: 1139 __size = 1; 1140 dnprintf(50, "read GPE_EN offset: %.2x %.2x %.2x\n", 1141 offset, sc->sc_fadt->gpe0_blk_len>>1, 1142 sc->sc_fadt->gpe1_blk_len>>1); 1143 if (offset < (sc->sc_fadt->gpe0_blk_len >> 1)) { 1144 reg = ACPIREG_GPE0_EN; 1145 } 1146 break; 1147 } 1148 1149 if (reg >= ACPIREG_MAXREG || sc->sc_pmregs[reg].size == 0) 1150 return (0); 1151 
	/*
	 * (tail of acpi_read_pmreg) Read the mapped PM register with the
	 * bus_space access of the recorded width, clamped to 4 bytes.
	 */
	regval = 0;
	ioh = sc->sc_pmregs[reg].ioh;
	size = sc->sc_pmregs[reg].size;
	if (__size)
		size = __size;
	/* widest single bus_space access used here is 4 bytes */
	if (size > 4)
		size = 4;

	switch (size) {
	case 1:
		regval = bus_space_read_1(sc->sc_iot, ioh, offset);
		break;
	case 2:
		regval = bus_space_read_2(sc->sc_iot, ioh, offset);
		break;
	case 4:
		regval = bus_space_read_4(sc->sc_iot, ioh, offset);
		break;
	}

	dnprintf(30, "acpi_readpm: %s = %.4x:%.4x %x\n",
	    sc->sc_pmregs[reg].name,
	    sc->sc_pmregs[reg].addr, offset, regval);
	return (regval);
}

/*
 * Write to power management register.
 *
 * The aggregate indexes (ACPIREG_PM1_EN/_STS/_CNT) recurse into the A
 * and B halves with the same value; the GPE aggregates redirect to the
 * GPE0 block when the offset falls inside it and force byte access.
 * NOTE(review): "__size" is in the implementation-reserved identifier
 * namespace; a rename would be cleaner but is left untouched here.
 */
void
acpi_write_pmreg(struct acpi_softc *sc, int reg, int offset, int regval)
{
	bus_space_handle_t ioh;
	bus_size_t size, __size;

	__size = 0;
	/* Special cases: 1A/1B blocks can be written with same value */
	switch (reg) {
	case ACPIREG_PM1_EN:
		acpi_write_pmreg(sc, ACPIREG_PM1A_EN, offset, regval);
		acpi_write_pmreg(sc, ACPIREG_PM1B_EN, offset, regval);
		break;
	case ACPIREG_PM1_STS:
		acpi_write_pmreg(sc, ACPIREG_PM1A_STS, offset, regval);
		acpi_write_pmreg(sc, ACPIREG_PM1B_STS, offset, regval);
		break;
	case ACPIREG_PM1_CNT:
		acpi_write_pmreg(sc, ACPIREG_PM1A_CNT, offset, regval);
		acpi_write_pmreg(sc, ACPIREG_PM1B_CNT, offset, regval);
		break;
	case ACPIREG_GPE_STS:
		/* GPE status registers are accessed byte-wide */
		__size = 1;
		dnprintf(50, "write GPE_STS offset: %.2x %.2x %.2x %.2x\n",
		    offset, sc->sc_fadt->gpe0_blk_len>>1,
		    sc->sc_fadt->gpe1_blk_len>>1, regval);
		if (offset < (sc->sc_fadt->gpe0_blk_len >> 1)) {
			reg = ACPIREG_GPE0_STS;
		}
		break;
	case ACPIREG_GPE_EN:
		/* GPE enable registers are accessed byte-wide */
		__size = 1;
		dnprintf(50, "write GPE_EN offset: %.2x %.2x %.2x %.2x\n",
		    offset, sc->sc_fadt->gpe0_blk_len>>1,
		    sc->sc_fadt->gpe1_blk_len>>1, regval);
		if (offset < (sc->sc_fadt->gpe0_blk_len >> 1)) {
			reg = ACPIREG_GPE0_EN;
		}
		break;
	}

	/*
	 * All special cases return here: the aggregate indexes handled
	 * above lie at or beyond ACPIREG_MAXREG, so after the recursive
	 * writes (or a failed GPE redirect) there is nothing left to do.
	 */
	if (reg >= ACPIREG_MAXREG)
		return;

	ioh = sc->sc_pmregs[reg].ioh;
	size = sc->sc_pmregs[reg].size;
	if (__size)
		size = __size;
	if (size > 4)
		size = 4;
	switch (size) {
	case 1:
		bus_space_write_1(sc->sc_iot, ioh, offset, regval);
		break;
	case 2:
		bus_space_write_2(sc->sc_iot, ioh, offset, regval);
		break;
	case 4:
		bus_space_write_4(sc->sc_iot, ioh, offset, regval);
		break;
	}

	dnprintf(30, "acpi_writepm: %s = %.4x:%.4x %x\n",
	    sc->sc_pmregs[reg].name, sc->sc_pmregs[reg].addr, offset, regval);
}

/*
 * Map Power Management registers.
 *
 * Walks every fixed-register index, pulls its address and length out of
 * the FADT, and bus_space_map()s the ones that exist.  The _EN halves of
 * the PM1/GPE event blocks live immediately after their _STS halves, so
 * their address is the block base plus half the block length.
 */
void
acpi_map_pmregs(struct acpi_softc *sc)
{
	bus_addr_t addr;
	bus_size_t size;
	const char *name;
	int reg;

	for (reg = 0; reg < ACPIREG_MAXREG; reg++) {
		size = 0;
		switch (reg) {
		case ACPIREG_SMICMD:
			name = "smi";
			size = 1;
			addr = sc->sc_fadt->smi_cmd;
			break;
		case ACPIREG_PM1A_STS:
		case ACPIREG_PM1A_EN:
			name = "pm1a_sts";
			size = sc->sc_fadt->pm1_evt_len >> 1;
			addr = sc->sc_fadt->pm1a_evt_blk;
			if (reg == ACPIREG_PM1A_EN && addr) {
				addr += size;
				name = "pm1a_en";
			}
			break;
		case ACPIREG_PM1A_CNT:
			name = "pm1a_cnt";
			size = sc->sc_fadt->pm1_cnt_len;
			addr = sc->sc_fadt->pm1a_cnt_blk;
			break;
		case ACPIREG_PM1B_STS:
		case ACPIREG_PM1B_EN:
			name = "pm1b_sts";
			size = sc->sc_fadt->pm1_evt_len >> 1;
			addr = sc->sc_fadt->pm1b_evt_blk;
			if (reg == ACPIREG_PM1B_EN && addr) {
				addr += size;
				name = "pm1b_en";
			}
			break;
		case ACPIREG_PM1B_CNT:
			name = "pm1b_cnt";
			size = sc->sc_fadt->pm1_cnt_len;
			addr = sc->sc_fadt->pm1b_cnt_blk;
			break;
		case ACPIREG_PM2_CNT:
			name = "pm2_cnt";
			size = sc->sc_fadt->pm2_cnt_len;
			addr = sc->sc_fadt->pm2_cnt_blk;
			break;
#if 0
		case ACPIREG_PM_TMR:
			/* Allocated in acpitimer */
			name = "pm_tmr";
			size = sc->sc_fadt->pm_tmr_len;
			addr = sc->sc_fadt->pm_tmr_blk;
			break;
#endif
		case ACPIREG_GPE0_STS:
		case ACPIREG_GPE0_EN:
			name = "gpe0_sts";
			size = sc->sc_fadt->gpe0_blk_len >> 1;
			addr = sc->sc_fadt->gpe0_blk;

			dnprintf(20, "gpe0 block len : %x\n",
			    sc->sc_fadt->gpe0_blk_len >> 1);
			dnprintf(20, "gpe0 block addr: %x\n",
			    sc->sc_fadt->gpe0_blk);
			if (reg == ACPIREG_GPE0_EN && addr) {
				addr += size;
				name = "gpe0_en";
			}
			break;
		case ACPIREG_GPE1_STS:
		case ACPIREG_GPE1_EN:
			name = "gpe1_sts";
			size = sc->sc_fadt->gpe1_blk_len >> 1;
			addr = sc->sc_fadt->gpe1_blk;

			dnprintf(20, "gpe1 block len : %x\n",
			    sc->sc_fadt->gpe1_blk_len >> 1);
			dnprintf(20, "gpe1 block addr: %x\n",
			    sc->sc_fadt->gpe1_blk);
			if (reg == ACPIREG_GPE1_EN && addr) {
				addr += size;
				name = "gpe1_en";
			}
			break;
		}
		if (size && addr) {
			dnprintf(50, "mapping: %.4x %.4x %s\n",
			    addr, size, name);

			/* Size and address exist; map register space */
			bus_space_map(sc->sc_iot, addr, size, 0,
			    &sc->sc_pmregs[reg].ioh);

			sc->sc_pmregs[reg].name = name;
			sc->sc_pmregs[reg].size = size;
			sc->sc_pmregs[reg].addr = addr;
		}
	}
}

/* move all stuff that doesn't go on the boot media in here */
#ifndef SMALL_KERNEL
/*
 * Reset the machine through the FADT reset register (ACPI 2.0+),
 * then give the write 100ms to take effect.
 */
void
acpi_reset(void)
{
	struct acpi_fadt *fadt;
	u_int32_t reset_as, reset_len;
	u_int32_t value;

	fadt = acpi_softc->sc_fadt;

	/*
	 * RESET_REG_SUP is not properly set in some implementations,
	 * but not testing against it breaks more machines than it fixes
	 */
	if (acpi_softc->sc_revision <= 1 ||
	    !(fadt->flags & FADT_RESET_REG_SUP) || fadt->reset_reg.address == 0)
		return;

	value = fadt->reset_value;

	/* access width in bytes; a zero bit width means one byte */
	reset_as = fadt->reset_reg.register_bit_width / 8;
	if (reset_as == 0)
		reset_as = 1;

	reset_len = fadt->reset_reg.access_size;
	if (reset_len == 0)
		reset_len = reset_as;

	acpi_gasio(acpi_softc, ACPI_IOWRITE,
	    fadt->reset_reg.address_space_id,
	    fadt->reset_reg.address, reset_as, reset_len, &value);

	delay(100000);
}

/*
 * Hardware interrupt handler.  Scans the GPE and PM1 registers, masks
 * off whatever fired, records it in softc state and wakes the ISR
 * thread, which does the actual AML work at process level.
 * Returns nonzero iff something was handled.
 */
int
acpi_interrupt(void *arg)
{
	struct acpi_softc *sc = (struct acpi_softc *)arg;
	u_int32_t processed, sts, en, idx, jdx;

	processed = 0;

#if 0
	acpi_add_gpeblock(sc, sc->sc_fadt->gpe0_blk, sc->sc_fadt->gpe0_blk_len>>1, 0);
	acpi_add_gpeblock(sc, sc->sc_fadt->gpe1_blk, sc->sc_fadt->gpe1_blk_len>>1,
	    sc->sc_fadt->gpe1_base);
#endif

	dnprintf(40, "ACPI Interrupt\n");
	for (idx = 0; idx < sc->sc_lastgpe; idx += 8) {
		sts = acpi_read_pmreg(sc, ACPIREG_GPE_STS, idx>>3);
		en = acpi_read_pmreg(sc, ACPIREG_GPE_EN, idx>>3);
		if (en & sts) {
			dnprintf(10, "GPE block: %.2x %.2x %.2x\n", idx, sts,
			    en);
			/* disable the firing GPEs until the thread ran them */
			acpi_write_pmreg(sc, ACPIREG_GPE_EN, idx>>3, en & ~sts);
			for (jdx = 0; jdx < 8; jdx++) {
				if (en & sts & (1L << jdx)) {
					/* Signal this GPE */
					sc->gpe_table[idx+jdx].active = 1;
					processed = 1;
				}
			}
		}
	}

	sts = acpi_read_pmreg(sc, ACPIREG_PM1_STS, 0);
	en = acpi_read_pmreg(sc, ACPIREG_PM1_EN, 0);
	if (sts & en) {
		dnprintf(10,"GEN interrupt: %.4x\n", sts & en);
		/*
		 * Mask the active events, ack status, then restore the
		 * enable mask.  NOTE(review): the STS write uses "en"
		 * rather than "sts", i.e. it acks every *enabled* event,
		 * not just the pending ones -- verify this is intended.
		 */
		acpi_write_pmreg(sc, ACPIREG_PM1_EN, 0, en & ~sts);
		acpi_write_pmreg(sc, ACPIREG_PM1_STS, 0, en);
		acpi_write_pmreg(sc, ACPIREG_PM1_EN, 0, en);
		if (sts & ACPI_PM1_PWRBTN_STS)
			sc->sc_powerbtn = 1;
		if (sts & ACPI_PM1_SLPBTN_STS)
			sc->sc_sleepbtn = 1;
		processed = 1;
	}

	if (processed) {
		/* wake the ISR thread to service what we flagged above */
		sc->sc_wakeup = 0;
		wakeup(sc);
	}

	return (processed);
}

/*
 * aml_walknodes() callback: attach acpicpu/acpitz/acpipwrres children
 * for matching AML namespace objects.
 */
int
acpi_add_device(struct aml_node *node, void *arg)
{
	static int nacpicpus = 0;
	struct device *self = arg;
	struct acpi_softc *sc = arg;
	struct acpi_attach_args aaa;
#ifdef MULTIPROCESSOR
	struct aml_value res;
	int proc_id = -1;
#endif

	memset(&aaa, 0, sizeof(aaa));
	aaa.aaa_node = node;
	aaa.aaa_iot = sc->sc_iot;
	aaa.aaa_memt = sc->sc_memt;
	if (node == NULL || node->value == NULL)
		return 0;

	switch (node->value->type) {
	case AML_OBJTYPE_PROCESSOR:
		/* never attach more acpicpus than there are real CPUs */
		if (nacpicpus >= ncpus)
			return 0;
#ifdef MULTIPROCESSOR
		if (aml_evalnode(sc, aaa.aaa_node, 0, NULL, &res) == 0) {
			if (res.type == AML_OBJTYPE_PROCESSOR)
				proc_id = res.v_processor.proc_id;
			aml_freevalue(&res);
		}
		/*
		 * NOTE(review): the lower bound is "< -1", so a proc_id
		 * that stayed -1 (eval failed / wrong type) passes and
		 * indexes acpi_lapic_flags[-1] -- looks out of bounds;
		 * should this be "proc_id < 0"?  Verify before changing.
		 */
		if (proc_id < -1 || proc_id >= LAPIC_MAP_SIZE ||
		    (acpi_lapic_flags[proc_id] & ACPI_PROC_ENABLE) == 0)
			return 0;
#endif
		nacpicpus++;

		aaa.aaa_name = "acpicpu";
		break;
	case AML_OBJTYPE_THERMZONE:
		aaa.aaa_name = "acpitz";
		break;
	case AML_OBJTYPE_POWERRSRC:
		aaa.aaa_name = "acpipwrres";
		break;
	default:
		return 0;
	}
	config_found(self, &aaa, acpi_print);
	return 0;
}

/*
 * Set or clear the enable bit for a single GPE via a
 * read-modify-write of its (byte-wide) enable register.
 */
void
acpi_enable_onegpe(struct acpi_softc *sc, int gpe, int enable)
{
	uint8_t mask = (1L << (gpe & 7));
	uint8_t en;

	/* Read enabled register */
	en = acpi_read_pmreg(sc, ACPIREG_GPE_EN, gpe>>3);
	dnprintf(50, "%sabling GPE %.2x (current: %sabled) %.2x\n",
	    enable ? "en" : "dis", gpe, (en & mask) ? "en" : "dis", en);
	if (enable)
		en |= mask;
	else
		en &= ~mask;
	acpi_write_pmreg(sc, ACPIREG_GPE_EN, gpe>>3, en);
}

/*
 * Register a handler for one GPE.  Returns 0 on success, -EINVAL for an
 * unknown GPE or NULL handler, -EBUSY if a handler is already present.
 */
int
acpi_set_gpehandler(struct acpi_softc *sc, int gpe, int (*handler)
    (struct acpi_softc *, int, void *), void *arg, const char *label)
{
	struct gpe_block *ptbl;

	ptbl = acpi_find_gpe(sc, gpe);
	if (ptbl == NULL || handler == NULL)
		return -EINVAL;
	if (ptbl->handler != NULL) {
		dnprintf(10, "error: GPE %.2x already enabled\n", gpe);
		return -EBUSY;
	}
	dnprintf(50, "Adding GPE handler %.2x (%s)\n", gpe, label);
	ptbl->handler = handler;
	ptbl->arg = arg;

	return (0);
}

/*
 * Level-sensitive GPE handler: run the _Lxx method, ack status, and
 * re-enable the GPE (the interrupt handler disabled it).
 */
int
acpi_gpe_level(struct acpi_softc *sc, int gpe, void *arg)
{
	struct aml_node *node = arg;
	uint8_t mask;

	dnprintf(10, "handling Level-sensitive GPE %.2x\n", gpe);
	mask = (1L << (gpe & 7));

	aml_evalnode(sc, node, 0, NULL, NULL);
	acpi_write_pmreg(sc, ACPIREG_GPE_STS, gpe>>3, mask);
	acpi_write_pmreg(sc, ACPIREG_GPE_EN, gpe>>3, mask);

	return (0);
}

/*
 * Edge-sensitive GPE handler: identical sequence to acpi_gpe_level
 * (run _Exx, ack status, re-enable).
 */
int
acpi_gpe_edge(struct acpi_softc *sc, int gpe, void *arg)
{

	struct aml_node *node = arg;
	uint8_t mask;

	dnprintf(10, "handling Edge-sensitive GPE %.2x\n", gpe);
	mask = (1L << (gpe & 7));

	aml_evalnode(sc, node, 0, NULL, NULL);
	acpi_write_pmreg(sc, ACPIREG_GPE_STS, gpe>>3, mask);
	acpi_write_pmreg(sc, ACPIREG_GPE_EN, gpe>>3, mask);

	return (0);
}

/* Discover Devices that can wakeup the system
 * _PRW returns a package
 * pkg[0] = integer (FADT gpe bit) or package (gpe block,gpe bit)
 * pkg[1] = lowest sleep state
 * pkg[2+] = power resource devices (optional)
 *
 * To enable wakeup devices:
 * Evaluate _ON method in each power resource device
 * Evaluate _PSW method
 */
int
acpi_foundprw(struct aml_node *node,
    void *arg)
{
	struct acpi_softc *sc = arg;
	struct acpi_wakeq *wq;

	wq = malloc(sizeof(struct acpi_wakeq), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (wq == NULL) {
		return 0;
	}

	wq->q_wakepkg = malloc(sizeof(struct aml_value), M_DEVBUF,
	    M_NOWAIT | M_ZERO);
	if (wq->q_wakepkg == NULL) {
		free(wq, M_DEVBUF);
		return 0;
	}
	dnprintf(10, "Found _PRW (%s)\n", node->parent->name);
	aml_evalnode(sc, node, 0, NULL, wq->q_wakepkg);
	wq->q_node = node->parent;
	wq->q_gpe = -1;

	/* Get GPE of wakeup device, and lowest sleep level */
	if (wq->q_wakepkg->type == AML_OBJTYPE_PACKAGE && wq->q_wakepkg->length >= 2) {
		/* pkg[0] may also be a (block,bit) package; only the
		 * integer form is handled here, q_gpe stays -1 otherwise */
		if (wq->q_wakepkg->v_package[0]->type == AML_OBJTYPE_INTEGER) {
			wq->q_gpe = wq->q_wakepkg->v_package[0]->v_integer;
		}
		if (wq->q_wakepkg->v_package[1]->type == AML_OBJTYPE_INTEGER) {
			wq->q_state = wq->q_wakepkg->v_package[1]->v_integer;
		}
	}
	SIMPLEQ_INSERT_TAIL(&sc->sc_wakedevs, wq, q_next);
	return 0;
}

/*
 * Return the gpe_table slot for a GPE number, or NULL if it is
 * beyond the last known GPE.
 */
struct gpe_block *
acpi_find_gpe(struct acpi_softc *sc, int gpe)
{
#if 1
	if (gpe >= sc->sc_lastgpe)
		return NULL;
	return &sc->gpe_table[gpe];
#else
	SIMPLEQ_FOREACH(pgpe, &sc->sc_gpes, gpe_link) {
		if (gpe >= pgpe->start && gpe <= (pgpe->start+7))
			return &pgpe->table[gpe & 7];
	}
	return NULL;
#endif
}

#if 0
/*
 * NOTE(review): everything in this #if 0 section is unfinished dead
 * code -- "pgpe" is never declared and the SIMPLEQ_INSERT_TAIL below
 * inserts "gpe" instead of "pgpe", so it would not compile as-is.
 */
/* New GPE handling code: Create GPE block */
void
acpi_init_gpeblock(struct acpi_softc *sc, int reg, int len, int base)
{
	int i, j;

	if (!reg || !len)
		return;
	for (i=0; i<len; i++) {
		pgpe = acpi_os_malloc(sizeof(gpeblock));
		if (pgpe == NULL)
			return;

		/* Allocate GPE Handler Block */
		pgpe->start = base + i;
		acpi_bus_space_map(sc->sc_iot, reg+i, 1, 0, &pgpe->sts_ioh);
		acpi_bus_space_map(sc->sc_iot, reg+i+len, 1, 0, &pgpe->en_ioh);
		SIMPLEQ_INSERT_TAIL(&sc->sc_gpes, gpe, gpe_link);

		/* Clear pending GPEs */
		bus_space_write_1(sc->sc_iot, pgpe->sts_ioh, 0, 0xFF);
		bus_space_write_1(sc->sc_iot, pgpe->en_ioh, 0, 0x00);
	}

	/* Search for GPE handlers */
	for (i=0; i<len*8; i++) {
		char gpestr[32];
		struct aml_node *h;

		snprintf(gpestr, sizeof(gpestr), "\\_GPE._L%.2X", base+i);
		h = aml_searchnode(&aml_root, gpestr);
		if (acpi_set_gpehandler(sc, base+i, acpi_gpe_level, h, "level") != 0) {
			snprintf(gpestr, sizeof(gpestr), "\\_GPE._E%.2X", base+i);
			h = aml_searchnode(&aml_root, gpestr);
			acpi_set_gpehandler(sc, base+i, acpi_gpe_edge, h, "edge");
		}
	}
}

/* Process GPE interrupts */
int
acpi_handle_gpes(struct acpi_softc *sc)
{
	uint8_t en, sts;
	int processed, i;

	processed=0;
	SIMPLEQ_FOREACH(pgpe, &sc->sc_gpes, gpe_link) {
		sts = bus_space_read_1(sc->sc_iot, pgpe->sts_ioh, 0);
		en = bus_space_read_1(sc->sc_iot, pgpe->en_ioh, 0);
		for (i=0; i<8; i++) {
			if (en & sts & (1L << i)) {
				pgpe->table[i].active = 1;
				processed=1;
			}
		}
	}
	return processed;
}
#endif

#if 0
/* Debug helper (dead code): dump enabled+pending GPEs of one block */
void
acpi_add_gpeblock(struct acpi_softc *sc, int reg, int len, int gpe)
{
	int idx, jdx;
	u_int8_t en, sts;

	if (!reg || !len)
		return;
	for (idx=0; idx<len; idx++) {
		sts = inb(reg + idx);
		en = inb(reg + len + idx);
		printf("-- gpe %.2x-%.2x : en:%.2x sts:%.2x %.2x\n",
		    gpe+idx*8, gpe+idx*8+7, en, sts, en&sts);
		for (jdx=0; jdx<8; jdx++) {
			char gpestr[32];
			struct aml_node *l, *e;

			if (en & sts & (1L << jdx)) {
				snprintf(gpestr,sizeof(gpestr), "\\_GPE._L%.2X", gpe+idx*8+jdx);
				l = aml_searchname(&aml_root, gpestr);
				snprintf(gpestr,sizeof(gpestr), "\\_GPE._E%.2X", gpe+idx*8+jdx);
				e = aml_searchname(&aml_root, gpestr);
				printf(" GPE %.2x active L%x E%x\n", gpe+idx*8+jdx, l, e);
			}
		}
	}
}
#endif

/*
 * Discover all GPEs, wire up their _Lxx/_Exx AML handlers, and collect
 * the _PRW wakeup devices.
 */
void
acpi_init_gpes(struct acpi_softc *sc)
{
	struct aml_node *gpe;
	char name[12];
	int idx, ngpe;

#if 0
	acpi_add_gpeblock(sc, sc->sc_fadt->gpe0_blk, sc->sc_fadt->gpe0_blk_len>>1, 0);
	acpi_add_gpeblock(sc, sc->sc_fadt->gpe1_blk, sc->sc_fadt->gpe1_blk_len>>1,
	    sc->sc_fadt->gpe1_base);
#endif

	/* gpe0_blk_len bytes = two half-blocks of (len/2) bytes = len*4 GPEs */
	sc->sc_lastgpe = sc->sc_fadt->gpe0_blk_len << 2;
	/* NOTE(review): GPE1 block is deliberately ignored -- empty branch */
	if (sc->sc_fadt->gpe1_blk_len) {
	}
	dnprintf(50, "Last GPE: %.2x\n", sc->sc_lastgpe);

	/* Allocate GPE table */
	sc->gpe_table = malloc(sc->sc_lastgpe * sizeof(struct gpe_block),
	    M_DEVBUF, M_WAITOK | M_ZERO);

	ngpe = 0;

	/* Clear GPE status */
	for (idx = 0; idx < sc->sc_lastgpe; idx += 8) {
		acpi_write_pmreg(sc, ACPIREG_GPE_EN, idx>>3, 0);
		acpi_write_pmreg(sc, ACPIREG_GPE_STS, idx>>3, -1);
	}
	for (idx = 0; idx < sc->sc_lastgpe; idx++) {
		/* Search Level-sensitive GPES */
		snprintf(name, sizeof(name), "\\_GPE._L%.2X", idx);
		gpe = aml_searchname(&aml_root, name);
		if (gpe != NULL)
			acpi_set_gpehandler(sc, idx, acpi_gpe_level, gpe,
			    "level");
		if (gpe == NULL) {
			/* Search Edge-sensitive GPES */
			snprintf(name, sizeof(name), "\\_GPE._E%.2X", idx);
			gpe = aml_searchname(&aml_root, name);
			if (gpe != NULL)
				acpi_set_gpehandler(sc, idx, acpi_gpe_edge, gpe,
				    "edge");
		}
	}
	aml_find_node(&aml_root, "_PRW", acpi_foundprw, sc);
	/* NOTE(review): ngpe is never incremented, so sc_maxgpe is
	 * always 0 here -- confirm whether any consumer relies on it */
	sc->sc_maxgpe = ngpe;
}

/*
 * Cache the SLP_TYPa/SLP_TYPb values for each sleep state by
 * evaluating \_Sx_; -1 marks a state the platform does not provide.
 */
void
acpi_init_states(struct acpi_softc *sc)
{
	struct aml_value res;
	char name[8];
	int i;

	for (i = ACPI_STATE_S0; i <= ACPI_STATE_S5; i++) {
		snprintf(name, sizeof(name), "_S%d_", i);
		sc->sc_sleeptype[i].slp_typa = -1;
		sc->sc_sleeptype[i].slp_typb = -1;
		if (aml_evalname(sc, &aml_root, name, 0, NULL, &res) == 0) {
			if (res.type == AML_OBJTYPE_PACKAGE) {
				sc->sc_sleeptype[i].slp_typa = aml_val2int(res.v_package[0]);
				sc->sc_sleeptype[i].slp_typb = aml_val2int(res.v_package[1]);
			}
			aml_freevalue(&res);
		}
	}
}

/* Look up the standard power-management methods once at attach time. */
void
acpi_init_pm(struct acpi_softc *sc)
{
	sc->sc_tts = aml_searchname(&aml_root, "_TTS");
	sc->sc_pts = aml_searchname(&aml_root, "_PTS");
	sc->sc_wak = aml_searchname(&aml_root, "_WAK");
	sc->sc_bfs = aml_searchname(&aml_root, "_BFS");
	sc->sc_gts = aml_searchname(&aml_root, "_GTS");
}

#ifndef SMALL_KERNEL
/*
 * Before sleeping: clear all GPEs, then enable exactly those wakeup
 * devices whose _PRW sleep level is at least the target state.
 */
void
acpi_sleep_walk(struct acpi_softc *sc, int state)
{
	struct acpi_wakeq *wentry;
	int idx;

	/* Clear GPE status */
	for (idx = 0; idx < sc->sc_lastgpe; idx += 8) {
		acpi_write_pmreg(sc, ACPIREG_GPE_EN, idx>>3, 0);
		acpi_write_pmreg(sc, ACPIREG_GPE_STS, idx>>3, -1);
	}

	SIMPLEQ_FOREACH(wentry, &sc->sc_wakedevs, q_next) {
		dnprintf(10, "%.4s(S%d) gpe %.2x\n", wentry->q_node->name,
		    wentry->q_state,
		    wentry->q_gpe);

		if (state <= wentry->q_state)
			acpi_enable_onegpe(sc, wentry->q_gpe, 1);
	}
}
#endif /* ! SMALL_KERNEL */

/*
 * Enter a sleep state: validate it, arm wakeup GPEs, run the prepare
 * sequence, then hand off to machdep (S2+) or sleep directly (S1).
 */
int
acpi_sleep_state(struct acpi_softc *sc, int state)
{
	int ret;

	switch (state) {
	case ACPI_STATE_S0:
		return (0);
	case ACPI_STATE_S4:
		/* S4 (suspend-to-disk) is not supported */
		return (EOPNOTSUPP);
	case ACPI_STATE_S5:
		break;
	case ACPI_STATE_S1:
	case ACPI_STATE_S2:
	case ACPI_STATE_S3:
		if (sc->sc_sleeptype[state].slp_typa == -1 ||
		    sc->sc_sleeptype[state].slp_typb == -1)
			return (EOPNOTSUPP);
	}

	acpi_sleep_walk(sc, state);

	if ((ret = acpi_prepare_sleep_state(sc, state)) != 0)
		return (ret);

	if (state != ACPI_STATE_S1)
		ret = acpi_sleep_machdep(sc, state);
	else
		ret = acpi_enter_sleep_state(sc, state);

#ifndef SMALL_KERNEL
	acpi_resume(sc);
#endif /* !
SMALL_KERNEL */
	return (ret);
}

/*
 * Program SLP_TYPx and set SLP_EN in the PM1 control registers to put
 * the machine to sleep.  On success the CPU stops here; if we are still
 * running we poll WAK_STS briefly and return -1 (entry failed or the
 * machine woke immediately).  The write order below follows the ACPI
 * fixed-hardware sequence and must not be rearranged.
 */
int
acpi_enter_sleep_state(struct acpi_softc *sc, int state)
{
	uint16_t rega, regb;
	int retries;

	/* Clear WAK_STS bit */
	acpi_write_pmreg(sc, ACPIREG_PM1_STS, 1, ACPI_PM1_WAK_STS);

	/* Disable BM arbitration */
	acpi_write_pmreg(sc, ACPIREG_PM2_CNT, 1, ACPI_PM2_ARB_DIS);

	/* Write SLP_TYPx values */
	rega = acpi_read_pmreg(sc, ACPIREG_PM1A_CNT, 0);
	regb = acpi_read_pmreg(sc, ACPIREG_PM1B_CNT, 0);
	rega &= ~(ACPI_PM1_SLP_TYPX_MASK | ACPI_PM1_SLP_EN);
	regb &= ~(ACPI_PM1_SLP_TYPX_MASK | ACPI_PM1_SLP_EN);
	rega |= ACPI_PM1_SLP_TYPX(sc->sc_sleeptype[state].slp_typa);
	regb |= ACPI_PM1_SLP_TYPX(sc->sc_sleeptype[state].slp_typb);
	acpi_write_pmreg(sc, ACPIREG_PM1A_CNT, 0, rega);
	acpi_write_pmreg(sc, ACPIREG_PM1B_CNT, 0, regb);

	/* Set SLP_EN bit */
	rega |= ACPI_PM1_SLP_EN;
	regb |= ACPI_PM1_SLP_EN;

	/*
	 * Let the machdep code flush caches and do any other necessary
	 * tasks before going away.
	 */
	acpi_cpu_flush(sc, state);

	/* this pair of writes triggers the actual sleep transition */
	acpi_write_pmreg(sc, ACPIREG_PM1A_CNT, 0, rega);
	acpi_write_pmreg(sc, ACPIREG_PM1B_CNT, 0, regb);
	/* Loop on WAK_STS */
	for (retries = 1000; retries > 0; retries--) {
		rega = acpi_read_pmreg(sc, ACPIREG_PM1A_STS, 0);
		regb = acpi_read_pmreg(sc, ACPIREG_PM1B_STS, 0);
		if (rega & ACPI_PM1_WAK_STS ||
		    regb & ACPI_PM1_WAK_STS)
			break;
		DELAY(10);
	}

	return (-1);
}

#ifndef SMALL_KERNEL
/*
 * Resume path: run _BFS, fire the PWR_RESUME powerhooks, restore the
 * clock, run _WAK, and finally tell the firmware via _TTS that we are
 * back in S0.  Failures of the AML methods are logged, not fatal.
 */
void
acpi_resume(struct acpi_softc *sc)
{
	struct aml_value env;

	memset(&env, 0, sizeof(env));
	env.type = AML_OBJTYPE_INTEGER;
	env.v_integer = sc->sc_state;

	if (sc->sc_bfs)
		if (aml_evalnode(sc, sc->sc_bfs, 1, &env, NULL) != 0) {
			dnprintf(10, "%s evaluating method _BFS failed.\n",
			    DEVNAME(sc));
		}

	dopowerhooks(PWR_RESUME);
	inittodr(0);

	if (sc->sc_wak)
		if (aml_evalnode(sc, sc->sc_wak, 1, &env, NULL) != 0) {
			dnprintf(10, "%s evaluating method _WAK failed.\n",
			    DEVNAME(sc));
		}

	sc->sc_state = ACPI_STATE_S0;
	if (sc->sc_tts) {
		env.v_integer = sc->sc_state;
		if (aml_evalnode(sc, sc->sc_tts, 1, &env, NULL) != 0) {
			dnprintf(10, "%s evaluating method _TTS failed.\n",
			    DEVNAME(sc));
		}
	}
}
#endif /* !
SMALL_KERNEL */

/*
 * Run the firmware-notification methods (_TTS, _PTS, _GTS, \_SST) and
 * the suspend powerhooks before entering a sleep state.  Returns 0 when
 * the hardware may be put to sleep, an errno otherwise.  Interrupts are
 * disabled on the successful path.
 */
int
acpi_prepare_sleep_state(struct acpi_softc *sc, int state)
{
	struct aml_value env;

	if (sc == NULL || state == ACPI_STATE_S0)
		return(0);

	if (sc->sc_sleeptype[state].slp_typa == -1 ||
	    sc->sc_sleeptype[state].slp_typb == -1) {
		printf("%s: state S%d unavailable\n",
		    sc->sc_dev.dv_xname, state);
		return (ENXIO);
	}

	memset(&env, 0, sizeof(env));
	env.type = AML_OBJTYPE_INTEGER;
	env.v_integer = state;
	/* _TTS(state) */
	if (sc->sc_tts)
		if (aml_evalnode(sc, sc->sc_tts, 1, &env, NULL) != 0) {
			dnprintf(10, "%s evaluating method _TTS failed.\n",
			    DEVNAME(sc));
			return (ENXIO);
		}

	switch (state) {
	case ACPI_STATE_S1:
	case ACPI_STATE_S2:
		resettodr();
		dopowerhooks(PWR_SUSPEND);
		break;
	case ACPI_STATE_S3:
		resettodr();
		dopowerhooks(PWR_STANDBY);
		break;
	}

	/* _PTS(state) */
	if (sc->sc_pts)
		if (aml_evalnode(sc, sc->sc_pts, 1, &env, NULL) != 0) {
			dnprintf(10, "%s evaluating method _PTS failed.\n",
			    DEVNAME(sc));
			return (ENXIO);
		}

	sc->sc_state = state;
	/* _GTS(state) */
	if (sc->sc_gts)
		if (aml_evalnode(sc, sc->sc_gts, 1, &env, NULL) != 0) {
			dnprintf(10, "%s evaluating method _GTS failed.\n",
			    DEVNAME(sc));
			return (ENXIO);
		}

	disable_intr();
	aml_evalname(sc, &aml_root, "\\_SST", 1, &env, NULL);
	/* NOTE(review): sc_state was already set above; this repeat
	 * assignment looks redundant -- verify before removing */
	sc->sc_state = state;

	return (0);
}



/* Shutdown hook: drop into S5 (soft off) if the prepare step succeeds. */
void
acpi_powerdown(void)
{
	/*
	 * In case acpi_prepare_sleep fails, we shouldn't try to enter
	 * the sleep state. It might cost us the battery.
	 */
	acpi_sleep_walk(acpi_softc, ACPI_STATE_S5);
	if (acpi_prepare_sleep_state(acpi_softc, ACPI_STATE_S5) == 0)
		acpi_enter_sleep_state(acpi_softc, ACPI_STATE_S5);
}


extern int aml_busy;

/*
 * Kernel thread that does the process-level half of ACPI event
 * handling: it enables the button/GPE sources, then loops forever
 * waiting for acpi_interrupt() to flag work (active GPEs, power or
 * sleep button) and services it via AML at thread context.
 */
void
acpi_isr_thread(void *arg)
{
	struct acpi_thread *thread = arg;
	struct acpi_softc *sc = thread->sc;
	u_int32_t gpe;

	/*
	 * If we have an interrupt handler, we can get notification
	 * when certain status bits changes in the ACPI registers,
	 * so let us enable some events we can forward to userland
	 */
	if (sc->sc_interrupt) {
		int16_t flag;

		dnprintf(1,"slpbtn:%c pwrbtn:%c\n",
		    sc->sc_fadt->flags & FADT_SLP_BUTTON ? 'n' : 'y',
		    sc->sc_fadt->flags & FADT_PWR_BUTTON ? 'n' : 'y');
		dnprintf(10, "Enabling acpi interrupts...\n");
		sc->sc_wakeup = 1;

		/* Enable Sleep/Power buttons if they exist */
		flag = acpi_read_pmreg(sc, ACPIREG_PM1_EN, 0);
		if (!(sc->sc_fadt->flags & FADT_PWR_BUTTON)) {
			flag |= ACPI_PM1_PWRBTN_EN;
		}
		if (!(sc->sc_fadt->flags & FADT_SLP_BUTTON)) {
			flag |= ACPI_PM1_SLPBTN_EN;
		}
		acpi_write_pmreg(sc, ACPIREG_PM1_EN, 0, flag);

		/* Enable handled GPEs here */
		for (gpe = 0; gpe < sc->sc_lastgpe; gpe++) {
			if (sc->gpe_table[gpe].handler)
				acpi_enable_onegpe(sc, gpe, 1);
		}
	}

	while (thread->running) {
		dnprintf(10, "sleep... %d\n", sc->sc_wakeup);
		/* sc_wakeup is cleared by acpi_interrupt() when work arrives */
		while (sc->sc_wakeup)
			tsleep(sc, PWAIT, "acpi_idle", 0);
		sc->sc_wakeup = 1;
		dnprintf(10, "wakeup..\n");
		/* don't re-enter the AML interpreter while it is busy */
		if (aml_busy)
			continue;

		for (gpe = 0; gpe < sc->sc_lastgpe; gpe++) {
			struct gpe_block *pgpe = &sc->gpe_table[gpe];

			if (pgpe->active) {
				pgpe->active = 0;
				dnprintf(50, "softgpe: %.2x\n", gpe);
				if (pgpe->handler)
					pgpe->handler(sc, gpe, pgpe->arg);
			}
		}
		if (sc->sc_powerbtn) {
			sc->sc_powerbtn = 0;

			aml_notify_dev(ACPI_DEV_PBD, 0x80);

			acpi_evindex++;
			dnprintf(1,"power button pressed\n");
			KNOTE(sc->sc_note, ACPI_EVENT_COMPOSE(ACPI_EV_PWRBTN,
			    acpi_evindex));
		}
		if (sc->sc_sleepbtn) {
			sc->sc_sleepbtn = 0;

			aml_notify_dev(ACPI_DEV_SBD, 0x80);

			acpi_evindex++;
			dnprintf(1,"sleep button pressed\n");
			KNOTE(sc->sc_note, ACPI_EVENT_COMPOSE(ACPI_EV_SLPBTN,
			    acpi_evindex));
		}

		/* handle polling here to keep code non-concurrent*/
		if (sc->sc_poll) {
			sc->sc_poll = 0;
			acpi_poll_notify();
		}
	}
	free(thread, M_DEVBUF);

	kthread_exit(0);
}

/* Deferred kthread creation; logs and disables GPE handling on failure. */
void
acpi_create_thread(void *arg)
{
	struct acpi_softc *sc = arg;

	if (kthread_create(acpi_isr_thread, sc->sc_thread, NULL, DEVNAME(sc))
	    != 0) {
		printf("%s: unable to create isr thread, GPEs disabled\n",
		    DEVNAME(sc));
		return;
	}
}

/*
 * Map "base + size" bytes described by an optional Generic Address
 * Structure into *piot/*pioh.  Without a GAS, I/O space is assumed.
 * Returns 0 on success, -1 on unsupported space or map failure.
 */
int
acpi_map_address(struct acpi_softc *sc, struct acpi_gas *gas, bus_addr_t base,
    bus_size_t size, bus_space_handle_t *pioh, bus_space_tag_t *piot)
{
	int iospace = GAS_SYSTEM_IOSPACE;

	/* No GAS structure, default to I/O space */
	if (gas != NULL) {
		base += gas->address;
		iospace = gas->address_space_id;
	}
	switch (iospace) {
	case GAS_SYSTEM_MEMORY:
		*piot = sc->sc_memt;
		break;
	case GAS_SYSTEM_IOSPACE:
		*piot = sc->sc_iot;
		break;
	default:
		return -1;
	}
	if (bus_space_map(*piot, base, size, 0, pioh))
		return -1;

	return 0;
}

/*
 * _HID callback: attach acpiec for the embedded-controller device id.
 */
int
acpi_foundec(struct aml_node *node, void *arg)
{
	struct acpi_softc *sc = (struct acpi_softc *)arg;
	struct device *self = (struct device *)arg;
	const char *dev;
	struct aml_value res;
	struct acpi_attach_args aaa;

	if (aml_evalnode(sc, node, 0, NULL, &res) != 0)
		return 0;

	/* _HID may be a string or a packed EISA id integer */
	switch (res.type) {
	case AML_OBJTYPE_STRING:
		dev = res.v_string;
		break;
	case AML_OBJTYPE_INTEGER:
		dev = aml_eisaid(aml_val2int(&res));
		break;
	default:
		dev = "unknown";
		break;
	}

	if (strcmp(dev, ACPI_DEV_ECD))
		return 0;

	memset(&aaa, 0, sizeof(aaa));
	aaa.aaa_iot = sc->sc_iot;
	aaa.aaa_memt = sc->sc_memt;
	aaa.aaa_node = node->parent;
	aaa.aaa_dev = dev;
	aaa.aaa_name = "acpiec";
	config_found(self, &aaa, acpi_print);
	aml_freevalue(&res);

	return 0;
}

/*
 * Return 1 if the attach args' device id matches any entry of the
 * NULL-terminated hid list, else 0.
 */
int
acpi_matchhids(struct acpi_attach_args *aa, const char *hids[],
    const char *driver)
{
	int i;

	if (aa->aaa_dev == NULL || aa->aaa_node == NULL)
		return (0);
	for (i = 0; hids[i]; i++) {
		if (!strcmp(aa->aaa_dev, hids[i])) {
			dnprintf(5, "driver %s matches %s\n", driver, hids[i]);
			return (1);
		}
	}
	return (0);
}

/*
 * _HID callback: map well-known device ids to their child drivers
 * (acpiac, acpibat, acpibtn, acpiasus, acpithinkpad, aibs) and attach.
 */
int
acpi_foundhid(struct aml_node *node, void *arg)
{
	struct acpi_softc *sc = (struct acpi_softc *)arg;
	struct device *self = (struct device *)arg;
	const char *dev;
	struct aml_value res;
	struct acpi_attach_args aaa;

	dnprintf(10, "found hid device: %s ", node->parent->name);
	if (aml_evalnode(sc, node, 0, NULL, &res) != 0)
		return 0;

	switch (res.type) {
	case AML_OBJTYPE_STRING:
		dev = res.v_string;
		break;
	case AML_OBJTYPE_INTEGER:
		dev = aml_eisaid(aml_val2int(&res));
		break;
	default:
		dev = "unknown";
		break;
	}
	dnprintf(10, " device: %s\n", dev);

	memset(&aaa, 0, sizeof(aaa));
	aaa.aaa_iot = sc->sc_iot;
	aaa.aaa_memt = sc->sc_memt;
	aaa.aaa_node = node->parent;
	aaa.aaa_dev = dev;

	if (!strcmp(dev, ACPI_DEV_AC))
		aaa.aaa_name = "acpiac";
	else if (!strcmp(dev, ACPI_DEV_CMB))
		aaa.aaa_name = "acpibat";
	else if (!strcmp(dev, ACPI_DEV_LD) ||
	    !strcmp(dev, ACPI_DEV_PBD) ||
	    !strcmp(dev, ACPI_DEV_SBD))
		aaa.aaa_name = "acpibtn";
	else if (!strcmp(dev, ACPI_DEV_ASUS))
		aaa.aaa_name = "acpiasus";
	else if (!strcmp(dev, ACPI_DEV_THINKPAD)) {
		aaa.aaa_name = "acpithinkpad";
		acpi_thinkpad_enabled = 1;
	} else if (!strcmp(dev, ACPI_DEV_ASUSAIBOOSTER))
		aaa.aaa_name = "aibs";

	/* aaa_name is NULL (from memset) for unrecognized ids */
	if (aaa.aaa_name)
		config_found(self, &aaa, acpi_print);

	aml_freevalue(&res);

	return 0;
}

/* _DCK callback: attach acpidock for each docking station found. */
int
acpi_founddock(struct aml_node *node, void *arg)
{
	struct acpi_softc *sc = (struct acpi_softc *)arg;
	struct device *self = (struct device *)arg;
	struct acpi_attach_args aaa;

	dnprintf(10, "found dock entry: %s\n", node->parent->name);

	memset(&aaa, 0, sizeof(aaa));
	aaa.aaa_iot = sc->sc_iot;
	aaa.aaa_memt = sc->sc_memt;
	aaa.aaa_node = node->parent;
	aaa.aaa_name = "acpidock";

	config_found(self, &aaa, acpi_print);

	return 0;
}

/* Callback: attach acpivideo for each video output device found. */
int
acpi_foundvideo(struct aml_node *node, void *arg)
{
	struct acpi_softc *sc = (struct acpi_softc *)arg;
	struct device *self = (struct device *)arg;
	struct acpi_attach_args aaa;

	memset(&aaa, 0, sizeof(aaa));
	aaa.aaa_iot = sc->sc_iot;
	aaa.aaa_memt = sc->sc_memt;
	aaa.aaa_node = node->parent;
	aaa.aaa_name = "acpivideo";

	config_found(self, &aaa, acpi_print);

	return (0);
}

TAILQ_HEAD(acpi_dv_hn, acpi_dev_rank) acpi_dv_h; 2297 void 2298 acpi_dev_sort(void) 2299 { 2300 struct device *dev, *idev; 2301 struct acpi_dev_rank *rentry, *ientry; 2302 int rank; 2303 2304 TAILQ_INIT(&acpi_dv_h); 2305 2306 TAILQ_FOREACH(dev, &alldevs, dv_list) { 2307 for (rank = -1, idev = dev; idev != NULL; 2308 idev = idev->dv_parent, rank++) 2309 ; /* nothing */ 2310 2311 rentry = malloc(sizeof(*rentry), M_DEVBUF, M_WAITOK | M_ZERO); 2312 rentry->rank = rank; 2313 rentry->dev = dev; 2314 2315 if (TAILQ_FIRST(&acpi_dv_h) == NULL) 2316 TAILQ_INSERT_HEAD(&acpi_dv_h, rentry, link); 2317 TAILQ_FOREACH_REVERSE(ientry, &acpi_dv_h, acpi_dv_hn, link) { 2318 if (rentry->rank > ientry->rank) { 2319 TAILQ_INSERT_AFTER(&acpi_dv_h, ientry, rentry, 2320 link); 2321 break; 2322 } 2323 } 2324 } 2325 } 2326 2327 void 2328 acpi_dev_free(void) 2329 { 2330 struct acpi_dev_rank *dvr; 2331 2332 while ((dvr = TAILQ_FIRST(&acpi_dv_h)) != NULL) { 2333 TAILQ_REMOVE(&acpi_dv_h, dvr, link); 2334 if (dvr != NULL) { 2335 free(dvr, M_DEVBUF); 2336 dvr = NULL; 2337 } 2338 } 2339 } 2340 #endif /* SMALL_KERNEL */ 2341