1 /* $OpenBSD: apm.c,v 1.132 2023/07/02 19:02:27 cheloha Exp $ */
2
3 /*-
4 * Copyright (c) 1998-2001 Michael Shalayeff. All rights reserved.
5 * Copyright (c) 1995 John T. Kohl. All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. Neither the names of the authors nor the names of contributors
16 * may be used to endorse or promote products derived from this software
17 * without specific prior written permission.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF MIND, USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * SUCH DAMAGE.
30 *
31 */
32
33 #include "apm.h"
34
35 #if NAPM > 1
36 #error only one APM device may be configured
37 #endif
38
39 #include <sys/param.h>
40 #include <sys/systm.h>
41 #include <sys/kernel.h>
42 #include <sys/kthread.h>
43 #include <sys/rwlock.h>
44 #include <sys/sysctl.h>
45 #include <sys/clockintr.h>
46 #include <sys/device.h>
47 #include <sys/fcntl.h>
48 #include <sys/buf.h>
49 #include <sys/reboot.h>
50 #include <sys/event.h>
51
52 #include <machine/conf.h>
53 #include <machine/cpufunc.h>
54 #include <machine/gdt.h>
55
56 #include <dev/isa/isareg.h>
57 #include <dev/wscons/wsdisplayvar.h>
58
59 #include <machine/acpiapm.h>
60 #include <machine/biosvar.h>
61 #include <machine/apmvar.h>
62
63 #include "wsdisplay.h"
64
65 #if defined(APMDEBUG)
66 #define DPRINTF(x) printf x
67 #else
68 #define DPRINTF(x) /**/
69 #endif
70
/* Autoconf driver glue; at most one instance (apm0), see NAPM check above. */
struct cfdriver apm_cd = {
	NULL, "apm", DV_DULL
};
74
/* Per-device (apm0) state. */
struct apm_softc {
	struct device sc_dev;
	struct klist sc_note;	/* kqueue listeners for APM events */
	int sc_flags;		/* SCFLAG_* open/print bits (below) */
	int batt_life;		/* battery percentage (declared; not read here) */
	int be_batt;		/* byte-swap battery minutes (APM_BEBATT quirk) */
	struct proc *sc_thread;	/* event-polling kthread, see apm_thread() */
	struct rwlock sc_lock;	/* serializes softc and event handling */
};
/* sc_flags: which minor devices are currently open. */
#define SCFLAG_OREAD 0x0000001
#define SCFLAG_OWRITE 0x0000002
#define SCFLAG_OPEN (SCFLAG_OREAD|SCFLAG_OWRITE)
87
88 int apmprobe(struct device *, void *, void *);
89 void apmattach(struct device *, struct device *, void *);
90
/* Attach glue: probe/attach against the bios0 "apm" attach args. */
const struct cfattach apm_ca = {
	sizeof(struct apm_softc), apmprobe, apmattach
};
94
95 void filt_apmrdetach(struct knote *kn);
96 int filt_apmread(struct knote *kn, long hint);
97
/* kqueue EVFILT_READ filter for /dev/apm event delivery. */
const struct filterops apmread_filtops = {
	.f_flags = FILTEROP_ISFD,
	.f_attach = NULL,
	.f_detach = filt_apmrdetach,
	.f_event = filt_apmread,
};
104
105 #define APM_RESUME_HOLDOFF 3
106
107 /*
108 * Flags to control kernel display
109 * SCFLAG_NOPRINT: do not output APM power messages due to
110 * a power change event.
111 *
 * SCFLAG_PCTPRINT: do not output APM power messages due to
 * a power change event unless the battery
114 * percentage changes.
115 */
116 #define SCFLAG_NOPRINT 0x0008000
117 #define SCFLAG_PCTPRINT 0x0004000
118 #define SCFLAG_PRINT (SCFLAG_NOPRINT|SCFLAG_PCTPRINT)
119
120 #define APMUNIT(dev) (minor(dev)&0xf0)
121 #define APMDEV(dev) (minor(dev)&0x0f)
122 #define APMDEV_NORMAL 0
123 #define APMDEV_CTL 8
124
/*
 * Pending-request counters and state flags shared between the APM event
 * thread, the ioctl path, and the idle-loop hooks.
 */
int apm_standbys;	/* standby requests queued for local handling */
int apm_lidclose;	/* lid-close pending; faked into a suspend request */
int apm_userstandbys;	/* user standby requests queued for local handling */
int apm_suspends;	/* suspend requests queued for local handling */
int apm_resumes;	/* post-resume holdoff counter (APM_RESUME_HOLDOFF) */
int apm_battlow;	/* battery-low event seen */
int apm_evindex;	/* monotonically increasing event sequence number */
int apm_error;		/* set when an event could not be delivered to userland */
int apm_op_inprog;	/* a standby/suspend request is being processed */

u_int apm_flags;	/* APM detail flags reported by the BIOS */
u_char apm_majver;	/* negotiated APM spec major version */
u_char apm_minver;	/* negotiated APM spec minor version */
int apm_attached = 0;	/* nonzero once apmattach() completed successfully */
static int apm_slow_called = 0;	/* CPU-slow state; see apm_cpu_slow()/apm_cpu_busy() */

/* Far-call target for the 32-bit protected mode APM BIOS entry point. */
struct {
	u_int32_t entry;	/* entry point offset */
	u_int16_t seg;		/* GDT code segment selector */
	u_int16_t pad;
} apm_ep;

/* Register file passed to/from the BIOS via apmcall(). */
struct apmregs {
	u_int32_t ax;
	u_int32_t bx;
	u_int32_t cx;
	u_int32_t dx;
};
153
154 int apmcall(u_int, u_int, struct apmregs *);
155 int apm_handle_event(struct apm_softc *, struct apmregs *);
156 void apm_set_ver(struct apm_softc *);
157 int apm_periodic_check(struct apm_softc *);
158 void apm_thread_create(void *v);
159 void apm_thread(void *);
160 void apm_disconnect(struct apm_softc *);
161 void apm_perror(const char *, struct apmregs *);
162 void apm_powmgt_enable(int onoff);
163 void apm_powmgt_engage(int onoff, u_int devid);
164 /* void apm_devpowmgt_enable(int onoff, u_int devid); */
165 int apm_record_event(struct apm_softc *sc, u_int type);
166 const char *apm_err_translate(int code);
167
168 #define apm_get_powstat(r) apmcall(APM_POWER_STATUS, APM_DEV_ALLDEVS, r)
169 void apm_suspend(int);
170 void apm_resume(struct apm_softc *, struct apmregs *);
171 void apm_cpu_slow(void);
172
173 static int __inline
apm_get_event(struct apmregs * r)174 apm_get_event(struct apmregs *r)
175 {
176 int rv;
177
178 bzero(r, sizeof(*r));
179 rv = apmcall(APM_GET_PM_EVENT, 0, r);
180 return rv;
181 }
182
183 const char *
apm_err_translate(int code)184 apm_err_translate(int code)
185 {
186 switch (code) {
187 case APM_ERR_PM_DISABLED:
188 return "power management disabled";
189 case APM_ERR_REALALREADY:
190 return "real mode interface already connected";
191 case APM_ERR_NOTCONN:
192 return "interface not connected";
193 case APM_ERR_16ALREADY:
194 return "16-bit interface already connected";
195 case APM_ERR_16NOTSUPP:
196 return "16-bit interface not supported";
197 case APM_ERR_32ALREADY:
198 return "32-bit interface already connected";
199 case APM_ERR_32NOTSUPP:
200 return "32-bit interface not supported";
201 case APM_ERR_UNRECOG_DEV:
202 return "unrecognized device ID";
203 case APM_ERR_ERANGE:
204 return "parameter out of range";
205 case APM_ERR_NOTENGAGED:
206 return "interface not engaged";
207 case APM_ERR_UNABLE:
208 return "unable to enter requested state";
209 case APM_ERR_NOEVENTS:
210 return "No pending events";
211 case APM_ERR_NOT_PRESENT:
212 return "No APM present";
213 default:
214 return "unknown error code?";
215 }
216 }
217
/* Running count of APM BIOS call failures reported via apm_perror(). */
int apmerrors = 0;

/*
 * Decode and log an APM BIOS failure on the console, then bump the
 * error counter.  The previous revision busy-waited a full second
 * (delay(1000000)) after every message, stalling the kernel on each
 * APM error; that spin served no functional purpose and is removed.
 */
void
apm_perror(const char *str, struct apmregs *regs)
{
	printf("apm0: APM %s: %s (%d)\n", str,
	    apm_err_translate(APM_ERR_CODE(regs)),
	    APM_ERR_CODE(regs));

	apmerrors++;
}
230
/*
 * Put the machine into the given APM system power state (APM_SYS_SUSPEND
 * or APM_SYS_STANDBY) and bring it back up after wakeup.  Sequence:
 * quiesce and suspend all devices, power them down, call the BIOS to
 * sleep, then reverse every step.  Interrupts are disabled around the
 * actual sleep; statement order here is significant.
 */
void
apm_suspend(int state)
{
	extern int perflevel;
	int s;

#if NWSDISPLAY > 0
	wsdisplay_suspend();
#endif /* NWSDISPLAY > 0 */
	stop_periodic_resettodr();
	config_suspend_all(DVACT_QUIESCE);
	bufq_quiesce();

	s = splhigh();
	intr_disable();
	cold = 2;
	config_suspend_all(DVACT_SUSPEND);
	suspend_randomness();

	/* XXX
	 * Flag to disk drivers that they should "power down" the disk
	 * when we get to DVACT_POWERDOWN.
	 */
	boothowto |= RB_POWERDOWN;
	config_suspend_all(DVACT_POWERDOWN);
	boothowto &= ~RB_POWERDOWN;

	/* Send machine to sleep */
	apm_set_powstate(APM_DEV_ALLDEVS, state);
	/* Wake up */

	/* They say that some machines may require reinitializing the clocks */
	i8254_startclock();
	if (initclock_func == i8254_initclocks)
		rtcstart();		/* in i8254 mode, rtc is profclock */
	inittodr(gettime());

	clockintr_cpu_init(NULL);
	clockintr_trigger();

	config_suspend_all(DVACT_RESUME);
	cold = 0;
	intr_enable();
	splx(s);

	resume_randomness(NULL, 0);	/* force RNG upper level reseed */
	bufq_restart();

	config_suspend_all(DVACT_WAKEUP);
	start_periodic_resettodr();

#if NWSDISPLAY > 0
	wsdisplay_resume();
#endif /* NWSDISPLAY > 0 */

	/* restore hw.setperf */
	if (cpu_setperf != NULL)
		cpu_setperf(perflevel);
}
290
/*
 * Post-resume bookkeeping: arm the resume holdoff counter so freshly
 * queued standby/suspend requests are ignored for a few polling cycles
 * (see apm_handle_event()), then forward the resume event to any
 * /dev/apm listeners.
 */
void
apm_resume(struct apm_softc *sc, struct apmregs *regs)
{

	apm_resumes = APM_RESUME_HOLDOFF;

	/* lower bit in cx means pccard was powered down */

	apm_record_event(sc, regs->bx);
}
301
302 int
apm_record_event(struct apm_softc * sc,u_int type)303 apm_record_event(struct apm_softc *sc, u_int type)
304 {
305 if (!apm_error && (sc->sc_flags & SCFLAG_OPEN) == 0) {
306 DPRINTF(("apm_record_event: no user waiting\n"));
307 apm_error++;
308 return 1;
309 }
310
311 apm_evindex++;
312 knote_locked(&sc->sc_note, APM_EVENT_COMPOSE(type, apm_evindex));
313 return (0);
314 }
315
/*
 * Dispatch one BIOS event obtained from apm_get_event().  Standby and
 * suspend requests are normally forwarded to userland (apmd) via
 * apm_record_event(); if nobody is listening we queue the action
 * ourselves through the apm_suspends/apm_standbys counters, which
 * apm_periodic_check() acts on afterwards.  Returns nonzero when the
 * caller's event-drain loop should stop (no event pending, or an
 * inconsistent request state was detected).
 */
int
apm_handle_event(struct apm_softc *sc, struct apmregs *regs)
{
	struct apmregs nregs;
	int ret = 0;

	switch (regs->bx) {
	case APM_NOEVENT:
		ret++;
		break;

	case APM_USER_STANDBY_REQ:
		/* Ignore requests just after a resume or while one is pending. */
		if (apm_resumes || apm_op_inprog)
			break;
		DPRINTF(("user wants STANDBY--fat chance\n"));
		apm_op_inprog++;
		if (apm_record_event(sc, regs->bx)) {
			DPRINTF(("standby ourselves\n"));
			apm_userstandbys++;
		}
		break;
	case APM_STANDBY_REQ:
		if (apm_resumes || apm_op_inprog)
			break;
		DPRINTF(("standby requested\n"));
		/* A request arriving while another is queued is an error. */
		if (apm_standbys || apm_suspends) {
			DPRINTF(("premature standby\n"));
			apm_error++;
			ret++;
		}
		apm_op_inprog++;
		if (apm_record_event(sc, regs->bx)) {
			DPRINTF(("standby ourselves\n"));
			apm_standbys++;
		}
		break;
	case APM_USER_SUSPEND_REQ:
		if (apm_resumes || apm_op_inprog)
			break;
		DPRINTF(("user wants suspend--fat chance!\n"));
		apm_op_inprog++;
		if (apm_record_event(sc, regs->bx)) {
			DPRINTF(("suspend ourselves\n"));
			apm_suspends++;
		}
		break;
	case APM_SUSPEND_REQ:
		if (apm_resumes || apm_op_inprog)
			break;
		DPRINTF(("suspend requested\n"));
		if (apm_standbys || apm_suspends) {
			DPRINTF(("premature suspend\n"));
			apm_error++;
			ret++;
		}
		apm_op_inprog++;
		if (apm_record_event(sc, regs->bx)) {
			DPRINTF(("suspend ourselves\n"));
			apm_suspends++;
		}
		break;
	case APM_POWER_CHANGE:
		DPRINTF(("power status change\n"));
		apm_get_powstat(&nregs);
		apm_record_event(sc, regs->bx);
		break;
	case APM_NORMAL_RESUME:
		DPRINTF(("system resumed\n"));
		apm_resume(sc, regs);
		break;
	case APM_CRIT_RESUME:
		DPRINTF(("system resumed without us!\n"));
		apm_resume(sc, regs);
		break;
	case APM_SYS_STANDBY_RESUME:
		DPRINTF(("system standby resume\n"));
		apm_resume(sc, regs);
		break;
	case APM_UPDATE_TIME:
		DPRINTF(("update time, please\n"));
		apm_record_event(sc, regs->bx);
		break;
	case APM_CRIT_SUSPEND_REQ:
		/* Critical suspend cannot wait for userland: do it now. */
		DPRINTF(("suspend required immediately\n"));
		apm_record_event(sc, regs->bx);
		apm_suspend(APM_SYS_SUSPEND);
		break;
	case APM_BATTERY_LOW:
		DPRINTF(("Battery low!\n"));
		apm_battlow++;
		apm_record_event(sc, regs->bx);
		break;
	case APM_CAPABILITY_CHANGE:
		DPRINTF(("capability change\n"));
		/* The capability call only exists on APM >= 1.2. */
		if (apm_minver < 2) {
			DPRINTF(("adult event\n"));
		} else {
			if (apmcall(APM_GET_CAPABILITIES, APM_DEV_APM_BIOS,
			    &nregs) != 0) {
				apm_perror("get capabilities", &nregs);
			} else {
				apm_get_powstat(&nregs);
			}
		}
		break;
	default: {
#ifdef APMDEBUG
		char *p;
		switch (regs->bx >> 8) {
		case 0: p = "reserved system"; break;
		case 1: p = "reserved device"; break;
		case 2: p = "OEM defined"; break;
		default:p = "reserved"; break;
		}
#endif
		DPRINTF(("apm_handle_event: %s event, code %d\n", p, regs->bx));
	}
	}
	return ret;
}
436
/*
 * One polling cycle: drain all pending BIOS events, then carry out any
 * standby/suspend that ended up queued locally.  Returns -1 when the
 * interface looks dead (undelivered event or "not connected" error) so
 * the caller can disconnect, 0 otherwise.
 */
int
apm_periodic_check(struct apm_softc *sc)
{
	struct apmregs regs;
	int ret = 0;

	/* Tell the BIOS we are still working on the last request. */
	if (apm_op_inprog)
		apm_set_powstate(APM_DEV_ALLDEVS, APM_LASTREQ_INPROG);

	while (1) {
		if (apm_get_event(&regs) != 0) {
			/* i think some bioses combine the error codes */
			if (!(APM_ERR_CODE(&regs) & APM_ERR_NOEVENTS))
				apm_perror("get event", &regs);
			break;
		}

		/* If the APM BIOS tells us to suspend, don't do it twice */
		if (regs.bx == APM_SUSPEND_REQ)
			apm_lidclose = 0;
		if (apm_handle_event(sc, &regs))
			break;
	}

	if (apm_error || APM_ERR_CODE(&regs) == APM_ERR_NOTCONN)
		ret = -1;

	if (apm_lidclose) {
		apm_lidclose = 0;
		/* Fake a suspend request */
		regs.bx = APM_SUSPEND_REQ;
		apm_handle_event(sc, &regs);
	}
	/* Suspend takes precedence over standby when both were queued. */
	if (apm_suspends /*|| (apm_battlow && apm_userstandbys)*/) {
		apm_op_inprog = 0;
		apm_suspend(APM_SYS_SUSPEND);
	} else if (apm_standbys || apm_userstandbys) {
		apm_op_inprog = 0;
		apm_suspend(APM_SYS_STANDBY);
	}
	apm_suspends = apm_standbys = apm_battlow = apm_userstandbys = 0;
	apm_error = 0;

	/* Count down the post-resume holdoff. */
	if (apm_resumes)
		apm_resumes--;
	return (ret);
}
484
485 void
apm_powmgt_enable(int onoff)486 apm_powmgt_enable(int onoff)
487 {
488 struct apmregs regs;
489
490 bzero(®s, sizeof(regs));
491 regs.cx = onoff ? APM_MGT_ENABLE : APM_MGT_DISABLE;
492 if (apmcall(APM_PWR_MGT_ENABLE,
493 (apm_minver? APM_DEV_APM_BIOS : APM_MGT_ALL), ®s) != 0)
494 apm_perror("power management enable", ®s);
495 }
496
497 void
apm_powmgt_engage(int onoff,u_int dev)498 apm_powmgt_engage(int onoff, u_int dev)
499 {
500 struct apmregs regs;
501
502 if (apm_minver == 0)
503 return;
504 bzero(®s, sizeof(regs));
505 regs.cx = onoff ? APM_MGT_ENGAGE : APM_MGT_DISENGAGE;
506 if (apmcall(APM_PWR_MGT_ENGAGE, dev, ®s) != 0)
507 printf("apm0: APM engage (device %x): %s (%d)\n",
508 dev, apm_err_translate(APM_ERR_CODE(®s)),
509 APM_ERR_CODE(®s));
510 }
511
#ifdef notused
/*
 * Switch a device between automatic BIOS power management (enable) and
 * explicit program control (disable).  APM 1.1+ only.  Currently
 * unreferenced; compiled out under #ifdef notused.
 */
void
apm_devpowmgt_enable(int onoff, u_int dev)
{
	struct apmregs regs;

	if (apm_minver == 0)
		return;
	/* enable is auto BIOS management.
	 * disable is program control.
	 */
	bzero(&regs, sizeof(regs));
	regs.cx = onoff ? APM_MGT_ENABLE : APM_MGT_DISABLE;
	if (apmcall(APM_DEVICE_MGMT_ENABLE, dev, &regs) != 0)
		printf("APM device engage (device %x): %s (%d)\n",
		    dev, apm_err_translate(APM_ERR_CODE(&regs)),
		    APM_ERR_CODE(&regs));
}
#endif
531
532 int
apm_set_powstate(u_int dev,u_int state)533 apm_set_powstate(u_int dev, u_int state)
534 {
535 struct apmregs regs;
536
537 if (!apm_cd.cd_ndevs || (apm_minver == 0 && state > APM_SYS_OFF))
538 return EINVAL;
539 bzero(®s, sizeof(regs));
540 regs.cx = state;
541 if (apmcall(APM_SET_PWR_STATE, dev, ®s) != 0) {
542 apm_perror("set power state", ®s);
543 if (APM_ERR_CODE(®s) == APM_ERR_UNRECOG_DEV)
544 return ENXIO;
545 else
546 return EIO;
547 }
548 return 0;
549 }
550
/*
 * Idle-loop entry hook, installed when the BIOS slows the clock while
 * idle (APM_IDLE_SLOWS).  Calls the BIOS idle service at most once per
 * idle period (tracked via the CP_IDLE tick counter) and remembers that
 * the CPU was slowed so apm_cpu_busy() knows to undo it.
 */
void
apm_cpu_slow(void)
{
	struct apmregs regs;
	static u_int64_t call_apm_slow = 0;

	if (call_apm_slow != curcpu()->ci_schedstate.spc_cp_time[CP_IDLE]) {
		/* Always call BIOS halt/idle stuff */
		bzero(&regs, sizeof(regs));
		if (apmcall(APM_CPU_IDLE, 0, &regs) != 0) {
#ifdef DIAGNOSTIC
			apm_perror("set CPU slow", &regs);
#endif
		}
		apm_slow_called = 1;
		call_apm_slow = curcpu()->ci_schedstate.spc_cp_time[CP_IDLE];
	}
}
569
570 void
apm_cpu_busy(void)571 apm_cpu_busy(void)
572 {
573 struct apmregs regs;
574
575 if (!apm_slow_called)
576 return;
577
578 if (apm_flags & APM_IDLE_SLOWS) {
579 bzero(®s, sizeof(regs));
580 if (apmcall(APM_CPU_BUSY, 0, ®s) != 0) {
581 #ifdef DIAGNOSTIC
582 apm_perror("set CPU busy", ®s);
583 #endif
584 }
585 apm_slow_called = 0;
586 }
587 }
588
/*
 * Idle-loop cycle hook, installed when the BIOS does not slow the clock
 * while idle.  The BIOS idle call is made only once per idle period
 * (CP_IDLE tick unchanged means still the same period); otherwise, and
 * whenever the BIOS may not have halted, we hlt directly with
 * interrupts re-enabled.
 */
void
apm_cpu_idle(void)
{
	struct apmregs regs;
	static u_int64_t call_apm_idle = 0;

	/*
	 * We call the bios APM_IDLE routine here only when we
	 * have been idle for some time - otherwise we just hlt.
	 */

	if (call_apm_idle != curcpu()->ci_schedstate.spc_cp_time[CP_IDLE]) {
		/* Always call BIOS halt/idle stuff */
		bzero(&regs, sizeof(regs));
		if (apmcall(APM_CPU_IDLE, 0, &regs) != 0) {
#ifdef DIAGNOSTIC
			apm_perror("set CPU idle", &regs);
#endif
		}

		/* If BIOS did not halt, halt now! */
		if (apm_flags & APM_IDLE_SLOWS) {
			__asm volatile("sti;hlt");
		}
		call_apm_idle = curcpu()->ci_schedstate.spc_cp_time[CP_IDLE];
	} else {
		__asm volatile("sti;hlt");
	}
}
618
/*
 * Negotiate the driver/BIOS protocol version.  Try 1.2 first (only if
 * the BIOS advertises 1.2 in its flags), fall back to 1.1, and finally
 * fall back to whatever version the attach flags reported.  Sets
 * apm_majver/apm_minver and prints the result on the attach line.
 */
void
apm_set_ver(struct apm_softc *self)
{
	struct apmregs regs;
	int rv = 0;

	bzero(&regs, sizeof(regs));
	regs.cx = APM_VERSION;

	if (APM_MAJOR(apm_flags) == 1 && APM_MINOR(apm_flags) == 2 &&
	    (rv = apmcall(APM_DRIVER_VERSION, APM_DEV_APM_BIOS, &regs)) == 0) {
		apm_majver = APM_CONN_MAJOR(&regs);
		apm_minver = APM_CONN_MINOR(&regs);
	} else {
#ifdef APMDEBUG
		if (rv)
			apm_perror("set version 1.2", &regs);
#endif
		/* try downgrading to 1.1 */
		bzero(&regs, sizeof(regs));
		regs.cx = 0x0101;

		if (apmcall(APM_DRIVER_VERSION, APM_DEV_APM_BIOS, &regs) == 0) {
			apm_majver = 1;
			apm_minver = 1;
		} else {
#ifdef APMDEBUG
			apm_perror("set version 1.1", &regs);
#endif
			/* stay w/ flags then */
			apm_majver = APM_MAJOR(apm_flags);
			apm_minver = APM_MINOR(apm_flags);

			/* fix version for some endianness-challenged compaqs */
			if (!apm_majver) {
				apm_majver = 1;
				apm_minver = 0;
			}
		}
	}
	printf(": Power Management spec V%d.%d", apm_majver, apm_minver);
#ifdef DIAGNOSTIC
	if (apm_flags & APM_IDLE_SLOWS)
		printf(" (slowidle)");
	if (apm_flags & APM_BIOS_PM_DISABLED)
		printf(" (BIOS management disabled)");
	if (apm_flags & APM_BIOS_PM_DISENGAGED)
		printf(" (BIOS managing devices)");
#endif
	printf("\n");
}
670
671 void
apm_disconnect(struct apm_softc * sc)672 apm_disconnect(struct apm_softc *sc)
673 {
674 struct apmregs regs;
675
676 bzero(®s, sizeof(regs));
677 if (apmcall(APM_SYSTEM_DEFAULTS,
678 (apm_minver == 1 ? APM_DEV_ALLDEVS : APM_DEFAULTS_ALL), ®s))
679 apm_perror("system defaults failed", ®s);
680
681 if (apmcall(APM_DISCONNECT, APM_DEV_APM_BIOS, ®s))
682 apm_perror("disconnect failed", ®s);
683 else
684 printf("%s: disconnected\n", sc->sc_dev.dv_xname);
685 apm_flags |= APM_BIOS_PM_DISABLED;
686 }
687
/*
 * Match the "apm" entry handed down by bios0.  Requires the 32-bit
 * protected-mode interface, rejects BIOS code/data ranges that cross
 * into the ISA memory hole (IOM_BEGIN), and verifies the ranges can
 * actually be mapped.  Only one apm device can ever attach.
 */
int
apmprobe(struct device *parent, void *match, void *aux)
{
	struct bios_attach_args *ba = aux;
	bios_apminfo_t *ap = ba->ba_apmp;
	bus_space_handle_t ch, dh;

	if (apm_cd.cd_ndevs || strcmp(ba->ba_name, "apm") ||
	    !(ap->apm_detail & APM_32BIT_SUPPORTED))
		return 0;

	/* addresses check
	   since pc* console and vga* probes much later
	   we cannot check for video memory being mapped
	   for apm stuff w/ bus_space_map() */
	if (ap->apm_code_len == 0 ||
	    (ap->apm_code32_base < IOM_BEGIN &&
	     ap->apm_code32_base + ap->apm_code_len > IOM_BEGIN) ||
	    (ap->apm_code16_base < IOM_BEGIN &&
	     ap->apm_code16_base + ap->apm_code16_len > IOM_BEGIN) ||
	    (ap->apm_data_base < IOM_BEGIN &&
	     ap->apm_data_base + ap->apm_data_len > IOM_BEGIN))
		return 0;

	/* Verify the code and data areas are mappable, then release them. */
	if (bus_space_map(ba->ba_memt, ap->apm_code32_base,
	    ap->apm_code_len, 1, &ch) != 0) {
		DPRINTF(("apm0: can't map code\n"));
		return 0;
	}
	bus_space_unmap(ba->ba_memt, ch, ap->apm_code_len);

	if (bus_space_map(ba->ba_memt, ap->apm_data_base,
	    ap->apm_data_len, 1, &dh) != 0) {
		DPRINTF(("apm0: can't map data\n"));
		return 0;
	}
	bus_space_unmap(ba->ba_memt, dh, ap->apm_data_len);
	return 1;
}
727
/*
 * Attach apm0: build GDT segments covering the BIOS 32-bit/16-bit code
 * and data areas, negotiate the protocol version, enable and engage
 * power management, and install the APM idle-loop hooks.  The event
 * polling kthread is created later via kthread_create_deferred().
 */
void
apmattach(struct device *parent, struct device *self, void *aux)
{
	struct bios_attach_args *ba = aux;
	bios_apminfo_t *ap = ba->ba_apmp;
	struct apm_softc *sc = (void *)self;
	struct apmregs regs;
	u_int cbase, clen, l;
	bus_space_handle_t ch16, ch32, dh;

	apm_flags = ap->apm_detail;
	/*
	 * set up GDT descriptors for APM
	 */
	if (apm_flags & APM_32BIT_SUPPORTED) {

		/* truncate segments' limits to a page */
		ap->apm_code_len -= (ap->apm_code32_base +
		    ap->apm_code_len + 1) & 0xfff;
		ap->apm_code16_len -= (ap->apm_code16_base +
		    ap->apm_code16_len + 1) & 0xfff;
		ap->apm_data_len -= (ap->apm_data_base +
		    ap->apm_data_len + 1) & 0xfff;

		/* adjust version (config-file flags override BIOS report) */
		if ((sc->sc_dev.dv_cfdata->cf_flags & APM_VERMASK) &&
		    (apm_flags & APM_VERMASK) !=
		    (sc->sc_dev.dv_cfdata->cf_flags & APM_VERMASK))
			apm_flags = (apm_flags & ~APM_VERMASK) |
			    (sc->sc_dev.dv_cfdata->cf_flags & APM_VERMASK);
		if (sc->sc_dev.dv_cfdata->cf_flags & APM_NOCLI) {
			extern int apm_cli; /* from apmcall.S */
			apm_cli = 0;
		}
		if (sc->sc_dev.dv_cfdata->cf_flags & APM_BEBATT)
			sc->be_batt = 1;
		apm_ep.seg = GSEL(GAPM32CODE_SEL,SEL_KPL);
		apm_ep.entry = ap->apm_entry;
		/*
		 * Map the combined 32/16-bit code range and the data
		 * range.  If they overlap, share a single mapping and
		 * offset the handles into it.
		 */
		cbase = min(ap->apm_code32_base, ap->apm_code16_base);
		clen = max(ap->apm_code32_base + ap->apm_code_len,
		    ap->apm_code16_base + ap->apm_code16_len) - cbase;
		if ((cbase <= ap->apm_data_base &&
		    cbase + clen >= ap->apm_data_base) ||
		    (ap->apm_data_base <= cbase &&
		    ap->apm_data_base + ap->apm_data_len >= cbase)) {
			l = max(ap->apm_data_base + ap->apm_data_len + 1,
			    cbase + clen + 1) -
			    min(ap->apm_data_base, cbase);
			bus_space_map(ba->ba_memt,
			    min(ap->apm_data_base, cbase),
			    l, 1, &dh);
			ch16 = dh;
			if (ap->apm_data_base < cbase)
				ch16 += cbase - ap->apm_data_base;
			else
				dh += ap->apm_data_base - cbase;
		} else {

			bus_space_map(ba->ba_memt, cbase, clen + 1, 1, &ch16);
			bus_space_map(ba->ba_memt, ap->apm_data_base,
			    ap->apm_data_len + 1, 1, &dh);
		}
		/* Derive the 32-bit code handle from the 16-bit one. */
		ch32 = ch16;
		if (ap->apm_code16_base == cbase)
			ch32 += ap->apm_code32_base - cbase;
		else
			ch16 += ap->apm_code16_base - cbase;

		setgdt(GAPM32CODE_SEL, (void *)ch32, ap->apm_code_len,
		    SDT_MEMERA, SEL_KPL, 1, 0);
		setgdt(GAPM16CODE_SEL, (void *)ch16, ap->apm_code16_len,
		    SDT_MEMERA, SEL_KPL, 0, 0);
		setgdt(GAPMDATA_SEL, (void *)dh, ap->apm_data_len, SDT_MEMRWA,
		    SEL_KPL, 1, 0);
		DPRINTF((": flags %x code 32:%x/%lx[%x] 16:%x/%lx[%x] "
		    "data %x/%lx/%x ep %x (%x:%lx)\n%s", apm_flags,
		    ap->apm_code32_base, ch32, ap->apm_code_len,
		    ap->apm_code16_base, ch16, ap->apm_code16_len,
		    ap->apm_data_base, dh, ap->apm_data_len,
		    ap->apm_entry, apm_ep.seg, ap->apm_entry+ch32,
		    sc->sc_dev.dv_xname));

		apm_set_ver(sc);

		if (apm_flags & APM_BIOS_PM_DISABLED)
			apm_powmgt_enable(1);

		/* Engage cooperative power management on all devices (v1.1) */
		apm_powmgt_engage(1, APM_DEV_ALLDEVS);

		bzero(&regs, sizeof(regs));
		if (apm_get_powstat(&regs) != 0)
			apm_perror("get power status", &regs);
		apm_cpu_busy();

		rw_init(&sc->sc_lock, "apmlk");

		/*
		 * Do a check once, ignoring any errors. This avoids
		 * gratuitous APM disconnects on laptops where the first
		 * event in the queue (after a boot) is non-recognizable.
		 * The IBM ThinkPad 770Z is one of those.
		 */
		apm_periodic_check(sc);

		if (apm_periodic_check(sc) == -1) {
			apm_disconnect(sc);

			/* Failed, nuke APM idle loop */
			cpu_idle_enter_fcn = NULL;
			cpu_idle_cycle_fcn = NULL;
			cpu_idle_leave_fcn = NULL;
		} else {
			kthread_create_deferred(apm_thread_create, sc);

			/* Setup APM idle loop */
			if (apm_flags & APM_IDLE_SLOWS) {
				cpu_idle_enter_fcn = apm_cpu_slow;
				cpu_idle_cycle_fcn = NULL;
				cpu_idle_leave_fcn = apm_cpu_busy;
			} else {
				cpu_idle_enter_fcn = NULL;
				cpu_idle_cycle_fcn = apm_cpu_idle;
				cpu_idle_leave_fcn = NULL;
			}

			/* All is well, let the rest of the world know */
			acpiapm_open = apmopen;
			acpiapm_close = apmclose;
			acpiapm_ioctl = apmioctl;
			acpiapm_kqfilter = apmkqfilter;
			apm_attached = 1;
		}
	} else {
		/* No 32-bit interface: leave the APM GDT slots empty. */
		setgdt(GAPM32CODE_SEL, NULL, 0, 0, 0, 0, 0);
		setgdt(GAPM16CODE_SEL, NULL, 0, 0, 0, 0, 0);
		setgdt(GAPMDATA_SEL, NULL, 0, 0, 0, 0, 0);
	}
}
867
868 void
apm_thread_create(void * v)869 apm_thread_create(void *v)
870 {
871 struct apm_softc *sc = v;
872
873 #ifdef MULTIPROCESSOR
874 if (ncpus > 1) {
875 apm_disconnect(sc);
876
877 /* Nuke APM idle loop */
878 cpu_idle_enter_fcn = NULL;
879 cpu_idle_cycle_fcn = NULL;
880 cpu_idle_leave_fcn = NULL;
881
882 return;
883 }
884 #endif
885
886 if (kthread_create(apm_thread, sc, &sc->sc_thread,
887 sc->sc_dev.dv_xname)) {
888 apm_disconnect(sc);
889 printf("%s: failed to create kernel thread, disabled",
890 sc->sc_dev.dv_xname);
891
892 /* Nuke APM idle loop */
893 cpu_idle_enter_fcn = NULL;
894 cpu_idle_cycle_fcn = NULL;
895 cpu_idle_leave_fcn = NULL;
896 }
897 }
898
899 void
apm_thread(void * v)900 apm_thread(void *v)
901 {
902 struct apm_softc *sc = v;
903
904 for (;;) {
905 rw_enter_write(&sc->sc_lock);
906 (void) apm_periodic_check(sc);
907 rw_exit_write(&sc->sc_lock);
908 tsleep_nsec(&nowake, PWAIT, "apmev", SEC_TO_NSEC(1));
909 }
910 }
911
912 int
apmopen(dev_t dev,int flag,int mode,struct proc * p)913 apmopen(dev_t dev, int flag, int mode, struct proc *p)
914 {
915 struct apm_softc *sc;
916 int error = 0;
917
918 /* apm0 only */
919 if (!apm_cd.cd_ndevs || APMUNIT(dev) != 0 ||
920 !(sc = apm_cd.cd_devs[APMUNIT(dev)]))
921 return ENXIO;
922
923 if (apm_flags & APM_BIOS_PM_DISABLED)
924 return ENXIO;
925
926 DPRINTF(("apmopen: dev %d pid %d flag %x mode %x\n",
927 APMDEV(dev), p->p_p->ps_pid, flag, mode));
928
929 rw_enter_write(&sc->sc_lock);
930 switch (APMDEV(dev)) {
931 case APMDEV_CTL:
932 if (!(flag & FWRITE)) {
933 error = EINVAL;
934 break;
935 }
936 if (sc->sc_flags & SCFLAG_OWRITE) {
937 error = EBUSY;
938 break;
939 }
940 sc->sc_flags |= SCFLAG_OWRITE;
941 break;
942 case APMDEV_NORMAL:
943 if (!(flag & FREAD) || (flag & FWRITE)) {
944 error = EINVAL;
945 break;
946 }
947 sc->sc_flags |= SCFLAG_OREAD;
948 break;
949 default:
950 error = ENXIO;
951 break;
952 }
953 rw_exit_write(&sc->sc_lock);
954 return error;
955 }
956
957 int
apmclose(dev_t dev,int flag,int mode,struct proc * p)958 apmclose(dev_t dev, int flag, int mode, struct proc *p)
959 {
960 struct apm_softc *sc;
961
962 /* apm0 only */
963 if (!apm_cd.cd_ndevs || APMUNIT(dev) != 0 ||
964 !(sc = apm_cd.cd_devs[APMUNIT(dev)]))
965 return ENXIO;
966
967 DPRINTF(("apmclose: pid %d flag %x mode %x\n",
968 p->p_p->ps_pid, flag, mode));
969
970 rw_enter_write(&sc->sc_lock);
971 switch (APMDEV(dev)) {
972 case APMDEV_CTL:
973 sc->sc_flags &= ~SCFLAG_OWRITE;
974 break;
975 case APMDEV_NORMAL:
976 sc->sc_flags &= ~SCFLAG_OREAD;
977 break;
978 }
979 rw_exit_write(&sc->sc_lock);
980 return 0;
981 }
982
983 int
apmioctl(dev_t dev,u_long cmd,caddr_t data,int flag,struct proc * p)984 apmioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p)
985 {
986 struct apm_softc *sc;
987 struct apmregs regs;
988 int error = 0;
989
990 /* apm0 only */
991 if (!apm_cd.cd_ndevs || APMUNIT(dev) != 0 ||
992 !(sc = apm_cd.cd_devs[APMUNIT(dev)]))
993 return ENXIO;
994
995 rw_enter_write(&sc->sc_lock);
996 switch (cmd) {
997 /* some ioctl names from linux */
998 case APM_IOC_STANDBY:
999 if ((flag & FWRITE) == 0)
1000 error = EBADF;
1001 else
1002 apm_userstandbys++;
1003 break;
1004 case APM_IOC_SUSPEND:
1005 if ((flag & FWRITE) == 0)
1006 error = EBADF;
1007 else
1008 apm_suspends++;
1009 break;
1010 case APM_IOC_PRN_CTL:
1011 if ((flag & FWRITE) == 0)
1012 error = EBADF;
1013 else {
1014 int flag = *(int *)data;
1015 DPRINTF(( "APM_IOC_PRN_CTL: %d\n", flag ));
1016 switch (flag) {
1017 case APM_PRINT_ON: /* enable printing */
1018 sc->sc_flags &= ~SCFLAG_PRINT;
1019 break;
1020 case APM_PRINT_OFF: /* disable printing */
1021 sc->sc_flags &= ~SCFLAG_PRINT;
1022 sc->sc_flags |= SCFLAG_NOPRINT;
1023 break;
1024 case APM_PRINT_PCT: /* disable some printing */
1025 sc->sc_flags &= ~SCFLAG_PRINT;
1026 sc->sc_flags |= SCFLAG_PCTPRINT;
1027 break;
1028 default:
1029 error = EINVAL;
1030 break;
1031 }
1032 }
1033 break;
1034 case APM_IOC_DEV_CTL:
1035 if ((flag & FWRITE) == 0)
1036 error = EBADF;
1037 else {
1038 struct apm_ctl *actl = (struct apm_ctl *)data;
1039
1040 bzero(®s, sizeof(regs));
1041 if (!apmcall(APM_GET_POWER_STATE, actl->dev, ®s))
1042 printf("%s: dev %04x state %04x\n",
1043 sc->sc_dev.dv_xname, dev, regs.cx);
1044
1045 error = apm_set_powstate(actl->dev, actl->mode);
1046 }
1047 break;
1048 case APM_IOC_GETPOWER:
1049 if (apm_get_powstat(®s) == 0) {
1050 struct apm_power_info *powerp =
1051 (struct apm_power_info *)data;
1052
1053 bzero(powerp, sizeof(*powerp));
1054 if (BATT_LIFE(®s) != APM_BATT_LIFE_UNKNOWN)
1055 powerp->battery_life = BATT_LIFE(®s);
1056 powerp->ac_state = AC_STATE(®s);
1057 switch (apm_minver) {
1058 case 0:
1059 if (!(BATT_FLAGS(®s) & APM_BATT_FLAG_NOBATTERY))
1060 powerp->battery_state = BATT_STATE(®s);
1061 break;
1062 case 1:
1063 default:
1064 if (BATT_FLAGS(®s) & APM_BATT_FLAG_HIGH)
1065 powerp->battery_state = APM_BATT_HIGH;
1066 else if (BATT_FLAGS(®s) & APM_BATT_FLAG_LOW)
1067 powerp->battery_state = APM_BATT_LOW;
1068 else if (BATT_FLAGS(®s) & APM_BATT_FLAG_CRITICAL)
1069 powerp->battery_state = APM_BATT_CRITICAL;
1070 else if (BATT_FLAGS(®s) & APM_BATT_FLAG_CHARGING)
1071 powerp->battery_state = APM_BATT_CHARGING;
1072 else if (BATT_FLAGS(®s) & APM_BATT_FLAG_NOBATTERY)
1073 powerp->battery_state = APM_BATTERY_ABSENT;
1074 else
1075 powerp->battery_state = APM_BATT_UNKNOWN;
1076 if (BATT_REM_VALID(®s)) {
1077 powerp->minutes_left = BATT_REMAINING(®s);
1078 if (sc->be_batt)
1079 powerp->minutes_left =
1080 swap16(powerp->minutes_left);
1081 }
1082 }
1083 } else {
1084 apm_perror("ioctl get power status", ®s);
1085 error = EIO;
1086 }
1087 break;
1088 case APM_IOC_STANDBY_REQ:
1089 if ((flag & FWRITE) == 0)
1090 error = EBADF;
1091 /* only fails if no one cares. apmd at least should */
1092 else if (apm_record_event(sc, APM_USER_STANDBY_REQ))
1093 error = EINVAL; /* ? */
1094 break;
1095 case APM_IOC_SUSPEND_REQ:
1096 if ((flag & FWRITE) == 0)
1097 error = EBADF;
1098 /* only fails if no one cares. apmd at least should */
1099 else if (apm_record_event(sc, APM_USER_SUSPEND_REQ))
1100 error = EINVAL; /* ? */
1101 break;
1102 default:
1103 error = ENOTTY;
1104 }
1105
1106 rw_exit_write(&sc->sc_lock);
1107 return error;
1108 }
1109
/*
 * kqueue detach hook: unhook the knote from the softc's note list
 * under the softc lock.
 */
void
filt_apmrdetach(struct knote *kn)
{
	struct apm_softc *sc = (struct apm_softc *)kn->kn_hook;

	rw_enter_write(&sc->sc_lock);
	klist_remove_locked(&sc->sc_note, kn);
	rw_exit_write(&sc->sc_lock);
}
1119
/*
 * kqueue event hook: apm events are always reported as "readable"; the
 * first nonzero event code passed via hint is latched into kn_data.
 */
int
filt_apmread(struct knote *kn, long hint)
{
	/* XXX weird kqueue_scan() semantics */
	if (hint && !kn->kn_data)
		kn->kn_data = (int)hint;
	return (1);
}
1128
1129 int
apmkqfilter(dev_t dev,struct knote * kn)1130 apmkqfilter(dev_t dev, struct knote *kn)
1131 {
1132 struct apm_softc *sc;
1133
1134 /* apm0 only */
1135 if (!apm_cd.cd_ndevs || APMUNIT(dev) != 0 ||
1136 !(sc = apm_cd.cd_devs[APMUNIT(dev)]))
1137 return ENXIO;
1138
1139 switch (kn->kn_filter) {
1140 case EVFILT_READ:
1141 kn->kn_fop = &apmread_filtops;
1142 break;
1143 default:
1144 return (EINVAL);
1145 }
1146
1147 kn->kn_hook = (caddr_t)sc;
1148
1149 rw_enter_write(&sc->sc_lock);
1150 klist_insert_locked(&sc->sc_note, kn);
1151 rw_exit_write(&sc->sc_lock);
1152 return (0);
1153 }
1154