/*-
 * Copyright (c) 2001 Mitsuru IWASAKI
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/condvar.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/poll.h>
#include <sys/sysctl.h>
#include <sys/uio.h>
#include <vm/vm.h>
#include <vm/pmap.h>

#include <contrib/dev/acpica/include/acpi.h>
#include <contrib/dev/acpica/include/accommon.h>
#include <contrib/dev/acpica/include/actables.h>

#include <dev/acpica/acpivar.h>
#include <dev/acpica/acpiio.h>

#include <machine/nexusvar.h>

/*
 * APM driver emulation
 */

#include <machine/apm_bios.h>
#include <machine/pc/bios.h>

#include <i386/bios/apm.h>

SYSCTL_DECL(_debug_acpi);

uint32_t acpi_resume_beep;
TUNABLE_INT("debug.acpi.resume_beep", &acpi_resume_beep);
SYSCTL_UINT(_debug_acpi, OID_AUTO, resume_beep, CTLFLAG_RW, &acpi_resume_beep,
    0, "Beep the PC speaker when resuming");
uint32_t acpi_reset_video;
TUNABLE_INT("hw.acpi.reset_video", &acpi_reset_video);

static int intr_model = ACPI_INTR_PIC;
static int apm_active;
static struct clonedevs *apm_clones;

MALLOC_DEFINE(M_APMDEV, "apmdev", "APM device emulation");

static d_open_t		apmopen;
static d_close_t	apmclose;
static d_write_t	apmwrite;
static d_ioctl_t	apmioctl;
static d_poll_t		apmpoll;
static d_kqfilter_t	apmkqfilter;
static void		apmreadfiltdetach(struct knote *kn);
static int		apmreadfilt(struct knote *kn, long hint);
static struct filterops	apm_readfiltops = {
	.f_isfd = 1,
	.f_detach = apmreadfiltdetach,
	.f_event = apmreadfilt,
};

static struct cdevsw apm_cdevsw = {
	.d_version =	D_VERSION,
	.d_flags =	D_TRACKCLOSE | D_NEEDMINOR,
	.d_open =	apmopen,
	.d_close =	apmclose,
	.d_write =	apmwrite,
	.d_ioctl =	apmioctl,
	.d_poll =	apmpoll,
	.d_name =	"apm",
	.d_kqfilter =	apmkqfilter
};

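/* Convert an ACPI battery state to the APM battery status code (0-3). */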
static int
acpi_capm_convert_battstate(struct acpi_battinfo *battp)
{
	int	state;

	state = APM_UNKNOWN;

	if (battp->state & ACPI_BATT_STAT_DISCHARG) {
		if (battp->cap >= 50)
			state = 0;	/* high */
		else
			state = 1;	/* low */
	}
	if (battp->state & ACPI_BATT_STAT_CRITICAL)
		state = 2;		/* critical */
	if (battp->state & ACPI_BATT_STAT_CHARGING)
		state = 3;		/* charging */

	/* If still unknown, determine it based on the battery capacity. */
	if (state == APM_UNKNOWN) {
		if (battp->cap >= 50)
			state = 0;	/* high */
		else
			state = 1;	/* low */
	}

	return (state);
}

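/* Convert an ACPI battery state to the APM battery flag bits. */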
static int
acpi_capm_convert_battflags(struct acpi_battinfo *battp)
{
	int	flags;

	flags = 0;

	if (battp->cap >= 50)
		flags |= APM_BATT_HIGH;
	else {
		if (battp->state & ACPI_BATT_STAT_CRITICAL)
			flags |= APM_BATT_CRITICAL;
		else
			flags |= APM_BATT_LOW;
	}
	if (battp->state & ACPI_BATT_STAT_CHARGING)
		flags |= APM_BATT_CHARGING;
	if (battp->state == ACPI_BATT_STAT_NOT_PRESENT)
		flags = APM_BATT_NOT_PRESENT;

	return (flags);
}

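/* Fill in an apm_info structure from the AC line and battery state. */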
static int
acpi_capm_get_info(apm_info_t aip)
{
	int	acline;
	struct	acpi_battinfo batt;

	aip->ai_infoversion = 1;
	aip->ai_major       = 1;
	aip->ai_minor       = 2;
	aip->ai_status      = apm_active;
	aip->ai_capabilities= 0xff00;	/* unknown */

	if (acpi_acad_get_acline(&acline))
		aip->ai_acline = APM_UNKNOWN;	/* unknown */
	else
		aip->ai_acline = acline;	/* on/off */

	if (acpi_battery_get_battinfo(NULL, &batt) != 0) {
		aip->ai_batt_stat = APM_UNKNOWN;
		aip->ai_batt_life = APM_UNKNOWN;
		aip->ai_batt_time = -1;		 /* unknown */
		aip->ai_batteries = ~0U;	 /* unknown */
	} else {
		aip->ai_batt_stat = acpi_capm_convert_battstate(&batt);
		aip->ai_batt_life = batt.cap;
		aip->ai_batt_time = (batt.min == -1) ? -1 : batt.min * 60;
		aip->ai_batteries = acpi_battery_get_units();
	}

	return (0);
}

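/* Fill in an apm_pwstatus structure for the battery selected by ap_device. */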
static int
acpi_capm_get_pwstatus(apm_pwstatus_t app)
{
	device_t dev;
	int	acline, unit, error;
	struct	acpi_battinfo batt;

	if (app->ap_device != PMDV_ALLDEV &&
	    (app->ap_device < PMDV_BATT0 || app->ap_device > PMDV_BATT_ALL))
		return (1);

	if (app->ap_device == PMDV_ALLDEV)
		error = acpi_battery_get_battinfo(NULL, &batt);
	else {
		unit = app->ap_device - PMDV_BATT0;
		dev = devclass_get_device(devclass_find("battery"), unit);
		if (dev != NULL)
			error = acpi_battery_get_battinfo(dev, &batt);
		else
			error = ENXIO;
	}
	if (error)
		return (1);

	app->ap_batt_stat = acpi_capm_convert_battstate(&batt);
	app->ap_batt_flag = acpi_capm_convert_battflags(&batt);
	app->ap_batt_life = batt.cap;
	app->ap_batt_time = (batt.min == -1) ? -1 : batt.min * 60;

	if (acpi_acad_get_acline(&acline))
		app->ap_acline = APM_UNKNOWN;
	else
		app->ap_acline = acline;	/* on/off */

	return (0);
}

/* Create single-use devices for /dev/apm and /dev/apmctl. */
static void
apm_clone(void *arg, struct ucred *cred, char *name, int namelen,
    struct cdev **dev)
{
	int ctl_dev, unit;

	if (*dev != NULL)
		return;
	if (strcmp(name, "apmctl") == 0)
		ctl_dev = TRUE;
	else if (strcmp(name, "apm") == 0)
		ctl_dev = FALSE;
	else
		return;

	/* Always create a new device and unit number. */
	unit = -1;
	if (clone_create(&apm_clones, &apm_cdevsw, &unit, dev, 0)) {
		if (ctl_dev) {
			*dev = make_dev(&apm_cdevsw, unit,
			    UID_ROOT, GID_OPERATOR, 0660, "apmctl%d", unit);
		} else {
			*dev = make_dev(&apm_cdevsw, unit,
			    UID_ROOT, GID_OPERATOR, 0664, "apm%d", unit);
		}
		if (*dev != NULL) {
			dev_ref(*dev);
			(*dev)->si_flags |= SI_CHEAPCLONE;
		}
	}
}

/* Create a struct for tracking per-device suspend notification. */
static struct apm_clone_data *
apm_create_clone(struct cdev *dev, struct acpi_softc *acpi_sc)
{
	struct apm_clone_data *clone;

	clone = malloc(sizeof(*clone), M_APMDEV, M_WAITOK);
	clone->cdev = dev;
	clone->acpi_sc = acpi_sc;
	clone->notify_status = APM_EV_NONE;
	bzero(&clone->sel_read, sizeof(clone->sel_read));
	knlist_init_mtx(&clone->sel_read.si_note, &acpi_mutex);

	/*
	 * The acpi device is always managed by devd(8) and is considered
	 * writable (i.e., ack is required to allow suspend to proceed.)
	 */
	if (strcmp("acpi", devtoname(dev)) == 0)
		clone->flags = ACPI_EVF_DEVD | ACPI_EVF_WRITE;
	else
		clone->flags = ACPI_EVF_NONE;

	ACPI_LOCK(acpi);
	STAILQ_INSERT_TAIL(&acpi_sc->apm_cdevs, clone, entries);
	ACPI_UNLOCK(acpi);
	return (clone);
}

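/* Set up per-open clone state and record whether it was opened for write. */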
static int
apmopen(struct cdev *dev, int flag, int fmt, struct thread *td)
{
	struct	acpi_softc *acpi_sc;
	struct	apm_clone_data *clone;

	acpi_sc = devclass_get_softc(devclass_find("acpi"), 0);
	clone = apm_create_clone(dev, acpi_sc);
	dev->si_drv1 = clone;

	/* If the device is opened for write, record that. */
	if ((flag & FWRITE) != 0)
		clone->flags |= ACPI_EVF_WRITE;

	return (0);
}

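/* Ack any pending suspend request, then release this clone's state. */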
static int
apmclose(struct cdev *dev, int flag, int fmt, struct thread *td)
{
	struct	apm_clone_data *clone;
	struct	acpi_softc *acpi_sc;

	clone = dev->si_drv1;
	acpi_sc = clone->acpi_sc;

	/* We are about to lose a reference so check if suspend should occur */
	if (acpi_sc->acpi_next_sstate != 0 &&
	    clone->notify_status != APM_EV_ACKED)
		acpi_AckSleepState(clone, 0);

	/* Remove this clone's data from the list and free it. */
	ACPI_LOCK(acpi);
	STAILQ_REMOVE(&acpi_sc->apm_cdevs, clone, apm_clone_data, entries);
	knlist_destroy(&clone->sel_read.si_note);
	ACPI_UNLOCK(acpi);
	free(clone, M_APMDEV);
	destroy_dev_sched(dev);
	return (0);
}

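/* Translate APM ioctl requests into the corresponding ACPI operations. */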
static int
apmioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td)
{
	int	error;
	struct	apm_clone_data *clone;
	struct	acpi_softc *acpi_sc;
	struct	apm_info info;
	struct	apm_event_info *ev_info;
	apm_info_old_t aiop;

	error = 0;
	clone = dev->si_drv1;
	acpi_sc = clone->acpi_sc;

	switch (cmd) {
	case APMIO_SUSPEND:
		if ((flag & FWRITE) == 0)
			return (EPERM);
		if (acpi_sc->acpi_next_sstate == 0) {
			if (acpi_sc->acpi_suspend_sx != ACPI_STATE_S5) {
				error = acpi_ReqSleepState(acpi_sc,
				    acpi_sc->acpi_suspend_sx);
			} else {
				printf(
			"power off via apm suspend not supported\n");
				error = ENXIO;
			}
		} else
			error = acpi_AckSleepState(clone, 0);
		break;
	case APMIO_STANDBY:
		if ((flag & FWRITE) == 0)
			return (EPERM);
		if (acpi_sc->acpi_next_sstate == 0) {
			if (acpi_sc->acpi_standby_sx != ACPI_STATE_S5) {
				error = acpi_ReqSleepState(acpi_sc,
				    acpi_sc->acpi_standby_sx);
			} else {
				printf(
			"power off via apm standby not supported\n");
				error = ENXIO;
			}
		} else
			error = acpi_AckSleepState(clone, 0);
		break;
	case APMIO_NEXTEVENT:
		printf("apm nextevent start\n");
		ACPI_LOCK(acpi);
		if (acpi_sc->acpi_next_sstate != 0 && clone->notify_status ==
		    APM_EV_NONE) {
			ev_info = (struct apm_event_info *)addr;
			if (acpi_sc->acpi_next_sstate <= ACPI_STATE_S3)
				ev_info->type = PMEV_STANDBYREQ;
			else
				ev_info->type = PMEV_SUSPENDREQ;
			ev_info->index = 0;
			clone->notify_status = APM_EV_NOTIFIED;
			printf("apm event returning %d\n", ev_info->type);
		} else
			error = EAGAIN;
		ACPI_UNLOCK(acpi);
		break;
	case APMIO_GETINFO_OLD:
		if (acpi_capm_get_info(&info))
			error = ENXIO;
		aiop = (apm_info_old_t)addr;
		aiop->ai_major = info.ai_major;
		aiop->ai_minor = info.ai_minor;
		aiop->ai_acline = info.ai_acline;
		aiop->ai_batt_stat = info.ai_batt_stat;
		aiop->ai_batt_life = info.ai_batt_life;
		aiop->ai_status = info.ai_status;
		break;
	case APMIO_GETINFO:
		if (acpi_capm_get_info((apm_info_t)addr))
			error = ENXIO;
		break;
	case APMIO_GETPWSTATUS:
		if (acpi_capm_get_pwstatus((apm_pwstatus_t)addr))
			error = ENXIO;
		break;
	case APMIO_ENABLE:
		if ((flag & FWRITE) == 0)
			return (EPERM);
		apm_active = 1;
		break;
	case APMIO_DISABLE:
		if ((flag & FWRITE) == 0)
			return (EPERM);
		apm_active = 0;
		break;
	case APMIO_HALTCPU:
		break;
	case APMIO_NOTHALTCPU:
		break;
	case APMIO_DISPLAY:
		if ((flag & FWRITE) == 0)
			return (EPERM);
		break;
	case APMIO_BIOS:
		if ((flag & FWRITE) == 0)
			return (EPERM);
		bzero(addr, sizeof(struct apm_bios_arg));
		break;
	default:
		error = EINVAL;
		break;
	}

	return (error);
}

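/* Writes are accepted but not consumed by the APM emulation. */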
static int
apmwrite(struct cdev *dev, struct uio *uio, int ioflag)
{
	return (uio->uio_resid);
}

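/* Report the device readable once a sleep state request is pending. */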
static int
apmpoll(struct cdev *dev, int events, struct thread *td)
{
	struct	apm_clone_data *clone;
	int revents;

	revents = 0;
	ACPI_LOCK(acpi);
	clone = dev->si_drv1;
	if (clone->acpi_sc->acpi_next_sstate)
		revents |= events & (POLLIN | POLLRDNORM);
	else
		selrecord(td, &clone->sel_read);
	ACPI_UNLOCK(acpi);
	return (revents);
}

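/* Attach a kqueue read filter to this clone's note list. */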
static int
apmkqfilter(struct cdev *dev, struct knote *kn)
{
	struct	apm_clone_data *clone;

	ACPI_LOCK(acpi);
	clone = dev->si_drv1;
	kn->kn_hook = clone;
	kn->kn_fop = &apm_readfiltops;
	knlist_add(&clone->sel_read.si_note, kn, 0);
	ACPI_UNLOCK(acpi);
	return (0);
}

static void
apmreadfiltdetach(struct knote *kn)
{
	struct	apm_clone_data *clone;

	ACPI_LOCK(acpi);
	clone = kn->kn_hook;
	knlist_remove(&clone->sel_read.si_note, kn, 0);
	ACPI_UNLOCK(acpi);
}

static int
apmreadfilt(struct knote *kn, long hint)
{
	struct	apm_clone_data *clone;
	int	sleeping;

	ACPI_LOCK(acpi);
	clone = kn->kn_hook;
	sleeping = clone->acpi_sc->acpi_next_sstate ? 1 : 0;
	ACPI_UNLOCK(acpi);
	return (sleeping);
}

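/*
 * Machine-dependent ACPI setup: register the APM emulation clone handler,
 * install the wakeup handler and configure the SCI interrupt model.
 */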
int
acpi_machdep_init(device_t dev)
{
	struct	acpi_softc *acpi_sc;

	acpi_sc = devclass_get_softc(devclass_find("acpi"), 0);

	/* Create a clone for /dev/acpi also. */
	STAILQ_INIT(&acpi_sc->apm_cdevs);
	acpi_sc->acpi_clone = apm_create_clone(acpi_sc->acpi_dev_t, acpi_sc);
	clone_setup(&apm_clones);
	EVENTHANDLER_REGISTER(dev_clone, apm_clone, 0, 1000);
	acpi_install_wakeup_handler(acpi_sc);

	if (intr_model == ACPI_INTR_PIC)
		BUS_CONFIG_INTR(dev, AcpiGbl_FADT.SciInterrupt,
		    INTR_TRIGGER_LEVEL, INTR_POLARITY_LOW);
	else
		acpi_SetIntrModel(intr_model);

	SYSCTL_ADD_UINT(&acpi_sc->acpi_sysctl_ctx,
	    SYSCTL_CHILDREN(acpi_sc->acpi_sysctl_tree), OID_AUTO,
	    "reset_video", CTLFLAG_RW, &acpi_reset_video, 0,
	    "Call the VESA reset BIOS vector on the resume path");

	return (0);
}

void
acpi_SetDefaultIntrModel(int model)
{

	intr_model = model;
}

/* Check BIOS date.  If 1998 or older, disable ACPI. */
int
acpi_machdep_quirks(int *quirks)
{
	char *va;
	int year;

	/* BIOS address 0xffff5 contains the date in the format mm/dd/yy. */
	va = pmap_mapbios(0xffff0, 16);
	sscanf(va + 11, "%2d", &year);
	pmap_unmapbios((vm_offset_t)va, 16);

	/*
	 * Date must be >= 1/1/1999 or we don't trust ACPI.  Note that this
	 * check must be changed by my 114th birthday.
	 */
	if (year > 90 && year < 99)
		*quirks = ACPI_Q_BROKEN;

	return (0);
}

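/* Enter C1 (halt with interrupts enabled) to idle the CPU. */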
void
acpi_cpu_c1()
{
	__asm __volatile("sti; hlt");
}

/*
 * Support for mapping ACPI tables during early boot.  This abuses the
 * crashdump map because the kernel cannot allocate KVA in
 * pmap_mapbios() when this is used.  This makes the following
 * assumptions about how we use this KVA: pages 0 and 1 are used to
 * map in the header of each table found via the RSDT or XSDT and
 * pages 2 to n are used to map in the RSDT or XSDT.  This has to use
 * 2 pages for the table headers in case a header spans a page
 * boundary.
 *
 * XXX: We don't ensure the table fits in the available address space
 * in the crashdump map.
 */

/*
 * Map some memory using the crashdump map.  'offset' is an offset in
 * pages into the crashdump map to use for the start of the mapping.
 */
static void *
table_map(vm_paddr_t pa, int offset, vm_offset_t length)
{
	vm_offset_t va, off;
	void *data;

	off = pa & PAGE_MASK;
	length = roundup(length + off, PAGE_SIZE);
	pa = pa & PG_FRAME;
	va = (vm_offset_t)pmap_kenter_temporary(pa, offset) +
	    (offset * PAGE_SIZE);
	data = (void *)(va + off);
	length -= PAGE_SIZE;
	while (length > 0) {
		va += PAGE_SIZE;
		pa += PAGE_SIZE;
		length -= PAGE_SIZE;
		pmap_kenter(va, pa);
		invlpg(va);
	}
	return (data);
}

/* Unmap memory previously mapped with table_map(). */
static void
table_unmap(void *data, vm_offset_t length)
{
	vm_offset_t va, off;

	va = (vm_offset_t)data;
	off = va & PAGE_MASK;
	length = roundup(length + off, PAGE_SIZE);
	va &= ~PAGE_MASK;
	while (length > 0) {
		pmap_kremove(va);
		invlpg(va);
		va += PAGE_SIZE;
		length -= PAGE_SIZE;
	}
}

/*
 * Map a table at a given offset into the crashdump map.  It first
 * maps the header to determine the table length and then maps the
 * entire table.
 */
static void *
map_table(vm_paddr_t pa, int offset, const char *sig)
{
	ACPI_TABLE_HEADER *header;
	vm_offset_t length;
	void *table;

	header = table_map(pa, offset, sizeof(ACPI_TABLE_HEADER));
	if (strncmp(header->Signature, sig, ACPI_NAME_SIZE) != 0) {
		table_unmap(header, sizeof(ACPI_TABLE_HEADER));
		return (NULL);
	}
	length = header->Length;
	table_unmap(header, sizeof(ACPI_TABLE_HEADER));
	table = table_map(pa, offset, length);
	if (ACPI_FAILURE(AcpiTbChecksum(table, length))) {
		if (bootverbose)
			printf("ACPI: Failed checksum for table %s\n", sig);
#if (ACPI_CHECKSUM_ABORT)
		table_unmap(table, length);
		return (NULL);
#endif
	}
	return (table);
}

/*
 * See if the ACPI table at the given address is the requested table.
 * Returns 1 if the signature matches or 0 on failure.
 */
static int
probe_table(vm_paddr_t address, const char *sig)
{
	ACPI_TABLE_HEADER *table;

	table = table_map(address, 0, sizeof(ACPI_TABLE_HEADER));
	if (table == NULL) {
		if (bootverbose)
			printf("ACPI: Failed to map table at 0x%jx\n",
			    (uintmax_t)address);
		return (0);
	}
	if (bootverbose)
		printf("Table '%.4s' at 0x%jx\n", table->Signature,
		    (uintmax_t)address);

	if (strncmp(table->Signature, sig, ACPI_NAME_SIZE) != 0) {
		table_unmap(table, sizeof(ACPI_TABLE_HEADER));
		return (0);
	}
	table_unmap(table, sizeof(ACPI_TABLE_HEADER));
	return (1);
}

/*
 * Try to map a table at a given physical address previously returned
 * by acpi_find_table().
 */
void *
acpi_map_table(vm_paddr_t pa, const char *sig)
{

	return (map_table(pa, 0, sig));
}

/* Unmap a table previously mapped via acpi_map_table(). */
void
acpi_unmap_table(void *table)
{
	ACPI_TABLE_HEADER *header;

	header = (ACPI_TABLE_HEADER *)table;
	table_unmap(table, header->Length);
}

/*
 * Return the physical address of the requested table or zero if one
 * is not found.
 */
vm_paddr_t
acpi_find_table(const char *sig)
{
	ACPI_PHYSICAL_ADDRESS rsdp_ptr;
	ACPI_TABLE_RSDP *rsdp;
	ACPI_TABLE_RSDT *rsdt;
	ACPI_TABLE_XSDT *xsdt;
	ACPI_TABLE_HEADER *table;
	vm_paddr_t addr;
	int i, count;

	if (resource_disabled("acpi", 0))
		return (0);

	/*
	 * Map in the RSDP.  Since ACPI uses AcpiOsMapMemory() which in turn
	 * calls pmap_mapbios() to find the RSDP, we assume that we can use
	 * pmap_mapbios() to map the RSDP.
	 */
	if ((rsdp_ptr = AcpiOsGetRootPointer()) == 0)
		return (0);
	rsdp = pmap_mapbios(rsdp_ptr, sizeof(ACPI_TABLE_RSDP));
	if (rsdp == NULL) {
		if (bootverbose)
			printf("ACPI: Failed to map RSDP\n");
		return (0);
	}

	/*
	 * For ACPI >= 2.0, use the XSDT if it is available.
	 * Otherwise, use the RSDT.  We map the XSDT or RSDT at page 2
	 * in the crashdump area.  Pages 0 and 1 are used to map in the
	 * headers of candidate ACPI tables.
	 */
	addr = 0;
	if (rsdp->Revision >= 2 && rsdp->XsdtPhysicalAddress != 0) {
		/*
		 * AcpiOsGetRootPointer only verifies the checksum for
		 * the version 1.0 portion of the RSDP.  Version 2.0 has
		 * an additional checksum that we verify first.
		 */
		if (AcpiTbChecksum((UINT8 *)rsdp, ACPI_RSDP_XCHECKSUM_LENGTH)) {
			if (bootverbose)
				printf("ACPI: RSDP failed extended checksum\n");
			return (0);
		}
		xsdt = map_table(rsdp->XsdtPhysicalAddress, 2, ACPI_SIG_XSDT);
		if (xsdt == NULL) {
			if (bootverbose)
				printf("ACPI: Failed to map XSDT\n");
			return (0);
		}
		count = (xsdt->Header.Length - sizeof(ACPI_TABLE_HEADER)) /
		    sizeof(UINT64);
		for (i = 0; i < count; i++)
			if (probe_table(xsdt->TableOffsetEntry[i], sig)) {
				addr = xsdt->TableOffsetEntry[i];
				break;
			}
		acpi_unmap_table(xsdt);
	} else {
		rsdt = map_table(rsdp->RsdtPhysicalAddress, 2, ACPI_SIG_RSDT);
		if (rsdt == NULL) {
			if (bootverbose)
				printf("ACPI: Failed to map RSDT\n");
			return (0);
		}
		count = (rsdt->Header.Length - sizeof(ACPI_TABLE_HEADER)) /
		    sizeof(UINT32);
		for (i = 0; i < count; i++)
			if (probe_table(rsdt->TableOffsetEntry[i], sig)) {
				addr = rsdt->TableOffsetEntry[i];
				break;
			}
		acpi_unmap_table(rsdt);
	}
	pmap_unmapbios((vm_offset_t)rsdp, sizeof(ACPI_TABLE_RSDP));
	if (addr == 0) {
		if (bootverbose)
			printf("ACPI: No %s table found\n", sig);
		return (0);
	}
	if (bootverbose)
		printf("%s: Found table at 0x%jx\n", sig, (uintmax_t)addr);

	/*
	 * Verify that we can map the full table and that its checksum is
	 * correct, etc.
	 */
	table = map_table(addr, 0, sig);
	if (table == NULL)
		return (0);
	acpi_unmap_table(table);

	return (addr);
}

/*
 * ACPI nexus(4) driver.
 */
static int
nexus_acpi_probe(device_t dev)
{
	int error;

	error = acpi_identify();
	if (error)
		return (error);

	return (BUS_PROBE_DEFAULT);
}

static int
nexus_acpi_attach(device_t dev)
{

	nexus_init_resources();
	bus_generic_probe(dev);
	if (BUS_ADD_CHILD(dev, 10, "acpi", 0) == NULL)
		panic("failed to add acpi0 device");

	return (bus_generic_attach(dev));
}

static device_method_t nexus_acpi_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		nexus_acpi_probe),
	DEVMETHOD(device_attach,	nexus_acpi_attach),

	{ 0, 0 }
};

DEFINE_CLASS_1(nexus, nexus_acpi_driver, nexus_acpi_methods, 1, nexus_driver);
static devclass_t nexus_devclass;

DRIVER_MODULE(nexus_acpi, root, nexus_acpi_driver, nexus_devclass, 0, 0);