/*
 * Copyright (c) 2001 Takanori Watanabe <takawata@jp.freebsd.org>
 * Copyright (c) 2001 Mitsuru IWASAKI <iwasaki@jp.freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * Copyright (c) 2008 Mike Larkin <mlarkin@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#define _ACPI_WAKECODE

#include "assym.h"
#include <machine/asm.h>
#ifdef HIBERNATE
#include <machine/hibernate_var.h>
#endif /* HIBERNATE */
#include <machine/specialreg.h>
#include <machine/param.h>
#include <machine/segments.h>
#include <dev/acpi/acpivar.h>

#ifdef __clang__
#define addr32
#endif

#define _ACPI_TRMP_LABEL(a) a = . - acpi_real_mode_resume + ACPI_TRAMPOLINE
#define _ACPI_TRMP_OFFSET(a) a = . - acpi_real_mode_resume
#define _ACPI_TRMP_DATA_LABEL(a) a = . - acpi_tramp_data_start + \
	ACPI_TRAMP_DATA
#define _ACPI_TRMP_DATA_OFFSET(a) a = . - acpi_tramp_data_start
#define _ACPI_RM_CODE_SEG (ACPI_TRAMPOLINE >> 4)
#define _ACPI_RM_DATA_SEG (ACPI_TRAMP_DATA >> 4)
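
/*
 * Real-mode segment arithmetic refresher: a segment register holds
 * (physical address >> 4), and the linear address is (segment << 4) +
 * offset. Loading %cs with _ACPI_RM_CODE_SEG therefore puts offset 0
 * at the first byte of the trampoline page. For example, were
 * ACPI_TRAMPOLINE 0x13000, the code segment selector would be 0x1300.
 */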

#ifdef HIBERNATE
#define HIBERNATE_STACK_OFFSET 0x0F00
#endif

/*
 * On wakeup, we'll start executing at acpi_real_mode_resume.
 * This is based on the wakeup vector previously stored with
 * ACPI before we went to sleep. ACPI's wakeup vector is a
 * physical address - in our case, it's calculated and mapped
 * by the kernel and stuffed into a low page early in the boot
 * process.
 *
 * We wake up in real mode, at some phys addr based on the ACPI
 * specification (cs = phys>>4, ip = phys & 0xF). For example,
 * if our phys addr is 0x13000, we'd have cs=0x1300, ip=0.
 *
 * The wakeup code needs to do the following:
 *     1. Re-enable the video display
 *     2. Enter 32-bit protected mode
 *     3. Re-enable paging
 *     4. Restore saved CPU registers
 */

	.text
	.code16
	.align 4, 0xcc
	.global acpi_real_mode_resume
	.global acpi_protected_mode_resume
	.global acpi_resume_end
	.global acpi_tramp_data_start
	.global acpi_tramp_data_end
acpi_real_mode_resume:
_ACPI_TRMP_OFFSET(.Lacpi_s3_vector_real)
	nop
	cli
	cld

	/*
	 * Set up segment registers for real mode.
	 * We'll only be in real mode for a moment, and we don't want
	 * any real dependencies on data or stack, so we just point
	 * %ds and %ss at the trampoline data segment and %es at the
	 * code segment (each a 64KB window).
	 */
	movw	$(_ACPI_RM_DATA_SEG), %ax
	movw	%ax, %ds
	movw	%ax, %ss
	movw	%cs, %ax
	movw	%ax, %es
	addr32 lidtl	.Lclean_idt

	/*
	 * Set up the stack to grow down from offset 0x0FFE.
	 * We will only be doing a few push/pops and no calls in real
	 * mode, so as long as the real mode code in the segment
	 * plus stack doesn't exceed 0x0FFE (4094) bytes, we'll be ok.
	 */
	movw	$0x0FFE,%sp

	/*
	 * Clear flags
	 */
	pushl	$0
	popfl

	/*
	 * Flush the instruction prefetch queue
	 */
	jmp	1f
1:	jmp	1f
1:

	/*
	 * We're about to enter protected mode, so we need a GDT for that.
	 * Set up a temporary GDT describing 2 segments, one for code
	 * extending from 0x00000000-0xffffffff and one for data
	 * with the same range. This GDT will only be in use for a short
	 * time, until we restore the saved GDT that we had when we went
	 * to sleep (although on i386, the saved GDT will most likely
	 * represent something similar based on machine/segments.h).
	 */
	addr32 lgdtl	.Ltmp_gdt

	/*
	 * Enable protected mode by setting the PE bit in CR0
	 */
	mov	%cr0,%eax
	orl	$(CR0_PE),%eax
	mov	%eax,%cr0

	/*
	 * Force the CPU into protected mode by making an intersegment
	 * jump (to ourselves, just a few lines down from here). We rely
	 * on the kernel having fixed up the jump target address
	 * beforehand.
	 */
	ljmpl	$0x8, $.Lacpi_protected_mode_trampoline

	.code32
	.align 16, 0xcc
_ACPI_TRMP_LABEL(.Lacpi_protected_mode_trampoline)
acpi_protected_mode_resume:
	nop

	/*
	 * We're in protected mode now, without paging enabled.
	 *
	 * Set up segment selectors for protected mode.
	 * We've already set up %cs via the intersegment jump earlier,
	 * but we need to set %ds, %es, %fs, %gs, and %ss to all point
	 * to the 4GB flat data segment we defined earlier.
	 */
	movw	$GSEL(GDATA_SEL,SEL_KPL),%ax
	movw	%ax,%ds
	movw	%ax,%es
	movw	%ax,%gs
	movw	%ax,%ss
	movw	%ax,%fs

	/*
	 * Adjust %esp for protected mode by adding the flat base of the
	 * trampoline data page; the real-mode %sp was an offset within
	 * that segment. We can do this here because we haven't put
	 * anything on the stack via a call or push that we haven't
	 * cleaned up already.
	 */
	addl	$(ACPI_TRAMP_DATA), %esp

	/*
	 * Reset our page size extension (by restoring %cr4) to what it
	 * was before we suspended. If we don't do this, %cr4 might
	 * contain garbage in the PSE bit, leading to pages that are
	 * incorrectly interpreted as the wrong size. CR4 was added with
	 * the i586, so there is an implicit assumption here that this
	 * code will execute on an i586 or later.
	 */
	mov	.Lacpi_saved_cr4,%eax
	mov	%eax,%cr4

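	/*
	 * If PAE was enabled before suspend, the saved page tables may
	 * use the NX bit, so set EFER.NXE before paging is re-enabled;
	 * with NXE clear, NX entries would fault as reserved bits.
	 */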
	testl	$CR4_PAE, %eax
	jz	1f

	movl	$MSR_EFER, %ecx
	rdmsr
	orl	$EFER_NXE, %eax
	wrmsr

1:
	/*
	 * Re-enable paging, using the CR3 we stored before suspend
	 * as our new page table base location. Restore CR0 after
	 * that.
	 */
	movl	.Lacpi_saved_cr3,%eax
	movl	%eax,%cr3
	movl	.Lacpi_saved_cr0, %eax
	movl	%eax, %cr0

	/*
	 * Flush the prefetch queue in order to enforce use of the
	 * new (old) page tables we just re-enabled
	 */
	jmp	1f
1:	jmp	1f
1:
	nop

	/*
	 * Restore the CPU descriptor table registers
	 */
	lgdt	.Lacpi_saved_gdt
	lidt	.Lacpi_saved_idt
	lldt	.Lacpi_saved_ldt

	mov	.Lacpi_saved_cr2,%eax
	mov	%eax,%cr2

	/*
	 * The selectors already loaded into these registers are most
	 * likely accurate, but we reload them from the saved values
	 * for consistency.
	 */
	movw	.Lacpi_saved_es,%ax
	movw	%ax,%es
	movw	.Lacpi_saved_fs,%ax
	movw	%ax,%fs
	movw	.Lacpi_saved_gs,%ax
	movw	%ax,%gs
	movw	.Lacpi_saved_ss,%ax
	movw	%ax,%ss
	movw	.Lacpi_saved_ds,%ax
	movw	%ax,%ds

	/*
	 * Shortly, we'll restore the TSS for the task that was running
	 * immediately before suspend occurred. Since that task was the
	 * running task, its TSS busy flag will have been set. We need
	 * to clear that bit (since we're effectively "restarting" the OS)
	 * in order to convince the processor that the task is no longer
	 * running (which is true, now). If we don't do this, when the
	 * OS resumes and switches back to this task, the processor will
	 * assume we're trying to recurse into an already active task,
	 * which would cause a GP fault (and probably a crash).
	 *
	 * We accomplish this by changing the TSS descriptor type from
	 * BUSY (0x0B) to AVAILABLE (0x09), keeping the high 4 bits of
	 * the type byte intact.
	 */
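	/*
	 * Byte 5 of a descriptor holds P/DPL/S in its high 4 bits and
	 * the type in its low 4. The andb below clears the busy bit:
	 * a busy 32-bit TSS access byte (0x8B) becomes available (0x89),
	 * with the high bits untouched.
	 */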
	movl	.Lacpi_saved_gdt+2,%ebx
	xorl	%ecx, %ecx
	movw	.Lacpi_saved_tr,%cx
	leal	(%ebx,%ecx),%eax
	andb	$0xF9,5(%eax)

	ltr	.Lacpi_saved_tr

	/*
	 * Everything is almost reset back to the way it was immediately
	 * before suspend. There are a few more registers to restore,
	 * and after that, jump back to the OS. There are still some
	 * things to do there, like re-enabling interrupts, resuming
	 * devices, APICs, etc.
	 */
	movl	.Lacpi_saved_ebx, %ebx
	movl	.Lacpi_saved_ecx, %ecx
	movl	.Lacpi_saved_edx, %edx
	movl	.Lacpi_saved_ebp, %ebp
	movl	.Lacpi_saved_esi, %esi
	movl	.Lacpi_saved_edi, %edi
	movl	.Lacpi_saved_esp, %esp
	push	.Lacpi_saved_fl
	popfl

	/* Poke CR3 one more time. Might not be necessary. */
	movl	.Lacpi_saved_cr3,%eax
	movl	%eax,%cr3

	/*
	 * Return to the OS. We previously saved the resume address in
	 * .Lacpi_saved_ret (via a call to acpi_savecpu before we went
	 * to sleep).
	 */
	xorl	%eax, %eax
	jmp	*.Lacpi_saved_ret

#ifdef HIBERNATE
	/*
	 * hibernate_resume_machdep drops to real mode and
	 * restarts the OS using the saved S3 resume vector
	 */
	.code32
NENTRY(hibernate_resume_machdep)
	cli
	/* Jump to the identity-mapped version of ourselves */
	mov	$.Lhibernate_resume_vector_2, %eax
	jmp	*%eax
_ACPI_TRMP_LABEL(.Lhibernate_resume_vector_2)

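	/*
	 * Before CR0.PE can be cleared, %cs must be executing through a
	 * 16-bit code segment so its cached attributes are real-mode
	 * sized; the ljmp through gdt_16's code descriptor does that.
	 */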
	/* Get out of the 32-bit CS */
	lgdt	.Lgdt_16
	ljmp	$0x8, $.Lhibernate_resume_vector_3

_ACPI_TRMP_LABEL(.Lhibernate_resume_vector_3)
	.code16
	movl	%cr0, %eax
	/* Disable CR0.PG - no paging */
	andl	$(~CR0_PG), %eax
	/* Disable CR0.PE - real mode */
	andl	$(~CR0_PE), %eax
	movl	%eax, %cr0

	/* Flush the TLB */
	xorl	%eax, %eax
	movl	%eax, %cr3

	/* Set up real mode segment selectors */
	movw	$(_ACPI_RM_DATA_SEG), %ax
	movw	%ax, %ds
	movw	%ax, %ss
	movw	%ax, %es
	movw	%ax, %fs
	movw	%ax, %gs
	movl	$0x0FFE, %esp
	addr32 lidtl	.Lclean_idt

	/* Jump to the S3 resume vector */
	ljmp	$(_ACPI_RM_CODE_SEG), $.Lacpi_s3_vector_real

	.code32
	/* Switch to the hibernate resume pagetable */
NENTRY(hibernate_activate_resume_pt_machdep)
	/* Enable large pages */
	movl	%cr4, %eax
	orl	$(CR4_PSE), %eax

	/* Disable global pages */
	andl	$(~CR4_PGE), %eax
	movl	%eax, %cr4

	/*
	 * Switch to the hibernate resume pagetable if we're running
	 * in non-PAE mode.  If we're running in PAE mode, this will
	 * switch to the PDPTEs we stashed into the hibernate resume
	 * pagetable, but continue to use the normal pagetables until we
	 * disable PAE below.
	 */
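	/*
	 * In PAE mode %cr3 holds the 32-byte-aligned address of the
	 * PDPT, so the 0xfe0 offset below points it at the PDPTEs
	 * stashed near the end of HIBERNATE_PD_PAGE; in non-PAE mode
	 * those low bits of %cr3 are ignored.
	 */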
	movl	$HIBERNATE_PD_PAGE, %eax
	orl	$0xfe0, %eax
	movl	%eax, %cr3

	/* Disable PAE */
	movl	%cr4, %eax
	andl	$(~CR4_PAE), %eax
	movl	%eax, %cr4

	wbinvd
	movl	$HIBERNATE_PD_PAGE, %eax
	movl	%eax, %cr3
	jmp	1f

1:	nop
	ret

	/*
	 * Switch to the private resume-time hibernate stack
	 */
NENTRY(hibernate_switch_stack_machdep)
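	/*
	 * Stash our return address at the top of the hibernate stack
	 * and point %esp at that slot, so the ret below pops it and
	 * returns to our caller on the new stack.
	 */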
	movl	(%esp), %eax
	movl	%eax, HIBERNATE_STACK_PAGE + HIBERNATE_STACK_OFFSET
	movl	$(HIBERNATE_STACK_PAGE + HIBERNATE_STACK_OFFSET), %eax
	movl	%eax, %esp

	/* On our own stack from here onward */
	ret

NENTRY(hibernate_flush)
	invlpg	HIBERNATE_INFLATE_PAGE
	ret
#endif /* HIBERNATE */

	/*
	 * End of resume code (code copied to ACPI_TRAMPOLINE)
	 */
acpi_resume_end:

	/*
	 * An initial copy of this data is placed in .rodata; the kernel
	 * makes a RW copy of it in the tramp data page.
	 */
	.section .rodata
acpi_tramp_data_start:
_ACPI_TRMP_DATA_OFFSET(.Ltmp_gdt)
	.word	.Ltmp_gdt_end - .Ltmp_gdtable
	.long	.Ltmp_gdtable

	.align 8, 0xcc
_ACPI_TRMP_DATA_LABEL(.Ltmp_gdtable)
	/*
	 * null
	 */
	.word	0, 0
	.byte	0, 0, 0, 0
	/*
	 * Code
	 * Limit: 0xffffffff
	 * Base: 0x00000000
	 * Descriptor Type: Code
	 * Segment Type: CRA
	 * Present: True
	 * Priv: 0
	 * AVL: False
	 * 64-bit: False
	 * 32-bit: True
	 */
	.word	0xffff, 0
	.byte	0, 0x9f, 0xcf, 0
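
	/*
	 * Flag bytes, decoded: 0x9f = present, DPL 0, S=1, type 0xF
	 * (code: conforming, readable, accessed); 0xcf = G=1 (4KB
	 * granularity), D=1 (32-bit default), limit[19:16] = 0xf.
	 */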

	/*
	 * Data
	 * Limit: 0xffffffff
	 * Base: 0x00000000
	 * Descriptor Type: Data
	 * Segment Type: W
	 * Present: True
	 * Priv: 0
	 * AVL: False
	 * 64-bit: False
	 * 32-bit: True
	 */
	.word	0xffff, 0
	.byte	0, 0x93, 0xcf, 0
_ACPI_TRMP_DATA_LABEL(.Ltmp_gdt_end)

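	/*
	 * A "clean" IDT descriptor: base 0, limit 0xffff, matching the
	 * power-on default real-mode IVT at physical address 0.
	 */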
	.align 8, 0xcc
_ACPI_TRMP_DATA_OFFSET(.Lclean_idt)
	.word	0xffff
	.long	0
	.word	0

	/*
	 * gdt_16 is the gdt used when returning to real mode for BIOS
	 * reads/writes (sets up a 16-bit segment)
	 */
	.align 8, 0xcc
_ACPI_TRMP_DATA_LABEL(.Lgdt_16)
	.word	.Lgdt_16_end - .Lgdt_16_table
	.long	.Lgdt_16_table

	.align 8, 0xcc
_ACPI_TRMP_DATA_LABEL(.Lgdt_16_table)
	/*
	 * null
	 */
	.word	0, 0
	.byte	0, 0, 0, 0
	/*
	 * Code
	 * Limit: 0xffffffff
	 * Base: 0x00000000
	 * Descriptor Type: Code
	 * Segment Type: CRA
	 * Present: True
	 * Priv: 0
	 * AVL: False
	 * 64-bit: False
	 * 32-bit: False
	 */
	.word	0xffff, 0
	.byte	0, 0x9f, 0x8f, 0
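
	/*
	 * Note 0x8f here vs 0xcf above: D=0 marks this segment as
	 * having a 16-bit default operand/address size, which is what
	 * the return to real mode requires.
	 */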

	/*
	 * Data
	 * Limit: 0xffffffff
	 * Base: 0x00000000
	 * Descriptor Type: Data
	 * Segment Type: W
	 * Present: True
	 * Priv: 0
	 * AVL: False
	 * 64-bit: False
	 * 32-bit: False
	 */
	.word	0xffff, 0
	.byte	0, 0x93, 0x8f, 0

_ACPI_TRMP_DATA_LABEL(.Lgdt_16_end)

	.align 4, 0xcc
_ACPI_TRMP_DATA_LABEL(.Lacpi_saved_ebx)
	.long 0xcccccccc
_ACPI_TRMP_DATA_LABEL(.Lacpi_saved_ecx)
	.long 0xcccccccc
_ACPI_TRMP_DATA_LABEL(.Lacpi_saved_edx)
	.long 0xcccccccc
_ACPI_TRMP_DATA_LABEL(.Lacpi_saved_ebp)
	.long 0xcccccccc
_ACPI_TRMP_DATA_LABEL(.Lacpi_saved_esi)
	.long 0xcccccccc
_ACPI_TRMP_DATA_LABEL(.Lacpi_saved_edi)
	.long 0xcccccccc
_ACPI_TRMP_DATA_LABEL(.Lacpi_saved_esp)
	.long 0xcccccccc
_ACPI_TRMP_DATA_LABEL(.Lacpi_saved_fl)
	.long 0xcccccccc
_ACPI_TRMP_DATA_LABEL(.Lacpi_saved_cr0)
	.long 0xcccccccc
_ACPI_TRMP_DATA_LABEL(.Lacpi_saved_cr2)
	.long 0xcccccccc
_ACPI_TRMP_DATA_LABEL(.Lacpi_saved_cr3)
	.long 0xcccccccc
_ACPI_TRMP_DATA_LABEL(.Lacpi_saved_cr4)
	.long 0xcccccccc
_ACPI_TRMP_DATA_LABEL(.Lacpi_saved_ret)
	.long 0xcccccccc

	.align 16, 0xcc
_ACPI_TRMP_DATA_LABEL(.Lacpi_saved_idt)
	.space 6, 0xcc

	.align 16, 0xcc
_ACPI_TRMP_DATA_LABEL(.Lacpi_saved_gdt)
	.space 6, 0xcc

	.align 16, 0xcc
_ACPI_TRMP_DATA_LABEL(.Lacpi_saved_ldt)
	.short 0xcccc
_ACPI_TRMP_DATA_LABEL(.Lacpi_saved_cs)
	.short 0xcccc
_ACPI_TRMP_DATA_LABEL(.Lacpi_saved_ds)
	.short 0xcccc
_ACPI_TRMP_DATA_LABEL(.Lacpi_saved_es)
	.short 0xcccc
_ACPI_TRMP_DATA_LABEL(.Lacpi_saved_fs)
	.short 0xcccc
_ACPI_TRMP_DATA_LABEL(.Lacpi_saved_gs)
	.short 0xcccc
_ACPI_TRMP_DATA_LABEL(.Lacpi_saved_ss)
	.short 0xcccc
_ACPI_TRMP_DATA_LABEL(.Lacpi_saved_tr)
	.short 0xcccc

acpi_tramp_data_end:

	/*
	 * acpi_savecpu saves the processor's registers and flags
	 * for use during the ACPI suspend/resume process.
	 */

	.code32
NENTRY(acpi_savecpu)
	movl	(%esp), %eax
	movl	%eax, .Lacpi_saved_ret

	movw	%cs, .Lacpi_saved_cs
	movw	%ds, .Lacpi_saved_ds
	movw	%es, .Lacpi_saved_es
	movw	%fs, .Lacpi_saved_fs
	movw	%gs, .Lacpi_saved_gs
	movw	%ss, .Lacpi_saved_ss

	movl	%ebx, .Lacpi_saved_ebx
	movl	%ecx, .Lacpi_saved_ecx
	movl	%edx, .Lacpi_saved_edx
	movl	%ebp, .Lacpi_saved_ebp
	movl	%esi, .Lacpi_saved_esi
	movl	%edi, .Lacpi_saved_edi
	movl	%esp, .Lacpi_saved_esp
	/*
	 * acpi_protected_mode_resume restores state inline and jumps
	 * straight to the saved return address instead of executing
	 * our ret, so bump the saved %esp past the return address now.
	 */
	addl	$0x4, .Lacpi_saved_esp

	pushfl
	popl	.Lacpi_saved_fl

	movl	%cr0, %eax
	movl	%eax, .Lacpi_saved_cr0
	movl	%cr2, %eax
	movl	%eax, .Lacpi_saved_cr2
	movl	%cr3, %eax
	movl	%eax, .Lacpi_saved_cr3
	movl	%cr4, %eax
	movl	%eax, .Lacpi_saved_cr4

	sgdt	.Lacpi_saved_gdt
	sidt	.Lacpi_saved_idt
	sldt	.Lacpi_saved_ldt
	str	.Lacpi_saved_tr

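	/*
	 * Return 1 on the save path. The resume path re-enters at the
	 * saved return address with %eax cleared (see the xorl before
	 * the jmp in acpi_protected_mode_resume), so the caller can
	 * distinguish the two, setjmp-style.
	 */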
	movl	$1, %eax
	ret
622