/* $OpenBSD: acpi_wakecode.S,v 1.46 2018/10/04 05:00:40 guenther Exp $ */
/*
 * Copyright (c) 2001 Takanori Watanabe <takawata@jp.freebsd.org>
 * Copyright (c) 2001 Mitsuru IWASAKI <iwasaki@jp.freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * Copyright (c) 2008, 2009 Mike Larkin <mlarkin@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#define _ACPI_WAKECODE

#include "assym.h"
#include <machine/asm.h>
#ifdef HIBERNATE
#include <machine/hibernate_var.h>
#endif /* HIBERNATE */
#include <machine/specialreg.h>
#include <machine/param.h>
#include <machine/segments.h>
#include <dev/acpi/acpivar.h>
#include "lapic.h"

#ifdef __clang__
#define addr32
#endif

#define _ACPI_TRMP_LABEL(a) a = . - _C_LABEL(acpi_real_mode_resume) + \
	ACPI_TRAMPOLINE
#define _ACPI_TRMP_OFFSET(a) a = . - _C_LABEL(acpi_real_mode_resume)
#define _ACPI_TRMP_DATA_LABEL(a) a = . - _C_LABEL(acpi_tramp_data_start) + \
	ACPI_TRAMP_DATA
#define _ACPI_TRMP_DATA_OFFSET(a) a = . - _C_LABEL(acpi_tramp_data_start)
#define _ACPI_RM_CODE_SEG (ACPI_TRAMPOLINE >> 4)
#define _ACPI_RM_DATA_SEG (ACPI_TRAMP_DATA >> 4)
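
/*
 * Note on the two segment macros above: in real mode a linear address is
 * segment * 16 + offset, so a paragraph-aligned physical address can be
 * reached with segment = phys >> 4 and offset = 0. ACPI_TRAMPOLINE and
 * ACPI_TRAMP_DATA are low physical pages set up by the kernel (see the
 * comment below), so shifting them right by 4 yields the real mode code
 * and data segment values that address them. For example, a trampoline
 * at physical 0x13000 corresponds to a real mode segment of 0x1300.
 */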

/*
 * On wakeup, we'll start executing at acpi_real_mode_resume.
 * This is based on the wakeup vector previously stored with
 * ACPI before we went to sleep. ACPI's wakeup vector is a
 * physical address - in our case, it's calculated and mapped
 * by the kernel and stuffed into a low page early in the boot
 * process.
 *
 * We wake up in real mode, at some phys addr based on the ACPI
 * specification (cs = phys>>4, ip = phys & 0xF). For example,
 * if our phys addr is 0x13000, we'd have cs=0x1300,ip=0
 *
 * The wakeup code needs to do the following:
 *     1. Reenable the video display
 *     2. Enter 32 bit protected mode
 *     3. Reenable paging
 *     4. Enter long mode
 *     5. Restore saved CPU registers
 */
	.text
	.code16
	.align 4, 0xcc
	.global _C_LABEL(acpi_real_mode_resume)
	.global _C_LABEL(acpi_protected_mode_resume)
	.global _C_LABEL(acpi_long_mode_resume)
	.global _C_LABEL(acpi_resume_end)
	.global _C_LABEL(acpi_pdirpa)
	.global _C_LABEL(acpi_tramp_data_start)
	.global _C_LABEL(acpi_tramp_data_end)
_C_LABEL(acpi_real_mode_resume):
_ACPI_TRMP_OFFSET(.Lacpi_s3_vector_real)
	nop
	cli
	cld

	/*
	 * Set up segment registers for real mode.
	 * We'll only be in real mode for a moment, and we don't have
	 * any real dependencies on data or stack, so we simply point
	 * %ds and %ss at the trampoline data segment and %es at the
	 * code segment (each a 64k memory space).
	 */
	movw	$(_ACPI_RM_DATA_SEG), %ax
	movw	%ax, %ds
	movw	%ax, %ss
	movw	%cs, %ax
	movw	%ax, %es
	addr32 lidtl	.Lclean_idt

	/*
	 * Set up stack to grow down from offset 0x0FFE.
	 * We will only be doing a few push/pops and no calls in real
	 * mode, so as long as the real mode code in the segment
	 * plus stack doesn't exceed 0x0FFE (4094) bytes, we'll be ok.
	 */
	movw	$0x0FFE,%sp

	/*
	 * Clear flags
	 */
	pushl	$0
	popfl

	/*
	 * Flush instruction prefetch queue
	 */
	jmp	1f
1:	jmp	1f
1:
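	/*
	 * (The pair of near jumps above is the traditional way of forcing
	 * the CPU to discard anything it prefetched before the segment
	 * registers and IDT changed; each jump restarts instruction fetch
	 * at the new target.)
	 */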

	/*
	 * We're about to enter protected mode, so we need a GDT for that.
	 * Set up a temporary GDT describing 2 segments, one for code
	 * extending from 0x00000000-0xffffffff and one for data
	 * with the same range. This GDT will only be in use for a short
	 * time, until we restore the saved GDT that we had when we went
	 * to sleep.
	 */
	addr32 lgdtl	.Ltmp_gdt

	/*
	 * Enable protected mode by setting the PE bit in CR0
	 */
	mov	%cr0,%eax
	orl	$(CR0_PE),%eax
	mov	%eax,%cr0

	/*
	 * Force the CPU into protected mode by making an intersegment jump
	 * (to ourselves, just a few lines down from here). We rely on the
	 * kernel having fixed up the jump target address beforehand.
	 */
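	/*
	 * Selector 0x8 below is index 1 in .Ltmp_gdt (TI=0, RPL=0), i.e.
	 * the flat 32-bit code descriptor defined in the trampoline data
	 * further down in this file.
	 */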
	ljmpl	$0x8, $.Lacpi_protected_mode_trampoline

	.code32
	.align 16, 0xcc
_ACPI_TRMP_LABEL(.Lacpi_protected_mode_trampoline)
_C_LABEL(acpi_protected_mode_resume):
	nop

	/*
	 * We're in protected mode now, without paging enabled.
	 *
	 * Set up segment selectors for protected mode.
	 * We've already set up our cs via the intersegment jump earlier,
	 * but we need to set ds,es,fs,gs,ss to all point to the
	 * 4GB flat data segment we defined earlier.
	 */
	movw	$GSEL(GDATA_SEL,SEL_KPL),%ax
	movw	%ax,%ds
	movw	%ax,%es
	movw	%ax,%gs
	movw	%ax,%ss
	movw	%ax,%fs

	/*
	 * Reset ESP based on protected mode. We can do this here
	 * because we haven't put anything on the stack via a
	 * call or push that we haven't cleaned up already.
	 */
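	/*
	 * %esp still holds the real mode offset 0x0FFE set up above; now
	 * that %ss is a flat (base 0) data segment, adding the physical
	 * base of the trampoline data area turns it into the equivalent
	 * flat address.
	 */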
	addl	$(ACPI_TRAMP_DATA), %esp

	/* Set CR4 to something sane for entry into long mode */
	mov	$(CR4_PAE|CR4_OSFXSR|CR4_OSXMMEXCPT|CR4_PSE),%eax
	mov	%eax,%cr4

	/*
	 * Set up a temporary long mode GDT describing 2
	 * segments, one for code and one for data.
	 */
	lgdt	.Ltmp_gdt64

	/* Restore saved EFER (LME, NXE, etc) */
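	/*
	 * The rdmsr below loads the current EFER into %edx:%eax; only the
	 * low word is then replaced with the saved value (masked to the
	 * LME, NXE and SCE bits), so the wrmsr writes the existing upper
	 * half back unchanged.
	 */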
	movl	$MSR_EFER, %ecx
	rdmsr
	movl	.Lacpi_saved_efer, %eax
	andl	$(EFER_LME | EFER_NXE | EFER_SCE), %eax
	wrmsr

	/* Reenable paging using temporary cr3 */
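	/*
	 * acpi_pdirpa is filled in by the kernel before suspend with the
	 * physical address of a page table hierarchy that (presumably)
	 * identity maps this low-memory trampoline, since we are still
	 * executing at physical addresses when paging comes back on.
	 */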
	movl	$acpi_pdirpa, %eax
	movl	(%eax), %eax
	movl	%eax, %cr3

	/* Flush the prefetch queue again */
	jmp	1f
1:	jmp	1f
1:

	/* Reenable paging by setting the appropriate bits in CR0 */
	movl	%cr0,%eax
	orl	$CR0_DEFAULT,%eax
	movl	%eax,%cr0

	/* Flush the prefetch queue again */
	jmp	1f
1:	jmp	1f
1:

	/* Enter long mode by making another intersegment jump */
	ljmp	$0x8, $.Lacpi_long_mode_trampoline
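
	/*
	 * Selector 0x8 now refers to the first non-null entry of
	 * .Ltmp_gdt64, a code descriptor with the L (long mode) bit set.
	 * With EFER.LME and paging already enabled, reloading %cs from it
	 * switches instruction fetch to 64-bit mode, hence .code64 below.
	 */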

	.code64
	.align 16, 0xcc
_ACPI_TRMP_LABEL(.Lacpi_long_mode_trampoline)
_C_LABEL(acpi_long_mode_resume):

	/* Reset stack */
	movq	$(ACPI_TRAMP_DATA + 0x0FF8), %rsp

	/* Load GDT based on our saved copy */
	lgdt	.Lacpi_saved_gdt

	/* Reset segment registers */
	movw	$GSEL(GDATA_SEL, SEL_KPL),%ax
	movw	%ax,%ds
	movw	%ax,%es
	movw	%ax,%ss

	xorw	%ax, %ax
	movw	%ax, %fs
	movw	%ax, %gs

	/* Restore registers - start with the MSRs */
#if NLAPIC > 0
	movl	$MSR_APICBASE, %ecx
	movl	.Lacpi_saved_apicbase, %eax
	movl	.Lacpi_saved_apicbase+4, %edx
	wrmsr
#endif

	movl	$MSR_STAR, %ecx
	movl	.Lacpi_saved_star, %eax
	movl	.Lacpi_saved_star+4, %edx
	wrmsr

	movl	$MSR_LSTAR, %ecx
	movl	.Lacpi_saved_lstar, %eax
	movl	.Lacpi_saved_lstar+4, %edx
	wrmsr

	movl	$MSR_CSTAR, %ecx
	movl	.Lacpi_saved_cstar, %eax
	movl	.Lacpi_saved_cstar+4, %edx
	wrmsr

	movl	$MSR_SFMASK, %ecx
	movl	.Lacpi_saved_sfmask, %eax
	movl	.Lacpi_saved_sfmask+4, %edx
	wrmsr

	movl	$MSR_FSBASE, %ecx
	movl	.Lacpi_saved_fsbase, %eax
	movl	.Lacpi_saved_fsbase+4, %edx
	wrmsr

	movl	$MSR_GSBASE, %ecx
	movl	.Lacpi_saved_gsbase, %eax
	movl	.Lacpi_saved_gsbase+4, %edx
	wrmsr

	movl	$MSR_KERNELGSBASE, %ecx
	movl	.Lacpi_saved_kgs, %eax
	movl	.Lacpi_saved_kgs+4, %edx
	wrmsr

	/* Restore control registers */
	movq	.Lacpi_saved_cr8, %rax
	movq	%rax, %cr8
	movq	.Lacpi_saved_cr4, %rax
	movq	%rax, %cr4
	movq	.Lacpi_saved_cr3, %rax
	movq	%rax, %cr3

	/* Flush the prefetch queue again */
	jmp	1f
1:	jmp	1f
1:

	movq	.Lacpi_saved_cr2, %rax
	movq	%rax, %cr2
	movq	.Lacpi_saved_cr0, %rax
	movq	%rax, %cr0

	/* Flush the prefetch queue again */
	jmp	1f
1:	jmp	1f
1:

	lldt	.Lacpi_saved_ldt
	lidt	.Lacpi_saved_idt

	/* Restore the saved task register */
	xorq	%rcx, %rcx
	movw	.Lacpi_saved_tr, %cx
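	/*
	 * .Lacpi_saved_gdt was written by sgdt, so the base address stored
	 * at offset 2 is the linear base of the saved GDT. 5(%rax,%rcx) is
	 * then the type byte of the TSS descriptor the saved %tr points
	 * at; clearing the busy bit there keeps the ltr below from
	 * faulting on a TSS that is still marked busy.
	 */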
	movq	.Lacpi_saved_gdt+2, %rax
	andb	$0xF9, 5(%rax,%rcx)
	ltr	%cx

	pushq	.Lacpi_saved_fl
	popfq

	movq	.Lacpi_saved_rbx, %rbx
	movq	.Lacpi_saved_rcx, %rcx
	movq	.Lacpi_saved_rdx, %rdx
	movq	.Lacpi_saved_rbp, %rbp
	movq	.Lacpi_saved_rsi, %rsi
	movq	.Lacpi_saved_rdi, %rdi
	movq	.Lacpi_saved_rsp, %rsp

	movq	.Lacpi_saved_r8, %r8
	movq	.Lacpi_saved_r9, %r9
	movq	.Lacpi_saved_r10, %r10
	movq	.Lacpi_saved_r11, %r11
	movq	.Lacpi_saved_r12, %r12
	movq	.Lacpi_saved_r13, %r13
	movq	.Lacpi_saved_r14, %r14
	movq	.Lacpi_saved_r15, %r15

	/* Poke CR3 one more time. Might not be necessary */
	movq	.Lacpi_saved_cr3, %rax
	movq	%rax, %cr3

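	/*
	 * acpi_savecpu (below) returned 1 to its caller when the state was
	 * saved; here we jump back to that same return address with %rax
	 * set to 0, so the caller can tell the resume path apart from the
	 * original return, much like setjmp/longjmp.
	 */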
	xorq	%rax, %rax
	jmp	*.Lacpi_saved_ret

#ifdef HIBERNATE
	/*
	 * hibernate_resume_machdep drops to real mode and
	 * restarts the OS using the saved S3 resume vector
	 */
	.code64
NENTRY(hibernate_resume_machdep)
	/*
	 * We enter here on the resume-time page table; switch temporarily
	 * to the suspended kernel's old page table (needed to access the
	 * suspended kernel's retguard area).
	 */
	movq	.Lacpi_saved_cr3, %rax
	movq	%rax, %cr3

	/*
	 * Now back on suspended kernel's page tables. Need to copy
	 * into rodata, so instead of fixing up the perms here and
	 * resetting them later, temporarily disable CR0.WP to allow
	 * us to write.
	 */
	movq	%cr0, %rax
	andq	$(~CR0_WP), %rax
	movq	%rax, %cr0

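	/*
	 * Per the SysV AMD64 calling convention the first argument arrived
	 * in %rdi; it presumably points at the saved retguard data and
	 * becomes the copy source (%rsi). The destination is
	 * __retguard_start and the count is the size of the retguard
	 * region in quadwords, so the rep movsq below copies the whole
	 * region.
	 */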
	movq	%rdi, %rsi
	movq	$__retguard_start, %rdi
	movq	$__retguard_end, %rcx
	subq	%rdi, %rcx
	shrq	$0x3, %rcx
	rep	movsq

	/* Reenable CR0.WP */
	movq	%cr0, %rax
	orq	$(CR0_WP), %rax
	movq	%rax, %cr0

	cli
	/* Jump to the identity mapped version of ourself */
	mov	$.Lhibernate_resume_vector_2, %rax
	jmp	*%rax
_ACPI_TRMP_LABEL(.Lhibernate_resume_vector_2)

	/* Get out of 64 bit CS */
	lgdtq	.Ltmp_gdt6416

	/* Jump out of 64 bit mode, to hibernate_resume_vector_3 below */
	ljmp	*(.Lhibernate_indirect_16)
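
	/*
	 * The indirect far jump above loads %cs:%eip from the far pointer
	 * at .Lhibernate_indirect_16: offset .Lhibernate_resume_vector_3
	 * and selector 0x18, the 16-bit code descriptor (based at
	 * ACPI_TRAMPOLINE) in .Ltmp_gdt6416. That drops us from 64-bit
	 * code into 16-bit protected mode; paging and protection are
	 * switched off just below.
	 */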

_ACPI_TRMP_OFFSET(.Lhibernate_resume_vector_3)
	.code16

	/* must clear CR4.PCIDE before clearing CR0.PG */
	movl	%cr4, %eax
	andl	$(~CR4_PCIDE), %eax
	movl	%eax, %cr4

	movl	%cr0, %eax
	/* Disable CR0.PG - no paging */
	andl	$(~CR0_PG), %eax
	/* Disable CR0.PE - real mode */
	andl	$(~CR0_PE), %eax
	movl	%eax, %cr0

	/* Set up real mode segment selectors */
	movw	$(_ACPI_RM_DATA_SEG), %ax
	movw	%ax, %ds
	movw	%ax, %ss
	movw	%ax, %es
	movw	%ax, %fs
	movw	%ax, %gs
	movl	$0x0FFE, %esp
	addr32 lidtl	.Lclean_idt

	/* Jump to the S3 resume vector */
	ljmp	$(_ACPI_RM_CODE_SEG), $.Lacpi_s3_vector_real

NENTRY(hibernate_drop_to_real_mode)
	.code64
	cli
	/* Jump to the identity mapped version of ourself */
	mov	$.Lhibernate_resume_vector_2b, %rax
	jmp	*%rax
_ACPI_TRMP_LABEL(.Lhibernate_resume_vector_2b)

	/* Get out of 64 bit CS */
	lgdtq	.Ltmp_gdt6416

	/* Jump out of 64 bit mode, to hibernate_resume_vector_3b below */
	ljmp	*(.Lhibernate_indirect_16b)

_ACPI_TRMP_OFFSET(.Lhibernate_resume_vector_3b)
	.code16

	/* must clear CR4.PCIDE before clearing CR0.PG */
	movl	%cr4, %eax
	andl	$(~CR4_PCIDE), %eax
	movl	%eax, %cr4

	movl	%cr0, %eax
	/* Disable CR0.PG - no paging */
	andl	$(~CR0_PG), %eax
	/* Disable CR0.PE - real mode */
	andl	$(~CR0_PE), %eax
	movl	%eax, %cr0

	/* Set up real mode segment selectors */
	movw	$(_ACPI_RM_DATA_SEG), %ax
	movw	%ax, %ds
	movw	%ax, %es
	movw	%ax, %fs
	movw	%ax, %gs
	movw	%ax, %ss
	movl	$0x0FFE, %esp
	addr32 lidtl	.Lclean_idt

_ACPI_TRMP_OFFSET(.Lhib_hlt_real)
	hlt
	ljmp	$(_ACPI_RM_CODE_SEG), $.Lhib_hlt_real

	.code64
	/* Switch to hibernate resume pagetable */
NENTRY(hibernate_activate_resume_pt_machdep)
	RETGUARD_SETUP(hibernate_activate_resume_pt_machdep, r11)
	/* Enable large pages */
	movq	%cr4, %rax
	orq	$(CR4_PSE), %rax

	/* Disable global pages */
	andq	$(~CR4_PGE), %rax
	movq	%rax, %cr4

	wbinvd
	movq	$HIBERNATE_PML4T, %rax
	movq	%rax,	%cr3
	jmp	1f

1:	RETGUARD_CHECK(hibernate_activate_resume_pt_machdep, r11)
	ret

	/*
	 * Switch to the private resume-time hibernate stack
	 */
NENTRY(hibernate_switch_stack_machdep)
	RETGUARD_SETUP(hibernate_switch_stack_machdep, r11)
	movq	(%rsp), %rax
	movq	%rax, HIBERNATE_STACK_PAGE + HIBERNATE_STACK_OFFSET
	movq	$(HIBERNATE_STACK_PAGE + HIBERNATE_STACK_OFFSET), %rax
	movq	%rax, %rsp

	/* On our own stack from here onward */
	RETGUARD_CHECK(hibernate_switch_stack_machdep, r11)
	ret

NENTRY(hibernate_flush)
	RETGUARD_SETUP(hibernate_flush, r11)
	invlpg	HIBERNATE_INFLATE_PAGE
	RETGUARD_CHECK(hibernate_flush, r11)
	ret
#endif /* HIBERNATE */

	/*
	 * End of resume code (code copied to ACPI_TRAMPOLINE)
	 */
_C_LABEL(acpi_resume_end):

	/*
	 * Initial copy of this data gets placed in .rodata, kernel makes
	 * RW copy of it in the tramp data page.
	 */
	.section .rodata
_C_LABEL(acpi_tramp_data_start):
_ACPI_TRMP_DATA_OFFSET(.Ltmp_gdt)
	.word	.Ltmp_gdt_end - .Ltmp_gdtable
	.long	.Ltmp_gdtable

	.align 8, 0xcc
_ACPI_TRMP_DATA_LABEL(.Ltmp_gdtable)
	/*
	 * null
	 */
	.word	0, 0
	.byte	0, 0, 0, 0
	/*
	 * Code
	 * Limit: 0xffffffff
	 * Base: 0x00000000
	 * Descriptor Type: Code
	 * Segment Type: CRA
	 * Present: True
	 * Priv: 0
	 * AVL: False
	 * 64-bit: False
	 * 32-bit: True
	 *
	 */
	.word	0xffff, 0
	.byte	0, 0x9f, 0xcf, 0

	/*
	 * Data
	 * Limit: 0xffffffff
	 * Base: 0x00000000
	 * Descriptor Type: Data
	 * Segment Type: W
	 * Present: True
	 * Priv: 0
	 * AVL: False
	 * 64-bit: False
	 * 32-bit: True
	 *
	 */
	.word	0xffff, 0
	.byte	0, 0x93, 0xcf, 0
_ACPI_TRMP_DATA_LABEL(.Ltmp_gdt_end)

	.align 8, 0xcc
_ACPI_TRMP_DATA_OFFSET(.Lclean_idt)
	.word	0xffff
	.long	0
	.word	0

	.align 8, 0xcc
_ACPI_TRMP_DATA_LABEL(.Ltmp_gdt64)
	.word	.Ltmp_gdt64_end - .Ltmp_gdtable64
	.long	.Ltmp_gdtable64

	.align 8, 0xcc
_ACPI_TRMP_DATA_LABEL(.Ltmp_gdtable64)
	.quad	0x0000000000000000
	.quad	0x00af9a000000ffff
	.quad	0x00cf92000000ffff
_ACPI_TRMP_DATA_LABEL(.Ltmp_gdt64_end)
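	/*
	 * .Ltmp_gdtable64 above decodes as: a null descriptor, a code
	 * descriptor with access byte 0x9a and flags 0xaf (present, DPL 0,
	 * execute/read, G=1, L=1, i.e. a 64-bit code segment), and a data
	 * descriptor with access byte 0x92 and flags 0xcf (present, DPL 0,
	 * read/write, 4GB flat).
	 */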

	.align 8, 0xcc
_ACPI_TRMP_DATA_LABEL(.Ltmp_gdt6416)
	.word	.Ltmp_gdt6416_end - .Ltmp_gdtable6416
	.quad	.Ltmp_gdtable6416

	.align 8, 0xcc
_ACPI_TRMP_DATA_LABEL(.Ltmp_gdtable6416)
	.quad	0x0000000000000000
	.quad	0x00af9a000000ffff
	.quad	0x00cf92000000ffff
	.word	0x0fff, (ACPI_TRAMPOLINE % 0x10000)
	.byte	(ACPI_TRAMPOLINE >> 16), 0x9a, 0, 0
_ACPI_TRMP_DATA_LABEL(.Ltmp_gdt6416_end)
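	/*
	 * The fourth entry (selector 0x18) is a 16-bit code descriptor:
	 * limit 0x0fff (byte granular, 4KB), base ACPI_TRAMPOLINE, access
	 * byte 0x9a (present, DPL 0, execute/read), and a zero flags byte
	 * (G=0, D=0), so the default operand size is 16 bits.
	 */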

	.align 8, 0xcc
_ACPI_TRMP_DATA_LABEL(.Lacpi_saved_rbx)
	.quad 0
_ACPI_TRMP_DATA_LABEL(.Lacpi_saved_rcx)
	.quad 0
_ACPI_TRMP_DATA_LABEL(.Lacpi_saved_rdx)
	.quad 0
_ACPI_TRMP_DATA_LABEL(.Lacpi_saved_rbp)
	.quad 0
_ACPI_TRMP_DATA_LABEL(.Lacpi_saved_rsi)
	.quad 0
_ACPI_TRMP_DATA_LABEL(.Lacpi_saved_rdi)
	.quad 0
_ACPI_TRMP_DATA_LABEL(.Lacpi_saved_rsp)
	.quad 0
_ACPI_TRMP_DATA_LABEL(.Lacpi_saved_r8)
	.quad 0
_ACPI_TRMP_DATA_LABEL(.Lacpi_saved_r9)
	.quad 0
_ACPI_TRMP_DATA_LABEL(.Lacpi_saved_r10)
	.quad 0
_ACPI_TRMP_DATA_LABEL(.Lacpi_saved_r11)
	.quad 0
_ACPI_TRMP_DATA_LABEL(.Lacpi_saved_r12)
	.quad 0
_ACPI_TRMP_DATA_LABEL(.Lacpi_saved_r13)
	.quad 0
_ACPI_TRMP_DATA_LABEL(.Lacpi_saved_r14)
	.quad 0
_ACPI_TRMP_DATA_LABEL(.Lacpi_saved_r15)
	.quad 0
_ACPI_TRMP_DATA_LABEL(.Lacpi_saved_fl)
	.quad 0
_ACPI_TRMP_DATA_LABEL(.Lacpi_saved_cr0)
	.quad 0
_ACPI_TRMP_DATA_LABEL(.Lacpi_saved_cr2)
	.quad 0
_ACPI_TRMP_DATA_LABEL(.Lacpi_saved_cr3)
	.quad 0
_ACPI_TRMP_DATA_LABEL(.Lacpi_saved_cr4)
	.quad 0
_ACPI_TRMP_DATA_LABEL(.Lacpi_saved_cr8)
	.quad 0
_ACPI_TRMP_DATA_LABEL(.Lacpi_saved_ret)
	.quad 0

	.align 8, 0xcc
_ACPI_TRMP_DATA_LABEL(.Lacpi_saved_idt)
	.space 10

	.align 8, 0xcc
_ACPI_TRMP_DATA_LABEL(.Lacpi_saved_gdt)
	.space 10

	.align 8, 0xcc
_ACPI_TRMP_DATA_LABEL(.Lacpi_saved_ldt)
	.space 10

_ACPI_TRMP_DATA_LABEL(.Lacpi_saved_tr)
	.short 0

	.align 4, 0xcc
_ACPI_TRMP_DATA_LABEL(.Lacpi_saved_efer)
	.long 0

	.align 8, 0xcc
_ACPI_TRMP_DATA_LABEL(.Lacpi_saved_fsbase)
	.quad 0
_ACPI_TRMP_DATA_LABEL(.Lacpi_saved_gsbase)
	.quad 0
_ACPI_TRMP_DATA_LABEL(.Lacpi_saved_kgs)
	.quad 0
_ACPI_TRMP_DATA_LABEL(.Lacpi_saved_star)
	.quad 0
_ACPI_TRMP_DATA_LABEL(.Lacpi_saved_lstar)
	.quad 0
_ACPI_TRMP_DATA_LABEL(.Lacpi_saved_cstar)
	.quad 0
_ACPI_TRMP_DATA_LABEL(.Lacpi_saved_sfmask)
	.quad 0
#if NLAPIC > 0
_ACPI_TRMP_DATA_LABEL(.Lacpi_saved_apicbase)
	.quad 0
#endif

	.align 4, 0xcc
_ACPI_TRMP_DATA_LABEL(acpi_pdirpa)
	.long 0
#ifdef HIBERNATE
_ACPI_TRMP_DATA_LABEL(.Lhibernate_indirect_16)
	.long	.Lhibernate_resume_vector_3
	.word	0x18
_ACPI_TRMP_DATA_LABEL(.Lhibernate_indirect_16b)
	.long	.Lhibernate_resume_vector_3b
	.word	0x18
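	/*
	 * Each of the two structures above is a far pointer (32-bit offset
	 * followed by 16-bit selector 0x18) consumed by the "ljmp *(...)"
	 * instructions in the hibernate paths above.
	 */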
#endif /* HIBERNATE */

_C_LABEL(acpi_tramp_data_end):

	/*
	 * acpi_savecpu saves the processor's registers and flags
	 * for use during the ACPI suspend/resume process.
	 */

	.code64
NENTRY(acpi_savecpu)
	movq	(%rsp), %rax
	movq	%rax, .Lacpi_saved_ret

	movq	%rbx, .Lacpi_saved_rbx
	movq	%rcx, .Lacpi_saved_rcx
	movq	%rdx, .Lacpi_saved_rdx
	movq	%rbp, .Lacpi_saved_rbp
	movq	%rsi, .Lacpi_saved_rsi
	movq	%rdi, .Lacpi_saved_rdi
	movq	%rsp, .Lacpi_saved_rsp
	/*
	 * acpi_protected_mode_resume restores state inline rather than
	 * returning here, so make the saved %rsp reflect the stack as it
	 * will look once our own return address has been popped.
	 */
	addq	$0x8, .Lacpi_saved_rsp

	movq	%r8, .Lacpi_saved_r8
	movq	%r9, .Lacpi_saved_r9
	movq	%r10, .Lacpi_saved_r10
	movq	%r11, .Lacpi_saved_r11
	movq	%r12, .Lacpi_saved_r12
	movq	%r13, .Lacpi_saved_r13
	movq	%r14, .Lacpi_saved_r14
	movq	%r15, .Lacpi_saved_r15

	/* Scratch reg saved - set up retguard */
	RETGUARD_SETUP(acpi_savecpu, r11)

	pushfq
	popq	.Lacpi_saved_fl

	movq	%cr0, %rax
	movq	%rax, .Lacpi_saved_cr0
	movq	%cr2, %rax
	movq	%rax, .Lacpi_saved_cr2
	movq	%cr3, %rax
	movq	%rax, .Lacpi_saved_cr3
	movq	%cr4, %rax
	movq	%rax, .Lacpi_saved_cr4
	movq	%cr8, %rax
	movq	%rax, .Lacpi_saved_cr8

	pushq	%rcx
	pushq	%rdx
#if NLAPIC > 0
	movl	$MSR_APICBASE, %ecx
	rdmsr
	movl	%eax, .Lacpi_saved_apicbase
	movl	%edx, .Lacpi_saved_apicbase+4
#endif

	movl	$MSR_STAR, %ecx
	rdmsr
	movl	%eax, .Lacpi_saved_star
	movl	%edx, .Lacpi_saved_star+4

	movl	$MSR_CSTAR, %ecx
	rdmsr
	movl	%eax, .Lacpi_saved_cstar
	movl	%edx, .Lacpi_saved_cstar+4

	movl	$MSR_LSTAR, %ecx
	rdmsr
	movl	%eax, .Lacpi_saved_lstar
	movl	%edx, .Lacpi_saved_lstar+4

	movl	$MSR_SFMASK, %ecx
	rdmsr
	movl	%eax, .Lacpi_saved_sfmask
	movl	%edx, .Lacpi_saved_sfmask+4

	movl	$MSR_FSBASE, %ecx
	rdmsr
	movl	%eax, .Lacpi_saved_fsbase
	movl	%edx, .Lacpi_saved_fsbase+4

	movl	$MSR_GSBASE, %ecx
	rdmsr
	movl	%eax, .Lacpi_saved_gsbase
	movl	%edx, .Lacpi_saved_gsbase+4

	movl	$MSR_KERNELGSBASE, %ecx
	rdmsr
	movl	%eax, .Lacpi_saved_kgs
	movl	%edx, .Lacpi_saved_kgs+4

	movl	$MSR_EFER, %ecx
	rdmsr
	movl	%eax, .Lacpi_saved_efer
	popq	%rdx
	popq	%rcx

	sgdt	.Lacpi_saved_gdt
	sidt	.Lacpi_saved_idt
	sldt	.Lacpi_saved_ldt
	str	.Lacpi_saved_tr

	movl	$1, %eax
	RETGUARD_CHECK(acpi_savecpu, r11)
	ret