/* $OpenBSD: acpi_wakecode.S,v 1.50 2024/02/25 22:33:09 guenther Exp $ */
/*
 * Copyright (c) 2001 Takanori Watanabe <takawata@jp.freebsd.org>
 * Copyright (c) 2001 Mitsuru IWASAKI <iwasaki@jp.freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * Copyright (c) 2008, 2009 Mike Larkin <mlarkin@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#define _ACPI_WAKECODE

#include "assym.h"
#include <machine/asm.h>
#ifdef HIBERNATE
#include <machine/hibernate_var.h>
#endif /* HIBERNATE */
#include <machine/specialreg.h>
#include <machine/param.h>
#include <machine/segments.h>
#include <dev/acpi/acpivar.h>
#include "lapic.h"

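/*
 * clang's integrated assembler does not accept the bare "addr32" prefix
 * used below, so define it away when building with clang.
 */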
#ifdef __clang__
#define addr32
#endif

#define _ACPI_TRMP_LABEL(a) a = . - acpi_real_mode_resume + \
	ACPI_TRAMPOLINE
#define _ACPI_TRMP_OFFSET(a) a = . - acpi_real_mode_resume
#define _ACPI_TRMP_DATA_LABEL(a) a = . - acpi_tramp_data_start + \
	ACPI_TRAMP_DATA
#define _ACPI_TRMP_DATA_OFFSET(a) a = . - acpi_tramp_data_start
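/* Real-mode segment values for the trampoline pages: physical base >> 4. */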
#define _ACPI_RM_CODE_SEG (ACPI_TRAMPOLINE >> 4)
#define _ACPI_RM_DATA_SEG (ACPI_TRAMP_DATA >> 4)

/*
 * On wakeup, we'll start executing at acpi_real_mode_resume.
 * This is based on the wakeup vector previously stored with
 * ACPI before we went to sleep. ACPI's wakeup vector is a
 * physical address - in our case, it's calculated and mapped
 * by the kernel and stuffed into a low page early in the boot
 * process.
 *
 * We wake up in real mode, at some phys addr based on the ACPI
 * specification (cs = phys >> 4, ip = phys & 0xF). For example,
 * if our phys addr is 0x13000, we'd have cs=0x1300, ip=0.
 *
 * The wakeup code needs to do the following:
 *     1. Reenable the video display
 *     2. Enter 32 bit protected mode
 *     3. Reenable paging
 *     4. Enter long mode
 *     5. Restore saved CPU registers
 *
 * The initial copy of this code is placed in .rodata; the kernel
 * makes an RX copy of it in the ACPI trampoline page.
 */
	.section .rodata
	.code16
	.align 4, 0xcc
	.global acpi_resume_end
	.global acpi_pdirpa
	.global acpi_tramp_data_start
	.global acpi_tramp_data_end
GENTRY(acpi_real_mode_resume)
_ACPI_TRMP_OFFSET(.Lacpi_s3_vector_real)
	nop
	cli
	cld

	/*
	 * Set up segment registers for real mode.
	 * We'll only be in real mode for a moment, and we don't have
	 * any real dependencies on data or stack, so we'll just use
	 * the code segment for data and stack (i.e., a 64k memory space).
	 */
	movw	$(_ACPI_RM_DATA_SEG), %ax
	movw	%ax, %ds
	movw	%ax, %ss
	movw	%cs, %ax
	movw	%ax, %es
	addr32 lidtl	.Lclean_idt

	/*
	 * Set up stack to grow down from offset 0x0FFE.
	 * We will only be doing a few push/pops and no calls in real
	 * mode, so as long as the real mode code in the segment
	 * plus stack doesn't exceed 0x0FFE (4094) bytes, we'll be ok.
	 */
	movw	$0x0FFE,%sp

	/*
	 * Clear flags
	 */
	pushl	$0
	popfl

	/*
	 * Flush instruction prefetch queue
	 */
	jmp	1f
1:	jmp	1f
1:

	/*
	 * We're about to enter protected mode, so we need a GDT for that.
	 * Set up a temporary GDT describing 2 segments, one for code
	 * extending from 0x00000000-0xffffffff and one for data
	 * with the same range. This GDT will only be in use for a short
	 * time, until we restore the saved GDT that we had when we went
	 * to sleep.
	 */
	addr32 lgdtl	.Ltmp_gdt

	/*
	 * Enable protected mode by setting the PE bit in CR0
	 */
	mov	%cr0,%eax
	orl	$(CR0_PE),%eax
	mov	%eax,%cr0

	/*
	 * Force the CPU into protected mode by making an intersegment jump
	 * (to ourselves, just a few lines down from here). We rely on the
	 * kernel having fixed up the jump target address beforehand.
	 */
	ljmpl	$0x8, $.Lacpi_protected_mode_trampoline

	.code32
	.align 16, 0xcc
_ACPI_TRMP_LABEL(.Lacpi_protected_mode_trampoline)
	/* acpi_protected_mode_resume: */
	nop

	/*
	 * We're in protected mode now, without paging enabled.
	 *
	 * Set up segment selectors for protected mode.
	 * We've already set up our cs via the intersegment jump earlier,
	 * but we need to set ds,es,fs,gs,ss to all point to the
	 * 4GB flat data segment we defined earlier.
	 */
	movw	$GSEL(GDATA_SEL,SEL_KPL),%ax
	movw	%ax,%ds
	movw	%ax,%es
	movw	%ax,%gs
	movw	%ax,%ss
	movw	%ax,%fs

	/*
	 * Reset ESP based on protected mode. We can do this here
	 * because we haven't put anything on the stack via a
	 * call or push that we haven't cleaned up already.
	 */
	addl	$(ACPI_TRAMP_DATA), %esp

	/* Set CR4 to something sane for entry into long mode */
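	/* PAE is mandatory for long mode; OSFXSR/OSXMMEXCPT enable SSE. */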
	mov	$(CR4_PAE|CR4_OSFXSR|CR4_OSXMMEXCPT|CR4_PSE),%eax
	mov	%eax,%cr4

	/*
	 * Set up a temporary long mode GDT describing 2
	 * segments, one for code and one for data.
	 */
	lgdt	.Ltmp_gdt64

	/* Restore saved EFER (LME, NXE, etc) */
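	/*
	 * rdmsr first so %edx keeps the current high half of EFER; only
	 * the saved LME, NXE and SCE bits are written back via %eax.
	 */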
	movl	$MSR_EFER, %ecx
	rdmsr
	movl	.Lacpi_saved_efer, %eax
	andl	$(EFER_LME | EFER_NXE | EFER_SCE), %eax
	wrmsr

	/* Reenable paging using temporary cr3 */
	movl	$acpi_pdirpa, %eax
	movl	(%eax), %eax
	movl	%eax, %cr3

	/* Flush the prefetch queue again */
	jmp	1f
1:	jmp	1f
1:

	/* Reenable paging by setting the appropriate bits in CR0 */
	movl	%cr0,%eax
	orl	$CR0_DEFAULT,%eax
	movl	%eax,%cr0

	/* Flush the prefetch queue again */
	jmp	1f
1:	jmp	1f
1:

	/* Enter long mode by making another intersegment jump */
	ljmp	$0x8, $.Lacpi_long_mode_trampoline

	.code64
	.align 16, 0xcc
_ACPI_TRMP_LABEL(.Lacpi_long_mode_trampoline)

	/* Reset stack */
	movq	$(ACPI_TRAMP_DATA + 0x0FF8), %rsp

	/* Load GDT based on our saved copy */
	lgdt	.Lacpi_saved_gdt

	/* Reset segment registers */
	movw	$GSEL(GDATA_SEL, SEL_KPL),%ax
	movw	%ax,%ds
	movw	%ax,%es
	movw	%ax,%ss

	xorw	%ax, %ax
	movw	%ax, %fs
	movw	%ax, %gs

	/* Restore registers - start with the MSRs */
#if NLAPIC > 0
	movl	$MSR_APICBASE, %ecx
	movl	.Lacpi_saved_apicbase, %eax
	movl	.Lacpi_saved_apicbase+4, %edx
	wrmsr
#endif

	movl	$MSR_STAR, %ecx
	movl	.Lacpi_saved_star, %eax
	movl	.Lacpi_saved_star+4, %edx
	wrmsr

	movl	$MSR_LSTAR, %ecx
	movl	.Lacpi_saved_lstar, %eax
	movl	.Lacpi_saved_lstar+4, %edx
	wrmsr

	movl	$MSR_CSTAR, %ecx
	xorl	%eax, %eax
	xorl	%edx, %edx
	wrmsr

	movl	$MSR_SFMASK, %ecx
	movl	.Lacpi_saved_sfmask, %eax
	movl	.Lacpi_saved_sfmask+4, %edx
	wrmsr

	movl	$MSR_FSBASE, %ecx
	movl	.Lacpi_saved_fsbase, %eax
	movl	.Lacpi_saved_fsbase+4, %edx
	wrmsr

	movl	$MSR_GSBASE, %ecx
	movl	.Lacpi_saved_gsbase, %eax
	movl	.Lacpi_saved_gsbase+4, %edx
	wrmsr

	movl	$MSR_KERNELGSBASE, %ecx
	movl	.Lacpi_saved_kgs, %eax
	movl	.Lacpi_saved_kgs+4, %edx
	wrmsr

	/* Restore control registers */
	movq	.Lacpi_saved_cr8, %rax
	movq	%rax, %cr8
	movq	.Lacpi_saved_cr4, %rax
	movq	%rax, %cr4
	movq	.Lacpi_saved_cr3, %rax
	movq	%rax, %cr3

	/* Flush the prefetch queue again */
	jmp	1f
1:	jmp	1f
1:

	movq	.Lacpi_saved_cr2, %rax
	movq	%rax, %cr2
	movq	.Lacpi_saved_cr0, %rax
	movq	%rax, %cr0

	/* Flush the prefetch queue again */
	jmp	1f
1:	jmp	1f
1:

	lldt	.Lacpi_saved_ldt
	lidt	.Lacpi_saved_idt

	/* Restore the saved task register */
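	/*
	 * Clear the busy bit in the saved TSS descriptor (type field at
	 * byte 5) so that ltr will not fault on an already-busy TSS.
	 */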
	xorq	%rcx, %rcx
	movw	.Lacpi_saved_tr, %cx
	movq	.Lacpi_saved_gdt+2, %rax
	andb	$0xF9, 5(%rax,%rcx)
	ltr	%cx

	pushq	.Lacpi_saved_fl
	popfq

	movq	.Lacpi_saved_rbx, %rbx
	movq	.Lacpi_saved_rcx, %rcx
	movq	.Lacpi_saved_rdx, %rdx
	movq	.Lacpi_saved_rbp, %rbp
	movq	.Lacpi_saved_rsi, %rsi
	movq	.Lacpi_saved_rdi, %rdi
	movq	.Lacpi_saved_rsp, %rsp

	movq	.Lacpi_saved_r8, %r8
	movq	.Lacpi_saved_r9, %r9
	movq	.Lacpi_saved_r10, %r10
	movq	.Lacpi_saved_r11, %r11
	movq	.Lacpi_saved_r12, %r12
	movq	.Lacpi_saved_r13, %r13
	movq	.Lacpi_saved_r14, %r14
	movq	.Lacpi_saved_r15, %r15

	/* Poke CR3 one more time. Might not be necessary */
	movq	.Lacpi_saved_cr3, %rax
	movq	%rax, %cr3

	xorq	%rax, %rax
	jmp	*.Lacpi_saved_ret

#ifdef HIBERNATE
	/*
	 * hibernate_resume_machdep drops to real mode and
	 * restarts the OS using the saved S3 resume vector
	 */
	.code64
NENTRY(hibernate_resume_machdep)
	/*
	 * We are currently on the resume-time page table; switch
	 * temporarily to the suspended kernel's old page table (needed
	 * to access the suspended kernel's retguard area).
	 */
	movq	.Lacpi_saved_cr3, %rax
	movq	%rax, %cr3

	/*
	 * Now back on the suspended kernel's page tables. We need to
	 * copy into rodata, so instead of fixing up the permissions here
	 * and resetting them later, temporarily disable CR0.WP to allow
	 * us to write.
	 */
	movq	%cr0, %rax
	andq	$(~CR0_WP), %rax
	movq	%rax, %cr0

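	/*
	 * Copy the suspended kernel's retguard data from the buffer passed
	 * in %rdi over __retguard_start..__retguard_end, one quadword at
	 * a time.
	 */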
	movq	%rdi, %rsi
	movq	$__retguard_start, %rdi
	movq	$__retguard_end, %rcx
	subq	%rdi, %rcx
	shrq	$0x3, %rcx
	rep	movsq

	/* Reenable CR0.WP */
	movq	%cr0, %rax
	orq	$(CR0_WP), %rax
	movq	%rax, %cr0

	cli
	/* Jump to the identity mapped version of ourself */
	mov	$.Lhibernate_resume_vector_2, %rax
	jmp	*%rax
END(hibernate_resume_machdep)

	.section .rodata
_ACPI_TRMP_LABEL(.Lhibernate_resume_vector_2)

	/* Get out of 64 bit CS */
	lgdtq	.Ltmp_gdt6416

	/* Jump out of 64 bit mode, to hibernate_resume_vector_3 below */
	ljmp	*(.Lhibernate_indirect_16)

_ACPI_TRMP_OFFSET(.Lhibernate_resume_vector_3)
	.code16

	/* must clear CR4.PCIDE before clearing CR0.PG */
	movl	%cr4, %eax
	andl	$(~CR4_PCIDE), %eax
	movl	%eax, %cr4

	movl	%cr0, %eax
	/* Disable CR0.PG - no paging */
	andl	$(~CR0_PG), %eax
	/* Disable CR0.PE - real mode */
	andl	$(~CR0_PE), %eax
	movl	%eax, %cr0

	/* Set up real mode segment selectors */
	movw	$(_ACPI_RM_DATA_SEG), %ax
	movw	%ax, %ds
	movw	%ax, %ss
	movw	%ax, %es
	movw	%ax, %fs
	movw	%ax, %gs
	movl	$0x0FFE, %esp
	addr32 lidtl	.Lclean_idt

	/* Jump to the S3 resume vector */
	ljmp	$(_ACPI_RM_CODE_SEG), $.Lacpi_s3_vector_real

NENTRY(hibernate_drop_to_real_mode)
	.code64
	cli
	/* Jump to the identity mapped version of ourself */
	mov	$.Lhibernate_resume_vector_2b, %rax
	jmp	*%rax
END(hibernate_drop_to_real_mode)

	.section .rodata
_ACPI_TRMP_LABEL(.Lhibernate_resume_vector_2b)

	/* Get out of 64 bit CS */
	lgdtq	.Ltmp_gdt6416

	/* Jump out of 64 bit mode, to hibernate_resume_vector_3b below */
	ljmp	*(.Lhibernate_indirect_16b)

_ACPI_TRMP_OFFSET(.Lhibernate_resume_vector_3b)
	.code16

	/* must clear CR4.PCIDE before clearing CR0.PG */
	movl	%cr4, %eax
	andl	$(~CR4_PCIDE), %eax
	movl	%eax, %cr4

	movl	%cr0, %eax
	/* Disable CR0.PG - no paging */
	andl	$(~CR0_PG), %eax
	/* Disable CR0.PE - real mode */
	andl	$(~CR0_PE), %eax
	movl	%eax, %cr0

	/* Set up real mode segment selectors */
	movw	$(_ACPI_RM_DATA_SEG), %ax
	movw	%ax, %ds
	movw	%ax, %es
	movw	%ax, %fs
	movw	%ax, %gs
	movw	%ax, %ss
	movl	$0x0FFE, %esp
	addr32 lidtl	.Lclean_idt

_ACPI_TRMP_OFFSET(.Lhib_hlt_real)
	hlt
	ljmp	$(_ACPI_RM_CODE_SEG), $.Lhib_hlt_real

	.code64
	/* Switch to hibernate resume pagetable */
NENTRY(hibernate_activate_resume_pt_machdep)
	RETGUARD_SETUP(hibernate_activate_resume_pt_machdep, r11)
	/* Enable large pages */
	movq	%cr4, %rax
	orq	$(CR4_PSE), %rax

	/* Disable global pages */
	andq	$(~CR4_PGE), %rax
	movq	%rax, %cr4

	wbinvd
	movq	$HIBERNATE_PML4T, %rax
	movq	%rax,	%cr3
	jmp	1f

1:	RETGUARD_CHECK(hibernate_activate_resume_pt_machdep, r11)
	ret
	lfence
END(hibernate_activate_resume_pt_machdep)

	/*
	 * Switch to the private resume-time hibernate stack
	 */
NENTRY(hibernate_switch_stack_machdep)
	RETGUARD_SETUP(hibernate_switch_stack_machdep, r11)
	movq	(%rsp), %rax
	movq	%rax, HIBERNATE_STACK_PAGE + HIBERNATE_STACK_OFFSET
	movq	$(HIBERNATE_STACK_PAGE + HIBERNATE_STACK_OFFSET), %rax
	movq	%rax, %rsp

	/* On our own stack from here onward */
	RETGUARD_CHECK(hibernate_switch_stack_machdep, r11)
	ret
	lfence
END(hibernate_switch_stack_machdep)

NENTRY(hibernate_flush)
	RETGUARD_SETUP(hibernate_flush, r11)
	invlpg	HIBERNATE_INFLATE_PAGE
	RETGUARD_CHECK(hibernate_flush, r11)
	ret
	lfence
END(hibernate_flush)
#endif /* HIBERNATE */

	/*
	 * End of resume code (code copied to ACPI_TRAMPOLINE)
	 */
	.section .rodata
	.type	acpi_resume_end,@object
acpi_resume_end:
END(acpi_real_mode_resume)

	/*
	 * The initial copy of this data is placed in .rodata; the kernel
	 * makes an RW copy of it in the tramp data page.
	 */
	.section .rodata
	.type	acpi_tramp_data_start,@object
acpi_tramp_data_start:
_ACPI_TRMP_DATA_OFFSET(.Ltmp_gdt)
	.word	.Ltmp_gdt_end - .Ltmp_gdtable
	.long	.Ltmp_gdtable

	.align 8, 0xcc
_ACPI_TRMP_DATA_LABEL(.Ltmp_gdtable)
	/*
	 * null
	 */
	.word	0, 0
	.byte	0, 0, 0, 0
	/*
	 * Code
	 * Limit: 0xffffffff
	 * Base: 0x00000000
	 * Descriptor Type: Code
	 * Segment Type: CRA
	 * Present: True
	 * Priv: 0
	 * AVL: False
	 * 64-bit: False
	 * 32-bit: True
	 *
	 */
	.word	0xffff, 0
	.byte	0, 0x9f, 0xcf, 0

	/*
	 * Data
	 * Limit: 0xffffffff
	 * Base: 0x00000000
	 * Descriptor Type: Data
	 * Segment Type: W
	 * Present: True
	 * Priv: 0
	 * AVL: False
	 * 64-bit: False
	 * 32-bit: True
	 *
	 */
	.word	0xffff, 0
	.byte	0, 0x93, 0xcf, 0
_ACPI_TRMP_DATA_LABEL(.Ltmp_gdt_end)

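	/*
	 * A "clean" IDT descriptor: limit 0xffff, base 0, i.e. the
	 * real-mode IVT at physical address 0.
	 */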
	.align 8, 0xcc
_ACPI_TRMP_DATA_OFFSET(.Lclean_idt)
	.word	0xffff
	.long	0
	.word	0

	.align 8, 0xcc
_ACPI_TRMP_DATA_LABEL(.Ltmp_gdt64)
	.word	.Ltmp_gdt64_end - .Ltmp_gdtable64
	.long	.Ltmp_gdtable64

	.align 8, 0xcc
_ACPI_TRMP_DATA_LABEL(.Ltmp_gdtable64)
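	/* null, 64-bit code, and data descriptors */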
	.quad	0x0000000000000000
	.quad	0x00af9a000000ffff
	.quad	0x00cf92000000ffff
_ACPI_TRMP_DATA_LABEL(.Ltmp_gdt64_end)

	.align 8, 0xcc
_ACPI_TRMP_DATA_LABEL(.Ltmp_gdt6416)
	.word	.Ltmp_gdt6416_end - .Ltmp_gdtable6416
	.quad	.Ltmp_gdtable6416

	.align 8, 0xcc
_ACPI_TRMP_DATA_LABEL(.Ltmp_gdtable6416)
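	/*
	 * null, 64-bit code, and data descriptors as above, plus a 16-bit
	 * code descriptor (selector 0x18) based at ACPI_TRAMPOLINE.
	 */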
	.quad	0x0000000000000000
	.quad	0x00af9a000000ffff
	.quad	0x00cf92000000ffff
	.word	0x0fff, (ACPI_TRAMPOLINE % 0x10000)
	.byte	(ACPI_TRAMPOLINE >> 16), 0x9a, 0, 0
_ACPI_TRMP_DATA_LABEL(.Ltmp_gdt6416_end)

	.align 8, 0xcc
_ACPI_TRMP_DATA_LABEL(.Lacpi_saved_rbx)
	.quad 0
_ACPI_TRMP_DATA_LABEL(.Lacpi_saved_rcx)
	.quad 0
_ACPI_TRMP_DATA_LABEL(.Lacpi_saved_rdx)
	.quad 0
_ACPI_TRMP_DATA_LABEL(.Lacpi_saved_rbp)
	.quad 0
_ACPI_TRMP_DATA_LABEL(.Lacpi_saved_rsi)
	.quad 0
_ACPI_TRMP_DATA_LABEL(.Lacpi_saved_rdi)
	.quad 0
_ACPI_TRMP_DATA_LABEL(.Lacpi_saved_rsp)
	.quad 0
_ACPI_TRMP_DATA_LABEL(.Lacpi_saved_r8)
	.quad 0
_ACPI_TRMP_DATA_LABEL(.Lacpi_saved_r9)
	.quad 0
_ACPI_TRMP_DATA_LABEL(.Lacpi_saved_r10)
	.quad 0
_ACPI_TRMP_DATA_LABEL(.Lacpi_saved_r11)
	.quad 0
_ACPI_TRMP_DATA_LABEL(.Lacpi_saved_r12)
	.quad 0
_ACPI_TRMP_DATA_LABEL(.Lacpi_saved_r13)
	.quad 0
_ACPI_TRMP_DATA_LABEL(.Lacpi_saved_r14)
	.quad 0
_ACPI_TRMP_DATA_LABEL(.Lacpi_saved_r15)
	.quad 0
_ACPI_TRMP_DATA_LABEL(.Lacpi_saved_fl)
	.quad 0
_ACPI_TRMP_DATA_LABEL(.Lacpi_saved_cr0)
	.quad 0
_ACPI_TRMP_DATA_LABEL(.Lacpi_saved_cr2)
	.quad 0
_ACPI_TRMP_DATA_LABEL(.Lacpi_saved_cr3)
	.quad 0
_ACPI_TRMP_DATA_LABEL(.Lacpi_saved_cr4)
	.quad 0
_ACPI_TRMP_DATA_LABEL(.Lacpi_saved_cr8)
	.quad 0
_ACPI_TRMP_DATA_LABEL(.Lacpi_saved_ret)
	.quad 0

	.align 8, 0xcc
_ACPI_TRMP_DATA_LABEL(.Lacpi_saved_idt)
	.space 10

	.align 8, 0xcc
_ACPI_TRMP_DATA_LABEL(.Lacpi_saved_gdt)
	.space 10

	.align 8, 0xcc
_ACPI_TRMP_DATA_LABEL(.Lacpi_saved_ldt)
	.space 10

_ACPI_TRMP_DATA_LABEL(.Lacpi_saved_tr)
	.short 0

	.align 4, 0xcc
_ACPI_TRMP_DATA_LABEL(.Lacpi_saved_efer)
	.long 0

	.align 8, 0xcc
_ACPI_TRMP_DATA_LABEL(.Lacpi_saved_fsbase)
	.quad 0
_ACPI_TRMP_DATA_LABEL(.Lacpi_saved_gsbase)
	.quad 0
_ACPI_TRMP_DATA_LABEL(.Lacpi_saved_kgs)
	.quad 0
_ACPI_TRMP_DATA_LABEL(.Lacpi_saved_star)
	.quad 0
_ACPI_TRMP_DATA_LABEL(.Lacpi_saved_lstar)
	.quad 0
_ACPI_TRMP_DATA_LABEL(.Lacpi_saved_sfmask)
	.quad 0
#if NLAPIC > 0
_ACPI_TRMP_DATA_LABEL(.Lacpi_saved_apicbase)
	.quad 0
#endif

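	/*
	 * Physical address of the temporary page table used to turn paging
	 * back on; filled in by the kernel before the trampoline runs.
	 */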
	.align 4, 0xcc
	.type	acpi_pdirpa,@object
_ACPI_TRMP_DATA_LABEL(acpi_pdirpa)
	.long 0
	.size	acpi_pdirpa, 4
#ifdef HIBERNATE
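	/*
	 * Far pointers (32-bit offset plus the 16-bit code selector 0x18
	 * from .Ltmp_gdt6416) used to jump out of long mode above.
	 */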
_ACPI_TRMP_DATA_LABEL(.Lhibernate_indirect_16)
	.long	.Lhibernate_resume_vector_3
	.word	0x18
_ACPI_TRMP_DATA_LABEL(.Lhibernate_indirect_16b)
	.long	.Lhibernate_resume_vector_3b
	.word	0x18
#endif /* HIBERNATE */

	.type	acpi_tramp_data_end,@object
acpi_tramp_data_end:
END(acpi_tramp_data_start)

	/*
	 * acpi_savecpu saves the processor's registers and flags
	 * for use during the ACPI suspend/resume process.
	 */

	.code64
NENTRY(acpi_savecpu)
	movq	(%rsp), %rax
	movq	%rax, .Lacpi_saved_ret

	movq	%rbx, .Lacpi_saved_rbx
	movq	%rcx, .Lacpi_saved_rcx
	movq	%rdx, .Lacpi_saved_rdx
	movq	%rbp, .Lacpi_saved_rbp
	movq	%rsi, .Lacpi_saved_rsi
	movq	%rdi, .Lacpi_saved_rdi
	movq	%rsp, .Lacpi_saved_rsp
	/*
	 * acpi_protected_mode_resume performs its restores inline and
	 * never returns through us, so undo our own ret by saving the
	 * stack pointer as if we had already returned.
	 */
	addq	$0x8, .Lacpi_saved_rsp

	movq	%r8, .Lacpi_saved_r8
	movq	%r9, .Lacpi_saved_r9
	movq	%r10, .Lacpi_saved_r10
	movq	%r11, .Lacpi_saved_r11
	movq	%r12, .Lacpi_saved_r12
	movq	%r13, .Lacpi_saved_r13
	movq	%r14, .Lacpi_saved_r14
	movq	%r15, .Lacpi_saved_r15

	/* Scratch reg saved - set up retguard */
	RETGUARD_SETUP(acpi_savecpu, r11)

	pushfq
	popq	.Lacpi_saved_fl

	movq	%cr0, %rax
	movq	%rax, .Lacpi_saved_cr0
	movq	%cr2, %rax
	movq	%rax, .Lacpi_saved_cr2
	movq	%cr3, %rax
	movq	%rax, .Lacpi_saved_cr3
	movq	%cr4, %rax
	movq	%rax, .Lacpi_saved_cr4
	movq	%cr8, %rax
	movq	%rax, .Lacpi_saved_cr8

	pushq	%rcx
	pushq	%rdx
#if NLAPIC > 0
	movl	$MSR_APICBASE, %ecx
	rdmsr
	movl	%eax, .Lacpi_saved_apicbase
	movl	%edx, .Lacpi_saved_apicbase+4
#endif

	movl	$MSR_STAR, %ecx
	rdmsr
	movl	%eax, .Lacpi_saved_star
	movl	%edx, .Lacpi_saved_star+4

	movl	$MSR_LSTAR, %ecx
	rdmsr
	movl	%eax, .Lacpi_saved_lstar
	movl	%edx, .Lacpi_saved_lstar+4

	movl	$MSR_SFMASK, %ecx
	rdmsr
	movl	%eax, .Lacpi_saved_sfmask
	movl	%edx, .Lacpi_saved_sfmask+4

	movl	$MSR_FSBASE, %ecx
	rdmsr
	movl	%eax, .Lacpi_saved_fsbase
	movl	%edx, .Lacpi_saved_fsbase+4

	movl	$MSR_GSBASE, %ecx
	rdmsr
	movl	%eax, .Lacpi_saved_gsbase
	movl	%edx, .Lacpi_saved_gsbase+4

	movl	$MSR_KERNELGSBASE, %ecx
	rdmsr
	movl	%eax, .Lacpi_saved_kgs
	movl	%edx, .Lacpi_saved_kgs+4

	movl	$MSR_EFER, %ecx
	rdmsr
	movl	%eax, .Lacpi_saved_efer
	popq	%rdx
	popq	%rcx

	sgdt	.Lacpi_saved_gdt
	sidt	.Lacpi_saved_idt
	sldt	.Lacpi_saved_ldt
	str	.Lacpi_saved_tr

	movl	$1, %eax
	RETGUARD_CHECK(acpi_savecpu, r11)
	ret
	lfence
END(acpi_savecpu)
823