xref: /dragonfly/sys/platform/pc64/x86_64/support.s (revision 52509364)
1/*-
2 * Copyright (c) 1993 The Regents of the University of California.
3 * Copyright (c) 2003 Peter Wemm.
4 * Copyright (c) 2008 The DragonFly Project.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 *    notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 *    notice, this list of conditions and the following disclaimer in the
14 *    documentation and/or other materials provided with the distribution.
15 * 3. Neither the name of the University nor the names of its contributors
16 *    may be used to endorse or promote products derived from this software
17 *    without specific prior written permission.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * SUCH DAMAGE.
30 *
31 * $FreeBSD: src/sys/amd64/amd64/support.S,v 1.127 2007/05/23 08:33:04 kib Exp $
32 */
33
34#include <machine/asmacros.h>
35#include <machine/pmap.h>
36
37#include "assym.s"
38
39	ALIGN_DATA
40
41	.text
42
/*
 * bzero(ptr:%rdi, bytes:%rsi)
 *
 * Zero %rsi bytes starting at %rdi.  Clobbers %rax, %rcx and the
 * string registers; no return value.
 *
 * Using rep stosq is 70% faster than a %rax loop and almost as fast as
 * a %xmm0 loop on a modern intel cpu.
 *
 * Do not use non-temporal instructions here as we do not know the caller's
 * intent.
 */
ENTRY(bzero)
	movq	%rsi,%rcx			/* %rcx = byte count */
	xorl	%eax,%eax			/* fill value = 0 */
	shrq	$3,%rcx				/* zero 8 bytes at a time */
	cld					/* store forwards */
	rep
	stosq
	movq	%rsi,%rcx
	andq	$7,%rcx				/* then the remaining 0-7 bytes */
	rep
	stosb
	ret
END(bzero)
65
/*
 * pagezero(ptr:%rdi)
 *
 * Zero exactly one page (PAGE_SIZE bytes) starting at %rdi.
 *
 * Using rep stosq is nearly as fast as using %xmm0 on a modern intel cpu,
 * and about 70% faster than a %rax loop.
 *
 * Do not use non-temporal instructions here as we do not know the caller's
 * intent.
 */
ENTRY(pagezero)
	movq	$PAGE_SIZE>>3,%rcx		/* qword count for one page */
	xorl	%eax,%eax			/* fill value = 0 */
	cld					/* store forwards */
	rep
	stosq
	ret
END(pagezero)
83
/*
 * bcmp(ptr:%rdi, ptr:%rsi, bytes:%rdx)
 *
 * Compare two byte ranges.  Returns 0 in %eax if they are identical,
 * 1 otherwise (equality only -- no lexical ordering, unlike memcmp).
 */
ENTRY(bcmp)
	movq	%rdx,%rcx
	shrq	$3,%rcx				/* compare 8 bytes at a time */
	cld					/* compare forwards */
	repe
	cmpsq
	jne	1f

	movq	%rdx,%rcx
	andq	$7,%rcx				/* then the remaining 0-7 bytes */
	repe
	cmpsb
1:
	setne	%al				/* %al = 1 on mismatch, else 0 */
	movsbl	%al,%eax
	ret
END(bcmp)
104
/*
 * bcopy(src:%rdi, dst:%rsi, cnt:%rdx)
 *
 * Overlap-safe copy.  Copies forwards unless the regions overlap with
 * src < dst, in which case it copies backwards so the source is not
 * clobbered before it is read.
 *
 * ws@tools.de (Wolfgang Solfrank, TooLs GmbH) +49-228-985800
 */
ENTRY(bcopy)
	xchgq	%rsi,%rdi			/* movs wants %rsi=src, %rdi=dst */
	movq	%rdx,%rcx

	movq	%rdi,%rax
	subq	%rsi,%rax
	cmpq	%rcx,%rax			/* overlapping && src < dst? */
	jb	1f

	shrq	$3,%rcx				/* copy by 64-bit words */
	cld					/* nope, copy forwards */
	rep
	movsq
	movq	%rdx,%rcx
	andq	$7,%rcx				/* any bytes left? */
	rep
	movsb
	ret

	ALIGN_TEXT
1:
	addq	%rcx,%rdi			/* copy backwards */
	addq	%rcx,%rsi
	decq	%rdi				/* point at the last byte */
	decq	%rsi
	andq	$7,%rcx				/* any fractional bytes? */
	std					/* descending string ops */
	rep
	movsb
	movq	%rdx,%rcx			/* copy by 64-bit words */
	shrq	$3,%rcx
	subq	$7,%rsi				/* step back to last full qword */
	subq	$7,%rdi
	rep
	movsq
	cld					/* restore direction flag */
	ret
END(bcopy)
148
/*
 * reset_dbregs(void)
 *
 * Disable all hardware breakpoints and clear the debug address and
 * status registers (%dr0-%dr3, %dr6, %dr7).
 */
ENTRY(reset_dbregs)
	movq	$0x200,%rax	/* the manual says that bit 10 must be set to 1 */
	movq	%rax,%dr7	/* disable all breakpoints first */
	movq	$0,%rax
	movq	%rax,%dr0
	movq	%rax,%dr1
	movq	%rax,%dr2
	movq	%rax,%dr3
	movq	%rax,%dr6
	ret
END(reset_dbregs)
160
/*
 * memcpy(dst:%rdi, src:%rsi, bytes:%rdx)
 *
 * Returns dst in %rax (C memcpy semantics).
 *
 * Note: memcpy does not support overlapping copies
 */
ENTRY(memcpy)
	movq	%rdi,%r8			/* save dst for return value */
	movq	%rdx,%rcx
	shrq	$3,%rcx				/* copy by 64-bit words */
	cld					/* copy forwards */
	rep
	movsq
	movq	%rdx,%rcx
	andq	$7,%rcx				/* any bytes left? */
	rep
	movsb
	movq	%r8,%rax			/* return original dst */
	ret
END(memcpy)
180
/*
 * fillw(pat, base, cnt) - store cnt copies of the 16-bit pattern
 *       %rdi,%rsi, %rdx   (low word of pat) starting at base.
 */
ENTRY(fillw)
	movq	%rdi,%rax			/* %ax = 16-bit fill pattern */
	movq	%rsi,%rdi			/* %rdi = destination for stosw */
	movq	%rdx,%rcx			/* %rcx = word count */
	cld					/* store forwards */
	rep
	stosw
	ret
END(fillw)
192
193/*****************************************************************************/
194/* copyout and fubyte family                                                 */
195/*****************************************************************************/
196/*
197 * Access user memory from inside the kernel. These routines should be
198 * the only places that do this.
199 *
200 * These routines set curpcb->onfault for the time they execute. When a
201 * protection violation occurs inside the functions, the trap handler
202 * returns to *curpcb->onfault instead of the function.
203 */
204
/*
 * std_copyout(from_kernel, to_user, len)  - MP SAFE
 *         %rdi,        %rsi,    %rdx
 *
 * Copy len bytes from kernel memory to user memory.  Returns 0 on
 * success or EFAULT on a bad/unmapped user address.  pcb_onfault is
 * armed so that a page fault during the copy unwinds to copyout_fault
 * instead of panicking.
 */
ENTRY(std_copyout)
	movq	PCPU(curthread),%rax
	movq	TD_PCB(%rax), %rax
	movq	$copyout_fault,PCB_ONFAULT(%rax)	/* arm fault recovery */
	movq	%rsp,PCB_ONFAULT_SP(%rax)
	testq	%rdx,%rdx			/* anything to do? */
	jz	done_copyout

	/*
	 * Check explicitly for non-user addresses.  If 486 write protection
	 * is being used, this check is essential because we are in kernel
	 * mode so the h/w does not provide any protection against writing
	 * kernel addresses.
	 */

	/*
	 * First, prevent address wrapping.
	 */
	movq	%rsi,%rax
	addq	%rdx,%rax
	jc	copyout_fault
/*
 * XXX STOP USING VM_MAX_USER_ADDRESS.
 * It is an end address, not a max, so every time it is used correctly it
 * looks like there is an off by one error, and of course it caused an off
 * by one error in several places.
 */
	movq	$VM_MAX_USER_ADDRESS,%rcx
	cmpq	%rcx,%rax
	ja	copyout_fault

	xchgq	%rdi,%rsi
	/* bcopy(%rsi, %rdi, %rdx) */
	movq	%rdx,%rcx

	shrq	$3,%rcx				/* copy 8 bytes at a time */
	cld
	rep
	movsq
	movb	%dl,%cl
	andb	$7,%cl				/* then the remaining 0-7 bytes */
	rep
	movsb

done_copyout:
	xorl	%eax,%eax			/* success: return 0 */
	movq	PCPU(curthread),%rdx
	movq	TD_PCB(%rdx), %rdx
	movq	%rax,PCB_ONFAULT(%rdx)		/* disarm fault recovery */
	ret

	ALIGN_TEXT
copyout_fault:
	movq	PCPU(curthread),%rdx
	movq	TD_PCB(%rdx), %rdx
	movq	$0,PCB_ONFAULT(%rdx)		/* disarm fault recovery */
	movq	$EFAULT,%rax
	ret
END(std_copyout)
268
/*
 * std_copyin(from_user, to_kernel, len) - MP SAFE
 *        %rdi,      %rsi,      %rdx
 *
 * Copy len bytes from user memory to kernel memory.  Returns 0 on
 * success or EFAULT on a bad/unmapped user address.  pcb_onfault is
 * armed so a fault during the copy unwinds to copyin_fault.
 */
ENTRY(std_copyin)
	movq	PCPU(curthread),%rax
	movq	TD_PCB(%rax), %rax
	movq	$copyin_fault,PCB_ONFAULT(%rax)		/* arm fault recovery */
	movq	%rsp,PCB_ONFAULT_SP(%rax)
	testq	%rdx,%rdx			/* anything to do? */
	jz	done_copyin

	/*
	 * make sure address is valid (no wrap, ends inside user space)
	 */
	movq	%rdi,%rax
	addq	%rdx,%rax
	jc	copyin_fault
	movq	$VM_MAX_USER_ADDRESS,%rcx
	cmpq	%rcx,%rax
	ja	copyin_fault

	xchgq	%rdi,%rsi			/* %rsi = user src, %rdi = dst */
	movq	%rdx,%rcx
	movb	%cl,%al				/* stash low byte of count */
	shrq	$3,%rcx				/* copy longword-wise */
	cld
	rep
	movsq
	movb	%al,%cl
	andb	$7,%cl				/* copy remaining bytes */
	rep
	movsb

done_copyin:
	xorl	%eax,%eax			/* success: return 0 */
	movq	PCPU(curthread),%rdx
	movq	TD_PCB(%rdx), %rdx
	movq	%rax,PCB_ONFAULT(%rdx)		/* disarm fault recovery */
	ret

	ALIGN_TEXT
copyin_fault:
	movq	PCPU(curthread),%rdx
	movq	TD_PCB(%rdx), %rdx
	movq	$0,PCB_ONFAULT(%rdx)		/* disarm fault recovery */
	movq	$EFAULT,%rax
	ret
END(std_copyin)
318
/*
 * casu32 - Compare and set user integer.  Returns -1 or the current value.
 *          dst = %rdi, old = %rsi, new = %rdx
 *
 * Atomically: if *dst == old, store new; return the value that was in
 * *dst (== old on success).  Returns -1 via fusufault on a bad address
 * or faulting access (note -1 is also a possible legitimate value).
 */
ENTRY(casu32)
	movq	PCPU(curthread),%rcx
	movq	TD_PCB(%rcx), %rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)	/* arm fault recovery */
	movq	%rsp,PCB_ONFAULT_SP(%rcx)

	movq	$VM_MAX_USER_ADDRESS-4,%rax
	cmpq	%rax,%rdi			/* verify address is valid */
	ja	fusufault

	movl	%esi,%eax			/* old */
	lock
	cmpxchgl %edx,(%rdi)			/* new = %edx */

	/*
	 * The old value is in %eax.  If the store succeeded it will be the
	 * value we expected (old) from before the store, otherwise it will
	 * be the current value.
	 */

	movq	PCPU(curthread),%rcx
	movq	TD_PCB(%rcx), %rcx
	movq	$0,PCB_ONFAULT(%rcx)		/* disarm fault recovery */
	ret
END(casu32)
348
/*
 * swapu32 - Swap int in user space.  ptr = %rdi, val = %rsi
 *
 * Atomically exchange the 32-bit value at the user address with val.
 * Returns the previous value, or -1 via fusufault on a bad address or
 * faulting access (note -1 is also a possible legitimate value).
 */
ENTRY(std_swapu32)
	movq	PCPU(curthread),%rcx
	movq	TD_PCB(%rcx), %rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)	/* arm fault recovery */
	movq	%rsp,PCB_ONFAULT_SP(%rcx)

	movq	$VM_MAX_USER_ADDRESS-4,%rax
	cmpq	%rax,%rdi			/* verify address is valid */
	ja	fusufault

	movq	%rsi,%rax			/* value to store */
	xchgl	%eax,(%rdi)			/* xchg w/memory is implicitly locked */

	/*
	 * %rax now holds the previous 32-bit value at the address
	 * (zero-extended); the exchange always succeeds.
	 */

	movq	PCPU(curthread),%rcx
	movq	TD_PCB(%rcx), %rcx
	movq	$0,PCB_ONFAULT(%rcx)		/* disarm fault recovery */
	ret
END(std_swapu32)
376
/*
 * casu64 - Compare and set user word.  Returns -1 or the current value.
 *          dst = %rdi, old = %rsi, new = %rdx
 *
 * Atomically: if *dst == old, store new; return the value that was in
 * *dst (== old on success).  Returns -1 via fusufault on a bad address
 * or faulting access (note -1 is also a possible legitimate value).
 */
ENTRY(casu64)
	movq	PCPU(curthread),%rcx
	movq	TD_PCB(%rcx), %rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)	/* arm fault recovery */
	movq	%rsp,PCB_ONFAULT_SP(%rcx)

	movq	$VM_MAX_USER_ADDRESS-8,%rax
	cmpq	%rax,%rdi			/* verify address is valid */
	ja	fusufault

	movq	%rsi,%rax			/* old */
	lock
	cmpxchgq %rdx,(%rdi)			/* new = %rdx */

	/*
	 * The old value is in %rax.  If the store succeeded it will be the
	 * value we expected (old) from before the store, otherwise it will
	 * be the current value.
	 */

	movq	PCPU(curthread),%rcx
	movq	TD_PCB(%rcx), %rcx
	movq	$0,PCB_ONFAULT(%rcx)		/* disarm fault recovery */
	ret
END(casu64)
406
/*
 * swapu64 - Swap long in user space.  ptr = %rdi, val = %rsi
 *
 * Atomically exchange the 64-bit value at the user address with val.
 * Returns the previous value, or -1 via fusufault on a bad address or
 * faulting access (note -1 is also a possible legitimate value).
 */
ENTRY(std_swapu64)
	movq	PCPU(curthread),%rcx
	movq	TD_PCB(%rcx), %rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)	/* arm fault recovery */
	movq	%rsp,PCB_ONFAULT_SP(%rcx)

	movq	$VM_MAX_USER_ADDRESS-8,%rax
	cmpq	%rax,%rdi			/* verify address is valid */
	ja	fusufault

	movq	%rsi,%rax			/* value to store */
	xchgq	%rax,(%rdi)			/* xchg w/memory is implicitly locked */

	/*
	 * %rax now holds the previous 64-bit value at the address;
	 * the exchange always succeeds.
	 */

	movq	PCPU(curthread),%rcx
	movq	TD_PCB(%rcx), %rcx
	movq	$0,PCB_ONFAULT(%rcx)		/* disarm fault recovery */
	ret
END(std_swapu64)
434
/*
 * Fetch (load) a 64-bit word, a 32-bit word, a 16-bit word, or an 8-bit
 * byte from user memory.  All these functions are MPSAFE.
 * addr = %rdi
 *
 * Each returns the fetched value, or -1 (via fusufault) if the address
 * is out of range or the access faults.
 */

ENTRY(std_fuword64)
	movq	PCPU(curthread),%rcx
	movq	TD_PCB(%rcx), %rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)	/* arm fault recovery */
	movq	%rsp,PCB_ONFAULT_SP(%rcx)

	movq	$VM_MAX_USER_ADDRESS-8,%rax
	cmpq	%rax,%rdi			/* verify address is valid */
	ja	fusufault

	movq	(%rdi),%rax			/* fetch the value */
	movq	$0,PCB_ONFAULT(%rcx)		/* disarm fault recovery */
	ret
END(std_fuword64)
455
/*
 * std_fuword32 - fetch a 32-bit word from user memory.  addr = %rdi.
 * Returns the value (zero-extended) or -1 via fusufault.
 */
ENTRY(std_fuword32)
	movq	PCPU(curthread),%rcx
	movq	TD_PCB(%rcx), %rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)	/* arm fault recovery */
	movq	%rsp,PCB_ONFAULT_SP(%rcx)

	movq	$VM_MAX_USER_ADDRESS-4,%rax
	cmpq	%rax,%rdi			/* verify address is valid */
	ja	fusufault

	movl	(%rdi),%eax			/* fetch; zero-extends to %rax */
	movq	$0,PCB_ONFAULT(%rcx)		/* disarm fault recovery */
	ret
END(std_fuword32)
470
/*
 * std_fubyte - fetch a byte from user memory.  addr = %rdi.
 * Returns the value (zero-extended) or -1 via fusufault.
 */
ENTRY(std_fubyte)
	movq	PCPU(curthread),%rcx
	movq	TD_PCB(%rcx), %rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)	/* arm fault recovery */
	movq	%rsp,PCB_ONFAULT_SP(%rcx)

	movq	$VM_MAX_USER_ADDRESS-1,%rax
	cmpq	%rax,%rdi			/* verify address is valid */
	ja	fusufault

	movzbl	(%rdi),%eax			/* fetch, zero-extend the byte */
	movq	$0,PCB_ONFAULT(%rcx)		/* disarm fault recovery */
	ret

	ALIGN_TEXT
/*
 * Shared fault target for the fu*, su*, casu* and swapu* family:
 * disarm pcb_onfault and return -1.
 */
fusufault:
	movq	PCPU(curthread),%rcx
	xorl	%eax,%eax
	movq	TD_PCB(%rcx), %rcx
	movq	%rax,PCB_ONFAULT(%rcx)		/* disarm fault recovery */
	decq	%rax				/* return -1 */
	ret
END(std_fubyte)
494
/*
 * Store a 64-bit word, a 32-bit word, a 16-bit word, or an 8-bit byte to
 * user memory.  All these functions are MPSAFE.
 *
 * addr = %rdi, value = %rsi
 *
 * Each returns 0 on success or -1 (via fusufault) on a bad address or
 * faulting store.
 *
 * Write a long
 */
ENTRY(std_suword64)
	movq	PCPU(curthread),%rcx
	movq	TD_PCB(%rcx), %rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)	/* arm fault recovery */
	movq	%rsp,PCB_ONFAULT_SP(%rcx)

	movq	$VM_MAX_USER_ADDRESS-8,%rax
	cmpq	%rax,%rdi			/* verify address validity */
	ja	fusufault

	movq	%rsi,(%rdi)			/* store the value */
	xorl	%eax,%eax			/* success: return 0 */
	movq	PCPU(curthread),%rcx
	movq	TD_PCB(%rcx), %rcx
	movq	%rax,PCB_ONFAULT(%rcx)		/* disarm fault recovery */
	ret
END(std_suword64)
520
/*
 * Write an int (32 bits) to user memory.  addr = %rdi, value = %esi.
 * Returns 0 on success or -1 via fusufault.
 */
ENTRY(std_suword32)
	movq	PCPU(curthread),%rcx
	movq	TD_PCB(%rcx), %rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)	/* arm fault recovery */
	movq	%rsp,PCB_ONFAULT_SP(%rcx)

	movq	$VM_MAX_USER_ADDRESS-4,%rax
	cmpq	%rax,%rdi			/* verify address validity */
	ja	fusufault

	movl	%esi,(%rdi)			/* store the value */
	xorl	%eax,%eax			/* success: return 0 */
	movq	PCPU(curthread),%rcx
	movq	TD_PCB(%rcx), %rcx
	movq	%rax,PCB_ONFAULT(%rcx)		/* disarm fault recovery */
	ret
END(std_suword32)
541
/*
 * Write a byte to user memory.  addr = %rdi, value = low byte of %rsi.
 * Returns 0 on success or -1 via fusufault.
 */
ENTRY(std_subyte)
	movq	PCPU(curthread),%rcx
	movq	TD_PCB(%rcx), %rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)	/* arm fault recovery */
	movq	%rsp,PCB_ONFAULT_SP(%rcx)

	movq	$VM_MAX_USER_ADDRESS-1,%rax
	cmpq	%rax,%rdi			/* verify address validity */
	ja	fusufault

	movl	%esi,%eax
	movb	%al,(%rdi)			/* store the byte */
	xorl	%eax,%eax			/* success: return 0 */
	movq	PCPU(curthread),%rcx		/* restore trashed register */
	movq	TD_PCB(%rcx), %rcx
	movq	%rax,PCB_ONFAULT(%rcx)		/* disarm fault recovery */
	ret
END(std_subyte)
560
/*
 * std_copyinstr(from, to, maxlen, int *lencopied) - MP SAFE
 *           %rdi, %rsi, %rdx, %rcx
 *
 *	copy a string from 'from' (user) to 'to' (kernel), stop when a 0
 *	character is reached.
 *	return ENAMETOOLONG if string is longer than maxlen, and
 *	EFAULT on protection violations. If lencopied is non-zero,
 *	return the actual length (including the terminating NUL) in
 *	*lencopied.
 */
ENTRY(std_copyinstr)
	movq	%rdx,%r8			/* %r8 = maxlen */
	movq	%rcx,%r9			/* %r9 = *len */
	xchgq	%rdi,%rsi			/* %rsi = from (user), %rdi = to */
	movq	PCPU(curthread),%rcx
	movq	TD_PCB(%rcx), %rcx
	movq	$cpystrflt,PCB_ONFAULT(%rcx)	/* arm fault recovery */
	movq	%rsp,PCB_ONFAULT_SP(%rcx)

	movq	$VM_MAX_USER_ADDRESS,%rax

	/* make sure 'from' is within bounds */
	subq	%rsi,%rax
	jbe	cpystrflt

	/* restrict maxlen to <= VM_MAX_USER_ADDRESS-from */
	cmpq	%rdx,%rax
	jae	1f
	movq	%rax,%rdx
	movq	%rax,%r8
1:
	incq	%rdx				/* bias for the countdown loop */
	cld

2:
	decq	%rdx
	jz	3f

	lodsb					/* copy bytes, including the NUL */
	stosb
	orb	%al,%al
	jnz	2b

	/* Success -- 0 byte reached */
	decq	%rdx				/* count the NUL in the length */
	xorl	%eax,%eax			/* return 0 */
	jmp	cpystrflt_x
3:
	/* rdx is zero - return ENAMETOOLONG or EFAULT */
	movq	$VM_MAX_USER_ADDRESS,%rax
	cmpq	%rax,%rsi			/* ran off the end of user space? */
	jae	cpystrflt
4:
	movq	$ENAMETOOLONG,%rax
	jmp	cpystrflt_x

cpystrflt:
	movq	$EFAULT,%rax

cpystrflt_x:
	/* set *lencopied and return %eax */
	movq	PCPU(curthread),%rcx
	movq	TD_PCB(%rcx), %rcx
	movq	$0,PCB_ONFAULT(%rcx)		/* disarm fault recovery */

	testq	%r9,%r9
	jz	1f
	subq	%rdx,%r8			/* copied = maxlen - remaining */
	movq	%r8,(%r9)
1:
	ret
END(std_copyinstr)
632
/*
 * copystr(from, to, maxlen, int *lencopied) - MP SAFE
 *         %rdi, %rsi, %rdx, %rcx
 *
 * Kernel-to-kernel string copy: copy from 'from' to 'to', stopping at
 * the NUL terminator, or return ENAMETOOLONG after maxlen bytes.  If
 * lencopied is non-NULL, the number of bytes copied (including the
 * terminating NUL) is stored there.
 */
ENTRY(copystr)
	movq	%rdx,%r8			/* %r8 = maxlen */

	xchgq	%rdi,%rsi			/* %rsi = from, %rdi = to */
	incq	%rdx				/* bias for the countdown loop */
	cld
1:
	decq	%rdx
	jz	4f
	lodsb					/* copy bytes, including the NUL */
	stosb
	orb	%al,%al
	jnz	1b

	/* Success -- 0 byte reached */
	decq	%rdx				/* count the NUL in the length */
	xorl	%eax,%eax			/* return 0 */
	jmp	6f
4:
	/* rdx is zero -- return ENAMETOOLONG */
	movq	$ENAMETOOLONG,%rax

6:

	testq	%rcx,%rcx
	jz	7f
	/* set *lencopied and return %rax */
	subq	%rdx,%r8			/* copied = maxlen - remaining */
	movq	%r8,(%rcx)
7:
	ret
END(copystr)
669
/*
 * Handling of special x86_64 registers and descriptor tables etc
 * %rdi
 */
/*
 * void lgdt(struct region_descriptor *rdp);
 *
 * Load a new GDT and reload all segment registers, including %cs via
 * a far return.
 */
ENTRY(lgdt)
	/* reload the descriptor table */
	lgdt	(%rdi)

	/* flush the prefetch q */
	jmp	1f
	nop
1:
	movl	$KDSEL,%eax			/* kernel data selector */
	movl	%eax,%ds
	movl	%eax,%es
	movl	%eax,%fs	/* Beware, use wrmsr to set 64 bit base */
	movl	%eax,%gs	/* Beware, use wrmsr to set 64 bit base */
	movl	%eax,%ss

	/* reload code selector by turning return into intersegmental return */
	popq	%rax
	pushq	$KCSEL
	pushq	%rax
	MEXITCOUNT
	lretq
END(lgdt)
697
/*****************************************************************************/
/* setjmp, longjmp                                                           */
/*****************************************************************************/

/*
 * setjmp(jmp_buf:%rdi)
 *
 * Save the callee-saved registers, the stack pointer and the return
 * address into the jmp_buf and return 0.
 */
ENTRY(setjmp)
	movq	%rbx,0(%rdi)			/* save rbx */
	movq	%rsp,8(%rdi)			/* save rsp */
	movq	%rbp,16(%rdi)			/* save rbp */
	movq	%r12,24(%rdi)			/* save r12 */
	movq	%r13,32(%rdi)			/* save r13 */
	movq	%r14,40(%rdi)			/* save r14 */
	movq	%r15,48(%rdi)			/* save r15 */
	movq	0(%rsp),%rdx			/* get return address */
	movq	%rdx,56(%rdi)			/* save rip */
	xorl	%eax,%eax			/* return(0); */
	ret
END(setjmp)
715
/*
 * longjmp(jmp_buf:%rdi)
 *
 * Restore the context saved by setjmp and resume execution after the
 * original setjmp call site, which appears to return 1.
 */
ENTRY(longjmp)
	movq	0(%rdi),%rbx			/* restore rbx */
	movq	8(%rdi),%rsp			/* restore rsp */
	movq	16(%rdi),%rbp			/* restore rbp */
	movq	24(%rdi),%r12			/* restore r12 */
	movq	32(%rdi),%r13			/* restore r13 */
	movq	40(%rdi),%r14			/* restore r14 */
	movq	48(%rdi),%r15			/* restore r15 */
	movq	56(%rdi),%rdx			/* get saved rip */
	movq	%rdx,0(%rsp)			/* put in return frame */
	xorl	%eax,%eax			/* return(1); */
	incl	%eax
	ret
END(longjmp)
730
/*
 * Support for reading MSRs in the safe manner.
 */
ENTRY(rdmsr_safe)
/* int rdmsr_safe(u_int msr, uint64_t *data) */
	movq	PCPU(curthread),%r8
	movq	TD_PCB(%r8), %r8
	movq	$msr_onfault,PCB_ONFAULT(%r8)	/* recover from trap on bad MSR */
	movq	%rsp,PCB_ONFAULT_SP(%r8)
	movl	%edi,%ecx
	rdmsr			/* Read MSR pointed by %ecx. Returns
				   high 32 bits in %edx, low in %eax */
	salq	$32,%rdx	/* move %edx into the high half of %rdx */
	movl	%eax,%eax	/* zero-extend %eax -> %rax */
	orq	%rdx,%rax	/* assemble the full 64-bit value */
	movq	%rax,(%rsi)	/* *data = value */
	xorq	%rax,%rax	/* return 0 (success) */
	movq	%rax,PCB_ONFAULT(%r8)		/* disarm fault recovery */
	ret
END(rdmsr_safe)
751
/*
 * Support for writing MSRs in the safe manner.
 */
ENTRY(wrmsr_safe)
/* int wrmsr_safe(u_int msr, uint64_t data) */
	movq	PCPU(curthread),%r8
	movq	TD_PCB(%r8), %r8
	movq	$msr_onfault,PCB_ONFAULT(%r8)	/* recover from trap on bad MSR */
	movq	%rsp,PCB_ONFAULT_SP(%r8)
	movl	%edi,%ecx
	movl	%esi,%eax	/* low 32 bits of data */
	sarq	$32,%rsi
	movl	%esi,%edx	/* high 32 bits of data */
	wrmsr			/* Write MSR pointed by %ecx. Accepts
				   high 32 bits in %edx, low in %eax. */
	xorq	%rax,%rax	/* return 0 (success) */
	movq	%rax,PCB_ONFAULT(%r8)		/* disarm fault recovery */
	ret
END(wrmsr_safe)
771
/*
 * MSR operations fault handler
 *
 * Reached via pcb_onfault when rdmsr/wrmsr traps (e.g. on an invalid
 * MSR number).  Disarms the handler and returns EFAULT to the
 * rdmsr_safe/wrmsr_safe caller.
 */
	ALIGN_TEXT
msr_onfault:
	movq	PCPU(curthread),%r8
	movq	TD_PCB(%r8), %r8
	movq	$0,PCB_ONFAULT(%r8)		/* disarm fault recovery */
	movl	$EFAULT,%eax
	ret
