/*-
 * Copyright (c) 1993 The Regents of the University of California.
 * Copyright (c) 2003 Peter Wemm.
 * Copyright (c) 2008 The DragonFly Project.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/amd64/amd64/support.S,v 1.127 2007/05/23 08:33:04 kib Exp $
 */

#include <machine/asmacros.h>
#include <machine/pmap.h>

#include "assym.s"

	ALIGN_DATA

	.text

/*
 * bzero(ptr:%rdi, bytes:%rsi)
 *
 * Using rep stosq is 70% faster than a %rax loop and almost as fast as
 * a %xmm0 loop on a modern Intel cpu.
 *
 * Do not use non-temporal instructions here as we do not know the caller's
 * intent.
 */
ENTRY(bzero)
	movq	%rsi,%rcx
	xorl	%eax,%eax
	shrq	$3,%rcx
	cld
	rep
	stosq
	movq	%rsi,%rcx
	andq	$7,%rcx
	rep
	stosb
	ret
END(bzero)
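
/*
 * For reference, the quadword-then-byte-tail pattern above corresponds
 * roughly to the following C (an illustrative sketch only; the name
 * bzero_sketch is hypothetical):
 *
 *	void
 *	bzero_sketch(void *ptr, size_t bytes)
 *	{
 *		uint64_t *q = ptr;
 *		unsigned char *b;
 *		size_t n;
 *
 *		for (n = bytes >> 3; n != 0; --n)	-- rep stosq
 *			*q++ = 0;
 *		b = (unsigned char *)q;
 *		for (n = bytes & 7; n != 0; --n)	-- rep stosb
 *			*b++ = 0;
 *	}
 */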

/*
 * pagezero(ptr:%rdi)
 *
 * Using rep stosq is nearly as fast as using %xmm0 on a modern Intel cpu,
 * and about 70% faster than a %rax loop.
 *
 * Do not use non-temporal instructions here as we do not know the caller's
 * intent.
 */
ENTRY(pagezero)
	movq	$PAGE_SIZE>>3,%rcx
	xorl	%eax,%eax
	cld
	rep
	stosq
	ret
END(pagezero)

/*
 * bcmp(ptr:%rdi, ptr:%rsi, bytes:%rdx)
 */
ENTRY(bcmp)
	movq	%rdx,%rcx
	shrq	$3,%rcx
	cld					/* compare forwards */
	repe
	cmpsq
	jne	1f

	movq	%rdx,%rcx
	andq	$7,%rcx
	repe
	cmpsb
1:
	setne	%al
	movsbl	%al,%eax
	ret
END(bcmp)
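
/*
 * Semantically bcmp reduces to the following C sketch (illustration
 * only; the real routine compares quadwords first, then the byte tail):
 *
 *	int
 *	bcmp_sketch(const void *p1, const void *p2, size_t bytes)
 *	{
 *		const unsigned char *a = p1;
 *		const unsigned char *b = p2;
 *
 *		while (bytes-- != 0) {
 *			if (*a++ != *b++)
 *				return (1);
 *		}
 *		return (0);
 *	}
 *
 * Note that the return value only distinguishes equal from not-equal
 * (0/non-zero); it is not an ordering as with memcmp().
 */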

/*
 * bcopy(src:%rdi, dst:%rsi, cnt:%rdx)
 *
 * ws@tools.de (Wolfgang Solfrank, TooLs GmbH) +49-228-985800
 */
ENTRY(bcopy)
	xchgq	%rsi,%rdi
	movq	%rdx,%rcx

	movq	%rdi,%rax
	subq	%rsi,%rax
	cmpq	%rcx,%rax			/* overlapping && src < dst? */
	jb	1f

	shrq	$3,%rcx				/* copy by 64-bit words */
	cld					/* nope, copy forwards */
	rep
	movsq
	movq	%rdx,%rcx
	andq	$7,%rcx				/* any bytes left? */
	rep
	movsb
	ret

	ALIGN_TEXT
1:
	addq	%rcx,%rdi			/* copy backwards */
	addq	%rcx,%rsi
	decq	%rdi
	decq	%rsi
	andq	$7,%rcx				/* any fractional bytes? */
	std
	rep
	movsb
	movq	%rdx,%rcx			/* copy by 64-bit words */
	shrq	$3,%rcx
	subq	$7,%rsi
	subq	$7,%rdi
	rep
	movsq
	cld
	ret
END(bcopy)
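
/*
 * The overlap test above relies on unsigned wraparound: after the
 * xchgq, %rdi is the destination and %rsi the source, and the copy
 * must run backwards exactly when the destination starts inside the
 * source region.  In C terms (sketch only):
 *
 *	if ((uintptr_t)dst - (uintptr_t)src < (uintptr_t)len)
 *		copy backwards (byte tail first, then quadwords)
 *	else
 *		copy forwards (quadwords first, then byte tail)
 *
 * When dst is below src the subtraction wraps to a huge unsigned
 * value and the forward path is taken, which is always safe there.
 */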

ENTRY(reset_dbregs)
	movq	$0x200,%rax	/* the manual says that bit 10 must be set to 1 */
	movq	%rax,%dr7	/* disable all breakpoints first */
	movq	$0,%rax
	movq	%rax,%dr0
	movq	%rax,%dr1
	movq	%rax,%dr2
	movq	%rax,%dr3
	movq	%rax,%dr6
	ret
END(reset_dbregs)

/*
 * memcpy(dst:%rdi, src:%rsi, bytes:%rdx)
 *
 * Note: memcpy does not support overlapping copies
 */
ENTRY(memcpy)
	movq	%rdi,%r8
	movq	%rdx,%rcx
	shrq	$3,%rcx				/* copy by 64-bit words */
	cld					/* copy forwards */
	rep
	movsq
	movq	%rdx,%rcx
	andq	$7,%rcx				/* any bytes left? */
	rep
	movsb
	movq	%r8,%rax
	ret
END(memcpy)

/* fillw(pat, base, cnt) */
/*       %rdi,%rsi, %rdx */
ENTRY(fillw)
	movq	%rdi,%rax
	movq	%rsi,%rdi
	movq	%rdx,%rcx
	cld
	rep
	stosw
	ret
END(fillw)

/*****************************************************************************/
/* copyout and fubyte family                                                 */
/*****************************************************************************/
/*
 * Access user memory from inside the kernel. These routines should be
 * the only places that do this.
 *
 * These routines set curpcb->onfault for the time they execute. When a
 * protection violation occurs inside the functions, the trap handler
 * returns to *curpcb->onfault instead of the function.
 */
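
/*
 * Each routine below follows the same recovery pattern, shown here as
 * a pseudo-C sketch (field names correspond to the PCB_ONFAULT and
 * PCB_ONFAULT_SP offsets from assym.s):
 *
 *	struct pcb *pcb = curthread->td_pcb;
 *
 *	pcb->pcb_onfault = &fault_label;	-- arm the recovery point
 *	pcb->pcb_onfault_sp = %rsp;
 *	... access user memory ...
 *	pcb->pcb_onfault = NULL;		-- disarm on success
 *
 * If the access faults, the trap handler resumes at fault_label with
 * the saved stack pointer rather than treating it as a fatal
 * kernel-mode fault.
 */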

/*
 * uint64_t:%rax kreadmem64(addr:%rdi)
 *
 * Read kernel or user memory with fault protection.
 */
ENTRY(kreadmem64)
	movq	PCPU(curthread),%rcx
	movq	TD_PCB(%rcx), %rcx
	movq	$kreadmem64fault,PCB_ONFAULT(%rcx)
	movq	%rsp,PCB_ONFAULT_SP(%rcx)

	movq	(%rdi),%rax
	movq	$0,PCB_ONFAULT(%rcx)
	ret

kreadmem64fault:
	movq	PCPU(curthread),%rcx
	xorl	%eax,%eax
	movq	TD_PCB(%rcx),%rcx
	movq	%rax,PCB_ONFAULT(%rcx)
	decq	%rax
	ret
END(kreadmem64)

/*
 * std_copyout(from_kernel, to_user, len)  - MP SAFE
 *         %rdi,        %rsi,    %rdx
 */
ENTRY(std_copyout)
	movq	PCPU(curthread),%rax
	movq	TD_PCB(%rax), %rax
	movq	$copyout_fault,PCB_ONFAULT(%rax)
	movq	%rsp,PCB_ONFAULT_SP(%rax)
	testq	%rdx,%rdx			/* anything to do? */
	jz	done_copyout

	/*
	 * Check explicitly for non-user addresses.  If 486 write protection
	 * is being used, this check is essential because we are in kernel
	 * mode so the h/w does not provide any protection against writing
	 * kernel addresses.
	 */

	/*
	 * First, prevent address wrapping.
	 */
	movq	%rsi,%rax
	addq	%rdx,%rax
	jc	copyout_fault
/*
 * XXX STOP USING VM_MAX_USER_ADDRESS.
 * It is an end address, not a max, so every time it is used correctly it
 * looks like there is an off by one error, and of course it caused an off
 * by one error in several places.
 */
	movq	$VM_MAX_USER_ADDRESS,%rcx
	cmpq	%rcx,%rax
	ja	copyout_fault

	xchgq	%rdi,%rsi
	/* bcopy(%rsi, %rdi, %rdx) */
	movq	%rdx,%rcx

	shrq	$3,%rcx
	cld
	rep
	movsq
	movb	%dl,%cl
	andb	$7,%cl
	rep
	movsb

done_copyout:
	xorl	%eax,%eax
	movq	PCPU(curthread),%rdx
	movq	TD_PCB(%rdx), %rdx
	movq	%rax,PCB_ONFAULT(%rdx)
	ret

	ALIGN_TEXT
copyout_fault:
	movq	PCPU(curthread),%rdx
	movq	TD_PCB(%rdx), %rdx
	movq	$0,PCB_ONFAULT(%rdx)
	movq	$EFAULT,%rax
	ret
END(std_copyout)
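
/*
 * In C terms the validity test above is (sketch only; udaddr is a
 * hypothetical name for the user address argument):
 *
 *	if ((uintptr_t)udaddr + len < (uintptr_t)udaddr)
 *		return (EFAULT);		-- address wrapped
 *	if ((uintptr_t)udaddr + len > VM_MAX_USER_ADDRESS)
 *		return (EFAULT);
 *
 * so the end of the copy may equal VM_MAX_USER_ADDRESS but may not
 * exceed it, consistent with the XXX note about it being an end
 * address rather than a maximum.
 */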

/*
 * std_copyin(from_user, to_kernel, len) - MP SAFE
 *        %rdi,      %rsi,      %rdx
 */
ENTRY(std_copyin)
	movq	PCPU(curthread),%rax
	movq	TD_PCB(%rax), %rax
	movq	$copyin_fault,PCB_ONFAULT(%rax)
	movq	%rsp,PCB_ONFAULT_SP(%rax)
	testq	%rdx,%rdx			/* anything to do? */
	jz	done_copyin

	/*
	 * make sure address is valid
	 */
	movq	%rdi,%rax
	addq	%rdx,%rax
	jc	copyin_fault
	movq	$VM_MAX_USER_ADDRESS,%rcx
	cmpq	%rcx,%rax
	ja	copyin_fault

	xchgq	%rdi,%rsi
	movq	%rdx,%rcx
	movb	%cl,%al
	shrq	$3,%rcx				/* copy longword-wise */
	cld
	rep
	movsq
	movb	%al,%cl
	andb	$7,%cl				/* copy remaining bytes */
	rep
	movsb

done_copyin:
	xorl	%eax,%eax
	movq	PCPU(curthread),%rdx
	movq	TD_PCB(%rdx), %rdx
	movq	%rax,PCB_ONFAULT(%rdx)
	ret

	ALIGN_TEXT
copyin_fault:
	movq	PCPU(curthread),%rdx
	movq	TD_PCB(%rdx), %rdx
	movq	$0,PCB_ONFAULT(%rdx)
	movq	$EFAULT,%rax
	ret
END(std_copyin)

/*
 * casu32 - Compare and set user integer.  Returns -1 or the current value.
 *          dst = %rdi, old = %rsi, new = %rdx
 */
ENTRY(casu32)
	movq	PCPU(curthread),%rcx
	movq	TD_PCB(%rcx), %rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)
	movq	%rsp,PCB_ONFAULT_SP(%rcx)

	movq	$VM_MAX_USER_ADDRESS-4,%rax
	cmpq	%rax,%rdi			/* verify address is valid */
	ja	fusufault

	movl	%esi,%eax			/* old */
	lock
	cmpxchgl %edx,(%rdi)			/* new = %edx */

	/*
	 * The old value is in %eax.  If the store succeeded it will be the
	 * value we expected (old) from before the store, otherwise it will
	 * be the current value.
	 */

	movq	PCPU(curthread),%rcx
	movq	TD_PCB(%rcx), %rcx
	movq	$0,PCB_ONFAULT(%rcx)
	ret
END(casu32)

/*
 * swapu32 - Swap int in user space.  ptr = %rdi, val = %rsi
 */
ENTRY(std_swapu32)
	movq	PCPU(curthread),%rcx
	movq	TD_PCB(%rcx), %rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)
	movq	%rsp,PCB_ONFAULT_SP(%rcx)

	movq	$VM_MAX_USER_ADDRESS-4,%rax
	cmpq	%rax,%rdi			/* verify address is valid */
	ja	fusufault

	movq	%rsi,%rax			/* old */
	xchgl	%eax,(%rdi)

	/*
	 * The previous value at (%rdi) is returned in %rax; unlike
	 * casu32, the exchange always succeeds.
	 */

	movq	PCPU(curthread),%rcx
	movq	TD_PCB(%rcx), %rcx
	movq	$0,PCB_ONFAULT(%rcx)
	ret
END(std_swapu32)

/*
 * casu64 - Compare and set user word.  Returns -1 or the current value.
 *          dst = %rdi, old = %rsi, new = %rdx
 */
ENTRY(casu64)
	movq	PCPU(curthread),%rcx
	movq	TD_PCB(%rcx), %rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)
	movq	%rsp,PCB_ONFAULT_SP(%rcx)

	movq	$VM_MAX_USER_ADDRESS-8,%rax
	cmpq	%rax,%rdi			/* verify address is valid */
	ja	fusufault

	movq	%rsi,%rax			/* old */
	lock
	cmpxchgq %rdx,(%rdi)			/* new = %rdx */

	/*
	 * The old value is in %rax.  If the store succeeded it will be the
	 * value we expected (old) from before the store, otherwise it will
	 * be the current value.
	 */

	movq	PCPU(curthread),%rcx
	movq	TD_PCB(%rcx), %rcx
	movq	$0,PCB_ONFAULT(%rcx)
	ret
END(casu64)
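
/*
 * A typical caller retry loop for the casu*() routines looks like
 * this sketch (illustrative only; update() stands in for whatever
 * new value the caller computes):
 *
 *	for (;;) {
 *		old = fuword32(uaddr);
 *		new = update(old);
 *		if (casu32(uaddr, old, new) == old)
 *			break;		-- store succeeded
 *	}
 *
 * Because -1 is returned both on a fault and when the current value
 * really is -1, callers that need to distinguish the two cases must
 * handle -1 specially.
 */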

/*
 * swapu64 - Swap long in user space.  ptr = %rdi, val = %rsi
 */
ENTRY(std_swapu64)
	movq	PCPU(curthread),%rcx
	movq	TD_PCB(%rcx), %rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)
	movq	%rsp,PCB_ONFAULT_SP(%rcx)

	movq	$VM_MAX_USER_ADDRESS-8,%rax
	cmpq	%rax,%rdi			/* verify address is valid */
	ja	fusufault

	movq	%rsi,%rax			/* old */
	xchgq	%rax,(%rdi)

	/*
	 * The previous value at (%rdi) is returned in %rax; unlike
	 * casu64, the exchange always succeeds.
	 */

	movq	PCPU(curthread),%rcx
	movq	TD_PCB(%rcx), %rcx
	movq	$0,PCB_ONFAULT(%rcx)
	ret
END(std_swapu64)

/*
 * Fetch (load) a 64-bit word, a 32-bit word, or an 8-bit byte from user
 * memory.  All these functions are MPSAFE.
 * addr = %rdi
 */

ENTRY(std_fuword64)
	movq	PCPU(curthread),%rcx
	movq	TD_PCB(%rcx), %rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)
	movq	%rsp,PCB_ONFAULT_SP(%rcx)

	movq	$VM_MAX_USER_ADDRESS-8,%rax
	cmpq	%rax,%rdi			/* verify address is valid */
	ja	fusufault

	movq	(%rdi),%rax
	movq	$0,PCB_ONFAULT(%rcx)
	ret
END(std_fuword64)

ENTRY(std_fuword32)
	movq	PCPU(curthread),%rcx
	movq	TD_PCB(%rcx), %rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)
	movq	%rsp,PCB_ONFAULT_SP(%rcx)

	movq	$VM_MAX_USER_ADDRESS-4,%rax
	cmpq	%rax,%rdi			/* verify address is valid */
	ja	fusufault

	movl	(%rdi),%eax
	movq	$0,PCB_ONFAULT(%rcx)
	ret
END(std_fuword32)

ENTRY(std_fubyte)
	movq	PCPU(curthread),%rcx
	movq	TD_PCB(%rcx), %rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)
	movq	%rsp,PCB_ONFAULT_SP(%rcx)

	movq	$VM_MAX_USER_ADDRESS-1,%rax
	cmpq	%rax,%rdi
	ja	fusufault

	movzbl	(%rdi),%eax
	movq	$0,PCB_ONFAULT(%rcx)
	ret

	ALIGN_TEXT
fusufault:
	movq	PCPU(curthread),%rcx
	xorl	%eax,%eax
	movq	TD_PCB(%rcx), %rcx
	movq	%rax,PCB_ONFAULT(%rcx)
	decq	%rax
	ret
END(std_fubyte)
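
/*
 * All of the fetch routines funnel faults through fusufault, which
 * returns -1.  Usage sketch (illustrative only):
 *
 *	int64_t v = fuword64(uaddr);
 *
 *	if (v == -1)
 *		... either a fault occurred or the word really was -1 ...
 *
 * Callers that must tell those cases apart typically use copyin()
 * instead, which reports EFAULT separately from the data.
 */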

/*
 * Store a 64-bit word, a 32-bit word, or an 8-bit byte to user memory.
 * All these functions are MPSAFE.
 *
 * addr = %rdi, value = %rsi
 *
 * Write a long
 */
ENTRY(std_suword64)
	movq	PCPU(curthread),%rcx
	movq	TD_PCB(%rcx), %rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)
	movq	%rsp,PCB_ONFAULT_SP(%rcx)

	movq	$VM_MAX_USER_ADDRESS-8,%rax
	cmpq	%rax,%rdi			/* verify address validity */
	ja	fusufault

	movq	%rsi,(%rdi)
	xorl	%eax,%eax
	movq	PCPU(curthread),%rcx
	movq	TD_PCB(%rcx), %rcx
	movq	%rax,PCB_ONFAULT(%rcx)
	ret
END(std_suword64)

/*
 * Write an int
 */
ENTRY(std_suword32)
	movq	PCPU(curthread),%rcx
	movq	TD_PCB(%rcx), %rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)
	movq	%rsp,PCB_ONFAULT_SP(%rcx)

	movq	$VM_MAX_USER_ADDRESS-4,%rax
	cmpq	%rax,%rdi			/* verify address validity */
	ja	fusufault

	movl	%esi,(%rdi)
	xorl	%eax,%eax
	movq	PCPU(curthread),%rcx
	movq	TD_PCB(%rcx), %rcx
	movq	%rax,PCB_ONFAULT(%rcx)
	ret
END(std_suword32)

ENTRY(std_subyte)
	movq	PCPU(curthread),%rcx
	movq	TD_PCB(%rcx), %rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)
	movq	%rsp,PCB_ONFAULT_SP(%rcx)

	movq	$VM_MAX_USER_ADDRESS-1,%rax
	cmpq	%rax,%rdi			/* verify address validity */
	ja	fusufault

	movl	%esi,%eax
	movb	%al,(%rdi)
	xorl	%eax,%eax
	movq	PCPU(curthread),%rcx		/* restore trashed register */
	movq	TD_PCB(%rcx), %rcx
	movq	%rax,PCB_ONFAULT(%rcx)
	ret
END(std_subyte)

/*
 * std_copyinstr(from, to, maxlen, int *lencopied) - MP SAFE
 *           %rdi, %rsi, %rdx, %rcx
 *
 *	Copy a string from 'from' to 'to', stopping when a NUL (0)
 *	character is reached.  Return ENAMETOOLONG if the string is
 *	longer than maxlen, and EFAULT on protection violations.  If
 *	lencopied is non-NULL, return the actual length in *lencopied.
 */
ENTRY(std_copyinstr)
	movq	%rdx,%r8			/* %r8 = maxlen */
	movq	%rcx,%r9			/* %r9 = lencopied */
	xchgq	%rdi,%rsi			/* %rdi = to, %rsi = from */
	movq	PCPU(curthread),%rcx
	movq	TD_PCB(%rcx), %rcx
	movq	$cpystrflt,PCB_ONFAULT(%rcx)
	movq	%rsp,PCB_ONFAULT_SP(%rcx)

	movq	$VM_MAX_USER_ADDRESS,%rax

	/* make sure 'from' is within bounds */
	subq	%rsi,%rax
	jbe	cpystrflt

	/* restrict maxlen to <= VM_MAX_USER_ADDRESS-from */
	cmpq	%rdx,%rax
	jae	1f
	movq	%rax,%rdx
	movq	%rax,%r8
1:
	incq	%rdx
	cld

2:
	decq	%rdx
	jz	3f

	lodsb
	stosb
	orb	%al,%al
	jnz	2b

	/* Success -- 0 byte reached */
	decq	%rdx
	xorl	%eax,%eax
	jmp	cpystrflt_x
3:
	/* rdx is zero - return ENAMETOOLONG or EFAULT */
	movq	$VM_MAX_USER_ADDRESS,%rax
	cmpq	%rax,%rsi
	jae	cpystrflt
4:
	movq	$ENAMETOOLONG,%rax
	jmp	cpystrflt_x

cpystrflt:
	movq	$EFAULT,%rax

cpystrflt_x:
	/* set *lencopied and return %eax */
	movq	PCPU(curthread),%rcx
	movq	TD_PCB(%rcx), %rcx
	movq	$0,PCB_ONFAULT(%rcx)

	testq	%r9,%r9
	jz	1f
	subq	%rdx,%r8
	movq	%r8,(%r9)
1:
	ret
END(std_copyinstr)
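
/*
 * The copy loop above implements these semantics, shown as a C sketch
 * (illustration only; EFAULT is delivered via the cpystrflt recovery
 * path rather than an explicit check on every byte):
 *
 *	copied = 0;
 *	while (copied < maxlen) {
 *		char c = from[copied];
 *
 *		to[copied++] = c;
 *		if (c == '\0') {
 *			if (lencopied != NULL)
 *				*lencopied = copied;	-- includes the NUL
 *			return (0);
 *		}
 *	}
 *	if (lencopied != NULL)
 *		*lencopied = copied;
 *	return (ENAMETOOLONG);
 */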

/*
 * copystr(from, to, maxlen, int *lencopied) - MP SAFE
 *         %rdi, %rsi, %rdx, %rcx
 */
ENTRY(copystr)
	movq	%rdx,%r8			/* %r8 = maxlen */

	xchgq	%rdi,%rsi
	incq	%rdx
	cld
1:
	decq	%rdx
	jz	4f
	lodsb
	stosb
	orb	%al,%al
	jnz	1b

	/* Success -- 0 byte reached */
	decq	%rdx
	xorl	%eax,%eax
	jmp	6f
4:
	/* rdx is zero -- return ENAMETOOLONG */
	movq	$ENAMETOOLONG,%rax

6:

	testq	%rcx,%rcx
	jz	7f
	/* set *lencopied and return %rax */
	subq	%rdx,%r8
	movq	%r8,(%rcx)
7:
	ret
END(copystr)

/*
 * Handling of special x86_64 registers and descriptor tables etc
 * %rdi
 */
/* void lgdt(struct region_descriptor *rdp); */
ENTRY(lgdt)
	/* reload the descriptor table */
	lgdt	(%rdi)

	/* flush the prefetch queue */
	jmp	1f
	nop
1:
	movl	$KDSEL,%eax
	movl	%eax,%ds
	movl	%eax,%es
	movl	%eax,%fs	/* Beware, use wrmsr to set 64 bit base */
	movl	%eax,%gs	/* Beware, use wrmsr to set 64 bit base */
	movl	%eax,%ss

	/* reload code selector by turning return into intersegment return */
	popq	%rax
	pushq	$KCSEL
	pushq	%rax
	MEXITCOUNT
	lretq
END(lgdt)

/*****************************************************************************/
/* setjmp, longjmp                                                           */
/*****************************************************************************/

ENTRY(setjmp)
	movq	%rbx,0(%rdi)			/* save rbx */
	movq	%rsp,8(%rdi)			/* save rsp */
	movq	%rbp,16(%rdi)			/* save rbp */
	movq	%r12,24(%rdi)			/* save r12 */
	movq	%r13,32(%rdi)			/* save r13 */
	movq	%r14,40(%rdi)			/* save r14 */
	movq	%r15,48(%rdi)			/* save r15 */
	movq	0(%rsp),%rdx			/* get return address */
	movq	%rdx,56(%rdi)			/* save rip */
	xorl	%eax,%eax			/* return(0); */
	ret
END(setjmp)

ENTRY(longjmp)
	movq	0(%rdi),%rbx			/* restore rbx */
	movq	8(%rdi),%rsp			/* restore rsp */
	movq	16(%rdi),%rbp			/* restore rbp */
	movq	24(%rdi),%r12			/* restore r12 */
	movq	32(%rdi),%r13			/* restore r13 */
	movq	40(%rdi),%r14			/* restore r14 */
	movq	48(%rdi),%r15			/* restore r15 */
	movq	56(%rdi),%rdx			/* get return address */
	movq	%rdx,0(%rsp)			/* put in return frame */
	xorl	%eax,%eax			/* return(1); */
	incl	%eax
	ret
END(longjmp)
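
/*
 * Usage sketch (illustrative only).  setjmp() returns 0 when called
 * directly and longjmp() resumes it with a return value of 1; note
 * that this longjmp takes no value argument:
 *
 *	if (setjmp(env) == 0) {
 *		... normal path, may call longjmp(env) on error ...
 *	} else {
 *		... resumed here after longjmp(env) ...
 *	}
 *
 * Only %rbx, %rsp, %rbp, %r12-%r15 and the return %rip are saved, so
 * caller-saved state is not preserved across the longjmp.
 */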

/*
 * Support for reading MSRs in the safe manner.
 */
ENTRY(rdmsr_safe)
/* int rdmsr_safe(u_int msr, uint64_t *data) */
	movq	PCPU(curthread),%r8
	movq	TD_PCB(%r8), %r8
	movq	$msr_onfault,PCB_ONFAULT(%r8)
	movq	%rsp,PCB_ONFAULT_SP(%r8)
	movl	%edi,%ecx
	rdmsr			/* Read the MSR selected by %ecx.  Returns
				   high 32 bits in %edx, low 32 bits in %eax */
	salq	$32,%rdx	/* shift the high half into the upper 32 bits */
	movl	%eax,%eax	/* zero-extend %eax -> %rax */
	orq	%rdx,%rax
	movq	%rax,(%rsi)
	xorq	%rax,%rax
	movq	%rax,PCB_ONFAULT(%r8)
	ret
END(rdmsr_safe)
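
/*
 * rdmsr leaves the high 32 bits of the MSR in %edx and the low 32
 * bits in %eax; the shift/or sequence above reassembles them as in
 * this C sketch (illustration only):
 *
 *	uint64_t value = ((uint64_t)hi32 << 32) | lo32;
 *
 *	*data = value;
 *	return (0);		-- or EFAULT via msr_onfault
 */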

/*
 * Support for writing MSRs in the safe manner.
 */
ENTRY(wrmsr_safe)
/* int wrmsr_safe(u_int msr, uint64_t data) */
	movq	PCPU(curthread),%r8
	movq	TD_PCB(%r8), %r8
	movq	$msr_onfault,PCB_ONFAULT(%r8)
	movq	%rsp,PCB_ONFAULT_SP(%r8)
	movl	%edi,%ecx
	movl	%esi,%eax
	sarq	$32,%rsi
	movl	%esi,%edx
	wrmsr			/* Write the MSR selected by %ecx.  Accepts
				   high 32 bits in %edx, low 32 bits in %eax. */
	xorq	%rax,%rax
	movq	%rax,PCB_ONFAULT(%r8)
	ret
END(wrmsr_safe)

/*
 * MSR operations fault handler
 */
	ALIGN_TEXT
msr_onfault:
	movq	PCPU(curthread),%r8
	movq	TD_PCB(%r8), %r8
	movq	$0,PCB_ONFAULT(%r8)
	movl	$EFAULT,%eax
	ret