xref: /dragonfly/sys/platform/pc64/x86_64/support.s (revision 684cb317)
1/*-
2 * Copyright (c) 1993 The Regents of the University of California.
3 * Copyright (c) 2003 Peter Wemm.
4 * Copyright (c) 2008 The DragonFly Project.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 *    notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 *    notice, this list of conditions and the following disclaimer in the
14 *    documentation and/or other materials provided with the distribution.
15 * 4. Neither the name of the University nor the names of its contributors
16 *    may be used to endorse or promote products derived from this software
17 *    without specific prior written permission.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * SUCH DAMAGE.
30 *
31 * $FreeBSD: src/sys/amd64/amd64/support.S,v 1.127 2007/05/23 08:33:04 kib Exp $
32 */
33
34#include <machine/asmacros.h>
35#include <machine/pmap.h>
36
37#include "assym.s"
38
39	ALIGN_DATA
40
41	.text
42
43/*
44 * bcopy family
45 * void bzero(void *buf, size_t len)
46 */
47
48/* done */
ENTRY(bzero)
	/*
	 * void bzero(buf:%rdi, len:%rsi)
	 *
	 * Zero a buffer: quadwords first, then the 0-7 byte tail.
	 * Clobbers %rax, %rcx, %rdi.
	 */
	movq	%rsi,%rcx
	xorl	%eax,%eax		/* fill pattern = 0 */
	shrq	$3,%rcx			/* quadword count */
	cld
	rep
	stosq
	movq	%rsi,%rcx
	andq	$7,%rcx			/* remaining 0-7 tail bytes */
	rep
	stosb
	ret
61
62/* Address: %rdi */
ENTRY(pagezero)
	/*
	 * pagezero(addr:%rdi) - zero one page.
	 *
	 * %rdx runs from -PAGE_SIZE up to 0 while %rdi is biased to
	 * point one page past 'addr', so (%rdi,%rdx) walks the page
	 * and the addq sets ZF exactly when the page is finished.
	 */
	movq	$-PAGE_SIZE,%rdx
	subq	%rdx,%rdi		/* %rdi = addr + PAGE_SIZE */
	xorl	%eax,%eax
1:
	movq	%rax,(%rdi,%rdx)	/* movnti */
	movq	%rax,8(%rdi,%rdx)	/* movnti */
	movq	%rax,16(%rdi,%rdx)	/* movnti */
	movq	%rax,24(%rdi,%rdx)	/* movnti */
	addq	$32,%rdx
	jne	1b
	/*sfence*/
	ret
76
ENTRY(bcmp)
	/*
	 * bcmp(b1:%rdi, b2:%rsi, len:%rdx)
	 *
	 * Returns 0 if the two buffers are identical, non-zero if
	 * they differ.  Compares quadwords, then the 0-7 byte tail.
	 */
	movq	%rdx,%rcx
	shrq	$3,%rcx
	cld					/* compare forwards */
	repe
	cmpsq
	jne	1f

	movq	%rdx,%rcx
	andq	$7,%rcx
	repe
	cmpsb
1:
	setne	%al				/* %al = 1 on mismatch */
	movsbl	%al,%eax
	ret
93
94/*
95 * bcopy(src, dst, cnt)
96 *       rdi, rsi, rdx
97 *  ws@tools.de     (Wolfgang Solfrank, TooLs GmbH) +49-228-985800
98 */
ENTRY(generic_bcopy)	/* generic_bcopy is bcopy without FPU */
ENTRY(ovbcopy) /* our bcopy doesn't use the FPU, so ovbcopy is the same */
ENTRY(bcopy)
	/*
	 * Overlap-safe copy: if src < dst and the regions overlap,
	 * copy backwards (tail first); otherwise copy forwards.
	 * The unsigned compare below catches exactly the case
	 * 0 <= dst - src < len.
	 */
	xchgq	%rsi,%rdi
	movq	%rdx,%rcx

	movq	%rdi,%rax
	subq	%rsi,%rax
	cmpq	%rcx,%rax			/* overlapping && src < dst? */
	jb	1f

	shrq	$3,%rcx				/* copy by 64-bit words */
	cld					/* nope, copy forwards */
	rep
	movsq
	movq	%rdx,%rcx
	andq	$7,%rcx				/* any bytes left? */
	rep
	movsb
	ret

	/* ALIGN_TEXT */
1:
	addq	%rcx,%rdi			/* copy backwards */
	addq	%rcx,%rsi
	decq	%rdi
	decq	%rsi
	andq	$7,%rcx				/* any fractional bytes? */
	std					/* reversed string ops */
	rep
	movsb
	movq	%rdx,%rcx			/* copy remainder by 64-bit words */
	shrq	$3,%rcx
	subq	$7,%rsi
	subq	$7,%rdi
	rep
	movsq
	cld					/* restore direction flag */
	ret
ENTRY(reset_dbregs)
	/* Disable hardware breakpoints and clear all debug registers. */
	movq	$0x200,%rax   /* the manual says that bit 10 must be set to 1 */
	movq    %rax,%dr7     /* disable all breakpoints first */
	movq    $0,%rax
	movq    %rax,%dr0
	movq    %rax,%dr1
	movq    %rax,%dr2
	movq    %rax,%dr3
	movq    %rax,%dr6
	ret
148
149/*
150 * Note: memcpy does not support overlapping copies
151 */
ENTRY(memcpy)
	/*
	 * memcpy(dst:%rdi, src:%rsi, len:%rdx)
	 *
	 * Non-overlapping forward copy: quadwords, then 0-7 tail bytes.
	 */
	movq	%rdx,%rcx
	shrq	$3,%rcx				/* copy by 64-bit words */
	cld					/* copy forwards */
	rep
	movsq
	movq	%rdx,%rcx
	andq	$7,%rcx				/* any bytes left? */
	rep
	movsb
	ret
163
164/*
165 * pagecopy(%rdi=from, %rsi=to)
166 */
ENTRY(pagecopy)
	/*
	 * Copy one page, 32 bytes per iteration.  Like pagezero,
	 * %rdx counts from -PAGE_SIZE to 0 while both pointers are
	 * biased one page past their buffers.
	 */
	movq	$-PAGE_SIZE,%rax
	movq	%rax,%rdx
	subq	%rax,%rdi		/* %rdi = from + PAGE_SIZE */
	subq	%rax,%rsi		/* %rsi = to + PAGE_SIZE */
1:
	/*prefetchnta (%rdi,%rax)*/
	/*addq	$64,%rax*/
	/*jne	1b*/
2:
	movq	(%rdi,%rdx),%rax
	movq	%rax,(%rsi,%rdx)	/* movnti */
	movq	8(%rdi,%rdx),%rax
	movq	%rax,8(%rsi,%rdx)	/* movnti */
	movq	16(%rdi,%rdx),%rax
	movq	%rax,16(%rsi,%rdx)	/* movnti */
	movq	24(%rdi,%rdx),%rax
	movq	%rax,24(%rsi,%rdx)	/* movnti */
	addq	$32,%rdx
	jne	2b
	/*sfence*/
	ret
189
190/* fillw(pat, base, cnt) */
191/*       %rdi,%rsi, %rdx */
ENTRY(fillw)
	/*
	 * fillw(pat:%rdi, base:%rsi, cnt:%rdx)
	 *
	 * Store the low 16 bits of 'pat' into 'cnt' words at 'base'.
	 */
	movq	%rdi,%rax		/* %ax = fill pattern */
	movq	%rsi,%rdi
	movq	%rdx,%rcx
	cld
	rep
	stosw
	ret
200
201/*****************************************************************************/
202/* copyout and fubyte family                                                 */
203/*****************************************************************************/
204/*
205 * Access user memory from inside the kernel. These routines should be
206 * the only places that do this.
207 *
208 * These routines set curpcb->onfault for the time they execute. When a
209 * protection violation occurs inside the functions, the trap handler
210 * returns to *curpcb->onfault instead of the function.
211 */
212
213/*
214 * std_copyout(from_kernel, to_user, len)  - MP SAFE
215 *         %rdi,        %rsi,    %rdx
216 */
ENTRY(std_copyout)
	/*
	 * Returns 0 on success, EFAULT on fault or if the target
	 * user address range is invalid.  Arms pcb_onfault so a
	 * protection fault unwinds to copyout_fault below.
	 */
	movq	PCPU(curthread),%rax
	movq	TD_PCB(%rax), %rax
	movq	$copyout_fault,PCB_ONFAULT(%rax)
	movq	%rsp,PCB_ONFAULT_SP(%rax)
	testq	%rdx,%rdx			/* anything to do? */
	jz	done_copyout

	/*
	 * Check explicitly for non-user addresses.  If 486 write protection
	 * is being used, this check is essential because we are in kernel
	 * mode so the h/w does not provide any protection against writing
	 * kernel addresses.
	 */

	/*
	 * First, prevent address wrapping.
	 */
	movq	%rsi,%rax
	addq	%rdx,%rax
	jc	copyout_fault
/*
 * XXX STOP USING VM_MAX_USER_ADDRESS.
 * It is an end address, not a max, so every time it is used correctly it
 * looks like there is an off by one error, and of course it caused an off
 * by one error in several places.
 */
	movq	$VM_MAX_USER_ADDRESS,%rcx
	cmpq	%rcx,%rax
	ja	copyout_fault

	xchgq	%rdi,%rsi
	/* bcopy(%rsi, %rdi, %rdx) */
	movq	%rdx,%rcx

	shrq	$3,%rcx				/* copy by 64-bit words */
	cld
	rep
	movsq
	movb	%dl,%cl
	andb	$7,%cl				/* remaining 0-7 bytes */
	rep
	movsb

done_copyout:
	xorl	%eax,%eax			/* return 0 */
	movq	PCPU(curthread),%rdx
	movq	TD_PCB(%rdx), %rdx
	movq	%rax,PCB_ONFAULT(%rdx)		/* disarm fault handler */
	ret

	ALIGN_TEXT
copyout_fault:
	/* Fault during copyout: disarm handler and return EFAULT. */
	movq	PCPU(curthread),%rdx
	movq	TD_PCB(%rdx), %rdx
	movq	$0,PCB_ONFAULT(%rdx)
	movq	$EFAULT,%rax
	ret
275
276/*
277 * std_copyin(from_user, to_kernel, len) - MP SAFE
278 *        %rdi,      %rsi,      %rdx
279 */
ENTRY(std_copyin)
	/*
	 * Returns 0 on success, EFAULT on fault or if the source
	 * user address range is invalid.  Arms pcb_onfault so a
	 * protection fault unwinds to copyin_fault below.
	 */
	movq	PCPU(curthread),%rax
	movq	TD_PCB(%rax), %rax
	movq	$copyin_fault,PCB_ONFAULT(%rax)
	movq	%rsp,PCB_ONFAULT_SP(%rax)
	testq	%rdx,%rdx			/* anything to do? */
	jz	done_copyin

	/*
	 * make sure address is valid (no wrap, below the user limit)
	 */
	movq	%rdi,%rax
	addq	%rdx,%rax
	jc	copyin_fault
	movq	$VM_MAX_USER_ADDRESS,%rcx
	cmpq	%rcx,%rax
	ja	copyin_fault

	xchgq	%rdi,%rsi
	movq	%rdx,%rcx
	movb	%cl,%al				/* stash low byte of count */
	shrq	$3,%rcx				/* copy longword-wise */
	cld
	rep
	movsq
	movb	%al,%cl
	andb	$7,%cl				/* copy remaining bytes */
	rep
	movsb

done_copyin:
	xorl	%eax,%eax			/* return 0 */
	movq	PCPU(curthread),%rdx
	movq	TD_PCB(%rdx), %rdx
	movq	%rax,PCB_ONFAULT(%rdx)		/* disarm fault handler */
	ret

	ALIGN_TEXT
copyin_fault:
	/* Fault during copyin: disarm handler and return EFAULT. */
	movq	PCPU(curthread),%rdx
	movq	TD_PCB(%rdx), %rdx
	movq	$0,PCB_ONFAULT(%rdx)
	movq	$EFAULT,%rax
	ret
324
325/*
326 * casuword32.  Compare and set user integer.  Returns -1 or the current value.
327 *        dst = %rdi, old = %rsi, new = %rdx
328 */
ENTRY(casuword32)
	/* On a fault, fusufault returns -1 instead. */
	movq	PCPU(curthread),%rcx
	movq	TD_PCB(%rcx), %rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)
	movq	%rsp,PCB_ONFAULT_SP(%rcx)

	movq	$VM_MAX_USER_ADDRESS-4,%rax
	cmpq	%rax,%rdi			/* verify address is valid */
	ja	fusufault

	movl	%esi,%eax			/* old */
	lock
	cmpxchgl %edx,(%rdi)			/* new = %edx */

	/*
	 * The old value is in %eax.  If the store succeeded it will be the
	 * value we expected (old) from before the store, otherwise it will
	 * be the current value.
	 */

	movq	PCPU(curthread),%rcx
	movq	TD_PCB(%rcx), %rcx
	movq	$0,PCB_ONFAULT(%rcx)		/* disarm fault handler */
	ret
353
354/*
355 * casuword.  Compare and set user word.  Returns -1 or the current value.
356 *        dst = %rdi, old = %rsi, new = %rdx
357 */
ENTRY(casuword)
	/* On a fault, fusufault returns -1 instead. */
	movq	PCPU(curthread),%rcx
	movq	TD_PCB(%rcx), %rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)
	movq	%rsp,PCB_ONFAULT_SP(%rcx)

	movq	$VM_MAX_USER_ADDRESS-8,%rax
	cmpq	%rax,%rdi			/* verify address is valid */
	ja	fusufault

	movq	%rsi,%rax			/* old */
	lock
	cmpxchgq %rdx,(%rdi)			/* new = %rdx */

	/*
	 * The old value is in %rax.  If the store succeeded it will be the
	 * value we expected (old) from before the store, otherwise it will
	 * be the current value.
	 */

	movq	PCPU(curthread),%rcx
	movq	TD_PCB(%rcx), %rcx
	movq	$0,PCB_ONFAULT(%rcx)		/* disarm fault handler */
	ret
382
383/*
384 * Fetch (load) a 64-bit word, a 32-bit word, a 16-bit word, or an 8-bit
385 * byte from user memory.  All these functions are MPSAFE.
386 * addr = %rdi
387 */
388
ALTENTRY(fuword64)
ENTRY(std_fuword)
	/* Fetch a 64-bit word from user space; -1 on fault. */
	movq	PCPU(curthread),%rcx
	movq	TD_PCB(%rcx), %rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)
	movq	%rsp,PCB_ONFAULT_SP(%rcx)

	movq	$VM_MAX_USER_ADDRESS-8,%rax
	cmpq	%rax,%rdi			/* verify address is valid */
	ja	fusufault

	movq	(%rdi),%rax
	movq	$0,PCB_ONFAULT(%rcx)		/* disarm fault handler */
	ret
403
ENTRY(fuword32)
	/* Fetch a 32-bit word from user space; -1 on fault. */
	movq	PCPU(curthread),%rcx
	movq	TD_PCB(%rcx), %rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)
	movq	%rsp,PCB_ONFAULT_SP(%rcx)

	movq	$VM_MAX_USER_ADDRESS-4,%rax
	cmpq	%rax,%rdi			/* verify address is valid */
	ja	fusufault

	movl	(%rdi),%eax			/* zero-extends into %rax */
	movq	$0,PCB_ONFAULT(%rcx)		/* disarm fault handler */
	ret
417
418/*
419 * fuswintr() and suswintr() are specialized variants of fuword16() and
420 * suword16(), respectively.  They are called from the profiling code,
421 * potentially at interrupt time.  If they fail, that's okay; good things
422 * will happen later.  They always fail for now, until the trap code is
423 * able to deal with this.
424 */
ALTENTRY(suswintr)
ENTRY(fuswintr)
	/* Always fail (-1) for now; see the comment above. */
	movq	$-1,%rax
	ret
429
ENTRY(fuword16)
	/* Fetch a 16-bit word from user space; -1 on fault. */
	movq	PCPU(curthread),%rcx
	movq	TD_PCB(%rcx), %rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)
	movq	%rsp,PCB_ONFAULT_SP(%rcx)

	movq	$VM_MAX_USER_ADDRESS-2,%rax
	cmpq	%rax,%rdi			/* verify address is valid */
	ja	fusufault

	movzwl	(%rdi),%eax			/* zero-extend u16 -> %rax */
	movq	$0,PCB_ONFAULT(%rcx)		/* disarm fault handler */
	ret
443
ENTRY(std_fubyte)
	/* Fetch a byte from user space; -1 on fault. */
	movq	PCPU(curthread),%rcx
	movq	TD_PCB(%rcx), %rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)
	movq	%rsp,PCB_ONFAULT_SP(%rcx)

	movq	$VM_MAX_USER_ADDRESS-1,%rax
	cmpq	%rax,%rdi			/* verify address is valid */
	ja	fusufault

	movzbl	(%rdi),%eax			/* zero-extend u8 -> %rax */
	movq	$0,PCB_ONFAULT(%rcx)		/* disarm fault handler */
	ret
457
	ALIGN_TEXT
fusufault:
	/*
	 * Common fault target for the fuword/suword family:
	 * clear pcb_onfault and return -1.
	 */
	movq	PCPU(curthread),%rcx
	xorl	%eax,%eax
	movq	TD_PCB(%rcx), %rcx
	movq	%rax,PCB_ONFAULT(%rcx)
	decq	%rax				/* return -1 */
	ret
466
467/*
468 * Store a 64-bit word, a 32-bit word, a 16-bit word, or an 8-bit byte to
469 * user memory.  All these functions are MPSAFE.
470 *
471 * addr = %rdi, value = %rsi
472 *
473 * Write a long
474 */
ALTENTRY(suword64)
ENTRY(std_suword)
	/* Store a 64-bit word to user space; 0 on success, -1 on fault. */
	movq	PCPU(curthread),%rcx
	movq	TD_PCB(%rcx), %rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)
	movq	%rsp,PCB_ONFAULT_SP(%rcx)

	movq	$VM_MAX_USER_ADDRESS-8,%rax
	cmpq	%rax,%rdi			/* verify address validity */
	ja	fusufault

	movq	%rsi,(%rdi)
	xorl	%eax,%eax			/* return 0 */
	movq	PCPU(curthread),%rcx
	movq	TD_PCB(%rcx), %rcx
	movq	%rax,PCB_ONFAULT(%rcx)		/* disarm fault handler */
	ret
492
493/*
494 * Write an int
495 */
ENTRY(std_suword32)
	/* Store a 32-bit word to user space; 0 on success, -1 on fault. */
	movq	PCPU(curthread),%rcx
	movq	TD_PCB(%rcx), %rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)
	movq	%rsp,PCB_ONFAULT_SP(%rcx)

	movq	$VM_MAX_USER_ADDRESS-4,%rax
	cmpq	%rax,%rdi			/* verify address validity */
	ja	fusufault

	movl	%esi,(%rdi)
	xorl	%eax,%eax			/* return 0 */
	movq	PCPU(curthread),%rcx
	movq	TD_PCB(%rcx), %rcx
	movq	%rax,PCB_ONFAULT(%rcx)		/* disarm fault handler */
	ret
512
ENTRY(suword16)
	/* Store a 16-bit word to user space; 0 on success, -1 on fault. */
	movq	PCPU(curthread),%rcx
	movq	TD_PCB(%rcx), %rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)
	movq	%rsp,PCB_ONFAULT_SP(%rcx)

	movq	$VM_MAX_USER_ADDRESS-2,%rax
	cmpq	%rax,%rdi			/* verify address validity */
	ja	fusufault

	movw	%si,(%rdi)
	xorl	%eax,%eax			/* return 0 */
	movq	PCPU(curthread),%rcx		/* restore trashed register */
	movq	TD_PCB(%rcx), %rcx
	movq	%rax,PCB_ONFAULT(%rcx)		/* disarm fault handler */
	ret
529
ENTRY(std_subyte)
	/* Store a byte to user space; 0 on success, -1 on fault. */
	movq	PCPU(curthread),%rcx
	movq	TD_PCB(%rcx), %rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)
	movq	%rsp,PCB_ONFAULT_SP(%rcx)

	movq	$VM_MAX_USER_ADDRESS-1,%rax
	cmpq	%rax,%rdi			/* verify address validity */
	ja	fusufault

	movl	%esi,%eax
	movb	%al,(%rdi)
	xorl	%eax,%eax			/* return 0 */
	movq	PCPU(curthread),%rcx		/* restore trashed register */
	movq	TD_PCB(%rcx), %rcx
	movq	%rax,PCB_ONFAULT(%rcx)		/* disarm fault handler */
	ret
547
548/*
549 * std_copyinstr(from, to, maxlen, int *lencopied) - MP SAFE
550 *           %rdi, %rsi, %rdx, %rcx
551 *
552 *	copy a string from from to to, stop when a 0 character is reached.
553 *	return ENAMETOOLONG if string is longer than maxlen, and
554 *	EFAULT on protection violations. If lencopied is non-zero,
555 *	return the actual length in *lencopied.
556 */
ENTRY(std_copyinstr)
	/*
	 * Copy a NUL-terminated string from user space, faulting to
	 * cpystrflt (EFAULT).  Returns 0, ENAMETOOLONG, or EFAULT;
	 * optionally stores bytes-copied (incl. NUL) via %r9.
	 */
	movq	%rdx,%r8			/* %r8 = maxlen */
	movq	%rcx,%r9			/* %r9 = *len */
	xchgq	%rdi,%rsi			/* %rdi = from, %rsi = to */
	movq	PCPU(curthread),%rcx
	movq	TD_PCB(%rcx), %rcx
	movq	$cpystrflt,PCB_ONFAULT(%rcx)
	movq	%rsp,PCB_ONFAULT_SP(%rcx)

	movq	$VM_MAX_USER_ADDRESS,%rax

	/* make sure 'from' is within bounds */
	subq	%rsi,%rax
	jbe	cpystrflt

	/* restrict maxlen to <= VM_MAX_USER_ADDRESS-from */
	cmpq	%rdx,%rax
	jae	1f
	movq	%rax,%rdx
	movq	%rax,%r8
1:
	incq	%rdx
	cld

2:
	decq	%rdx
	jz	3f

	lodsb
	stosb
	orb	%al,%al				/* NUL terminator reached? */
	jnz	2b

	/* Success -- 0 byte reached */
	decq	%rdx
	xorl	%eax,%eax			/* return 0 */
	jmp	cpystrflt_x
3:
	/* rdx is zero - return ENAMETOOLONG or EFAULT */
	movq	$VM_MAX_USER_ADDRESS,%rax
	cmpq	%rax,%rsi
	jae	cpystrflt
4:
	movq	$ENAMETOOLONG,%rax
	jmp	cpystrflt_x

cpystrflt:
	movq	$EFAULT,%rax

cpystrflt_x:
	/* set *lencopied and return %eax */
	movq	PCPU(curthread),%rcx
	movq	TD_PCB(%rcx), %rcx
	movq	$0,PCB_ONFAULT(%rcx)		/* disarm fault handler */

	testq	%r9,%r9
	jz	1f
	subq	%rdx,%r8			/* bytes actually copied */
	movq	%r8,(%r9)
1:
	ret
618
619
620/*
621 * copystr(from, to, maxlen, int *lencopied) - MP SAFE
622 *         %rdi, %rsi, %rdx, %rcx
623 */
ENTRY(copystr)
	/*
	 * Kernel-to-kernel string copy; no fault protection.
	 * Returns 0 or ENAMETOOLONG; optionally stores bytes
	 * copied (incl. NUL) via %rcx.
	 */
	movq	%rdx,%r8			/* %r8 = maxlen */

	xchgq	%rdi,%rsi
	incq	%rdx
	cld
1:
	decq	%rdx
	jz	4f
	lodsb
	stosb
	orb	%al,%al				/* NUL terminator reached? */
	jnz	1b

	/* Success -- 0 byte reached */
	decq	%rdx
	xorl	%eax,%eax			/* return 0 */
	jmp	6f
4:
	/* rdx is zero -- return ENAMETOOLONG */
	movq	$ENAMETOOLONG,%rax

6:

	testq	%rcx,%rcx
	jz	7f
	/* set *lencopied and return %rax */
	subq	%rdx,%r8			/* bytes actually copied */
	movq	%r8,(%rcx)
7:
	ret
655
656/*
657 * Handling of special x86_64 registers and descriptor tables etc
658 * %rdi
659 */
660/* void lgdt(struct region_descriptor *rdp); */
ENTRY(lgdt)
	/*
	 * Load a new GDT and reload every segment register; %cs is
	 * reloaded by converting our return into a far (lretq) return.
	 */
	/* reload the descriptor table */
	lgdt	(%rdi)

	/* flush the prefetch q */
	jmp	1f
	nop
1:
	movl	$KDSEL,%eax
	movl	%eax,%ds
	movl	%eax,%es
	movl	%eax,%fs	/* Beware, use wrmsr to set 64 bit base */
	movl	%eax,%gs	/* Beware, use wrmsr to set 64 bit base */
	movl	%eax,%ss

	/* reload code selector by turning return into intersegmental return */
	popq	%rax
	pushq	$KCSEL
	pushq	%rax
	MEXITCOUNT
	lretq
682
683/*****************************************************************************/
684/* setjmp, longjmp                                                           */
685/*****************************************************************************/
686
ENTRY(setjmp)
	/*
	 * Save the callee-saved registers, stack pointer, and return
	 * address into the jmp_buf at %rdi; returns 0 to the caller.
	 */
	movq	%rbx,0(%rdi)			/* save rbx */
	movq	%rsp,8(%rdi)			/* save rsp */
	movq	%rbp,16(%rdi)			/* save rbp */
	movq	%r12,24(%rdi)			/* save r12 */
	movq	%r13,32(%rdi)			/* save r13 */
	movq	%r14,40(%rdi)			/* save r14 */
	movq	%r15,48(%rdi)			/* save r15 */
	movq	0(%rsp),%rdx			/* get rta */
	movq	%rdx,56(%rdi)			/* save rip */
	xorl	%eax,%eax			/* return(0); */
	ret
699
ENTRY(longjmp)
	/*
	 * Restore the state saved by setjmp from the jmp_buf at %rdi
	 * and resume after the matching setjmp call, returning 1.
	 */
	movq	0(%rdi),%rbx			/* restore rbx */
	movq	8(%rdi),%rsp			/* restore rsp */
	movq	16(%rdi),%rbp			/* restore rbp */
	movq	24(%rdi),%r12			/* restore r12 */
	movq	32(%rdi),%r13			/* restore r13 */
	movq	40(%rdi),%r14			/* restore r14 */
	movq	48(%rdi),%r15			/* restore r15 */
	movq	56(%rdi),%rdx			/* get rta */
	movq	%rdx,0(%rsp)			/* put in return frame */
	xorl	%eax,%eax			/* return(1); */
	incl	%eax
	ret
713
714/*
715 * Support for reading MSRs in the safe manner.
716 */
ENTRY(rdmsr_safe)
/* int rdmsr_safe(u_int msr, uint64_t *data) */
	/*
	 * Read an MSR with fault protection (msr_onfault returns
	 * EFAULT if the rdmsr GPs).  Returns 0 on success with the
	 * value stored at *data.
	 */
	movq	PCPU(curthread),%r8
	movq	TD_PCB(%r8), %r8
	movq	$msr_onfault,PCB_ONFAULT(%r8)
	movq	%rsp,PCB_ONFAULT_SP(%r8)
	movl	%edi,%ecx
	rdmsr			/* Read MSR pointed by %ecx. Returns
				   high 32 bits in %edx, low in %eax */
	salq	$32,%rdx	/* shift %rdx into the high half */
	movl	%eax,%eax	/* zero-extend %eax -> %rax */
	orq	%rdx,%rax
	movq	%rax,(%rsi)
	xorq	%rax,%rax	/* return 0 */
	movq	%rax,PCB_ONFAULT(%r8)
	ret
733
734/*
735 * Support for writing MSRs in the safe manner.
736 */
ENTRY(wrmsr_safe)
/* int wrmsr_safe(u_int msr, uint64_t data) */
	/*
	 * Write an MSR with fault protection (msr_onfault returns
	 * EFAULT if the wrmsr GPs).  %edi = msr, %rsi = value.
	 * Returns 0 on success.
	 */
	movq	PCPU(curthread),%r8
	movq	TD_PCB(%r8), %r8
	movq	$msr_onfault,PCB_ONFAULT(%r8)
	movq    %rsp,PCB_ONFAULT_SP(%r8)	/* was (%rcx): %rcx is
						   uninitialized here */
	movl	%edi,%ecx
	movl	%esi,%eax	/* low 32 bits of value */
	sarq	$32,%rsi
	movl	%esi,%edx	/* high 32 bits of value */
	wrmsr			/* Write MSR pointed by %ecx. Accepts
				   high 32 bits in %edx, low in %eax. */
	xorq	%rax,%rax	/* return 0 */
	movq	%rax,PCB_ONFAULT(%r8)
	ret
752
753/*
754 * MSR operations fault handler
755 */
	ALIGN_TEXT
msr_onfault:
	/* Fault target for rdmsr_safe/wrmsr_safe: return EFAULT. */
	movq	PCPU(curthread),%r8
	movq	TD_PCB(%r8), %r8
	movq	$0,PCB_ONFAULT(%r8)		/* disarm fault handler */
	movl	$EFAULT,%eax
	ret
763
764/*
765 * Support for BB-profiling (gcc -a).  The kernbb program will extract
766 * the data from the kernel.
767 */
768
	.data
	ALIGN_DATA
	.globl bbhead
bbhead:				/* head of the basic-block record list */
	.quad 0

	.text
NON_GPROF_ENTRY(__bb_init_func)
	/*
	 * BB-profiling (gcc -a) constructor: mark the record at %rdi
	 * initialized and push it onto the global bbhead list.
	 */
	movq	$1,(%rdi)		/* record->zero_word = 1 */
	movq	bbhead,%rax
	movq	%rax,32(%rdi)		/* record->next = bbhead */
	movq	%rdi,bbhead
	NON_GPROF_RET
782